#!/usr/bin/env python3
"""
Build the Linear A Phonotactics Validation dataset.
Criteria for inclusion:
1. Both Lang_A and Lang_B have Linear_A_Score >= 0.80
(open syllable ratio, no clusters, final vowels, CV structure)
2. Confidence = "certain" (expert-determined, not contested)
3. Phono_Quality = "strong" (SCA Score >= 0.5, clear phonological similarity)
This produces a gold-standard dataset of proven cognate pairs between
languages whose phonotactics resemble Linear A. Used to validate
cognate detection models before applying them to Linear A.
Input:
- analysis/typology_linear_a.tsv (Linear A similarity scores per language)
- data/training/cognate_pairs/cognate_pairs_inherited.parquet (full dataset)
Output:
- data/training/cognate_pairs/linear_a_phonotactics_validation.parquet
"""
import csv
import sys
from collections import Counter
from pathlib import Path
# Ensure stdout is UTF-8 so non-ASCII language names print on any platform.
# Compare case-insensitively: Windows commonly reports 'UTF-8'/'cp1252', and
# sys.stdout.encoding can be None when stdout is replaced — tolerate both.
if (sys.stdout.encoding or '').lower() != 'utf-8':
    sys.stdout.reconfigure(encoding='utf-8')

# Minimum Linear_A_Score for a language to qualify for the validation set.
THRESHOLD = 0.80
def main():
    """Build and write the Linear A phonotactics validation dataset.

    Loads per-language Linear A similarity scores, filters the full
    inherited cognate-pair Parquet down to pairs where both languages
    score >= THRESHOLD and the pair is expert-certain with strong
    phonological quality, appends both languages' scores as new columns,
    writes the result, and prints summary statistics.
    """
    hf_dir = Path(__file__).parent.parent

    # 1. Load per-language Linear A similarity scores from the typology TSV.
    print('Loading Linear A typology scores...')
    lang_scores = {}
    with open(hf_dir / 'analysis' / 'typology_linear_a.tsv', encoding='utf-8') as f:
        for row in csv.DictReader(f, delimiter='\t'):
            lang_scores[row['Language']] = float(row['Linear_A_Score'])
    qualified_langs = {l for l, s in lang_scores.items() if s >= THRESHOLD}
    print(f' Languages with score >= {THRESHOLD}: {len(qualified_langs)}')

    # 2. Load the full inherited cognate-pair table.
    # pyarrow is imported lazily so the (slow) import cost is only paid when
    # the script actually runs.
    import pyarrow as pa
    import pyarrow.parquet as pq
    print('Loading full inherited Parquet...')
    table = pq.read_table(
        hf_dir / 'data' / 'training' / 'cognate_pairs' / 'cognate_pairs_inherited.parquet'
    )
    print(f' Total rows: {table.num_rows:,}')

    # 3. Filter: both languages qualified, certain confidence, strong phono.
    print('Filtering...')
    lang_a = table['Lang_A'].to_pylist()
    lang_b = table['Lang_B'].to_pylist()
    conf = table['Confidence'].to_pylist()
    phono = table['Phono_Quality'].to_pylist()
    mask = [
        a in qualified_langs
        and b in qualified_langs
        and c == 'certain'
        and p == 'strong'
        for a, b, c, p in zip(lang_a, lang_b, conf, phono)
    ]
    filtered = table.filter(pa.array(mask))
    print(f' Filtered rows: {filtered.num_rows:,}')

    # 4. Append each language's Linear_A_Score as a new column.
    # Every surviving language passed the qualified_langs membership test, so
    # it is guaranteed to be in lang_scores; 0.0 is a pure safety default.
    scores_a = [lang_scores.get(la, 0.0) for la, m in zip(lang_a, mask) if m]
    scores_b = [lang_scores.get(lb, 0.0) for lb, m in zip(lang_b, mask) if m]
    filtered = filtered.append_column(
        'Linear_A_Score_A', pa.array([round(s, 4) for s in scores_a], type=pa.float64())
    )
    filtered = filtered.append_column(
        'Linear_A_Score_B', pa.array([round(s, 4) for s in scores_b], type=pa.float64())
    )

    # 5. Write the output Parquet (zstd keeps it small, level 3 is fast).
    out_path = hf_dir / 'data' / 'training' / 'cognate_pairs' / 'linear_a_phonotactics_validation.parquet'
    pq.write_table(filtered, str(out_path), compression='zstd', compression_level=3)
    size = out_path.stat().st_size
    print(f'\n Written to: {out_path}')
    print(f' Size: {size/1024/1024:.1f} MB')

    # 6. Statistics.
    print('\n=== VALIDATION DATASET STATISTICS ===')
    print(f'Total pairs: {filtered.num_rows:,}')

    # Unique languages across both sides of the pairs.
    fa = filtered['Lang_A'].to_pylist()
    fb = filtered['Lang_B'].to_pylist()
    langs = set(fa) | set(fb)
    print(f'Unique languages: {len(langs)}')

    # ISO code -> family lookup from the language metadata table.
    t2 = pq.read_table(str(hf_dir / 'data' / 'training' / 'metadata' / 'languages.parquet'))
    iso_to_family = dict(zip(t2['ISO'].to_pylist(), t2['Family'].to_pylist()))

    # Family distribution (one count per unique language, not per pair).
    fam_counts = Counter(iso_to_family.get(l, 'unknown') for l in langs)
    print('\nLanguage families:')
    for fam, c in fam_counts.most_common():
        print(f' {fam}: {c}')

    # Source distribution over all kept pairs.
    src_counts = Counter(filtered['Source'].to_pylist())
    print('\nSource distribution:')
    for src, c in src_counts.most_common():
        print(f' {src}: {c:,}')

    # Score distribution. Score appears to be stored as strings, with
    # '' / '-1' marking missing values — TODO confirm against the schema.
    scores = filtered['Score'].to_pylist()
    float_scores = [float(s) for s in scores if s and s != '-1']
    if float_scores:
        print(f'\nSCA Score distribution:')
        print(f' Min: {min(float_scores):.4f}')
        print(f' Max: {max(float_scores):.4f}')
        print(f' Mean: {sum(float_scores)/len(float_scores):.4f}')
        # Last bin's upper bound is 1.01 so that a perfect 1.0 is included.
        bins = [(0.5, 0.6), (0.6, 0.7), (0.7, 0.8), (0.8, 0.9), (0.9, 1.01)]
        for lo, hi in bins:
            n = sum(1 for s in float_scores if lo <= s < hi)
            print(f' [{lo:.1f}, {hi:.1f}): {n:,}')

    # ISO code -> human-readable name from the Glottolog CLDF dump.
    iso_to_name = {}
    glot_path = hf_dir / 'data' / 'training' / 'raw' / 'glottolog_cldf' / 'languages.csv'
    with open(glot_path, encoding='utf-8') as f:
        for row in csv.DictReader(f):
            iso = row.get('ISO639P3code', '').strip()
            name = row.get('Name', '').strip()
            if iso and name:
                iso_to_name[iso] = name

    # Top 20 languages by pair count (each pair credits both languages).
    lang_pair_counts = Counter()
    for a, b in zip(fa, fb):
        lang_pair_counts[a] += 1
        lang_pair_counts[b] += 1
    print(f'\nTop 20 languages by pair count:')
    for lang, c in lang_pair_counts.most_common(20):
        name = iso_to_name.get(lang, '?')
        fam = iso_to_family.get(lang, '?')
        score = lang_scores.get(lang, 0)
        print(f' {lang} ({name}) - {fam} - LA score {score:.4f} - {c:,} pairs')
# Standard entry guard: run the build only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
|