# Source: ancient-scripts-datasets / scripts / analyze_typology_linear_a.py
# Author: Alvin — commit f304b7a: "Add Linear A Phonotactics Validation dataset"
#!/usr/bin/env python3
"""
Typological analysis: rank languages by similarity to Linear A phonotactic profile.
Linear A characteristics (from syllabary structure and linguistic analysis):
1. Open syllables (CV, V) — the syllabary is fundamentally CV-based
2. Limited/no consonant clusters — syllabary cannot represent CC sequences
3. Likely agglutinative morphology — observed prefix/suffix patterns
4. Simple vowel system — Linear B (descended from A) has 5 vowels (a,e,i,o,u)
5. Moderate consonant inventory
6. Word-final vowels predominate (open-syllable language)
For each language with IPA data, we compute:
- open_syllable_ratio: % of syllables that are open (end in vowel)
- cluster_ratio: % of words containing consonant clusters (CC+)
- mean_word_length: average number of segments per word
- final_vowel_ratio: % of words ending in a vowel
- cv_ratio: ratio of C to V segments (Linear A-like ≈ 1.0-1.5)
A composite "Linear A similarity score" ranks languages.
Input: cognate_pairs_phono_filtered.parquet (only phonologically reliable pairs)
Output: analysis/typology_linear_a.tsv
"""
import csv
import os
import sys
import unicodedata
from collections import defaultdict
from pathlib import Path

# Force UTF-8 output so IPA characters print correctly on consoles with a
# different default encoding (e.g. cp1252 on Windows).  `sys.stdout.encoding`
# may be None (redirected stream) or spelled 'UTF-8', and a replaced stream
# may lack reconfigure() — guard against all three.
if (sys.stdout.encoding or '').lower() != 'utf-8' and hasattr(sys.stdout, 'reconfigure'):
    sys.stdout.reconfigure(encoding='utf-8')
# ── IPA Classification ──
# Monophthong vowel symbols; membership is tested on the lowercased char.
VOWELS = set('aeiouyɑæɐəɛɪɨɔʊʉɯøœɤɒʌɜɞɵɘ')
# Include nasalized vowels (base char is vowel + combining tilde)
# Pulmonic obstruents, nasals, liquids, glides, and one-codepoint affricates.
CONSONANTS = set(
    'pbtdkgqɢʔcɟʈɖfvszʃʒxɣhɦθðçʝχʁħʕɸβʂʐɬɮ'
    'mnŋɲɳɴɱ'
    'lrɾɹɻʎɭʟɽ'
    'wjʋɰ'
    'ʦʧʤʣɕʑ'
)
# Stress/length marks, syllable dots, secondary-articulation superscripts,
# brackets, and separators that carry no segmental value of their own.
SKIP_CHARS = set('ˈˌːˑ.ˤʰʷʲ̃ᵊ⁼ˀ‿ʼ()[]{}/ \t-')


def classify_segments(ipa: str) -> list:
    """Convert IPA string to list of (segment, type) where type is 'V' or 'C'.

    Unknown characters (tone letters, digits, …) are silently dropped.

    NFD (not NFC) decomposition is essential here: it splits precomposed
    nasalized vowels such as 'ã' into base vowel + combining tilde, so the
    vowel is classified and the tilde is skipped as a combining mark.  NFC
    would compose them into single characters absent from VOWELS, silently
    losing those segments.
    """
    if not ipa or ipa == '-':
        return []
    ipa = unicodedata.normalize('NFD', ipa)
    segments = []
    for ch in ipa:
        base = ch.lower()
        cat = unicodedata.category(ch)
        # Skip combining marks, suprasegmentals, brackets, whitespace
        if cat.startswith('M') or ch in SKIP_CHARS or cat == 'Zs':
            continue
        if base in VOWELS:
            segments.append((ch, 'V'))
        elif base in CONSONANTS:
            segments.append((ch, 'C'))
        # Skip unknown (tone marks, numbers, etc.)
    return segments
def compute_word_stats(ipa: str) -> dict:
    """Compute phonotactic statistics for a single IPA word.

    Returns a dict of per-word counts and flags, or None when the word
    yields no classifiable segments.
    """
    segs = classify_segments(ipa)
    if not segs:
        return None
    pattern = ''.join(kind for _, kind in segs)
    vowel_count = pattern.count('V')
    cons_count = pattern.count('C')
    n_total = len(pattern)
    if n_total == 0:
        return None
    # Greedy syllabification: a syllable closes at every vowel, so each
    # syllable is C*V; any trailing consonants become the coda of the last
    # syllable.  A syllable counts as "open" when it ends in a vowel.
    sylls = []
    buf = ''
    for kind in pattern:
        buf += kind
        if kind == 'V':
            sylls.append(buf)
            buf = ''
    if buf:
        # Leftover consonants: attach as coda, or stand alone if no vowel seen
        if sylls:
            sylls[-1] += buf
        else:
            sylls.append(buf)
    return {
        'n_segments': n_total,
        'n_vowels': vowel_count,
        'n_consonants': cons_count,
        'n_syllables': len(sylls),
        'open_syllables': sum(1 for syl in sylls if syl.endswith('V')),
        # Any CC run means the word has a consonant cluster
        'has_cluster': 'CC' in pattern,
        'final_vowel': pattern.endswith('V'),
        # C:V ratio; a vowel-less word gets +inf (filtered out by callers)
        'cv_ratio': cons_count / vowel_count if vowel_count else float('inf'),
    }
def main():
    """Load cognate pairs, compute per-language phonotactic profiles, score
    each language's similarity to Linear A, and write a ranked TSV report."""
    hf_dir = Path(__file__).parent.parent
    parquet_path = hf_dir / 'data' / 'training' / 'cognate_pairs' / 'cognate_pairs_phono_filtered.parquet'
    # Use pyarrow to stream efficiently; imported here so the module can be
    # imported without pyarrow installed.
    import pyarrow.parquet as pq
    print('Loading filtered Parquet...')
    table = pq.read_table(parquet_path, columns=['Lang_A', 'IPA_A', 'Lang_B', 'IPA_B'])
    print(f' {table.num_rows:,} pairs')

    # Collect unique (language, ipa) entries.
    # We need per-language IPA forms — extract from both A and B sides.
    print('Extracting per-language IPA forms...')
    lang_forms = defaultdict(set)  # lang → set of IPA forms
    lang_a = table['Lang_A'].to_pylist()
    ipa_a = table['IPA_A'].to_pylist()
    lang_b = table['Lang_B'].to_pylist()
    ipa_b = table['IPA_B'].to_pylist()
    for la, ia, lb, ib in zip(lang_a, ipa_a, lang_b, ipa_b):
        if ia and ia != '-':
            lang_forms[la].add(ia)
        if ib and ib != '-':
            lang_forms[lb].add(ib)
    # Free memory before the per-language pass
    del lang_a, ipa_a, lang_b, ipa_b, table
    print(f' {len(lang_forms):,} languages with IPA data')

    # Compute per-language statistics
    print('Computing phonotactic statistics...')
    results = []
    for lang, forms in sorted(lang_forms.items()):
        if len(forms) < 5:  # Skip languages with too few forms
            continue
        total_words = 0
        total_segments = 0
        total_syllables = 0
        total_open = 0
        total_with_cluster = 0
        total_final_vowel = 0
        total_cv_ratios = []
        for ipa in forms:
            stats = compute_word_stats(ipa)
            if stats is None:
                continue
            total_words += 1
            total_segments += stats['n_segments']
            total_syllables += stats['n_syllables']
            total_open += stats['open_syllables']
            if stats['has_cluster']:
                total_with_cluster += 1
            if stats['final_vowel']:
                total_final_vowel += 1
            if stats['cv_ratio'] != float('inf'):
                total_cv_ratios.append(stats['cv_ratio'])
        if total_words < 5:
            continue
        open_syl_ratio = total_open / total_syllables if total_syllables > 0 else 0
        cluster_ratio = total_with_cluster / total_words
        final_vowel_ratio = total_final_vowel / total_words
        mean_word_len = total_segments / total_words
        mean_cv_ratio = sum(total_cv_ratios) / len(total_cv_ratios) if total_cv_ratios else 0

        # ── Linear A Similarity Score ──
        # Components (each 0-1, higher = more Linear A-like):
        # 1. Open syllables: the syllabary is CV-based → high ratio expected.
        # 2. No clusters: the script cannot write CC → low frequency expected.
        # 3. Final vowels: open-syllable languages end words with vowels.
        # 4. CV ratio near 1.2: pure CV language ≈ 1.0, some CVC allowed →
        #    ideal 1.0-1.5; score = 1 - min(|cv_ratio - 1.2| / 1.5, 1.0).
        # 5. Word length near 6 segments: agglutinative languages have longer
        #    words (5-10) than isolating ones (2-3); score peaks at ≈ 5-7.
        s_open = open_syl_ratio
        s_nocluster = 1.0 - cluster_ratio
        s_finalv = final_vowel_ratio
        s_cvratio = 1.0 - min(abs(mean_cv_ratio - 1.2) / 1.5, 1.0)
        s_wordlen = 1.0 - min(abs(mean_word_len - 6.0) / 6.0, 1.0)
        # Weighted composite (open syllables and no-clusters are most diagnostic)
        linear_a_score = (
            0.30 * s_open +
            0.25 * s_nocluster +
            0.20 * s_finalv +
            0.15 * s_cvratio +
            0.10 * s_wordlen
        )
        results.append({
            'Language': lang,
            'N_Forms': total_words,
            'Open_Syllable_Ratio': round(open_syl_ratio, 4),
            'Cluster_Ratio': round(cluster_ratio, 4),
            'Final_Vowel_Ratio': round(final_vowel_ratio, 4),
            'Mean_Word_Length': round(mean_word_len, 2),
            'Mean_CV_Ratio': round(mean_cv_ratio, 3),
            'Score_Open': round(s_open, 4),
            'Score_NoCluster': round(s_nocluster, 4),
            'Score_FinalVowel': round(s_finalv, 4),
            'Score_CVRatio': round(s_cvratio, 4),
            'Score_WordLen': round(s_wordlen, 4),
            'Linear_A_Score': round(linear_a_score, 4),
        })

    # Sort by Linear A similarity, best first
    results.sort(key=lambda x: -x['Linear_A_Score'])

    # Write output
    out_dir = hf_dir / 'analysis'
    out_dir.mkdir(exist_ok=True)
    out_path = out_dir / 'typology_linear_a.tsv'
    COLUMNS = [
        'Language', 'N_Forms', 'Open_Syllable_Ratio', 'Cluster_Ratio',
        'Final_Vowel_Ratio', 'Mean_Word_Length', 'Mean_CV_Ratio',
        'Score_Open', 'Score_NoCluster', 'Score_FinalVowel',
        'Score_CVRatio', 'Score_WordLen', 'Linear_A_Score',
    ]
    with open(out_path, 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=COLUMNS, delimiter='\t')
        writer.writeheader()
        for row in results:
            writer.writerow(row)
    print(f'\nWrote {len(results)} languages to {out_path}')

    # Print top 50
    print(f'\n{"="*100}')
    print('TOP 50 LANGUAGES BY LINEAR A TYPOLOGICAL SIMILARITY')
    print(f'{"="*100}')
    print(f'{"Rank":>4} {"Lang":>8} {"Score":>6} {"OpenSyl":>8} {"NoClustr":>8} {"FinalV":>8} {"CVRatio":>8} {"WordLen":>8} {"N_Forms":>8}')
    print(f'{"-"*4:>4} {"-"*8:>8} {"-"*6:>6} {"-"*8:>8} {"-"*8:>8} {"-"*8:>8} {"-"*8:>8} {"-"*8:>8} {"-"*8:>8}')
    for i, r in enumerate(results[:50], 1):
        print(f'{i:>4} {r["Language"]:>8} {r["Linear_A_Score"]:.4f} '
              f'{r["Open_Syllable_Ratio"]:.4f} {1-r["Cluster_Ratio"]:.4f} '
              f'{r["Final_Vowel_Ratio"]:.4f} {r["Mean_CV_Ratio"]:.3f} '
              f'{r["Mean_Word_Length"]:.2f} {r["N_Forms"]:>6}')

    # Print bottom 10 for contrast.  Compute the rank of the first tail entry
    # from the actual tail length so ranks stay correct (the original
    # `len(results) - 9` went negative/wrong when fewer than 10 qualified).
    print('\n--- BOTTOM 10 (least Linear A-like) ---')
    tail = results[-10:]
    for i, r in enumerate(tail, len(results) - len(tail) + 1):
        print(f'{i:>4} {r["Language"]:>8} {r["Linear_A_Score"]:.4f} '
              f'{r["Open_Syllable_Ratio"]:.4f} {1-r["Cluster_Ratio"]:.4f} '
              f'{r["Final_Vowel_Ratio"]:.4f} {r["Mean_CV_Ratio"]:.3f} '
              f'{r["Mean_Word_Length"]:.2f} {r["N_Forms"]:>6}')

    # Language family distribution in top 50
    print('\n--- LANGUAGE FAMILIES IN TOP 50 ---')
    # We'll use a simple heuristic based on ISO codes — for a proper family
    # classification we'd need Glottolog, but let's at least note patterns


if __name__ == '__main__':
    main()