| |
| """ |
| Extract cognate pairs from 4 Tier 1 CLDF repositories. |
| |
| Sources: |
| 1. lexibank/iecor — IE-CoR (Indo-European Cognate Relationships) |
| Heggarty et al. 2024, Scientific Data (Nature) |
| License: CC-BY-4.0 |
| 2. lexibank/kitchensemitic — Kitchen et al. 2009, Proc. R. Soc. B |
| License: CC-BY-NC-4.0 |
| 3. lexibank/robbeetstriangulation — Robbeets et al. 2021, Nature |
| License: CC-BY-4.0 |
| 4. lexibank/savelyevturkic — Savelyev & Robbeets 2020, J. Language Evolution |
| License: CC-BY-4.0 |
| |
| All data extracted from CLDF CognateTable files. No data is hardcoded. |
| Output: 14-column TSV staging files per source. |
| """ |
| import csv |
| import os |
| import sys |
| import unicodedata |
| from collections import defaultdict |
| from itertools import combinations |
| from pathlib import Path |
|
|
| |
| |
| |
# Sound-Class Alignment (SCA) map, after List (2012): collapse IPA segments
# into coarse sound classes so cognate forms align on class, not phone.
# Keys are lowercase NFC code points; unmapped symbols are dropped upstream.
SCA_MAP = {
    # Vowels — merged into five classes by rough height/backness.
    'a': 'A', 'e': 'E', 'i': 'I', 'o': 'O', 'u': 'U',
    'ɑ': 'A', 'æ': 'A', 'ɐ': 'A', 'ə': 'E', 'ɛ': 'E',
    'ɪ': 'I', 'ɨ': 'I', 'ɔ': 'O', 'ʊ': 'U', 'ʉ': 'U',
    'ɯ': 'U', 'ø': 'O', 'œ': 'O', 'y': 'U', 'ɤ': 'O',
    'ɒ': 'O', 'ʌ': 'A',
    # Plosives — voicing is ignored (p/b → P, t/d → T, velars/uvulars → K).
    'p': 'P', 'b': 'P', 't': 'T', 'd': 'T', 'k': 'K', 'g': 'K',
    # U+0261 LATIN SMALL LETTER SCRIPT G: the canonical IPA voiced velar
    # stop. IPA-sourced transcriptions use it instead of ASCII 'g'; without
    # this entry those segments were silently dropped from SCA strings.
    'ɡ': 'K',
    'q': 'K', 'ɢ': 'K', 'ʔ': 'H', 'c': 'K', 'ɟ': 'K',
    'ʈ': 'T', 'ɖ': 'T',
    # Fricatives — labials with P, sibilants with S, dorsals with K,
    # laryngeals/pharyngeals with H, dentals with T.
    'f': 'P', 'v': 'P', 's': 'S', 'z': 'S', 'ʃ': 'S', 'ʒ': 'S',
    'x': 'K', 'ɣ': 'K', 'h': 'H', 'ɦ': 'H', 'θ': 'T', 'ð': 'T',
    'ç': 'K', 'ʝ': 'K', 'χ': 'K', 'ʁ': 'R', 'ħ': 'H', 'ʕ': 'H',
    'ɸ': 'P', 'β': 'P', 'ʂ': 'S', 'ʐ': 'S',
    # Alveolo-palatal sibilants (frequent in Turkic/Mongolic/Tungusic data).
    'ɕ': 'S', 'ʑ': 'S',
    # Nasals — all places of articulation collapse to N except labial M.
    'm': 'M', 'n': 'N', 'ŋ': 'N', 'ɲ': 'N', 'ɳ': 'N', 'ɴ': 'N',
    # Liquids.
    'l': 'L', 'r': 'R', 'ɾ': 'R', 'ɹ': 'R', 'ɻ': 'R', 'ɬ': 'L',
    'ɮ': 'L', 'ʎ': 'L', 'ɭ': 'L', 'ʟ': 'L',
    # Glides.
    'w': 'W', 'j': 'Y', 'ʋ': 'W', 'ɰ': 'W',
    # Affricate ligature characters — classed with the sibilants.
    'ʦ': 'S', 'ʧ': 'S', 'ʤ': 'S', 'ʣ': 'S',
}
|
|
def ipa_to_sca(ipa: str) -> str:
    """Map an IPA transcription onto its SCA sound-class string.

    Returns '-' for empty or placeholder ('-') input. Combining marks,
    suprasegmentals (stress, length), delimiters, tie bars, and spaces are
    stripped; any remaining symbol not present in SCA_MAP is silently
    dropped.
    """
    if not ipa or ipa == '-':
        return '-'

    # Normalize to NFC so precomposed characters match the map's keys.
    normalized = unicodedata.normalize('NFC', ipa)

    # Stress/length marks, bracketing, and tie bars carry no class info.
    ignored = 'ˈˌːˑ[]/()\u0361\u035c'

    classes = []
    for symbol in normalized:
        category = unicodedata.category(symbol)
        if category.startswith('M') or category == 'Zs' or symbol in ignored:
            continue
        sound_class = SCA_MAP.get(symbol.lower())
        if sound_class is not None:
            classes.append(sound_class)
    return ''.join(classes)
|
|
|
|
def sca_distance(sca_a: str, sca_b: str) -> float:
    """Similarity of two SCA strings via normalized weighted Levenshtein.

    Substitution costs 1.0, insertion/deletion 0.5 (gap penalty after
    List 2012). The edit distance is divided by the longer length and
    inverted, yielding a similarity in [0.0, 1.0], rounded to 4 places.
    Empty or placeholder ('-') inputs score 0.0.
    """
    if not sca_a or not sca_b or sca_a == '-' or sca_b == '-':
        return 0.0

    gap = 0.5
    rows, cols = len(sca_a), len(sca_b)

    # Rolling two-row dynamic program: prev holds row i-1, curr row i.
    prev = [j * gap for j in range(cols + 1)]
    for i in range(1, rows + 1):
        curr = [i * gap] + [0.0] * cols
        for j in range(1, cols + 1):
            substitute = prev[j - 1] + (0.0 if sca_a[i - 1] == sca_b[j - 1] else 1.0)
            curr[j] = min(prev[j] + gap, curr[j - 1] + gap, substitute)
        prev = curr

    # Empty strings were rejected above, so max(rows, cols) >= 1 here.
    longest = max(rows, cols)
    return round(1.0 - prev[cols] / longest, 4)
|
|
|
|
def _load_languages(cldf_dir):
    """Read cldf/languages.csv → (lang ID → ISO-or-ID map, lang ID → name map)."""
    lang_map = {}
    lang_names = {}
    with open(cldf_dir / 'languages.csv', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            lid = row['ID']
            iso = row.get('ISO639P3code', '')
            # Fall back to the CLDF-local language ID when no ISO code exists.
            lang_map[lid] = iso if iso else lid
            lang_names[lid] = row.get('Name', '')
    return lang_map, lang_names


def _choose_ipa(row):
    """Pick the best available transcription for one forms.csv row.

    Preference order: phonetic 'phon_form' (IE-CoR) > 'Phonemic' >
    'Form' when it differs from the raw 'Value' (i.e. looks processed) >
    whichever of Form/Value is non-empty.
    """
    phon = row.get('phon_form', '').strip()
    phonemic = row.get('Phonemic', '').strip()
    form_val = row.get('Form', '').strip()
    value_val = row.get('Value', '').strip()
    if phon:
        return phon
    if phonemic:
        return phonemic
    if form_val and form_val != value_val:
        return form_val
    return form_val if form_val else value_val


def _load_forms(cldf_dir, lang_map):
    """Read cldf/forms.csv → {form ID: {iso, word, ipa, concept, lang_id}}."""
    forms = {}
    with open(cldf_dir / 'forms.csv', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            fid = row['ID']
            lid = row['Language_ID']
            forms[fid] = {
                'iso': lang_map.get(lid, lid),
                'word': row.get('Value', '') or row.get('Form', ''),
                'ipa': _choose_ipa(row),
                'concept': row.get('Parameter_ID', ''),
                'lang_id': lid,
            }
    return forms


def _load_cognate_sets(cldf_dir, forms):
    """Read cldf/cognates.csv → {cognateset ID: [(form ID, doubt flag), ...]}.

    Rows whose Form_ID is absent from *forms* are skipped.
    """
    cogsets = defaultdict(list)
    with open(cldf_dir / 'cognates.csv', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            fid = row['Form_ID']
            if fid in forms:
                cogsets[row['Cognateset_ID']].append(
                    (fid, row.get('Doubt', 'false')))
    return cogsets


def load_cldf_source(repo_dir: str, source_name: str):
    """
    Load a CLDF repo and extract cognate pairs.

    Reads:
      - cldf/languages.csv → language ID → ISO mapping
      - cldf/forms.csv     → form ID → (language, word, IPA, concept)
      - cldf/cognates.csv  → form ID → cognate set membership

    Returns a list of 14-column row dicts (see write_staging_tsv).
    All data comes from the downloaded CSV files.
    """
    cldf_dir = Path(repo_dir) / 'cldf'

    lang_map, _lang_names = _load_languages(cldf_dir)
    forms = _load_forms(cldf_dir, lang_map)
    cogsets = _load_cognate_sets(cldf_dir, forms)

    pairs = []
    seen = set()
    for csid, members in cogsets.items():
        # A cognate set with fewer than two attested forms yields no pairs.
        if len(members) < 2:
            continue
        for (fid_a, doubt_a), (fid_b, doubt_b) in combinations(members, 2):
            fa = forms[fid_a]
            fb = forms[fid_b]

            # Skip within-language pairs (dialect variants, doublets).
            if fa['iso'] == fb['iso']:
                continue

            # Both sides need a transcription to score.
            if not fa['ipa'] or not fb['ipa']:
                continue

            # Canonical ordering: language codes sorted ascending, so the
            # dedup key and Source_Record_ID are orientation-independent.
            if fa['iso'] > fb['iso']:
                fa, fb = fb, fa
                fid_a, fid_b = fid_b, fid_a
                doubt_a, doubt_b = doubt_b, doubt_a

            # Keep only the first pair per (lang A, lang B, concept).
            key = (fa['iso'], fb['iso'], fa['concept'])
            if key in seen:
                continue
            seen.add(key)

            sca_a = ipa_to_sca(fa['ipa'])
            sca_b = ipa_to_sca(fb['ipa'])
            score = sca_distance(sca_a, sca_b)

            # A pair is doubtful if either member is flagged. Compare
            # case-insensitively: CLDF datasets vary between 'true'/'True'.
            if str(doubt_a).lower() == 'true' or str(doubt_b).lower() == 'true':
                confidence = 'doubtful'
            else:
                confidence = 'certain'

            pairs.append({
                'Lang_A': fa['iso'],
                'Word_A': fa['word'],
                'IPA_A': fa['ipa'],
                'Lang_B': fb['iso'],
                'Word_B': fb['word'],
                'IPA_B': fb['ipa'],
                'Concept_ID': fa['concept'],
                'Relationship': 'expert_cognate',
                'Score': str(score),
                'Source': source_name,
                'Relation_Detail': f'cognateset_{csid}',
                'Donor_Language': '-',
                'Confidence': confidence,
                'Source_Record_ID': f'{source_name}:{csid}:{fid_a}+{fid_b}',
            })

    return pairs
|
|
|
|
def write_staging_tsv(pairs, output_path):
    """Serialize pair dicts to a 14-column tab-separated staging file.

    Columns are emitted in a fixed canonical order; any extra keys in a
    pair dict are silently dropped (extrasaction='ignore'). Prints a
    one-line summary after writing.
    """
    fieldnames = [
        'Lang_A', 'Word_A', 'IPA_A', 'Lang_B', 'Word_B', 'IPA_B',
        'Concept_ID', 'Relationship', 'Score', 'Source',
        'Relation_Detail', 'Donor_Language', 'Confidence', 'Source_Record_ID',
    ]
    # newline='' defers line-ending handling to the csv module (required).
    with open(output_path, 'w', encoding='utf-8', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames=fieldnames,
                                delimiter='\t', extrasaction='ignore')
        writer.writeheader()
        writer.writerows(pairs)
    print(f' Wrote {len(pairs):,} pairs to {output_path}')
|
|
|
|
def main():
    """Extract cognate pairs from each available Tier 1 repo into TSVs.

    Expects cloned CLDF repos under ../sources_tier1/<repo>; writes one
    staging TSV per source plus a combined file under ../staging_tier1/,
    printing per-source and total statistics along the way.
    """
    script_root = Path(__file__).parent.parent
    base = script_root / 'sources_tier1'
    staging = script_root / 'staging_tier1'
    staging.mkdir(exist_ok=True)

    # (repo directory name, source label used in rows and filenames).
    # NOTE(review): the module docstring lists 4 repos but kitchensemitic
    # is absent here — presumably excluded for its CC-BY-NC license; confirm.
    sources = [
        ('iecor', 'iecor'),
        ('robbeetstriangulation', 'robbeetstriangulation'),
        ('savelyevturkic', 'savelyevturkic'),
    ]

    all_pairs = []
    for repo_name, source_name in sources:
        repo_dir = base / repo_name
        if not repo_dir.exists():
            print(f'SKIP: {repo_dir} not found')
            continue

        print(f'\nExtracting from {repo_name}...')
        pairs = load_cldf_source(str(repo_dir), source_name)
        write_staging_tsv(pairs, staging / f'cognate_pairs_{source_name}.tsv')

        # Per-source statistics.
        langs = {p['Lang_A'] for p in pairs} | {p['Lang_B'] for p in pairs}
        certain = sum(1 for p in pairs if p['Confidence'] == 'certain')
        doubtful = sum(1 for p in pairs if p['Confidence'] == 'doubtful')
        print(f' Languages: {len(langs)}')
        print(f' Certain: {certain:,}, Doubtful: {doubtful:,}')

        all_pairs.extend(pairs)

    # Combined staging file across all processed sources.
    write_staging_tsv(all_pairs, staging / 'cognate_pairs_tier1_combined.tsv')

    # Overall statistics.
    all_langs = {p['Lang_A'] for p in all_pairs} | {p['Lang_B'] for p in all_pairs}
    total_certain = sum(1 for p in all_pairs if p['Confidence'] == 'certain')
    total_doubtful = sum(1 for p in all_pairs if p['Confidence'] == 'doubtful')
    print('\n=== TOTAL ===')
    print(f'Total pairs: {len(all_pairs):,}')
    print(f'Total languages: {len(all_langs)}')
    print(f'Certain: {total_certain:,}')
    print(f'Doubtful: {total_doubtful:,}')
|
|
|
# Entry point guard: run the pipeline only when executed as a script,
# so the module stays importable for reuse of its functions.
if __name__ == '__main__':
    main()
|
|