| |
| """Fix ABVD-sourced lexicon entries where Word == IPA (fake IPA). |
| |
| Scans all TSV files in data/training/lexicons/ for entries sourced from |
| ABVD where the Word column is identical to the IPA column. For those |
| entries, applies G2P conversion (transphone if available, otherwise a |
| lightweight Austronesian-specific rule-based fallback) and recomputes |
| the SCA encoding from the new IPA. |
| |
| TSV format: Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID |
| """ |
|
|
| from __future__ import annotations |
|
|
| import csv |
| import re |
| import sys |
| from pathlib import Path |
|
|
# Repository root: this script lives one directory below it.
ROOT = Path(__file__).resolve().parent.parent
# Make the cognate_pipeline sources importable when this file is run
# directly as a script (before the project import below).
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))


from cognate_pipeline.normalise.sound_class import ipa_to_sound_class


# Directory holding the training lexicon TSVs to scan.
LEXICON_DIR = ROOT / "data" / "training" / "lexicons"
# Column layout written back whenever a file is modified (see fix_file).
HEADER = "Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID\n"
|
|
|
|
| |
| |
| |
|
|
# Probe for the optional `transphone` G2P backend once at import time.
# When it is installed, g2p() prefers it over the rule-based fallback.
_USE_TRANSPHONE = False


try:
    from transphone import read_ipa  # noqa: F401 - used by _transphone_g2p


    _USE_TRANSPHONE = True
except ImportError:
    # transphone is optional; the Austronesian rules below are the fallback.
    pass
|
|
|
|
| def _transphone_g2p(word: str) -> str: |
| """Attempt G2P via transphone.""" |
| try: |
| phones = read_ipa(word, "ind") |
| if phones: |
| return "".join(phones) |
| except Exception: |
| pass |
| return "" |
|
|
|
|
| |
| |
| |
|
|
| |
| |
| _AUSTRONESIAN_RULES: list[tuple[str, str]] = [ |
| |
| (r"ngg", "ŋɡ"), |
| (r"ngk", "ŋk"), |
|
|
| |
| (r"ng", "ŋ"), |
| (r"ny", "ɲ"), |
| (r"sy", "ʃ"), |
| (r"ch", "tʃ"), |
| (r"sh", "ʃ"), |
| (r"th", "t"), |
| (r"dj", "dʒ"), |
| (r"tj", "tʃ"), |
| (r"nj", "ɲ"), |
| (r"kh", "x"), |
| (r"gh", "ɣ"), |
| (r"ph", "p"), |
| (r"bh", "b"), |
|
|
| |
| (r"c", "tʃ"), |
| (r"j", "dʒ"), |
| (r"y", "j"), |
| (r"q", "ʔ"), |
| (r"x", "ks"), |
| (r"'", "ʔ"), |
|
|
| |
| |
| (r"aa", "aː"), |
| (r"ee", "eː"), |
| (r"ii", "iː"), |
| (r"oo", "oː"), |
| (r"uu", "uː"), |
| ] |
|
|
| |
| _RULE_PATTERNS = [(re.compile(pat, re.IGNORECASE), repl) for pat, repl in _AUSTRONESIAN_RULES] |
|
|
|
|
| def _austronesian_g2p(word: str) -> str: |
| """Best-effort rule-based orthographic-to-IPA for Austronesian words. |
| |
| Applies substitution rules in priority order (longest match first), |
| then passes through any remaining characters unchanged. |
| """ |
| text = word.lower().strip() |
| if not text: |
| return "" |
|
|
| |
| result: list[str] = [] |
| i = 0 |
| while i < len(text): |
| matched = False |
| for pat, repl in _RULE_PATTERNS: |
| m = pat.match(text, i) |
| if m: |
| result.append(repl) |
| i = m.end() |
| matched = True |
| break |
| if not matched: |
| |
| ch = text[i] |
| |
| if ch.isalpha(): |
| result.append(ch) |
| i += 1 |
|
|
| return "".join(result) |
|
|
|
|
def g2p(word: str) -> str:
    """Convert an orthographic word to IPA using the best available backend.

    Prefers transphone when it was importable; when it is absent or
    returns an empty result, falls back to the Austronesian rule set.
    """
    if _USE_TRANSPHONE and (ipa := _transphone_g2p(word)):
        return ipa
    return _austronesian_g2p(word)
|
|
|
|
| |
| |
| |
|
|
def fix_file(tsv_path: Path) -> tuple[int, int, int]:
    """Fix fake-IPA entries in a single TSV file.

    An entry is "fake" when it is ABVD-sourced and its Word column is
    identical to its IPA column.  Such entries get a fresh IPA from
    g2p() and a recomputed SCA encoding.  The file is rewritten only
    when at least one entry was actually fixed.

    Args:
        tsv_path: Path to a lexicon TSV with the HEADER column layout.

    Returns:
        ``(total_abvd_fake, fixed, failed)``.
    """
    rows: list[dict[str, str]] = []
    total_abvd_fake = 0
    fixed = 0
    failed = 0

    with open(tsv_path, encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f, delimiter="\t")
        if reader.fieldnames is None:
            # Completely empty file: nothing to scan.
            return 0, 0, 0
        for row in reader:
            # DictReader fills short rows with None (restval default), so
            # coalesce to "" to keep comparisons and the rewrite safe.
            source = row.get("Source") or ""
            word = row.get("Word") or ""
            ipa = row.get("IPA") or ""

            # Match case-insensitively: some files record "ABVD" uppercase.
            if "abvd" in source.lower() and word == ipa and word:
                total_abvd_fake += 1
                new_ipa = g2p(word)
                if new_ipa and new_ipa != word:
                    row["IPA"] = new_ipa
                    row["SCA"] = ipa_to_sound_class(new_ipa)
                    fixed += 1
                else:
                    # G2P produced nothing usable; leave the row untouched.
                    failed += 1

            rows.append(row)

    if fixed == 0:
        # No changes -- avoid rewriting (and re-timestamping) the file.
        return total_abvd_fake, fixed, failed

    with open(tsv_path, "w", encoding="utf-8", newline="") as f:
        f.write(HEADER)
        for row in rows:
            fields = [
                row.get("Word") or "",
                row.get("IPA") or "",
                row.get("SCA") or "",
                row.get("Source") or "",
                row.get("Concept_ID") or "",
                row.get("Cognate_Set_ID") or "",
            ]
            f.write("\t".join(fields) + "\n")

    return total_abvd_fake, fixed, failed
|
|
|
|
def main() -> None:
    """Scan every lexicon TSV, fix fake-IPA ABVD entries, print a summary."""
    banner = "=" * 70
    print(banner)
    print("fix_abvd_ipa: Fix ABVD entries where Word == IPA")
    print(banner)

    backend = "transphone" if _USE_TRANSPHONE else "rule-based Austronesian fallback"
    print(f" G2P backend: {backend}")

    if not LEXICON_DIR.exists():
        print(f" ERROR: lexicon directory not found: {LEXICON_DIR}")
        sys.exit(1)

    tsv_paths = sorted(LEXICON_DIR.glob("*.tsv"))
    print(f" Lexicon directory: {LEXICON_DIR}")
    print(f" TSV files found: {len(tsv_paths)}")
    print()

    # Process every file up front, then report per-file and aggregate stats.
    results = [fix_file(path) for path in tsv_paths]

    files_modified = 0
    for path, (fake, n_fixed, n_failed) in zip(tsv_paths, results):
        if n_fixed > 0:
            files_modified += 1
            print(f" {path.name}: {n_fixed} fixed, {n_failed} failed (of {fake} fake-IPA)")

    total_fake = sum(r[0] for r in results)
    total_fixed = sum(r[1] for r in results)
    total_failed = sum(r[2] for r in results)

    print()
    rule = "-" * 70
    print(rule)
    print("SUMMARY")
    print(rule)
    print(f" Files scanned: {len(tsv_paths)}")
    print(f" Files modified: {files_modified}")
    print(f" ABVD fake-IPA: {total_fake}")
    print(f" Entries fixed: {total_fixed}")
    print(f" Entries failed: {total_failed}")

    if total_fake > 0:
        print(f" Fix rate: {total_fixed / total_fake * 100:.1f}%")

    print("Done.")


if __name__ == "__main__":
    main()
|
|