| |
| """Build the Linear B (Mycenaean Greek) dataset from downloaded raw data. |
| |
| Parses and combines data from: |
| 1. Unicode UCD — Sign inventory (88 syllabograms + 123 ideograms) |
| 2. jhnwnstd/shannon — Linear B Lexicon (2,747 entries) |
| 3. Wiktionary — Mycenaean Greek lemmas (~435 entries with IPA) |
| 4. IE-CoR — Existing 43 Mycenaean Greek (gmy) words with expert IPA |
| |
| Output files: |
| data/linear_b/linear_b_signs.tsv — Full sign inventory |
| data/linear_b/sign_to_ipa.json — Sign transliteration → IPA mapping |
| data/linear_b/linear_b_words.tsv — Word list (Word, IPA, SCA, Source, Concept_ID, Cognate_Set_ID) |
| data/linear_b/README.md — Documentation |
| |
| Transliteration → IPA mapping: |
| Reference: Ventris & Chadwick (1973) "Documents in Mycenaean Greek", 2nd ed. |
| The Linear B syllabary encodes CV syllables. The conventional transliteration |
| uses Latin characters that are near-IPA with these systematic differences: |
| q = /kʷ/ (labiovelar stop) |
| z = /ts/ or /dz/ (affricate, exact value debated) |
| j = /j/ (palatal glide) |
| w = /w/ (labial glide) |
        pu2 = /pʰu/ (aspirated stop; aspiration is marked by the "2"
                      diacritic on specific signs, e.g. pu2, ta2 — there is
                      no standalone "p2"/"t2" sign)
| |
| Usage: |
| python scripts/build_linear_b_dataset.py |
| """ |
|
|
| from __future__ import annotations |
|
|
| import csv |
| import io |
| import json |
| import re |
| import sys |
| import unicodedata |
| from collections import OrderedDict |
| from pathlib import Path |
|
|
# Re-wrap stdio as UTF-8 so Linear B glyphs (plane-1 codepoints) print
# correctly even when the console encoding is not UTF-8 (e.g. Windows cp1252).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")


# Repository root: this script lives in <root>/scripts/ (see module docstring).
ROOT = Path(__file__).resolve().parent.parent
RAW_DIR = ROOT / "data" / "training" / "raw" / "linear_b"  # downloaded raw inputs
OUT_DIR = ROOT / "data" / "linear_b"  # built dataset outputs


# Unicode block boundaries (inclusive):
#   U+10000-U+1007F  Linear B Syllabary
#   U+10080-U+100FF  Linear B Ideograms
LINB_SYLLABARY_START = 0x10000
LINB_SYLLABARY_END = 0x1007F
LINB_IDEOGRAM_START = 0x10080
LINB_IDEOGRAM_END = 0x100FF
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
# Conventional Latin transliteration → IPA for Linear B syllabograms.
# Values follow Ventris & Chadwick (1973); see the module docstring for the
# systematic conventions (q = labiovelar /kʷ/, z = affricate rendered /ts/).
TRANSLIT_TO_IPA = {
    # Plain vowel signs
    "a": "a", "e": "e", "i": "i", "o": "o", "u": "u",
    # d-series
    "da": "da", "de": "de", "di": "di", "do": "do", "du": "du",
    # j-series (palatal glide)
    "ja": "ja", "je": "je", "jo": "jo", "ju": "ju",
    # k-series
    "ka": "ka", "ke": "ke", "ki": "ki", "ko": "ko", "ku": "ku",
    # m-series
    "ma": "ma", "me": "me", "mi": "mi", "mo": "mo", "mu": "mu",
    # n-series
    "na": "na", "ne": "ne", "ni": "ni", "no": "no", "nu": "nu",
    # p-series
    "pa": "pa", "pe": "pe", "pi": "pi", "po": "po", "pu": "pu",
    # q-series: labiovelar stops
    "qa": "kʷa", "qe": "kʷe", "qi": "kʷi", "qo": "kʷo",
    # r-series
    "ra": "ra", "re": "re", "ri": "ri", "ro": "ro", "ru": "ru",
    # s-series
    "sa": "sa", "se": "se", "si": "si", "so": "so", "su": "su",
    # t-series
    "ta": "ta", "te": "te", "ti": "ti", "to": "to", "tu": "tu",
    # w-series (labial glide)
    "wa": "wa", "we": "we", "wi": "wi", "wo": "wo",
    # z-series: affricates (exact value debated; /ts/ used here)
    "za": "tsa", "ze": "tse", "zi": "tsi", "zo": "tso", "zu": "tsu",
    # "Extra"/optional signs with special values.  AU (B085) and PTE (B062)
    # are also encoded in the Unicode Linear B Syllabary block, so they are
    # included to give those signs an IPA value in the inventory.
    "a2": "ha", "a3": "ai",
    "au": "au",
    "nwa": "nwa",
    "pte": "pte",
    "pu2": "pʰu",
    "ra2": "rja", "ra3": "rai",
    "ro2": "rjo",
    "ta2": "tja",
    "two": "two",
    "dwe": "dwe",
    "dwo": "dwo",
    "twe": "twe",
}
|
|
|
|
def parse_unicode_signs(ucd_path: Path) -> list[dict]:
    """Parse Linear B signs from UnicodeData.txt.

    Each UCD line is semicolon-delimited: ``codepoint;name;category;...``.
    Only codepoints in the Linear B Syllabary (U+10000-U+1007F) and
    Linear B Ideograms (U+10080-U+100FF) blocks are kept.

    Args:
        ucd_path: Path to a local copy of UnicodeData.txt.

    Returns:
        One dict per sign with keys Codepoint, Unicode_Char, Bennett_Number,
        Name, Type, Transliteration, IPA.  Underivable fields are "-"
        (Transliteration/IPA) or "" (Bennett_Number).
    """
    signs = []
    with open(ucd_path, encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split(";")
            if len(parts) < 2:
                continue
            cp_hex = parts[0]
            name = parts[1]
            cp = int(cp_hex, 16)

            if LINB_SYLLABARY_START <= cp <= LINB_SYLLABARY_END:
                sign_type = "syllabogram"
            elif LINB_IDEOGRAM_START <= cp <= LINB_IDEOGRAM_END:
                sign_type = "ideogram"
            else:
                continue

            # Extract the Bennett number (e.g. "B008") and the phonetic
            # value / gloss (e.g. "A", "MAN") from the character name.
            bennett = ""
            phonetic = ""
            m = re.match(r"LINEAR B (?:SYLLABLE|SYMBOL) (B\d+)\s*(.*)", name)
            if m:
                bennett = m.group(1)
                phonetic = m.group(2).strip().lower() if m.group(2) else ""
            else:
                # The ideograms block mixes several naming patterns:
                # "LINEAR B IDEOGRAM B100 MAN", "LINEAR B MONOGRAM B...",
                # and vessel signs with "VESSEL" before the Bennett number.
                # Accept all variants so those signs keep their numbers
                # (the previous IDEOGRAM-only pattern dropped them).
                m = re.match(
                    r"LINEAR B (?:IDEOGRAM|MONOGRAM)(?: VESSEL)? (B\d+\w*)\s*(.*)",
                    name,
                )
                if m:
                    bennett = m.group(1)
                    phonetic = m.group(2).strip() if m.group(2) else ""

            # Syllabograms resolve to IPA via the transliteration table;
            # ideogram "phonetic" fields are glosses and simply miss.
            ipa = TRANSLIT_TO_IPA.get(phonetic, "-") if phonetic else "-"

            signs.append({
                "Codepoint": f"U+{cp_hex}",
                "Unicode_Char": chr(cp),
                "Bennett_Number": bennett,
                "Name": name,
                "Type": sign_type,
                "Transliteration": phonetic if phonetic else "-",
                "IPA": ipa,
            })

    return signs
|
|
|
|
def parse_shannon_lexicon(csv_path: Path) -> list[dict]:
    """Parse jhnwnstd/shannon Linear_B_Lexicon.csv.

    Columns: word (Unicode), transcription (Latin), definition (scholarly
    notes).  For each row we keep the transliteration, extract a short
    English gloss from the definition, and classify the entry by word type
    (common / anthroponym / toponym / theonym / ethnic / unknown).

    Args:
        csv_path: Path to the downloaded lexicon CSV.

    Returns:
        One dict per usable row with keys Word_Unicode, Transliteration,
        Gloss, Word_Type, Source.  Rows without a transcription are skipped.
    """
    entries = []
    with open(csv_path, encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            word_unicode = row.get("word", "").strip()
            translit = row.get("transcription", "").strip()
            definition = row.get("definition", "").strip()

            if not translit:
                continue

            # Definitions follow the shape "<greek form>: <gloss> <citations>".
            # Take the text after the first colon and trim it at the first
            # scholarly citation or tablet-site abbreviation (KN, PY, ...).
            gloss = ""
            colon_parts = definition.split(":", 1)
            if len(colon_parts) > 1:
                after_colon = colon_parts[1].strip()
                gloss_match = re.match(
                    r"([\w\s,/()?.!'\-]+?)(?:\s+(?:Chadwick|McArthur|Witczak|van |Palmer|"
                    r"Ruijgh|Bernabé|Appears|KN|PY|MY|TH|TI))",
                    after_colon,
                )
                if gloss_match:
                    gloss = gloss_match.group(1).strip().rstrip(",;.")
                else:
                    # No recognizable citation marker: fall back to a
                    # truncated prefix and cut at known author names.
                    gloss = after_colon[:80].strip()
                    for cutoff in ["Chadwick", "McArthur", "Ventris", "John and"]:
                        if cutoff in gloss:
                            gloss = gloss[: gloss.index(cutoff)].strip().rstrip(",;.")
                            break

            # Classify from the extracted gloss text (lower-cased once).
            gloss_lower = gloss.lower()
            if "anthroponym" in gloss_lower:
                word_type = "anthroponym"
            elif "toponym" in gloss_lower:
                word_type = "toponym"
            elif "theonym" in gloss_lower:
                word_type = "theonym"
            elif "ethnic" in gloss_lower:
                word_type = "ethnic"
            elif not gloss or gloss_lower in ("meaning obscure", "meaning unknown",
                                              "meaning uncertain", "hapax"):
                word_type = "unknown"
            else:
                word_type = "common"

            entries.append({
                "Word_Unicode": word_unicode,
                "Transliteration": translit,
                "Gloss": gloss,
                "Word_Type": word_type,
                "Source": "shannon_lexicon",
            })

    return entries
|
|
|
|
def unicode_to_translit(title: str) -> str:
    """Convert Linear B Unicode characters in *title* to transliteration.

    Character names from ``unicodedata`` are mined for their phonetic
    value, e.g. "LINEAR B SYLLABLE B008 A" → "a".  Syllabograms without a
    phonetic value become "*NN" (their Bennett number) and ideograms become
    "[GLOSS]".  Characters outside the two Linear B blocks are ignored.
    """
    pieces: list[str] = []
    for ch in title:
        code = ord(ch)
        in_syllabary = LINB_SYLLABARY_START <= code <= LINB_SYLLABARY_END
        in_ideograms = LINB_IDEOGRAM_START <= code <= LINB_IDEOGRAM_END
        if not (in_syllabary or in_ideograms):
            continue
        try:
            char_name = unicodedata.name(ch, "")
        except ValueError:
            continue
        if in_syllabary:
            match = re.match(r"LINEAR B (?:SYLLABLE|SYMBOL) B\d+\s*(.*)", char_name)
            if match and match.group(1):
                pieces.append(match.group(1).strip().lower())
                continue
            # Sign with no assigned phonetic value: fall back to "*NN".
            sym = re.match(r"LINEAR B SYMBOL (B\d+)", char_name)
            if sym:
                pieces.append(f"*{sym.group(1)[1:]}")
        else:
            # Ideograms are kept as bracketed glosses (or Bennett numbers
            # when the name carries no gloss).
            match = re.match(r"LINEAR B IDEOGRAM (B\d+\w*)\s*(.*)", char_name)
            if match:
                pieces.append(f"[{match.group(2).strip() or match.group(1)}]")

    return "-".join(pieces) if pieces else ""
|
|
|
|
def parse_wiktionary_lemmas(json_path: Path) -> list[dict]:
    """Parse Wiktionary Mycenaean Greek lemma data.

    Extract from each lemma's wikitext:
    - ts= parameter of the {{head|gmy|...}} template → IPA transcription
    - "# ..." definition lines → English glosses
    - first positional argument of {{head|gmy|...}} → part of speech
    - {{cog|grc|...}} → Ancient Greek cognate

    Args:
        json_path: Path to the dumped list of {"title", "wikitext"} dicts.

    Returns:
        One dict per usable lemma with keys Title_Unicode, Transliteration,
        IPA, Gloss, POS, Word_Type, Greek_Cognate, Source.
    """
    with open(json_path, encoding="utf-8") as f:
        lemmas = json.load(f)

    entries = []
    for lemma in lemmas:
        title = lemma["title"]
        wikitext = lemma["wikitext"]

        # Skip pages that have no Mycenaean Greek section at all.
        if "==Mycenaean Greek==" not in wikitext:
            continue

        # Derive the transliteration from the Linear B characters in the title.
        title_translit = unicode_to_translit(title)

        # Titles made only of ideograms / unidentified signs are useless
        # unless the page supplies an explicit tr= transliteration.
        if not title_translit or all(
            p.startswith("[") or p.startswith("*") for p in title_translit.split("-") if p
        ):
            tr_check = re.search(r"\|tr=([^|}]+)", wikitext)
            if not tr_check:
                continue

        # IPA from the head template's ts= parameter.
        ipa = ""
        head_match = re.search(r"\{\{(?:head|h)\|gmy\|[^}]*\|ts=([^|}]+)", wikitext)
        if head_match:
            ipa = head_match.group(1).strip()

        # Discard multi-word or markup-bearing transcriptions.
        if " " in ipa or "<br>" in ipa:
            ipa = ""

        # Transliteration: prefer the title-derived value, else tr= param.
        translit = title_translit
        if not translit:
            head_tr_match = re.search(r"\{\{(?:head|h)\|gmy\|[^}]*\|tr=([^|}]+)", wikitext)
            if head_tr_match:
                translit = head_tr_match.group(1).strip()

        # Clean wiki markup out of the transliteration; drop anything that
        # still contains template/link characters after cleaning.
        if translit:
            translit = translit.replace("'''", "")
            if " " in translit:
                translit = translit.split()[0]
            translit = translit.strip(".,;:!?")
            if re.search(r"[<>\[\]{}|=]", translit):
                continue

        if not translit or translit == "-":
            continue

        # Require at least one real syllabogram (not just "*NN" / "[GLOSS]").
        translit_parts = [p for p in translit.split("-") if p]
        syllabic_parts = [p for p in translit_parts
                          if not p.startswith("*") and not p.startswith("[")]
        if not syllabic_parts:
            continue

        # Glosses: "# ..." definition lines, stripped of wiki templates/links.
        glosses = []
        for line in wikitext.split("\n"):
            line = line.strip()
            if line.startswith("# ") and not line.startswith("# {{def-uncertain"):
                gloss = line[2:]
                gloss = re.sub(r"\{\{l\|en\|([^|}]+)[^}]*\}\}", r"\1", gloss)
                gloss = re.sub(r"\{\{[^}]*\}\}", "", gloss)
                gloss = re.sub(r"\[\[(?:[^|\]]*\|)?([^\]]*)\]\]", r"\1", gloss)
                gloss = re.sub(r"['\[\]]", "", gloss)
                gloss = re.sub(r"\}\}|\{\{", "", gloss)
                gloss = gloss.strip().strip(".,;:")
                if gloss and len(gloss) > 1:
                    glosses.append(gloss)

        # Part of speech: first positional argument of {{head|gmy|...}}.
        # Use [^|}]+ rather than \w+ so multi-word values such as
        # "proper noun" are captured whole — \w+ stopped at the space,
        # which made the "proper noun" comparison below unreachable.
        pos = ""
        pos_match = re.search(r"\{\{head\|gmy\|([^|}]+)", wikitext)
        if pos_match:
            pos = pos_match.group(1).strip()

        # First Ancient Greek cognate mentioned, if any.
        cognates = []
        cog_matches = re.finditer(r"\{\{cog\|grc\|([^|}]+)", wikitext)
        for m in cog_matches:
            cognates.append(m.group(1))

        gloss_text = "; ".join(glosses) if glosses else "-"

        # Word type: proper nouns from POS; names/places from gloss wording.
        word_type = "common"
        if pos == "proper noun":
            word_type = "proper"
        elif "toponym" in gloss_text.lower():
            word_type = "toponym"
        elif "anthroponym" in gloss_text.lower():
            word_type = "anthroponym"

        entries.append({
            "Title_Unicode": title,
            "Transliteration": translit,
            "IPA": ipa,
            "Gloss": gloss_text,
            "POS": pos,
            "Word_Type": word_type,
            "Greek_Cognate": cognates[0] if cognates else "-",
            "Source": "wiktionary_gmy",
        })

    return entries
|
|
|
|
def transliterate_to_ipa(translit: str) -> str:
    """Convert a Linear B transliteration (e.g. "a-ke-ro") to IPA.

    Reference: Ventris & Chadwick (1973), Hooker (1980)

    Each hyphen-separated CV syllable is mapped through TRANSLIT_TO_IPA.
    Unidentified signs ("*NN") render as "?", and unknown syllables pass
    through unchanged.  Empty or placeholder input yields "-".
    """
    if not translit or translit == "-":
        return "-"

    def render(syllable: str) -> str:
        # "*NN" sign numbers have no phonetic reading.
        if syllable.startswith("*"):
            return "?"
        return TRANSLIT_TO_IPA.get(syllable, syllable)

    cleaned = translit.strip().strip("-")
    normalized = (s.strip().lower() for s in cleaned.split("-"))
    return "".join(render(s) for s in normalized if s)
|
|
|
|
def load_iecor_gmy_words() -> list[dict]:
    """Load existing Mycenaean Greek (gmy) words from the cognate-pairs Parquet.

    Scans both sides (A and B) of the inherited cognate pairs for rows
    whose language is "gmy" and collects each distinct word with its IPA
    and the set of concept IDs it appears under.

    Returns:
        One dict per distinct word with keys Transliteration, IPA,
        Concept_IDs (comma-joined, sorted), Source.  Empty list when
        pyarrow or the Parquet file is unavailable.
    """
    try:
        import pyarrow.parquet as pq
        import pyarrow.compute as pc
    except ImportError:
        print(" [WARN] pyarrow not available, skipping IE-CoR data")
        return []

    parquet_path = ROOT / "data" / "training" / "cognate_pairs" / "cognate_pairs_inherited.parquet"
    if not parquet_path.exists():
        return []

    t = pq.read_table(parquet_path)

    words: dict[str, dict] = {}

    def collect(side: str) -> None:
        # Accumulate gmy words from one side ("A" or "B") of the pairs.
        # Extracted as a helper: the two sides previously duplicated this
        # loop verbatim apart from the column suffix.
        sub = t.filter(pc.equal(t[f"Lang_{side}"], "gmy"))
        for i in range(sub.num_rows):
            w = sub.column(f"Word_{side}")[i].as_py()
            ipa = sub.column(f"IPA_{side}")[i].as_py()
            cid = sub.column("Concept_ID")[i].as_py()
            if not w or w == "-":
                continue
            if w not in words:
                words[w] = {"ipa": ipa or "-", "concept_ids": set()}
            if cid and cid != "-":
                words[w]["concept_ids"].add(cid)

    collect("A")
    collect("B")

    result = []
    for translit, data in words.items():
        result.append({
            "Transliteration": translit,
            "IPA": data["ipa"],
            "Concept_IDs": ",".join(sorted(data["concept_ids"])),
            "Source": "iecor",
        })

    return result
|
|
|
|
def build_sign_inventory(signs: list[dict]) -> None:
    """Write the sign inventory TSV and sign_to_ipa.json.

    Args:
        signs: Sign dicts as produced by parse_unicode_signs().

    Side effects:
        Creates OUT_DIR if needed, writes linear_b_signs.tsv and
        sign_to_ipa.json there, and prints summary statistics.
    """
    OUT_DIR.mkdir(parents=True, exist_ok=True)

    # Full inventory TSV (syllabograms + ideograms).
    tsv_path = OUT_DIR / "linear_b_signs.tsv"
    cols = ["Codepoint", "Unicode_Char", "Bennett_Number", "Name", "Type",
            "Transliteration", "IPA"]
    with open(tsv_path, "w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=cols, delimiter="\t")
        writer.writeheader()
        for sign in signs:
            writer.writerow(sign)
    print(f" Signs TSV: {len(signs)} signs → {tsv_path}")

    # Transliteration → IPA mapping, syllabograms with known values only.
    # Built-in dicts preserve insertion order (Python 3.7+), so the former
    # OrderedDict was unnecessary; the JSON output is unchanged.
    sign_map = {
        sign["Transliteration"]: sign["IPA"]
        for sign in signs
        if sign["Type"] == "syllabogram" and sign["Transliteration"] != "-"
    }
    json_path = OUT_DIR / "sign_to_ipa.json"
    json_path.write_text(json.dumps(sign_map, ensure_ascii=False, indent=2), encoding="utf-8")
    print(f" sign_to_ipa.json: {len(sign_map)} mappings → {json_path}")

    # Summary statistics.
    syllabograms = [s for s in signs if s["Type"] == "syllabogram"]
    ideograms = [s for s in signs if s["Type"] == "ideogram"]
    with_phonetic = [s for s in syllabograms if s["Transliteration"] != "-"]
    print(f" Syllabograms: {len(syllabograms)} ({len(with_phonetic)} with phonetic values)")
    print(f" Ideograms: {len(ideograms)}")
|
|
|
|
def build_word_list(
    shannon_entries: list[dict],
    wiktionary_entries: list[dict],
    iecor_entries: list[dict],
) -> list[dict]:
    """Merge all word sources and write linear_b_words.tsv.

    Precedence (lowest to highest): shannon lexicon → Wiktionary → IE-CoR.
    Later sources overwrite gloss/IPA/type fields of earlier ones; IE-CoR
    additionally contributes concept IDs and expert IPA.

    Args:
        shannon_entries: Output of parse_shannon_lexicon().
        wiktionary_entries: Output of parse_wiktionary_lemmas().
        iecor_entries: Output of load_iecor_gmy_words().

    Returns:
        The merged entries, sorted common-nouns-first, as written to the
        TSV.  (The previous ``-> None`` annotation contradicted the actual
        ``return`` statement.)
    """
    # Merge keyed by transliteration; later sources override earlier ones.
    all_words = {}

    # Layer 1: shannon lexicon (broadest coverage, no IPA).
    for e in shannon_entries:
        t = e["Transliteration"]
        if t not in all_words:
            all_words[t] = {
                "Transliteration": t,
                "Gloss": e["Gloss"],
                "Word_Type": e["Word_Type"],
                "IPA": "-",
                "Source": "shannon_lexicon",
                "Concept_ID": "-",
                "Cognate_Set_ID": "-",
            }

    # Layer 2: Wiktionary (adds glosses and ts= IPA).
    for e in wiktionary_entries:
        t = e["Transliteration"]
        if not t:
            continue

        # Filter entries whose "transliteration" is clearly not a Linear B
        # word: very short bare letter runs and short all-caps strings
        # (abbreviations / section artifacts).
        if len(t) <= 2 and t.isalpha() and "-" not in t:
            continue
        if t.isupper() and len(t) <= 6:
            continue

        # Keep only lowercase syllables, digits, and "*NN" sign markers.
        if not re.match(r'^[\-a-z0-9*]+$', t.replace("-", "")):
            continue
        if t in all_words:
            # Enrich the existing shannon entry in place.
            if e["Gloss"] != "-":
                all_words[t]["Gloss"] = e["Gloss"]
            if e["IPA"]:
                all_words[t]["IPA"] = e["IPA"]
            all_words[t]["Source"] = "wiktionary_gmy"
            if e["Word_Type"] != "common":
                all_words[t]["Word_Type"] = e["Word_Type"]
        else:
            all_words[t] = {
                "Transliteration": t,
                "Gloss": e["Gloss"],
                "Word_Type": e["Word_Type"],
                "IPA": e["IPA"] if e["IPA"] else "-",
                "Source": "wiktionary_gmy",
                "Concept_ID": "-",
                "Cognate_Set_ID": "-",
            }

    # Layer 3: IE-CoR (expert IPA + concept IDs take precedence).
    for e in iecor_entries:
        t = e["Transliteration"]
        if t in all_words:
            if e["IPA"] and e["IPA"] != "-":
                all_words[t]["IPA"] = e["IPA"]
            if e["Concept_IDs"]:
                all_words[t]["Concept_ID"] = e["Concept_IDs"]
            all_words[t]["Source"] = "iecor+" + all_words[t]["Source"]
        else:
            all_words[t] = {
                "Transliteration": t,
                "Gloss": "-",
                "Word_Type": "common",
                "IPA": e["IPA"],
                "Source": "iecor",
                "Concept_ID": e.get("Concept_IDs", "-"),
                "Cognate_Set_ID": "-",
            }

    # Fill missing IPA by direct transliteration conversion and record the
    # provenance of every IPA value.
    for t, entry in all_words.items():
        if entry["IPA"] == "-" or not entry["IPA"]:
            entry["IPA"] = transliterate_to_ipa(t)
            if entry["IPA"] != "-":
                entry["IPA_Source"] = "translit_conversion"
            else:
                entry["IPA_Source"] = "none"
        else:
            entry["IPA_Source"] = "expert"

    # Compute SCA sound classes via the project pipeline when available.
    try:
        sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
        from cognate_pipeline.normalise.sound_class import ipa_to_sound_class
        has_sca = True
    except ImportError:
        has_sca = False
        print(" [WARN] cognate_pipeline not available, SCA will be computed from IPA directly")

    for entry in all_words.values():
        if has_sca and entry["IPA"] != "-":
            try:
                entry["SCA"] = ipa_to_sound_class(entry["IPA"])
            except Exception:
                entry["SCA"] = entry["IPA"].upper()
        elif entry["IPA"] != "-":
            # Crude fallback: upper-cased IPA stands in for sound classes.
            entry["SCA"] = entry["IPA"].upper()
        else:
            entry["SCA"] = "-"

    # Write the TSV, common nouns first so the most useful entries lead.
    OUT_DIR.mkdir(parents=True, exist_ok=True)
    tsv_path = OUT_DIR / "linear_b_words.tsv"
    cols = ["Word", "IPA", "SCA", "Source", "Concept_ID", "Cognate_Set_ID",
            "Gloss", "Word_Type", "IPA_Source"]

    type_order = {"common": 0, "unknown": 1, "theonym": 2, "ethnic": 3,
                  "proper": 4, "toponym": 5, "anthroponym": 6}
    sorted_entries = sorted(
        all_words.values(),
        key=lambda e: (type_order.get(e["Word_Type"], 9), e["Transliteration"]),
    )

    with open(tsv_path, "w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=cols, delimiter="\t",
                                extrasaction="ignore")
        writer.writeheader()
        for entry in sorted_entries:
            writer.writerow({
                "Word": entry["Transliteration"],
                "IPA": entry["IPA"],
                "SCA": entry["SCA"],
                "Source": entry["Source"],
                "Concept_ID": entry["Concept_ID"],
                "Cognate_Set_ID": entry["Cognate_Set_ID"],
                "Gloss": entry["Gloss"],
                "Word_Type": entry["Word_Type"],
                "IPA_Source": entry.get("IPA_Source", "unknown"),
            })

    # Summary statistics.
    total = len(sorted_entries)
    common = sum(1 for e in sorted_entries if e["Word_Type"] == "common")
    proper = total - common
    with_expert_ipa = sum(1 for e in sorted_entries if e.get("IPA_Source") == "expert")
    with_translit_ipa = sum(1 for e in sorted_entries if e.get("IPA_Source") == "translit_conversion")

    print(f"\n Words TSV: {total} entries → {tsv_path}")
    print(f" Common nouns: {common}")
    print(f" Proper nouns (names/places): {proper}")
    print(f" IPA from expert sources: {with_expert_ipa}")
    print(f" IPA from transliteration conversion: {with_translit_ipa}")
    print(f" No IPA: {total - with_expert_ipa - with_translit_ipa}")

    src_counts = {}
    for e in sorted_entries:
        s = e["Source"]
        src_counts[s] = src_counts.get(s, 0) + 1
    print("\n Source distribution:")
    for src, count in sorted(src_counts.items(), key=lambda x: -x[1]):
        print(f" {src}: {count}")

    return sorted_entries
|
|
|
|
def main():
    """Run the full build: sign inventory, three word sources, then merge."""
    print("=" * 70)
    print("LINEAR B DATASET BUILD")
    print("=" * 70)

    # Step 1: sign inventory from the Unicode Character Database.
    print("\n[1/4] Parsing Unicode UCD for Linear B signs...")
    ucd_path = RAW_DIR / "UnicodeData.txt"
    if not ucd_path.exists():
        print(" ERROR: UnicodeData.txt not found. Run ingest_linear_b.py first.")
        sys.exit(1)
    signs = parse_unicode_signs(ucd_path)
    build_sign_inventory(signs)

    # Step 2: shannon lexicon (word list with glosses, no IPA).
    print("\n[2/4] Parsing Shannon Linear B Lexicon...")
    shannon_path = RAW_DIR / "shannon_Linear_B_Lexicon.csv"
    if not shannon_path.exists():
        print(" ERROR: shannon_Linear_B_Lexicon.csv not found. Run ingest_linear_b.py first.")
        sys.exit(1)
    shannon_entries = parse_shannon_lexicon(shannon_path)
    print(f" Parsed {len(shannon_entries)} entries")
    # Per-word-type breakdown, most frequent first.
    type_counts = {}
    for e in shannon_entries:
        type_counts[e["Word_Type"]] = type_counts.get(e["Word_Type"], 0) + 1
    for wt, c in sorted(type_counts.items(), key=lambda x: -x[1]):
        print(f" {wt}: {c}")

    # Step 3: Wiktionary lemmas (glosses + ts= IPA transcriptions).
    print("\n[3/4] Parsing Wiktionary Mycenaean Greek lemmas...")
    wikt_path = RAW_DIR / "wiktionary_gmy_lemmas.json"
    if not wikt_path.exists():
        print(" ERROR: wiktionary_gmy_lemmas.json not found. Run ingest_linear_b.py first.")
        sys.exit(1)
    wiktionary_entries = parse_wiktionary_lemmas(wikt_path)
    print(f" Parsed {len(wiktionary_entries)} entries")
    with_ipa = sum(1 for e in wiktionary_entries if e["IPA"])
    with_translit = sum(1 for e in wiktionary_entries if e["Transliteration"])
    with_gloss = sum(1 for e in wiktionary_entries if e["Gloss"] != "-")
    print(f" With IPA (ts=): {with_ipa}")
    print(f" With transliteration: {with_translit}")
    print(f" With gloss: {with_gloss}")

    # Step 4: IE-CoR expert data (optional; empty when pyarrow or the
    # Parquet file is unavailable).
    print("\n[4/4] Loading IE-CoR Mycenaean Greek data...")
    iecor_entries = load_iecor_gmy_words()
    print(f" Loaded {len(iecor_entries)} entries from cognate pairs")

    # Merge everything and write linear_b_words.tsv.
    print("\n[BUILD] Merging all sources...")
    # NOTE(review): the returned entries are not used further here.
    entries = build_word_list(shannon_entries, wiktionary_entries, iecor_entries)

    print("\n" + "=" * 70)
    print("BUILD COMPLETE")
    print("=" * 70)
    print(f"\nOutput directory: {OUT_DIR}")
    for p in sorted(OUT_DIR.iterdir()):
        print(f" {p.name}: {p.stat().st_size:,} bytes")


if __name__ == "__main__":
    main()
|
|