"""Ingest Linear B (Mycenaean Greek) data from CC-BY-SA compatible sources.

Sources:
1. Unicode UCD — Sign inventory (88 syllabograms + 123 ideograms)
   URL: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
   License: Unicode Terms (permissive, CC-BY-SA-4.0 compatible)

2. Wiktionary — Mycenaean Greek lemmas (~435 entries)
   URL: https://en.wiktionary.org/w/api.php (MediaWiki API)
   License: CC-BY-SA-3.0+

3. jhnwnstd/shannon — Linear B Lexicon (2,747 entries, MIT license)
   URL: https://raw.githubusercontent.com/jhnwnstd/shannon/main/Linear_B_Lexicon.csv
   License: MIT

Iron Rule: Data comes from downloaded files/API responses. No hardcoded word lists.

Usage:
    python scripts/ingest_linear_b.py [--dry-run]
"""
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import csv |
| import io |
| import json |
| import logging |
| import os |
| import re |
| import sys |
| import time |
| import urllib.error |
| import urllib.parse |
| import urllib.request |
| from pathlib import Path |
|
|
# Force UTF-8 console output so Linear B code points (supplementary plane)
# print correctly even on platforms whose default stdout encoding is narrower
# (notably Windows cp1252 consoles).
# NOTE(review): rewrapping sys.stdout/sys.stderr at import time is a module
# side effect; fine for a standalone script, confirm nothing imports this.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")

# Repository root — assumes this file lives in <root>/scripts/.
ROOT = Path(__file__).resolve().parent.parent

logger = logging.getLogger(__name__)

# All raw downloads are cached under this directory.
RAW_DIR = ROOT / "data" / "training" / "raw" / "linear_b"

# Source URLs (licensing details in the module docstring).
UNICODE_DATA_URL = "https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt"

WIKTIONARY_API = "https://en.wiktionary.org/w/api.php"

SHANNON_LEXICON_URL = (
    "https://raw.githubusercontent.com/jhnwnstd/shannon/main/Linear_B_Lexicon.csv"
)

# Unicode block boundaries for Linear B:
# U+10000–U+1007F "Linear B Syllabary", U+10080–U+100FF "Linear B Ideograms".
LINB_SYLLABARY_START = 0x10000
LINB_SYLLABARY_END = 0x1007F
LINB_IDEOGRAM_START = 0x10080
LINB_IDEOGRAM_END = 0x100FF
|
|
|
def download_file(url: str, dest: Path, label: str) -> bool:
    """Download a file, skipping if already present and non-empty.

    Args:
        url: Source URL to fetch.
        dest: Destination path; parent directories are created as needed.
        label: Short human-readable name used in log messages.

    Returns:
        True if the file is present on disk afterwards (cache hit or fresh
        download), False if the download failed.
    """
    if dest.exists():
        # Hoist the stat() so the cache-hit path stats the file once, not twice.
        size = dest.stat().st_size
        if size > 0:
            logger.info(f" {label}: already exists ({size:,} bytes)")
            return True
    logger.info(f" {label}: downloading from {url}")
    try:
        req = urllib.request.Request(url, headers={"User-Agent": "LinearB-Ingestion/1.0"})
        with urllib.request.urlopen(req, timeout=60) as resp:
            data = resp.read()
        dest.parent.mkdir(parents=True, exist_ok=True)
        dest.write_bytes(data)
        logger.info(f" {label}: downloaded {len(data):,} bytes")
        return True
    except OSError as e:
        # urllib.error.URLError (and its subclass HTTPError) derive from
        # OSError, so this single clause covers network, HTTP, and filesystem
        # failures — same set the original redundant tuple caught.
        logger.error(f" {label}: DOWNLOAD FAILED — {e}")
        return False
|
|
|
|
def download_unicode_data(dry_run: bool = False) -> Path:
    """Fetch UnicodeData.txt (the Linear B sign-inventory source).

    On a dry run only log the intended download; the target path is
    returned either way.
    """
    target = RAW_DIR / "UnicodeData.txt"
    if not dry_run:
        download_file(UNICODE_DATA_URL, target, "UnicodeData.txt")
    else:
        logger.info(f" [DRY RUN] Would download {UNICODE_DATA_URL}")
    return target
|
|
|
|
def download_shannon_lexicon(dry_run: bool = False) -> Path:
    """Fetch the MIT-licensed Linear B lexicon CSV from jhnwnstd/shannon.

    Returns the cache path; no network request is made on a dry run.
    """
    target = RAW_DIR / "shannon_Linear_B_Lexicon.csv"
    if not dry_run:
        download_file(SHANNON_LEXICON_URL, target, "shannon_lexicon")
    else:
        logger.info(f" [DRY RUN] Would download {SHANNON_LEXICON_URL}")
    return target
|
|
|
|
def _fetch_gmy_lemma_titles() -> list[str]:
    """Collect every page title in Category:Mycenaean_Greek_lemmas.

    Pages through the MediaWiki ``list=categorymembers`` endpoint, following
    the ``cmcontinue`` token until the category is exhausted.
    """
    titles: list[str] = []
    cmcontinue = None
    page = 0
    while True:
        page += 1
        params = {
            "action": "query",
            "list": "categorymembers",
            "cmtitle": "Category:Mycenaean_Greek_lemmas",
            "cmlimit": "500",
            "format": "json",
        }
        if cmcontinue:
            params["cmcontinue"] = cmcontinue

        url = f"{WIKTIONARY_API}?{urllib.parse.urlencode(params)}"
        logger.info(f" wiktionary: page {page}, {len(titles)} titles so far...")

        req = urllib.request.Request(url, headers={"User-Agent": "LinearB-Ingestion/1.0"})
        with urllib.request.urlopen(req, timeout=30) as resp:
            data = json.loads(resp.read().decode("utf-8"))

        for member in data.get("query", {}).get("categorymembers", []):
            titles.append(member["title"])

        cont = data.get("continue", {})
        if "cmcontinue" not in cont:
            break
        cmcontinue = cont["cmcontinue"]
        time.sleep(0.5)  # be polite to the API between pages
    return titles


def _fetch_lemma_wikitexts(titles: list[str], batch_size: int = 50) -> list[dict]:
    """Fetch raw wikitext for each title, *batch_size* titles per API call.

    A failed batch is logged and skipped so one bad request does not abort
    the whole ingestion run.
    """
    lemma_data: list[dict] = []
    for i in range(0, len(titles), batch_size):
        batch = titles[i : i + batch_size]
        params = {
            "action": "query",
            "titles": "|".join(batch),
            "prop": "revisions",
            "rvprop": "content",
            "rvslots": "main",
            "format": "json",
        }
        url = f"{WIKTIONARY_API}?{urllib.parse.urlencode(params)}"
        req = urllib.request.Request(url, headers={"User-Agent": "LinearB-Ingestion/1.0"})

        try:
            with urllib.request.urlopen(req, timeout=30) as resp:
                data = json.loads(resp.read().decode("utf-8"))

            # Page IDs (the dict keys) are not needed — iterate values only.
            for page_data in data.get("query", {}).get("pages", {}).values():
                revisions = page_data.get("revisions", [])
                if revisions:
                    content = revisions[0].get("slots", {}).get("main", {}).get("*", "")
                    lemma_data.append(
                        {"title": page_data.get("title", ""), "wikitext": content}
                    )
        except Exception as e:
            # Deliberate best-effort: log and continue with the next batch.
            logger.warning(f" wiktionary batch {i//batch_size}: {e}")

        if i + batch_size < len(titles):
            time.sleep(1.0)  # throttle between content batches
    return lemma_data


def download_wiktionary_lemmas(dry_run: bool = False) -> Path:
    """Download all Mycenaean Greek lemmas from Wiktionary API.

    Two phases: (1) enumerate the lemma category, (2) fetch wikitext for each
    title in batches. Results are cached as JSON; an existing non-empty cache
    file short-circuits the download.

    Returns:
        Path to the JSON cache file (a list of {"title", "wikitext"} dicts).
    """
    dest = RAW_DIR / "wiktionary_gmy_lemmas.json"
    if dest.exists() and dest.stat().st_size > 0:
        logger.info(f" wiktionary: already exists ({dest.stat().st_size:,} bytes)")
        return dest
    if dry_run:
        logger.info(" [DRY RUN] Would fetch Wiktionary gmy lemmas")
        return dest

    all_titles = _fetch_gmy_lemma_titles()
    logger.info(f" wiktionary: fetched {len(all_titles)} lemma titles")

    lemma_data = _fetch_lemma_wikitexts(all_titles)

    dest.parent.mkdir(parents=True, exist_ok=True)
    dest.write_text(json.dumps(lemma_data, ensure_ascii=False, indent=2), encoding="utf-8")
    logger.info(f" wiktionary: saved {len(lemma_data)} lemma entries to {dest}")
    return dest
|
|
|
|
def download_wiktionary_swadesh(dry_run: bool = False) -> Path:
    """Download the Mycenaean Greek Swadesh list appendix from Wiktionary.

    Caches the page's raw wikitext as JSON ({"title", "wikitext"}); an
    existing non-empty cache file or a dry run skips the network request.

    Returns:
        Path to the JSON cache file.
    """
    dest = RAW_DIR / "wiktionary_gmy_swadesh.json"
    if dest.exists() and dest.stat().st_size > 0:
        logger.info(f" swadesh: already exists ({dest.stat().st_size:,} bytes)")
        return dest
    if dry_run:
        logger.info(" [DRY RUN] Would fetch Wiktionary gmy Swadesh list")
        return dest

    params = {
        "action": "query",
        "titles": "Appendix:Mycenaean_Greek_Swadesh_list",
        "prop": "revisions",
        "rvprop": "content",
        "rvslots": "main",
        "format": "json",
    }
    url = f"{WIKTIONARY_API}?{urllib.parse.urlencode(params)}"
    req = urllib.request.Request(url, headers={"User-Agent": "LinearB-Ingestion/1.0"})

    with urllib.request.urlopen(req, timeout=30) as resp:
        data = json.loads(resp.read().decode("utf-8"))

    # BUG FIX: `content` was previously bound only inside the loop, so an
    # empty/error API response (no pages) raised NameError at write time.
    # Default to "" and take the (single) page's wikitext when present.
    content = ""
    for page_data in data.get("query", {}).get("pages", {}).values():
        content = page_data.get("revisions", [{}])[0].get("slots", {}).get("main", {}).get("*", "")

    dest.parent.mkdir(parents=True, exist_ok=True)
    dest.write_text(json.dumps({"title": "Mycenaean_Greek_Swadesh_list", "wikitext": content},
                               ensure_ascii=False, indent=2), encoding="utf-8")
    logger.info(f" swadesh: saved to {dest}")
    return dest
|
|
|
|
def main():
    """CLI entry point: download every Linear B raw data source in turn."""
    parser = argparse.ArgumentParser(description="Ingest Linear B data from open sources")
    parser.add_argument("--dry-run", action="store_true", help="Show what would be downloaded")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s %(message)s",
        datefmt="%H:%M:%S",
    )

    RAW_DIR.mkdir(parents=True, exist_ok=True)

    banner = "=" * 70
    print(banner)
    print("LINEAR B DATA INGESTION")
    print(banner)

    # (step header, summary label, downloader) — run in order, keep results.
    steps = [
        ("Unicode UCD (sign inventory)", "UnicodeData.txt", download_unicode_data),
        ("jhnwnstd/shannon Linear B Lexicon (MIT)", "Shannon Lexicon", download_shannon_lexicon),
        ("Wiktionary Mycenaean Greek lemmas (CC-BY-SA)", "Wiktionary Lemmas", download_wiktionary_lemmas),
        ("Wiktionary Mycenaean Greek Swadesh list", "Wiktionary Swadesh", download_wiktionary_swadesh),
    ]

    results = []
    for step_num, (header, label, fetch) in enumerate(steps, start=1):
        print(f"\n[{step_num}/{len(steps)}] {header}")
        results.append((label, fetch(args.dry_run)))

    print("\n" + banner)
    print("INGESTION COMPLETE")
    print(banner)

    # Summary: report each artifact's on-disk size, or flag it as missing.
    for label, path in results:
        if path.exists():
            print(f" {label}: {path.name} ({path.stat().st_size:,} bytes)")
        else:
            print(f" {label}: NOT DOWNLOADED")

    print(f"\nAll raw data saved to: {RAW_DIR}")


if __name__ == "__main__":
    main()
|
|