# Source: ancient-scripts-datasets / data / linear_a / build_model_input.py
# Commit 26786e3 (Alvin): Add complete dataset: all sources, metadata,
# scripts, docs, and phylo enrichment
#!/usr/bin/env python3
"""Build Linear A model-ready training data from libation tables and lineara.xyz.
Reads:
- libation_tables.json (inscription metadata + transcriptions)
- sign_to_ipa.json (syllabogram → IPA mapping)
- LinearAInscriptions.js (fetched from lineara.xyz, cached locally)
Outputs:
- linear_a_words.tsv (all unique word forms with IPA and SCA)
- linear_a_corpus.txt (one inscription per line, IPA-converted words)
"""
from __future__ import annotations
import json
import re
import sys
import urllib.request
from pathlib import Path
from typing import Dict, List, Optional, Tuple
# Directory containing this script; all input/output paths are resolved here.
_HERE = Path(__file__).resolve().parent
# Local cache for the downloaded lineara.xyz inscription database.
_CACHE_JS = _HERE / "LinearAInscriptions.js"
# Upstream source of the inscription data (a JS module, not valid JSON).
_LINEARA_URL = "https://www.lineara.xyz/LinearAInscriptions.js"
def load_sign_map(path: Path) -> Dict[str, str]:
    """Load sign_to_ipa.json: a JSON object mapping syllabogram → IPA."""
    return json.loads(path.read_text(encoding="utf-8"))
# Alternate notations used by lineara.xyz for undeciphered signs.
_SIGN_ALIASES = {
    "pa₃": "*56",
    "pa3": "*56",
}


def syllables_to_ipa(word: str, sign_map: Dict[str, str]) -> Optional[str]:
    """Convert a hyphen-delimited transliteration to IPA.

    E.g. "A-TA-I-*301-WA-JA" → "ataiΘwaja"

    Returns None if any syllable is unrecognizable (damaged, numeric, etc.).
    """
    pieces: List[str] = []
    for token in word.split("-"):
        sign = token.lower().strip()
        if not sign:
            continue
        # Normalize alternate notations before lookup.
        ipa = sign_map.get(_SIGN_ALIASES.get(sign, sign))
        if ipa is None:
            # Unknown token (commodity quantity, damaged sign, ...): reject word.
            return None
        pieces.append(ipa)
    return "".join(pieces) if pieces else None
def ipa_to_sca(ipa: str) -> str:
    """Generate an uppercase SCA (Sound Class Approximation) from IPA.

    For CV syllabograms that map directly, this is just uppercase ASCII.
    Placeholders Θ and Φ are already uppercase Greek letters, so they pass
    through unchanged.

    Note: the original implementation branched on Θ/Φ vs. ASCII vs. other
    characters, but every branch applied the identical transform (Θ/Φ are
    fixed points of str.upper), so the whole function reduces to a single
    uppercase conversion.
    """
    return ipa.upper()
def fetch_lineara_js(cache_path: Path) -> str:
    """Return the LinearAInscriptions.js source, downloading on a cache miss."""
    if not cache_path.exists():
        print(f"Fetching {_LINEARA_URL} ...")
        request = urllib.request.Request(
            _LINEARA_URL, headers={"User-Agent": "LinearA-build/1.0"}
        )
        with urllib.request.urlopen(request, timeout=30) as resp:
            text = resp.read().decode("utf-8")
        cache_path.write_text(text, encoding="utf-8")
        print(f" Cached to {cache_path}")
        return text
    return cache_path.read_text(encoding="utf-8")
def parse_lineara_js(js_text: str) -> Dict[str, List[str]]:
    """Parse the JS Map to extract transliteratedWords per inscription ID.

    The file is a JS module (not valid JSON) with entries shaped like
    ["IOZa2",{...,"transliteratedWords":[...],...}], so we scan it with
    regexes: one for each Za entry header, one for its word array.

    Args:
        js_text: raw contents of LinearAInscriptions.js.

    Returns:
        dict of inscription_id → list of transliterated word strings
        (word separators and empty tokens removed).
    """
    inscriptions: Dict[str, List[str]] = {}
    entry_pattern = re.compile(r'\["([A-Za-z]+Za\d+[a-z]?)",\s*\{')
    tw_pattern = re.compile(r'"transliteratedWords"\s*:\s*\[([^\]]*)\]')

    pos = 0
    while pos < len(js_text):
        m = entry_pattern.search(js_text, pos)
        if not m:
            break
        entry_id = m.group(1)
        # Bound the lookahead at the next entry header. The previous fixed
        # 10,000-char window could run into the FOLLOWING entry, so an entry
        # without its own transliteratedWords would wrongly steal the next
        # entry's word list.
        next_entry = entry_pattern.search(js_text, m.end())
        chunk_end = next_entry.start() if next_entry else len(js_text)
        tw_match = tw_pattern.search(js_text[m.start():chunk_end])
        if tw_match:
            # Keep quoted array elements, dropping word separators
            # ⦁ (U+2981) and 𐄁 (U+10101), newlines, and empty strings.
            inscriptions[entry_id] = [
                item
                for item in re.findall(r'"([^"]*)"', tw_match.group(1))
                if item not in ("\u2981", "\U00010101", "\n", "")
            ]
        pos = m.end()
    return inscriptions
def normalize_inscription_id(json_id: str) -> str:
    """Convert JSON id like 'IO Za 2' to lineara.xyz key like 'IOZa2'."""
    # Removing every space is all the normalization required.
    return "".join(json_id.split(" "))
def is_transliterable_word(word: str) -> bool:
    """Check if a word from lineara.xyz is a transliterable syllabic word.

    Filters out:
    - Pure numbers (commodity counts)
    - Words with embedded Unicode Linear A characters (untransliterable signs)
    - Empty strings
    """
    if not word:
        return False
    cleaned = word.strip()
    if not cleaned:
        return False
    # Commodity quantities appear as pure digit runs.
    if re.fullmatch(r"\d+", cleaned) is not None:
        return False
    # Reject anything carrying raw Linear A codepoints (U+10600–U+1077F).
    if any(0x10600 <= ord(ch) <= 0x1077F for ch in word):
        return False
    return True
def build_corpus(
    libation_path: Path,
    sign_map: Dict[str, str],
    lineara_inscriptions: Dict[str, List[str]],
) -> Tuple[List[Dict[str, str]], List[str]]:
    """Build word list and corpus lines from all sources.

    Args:
        libation_path: path to libation_tables.json.
        sign_map: syllabogram → IPA mapping (see load_sign_map).
        lineara_inscriptions: inscription key → transliterated words
            (see parse_lineara_js).

    Returns:
        (word_records, corpus_lines) where:
        - word_records: list of dicts with Word, IPA, SCA, Source,
          Concept_ID, Cognate_Set_ID, sorted alphabetically by IPA
        - corpus_lines: list of strings, one per inscription
          (IPA words space-separated)
    """
    with libation_path.open("r", encoding="utf-8") as f:
        data = json.load(f)

    all_words: Dict[str, Dict[str, str]] = {}  # IPA → record
    corpus_lines: List[str] = []
    inscriptions_used = 0

    for insc in data["inscriptions"]:
        norm_id = normalize_inscription_id(insc["id"])
        source_tag = "linear_a_za"

        # Gather transliterated words from lineara.xyz first (most complete).
        words_raw: List[str] = []
        if norm_id in lineara_inscriptions:
            # Copy: the original aliased the dict's list and the extend()
            # below mutated lineara_inscriptions in place.
            words_raw = list(lineara_inscriptions[norm_id])
            # Also merge sub-parts (e.g. IOZa2 may also have IOZa2a, IOZa2b).
            # The character after the shared prefix must NOT be a digit,
            # otherwise "IOZa2" would absorb the unrelated "IOZa21".
            for key, sub_words in lineara_inscriptions.items():
                if (
                    key != norm_id
                    and key.startswith(norm_id)
                    and not key[len(norm_id)].isdigit()
                ):
                    words_raw.extend(sub_words)

        # If lineara.xyz has no data, fall back to libation_tables.json.
        if not words_raw and insc.get("word_segmentation"):
            for seg in insc["word_segmentation"]:
                # Skip placeholders like [dedicant_name], [damaged/unreadable]
                if seg.startswith("["):
                    continue
                # Handle multi-word segments like "i-pi-na-ma si-ru-te"
                words_raw.extend(part.upper() for part in seg.split())

        if not words_raw:
            continue

        # Convert each word to IPA; record each unique IPA form once.
        line_ipa_words: List[str] = []
        for word_raw in words_raw:
            if not is_transliterable_word(word_raw):
                continue
            word_clean = word_raw.strip()
            ipa = syllables_to_ipa(word_clean, sign_map)
            if ipa is None:
                continue
            if ipa not in all_words:
                all_words[ipa] = {
                    "Word": word_clean,
                    "IPA": ipa,
                    "SCA": ipa_to_sca(ipa),
                    "Source": source_tag,
                    "Concept_ID": "-",
                    "Cognate_Set_ID": "-",
                }
            line_ipa_words.append(ipa)

        if line_ipa_words:
            corpus_lines.append(" ".join(line_ipa_words))
            inscriptions_used += 1

    print(f"Inscriptions with data: {inscriptions_used}")
    print(f"Unique word forms: {len(all_words)}")
    print(f"Corpus lines: {len(corpus_lines)}")

    # Sort word records alphabetically by IPA
    word_records = sorted(all_words.values(), key=lambda r: r["IPA"])
    return word_records, corpus_lines
def write_tsv(records: List[Dict[str, str]], path: Path) -> None:
    """Write word records to TSV (header row plus one row per record)."""
    cols = ["Word", "IPA", "SCA", "Source", "Concept_ID", "Cognate_Set_ID"]
    rows = ["\t".join(cols)]
    rows.extend("\t".join(rec[c] for c in cols) for rec in records)
    with path.open("w", encoding="utf-8", newline="") as f:
        f.write("\n".join(rows) + "\n")
    print(f"Wrote {len(records)} rows to {path}")
def write_corpus(lines: List[str], path: Path) -> None:
    """Write corpus lines, one inscription per line."""
    with path.open("w", encoding="utf-8", newline="") as f:
        f.writelines(line + "\n" for line in lines)
    print(f"Wrote {len(lines)} lines to {path}")
def main() -> None:
    """CLI entry point: load inputs, build the corpus, write output files."""
    sign_map_path = _HERE / "sign_to_ipa.json"
    libation_path = _HERE / "libation_tables.json"
    words_out = _HERE / "linear_a_words.tsv"
    corpus_out = _HERE / "linear_a_corpus.txt"

    # Both local inputs are required; bail out on the first one missing.
    for required in (sign_map_path, libation_path):
        if not required.exists():
            print(f"ERROR: {required} not found", file=sys.stderr)
            sys.exit(1)

    sign_map = load_sign_map(sign_map_path)
    print(f"Loaded {len(sign_map)} sign mappings")

    # Fetch and parse lineara.xyz data
    lineara_inscriptions = parse_lineara_js(fetch_lineara_js(_CACHE_JS))
    za_count = len(lineara_inscriptions)
    print(f"Parsed {za_count} Za inscriptions from lineara.xyz")

    word_records, corpus_lines = build_corpus(
        libation_path, sign_map, lineara_inscriptions
    )
    write_tsv(word_records, words_out)
    write_corpus(corpus_lines, corpus_out)

    print("\nDone. Files created:")
    print(f" {words_out}")
    print(f" {corpus_out}")
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()