#!/usr/bin/env python3
"""Build Linear A model-ready training data from libation tables and lineara.xyz.
Reads:
- libation_tables.json (inscription metadata + transcriptions)
- sign_to_ipa.json (syllabogram → IPA mapping)
- LinearAInscriptions.js (fetched from lineara.xyz, cached locally)
Outputs:
- linear_a_words.tsv (all unique word forms with IPA and SCA)
- linear_a_corpus.txt (one inscription per line, IPA-converted words)
"""
from __future__ import annotations
import json
import re
import sys
import urllib.request
from pathlib import Path
from typing import Dict, List, Optional, Tuple
# Paths resolved relative to this script's directory so the build can be run
# from anywhere; the lineara.xyz download is cached next to the script.
_HERE = Path(__file__).resolve().parent
_CACHE_JS = _HERE / "LinearAInscriptions.js"
_LINEARA_URL = "https://www.lineara.xyz/LinearAInscriptions.js"
def load_sign_map(path: Path) -> Dict[str, str]:
    """Read the syllabogram → IPA mapping from sign_to_ipa.json."""
    raw = path.read_text(encoding="utf-8")
    return json.loads(raw)
# Alternate notations used by lineara.xyz for undeciphered signs.
# Both the Unicode-subscript and plain-digit spellings of "pa3" are
# normalized to the conventional sign number *56 before sign-map lookup.
_SIGN_ALIASES: Dict[str, str] = {
    "pa₃": "*56",
    "pa3": "*56",
}
def syllables_to_ipa(word: str, sign_map: Dict[str, str]) -> Optional[str]:
    """Convert a hyphen-delimited transliteration to IPA.

    E.g. "A-TA-I-*301-WA-JA" → "ataiΘwaja"
    Returns None if any syllable is unrecognizable (damaged, numeric, etc.),
    or if the word yields no syllables at all.
    """
    pieces: List[str] = []
    for token in word.split("-"):
        key = token.lower().strip()
        if not key:
            # Empty fragments (stray hyphens) contribute nothing.
            continue
        # Normalize alternate notations before the sign-map lookup.
        key = _SIGN_ALIASES.get(key, key)
        ipa = sign_map.get(key)
        if ipa is None:
            # Unrecognized token: numeric quantity, damaged sign, etc.
            return None
        pieces.append(ipa)
    return "".join(pieces) if pieces else None
def ipa_to_sca(ipa: str) -> str:
    """Generate an uppercase SCA (Sound Class Approximation) from IPA.

    For CV syllabograms that map directly, this is just uppercase ASCII.
    The placeholders Θ and Φ are already uppercase Greek letters, and
    str.upper() maps them to themselves — so a single uppercase pass covers
    placeholders, ASCII, and any other IPA characters alike. (The previous
    per-character if/elif/else had three behaviorally identical branches.)
    """
    return ipa.upper()
def fetch_lineara_js(cache_path: Path) -> str:
    """Return the LinearAInscriptions.js source, downloading on a cache miss.

    A successful download is written to *cache_path* so subsequent runs
    are offline.
    """
    if not cache_path.exists():
        print(f"Fetching {_LINEARA_URL} ...")
        request = urllib.request.Request(
            _LINEARA_URL, headers={"User-Agent": "LinearA-build/1.0"}
        )
        with urllib.request.urlopen(request, timeout=30) as response:
            body = response.read().decode("utf-8")
        cache_path.write_text(body, encoding="utf-8")
        print(f" Cached to {cache_path}")
        return body
    return cache_path.read_text(encoding="utf-8")
def parse_lineara_js(js_text: str) -> Dict[str, List[str]]:
    """Parse the JS Map to extract transliteratedWords per inscription ID.

    Returns dict of inscription_id → list of transliterated word strings.
    """
    # Entries look like ["IOZa2",{...}]. The file is JavaScript, not JSON,
    # so we scan for Za entry headers and pull each one's transliteratedWords
    # array out of a bounded window of text following the header.
    entry_re = re.compile(r'\["([A-Za-z]+Za\d+[a-z]?)",\s*\{')
    words_re = re.compile(r'"transliteratedWords"\s*:\s*\[([^\]]*)\]')
    # Word separators used by lineara.xyz: ⦁ (U+2981) and 𐄁 (U+10101).
    skip = ("\u2981", "\U00010101", "\n", "")
    result: Dict[str, List[str]] = {}
    for entry in entry_re.finditer(js_text):
        # Search only the next ~10000 chars so one entry's words are not
        # accidentally pulled from a much later entry.
        window = js_text[entry.start() : entry.start() + 10000]
        found = words_re.search(window)
        if found is None:
            continue
        result[entry.group(1)] = [
            item
            for item in re.findall(r'"([^"]*)"', found.group(1))
            if item not in skip
        ]
    return result
def normalize_inscription_id(json_id: str) -> str:
    """Convert JSON id like 'IO Za 2' to lineara.xyz key like 'IOZa2'."""
    # Dropping every space is equivalent to str.replace(" ", "").
    return "".join(json_id.split(" "))
def is_transliterable_word(word: str) -> bool:
    """Check if a word from lineara.xyz is a transliterable syllabic word.

    Filters out:
    - Empty or whitespace-only strings
    - Pure numbers (commodity counts)
    - Words with embedded Linear A Unicode characters (untransliterable signs)

    Note: the former trailing ``len(clean) <= 0`` re-check was unreachable
    (blank strings are already rejected up front) and has been removed.
    """
    stripped = word.strip() if word else ""
    if not stripped:
        return False
    # Pure numeric tokens are commodity quantities, not words.
    # str.isdecimal() matches exactly the Unicode Nd digits that r"^\d+$" did.
    if stripped.isdecimal():
        return False
    # Reject anything containing raw Linear A block chars (U+10600–U+1077F).
    if any(0x10600 <= ord(ch) <= 0x1077F for ch in word):
        return False
    return True
def build_corpus(
    libation_path: Path,
    sign_map: Dict[str, str],
    lineara_inscriptions: Dict[str, List[str]],
) -> Tuple[List[Dict[str, str]], List[str]]:
    """Build word list and corpus lines from all sources.

    Returns:
        (word_records, corpus_lines) where:
        - word_records: list of dicts with Word, IPA, SCA, Source, Concept_ID, Cognate_Set_ID
        - corpus_lines: list of strings, one per inscription (IPA words space-separated)
    """
    with libation_path.open("r", encoding="utf-8") as f:
        data = json.load(f)

    all_words: Dict[str, Dict[str, str]] = {}  # IPA → record (first form wins)
    corpus_lines: List[str] = []
    inscriptions_used = 0

    for insc in data["inscriptions"]:
        norm_id = normalize_inscription_id(insc["id"])

        # Gather transliterated words from lineara.xyz first (most complete).
        words_raw: List[str] = []
        source_tag = "linear_a_za"
        if norm_id in lineara_inscriptions:
            # Copy: the previous code aliased the cached list and then
            # extend()ed it, mutating lineara_inscriptions in place.
            words_raw = list(lineara_inscriptions[norm_id])
            # Also fold in sub-parts (e.g. IOZa2 may also have IOZa2a, IOZa2b).
            # NOTE(review): startswith() would also match e.g. "IOZa21" for
            # "IOZa2" — confirm no inscription IDs collide this way.
            for key, sub_words in lineara_inscriptions.items():
                if key.startswith(norm_id) and key != norm_id:
                    words_raw.extend(sub_words)

        # If lineara.xyz has no data, fall back to libation_tables.json.
        if not words_raw and insc.get("word_segmentation"):
            for seg in insc["word_segmentation"]:
                # Skip placeholders like [dedicant_name], [damaged/unreadable].
                if seg.startswith("["):
                    continue
                # Handle multi-word segments like "i-pi-na-ma si-ru-te".
                words_raw.extend(part.upper() for part in seg.split())

        if not words_raw:
            continue

        # Convert each word to IPA; untransliterable or unknown words skipped.
        line_ipa_words: List[str] = []
        for word_raw in words_raw:
            if not is_transliterable_word(word_raw):
                continue
            word_clean = word_raw.strip()
            ipa = syllables_to_ipa(word_clean, sign_map)
            if ipa is None:
                continue
            if ipa not in all_words:
                all_words[ipa] = {
                    "Word": word_clean,
                    "IPA": ipa,
                    "SCA": ipa_to_sca(ipa),
                    "Source": source_tag,
                    "Concept_ID": "-",
                    "Cognate_Set_ID": "-",
                }
            line_ipa_words.append(ipa)

        if line_ipa_words:
            corpus_lines.append(" ".join(line_ipa_words))
            inscriptions_used += 1

    print(f"Inscriptions with data: {inscriptions_used}")
    print(f"Unique word forms: {len(all_words)}")
    print(f"Corpus lines: {len(corpus_lines)}")

    # Sort word records alphabetically by IPA.
    word_records = sorted(all_words.values(), key=lambda r: r["IPA"])
    return word_records, corpus_lines
def write_tsv(records: List[Dict[str, str]], path: Path) -> None:
    """Write word records to TSV."""
    cols = ["Word", "IPA", "SCA", "Source", "Concept_ID", "Cognate_Set_ID"]
    rows = ["\t".join(cols)]
    rows.extend("\t".join(rec[c] for c in cols) for rec in records)
    with path.open("w", encoding="utf-8", newline="") as out:
        # Trailing "\n" keeps the file newline-terminated like before.
        out.write("\n".join(rows) + "\n")
    print(f"Wrote {len(records)} rows to {path}")
def write_corpus(lines: List[str], path: Path) -> None:
    """Write corpus lines, one inscription per line."""
    with path.open("w", encoding="utf-8", newline="") as out:
        out.writelines(f"{entry}\n" for entry in lines)
    print(f"Wrote {len(lines)} lines to {path}")
def main() -> None:
    """Entry point: load inputs, fetch/parse lineara.xyz data, write outputs."""
    # Inputs are expected next to this script; outputs are written beside them.
    sign_map_path = _HERE / "sign_to_ipa.json"
    libation_path = _HERE / "libation_tables.json"
    words_out = _HERE / "linear_a_words.tsv"
    corpus_out = _HERE / "linear_a_corpus.txt"
    # Fail fast with a clear message if either required input is missing.
    if not sign_map_path.exists():
        print(f"ERROR: {sign_map_path} not found", file=sys.stderr)
        sys.exit(1)
    if not libation_path.exists():
        print(f"ERROR: {libation_path} not found", file=sys.stderr)
        sys.exit(1)
    sign_map = load_sign_map(sign_map_path)
    print(f"Loaded {len(sign_map)} sign mappings")
    # Fetch and parse lineara.xyz data (network access only on cache miss).
    js_text = fetch_lineara_js(_CACHE_JS)
    lineara_inscriptions = parse_lineara_js(js_text)
    za_count = len(lineara_inscriptions)
    print(f"Parsed {za_count} Za inscriptions from lineara.xyz")
    word_records, corpus_lines = build_corpus(libation_path, sign_map, lineara_inscriptions)
    write_tsv(word_records, words_out)
    write_corpus(corpus_lines, corpus_out)
    print("\nDone. Files created:")
    print(f" {words_out}")
    print(f" {corpus_out}")


if __name__ == "__main__":
    main()
|