File size: 12,378 Bytes
26786e3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
#!/usr/bin/env python3
"""Reusable framework for extracting ancient language data from online sources.

Orchestrates multiple source-specific parsers (ASJP, Wiktionary, Oracc,
eDiAna, Avesta, LRC, DEDR) to build a unified TSV lexicon file for any
ancient or reconstructed language.

Pipeline:
    1. For each source in the config, call the appropriate parser
    2. Apply transliteration -> IPA mapping via transliteration_maps
    3. Generate SCA sound-class codes via cognate_pipeline
    4. Deduplicate by (word, ipa)
    5. Write output TSV

Usage:
    python extract_ancient_language.py \\
        --iso hit --name Hittite --family anatolian \\
        --source asjp:https://asjp.clld.org/languages/HITTITE \\
        --source wiktionary:https://en.wiktionary.org/wiki/Appendix:Hittite_Swadesh_list

Output: data/training/lexicons/{iso}.tsv
"""

from __future__ import annotations

import argparse
import importlib
import logging
import sys
from pathlib import Path

# ---------------------------------------------------------------------------
# Path setup: make cognate_pipeline importable
# ---------------------------------------------------------------------------
# Repo root is two levels up from this script (it lives in scripts/).
ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))

# Must run after the sys.path mutation above, hence the E402 suppression.
from cognate_pipeline.normalise.sound_class import ipa_to_sound_class  # noqa: E402

# ---------------------------------------------------------------------------
# Transliteration maps (same scripts/ directory)
# ---------------------------------------------------------------------------
# transliteration_maps is expected to live alongside this script and
# expose a function: transliterate(translit: str, iso: str) -> str
# If it doesn't exist yet, we provide a passthrough fallback.
try:
    # Look next to this script first so transliteration_maps resolves
    # regardless of the current working directory.
    sys.path.insert(0, str(Path(__file__).resolve().parent))
    from transliteration_maps import transliterate as _transliterate  # type: ignore
except ImportError:
    # Soft dependency: warn and degrade gracefully instead of aborting —
    # output then carries raw transliteration in the IPA column.
    logging.getLogger(__name__).warning(
        "transliteration_maps not found in scripts/; "
        "IPA output will use raw transliteration forms"
    )

    def _transliterate(translit: str, iso: str) -> str:  # type: ignore[misc]
        """Passthrough: return transliteration as-is when no map is available."""
        return translit

# ---------------------------------------------------------------------------
# Parser registry
# ---------------------------------------------------------------------------
PARSER_MODULES: dict[str, str] = {
    "asjp": "scripts.parsers.parse_asjp",
    "wiktionary": "scripts.parsers.parse_wiktionary",
    "oracc": "scripts.parsers.parse_oracc",
    "ediana": "scripts.parsers.parse_ediana",
    "avesta": "scripts.parsers.parse_avesta",
    "lrc": "scripts.parsers.parse_lrc",
    "dedr": "scripts.parsers.parse_dedr",
}

# Also support direct import if running from the scripts/ directory
_PARSER_MODULES_ALT: dict[str, str] = {
    "asjp": "parsers.parse_asjp",
    "wiktionary": "parsers.parse_wiktionary",
    "oracc": "parsers.parse_oracc",
    "ediana": "parsers.parse_ediana",
    "avesta": "parsers.parse_avesta",
    "lrc": "parsers.parse_lrc",
    "dedr": "parsers.parse_dedr",
}

logger = logging.getLogger(__name__)


def _load_parser(source_type: str):
    """Dynamically import and return the parse() function for a source type.

    Args:
        source_type: Registry key, e.g. "asjp" or "wiktionary".

    Returns:
        The parser module's ``parse`` callable.

    Raises:
        ValueError: If ``source_type`` is not in the registry.
        ImportError: If neither module path can be imported, or a module was
            found but exposes no callable ``parse``.
    """
    if source_type not in PARSER_MODULES:
        raise ValueError(
            f"Unknown source type '{source_type}'. "
            f"Available: {', '.join(sorted(PARSER_MODULES))}"
        )

    # Try the fully-qualified module path first, then the relative one
    for module_map in (PARSER_MODULES, _PARSER_MODULES_ALT):
        module_name = module_map[source_type]
        try:
            mod = importlib.import_module(module_name)
        except ImportError:
            continue
        # Fail loudly and clearly if the module exists but is missing its
        # entry point, instead of a bare AttributeError from `mod.parse`.
        parse_fn = getattr(mod, "parse", None)
        if callable(parse_fn):
            return parse_fn
        raise ImportError(
            f"Parser module '{module_name}' has no callable 'parse' function"
        )

    raise ImportError(
        f"Could not import parser module for '{source_type}'. "
        f"Tried: {PARSER_MODULES[source_type]}, {_PARSER_MODULES_ALT[source_type]}"
    )


def extract_language(config: dict) -> Path:
    """Extract language data from multiple sources and write a unified TSV.

    Args:
        config: Dictionary with keys:
            iso (str): ISO 639-3 code (e.g. "hit" for Hittite)
            name (str): Human-readable language name
            family (str): Language family (e.g. "anatolian")
            sources (list[dict]): List of source descriptors, each with:
                type (str): Parser type (asjp, wiktionary, oracc, etc.)
                url (str): URL to download from
                ... additional kwargs passed to the parser
            target_dir (str | Path | None): Output directory. Defaults to
                data/training/lexicons/ relative to repo root.

    Returns:
        Path to the output TSV file.
    """
    iso = config["iso"]
    name = config["name"]
    family = config.get("family", "unknown")
    sources = config.get("sources", [])
    target_dir = config.get("target_dir")

    if target_dir is None:
        target_dir = ROOT / "data" / "training" / "lexicons"
    else:
        target_dir = Path(target_dir)

    target_dir.mkdir(parents=True, exist_ok=True)
    output_path = target_dir / f"{iso}.tsv"

    logger.info(
        "Extracting %s (%s, family=%s) from %d source(s)",
        name, iso, family, len(sources),
    )

    # ------------------------------------------------------------------
    # Step 1: Collect raw entries from all sources
    # ------------------------------------------------------------------
    all_entries: list[dict] = []

    for source in sources:
        source_type = source["type"]
        source_url = source["url"]
        # Pass any extra kwargs to the parser
        extra_kwargs = {k: v for k, v in source.items() if k not in ("type", "url")}

        logger.info("  Source: %s -> %s", source_type, source_url)
        try:
            parse_fn = _load_parser(source_type)
            # Materialize here so generator-returning parsers are not
            # consumed by the tagging loop before extend().
            raw_entries = list(parse_fn(source_url, **extra_kwargs))
        except Exception:
            # Best-effort: one broken source must not sink the others.
            logger.exception(
                "  Failed to parse source %s (%s)", source_type, source_url
            )
            raw_entries = []

        # Tag each entry with its source type
        for entry in raw_entries:
            entry["_source"] = source_type
        all_entries.extend(raw_entries)

    logger.info("  Total raw entries: %d", len(all_entries))

    if not all_entries:
        logger.warning("  No entries extracted for %s -- writing empty TSV", name)
        output_path.write_text(
            "Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID\n",
            encoding="utf-8",
        )
        return output_path

    # ------------------------------------------------------------------
    # Step 2: Transliteration -> IPA -> SCA
    # ------------------------------------------------------------------
    processed: list[tuple[str, str, str, str, str, str]] = []
    for entry in all_entries:
        # `or` guards: parsers may yield None or empty-string values, which
        # would crash .strip() or silently produce empty IPA cells.
        word = (entry.get("word") or "").strip()
        translit = (entry.get("transliteration") or word).strip()
        gloss = (entry.get("gloss") or "").strip()
        source_tag = entry.get("_source", "unknown")

        if not word:
            continue

        # Apply transliteration -> IPA mapping
        ipa = _transliterate(translit, iso)
        if not ipa:
            ipa = translit  # Fallback: use raw transliteration

        # Generate SCA code
        sca = ipa_to_sound_class(ipa)

        # Use gloss as a concept ID (normalized, capped at 50 chars)
        concept_id = gloss.lower().replace(" ", "_")[:50] if gloss else "-"
        cognate_set_id = "-"

        processed.append((word, ipa, sca, source_tag, concept_id, cognate_set_id))

    # ------------------------------------------------------------------
    # Step 3: Deduplicate by (word, ipa) -- first occurrence wins
    # ------------------------------------------------------------------
    seen: set[tuple[str, str]] = set()
    deduplicated: list[tuple[str, str, str, str, str, str]] = []
    for entry_tuple in processed:
        key = (entry_tuple[0], entry_tuple[1])  # (word, ipa)
        if key not in seen:
            seen.add(key)
            deduplicated.append(entry_tuple)

    dupes_removed = len(processed) - len(deduplicated)
    if dupes_removed > 0:
        logger.info("  Removed %d duplicate entries", dupes_removed)

    # ------------------------------------------------------------------
    # Step 4: Write output TSV, sorted case-insensitively by word
    # ------------------------------------------------------------------
    with open(output_path, "w", encoding="utf-8", newline="") as f:
        f.write("Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID\n")
        for word, ipa, sca, source_tag, concept_id, cognate_set_id in sorted(
            deduplicated, key=lambda x: x[0].lower()
        ):
            f.write(
                f"{word}\t{ipa}\t{sca}\t{source_tag}\t{concept_id}\t{cognate_set_id}\n"
            )

    logger.info(
        "  Wrote %d entries to %s", len(deduplicated), output_path
    )
    return output_path


def _parse_source_arg(source_str: str) -> dict:
    """Parse a --source argument of the form 'type:url'.

    Returns:
        Dict with keys 'type' and 'url'.
    """
    # partition() splits on the first colon only -- URLs contain colons.
    kind, sep, rest = source_str.partition(":")
    if not sep:
        raise argparse.ArgumentTypeError(
            f"Invalid source format: '{source_str}'. Expected 'type:url'"
        )

    kind = kind.strip().lower()
    rest = rest.strip()

    if kind not in PARSER_MODULES:
        raise argparse.ArgumentTypeError(
            f"Unknown source type '{kind}'. "
            f"Available: {', '.join(sorted(PARSER_MODULES))}"
        )
    if not rest:
        raise argparse.ArgumentTypeError(
            f"Empty URL for source type '{kind}'"
        )

    return {"type": kind, "url": rest}


def main() -> None:
    """CLI entry point: parse arguments, configure logging, run extraction."""
    epilog_text = (
        "Examples:\n"
        "  python extract_ancient_language.py \\\n"
        "      --iso hit --name Hittite --family anatolian \\\n"
        "      --source asjp:https://asjp.clld.org/languages/HITTITE \\\n"
        "      --source wiktionary:https://en.wiktionary.org/wiki/Appendix:Hittite_Swadesh_list\n"
        "\n"
        "  python extract_ancient_language.py \\\n"
        "      --iso xur --name Urartian --family hurro-urartian \\\n"
        "      --source oracc:http://oracc.museum.upenn.edu/ecut\n"
        "\n"
        f"Available source types: {', '.join(sorted(PARSER_MODULES))}\n"
        "Source format: type:url\n"
    )

    cli = argparse.ArgumentParser(
        description="Extract ancient language data from online sources into a TSV lexicon.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=epilog_text,
    )
    cli.add_argument(
        "--iso", required=True,
        help="ISO 639-3 language code (e.g. hit, xur, xcl)",
    )
    cli.add_argument(
        "--name", required=True,
        help="Human-readable language name (e.g. Hittite, Urartian)",
    )
    cli.add_argument(
        "--family", default="unknown",
        help="Language family (e.g. anatolian, hurro-urartian, semitic)",
    )
    cli.add_argument(
        "--source", action="append", dest="sources", default=[],
        type=_parse_source_arg,
        help="Source in format type:url (can be specified multiple times)",
    )
    cli.add_argument(
        "--target-dir", default=None,
        help="Output directory (default: data/training/lexicons/)",
    )
    cli.add_argument(
        "--verbose", "-v", action="store_true",
        help="Enable verbose logging",
    )

    args = cli.parse_args()

    # Verbose flag toggles DEBUG-level output
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(
        level=log_level,
        format="%(asctime)s %(levelname)-8s %(name)s: %(message)s",
        datefmt="%H:%M:%S",
    )

    # default=[] means sources can legally be empty; enforce at least one
    if not args.sources:
        cli.error("At least one --source is required")

    result_path = extract_language(
        {
            "iso": args.iso,
            "name": args.name,
            "family": args.family,
            "sources": args.sources,
            "target_dir": args.target_dir,
        }
    )
    print(f"\nOutput written to: {result_path}")


if __name__ == "__main__":
    main()