#!/usr/bin/env python3
"""Fetch Wiktionary category members and save as raw JSON for later processing.
Uses curl with retry-after header respect. Designed to handle rate limiting
gracefully by waiting the specified time between retries.
Iron Rule: All data comes from HTTP API responses.
Usage:
python scripts/fetch_wiktionary_raw.py [--language ISO]
"""
from __future__ import annotations
import argparse
import io
import json
import logging
import subprocess
import sys
import time
from pathlib import Path
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")
ROOT = Path(__file__).resolve().parent.parent
RAW_DIR = ROOT / "data" / "training" / "raw"
logger = logging.getLogger(__name__)
API_URL = "https://en.wiktionary.org/w/api.php"
USER_AGENT = "PhaiPhon/1.0 (ancient-scripts-datasets; academic research)"
# All categories to fetch, keyed by ISO 639 / Wiktionary language code.
# Value is (category_name, namespace):
#   - category_name: URL-path-encoded Wiktionary category (note "Ge%27ez"
#     pre-encodes the apostrophe), used verbatim in the API query string.
#   - namespace: 0 for mainspace entries, 118 for Reconstruction-namespace
#     pages (proto-language lemmas).
CATEGORIES = {
"cop": ("Coptic_lemmas", 0),
"pli": ("Pali_lemmas", 0),
"xcl": ("Old_Armenian_lemmas", 0),
"ang": ("Old_English_lemmas", 0),
"gez": ("Ge%27ez_lemmas", 0),
# NOTE(review): "hbo" (Ancient/Biblical Hebrew) maps to the general
# Hebrew_lemmas category — confirm this is intentional.
"hbo": ("Hebrew_lemmas", 0),
"xht": ("Hattic_lemmas", 0),
# Tier 3 + Proto-languages
"gem-pro": ("Proto-Germanic_lemmas", 118),
"cel-pro": ("Proto-Celtic_lemmas", 118),
"urj-pro": ("Proto-Uralic_lemmas", 118),
"nci": ("Classical_Nahuatl_lemmas", 0),
"sga": ("Old_Irish_lemmas", 0),
# Phase 7 additions
"pal": ("Middle_Persian_lemmas", 0),
"bnt-pro": ("Proto-Bantu_lemmas", 118),
"sit-pro": ("Proto-Sino-Tibetan_lemmas", 118),
"xtg": ("Gaulish_lemmas", 0),
"sog": ("Sogdian_lemmas", 0),
"ojp": ("Old_Japanese_lemmas", 0),
# Phase 8 P0 additions
"sla-pro": ("Proto-Slavic_lemmas", 118),
"trk-pro": ("Proto-Turkic_lemmas", 118),
"itc-pro": ("Proto-Italic_lemmas", 118),
"jpx-pro": ("Proto-Japonic_lemmas", 118),
"ira-pro": ("Proto-Iranian_lemmas", 118),
# Phase 8 P1 proto-languages
"alg-pro": ("Proto-Algonquian_lemmas", 118),
"sqj-pro": ("Proto-Albanian_lemmas", 118),
"aav-pro": ("Proto-Austroasiatic_lemmas", 118),
"poz-pol-pro": ("Proto-Polynesian_lemmas", 118),
"tai-pro": ("Proto-Tai_lemmas", 118),
"xto-pro": ("Proto-Tocharian_lemmas", 118),
"poz-oce-pro": ("Proto-Oceanic_lemmas", 118),
"xgn-pro": ("Proto-Mongolic_lemmas", 118),
# Phase 8 additional ancient languages
"obm": ("Moabite_lemmas", 0),
# Batch 3: P2 proto-languages + Iberian
"myn-pro": ("Proto-Mayan_lemmas", 118),
"afa-pro": ("Proto-Afroasiatic_lemmas", 118),
"xib": ("Iberian_lemmas", 0),
}
def fetch_one_page(url: str) -> tuple[str, int]:
    """Fetch one URL via curl. Returns (body, retry_after_secs).

    curl is run with ``-D -`` so response headers are prepended to stdout;
    they are split off the body here.

    Args:
        url: Fully-formed API URL to fetch.

    Returns:
        ``(body, retry_after_secs)`` where ``retry_after_secs`` is 0 on a
        normal response, and on HTTP 429 is the server-requested wait
        (defaulting to 300 s when the Retry-After header is missing or not
        an integer number of seconds).
    """
    try:
        result = subprocess.run(
            ["curl", "-s", "-D", "-",
             "-H", f"User-Agent: {USER_AGENT}",
             url],
            capture_output=True, text=True, timeout=60,
        )
    except subprocess.TimeoutExpired:
        # Fix: the original let TimeoutExpired propagate and kill a long
        # multi-language crawl; treat a hung fetch as an empty response
        # instead (the caller logs invalid JSON and moves on).
        logger.warning("curl timed out fetching %s", url)
        return "", 0
    output = result.stdout
    # Split headers from body: HTTP uses CRLF, but tolerate bare LF.
    parts = output.split("\r\n\r\n", 1)
    if len(parts) < 2:
        parts = output.split("\n\n", 1)
    headers = parts[0] if parts else ""
    body = parts[1] if len(parts) > 1 else ""
    # Detect rate limiting from the status line (e.g. "HTTP/2 429").
    retry_after = 0
    status_line = headers.split("\n", 1)[0]
    if " 429" in status_line:
        for line in headers.split("\n"):
            if line.lower().startswith("retry-after:"):
                try:
                    retry_after = int(line.split(":", 1)[1].strip())
                except ValueError:
                    # Retry-After may also be an HTTP-date; fall back.
                    retry_after = 300  # Default 5 min
        if retry_after == 0:
            retry_after = 300
    return body, retry_after
def fetch_category(iso: str, category: str, namespace: int = 0) -> list[str]:
    """Collect every member title of a Wiktionary category.

    Pages through the categorymembers API 500 titles at a time, honoring
    any Retry-After the server sends. On a persistent rate limit or a
    malformed response, returns whatever titles were gathered so far.
    """
    titles: list[str] = []
    query = (
        f"action=query&list=categorymembers&cmtitle=Category:{category}"
        f"&cmtype=page&cmnamespace={namespace}&cmlimit=500&format=json"
    )
    cont_param = ""
    page_no = 0
    while True:
        page_no += 1
        url = f"{API_URL}?{query}{cont_param}"
        body, retry_after = fetch_one_page(url)
        if retry_after > 0:
            logger.warning("%s: Rate limited. Retry-After=%d seconds (%.1f min). Waiting...",
                           iso, retry_after, retry_after / 60)
            time.sleep(retry_after + 5)
            # One retry after honoring the server's requested wait.
            body, retry_after = fetch_one_page(url)
            if retry_after > 0:
                logger.error("%s: Still rate limited after waiting. Aborting.", iso)
                return titles
        try:
            payload = json.loads(body)
        except json.JSONDecodeError:
            logger.error("%s: Invalid JSON on page %d. Body: %s", iso, page_no, body[:200])
            return titles
        titles.extend(
            member["title"]
            for member in payload.get("query", {}).get("categorymembers", [])
        )
        cursor = payload.get("continue", {}).get("cmcontinue")
        if cursor is None:
            break
        cont_param = f"&cmcontinue={cursor}"
        if page_no % 5 == 0:
            logger.info(" %s page %d: %d members...", iso, page_no, len(titles))
        time.sleep(1.5)  # Be nice
    return titles
def main():
    """CLI entry point: fetch each (or one) category and cache it as JSON.

    Results are written to data/training/raw/wiktionary_category_<iso>.json;
    languages already cached there are skipped so reruns only fetch what is
    missing.
    """
    parser = argparse.ArgumentParser(description="Fetch Wiktionary category raw data")
    # Fix: restrict --language to known codes so a typo produces a clean
    # argparse usage error instead of a raw KeyError traceback.
    parser.add_argument("--language", "-l", choices=sorted(CATEGORIES),
                        metavar="ISO", help="Specific ISO code")
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s: %(message)s",
        datefmt="%H:%M:%S",
    )
    RAW_DIR.mkdir(parents=True, exist_ok=True)
    if args.language:
        cats = {args.language: CATEGORIES[args.language]}
    else:
        cats = CATEGORIES
    for iso, (category, namespace) in cats.items():
        raw_path = RAW_DIR / f"wiktionary_category_{iso}.json"
        if raw_path.exists():
            # Already fetched on a previous run; never re-download.
            with open(raw_path, "r", encoding="utf-8") as f:
                existing = json.load(f)
            logger.info("%s: Already cached (%d members). Skipping.", iso, len(existing.get("members", [])))
            continue
        logger.info("%s: Fetching %s (ns=%d)...", iso, category, namespace)
        members = fetch_category(iso, category, namespace)
        logger.info("%s: Got %d members", iso, len(members))
        if members:
            with open(raw_path, "w", encoding="utf-8") as f:
                json.dump({"category": category, "members": members}, f, ensure_ascii=False)
            logger.info("%s: Saved to %s", iso, raw_path)
        # Pause between languages to be polite
        time.sleep(5)
if __name__ == "__main__":
main()