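"""Shared helpers for the Overpass-based chatbot data fetch scripts.

Provides a common CLI argument parser, an India-scoped Overpass QL query
builder, a fetcher with per-endpoint retries and endpoint failover, tag
normalization into a fixed CSV schema, deduplication, and CSV output.

Typical usage from a fetch script (a sketch; the selector, category, and
output path below are illustrative, not fixed by this module):

    parser = build_arg_parser('Fetch hospitals across India', Path('hospitals.csv'))
    args = parser.parse_args()
    query = build_india_query(
        ['node["amenity"="hospital"](area.india);'],
        timeout=args.timeout,
    )
    elements = fetch_elements(
        query, endpoint=args.endpoint, timeout=args.timeout, retries=args.retries
    )
    rows = []
    for element in elements:
        row = normalize_row(element, default_category='hospital', fallback_name='Hospital')
        if row:
            rows.append(row)
    print_summary(label='hospital', count=write_rows(args.output, rows), output=args.output)
"""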
from __future__ import annotations

import argparse
import csv
import json
import time
import urllib.parse
import urllib.request
from pathlib import Path
from collections.abc import Iterable


ROOT_DIR = Path(__file__).resolve().parents[2]
CHATBOT_SERVICE_DIR = ROOT_DIR / 'chatbot_service'

DEFAULT_ENDPOINTS = (
    'https://overpass-api.de/api/interpreter',
    'https://overpass.kumi.systems/api/interpreter',
    'https://lz4.overpass-api.de/api/interpreter',
)
DEFAULT_HEADERS = {
    'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
    'User-Agent': 'SafeVixAI chatbot data fetcher/1.0',
}
CSV_COLUMNS = [
    'name',
    'lat',
    'lon',
    'phone',
    'address',
    'city',
    'state',
    'operator',
    'osm_id',
    'osm_type',
    'category',
    'opening_hours',
    'website',
    'email',
    'postcode',
    'source',
]


def build_arg_parser(description: str, default_output: Path) -> argparse.ArgumentParser:
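    """Build the CLI parser shared by the fetch scripts.

    ``default_output`` is the CSV path used when ``--output`` is omitted.
    """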
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        '--output',
        type=Path,
        default=default_output,
        help=f'CSV path to write. Defaults to {default_output}',
    )
    parser.add_argument(
        '--endpoint',
        help='Optional Overpass endpoint override. Defaults to the built-in endpoint fallback list.',
    )
    parser.add_argument(
        '--timeout',
        type=int,
        default=180,
        help='HTTP timeout in seconds. Defaults to 180.',
    )
    parser.add_argument(
        '--retries',
        type=int,
        default=2,
        help='Retries per endpoint before failing over. Defaults to 2.',
    )
    return parser


def build_india_query(selectors: Iterable[str], *, timeout: int) -> str:
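    """Assemble an Overpass QL query scoped to India.

    The query binds the India country area to ``.india``; each selector is
    expected to filter with ``(area.india)`` to stay inside that boundary.
    """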
    joined = '\n  '.join(selector.strip() for selector in selectors if selector.strip())
    return (
        f'[out:json][timeout:{timeout}];\n'
        'area["ISO3166-1"="IN"][admin_level=2]->.india;\n'
        '(\n'
        f'  {joined}\n'
        ');\n'
        'out center tags;'
    )


def fetch_elements(
    query: str,
    *,
    endpoint: str | None,
    timeout: int,
    retries: int,
) -> list[dict]:
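    """POST the query to Overpass with retries and endpoint failover.

    Tries each endpoint up to ``retries`` times with a short backoff before
    moving to the next one; raises ``SystemExit`` once every endpoint fails.
    """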
    payload = urllib.parse.urlencode({'data': query}).encode('utf-8')
    endpoints = [endpoint] if endpoint else list(DEFAULT_ENDPOINTS)
    last_error: Exception | None = None

    for url in endpoints:
        for attempt in range(1, retries + 1):
            request = urllib.request.Request(url, data=payload, headers=DEFAULT_HEADERS, method='POST')
            try:
                with urllib.request.urlopen(request, timeout=timeout) as response:
                    decoded = response.read().decode('utf-8')
                data = json.loads(decoded)
                return list(data.get('elements', []))
            except Exception as exc:  # pragma: no cover - network path
                last_error = exc
                if attempt < retries:
                    time.sleep(min(attempt, 3))

    raise SystemExit(f'Unable to fetch data from Overpass. Last error: {last_error}')


def extract_point(element: dict) -> tuple[float | None, float | None]:
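    """Return (lat, lon) for a node, falling back to a way/relation ``center``.

    Returns (None, None) when the element carries no usable coordinates.
    """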
    if 'lat' in element and 'lon' in element:
        return float(element['lat']), float(element['lon'])

    center = element.get('center') or {}
    if 'lat' in center and 'lon' in center:
        return float(center['lat']), float(center['lon'])

    return None, None


def first_non_empty(*values: str | None) -> str:
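    """Return the first value that is non-empty after stripping, else ''."""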
    for value in values:
        if value is None:
            continue
        text = str(value).strip()
        if text:
            return text
    return ''


def compose_address(tags: dict[str, str]) -> str:
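    """Build a display address, preferring ``addr:full`` over joined parts."""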
    return first_non_empty(
        tags.get('addr:full'),
        ', '.join(
            part
            for part in [
                tags.get('addr:housenumber'),
                tags.get('addr:street'),
                tags.get('addr:suburb'),
                first_non_empty(tags.get('addr:city'), tags.get('addr:town'), tags.get('addr:village')),
                first_non_empty(tags.get('addr:district'), tags.get('addr:county')),
                tags.get('addr:state'),
                tags.get('addr:postcode'),
            ]
            if part
        ),
    )


def normalize_row(element: dict, *, default_category: str, fallback_name: str) -> dict | None:
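    """Flatten one Overpass element into the CSV schema.

    Returns ``None`` when the element has no usable coordinates.
    """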
    lat, lon = extract_point(element)
    if lat is None or lon is None:
        return None

    tags = element.get('tags', {})
    return {
        'name': first_non_empty(tags.get('name'), fallback_name),
        'lat': f'{lat:.6f}',
        'lon': f'{lon:.6f}',
        'phone': first_non_empty(tags.get('phone'), tags.get('contact:phone'), tags.get('emergency:phone')),
        'address': compose_address(tags),
        'city': first_non_empty(tags.get('addr:city'), tags.get('addr:town'), tags.get('addr:village')),
        'state': first_non_empty(tags.get('addr:state')),
        'operator': first_non_empty(tags.get('operator')),
        'osm_id': str(element.get('id', '')),
        'osm_type': str(element.get('type', '')),
        'category': first_non_empty(
            tags.get('amenity'),
            tags.get('healthcare'),
            tags.get('office'),
            tags.get('emergency'),
            default_category,
        ),
        'opening_hours': first_non_empty(tags.get('opening_hours')),
        'website': first_non_empty(tags.get('website'), tags.get('contact:website')),
        'email': first_non_empty(tags.get('email'), tags.get('contact:email')),
        'postcode': first_non_empty(tags.get('addr:postcode')),
        'source': 'overpass',
    }


def dedupe_rows(rows: Iterable[dict]) -> list[dict]:
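    """Drop duplicate (name, category, lat, lon) rows and sort for stable output."""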
    seen: set[tuple[str, str, str, str]] = set()
    deduped: list[dict] = []
    for row in rows:
        key = (
            row.get('name', '').strip().lower(),
            row.get('category', '').strip().lower(),
            row.get('lat', ''),
            row.get('lon', ''),
        )
        if key in seen:
            continue
        seen.add(key)
        deduped.append(row)
    deduped.sort(key=lambda item: (item.get('state', ''), item.get('city', ''), item.get('name', '')))
    return deduped


def write_rows(path: Path, rows: Iterable[dict]) -> int:
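    """Write deduplicated rows to ``path`` and return the record count."""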
    path.parent.mkdir(parents=True, exist_ok=True)
    materialized = dedupe_rows(rows)
    with path.open('w', newline='', encoding='utf-8') as handle:
        writer = csv.DictWriter(handle, fieldnames=CSV_COLUMNS)
        writer.writeheader()
        writer.writerows(materialized)
    return len(materialized)


def print_summary(*, label: str, count: int, output: Path) -> None:
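    """Print a one-line summary of how many records were written and where."""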
    print(f'Saved {count} {label} records to {output}')