Dataset schema (column name, type, value range or number of distinct values):

| Column | Type | Range / values |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | stringlengths | 4 to 119 |
| file_path | stringlengths | 14 to 227 |
| content | stringlengths | 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | stringclasses | 1 value |
| extension | stringclasses | 14 values |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | stringlengths | 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | stringclasses | 12 values |
| repo_extraction_date | stringclasses | 433 values |

id: 14,700 | file_name: test_clean_postcodes.py | file_path: osm-search_Nominatim/test/python/tokenizer/sanitizers/test_clean_postcodes.py

# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the sanitizer that normalizes postcodes.
"""
import pytest
from nominatim_db.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim_db.data.place_info import PlaceInfo
from nominatim_db.data import country_info
@pytest.fixture
def sanitize(def_config, request):
country_info.setup_country_config(def_config)
sanitizer_args = {'step': 'clean-postcodes'}
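# Any @pytest.mark.sanitizer_params markers on the test are merged into the
# step configuration below; underscores in the marker's keyword names map to
# the dash-separated option names (e.g. convert_to_address -> convert-to-address).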
for mark in request.node.iter_markers(name="sanitizer_params"):
sanitizer_args.update({k.replace('_', '-') : v for k,v in mark.kwargs.items()})
def _run(country=None, **kwargs):
pi = {'address': kwargs}
if country is not None:
pi['country_code'] = country
_, address = PlaceSanitizer([sanitizer_args], def_config).process_names(PlaceInfo(pi))
return sorted([(p.kind, p.name) for p in address])
return _run
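# Editor's sketch (not part of the original suite): a call like
# sanitize(country='de', postcode='12345') builds a PlaceInfo of the form
# {'address': {'postcode': '12345'}, 'country_code': 'de'}, runs it through
# the 'clean-postcodes' sanitizer and returns the sanitized address items
# as sorted (kind, name) tuples, e.g. [('postcode', '12345')].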
@pytest.mark.parametrize("country", (None, 'ae'))
def test_postcode_no_country(sanitize, country):
assert sanitize(country=country, postcode='23231') == [('unofficial_postcode', '23231')]
@pytest.mark.parametrize("country", (None, 'ae'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_no_country_drop(sanitize, country):
assert sanitize(country=country, postcode='23231') == []
@pytest.mark.parametrize("postcode", ('12345', ' 12345 ', 'de 12345',
'DE12345', 'DE 12345', 'DE-12345'))
def test_postcode_pass_good_format(sanitize, postcode):
assert sanitize(country='de', postcode=postcode) == [('postcode', '12345')]
@pytest.mark.parametrize("postcode", ('123456', '', ' ', '.....',
'DE  12345', 'DEF12345', 'CH 12345'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_drop_bad_format(sanitize, postcode):
assert sanitize(country='de', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('1234', '9435', '99000'))
def test_postcode_cyprus_pass(sanitize, postcode):
assert sanitize(country='cy', postcode=postcode) == [('postcode', postcode)]
@pytest.mark.parametrize("postcode", ('91234', '99a45', '567'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_cyprus_fail(sanitize, postcode):
assert sanitize(country='cy', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('123456', 'A33F2G7'))
def test_postcode_kazakhstan_pass(sanitize, postcode):
assert sanitize(country='kz', postcode=postcode) == [('postcode', postcode)]
@pytest.mark.parametrize("postcode", ('V34T6Y923456', '99345'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_kazakhstan_fail(sanitize, postcode):
assert sanitize(country='kz', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('675 34', '67534', 'SE-675 34', 'SE67534'))
def test_postcode_sweden_pass(sanitize, postcode):
assert sanitize(country='se', postcode=postcode) == [('postcode', '675 34')]
@pytest.mark.parametrize("postcode", ('67 345', '671123'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_sweden_fail(sanitize, postcode):
assert sanitize(country='se', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('AD123', '123', 'AD 123', 'AD-123'))
def test_postcode_andorra_pass(sanitize, postcode):
assert sanitize(country='ad', postcode=postcode) == [('postcode', 'AD123')]
@pytest.mark.parametrize("postcode", ('AD1234', 'AD AD123', 'XX123'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_andorra_fail(sanitize, postcode):
assert sanitize(country='ad', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('AI-2640', '2640', 'AI 2640'))
def test_postcode_anguilla_pass(sanitize, postcode):
assert sanitize(country='ai', postcode=postcode) == [('postcode', 'AI-2640')]
@pytest.mark.parametrize("postcode", ('AI-2000', 'AI US-2640', 'AI AI-2640'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_anguilla_fail(sanitize, postcode):
assert sanitize(country='ai', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('BN1111', 'BN 1111', 'BN BN1111', 'BN BN 1111'))
def test_postcode_brunei_pass(sanitize, postcode):
assert sanitize(country='bn', postcode=postcode) == [('postcode', 'BN1111')]
@pytest.mark.parametrize("postcode", ('BN-1111', 'BNN1111'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_brunei_fail(sanitize, postcode):
assert sanitize(country='bn', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('IM1 1AA', 'IM11AA', 'IM IM11AA'))
def test_postcode_isle_of_man_pass(sanitize, postcode):
assert sanitize(country='im', postcode=postcode) == [('postcode', 'IM1 1AA')]
@pytest.mark.parametrize("postcode", ('IZ1 1AA', 'IM1 AA'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_isle_of_man_fail(sanitize, postcode):
assert sanitize(country='im', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('JE5 0LA', 'JE50LA', 'JE JE50LA', 'je JE5 0LA'))
def test_postcode_jersey_pass(sanitize, postcode):
assert sanitize(country='je', postcode=postcode) == [('postcode', 'JE5 0LA')]
@pytest.mark.parametrize("postcode", ('gb JE5 0LA', 'IM50LA', 'IM5 012'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_jersey_fail(sanitize, postcode):
assert sanitize(country='je', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('KY1-1234', '1-1234', 'KY 1-1234'))
def test_postcode_cayman_islands_pass(sanitize, postcode):
assert sanitize(country='ky', postcode=postcode) == [('postcode', 'KY1-1234')]
@pytest.mark.parametrize("postcode", ('KY-1234', 'KZ1-1234', 'KY1 1234', 'KY1-123', 'KY KY1-1234'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_cayman_islands_fail(sanitize, postcode):
assert sanitize(country='ky', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('LC11 222', '11 222', '11222', 'LC 11 222'))
def test_postcode_saint_lucia_pass(sanitize, postcode):
assert sanitize(country='lc', postcode=postcode) == [('postcode', 'LC11 222')]
@pytest.mark.parametrize("postcode", ('11 2222', 'LC LC11 222'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_saint_lucia_fail(sanitize, postcode):
assert sanitize(country='lc', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('LV-1111', '1111', 'LV 1111', 'LV1111',))
def test_postcode_latvia_pass(sanitize, postcode):
assert sanitize(country='lv', postcode=postcode) == [('postcode', 'LV-1111')]
@pytest.mark.parametrize("postcode", ('111', '11111', 'LV LV-1111'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_latvia_fail(sanitize, postcode):
assert sanitize(country='lv', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('MD-1111', '1111', 'MD 1111', 'MD1111'))
def test_postcode_moldova_pass(sanitize, postcode):
assert sanitize(country='md', postcode=postcode) == [('postcode', 'MD-1111')]
@pytest.mark.parametrize("postcode", ("MD MD-1111", "MD MD1111", "MD MD 1111"))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_moldova_fail(sanitize, postcode):
assert sanitize(country='md', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('VLT 1117', 'GDJ 1234', 'BZN 2222'))
def test_postcode_malta_pass(sanitize, postcode):
assert sanitize(country='mt', postcode=postcode) == [('postcode', postcode)]
@pytest.mark.parametrize("postcode", ('MTF 1111', 'MT MTF 1111', 'MTF1111', 'MT MTF1111'))
def test_postcode_malta_mtarfa_pass(sanitize, postcode):
assert sanitize(country='mt', postcode=postcode) == [('postcode', 'MTF 1111')]
@pytest.mark.parametrize("postcode", ('1111', 'MTMT 1111'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_malta_fail(sanitize, postcode):
assert sanitize(country='mt', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('VC1111', '1111', 'VC-1111', 'VC 1111'))
def test_postcode_saint_vincent_pass(sanitize, postcode):
assert sanitize(country='vc', postcode=postcode) == [('postcode', 'VC1111')]
@pytest.mark.parametrize("postcode", ('VC11', 'VC VC1111'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_saint_vincent_fail(sanitize, postcode):
assert sanitize(country='vc', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('VG1111', '1111', 'VG 1111', 'VG-1111'))
def test_postcode_virgin_islands_pass(sanitize, postcode):
assert sanitize(country='vg', postcode=postcode) == [('postcode', 'VG1111')]
@pytest.mark.parametrize("postcode", ('111', '11111', 'VG VG1111'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_virgin_islands_fail(sanitize, postcode):
assert sanitize(country='vg', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('AB1', '123-456-7890', '1 as 44'))
@pytest.mark.sanitizer_params(default_pattern='[A-Z0-9- ]{3,12}')
def test_postcode_default_pattern_pass(sanitize, postcode):
assert sanitize(country='an', postcode=postcode) == [('postcode', postcode.upper())]
@pytest.mark.parametrize("postcode", ('C', '12', 'ABC123DEF 456', '1234,5678', '11223;11224'))
@pytest.mark.sanitizer_params(convert_to_address=False, default_pattern='[A-Z0-9- ]{3,12}')
def test_postcode_default_pattern_fail(sanitize, postcode):
assert sanitize(country='an', postcode=postcode) == []

size: 9,714 | language: Python | extension: .py | total_lines: 158 | avg_line_length: 58.101266 | max_line_length: 99 | alphanum_fraction: 0.715808 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,701 | file_name: test_generic_mutation.py | file_path: osm-search_Nominatim/test/python/tokenizer/token_analysis/test_generic_mutation.py

# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for generic token analysis, mutation part.
"""
import pytest
from icu import Transliterator
import nominatim_db.tokenizer.token_analysis.generic as module
from nominatim_db.errors import UsageError
DEFAULT_NORMALIZATION = """ '🜳' > ' ';
[[:Nonspacing Mark:] [:Cf:]] >;
:: lower ();
[[:Punctuation:][:Space:]]+ > ' '
"""
DEFAULT_TRANSLITERATION = """ :: Latin ();
'🜵' > ' ';
"""
class TestMutationNoVariants:
def make_analyser(self, *mutations):
rules = { 'analyzer': 'generic',
'mutations': [ {'pattern': m[0], 'replacements': m[1]}
for m in mutations]
}
trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
config = module.configure(rules, norm, trans)
self.analysis = module.create(norm, trans, config)
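# Each mutation is a (regex pattern, replacement list) pair; every match of
# the pattern is expanded into all replacements, so the number of variants
# grows multiplicatively with the number of matches (see
# test_removement_replacement below).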
def variants(self, name):
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
return set(self.analysis.compute_variants(norm.transliterate(name).strip()))
@pytest.mark.parametrize('pattern', ('(capture)', ['a list']))
def test_bad_pattern(self, pattern):
with pytest.raises(UsageError):
self.make_analyser((pattern, ['b']))
@pytest.mark.parametrize('replacements', (None, 'a string'))
def test_bad_replacement(self, replacements):
with pytest.raises(UsageError):
self.make_analyser(('a', replacements))
def test_simple_replacement(self):
self.make_analyser(('a', ['b']))
assert self.variants('none') == {'none'}
assert self.variants('abba') == {'bbbb'}
assert self.variants('2 aar') == {'2 bbr'}
def test_multichar_replacement(self):
self.make_analyser(('1 1', ['1 1 1']))
assert self.variants('1 1456') == {'1 1 1456'}
assert self.variants('1 1 1') == {'1 1 1 1'}
def test_removement_replacement(self):
self.make_analyser((' ', [' ', '']))
assert self.variants('A 345') == {'a 345', 'a345'}
assert self.variants('a g b') == {'a g b', 'ag b', 'a gb', 'agb'}
def test_regex_pattern(self):
self.make_analyser(('[^a-z]+', ['XXX', ' ']))
assert self.variants('a-34n12') == {'aXXXnXXX', 'aXXXn', 'a nXXX', 'a n'}
def test_multiple_mutations(self):
self.make_analyser(('ä', ['ä', 'ae']), ('ö', ['ö', 'oe']))
assert self.variants('Längenöhr') == {'längenöhr', 'laengenöhr',
'längenoehr', 'laengenoehr'}

size: 3,017 | language: Python | extension: .py | total_lines: 62 | avg_line_length: 38.677419 | max_line_length: 85 | alphanum_fraction: 0.570208 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,702 | file_name: test_generic.py | file_path: osm-search_Nominatim/test/python/tokenizer/token_analysis/test_generic.py

# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for import name normalisation and variant generation.
"""
import pytest
from icu import Transliterator
import nominatim_db.tokenizer.token_analysis.generic as module
from nominatim_db.errors import UsageError
DEFAULT_NORMALIZATION = """ :: NFD ();
'🜳' > ' ';
[[:Nonspacing Mark:] [:Cf:]] >;
:: lower ();
[[:Punctuation:][:Space:]]+ > ' ';
:: NFC ();
"""
DEFAULT_TRANSLITERATION = """ :: Latin ();
'🜵' > ' ';
"""
def make_analyser(*variants, variant_only=False):
rules = { 'analyzer': 'generic', 'variants': [{'words': variants}]}
if variant_only:
rules['mode'] = 'variant-only'
trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
config = module.configure(rules, norm, trans)
return module.create(norm, trans, config)
def get_normalized_variants(proc, name):
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
return proc.compute_variants(norm.transliterate(name).strip())
def test_no_variants():
rules = { 'analyzer': 'generic' }
trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
config = module.configure(rules, norm, trans)
proc = module.create(norm, trans, config)
assert get_normalized_variants(proc, '大德!') == ['dà dé']
def test_variants_empty():
proc = make_analyser('saint -> 🜵', 'street -> st')
assert get_normalized_variants(proc, '🜵') == []
assert get_normalized_variants(proc, '🜳') == []
assert get_normalized_variants(proc, 'saint') == ['saint']
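# The table below pairs variant rules with an input name and the expected set
# of variants. As the cases illustrate: '=>' replaces the matched term while
# '->' also keeps the original; a leading '~' additionally matches the term as
# part of a compound word (optionally splitting it); '^' and '$' anchor a rule
# to the beginning or end of the name.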
VARIANT_TESTS = [
(('~strasse,~straße -> str', '~weg => weg'), "hallo", {'hallo'}),
(('weg => wg',), "holzweg", {'holzweg'}),
(('weg -> wg',), "holzweg", {'holzweg'}),
(('~weg => weg',), "holzweg", {'holz weg', 'holzweg'}),
(('~weg -> weg',), "holzweg", {'holz weg', 'holzweg'}),
(('~weg => w',), "holzweg", {'holz w', 'holzw'}),
(('~weg -> w',), "holzweg", {'holz weg', 'holzweg', 'holz w', 'holzw'}),
(('~weg => weg',), "Meier Weg", {'meier weg', 'meierweg'}),
(('~weg -> weg',), "Meier Weg", {'meier weg', 'meierweg'}),
(('~weg => w',), "Meier Weg", {'meier w', 'meierw'}),
(('~weg -> w',), "Meier Weg", {'meier weg', 'meierweg', 'meier w', 'meierw'}),
(('weg => wg',), "Meier Weg", {'meier wg'}),
(('weg -> wg',), "Meier Weg", {'meier weg', 'meier wg'}),
(('~strasse,~straße -> str', '~weg => weg'), "Bauwegstraße",
{'bauweg straße', 'bauweg str', 'bauwegstraße', 'bauwegstr'}),
(('am => a', 'bach => b'), "am bach", {'a b'}),
(('am => a', '~bach => b'), "am bach", {'a b'}),
(('am -> a', '~bach -> b'), "am bach", {'am bach', 'a bach', 'am b', 'a b'}),
(('am -> a', '~bach -> b'), "ambach", {'ambach', 'am bach', 'amb', 'am b'}),
(('saint -> s,st', 'street -> st'), "Saint Johns Street",
{'saint johns street', 's johns street', 'st johns street',
'saint johns st', 's johns st', 'st johns st'}),
(('river$ -> r',), "River Bend Road", {'river bend road'}),
(('river$ -> r',), "Bent River", {'bent river', 'bent r'}),
(('^north => n',), "North 2nd Street", {'n 2nd street'}),
(('^north => n',), "Airport North", {'airport north'}),
(('am -> a',), "am am am am am am am am", {'am am am am am am am am'}),
(('am => a',), "am am am am am am am am", {'a a a a a a a a'})
]
@pytest.mark.parametrize("rules,name,variants", VARIANT_TESTS)
def test_variants(rules, name, variants):
proc = make_analyser(*rules)
result = get_normalized_variants(proc, name)
assert len(result) == len(set(result))
assert set(get_normalized_variants(proc, name)) == variants
VARIANT_ONLY_TESTS = [
(('weg => wg',), "hallo", set()),
(('weg => wg',), "Meier Weg", {'meier wg'}),
(('weg -> wg',), "Meier Weg", {'meier wg'}),
]
@pytest.mark.parametrize("rules,name,variants", VARIANT_ONLY_TESTS)
def test_variants_only(rules, name, variants):
proc = make_analyser(*rules, variant_only=True)
result = get_normalized_variants(proc, name)
assert len(result) == len(set(result))
assert set(get_normalized_variants(proc, name)) == variants
class TestGetReplacements:
@staticmethod
def configure_rules(*variants):
rules = { 'analyzer': 'generic', 'variants': [{'words': variants}]}
trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
return module.configure(rules, norm, trans)
def get_replacements(self, *variants):
config = self.configure_rules(*variants)
return sorted((k, sorted(v)) for k,v in config['replacements'])
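# get_replacements() exposes the compiled replacement table as a sorted list
# of (match string, substitutions); a leading or trailing blank in the match
# marks a word boundary and '^' marks the start or end of the whole name.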
@pytest.mark.parametrize("variant", ['foo > bar', 'foo -> bar -> bar',
'~foo~ -> bar', 'fo~ o -> bar'])
def test_invalid_variant_description(self, variant):
with pytest.raises(UsageError):
self.configure_rules(variant)
@pytest.mark.parametrize("rule", ["!!! -> bar", "bar => !!!"])
def test_ignore_unnormalizable_terms(self, rule):
repl = self.get_replacements(rule)
assert repl == []
def test_add_full(self):
repl = self.get_replacements("foo -> bar")
assert repl == [(' foo ', [' bar', ' foo'])]
def test_replace_full(self):
repl = self.get_replacements("foo => bar")
assert repl == [(' foo ', [' bar'])]
def test_add_suffix_no_decompose(self):
repl = self.get_replacements("~berg |-> bg")
assert repl == [(' berg ', [' berg', ' bg']),
('berg ', ['berg', 'bg'])]
def test_replace_suffix_no_decompose(self):
repl = self.get_replacements("~berg |=> bg")
assert repl == [(' berg ', [' bg']),('berg ', ['bg'])]
def test_add_suffix_decompose(self):
repl = self.get_replacements("~berg -> bg")
assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
('berg ', [' berg', ' bg', 'berg', 'bg'])]
def test_replace_suffix_decompose(self):
repl = self.get_replacements("~berg => bg")
assert repl == [(' berg ', [' bg', 'bg']),
('berg ', [' bg', 'bg'])]
def test_add_prefix_no_compose(self):
repl = self.get_replacements("hinter~ |-> hnt")
assert repl == [(' hinter', [' hinter', ' hnt']),
(' hinter ', [' hinter', ' hnt'])]
def test_replace_prefix_no_compose(self):
repl = self.get_replacements("hinter~ |=> hnt")
assert repl == [(' hinter', [' hnt']), (' hinter ', [' hnt'])]
def test_add_prefix_compose(self):
repl = self.get_replacements("hinter~-> h")
assert repl == [(' hinter', [' h', ' h ', ' hinter', ' hinter ']),
(' hinter ', [' h', ' h', ' hinter', ' hinter'])]
def test_replace_prefix_compose(self):
repl = self.get_replacements("hinter~=> h")
assert repl == [(' hinter', [' h', ' h ']),
(' hinter ', [' h', ' h'])]
def test_add_beginning_only(self):
repl = self.get_replacements("^Premier -> Pr")
assert repl == [('^ premier ', ['^ pr', '^ premier'])]
def test_replace_beginning_only(self):
repl = self.get_replacements("^Premier => Pr")
assert repl == [('^ premier ', ['^ pr'])]
def test_add_final_only(self):
repl = self.get_replacements("road$ -> rd")
assert repl == [(' road ^', [' rd ^', ' road ^'])]
def test_replace_final_only(self):
repl = self.get_replacements("road$ => rd")
assert repl == [(' road ^', [' rd ^'])]
def test_decompose_only(self):
repl = self.get_replacements("~foo -> foo")
assert repl == [(' foo ', [' foo', 'foo']),
('foo ', [' foo', 'foo'])]
def test_add_suffix_decompose_end_only(self):
repl = self.get_replacements("~berg |-> bg", "~berg$ -> bg")
assert repl == [(' berg ', [' berg', ' bg']),
(' berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^']),
('berg ', ['berg', 'bg']),
('berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^'])]
def test_replace_suffix_decompose_end_only(self):
repl = self.get_replacements("~berg |=> bg", "~berg$ => bg")
assert repl == [(' berg ', [' bg']),
(' berg ^', [' bg ^', 'bg ^']),
('berg ', ['bg']),
('berg ^', [' bg ^', 'bg ^'])]
@pytest.mark.parametrize('rule', ["~berg,~burg -> bg",
"~berg, ~burg -> bg",
"~berg,,~burg -> bg"])
def test_add_multiple_suffix(self, rule):
repl = self.get_replacements(rule)
assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
(' burg ', [' bg', ' burg', 'bg', 'burg']),
('berg ', [' berg', ' bg', 'berg', 'bg']),
('burg ', [' bg', ' burg', 'bg', 'burg'])]

size: 9,598 | language: Python | extension: .py | total_lines: 185 | avg_line_length: 43.232432 | max_line_length: 85 | alphanum_fraction: 0.535343 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,703 | file_name: test_analysis_postcodes.py | file_path: osm-search_Nominatim/test/python/tokenizer/token_analysis/test_analysis_postcodes.py

# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for special postcode analysis and variant generation.
"""
import pytest
from icu import Transliterator
import nominatim_db.tokenizer.token_analysis.postcodes as module
from nominatim_db.data.place_name import PlaceName
from nominatim_db.errors import UsageError
DEFAULT_NORMALIZATION = """ :: NFD ();
'🜳' > ' ';
[[:Nonspacing Mark:] [:Cf:]] >;
:: lower ();
[[:Punctuation:][:Space:]]+ > ' ';
:: NFC ();
"""
DEFAULT_TRANSLITERATION = """ :: Latin ();
'🜵' > ' ';
"""
@pytest.fixture
def analyser():
rules = { 'analyzer': 'postcodes'}
config = module.configure(rules, DEFAULT_NORMALIZATION)
trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
return module.create(norm, trans, config)
def get_normalized_variants(proc, name):
norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
return proc.compute_variants(norm.transliterate(name).strip())
@pytest.mark.parametrize('name,norm', [('12', '12'),
('A 34 ', 'A 34'),
('34-av', '34-AV')])
def test_get_canonical_id(analyser, name, norm):
assert analyser.get_canonical_id(PlaceName(name=name, kind='', suffix='')) == norm
@pytest.mark.parametrize('postcode,variants', [('12345', {'12345'}),
('AB-998', {'ab 998', 'ab998'}),
('23 FGH D3', {'23 fgh d3', '23fgh d3',
'23 fghd3', '23fghd3'})])
def test_compute_variants(analyser, postcode, variants):
out = analyser.compute_variants(postcode)
assert len(out) == len(set(out))
assert set(out) == variants
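# Editor's note: as the cases above show, variant expansion makes the spaces
# between letter/digit groups optional, so 'AB-998' normalizes to 'ab 998'
# and additionally yields the compact form 'ab998'.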

size: 2,253 | language: Python | extension: .py | total_lines: 47 | avg_line_length: 36.042553 | max_line_length: 87 | alphanum_fraction: 0.562956 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,704 | file_name: environment.py | file_path: osm-search_Nominatim/test/bdd/environment.py

# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import sys
from behave import *
sys.path.insert(1, str(Path(__file__, '..', '..', '..', 'src').resolve()))
from steps.geometry_factory import GeometryFactory
from steps.nominatim_environment import NominatimEnvironment
TEST_BASE_DIR = Path(__file__, '..', '..').resolve()
userconfig = {
'REMOVE_TEMPLATE' : False,
'KEEP_TEST_DB' : False,
'DB_HOST' : None,
'DB_PORT' : None,
'DB_USER' : None,
'DB_PASS' : None,
'TEMPLATE_DB' : 'test_template_nominatim',
'TEST_DB' : 'test_nominatim',
'API_TEST_DB' : 'test_api_nominatim',
'API_TEST_FILE' : TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf',
'TOKENIZER' : None, # Test with a custom tokenizer
'STYLE' : 'extratags',
'API_ENGINE': 'falcon'
}
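# Every entry above can be overridden from the behave command line via
# '-D KEY=VALUE'; before_all() copies these defaults into
# context.config.userdata without overwriting user-supplied values.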
use_step_matcher("re")
def before_all(context):
# logging setup
context.config.setup_logging()
# set up -D options
for k,v in userconfig.items():
context.config.userdata.setdefault(k, v)
# Nominatim test setup
context.nominatim = NominatimEnvironment(context.config.userdata)
context.osm = GeometryFactory()
def before_scenario(context, scenario):
if 'SQLITE' not in context.tags \
and context.config.userdata['API_TEST_DB'].startswith('sqlite:'):
context.scenario.skip("Not usable with Sqlite database.")
elif 'DB' in context.tags:
context.nominatim.setup_db(context)
elif 'APIDB' in context.tags:
context.nominatim.setup_api_db()
elif 'UNKNOWNDB' in context.tags:
context.nominatim.setup_unknown_db()
def after_scenario(context, scenario):
if 'DB' in context.tags:
context.nominatim.teardown_db(context)

size: 1,896 | language: Python | extension: .py | total_lines: 51 | avg_line_length: 32.941176 | max_line_length: 74 | alphanum_fraction: 0.683924 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,705 | file_name: table_compare.py | file_path: osm-search_Nominatim/test/bdd/steps/table_compare.py

# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions to facilitate accessing and comparing the content of DB tables.
"""
import re
import json
import psycopg
from psycopg import sql as pysql
from steps.check_functions import Almost
ID_REGEX = re.compile(r"(?P<typ>[NRW])(?P<oid>\d+)(:(?P<cls>\w+))?")
class NominatimID:
""" Splits a unique identifier for places into its components.
As place_ids cannot be used for testing, we use a unique
identifier instead that is of the form <osmtype><osmid>[:<class>].
"""
def __init__(self, oid):
self.typ = self.oid = self.cls = None
if oid is not None:
m = ID_REGEX.fullmatch(oid)
assert m is not None, \
"ID '{}' not of form <osmtype><osmid>[:<class>]".format(oid)
self.typ = m.group('typ')
self.oid = m.group('oid')
self.cls = m.group('cls')
def __str__(self):
if self.cls is None:
return self.typ + self.oid
return '{self.typ}{self.oid}:{self.cls}'.format(self=self)
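# Illustrative round trip (editor's sketch): NominatimID('N1234:amenity')
# parses to typ='N', oid='1234', cls='amenity', and str() renders it back
# as 'N1234:amenity'.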
def query_osm_id(self, cur, query):
""" Run a query on cursor `cur` using osm ID, type and class. The
`query` string must contain exactly one placeholder '{}' where
the 'where' query should go.
"""
where = 'osm_type = %s and osm_id = %s'
params = [self.typ, self.oid]
if self.cls is not None:
where += ' and class = %s'
params.append(self.cls)
cur.execute(query.format(where), params)
def row_by_place_id(self, cur, table, extra_columns=None):
""" Get a row by place_id from the given table using cursor `cur`.
extra_columns may contain a list of additional elements for the select
part of the query.
"""
pid = self.get_place_id(cur)
query = "SELECT {} FROM {} WHERE place_id = %s".format(
','.join(['*'] + (extra_columns or [])), table)
cur.execute(query, (pid, ))
def get_place_id(self, cur, allow_empty=False):
""" Look up the place id for the ID. Throws an assertion if the ID
is not unique.
"""
self.query_osm_id(cur, "SELECT place_id FROM placex WHERE {}")
if cur.rowcount == 0 and allow_empty:
return None
assert cur.rowcount == 1, \
"Place ID {!s} not unique. Found {} entries.".format(self, cur.rowcount)
return cur.fetchone()['place_id']
class DBRow:
""" Represents a row from a database and offers comparison functions.
"""
def __init__(self, nid, db_row, context):
self.nid = nid
self.db_row = db_row
self.context = context
def assert_row(self, row, exclude_columns):
""" Check that all columns of the given behave row are contained
in the database row. Exclude behave rows with the names given
in the `exclude_columns` list.
"""
for name, value in zip(row.headings, row.cells):
if name not in exclude_columns:
assert self.contains(name, value), self.assert_msg(name, value)
def contains(self, name, expected):
""" Check that the DB row contains a column `name` with the given value.
"""
if '+' in name:
column, field = name.split('+', 1)
return self._contains_hstore_value(column, field, expected)
if name == 'geometry':
return self._has_geometry(expected)
if name not in self.db_row:
return False
actual = self.db_row[name]
if expected == '-':
return actual is None
if name == 'name' and ':' not in expected:
return self._compare_column(actual[name], expected)
if 'place_id' in name:
return self._compare_place_id(actual, expected)
if name == 'centroid':
return self._has_centroid(expected)
return self._compare_column(actual, expected)
def _contains_hstore_value(self, column, field, expected):
if column == 'addr':
column = 'address'
if column not in self.db_row:
return False
if expected == '-':
return self.db_row[column] is None or field not in self.db_row[column]
if self.db_row[column] is None:
return False
return self._compare_column(self.db_row[column].get(field), expected)
def _compare_column(self, actual, expected):
if isinstance(actual, dict):
return actual == eval('{' + expected + '}')
return str(actual) == expected
def _compare_place_id(self, actual, expected):
if expected == '0':
return actual == 0
with self.context.db.cursor() as cur:
return NominatimID(expected).get_place_id(cur) == actual
def _has_centroid(self, expected):
if expected == 'in geometry':
with self.context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur:
cur.execute("""SELECT ST_Within(ST_SetSRID(ST_Point(%(cx)s, %(cy)s), 4326),
ST_SetSRID(%(geomtxt)s::geometry, 4326))""",
(self.db_row))
return cur.fetchone()[0]
if ' ' in expected:
x, y = expected.split(' ')
else:
x, y = self.context.osm.grid_node(int(expected))
return Almost(float(x)) == self.db_row['cx'] and Almost(float(y)) == self.db_row['cy']
def _has_geometry(self, expected):
geom = self.context.osm.parse_geometry(expected)
with self.context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur:
cur.execute(pysql.SQL("""SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001),
ST_SnapToGrid(ST_SetSRID({}::geometry, 4326), 0.00001, 0.00001))""")
.format(pysql.SQL(geom),
pysql.Literal(self.db_row['geomtxt'])))
return cur.fetchone()[0]
def assert_msg(self, name, value):
""" Return a string with an informative message for a failed compare.
"""
msg = "\nBad column '{}' in row '{!s}'.".format(name, self.nid)
actual = self._get_actual(name)
if actual is not None:
msg += " Expected: {}, got: {}.".format(value, actual)
else:
msg += " No such column."
return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row), indent=4, default=str))
def _get_actual(self, name):
if '+' in name:
column, field = name.split('+', 1)
if column == 'addr':
column = 'address'
return (self.db_row.get(column) or {}).get(field)
if name == 'geometry':
return self.db_row['geomtxt']
if name not in self.db_row:
return None
if name == 'centroid':
return "POINT({cx} {cy})".format(**self.db_row)
actual = self.db_row[name]
if 'place_id' in name:
if actual is None:
return '<null>'
if actual == 0:
return "place ID 0"
with self.context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur:
cur.execute("""SELECT osm_type, osm_id, class
FROM placex WHERE place_id = %s""",
(actual, ))
if cur.rowcount == 1:
return "{0[0]}{0[1]}:{0[2]}".format(cur.fetchone())
return "[place ID {} not found]".format(actual)
return actual

size: 7,855 | language: Python | extension: .py | total_lines: 173 | avg_line_length: 34.566474 | max_line_length: 103 | alphanum_fraction: 0.564237 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,706 | file_name: steps_api_queries.py | file_path: osm-search_Nominatim/test/bdd/steps/steps_api_queries.py

# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
""" Steps that run queries against the API.
"""
from pathlib import Path
import json
import os
import re
import logging
import asyncio
import xml.etree.ElementTree as ET
from urllib.parse import urlencode
from utils import run_script
from http_responses import GenericResponse, SearchResponse, ReverseResponse, StatusResponse
from check_functions import Bbox, check_for_attributes
from table_compare import NominatimID
LOG = logging.getLogger(__name__)
def make_todo_list(context, result_id):
if result_id is None:
context.execute_steps("then at least 1 result is returned")
return range(len(context.response.result))
context.execute_steps(f"then more than {result_id} results are returned")
return (int(result_id.strip()), )
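# make_todo_list() yields the indices of the results to check: all of them
# when the step names no explicit result ID, otherwise only the requested
# one, after asserting that enough results were returned.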
def compare(operator, op1, op2):
if operator == 'less than':
return op1 < op2
elif operator == 'more than':
return op1 > op2
elif operator == 'exactly':
return op1 == op2
elif operator == 'at least':
return op1 >= op2
elif operator == 'at most':
return op1 <= op2
else:
raise ValueError(f"Unknown operator '{operator}'")
def send_api_query(endpoint, params, fmt, context):
if fmt is not None:
if fmt.strip() == 'debug':
params['debug'] = '1'
else:
params['format'] = fmt.strip()
if context.table:
if context.table.headings[0] == 'param':
for line in context.table:
params[line['param']] = line['value']
else:
for h in context.table.headings:
params[h] = context.table[0][h]
return asyncio.run(context.nominatim.api_engine(endpoint, params,
Path(context.nominatim.website_dir.name),
context.nominatim.test_env,
getattr(context, 'http_headers', {})))
@given(u'the HTTP header')
def add_http_header(context):
if not hasattr(context, 'http_headers'):
context.http_headers = {}
for h in context.table.headings:
context.http_headers[h] = context.table[0][h]
@when(u'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?')
def website_search_request(context, fmt, query, addr):
params = {}
if query:
params['q'] = query
if addr is not None:
params['addressdetails'] = '1'
outp, status = send_api_query('search', params, fmt, context)
context.response = SearchResponse(outp, fmt or 'json', status)
@when('sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse(context, lat, lon, fmt):
params = {}
if lat is not None:
params['lat'] = lat
if lon is not None:
params['lon'] = lon
if fmt is None:
fmt = 'jsonv2'
elif fmt == "''":
fmt = None
outp, status = send_api_query('reverse', params, fmt, context)
context.response = ReverseResponse(outp, fmt or 'xml', status)
@when('sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
params = {}
params['lon'], params['lat'] = (f'{c:f}' for c in context.osm.grid_node(int(nodeid)))
outp, status = send_api_query('reverse', params, fmt, context)
context.response = ReverseResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
def website_details_request(context, fmt, query):
params = {}
if query[0] in 'NWR':
nid = NominatimID(query)
params['osmtype'] = nid.typ
params['osmid'] = nid.oid
if nid.cls:
params['class'] = nid.cls
else:
params['place_id'] = query
outp, status = send_api_query('details', params, fmt, context)
context.response = GenericResponse(outp, fmt or 'json', status)
@when(u'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
def website_lookup_request(context, fmt, query):
params = { 'osm_ids' : query }
outp, status = send_api_query('lookup', params, fmt, context)
context.response = SearchResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?status query')
def website_status_request(context, fmt):
params = {}
outp, status = send_api_query('status', params, fmt, context)
context.response = StatusResponse(outp, fmt or 'text', status)
@step(u'(?P<operator>less than|more than|exactly|at least|at most) (?P<number>\d+) results? (?:is|are) returned')
def validate_result_number(context, operator, number):
context.execute_steps("Then a HTTP 200 is returned")
numres = len(context.response.result)
assert compare(operator, numres, int(number)), \
f"Bad number of results: expected {operator} {number}, got {numres}."
@then(u'a HTTP (?P<status>\d+) is returned')
def check_http_return_status(context, status):
assert context.response.errorcode == int(status), \
f"Return HTTP status is {context.response.errorcode}."\
f" Full response:\n{context.response.page}"
@then(u'the page contents equals "(?P<text>.+)"')
def check_page_content_equals(context, text):
assert context.response.page == text
@then(u'the result is valid (?P<fmt>\w+)')
def step_impl(context, fmt):
context.execute_steps("Then a HTTP 200 is returned")
if fmt.strip() == 'html':
try:
tree = ET.fromstring(context.response.page)
except Exception as ex:
assert False, f"Could not parse page: {ex}\n{context.response.page}"
assert tree.tag == 'html'
body = tree.find('./body')
assert body is not None
assert body.find('.//script') is None
else:
assert context.response.format == fmt
@then(u'a (?P<fmt>\w+) user error is returned')
def check_page_error(context, fmt):
context.execute_steps("Then a HTTP 400 is returned")
assert context.response.format == fmt
if fmt == 'xml':
assert re.search(r'<error>.+</error>', context.response.page, re.DOTALL) is not None
else:
assert re.search(r'({"error":)', context.response.page, re.DOTALL) is not None
@then(u'result header contains')
def check_header_attr(context):
context.execute_steps("Then a HTTP 200 is returned")
for line in context.table:
assert line['attr'] in context.response.header, \
f"Field '{line['attr']}' missing in header. Full header:\n{context.response.header}"
value = context.response.header[line['attr']]
assert re.fullmatch(line['value'], value) is not None, \
f"Attribute '{line['attr']}': expected: '{line['value']}', got '{value}'"
@then(u'result header has (?P<neg>not )?attributes (?P<attrs>.*)')
def check_header_no_attr(context, neg, attrs):
check_for_attributes(context.response.header, attrs,
'absent' if neg else 'present')
@then(u'results contain(?: in field (?P<field>.*))?')
def step_impl(context, field):
context.execute_steps("then at least 1 result is returned")
for line in context.table:
context.response.match_row(line, context=context, field=field)
@then(u'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
def validate_attributes(context, lid, neg, attrs):
for i in make_todo_list(context, lid):
check_for_attributes(context.response.result[i], attrs,
'absent' if neg else 'present')
@then(u'result addresses contain')
def step_impl(context):
context.execute_steps("then at least 1 result is returned")
for line in context.table:
idx = int(line['ID']) if 'ID' in line.headings else None
for name, value in zip(line.headings, line.cells):
if name != 'ID':
context.response.assert_address_field(idx, name, value)
@then(u'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
def check_address(context, lid, neg, attrs):
context.execute_steps(f"then more than {lid} results are returned")
addr_parts = context.response.result[int(lid)]['address']
for attr in attrs.split(','):
if neg:
assert attr not in addr_parts
else:
assert attr in addr_parts
@then(u'address of result (?P<lid>\d+) (?P<complete>is|contains)')
def check_address(context, lid, complete):
context.execute_steps(f"then more than {lid} results are returned")
lid = int(lid)
addr_parts = dict(context.response.result[lid]['address'])
for line in context.table:
context.response.assert_address_field(lid, line['type'], line['value'])
del addr_parts[line['type']]
if complete == 'is':
assert len(addr_parts) == 0, f"Additional address parts found: {addr_parts!s}"
@then(u'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
def check_bounding_box_in_area(context, lid, coords):
expected = Bbox(coords)
for idx in make_todo_list(context, lid):
res = context.response.result[idx]
check_for_attributes(res, 'boundingbox')
context.response.check_row(idx, res['boundingbox'] in expected,
f"Bbox is not contained in {expected}")
@then(u'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
def check_centroid_in_area(context, lid, coords):
expected = Bbox(coords)
for idx in make_todo_list(context, lid):
res = context.response.result[idx]
check_for_attributes(res, 'lat,lon')
context.response.check_row(idx, (res['lon'], res['lat']) in expected,
f"Centroid is not inside {expected}")
@then(u'there are(?P<neg> no)? duplicates')
def check_for_duplicates(context, neg):
context.execute_steps("then at least 1 result is returned")
resarr = set()
has_dupe = False
for res in context.response.result:
dup = (res['osm_type'], res['class'], res['type'], res['display_name'])
if dup in resarr:
has_dupe = True
break
resarr.add(dup)
if neg:
assert not has_dupe, f"Found duplicate for {dup}"
else:
assert has_dupe, "No duplicates found"

size: 10,470 | language: Python | extension: .py | total_lines: 231 | avg_line_length: 38.363636 | max_line_length: 113 | alphanum_fraction: 0.639198 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,707 | file_name: utils.py | file_path: osm-search_Nominatim/test/bdd/steps/utils.py

# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Various smaller helpers for step execution.
"""
import logging
import subprocess
LOG = logging.getLogger(__name__)
def run_script(cmd, **kwargs):
""" Run the given command, check that it is successful and output
when necessary.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
(outp, outerr) = proc.communicate()
outp = outp.decode('utf-8')
outerr = outerr.decode('utf-8').replace('\\n', '\n')
LOG.debug("Run command: %s\n%s\n%s", cmd, outp, outerr)
assert proc.returncode == 0, "Script '{}' failed:\n{}\n{}\n".format(cmd[0], outp, outerr)
return outp, outerr

size: 887 | language: Python | extension: .py | total_lines: 24 | avg_line_length: 32.791667 | max_line_length: 93 | alphanum_fraction: 0.665891 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,708 | file_name: check_functions.py | file_path: osm-search_Nominatim/test/bdd/steps/check_functions.py

# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of assertion functions used for the steps.
"""
import json
import math
import re
class Almost:
""" Compares a float value with a certain jitter.
"""
def __init__(self, value, offset=0.00001):
self.value = value
self.offset = offset
def __eq__(self, other):
return abs(other - self.value) < self.offset
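# Editor's sketch: Almost(4.56) == 4.560004 holds because the difference of
# 0.000004 stays below the default jitter of 0.00001.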
OSM_TYPE = {'N' : 'node', 'W' : 'way', 'R' : 'relation',
'n' : 'node', 'w' : 'way', 'r' : 'relation',
'node' : 'n', 'way' : 'w', 'relation' : 'r'}
class OsmType:
""" Compares an OSM type, accepting both N/R/W and node/way/relation.
"""
def __init__(self, value):
self.value = value
def __eq__(self, other):
return other == self.value or other == OSM_TYPE[self.value]
def __str__(self):
return f"{self.value} or {OSM_TYPE[self.value]}"
class Field:
""" Generic comparator for fields, which looks at the type of the
value compared.
"""
def __init__(self, value, **extra_args):
self.value = value
self.extra_args = extra_args
def __eq__(self, other):
if isinstance(self.value, float):
return math.isclose(self.value, float(other), **self.extra_args)
if self.value.startswith('^'):
return re.fullmatch(self.value, str(other))
if isinstance(other, dict):
return other == eval('{' + self.value + '}')
return str(self.value) == str(other)
def __str__(self):
return str(self.value)
class Bbox:
""" Comparator for bounding boxes.
"""
def __init__(self, bbox_string):
self.coord = [float(x) for x in bbox_string.split(',')]
def __contains__(self, item):
if isinstance(item, str):
item = item.split(',')
item = list(map(float, item))
if len(item) == 2:
return self.coord[0] <= item[0] <= self.coord[2] \
and self.coord[1] <= item[1] <= self.coord[3]
if len(item) == 4:
return item[0] >= self.coord[0] and item[1] <= self.coord[1] \
and item[2] >= self.coord[2] and item[3] <= self.coord[3]
raise ValueError("Not a coordinate or bbox.")
def __str__(self):
return str(self.coord)
def check_for_attributes(obj, attrs, presence='present'):
""" Check that the object has the given attributes. 'attrs' is a
string with a comma-separated list of attributes. If 'presence'
is set to 'absent' then the function checks that the attributes do
not exist for the object.
"""
def _dump_json():
return json.dumps(obj, sort_keys=True, indent=2, ensure_ascii=False)
for attr in attrs.split(','):
attr = attr.strip()
if presence == 'absent':
assert attr not in obj, \
f"Unexpected attribute {attr}. Full response:\n{_dump_json()}"
else:
assert attr in obj, \
f"No attribute '{attr}'. Full response:\n{_dump_json()}"

size: 3,252 | language: Python | extension: .py | total_lines: 83 | avg_line_length: 31.650602 | max_line_length: 81 | alphanum_fraction: 0.584581 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,709 | file_name: http_responses.py | file_path: osm-search_Nominatim/test/bdd/steps/http_responses.py

# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Classes wrapping HTTP responses from the Nominatim API.
"""
import re
import json
import xml.etree.ElementTree as ET
from check_functions import Almost, OsmType, Field, check_for_attributes
class GenericResponse:
""" Common base class for all API responses.
"""
def __init__(self, page, fmt, errorcode=200):
fmt = fmt.strip()
if fmt == 'jsonv2':
fmt = 'json'
self.page = page
self.format = fmt
self.errorcode = errorcode
self.result = []
self.header = dict()
if errorcode == 200 and fmt != 'debug':
getattr(self, '_parse_' + fmt)()
def _parse_json(self):
m = re.fullmatch(r'([\w$][^(]*)\((.*)\)', self.page)
if m is None:
code = self.page
else:
code = m.group(2)
self.header['json_func'] = m.group(1)
self.result = json.JSONDecoder().decode(code)
if isinstance(self.result, dict):
if 'error' in self.result:
self.result = []
else:
self.result = [self.result]
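# The regular expression above unwraps JSONP responses: for a page like
# 'callback({"a": 1})' the callback name is kept in header['json_func'] and
# only the JSON payload is decoded.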
def _parse_geojson(self):
self._parse_json()
if self.result:
geojson = self.result[0]
# check for valid geojson
check_for_attributes(geojson, 'type,features')
assert geojson['type'] == 'FeatureCollection'
assert isinstance(geojson['features'], list)
self.result = []
for result in geojson['features']:
check_for_attributes(result, 'type,properties,geometry')
assert result['type'] == 'Feature'
new = result['properties']
check_for_attributes(new, 'geojson', 'absent')
new['geojson'] = result['geometry']
if 'bbox' in result:
check_for_attributes(new, 'boundingbox', 'absent')
# bbox is minlon, minlat, maxlon, maxlat
# boundingbox is minlat, maxlat, minlon, maxlon
new['boundingbox'] = [result['bbox'][1],
result['bbox'][3],
result['bbox'][0],
result['bbox'][2]]
for k, v in geojson.items():
if k not in ('type', 'features'):
check_for_attributes(new, '__' + k, 'absent')
new['__' + k] = v
self.result.append(new)
def _parse_geocodejson(self):
self._parse_geojson()
if self.result:
for r in self.result:
assert set(r.keys()) == {'geocoding', 'geojson', '__geocoding'}, \
f"Unexpected keys in result: {r.keys()}"
check_for_attributes(r['geocoding'], 'geojson', 'absent')
inner = r.pop('geocoding')
r.update(inner)
def assert_address_field(self, idx, field, value):
""" Check that result row `idx` has a field `field` with value `value`
in its address. If idx is None, then all results are checked.
"""
if idx is None:
todo = range(len(self.result))
else:
todo = [int(idx)]
for idx in todo:
self.check_row(idx, 'address' in self.result[idx], "No field 'address'")
address = self.result[idx]['address']
self.check_row_field(idx, field, value, base=address)
def match_row(self, row, context=None, field=None):
""" Match the result fields against the given behave table row.
"""
if 'ID' in row.headings:
todo = [int(row['ID'])]
else:
todo = range(len(self.result))
for i in todo:
subdict = self.result[i]
if field is not None:
for key in field.split('.'):
self.check_row(i, key in subdict, f"Missing subfield {key}")
subdict = subdict[key]
self.check_row(i, isinstance(subdict, dict),
f"Subfield {key} not a dict")
for name, value in zip(row.headings, row.cells):
if name == 'ID':
pass
elif name == 'osm':
self.check_row_field(i, 'osm_type', OsmType(value[0]), base=subdict)
self.check_row_field(i, 'osm_id', Field(value[1:]), base=subdict)
elif name == 'centroid':
if ' ' in value:
lon, lat = value.split(' ')
elif context is not None:
lon, lat = context.osm.grid_node(int(value))
else:
raise RuntimeError("Context needed when using grid coordinates")
self.check_row_field(i, 'lat', Field(float(lat), abs_tol=1e-07), base=subdict)
self.check_row_field(i, 'lon', Field(float(lon), abs_tol=1e-07), base=subdict)
else:
self.check_row_field(i, name, Field(value), base=subdict)
def check_row(self, idx, check, msg):
""" Assert for the condition 'check' and print 'msg' on fail together
with the contents of the failing result.
"""
class _RowError:
def __init__(self, row):
self.row = row
def __str__(self):
return f"{msg}. Full row {idx}:\n" \
+ json.dumps(self.row, indent=4, ensure_ascii=False)
assert check, _RowError(self.result[idx])
def check_row_field(self, idx, field, expected, base=None):
""" Check field 'field' of result 'idx' for the expected value
and print a meaningful error if the condition fails.
When 'base' is set to a dictionary, then the field is checked
in that base. The error message will still report the contents
of the full result.
"""
if base is None:
base = self.result[idx]
self.check_row(idx, field in base, f"No field '{field}'")
value = base[field]
self.check_row(idx, expected == value,
f"\nBad value for field '{field}'. Expected: {expected}, got: {value}")
class SearchResponse(GenericResponse):
""" Specialised class for search and lookup responses.
Transforms the xml response into a format similar to json.
"""
def _parse_xml(self):
xml_tree = ET.fromstring(self.page)
self.header = dict(xml_tree.attrib)
for child in xml_tree:
assert child.tag == "place"
self.result.append(dict(child.attrib))
address = {}
for sub in child:
if sub.tag == 'extratags':
self.result[-1]['extratags'] = {}
for tag in sub:
self.result[-1]['extratags'][tag.attrib['key']] = tag.attrib['value']
elif sub.tag == 'namedetails':
self.result[-1]['namedetails'] = {}
for tag in sub:
self.result[-1]['namedetails'][tag.attrib['desc']] = tag.text
elif sub.tag == 'geokml':
self.result[-1][sub.tag] = True
else:
address[sub.tag] = sub.text
if address:
self.result[-1]['address'] = address
class ReverseResponse(GenericResponse):
""" Specialised class for reverse responses.
Transforms the xml response into a format similar to json.
"""
def _parse_xml(self):
xml_tree = ET.fromstring(self.page)
self.header = dict(xml_tree.attrib)
self.result = []
for child in xml_tree:
if child.tag == 'result':
assert not self.result, "More than one result in reverse result"
self.result.append(dict(child.attrib))
check_for_attributes(self.result[0], 'display_name', 'absent')
self.result[0]['display_name'] = child.text
elif child.tag == 'addressparts':
assert 'address' not in self.result[0], "More than one address in result"
address = {}
for sub in child:
assert len(sub) == 0, f"Address element '{sub.tag}' has subelements"
address[sub.tag] = sub.text
self.result[0]['address'] = address
elif child.tag == 'extratags':
assert 'extratags' not in self.result[0], "More than one extratags in result"
self.result[0]['extratags'] = {}
for tag in child:
assert len(tag) == 0, f"Extratags element '{tag.attrib['key']}' has subelements"
self.result[0]['extratags'][tag.attrib['key']] = tag.attrib['value']
elif child.tag == 'namedetails':
assert 'namedetails' not in self.result[0], "More than one namedetails in result"
self.result[0]['namedetails'] = {}
for tag in child:
assert len(tag) == 0, f"Namedetails element '{tag.attrib['desc']}' has subelements"
self.result[0]['namedetails'][tag.attrib['desc']] = tag.text
elif child.tag == 'geokml':
assert 'geokml' not in self.result[0], "More than one geokml in result"
self.result[0]['geokml'] = ET.tostring(child, encoding='unicode')
else:
assert child.tag == 'error', \
f"Unknown XML tag {child.tag} on page: {self.page}"
class StatusResponse(GenericResponse):
""" Specialised class for status responses.
Can also parse text responses.
"""
def _parse_text(self):
pass

size: 10,105 | language: Python | extension: .py | total_lines: 216 | avg_line_length: 33.134259 | max_line_length: 103 | alphanum_fraction: 0.528336 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,710 | file_name: geometry_factory.py | file_path: osm-search_Nominatim/test/bdd/steps/geometry_factory.py

# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import os
from steps.geometry_alias import ALIASES
class GeometryFactory:
""" Provides functions to create geometries from coordinates and data grids.
"""
def __init__(self):
self.grid = {}
def parse_geometry(self, geom):
""" Create a WKT SQL term for the given geometry.
The function understands the following formats:
country:<country code>
Point geometry guaranteed to be in the given country
<P>
Point geometry
<P>,...,<P>
Line geometry
(<P>,...,<P>)
Polygon geometry
<P> may either be a coordinate of the form '<x> <y>' or a single
number. In the latter case it must refer to a point in
a previously defined grid.
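Example (editor's sketch): '1 1' yields a POINT, '1 1,2 2' a
LINESTRING and '(1 1,2 2,3 3,1 1)' a POLYGON, each wrapped
in ST_SetSRID('...'::geometry, 4326).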
"""
if geom.startswith('country:'):
ccode = geom[8:].upper()
assert ccode in ALIASES, "Geometry error: unknown country " + ccode
return "ST_SetSRID('POINT({} {})'::geometry, 4326)".format(*ALIASES[ccode])
if geom.find(',') < 0:
out = "POINT({})".format(self.mk_wkt_point(geom))
elif geom.find('(') < 0:
out = "LINESTRING({})".format(self.mk_wkt_points(geom))
else:
out = "POLYGON(({}))".format(self.mk_wkt_points(geom.strip('() ')))
return "ST_SetSRID('{}'::geometry, 4326)".format(out)
def mk_wkt_point(self, point):
""" Parse a point description.
The point may either consist of 'x y' coordinates or a number
that refers to a grid setup.
"""
geom = point.strip()
if geom.find(' ') >= 0:
return geom
try:
pt = self.grid_node(int(geom))
except ValueError:
assert False, "Scenario error: Point '{}' is not a number".format(geom)
assert pt is not None, "Scenario error: Point '{}' not found in grid".format(geom)
return "{} {}".format(*pt)
def mk_wkt_points(self, geom):
""" Parse a list of points.
The list must be a comma-separated list of points. Points
in coordinate and grid format may be mixed.
"""
return ','.join([self.mk_wkt_point(x) for x in geom.split(',')])
def set_grid(self, lines, grid_step, origin=(0.0, 0.0)):
""" Replace the grid with one from the given lines.
"""
self.grid = {}
y = origin[1]
for line in lines:
x = origin[0]
for pt_id in line:
if pt_id.isdigit():
self.grid[int(pt_id)] = (x, y)
x += grid_step
y += grid_step
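# Editor's sketch: set_grid(['12', '34'], 0.00001) places node 1 at the
# origin, node 2 at (0.00001, 0), node 3 at (0, 0.00001) and node 4 at
# (0.00001, 0.00001).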
def grid_node(self, nodeid):
""" Get the coordinates for the given grid node.
"""
return self.grid.get(nodeid)

size: 3,084 | language: Python | extension: .py | total_lines: 76 | avg_line_length: 30.644737 | max_line_length: 90 | alphanum_fraction: 0.559532 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,711 | file_name: steps_osm_data.py | file_path: osm-search_Nominatim/test/bdd/steps/steps_osm_data.py

# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
import tempfile
import random
import os
from pathlib import Path
from nominatim_db.tools.exec_utils import run_osm2pgsql
from nominatim_db.tools.replication import run_osm2pgsql_updates
from geometry_alias import ALIASES
def get_osm2pgsql_options(nominatim_env, fname, append):
return dict(import_file=fname,
osm2pgsql='osm2pgsql',
osm2pgsql_cache=50,
osm2pgsql_style=str(nominatim_env.get_test_config().get_import_style_file()),
osm2pgsql_style_path=nominatim_env.get_test_config().config_dir,
threads=1,
dsn=nominatim_env.get_libpq_dsn(),
flatnode_file='',
tablespaces=dict(slim_data='', slim_index='',
main_data='', main_index=''),
append=append
)
def write_opl_file(opl, grid):
""" Create a temporary OSM file from OPL and return the file name. It is
the responsibility of the caller to delete the file again.
Nodes with missing coordinates can retrieve their coordinates from
a supplied grid. Failing that, a random coordinate is assigned.
"""
with tempfile.NamedTemporaryFile(suffix='.opl', delete=False) as fd:
for line in opl.splitlines():
if line.startswith('n') and line.find(' x') < 0:
coord = grid.grid_node(int(line[1:].split(' ')[0]))
if coord is None:
coord = (random.random() * 360 - 180,
random.random() * 180 - 90)
line += " x%f y%f" % coord
fd.write(line.encode('utf-8'))
fd.write(b'\n')
return fd.name
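# Editor's sketch: an OPL node line such as 'n1 Tamenity=bench' carries no
# ' x' coordinate, so the loop above appends ' x<lon> y<lat>' taken from the
# grid, or random coordinates if the node is not on the grid.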
@given('the lua style file')
def lua_style_file(context):
""" Define a custom style file to use for the import.
"""
style = Path(context.nominatim.website_dir.name) / 'custom.lua'
style.write_text(context.text)
context.nominatim.test_env['NOMINATIM_IMPORT_STYLE'] = str(style)
@given(u'the ([0-9.]+ )?grid(?: with origin (?P<origin>.*))?')
def define_node_grid(context, grid_step, origin):
"""
Define a grid of node positions.
Use a table to define the grid. The nodes must be integer ids. Optionally
you can give the grid distance. The default is 0.00001 degrees.
"""
if grid_step is not None:
grid_step = float(grid_step.strip())
else:
grid_step = 0.00001
if origin:
if ',' in origin:
# TODO coordinate
coords = origin.split(',')
if len(coords) != 2:
                raise RuntimeError('Grid origin must be a pair of x,y coordinates.')
origin = (float(coords[0]), float(coords[1]))
elif origin in ALIASES:
origin = ALIASES[origin]
else:
raise RuntimeError('Grid origin must be either coordinate or alias.')
else:
origin = (0.0, 0.0)
context.osm.set_grid([context.table.headings] + [list(h) for h in context.table],
grid_step, origin)
@when(u'loading osm data')
def load_osm_file(context):
"""
    Load the given data into a freshly created test database using osm2pgsql.
No further indexing is done.
The data is expected as attached text in OPL format.
"""
# create an OSM file and import it
fname = write_opl_file(context.text, context.osm)
try:
run_osm2pgsql(get_osm2pgsql_options(context.nominatim, fname, append=False))
finally:
os.remove(fname)
### reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again
cur = context.db.cursor()
cur.execute("""CREATE TRIGGER place_before_delete BEFORE DELETE ON place
FOR EACH ROW EXECUTE PROCEDURE place_delete()""")
cur.execute("""CREATE TRIGGER place_before_insert BEFORE INSERT ON place
FOR EACH ROW EXECUTE PROCEDURE place_insert()""")
cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique on place using btree(osm_id,osm_type,class,type)""")
context.db.commit()
@when(u'updating osm data')
def update_from_osm_file(context):
"""
Update a database previously populated with 'loading osm data'.
Needs to run indexing on the existing data first to yield the correct result.
The data is expected as attached text in OPL format.
"""
context.nominatim.copy_from_place(context.db)
context.nominatim.run_nominatim('index')
context.nominatim.run_nominatim('refresh', '--functions')
# create an OSM file and import it
fname = write_opl_file(context.text, context.osm)
try:
run_osm2pgsql_updates(context.db,
get_osm2pgsql_options(context.nominatim, fname, append=True))
finally:
os.remove(fname)
@when('indexing')
def index_database(context):
"""
Run the Nominatim indexing step. This will process data previously
loaded with 'updating osm data'
"""
context.nominatim.run_nominatim('index')
| 5,220 | Python | .py | 121 | 35.099174 | 112 | 0.643757 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,712 | geometry_alias.py | osm-search_Nominatim/test/bdd/steps/geometry_alias.py |
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of aliases for various world coordinates.
"""
ALIASES = {
# Country aliases
'AD': (1.58972, 42.54241),
'AE': (54.61589, 24.82431),
'AF': (65.90264, 34.84708),
'AG': (-61.72430, 17.069),
'AI': (-63.10571, 18.25461),
'AL': (19.84941, 40.21232),
'AM': (44.64229, 40.37821),
'AO': (16.21924, -12.77014),
'AQ': (44.99999, -75.65695),
'AR': (-61.10759, -34.37615),
'AS': (-170.68470, -14.29307),
'AT': (14.25747, 47.36542),
'AU': (138.23155, -23.72068),
'AW': (-69.98255, 12.555),
'AX': (19.91839, 59.81682),
'AZ': (48.38555, 40.61639),
'BA': (17.18514, 44.25582),
'BB': (-59.53342, 13.19),
'BD': (89.75989, 24.34205),
'BE': (4.90078, 50.34682),
'BF': (-0.56743, 11.90471),
'BG': (24.80616, 43.09859),
'BH': (50.52032, 25.94685),
'BI': (29.54561, -2.99057),
'BJ': (2.70062, 10.02792),
'BL': (-62.79349, 17.907),
'BM': (-64.77406, 32.30199),
'BN': (114.52196, 4.28638),
'BO': (-62.02473, -17.77723),
'BQ': (-63.14322, 17.566),
'BR': (-45.77065, -9.58685),
'BS': (-77.60916, 23.8745),
'BT': (90.01350, 27.28137),
'BV': (3.35744, -54.4215),
'BW': (23.51505, -23.48391),
'BY': (26.77259, 53.15885),
'BZ': (-88.63489, 16.33951),
'CA': (-107.74817, 67.12612),
'CC': (96.84420, -12.01734),
'CD': (24.09544, -1.67713),
'CF': (22.58701, 5.98438),
'CG': (15.78875, 0.40388),
'CH': (7.65705, 46.57446),
'CI': (-6.31190, 6.62783),
'CK': (-159.77835, -21.23349),
'CL': (-70.41790, -53.77189),
'CM': (13.26022, 5.94519),
'CN': (96.44285, 38.04260),
'CO': (-72.52951, 2.45174),
'CR': (-83.83314, 9.93514),
'CU': (-80.81673, 21.88852),
'CV': (-24.50810, 14.929),
'CW': (-68.96409, 12.1845),
'CX': (105.62411, -10.48417),
'CY': (32.95922, 35.37010),
'CZ': (16.32098, 49.50692),
'DE': (9.30716, 50.21289),
'DJ': (42.96904, 11.41542),
'DK': (9.18490, 55.98916),
'DM': (-61.00358, 15.65470),
'DO': (-69.62855, 18.58841),
'DZ': (4.24749, 25.79721),
'EC': (-77.45831, -0.98284),
'EE': (23.94288, 58.43952),
'EG': (28.95293, 28.17718),
'EH': (-13.69031, 25.01241),
'ER': (39.01223, 14.96033),
'ES': (-2.59110, 38.79354),
'ET': (38.61697, 7.71399),
'FI': (26.89798, 63.56194),
'FJ': (177.91853, -17.74237),
'FK': (-58.99044, -51.34509),
'FM': (151.95358, 8.5045),
'FO': (-6.60483, 62.10000),
'FR': (0.28410, 47.51045),
'GA': (10.81070, -0.07429),
'GB': (-0.92823, 52.01618),
'GD': (-61.64524, 12.191),
'GE': (44.16664, 42.00385),
'GF': (-53.46524, 3.56188),
'GG': (-2.50580, 49.58543),
'GH': (-0.46348, 7.16051),
'GI': (-5.32053, 36.11066),
'GL': (-33.85511, 74.66355),
'GM': (-16.40960, 13.25),
'GN': (-13.83940, 10.96291),
'GP': (-61.68712, 16.23049),
'GQ': (10.23973, 1.43119),
'GR': (23.17850, 39.06206),
'GS': (-36.49430, -54.43067),
'GT': (-90.74368, 15.20428),
'GU': (144.73362, 13.44413),
'GW': (-14.83525, 11.92486),
'GY': (-58.45167, 5.73698),
'HK': (114.18577, 22.34923),
'HM': (73.68230, -53.22105),
'HN': (-86.95414, 15.23820),
'HR': (17.49966, 45.52689),
'HT': (-73.51925, 18.32492),
'HU': (20.35362, 47.51721),
'ID': (123.34505, -0.83791),
'IE': (-9.00520, 52.87725),
'IL': (35.46314, 32.86165),
'IM': (-4.86740, 54.023),
'IN': (88.67620, 27.86155),
'IO': (71.42743, -6.14349),
'IQ': (42.58109, 34.26103),
'IR': (56.09355, 30.46751),
'IS': (-17.51785, 64.71687),
'IT': (10.42639, 44.87904),
'JE': (-2.19261, 49.12458),
'JM': (-76.84020, 18.3935),
'JO': (36.55552, 30.75741),
'JP': (138.72531, 35.92099),
'KE': (36.90602, 1.08512),
'KG': (76.15571, 41.66497),
'KH': (104.31901, 12.95555),
'KI': (173.63353, 0.139),
'KM': (44.31474, -12.241),
'KN': (-62.69379, 17.2555),
'KP': (126.65575, 39.64575),
'KR': (127.27740, 36.41388),
'KW': (47.30684, 29.69180),
'KY': (-81.07455, 19.29949),
'KZ': (72.00811, 49.88855),
'LA': (102.44391, 19.81609),
'LB': (35.48464, 33.41766),
'LC': (-60.97894, 13.891),
'LI': (9.54693, 47.15934),
'LK': (80.38520, 8.41649),
'LR': (-11.16960, 4.04122),
'LS': (28.66984, -29.94538),
'LT': (24.51735, 55.49293),
'LU': (6.08649, 49.81533),
'LV': (23.51033, 56.67144),
'LY': (15.36841, 28.12177),
'MA': (-4.03061, 33.21696),
'MC': (7.47743, 43.62917),
'MD': (29.61725, 46.66517),
'ME': (19.72291, 43.02441),
'MF': (-63.06666, 18.08102),
'MG': (45.86378, -20.50245),
'MH': (171.94982, 5.983),
'MK': (21.42108, 41.08980),
'ML': (-1.93310, 16.46993),
'MM': (95.54624, 21.09620),
'MN': (99.81138, 48.18615),
'MO': (113.56441, 22.16209),
'MP': (145.21345, 14.14902),
'MQ': (-60.81128, 14.43706),
'MR': (-9.42324, 22.59251),
'MS': (-62.19455, 16.745),
'MT': (14.38363, 35.94467),
'MU': (57.55121, -20.41),
'MV': (73.39292, 4.19375),
'MW': (33.95722, -12.28218),
'MX': (-105.89221, 25.86826),
'MY': (112.71154, 2.10098),
'MZ': (37.58689, -13.72682),
'NA': (16.68569, -21.46572),
'NC': (164.95322, -20.38889),
'NE': (10.06041, 19.08273),
'NF': (167.95718, -29.0645),
'NG': (10.17781, 10.17804),
'NI': (-85.87974, 13.21715),
'NL': (-68.57062, 12.041),
'NO': (23.11556, 70.09934),
'NP': (83.36259, 28.13107),
'NR': (166.93479, -0.5275),
'NU': (-169.84873, -19.05305),
'NZ': (167.97209, -45.13056),
'OM': (56.86055, 20.47413),
'PA': (-79.40160, 8.80656),
'PE': (-78.66540, -7.54711),
'PF': (-145.05719, -16.70862),
'PG': (146.64600, -7.37427),
'PH': (121.48359, 15.09965),
'PK': (72.11347, 31.14629),
'PL': (17.88136, 52.77182),
'PM': (-56.19515, 46.78324),
'PN': (-130.10642, -25.06955),
'PR': (-65.88755, 18.37169),
'PS': (35.39801, 32.24773),
'PT': (-8.45743, 40.11154),
'PW': (134.49645, 7.3245),
'PY': (-59.51787, -22.41281),
'QA': (51.49903, 24.99816),
'RE': (55.77345, -21.36388),
'RO': (26.37632, 45.36120),
'RS': (20.40371, 44.56413),
'RU': (116.44060, 59.06780),
'RW': (29.57882, -1.62404),
'SA': (47.73169, 22.43790),
'SB': (164.63894, -10.23606),
'SC': (46.36566, -9.454),
'SD': (28.14720, 14.56423),
'SE': (15.68667, 60.35568),
'SG': (103.84187, 1.304),
'SH': (-12.28155, -37.11546),
'SI': (14.04738, 46.39085),
'SJ': (15.27552, 79.23365),
'SK': (20.41603, 48.86970),
'SL': (-11.47773, 8.78156),
'SM': (12.46062, 43.94279),
'SN': (-15.37111, 14.99477),
'SO': (46.93383, 9.34094),
'SR': (-55.42864, 4.56985),
'SS': (28.13573, 8.50933),
'ST': (6.61025, 0.2215),
'SV': (-89.36665, 13.43072),
'SX': (-63.15393, 17.9345),
'SY': (38.15513, 35.34221),
'SZ': (31.78263, -26.14244),
'TC': (-71.32554, 21.35),
'TD': (17.42092, 13.46223),
'TF': (137.5, -67.5),
'TG': (1.06983, 7.87677),
'TH': (102.00877, 16.42310),
'TJ': (71.91349, 39.01527),
'TK': (-171.82603, -9.20990),
'TL': (126.22520, -8.72636),
'TM': (57.71603, 39.92534),
'TN': (9.04958, 34.84199),
'TO': (-176.99320, -23.11104),
'TR': (32.82002, 39.86350),
'TT': (-60.70793, 11.1385),
'TV': (178.77499, -9.41685),
'TW': (120.30074, 23.17002),
'TZ': (33.53892, -5.01840),
'UA': (33.44335, 49.30619),
'UG': (32.96523, 2.08584),
'UM': (-169.50993, 16.74605),
'US': (-116.39535, 40.71379),
'UY': (-56.46505, -33.62658),
'UZ': (61.35529, 42.96107),
'VA': (12.33197, 42.04931),
'VC': (-61.09905, 13.316),
'VE': (-64.88323, 7.69849),
'VG': (-64.62479, 18.419),
'VI': (-64.88950, 18.32263),
'VN': (104.20179, 10.27644),
'VU': (167.31919, -15.88687),
'WF': (-176.20781, -13.28535),
'WS': (-172.10966, -13.85093),
'YE': (45.94562, 16.16338),
'YT': (44.93774, -12.60882),
'ZA': (23.19488, -30.43276),
'ZM': (26.38618, -14.39966),
'ZW': (30.12419, -19.86907)
}
| 7,329 | Python | .py | 261 | 27.076628 | 58 | 0.58101 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,713 | place_inserter.py | osm-search_Nominatim/test/bdd/steps/place_inserter.py |
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper classes for filling the place table.
"""
import random
import string
class PlaceColumn:
""" Helper class to collect contents from a behave table row and
insert it into the place table.
"""
def __init__(self, context):
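        # admin_level defaults to 15, the value used when no level is set.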
self.columns = {'admin_level' : 15}
self.context = context
self.geometry = None
def add_row(self, row, force_name):
""" Parse the content from the given behave row as place column data.
"""
for name, value in zip(row.headings, row.cells):
self._add(name, value)
assert 'osm_type' in self.columns, "osm column missing"
if force_name and 'name' not in self.columns:
self._add_hstore('name', 'name',
''.join(random.choice(string.printable)
for _ in range(int(random.random()*30))))
return self
def _add(self, key, value):
if hasattr(self, '_set_key_' + key):
getattr(self, '_set_key_' + key)(value)
elif key.startswith('name+'):
self._add_hstore('name', key[5:], value)
elif key.startswith('extra+'):
self._add_hstore('extratags', key[6:], value)
elif key.startswith('addr+'):
self._add_hstore('address', key[5:], value)
elif key in ('name', 'address', 'extratags'):
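            # The column is given as a full hstore literal, e.g. "'a': 'b'".
            # eval() is acceptable here because the input comes from
            # hand-written test scenarios only.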
self.columns[key] = eval('{' + value + '}')
else:
assert key in ('class', 'type'), "Unknown column '{}'.".format(key)
self.columns[key] = None if value == '' else value
def _set_key_name(self, value):
self._add_hstore('name', 'name', value)
def _set_key_osm(self, value):
assert value[0] in 'NRW' and value[1:].isdigit(), \
"OSM id needs to be of format <NRW><id>."
self.columns['osm_type'] = value[0]
self.columns['osm_id'] = int(value[1:])
def _set_key_admin(self, value):
self.columns['admin_level'] = int(value)
def _set_key_housenr(self, value):
if value:
self._add_hstore('address', 'housenumber', value)
def _set_key_postcode(self, value):
if value:
self._add_hstore('address', 'postcode', value)
def _set_key_street(self, value):
if value:
self._add_hstore('address', 'street', value)
def _set_key_addr_place(self, value):
if value:
self._add_hstore('address', 'place', value)
def _set_key_country(self, value):
if value:
self._add_hstore('address', 'country', value)
def _set_key_geometry(self, value):
self.geometry = self.context.osm.parse_geometry(value)
assert self.geometry is not None, "Bad geometry: {}".format(value)
def _add_hstore(self, column, key, value):
if column in self.columns:
self.columns[column][key] = value
else:
self.columns[column] = {key: value}
def db_delete(self, cursor):
""" Issue a delete for the given OSM object.
"""
cursor.execute('DELETE FROM place WHERE osm_type = %s and osm_id = %s',
(self.columns['osm_type'] , self.columns['osm_id']))
def db_insert(self, cursor):
""" Insert the collected data into the database.
"""
if self.columns['osm_type'] == 'N' and self.geometry is None:
pt = self.context.osm.grid_node(self.columns['osm_id'])
if pt is None:
pt = (random.random()*360 - 180, random.random()*180 - 90)
self.geometry = "ST_SetSRID(ST_Point(%f, %f), 4326)" % pt
else:
assert self.geometry is not None, "Geometry missing"
query = 'INSERT INTO place ({}, geometry) values({}, {})'.format(
','.join(self.columns.keys()),
','.join(['%s' for x in range(len(self.columns))]),
self.geometry)
cursor.execute(query, list(self.columns.values()))
| 4,186 | Python | .py | 96 | 34.302083 | 79 | 0.575817 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,714 | steps_db_ops.py | osm-search_Nominatim/test/bdd/steps/steps_db_ops.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
import logging
from itertools import chain
import psycopg
from psycopg import sql as pysql
from place_inserter import PlaceColumn
from table_compare import NominatimID, DBRow
from nominatim_db.indexer import indexer
from nominatim_db.tokenizer import factory as tokenizer_factory
def check_database_integrity(context):
""" Check some generic constraints on the tables.
"""
with context.db.cursor(row_factory=psycopg.rows.tuple_row) as cur:
# place_addressline should not have duplicate (place_id, address_place_id)
cur.execute("""SELECT count(*) FROM
(SELECT place_id, address_place_id, count(*) as c
FROM place_addressline GROUP BY place_id, address_place_id) x
WHERE c > 1""")
assert cur.fetchone()[0] == 0, "Duplicates found in place_addressline"
# word table must not have empty word_tokens
cur.execute("SELECT count(*) FROM word WHERE word_token = ''")
assert cur.fetchone()[0] == 0, "Empty word tokens found in word table"
################################ GIVEN ##################################
@given("the (?P<named>named )?places")
def add_data_to_place_table(context, named):
""" Add entries into the place table. 'named places' makes sure that
the entries get a random name when none is explicitly given.
"""
with context.db.cursor() as cur:
cur.execute('ALTER TABLE place DISABLE TRIGGER place_before_insert')
for row in context.table:
PlaceColumn(context).add_row(row, named is not None).db_insert(cur)
cur.execute('ALTER TABLE place ENABLE TRIGGER place_before_insert')
@given("the relations")
def add_data_to_planet_relations(context):
""" Add entries into the osm2pgsql relation middle table. This is needed
for tests on data that looks up members.
"""
with context.db.cursor() as cur:
cur.execute("SELECT value FROM osm2pgsql_properties WHERE property = 'db_format'")
row = cur.fetchone()
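        # db_format '1' is the legacy osm2pgsql middle layout: members are
        # stored in a flat 'parts' array ordered nodes, ways, relations,
        # with way_off and rel_off marking the group boundaries.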
if row is None or row['value'] == '1':
for r in context.table:
last_node = 0
last_way = 0
parts = []
if r['members']:
members = []
for m in r['members'].split(','):
mid = NominatimID(m)
if mid.typ == 'N':
parts.insert(last_node, int(mid.oid))
last_node += 1
last_way += 1
elif mid.typ == 'W':
parts.insert(last_way, int(mid.oid))
last_way += 1
else:
parts.append(int(mid.oid))
members.extend((mid.typ.lower() + mid.oid, mid.cls or ''))
else:
members = None
tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")])
cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off, parts, members, tags)
VALUES (%s, %s, %s, %s, %s, %s)""",
(r['id'], last_node, last_way, parts, members, list(tags)))
else:
for r in context.table:
if r['members']:
members = []
for m in r['members'].split(','):
mid = NominatimID(m)
members.append({'ref': mid.oid, 'role': mid.cls or '', 'type': mid.typ})
else:
members = []
tags = {h[5:]: r[h] for h in r.headings if h.startswith("tags+")}
cur.execute("""INSERT INTO planet_osm_rels (id, tags, members)
VALUES (%s, %s, %s)""",
(r['id'], psycopg.types.json.Json(tags),
psycopg.types.json.Json(members)))
@given("the ways")
def add_data_to_planet_ways(context):
""" Add entries into the osm2pgsql way middle table. This is necessary for
        tests on data that look up node ids in this table.
"""
with context.db.cursor() as cur:
cur.execute("SELECT value FROM osm2pgsql_properties WHERE property = 'db_format'")
row = cur.fetchone()
json_tags = row is not None and row['value'] != '1'
for r in context.table:
if json_tags:
tags = psycopg.types.json.Json({h[5:]: r[h] for h in r.headings if h.startswith("tags+")})
else:
tags = list(chain.from_iterable([(h[5:], r[h])
for h in r.headings if h.startswith("tags+")]))
nodes = [ int(x.strip()) for x in r['nodes'].split(',') ]
cur.execute("INSERT INTO planet_osm_ways (id, nodes, tags) VALUES (%s, %s, %s)",
(r['id'], nodes, tags))
################################ WHEN ##################################
@when("importing")
def import_and_index_data_from_place_table(context):
""" Import data previously set up in the place table.
"""
context.nominatim.run_nominatim('import', '--continue', 'load-data',
'--index-noanalyse', '-q',
'--offline')
check_database_integrity(context)
    # Discard the captured log output of the input now that everything
    # went right. Otherwise it would be printed on errors that have
    # nothing to do with the import itself.
context.log_capture.buffer.clear()
@when("updating places")
def update_place_table(context):
""" Update the place table with the given data. Also runs all triggers
related to updates and reindexes the new data.
"""
context.nominatim.run_nominatim('refresh', '--functions')
with context.db.cursor() as cur:
for row in context.table:
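            # Simulate an update by removing any existing row before
            # inserting the new version.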
col = PlaceColumn(context).add_row(row, False)
col.db_delete(cur)
col.db_insert(cur)
cur.execute('SELECT flush_deleted_places()')
context.nominatim.reindex_placex(context.db)
check_database_integrity(context)
    # Discard the captured log output of the input now that everything
    # went right. Otherwise it would be printed on errors that have
    # nothing to do with the import itself.
context.log_capture.buffer.clear()
@when("updating postcodes")
def update_postcodes(context):
""" Rerun the calculation of postcodes.
"""
context.nominatim.run_nominatim('refresh', '--postcodes')
@when("marking for delete (?P<oids>.*)")
def delete_places(context, oids):
""" Remove entries from the place table. Multiple ids may be given
separated by commas. Also runs all triggers
related to updates and reindexes the new data.
"""
context.nominatim.run_nominatim('refresh', '--functions')
with context.db.cursor() as cur:
cur.execute('TRUNCATE place_to_be_deleted')
for oid in oids.split(','):
NominatimID(oid).query_osm_id(cur, 'DELETE FROM place WHERE {}')
cur.execute('SELECT flush_deleted_places()')
context.nominatim.reindex_placex(context.db)
    # Discard the captured log output of the input now that everything
    # went right. Otherwise it would be printed on errors that have
    # nothing to do with the import itself.
context.log_capture.buffer.clear()
################################ THEN ##################################
@then("(?P<table>placex|place) contains(?P<exact> exactly)?")
def check_place_contents(context, table, exact):
""" Check contents of place/placex tables. Each row represents a table row
        and all data must match. Data not present in the expected table may
        be arbitrary. The rows are identified via the 'object' column which must
        have an identifier of the form '<NRW><osm id>[:<class>]'. When multiple
        rows match (for example because 'class' was left out and there are
        multiple entries for the given OSM object) then all must match. Each
        expected row must be matched by at least one database row.
When 'exactly' is given, there must not be additional rows in the database.
"""
with context.db.cursor() as cur:
expected_content = set()
for row in context.table:
nid = NominatimID(row['object'])
query = 'SELECT *, ST_AsText(geometry) as geomtxt, ST_GeometryType(geometry) as geometrytype'
if table == 'placex':
query += ' ,ST_X(centroid) as cx, ST_Y(centroid) as cy'
query += " FROM %s WHERE {}" % (table, )
nid.query_osm_id(cur, query)
assert cur.rowcount > 0, "No rows found for " + row['object']
for res in cur:
if exact:
expected_content.add((res['osm_type'], res['osm_id'], res['class']))
DBRow(nid, res, context).assert_row(row, ['object'])
if exact:
cur.execute(pysql.SQL('SELECT osm_type, osm_id, class from')
+ pysql.Identifier(table))
actual = set([(r['osm_type'], r['osm_id'], r['class']) for r in cur])
assert expected_content == actual, \
f"Missing entries: {expected_content - actual}\n" \
f"Not expected in table: {actual - expected_content}"
@then("(?P<table>placex|place) has no entry for (?P<oid>.*)")
def check_place_has_entry(context, table, oid):
""" Ensure that no database row for the given object exists. The ID
must be of the form '<NRW><osm id>[:<class>]'.
"""
with context.db.cursor() as cur:
NominatimID(oid).query_osm_id(cur, "SELECT * FROM %s where {}" % table)
assert cur.rowcount == 0, \
"Found {} entries for ID {}".format(cur.rowcount, oid)
@then("search_name contains(?P<exclude> not)?")
def check_search_name_contents(context, exclude):
""" Check contents of place/placex tables. Each row represents a table row
and all data must match. Data not present in the expected table, may
be arbitrary. The rows are identified via the 'object' column which must
have an identifier of the form '<NRW><osm id>[:<class>]'. All
expected rows are expected to be present with at least one database row.
"""
tokenizer = tokenizer_factory.get_tokenizer_for_db(context.nominatim.get_test_config())
with tokenizer.name_analyzer() as analyzer:
with context.db.cursor() as cur:
for row in context.table:
nid = NominatimID(row['object'])
nid.row_by_place_id(cur, 'search_name',
['ST_X(centroid) as cx', 'ST_Y(centroid) as cy'])
assert cur.rowcount > 0, "No rows found for " + row['object']
for res in cur:
db_row = DBRow(nid, res, context)
for name, value in zip(row.headings, row.cells):
if name in ('name_vector', 'nameaddress_vector'):
items = [x.strip() for x in value.split(',')]
tokens = analyzer.get_word_token_info(items)
if not exclude:
assert len(tokens) >= len(items), \
"No word entry found for {}. Entries found: {!s}".format(value, len(tokens))
for word, token, wid in tokens:
if exclude:
assert wid not in res[name], \
"Found term for {}/{}: {}".format(nid, name, wid)
else:
assert wid in res[name], \
"Missing term for {}/{}: {}".format(nid, name, wid)
elif name != 'object':
assert db_row.contains(name, value), db_row.assert_msg(name, value)
@then("search_name has no entry for (?P<oid>.*)")
def check_search_name_has_entry(context, oid):
""" Check that there is noentry in the search_name table for the given
objects. IDs are in format '<NRW><osm id>[:<class>]'.
"""
with context.db.cursor() as cur:
NominatimID(oid).row_by_place_id(cur, 'search_name')
assert cur.rowcount == 0, \
"Found {} entries for ID {}".format(cur.rowcount, oid)
@then("location_postcode contains exactly")
def check_location_postcode(context):
""" Check full contents for location_postcode table. Each row represents a table row
and all data must match. Data not present in the expected table, may
be arbitrary. The rows are identified via 'country' and 'postcode' columns.
All rows must be present as excepted and there must not be additional
rows.
"""
with context.db.cursor() as cur:
cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode")
assert cur.rowcount == len(list(context.table)), \
"Postcode table has {} rows, expected {}.".format(cur.rowcount, len(list(context.table)))
results = {}
for row in cur:
key = (row['country_code'], row['postcode'])
assert key not in results, "Postcode table has duplicate entry: {}".format(row)
results[key] = DBRow((row['country_code'],row['postcode']), row, context)
for row in context.table:
db_row = results.get((row['country'],row['postcode']))
assert db_row is not None, \
f"Missing row for country '{row['country']}' postcode '{row['postcode']}'."
db_row.assert_row(row, ('country', 'postcode'))
@then("there are(?P<exclude> no)? word tokens for postcodes (?P<postcodes>.*)")
def check_word_table_for_postcodes(context, exclude, postcodes):
""" Check that the tokenizer produces postcode tokens for the given
postcodes. The postcodes are a comma-separated list of postcodes.
Whitespace matters.
"""
nctx = context.nominatim
tokenizer = tokenizer_factory.get_tokenizer_for_db(nctx.get_test_config())
with tokenizer.name_analyzer() as ana:
plist = [ana.normalize_postcode(p) for p in postcodes.split(',')]
plist.sort()
with context.db.cursor() as cur:
cur.execute("SELECT word FROM word WHERE type = 'P' and word = any(%s)",
(plist,))
found = [row['word'] for row in cur]
assert len(found) == len(set(found)), f"Duplicate rows for postcodes: {found}"
if exclude:
assert len(found) == 0, f"Unexpected postcodes: {found}"
else:
assert set(found) == set(plist), \
f"Missing postcodes {set(plist) - set(found)}. Found: {found}"
@then("place_addressline contains")
def check_place_addressline(context):
""" Check the contents of the place_addressline table. Each row represents
        a table row and all data must match. Data not present in the expected
        table may be arbitrary. The rows are identified via the 'object' column,
        representing the addressee, and the 'address' column, representing the
        address item.
"""
with context.db.cursor() as cur:
for row in context.table:
nid = NominatimID(row['object'])
pid = nid.get_place_id(cur)
apid = NominatimID(row['address']).get_place_id(cur)
cur.execute(""" SELECT * FROM place_addressline
WHERE place_id = %s AND address_place_id = %s""",
(pid, apid))
assert cur.rowcount > 0, \
"No rows found for place %s and address %s" % (row['object'], row['address'])
for res in cur:
DBRow(nid, res, context).assert_row(row, ('address', 'object'))
@then("place_addressline doesn't contain")
def check_place_addressline_exclude(context):
""" Check that the place_addressline doesn't contain any entries for the
given addressee/address item pairs.
"""
with context.db.cursor() as cur:
for row in context.table:
pid = NominatimID(row['object']).get_place_id(cur)
apid = NominatimID(row['address']).get_place_id(cur, allow_empty=True)
if apid is not None:
cur.execute(""" SELECT * FROM place_addressline
WHERE place_id = %s AND address_place_id = %s""",
(pid, apid))
assert cur.rowcount == 0, \
"Row found for place %s and address %s" % (row['object'], row['address'])
@then("W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
def check_location_property_osmline(context, oid, neg):
""" Check that the given way is present in the interpolation table.
"""
with context.db.cursor() as cur:
cur.execute("""SELECT *, ST_AsText(linegeo) as geomtxt
FROM location_property_osmline
WHERE osm_id = %s AND startnumber IS NOT NULL""",
(oid, ))
if neg:
assert cur.rowcount == 0, "Interpolation found for way {}.".format(oid)
return
todo = list(range(len(list(context.table))))
for res in cur:
for i in todo:
row = context.table[i]
if (int(row['start']) == res['startnumber']
and int(row['end']) == res['endnumber']):
todo.remove(i)
break
else:
assert False, "Unexpected row " + str(res)
DBRow(oid, res, context).assert_row(row, ('start', 'end'))
assert not todo, f"Unmatched lines in table: {list(context.table[i] for i in todo)}"
@then("location_property_osmline contains(?P<exact> exactly)?")
def check_osmline_contents(context, exact):
""" Check contents of the interpolation table. Each row represents a table row
        and all data must match. Data not present in the expected table may
        be arbitrary. The rows are identified via the 'object' column which must
        have an identifier of the form '<osm id>[:<startnumber>]'. When multiple
        rows match (for example because 'startnumber' was left out and there are
        multiple entries for the given OSM object) then all must match. Each
        expected row must be matched by at least one database row.
When 'exactly' is given, there must not be additional rows in the database.
"""
with context.db.cursor() as cur:
expected_content = set()
for row in context.table:
if ':' in row['object']:
nid, start = row['object'].split(':', 2)
start = int(start)
else:
nid, start = row['object'], None
query = """SELECT *, ST_AsText(linegeo) as geomtxt,
ST_GeometryType(linegeo) as geometrytype
FROM location_property_osmline WHERE osm_id=%s"""
if ':' in row['object']:
query += ' and startnumber = %s'
params = [int(val) for val in row['object'].split(':', 2)]
else:
params = (int(row['object']), )
cur.execute(query, params)
assert cur.rowcount > 0, "No rows found for " + row['object']
for res in cur:
if exact:
expected_content.add((res['osm_id'], res['startnumber']))
DBRow(nid, res, context).assert_row(row, ['object'])
if exact:
cur.execute('SELECT osm_id, startnumber from location_property_osmline')
actual = set([(r['osm_id'], r['startnumber']) for r in cur])
assert expected_content == actual, \
f"Missing entries: {expected_content - actual}\n" \
f"Not expected in table: {actual - expected_content}"
| 20,337 | Python | .py | 382 | 41.175393 | 115 | 0.574194 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,715 | nominatim_environment.py | osm-search_Nominatim/test/bdd/steps/nominatim_environment.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import importlib
import tempfile
import psycopg
from psycopg import sql as pysql
from nominatim_db import cli
from nominatim_db.config import Configuration
from nominatim_db.db.connection import Connection, register_hstore, execute_scalar
from nominatim_db.tools import refresh
from nominatim_db.tokenizer import factory as tokenizer_factory
from steps.utils import run_script
class NominatimEnvironment:
""" Collects all functions for the execution of Nominatim functions.
"""
def __init__(self, config):
self.src_dir = (Path(__file__) / '..' / '..' / '..' / '..').resolve()
self.db_host = config['DB_HOST']
self.db_port = config['DB_PORT']
self.db_user = config['DB_USER']
self.db_pass = config['DB_PASS']
self.template_db = config['TEMPLATE_DB']
self.test_db = config['TEST_DB']
self.api_test_db = config['API_TEST_DB']
self.api_test_file = config['API_TEST_FILE']
self.tokenizer = config['TOKENIZER']
self.import_style = config['STYLE']
self.reuse_template = not config['REMOVE_TEMPLATE']
self.keep_scenario_db = config['KEEP_TEST_DB']
self.default_config = Configuration(None).get_os_env()
self.test_env = None
self.template_db_done = False
self.api_db_done = False
self.website_dir = None
if not hasattr(self, f"create_api_request_func_{config['API_ENGINE']}"):
raise RuntimeError(f"Unknown API engine '{config['API_ENGINE']}'")
self.api_engine = getattr(self, f"create_api_request_func_{config['API_ENGINE']}")()
def connect_database(self, dbname):
""" Return a connection to the database with the given name.
Uses configured host, user and port.
"""
dbargs = {'dbname': dbname, 'row_factory': psycopg.rows.dict_row}
if self.db_host:
dbargs['host'] = self.db_host
if self.db_port:
dbargs['port'] = self.db_port
if self.db_user:
dbargs['user'] = self.db_user
if self.db_pass:
dbargs['password'] = self.db_pass
return psycopg.connect(**dbargs)
def write_nominatim_config(self, dbname):
""" Set up a custom test configuration that connects to the given
database. This sets up the environment variables so that they can
be picked up by dotenv and creates a project directory with the
appropriate website scripts.
"""
if dbname.startswith('sqlite:'):
dsn = 'sqlite:dbname={}'.format(dbname[7:])
else:
dsn = 'pgsql:dbname={}'.format(dbname)
if self.db_host:
dsn += ';host=' + self.db_host
if self.db_port:
dsn += ';port=' + self.db_port
if self.db_user:
dsn += ';user=' + self.db_user
if self.db_pass:
dsn += ';password=' + self.db_pass
self.test_env = dict(self.default_config)
self.test_env['NOMINATIM_DATABASE_DSN'] = dsn
self.test_env['NOMINATIM_LANGUAGES'] = 'en,de,fr,ja'
self.test_env['NOMINATIM_FLATNODE_FILE'] = ''
self.test_env['NOMINATIM_IMPORT_STYLE'] = 'full'
self.test_env['NOMINATIM_USE_US_TIGER_DATA'] = 'yes'
self.test_env['NOMINATIM_DATADIR'] = str((self.src_dir / 'data').resolve())
self.test_env['NOMINATIM_SQLDIR'] = str((self.src_dir / 'lib-sql').resolve())
self.test_env['NOMINATIM_CONFIGDIR'] = str((self.src_dir / 'settings').resolve())
if self.tokenizer is not None:
self.test_env['NOMINATIM_TOKENIZER'] = self.tokenizer
if self.import_style is not None:
self.test_env['NOMINATIM_IMPORT_STYLE'] = self.import_style
if self.website_dir is not None:
self.website_dir.cleanup()
self.website_dir = tempfile.TemporaryDirectory()
def get_test_config(self):
cfg = Configuration(Path(self.website_dir.name), environ=self.test_env)
return cfg
def get_libpq_dsn(self):
dsn = self.test_env['NOMINATIM_DATABASE_DSN']
def quote_param(param):
key, val = param.split('=')
val = val.replace('\\', '\\\\').replace("'", "\\'")
if ' ' in val:
val = "'" + val + "'"
return key + '=' + val
if dsn.startswith('pgsql:'):
# Old PHP DSN format. Convert before returning.
return ' '.join([quote_param(p) for p in dsn[6:].split(';')])
return dsn
def db_drop_database(self, name):
""" Drop the database with the given name.
"""
with self.connect_database('postgres') as conn:
conn.autocommit = True
conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
+ pysql.Identifier(name))
def setup_template_db(self):
""" Setup a template database that already contains common test data.
Having a template database speeds up tests considerably but at
the price that the tests sometimes run with stale data.
"""
if self.template_db_done:
return
self.template_db_done = True
self.write_nominatim_config(self.template_db)
if not self._reuse_or_drop_db(self.template_db):
try:
# execute nominatim import on an empty file to get the right tables
with tempfile.NamedTemporaryFile(dir='/tmp', suffix='.xml') as fd:
fd.write(b'<osm version="0.6"></osm>')
fd.flush()
self.run_nominatim('import', '--osm-file', fd.name,
'--osm2pgsql-cache', '1',
'--ignore-errors',
'--offline', '--index-noanalyse')
except:
self.db_drop_database(self.template_db)
raise
self.run_nominatim('refresh', '--functions')
def setup_api_db(self):
""" Setup a test against the API test database.
"""
self.write_nominatim_config(self.api_test_db)
if self.api_test_db.startswith('sqlite:'):
return
if not self.api_db_done:
self.api_db_done = True
if not self._reuse_or_drop_db(self.api_test_db):
testdata = (Path(__file__) / '..' / '..' / '..' / 'testdb').resolve()
self.test_env['NOMINATIM_WIKIPEDIA_DATA_PATH'] = str(testdata)
simp_file = Path(self.website_dir.name) / 'secondary_importance.sql.gz'
simp_file.symlink_to(testdata / 'secondary_importance.sql.gz')
try:
self.run_nominatim('import', '--osm-file', str(self.api_test_file))
self.run_nominatim('add-data', '--tiger-data', str(testdata / 'tiger'))
self.run_nominatim('freeze')
csv_path = str(testdata / 'full_en_phrases_test.csv')
self.run_nominatim('special-phrases', '--import-from-csv', csv_path)
except:
self.db_drop_database(self.api_test_db)
raise
tokenizer_factory.get_tokenizer_for_db(self.get_test_config())
def setup_unknown_db(self):
""" Setup a test against a non-existing database.
"""
# The tokenizer needs an existing database to function.
# So start with the usual database
class _Context:
db = None
context = _Context()
self.setup_db(context)
tokenizer_factory.create_tokenizer(self.get_test_config(), init_db=False)
# Then drop the DB again
self.teardown_db(context, force_drop=True)
def setup_db(self, context):
""" Setup a test against a fresh, empty test database.
"""
self.setup_template_db()
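        # Clone the test database from the template. This is much faster
        # than running a fresh import for every scenario.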
with self.connect_database(self.template_db) as conn:
conn.autocommit = True
conn.execute(pysql.SQL('DROP DATABASE IF EXISTS')
+ pysql.Identifier(self.test_db))
conn.execute(pysql.SQL('CREATE DATABASE {} TEMPLATE = {}').format(
pysql.Identifier(self.test_db),
pysql.Identifier(self.template_db)))
self.write_nominatim_config(self.test_db)
context.db = self.connect_database(self.test_db)
context.db.autocommit = True
register_hstore(context.db)
def teardown_db(self, context, force_drop=False):
""" Remove the test database, if it exists.
"""
if hasattr(context, 'db'):
context.db.close()
if force_drop or not self.keep_scenario_db:
self.db_drop_database(self.test_db)
def _reuse_or_drop_db(self, name):
""" Check for the existence of the given DB. If reuse is enabled,
            then the function checks for existence and returns True if the
            database is already there. Otherwise an existing database is
            dropped and False is always returned.
"""
if self.reuse_template:
with self.connect_database('postgres') as conn:
num = execute_scalar(conn,
'select count(*) from pg_database where datname = %s',
(name,))
if num == 1:
return True
else:
self.db_drop_database(name)
return False
def reindex_placex(self, db):
""" Run the indexing step until all data in the placex has
been processed. Indexing during updates can produce more data
to index under some circumstances. That is why indexing may have
to be run multiple times.
"""
self.run_nominatim('index')
def run_nominatim(self, *cmdline):
""" Run the nominatim command-line tool via the library.
"""
if self.website_dir is not None:
cmdline = list(cmdline) + ['--project-dir', self.website_dir.name]
cli.nominatim(osm2pgsql_path=None,
cli_args=cmdline,
environ=self.test_env)
def copy_from_place(self, db):
""" Copy data from place to the placex and location_property_osmline
tables invoking the appropriate triggers.
"""
self.run_nominatim('refresh', '--functions', '--no-diff-updates')
with db.cursor() as cur:
cur.execute("""INSERT INTO placex (osm_type, osm_id, class, type,
name, admin_level, address,
extratags, geometry)
SELECT osm_type, osm_id, class, type,
name, admin_level, address,
extratags, geometry
FROM place
WHERE not (class='place' and type='houses' and osm_type='W')""")
cur.execute("""INSERT INTO location_property_osmline (osm_id, address, linegeo)
SELECT osm_id, address, geometry
FROM place
WHERE class='place' and type='houses'
and osm_type='W'
and ST_GeometryType(geometry) = 'ST_LineString'""")
def create_api_request_func_starlette(self):
import nominatim_api.server.starlette.server
from asgi_lifespan import LifespanManager
import httpx
async def _request(endpoint, params, project_dir, environ, http_headers):
app = nominatim_api.server.starlette.server.get_application(project_dir, environ)
async with LifespanManager(app):
async with httpx.AsyncClient(app=app, base_url="http://nominatim.test") as client:
response = await client.get(f"/{endpoint}", params=params,
headers=http_headers)
return response.text, response.status_code
return _request
def create_api_request_func_falcon(self):
import nominatim_api.server.falcon.server
import falcon.testing
async def _request(endpoint, params, project_dir, environ, http_headers):
app = nominatim_api.server.falcon.server.get_application(project_dir, environ)
async with falcon.testing.ASGIConductor(app) as conductor:
response = await conductor.get(f"/{endpoint}", params=params,
headers=http_headers)
return response.text, response.status_code
return _request
| 13,091 | Python | .py | 265 | 36.49434 | 98 | 0.573824 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,716 | errors.py | osm-search_Nominatim/src/nominatim_api/errors.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom exception and error classes for Nominatim.
"""
class UsageError(Exception):
""" An error raised because of bad user input. This error will usually
not cause a stack trace to be printed unless debugging is enabled.
"""
| 457 | Python | .py | 13 | 32.846154 | 74 | 0.740406 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,717 | reverse.py | osm-search_Nominatim/src/nominatim_api/reverse.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of reverse geocoding.
"""
from typing import Optional, List, Callable, Type, Tuple, Dict, Any, cast, Union
import functools
import sqlalchemy as sa
from .typing import SaColumn, SaSelect, SaFromClause, SaLabel, SaRow,\
SaBind, SaLambdaSelect
from .sql.sqlalchemy_types import Geometry
from .connection import SearchConnection
from . import results as nres
from .logging import log
from .types import AnyPoint, DataLayer, ReverseDetails, GeometryFormat, Bbox
# In SQLAlchemy, expressions that compare with NULL need to be written
# with the equality operator.
# pylint: disable=singleton-comparison
RowFunc = Callable[[Optional[SaRow], Type[nres.ReverseResult]], Optional[nres.ReverseResult]]
WKT_PARAM: SaBind = sa.bindparam('wkt', type_=Geometry)
MAX_RANK_PARAM: SaBind = sa.bindparam('max_rank')
def no_index(expr: SaColumn) -> SaColumn:
""" Wrap the given expression, so that the query planner will
refrain from using the expression for index lookup.
"""
return sa.func.coalesce(sa.null(), expr) # pylint: disable=not-callable
def _select_from_placex(t: SaFromClause, use_wkt: bool = True) -> SaSelect:
""" Create a select statement with the columns relevant for reverse
results.
"""
if not use_wkt:
distance = t.c.distance
centroid = t.c.centroid
else:
distance = t.c.geometry.ST_Distance(WKT_PARAM)
centroid = sa.case((t.c.geometry.is_line_like(), t.c.geometry.ST_ClosestPoint(WKT_PARAM)),
else_=t.c.centroid).label('centroid')
return sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name,
t.c.class_, t.c.type,
t.c.address, t.c.extratags,
t.c.housenumber, t.c.postcode, t.c.country_code,
t.c.importance, t.c.wikipedia,
t.c.parent_place_id, t.c.rank_address, t.c.rank_search,
centroid,
t.c.linked_place_id, t.c.admin_level,
distance.label('distance'),
t.c.geometry.ST_Expand(0).label('bbox'))
def _interpolated_housenumber(table: SaFromClause) -> SaLabel:
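    # Round the fractional line position to the nearest position allowed
    # by the interpolation step and convert it to an integer housenumber.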
return sa.cast(table.c.startnumber
+ sa.func.round(((table.c.endnumber - table.c.startnumber) * table.c.position)
/ table.c.step) * table.c.step,
sa.Integer).label('housenumber')
def _interpolated_position(table: SaFromClause) -> SaLabel:
fac = sa.cast(table.c.step, sa.Float) / (table.c.endnumber - table.c.startnumber)
rounded_pos = sa.func.round(table.c.position / fac) * fac
return sa.case(
(table.c.endnumber == table.c.startnumber, table.c.linegeo.ST_Centroid()),
else_=table.c.linegeo.ST_LineInterpolatePoint(rounded_pos)).label('centroid')
def _locate_interpolation(table: SaFromClause) -> SaLabel:
""" Given a position, locate the closest point on the line.
"""
return sa.case((table.c.linegeo.is_line_like(),
table.c.linegeo.ST_LineLocatePoint(WKT_PARAM)),
else_=0).label('position')
def _get_closest(*rows: Optional[SaRow]) -> Optional[SaRow]:
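    # Rows may be None; missing rows count as infinitely far away.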
return min(rows, key=lambda row: 1000 if row is None else row.distance)
class ReverseGeocoder:
""" Class implementing the logic for looking up a place from a
coordinate.
"""
def __init__(self, conn: SearchConnection, params: ReverseDetails,
restrict_to_country_areas: bool = False) -> None:
self.conn = conn
self.params = params
self.restrict_to_country_areas = restrict_to_country_areas
self.bind_params: Dict[str, Any] = {'max_rank': params.max_rank}
@property
def max_rank(self) -> int:
""" Return the maximum configured rank.
"""
return self.params.max_rank
def has_geometries(self) -> bool:
""" Check if any geometries are requested.
"""
return bool(self.params.geometry_output)
def layer_enabled(self, *layer: DataLayer) -> bool:
""" Return true when any of the given layer types are requested.
"""
        return any(self.params.layers & ly for ly in layer)
def layer_disabled(self, *layer: DataLayer) -> bool:
""" Return true when none of the given layer types is requested.
"""
        return not any(self.params.layers & ly for ly in layer)
def has_feature_layers(self) -> bool:
""" Return true if any layer other than ADDRESS or POI is requested.
"""
return self.layer_enabled(DataLayer.RAILWAY, DataLayer.MANMADE, DataLayer.NATURAL)
def _add_geometry_columns(self, sql: SaLambdaSelect, col: SaColumn) -> SaSelect:
out = []
if self.params.geometry_simplification > 0.0:
col = sa.func.ST_SimplifyPreserveTopology(col, self.params.geometry_simplification)
if self.params.geometry_output & GeometryFormat.GEOJSON:
out.append(sa.func.ST_AsGeoJSON(col, 7).label('geometry_geojson'))
if self.params.geometry_output & GeometryFormat.TEXT:
out.append(sa.func.ST_AsText(col).label('geometry_text'))
if self.params.geometry_output & GeometryFormat.KML:
out.append(sa.func.ST_AsKML(col, 7).label('geometry_kml'))
if self.params.geometry_output & GeometryFormat.SVG:
out.append(sa.func.ST_AsSVG(col, 0, 7).label('geometry_svg'))
return sql.add_columns(*out)
def _filter_by_layer(self, table: SaFromClause) -> SaColumn:
if self.layer_enabled(DataLayer.MANMADE):
exclude = []
if self.layer_disabled(DataLayer.RAILWAY):
exclude.append('railway')
if self.layer_disabled(DataLayer.NATURAL):
exclude.extend(('natural', 'water', 'waterway'))
return table.c.class_.not_in(tuple(exclude))
include = []
if self.layer_enabled(DataLayer.RAILWAY):
include.append('railway')
if self.layer_enabled(DataLayer.NATURAL):
include.extend(('natural', 'water', 'waterway'))
return table.c.class_.in_(tuple(include))
async def _find_closest_street_or_poi(self, distance: float) -> Optional[SaRow]:
""" Look up the closest rank 26+ place in the database, which
is closer than the given distance.
"""
t = self.conn.t.placex
# PostgreSQL must not get the distance as a parameter because
# there is a danger it won't be able to properly estimate index use
# when used with prepared statements
diststr = sa.text(f"{distance}")
sql: SaLambdaSelect = sa.lambda_stmt(lambda: _select_from_placex(t)
.where(t.c.geometry.within_distance(WKT_PARAM, diststr))
.where(t.c.indexed_status == 0)
.where(t.c.linked_place_id == None)
.where(sa.or_(sa.not_(t.c.geometry.is_area()),
t.c.centroid.ST_Distance(WKT_PARAM) < diststr))
.order_by('distance')
.limit(2))
if self.has_geometries():
sql = self._add_geometry_columns(sql, t.c.geometry)
restrict: List[Union[SaColumn, Callable[[], SaColumn]]] = []
if self.layer_enabled(DataLayer.ADDRESS):
max_rank = min(29, self.max_rank)
restrict.append(lambda: no_index(t.c.rank_address).between(26, max_rank))
if self.max_rank == 30:
restrict.append(lambda: sa.func.IsAddressPoint(t))
if self.layer_enabled(DataLayer.POI) and self.max_rank == 30:
restrict.append(lambda: sa.and_(no_index(t.c.rank_search) == 30,
t.c.class_.not_in(('place', 'building')),
sa.not_(t.c.geometry.is_line_like())))
if self.has_feature_layers():
restrict.append(sa.and_(no_index(t.c.rank_search).between(26, MAX_RANK_PARAM),
no_index(t.c.rank_address) == 0,
self._filter_by_layer(t)))
if not restrict:
return None
sql = sql.where(sa.or_(*restrict))
# If the closest object is inside an area, then check if there is a
# POI node nearby and return that.
prev_row = None
for row in await self.conn.execute(sql, self.bind_params):
if prev_row is None:
if row.rank_search <= 27 or row.osm_type == 'N' or row.distance > 0:
return row
prev_row = row
else:
if row.rank_search > 27 and row.osm_type == 'N'\
and row.distance < 0.0001:
return row
return prev_row
async def _find_housenumber_for_street(self, parent_place_id: int) -> Optional[SaRow]:
t = self.conn.t.placex
def _base_query() -> SaSelect:
return _select_from_placex(t)\
.where(t.c.geometry.within_distance(WKT_PARAM, 0.001))\
.where(t.c.parent_place_id == parent_place_id)\
.where(sa.func.IsAddressPoint(t))\
.where(t.c.indexed_status == 0)\
.where(t.c.linked_place_id == None)\
.order_by('distance')\
.limit(1)
sql: SaLambdaSelect
if self.has_geometries():
sql = self._add_geometry_columns(_base_query(), t.c.geometry)
else:
sql = sa.lambda_stmt(_base_query)
return (await self.conn.execute(sql, self.bind_params)).one_or_none()
async def _find_interpolation_for_street(self, parent_place_id: Optional[int],
distance: float) -> Optional[SaRow]:
t = self.conn.t.osmline
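        # First find the closest interpolation line and locate the position
        # on it, then compute housenumber and centroid at that position.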
sql = sa.select(t,
t.c.linegeo.ST_Distance(WKT_PARAM).label('distance'),
_locate_interpolation(t))\
.where(t.c.linegeo.within_distance(WKT_PARAM, distance))\
.where(t.c.startnumber != None)\
.order_by('distance')\
.limit(1)
if parent_place_id is not None:
sql = sql.where(t.c.parent_place_id == parent_place_id)
inner = sql.subquery('ipol')
sql = sa.select(inner.c.place_id, inner.c.osm_id,
inner.c.parent_place_id, inner.c.address,
_interpolated_housenumber(inner),
_interpolated_position(inner),
inner.c.postcode, inner.c.country_code,
inner.c.distance)
if self.has_geometries():
sub = sql.subquery('geom')
sql = self._add_geometry_columns(sa.select(sub), sub.c.centroid)
return (await self.conn.execute(sql, self.bind_params)).one_or_none()
async def _find_tiger_number_for_street(self, parent_place_id: int) -> Optional[SaRow]:
t = self.conn.t.tiger
def _base_query() -> SaSelect:
inner = sa.select(t,
t.c.linegeo.ST_Distance(WKT_PARAM).label('distance'),
_locate_interpolation(t))\
.where(t.c.linegeo.within_distance(WKT_PARAM, 0.001))\
.where(t.c.parent_place_id == parent_place_id)\
.order_by('distance')\
.limit(1)\
.subquery('tiger')
return sa.select(inner.c.place_id,
inner.c.parent_place_id,
_interpolated_housenumber(inner),
_interpolated_position(inner),
inner.c.postcode,
inner.c.distance)
sql: SaLambdaSelect
if self.has_geometries():
sub = _base_query().subquery('geom')
sql = self._add_geometry_columns(sa.select(sub), sub.c.centroid)
else:
sql = sa.lambda_stmt(_base_query)
return (await self.conn.execute(sql, self.bind_params)).one_or_none()
async def lookup_street_poi(self) -> Tuple[Optional[SaRow], RowFunc]:
""" Find a street or POI/address for the given WKT point.
"""
log().section('Reverse lookup on street/address level')
distance = 0.006
parent_place_id = None
row = await self._find_closest_street_or_poi(distance)
row_func: RowFunc = nres.create_from_placex_row
log().var_dump('Result (street/building)', row)
# If the closest result was a street, but an address was requested,
# check for a housenumber nearby which is part of the street.
if row is not None:
if self.max_rank > 27 \
and self.layer_enabled(DataLayer.ADDRESS) \
and row.rank_address <= 27:
distance = 0.001
parent_place_id = row.place_id
log().comment('Find housenumber for street')
addr_row = await self._find_housenumber_for_street(parent_place_id)
log().var_dump('Result (street housenumber)', addr_row)
if addr_row is not None:
row = addr_row
row_func = nres.create_from_placex_row
distance = addr_row.distance
elif row.country_code == 'us' and parent_place_id is not None:
log().comment('Find TIGER housenumber for street')
addr_row = await self._find_tiger_number_for_street(parent_place_id)
log().var_dump('Result (street Tiger housenumber)', addr_row)
if addr_row is not None:
row_func = cast(RowFunc,
functools.partial(nres.create_from_tiger_row,
osm_type=row.osm_type,
osm_id=row.osm_id))
row = addr_row
else:
distance = row.distance
# Check for an interpolation that is either closer than our result
# or belongs to a close street found.
if self.max_rank > 27 and self.layer_enabled(DataLayer.ADDRESS):
log().comment('Find interpolation for street')
addr_row = await self._find_interpolation_for_street(parent_place_id,
distance)
log().var_dump('Result (street interpolation)', addr_row)
if addr_row is not None:
row = addr_row
row_func = nres.create_from_osmline_row
return row, row_func
async def _lookup_area_address(self) -> Optional[SaRow]:
""" Lookup large addressable areas for the given WKT point.
"""
log().comment('Reverse lookup by larger address area features')
t = self.conn.t.placex
def _base_query() -> SaSelect:
# The inner SQL brings results in the right order, so that
# later only a minimum of results needs to be checked with ST_Contains.
inner = sa.select(t, sa.literal(0.0).label('distance'))\
.where(t.c.rank_search.between(5, MAX_RANK_PARAM))\
.where(t.c.geometry.intersects(WKT_PARAM))\
.where(sa.func.PlacexGeometryReverseLookuppolygon())\
.order_by(sa.desc(t.c.rank_search))\
.limit(50)\
.subquery('area')
return _select_from_placex(inner, False)\
.where(inner.c.geometry.ST_Contains(WKT_PARAM))\
.order_by(sa.desc(inner.c.rank_search))\
.limit(1)
sql: SaLambdaSelect = sa.lambda_stmt(_base_query)
if self.has_geometries():
sql = self._add_geometry_columns(sql, sa.literal_column('area.geometry'))
address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none()
log().var_dump('Result (area)', address_row)
if address_row is not None and address_row.rank_search < self.max_rank:
log().comment('Search for better matching place nodes inside the area')
address_rank = address_row.rank_search
address_id = address_row.place_id
def _place_inside_area_query() -> SaSelect:
inner = \
sa.select(t,
t.c.geometry.ST_Distance(WKT_PARAM).label('distance'))\
.where(t.c.rank_search > address_rank)\
.where(t.c.rank_search <= MAX_RANK_PARAM)\
.where(t.c.indexed_status == 0)\
.where(sa.func.IntersectsReverseDistance(t, WKT_PARAM))\
.order_by(sa.desc(t.c.rank_search))\
.limit(50)\
.subquery('places')
touter = t.alias('outer')
return _select_from_placex(inner, False)\
.join(touter, touter.c.geometry.ST_Contains(inner.c.geometry))\
.where(touter.c.place_id == address_id)\
.where(sa.func.IsBelowReverseDistance(inner.c.distance, inner.c.rank_search))\
.order_by(sa.desc(inner.c.rank_search), inner.c.distance)\
.limit(1)
if self.has_geometries():
sql = self._add_geometry_columns(_place_inside_area_query(),
sa.literal_column('places.geometry'))
else:
sql = sa.lambda_stmt(_place_inside_area_query)
place_address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none()
log().var_dump('Result (place node)', place_address_row)
if place_address_row is not None:
return place_address_row
return address_row
async def _lookup_area_others(self) -> Optional[SaRow]:
t = self.conn.t.placex
inner = sa.select(t, t.c.geometry.ST_Distance(WKT_PARAM).label('distance'))\
.where(t.c.rank_address == 0)\
.where(t.c.rank_search.between(5, MAX_RANK_PARAM))\
.where(t.c.name != None)\
.where(t.c.indexed_status == 0)\
.where(t.c.linked_place_id == None)\
.where(self._filter_by_layer(t))\
.where(t.c.geometry.intersects(sa.func.ST_Expand(WKT_PARAM, 0.007)))\
.order_by(sa.desc(t.c.rank_search))\
.order_by('distance')\
.limit(50)\
.subquery()
sql = _select_from_placex(inner, False)\
.where(sa.or_(sa.not_(inner.c.geometry.is_area()),
inner.c.geometry.ST_Contains(WKT_PARAM)))\
.order_by(sa.desc(inner.c.rank_search), inner.c.distance)\
.limit(1)
if self.has_geometries():
sql = self._add_geometry_columns(sql, inner.c.geometry)
row = (await self.conn.execute(sql, self.bind_params)).one_or_none()
log().var_dump('Result (non-address feature)', row)
return row
async def lookup_area(self) -> Optional[SaRow]:
""" Lookup large areas for the current search.
"""
log().section('Reverse lookup by larger area features')
if self.layer_enabled(DataLayer.ADDRESS):
address_row = await self._lookup_area_address()
else:
address_row = None
if self.has_feature_layers():
other_row = await self._lookup_area_others()
else:
other_row = None
return _get_closest(address_row, other_row)
async def lookup_country_codes(self) -> List[str]:
""" Lookup the country for the current search.
"""
log().section('Reverse lookup by country code')
t = self.conn.t.country_grid
sql = sa.select(t.c.country_code).distinct()\
.where(t.c.geometry.ST_Contains(WKT_PARAM))
ccodes = [cast(str, r[0]) for r in await self.conn.execute(sql, self.bind_params)]
log().var_dump('Country codes', ccodes)
return ccodes
async def lookup_country(self, ccodes: List[str]) -> Optional[SaRow]:
""" Lookup the country for the current search.
"""
if not ccodes:
ccodes = await self.lookup_country_codes()
if not ccodes:
return None
t = self.conn.t.placex
if self.max_rank > 4:
log().comment('Search for place nodes in country')
def _base_query() -> SaSelect:
inner = \
sa.select(t,
t.c.geometry.ST_Distance(WKT_PARAM).label('distance'))\
.where(t.c.rank_search > 4)\
.where(t.c.rank_search <= MAX_RANK_PARAM)\
.where(t.c.indexed_status == 0)\
.where(t.c.country_code.in_(ccodes))\
.where(sa.func.IntersectsReverseDistance(t, WKT_PARAM))\
.order_by(sa.desc(t.c.rank_search))\
.limit(50)\
.subquery('area')
return _select_from_placex(inner, False)\
.where(sa.func.IsBelowReverseDistance(inner.c.distance, inner.c.rank_search))\
.order_by(sa.desc(inner.c.rank_search), inner.c.distance)\
.limit(1)
sql: SaLambdaSelect
if self.has_geometries():
sql = self._add_geometry_columns(_base_query(),
sa.literal_column('area.geometry'))
else:
sql = sa.lambda_stmt(_base_query)
address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none()
log().var_dump('Result (addressable place node)', address_row)
else:
address_row = None
if address_row is None:
# Still nothing, then return a country with the appropriate country code.
def _country_base_query() -> SaSelect:
return _select_from_placex(t)\
.where(t.c.country_code.in_(ccodes))\
.where(t.c.rank_address == 4)\
.where(t.c.rank_search == 4)\
.where(t.c.linked_place_id == None)\
.order_by('distance')\
.limit(1)
if self.has_geometries():
sql = self._add_geometry_columns(_country_base_query(), t.c.geometry)
else:
sql = sa.lambda_stmt(_country_base_query)
address_row = (await self.conn.execute(sql, self.bind_params)).one_or_none()
return address_row
async def lookup(self, coord: AnyPoint) -> Optional[nres.ReverseResult]:
""" Look up a single coordinate. Returns the place information,
if a place was found near the coordinates or None otherwise.
"""
log().function('reverse_lookup', coord=coord, params=self.params)
self.bind_params['wkt'] = f'POINT({coord[0]} {coord[1]})'
row: Optional[SaRow] = None
row_func: RowFunc = nres.create_from_placex_row
if self.max_rank >= 26:
row, tmp_row_func = await self.lookup_street_poi()
if row is not None:
row_func = tmp_row_func
if row is None:
if self.restrict_to_country_areas:
ccodes = await self.lookup_country_codes()
if not ccodes:
return None
else:
ccodes = []
if self.max_rank > 4:
row = await self.lookup_area()
if row is None and self.layer_enabled(DataLayer.ADDRESS):
row = await self.lookup_country(ccodes)
result = row_func(row, nres.ReverseResult)
if result is not None:
assert row is not None
result.distance = row.distance
if hasattr(row, 'bbox'):
result.bbox = Bbox.from_wkb(row.bbox)
await nres.add_result_details(self.conn, [result], self.params)
return result
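# --- Editor's usage sketch (illustrative, not part of the upstream file) -----
# The geocoder above is normally driven through NominatimAPIAsync.reverse().
# A minimal direct invocation could look like this, assuming an already open
# SearchConnection `conn` and ReverseDetails `details` (both hypothetical in
# this sketch):
#
#     geocoder = ReverseGeocoder(conn, details, False)
#     result = await geocoder.lookup((8.4, 49.0))  # (lon, lat)
#     if result is not None:
#         print(result.place_id, result.distance)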
| 24,774 | Python | .py | 474 | 38.008439 | 98 | 0.561541 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,718 | config.py | osm-search_Nominatim/src/nominatim_api/config.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
# This file is just a placeholder to make the config module available
# during development. It will be replaced by nominatim_db/config.py on
# installation.
# pylint: skip-file
from nominatim_db.config import *
| 423 | Python | .py | 11 | 37.363636 | 70 | 0.776156 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,719 | logging.py | osm-search_Nominatim/src/nominatim_api/logging.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for specialised logging with HTML output.
"""
from typing import Any, Iterator, Optional, List, Tuple, cast, Union, Mapping, Sequence
from contextvars import ContextVar
import datetime as dt
import textwrap
import io
import re
import html
import sqlalchemy as sa
from sqlalchemy.ext.asyncio import AsyncConnection
try:
from pygments import highlight
from pygments.lexers import PythonLexer, PostgresLexer
from pygments.formatters import HtmlFormatter
CODE_HIGHLIGHT = True
except ModuleNotFoundError:
CODE_HIGHLIGHT = False
def _debug_name(res: Any) -> str:
if res.names:
return cast(str, res.names.get('name', next(iter(res.names.values()))))
return f"Hnr {res.housenumber}" if res.housenumber is not None else '[NONE]'
class BaseLogger:
""" Interface for logging function.
The base implementation does nothing. Overwrite the functions
in derived classes which implement logging functionality.
"""
def get_buffer(self) -> str:
""" Return the current content of the log buffer.
"""
return ''
def function(self, func: str, **kwargs: Any) -> None:
""" Start a new debug chapter for the given function and its parameters.
"""
def section(self, heading: str) -> None:
""" Start a new section with the given title.
"""
def comment(self, text: str) -> None:
""" Add a simple comment to the debug output.
"""
def var_dump(self, heading: str, var: Any) -> None:
""" Print the content of the variable to the debug output prefixed by
the given heading.
"""
def table_dump(self, heading: str, rows: Iterator[Optional[List[Any]]]) -> None:
""" Print the table generated by the generator function.
"""
def result_dump(self, heading: str, results: Iterator[Tuple[Any, Any]]) -> None:
""" Print a list of search results generated by the generator function.
"""
def sql(self, conn: AsyncConnection, statement: 'sa.Executable',
params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None]) -> None:
""" Print the SQL for the given statement.
"""
def format_sql(self, conn: AsyncConnection, statement: 'sa.Executable',
extra_params: Union[Mapping[str, Any],
Sequence[Mapping[str, Any]], None]) -> str:
""" Return the compiled version of the statement.
"""
compiled = cast('sa.ClauseElement', statement).compile(conn.sync_engine)
params = dict(compiled.params)
if isinstance(extra_params, Mapping):
for k, v in extra_params.items():
if hasattr(v, 'to_wkt'):
params[k] = v.to_wkt()
elif isinstance(v, (int, float)):
params[k] = v
else:
params[k] = str(v)
elif isinstance(extra_params, Sequence) and extra_params:
for k in extra_params[0]:
params[k] = f':{k}'
sqlstr = str(compiled)
if conn.dialect.name == 'postgresql':
if sa.__version__.startswith('1'):
try:
sqlstr = re.sub(r'__\[POSTCOMPILE_[^]]*\]', '%s', sqlstr)
return sqlstr % tuple((repr(params.get(name, None))
for name in compiled.positiontup)) # type: ignore
except TypeError:
return sqlstr
# Fixes an odd issue with Python 3.7 where percentages are not
# quoted correctly.
sqlstr = re.sub(r'%(?!\()', '%%', sqlstr)
sqlstr = re.sub(r'__\[POSTCOMPILE_([^]]*)\]', r'%(\1)s', sqlstr)
return sqlstr % params
assert conn.dialect.name == 'sqlite'
# params in positional order
pparams = (repr(params.get(name, None)) for name in compiled.positiontup) # type: ignore
sqlstr = re.sub(r'__\[POSTCOMPILE_([^]]*)\]', '?', sqlstr)
sqlstr = re.sub(r"\?", lambda m: next(pparams), sqlstr)
return sqlstr
class HTMLLogger(BaseLogger):
""" Logger that formats messages in HTML.
"""
def __init__(self) -> None:
self.buffer = io.StringIO()
def _timestamp(self) -> None:
self._write(f'<p class="timestamp">[{dt.datetime.now()}]</p>')
def get_buffer(self) -> str:
return HTML_HEADER + self.buffer.getvalue() + HTML_FOOTER
def function(self, func: str, **kwargs: Any) -> None:
self._timestamp()
self._write(f"<h1>Debug output for {func}()</h1>\n<p>Parameters:<dl>")
for name, value in kwargs.items():
self._write(f'<dt>{name}</dt><dd>{self._python_var(value)}</dd>')
self._write('</dl></p>')
def section(self, heading: str) -> None:
self._timestamp()
self._write(f"<h2>{heading}</h2>")
def comment(self, text: str) -> None:
self._timestamp()
self._write(f"<p>{text}</p>")
def var_dump(self, heading: str, var: Any) -> None:
self._timestamp()
if callable(var):
var = var()
self._write(f'<h5>{heading}</h5>{self._python_var(var)}')
def table_dump(self, heading: str, rows: Iterator[Optional[List[Any]]]) -> None:
self._timestamp()
head = next(rows)
assert head
self._write(f'<table><thead><tr><th colspan="{len(head)}">{heading}</th></tr><tr>')
for cell in head:
self._write(f'<th>{cell}</th>')
self._write('</tr></thead><tbody>')
for row in rows:
if row is not None:
self._write('<tr>')
for cell in row:
self._write(f'<td>{cell}</td>')
self._write('</tr>')
self._write('</tbody></table>')
def result_dump(self, heading: str, results: Iterator[Tuple[Any, Any]]) -> None:
""" Print a list of search results generated by the generator function.
"""
self._timestamp()
def format_osm(osm_object: Optional[Tuple[str, int]]) -> str:
if not osm_object:
return '-'
t, i = osm_object
if t == 'N':
fullt = 'node'
elif t == 'W':
fullt = 'way'
elif t == 'R':
fullt = 'relation'
else:
return f'{t}{i}'
return f'<a href="https://www.openstreetmap.org/{fullt}/{i}">{t}{i}</a>'
self._write(f'<h5>{heading}</h5><p><dl>')
total = 0
for rank, res in results:
self._write(f'<dt>[{rank:.3f}]</dt> <dd>{res.source_table.name}(')
self._write(f"{_debug_name(res)}, type=({','.join(res.category)}), ")
self._write(f"rank={res.rank_address}, ")
self._write(f"osm={format_osm(res.osm_object)}, ")
self._write(f'cc={res.country_code}, ')
self._write(f'importance={res.importance or float("nan"):.5f})</dd>')
total += 1
self._write(f'</dl><b>TOTAL:</b> {total}</p>')
def sql(self, conn: AsyncConnection, statement: 'sa.Executable',
params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None]) -> None:
self._timestamp()
sqlstr = self.format_sql(conn, statement, params)
if CODE_HIGHLIGHT:
sqlstr = highlight(sqlstr, PostgresLexer(),
HtmlFormatter(nowrap=True, lineseparator='<br />'))
self._write(f'<div class="highlight"><code class="lang-sql">{sqlstr}</code></div>')
else:
self._write(f'<code class="lang-sql">{html.escape(sqlstr)}</code>')
def _python_var(self, var: Any) -> str:
if CODE_HIGHLIGHT:
fmt = highlight(str(var), PythonLexer(), HtmlFormatter(nowrap=True))
return f'<div class="highlight"><code class="lang-python">{fmt}</code></div>'
return f'<code class="lang-python">{html.escape(str(var))}</code>'
def _write(self, text: str) -> None:
""" Add the raw text to the debug output.
"""
self.buffer.write(text)
class TextLogger(BaseLogger):
""" Logger creating output suitable for the console.
"""
def __init__(self) -> None:
self.buffer = io.StringIO()
def _timestamp(self) -> None:
self._write(f'[{dt.datetime.now()}]\n')
def get_buffer(self) -> str:
return self.buffer.getvalue()
def function(self, func: str, **kwargs: Any) -> None:
self._write(f"#### Debug output for {func}()\n\nParameters:\n")
for name, value in kwargs.items():
self._write(f' {name}: {self._python_var(value)}\n')
self._write('\n')
def section(self, heading: str) -> None:
self._timestamp()
self._write(f"\n# {heading}\n\n")
def comment(self, text: str) -> None:
self._write(f"{text}\n")
def var_dump(self, heading: str, var: Any) -> None:
if callable(var):
var = var()
self._write(f'{heading}:\n {self._python_var(var)}\n\n')
def table_dump(self, heading: str, rows: Iterator[Optional[List[Any]]]) -> None:
self._write(f'{heading}:\n')
data = [list(map(self._python_var, row)) if row else None for row in rows]
assert data[0] is not None
num_cols = len(data[0])
maxlens = [max(len(d[i]) for d in data if d) for i in range(num_cols)]
tablewidth = sum(maxlens) + 3 * num_cols + 1
row_format = '| ' + ' | '.join(f'{{:<{l}}}' for l in maxlens) + ' |\n'
self._write('-'*tablewidth + '\n')
self._write(row_format.format(*data[0]))
self._write('-'*tablewidth + '\n')
for row in data[1:]:
if row:
self._write(row_format.format(*row))
else:
self._write('-'*tablewidth + '\n')
if data[-1]:
self._write('-'*tablewidth + '\n')
def result_dump(self, heading: str, results: Iterator[Tuple[Any, Any]]) -> None:
self._timestamp()
self._write(f'{heading}:\n')
total = 0
for rank, res in results:
self._write(f'[{rank:.3f}] {res.source_table.name}(')
self._write(f"{_debug_name(res)}, type=({','.join(res.category)}), ")
self._write(f"rank={res.rank_address}, ")
self._write(f"osm={''.join(map(str, res.osm_object or []))}, ")
self._write(f'cc={res.country_code}, ')
self._write(f'importance={res.importance or -1:.5f})\n')
total += 1
self._write(f'TOTAL: {total}\n\n')
def sql(self, conn: AsyncConnection, statement: 'sa.Executable',
params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None]) -> None:
self._timestamp()
sqlstr = '\n| '.join(textwrap.wrap(self.format_sql(conn, statement, params), width=78))
self._write(f"| {sqlstr}\n\n")
def _python_var(self, var: Any) -> str:
return str(var)
def _write(self, text: str) -> None:
self.buffer.write(text)
logger: ContextVar[BaseLogger] = ContextVar('logger', default=BaseLogger())
def set_log_output(fmt: str) -> None:
""" Enable collecting debug information.
"""
if fmt == 'html':
logger.set(HTMLLogger())
elif fmt == 'text':
logger.set(TextLogger())
else:
logger.set(BaseLogger())
def log() -> BaseLogger:
""" Return the logger for the current context.
"""
return logger.get()
def get_and_disable() -> str:
""" Return the current content of the debug buffer and disable logging.
"""
buf = logger.get().get_buffer()
logger.set(BaseLogger())
return buf
HTML_HEADER: str = """<!DOCTYPE html>
<html>
<head>
<title>Nominatim - Debug</title>
<style>
""" + \
(HtmlFormatter(nobackground=True).get_style_defs('.highlight') if CODE_HIGHLIGHT else '') +\
"""
h2 { font-size: x-large }
dl {
padding-left: 10pt;
font-family: monospace
}
dt {
float: left;
font-weight: bold;
margin-right: 0.5em
}
dt::after { content: ": "; }
dd::after {
clear: left;
display: block
}
.lang-sql {
color: #555;
font-size: small
}
h5 {
border: solid lightgrey 0.1pt;
margin-bottom: 0;
background-color: #f7f7f7
}
h5 + .highlight {
padding: 3pt;
border: solid lightgrey 0.1pt
}
table, th, tbody {
border: thin solid;
border-collapse: collapse;
}
td {
border-right: thin solid;
padding-left: 3pt;
padding-right: 3pt;
}
.timestamp {
font-size: 0.8em;
color: darkblue;
width: calc(100% - 5pt);
text-align: right;
position: absolute;
left: 0;
margin-top: -5px;
}
</style>
</head>
<body>
"""
HTML_FOOTER: str = "</body></html>"
| 13,227 | Python | .py | 329 | 31.382979 | 96 | 0.567219 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,720 | status.py | osm-search_Nominatim/src/nominatim_api/status.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Classes and function related to status call.
"""
from typing import Optional
import datetime as dt
import dataclasses
import sqlalchemy as sa
from .connection import SearchConnection
from .version import NOMINATIM_API_VERSION
@dataclasses.dataclass
class StatusResult:
""" Result of a call to the status API.
"""
status: int
message: str
software_version = NOMINATIM_API_VERSION
data_updated: Optional[dt.datetime] = None
database_version: Optional[str] = None
async def get_status(conn: SearchConnection) -> StatusResult:
""" Execute a status API call.
"""
status = StatusResult(0, 'OK')
# Last update date
sql = sa.select(conn.t.import_status.c.lastimportdate).limit(1)
status.data_updated = await conn.scalar(sql)
if status.data_updated is not None:
if status.data_updated.tzinfo is None:
status.data_updated = status.data_updated.replace(tzinfo=dt.timezone.utc)
else:
status.data_updated = status.data_updated.astimezone(dt.timezone.utc)
# Database version
try:
status.database_version = await conn.get_property('database_version')
except ValueError:
pass
return status
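# --- Editor's usage sketch (illustrative, not part of the upstream file) -----
# Typical use from an asynchronous context, assuming an already open
# SearchConnection `conn` (hypothetical here):
#
#     status = await get_status(conn)
#     print(status.status, status.message, status.data_updated)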
| 1,426 | Python | .py | 42 | 29.690476 | 85 | 0.725091 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,721 | results.py | osm-search_Nominatim/src/nominatim_api/results.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Dataclasses for search results and helper functions to fill them.
Data classes are part of the public API while the functions are for
internal use only. That's why they are implemented as free-standing functions
instead of member functions.
"""
from typing import Optional, Tuple, Dict, Sequence, TypeVar, Type, List, cast, Callable
import enum
import dataclasses
import datetime as dt
import sqlalchemy as sa
from .typing import SaSelect, SaRow
from .sql.sqlalchemy_types import Geometry
from .types import Point, Bbox, LookupDetails
from .connection import SearchConnection
from .logging import log
from .localization import Locales
# This file defines complex result data classes.
# pylint: disable=too-many-instance-attributes
def _mingle_name_tags(names: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:
""" Mix-in names from linked places, so that they show up
as standard names where necessary.
"""
if not names:
return None
out = {}
for k, v in names.items():
if k.startswith('_place_'):
outkey = k[7:]
out[k if outkey in names else outkey] = v
else:
out[k] = v
return out
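# --- Editor's note (illustrative examples) ------------------------------------
# A '_place_' prefixed tag is promoted to its plain key only when that key is
# not already present:
#
#     _mingle_name_tags({'name': 'A', '_place_ref': 'B'})
#     # -> {'name': 'A', 'ref': 'B'}
#     _mingle_name_tags({'name': 'A', '_place_name': 'B'})
#     # -> {'name': 'A', '_place_name': 'B'}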
class SourceTable(enum.Enum):
""" The `SourceTable` type lists the possible sources a result can have.
"""
PLACEX = 1
""" The placex table is the main source for result usually containing
OSM data.
"""
OSMLINE = 2
""" The osmline table contains address interpolations from OSM data.
Interpolation addresses are always approximate. The OSM id in the
result refers to the OSM way with the interpolation line object.
"""
TIGER = 3
""" TIGER address data contains US addresses imported on the side,
see [Installing TIGER data](../customize/Tiger.md).
TIGER addresses are also interpolations. The addresses always refer
to a street from OSM data. The OSM id in the result refers to
that street.
"""
POSTCODE = 4
""" The postcode table contains artificial centroids for postcodes,
computed from the postcodes available with address points. Results
are always approximate.
"""
COUNTRY = 5
""" The country table provides a fallback, when country data is missing
in the OSM data.
"""
@dataclasses.dataclass
class AddressLine:
""" The `AddressLine` may contain the following fields about a related place
and its function as an address object. Most fields are optional.
Their presence depends on the kind and function of the address part.
"""
category: Tuple[str, str]
""" Main category of the place, described by a key-value pair.
"""
names: Dict[str, str]
""" All available names for the place including references, alternative
names and translations.
"""
fromarea: bool
""" If true, then the exact area of the place is known. Without area
information, Nominatim has to make an educated guess if an address
belongs to one place or another.
"""
isaddress: bool
""" If true, this place should be considered for the final address display.
Nominatim will sometimes include more than one candidate for
the address in the list when it cannot reliably determine where the
place belongs. It will consider names of all candidates when searching
but when displaying the result, only the most likely candidate should
be shown.
"""
rank_address: int
""" [Address rank](../customize/Ranking.md#address-rank) of the place.
"""
distance: float
""" Distance in degrees between the result place and this address part.
"""
place_id: Optional[int] = None
""" Internal ID of the place.
"""
osm_object: Optional[Tuple[str, int]] = None
""" OSM type and ID of the place, if such an object exists.
"""
extratags: Optional[Dict[str, str]] = None
""" Any extra information available about the place. This is a dictionary
that usually contains OSM tag key-value pairs.
"""
admin_level: Optional[int] = None
""" The administrative level of a boundary as tagged in the input data.
This field is only meaningful for places of the category
(boundary, administrative).
"""
local_name: Optional[str] = None
""" Place holder for localization of this address part. See
[Localization](Result-Handling.md#localization) below.
"""
class AddressLines(List[AddressLine]):
""" Sequence of address lines order in descending order by their rank.
"""
def localize(self, locales: Locales) -> List[str]:
""" Set the local name of address parts according to the chosen
locale. Return the list of local names without duplicates.
Only address parts that are marked as isaddress are localized
and returned.
"""
label_parts: List[str] = []
for line in self:
if line.isaddress and line.names:
line.local_name = locales.display_name(line.names)
if not label_parts or label_parts[-1] != line.local_name:
label_parts.append(line.local_name)
return label_parts
@dataclasses.dataclass
class WordInfo:
""" Each entry in the list of search terms contains the
following detailed information.
"""
word_id: int
""" Internal identifier for the word.
"""
word_token: str
""" Normalised and transliterated form of the word.
This form is used for searching.
"""
word: Optional[str] = None
""" Untransliterated form, if available.
"""
WordInfos = Sequence[WordInfo]
@dataclasses.dataclass
class BaseResult:
""" Data class collecting information common to all
types of search results.
"""
source_table: SourceTable
category: Tuple[str, str]
centroid: Point
place_id : Optional[int] = None
osm_object: Optional[Tuple[str, int]] = None
parent_place_id: Optional[int] = None
linked_place_id: Optional[int] = None
admin_level: int = 15
locale_name: Optional[str] = None
display_name: Optional[str] = None
names: Optional[Dict[str, str]] = None
address: Optional[Dict[str, str]] = None
extratags: Optional[Dict[str, str]] = None
housenumber: Optional[str] = None
postcode: Optional[str] = None
wikipedia: Optional[str] = None
rank_address: int = 30
rank_search: int = 30
importance: Optional[float] = None
country_code: Optional[str] = None
address_rows: Optional[AddressLines] = None
linked_rows: Optional[AddressLines] = None
parented_rows: Optional[AddressLines] = None
name_keywords: Optional[WordInfos] = None
address_keywords: Optional[WordInfos] = None
geometry: Dict[str, str] = dataclasses.field(default_factory=dict)
@property
def lat(self) -> float:
""" Get the latitude (or y) of the center point of the place.
"""
return self.centroid[1]
@property
def lon(self) -> float:
""" Get the longitude (or x) of the center point of the place.
"""
return self.centroid[0]
def calculated_importance(self) -> float:
""" Get a valid importance value. This is either the stored importance
of the value or an artificial value computed from the place's
search rank.
"""
return self.importance or (0.40001 - (self.rank_search/75.0))
def localize(self, locales: Locales) -> None:
""" Fill the locale_name and the display_name field for the
place and, if available, its address information.
"""
self.locale_name = locales.display_name(self.names)
if self.address_rows:
self.display_name = ', '.join(self.address_rows.localize(locales))
else:
self.display_name = self.locale_name
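# --- Editor's note (illustrative example) -------------------------------------
# calculated_importance() falls back to a rank-derived value when no stored
# importance is available: rank_search=30 yields 0.40001 - 30/75 = 0.00001,
# while rank_search=4 (a country) yields 0.40001 - 4/75 ~ 0.34668.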
BaseResultT = TypeVar('BaseResultT', bound=BaseResult)
@dataclasses.dataclass
class DetailedResult(BaseResult):
""" A search result with more internal information from the database
added.
"""
indexed_date: Optional[dt.datetime] = None
@dataclasses.dataclass
class ReverseResult(BaseResult):
""" A search result for reverse geocoding.
"""
distance: Optional[float] = None
bbox: Optional[Bbox] = None
class ReverseResults(List[ReverseResult]):
""" Sequence of reverse lookup results ordered by distance.
May be empty when no result was found.
"""
@dataclasses.dataclass
class SearchResult(BaseResult):
""" A search result for forward geocoding.
"""
bbox: Optional[Bbox] = None
accuracy: float = 0.0
@property
def ranking(self) -> float:
""" Return the ranking, a combined measure of accuracy and importance.
"""
return (self.accuracy if self.accuracy is not None else 1) \
- self.calculated_importance()
class SearchResults(List[SearchResult]):
""" Sequence of forward lookup results ordered by relevance.
May be empty when no result was found.
"""
def _filter_geometries(row: SaRow) -> Dict[str, str]:
return {k[9:]: v for k, v in row._mapping.items() # pylint: disable=W0212
if k.startswith('geometry_')}
def create_from_placex_row(row: Optional[SaRow],
class_type: Type[BaseResultT]) -> Optional[BaseResultT]:
""" Construct a new result and add the data from the result row
from the placex table. 'class_type' defines the type of result
to return. Returns None if the row is None.
"""
if row is None:
return None
return class_type(source_table=SourceTable.PLACEX,
place_id=row.place_id,
osm_object=(row.osm_type, row.osm_id),
category=(row.class_, row.type),
parent_place_id = row.parent_place_id,
linked_place_id = getattr(row, 'linked_place_id', None),
admin_level = getattr(row, 'admin_level', 15),
names=_mingle_name_tags(row.name),
address=row.address,
extratags=row.extratags,
housenumber=row.housenumber,
postcode=row.postcode,
wikipedia=row.wikipedia,
rank_address=row.rank_address,
rank_search=row.rank_search,
importance=row.importance,
country_code=row.country_code,
centroid=Point.from_wkb(row.centroid),
geometry=_filter_geometries(row))
def create_from_osmline_row(row: Optional[SaRow],
class_type: Type[BaseResultT]) -> Optional[BaseResultT]:
""" Construct a new result and add the data from the result row
from the address interpolation table osmline. 'class_type' defines
the type of result to return. Returns None if the row is None.
If the row contains a housenumber, then the housenumber is filled out.
Otherwise the result contains the interpolation information in extratags.
"""
if row is None:
return None
hnr = getattr(row, 'housenumber', None)
res = class_type(source_table=SourceTable.OSMLINE,
place_id=row.place_id,
parent_place_id = row.parent_place_id,
osm_object=('W', row.osm_id),
category=('place', 'houses' if hnr is None else 'house'),
address=row.address,
postcode=row.postcode,
country_code=row.country_code,
centroid=Point.from_wkb(row.centroid),
geometry=_filter_geometries(row))
if hnr is None:
res.extratags = {'startnumber': str(row.startnumber),
'endnumber': str(row.endnumber),
'step': str(row.step)}
else:
res.housenumber = str(hnr)
return res
def create_from_tiger_row(row: Optional[SaRow],
class_type: Type[BaseResultT],
osm_type: Optional[str] = None,
osm_id: Optional[int] = None) -> Optional[BaseResultT]:
""" Construct a new result and add the data from the result row
from the Tiger data interpolation table. 'class_type' defines
the type of result to return. Returns None if the row is None.
If the row contains a housenumber, then the housenumber is filled out.
Otherwise the result contains the interpolation information in extratags.
"""
if row is None:
return None
hnr = getattr(row, 'housenumber', None)
res = class_type(source_table=SourceTable.TIGER,
place_id=row.place_id,
parent_place_id = row.parent_place_id,
osm_object=(osm_type or row.osm_type, osm_id or row.osm_id),
category=('place', 'houses' if hnr is None else 'house'),
postcode=row.postcode,
country_code='us',
centroid=Point.from_wkb(row.centroid),
geometry=_filter_geometries(row))
if hnr is None:
res.extratags = {'startnumber': str(row.startnumber),
'endnumber': str(row.endnumber),
'step': str(row.step)}
else:
res.housenumber = str(hnr)
return res
def create_from_postcode_row(row: Optional[SaRow],
class_type: Type[BaseResultT]) -> Optional[BaseResultT]:
""" Construct a new result and add the data from the result row
from the postcode table. 'class_type' defines
the type of result to return. Returns None if the row is None.
"""
if row is None:
return None
return class_type(source_table=SourceTable.POSTCODE,
place_id=row.place_id,
parent_place_id = row.parent_place_id,
category=('place', 'postcode'),
names={'ref': row.postcode},
rank_search=row.rank_search,
rank_address=row.rank_address,
country_code=row.country_code,
centroid=Point.from_wkb(row.centroid),
geometry=_filter_geometries(row))
def create_from_country_row(row: Optional[SaRow],
class_type: Type[BaseResultT]) -> Optional[BaseResultT]:
""" Construct a new result and add the data from the result row
from the fallback country tables. 'class_type' defines
the type of result to return. Returns None if the row is None.
"""
if row is None:
return None
return class_type(source_table=SourceTable.COUNTRY,
category=('place', 'country'),
centroid=Point.from_wkb(row.centroid),
names=row.name,
rank_address=4, rank_search=4,
country_code=row.country_code,
geometry=_filter_geometries(row))
async def add_result_details(conn: SearchConnection, results: List[BaseResultT],
details: LookupDetails) -> None:
""" Retrieve more details from the database according to the
parameters specified in 'details'.
"""
if results:
log().section('Query details for result')
if details.address_details:
log().comment('Query address details')
await complete_address_details(conn, results)
if details.linked_places:
log().comment('Query linked places')
for result in results:
await complete_linked_places(conn, result)
if details.parented_places:
log().comment('Query parent places')
for result in results:
await complete_parented_places(conn, result)
if details.keywords:
log().comment('Query keywords')
for result in results:
await complete_keywords(conn, result)
for result in results:
result.localize(details.locales)
def _result_row_to_address_row(row: SaRow, isaddress: Optional[bool] = None) -> AddressLine:
""" Create a new AddressLine from the results of a database query.
"""
extratags: Dict[str, str] = getattr(row, 'extratags', {}) or {}
if 'linked_place' in extratags:
extratags['place'] = extratags['linked_place']
names = _mingle_name_tags(row.name) or {}
if getattr(row, 'housenumber', None) is not None:
names['housenumber'] = row.housenumber
if isaddress is None:
isaddress = getattr(row, 'isaddress', True)
return AddressLine(place_id=row.place_id,
osm_object=None if row.osm_type is None else (row.osm_type, row.osm_id),
category=(getattr(row, 'class'), row.type),
names=names,
extratags=extratags,
admin_level=row.admin_level,
fromarea=row.fromarea,
isaddress=isaddress,
rank_address=row.rank_address,
distance=row.distance)
def _get_address_lookup_id(result: BaseResultT) -> int:
assert result.place_id
if result.source_table != SourceTable.PLACEX or result.rank_search > 27:
return result.parent_place_id or result.place_id
return result.linked_place_id or result.place_id
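# --- Editor's note (illustrative) ---------------------------------------------
# POI-like entries (non-placex sources or rank_search > 27) resolve their
# address through the parent place, while placex areas resolve through a
# linked place, if one exists; in both cases the place itself is the fallback.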
async def _finalize_entry(conn: SearchConnection, result: BaseResultT) -> None:
assert result.address_rows is not None
if result.category[0] not in ('boundary', 'place')\
or result.category[1] not in ('postal_code', 'postcode'):
postcode = result.postcode
if not postcode and result.address:
postcode = result.address.get('postcode')
if postcode and ',' not in postcode and ';' not in postcode:
result.address_rows.append(AddressLine(
category=('place', 'postcode'),
names={'ref': postcode},
fromarea=False, isaddress=True, rank_address=5,
distance=0.0))
if result.country_code:
async def _get_country_names() -> Optional[Dict[str, str]]:
t = conn.t.country_name
sql = sa.select(t.c.name, t.c.derived_name)\
.where(t.c.country_code == result.country_code)
for cres in await conn.execute(sql):
names = cast(Dict[str, str], cres[0])
if cres[1]:
names.update(cast(Dict[str, str], cres[1]))
return names
return None
country_names = await conn.get_cached_value('COUNTRY_NAME',
result.country_code,
_get_country_names)
if country_names:
result.address_rows.append(AddressLine(
category=('place', 'country'),
names=country_names,
fromarea=False, isaddress=True, rank_address=4,
distance=0.0))
result.address_rows.append(AddressLine(
category=('place', 'country_code'),
names={'ref': result.country_code}, extratags = {},
fromarea=True, isaddress=False, rank_address=4,
distance=0.0))
def _setup_address_details(result: BaseResultT) -> None:
""" Retrieve information about places that make up the address of the result.
"""
result.address_rows = AddressLines()
if result.names:
result.address_rows.append(AddressLine(
place_id=result.place_id,
osm_object=result.osm_object,
category=result.category,
names=result.names,
extratags=result.extratags or {},
admin_level=result.admin_level,
fromarea=True, isaddress=True,
rank_address=result.rank_address, distance=0.0))
if result.source_table == SourceTable.PLACEX and result.address:
housenumber = result.address.get('housenumber')\
or result.address.get('streetnumber')\
or result.address.get('conscriptionnumber')
elif result.housenumber:
housenumber = result.housenumber
else:
housenumber = None
if housenumber:
result.address_rows.append(AddressLine(
category=('place', 'house_number'),
names={'ref': housenumber},
fromarea=True, isaddress=True, rank_address=28, distance=0))
if result.address and '_unlisted_place' in result.address:
result.address_rows.append(AddressLine(
category=('place', 'locality'),
names={'name': result.address['_unlisted_place']},
fromarea=False, isaddress=True, rank_address=25, distance=0))
async def complete_address_details(conn: SearchConnection, results: List[BaseResultT]) -> None:
""" Retrieve information about places that make up the address of the result.
"""
for result in results:
_setup_address_details(result)
### Lookup entries from place_address line
lookup_ids = [{'pid': r.place_id,
'lid': _get_address_lookup_id(r),
'names': list(r.address.values()) if r.address else [],
'c': ('SRID=4326;' + r.centroid.to_wkt()) if r.centroid else '' }
for r in results if r.place_id]
if not lookup_ids:
return
ltab = sa.func.JsonArrayEach(sa.type_coerce(lookup_ids, sa.JSON))\
.table_valued(sa.column('value', type_=sa.JSON))
t = conn.t.placex
taddr = conn.t.addressline
sql = sa.select(ltab.c.value['pid'].as_integer().label('src_place_id'),
t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name,
t.c.class_, t.c.type, t.c.extratags,
t.c.admin_level, taddr.c.fromarea,
sa.case((t.c.rank_address == 11, 5),
else_=t.c.rank_address).label('rank_address'),
taddr.c.distance, t.c.country_code, t.c.postcode)\
.join(taddr, sa.or_(taddr.c.place_id == ltab.c.value['pid'].as_integer(),
taddr.c.place_id == ltab.c.value['lid'].as_integer()))\
.join(t, taddr.c.address_place_id == t.c.place_id)\
.order_by('src_place_id')\
.order_by(sa.column('rank_address').desc())\
.order_by((taddr.c.place_id == ltab.c.value['pid'].as_integer()).desc())\
.order_by(sa.case((sa.func.CrosscheckNames(t.c.name, ltab.c.value['names']), 2),
(taddr.c.isaddress, 0),
(sa.and_(taddr.c.fromarea,
t.c.geometry.ST_Contains(
sa.func.ST_GeomFromEWKT(
ltab.c.value['c'].as_string()))), 1),
else_=-1).desc())\
.order_by(taddr.c.fromarea.desc())\
.order_by(taddr.c.distance.desc())\
.order_by(t.c.rank_search.desc())
current_result = None
current_rank_address = -1
for row in await conn.execute(sql):
if current_result is None or row.src_place_id != current_result.place_id:
current_result = next((r for r in results if r.place_id == row.src_place_id), None)
assert current_result is not None
current_rank_address = -1
location_isaddress = row.rank_address != current_rank_address
if current_result.country_code is None and row.country_code:
current_result.country_code = row.country_code
if row.type in ('postcode', 'postal_code') and location_isaddress:
if not row.fromarea or \
(current_result.address and 'postcode' in current_result.address):
location_isaddress = False
else:
current_result.postcode = None
assert current_result.address_rows is not None
current_result.address_rows.append(_result_row_to_address_row(row, location_isaddress))
current_rank_address = row.rank_address
for result in results:
await _finalize_entry(conn, result)
### Finally add the record for the parent entry where necessary.
parent_lookup_ids = list(filter(lambda e: e['pid'] != e['lid'], lookup_ids))
if parent_lookup_ids:
ltab = sa.func.JsonArrayEach(sa.type_coerce(parent_lookup_ids, sa.JSON))\
.table_valued(sa.column('value', type_=sa.JSON))
sql = sa.select(ltab.c.value['pid'].as_integer().label('src_place_id'),
t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name,
t.c.class_, t.c.type, t.c.extratags,
t.c.admin_level,
t.c.rank_address)\
.where(t.c.place_id == ltab.c.value['lid'].as_integer())
for row in await conn.execute(sql):
current_result = next((r for r in results if r.place_id == row.src_place_id), None)
assert current_result is not None
assert current_result.address_rows is not None
current_result.address_rows.append(AddressLine(
place_id=row.place_id,
osm_object=(row.osm_type, row.osm_id),
category=(row.class_, row.type),
names=row.name, extratags=row.extratags or {},
admin_level=row.admin_level,
fromarea=True, isaddress=True,
rank_address=row.rank_address, distance=0.0))
### Now sort everything
def mk_sort_key(place_id: Optional[int]) -> Callable[[AddressLine], Tuple[bool, int, bool]]:
return lambda a: (a.place_id != place_id, -a.rank_address, a.isaddress)
for result in results:
assert result.address_rows is not None
result.address_rows.sort(key=mk_sort_key(result.place_id))
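# --- Editor's note (illustrative) ---------------------------------------------
# The sort key above orders each result's address lines as follows: the
# result's own row first, then by descending address rank, and within equal
# ranks rows not flagged as isaddress before the flagged candidate.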
def _placex_select_address_row(conn: SearchConnection,
centroid: Point) -> SaSelect:
t = conn.t.placex
return sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name,
t.c.class_.label('class'), t.c.type,
t.c.admin_level, t.c.housenumber,
t.c.geometry.is_area().label('fromarea'),
t.c.rank_address,
t.c.geometry.distance_spheroid(
sa.bindparam('centroid', value=centroid, type_=Geometry)).label('distance'))
async def complete_linked_places(conn: SearchConnection, result: BaseResult) -> None:
""" Retrieve information about places that link to the result.
"""
result.linked_rows = AddressLines()
if result.source_table != SourceTable.PLACEX:
return
sql = _placex_select_address_row(conn, result.centroid)\
.where(conn.t.placex.c.linked_place_id == result.place_id)
for row in await conn.execute(sql):
result.linked_rows.append(_result_row_to_address_row(row))
async def complete_keywords(conn: SearchConnection, result: BaseResult) -> None:
""" Retrieve information about the search terms used for this place.
Requires that the query analyzer was initialised to get access to
the word table.
"""
t = conn.t.search_name
sql = sa.select(t.c.name_vector, t.c.nameaddress_vector)\
.where(t.c.place_id == result.place_id)
result.name_keywords = []
result.address_keywords = []
t = conn.t.meta.tables['word']
sel = sa.select(t.c.word_id, t.c.word_token, t.c.word)
for name_tokens, address_tokens in await conn.execute(sql):
for row in await conn.execute(sel.where(t.c.word_id.in_(name_tokens))):
result.name_keywords.append(WordInfo(*row))
for row in await conn.execute(sel.where(t.c.word_id.in_(address_tokens))):
result.address_keywords.append(WordInfo(*row))
async def complete_parented_places(conn: SearchConnection, result: BaseResult) -> None:
""" Retrieve information about places that the result provides the
address for.
"""
result.parented_rows = AddressLines()
if result.source_table != SourceTable.PLACEX:
return
sql = _placex_select_address_row(conn, result.centroid)\
.where(conn.t.placex.c.parent_place_id == result.place_id)\
.where(conn.t.placex.c.rank_search == 30)
for row in await conn.execute(sql):
result.parented_rows.append(_result_row_to_address_row(row))
| 29,105 | Python | .py | 618 | 36.488673 | 99 | 0.613092 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,722 | __init__.py | osm-search_Nominatim/src/nominatim_api/__init__.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
The public interface of the Nominatim library.
Classes and functions defined in this file are considered stable. Always
import from this file, not from the source files directly.
"""
# See also https://github.com/PyCQA/pylint/issues/6006
# pylint: disable=useless-import-alias
from .errors import (UsageError as UsageError)
from .config import (Configuration as Configuration)
from .core import (NominatimAPI as NominatimAPI,
NominatimAPIAsync as NominatimAPIAsync)
from .connection import (SearchConnection as SearchConnection)
from .status import (StatusResult as StatusResult)
from .types import (PlaceID as PlaceID,
OsmID as OsmID,
PlaceRef as PlaceRef,
Point as Point,
Bbox as Bbox,
GeometryFormat as GeometryFormat,
DataLayer as DataLayer)
from .results import (SourceTable as SourceTable,
AddressLine as AddressLine,
AddressLines as AddressLines,
WordInfo as WordInfo,
WordInfos as WordInfos,
DetailedResult as DetailedResult,
ReverseResult as ReverseResult,
ReverseResults as ReverseResults,
SearchResult as SearchResult,
SearchResults as SearchResults)
from .localization import (Locales as Locales)
from .result_formatting import (FormatDispatcher as FormatDispatcher,
load_format_dispatcher as load_format_dispatcher)
from .version import NOMINATIM_API_VERSION as __version__
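# --- Editor's usage sketch (illustrative, not part of the upstream file) -----
# Importing from this stable interface, as the module docstring recommends:
#
#     import nominatim_api as napi
#
#     with napi.NominatimAPI() as api:
#         print(api.status().message)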
| 1,876 | Python | .py | 40 | 36.55 | 81 | 0.664664 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,723 | localization.py | osm-search_Nominatim/src/nominatim_api/localization.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for localizing names of results.
"""
from typing import Mapping, List, Optional
import re
class Locales:
""" Helper class for localization of names.
It takes a list of language prefixes in their order of preferred
usage.
"""
def __init__(self, langs: Optional[List[str]] = None):
self.languages = langs or []
self.name_tags: List[str] = []
# Build the list of supported tags. It is currently hard-coded.
self._add_lang_tags('name')
self._add_tags('name', 'brand')
self._add_lang_tags('official_name', 'short_name')
self._add_tags('official_name', 'short_name', 'ref')
def __bool__(self) -> bool:
return len(self.languages) > 0
def _add_tags(self, *tags: str) -> None:
for tag in tags:
self.name_tags.append(tag)
self.name_tags.append(f"_place_{tag}")
def _add_lang_tags(self, *tags: str) -> None:
for tag in tags:
for lang in self.languages:
self.name_tags.append(f"{tag}:{lang}")
self.name_tags.append(f"_place_{tag}:{lang}")
def display_name(self, names: Optional[Mapping[str, str]]) -> str:
""" Return the best matching name from a dictionary of names
containing different name variants.
If 'names' is null or empty, an empty string is returned. If no
appropriate localization is found, the first name is returned.
"""
if not names:
return ''
if len(names) > 1:
for tag in self.name_tags:
if tag in names:
return names[tag]
# Nothing? Return any of the other names as a default.
return next(iter(names.values()))
@staticmethod
def from_accept_languages(langstr: str) -> 'Locales':
""" Create a localization object from a language list in the
format of HTTP accept-languages header.
The function tries to be forgiving of format errors by first splitting
the string into comma-separated parts and then parsing each
description separately. Badly formatted parts are then ignored.
"""
# split string into languages
candidates = []
for desc in langstr.split(','):
m = re.fullmatch(r'\s*([a-z_-]+)(?:;\s*q\s*=\s*([01](?:\.\d+)?))?\s*',
desc, flags=re.I)
if m:
candidates.append((m[1], float(m[2] or 1.0)))
# sort the results by the weight of each language (preserving order).
candidates.sort(reverse=True, key=lambda e: e[1])
# If a language has a region variant, also add the bare language,
# but only if it isn't already in the list, so the weighting stays intact.
languages = []
for lid, _ in candidates:
languages.append(lid)
parts = lid.split('-', 1)
if len(parts) > 1 and all(c[0] != parts[0] for c in candidates):
languages.append(parts[0])
return Locales(languages)
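# --- Editor's usage sketch (illustrative, not part of the upstream file) -----
# Parsing an HTTP Accept-Language header and localizing a name dictionary:
#
#     locales = Locales.from_accept_languages('de;q=0.8, en-GB')
#     locales.languages
#     # -> ['en-GB', 'en', 'de']
#     locales.display_name({'name': 'Köln', 'name:en': 'Cologne'})
#     # -> 'Cologne'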
| 3,340 | Python | .py | 75 | 35.12 | 85 | 0.599445 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| 14,724 | core.py | osm-search_Nominatim/src/nominatim_api/core.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of classes for API access via libraries.
"""
from typing import Mapping, Optional, Any, AsyncIterator, Dict, Sequence, List,\
Union, Tuple, cast
import asyncio
import sys
import contextlib
from pathlib import Path
import sqlalchemy as sa
import sqlalchemy.ext.asyncio as sa_asyncio
from .errors import UsageError
from .sql.sqlalchemy_schema import SearchTables
from .sql.async_core_library import PGCORE_LIB, PGCORE_ERROR
from .config import Configuration
from .sql import sqlite_functions, sqlalchemy_functions #pylint: disable=unused-import
from .connection import SearchConnection
from .status import get_status, StatusResult
from .lookup import get_detailed_place, get_simple_place
from .reverse import ReverseGeocoder
from .search import ForwardGeocoder, Phrase, PhraseType, make_query_analyzer
from . import types as ntyp
from .results import DetailedResult, ReverseResult, SearchResults
class NominatimAPIAsync: #pylint: disable=too-many-instance-attributes
""" The main frontend to the Nominatim database implements the
functions for lookup, forward and reverse geocoding using
asynchronous functions.
This class shares most of the functions with its synchronous
version. There are some additional functions or parameters,
which are documented below.
This class should usually be used as a context manager in 'with' context.
"""
def __init__(self, project_dir: Optional[Union[str, Path]] = None,
environ: Optional[Mapping[str, str]] = None,
loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
""" Initiate a new frontend object with synchronous API functions.
Parameters:
project_dir: Path to the
[project directory](../admin/Import.md#creating-the-project-directory)
of the local Nominatim installation.
environ: Mapping of [configuration parameters](../customize/Settings.md).
When set, replaces any configuration via environment variables.
Settings in this mapping also have precedence over any
parameters found in the `.env` file of the project directory.
loop: The asyncio event loop that will be used when calling
functions. Only needed, when a custom event loop is used
and the Python version is 3.9 or earlier.
"""
self.config = Configuration(project_dir, environ)
self.query_timeout = self.config.get_int('QUERY_TIMEOUT') \
if self.config.QUERY_TIMEOUT else None
self.reverse_restrict_to_country_area = self.config.get_bool('SEARCH_WITHIN_COUNTRIES')
self.server_version = 0
if sys.version_info >= (3, 10):
self._engine_lock = asyncio.Lock()
else:
self._engine_lock = asyncio.Lock(loop=loop) # pylint: disable=unexpected-keyword-arg
self._engine: Optional[sa_asyncio.AsyncEngine] = None
self._tables: Optional[SearchTables] = None
self._property_cache: Dict[str, Any] = {'DB:server_version': 0}
async def setup_database(self) -> None:
""" Set up the SQL engine and connections.
This function will be implicitly called when the database is
accessed for the first time. You may also call it explicitly to
avoid that the first call is delayed by the setup.
"""
async with self._engine_lock:
if self._engine:
return
extra_args: Dict[str, Any] = {'future': True,
'echo': self.config.get_bool('DEBUG_SQL')}
if self.config.get_int('API_POOL_SIZE') == 0:
extra_args['poolclass'] = sa.pool.NullPool
else:
extra_args['poolclass'] = sa.pool.AsyncAdaptedQueuePool
extra_args['max_overflow'] = 0
extra_args['pool_size'] = self.config.get_int('API_POOL_SIZE')
is_sqlite = self.config.DATABASE_DSN.startswith('sqlite:')
if is_sqlite:
params = dict((p.split('=', 1)
for p in self.config.DATABASE_DSN[7:].split(';')))
dburl = sa.engine.URL.create('sqlite+aiosqlite',
database=params.get('dbname'))
if not ('NOMINATIM_DATABASE_RW' in self.config.environ
and self.config.get_bool('DATABASE_RW')) \
and not Path(params.get('dbname', '')).is_file():
raise UsageError(f"SQlite database '{params.get('dbname')}' does not exist.")
else:
dsn = self.config.get_database_params()
query = {k: str(v) for k, v in dsn.items()
if k not in ('user', 'password', 'dbname', 'host', 'port')}
dburl = sa.engine.URL.create(
f'postgresql+{PGCORE_LIB}',
database=cast(str, dsn.get('dbname')),
username=cast(str, dsn.get('user')),
password=cast(str, dsn.get('password')),
host=cast(str, dsn.get('host')),
port=int(cast(str, dsn['port'])) if 'port' in dsn else None,
query=query)
engine = sa_asyncio.create_async_engine(dburl, **extra_args)
if is_sqlite:
server_version = 0
@sa.event.listens_for(engine.sync_engine, "connect")
def _on_sqlite_connect(dbapi_con: Any, _: Any) -> None:
dbapi_con.run_async(lambda conn: conn.enable_load_extension(True))
sqlite_functions.install_custom_functions(dbapi_con)
cursor = dbapi_con.cursor()
cursor.execute("SELECT load_extension('mod_spatialite')")
cursor.execute('SELECT SetDecimalPrecision(7)')
dbapi_con.run_async(lambda conn: conn.enable_load_extension(False))
else:
try:
async with engine.begin() as conn:
result = await conn.scalar(sa.text('SHOW server_version_num'))
server_version = int(result)
if server_version >= 110000:
await conn.execute(sa.text("SET jit_above_cost TO '-1'"))
await conn.execute(sa.text(
"SET max_parallel_workers_per_gather TO '0'"))
except (PGCORE_ERROR, sa.exc.OperationalError):
server_version = 0
if server_version >= 110000:
@sa.event.listens_for(engine.sync_engine, "connect")
def _on_connect(dbapi_con: Any, _: Any) -> None:
cursor = dbapi_con.cursor()
cursor.execute("SET jit_above_cost TO '-1'")
cursor.execute("SET max_parallel_workers_per_gather TO '0'")
self._property_cache['DB:server_version'] = server_version
self._tables = SearchTables(sa.MetaData()) # pylint: disable=no-member
self._engine = engine
async def close(self) -> None:
""" Close all active connections to the database. The NominatimAPIAsync
object remains usable after closing. If a new API function is
called, new connections are created.
"""
if self._engine is not None:
await self._engine.dispose()
async def __aenter__(self) -> 'NominatimAPIAsync':
return self
async def __aexit__(self, *_: Any) -> None:
await self.close()
@contextlib.asynccontextmanager
async def begin(self) -> AsyncIterator[SearchConnection]:
""" Create a new connection with automatic transaction handling.
This function may be used to get low-level access to the database.
Refer to the documentation of SQLAlchemy for details how to use
the connection object.
"""
if self._engine is None:
await self.setup_database()
assert self._engine is not None
assert self._tables is not None
async with self._engine.begin() as conn:
yield SearchConnection(conn, self._tables, self._property_cache)
async def status(self) -> StatusResult:
""" Return the status of the database.
"""
try:
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
status = await get_status(conn)
except (PGCORE_ERROR, sa.exc.OperationalError):
return StatusResult(700, 'Database connection failed')
return status
async def details(self, place: ntyp.PlaceRef, **params: Any) -> Optional[DetailedResult]:
""" Get detailed information about a place in the database.
Returns None if there is no entry under the given ID.
"""
details = ntyp.LookupDetails.from_kwargs(params)
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if details.keywords:
await make_query_analyzer(conn)
return await get_detailed_place(conn, place, details)
async def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults:
""" Get simple information about a list of places.
Returns a list of place information for all IDs that were found.
"""
details = ntyp.LookupDetails.from_kwargs(params)
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if details.keywords:
await make_query_analyzer(conn)
return SearchResults(filter(None,
[await get_simple_place(conn, p, details) for p in places]))
async def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]:
""" Find a place by its coordinates. Also known as reverse geocoding.
Returns the closest result that can be found or None if
no place matches the given criteria.
"""
# The following negation handles NaN correctly. Don't change.
if not abs(coord[0]) <= 180 or not abs(coord[1]) <= 90:
# There are no results to be expected outside valid coordinates.
return None
details = ntyp.ReverseDetails.from_kwargs(params)
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if details.keywords:
await make_query_analyzer(conn)
geocoder = ReverseGeocoder(conn, details,
self.reverse_restrict_to_country_area)
return await geocoder.lookup(coord)
async def search(self, query: str, **params: Any) -> SearchResults:
""" Find a place by free-text search. Also known as forward geocoding.
"""
query = query.strip()
if not query:
raise UsageError('Nothing to search for.')
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
geocoder = ForwardGeocoder(conn, ntyp.SearchDetails.from_kwargs(params),
self.config.get_int('REQUEST_TIMEOUT') \
if self.config.REQUEST_TIMEOUT else None)
phrases = [Phrase(PhraseType.NONE, p.strip()) for p in query.split(',')]
return await geocoder.lookup(phrases)
# pylint: disable=too-many-arguments,too-many-branches
async def search_address(self, amenity: Optional[str] = None,
street: Optional[str] = None,
city: Optional[str] = None,
county: Optional[str] = None,
state: Optional[str] = None,
country: Optional[str] = None,
postalcode: Optional[str] = None,
**params: Any) -> SearchResults:
""" Find an address using structured search.
"""
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
details = ntyp.SearchDetails.from_kwargs(params)
phrases: List[Phrase] = []
if amenity:
phrases.append(Phrase(PhraseType.AMENITY, amenity))
if street:
phrases.append(Phrase(PhraseType.STREET, street))
if city:
phrases.append(Phrase(PhraseType.CITY, city))
if county:
phrases.append(Phrase(PhraseType.COUNTY, county))
if state:
phrases.append(Phrase(PhraseType.STATE, state))
if postalcode:
phrases.append(Phrase(PhraseType.POSTCODE, postalcode))
if country:
phrases.append(Phrase(PhraseType.COUNTRY, country))
if not phrases:
raise UsageError('Nothing to search for.')
if amenity or street:
details.restrict_min_max_rank(26, 30)
elif city:
details.restrict_min_max_rank(13, 25)
elif county:
details.restrict_min_max_rank(10, 12)
elif state:
details.restrict_min_max_rank(5, 9)
elif postalcode:
details.restrict_min_max_rank(5, 11)
else:
details.restrict_min_max_rank(4, 4)
if 'layers' not in params:
details.layers = ntyp.DataLayer.ADDRESS
if amenity:
details.layers |= ntyp.DataLayer.POI
geocoder = ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT') \
if self.config.REQUEST_TIMEOUT else None)
return await geocoder.lookup(phrases)
async def search_category(self, categories: List[Tuple[str, str]],
near_query: Optional[str] = None,
**params: Any) -> SearchResults:
""" Find an object of a certain category near another place.
The near place may either be given as an unstructured search
query in itself or as coordinates.
"""
if not categories:
return SearchResults()
details = ntyp.SearchDetails.from_kwargs(params)
async with self.begin() as conn:
conn.set_query_timeout(self.query_timeout)
if near_query:
phrases = [Phrase(PhraseType.NONE, p) for p in near_query.split(',')]
else:
phrases = []
if details.keywords:
await make_query_analyzer(conn)
geocoder = ForwardGeocoder(conn, details,
self.config.get_int('REQUEST_TIMEOUT') \
if self.config.REQUEST_TIMEOUT else None)
return await geocoder.lookup_pois(categories, phrases)
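# --- Editor's usage sketch (illustrative, not part of the upstream file) -----
# Minimal asynchronous round trip; the project directory path below is
# hypothetical:
#
#     async def main() -> None:
#         async with NominatimAPIAsync('/path/to/project') as api:
#             result = await api.reverse((8.4, 49.0))
#             if result is not None:
#                 print(result.locale_name)
#
#     asyncio.run(main())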
class NominatimAPI:
""" This class provides a thin synchronous wrapper around the asynchronous
Nominatim functions. It creates its own event loop and runs each
synchronous function call to completion using that loop.
This class should usually be used as a context manager in 'with' context.
"""
def __init__(self, project_dir: Optional[Union[str, Path]] = None,
environ: Optional[Mapping[str, str]] = None) -> None:
""" Initiate a new frontend object with synchronous API functions.
Parameters:
project_dir: Path to the
[project directory](../admin/Import.md#creating-the-project-directory)
of the local Nominatim installation.
environ: Mapping of [configuration parameters](../customize/Settings.md).
When set, replaces any configuration via environment variables.
Settings in this mapping also have precedence over any
parameters found in the `.env` file of the project directory.
"""
self._loop = asyncio.new_event_loop()
self._async_api = NominatimAPIAsync(project_dir, environ, loop=self._loop)
def close(self) -> None:
""" Close all active connections to the database.
This function also closes the asynchronous worker loop, making
the NominatimAPI object unusable.
"""
if not self._loop.is_closed():
self._loop.run_until_complete(self._async_api.close())
self._loop.close()
def __enter__(self) -> 'NominatimAPI':
return self
def __exit__(self, *_: Any) -> None:
self.close()
@property
def config(self) -> Configuration:
""" Provide read-only access to the [configuration](Configuration.md)
used by the API.
"""
return self._async_api.config
def status(self) -> StatusResult:
""" Return the status of the database as a dataclass object
with the fields described below.
Returns:
status(int): A status code as described on the status page.
message(str): Either 'OK' or a human-readable message of the
problem encountered.
software_version(tuple): A tuple with the version of the
Nominatim library consisting of (major, minor, patch, db-patch)
version.
database_version(tuple): A tuple with the version of the library
which was used for the import or last migration.
Also consists of (major, minor, patch, db-patch).
data_updated(datetime): Timestamp with the age of the data.
"""
return self._loop.run_until_complete(self._async_api.status())
def details(self, place: ntyp.PlaceRef, **params: Any) -> Optional[DetailedResult]:
""" Get detailed information about a place in the database.
The result is a dataclass object with the fields described below
or `None` if the place could not be found in the database.
Parameters:
place: Description of the place to look up. See
[Place identification](Input-Parameter-Types.md#place-identification)
for the various ways to reference a place.
Other parameters:
geometry_output (enum): Add the full geometry of the place to the result.
Multiple formats may be selected. Note that geometries can become
quite large. (Default: none)
geometry_simplification (float): Simplification factor to use on
the geometries before returning them. The factor expresses
the tolerance in degrees from which the geometry may differ.
Topology is preserved. (Default: 0.0)
address_details (bool): Add detailed information about the places
that make up the address of the requested object. (Default: False)
linked_places (bool): Add detailed information about the places
that link to the result. (Default: False)
parented_places (bool): Add detailed information about all places
for which the requested object is a parent, i.e. all places for
which the object provides the address details.
Only POI places can have parents. (Default: False)
keywords (bool): Add detailed information about the search terms
used for this place.
Returns:
source_table (enum): Data source of the place. See below for possible values.
category (tuple): A tuple of two strings with the primary OSM tag
and value.
centroid (Point): Point position of the place.
place_id (Optional[int]): Internal ID of the place. This ID may differ
for the same place between different installations.
parent_place_id (Optional[int]): Internal ID of the parent of this
place. Only meaningful for POI-like objects (places with a
rank_address of 30).
linked_place_id (Optional[int]): Internal ID of the place this object
links to. When this ID is set then there is no guarantee that
the rest of the result information is complete.
admin_level (int): Value of the `admin_level` OSM tag. Only meaningful
for administrative boundary objects.
indexed_date (datetime): Timestamp when the place was last updated.
osm_object (Optional[tuple]): OSM type and ID of the place, if available.
names (Optional[dict]): Dictionary of names of the place. Keys are
usually the corresponding OSM tag keys.
address (Optional[dict]): Dictionary of address parts directly
attributed to the place. Keys are usually the corresponding
OSM tag keys with the `addr:` prefix removed.
extratags (Optional[dict]): Dictionary of additional attributes for
the place. Usually OSM tag keys and values.
housenumber (Optional[str]): House number of the place, normalised
for lookup. To get the house number in its original spelling,
use `address['housenumber']`.
postcode (Optional[str]): Computed postcode for the place. To get
directly attributed postcodes, use `address['postcode']` instead.
wikipedia (Optional[str]): Reference to a wikipedia site for the place.
The string has the format <language code>:<wikipedia title>.
rank_address (int): [Address rank](../customize/Ranking.md#address-rank).
rank_search (int): [Search rank](../customize/Ranking.md#search-rank).
importance (Optional[float]): Relative importance of the place. This is
a measure of how likely the place will be searched for.
country_code (Optional[str]): Country the feature is in as
ISO 3166-1 alpha-2 country code.
address_rows (Optional[AddressLines]): List of places that make up the
computed address. `None` when `address_details` parameter was False.
linked_rows (Optional[AddressLines]): List of places that link to the object.
`None` when `linked_places` parameter was False.
parented_rows (Optional[AddressLines]): List of direct children of the place.
`None` when `parented_places` parameter was False.
name_keywords (Optional[WordInfos]): List of search words for the name of
the place. `None` when `keywords` parameter is set to False.
address_keywords (Optional[WordInfos]): List of search words for the address of
the place. `None` when `keywords` parameter is set to False.
geometry (dict): Dictionary containing the full geometry of the place
in the formats requested in the `geometry_output` parameter.
"""
return self._loop.run_until_complete(self._async_api.details(place, **params))
def lookup(self, places: Sequence[ntyp.PlaceRef], **params: Any) -> SearchResults:
""" Get simple information about a list of places.
Returns a list of place information for all IDs that were found.
Each result is a dataclass with the fields detailed below.
Parameters:
places: List of descriptions of the place to look up. See
[Place identification](Input-Parameter-Types.md#place-identification)
for the various ways to reference a place.
Other parameters:
geometry_output (enum): Add the full geometry of the place to the result.
Multiple formats may be selected. Note that geometries can become
quite large. (Default: none)
geometry_simplification (float): Simplification factor to use on
the geometries before returning them. The factor expresses
the tolerance in degrees from which the geometry may differ.
Topology is preserved. (Default: 0.0)
address_details (bool): Add detailed information about the places
that make up the address of the requested object. (Default: False)
linked_places (bool): Add detailed information about the places
that link to the result. (Default: False)
parented_places (bool): Add detailed information about all places
for which the requested object is a parent, i.e. all places for
which the object provides the address details.
Only POI places can have parents. (Default: False)
keywords (bool): Add detailed information about the search terms
used for this place.
Returns:
source_table (enum): Data source of the place. See below for possible values.
category (tuple): A tuple of two strings with the primary OSM tag
and value.
centroid (Point): Point position of the place.
place_id (Optional[int]): Internal ID of the place. This ID may differ
for the same place between different installations.
osm_object (Optional[tuple]): OSM type and ID of the place, if available.
names (Optional[dict]): Dictionary of names of the place. Keys are
usually the corresponding OSM tag keys.
address (Optional[dict]): Dictionary of address parts directly
attributed to the place. Keys are usually the corresponding
OSM tag keys with the `addr:` prefix removed.
extratags (Optional[dict]): Dictionary of additional attributes for
the place. Usually OSM tag keys and values.
housenumber (Optional[str]): House number of the place, normalised
for lookup. To get the house number in its original spelling,
use `address['housenumber']`.
postcode (Optional[str]): Computed postcode for the place. To get
directly attributed postcodes, use `address['postcode']` instead.
wikipedia (Optional[str]): Reference to a wikipedia site for the place.
The string has the format <language code>:<wikipedia title>.
rank_address (int): [Address rank](../customize/Ranking.md#address-rank).
rank_search (int): [Search rank](../customize/Ranking.md#search-rank).
importance (Optional[float]): Relative importance of the place. This is
a measure of how likely the place will be searched for.
country_code (Optional[str]): Country the feature is in as
ISO 3166-1 alpha-2 country code.
address_rows (Optional[AddressLines]): List of places that make up the
computed address. `None` when `address_details` parameter was False.
linked_rows (Optional[AddressLines]): List of places that link to the object.
`None` when `linked_places` parameter was False.
parented_rows (Optional[AddressLines]): List of direct children of the place.
`None` when `parented_places` parameter was False.
name_keywords (Optional[WordInfos]): List of search words for the name of
the place. `None` when `keywords` parameter is set to False.
address_keywords (Optional[WordInfos]): List of search words for the address of
the place. `None` when `keywords` parameter is set to False.
bbox (Bbox): Bounding box of the full geometry of the place.
If the place is a single point, then the size of the bounding
box is guessed according to the type of place.
geometry (dict): Dictionary containing the full geometry of the place
in the formats requested in the `geometry_output` parameter.
"""
return self._loop.run_until_complete(self._async_api.lookup(places, **params))
def reverse(self, coord: ntyp.AnyPoint, **params: Any) -> Optional[ReverseResult]:
""" Find a place by its coordinates. Also known as reverse geocoding.
Returns the closest result that can be found or `None` if
no place matches the given criteria. The result is a dataclass
with the fields as detailed below.
Parameters:
coord: Coordinate to lookup the place for as a Point
or a tuple (x, y). Must be in WGS84 projection.
Other parameters:
max_rank (int): Highest address rank to return. Can be used to
restrict search to streets or settlements.
layers (enum): Defines the kind of data to take into account.
See description of layers below. (Default: addresses and POIs)
geometry_output (enum): Add the full geometry of the place to the result.
Multiple formats may be selected. Note that geometries can become
quite large. (Default: none)
geometry_simplification (float): Simplification factor to use on
the geometries before returning them. The factor expresses
the tolerance in degrees from which the geometry may differ.
Topology is preserved. (Default: 0.0)
address_details (bool): Add detailed information about the places
that make up the address of the requested object. (Default: False)
linked_places (bool): Add detailed information about the places
that link to the result. (Default: False)
parented_places (bool): Add detailed information about all places
for which the requested object is a parent, i.e. all places for
which the object provides the address details.
Only POI places can have parents. (Default: False)
keywords (bool): Add detailed information about the search terms
used for this place.
Returns:
source_table (enum): Data source of the place. See below for possible values.
category (tuple): A tuple of two strings with the primary OSM tag
and value.
centroid (Point): Point position of the place.
place_id (Optional[int]): Internal ID of the place. This ID may differ
for the same place between different installations.
osm_object (Optional[tuple]): OSM type and ID of the place, if available.
names (Optional[dict]): Dictionary of names of the place. Keys are
usually the corresponding OSM tag keys.
address (Optional[dict]): Dictionary of address parts directly
attributed to the place. Keys are usually the corresponding
OSM tag keys with the `addr:` prefix removed.
extratags (Optional[dict]): Dictionary of additional attributes for
the place. Usually OSM tag keys and values.
housenumber (Optional[str]): House number of the place, normalised
for lookup. To get the house number in its original spelling,
use `address['housenumber']`.
postcode (Optional[str]): Computed postcode for the place. To get
directly attributed postcodes, use `address['postcode']` instead.
wikipedia (Optional[str]): Reference to a wikipedia site for the place.
The string has the format <language code>:<wikipedia title>.
rank_address (int): [Address rank](../customize/Ranking.md#address-rank).
rank_search (int): [Search rank](../customize/Ranking.md#search-rank).
importance (Optional[float]): Relative importance of the place. This is
a measure of how likely the place will be searched for.
country_code (Optional[str]): Country the feature is in as
ISO 3166-1 alpha-2 country code.
address_rows (Optional[AddressLines]): List of places that make up the
computed address. `None` when `address_details` parameter was False.
linked_rows (Optional[AddressLines]): List of places that link to the object.
`None` when `linked_places` parameter was False.
parented_rows (Optional[AddressLines]): List of direct children of the place.
`None` when `parented_places` parameter was False.
name_keywords (Optional[WordInfos]): List of search words for the name of
the place. `None` when `keywords` parameter is set to False.
address_keywords (Optional[WordInfos]): List of search words for the address of
the place. `None` when `keywords` parameter is set to False.
bbox (Bbox): Bounding box of the full geometry of the place.
If the place is a single point, then the size of the bounding
box is guessed according to the type of place.
geometry (dict): Dictionary containing the full geometry of the place
in the formats requested in the `geometry_output` parameter.
distance (Optional[float]): Distance in degrees from the input point.
"""
return self._loop.run_until_complete(self._async_api.reverse(coord, **params))
def search(self, query: str, **params: Any) -> SearchResults:
""" Find a place by free-text search. Also known as forward geocoding.
Parameters:
query: Free-form text query searching for a place.
Other parameters:
max_results (int): Maximum number of results to return. The
actual number of results may be less. (Default: 10)
min_rank (int): Lowest permissible rank for the result.
For addressable places this is the minimum
[address rank](../customize/Ranking.md#address-rank). For all
other places the [search rank](../customize/Ranking.md#search-rank)
is used.
max_rank (int): Highest permissible rank for the result. See min_rank above.
layers (enum): Defines the kind of data to take into account.
See [layers section](Input-Parameter-Types.md#layers) for details.
(Default: addresses and POIs)
countries (list[str]): Restrict search to countries with the given
ISO 3166-1 alpha-2 country code. An empty list (the default)
disables this filter.
excluded (list[int]): A list of internal IDs of places to exclude
from the search.
viewbox (Optional[Bbox]): Bounding box of an area to focus search on.
bounded_viewbox (bool): Consider the bounding box given in `viewbox`
as a filter and return only results within the bounding box.
near (Optional[Point]): Focus search around the given point and
return results ordered by distance to the given point.
near_radius (Optional[float]): Restrict results to results within
the given distance in degrees of `near` point. Ignored, when
`near` is not set.
categories (list[tuple]): Restrict search to places of the given
categories. The category is the main OSM tag assigned to each
place. An empty list (the default) disables this filter.
geometry_output (enum): Add the full geometry of the place to the result.
Multiple formats may be selected. Note that geometries can become
quite large. (Default: none)
geometry_simplification (float): Simplification factor to use on
the geometries before returning them. The factor expresses
the tolerance in degrees from which the geometry may differ.
Topology is preserved. (Default: 0.0)
address_details (bool): Add detailed information about the places
that make up the address of the requested object. (Default: False)
linked_places (bool): Add detailed information about the places
that link to the result. (Default: False)
parented_places (bool): Add detailed information about all places
for which the requested object is a parent, i.e. all places for
which the object provides the address details.
Only POI places can have parents. (Default: False)
keywords (bool): Add detailed information about the search terms
used for this place.
Returns:
source_table (enum): Data source of the place. See below for possible values.
category (tuple): A tuple of two strings with the primary OSM tag
and value.
centroid (Point): Point position of the place.
place_id (Optional[int]): Internal ID of the place. This ID may differ
for the same place between different installations.
osm_object (Optional[tuple]): OSM type and ID of the place, if available.
names (Optional[dict]): Dictionary of names of the place. Keys are
usually the corresponding OSM tag keys.
address (Optional[dict]): Dictionary of address parts directly
attributed to the place. Keys are usually the corresponding
OSM tag keys with the `addr:` prefix removed.
extratags (Optional[dict]): Dictionary of additional attributes for
the place. Usually OSM tag keys and values.
housenumber (Optional[str]): House number of the place, normalised
for lookup. To get the house number in its original spelling,
use `address['housenumber']`.
postcode (Optional[str]): Computed postcode for the place. To get
directly attributed postcodes, use `address['postcode']` instead.
wikipedia (Optional[str]): Reference to a wikipedia site for the place.
The string has the format <language code>:<wikipedia title>.
rank_address (int): [Address rank](../customize/Ranking.md#address-rank).
rank_search (int): [Search rank](../customize/Ranking.md#search-rank).
importance (Optional[float]): Relative importance of the place. This is
a measure of how likely the place will be searched for.
country_code (Optional[str]): Country the feature is in as
ISO 3166-1 alpha-2 country code.
address_rows (Optional[AddressLines]): List of places that make up the
computed address. `None` when `address_details` parameter was False.
linked_rows (Optional[AddressLines]): List of places that link to the object.
`None` when `linked_places` parameter was False.
parented_rows (Optional[AddressLines]): List of direct children of the place.
`None` when `parented_places` parameter was False.
name_keywords (Optional[WordInfos]): List of search words for the name of
the place. `None` when `keywords` parameter is set to False.
address_keywords (Optional[WordInfos]): List of search words for the address of
the place. `None` when `keywords` parameter is set to False.
bbox (Bbox): Bounding box of the full geometry of the place.
If the place is a single point, then the size of the bounding
box is guessed according to the type of place.
geometry (dict): Dictionary containing the full geometry of the place
in the formats requested in the `geometry_output` parameter.
"""
return self._loop.run_until_complete(
self._async_api.search(query, **params))
# pylint: disable=too-many-arguments
def search_address(self, amenity: Optional[str] = None,
street: Optional[str] = None,
city: Optional[str] = None,
county: Optional[str] = None,
state: Optional[str] = None,
country: Optional[str] = None,
postalcode: Optional[str] = None,
**params: Any) -> SearchResults:
""" Find an address using structured search.
Parameters:
amenity: Name of a POI.
street: Street and optionally house number of the address. If the address
does not have a street, then the place the house number refers to.
city: Postal city of the address.
county: County equivalent of the address. Does not exist in all
jurisdictions.
state: State or province of the address.
country: Country with its full name or its ISO 3166-1 alpha-2 country code.
Do not use together with the country_code filter.
postalcode: Post code or ZIP for the place.
Other parameters:
max_results (int): Maximum number of results to return. The
actual number of results may be less. (Default: 10)
min_rank (int): Lowest permissible rank for the result.
For addressable places this is the minimum
[address rank](../customize/Ranking.md#address-rank). For all
other places the [search rank](../customize/Ranking.md#search-rank)
is used.
max_rank (int): Highest permissible rank for the result. See min_rank above.
layers (enum): Defines the kind of data to take into account.
See [layers section](Input-Parameter-Types.md#layers) for details.
(Default: addresses and POIs)
countries (list[str]): Restrict search to countries with the given
ISO 3166-1 alpha-2 country code. An empty list (the default)
disables this filter. Do not use, when the country parameter
is used.
excluded (list[int]): A list of internal IDs of places to exclude
from the search.
viewbox (Optional[Bbox]): Bounding box of an area to focus search on.
bounded_viewbox (bool): Consider the bounding box given in `viewbox`
as a filter and return only results within the bounding box.
near (Optional[Point]): Focus search around the given point and
return results ordered by distance to the given point.
near_radius (Optional[float]): Restrict results to results within
the given distance in degrees of `near` point. Ignored, when
`near` is not set.
categories (list[tuple]): Restrict search to places of the given
categories. The category is the main OSM tag assigned to each
place. An empty list (the default) disables this filter.
geometry_output (enum): Add the full geometry of the place to the result.
Multiple formats may be selected. Note that geometries can become
quite large. (Default: none)
geometry_simplification (float): Simplification factor to use on
the geometries before returning them. The factor expresses
the tolerance in degrees from which the geometry may differ.
Topology is preserved. (Default: 0.0)
address_details (bool): Add detailed information about the places
that make up the address of the requested object. (Default: False)
linked_places (bool): Add detailed information about the places
that link to the result. (Default: False)
parented_places (bool): Add detailed information about all places
for which the requested object is a parent, i.e. all places for
which the object provides the address details.
Only POI places can have parents. (Default: False)
keywords (bool): Add detailed information about the search terms
used for this place.
Returns:
source_table (enum): Data source of the place. See below for possible values.
category (tuple): A tuple of two strings with the primary OSM tag
and value.
centroid (Point): Point position of the place.
place_id (Optional[int]): Internal ID of the place. This ID may differ
for the same place between different installations.
osm_object (Optional[tuple]): OSM type and ID of the place, if available.
names (Optional[dict]): Dictionary of names of the place. Keys are
usually the corresponding OSM tag keys.
address (Optional[dict]): Dictionary of address parts directly
attributed to the place. Keys are usually the corresponding
OSM tag keys with the `addr:` prefix removed.
extratags (Optional[dict]): Dictionary of additional attributes for
the place. Usually OSM tag keys and values.
housenumber (Optional[str]): House number of the place, normalised
for lookup. To get the house number in its original spelling,
use `address['housenumber']`.
postcode (Optional[str]): Computed postcode for the place. To get
directly attributed postcodes, use `address['postcode']` instead.
wikipedia (Optional[str]): Reference to a wikipedia site for the place.
The string has the format <language code>:<wikipedia title>.
rank_address (int): [Address rank](../customize/Ranking.md#address-rank).
rank_search (int): [Search rank](../customize/Ranking.md#search-rank).
importance (Optional[float]): Relative importance of the place. This is
a measure of how likely the place will be searched for.
country_code (Optional[str]): Country the feature is in as
ISO 3166-1 alpha-2 country code.
address_rows (Optional[AddressLines]): List of places that make up the
computed address. `None` when `address_details` parameter was False.
linked_rows (Optional[AddressLines]): List of places that link to the object.
`None` when `linked_places` parameter was False.
parented_rows (Optional[AddressLines]): List of direct children of the place.
`None` when `parented_places` parameter was False.
name_keywords (Optional[WordInfos]): List of search words for the name of
the place. `None` when `keywords` parameter is set to False.
address_keywords (Optional[WordInfos]): List of search words for the address of
the place. `None` when `keywords` parameter is set to False.
bbox (Bbox): Bounding box of the full geometry of the place.
If the place is a single point, then the size of the bounding
box is guessed according to the type of place.
geometry (dict): Dictionary containing the full geometry of the place
in the formats requested in the `geometry_output` parameter.
"""
return self._loop.run_until_complete(
self._async_api.search_address(amenity, street, city, county,
state, country, postalcode, **params))
def search_category(self, categories: List[Tuple[str, str]],
near_query: Optional[str] = None,
**params: Any) -> SearchResults:
""" Find an object of a certain category near another place.
The near place may either be given as an unstructured search
query in itself or as a geographic area through the
viewbox or near parameters.
Parameters:
categories: Restrict search to places of the given
categories. The category is the main OSM tag assigned to each
place.
near_query: Optional free-text query to define the area to
restrict search to.
Other parameters:
max_results (int): Maximum number of results to return. The
actual number of results may be less. (Default: 10)
min_rank (int): Lowest permissible rank for the result.
For addressable places this is the minimum
[address rank](../customize/Ranking.md#address-rank). For all
other places the [search rank](../customize/Ranking.md#search-rank)
is used.
max_rank (int): Highest permissible rank for the result. See min_rank above.
layers (enum): Defines the kind of data to take into account.
See [layers section](Input-Parameter-Types.md#layers) for details.
(Default: addresses and POIs)
countries (list[str]): Restrict search to countries with the given
ISO 3166-1 alpha-2 country code. An empty list (the default)
disables this filter.
excluded (list[int]): A list of internal IDs of places to exclude
from the search.
viewbox (Optional[Bbox]): Bounding box of an area to focus search on.
bounded_viewbox (bool): Consider the bounding box given in `viewbox`
as a filter and return only results within the bounding box.
near (Optional[Point]): Focus search around the given point and
return results ordered by distance to the given point.
near_radius (Optional[float]): Restrict results to results within
the given distance in degrees of `near` point. Ignored, when
`near` is not set.
geometry_output (enum): Add the full geometry of the place to the result.
Multiple formats may be selected. Note that geometries can become
quite large. (Default: none)
geometry_simplification (float): Simplification factor to use on
the geometries before returning them. The factor expresses
the tolerance in degrees from which the geometry may differ.
Topology is preserved. (Default: 0.0)
address_details (bool): Add detailed information about the places
that make up the address of the requested object. (Default: False)
linked_places (bool): Add detailed information about the places
that link to the result. (Default: False)
parented_places (bool): Add detailed information about all places
for which the requested object is a parent, i.e. all places for
which the object provides the address details.
Only POI places can have parents. (Default: False)
keywords (bool): Add detailed information about the search terms
used for this place.
Returns:
source_table (enum): Data source of the place. See below for possible values.
category (tuple): A tuple of two strings with the primary OSM tag
and value.
centroid (Point): Point position of the place.
place_id (Optional[int]): Internal ID of the place. This ID may differ
for the same place between different installations.
osm_object (Optional[tuple]): OSM type and ID of the place, if available.
names (Optional[dict]): Dictionary of names of the place. Keys are
usually the corresponding OSM tag keys.
address (Optional[dict]): Dictionary of address parts directly
attributed to the place. Keys are usually the corresponding
OSM tag keys with the `addr:` prefix removed.
extratags (Optional[dict]): Dictionary of additional attributes for
the place. Usually OSM tag keys and values.
housenumber (Optional[str]): House number of the place, normalised
for lookup. To get the house number in its original spelling,
use `address['housenumber']`.
postcode (Optional[str]): Computed postcode for the place. To get
directly attributed postcodes, use `address['postcode']` instead.
wikipedia (Optional[str]): Reference to a wikipedia site for the place.
The string has the format <language code>:<wikipedia title>.
rank_address (int): [Address rank](../customize/Ranking.md#address-rank).
rank_search (int): [Search rank](../customize/Ranking.md#search-rank).
importance (Optional[float]): Relative importance of the place. This is
a measure of how likely the place will be searched for.
country_code (Optional[str]): Country the feature is in as
ISO 3166-1 alpha-2 country code.
address_rows (Optional[AddressLines]): List of places that make up the
computed address. `None` when `address_details` parameter was False.
linked_rows (Optional[AddressLines]): List of places that link to the object.
`None` when `linked_places` parameter was False.
parented_rows (Optional[AddressLines]): List of direct children of the place.
`None` when `parented_places` parameter was False.
name_keywords (Optional[WordInfos]): List of search words for the name of
the place. `None` when `keywords` parameter is set to False.
address_keywords (Optional[WordInfos]): List of search words for the address of
the place. `None` when `keywords` parameter is set to False.
bbox (Bbox): Bounding box of the full geometry of the place.
If the place is a single point, then the size of the bounding
box is guessed according to the type of place.
geometry (dict): Dictionary containing the full geometry of the place
in the formats requested in the `geometry_output` parameter.
"""
return self._loop.run_until_complete(
self._async_api.search_category(categories, near_query, **params))
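# Hedged usage sketch, not part of the original module: the synchronous
# wrapper is designed to be used as a context manager, which guarantees
# that close() runs. Paths and query values are illustrative assumptions.
def _demo_sync_usage() -> None:
    """ Sketch: free-text and structured search with NominatimAPI. """
    with NominatimAPI('/srv/nominatim-project') as api:   # hypothetical path
        # Free-text search, restricted to Germany.
        results = api.search('Hauptstr 5, Herxheim', countries=['de'])
        # Structured search; address parts that are unknown are left out.
        addresses = api.search_address(street='Hauptstr 5',
                                       city='Herxheim', country='de')
        print(len(results), len(addresses))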
| 56,140 | Python | .py | 882 | 48.222222 | 100 | 0.60906 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,725 | connection.py | osm-search_Nominatim/src/nominatim_api/connection.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Extended SQLAlchemy connection class that also includes access to the schema.
"""
from typing import cast, Any, Mapping, Sequence, Union, Dict, Optional, Set, \
Awaitable, Callable, TypeVar
import asyncio
import sqlalchemy as sa
from sqlalchemy.ext.asyncio import AsyncConnection
from .typing import SaFromClause
from .sql.sqlalchemy_schema import SearchTables
from .sql.sqlalchemy_types import Geometry
from .logging import log
T = TypeVar('T')
class SearchConnection:
""" An extended SQLAlchemy connection class, that also contains
the table definitions. The underlying asynchronous SQLAlchemy
connection can be accessed with the 'connection' property.
The 't' property is the collection of Nominatim tables.
"""
def __init__(self, conn: AsyncConnection,
tables: SearchTables,
properties: Dict[str, Any]) -> None:
self.connection = conn
self.t = tables # pylint: disable=invalid-name
self._property_cache = properties
self._classtables: Optional[Set[str]] = None
self.query_timeout: Optional[int] = None
def set_query_timeout(self, timeout: Optional[int]) -> None:
""" Set the timeout after which a query over this connection
is cancelled.
"""
self.query_timeout = timeout
async def scalar(self, sql: sa.sql.base.Executable,
params: Union[Mapping[str, Any], None] = None
) -> Any:
""" Execute a 'scalar()' query on the connection.
"""
log().sql(self.connection, sql, params)
return await asyncio.wait_for(self.connection.scalar(sql, params), self.query_timeout)
async def execute(self, sql: 'sa.Executable',
params: Union[Mapping[str, Any], Sequence[Mapping[str, Any]], None] = None
) -> 'sa.Result[Any]':
""" Execute a 'execute()' query on the connection.
"""
log().sql(self.connection, sql, params)
return await asyncio.wait_for(self.connection.execute(sql, params), self.query_timeout)
async def get_property(self, name: str, cached: bool = True) -> str:
""" Get a property from Nominatim's property table.
Property values are normally cached so that they are only
retrieved from the database when they are queried for the
first time with this function. Set 'cached' to False to force
reading the property from the database.
Raises a ValueError if the property does not exist.
"""
lookup_name = f'DBPROP:{name}'
if cached and lookup_name in self._property_cache:
return cast(str, self._property_cache[lookup_name])
sql = sa.select(self.t.properties.c.value)\
.where(self.t.properties.c.property == name)
value = await self.connection.scalar(sql)
if value is None:
raise ValueError(f"Property '{name}' not found in database.")
self._property_cache[lookup_name] = cast(str, value)
return cast(str, value)
async def get_db_property(self, name: str) -> Any:
""" Get a setting from the database. At the moment, only
'server_version', the version of the database software, can
be retrieved with this function.
Raises a ValueError if the property does not exist.
"""
if name != 'server_version':
raise ValueError(f"DB setting '{name}' not found in database.")
return self._property_cache['DB:server_version']
async def get_cached_value(self, group: str, name: str,
factory: Callable[[], Awaitable[T]]) -> T:
""" Access the cache for this Nominatim instance.
Each cache value needs to belong to a group and have a name.
This function is for internal API use only.
`factory` is an async callback function that produces
the value if it is not already cached.
Returns the cached value or the result of factory (also caching
the result).
"""
full_name = f'{group}:{name}'
if full_name in self._property_cache:
return cast(T, self._property_cache[full_name])
value = await factory()
self._property_cache[full_name] = value
return value
async def get_class_table(self, cls: str, typ: str) -> Optional[SaFromClause]:
""" Lookup up if there is a classtype table for the given category
and return a SQLAlchemy table for it, if it exists.
"""
if self._classtables is None:
res = await self.execute(sa.text("""SELECT tablename FROM pg_tables
WHERE tablename LIKE 'place_classtype_%'
"""))
self._classtables = {r[0] for r in res}
tablename = f"place_classtype_{cls}_{typ}"
if tablename not in self._classtables:
return None
if tablename in self.t.meta.tables:
return self.t.meta.tables[tablename]
return sa.Table(tablename, self.t.meta,
sa.Column('place_id', sa.BigInteger),
sa.Column('centroid', Geometry))
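# Hedged usage sketch, not part of the original module: how a caller might
# use the per-connection cache above. The cache group/name and the cached
# property are illustrative assumptions.
async def _demo_cached_lookup(conn: SearchConnection) -> str:
    """ Sketch: compute a value at most once per connection cache. """
    async def _factory() -> str:
        # Only runs while ('demo', 'tokenizer') is absent from the cache.
        return await conn.get_property('tokenizer')
    return await conn.get_cached_value('demo', 'tokenizer', _factory)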
| 5,562 | Python | .py | 112 | 39.25 | 96 | 0.625346 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,726 | lookup.py | osm-search_Nominatim/src/nominatim_api/lookup.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of place lookup by ID.
"""
from typing import Optional, Callable, Tuple, Type
import datetime as dt
import sqlalchemy as sa
from .typing import SaColumn, SaRow, SaSelect
from .connection import SearchConnection
from .logging import log
from . import types as ntyp
from . import results as nres
RowFunc = Callable[[Optional[SaRow], Type[nres.BaseResultT]], Optional[nres.BaseResultT]]
GeomFunc = Callable[[SaSelect, SaColumn], SaSelect]
async def find_in_placex(conn: SearchConnection, place: ntyp.PlaceRef,
add_geometries: GeomFunc) -> Optional[SaRow]:
""" Search for the given place in the placex table and return the
base information.
"""
log().section("Find in placex table")
t = conn.t.placex
sql = sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name,
t.c.class_, t.c.type, t.c.admin_level,
t.c.address, t.c.extratags,
t.c.housenumber, t.c.postcode, t.c.country_code,
t.c.importance, t.c.wikipedia, t.c.indexed_date,
t.c.parent_place_id, t.c.rank_address, t.c.rank_search,
t.c.linked_place_id,
t.c.geometry.ST_Expand(0).label('bbox'),
t.c.centroid)
if isinstance(place, ntyp.PlaceID):
sql = sql.where(t.c.place_id == place.place_id)
elif isinstance(place, ntyp.OsmID):
sql = sql.where(t.c.osm_type == place.osm_type)\
.where(t.c.osm_id == place.osm_id)
if place.osm_class:
sql = sql.where(t.c.class_ == place.osm_class)
else:
sql = sql.order_by(t.c.class_)
sql = sql.limit(1)
else:
return None
return (await conn.execute(add_geometries(sql, t.c.geometry))).one_or_none()
async def find_in_osmline(conn: SearchConnection, place: ntyp.PlaceRef,
add_geometries: GeomFunc) -> Optional[SaRow]:
""" Search for the given place in the osmline table and return the
base information.
"""
log().section("Find in interpolation table")
t = conn.t.osmline
sql = sa.select(t.c.place_id, t.c.osm_id, t.c.parent_place_id,
t.c.indexed_date, t.c.startnumber, t.c.endnumber,
t.c.step, t.c.address, t.c.postcode, t.c.country_code,
t.c.linegeo.ST_Centroid().label('centroid'))
if isinstance(place, ntyp.PlaceID):
sql = sql.where(t.c.place_id == place.place_id)
elif isinstance(place, ntyp.OsmID) and place.osm_type == 'W':
# There may be multiple interpolations for a single way.
# If 'class' contains a number, return the one that belongs to that number.
sql = sql.where(t.c.osm_id == place.osm_id).limit(1)
if place.osm_class and place.osm_class.isdigit():
sql = sql.order_by(sa.func.greatest(0,
int(place.osm_class) - t.c.endnumber,
t.c.startnumber - int(place.osm_class)))
else:
return None
return (await conn.execute(add_geometries(sql, t.c.linegeo))).one_or_none()
async def find_in_tiger(conn: SearchConnection, place: ntyp.PlaceRef,
add_geometries: GeomFunc) -> Optional[SaRow]:
""" Search for the given place in the table of Tiger addresses and return
the base information. Only lookup by place ID is supported.
"""
if not isinstance(place, ntyp.PlaceID):
return None
log().section("Find in TIGER table")
t = conn.t.tiger
parent = conn.t.placex
sql = sa.select(t.c.place_id, t.c.parent_place_id,
parent.c.osm_type, parent.c.osm_id,
t.c.startnumber, t.c.endnumber, t.c.step,
t.c.postcode,
t.c.linegeo.ST_Centroid().label('centroid'))\
.where(t.c.place_id == place.place_id)\
.join(parent, t.c.parent_place_id == parent.c.place_id, isouter=True)
return (await conn.execute(add_geometries(sql, t.c.linegeo))).one_or_none()
async def find_in_postcode(conn: SearchConnection, place: ntyp.PlaceRef,
add_geometries: GeomFunc) -> Optional[SaRow]:
""" Search for the given place in the postcode table and return the
base information. Only lookup by place ID is supported.
"""
if not isinstance(place, ntyp.PlaceID):
return None
log().section("Find in postcode table")
t = conn.t.postcode
sql = sa.select(t.c.place_id, t.c.parent_place_id,
t.c.rank_search, t.c.rank_address,
t.c.indexed_date, t.c.postcode, t.c.country_code,
t.c.geometry.label('centroid')) \
.where(t.c.place_id == place.place_id)
return (await conn.execute(add_geometries(sql, t.c.geometry))).one_or_none()
async def find_in_all_tables(conn: SearchConnection, place: ntyp.PlaceRef,
add_geometries: GeomFunc
) -> Tuple[Optional[SaRow], RowFunc[nres.BaseResultT]]:
""" Search for the given place in all data tables
and return the base information.
"""
row = await find_in_placex(conn, place, add_geometries)
log().var_dump('Result (placex)', row)
if row is not None:
return row, nres.create_from_placex_row
row = await find_in_osmline(conn, place, add_geometries)
log().var_dump('Result (osmline)', row)
if row is not None:
return row, nres.create_from_osmline_row
row = await find_in_postcode(conn, place, add_geometries)
log().var_dump('Result (postcode)', row)
if row is not None:
return row, nres.create_from_postcode_row
row = await find_in_tiger(conn, place, add_geometries)
log().var_dump('Result (tiger)', row)
return row, nres.create_from_tiger_row
async def get_detailed_place(conn: SearchConnection, place: ntyp.PlaceRef,
details: ntyp.LookupDetails) -> Optional[nres.DetailedResult]:
""" Retrieve a place with additional details from the database.
"""
log().function('get_detailed_place', place=place, details=details)
if details.geometry_output and details.geometry_output != ntyp.GeometryFormat.GEOJSON:
raise ValueError("lookup only supports geojosn polygon output.")
if details.geometry_output & ntyp.GeometryFormat.GEOJSON:
def _add_geometry(sql: SaSelect, column: SaColumn) -> SaSelect:
return sql.add_columns(sa.func.ST_AsGeoJSON(
sa.case((sa.func.ST_NPoints(column) > 5000,
sa.func.ST_SimplifyPreserveTopology(column, 0.0001)),
else_=column), 7).label('geometry_geojson'))
else:
def _add_geometry(sql: SaSelect, column: SaColumn) -> SaSelect:
return sql.add_columns(sa.func.ST_GeometryType(column).label('geometry_type'))
row_func: RowFunc[nres.DetailedResult]
row, row_func = await find_in_all_tables(conn, place, _add_geometry)
if row is None:
return None
result = row_func(row, nres.DetailedResult)
assert result is not None
# add missing details
if 'type' in result.geometry:
result.geometry['type'] = GEOMETRY_TYPE_MAP.get(result.geometry['type'],
result.geometry['type'])
indexed_date = getattr(row, 'indexed_date', None)
if indexed_date is not None:
result.indexed_date = indexed_date.replace(tzinfo=dt.timezone.utc)
await nres.add_result_details(conn, [result], details)
return result
async def get_simple_place(conn: SearchConnection, place: ntyp.PlaceRef,
details: ntyp.LookupDetails) -> Optional[nres.SearchResult]:
""" Retrieve a place as a simple search result from the database.
"""
log().function('get_simple_place', place=place, details=details)
def _add_geometry(sql: SaSelect, col: SaColumn) -> SaSelect:
if not details.geometry_output:
return sql
out = []
if details.geometry_simplification > 0.0:
col = sa.func.ST_SimplifyPreserveTopology(col, details.geometry_simplification)
if details.geometry_output & ntyp.GeometryFormat.GEOJSON:
out.append(sa.func.ST_AsGeoJSON(col, 7).label('geometry_geojson'))
if details.geometry_output & ntyp.GeometryFormat.TEXT:
out.append(sa.func.ST_AsText(col).label('geometry_text'))
if details.geometry_output & ntyp.GeometryFormat.KML:
out.append(sa.func.ST_AsKML(col, 7).label('geometry_kml'))
if details.geometry_output & ntyp.GeometryFormat.SVG:
out.append(sa.func.ST_AsSVG(col, 0, 7).label('geometry_svg'))
return sql.add_columns(*out)
row_func: RowFunc[nres.SearchResult]
row, row_func = await find_in_all_tables(conn, place, _add_geometry)
if row is None:
return None
result = row_func(row, nres.SearchResult)
assert result is not None
# add missing details
if hasattr(row, 'bbox'):
result.bbox = ntyp.Bbox.from_wkb(row.bbox)
await nres.add_result_details(conn, [result], details)
return result
GEOMETRY_TYPE_MAP = {
'POINT': 'ST_Point',
'MULTIPOINT': 'ST_MultiPoint',
'LINESTRING': 'ST_LineString',
'MULTILINESTRING': 'ST_MultiLineString',
'POLYGON': 'ST_Polygon',
'MULTIPOLYGON': 'ST_MultiPolygon',
'GEOMETRYCOLLECTION': 'ST_GeometryCollection'
}
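# Hedged usage sketch, not part of the original module: a simple lookup of
# an OSM way with WKT geometry output. The OSM ID is an illustrative
# assumption.
async def _demo_simple_lookup(conn: SearchConnection) -> Optional[nres.SearchResult]:
    """ Sketch: look up way 12345 and request the geometry as WKT. """
    details = ntyp.LookupDetails(geometry_output=ntyp.GeometryFormat.TEXT)
    return await get_simple_place(conn, ntyp.OsmID('W', 12345), details)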
| 9,900 | Python | .py | 199 | 40.025126 | 98 | 0.630777 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,727 | version.py | osm-search_Nominatim/src/nominatim_api/version.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Version information for the Nominatim API.
"""
NOMINATIM_API_VERSION = '4.5.0'
| 295 | Python | .py | 10 | 28.4 | 58 | 0.746479 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,728 | result_formatting.py | osm-search_Nominatim/src/nominatim_api/result_formatting.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper classes and functions for formatting results into API responses.
"""
from typing import Type, TypeVar, Dict, List, Callable, Any, Mapping, Optional, cast
from collections import defaultdict
from pathlib import Path
import importlib
from .server.content_types import CONTENT_JSON
T = TypeVar('T') # pylint: disable=invalid-name
FormatFunc = Callable[[T, Mapping[str, Any]], str]
ErrorFormatFunc = Callable[[str, str, int], str]
class FormatDispatcher:
""" Container for formatting functions for results.
Functions can conveniently be added by using decorated functions.
"""
def __init__(self, content_types: Optional[Mapping[str, str]] = None) -> None:
self.error_handler: ErrorFormatFunc = lambda ct, msg, status: f"ERROR {status}: {msg}"
self.content_types: Dict[str, str] = {}
if content_types:
self.content_types.update(content_types)
self.format_functions: Dict[Type[Any], Dict[str, FormatFunc[Any]]] = defaultdict(dict)
def format_func(self, result_class: Type[T],
fmt: str) -> Callable[[FormatFunc[T]], FormatFunc[T]]:
""" Decorator for a function that formats a given type of result into the
selected format.
"""
def decorator(func: FormatFunc[T]) -> FormatFunc[T]:
self.format_functions[result_class][fmt] = func
return func
return decorator
def error_format_func(self, func: ErrorFormatFunc) -> ErrorFormatFunc:
""" Decorator for a function that formats error messges.
There is only one error formatter per dispatcher. Using
the decorator repeatedly will overwrite previous functions.
"""
self.error_handler = func
return func
def list_formats(self, result_type: Type[Any]) -> List[str]:
""" Return a list of formats supported by this formatter.
"""
return list(self.format_functions[result_type].keys())
def supports_format(self, result_type: Type[Any], fmt: str) -> bool:
""" Check if the given format is supported by this formatter.
"""
return fmt in self.format_functions[result_type]
def format_result(self, result: Any, fmt: str, options: Mapping[str, Any]) -> str:
""" Convert the given result into a string using the given format.
The format is expected to be in the list returned by
`list_formats()`.
"""
return self.format_functions[type(result)][fmt](result, options)
def format_error(self, content_type: str, msg: str, status: int) -> str:
""" Convert the given error message into a response string
taking the requested content_type into account.
Change the format using the error_format_func decorator.
"""
return self.error_handler(content_type, msg, status)
def set_content_type(self, fmt: str, content_type: str) -> None:
""" Set the content type for the given format. This is the string
that will be returned in the Content-Type header of the HTML
response, when the given format is choosen.
"""
self.content_types[fmt] = content_type
def get_content_type(self, fmt: str) -> str:
""" Return the content type for the given format.
If no explicit content type has been defined, then
JSON format is assumed.
"""
return self.content_types.get(fmt, CONTENT_JSON)
def load_format_dispatcher(api_name: str, project_dir: Optional[Path]) -> FormatDispatcher:
""" Load the dispatcher for the given API.
The function first tries to find a module api/<api_name>/format.py
in the project directory. This file must export a single variable
`dispatch`.
If the file does not exist, the default formatter is loaded.
"""
if project_dir is not None:
priv_module = project_dir / 'api' / api_name / 'format.py'
if priv_module.is_file():
spec = importlib.util.spec_from_file_location(f'api.{api_name}.format',
                                              str(priv_module))
if spec:
module = importlib.util.module_from_spec(spec)
# Do not add to global modules because there is no standard
# module name that Python can resolve.
assert spec.loader is not None
spec.loader.exec_module(module)
return cast(FormatDispatcher, module.dispatch)
return cast(FormatDispatcher,
importlib.import_module(f'nominatim_api.{api_name}.format').dispatch)
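# Hedged usage sketch, not part of the original module: registering a
# formatter through the decorator API above. The 'text' format and the str
# result class are illustrative assumptions.
_demo_dispatch = FormatDispatcher({'text': 'text/plain; charset=utf-8'})

@_demo_dispatch.format_func(str, 'text')
def _format_str_as_text(result: str, _options: Mapping[str, Any]) -> str:
    # Trivial formatter: pass the result through unchanged.
    return result

# _demo_dispatch.format_result('hello', 'text', {}) would return 'hello'.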
| 4,894 | Python | .py | 96 | 41.96875 | 94 | 0.652612 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,729 | types.py | osm-search_Nominatim/src/nominatim_api/types.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Complex datatypes used by the Nominatim API.
"""
from typing import Optional, Union, Tuple, NamedTuple, TypeVar, Type, Dict, \
Any, List, Sequence
from collections import abc
import dataclasses
import enum
import math
from struct import unpack
from binascii import unhexlify
from .errors import UsageError
from .localization import Locales
# pylint: disable=no-member,too-many-boolean-expressions,too-many-instance-attributes
@dataclasses.dataclass
class PlaceID:
""" Reference a place by Nominatim's internal ID.
A PlaceID may reference a place from the main table placex, from
the interpolation tables or the postcode tables. Place IDs are not
stable between installations. You may therefore use this type only
with place IDs obtained from the same database.
"""
place_id: int
"""
The internal ID of the place to reference.
"""
@dataclasses.dataclass
class OsmID:
""" Reference a place by its OSM ID and potentially the basic category.
The OSM ID may refer to places in the main table placex and OSM
interpolation lines.
"""
osm_type: str
""" OSM type of the object. Must be one of `N`(node), `W`(way) or
`R`(relation).
"""
osm_id: int
""" The OSM ID of the object.
"""
osm_class: Optional[str] = None
""" The same OSM object may appear multiple times in the database under
different categories. The optional class parameter allows one to distinguish
between the categories and corresponds to the key part of the category.
If there are multiple objects in the database and `osm_class` is
left out, then one of the objects is returned at random.
"""
def __post_init__(self) -> None:
if self.osm_type not in ('N', 'W', 'R'):
raise ValueError(f"Illegal OSM type '{self.osm_type}'. Must be one of N, W, R.")
PlaceRef = Union[PlaceID, OsmID]
class Point(NamedTuple):
""" A geographic point in WGS84 projection.
"""
x: float
y: float
@property
def lat(self) -> float:
""" Return the latitude of the point.
"""
return self.y
@property
def lon(self) -> float:
""" Return the longitude of the point.
"""
return self.x
def to_geojson(self) -> str:
""" Return the point in GeoJSON format.
"""
return f'{{"type": "Point","coordinates": [{self.x}, {self.y}]}}'
@staticmethod
def from_wkb(wkb: Union[str, bytes]) -> 'Point':
""" Create a point from EWKB as returned from the database.
"""
if isinstance(wkb, str):
wkb = unhexlify(wkb)
if len(wkb) != 25:
raise ValueError(f"Point wkb has unexpected length {len(wkb)}")
if wkb[0] == 0:
gtype, srid, x, y = unpack('>iidd', wkb[1:])
elif wkb[0] == 1:
gtype, srid, x, y = unpack('<iidd', wkb[1:])
else:
raise ValueError("WKB has unknown endian value.")
if gtype != 0x20000001:
raise ValueError("WKB must be a point geometry.")
if srid != 4326:
raise ValueError("Only WGS84 WKB supported.")
return Point(x, y)
@staticmethod
def from_param(inp: Any) -> 'Point':
""" Create a point from an input parameter. The parameter
may be given as a point, a string or a sequence of
strings or floats. Raises a UsageError if the format is
not correct.
"""
if isinstance(inp, Point):
return inp
seq: Sequence[str]
if isinstance(inp, str):
seq = inp.split(',')
elif isinstance(inp, abc.Sequence):
    seq = inp
else:
    raise UsageError('Point parameter needs to be a comma-separated string '
                     'or a sequence of numbers.')
if len(seq) != 2:
raise UsageError('Point parameter needs 2 coordinates.')
try:
x, y = filter(math.isfinite, map(float, seq))
except ValueError as exc:
raise UsageError('Point parameter needs to be numbers.') from exc
if x < -180.0 or x > 180.0 or y < -90.0 or y > 90.0:
raise UsageError('Point coordinates invalid.')
return Point(x, y)
def to_wkt(self) -> str:
""" Return the WKT representation of the point.
"""
return f'POINT({self.x} {self.y})'
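# Hedged usage sketch, not part of the original module: the input styles
# accepted by Point.from_param. The coordinates are illustrative.
def _demo_point_parsing() -> Point:
    """ Sketch: all three calls yield the same Point(x=8.54, y=47.37). """
    assert Point.from_param('8.54,47.37') == Point(8.54, 47.37)
    assert Point.from_param((8.54, 47.37)) == Point(8.54, 47.37)
    return Point.from_param(Point(8.54, 47.37))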
AnyPoint = Union[Point, Tuple[float, float]]
WKB_BBOX_HEADER_LE = b'\x01\x03\x00\x00\x20\xE6\x10\x00\x00\x01\x00\x00\x00\x05\x00\x00\x00'
WKB_BBOX_HEADER_BE = b'\x00\x20\x00\x00\x03\x00\x00\x10\xe6\x00\x00\x00\x01\x00\x00\x00\x05'
class Bbox:
""" A bounding box in WGS84 projection.
The coordinates are available as an array in the 'coord'
property in the order (minx, miny, maxx, maxy).
"""
def __init__(self, minx: float, miny: float, maxx: float, maxy: float) -> None:
""" Create a new bounding box with the given coordinates in WGS84
projection.
"""
self.coords = (minx, miny, maxx, maxy)
@property
def minlat(self) -> float:
""" Southern-most latitude, corresponding to the minimum y coordinate.
"""
return self.coords[1]
@property
def maxlat(self) -> float:
""" Northern-most latitude, corresponding to the maximum y coordinate.
"""
return self.coords[3]
@property
def minlon(self) -> float:
""" Western-most longitude, corresponding to the minimum x coordinate.
"""
return self.coords[0]
@property
def maxlon(self) -> float:
""" Eastern-most longitude, corresponding to the maximum x coordinate.
"""
return self.coords[2]
@property
def area(self) -> float:
""" Return the area of the box in WGS84.
"""
return (self.coords[2] - self.coords[0]) * (self.coords[3] - self.coords[1])
def contains(self, pt: Point) -> bool:
""" Check if the point is inside or on the boundary of the box.
"""
return self.coords[0] <= pt[0] and self.coords[1] <= pt[1]\
and self.coords[2] >= pt[0] and self.coords[3] >= pt[1]
def to_wkt(self) -> str:
""" Return the WKT representation of the Bbox. This
is a simple polygon with four points.
"""
return 'POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))'\
.format(*self.coords) # pylint: disable=consider-using-f-string
@staticmethod
def from_wkb(wkb: Union[None, str, bytes]) -> 'Optional[Bbox]':
""" Create a Bbox from a bounding box polygon as returned by
the database. Returns `None` if the input value is None.
"""
if wkb is None:
return None
if isinstance(wkb, str):
wkb = unhexlify(wkb)
if len(wkb) != 97:
raise ValueError("WKB must be a bounding box polygon")
if wkb.startswith(WKB_BBOX_HEADER_LE):
x1, y1, _, _, x2, y2 = unpack('<dddddd', wkb[17:65])
elif wkb.startswith(WKB_BBOX_HEADER_BE):
x1, y1, _, _, x2, y2 = unpack('>dddddd', wkb[17:65])
else:
raise ValueError("WKB has wrong header")
return Bbox(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2))
@staticmethod
def from_point(pt: Point, buffer: float) -> 'Bbox':
""" Return a Bbox around the point with the buffer added to all sides.
"""
return Bbox(pt[0] - buffer, pt[1] - buffer,
pt[0] + buffer, pt[1] + buffer)
@staticmethod
def from_param(inp: Any) -> 'Bbox':
""" Return a Bbox from an input parameter. The box may be
given as a Bbox, a string or a list or strings or integer.
Raises a UsageError if the format is incorrect.
"""
if isinstance(inp, Bbox):
return inp
seq: Sequence[str]
if isinstance(inp, str):
seq = inp.split(',')
elif isinstance(inp, abc.Sequence):
    seq = inp
else:
    raise UsageError('Bounding box parameter needs to be a comma-separated '
                     'string or a sequence of numbers.')
if len(seq) != 4:
raise UsageError('Bounding box parameter needs 4 coordinates.')
try:
x1, y1, x2, y2 = filter(math.isfinite, map(float, seq))
except ValueError as exc:
raise UsageError('Bounding box parameter needs to be numbers.') from exc
x1 = min(180, max(-180, x1))
x2 = min(180, max(-180, x2))
y1 = min(90, max(-90, y1))
y2 = min(90, max(-90, y2))
if x1 == x2 or y1 == y2:
raise UsageError('Bounding box with invalid parameters.')
return Bbox(min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2))
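# Hedged usage sketch, not part of the original module: parsing a viewbox
# string and testing containment. The coordinates are illustrative.
def _demo_bbox() -> bool:
    """ Sketch: build a Bbox from user input and check a point. """
    box = Bbox.from_param('8.0,47.0,9.0,48.0')   # minx, miny, maxx, maxy
    return box.contains(Point(8.5, 47.5))        # True: point lies inside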
class GeometryFormat(enum.Flag):
""" All search functions support returning the full geometry of a place in
various formats. The internal geometry is converted by PostGIS to
the desired format and then returned as a string. It is possible to
request multiple formats at the same time.
"""
NONE = 0
""" No geometry requested. Alias for a empty flag.
"""
GEOJSON = enum.auto()
"""
[GeoJSON](https://geojson.org/) format
"""
KML = enum.auto()
"""
[KML](https://en.wikipedia.org/wiki/Keyhole_Markup_Language) format
"""
SVG = enum.auto()
"""
[SVG](http://www.w3.org/TR/SVG/paths.html) format
"""
TEXT = enum.auto()
"""
[WKT](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) format
"""
class DataLayer(enum.Flag):
""" The `DataLayer` flag type defines the layers that can be selected
for reverse and forward search.
"""
ADDRESS = enum.auto()
""" The address layer contains all places relevant for addresses:
fully qualified addresses with a house number (or a house name equivalent,
for some addresses) and places that can be part of an address like
roads, cities, states.
"""
POI = enum.auto()
""" Layer for points of interest like shops, restaurants but also
recycling bins or postboxes.
"""
RAILWAY = enum.auto()
""" Layer with railway features including tracks and other infrastructure.
Note that in Nominatim's standard configuration, only very few railway
features are imported into the database. Thus a custom configuration
is required to make full use of this layer.
"""
NATURAL = enum.auto()
""" Layer with natural features like rivers, lakes and mountains.
"""
MANMADE = enum.auto()
""" Layer with other human-made features and boundaries. This layer is
the catch-all and includes all features not covered by the other
layers. A typical example for this layer are national park boundaries.
"""
def format_country(cc: Any) -> List[str]:
""" Extract a list of country codes from the input which may be either
a string or list of strings. Filters out all values that are not
a two-letter string.
"""
clist: Sequence[str]
if isinstance(cc, str):
clist = cc.split(',')
elif isinstance(cc, abc.Sequence):
clist = cc
else:
raise UsageError("Parameter 'country' needs to be a comma-separated list "
"or a Python list of strings.")
return [cc.lower() for cc in clist if isinstance(cc, str) and len(cc) == 2]
def format_excluded(ids: Any) -> List[int]:
""" Extract a list of place ids from the input which may be either
        a string or a list of strings or ints. Ignores empty values but
        throws a UsageError on anything that cannot be converted to int.
"""
plist: Sequence[str]
if isinstance(ids, str):
plist = [s.strip() for s in ids.split(',')]
elif isinstance(ids, abc.Sequence):
plist = ids
else:
raise UsageError("Parameter 'excluded' needs to be a comma-separated list "
"or a Python list of numbers.")
if not all(isinstance(i, int) or
(isinstance(i, str) and (not i or i.isdigit())) for i in plist):
raise UsageError("Parameter 'excluded' only takes place IDs.")
return [int(id) for id in plist if id] or [0]
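# Editor's sketch of the two normalisation helpers above (inputs illustrative):
def _parameter_filter_demo() -> None:
    assert format_country('de,GB,xxx') == ['de', 'gb']   # non-two-letter values dropped
    assert format_excluded('12, 34,') == [12, 34]        # empty entries ignored
    assert format_excluded('') == [0]                    # sentinel when nothing is excluded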
def format_categories(categories: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
""" Extract a list of categories. Currently a noop.
"""
return categories
TParam = TypeVar('TParam', bound='LookupDetails') # pylint: disable=invalid-name
@dataclasses.dataclass
class LookupDetails:
""" Collection of parameters that define which kind of details are
returned with a lookup or details result.
"""
geometry_output: GeometryFormat = GeometryFormat.NONE
""" Add the full geometry of the place to the result. Multiple
formats may be selected. Note that geometries can become quite large.
"""
address_details: bool = False
""" Get detailed information on the places that make up the address
for the result.
"""
linked_places: bool = False
""" Get detailed information on the places that link to the result.
"""
parented_places: bool = False
""" Get detailed information on all places that this place is a parent
for, i.e. all places for which it provides the address details.
Only POI places can have parents.
"""
keywords: bool = False
""" Add information about the search terms used for this place.
"""
geometry_simplification: float = 0.0
""" Simplification factor for a geometry in degrees WGS. A factor of
0.0 means the original geometry is kept. The higher the value, the
more the geometry gets simplified.
"""
locales: Locales = Locales()
""" Preferred languages for localization of results.
"""
@classmethod
def from_kwargs(cls: Type[TParam], kwargs: Dict[str, Any]) -> TParam:
""" Load the data fields of the class from a dictionary.
Unknown entries in the dictionary are ignored, missing ones
get the default setting.
The function supports type checking and throws a UsageError
when the value does not fit.
"""
def _check_field(v: Any, field: 'dataclasses.Field[Any]') -> Any:
if v is None:
return field.default_factory() \
if field.default_factory != dataclasses.MISSING \
else field.default
if field.metadata and 'transform' in field.metadata:
return field.metadata['transform'](v)
if not isinstance(v, field.type): # type: ignore[arg-type]
raise UsageError(f"Parameter '{field.name}' needs to be of {field.type!s}.")
return v
return cls(**{f.name: _check_field(kwargs[f.name], f)
for f in dataclasses.fields(cls) if f.name in kwargs})
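# Editor's sketch: from_kwargs() silently skips unknown keys, applies the
# 'transform' metadata and type-checks everything else (keys illustrative).
def _from_kwargs_demo() -> None:
    details = LookupDetails.from_kwargs({'address_details': True, 'unknown_key': 42})
    assert details.address_details is True
    assert details.keywords is False    # absent fields keep their defaults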
@dataclasses.dataclass
class ReverseDetails(LookupDetails):
""" Collection of parameters for the reverse call.
"""
max_rank: int = dataclasses.field(default=30,
metadata={'transform': lambda v: max(0, min(v, 30))}
)
""" Highest address rank to return.
"""
layers: DataLayer = DataLayer.ADDRESS | DataLayer.POI
""" Filter which kind of data to include.
"""
@dataclasses.dataclass
class SearchDetails(LookupDetails):
""" Collection of parameters for the search call.
"""
max_results: int = 10
""" Maximum number of results to be returned. The actual number of results
may be less.
"""
min_rank: int = dataclasses.field(default=0,
metadata={'transform': lambda v: max(0, min(v, 30))}
)
""" Lowest address rank to return.
"""
max_rank: int = dataclasses.field(default=30,
metadata={'transform': lambda v: max(0, min(v, 30))}
)
""" Highest address rank to return.
"""
layers: Optional[DataLayer] = dataclasses.field(default=None,
metadata={'transform': lambda r : r})
""" Filter which kind of data to include. When 'None' (the default) then
filtering by layers is disabled.
"""
countries: List[str] = dataclasses.field(default_factory=list,
metadata={'transform': format_country})
""" Restrict search results to the given countries. An empty list (the
default) will disable this filter.
"""
excluded: List[int] = dataclasses.field(default_factory=list,
metadata={'transform': format_excluded})
""" List of OSM objects to exclude from the results. Currently only
works when the internal place ID is given.
An empty list (the default) will disable this filter.
"""
viewbox: Optional[Bbox] = dataclasses.field(default=None,
metadata={'transform': Bbox.from_param})
""" Focus the search on a given map area.
"""
bounded_viewbox: bool = False
""" Use 'viewbox' as a filter and restrict results to places within the
given area.
"""
near: Optional[Point] = dataclasses.field(default=None,
metadata={'transform': Point.from_param})
""" Order results by distance to the given point.
"""
near_radius: Optional[float] = dataclasses.field(default=None,
metadata={'transform': lambda r : r})
""" Use near point as a filter and drop results outside the given
        radius. The radius is given in degrees WGS84.
"""
categories: List[Tuple[str, str]] = dataclasses.field(default_factory=list,
metadata={'transform': format_categories})
""" Restrict search to places with one of the given class/type categories.
An empty list (the default) will disable this filter.
"""
viewbox_x2: Optional[Bbox] = None
def __post_init__(self) -> None:
if self.viewbox is not None:
xext = (self.viewbox.maxlon - self.viewbox.minlon)/2
yext = (self.viewbox.maxlat - self.viewbox.minlat)/2
self.viewbox_x2 = Bbox(self.viewbox.minlon - xext, self.viewbox.minlat - yext,
self.viewbox.maxlon + xext, self.viewbox.maxlat + yext)
def restrict_min_max_rank(self, new_min: int, new_max: int) -> None:
""" Change the min_rank and max_rank fields to respect the
given boundaries.
"""
assert new_min <= new_max
self.min_rank = max(self.min_rank, new_min)
self.max_rank = min(self.max_rank, new_max)
def is_impossible(self) -> bool:
""" Check if the parameter configuration is contradictionary and
cannot yield any results.
"""
return (self.min_rank > self.max_rank
or (self.bounded_viewbox
and self.viewbox is not None and self.near is not None
                    and not self.viewbox.contains(self.near))  # near point outside the bounded box
or (self.layers is not None and not self.layers)
or (self.max_rank <= 4 and
self.layers is not None and not self.layers & DataLayer.ADDRESS))
def layer_enabled(self, layer: DataLayer) -> bool:
""" Check if the given layer has been chosen. Also returns
true when layer restriction has been disabled completely.
"""
return self.layers is None or bool(self.layers & layer)
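# Editor's sketch: how the transforms and __post_init__ above interact
# (parameter values illustrative).
def _search_details_demo() -> None:
    details = SearchDetails.from_kwargs({'viewbox': '0,0,1,1', 'max_rank': 45})
    assert details.max_rank == 30                 # transform clamps to [0, 30]
    assert details.viewbox_x2 is not None         # doubled box from __post_init__
    details.restrict_min_max_rank(4, 25)
    assert (details.min_rank, details.max_rank) == (4, 25)
    assert details.layer_enabled(DataLayer.POI)   # layers=None means no filtering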
| 19,833 | Python | .py | 459 | 34.359477 | 100 | 0.61116 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,730 | typing.py | osm-search_Nominatim/src/nominatim_api/typing.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Type definitions for typing annotations.
Complex type definitions are moved here, to keep the source files readable.
"""
from typing import Union, TYPE_CHECKING
# pylint: disable=missing-class-docstring,useless-import-alias
# SQLAlchemy introduced generic types in version 2.0 making typing
# incompatible with older versions. Add wrappers here so we don't have
# to litter the code with bare-string types.
if TYPE_CHECKING:
from typing import Any
import sqlalchemy as sa
import os
from typing_extensions import (TypeAlias as TypeAlias)
else:
TypeAlias = str
StrPath = Union[str, 'os.PathLike[str]']
SaLambdaSelect: TypeAlias = 'Union[sa.Select[Any], sa.StatementLambdaElement]'
SaSelect: TypeAlias = 'sa.Select[Any]'
SaScalarSelect: TypeAlias = 'sa.ScalarSelect[Any]'
SaRow: TypeAlias = 'sa.Row[Any]'
SaColumn: TypeAlias = 'sa.ColumnElement[Any]'
SaExpression: TypeAlias = 'sa.ColumnElement[bool]'
SaLabel: TypeAlias = 'sa.Label[Any]'
SaFromClause: TypeAlias = 'sa.FromClause'
SaSelectable: TypeAlias = 'sa.Selectable'
SaBind: TypeAlias = 'sa.BindParameter[Any]'
SaDialect: TypeAlias = 'sa.Dialect'
| 1,341 | Python | .py | 34 | 37.676471 | 78 | 0.777863 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,731 | server_glue.py | osm-search_Nominatim/src/nominatim_api/v1/server_glue.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Generic part of the server implementation of the v1 API.
Combine with the scaffolding provided for the various Python ASGI frameworks.
"""
from typing import Optional, Any, Type, Dict, cast
from functools import reduce
import dataclasses
from urllib.parse import urlencode
import sqlalchemy as sa
from ..errors import UsageError
from .. import logging as loglib
from ..core import NominatimAPIAsync
from .format import RawDataList
from ..types import DataLayer, GeometryFormat, PlaceRef, PlaceID, OsmID, Point
from ..status import StatusResult
from ..results import DetailedResult, ReverseResults, SearchResult, SearchResults
from ..localization import Locales
from . import helpers
from ..server import content_types as ct
from ..server.asgi_adaptor import ASGIAdaptor
def build_response(adaptor: ASGIAdaptor, output: str, status: int = 200,
num_results: int = 0) -> Any:
""" Create a response from the given output. Wraps a JSONP function
around the response, if necessary.
"""
if adaptor.content_type == ct.CONTENT_JSON and status == 200:
jsonp = adaptor.get('json_callback')
if jsonp is not None:
if any(not part.isidentifier() for part in jsonp.split('.')):
adaptor.raise_error('Invalid json_callback value')
output = f"{jsonp}({output})"
adaptor.content_type = 'application/javascript; charset=utf-8'
return adaptor.create_response(status, output, num_results)
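# Editor's sketch: the json_callback check above accepts dotted JavaScript
# identifiers and rejects anything that could inject code (names illustrative).
def _valid_jsonp_callback(name: str) -> bool:
    return all(part.isidentifier() for part in name.split('.'))
# _valid_jsonp_callback('myApp.onResult') -> True
# _valid_jsonp_callback('alert(1)')       -> False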
def get_accepted_languages(adaptor: ASGIAdaptor) -> str:
""" Return the accepted languages.
"""
return adaptor.get('accept-language')\
or adaptor.get_header('accept-language')\
or adaptor.config().DEFAULT_LANGUAGE
def setup_debugging(adaptor: ASGIAdaptor) -> bool:
""" Set up collection of debug information if requested.
Return True when debugging was requested.
"""
if adaptor.get_bool('debug', False):
loglib.set_log_output('html')
adaptor.content_type = ct.CONTENT_HTML
return True
return False
def get_layers(adaptor: ASGIAdaptor) -> Optional[DataLayer]:
""" Return a parsed version of the layer parameter.
"""
param = adaptor.get('layer', None)
if param is None:
return None
return cast(DataLayer,
reduce(DataLayer.__or__,
(getattr(DataLayer, s.upper()) for s in param.split(','))))
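# Editor's sketch: how a comma-separated 'layer' parameter folds into a single
# DataLayer flag, mirroring get_layers() without the adaptor plumbing.
def _parse_layer_string(param: str) -> DataLayer:
    return cast(DataLayer,
                reduce(DataLayer.__or__,
                       (getattr(DataLayer, s.upper()) for s in param.split(','))))
# _parse_layer_string('address,poi') == DataLayer.ADDRESS | DataLayer.POI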
def parse_format(adaptor: ASGIAdaptor, result_type: Type[Any], default: str) -> str:
""" Get and check the 'format' parameter and prepare the formatter.
`result_type` is the type of result to be returned by the function
and `default` the format value to assume when no parameter is present.
"""
fmt = adaptor.get('format', default=default)
assert fmt is not None
formatting = adaptor.formatting()
if not formatting.supports_format(result_type, fmt):
adaptor.raise_error("Parameter 'format' must be one of: " +
', '.join(formatting.list_formats(result_type)))
adaptor.content_type = formatting.get_content_type(fmt)
return fmt
def parse_geometry_details(adaptor: ASGIAdaptor, fmt: str) -> Dict[str, Any]:
""" Create details structure from the supplied geometry parameters.
"""
numgeoms = 0
output = GeometryFormat.NONE
if adaptor.get_bool('polygon_geojson', False):
output |= GeometryFormat.GEOJSON
numgeoms += 1
if fmt not in ('geojson', 'geocodejson'):
if adaptor.get_bool('polygon_text', False):
output |= GeometryFormat.TEXT
numgeoms += 1
if adaptor.get_bool('polygon_kml', False):
output |= GeometryFormat.KML
numgeoms += 1
if adaptor.get_bool('polygon_svg', False):
output |= GeometryFormat.SVG
numgeoms += 1
if numgeoms > adaptor.config().get_int('POLYGON_OUTPUT_MAX_TYPES'):
adaptor.raise_error('Too many polygon output options selected.')
return {'address_details': True,
'geometry_simplification': adaptor.get_float('polygon_threshold', 0.0),
'geometry_output': output
}
async def status_endpoint(api: NominatimAPIAsync, params: ASGIAdaptor) -> Any:
""" Server glue for /status endpoint. See API docs for details.
"""
result = await api.status()
fmt = parse_format(params, StatusResult, 'text')
if fmt == 'text' and result.status:
status_code = 500
else:
status_code = 200
return build_response(params, params.formatting().format_result(result, fmt, {}),
status=status_code)
async def details_endpoint(api: NominatimAPIAsync, params: ASGIAdaptor) -> Any:
""" Server glue for /details endpoint. See API docs for details.
"""
fmt = parse_format(params, DetailedResult, 'json')
place_id = params.get_int('place_id', 0)
place: PlaceRef
if place_id:
place = PlaceID(place_id)
else:
osmtype = params.get('osmtype')
if osmtype is None:
params.raise_error("Missing ID parameter 'place_id' or 'osmtype'.")
place = OsmID(osmtype, params.get_int('osmid'), params.get('class'))
debug = setup_debugging(params)
locales = Locales.from_accept_languages(get_accepted_languages(params))
result = await api.details(place,
address_details=params.get_bool('addressdetails', False),
linked_places=params.get_bool('linkedplaces', True),
parented_places=params.get_bool('hierarchy', False),
keywords=params.get_bool('keywords', False),
geometry_output = GeometryFormat.GEOJSON
if params.get_bool('polygon_geojson', False)
else GeometryFormat.NONE,
locales=locales
)
if debug:
return build_response(params, loglib.get_and_disable())
if result is None:
params.raise_error('No place with that OSM ID found.', status=404)
output = params.formatting().format_result(result, fmt,
{'locales': locales,
'group_hierarchy': params.get_bool('group_hierarchy', False),
'icon_base_url': params.config().MAPICON_URL})
return build_response(params, output, num_results=1)
async def reverse_endpoint(api: NominatimAPIAsync, params: ASGIAdaptor) -> Any:
""" Server glue for /reverse endpoint. See API docs for details.
"""
fmt = parse_format(params, ReverseResults, 'xml')
debug = setup_debugging(params)
coord = Point(params.get_float('lon'), params.get_float('lat'))
details = parse_geometry_details(params, fmt)
details['max_rank'] = helpers.zoom_to_rank(params.get_int('zoom', 18))
details['layers'] = get_layers(params)
details['locales'] = Locales.from_accept_languages(get_accepted_languages(params))
result = await api.reverse(coord, **details)
if debug:
return build_response(params, loglib.get_and_disable(), num_results=1 if result else 0)
if fmt == 'xml':
queryparts = {'lat': str(coord.lat), 'lon': str(coord.lon), 'format': 'xml'}
zoom = params.get('zoom', None)
if zoom:
queryparts['zoom'] = zoom
query = urlencode(queryparts)
else:
query = ''
fmt_options = {'query': query,
'extratags': params.get_bool('extratags', False),
'namedetails': params.get_bool('namedetails', False),
'addressdetails': params.get_bool('addressdetails', True)}
output = params.formatting().format_result(ReverseResults([result] if result else []),
fmt, fmt_options)
return build_response(params, output, num_results=1 if result else 0)
async def lookup_endpoint(api: NominatimAPIAsync, params: ASGIAdaptor) -> Any:
""" Server glue for /lookup endpoint. See API docs for details.
"""
fmt = parse_format(params, SearchResults, 'xml')
debug = setup_debugging(params)
details = parse_geometry_details(params, fmt)
details['locales'] = Locales.from_accept_languages(get_accepted_languages(params))
places = []
for oid in (params.get('osm_ids') or '').split(','):
oid = oid.strip()
if len(oid) > 1 and oid[0] in 'RNWrnw' and oid[1:].isdigit():
places.append(OsmID(oid[0].upper(), int(oid[1:])))
if len(places) > params.config().get_int('LOOKUP_MAX_COUNT'):
params.raise_error('Too many object IDs.')
if places:
results = await api.lookup(places, **details)
else:
results = SearchResults()
if debug:
return build_response(params, loglib.get_and_disable(), num_results=len(results))
fmt_options = {'extratags': params.get_bool('extratags', False),
'namedetails': params.get_bool('namedetails', False),
'addressdetails': params.get_bool('addressdetails', True)}
output = params.formatting().format_result(results, fmt, fmt_options)
return build_response(params, output, num_results=len(results))
async def _unstructured_search(query: str, api: NominatimAPIAsync,
details: Dict[str, Any]) -> SearchResults:
if not query:
return SearchResults()
# Extract special format for coordinates from query.
query, x, y = helpers.extract_coords_from_query(query)
if x is not None:
assert y is not None
details['near'] = Point(x, y)
details['near_radius'] = 0.1
# If no query is left, revert to reverse search.
if x is not None and not query:
result = await api.reverse(details['near'], **details)
if not result:
return SearchResults()
return SearchResults(
[SearchResult(**{f.name: getattr(result, f.name)
for f in dataclasses.fields(SearchResult)
if hasattr(result, f.name)})])
query, cls, typ = helpers.extract_category_from_query(query)
if cls is not None:
assert typ is not None
return await api.search_category([(cls, typ)], near_query=query, **details)
return await api.search(query, **details)
async def search_endpoint(api: NominatimAPIAsync, params: ASGIAdaptor) -> Any:
""" Server glue for /search endpoint. See API docs for details.
"""
fmt = parse_format(params, SearchResults, 'jsonv2')
debug = setup_debugging(params)
details = parse_geometry_details(params, fmt)
details['countries'] = params.get('countrycodes', None)
details['excluded'] = params.get('exclude_place_ids', None)
details['viewbox'] = params.get('viewbox', None) or params.get('viewboxlbrt', None)
details['bounded_viewbox'] = params.get_bool('bounded', False)
details['dedupe'] = params.get_bool('dedupe', True)
max_results = max(1, min(50, params.get_int('limit', 10)))
details['max_results'] = max_results + min(10, max_results) \
if details['dedupe'] else max_results
details['min_rank'], details['max_rank'] = \
helpers.feature_type_to_rank(params.get('featureType', ''))
if params.get('featureType', None) is not None:
details['layers'] = DataLayer.ADDRESS
else:
details['layers'] = get_layers(params)
details['locales'] = Locales.from_accept_languages(get_accepted_languages(params))
# unstructured query parameters
query = params.get('q', None)
# structured query parameters
queryparts = {}
for key in ('amenity', 'street', 'city', 'county', 'state', 'postalcode', 'country'):
details[key] = params.get(key, None)
if details[key]:
queryparts[key] = details[key]
try:
if query is not None:
if queryparts:
params.raise_error("Structured query parameters"
"(amenity, street, city, county, state, postalcode, country)"
" cannot be used together with 'q' parameter.")
queryparts['q'] = query
results = await _unstructured_search(query, api, details)
else:
query = ', '.join(queryparts.values())
results = await api.search_address(**details)
except UsageError as err:
params.raise_error(str(err))
if details['dedupe'] and len(results) > 1:
results = helpers.deduplicate_results(results, max_results)
if debug:
return build_response(params, loglib.get_and_disable(), num_results=len(results))
if fmt == 'xml':
helpers.extend_query_parts(queryparts, details,
params.get('featureType', ''),
params.get_bool('namedetails', False),
params.get_bool('extratags', False),
(str(r.place_id) for r in results if r.place_id))
queryparts['format'] = fmt
moreurl = params.base_uri() + '/search?' + urlencode(queryparts)
else:
moreurl = ''
fmt_options = {'query': query, 'more_url': moreurl,
'exclude_place_ids': queryparts.get('exclude_place_ids'),
'viewbox': queryparts.get('viewbox'),
'extratags': params.get_bool('extratags', False),
'namedetails': params.get_bool('namedetails', False),
'addressdetails': params.get_bool('addressdetails', False)}
output = params.formatting().format_result(results, fmt, fmt_options)
return build_response(params, output, num_results=len(results))
async def deletable_endpoint(api: NominatimAPIAsync, params: ASGIAdaptor) -> Any:
""" Server glue for /deletable endpoint.
This is a special endpoint that shows polygons that have been
deleted or are broken in the OSM data but are kept in the
Nominatim database to minimize disruption.
"""
fmt = parse_format(params, RawDataList, 'json')
async with api.begin() as conn:
sql = sa.text(""" SELECT p.place_id, country_code,
name->'name' as name, i.*
FROM placex p, import_polygon_delete i
WHERE p.osm_id = i.osm_id AND p.osm_type = i.osm_type
AND p.class = i.class AND p.type = i.type
""")
results = RawDataList(r._asdict() for r in await conn.execute(sql))
return build_response(params, params.formatting().format_result(results, fmt, {}))
async def polygons_endpoint(api: NominatimAPIAsync, params: ASGIAdaptor) -> Any:
""" Server glue for /polygons endpoint.
This is a special endpoint that shows polygons that have changed
their size but are kept in the Nominatim database with their
old area to minimize disruption.
"""
fmt = parse_format(params, RawDataList, 'json')
sql_params: Dict[str, Any] = {
'days': params.get_int('days', -1),
'cls': params.get('class')
}
reduced = params.get_bool('reduced', False)
async with api.begin() as conn:
sql = sa.select(sa.text("""osm_type, osm_id, class, type,
name->'name' as name,
country_code, errormessage, updated"""))\
.select_from(sa.text('import_polygon_error'))
if sql_params['days'] > 0:
sql = sql.where(sa.text("updated > 'now'::timestamp - make_interval(days => :days)"))
if reduced:
sql = sql.where(sa.text("errormessage like 'Area reduced%'"))
if sql_params['cls'] is not None:
sql = sql.where(sa.text("class = :cls"))
sql = sql.order_by(sa.literal_column('updated').desc()).limit(1000)
results = RawDataList(r._asdict() for r in await conn.execute(sql, sql_params))
return build_response(params, params.formatting().format_result(results, fmt, {}))
ROUTES = [
('status', status_endpoint),
('details', details_endpoint),
('reverse', reverse_endpoint),
('lookup', lookup_endpoint),
('search', search_endpoint),
('deletable', deletable_endpoint),
('polygons', polygons_endpoint),
]
| 16,723 | Python | .py | 337 | 39.94362 | 97 | 0.625698 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,732 | classtypes.py | osm-search_Nominatim/src/nominatim_api/v1/classtypes.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Hard-coded information about tag categories.
These tables have been copied verbatim from the old PHP code. For future
versions a more flexible format is required.
"""
from typing import Tuple, Optional, Mapping, Union
from ..results import ReverseResult, SearchResult
from ..types import Bbox
def get_label_tag(category: Tuple[str, str], extratags: Optional[Mapping[str, str]],
rank: int, country: Optional[str]) -> str:
""" Create a label tag for the given place that can be used as an XML name.
"""
if rank < 26 and extratags and 'place' in extratags:
label = extratags['place']
elif rank < 26 and extratags and 'linked_place' in extratags:
label = extratags['linked_place']
elif category == ('boundary', 'administrative'):
label = ADMIN_LABELS.get((country or '', int(rank/2)))\
or ADMIN_LABELS.get(('', int(rank/2)))\
or 'Administrative'
elif category[1] == 'postal_code':
label = 'postcode'
elif rank < 26:
label = category[1] if category[1] != 'yes' else category[0]
elif rank < 28:
label = 'road'
elif category[0] == 'place'\
and category[1] in ('house_number', 'house_name', 'country_code'):
label = category[1]
else:
label = category[0]
return label.lower().replace(' ', '_')
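# Editor's sketch of typical get_label_tag() results (inputs illustrative):
def _label_tag_demo() -> None:
    assert get_label_tag(('highway', 'residential'), None, 26, None) == 'road'
    assert get_label_tag(('boundary', 'administrative'), None, 16, 'de') == 'city'
    assert get_label_tag(('place', 'house_number'), None, 30, None) == 'house_number'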
def bbox_from_result(result: Union[ReverseResult, SearchResult]) -> Bbox:
""" Compute a bounding box for the result. For ways and relations
        a given bounding box is used. For all other objects, a box is computed
around the centroid according to dimensions derived from the
search rank.
"""
if result.category == ('place', 'postcode') and result.bbox is None:
return Bbox.from_point(result.centroid,
0.05 - 0.012 * (result.rank_search - 21))
if (result.osm_object and result.osm_object[0] == 'N') or result.bbox is None:
extent = NODE_EXTENT.get(result.category, 0.00005)
return Bbox.from_point(result.centroid, extent)
return result.bbox
# pylint: disable=line-too-long
OSM_ATTRIBUTION = 'Data © OpenStreetMap contributors, ODbL 1.0. http://osm.org/copyright'
OSM_TYPE_NAME = {
'N': 'node',
'W': 'way',
'R': 'relation'
}
ADMIN_LABELS = {
('', 1): 'Continent',
('', 2): 'Country',
('', 3): 'Region',
('', 4): 'State',
('', 5): 'State District',
('', 6): 'County',
('', 7): 'Municipality',
('', 8): 'City',
('', 9): 'City District',
('', 10): 'Suburb',
('', 11): 'Neighbourhood',
('', 12): 'City Block',
('no', 3): 'State',
('no', 4): 'County',
('se', 3): 'State',
('se', 4): 'County'
}
ICONS = {
('boundary', 'administrative'): 'poi_boundary_administrative',
('place', 'city'): 'poi_place_city',
('place', 'town'): 'poi_place_town',
('place', 'village'): 'poi_place_village',
('place', 'hamlet'): 'poi_place_village',
('place', 'suburb'): 'poi_place_village',
('place', 'locality'): 'poi_place_village',
('place', 'airport'): 'transport_airport2',
('aeroway', 'aerodrome'): 'transport_airport2',
('railway', 'station'): 'transport_train_station2',
('amenity', 'place_of_worship'): 'place_of_worship_unknown3',
('amenity', 'pub'): 'food_pub',
('amenity', 'bar'): 'food_bar',
('amenity', 'university'): 'education_university',
('tourism', 'museum'): 'tourist_museum',
('amenity', 'arts_centre'): 'tourist_art_gallery2',
('tourism', 'zoo'): 'tourist_zoo',
('tourism', 'theme_park'): 'poi_point_of_interest',
('tourism', 'attraction'): 'poi_point_of_interest',
('leisure', 'golf_course'): 'sport_golf',
('historic', 'castle'): 'tourist_castle',
('amenity', 'hospital'): 'health_hospital',
('amenity', 'school'): 'education_school',
('amenity', 'theatre'): 'tourist_theatre',
('amenity', 'library'): 'amenity_library',
('amenity', 'fire_station'): 'amenity_firestation3',
('amenity', 'police'): 'amenity_police2',
('amenity', 'bank'): 'money_bank2',
('amenity', 'post_office'): 'amenity_post_office',
('tourism', 'hotel'): 'accommodation_hotel2',
('amenity', 'cinema'): 'tourist_cinema',
('tourism', 'artwork'): 'tourist_art_gallery2',
('historic', 'archaeological_site'): 'tourist_archaeological2',
('amenity', 'doctors'): 'health_doctors',
('leisure', 'sports_centre'): 'sport_leisure_centre',
('leisure', 'swimming_pool'): 'sport_swimming_outdoor',
('shop', 'supermarket'): 'shopping_supermarket',
('shop', 'convenience'): 'shopping_convenience',
('amenity', 'restaurant'): 'food_restaurant',
('amenity', 'fast_food'): 'food_fastfood',
('amenity', 'cafe'): 'food_cafe',
('tourism', 'guest_house'): 'accommodation_bed_and_breakfast',
('amenity', 'pharmacy'): 'health_pharmacy_dispensing',
('amenity', 'fuel'): 'transport_fuel',
('natural', 'peak'): 'poi_peak',
('natural', 'wood'): 'landuse_coniferous_and_deciduous',
('shop', 'bicycle'): 'shopping_bicycle',
('shop', 'clothes'): 'shopping_clothes',
('shop', 'hairdresser'): 'shopping_hairdresser',
('shop', 'doityourself'): 'shopping_diy',
('shop', 'estate_agent'): 'shopping_estateagent2',
('shop', 'car'): 'shopping_car',
('shop', 'garden_centre'): 'shopping_garden_centre',
('shop', 'car_repair'): 'shopping_car_repair',
('shop', 'bakery'): 'shopping_bakery',
('shop', 'butcher'): 'shopping_butcher',
('shop', 'apparel'): 'shopping_clothes',
('shop', 'laundry'): 'shopping_laundrette',
('shop', 'beverages'): 'shopping_alcohol',
('shop', 'alcohol'): 'shopping_alcohol',
('shop', 'optician'): 'health_opticians',
('shop', 'chemist'): 'health_pharmacy',
('shop', 'gallery'): 'tourist_art_gallery2',
('shop', 'jewelry'): 'shopping_jewelry',
('tourism', 'information'): 'amenity_information',
('historic', 'ruins'): 'tourist_ruin',
('amenity', 'college'): 'education_school',
('historic', 'monument'): 'tourist_monument',
('historic', 'memorial'): 'tourist_monument',
('historic', 'mine'): 'poi_mine',
('tourism', 'caravan_site'): 'accommodation_caravan_park',
('amenity', 'bus_station'): 'transport_bus_station',
('amenity', 'atm'): 'money_atm2',
('tourism', 'viewpoint'): 'tourist_view_point',
('tourism', 'guesthouse'): 'accommodation_bed_and_breakfast',
('railway', 'tram'): 'transport_tram_stop',
('amenity', 'courthouse'): 'amenity_court',
('amenity', 'recycling'): 'amenity_recycling',
('amenity', 'dentist'): 'health_dentist',
('natural', 'beach'): 'tourist_beach',
('railway', 'tram_stop'): 'transport_tram_stop',
('amenity', 'prison'): 'amenity_prison',
('highway', 'bus_stop'): 'transport_bus_stop2'
}
NODE_EXTENT = {
('place', 'continent'): 25,
('place', 'country'): 7,
('place', 'state'): 2.6,
('place', 'province'): 2.6,
('place', 'region'): 1.0,
('place', 'county'): 0.7,
('place', 'city'): 0.16,
('place', 'municipality'): 0.16,
('place', 'island'): 0.32,
('place', 'postcode'): 0.16,
('place', 'town'): 0.04,
('place', 'village'): 0.02,
('place', 'hamlet'): 0.02,
('place', 'district'): 0.02,
('place', 'borough'): 0.02,
('place', 'suburb'): 0.02,
('place', 'locality'): 0.01,
('place', 'neighbourhood'): 0.01,
('place', 'quarter'): 0.01,
('place', 'city_block'): 0.01,
('landuse', 'farm'): 0.01,
('place', 'farm'): 0.01,
('place', 'airport'): 0.015,
('aeroway', 'aerodrome'): 0.015,
('railway', 'station'): 0.005
}
| 7,840 | Python | .py | 188 | 36.659574 | 90 | 0.60558 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,733 | format.py | osm-search_Nominatim/src/nominatim_api/v1/format.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Output formatters for API version v1.
"""
from typing import List, Dict, Mapping, Any
import collections
import datetime as dt
from ..utils.json_writer import JsonWriter
from ..status import StatusResult
from ..results import DetailedResult, ReverseResults, SearchResults, \
AddressLines, AddressLine
from ..localization import Locales
from ..result_formatting import FormatDispatcher
from .classtypes import ICONS
from . import format_json, format_xml
from .. import logging as loglib
from ..server import content_types as ct
class RawDataList(List[Dict[str, Any]]):
""" Data type for formatting raw data lists 'as is' in json.
"""
dispatch = FormatDispatcher({'text': ct.CONTENT_TEXT,
'xml': ct.CONTENT_XML,
'debug': ct.CONTENT_HTML})
@dispatch.error_format_func
def _format_error(content_type: str, msg: str, status: int) -> str:
if content_type == ct.CONTENT_XML:
return f"""<?xml version="1.0" encoding="UTF-8" ?>
<error>
<code>{status}</code>
<message>{msg}</message>
</error>
"""
if content_type == ct.CONTENT_JSON:
return f"""{{"error":{{"code":{status},"message":"{msg}"}}}}"""
if content_type == ct.CONTENT_HTML:
loglib.log().section('Execution error')
loglib.log().var_dump('Status', status)
loglib.log().var_dump('Message', msg)
return loglib.get_and_disable()
return f"ERROR {status}: {msg}"
@dispatch.format_func(StatusResult, 'text')
def _format_status_text(result: StatusResult, _: Mapping[str, Any]) -> str:
if result.status:
return f"ERROR: {result.message}"
return 'OK'
@dispatch.format_func(StatusResult, 'json')
def _format_status_json(result: StatusResult, _: Mapping[str, Any]) -> str:
out = JsonWriter()
out.start_object()\
.keyval('status', result.status)\
.keyval('message', result.message)\
.keyval_not_none('data_updated', result.data_updated,
lambda v: v.isoformat())\
.keyval('software_version', str(result.software_version))\
.keyval_not_none('database_version', result.database_version, str)\
.end_object()
return out()
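# Editor's sketch of the fluent JsonWriter API used throughout this module;
# the writer object is callable and returns the assembled JSON string.
def _json_writer_demo() -> str:
    out = JsonWriter()
    out.start_object()\
       .keyval('status', 0)\
       .keyval('message', 'OK')\
       .end_object()
    return out()   # e.g. '{"status":0,"message":"OK"}'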
def _add_address_row(writer: JsonWriter, row: AddressLine,
locales: Locales) -> None:
writer.start_object()\
.keyval('localname', locales.display_name(row.names))\
.keyval_not_none('place_id', row.place_id)
if row.osm_object is not None:
writer.keyval('osm_id', row.osm_object[1])\
.keyval('osm_type', row.osm_object[0])
if row.extratags:
writer.keyval_not_none('place_type', row.extratags.get('place_type'))
writer.keyval('class', row.category[0])\
.keyval('type', row.category[1])\
.keyval_not_none('admin_level', row.admin_level)\
.keyval('rank_address', row.rank_address)\
.keyval('distance', row.distance)\
.keyval('isaddress', row.isaddress)\
.end_object()
def _add_address_rows(writer: JsonWriter, section: str, rows: AddressLines,
locales: Locales) -> None:
writer.key(section).start_array()
for row in rows:
_add_address_row(writer, row, locales)
writer.next()
writer.end_array().next()
def _add_parent_rows_grouped(writer: JsonWriter, rows: AddressLines,
locales: Locales) -> None:
# group by category type
data = collections.defaultdict(list)
for row in rows:
sub = JsonWriter()
_add_address_row(sub, row, locales)
data[row.category[1]].append(sub())
writer.key('hierarchy').start_object()
for group, grouped in data.items():
writer.key(group).start_array()
grouped.sort() # sorts alphabetically by local name
for line in grouped:
writer.raw(line).next()
writer.end_array().next()
writer.end_object().next()
@dispatch.format_func(DetailedResult, 'json')
def _format_details_json(result: DetailedResult, options: Mapping[str, Any]) -> str:
locales = options.get('locales', Locales())
geom = result.geometry.get('geojson')
centroid = result.centroid.to_geojson()
out = JsonWriter()
out.start_object()\
.keyval_not_none('place_id', result.place_id)\
.keyval_not_none('parent_place_id', result.parent_place_id)
if result.osm_object is not None:
out.keyval('osm_type', result.osm_object[0])\
.keyval('osm_id', result.osm_object[1])
out.keyval('category', result.category[0])\
.keyval('type', result.category[1])\
.keyval('admin_level', result.admin_level)\
.keyval('localname', result.locale_name or '')\
.keyval('names', result.names or {})\
.keyval('addresstags', result.address or {})\
.keyval_not_none('housenumber', result.housenumber)\
.keyval_not_none('calculated_postcode', result.postcode)\
.keyval_not_none('country_code', result.country_code)\
.keyval_not_none('indexed_date', result.indexed_date, lambda v: v.isoformat())\
.keyval_not_none('importance', result.importance)\
.keyval('calculated_importance', result.calculated_importance())\
.keyval('extratags', result.extratags or {})\
.keyval_not_none('calculated_wikipedia', result.wikipedia)\
.keyval('rank_address', result.rank_address)\
.keyval('rank_search', result.rank_search)\
.keyval('isarea', 'Polygon' in (geom or result.geometry.get('type') or ''))\
.key('centroid').raw(centroid).next()\
.key('geometry').raw(geom or centroid).next()
if options.get('icon_base_url', None):
icon = ICONS.get(result.category)
if icon:
out.keyval('icon', f"{options['icon_base_url']}/{icon}.p.20.png")
if result.address_rows is not None:
_add_address_rows(out, 'address', result.address_rows, locales)
if result.linked_rows:
_add_address_rows(out, 'linked_places', result.linked_rows, locales)
if result.name_keywords is not None or result.address_keywords is not None:
out.key('keywords').start_object()
for sec, klist in (('name', result.name_keywords), ('address', result.address_keywords)):
out.key(sec).start_array()
for word in (klist or []):
out.start_object()\
.keyval('id', word.word_id)\
.keyval('token', word.word_token)\
.end_object().next()
out.end_array().next()
out.end_object().next()
if result.parented_rows is not None:
if options.get('group_hierarchy', False):
_add_parent_rows_grouped(out, result.parented_rows, locales)
else:
_add_address_rows(out, 'hierarchy', result.parented_rows, locales)
out.end_object()
return out()
@dispatch.format_func(ReverseResults, 'xml')
def _format_reverse_xml(results: ReverseResults, options: Mapping[str, Any]) -> str:
return format_xml.format_base_xml(results,
options, True, 'reversegeocode',
{'querystring': options.get('query', '')})
@dispatch.format_func(ReverseResults, 'geojson')
def _format_reverse_geojson(results: ReverseResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_geojson(results, options, True)
@dispatch.format_func(ReverseResults, 'geocodejson')
def _format_reverse_geocodejson(results: ReverseResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_geocodejson(results, options, True)
@dispatch.format_func(ReverseResults, 'json')
def _format_reverse_json(results: ReverseResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_json(results, options, True,
class_label='class')
@dispatch.format_func(ReverseResults, 'jsonv2')
def _format_reverse_jsonv2(results: ReverseResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_json(results, options, True,
class_label='category')
@dispatch.format_func(SearchResults, 'xml')
def _format_search_xml(results: SearchResults, options: Mapping[str, Any]) -> str:
extra = {'querystring': options.get('query', '')}
for attr in ('more_url', 'exclude_place_ids', 'viewbox'):
if options.get(attr):
extra[attr] = options[attr]
return format_xml.format_base_xml(results, options, False, 'searchresults',
extra)
@dispatch.format_func(SearchResults, 'geojson')
def _format_search_geojson(results: SearchResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_geojson(results, options, False)
@dispatch.format_func(SearchResults, 'geocodejson')
def _format_search_geocodejson(results: SearchResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_geocodejson(results, options, False)
@dispatch.format_func(SearchResults, 'json')
def _format_search_json(results: SearchResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_json(results, options, False,
class_label='class')
@dispatch.format_func(SearchResults, 'jsonv2')
def _format_search_jsonv2(results: SearchResults,
options: Mapping[str, Any]) -> str:
return format_json.format_base_json(results, options, False,
class_label='category')
@dispatch.format_func(RawDataList, 'json')
def _format_raw_data_json(results: RawDataList, _: Mapping[str, Any]) -> str:
out = JsonWriter()
out.start_array()
for res in results:
out.start_object()
for k, v in res.items():
if isinstance(v, dt.datetime):
out.keyval(k, v.isoformat(sep= ' ', timespec='seconds'))
else:
out.keyval(k, v)
out.end_object().next()
out.end_array()
return out()
| 10,642 | Python | .py | 222 | 38.472973 | 97 | 0.622091 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,734 | __init__.py | osm-search_Nominatim/src/nominatim_api/v1/__init__.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of API version v1 (aka the legacy version).
"""
#pylint: disable=useless-import-alias
from .server_glue import ROUTES as ROUTES
| 360 | Python | .py | 11 | 31.545455 | 58 | 0.766571 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,735 | format_xml.py | osm-search_Nominatim/src/nominatim_api/v1/format_xml.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for output of results in XML format.
"""
from typing import Mapping, Any, Optional, Union
import datetime as dt
import xml.etree.ElementTree as ET
from ..results import AddressLines, ReverseResult, ReverseResults, \
SearchResult, SearchResults
from . import classtypes as cl
#pylint: disable=too-many-branches
def _write_xml_address(root: ET.Element, address: AddressLines,
country_code: Optional[str]) -> None:
parts = {}
for line in address:
if line.isaddress:
if line.local_name:
label = cl.get_label_tag(line.category, line.extratags,
line.rank_address, country_code)
if label not in parts:
parts[label] = line.local_name
if line.names and 'ISO3166-2' in line.names and line.admin_level:
parts[f"ISO3166-2-lvl{line.admin_level}"] = line.names['ISO3166-2']
for k,v in parts.items():
ET.SubElement(root, k).text = v
if country_code:
ET.SubElement(root, 'country_code').text = country_code
def _create_base_entry(result: Union[ReverseResult, SearchResult],
root: ET.Element, simple: bool) -> ET.Element:
place = ET.SubElement(root, 'result' if simple else 'place')
if result.place_id is not None:
place.set('place_id', str(result.place_id))
if result.osm_object:
osm_type = cl.OSM_TYPE_NAME.get(result.osm_object[0], None)
if osm_type is not None:
place.set('osm_type', osm_type)
place.set('osm_id', str(result.osm_object[1]))
if result.names and 'ref' in result.names:
place.set('ref', result.names['ref'])
elif result.locale_name:
# bug reproduced from PHP
place.set('ref', result.locale_name)
place.set('lat', f"{result.centroid.lat:.7f}")
place.set('lon', f"{result.centroid.lon:.7f}")
bbox = cl.bbox_from_result(result)
place.set('boundingbox',
f"{bbox.minlat:.7f},{bbox.maxlat:.7f},{bbox.minlon:.7f},{bbox.maxlon:.7f}")
place.set('place_rank', str(result.rank_search))
place.set('address_rank', str(result.rank_address))
if result.geometry:
for key in ('text', 'svg'):
if key in result.geometry:
place.set('geo' + key, result.geometry[key])
if 'kml' in result.geometry:
ET.SubElement(root if simple else place, 'geokml')\
.append(ET.fromstring(result.geometry['kml']))
if 'geojson' in result.geometry:
place.set('geojson', result.geometry['geojson'])
if simple:
place.text = result.display_name or ''
else:
place.set('display_name', result.display_name or '')
place.set('class', result.category[0])
place.set('type', result.category[1])
place.set('importance', str(result.calculated_importance()))
return place
def format_base_xml(results: Union[ReverseResults, SearchResults],
options: Mapping[str, Any],
simple: bool, xml_root_tag: str,
xml_extra_info: Mapping[str, str]) -> str:
""" Format the result into an XML response. With 'simple' exactly one
result will be output, otherwise a list.
"""
root = ET.Element(xml_root_tag)
root.set('timestamp', dt.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S +00:00'))
root.set('attribution', cl.OSM_ATTRIBUTION)
for k, v in xml_extra_info.items():
root.set(k, v)
if simple and not results:
ET.SubElement(root, 'error').text = 'Unable to geocode'
for result in results:
place = _create_base_entry(result, root, simple)
if not simple and options.get('icon_base_url', None):
icon = cl.ICONS.get(result.category)
if icon:
place.set('icon', icon)
if options.get('addressdetails', False) and result.address_rows:
_write_xml_address(ET.SubElement(root, 'addressparts') if simple else place,
result.address_rows, result.country_code)
if options.get('extratags', False):
eroot = ET.SubElement(root if simple else place, 'extratags')
if result.extratags:
for k, v in result.extratags.items():
ET.SubElement(eroot, 'tag', attrib={'key': k, 'value': v})
if options.get('namedetails', False):
eroot = ET.SubElement(root if simple else place, 'namedetails')
if result.names:
for k,v in result.names.items():
ET.SubElement(eroot, 'name', attrib={'desc': k}).text = v
return '<?xml version="1.0" encoding="UTF-8" ?>\n' + ET.tostring(root, encoding='unicode')
| 5,025 | Python | .py | 105 | 38.314286 | 94 | 0.614411 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,736 | helpers.py | osm-search_Nominatim/src/nominatim_api/v1/helpers.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for parsing parameters and outputting data
specifically for the v1 version of the API.
"""
from typing import Tuple, Optional, Any, Dict, Iterable
from itertools import chain
import re
from ..results import SearchResult, SearchResults, SourceTable
from ..types import SearchDetails, GeometryFormat
REVERSE_MAX_RANKS = [2, 2, 2, # 0-2 Continent/Sea
4, 4, # 3-4 Country
8, # 5 State
10, 10, # 6-7 Region
12, 12, # 8-9 County
16, 17, # 10-11 City
18, # 12 Town
19, # 13 Village/Suburb
22, # 14 Hamlet/Neighbourhood
25, # 15 Localities
26, # 16 Major Streets
27, # 17 Minor Streets
30 # 18 Building
]
def zoom_to_rank(zoom: int) -> int:
""" Convert a zoom parameter into a rank according to the v1 API spec.
"""
return REVERSE_MAX_RANKS[max(0, min(18, zoom))]
FEATURE_TYPE_TO_RANK: Dict[Optional[str], Tuple[int, int]] = {
'country': (4, 4),
'state': (8, 8),
'city': (14, 16),
'settlement': (8, 20)
}
def feature_type_to_rank(feature_type: Optional[str]) -> Tuple[int, int]:
""" Convert a feature type parameter to a tuple of
feature type name, minimum rank and maximum rank.
"""
return FEATURE_TYPE_TO_RANK.get(feature_type, (0, 30))
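# Editor's sketch of the two rank mappings above (values illustrative):
def _rank_mapping_demo() -> None:
    assert zoom_to_rank(10) == 16                  # city-level zoom
    assert zoom_to_rank(99) == 30                  # out-of-range zooms clamp to 18
    assert feature_type_to_rank('city') == (14, 16)
    assert feature_type_to_rank(None) == (0, 30)   # unknown type: full rank range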
#pylint: disable=too-many-arguments,too-many-branches
def extend_query_parts(queryparts: Dict[str, Any], details: Dict[str, Any],
feature_type: Optional[str],
namedetails: bool, extratags: bool,
excluded: Iterable[str]) -> None:
""" Add parameters from details dictionary to the query parts
dictionary which is suitable as URL parameter dictionary.
"""
parsed = SearchDetails.from_kwargs(details)
if parsed.geometry_output != GeometryFormat.NONE:
if GeometryFormat.GEOJSON & parsed.geometry_output:
queryparts['polygon_geojson'] = '1'
if GeometryFormat.KML & parsed.geometry_output:
queryparts['polygon_kml'] = '1'
if GeometryFormat.SVG & parsed.geometry_output:
queryparts['polygon_svg'] = '1'
if GeometryFormat.TEXT & parsed.geometry_output:
queryparts['polygon_text'] = '1'
if parsed.address_details:
queryparts['addressdetails'] = '1'
if namedetails:
queryparts['namedetails'] = '1'
if extratags:
queryparts['extratags'] = '1'
if parsed.geometry_simplification > 0.0:
queryparts['polygon_threshold'] = f"{parsed.geometry_simplification:.6g}"
if parsed.max_results != 10:
queryparts['limit'] = str(parsed.max_results)
if parsed.countries:
queryparts['countrycodes'] = ','.join(parsed.countries)
queryparts['exclude_place_ids'] = \
','.join(chain(excluded, map(str, (e for e in parsed.excluded if e > 0))))
if parsed.viewbox:
queryparts['viewbox'] = ','.join(f"{c:.7g}" for c in parsed.viewbox.coords)
if parsed.bounded_viewbox:
queryparts['bounded'] = '1'
if not details['dedupe']:
queryparts['dedupe'] = '0'
if feature_type in FEATURE_TYPE_TO_RANK:
queryparts['featureType'] = feature_type
def deduplicate_results(results: SearchResults, max_results: int) -> SearchResults:
""" Remove results that look like duplicates.
Two results are considered the same if they have the same OSM ID
or if they have the same category, display name and rank.
"""
osm_ids_done = set()
classification_done = set()
deduped = SearchResults()
for result in results:
if result.source_table == SourceTable.POSTCODE:
assert result.names and 'ref' in result.names
if any(_is_postcode_relation_for(r, result.names['ref']) for r in results):
continue
if result.source_table == SourceTable.PLACEX:
classification = (result.osm_object[0] if result.osm_object else None,
result.category,
result.display_name,
result.rank_address)
if result.osm_object not in osm_ids_done \
and classification not in classification_done:
deduped.append(result)
osm_ids_done.add(result.osm_object)
classification_done.add(classification)
else:
deduped.append(result)
if len(deduped) >= max_results:
break
return deduped
def _is_postcode_relation_for(result: SearchResult, postcode: str) -> bool:
return result.source_table == SourceTable.PLACEX \
and result.osm_object is not None \
and result.osm_object[0] == 'R' \
and result.category == ('boundary', 'postal_code') \
and result.names is not None \
and result.names.get('ref') == postcode
def _deg(axis:str) -> str:
return f"(?P<{axis}_deg>\\d+\\.\\d+)°?"
def _deg_min(axis: str) -> str:
return f"(?P<{axis}_deg>\\d+)[°\\s]+(?P<{axis}_min>[\\d.]+)[′']*"
def _deg_min_sec(axis: str) -> str:
return f"(?P<{axis}_deg>\\d+)[°\\s]+(?P<{axis}_min>\\d+)[′'\\s]+(?P<{axis}_sec>[\\d.]+)[\"″]*"
COORD_REGEX = [re.compile(r'(?:(?P<pre>.*?)\s+)??' + r + r'(?:\s+(?P<post>.*))?') for r in (
r"(?P<ns>[NS])\s*" + _deg('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg('lon'),
_deg('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg('lon') + r"\s*(?P<ew>[EW])",
r"(?P<ns>[NS])\s*" + _deg_min('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg_min('lon'),
_deg_min('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg_min('lon') + r"\s*(?P<ew>[EW])",
r"(?P<ns>[NS])\s*" + _deg_min_sec('lat') + r"[\s,]+" + r"(?P<ew>[EW])\s*" + _deg_min_sec('lon'),
_deg_min_sec('lat') + r"\s*(?P<ns>[NS])[\s,]+" + _deg_min_sec('lon') + r"\s*(?P<ew>[EW])",
r"\[?(?P<lat_deg>[+-]?\d+\.\d+)[\s,]+(?P<lon_deg>[+-]?\d+\.\d+)\]?"
)]
def extract_coords_from_query(query: str) -> Tuple[str, Optional[float], Optional[float]]:
""" Look for something that is formatted like a coordinate at the
beginning or end of the query. If found, extract the coordinate and
return the remaining query (or the empty string if the query
consisted of nothing but a coordinate).
Only the first match will be returned.
"""
for regex in COORD_REGEX:
match = regex.fullmatch(query)
if match is None:
continue
groups = match.groupdict()
if not groups['pre'] or not groups['post']:
x = float(groups['lon_deg']) \
+ float(groups.get('lon_min', 0.0)) / 60.0 \
+ float(groups.get('lon_sec', 0.0)) / 3600.0
if groups.get('ew') == 'W':
x = -x
y = float(groups['lat_deg']) \
+ float(groups.get('lat_min', 0.0)) / 60.0 \
+ float(groups.get('lat_sec', 0.0)) / 3600.0
if groups.get('ns') == 'S':
y = -y
return groups['pre'] or groups['post'] or '', x, y
return query, None, None
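# Editor's sketch of the coordinate extraction (query strings illustrative):
def _coord_extraction_demo() -> None:
    assert extract_coords_from_query('48.123 11.456') == ('', 11.456, 48.123)
    query, x, _ = extract_coords_from_query("pub N 48° 7' E 11° 30'")
    assert query == 'pub' and x == 11.5
    assert extract_coords_from_query('main street') == ('main street', None, None)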
CATEGORY_REGEX = re.compile(r'(?P<pre>.*?)\[(?P<cls>[a-zA-Z_]+)=(?P<typ>[a-zA-Z_]+)\](?P<post>.*)')
def extract_category_from_query(query: str) -> Tuple[str, Optional[str], Optional[str]]:
""" Extract a hidden category specification of the form '[key=value]' from
the query. If found, extract key and value and
return the remaining query (or the empty string if the query
consisted of nothing but a category).
Only the first match will be returned.
"""
match = CATEGORY_REGEX.search(query)
if match is not None:
return (match.group('pre').strip() + ' ' + match.group('post').strip()).strip(), \
match.group('cls'), match.group('typ')
return query, None, None
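# Editor's sketch of the hidden category syntax (query strings illustrative):
def _category_extraction_demo() -> None:
    assert extract_category_from_query('bars [amenity=pub] in munich') \
        == ('bars in munich', 'amenity', 'pub')
    assert extract_category_from_query('[shop=bakery]') == ('', 'shop', 'bakery')
    assert extract_category_from_query('plain query') == ('plain query', None, None)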
| 8,311 | Python | .py | 172 | 39.215116 | 100 | 0.56993 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,737 | format_json.py | osm-search_Nominatim/src/nominatim_api/v1/format_json.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for output of results in json formats.
"""
from typing import Mapping, Any, Optional, Tuple, Union
from ..utils.json_writer import JsonWriter
from ..results import AddressLines, ReverseResults, SearchResults
from . import classtypes as cl
#pylint: disable=too-many-branches
def _write_osm_id(out: JsonWriter, osm_object: Optional[Tuple[str, int]]) -> None:
if osm_object is not None:
out.keyval_not_none('osm_type', cl.OSM_TYPE_NAME.get(osm_object[0], None))\
.keyval('osm_id', osm_object[1])
def _write_typed_address(out: JsonWriter, address: Optional[AddressLines],
country_code: Optional[str]) -> None:
parts = {}
for line in (address or []):
if line.isaddress:
if line.local_name:
label = cl.get_label_tag(line.category, line.extratags,
line.rank_address, country_code)
if label not in parts:
parts[label] = line.local_name
if line.names and 'ISO3166-2' in line.names and line.admin_level:
parts[f"ISO3166-2-lvl{line.admin_level}"] = line.names['ISO3166-2']
for k, v in parts.items():
out.keyval(k, v)
if country_code:
out.keyval('country_code', country_code)
def _write_geocodejson_address(out: JsonWriter,
address: Optional[AddressLines],
obj_place_id: Optional[int],
country_code: Optional[str]) -> None:
extra = {}
for line in (address or []):
if line.isaddress and line.local_name:
if line.category[1] in ('postcode', 'postal_code'):
out.keyval('postcode', line.local_name)
elif line.category[1] == 'house_number':
out.keyval('housenumber', line.local_name)
elif (obj_place_id is None or obj_place_id != line.place_id) \
and line.rank_address >= 4 and line.rank_address < 28:
rank_name = GEOCODEJSON_RANKS[line.rank_address]
if rank_name not in extra:
extra[rank_name] = line.local_name
for k, v in extra.items():
out.keyval(k, v)
if country_code:
out.keyval('country_code', country_code)
def format_base_json(results: Union[ReverseResults, SearchResults],
options: Mapping[str, Any], simple: bool,
class_label: str) -> str:
""" Return the result list as a simple json string in custom Nominatim format.
"""
out = JsonWriter()
if simple:
if not results:
return '{"error":"Unable to geocode"}'
else:
out.start_array()
for result in results:
out.start_object()\
.keyval_not_none('place_id', result.place_id)\
             .keyval('licence', cl.OSM_ATTRIBUTION)
_write_osm_id(out, result.osm_object)
out.keyval('lat', f"{result.centroid.lat}")\
.keyval('lon', f"{result.centroid.lon}")\
.keyval(class_label, result.category[0])\
.keyval('type', result.category[1])\
.keyval('place_rank', result.rank_search)\
.keyval('importance', result.calculated_importance())\
.keyval('addresstype', cl.get_label_tag(result.category, result.extratags,
result.rank_address,
result.country_code))\
.keyval('name', result.locale_name or '')\
.keyval('display_name', result.display_name or '')
if options.get('icon_base_url', None):
icon = cl.ICONS.get(result.category)
if icon:
out.keyval('icon', f"{options['icon_base_url']}/{icon}.p.20.png")
if options.get('addressdetails', False):
out.key('address').start_object()
_write_typed_address(out, result.address_rows, result.country_code)
out.end_object().next()
if options.get('extratags', False):
out.keyval('extratags', result.extratags)
if options.get('namedetails', False):
out.keyval('namedetails', result.names)
bbox = cl.bbox_from_result(result)
out.key('boundingbox').start_array()\
.value(f"{bbox.minlat:0.7f}").next()\
.value(f"{bbox.maxlat:0.7f}").next()\
.value(f"{bbox.minlon:0.7f}").next()\
.value(f"{bbox.maxlon:0.7f}").next()\
.end_array().next()
if result.geometry:
for key in ('text', 'kml'):
out.keyval_not_none('geo' + key, result.geometry.get(key))
if 'geojson' in result.geometry:
out.key('geojson').raw(result.geometry['geojson']).next()
out.keyval_not_none('svg', result.geometry.get('svg'))
out.end_object()
if simple:
return out()
out.next()
out.end_array()
return out()
def format_base_geojson(results: Union[ReverseResults, SearchResults],
options: Mapping[str, Any],
simple: bool) -> str:
""" Return the result list as a geojson string.
"""
if not results and simple:
return '{"error":"Unable to geocode"}'
out = JsonWriter()
out.start_object()\
.keyval('type', 'FeatureCollection')\
.keyval('licence', cl.OSM_ATTRIBUTION)\
.key('features').start_array()
for result in results:
out.start_object()\
.keyval('type', 'Feature')\
.key('properties').start_object()
out.keyval_not_none('place_id', result.place_id)
_write_osm_id(out, result.osm_object)
out.keyval('place_rank', result.rank_search)\
.keyval('category', result.category[0])\
.keyval('type', result.category[1])\
.keyval('importance', result.calculated_importance())\
.keyval('addresstype', cl.get_label_tag(result.category, result.extratags,
result.rank_address,
result.country_code))\
.keyval('name', result.locale_name or '')\
.keyval('display_name', result.display_name or '')
if options.get('addressdetails', False):
out.key('address').start_object()
_write_typed_address(out, result.address_rows, result.country_code)
out.end_object().next()
if options.get('extratags', False):
out.keyval('extratags', result.extratags)
if options.get('namedetails', False):
out.keyval('namedetails', result.names)
out.end_object().next() # properties
out.key('bbox').start_array()
for coord in cl.bbox_from_result(result).coords:
out.float(coord, 7).next()
out.end_array().next()
out.key('geometry').raw(result.geometry.get('geojson')
or result.centroid.to_geojson()).next()
out.end_object().next()
out.end_array().next().end_object()
return out()
def format_base_geocodejson(results: Union[ReverseResults, SearchResults],
options: Mapping[str, Any], simple: bool) -> str:
""" Return the result list as a geocodejson string.
"""
if not results and simple:
return '{"error":"Unable to geocode"}'
out = JsonWriter()
out.start_object()\
.keyval('type', 'FeatureCollection')\
.key('geocoding').start_object()\
.keyval('version', '0.1.0')\
.keyval('attribution', cl.OSM_ATTRIBUTION)\
.keyval('licence', 'ODbL')\
.keyval_not_none('query', options.get('query'))\
.end_object().next()\
.key('features').start_array()
for result in results:
out.start_object()\
.keyval('type', 'Feature')\
.key('properties').start_object()\
.key('geocoding').start_object()
out.keyval_not_none('place_id', result.place_id)
_write_osm_id(out, result.osm_object)
out.keyval('osm_key', result.category[0])\
.keyval('osm_value', result.category[1])\
.keyval('type', GEOCODEJSON_RANKS[max(3, min(28, result.rank_address))])\
.keyval_not_none('accuracy', getattr(result, 'distance', None), transform=int)\
.keyval('label', result.display_name or '')\
.keyval_not_none('name', result.locale_name or None)
if options.get('addressdetails', False):
_write_geocodejson_address(out, result.address_rows, result.place_id,
result.country_code)
out.key('admin').start_object()
if result.address_rows:
for line in result.address_rows:
if line.isaddress and (line.admin_level or 15) < 15 and line.local_name \
and line.category[0] == 'boundary' and line.category[1] == 'administrative':
out.keyval(f"level{line.admin_level}", line.local_name)
out.end_object().next()
out.end_object().next().end_object().next()
out.key('geometry').raw(result.geometry.get('geojson')
or result.centroid.to_geojson()).next()
out.end_object().next()
out.end_array().next().end_object()
return out()
GEOCODEJSON_RANKS = {
3: 'locality',
4: 'country',
5: 'state', 6: 'state', 7: 'state', 8: 'state', 9: 'state',
10: 'county', 11: 'county', 12: 'county',
13: 'city', 14: 'city', 15: 'city', 16: 'city',
17: 'district', 18: 'district', 19: 'district', 20: 'district', 21: 'district',
22: 'locality', 23: 'locality', 24: 'locality',
25: 'street', 26: 'street', 27: 'street', 28: 'house'}
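# Illustrative sketch (not part of the original module): how an address rank
# is mapped to its geocodejson label. The clamping mirrors the expression used
# in format_base_geocodejson above; the helper name is hypothetical.
def _example_rank_label(rank_address: int) -> str:
    """ Return the geocodejson type label for an address rank,
        clamped to the supported range of 3 to 28.
    """
    return GEOCODEJSON_RANKS[max(3, min(28, rank_address))]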
| 10,164
|
Python
|
.py
| 211
| 36.236967
| 99
| 0.567095
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,738
|
json_writer.py
|
osm-search_Nominatim/src/nominatim_api/utils/json_writer.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Streaming JSON encoder.
"""
from typing import Any, TypeVar, Optional, Callable
import io
try:
import ujson as json
except ModuleNotFoundError:
import json # type: ignore[no-redef]
T = TypeVar('T') # pylint: disable=invalid-name
class JsonWriter:
""" JSON encoder that renders the output directly into an output
stream. This is a very simple writer which produces JSON in
as compact a form as possible.
The writer does not check for syntactic correctness. It is the
responsibility of the caller to call the write functions in an
order that produces correct JSON.
All functions return the writer object itself so that function
calls can be chained.
"""
def __init__(self) -> None:
self.data = io.StringIO()
self.pending = ''
def __call__(self) -> str:
""" Return the rendered JSON content as a string.
The writer remains usable after calling this function.
"""
if self.pending:
assert self.pending in (']', '}')
self.data.write(self.pending)
self.pending = ''
return self.data.getvalue()
def start_object(self) -> 'JsonWriter':
""" Write the open bracket of a JSON object.
"""
if self.pending:
self.data.write(self.pending)
self.pending = '{'
return self
def end_object(self) -> 'JsonWriter':
""" Write the closing bracket of a JSON object.
"""
assert self.pending in (',', '{', '')
if self.pending == '{':
self.data.write(self.pending)
self.pending = '}'
return self
def start_array(self) -> 'JsonWriter':
""" Write the opening bracket of a JSON array.
"""
if self.pending:
self.data.write(self.pending)
self.pending = '['
return self
def end_array(self) -> 'JsonWriter':
""" Write the closing bracket of a JSON array.
"""
assert self.pending in (',', '[', ']', ')', '')
if self.pending not in (',', ''):
self.data.write(self.pending)
self.pending = ']'
return self
def key(self, name: str) -> 'JsonWriter':
""" Write the key string of a JSON object.
"""
assert self.pending
self.data.write(self.pending)
self.data.write(json.dumps(name, ensure_ascii=False))
self.pending = ':'
return self
def value(self, value: Any) -> 'JsonWriter':
""" Write out a value as JSON. The function uses the json.dumps()
function for encoding the JSON. Thus any value that can be
encoded by that function is permissible here.
"""
return self.raw(json.dumps(value, ensure_ascii=False))
def float(self, value: float, precision: int) -> 'JsonWriter':
""" Write out a float value with the given precision.
"""
return self.raw(f"{value:0.{precision}f}")
def next(self) -> 'JsonWriter':
""" Write out a delimiter comma between JSON object or array elements.
"""
if self.pending:
self.data.write(self.pending)
self.pending = ','
return self
def raw(self, raw_json: str) -> 'JsonWriter':
""" Write out the given value as is. This function is useful if
a value is already available in JSON format.
"""
if self.pending:
self.data.write(self.pending)
self.pending = ''
self.data.write(raw_json)
return self
def keyval(self, key: str, value: Any) -> 'JsonWriter':
""" Write out an object element with the given key and value.
This is a shortcut for calling 'key()', 'value()' and 'next()'.
"""
self.key(key)
self.value(value)
return self.next()
def keyval_not_none(self, key: str, value: Optional[T],
transform: Optional[Callable[[T], Any]] = None) -> 'JsonWriter':
""" Write out an object element only if the value is not None.
If 'transform' is given, it must be a function that takes the
value type and returns a JSON encodable type. The transform
function will be called before the value is written out.
"""
if value is not None:
self.key(key)
self.value(transform(value) if transform else value)
self.next()
return self
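# Illustrative usage sketch (not part of the original module): the writer is
# driven by chained calls; next() inserts the delimiter comma lazily, so
# trailing commas before a closing bracket are swallowed automatically.
# The function name is hypothetical.
def _example_usage() -> str:
    """ Build a small JSON object and return it as a string. """
    out = JsonWriter()
    out.start_object()\
       .keyval('name', 'Berlin')\
       .key('tags').start_array()\
           .value('city').next()\
           .value('capital').next()\
       .end_array().next()\
       .end_object()
    return out()  # '{"name":"Berlin","tags":["city","capital"]}'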
| 4,713
|
Python
|
.py
| 121
| 30.446281
| 88
| 0.599474
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,739
|
sqlalchemy_schema.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlalchemy_schema.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
SQLAlchemy definitions for all tables used by the frontend.
"""
import sqlalchemy as sa
from .sqlalchemy_types import Geometry, KeyValueStore, IntArray
#pylint: disable=too-many-instance-attributes
class SearchTables:
""" Data class that holds the tables of the Nominatim database.
This schema strictly reflects the read-access view of the database.
Any data used for updates only will not be visible.
"""
def __init__(self, meta: sa.MetaData) -> None:
self.meta = meta
self.import_status = sa.Table('import_status', meta,
sa.Column('lastimportdate', sa.DateTime(True), nullable=False),
sa.Column('sequence_id', sa.Integer),
sa.Column('indexed', sa.Boolean))
self.properties = sa.Table('nominatim_properties', meta,
sa.Column('property', sa.Text, nullable=False),
sa.Column('value', sa.Text))
self.placex = sa.Table('placex', meta,
sa.Column('place_id', sa.BigInteger, nullable=False),
sa.Column('parent_place_id', sa.BigInteger),
sa.Column('linked_place_id', sa.BigInteger),
sa.Column('importance', sa.Float),
sa.Column('indexed_date', sa.DateTime),
sa.Column('rank_address', sa.SmallInteger),
sa.Column('rank_search', sa.SmallInteger),
sa.Column('indexed_status', sa.SmallInteger),
sa.Column('osm_type', sa.String(1), nullable=False),
sa.Column('osm_id', sa.BigInteger, nullable=False),
sa.Column('class', sa.Text, nullable=False, key='class_'),
sa.Column('type', sa.Text, nullable=False),
sa.Column('admin_level', sa.SmallInteger),
sa.Column('name', KeyValueStore),
sa.Column('address', KeyValueStore),
sa.Column('extratags', KeyValueStore),
sa.Column('geometry', Geometry, nullable=False),
sa.Column('wikipedia', sa.Text),
sa.Column('country_code', sa.String(2)),
sa.Column('housenumber', sa.Text),
sa.Column('postcode', sa.Text),
sa.Column('centroid', Geometry))
self.addressline = sa.Table('place_addressline', meta,
sa.Column('place_id', sa.BigInteger),
sa.Column('address_place_id', sa.BigInteger),
sa.Column('distance', sa.Float),
sa.Column('fromarea', sa.Boolean),
sa.Column('isaddress', sa.Boolean))
self.postcode = sa.Table('location_postcode', meta,
sa.Column('place_id', sa.BigInteger),
sa.Column('parent_place_id', sa.BigInteger),
sa.Column('rank_search', sa.SmallInteger),
sa.Column('rank_address', sa.SmallInteger),
sa.Column('indexed_status', sa.SmallInteger),
sa.Column('indexed_date', sa.DateTime),
sa.Column('country_code', sa.String(2)),
sa.Column('postcode', sa.Text),
sa.Column('geometry', Geometry))
self.osmline = sa.Table('location_property_osmline', meta,
sa.Column('place_id', sa.BigInteger, nullable=False),
sa.Column('osm_id', sa.BigInteger),
sa.Column('parent_place_id', sa.BigInteger),
sa.Column('indexed_date', sa.DateTime),
sa.Column('startnumber', sa.Integer),
sa.Column('endnumber', sa.Integer),
sa.Column('step', sa.SmallInteger),
sa.Column('indexed_status', sa.SmallInteger),
sa.Column('linegeo', Geometry),
sa.Column('address', KeyValueStore),
sa.Column('postcode', sa.Text),
sa.Column('country_code', sa.String(2)))
self.country_name = sa.Table('country_name', meta,
sa.Column('country_code', sa.String(2)),
sa.Column('name', KeyValueStore),
sa.Column('derived_name', KeyValueStore),
sa.Column('partition', sa.Integer))
self.country_grid = sa.Table('country_osm_grid', meta,
sa.Column('country_code', sa.String(2)),
sa.Column('area', sa.Float),
sa.Column('geometry', Geometry))
# The following tables are not necessarily present.
self.search_name = sa.Table('search_name', meta,
sa.Column('place_id', sa.BigInteger),
sa.Column('importance', sa.Float),
sa.Column('search_rank', sa.SmallInteger),
sa.Column('address_rank', sa.SmallInteger),
sa.Column('name_vector', IntArray),
sa.Column('nameaddress_vector', IntArray),
sa.Column('country_code', sa.String(2)),
sa.Column('centroid', Geometry))
self.tiger = sa.Table('location_property_tiger', meta,
sa.Column('place_id', sa.BigInteger),
sa.Column('parent_place_id', sa.BigInteger),
sa.Column('startnumber', sa.Integer),
sa.Column('endnumber', sa.Integer),
sa.Column('step', sa.SmallInteger),
sa.Column('linegeo', Geometry),
sa.Column('postcode', sa.Text))
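# Illustrative sketch (not part of the original module): querying the placex
# table defined above. Note that the SQL column 'class' is exposed under the
# Python key 'class_' to avoid the reserved word. Function name and filter
# values are hypothetical.
def _example_placex_select(tables: SearchTables) -> 'sa.Select':
    """ Select all administrative boundaries at country level. """
    t = tables.placex
    return sa.select(t.c.place_id, t.c.name)\
             .where(t.c.class_ == 'boundary')\
             .where(t.c.type == 'administrative')\
             .where(t.c.rank_address == 4)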
| 5,298
|
Python
|
.py
| 105
| 39.571429
| 75
| 0.602626
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,740
|
sqlalchemy_functions.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlalchemy_functions.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom functions and expressions for SQLAlchemy.
"""
from __future__ import annotations
from typing import Any
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from ..typing import SaColumn
# pylint: disable=all
class PlacexGeometryReverseLookuppolygon(sa.sql.functions.GenericFunction[Any]):
""" Check for conditions that allow partial index use on
'idx_placex_geometry_reverse_lookupPolygon'.
Needs to be constant, so that the query planner picks it up
correctly in prepared statements.
"""
name = 'PlacexGeometryReverseLookuppolygon'
inherit_cache = True
@compiles(PlacexGeometryReverseLookuppolygon)
def _default_intersects(element: PlacexGeometryReverseLookuppolygon,
compiler: 'sa.Compiled', **kw: Any) -> str:
return ("(ST_GeometryType(placex.geometry) in ('ST_Polygon', 'ST_MultiPolygon')"
" AND placex.rank_address between 4 and 25"
" AND placex.type != 'postcode'"
" AND placex.name is not null"
" AND placex.indexed_status = 0"
" AND placex.linked_place_id is null)")
@compiles(PlacexGeometryReverseLookuppolygon, 'sqlite')
def _sqlite_intersects(element: PlacexGeometryReverseLookuppolygon,
compiler: 'sa.Compiled', **kw: Any) -> str:
return ("(ST_GeometryType(placex.geometry) in ('POLYGON', 'MULTIPOLYGON')"
" AND placex.rank_address between 4 and 25"
" AND placex.type != 'postcode'"
" AND placex.name is not null"
" AND placex.indexed_status = 0"
" AND placex.linked_place_id is null)")
class IntersectsReverseDistance(sa.sql.functions.GenericFunction[Any]):
name = 'IntersectsReverseDistance'
inherit_cache = True
def __init__(self, table: sa.Table, geom: SaColumn) -> None:
super().__init__(table.c.geometry,
table.c.rank_search, geom)
self.tablename = table.name
@compiles(IntersectsReverseDistance)
def default_reverse_place_diameter(element: IntersectsReverseDistance,
compiler: 'sa.Compiled', **kw: Any) -> str:
table = element.tablename
return f"({table}.rank_address between 4 and 25"\
f" AND {table}.type != 'postcode'"\
f" AND {table}.name is not null"\
f" AND {table}.linked_place_id is null"\
f" AND {table}.osm_type = 'N'" + \
" AND ST_Buffer(%s, reverse_place_diameter(%s)) && %s)" % \
tuple(map(lambda c: compiler.process(c, **kw), element.clauses))
@compiles(IntersectsReverseDistance, 'sqlite')
def sqlite_reverse_place_diameter(element: IntersectsReverseDistance,
compiler: 'sa.Compiled', **kw: Any) -> str:
geom1, rank, geom2 = list(element.clauses)
table = element.tablename
return (f"({table}.rank_address between 4 and 25"\
f" AND {table}.type != 'postcode'"\
f" AND {table}.name is not null"\
f" AND {table}.linked_place_id is null"\
f" AND {table}.osm_type = 'N'"\
" AND MbrIntersects(%s, ST_Expand(%s, 14.0 * exp(-0.2 * %s) - 0.03))"\
f" AND {table}.place_id IN"\
" (SELECT place_id FROM placex_place_node_areas"\
" WHERE ROWID IN (SELECT ROWID FROM SpatialIndex"\
" WHERE f_table_name = 'placex_place_node_areas'"\
" AND search_frame = %s)))") % (
compiler.process(geom1, **kw),
compiler.process(geom2, **kw),
compiler.process(rank, **kw),
compiler.process(geom2, **kw))
class IsBelowReverseDistance(sa.sql.functions.GenericFunction[Any]):
name = 'IsBelowReverseDistance'
inherit_cache = True
@compiles(IsBelowReverseDistance)
def default_is_below_reverse_distance(element: IsBelowReverseDistance,
compiler: 'sa.Compiled', **kw: Any) -> str:
dist, rank = list(element.clauses)
return "%s < reverse_place_diameter(%s)" % (compiler.process(dist, **kw),
compiler.process(rank, **kw))
@compiles(IsBelowReverseDistance, 'sqlite')
def sqlite_is_below_reverse_distance(element: IsBelowReverseDistance,
compiler: 'sa.Compiled', **kw: Any) -> str:
dist, rank = list(element.clauses)
return "%s < 14.0 * exp(-0.2 * %s) - 0.03" % (compiler.process(dist, **kw),
compiler.process(rank, **kw))
class IsAddressPoint(sa.sql.functions.GenericFunction[Any]):
name = 'IsAddressPoint'
inherit_cache = True
def __init__(self, table: sa.Table) -> None:
super().__init__(table.c.rank_address,
table.c.housenumber, table.c.name)
@compiles(IsAddressPoint)
def default_is_address_point(element: IsAddressPoint,
compiler: 'sa.Compiled', **kw: Any) -> str:
rank, hnr, name = list(element.clauses)
return "(%s = 30 AND (%s IS NOT NULL OR %s ? 'addr:housename'))" % (
compiler.process(rank, **kw),
compiler.process(hnr, **kw),
compiler.process(name, **kw))
@compiles(IsAddressPoint, 'sqlite')
def sqlite_is_address_point(element: IsAddressPoint,
compiler: 'sa.Compiled', **kw: Any) -> str:
rank, hnr, name = list(element.clauses)
return "(%s = 30 AND coalesce(%s, json_extract(%s, '$.addr:housename')) IS NOT NULL)" % (
compiler.process(rank, **kw),
compiler.process(hnr, **kw),
compiler.process(name, **kw))
class CrosscheckNames(sa.sql.functions.GenericFunction[Any]):
""" Check if in the given list of names in parameters 1 any of the names
from the JSON array in parameter 2 are contained.
"""
name = 'CrosscheckNames'
inherit_cache = True
@compiles(CrosscheckNames)
def compile_crosscheck_names(element: CrosscheckNames,
compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "coalesce(avals(%s) && ARRAY(SELECT * FROM json_array_elements_text(%s)), false)" % (
compiler.process(arg1, **kw), compiler.process(arg2, **kw))
@compiles(CrosscheckNames, 'sqlite')
def compile_sqlite_crosscheck_names(element: CrosscheckNames,
compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "EXISTS(SELECT *"\
" FROM json_each(%s) as name, json_each(%s) as match_name"\
" WHERE name.value = match_name.value)"\
% (compiler.process(arg1, **kw), compiler.process(arg2, **kw))
class JsonArrayEach(sa.sql.functions.GenericFunction[Any]):
""" Return elements of a json array as a set.
"""
name = 'JsonArrayEach'
inherit_cache = True
@compiles(JsonArrayEach)
def default_json_array_each(element: JsonArrayEach, compiler: 'sa.Compiled', **kw: Any) -> str:
return "json_array_elements(%s)" % compiler.process(element.clauses, **kw)
@compiles(JsonArrayEach, 'sqlite')
def sqlite_json_array_each(element: JsonArrayEach, compiler: 'sa.Compiled', **kw: Any) -> str:
return "json_each(%s)" % compiler.process(element.clauses, **kw)
class Greatest(sa.sql.functions.GenericFunction[Any]):
""" Function to compute maximum of all its input parameters.
"""
name = 'greatest'
inherit_cache = True
@compiles(Greatest, 'sqlite')
def sqlite_greatest(element: Greatest, compiler: 'sa.Compiled', **kw: Any) -> str:
return "max(%s)" % compiler.process(element.clauses, **kw)
class RegexpWord(sa.sql.functions.GenericFunction[Any]):
""" Check if a full word is in a given string.
"""
name = 'RegexpWord'
inherit_cache = True
@compiles(RegexpWord, 'postgresql')
def postgres_regexp_nocase(element: RegexpWord, compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "%s ~* ('\\m(' || %s || ')\\M')::text" % (compiler.process(arg2, **kw), compiler.process(arg1, **kw))
@compiles(RegexpWord, 'sqlite')
def sqlite_regexp_nocase(element: RegexpWord, compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "regexp('\\b(' || %s || ')\\b', %s)" % (compiler.process(arg1, **kw), compiler.process(arg2, **kw))
| 8,647
|
Python
|
.py
| 168
| 42.696429
| 113
| 0.629836
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,741
|
async_core_library.py
|
osm-search_Nominatim/src/nominatim_api/sql/async_core_library.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Import the base library to use with asynchronous SQLAlchemy.
"""
# pylint: disable=invalid-name, ungrouped-imports, unused-import
from typing import Any
try:
import sqlalchemy.dialects.postgresql.psycopg
import psycopg
PGCORE_LIB = 'psycopg'
PGCORE_ERROR: Any = psycopg.Error
except ModuleNotFoundError:
import sqlalchemy.dialects.postgresql.asyncpg
import asyncpg
PGCORE_LIB = 'asyncpg'
PGCORE_ERROR = asyncpg.PostgresError
| 674
|
Python
|
.py
| 21
| 29.47619
| 64
| 0.769585
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,742
|
sqlite_functions.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlite_functions.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom functions for SQLite.
"""
from typing import cast, Optional, Set, Any
import json
# pylint: disable=protected-access
def weigh_search(search_vector: Optional[str], rankings: str, default: float) -> float:
""" Custom weight function for search results.
"""
if search_vector is not None:
svec = [int(x) for x in search_vector.split(',')]
for rank in json.loads(rankings):
if all(r in svec for r in rank[1]):
return cast(float, rank[0])
return default
class ArrayIntersectFuzzy:
""" Compute the array of common elements of all input integer arrays.
Very large input parameters may be ignored to speed up
computation. Therefore, the result is a superset of common elements.
Input and output arrays are given as comma-separated lists.
"""
def __init__(self) -> None:
self.first = ''
self.values: Optional[Set[int]] = None
def step(self, value: Optional[str]) -> None:
""" Add the next array to the intersection.
"""
if value is not None:
if not self.first:
self.first = value
elif len(value) < 10000000:
if self.values is None:
self.values = {int(x) for x in self.first.split(',')}
self.values.intersection_update((int(x) for x in value.split(',')))
def finalize(self) -> str:
""" Return the final result.
"""
if self.values is not None:
return ','.join(map(str, self.values))
return self.first
class ArrayUnion:
""" Compute the set of all elements of the input integer arrays.
Input and output arrays are given as strings of comma-separated lists.
"""
def __init__(self) -> None:
self.values: Optional[Set[str]] = None
def step(self, value: Optional[str]) -> None:
""" Add the next array to the union.
"""
if value is not None:
if self.values is None:
self.values = set(value.split(','))
else:
self.values.update(value.split(','))
def finalize(self) -> str:
""" Return the final result.
"""
return '' if self.values is None else ','.join(self.values)
def array_contains(container: Optional[str], containee: Optional[str]) -> Optional[bool]:
""" Is the array 'containee' completely contained in array 'container'.
"""
if container is None or containee is None:
return None
vset = container.split(',')
return all(v in vset for v in containee.split(','))
def array_pair_contains(container1: Optional[str], container2: Optional[str],
containee: Optional[str]) -> Optional[bool]:
""" Is the array 'containee' completely contained in the union of
array 'container1' and array 'container2'.
"""
if container1 is None or container2 is None or containee is None:
return None
vset = container1.split(',') + container2.split(',')
return all(v in vset for v in containee.split(','))
def install_custom_functions(conn: Any) -> None:
""" Install helper functions for Nominatim into the given SQLite
database connection.
"""
conn.create_function('weigh_search', 3, weigh_search, deterministic=True)
conn.create_function('array_contains', 2, array_contains, deterministic=True)
conn.create_function('array_pair_contains', 3, array_pair_contains, deterministic=True)
_create_aggregate(conn, 'array_intersect_fuzzy', 1, ArrayIntersectFuzzy)
_create_aggregate(conn, 'array_union', 1, ArrayUnion)
async def _make_aggregate(aioconn: Any, *args: Any) -> None:
await aioconn._execute(aioconn._conn.create_aggregate, *args)
def _create_aggregate(conn: Any, name: str, nargs: int, aggregate: Any) -> None:
try:
conn.await_(_make_aggregate(conn._connection, name, nargs, aggregate))
except Exception as error: # pylint: disable=broad-exception-caught
conn._handle_exception(error)
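# Illustrative sketch (not part of the original module): the helpers can also
# be exercised on a plain sqlite3 connection, while install_custom_functions()
# above targets the asynchronous SQLAlchemy connection wrapper.
def _example_plain_sqlite() -> bool:
    """ Register array_contains on an in-memory database and call it. """
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.create_function('array_contains', 2, array_contains, deterministic=True)
    result = conn.execute("SELECT array_contains('1,2,3', '2,3')").fetchone()[0]
    conn.close()
    return bool(result)  # True: every element of '2,3' occurs in '1,2,3'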
| 4,271
|
Python
|
.py
| 96
| 37.260417
| 91
| 0.648108
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,743
|
key_value.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlalchemy_types/key_value.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
A custom type that implements a simple key-value store of strings.
"""
from typing import Any
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.dialects.sqlite import JSON as sqlite_json
from ...typing import SaDialect, SaColumn
# pylint: disable=all
class KeyValueStore(sa.types.TypeDecorator[Any]):
""" Dialect-independent type of a simple key-value store of strings.
"""
impl = HSTORE
cache_ok = True
def load_dialect_impl(self, dialect: SaDialect) -> sa.types.TypeEngine[Any]:
if dialect.name == 'postgresql':
return HSTORE() # type: ignore[no-untyped-call]
return sqlite_json(none_as_null=True)
class comparator_factory(sa.types.UserDefinedType.Comparator): # type: ignore[type-arg]
def merge(self, other: SaColumn) -> 'sa.Operators':
""" Merge the values from the given KeyValueStore into this
one, overwriting values where necessary. When the argument
is null, nothing happens.
"""
return KeyValueConcat(self.expr, other)
class KeyValueConcat(sa.sql.expression.FunctionElement[Any]):
""" Return the merged key-value store from the input parameters.
"""
type = KeyValueStore()
name = 'JsonConcat'
inherit_cache = True
@compiles(KeyValueConcat)
def default_json_concat(element: KeyValueConcat, compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "(%s || coalesce(%s, ''::hstore))" % (compiler.process(arg1, **kw), compiler.process(arg2, **kw))
@compiles(KeyValueConcat, 'sqlite')
def sqlite_json_concat(element: KeyValueConcat, compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "json_patch(%s, coalesce(%s, '{}'))" % (compiler.process(arg1, **kw), compiler.process(arg2, **kw))
| 2,133
|
Python
|
.py
| 46
| 41.456522
| 110
| 0.705456
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,744
|
int_array.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlalchemy_types/int_array.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom type for an array of integers.
"""
from typing import Any, List, cast, Optional
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.dialects.postgresql import ARRAY
from ...typing import SaDialect, SaColumn
# pylint: disable=all
class IntList(sa.types.TypeDecorator[Any]):
""" A list of integers saved as a text of comma-separated numbers.
"""
impl = sa.types.Unicode
cache_ok = True
def process_bind_param(self, value: Optional[Any], dialect: 'sa.Dialect') -> Optional[str]:
if value is None:
return None
assert isinstance(value, list)
return ','.join(map(str, value))
def process_result_value(self, value: Optional[Any],
dialect: SaDialect) -> Optional[List[int]]:
return [int(v) for v in value.split(',')] if value is not None else None
def copy(self, **kw: Any) -> 'IntList':
return IntList(self.impl.length)
class IntArray(sa.types.TypeDecorator[Any]):
""" Dialect-independent list of integers.
"""
impl = IntList
cache_ok = True
def load_dialect_impl(self, dialect: SaDialect) -> sa.types.TypeEngine[Any]:
if dialect.name == 'postgresql':
return ARRAY(sa.Integer()) #pylint: disable=invalid-name
return IntList()
class comparator_factory(sa.types.UserDefinedType.Comparator): # type: ignore[type-arg]
def __add__(self, other: SaColumn) -> 'sa.ColumnOperators':
""" Concate the array with the given array. If one of the
operants is null, the value of the other will be returned.
"""
return ArrayCat(self.expr, other)
def contains(self, other: SaColumn, **kwargs: Any) -> 'sa.ColumnOperators':
""" Return true if the array contains all the value of the argument
array.
"""
return ArrayContains(self.expr, other)
class ArrayAgg(sa.sql.functions.GenericFunction[Any]):
""" Aggregate function to collect elements in an array.
"""
type = IntArray()
identifier = 'ArrayAgg'
name = 'array_agg'
inherit_cache = True
@compiles(ArrayAgg, 'sqlite')
def sqlite_array_agg(element: ArrayAgg, compiler: 'sa.Compiled', **kw: Any) -> str:
return "group_concat(%s, ',')" % compiler.process(element.clauses, **kw)
class ArrayContains(sa.sql.expression.FunctionElement[Any]):
""" Function to check if an array is fully contained in another.
"""
name = 'ArrayContains'
inherit_cache = True
@compiles(ArrayContains)
def generic_array_contains(element: ArrayContains, compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "(%s @> %s)" % (compiler.process(arg1, **kw),
compiler.process(arg2, **kw))
@compiles(ArrayContains, 'sqlite')
def sqlite_array_contains(element: ArrayContains, compiler: 'sa.Compiled', **kw: Any) -> str:
return "array_contains(%s)" % compiler.process(element.clauses, **kw)
class ArrayCat(sa.sql.expression.FunctionElement[Any]):
""" Function to check if an array is fully contained in another.
"""
type = IntArray()
identifier = 'ArrayCat'
inherit_cache = True
@compiles(ArrayCat)
def generic_array_cat(element: ArrayCat, compiler: 'sa.Compiled', **kw: Any) -> str:
return "array_cat(%s)" % compiler.process(element.clauses, **kw)
@compiles(ArrayCat, 'sqlite')
def sqlite_array_cat(element: ArrayCat, compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "(%s || ',' || %s)" % (compiler.process(arg1, **kw), compiler.process(arg2, **kw))
| 3,896
|
Python
|
.py
| 86
| 39.267442
| 95
| 0.669494
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,745
|
geometry.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlalchemy_types/geometry.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom types for SQLAlchemy.
"""
from __future__ import annotations
from typing import Callable, Any, cast
import sys
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import types
from ...typing import SaColumn, SaBind
#pylint: disable=all
class Geometry_DistanceSpheroid(sa.sql.expression.FunctionElement[float]):
""" Function to compute the spherical distance in meters.
"""
type = sa.Float()
name = 'Geometry_DistanceSpheroid'
inherit_cache = True
@compiles(Geometry_DistanceSpheroid)
def _default_distance_spheroid(element: Geometry_DistanceSpheroid,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "ST_DistanceSpheroid(%s,"\
" 'SPHEROID[\"WGS 84\",6378137,298.257223563, AUTHORITY[\"EPSG\",\"7030\"]]')"\
% compiler.process(element.clauses, **kw)
@compiles(Geometry_DistanceSpheroid, 'sqlite')
def _spatialite_distance_spheroid(element: Geometry_DistanceSpheroid,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "COALESCE(Distance(%s, true), 0.0)" % compiler.process(element.clauses, **kw)
class Geometry_IsLineLike(sa.sql.expression.FunctionElement[Any]):
""" Check if the geometry is a line or multiline.
"""
name = 'Geometry_IsLineLike'
inherit_cache = True
@compiles(Geometry_IsLineLike)
def _default_is_line_like(element: Geometry_IsLineLike,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "ST_GeometryType(%s) IN ('ST_LineString', 'ST_MultiLineString')" % \
compiler.process(element.clauses, **kw)
@compiles(Geometry_IsLineLike, 'sqlite')
def _sqlite_is_line_like(element: Geometry_IsLineLike,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "ST_GeometryType(%s) IN ('LINESTRING', 'MULTILINESTRING')" % \
compiler.process(element.clauses, **kw)
class Geometry_IsAreaLike(sa.sql.expression.FunctionElement[Any]):
""" Check if the geometry is a polygon or multipolygon.
"""
name = 'Geometry_IsAreaLike'
inherit_cache = True
@compiles(Geometry_IsAreaLike)
def _default_is_area_like(element: Geometry_IsAreaLike,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "ST_GeometryType(%s) IN ('ST_Polygon', 'ST_MultiPolygon')" % \
compiler.process(element.clauses, **kw)
@compiles(Geometry_IsAreaLike, 'sqlite')
def _sqlite_is_area_like(element: Geometry_IsAreaLike,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "ST_GeometryType(%s) IN ('POLYGON', 'MULTIPOLYGON')" % \
compiler.process(element.clauses, **kw)
class Geometry_IntersectsBbox(sa.sql.expression.FunctionElement[Any]):
""" Check if the bounding boxes of the given geometries intersect.
"""
name = 'Geometry_IntersectsBbox'
inherit_cache = True
@compiles(Geometry_IntersectsBbox)
def _default_intersects(element: Geometry_IntersectsBbox,
compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "%s && %s" % (compiler.process(arg1, **kw), compiler.process(arg2, **kw))
@compiles(Geometry_IntersectsBbox, 'sqlite')
def _sqlite_intersects(element: Geometry_IntersectsBbox,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "MbrIntersects(%s) = 1" % compiler.process(element.clauses, **kw)
class Geometry_ColumnIntersectsBbox(sa.sql.expression.FunctionElement[Any]):
""" Check if the bounding box of the geometry intersects with the
given table column, using the spatial index for the column.
The index must exist or the query may return nothing.
"""
name = 'Geometry_ColumnIntersectsBbox'
inherit_cache = True
@compiles(Geometry_ColumnIntersectsBbox)
def default_intersects_column(element: Geometry_ColumnIntersectsBbox,
compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "%s && %s" % (compiler.process(arg1, **kw), compiler.process(arg2, **kw))
@compiles(Geometry_ColumnIntersectsBbox, 'sqlite')
def spatialite_intersects_column(element: Geometry_ColumnIntersectsBbox,
compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
return "MbrIntersects(%s, %s) = 1 and "\
"%s.ROWID IN (SELECT ROWID FROM SpatialIndex "\
"WHERE f_table_name = '%s' AND f_geometry_column = '%s' "\
"AND search_frame = %s)" %(
compiler.process(arg1, **kw),
compiler.process(arg2, **kw),
arg1.table.name, arg1.table.name, arg1.name,
compiler.process(arg2, **kw))
class Geometry_ColumnDWithin(sa.sql.expression.FunctionElement[Any]):
""" Check if the geometry is within the distance of the
given table column, using the spatial index for the column.
The index must exist or the query may return nothing.
"""
name = 'Geometry_ColumnDWithin'
inherit_cache = True
@compiles(Geometry_ColumnDWithin)
def default_dwithin_column(element: Geometry_ColumnDWithin,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "ST_DWithin(%s)" % compiler.process(element.clauses, **kw)
@compiles(Geometry_ColumnDWithin, 'sqlite')
def spatialite_dwithin_column(element: Geometry_ColumnDWithin,
compiler: 'sa.Compiled', **kw: Any) -> str:
geom1, geom2, dist = list(element.clauses)
return "ST_Distance(%s, %s) < %s and "\
"%s.ROWID IN (SELECT ROWID FROM SpatialIndex "\
"WHERE f_table_name = '%s' AND f_geometry_column = '%s' "\
"AND search_frame = ST_Expand(%s, %s))" %(
compiler.process(geom1, **kw),
compiler.process(geom2, **kw),
compiler.process(dist, **kw),
geom1.table.name, geom1.table.name, geom1.name,
compiler.process(geom2, **kw),
compiler.process(dist, **kw))
class Geometry(types.UserDefinedType): # type: ignore[type-arg]
""" Simplified type decorator for PostGIS geometry. This type
only supports geometries in 4326 projection.
"""
cache_ok = True
def __init__(self, subtype: str = 'Geometry'):
self.subtype = subtype
def get_col_spec(self) -> str:
return f'GEOMETRY({self.subtype}, 4326)'
def bind_processor(self, dialect: 'sa.Dialect') -> Callable[[Any], str]:
def process(value: Any) -> str:
if isinstance(value, str):
return value
return cast(str, value.to_wkt())
return process
def result_processor(self, dialect: 'sa.Dialect', coltype: object) -> Callable[[Any], str]:
def process(value: Any) -> str:
assert isinstance(value, str)
return value
return process
def column_expression(self, col: SaColumn) -> SaColumn:
return sa.func.ST_AsEWKB(col)
def bind_expression(self, bindvalue: SaBind) -> SaColumn:
return sa.func.ST_GeomFromText(bindvalue, sa.text('4326'), type_=self)
class comparator_factory(types.UserDefinedType.Comparator): # type: ignore[type-arg]
def intersects(self, other: SaColumn, use_index: bool = True) -> 'sa.Operators':
if not use_index:
return Geometry_IntersectsBbox(sa.func.coalesce(sa.null(), self.expr), other)
if isinstance(self.expr, sa.Column):
return Geometry_ColumnIntersectsBbox(self.expr, other)
return Geometry_IntersectsBbox(self.expr, other)
def is_line_like(self) -> SaColumn:
return Geometry_IsLineLike(self)
def is_area(self) -> SaColumn:
return Geometry_IsAreaLike(self)
def within_distance(self, other: SaColumn, distance: SaColumn) -> SaColumn:
if isinstance(self.expr, sa.Column):
return Geometry_ColumnDWithin(self.expr, other, distance)
return self.ST_Distance(other) < distance
def ST_Distance(self, other: SaColumn) -> SaColumn:
return sa.func.ST_Distance(self, other, type_=sa.Float)
def ST_Contains(self, other: SaColumn) -> SaColumn:
return sa.func.ST_Contains(self, other, type_=sa.Boolean)
def ST_CoveredBy(self, other: SaColumn) -> SaColumn:
return sa.func.ST_CoveredBy(self, other, type_=sa.Boolean)
def ST_ClosestPoint(self, other: SaColumn) -> SaColumn:
return sa.func.coalesce(sa.func.ST_ClosestPoint(self, other, type_=Geometry),
other)
def ST_Buffer(self, other: SaColumn) -> SaColumn:
return sa.func.ST_Buffer(self, other, type_=Geometry)
def ST_Expand(self, other: SaColumn) -> SaColumn:
return sa.func.ST_Expand(self, other, type_=Geometry)
def ST_Collect(self) -> SaColumn:
return sa.func.ST_Collect(self, type_=Geometry)
def ST_Centroid(self) -> SaColumn:
return sa.func.ST_Centroid(self, type_=Geometry)
def ST_LineInterpolatePoint(self, other: SaColumn) -> SaColumn:
return sa.func.ST_LineInterpolatePoint(self, other, type_=Geometry)
def ST_LineLocatePoint(self, other: SaColumn) -> SaColumn:
return sa.func.ST_LineLocatePoint(self, other, type_=sa.Float)
def distance_spheroid(self, other: SaColumn) -> SaColumn:
return Geometry_DistanceSpheroid(self, other)
@compiles(Geometry, 'sqlite')
def get_col_spec(self, *args, **kwargs): # type: ignore[no-untyped-def]
return 'GEOMETRY'
SQLITE_FUNCTION_ALIAS = (
('ST_AsEWKB', sa.Text, 'AsEWKB'),
('ST_GeomFromEWKT', Geometry, 'GeomFromEWKT'),
('ST_AsGeoJSON', sa.Text, 'AsGeoJSON'),
('ST_AsKML', sa.Text, 'AsKML'),
('ST_AsSVG', sa.Text, 'AsSVG'),
('ST_LineLocatePoint', sa.Float, 'ST_Line_Locate_Point'),
('ST_LineInterpolatePoint', sa.Float, 'ST_Line_Interpolate_Point'),
)
def _add_function_alias(func: str, ftype: type, alias: str) -> None:
_FuncDef = type(func, (sa.sql.functions.GenericFunction, ), {
"type": ftype(),
"name": func,
"identifier": func,
"inherit_cache": True})
func_templ = f"{alias}(%s)"
def _sqlite_impl(element: Any, compiler: Any, **kw: Any) -> Any:
return func_templ % compiler.process(element.clauses, **kw)
compiles(_FuncDef, 'sqlite')(_sqlite_impl)
for alias in SQLITE_FUNCTION_ALIAS:
_add_function_alias(*alias)
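# Illustrative sketch (not part of the original module): the comparator
# methods on Geometry columns pick an index-backed variant automatically when
# the left side is a real table column. Function name, table layout and the
# distance value are hypothetical.
def _example_nearby(placex: sa.Table, point: SaColumn) -> SaColumn:
    """ Filter expression for places within 0.01 degrees of a point. """
    return placex.c.geometry.within_distance(point, sa.literal(0.01))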
| 10,898
|
Python
|
.py
| 212
| 42.59434
| 95
| 0.646176
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,746
|
__init__.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlalchemy_types/__init__.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module with custom types for SQLAlchemy
"""
# See also https://github.com/PyCQA/pylint/issues/6006
# pylint: disable=useless-import-alias
from .geometry import (Geometry as Geometry)
from .int_array import (IntArray as IntArray)
from .key_value import (KeyValueStore as KeyValueStore)
from .json import (Json as Json)
| 535
|
Python
|
.py
| 15
| 34.533333
| 58
| 0.777992
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,747
|
json.py
|
osm-search_Nominatim/src/nominatim_api/sql/sqlalchemy_types/json.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Common json type for different dialects.
"""
from typing import Any
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.sqlite import JSON as sqlite_json
from ...typing import SaDialect
# pylint: disable=all
class Json(sa.types.TypeDecorator[Any]):
""" Dialect-independent type for JSON.
"""
impl = sa.types.JSON
cache_ok = True
def load_dialect_impl(self, dialect: SaDialect) -> sa.types.TypeEngine[Any]:
if dialect.name == 'postgresql':
return JSONB(none_as_null=True) # type: ignore[no-untyped-call]
return sqlite_json(none_as_null=True)
| 856
|
Python
|
.py
| 24
| 32.416667
| 80
| 0.734867
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,748
|
content_types.py
|
osm-search_Nominatim/src/nominatim_api/server/content_types.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Constants for various content types for server responses.
"""
CONTENT_TEXT = 'text/plain; charset=utf-8'
CONTENT_XML = 'text/xml; charset=utf-8'
CONTENT_HTML = 'text/html; charset=utf-8'
CONTENT_JSON = 'application/json; charset=utf-8'
| 452
|
Python
|
.py
| 13
| 33.692308
| 58
| 0.751142
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,749
|
asgi_adaptor.py
|
osm-search_Nominatim/src/nominatim_api/server/asgi_adaptor.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Base abstraction for implementing the server glue based on different ASGI frameworks.
"""
from typing import Optional, Any, NoReturn, Callable
import abc
import math
from ..config import Configuration
from ..core import NominatimAPIAsync
from ..result_formatting import FormatDispatcher
from .content_types import CONTENT_TEXT
class ASGIAdaptor(abc.ABC):
""" Adapter class for the different ASGI frameworks.
Wraps functionality over concrete requests and responses.
"""
content_type: str = CONTENT_TEXT
@abc.abstractmethod
def get(self, name: str, default: Optional[str] = None) -> Optional[str]:
""" Return an input parameter as a string. If the parameter was
not provided, return the 'default' value.
"""
@abc.abstractmethod
def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]:
""" Return a HTTP header parameter as a string. If the parameter was
not provided, return the 'default' value.
"""
@abc.abstractmethod
def error(self, msg: str, status: int = 400) -> Exception:
""" Construct an appropriate exception from the given error message.
The exception must result in a HTTP error with the given status.
"""
@abc.abstractmethod
def create_response(self, status: int, output: str, num_results: int) -> Any:
""" Create a response from the given parameters. The result will
be returned by the endpoint functions. The adaptor may also
return None when the response is created internally with some
different means.
The response must return the HTTP given status code 'status', set
the HTTP content-type headers to the string provided and the
body of the response to 'output'.
"""
@abc.abstractmethod
def base_uri(self) -> str:
""" Return the URI of the original request.
"""
@abc.abstractmethod
def config(self) -> Configuration:
""" Return the current configuration object.
"""
@abc.abstractmethod
def formatting(self) -> FormatDispatcher:
""" Return the formatting object to use.
"""
def get_int(self, name: str, default: Optional[int] = None) -> int:
""" Return an input parameter as an int. Raises an exception if
the parameter is given but not in an integer format.
If 'default' is given, then it will be returned when the parameter
is missing completely. When 'default' is None, an error will be
raised on a missing parameter.
"""
value = self.get(name)
if value is None:
if default is not None:
return default
self.raise_error(f"Parameter '{name}' missing.")
try:
intval = int(value)
except ValueError:
self.raise_error(f"Parameter '{name}' must be a number.")
return intval
def get_float(self, name: str, default: Optional[float] = None) -> float:
""" Return an input parameter as a flaoting-point number. Raises an
exception if the parameter is given but not in an float format.
If 'default' is given, then it will be returned when the parameter
is missing completely. When 'default' is None, an error will be
raised on a missing parameter.
"""
value = self.get(name)
if value is None:
if default is not None:
return default
self.raise_error(f"Parameter '{name}' missing.")
try:
fval = float(value)
except ValueError:
self.raise_error(f"Parameter '{name}' must be a number.")
if math.isnan(fval) or math.isinf(fval):
self.raise_error(f"Parameter '{name}' must be a number.")
return fval
def get_bool(self, name: str, default: Optional[bool] = None) -> bool:
""" Return an input parameter as bool. Only '0' is accepted as
an input for 'false' all other inputs will be interpreted as 'true'.
If 'default' is given, then it will be returned when the parameter
is missing completely. When 'default' is None, an error will be
raised on a missing parameter.
"""
value = self.get(name)
if value is None:
if default is not None:
return default
self.raise_error(f"Parameter '{name}' missing.")
return value != '0'
def raise_error(self, msg: str, status: int = 400) -> NoReturn:
""" Raise an exception resulting in the given HTTP status and
message. The message will be formatted according to the
output format chosen by the request.
"""
raise self.error(self.formatting().format_error(self.content_type, msg, status),
status)
EndpointFunc = Callable[[NominatimAPIAsync, ASGIAdaptor], Any]
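# Illustrative sketch (not part of the original module): a minimal dict-backed
# adaptor showing how the parameter helpers behave. Error formatting is
# stubbed out, so raise_error() paths differ from a real server adaptor.
class _DictAdaptor(ASGIAdaptor):
    """ Test double reading parameters from a plain dictionary. """
    def __init__(self, params: dict) -> None:
        self.params = params
    def get(self, name: str, default: Optional[str] = None) -> Optional[str]:
        return self.params.get(name, default)
    def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]:
        return default
    def error(self, msg: str, status: int = 400) -> Exception:
        return RuntimeError(msg)
    def create_response(self, status: int, output: str, num_results: int) -> str:
        return output
    def base_uri(self) -> str:
        return ''
    def config(self) -> Configuration:
        raise NotImplementedError
    def formatting(self) -> FormatDispatcher:
        raise NotImplementedError
# _DictAdaptor({'limit': '10'}).get_int('limit', 20) returns 10;
# _DictAdaptor({}).get_int('limit', 20) falls back to the default 20.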
| 5,225
|
Python
|
.py
| 115
| 36.521739
| 88
| 0.639377
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,750
|
server.py
|
osm-search_Nominatim/src/nominatim_api/server/falcon/server.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Server implementation using the falcon webserver framework.
"""
from typing import Optional, Mapping, Any, List
from pathlib import Path
import datetime as dt
import asyncio
from falcon.asgi import App, Request, Response
from ...config import Configuration
from ...core import NominatimAPIAsync
from ... import v1 as api_impl
from ...result_formatting import FormatDispatcher, load_format_dispatcher
from ... import logging as loglib
from ..asgi_adaptor import ASGIAdaptor, EndpointFunc
class HTTPNominatimError(Exception):
""" A special exception class for errors raised during processing.
"""
def __init__(self, msg: str, status: int, content_type: str) -> None:
self.msg = msg
self.status = status
self.content_type = content_type
async def nominatim_error_handler(req: Request, resp: Response, #pylint: disable=unused-argument
exception: HTTPNominatimError,
_: Any) -> None:
""" Special error handler that passes message and content type as
per exception info.
"""
resp.status = exception.status
resp.text = exception.msg
resp.content_type = exception.content_type
async def timeout_error_handler(req: Request, resp: Response, #pylint: disable=unused-argument
exception: TimeoutError, #pylint: disable=unused-argument
_: Any) -> None:
""" Special error handler that passes message and content type as
per exception info.
"""
resp.status = 503
loglib.log().comment('Aborted: Query took too long to process.')
logdata = loglib.get_and_disable()
if logdata:
resp.text = logdata
resp.content_type = 'text/html; charset=utf-8'
else:
resp.text = "Query took too long to process."
resp.content_type = 'text/plain; charset=utf-8'
class ParamWrapper(ASGIAdaptor):
""" Adaptor class for server glue to Falcon framework.
"""
def __init__(self, req: Request, resp: Response,
config: Configuration, formatter: FormatDispatcher) -> None:
self.request = req
self.response = resp
self._config = config
self._formatter = formatter
def get(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.request.get_param(name, default=default)
def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.request.get_header(name, default=default)
def error(self, msg: str, status: int = 400) -> HTTPNominatimError:
return HTTPNominatimError(msg, status, self.content_type)
def create_response(self, status: int, output: str, num_results: int) -> None:
self.response.context.num_results = num_results
self.response.status = status
self.response.text = output
self.response.content_type = self.content_type
def base_uri(self) -> str:
return self.request.forwarded_prefix
def config(self) -> Configuration:
return self._config
def formatting(self) -> FormatDispatcher:
return self._formatter
class EndpointWrapper:
""" Converter for server glue endpoint functions to Falcon request handlers.
"""
def __init__(self, name: str, func: EndpointFunc, api: NominatimAPIAsync,
formatter: FormatDispatcher) -> None:
self.name = name
self.func = func
self.api = api
self.formatter = formatter
async def on_get(self, req: Request, resp: Response) -> None:
""" Implementation of the endpoint.
"""
await self.func(self.api, ParamWrapper(req, resp, self.api.config,
self.formatter))
class FileLoggingMiddleware:
""" Middleware to log selected requests into a file.
"""
def __init__(self, file_name: str):
self.fd = open(file_name, 'a', buffering=1, encoding='utf8') # pylint: disable=R1732
async def process_request(self, req: Request, _: Response) -> None:
""" Callback before the request starts timing.
"""
req.context.start = dt.datetime.now(tz=dt.timezone.utc)
async def process_response(self, req: Request, resp: Response,
resource: Optional[EndpointWrapper],
req_succeeded: bool) -> None:
""" Callback after requests writes to the logfile. It only
writes logs for successful requests for search, reverse and lookup.
"""
if not req_succeeded or resource is None or resp.status != 200\
or resource.name not in ('reverse', 'search', 'lookup', 'details'):
return
finish = dt.datetime.now(tz=dt.timezone.utc)
duration = (finish - req.context.start).total_seconds()
params = req.scope['query_string'].decode('utf8')
start = req.context.start.replace(tzinfo=None)\
.isoformat(sep=' ', timespec='milliseconds')
self.fd.write(f"[{start}] "
f"{duration:.4f} {getattr(resp.context, 'num_results', 0)} "
f'{resource.name} "{params}"\n')
class APIShutdown:
""" Middleware that closes any open database connections.
"""
def __init__(self, api: NominatimAPIAsync) -> None:
self.api = api
async def process_shutdown(self, *_: Any) -> None:
"""Process the ASGI lifespan shutdown event.
"""
await self.api.close()
def get_application(project_dir: Path,
environ: Optional[Mapping[str, str]] = None) -> App:
""" Create a Nominatim Falcon ASGI application.
"""
api = NominatimAPIAsync(project_dir, environ)
middleware: List[object] = [APIShutdown(api)]
log_file = api.config.LOG_FILE
if log_file:
middleware.append(FileLoggingMiddleware(log_file))
app = App(cors_enable=api.config.get_bool('CORS_NOACCESSCONTROL'),
middleware=middleware)
app.add_error_handler(HTTPNominatimError, nominatim_error_handler)
app.add_error_handler(TimeoutError, timeout_error_handler)
# different from TimeoutError in Python <= 3.10
app.add_error_handler(asyncio.TimeoutError, timeout_error_handler) # type: ignore[arg-type]
legacy_urls = api.config.get_bool('SERVE_LEGACY_URLS')
formatter = load_format_dispatcher('v1', project_dir)
for name, func in api_impl.ROUTES:
endpoint = EndpointWrapper(name, func, api, formatter)
app.add_route(f"/{name}", endpoint)
if legacy_urls:
app.add_route(f"/{name}.php", endpoint)
return app
def run_wsgi() -> App:
""" Entry point for uvicorn.
Make sure uvicorn is run from the project directory.
"""
return get_application(Path('.'))
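# Illustrative usage sketch (not part of the original module): the application
# can also be served programmatically. Host and port are hypothetical, and
# uvicorn is assumed to be installed.
def _example_serve() -> None:
    """ Run the Falcon app on a local development server. """
    import uvicorn
    uvicorn.run(get_application(Path('.')), host='127.0.0.1', port=8088)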
| 7,082
|
Python
|
.py
| 154
| 37.74026
| 96
| 0.650044
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,751
|
server.py
|
osm-search_Nominatim/src/nominatim_api/server/starlette/server.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Server implementation using the starlette webserver framework.
"""
from typing import Any, Optional, Mapping, Callable, cast, Coroutine, Dict, Awaitable
from pathlib import Path
import datetime as dt
import asyncio
from starlette.applications import Starlette
from starlette.routing import Route
from starlette.exceptions import HTTPException
from starlette.responses import Response, PlainTextResponse, HTMLResponse
from starlette.requests import Request
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.middleware.cors import CORSMiddleware
from ...config import Configuration
from ...core import NominatimAPIAsync
from ... import v1 as api_impl
from ...result_formatting import FormatDispatcher, load_format_dispatcher
from ..asgi_adaptor import ASGIAdaptor, EndpointFunc
from ... import logging as loglib
class ParamWrapper(ASGIAdaptor):
""" Adaptor class for server glue to Starlette framework.
"""
def __init__(self, request: Request) -> None:
self.request = request
def get(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.request.query_params.get(name, default=default)
def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.request.headers.get(name, default)
def error(self, msg: str, status: int = 400) -> HTTPException:
return HTTPException(status, detail=msg,
headers={'content-type': self.content_type})
def create_response(self, status: int, output: str, num_results: int) -> Response:
self.request.state.num_results = num_results
return Response(output, status_code=status, media_type=self.content_type)
def base_uri(self) -> str:
scheme = self.request.url.scheme
host = self.request.url.hostname
port = self.request.url.port
root = self.request.scope['root_path']
if (scheme == 'http' and port == 80) or (scheme == 'https' and port == 443):
port = None
if port is not None:
return f"{scheme}://{host}:{port}{root}"
return f"{scheme}://{host}{root}"
def config(self) -> Configuration:
return cast(Configuration, self.request.app.state.API.config)
def formatting(self) -> FormatDispatcher:
return cast(FormatDispatcher, self.request.app.state.API.formatter)
def _wrap_endpoint(func: EndpointFunc)\
-> Callable[[Request], Coroutine[Any, Any, Response]]:
async def _callback(request: Request) -> Response:
return cast(Response, await func(request.app.state.API, ParamWrapper(request)))
return _callback
class FileLoggingMiddleware(BaseHTTPMiddleware):
""" Middleware to log selected requests into a file.
"""
def __init__(self, app: Starlette, file_name: str = ''):
super().__init__(app)
self.fd = open(file_name, 'a', buffering=1, encoding='utf8') # pylint: disable=R1732
async def dispatch(self, request: Request,
call_next: RequestResponseEndpoint) -> Response:
start = dt.datetime.now(tz=dt.timezone.utc)
response = await call_next(request)
if response.status_code != 200:
return response
finish = dt.datetime.now(tz=dt.timezone.utc)
for endpoint in ('reverse', 'search', 'lookup', 'details'):
if request.url.path.startswith('/' + endpoint):
qtype = endpoint
break
else:
return response
duration = (finish - start).total_seconds()
params = request.scope['query_string'].decode('utf8')
self.fd.write(f"[{start.replace(tzinfo=None).isoformat(sep=' ', timespec='milliseconds')}] "
f"{duration:.4f} {getattr(request.state, 'num_results', 0)} "
f'{qtype} "{params}"\n')
return response
async def timeout_error(request: Request, #pylint: disable=unused-argument
_: Exception) -> Response:
""" Error handler for query timeouts.
"""
loglib.log().comment('Aborted: Query took too long to process.')
logdata = loglib.get_and_disable()
if logdata:
return HTMLResponse(logdata)
return PlainTextResponse("Query took too long to process.", status_code=503)
def get_application(project_dir: Path,
environ: Optional[Mapping[str, str]] = None,
debug: bool = True) -> Starlette:
""" Create a Nominatim falcon ASGI application.
"""
config = Configuration(project_dir, environ)
routes = []
legacy_urls = config.get_bool('SERVE_LEGACY_URLS')
for name, func in api_impl.ROUTES:
endpoint = _wrap_endpoint(func)
routes.append(Route(f"/{name}", endpoint=endpoint))
if legacy_urls:
routes.append(Route(f"/{name}.php", endpoint=endpoint))
middleware = []
if config.get_bool('CORS_NOACCESSCONTROL'):
middleware.append(Middleware(CORSMiddleware,
allow_origins=['*'],
allow_methods=['GET', 'OPTIONS'],
max_age=86400))
log_file = config.LOG_FILE
if log_file:
middleware.append(Middleware(FileLoggingMiddleware, file_name=log_file))
exceptions: Dict[Any, Callable[[Request, Exception], Awaitable[Response]]] = {
TimeoutError: timeout_error,
asyncio.TimeoutError: timeout_error
}
async def _shutdown() -> None:
await app.state.API.close()
app = Starlette(debug=debug, routes=routes, middleware=middleware,
exception_handlers=exceptions,
on_shutdown=[_shutdown])
app.state.API = NominatimAPIAsync(project_dir, environ)
app.state.formatter = load_format_dispatcher('v1', project_dir)
return app
def run_wsgi() -> Starlette:
""" Entry point for uvicorn.
"""
return get_application(Path('.'), debug=False)
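# Usage sketch (assumed invocation; adjust the module path to your setup):
#   uvicorn --factory nominatim_api.server.starlette.server:run_wsgi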
| 6,294 | Python | .py | 133 | 39.293233 | 100 | 0.662522 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,752 | icu_tokenizer.py | osm-search_Nominatim/src/nominatim_api/search/icu_tokenizer.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of query analysis for the ICU tokenizer.
"""
from typing import Tuple, Dict, List, Optional, NamedTuple, Iterator, Any, cast
from collections import defaultdict
import dataclasses
import difflib
from icu import Transliterator
import sqlalchemy as sa
from ..typing import SaRow
from ..sql.sqlalchemy_types import Json
from ..connection import SearchConnection
from ..logging import log
from ..search import query as qmod
from ..search.query_analyzer_factory import AbstractQueryAnalyzer
DB_TO_TOKEN_TYPE = {
'W': qmod.TokenType.WORD,
'w': qmod.TokenType.PARTIAL,
'H': qmod.TokenType.HOUSENUMBER,
'P': qmod.TokenType.POSTCODE,
'C': qmod.TokenType.COUNTRY
}
class QueryPart(NamedTuple):
""" Normalized and transliterated form of a single term in the query.
When the term came out of a split during the transliteration,
the normalized string is the full word before transliteration.
The word number keeps track of the word before transliteration
and can be used to identify partial transliterated terms.
"""
token: str
normalized: str
word_number: int
QueryParts = List[QueryPart]
WordDict = Dict[str, List[qmod.TokenRange]]
def yield_words(terms: List[QueryPart], start: int) -> Iterator[Tuple[str, qmod.TokenRange]]:
""" Return all combinations of words in the terms list after the
given position.
"""
total = len(terms)
for first in range(start, total):
word = terms[first].token
yield word, qmod.TokenRange(first, first + 1)
for last in range(first + 1, min(first + 20, total)):
word = ' '.join((word, terms[last].token))
yield word, qmod.TokenRange(first, last + 1)
@dataclasses.dataclass
class ICUToken(qmod.Token):
""" Specialised token for ICU tokenizer.
"""
word_token: str
info: Optional[Dict[str, Any]]
def get_category(self) -> Tuple[str, str]:
assert self.info
return self.info.get('class', ''), self.info.get('type', '')
def rematch(self, norm: str) -> None:
""" Check how well the token matches the given normalized string
and add a penalty, if necessary.
"""
if not self.lookup_word:
return
seq = difflib.SequenceMatcher(a=self.lookup_word, b=norm)
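        # Accumulate an edit distance: insertions/deletions touching the
        # word boundaries count 1, replacements count the longer span and
        # other edits count their full length difference. The penalty is
        # this distance relative to the length of the lookup word.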
distance = 0
for tag, afrom, ato, bfrom, bto in seq.get_opcodes():
if tag in ('delete', 'insert') and (afrom == 0 or ato == len(self.lookup_word)):
distance += 1
elif tag == 'replace':
distance += max((ato-afrom), (bto-bfrom))
elif tag != 'equal':
distance += abs((ato-afrom) - (bto-bfrom))
self.penalty += (distance/len(self.lookup_word))
@staticmethod
def from_db_row(row: SaRow) -> 'ICUToken':
""" Create a ICUToken from the row of the word table.
"""
count = 1 if row.info is None else row.info.get('count', 1)
addr_count = 1 if row.info is None else row.info.get('addr_count', 1)
penalty = 0.0
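        # Base penalty heuristics: partial words ('w') are always penalised,
        # single-character full words ('W') and country codes ('C') as well,
        # housenumbers ('H') in proportion to their non-digit characters.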
if row.type == 'w':
penalty = 0.3
elif row.type == 'W':
if len(row.word_token) == 1 and row.word_token == row.word:
penalty = 0.2 if row.word.isdigit() else 0.3
elif row.type == 'H':
penalty = sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
if all(not c.isdigit() for c in row.word_token):
penalty += 0.2 * (len(row.word_token) - 1)
elif row.type == 'C':
if len(row.word_token) == 1:
penalty = 0.3
if row.info is None:
lookup_word = row.word
else:
lookup_word = row.info.get('lookup', row.word)
if lookup_word:
lookup_word = lookup_word.split('@', 1)[0]
else:
lookup_word = row.word_token
return ICUToken(penalty=penalty, token=row.word_id, count=max(1, count),
lookup_word=lookup_word,
word_token=row.word_token, info=row.info,
addr_count=max(1, addr_count))
class ICUQueryAnalyzer(AbstractQueryAnalyzer):
""" Converter for query strings into a tokenized query
        using the tokens created by an ICU tokenizer.
"""
def __init__(self, conn: SearchConnection) -> None:
self.conn = conn
async def setup(self) -> None:
""" Set up static data structures needed for the analysis.
"""
async def _make_normalizer() -> Any:
rules = await self.conn.get_property('tokenizer_import_normalisation')
return Transliterator.createFromRules("normalization", rules)
self.normalizer = await self.conn.get_cached_value('ICUTOK', 'normalizer',
_make_normalizer)
async def _make_transliterator() -> Any:
rules = await self.conn.get_property('tokenizer_import_transliteration')
return Transliterator.createFromRules("transliteration", rules)
self.transliterator = await self.conn.get_cached_value('ICUTOK', 'transliterator',
_make_transliterator)
if 'word' not in self.conn.t.meta.tables:
sa.Table('word', self.conn.t.meta,
sa.Column('word_id', sa.Integer),
sa.Column('word_token', sa.Text, nullable=False),
sa.Column('type', sa.Text, nullable=False),
sa.Column('word', sa.Text),
sa.Column('info', Json))
async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
""" Analyze the given list of phrases and return the
tokenized query.
"""
log().section('Analyze query (using ICU tokenizer)')
normalized = list(filter(lambda p: p.text,
(qmod.Phrase(p.ptype, self.normalize_text(p.text))
for p in phrases)))
query = qmod.QueryStruct(normalized)
log().var_dump('Normalized query', query.source)
if not query.source:
return query
parts, words = self.split_query(query)
log().var_dump('Transliterated query', lambda: _dump_transliterated(query, parts))
for row in await self.lookup_in_db(list(words.keys())):
for trange in words[row.word_token]:
token = ICUToken.from_db_row(row)
if row.type == 'S':
if row.info['op'] in ('in', 'near'):
if trange.start == 0:
query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
else:
if trange.start == 0 and trange.end == query.num_token_slots():
query.add_token(trange, qmod.TokenType.NEAR_ITEM, token)
else:
query.add_token(trange, qmod.TokenType.QUALIFIER, token)
else:
query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)
self.add_extra_tokens(query, parts)
self.rerank_tokens(query, parts)
log().table_dump('Word tokens', _dump_word_tokens(query))
return query
def normalize_text(self, text: str) -> str:
""" Bring the given text into a normalized form. That is the
            standardized form that search will work with. All information removed
at this stage is inevitably lost.
"""
return cast(str, self.normalizer.transliterate(text))
def split_query(self, query: qmod.QueryStruct) -> Tuple[QueryParts, WordDict]:
""" Transliterate the phrases and split them into tokens.
Returns the list of transliterated tokens together with their
normalized form and a dictionary of words for lookup together
with their position.
"""
parts: QueryParts = []
phrase_start = 0
words = defaultdict(list)
wordnr = 0
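        # Every transliterated term becomes one QueryPart plus one query
        # node; the break type records whether a boundary stems from the
        # transliteration (TOKEN), a space (WORD) or a phrase end (PHRASE).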
for phrase in query.source:
query.nodes[-1].ptype = phrase.ptype
for word in phrase.text.split(' '):
trans = self.transliterator.transliterate(word)
if trans:
for term in trans.split(' '):
if term:
parts.append(QueryPart(term, word, wordnr))
query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
query.nodes[-1].btype = qmod.BreakType.WORD
wordnr += 1
query.nodes[-1].btype = qmod.BreakType.PHRASE
for word, wrange in yield_words(parts, phrase_start):
words[word].append(wrange)
phrase_start = len(parts)
query.nodes[-1].btype = qmod.BreakType.END
return parts, words
async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
""" Return the token information from the database for the
given word tokens.
"""
t = self.conn.t.meta.tables['word']
return await self.conn.execute(t.select().where(t.c.word_token.in_(words)))
def add_extra_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
""" Add tokens to query that are not saved in the database.
"""
for part, node, i in zip(parts, query.nodes, range(1000)):
if len(part.token) <= 4 and part[0].isdigit()\
and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
ICUToken(penalty=0.5, token=0,
count=1, addr_count=1, lookup_word=part.token,
word_token=part.token, info=None))
def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
""" Add penalties to tokens that depend on presence of other token.
"""
for i, node, tlist in query.iter_token_lists():
if tlist.ttype == qmod.TokenType.POSTCODE:
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.POSTCODE \
and (repl.ttype != qmod.TokenType.HOUSENUMBER
or len(tlist.tokens[0].lookup_word) > 4):
repl.add_penalty(0.39)
elif tlist.ttype == qmod.TokenType.HOUSENUMBER \
and len(tlist.tokens[0].lookup_word) <= 3:
if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.HOUSENUMBER:
repl.add_penalty(0.5 - tlist.tokens[0].penalty)
elif tlist.ttype not in (qmod.TokenType.COUNTRY, qmod.TokenType.PARTIAL):
norm = parts[i].normalized
for j in range(i + 1, tlist.end):
if parts[j - 1].word_number != parts[j].word_number:
norm += ' ' + parts[j].normalized
for token in tlist.tokens:
cast(ICUToken, token).rematch(norm)
def _dump_transliterated(query: qmod.QueryStruct, parts: QueryParts) -> str:
out = query.nodes[0].btype.value
for node, part in zip(query.nodes[1:], parts):
out += part.token + node.btype.value
return out
def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
for node in query.nodes:
for tlist in node.starting:
for token in tlist.tokens:
t = cast(ICUToken, token)
yield [tlist.ttype.name, t.token, t.word_token or '',
t.lookup_word or '', t.penalty, t.count, t.info]
async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
""" Create and set up a new query analyzer for a database based
on the ICU tokenizer.
"""
out = ICUQueryAnalyzer(conn)
await out.setup()
return out
| 12,549 | Python | .py | 257 | 37.023346 | 94 | 0.586528 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,753 | db_search_lookups.py | osm-search_Nominatim/src/nominatim_api/search/db_search_lookups.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of lookup functions for the search_name table.
"""
from typing import List, Any
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from ..typing import SaFromClause
from ..sql.sqlalchemy_types import IntArray
# pylint: disable=consider-using-f-string
LookupType = sa.sql.expression.FunctionElement[Any]
class LookupAll(LookupType):
""" Find all entries in search_name table that contain all of
a given list of tokens using an index for the search.
"""
inherit_cache = True
def __init__(self, table: SaFromClause, column: str, tokens: List[int]) -> None:
super().__init__(table.c.place_id, getattr(table.c, column), column,
sa.type_coerce(tokens, IntArray))
@compiles(LookupAll)
def _default_lookup_all(element: LookupAll,
compiler: 'sa.Compiled', **kw: Any) -> str:
_, col, _, tokens = list(element.clauses)
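    # Conceptually renders as "(<column> @> <token array>)", i.e. the
    # column must contain all of the given tokens.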
return "(%s @> %s)" % (compiler.process(col, **kw),
compiler.process(tokens, **kw))
@compiles(LookupAll, 'sqlite')
def _sqlite_lookup_all(element: LookupAll,
compiler: 'sa.Compiled', **kw: Any) -> str:
place, col, colname, tokens = list(element.clauses)
return "(%s IN (SELECT CAST(value as bigint) FROM"\
" (SELECT array_intersect_fuzzy(places) as p FROM"\
" (SELECT places FROM reverse_search_name"\
" WHERE word IN (SELECT value FROM json_each('[' || %s || ']'))"\
" AND column = %s"\
" ORDER BY length(places)) as x) as u,"\
" json_each('[' || u.p || ']'))"\
" AND array_contains(%s, %s))"\
% (compiler.process(place, **kw),
compiler.process(tokens, **kw),
compiler.process(colname, **kw),
compiler.process(col, **kw),
compiler.process(tokens, **kw)
)
class LookupAny(LookupType):
""" Find all entries that contain at least one of the given tokens.
Use an index for the search.
"""
inherit_cache = True
def __init__(self, table: SaFromClause, column: str, tokens: List[int]) -> None:
super().__init__(table.c.place_id, getattr(table.c, column), column,
sa.type_coerce(tokens, IntArray))
@compiles(LookupAny)
def _default_lookup_any(element: LookupAny,
compiler: 'sa.Compiled', **kw: Any) -> str:
_, col, _, tokens = list(element.clauses)
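    # Conceptually renders as "(<column> && <token array>)", i.e. the
    # column must share at least one token with the given array.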
return "(%s && %s)" % (compiler.process(col, **kw),
compiler.process(tokens, **kw))
@compiles(LookupAny, 'sqlite')
def _sqlite_lookup_any(element: LookupAny,
compiler: 'sa.Compiled', **kw: Any) -> str:
place, _, colname, tokens = list(element.clauses)
return "%s IN (SELECT CAST(value as bigint) FROM"\
" (SELECT array_union(places) as p FROM reverse_search_name"\
" WHERE word IN (SELECT value FROM json_each('[' || %s || ']'))"\
" AND column = %s) as u,"\
" json_each('[' || u.p || ']'))" % (compiler.process(place, **kw),
compiler.process(tokens, **kw),
compiler.process(colname, **kw))
class Restrict(LookupType):
""" Find all entries that contain all of the given tokens.
Do not use an index for the search.
"""
inherit_cache = True
def __init__(self, table: SaFromClause, column: str, tokens: List[int]) -> None:
super().__init__(getattr(table.c, column),
sa.type_coerce(tokens, IntArray))
@compiles(Restrict)
def _default_restrict(element: Restrict,
compiler: 'sa.Compiled', **kw: Any) -> str:
arg1, arg2 = list(element.clauses)
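    # The coalesce(null, ...) wrapper hides the column from the query
    # planner, so this containment check is never answered from an index.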
return "(coalesce(null, %s) @> %s)" % (compiler.process(arg1, **kw),
compiler.process(arg2, **kw))
@compiles(Restrict, 'sqlite')
def _sqlite_restrict(element: Restrict,
compiler: 'sa.Compiled', **kw: Any) -> str:
return "array_contains(%s)" % compiler.process(element.clauses, **kw)
| 4,420 | Python | .py | 91 | 38.923077 | 84 | 0.584069 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,754 | db_search_builder.py | osm-search_Nominatim/src/nominatim_api/search/db_search_builder.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Conversion from token assignment to an abstract DB search.
"""
from typing import Optional, List, Tuple, Iterator, Dict
import heapq
from ..types import SearchDetails, DataLayer
from .query import QueryStruct, Token, TokenType, TokenRange, BreakType
from .token_assignment import TokenAssignment
from . import db_search_fields as dbf
from . import db_searches as dbs
from . import db_search_lookups as lookups
def wrap_near_search(categories: List[Tuple[str, str]],
search: dbs.AbstractSearch) -> dbs.NearSearch:
""" Create a new search that wraps the given search in a search
for near places of the given category.
"""
return dbs.NearSearch(penalty=search.penalty,
categories=dbf.WeightedCategories(categories,
[0.0] * len(categories)),
search=search)
def build_poi_search(category: List[Tuple[str, str]],
countries: Optional[List[str]]) -> dbs.PoiSearch:
""" Create a new search for places by the given category, possibly
        constrained to the given countries.
"""
if countries:
ccs = dbf.WeightedStrings(countries, [0.0] * len(countries))
else:
ccs = dbf.WeightedStrings([], [])
class _PoiData(dbf.SearchData):
penalty = 0.0
qualifiers = dbf.WeightedCategories(category, [0.0] * len(category))
        countries = ccs
return dbs.PoiSearch(_PoiData())
class SearchBuilder:
""" Build the abstract search queries from token assignments.
"""
def __init__(self, query: QueryStruct, details: SearchDetails) -> None:
self.query = query
self.details = details
@property
def configured_for_country(self) -> bool:
""" Return true if the search details are configured to
allow countries in the result.
"""
return self.details.min_rank <= 4 and self.details.max_rank >= 4 \
and self.details.layer_enabled(DataLayer.ADDRESS)
@property
def configured_for_postcode(self) -> bool:
""" Return true if the search details are configured to
allow postcodes in the result.
"""
return self.details.min_rank <= 5 and self.details.max_rank >= 11\
and self.details.layer_enabled(DataLayer.ADDRESS)
@property
def configured_for_housenumbers(self) -> bool:
""" Return true if the search details are configured to
allow addresses in the result.
"""
return self.details.max_rank >= 30 \
and self.details.layer_enabled(DataLayer.ADDRESS)
def build(self, assignment: TokenAssignment) -> Iterator[dbs.AbstractSearch]:
""" Yield all possible abstract searches for the given token assignment.
"""
sdata = self.get_search_data(assignment)
if sdata is None:
return
near_items = self.get_near_items(assignment)
if near_items is not None and not near_items:
            return  # impossible combination of near items and category parameter
if assignment.name is None:
if near_items and not sdata.postcodes:
sdata.qualifiers = near_items
near_items = None
builder = self.build_poi_search(sdata)
elif assignment.housenumber:
hnr_tokens = self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER)
builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address)
else:
builder = self.build_special_search(sdata, assignment.address,
bool(near_items))
else:
builder = self.build_name_search(sdata, assignment.name, assignment.address,
bool(near_items))
if near_items:
penalty = min(near_items.penalties)
near_items.penalties = [p - penalty for p in near_items.penalties]
for search in builder:
search_penalty = search.penalty
search.penalty = 0.0
yield dbs.NearSearch(penalty + assignment.penalty + search_penalty,
near_items, search)
else:
for search in builder:
search.penalty += assignment.penalty
yield search
def build_poi_search(self, sdata: dbf.SearchData) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search query for a simple category search.
This kind of search requires an additional geographic constraint.
"""
if not sdata.housenumbers \
and ((self.details.viewbox and self.details.bounded_viewbox) or self.details.near):
yield dbs.PoiSearch(sdata)
def build_special_search(self, sdata: dbf.SearchData,
address: List[TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for searches that do not involve
a named place.
"""
if sdata.qualifiers:
# No special searches over qualifiers supported.
return
if sdata.countries and not address and not sdata.postcodes \
and self.configured_for_country:
yield dbs.CountrySearch(sdata)
if sdata.postcodes and (is_category or self.configured_for_postcode):
penalty = 0.0 if sdata.countries else 0.1
if address:
sdata.lookups = [dbf.FieldLookup('nameaddress_vector',
[t.token for r in address
for t in self.query.get_partials_list(r)],
lookups.Restrict)]
penalty += 0.2
yield dbs.PostcodeSearch(penalty, sdata)
def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[Token],
address: List[TokenRange]) -> Iterator[dbs.AbstractSearch]:
""" Build a simple address search for special entries where the
housenumber is the main name token.
"""
sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], lookups.LookupAny)]
expected_count = sum(t.count for t in hnrs)
partials = {t.token: t.addr_count for trange in address
for t in self.query.get_partials_list(trange)}
if not partials:
# can happen when none of the partials is indexed
return
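        # Choose the address lookup by expected selectivity: restrict by
        # address partials for rare housenumbers, use the index for more
        # frequent ones, and fall back to a handful of full address words
        # when even a single partial is too common.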
if expected_count < 8000:
sdata.lookups.append(dbf.FieldLookup('nameaddress_vector',
list(partials), lookups.Restrict))
elif len(partials) != 1 or list(partials.values())[0] < 10000:
sdata.lookups.append(dbf.FieldLookup('nameaddress_vector',
list(partials), lookups.LookupAll))
else:
addr_fulls = [t.token for t
in self.query.get_tokens(address[0], TokenType.WORD)]
if len(addr_fulls) > 5:
return
sdata.lookups.append(
dbf.FieldLookup('nameaddress_vector', addr_fulls, lookups.LookupAny))
sdata.housenumbers = dbf.WeightedStrings([], [])
yield dbs.PlaceSearch(0.05, sdata, expected_count)
def build_name_search(self, sdata: dbf.SearchData,
name: TokenRange, address: List[TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for simple name or address searches.
"""
if is_category or not sdata.housenumbers or self.configured_for_housenumbers:
ranking = self.get_name_ranking(name)
name_penalty = ranking.normalize_penalty()
if ranking.rankings:
sdata.rankings.append(ranking)
for penalty, count, lookup in self.yield_lookups(name, address):
sdata.lookups = lookup
yield dbs.PlaceSearch(penalty + name_penalty, sdata, count)
def yield_lookups(self, name: TokenRange, address: List[TokenRange])\
-> Iterator[Tuple[float, int, List[dbf.FieldLookup]]]:
""" Yield all variants how the given name and address should best
be searched for. This takes into account how frequent the terms
are and tries to find a lookup that optimizes index use.
"""
penalty = 0.0 # extra penalty
name_partials = {t.token: t for t in self.query.get_partials_list(name)}
addr_partials = [t for r in address for t in self.query.get_partials_list(r)]
addr_tokens = list({t.token for t in addr_partials})
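        # Rough result estimate: each additional name partial is assumed
        # to halve the number of matches of the rarest partial.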
exp_count = min(t.count for t in name_partials.values()) / (2**(len(name_partials) - 1))
if (len(name_partials) > 3 or exp_count < 8000):
yield penalty, exp_count, dbf.lookup_by_names(list(name_partials.keys()), addr_tokens)
return
addr_count = min(t.addr_count for t in addr_partials) if addr_partials else 30000
        # Partial terms too frequent. Try looking up by rare full names first.
name_fulls = self.query.get_tokens(name, TokenType.WORD)
if name_fulls:
fulls_count = sum(t.count for t in name_fulls)
if fulls_count < 50000 or addr_count < 30000:
                yield penalty, fulls_count / (2**len(addr_tokens)), \
self.get_full_name_ranking(name_fulls, addr_partials,
fulls_count > 30000 / max(1, len(addr_tokens)))
# To catch remaining results, lookup by name and address
# We only do this if there is a reasonable number of results expected.
exp_count = exp_count / (2**len(addr_tokens)) if addr_tokens else exp_count
if exp_count < 10000 and addr_count < 20000:
penalty += 0.35 * max(1 if name_fulls else 0.1,
5 - len(name_partials) - len(addr_tokens))
yield penalty, exp_count,\
self.get_name_address_ranking(list(name_partials.keys()), addr_partials)
def get_name_address_ranking(self, name_tokens: List[int],
addr_partials: List[Token]) -> List[dbf.FieldLookup]:
""" Create a ranking expression looking up by name and address.
"""
lookup = [dbf.FieldLookup('name_vector', name_tokens, lookups.LookupAll)]
addr_restrict_tokens = []
addr_lookup_tokens = []
for t in addr_partials:
if t.addr_count > 20000:
addr_restrict_tokens.append(t.token)
else:
addr_lookup_tokens.append(t.token)
if addr_restrict_tokens:
lookup.append(dbf.FieldLookup('nameaddress_vector',
addr_restrict_tokens, lookups.Restrict))
if addr_lookup_tokens:
lookup.append(dbf.FieldLookup('nameaddress_vector',
addr_lookup_tokens, lookups.LookupAll))
return lookup
def get_full_name_ranking(self, name_fulls: List[Token], addr_partials: List[Token],
use_lookup: bool) -> List[dbf.FieldLookup]:
""" Create a ranking expression with full name terms and
additional address lookup. When 'use_lookup' is true, then
address lookups will use the index, when the occurrences are not
too many.
"""
# At this point drop unindexed partials from the address.
        # This might yield wrong results; there is nothing we can do about that.
if use_lookup:
addr_restrict_tokens = []
addr_lookup_tokens = []
for t in addr_partials:
if t.addr_count > 20000:
addr_restrict_tokens.append(t.token)
else:
addr_lookup_tokens.append(t.token)
else:
addr_restrict_tokens = [t.token for t in addr_partials]
addr_lookup_tokens = []
return dbf.lookup_by_any_name([t.token for t in name_fulls],
addr_restrict_tokens, addr_lookup_tokens)
def get_name_ranking(self, trange: TokenRange,
db_field: str = 'name_vector') -> dbf.FieldRanking:
""" Create a ranking expression for a name term in the given range.
"""
name_fulls = self.query.get_tokens(trange, TokenType.WORD)
ranks = [dbf.RankedTokens(t.penalty, [t.token]) for t in name_fulls]
ranks.sort(key=lambda r: r.penalty)
# Fallback, sum of penalty for partials
name_partials = self.query.get_partials_list(trange)
default = sum(t.penalty for t in name_partials) + 0.2
return dbf.FieldRanking(db_field, default, ranks)
def get_addr_ranking(self, trange: TokenRange) -> dbf.FieldRanking:
""" Create a list of ranking expressions for an address term
for the given ranges.
"""
todo: List[Tuple[int, int, dbf.RankedTokens]] = []
heapq.heappush(todo, (0, trange.start, dbf.RankedTokens(0.0, [])))
ranks: List[dbf.RankedTokens] = []
while todo: # pylint: disable=too-many-nested-blocks
neglen, pos, rank = heapq.heappop(todo)
for tlist in self.query.nodes[pos].starting:
if tlist.ttype in (TokenType.PARTIAL, TokenType.WORD):
if tlist.end < trange.end:
chgpenalty = PENALTY_WORDCHANGE[self.query.nodes[tlist.end].btype]
if tlist.ttype == TokenType.PARTIAL:
penalty = rank.penalty + chgpenalty \
+ max(t.penalty for t in tlist.tokens)
heapq.heappush(todo, (neglen - 1, tlist.end,
dbf.RankedTokens(penalty, rank.tokens)))
else:
for t in tlist.tokens:
heapq.heappush(todo, (neglen - 1, tlist.end,
rank.with_token(t, chgpenalty)))
elif tlist.end == trange.end:
if tlist.ttype == TokenType.PARTIAL:
ranks.append(dbf.RankedTokens(rank.penalty
+ max(t.penalty for t in tlist.tokens),
rank.tokens))
else:
ranks.extend(rank.with_token(t, 0.0) for t in tlist.tokens)
if len(ranks) >= 10:
                            # Too many variants, bail out and only add the
                            # worst-case fallback: sum of the penalties of partials.
name_partials = self.query.get_partials_list(trange)
default = sum(t.penalty for t in name_partials) + 0.2
ranks.append(dbf.RankedTokens(rank.penalty + default, []))
# Bail out of outer loop
todo.clear()
break
ranks.sort(key=lambda r: len(r.tokens))
default = ranks[0].penalty + 0.3
del ranks[0]
ranks.sort(key=lambda r: r.penalty)
return dbf.FieldRanking('nameaddress_vector', default, ranks)
def get_search_data(self, assignment: TokenAssignment) -> Optional[dbf.SearchData]:
""" Collect the tokens for the non-name search fields in the
assignment.
"""
sdata = dbf.SearchData()
sdata.penalty = assignment.penalty
if assignment.country:
tokens = self.get_country_tokens(assignment.country)
if not tokens:
return None
sdata.set_strings('countries', tokens)
elif self.details.countries:
sdata.countries = dbf.WeightedStrings(self.details.countries,
[0.0] * len(self.details.countries))
if assignment.housenumber:
sdata.set_strings('housenumbers',
self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER))
if assignment.postcode:
sdata.set_strings('postcodes',
self.query.get_tokens(assignment.postcode,
TokenType.POSTCODE))
if assignment.qualifier:
tokens = self.get_qualifier_tokens(assignment.qualifier)
if not tokens:
return None
sdata.set_qualifiers(tokens)
elif self.details.categories:
sdata.qualifiers = dbf.WeightedCategories(self.details.categories,
[0.0] * len(self.details.categories))
if assignment.address:
if not assignment.name and assignment.housenumber:
# housenumber search: the first item needs to be handled like
# a name in ranking or penalties are not comparable with
# normal searches.
sdata.set_ranking([self.get_name_ranking(assignment.address[0],
db_field='nameaddress_vector')]
+ [self.get_addr_ranking(r) for r in assignment.address[1:]])
else:
sdata.set_ranking([self.get_addr_ranking(r) for r in assignment.address])
else:
sdata.rankings = []
return sdata
def get_country_tokens(self, trange: TokenRange) -> List[Token]:
""" Return the list of country tokens for the given range,
optionally filtered by the country list from the details
parameters.
"""
tokens = self.query.get_tokens(trange, TokenType.COUNTRY)
if self.details.countries:
tokens = [t for t in tokens if t.lookup_word in self.details.countries]
return tokens
def get_qualifier_tokens(self, trange: TokenRange) -> List[Token]:
""" Return the list of qualifier tokens for the given range,
optionally filtered by the qualifier list from the details
parameters.
"""
tokens = self.query.get_tokens(trange, TokenType.QUALIFIER)
if self.details.categories:
tokens = [t for t in tokens if t.get_category() in self.details.categories]
return tokens
def get_near_items(self, assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]:
""" Collect tokens for near items search or use the categories
requested per parameter.
Returns None if no category search is requested.
"""
if assignment.near_item:
tokens: Dict[Tuple[str, str], float] = {}
for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM):
cat = t.get_category()
# The category of a near search will be that of near_item.
# Thus, if search is restricted to a category parameter,
# the two sets must intersect.
if (not self.details.categories or cat in self.details.categories)\
and t.penalty < tokens.get(cat, 1000.0):
tokens[cat] = t.penalty
return dbf.WeightedCategories(list(tokens.keys()), list(tokens.values()))
return None
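# Penalty applied when a multi-word term lookup continues across a break
# of the given type (used when ranking address terms).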
PENALTY_WORDCHANGE = {
BreakType.START: 0.0,
BreakType.END: 0.0,
BreakType.PHRASE: 0.0,
BreakType.WORD: 0.1,
BreakType.PART: 0.2,
BreakType.TOKEN: 0.4
}
| 20,342 | Python | .py | 383 | 38.295039 | 100 | 0.576838 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,755 | db_searches.py | osm-search_Nominatim/src/nominatim_api/search/db_searches.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the actual database accesses for forward search.
"""
from typing import List, Tuple, AsyncIterator, Dict, Any, Callable, cast
import abc
import sqlalchemy as sa
from ..typing import SaFromClause, SaScalarSelect, SaColumn, \
SaExpression, SaSelect, SaLambdaSelect, SaRow, SaBind
from ..sql.sqlalchemy_types import Geometry, IntArray
from ..connection import SearchConnection
from ..types import SearchDetails, DataLayer, GeometryFormat, Bbox
from .. import results as nres
from .db_search_fields import SearchData, WeightedCategories
#pylint: disable=singleton-comparison,not-callable
#pylint: disable=too-many-branches,too-many-arguments,too-many-locals,too-many-statements
def no_index(expr: SaColumn) -> SaColumn:
""" Wrap the given expression, so that the query planner will
refrain from using the expression for index lookup.
"""
return sa.func.coalesce(sa.null(), expr) # pylint: disable=not-callable
def _details_to_bind_params(details: SearchDetails) -> Dict[str, Any]:
""" Create a dictionary from search parameters that can be used
as bind parameter for SQL execute.
"""
return {'limit': details.max_results,
'min_rank': details.min_rank,
'max_rank': details.max_rank,
'viewbox': details.viewbox,
'viewbox2': details.viewbox_x2,
'near': details.near,
'near_radius': details.near_radius,
'excluded': details.excluded,
'countries': details.countries}
LIMIT_PARAM: SaBind = sa.bindparam('limit')
MIN_RANK_PARAM: SaBind = sa.bindparam('min_rank')
MAX_RANK_PARAM: SaBind = sa.bindparam('max_rank')
VIEWBOX_PARAM: SaBind = sa.bindparam('viewbox', type_=Geometry)
VIEWBOX2_PARAM: SaBind = sa.bindparam('viewbox2', type_=Geometry)
NEAR_PARAM: SaBind = sa.bindparam('near', type_=Geometry)
NEAR_RADIUS_PARAM: SaBind = sa.bindparam('near_radius')
COUNTRIES_PARAM: SaBind = sa.bindparam('countries')
def filter_by_area(sql: SaSelect, t: SaFromClause,
details: SearchDetails, avoid_index: bool = False) -> SaSelect:
""" Apply SQL statements for filtering by viewbox and near point,
if applicable.
"""
if details.near is not None and details.near_radius is not None:
if details.near_radius < 0.1 and not avoid_index:
sql = sql.where(t.c.geometry.within_distance(NEAR_PARAM, NEAR_RADIUS_PARAM))
else:
sql = sql.where(t.c.geometry.ST_Distance(NEAR_PARAM) <= NEAR_RADIUS_PARAM)
if details.viewbox is not None and details.bounded_viewbox:
sql = sql.where(t.c.geometry.intersects(VIEWBOX_PARAM,
use_index=not avoid_index and
details.viewbox.area < 0.2))
return sql
def _exclude_places(t: SaFromClause) -> Callable[[], SaExpression]:
return lambda: t.c.place_id.not_in(sa.bindparam('excluded'))
def _select_placex(t: SaFromClause) -> SaSelect:
return sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name,
t.c.class_, t.c.type,
t.c.address, t.c.extratags,
t.c.housenumber, t.c.postcode, t.c.country_code,
t.c.wikipedia,
t.c.parent_place_id, t.c.rank_address, t.c.rank_search,
t.c.linked_place_id, t.c.admin_level,
t.c.centroid,
t.c.geometry.ST_Expand(0).label('bbox'))
def _add_geometry_columns(sql: SaLambdaSelect, col: SaColumn, details: SearchDetails) -> SaSelect:
out = []
if details.geometry_simplification > 0.0:
col = sa.func.ST_SimplifyPreserveTopology(col, details.geometry_simplification)
if details.geometry_output & GeometryFormat.GEOJSON:
out.append(sa.func.ST_AsGeoJSON(col, 7).label('geometry_geojson'))
if details.geometry_output & GeometryFormat.TEXT:
out.append(sa.func.ST_AsText(col).label('geometry_text'))
if details.geometry_output & GeometryFormat.KML:
out.append(sa.func.ST_AsKML(col, 7).label('geometry_kml'))
if details.geometry_output & GeometryFormat.SVG:
out.append(sa.func.ST_AsSVG(col, 0, 7).label('geometry_svg'))
return sql.add_columns(*out)
def _make_interpolation_subquery(table: SaFromClause, inner: SaFromClause,
numerals: List[int], details: SearchDetails) -> SaScalarSelect:
all_ids = sa.func.ArrayAgg(table.c.place_id)
sql = sa.select(all_ids).where(table.c.parent_place_id == inner.c.place_id)
if len(numerals) == 1:
sql = sql.where(sa.between(numerals[0], table.c.startnumber, table.c.endnumber))\
.where((numerals[0] - table.c.startnumber) % table.c.step == 0)
else:
sql = sql.where(sa.or_(
*(sa.and_(sa.between(n, table.c.startnumber, table.c.endnumber),
(n - table.c.startnumber) % table.c.step == 0)
for n in numerals)))
if details.excluded:
sql = sql.where(_exclude_places(table))
return sql.scalar_subquery()
def _filter_by_layer(table: SaFromClause, layers: DataLayer) -> SaColumn:
orexpr: List[SaExpression] = []
if layers & DataLayer.ADDRESS and layers & DataLayer.POI:
orexpr.append(no_index(table.c.rank_address).between(1, 30))
elif layers & DataLayer.ADDRESS:
orexpr.append(no_index(table.c.rank_address).between(1, 29))
orexpr.append(sa.func.IsAddressPoint(table))
elif layers & DataLayer.POI:
orexpr.append(sa.and_(no_index(table.c.rank_address) == 30,
table.c.class_.not_in(('place', 'building'))))
if layers & DataLayer.MANMADE:
exclude = []
if not layers & DataLayer.RAILWAY:
exclude.append('railway')
if not layers & DataLayer.NATURAL:
exclude.extend(('natural', 'water', 'waterway'))
orexpr.append(sa.and_(table.c.class_.not_in(tuple(exclude)),
no_index(table.c.rank_address) == 0))
else:
include = []
if layers & DataLayer.RAILWAY:
include.append('railway')
if layers & DataLayer.NATURAL:
include.extend(('natural', 'water', 'waterway'))
orexpr.append(sa.and_(table.c.class_.in_(tuple(include)),
no_index(table.c.rank_address) == 0))
if len(orexpr) == 1:
return orexpr[0]
return sa.or_(*orexpr)
def _interpolated_position(table: SaFromClause, nr: SaColumn) -> SaColumn:
pos = sa.cast(nr - table.c.startnumber, sa.Float) / (table.c.endnumber - table.c.startnumber)
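    # Example: on a line interpolating housenumbers 2..10, number 6 maps
    # to pos 0.5, i.e. the midpoint of the interpolation geometry.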
return sa.case(
(table.c.endnumber == table.c.startnumber, table.c.linegeo.ST_Centroid()),
else_=table.c.linegeo.ST_LineInterpolatePoint(pos)).label('centroid')
async def _get_placex_housenumbers(conn: SearchConnection,
place_ids: List[int],
details: SearchDetails) -> AsyncIterator[nres.SearchResult]:
t = conn.t.placex
sql = _select_placex(t).add_columns(t.c.importance)\
.where(t.c.place_id.in_(place_ids))
if details.geometry_output:
sql = _add_geometry_columns(sql, t.c.geometry, details)
for row in await conn.execute(sql):
result = nres.create_from_placex_row(row, nres.SearchResult)
assert result
result.bbox = Bbox.from_wkb(row.bbox)
yield result
def _int_list_to_subquery(inp: List[int]) -> 'sa.Subquery':
""" Create a subselect that returns the given list of integers
as rows in the column 'nr'.
"""
vtab = sa.func.JsonArrayEach(sa.type_coerce(inp, sa.JSON))\
.table_valued(sa.column('value', type_=sa.JSON))
return sa.select(sa.cast(sa.cast(vtab.c.value, sa.Text), sa.Integer).label('nr')).subquery()
async def _get_osmline(conn: SearchConnection, place_ids: List[int],
numerals: List[int],
details: SearchDetails) -> AsyncIterator[nres.SearchResult]:
t = conn.t.osmline
values = _int_list_to_subquery(numerals)
sql = sa.select(t.c.place_id, t.c.osm_id,
t.c.parent_place_id, t.c.address,
values.c.nr.label('housenumber'),
_interpolated_position(t, values.c.nr),
t.c.postcode, t.c.country_code)\
.where(t.c.place_id.in_(place_ids))\
.join(values, values.c.nr.between(t.c.startnumber, t.c.endnumber))
if details.geometry_output:
sub = sql.subquery()
sql = _add_geometry_columns(sa.select(sub), sub.c.centroid, details)
for row in await conn.execute(sql):
result = nres.create_from_osmline_row(row, nres.SearchResult)
assert result
yield result
async def _get_tiger(conn: SearchConnection, place_ids: List[int],
numerals: List[int], osm_id: int,
details: SearchDetails) -> AsyncIterator[nres.SearchResult]:
t = conn.t.tiger
values = _int_list_to_subquery(numerals)
sql = sa.select(t.c.place_id, t.c.parent_place_id,
sa.literal('W').label('osm_type'),
sa.literal(osm_id).label('osm_id'),
values.c.nr.label('housenumber'),
_interpolated_position(t, values.c.nr),
t.c.postcode)\
.where(t.c.place_id.in_(place_ids))\
.join(values, values.c.nr.between(t.c.startnumber, t.c.endnumber))
if details.geometry_output:
sub = sql.subquery()
sql = _add_geometry_columns(sa.select(sub), sub.c.centroid, details)
for row in await conn.execute(sql):
result = nres.create_from_tiger_row(row, nres.SearchResult)
assert result
yield result
class AbstractSearch(abc.ABC):
""" Encapuslation of a single lookup in the database.
"""
SEARCH_PRIO: int = 2
def __init__(self, penalty: float) -> None:
self.penalty = penalty
@abc.abstractmethod
async def lookup(self, conn: SearchConnection,
details: SearchDetails) -> nres.SearchResults:
""" Find results for the search in the database.
"""
class NearSearch(AbstractSearch):
""" Category search of a place type near the result of another search.
"""
def __init__(self, penalty: float, categories: WeightedCategories,
search: AbstractSearch) -> None:
super().__init__(penalty)
self.search = search
self.categories = categories
async def lookup(self, conn: SearchConnection,
details: SearchDetails) -> nres.SearchResults:
""" Find results for the search in the database.
"""
results = nres.SearchResults()
base = await self.search.lookup(conn, details)
if not base:
return results
base.sort(key=lambda r: (r.accuracy, r.rank_search))
max_accuracy = base[0].accuracy + 0.5
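        # Derive the allowed address-rank window from the best base result:
        # countries stay at rank 0, mid-rank places may extend up to four
        # ranks below, everything else is limited to POI ranks 26-30.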
if base[0].rank_address == 0:
min_rank = 0
max_rank = 0
elif base[0].rank_address < 26:
min_rank = 1
max_rank = min(25, base[0].rank_address + 4)
else:
min_rank = 26
max_rank = 30
base = nres.SearchResults(r for r in base if r.source_table == nres.SourceTable.PLACEX
and r.accuracy <= max_accuracy
and r.bbox and r.bbox.area < 20
and r.rank_address >= min_rank
and r.rank_address <= max_rank)
if base:
baseids = [b.place_id for b in base[:5] if b.place_id]
for category, penalty in self.categories:
await self.lookup_category(results, conn, baseids, category, penalty, details)
if len(results) >= details.max_results:
break
return results
async def lookup_category(self, results: nres.SearchResults,
conn: SearchConnection, ids: List[int],
category: Tuple[str, str], penalty: float,
details: SearchDetails) -> None:
""" Find places of the given category near the list of
place ids and add the results to 'results'.
"""
table = await conn.get_class_table(*category)
tgeom = conn.t.placex.alias('pgeom')
if table is None:
# No classtype table available, do a simplified lookup in placex.
table = conn.t.placex
sql = sa.select(table.c.place_id,
sa.func.min(tgeom.c.centroid.ST_Distance(table.c.centroid))
.label('dist'))\
.join(tgeom, table.c.geometry.intersects(tgeom.c.centroid.ST_Expand(0.01)))\
.where(table.c.class_ == category[0])\
.where(table.c.type == category[1])
else:
# Use classtype table. We can afford to use a larger
# radius for the lookup.
sql = sa.select(table.c.place_id,
sa.func.min(tgeom.c.centroid.ST_Distance(table.c.centroid))
.label('dist'))\
.join(tgeom,
table.c.centroid.ST_CoveredBy(
sa.case((sa.and_(tgeom.c.rank_address > 9,
tgeom.c.geometry.is_area()),
tgeom.c.geometry),
                                     else_=tgeom.c.centroid.ST_Expand(0.05))))
inner = sql.where(tgeom.c.place_id.in_(ids))\
.group_by(table.c.place_id).subquery()
t = conn.t.placex
sql = _select_placex(t).add_columns((-inner.c.dist).label('importance'))\
.join(inner, inner.c.place_id == t.c.place_id)\
.order_by(inner.c.dist)
sql = sql.where(no_index(t.c.rank_address).between(MIN_RANK_PARAM, MAX_RANK_PARAM))
if details.countries:
sql = sql.where(t.c.country_code.in_(COUNTRIES_PARAM))
if details.excluded:
sql = sql.where(_exclude_places(t))
if details.layers is not None:
sql = sql.where(_filter_by_layer(t, details.layers))
sql = sql.limit(LIMIT_PARAM)
for row in await conn.execute(sql, _details_to_bind_params(details)):
result = nres.create_from_placex_row(row, nres.SearchResult)
assert result
result.accuracy = self.penalty + penalty
result.bbox = Bbox.from_wkb(row.bbox)
results.append(result)
class PoiSearch(AbstractSearch):
""" Category search in a geographic area.
"""
def __init__(self, sdata: SearchData) -> None:
super().__init__(sdata.penalty)
self.qualifiers = sdata.qualifiers
self.countries = sdata.countries
async def lookup(self, conn: SearchConnection,
details: SearchDetails) -> nres.SearchResults:
""" Find results for the search in the database.
"""
bind_params = _details_to_bind_params(details)
t = conn.t.placex
rows: List[SaRow] = []
if details.near and details.near_radius is not None and details.near_radius < 0.2:
# simply search in placex table
def _base_query() -> SaSelect:
return _select_placex(t) \
.add_columns((-t.c.centroid.ST_Distance(NEAR_PARAM))
.label('importance'))\
.where(t.c.linked_place_id == None) \
.where(t.c.geometry.within_distance(NEAR_PARAM, NEAR_RADIUS_PARAM)) \
.order_by(t.c.centroid.ST_Distance(NEAR_PARAM)) \
.limit(LIMIT_PARAM)
classtype = self.qualifiers.values
if len(classtype) == 1:
cclass, ctype = classtype[0]
sql: SaLambdaSelect = sa.lambda_stmt(lambda: _base_query()
.where(t.c.class_ == cclass)
.where(t.c.type == ctype))
else:
sql = _base_query().where(sa.or_(*(sa.and_(t.c.class_ == cls, t.c.type == typ)
for cls, typ in classtype)))
if self.countries:
sql = sql.where(t.c.country_code.in_(self.countries.values))
if details.viewbox is not None and details.bounded_viewbox:
sql = sql.where(t.c.geometry.intersects(VIEWBOX_PARAM))
rows.extend(await conn.execute(sql, bind_params))
else:
# use the class type tables
for category in self.qualifiers.values:
table = await conn.get_class_table(*category)
if table is not None:
sql = _select_placex(t)\
.add_columns(t.c.importance)\
.join(table, t.c.place_id == table.c.place_id)\
.where(t.c.class_ == category[0])\
.where(t.c.type == category[1])
if details.viewbox is not None and details.bounded_viewbox:
sql = sql.where(table.c.centroid.intersects(VIEWBOX_PARAM))
if details.near and details.near_radius is not None:
sql = sql.order_by(table.c.centroid.ST_Distance(NEAR_PARAM))\
.where(table.c.centroid.within_distance(NEAR_PARAM,
NEAR_RADIUS_PARAM))
if self.countries:
sql = sql.where(t.c.country_code.in_(self.countries.values))
sql = sql.limit(LIMIT_PARAM)
rows.extend(await conn.execute(sql, bind_params))
results = nres.SearchResults()
for row in rows:
result = nres.create_from_placex_row(row, nres.SearchResult)
assert result
result.accuracy = self.penalty + self.qualifiers.get_penalty((row.class_, row.type))
result.bbox = Bbox.from_wkb(row.bbox)
results.append(result)
return results
class CountrySearch(AbstractSearch):
""" Search for a country name or country code.
"""
SEARCH_PRIO = 0
def __init__(self, sdata: SearchData) -> None:
super().__init__(sdata.penalty)
self.countries = sdata.countries
async def lookup(self, conn: SearchConnection,
details: SearchDetails) -> nres.SearchResults:
""" Find results for the search in the database.
"""
t = conn.t.placex
ccodes = self.countries.values
sql = _select_placex(t)\
.add_columns(t.c.importance)\
.where(t.c.country_code.in_(ccodes))\
.where(t.c.rank_address == 4)
if details.geometry_output:
sql = _add_geometry_columns(sql, t.c.geometry, details)
if details.excluded:
sql = sql.where(_exclude_places(t))
sql = filter_by_area(sql, t, details)
results = nres.SearchResults()
for row in await conn.execute(sql, _details_to_bind_params(details)):
result = nres.create_from_placex_row(row, nres.SearchResult)
assert result
result.accuracy = self.penalty + self.countries.get_penalty(row.country_code, 5.0)
result.bbox = Bbox.from_wkb(row.bbox)
results.append(result)
if not results:
results = await self.lookup_in_country_table(conn, details)
if results:
details.min_rank = min(5, details.max_rank)
details.max_rank = min(25, details.max_rank)
return results
async def lookup_in_country_table(self, conn: SearchConnection,
details: SearchDetails) -> nres.SearchResults:
""" Look up the country in the fallback country tables.
"""
        # Avoid the fallback search when this is a 'more' search (i.e. a
        # search with excluded place ids). Country results usually are in
        # the first batch of results and it is not possible to exclude
        # these fallback results.
if details.excluded:
return nres.SearchResults()
t = conn.t.country_name
tgrid = conn.t.country_grid
sql = sa.select(tgrid.c.country_code,
tgrid.c.geometry.ST_Centroid().ST_Collect().ST_Centroid()
.label('centroid'),
tgrid.c.geometry.ST_Collect().ST_Expand(0).label('bbox'))\
.where(tgrid.c.country_code.in_(self.countries.values))\
.group_by(tgrid.c.country_code)
sql = filter_by_area(sql, tgrid, details, avoid_index=True)
sub = sql.subquery('grid')
sql = sa.select(t.c.country_code,
t.c.name.merge(t.c.derived_name).label('name'),
sub.c.centroid, sub.c.bbox)\
.join(sub, t.c.country_code == sub.c.country_code)
if details.geometry_output:
sql = _add_geometry_columns(sql, sub.c.centroid, details)
results = nres.SearchResults()
for row in await conn.execute(sql, _details_to_bind_params(details)):
result = nres.create_from_country_row(row, nres.SearchResult)
assert result
result.bbox = Bbox.from_wkb(row.bbox)
result.accuracy = self.penalty + self.countries.get_penalty(row.country_code, 5.0)
results.append(result)
return results
class PostcodeSearch(AbstractSearch):
""" Search for a postcode.
"""
def __init__(self, extra_penalty: float, sdata: SearchData) -> None:
super().__init__(sdata.penalty + extra_penalty)
self.countries = sdata.countries
self.postcodes = sdata.postcodes
self.lookups = sdata.lookups
self.rankings = sdata.rankings
async def lookup(self, conn: SearchConnection,
details: SearchDetails) -> nres.SearchResults:
""" Find results for the search in the database.
"""
t = conn.t.postcode
pcs = self.postcodes.values
sql = sa.select(t.c.place_id, t.c.parent_place_id,
t.c.rank_search, t.c.rank_address,
t.c.postcode, t.c.country_code,
t.c.geometry.label('centroid'))\
.where(t.c.postcode.in_(pcs))
if details.geometry_output:
sql = _add_geometry_columns(sql, t.c.geometry, details)
penalty: SaExpression = sa.literal(self.penalty)
if details.viewbox is not None and not details.bounded_viewbox:
penalty += sa.case((t.c.geometry.intersects(VIEWBOX_PARAM), 0.0),
(t.c.geometry.intersects(VIEWBOX2_PARAM), 0.5),
else_=1.0)
if details.near is not None:
sql = sql.order_by(t.c.geometry.ST_Distance(NEAR_PARAM))
sql = filter_by_area(sql, t, details)
if self.countries:
sql = sql.where(t.c.country_code.in_(self.countries.values))
if details.excluded:
sql = sql.where(_exclude_places(t))
if self.lookups:
assert len(self.lookups) == 1
tsearch = conn.t.search_name
sql = sql.where(tsearch.c.place_id == t.c.parent_place_id)\
.where((tsearch.c.name_vector + tsearch.c.nameaddress_vector)
.contains(sa.type_coerce(self.lookups[0].tokens,
IntArray)))
for ranking in self.rankings:
penalty += ranking.sql_penalty(conn.t.search_name)
penalty += sa.case(*((t.c.postcode == v, p) for v, p in self.postcodes),
else_=1.0)
sql = sql.add_columns(penalty.label('accuracy'))
sql = sql.order_by('accuracy').limit(LIMIT_PARAM)
results = nres.SearchResults()
for row in await conn.execute(sql, _details_to_bind_params(details)):
p = conn.t.placex
placex_sql = _select_placex(p).add_columns(p.c.importance)\
.where(sa.text("""class = 'boundary'
AND type = 'postal_code'
AND osm_type = 'R'"""))\
.where(p.c.country_code == row.country_code)\
.where(p.c.postcode == row.postcode)\
.limit(1)
if details.geometry_output:
placex_sql = _add_geometry_columns(placex_sql, p.c.geometry, details)
for prow in await conn.execute(placex_sql, _details_to_bind_params(details)):
result = nres.create_from_placex_row(prow, nres.SearchResult)
if result is not None:
result.bbox = Bbox.from_wkb(prow.bbox)
break
else:
result = nres.create_from_postcode_row(row, nres.SearchResult)
assert result
if result.place_id not in details.excluded:
result.accuracy = row.accuracy
results.append(result)
return results
class PlaceSearch(AbstractSearch):
""" Generic search for an address or named place.
"""
SEARCH_PRIO = 1
def __init__(self, extra_penalty: float, sdata: SearchData, expected_count: int) -> None:
super().__init__(sdata.penalty + extra_penalty)
self.countries = sdata.countries
self.postcodes = sdata.postcodes
self.housenumbers = sdata.housenumbers
self.qualifiers = sdata.qualifiers
self.lookups = sdata.lookups
self.rankings = sdata.rankings
self.expected_count = expected_count
def _inner_search_name_cte(self, conn: SearchConnection,
details: SearchDetails) -> 'sa.CTE':
""" Create a subquery that preselects the rows in the search_name
table.
"""
t = conn.t.search_name
penalty: SaExpression = sa.literal(self.penalty)
for ranking in self.rankings:
penalty += ranking.sql_penalty(t)
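        # Prefer the stored importance; otherwise derive a value from the
        # search rank so that less important places sort later.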
sql = sa.select(t.c.place_id, t.c.search_rank, t.c.address_rank,
t.c.country_code, t.c.centroid,
t.c.name_vector, t.c.nameaddress_vector,
sa.case((t.c.importance > 0, t.c.importance),
else_=0.40001-(sa.cast(t.c.search_rank, sa.Float())/75))
.label('importance'),
penalty.label('penalty'))
for lookup in self.lookups:
sql = sql.where(lookup.sql_condition(t))
if self.countries:
sql = sql.where(t.c.country_code.in_(self.countries.values))
if self.postcodes:
# if a postcode is given, don't search for state or country level objects
sql = sql.where(t.c.address_rank > 9)
if self.expected_count > 10000:
# Many results expected. Restrict by postcode.
tpc = conn.t.postcode
sql = sql.where(sa.select(tpc.c.postcode)
.where(tpc.c.postcode.in_(self.postcodes.values))
.where(t.c.centroid.within_distance(tpc.c.geometry, 0.4))
.exists())
if details.viewbox is not None:
if details.bounded_viewbox:
sql = sql.where(t.c.centroid
.intersects(VIEWBOX_PARAM,
use_index=details.viewbox.area < 0.2))
elif not self.postcodes and not self.housenumbers and self.expected_count >= 10000:
sql = sql.where(t.c.centroid
.intersects(VIEWBOX2_PARAM,
use_index=details.viewbox.area < 0.5))
if details.near is not None and details.near_radius is not None:
if details.near_radius < 0.1:
sql = sql.where(t.c.centroid.within_distance(NEAR_PARAM,
NEAR_RADIUS_PARAM))
else:
sql = sql.where(t.c.centroid
.ST_Distance(NEAR_PARAM) < NEAR_RADIUS_PARAM)
if self.housenumbers:
sql = sql.where(t.c.address_rank.between(16, 30))
else:
if details.excluded:
sql = sql.where(_exclude_places(t))
if details.min_rank > 0:
sql = sql.where(sa.or_(t.c.address_rank >= MIN_RANK_PARAM,
t.c.search_rank >= MIN_RANK_PARAM))
if details.max_rank < 30:
sql = sql.where(sa.or_(t.c.address_rank <= MAX_RANK_PARAM,
t.c.search_rank <= MAX_RANK_PARAM))
inner = sql.limit(10000).order_by(sa.desc(sa.text('importance'))).subquery()
sql = sa.select(inner.c.place_id, inner.c.search_rank, inner.c.address_rank,
inner.c.country_code, inner.c.centroid, inner.c.importance,
inner.c.penalty)
# If the query is not an address search or has a geographic preference,
# preselect most important items to restrict the number of places
# that need to be looked up in placex.
if not self.housenumbers\
and (details.viewbox is None or details.bounded_viewbox)\
and (details.near is None or details.near_radius is not None)\
and not self.qualifiers:
sql = sql.add_columns(sa.func.first_value(inner.c.penalty - inner.c.importance)
.over(order_by=inner.c.penalty - inner.c.importance)
.label('min_penalty'))
inner = sql.subquery()
sql = sa.select(inner.c.place_id, inner.c.search_rank, inner.c.address_rank,
inner.c.country_code, inner.c.centroid, inner.c.importance,
inner.c.penalty)\
.where(inner.c.penalty - inner.c.importance < inner.c.min_penalty + 0.5)
return sql.cte('searches')
async def lookup(self, conn: SearchConnection,
details: SearchDetails) -> nres.SearchResults:
""" Find results for the search in the database.
"""
t = conn.t.placex
tsearch = self._inner_search_name_cte(conn, details)
sql = _select_placex(t).join(tsearch, t.c.place_id == tsearch.c.place_id)
if details.geometry_output:
sql = _add_geometry_columns(sql, t.c.geometry, details)
penalty: SaExpression = tsearch.c.penalty
if self.postcodes:
tpc = conn.t.postcode
pcs = self.postcodes.values
pc_near = sa.select(sa.func.min(tpc.c.geometry.ST_Distance(t.c.centroid)))\
.where(tpc.c.postcode.in_(pcs))\
.scalar_subquery()
penalty += sa.case((t.c.postcode.in_(pcs), 0.0),
else_=sa.func.coalesce(pc_near, cast(SaColumn, 2.0)))
if details.viewbox is not None and not details.bounded_viewbox:
penalty += sa.case((t.c.geometry.intersects(VIEWBOX_PARAM, use_index=False), 0.0),
(t.c.geometry.intersects(VIEWBOX2_PARAM, use_index=False), 0.5),
else_=1.0)
if details.near is not None:
sql = sql.add_columns((-tsearch.c.centroid.ST_Distance(NEAR_PARAM))
.label('importance'))
sql = sql.order_by(sa.desc(sa.text('importance')))
else:
sql = sql.order_by(penalty - tsearch.c.importance)
sql = sql.add_columns(tsearch.c.importance)
sql = sql.add_columns(penalty.label('accuracy'))\
.order_by(sa.text('accuracy'))
if self.housenumbers:
hnr_list = '|'.join(self.housenumbers.values)
inner = sql.where(sa.or_(tsearch.c.address_rank < 30,
sa.func.RegexpWord(hnr_list, t.c.housenumber)))\
.subquery()
# Housenumbers from placex
thnr = conn.t.placex.alias('hnr')
pid_list = sa.func.ArrayAgg(thnr.c.place_id)
place_sql = sa.select(pid_list)\
.where(thnr.c.parent_place_id == inner.c.place_id)\
.where(sa.func.RegexpWord(hnr_list, thnr.c.housenumber))\
.where(thnr.c.linked_place_id == None)\
.where(thnr.c.indexed_status == 0)
if details.excluded:
place_sql = place_sql.where(thnr.c.place_id.not_in(sa.bindparam('excluded')))
if self.qualifiers:
place_sql = place_sql.where(self.qualifiers.sql_restrict(thnr))
numerals = [int(n) for n in self.housenumbers.values
if n.isdigit() and len(n) < 8]
interpol_sql: SaColumn
tiger_sql: SaColumn
if numerals and \
(not self.qualifiers or ('place', 'house') in self.qualifiers.values):
# Housenumbers from interpolations
interpol_sql = _make_interpolation_subquery(conn.t.osmline, inner,
numerals, details)
# Housenumbers from Tiger
tiger_sql = sa.case((inner.c.country_code == 'us',
_make_interpolation_subquery(conn.t.tiger, inner,
numerals, details)
), else_=None)
else:
interpol_sql = sa.null()
tiger_sql = sa.null()
unsort = sa.select(inner, place_sql.scalar_subquery().label('placex_hnr'),
interpol_sql.label('interpol_hnr'),
tiger_sql.label('tiger_hnr')).subquery('unsort')
sql = sa.select(unsort)\
.order_by(sa.case((unsort.c.placex_hnr != None, 1),
(unsort.c.interpol_hnr != None, 2),
(unsort.c.tiger_hnr != None, 3),
else_=4),
unsort.c.accuracy)
else:
sql = sql.where(t.c.linked_place_id == None)\
.where(t.c.indexed_status == 0)
if self.qualifiers:
sql = sql.where(self.qualifiers.sql_restrict(t))
if details.layers is not None:
sql = sql.where(_filter_by_layer(t, details.layers))
sql = sql.limit(LIMIT_PARAM)
results = nres.SearchResults()
for row in await conn.execute(sql, _details_to_bind_params(details)):
result = nres.create_from_placex_row(row, nres.SearchResult)
assert result
result.bbox = Bbox.from_wkb(row.bbox)
result.accuracy = row.accuracy
if self.housenumbers and row.rank_address < 30:
if row.placex_hnr:
subs = _get_placex_housenumbers(conn, row.placex_hnr, details)
elif row.interpol_hnr:
subs = _get_osmline(conn, row.interpol_hnr, numerals, details)
elif row.tiger_hnr:
subs = _get_tiger(conn, row.tiger_hnr, numerals, row.osm_id, details)
else:
subs = None
if subs is not None:
async for sub in subs:
assert sub.housenumber
sub.accuracy = result.accuracy
if not any(nr in self.housenumbers.values
for nr in sub.housenumber.split(';')):
sub.accuracy += 0.6
results.append(sub)
                # Only add the street as a result if it meets all other
                # filter conditions.
if (not details.excluded or result.place_id not in details.excluded)\
and (not self.qualifiers or result.category in self.qualifiers.values)\
and result.rank_address >= details.min_rank:
result.accuracy += 1.0 # penalty for missing housenumber
results.append(result)
else:
results.append(result)
return results
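# --- Illustrative sketch (editor's addition, hypothetical names) -------------
# A minimal, self-contained sketch of the ordering idiom used above for
# housenumber results: rows with a direct placex match sort first, then
# interpolations, then Tiger data. The table and column names are invented
# for the demo; only the sa.case()/order_by() pattern is taken from the
# code above.
import sqlalchemy as _sa

def _demo_hnr_ordering() -> str:
    unsort = _sa.table('unsort',
                       _sa.column('placex_hnr'), _sa.column('interpol_hnr'),
                       _sa.column('tiger_hnr'), _sa.column('accuracy'))
    sql = _sa.select(unsort)\
            .order_by(_sa.case((unsort.c.placex_hnr != None, 1),
                               (unsort.c.interpol_hnr != None, 2),
                               (unsort.c.tiger_hnr != None, 3),
                               else_=4),
                      unsort.c.accuracy)
    return str(sql)  # renders the SELECT with its CASE-based sort key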
| 14,756 | query_analyzer_factory.py | osm-search_Nominatim/src/nominatim_api/search/query_analyzer_factory.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Factory for creating a query analyzer for the configured tokenizer.
"""
from typing import List, cast, TYPE_CHECKING
from abc import ABC, abstractmethod
from pathlib import Path
import importlib
from ..logging import log
from ..connection import SearchConnection
if TYPE_CHECKING:
from .query import Phrase, QueryStruct
class AbstractQueryAnalyzer(ABC):
""" Class for analysing incoming queries.
Query analyzers are tied to the tokenizer used on import.
"""
@abstractmethod
async def analyze_query(self, phrases: List['Phrase']) -> 'QueryStruct':
""" Analyze the given phrases and return the tokenized query.
"""
@abstractmethod
def normalize_text(self, text: str) -> str:
""" Bring the given text into a normalized form. That is the
standardized form search will work with. All information removed
at this stage is inevitably lost.
"""
async def make_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
""" Create a query analyzer for the tokenizer used by the database.
"""
name = await conn.get_property('tokenizer')
src_file = Path(__file__).parent / f'{name}_tokenizer.py'
if not src_file.is_file():
log().comment(f"No tokenizer named '{name}' available. Database not set up properly.")
raise RuntimeError('Tokenizer not found')
module = importlib.import_module(f'nominatim_api.search.{name}_tokenizer')
return cast(AbstractQueryAnalyzer, await module.create_query_analyzer(conn))
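# A hedged usage sketch: obtaining an analyzer needs an open SearchConnection,
# so the calls below are illustrative only. Which tokenizer module gets
# imported depends on the 'tokenizer' property stored in the database.
#
#   analyzer = await make_query_analyzer(conn)
#   query = await analyzer.analyze_query(phrases)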
| 14,757 | __init__.py | osm-search_Nominatim/src/nominatim_api/search/__init__.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module for forward search.
"""
# pylint: disable=useless-import-alias
from .geocoder import (ForwardGeocoder as ForwardGeocoder)
from .query import (Phrase as Phrase,
PhraseType as PhraseType)
from .query_analyzer_factory import (make_query_analyzer as make_query_analyzer)
| 14,758 | query.py | osm-search_Nominatim/src/nominatim_api/search/query.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Datastructures for a tokenized query.
"""
from typing import List, Tuple, Optional, Iterator
from abc import ABC, abstractmethod
import dataclasses
import enum
class BreakType(enum.Enum):
""" Type of break between tokens.
"""
START = '<'
""" Begin of the query. """
END = '>'
""" End of the query. """
PHRASE = ','
""" Break between two phrases. """
WORD = ' '
""" Break between words. """
PART = '-'
""" Break inside a word, for example a hyphen or apostrophe. """
TOKEN = '`'
""" Break created as a result of tokenization.
This may happen in languages without spaces between words.
"""
class TokenType(enum.Enum):
""" Type of token.
"""
WORD = enum.auto()
""" Full name of a place. """
PARTIAL = enum.auto()
""" Word term without breaks, does not necessarily represent a full name. """
HOUSENUMBER = enum.auto()
""" Housenumber term. """
POSTCODE = enum.auto()
""" Postal code term. """
COUNTRY = enum.auto()
""" Country name or reference. """
QUALIFIER = enum.auto()
""" Special term used together with name (e.g. _Hotel_ Bellevue). """
NEAR_ITEM = enum.auto()
""" Special term used as searchable object(e.g. supermarket in ...). """
class PhraseType(enum.Enum):
""" Designation of a phrase.
"""
NONE = 0
""" No specific designation (i.e. source is free-form query). """
AMENITY = enum.auto()
""" Contains name or type of a POI. """
STREET = enum.auto()
""" Contains a street name optionally with a housenumber. """
CITY = enum.auto()
""" Contains the postal city. """
COUNTY = enum.auto()
""" Contains the equivalent of a county. """
STATE = enum.auto()
""" Contains a state or province. """
POSTCODE = enum.auto()
""" Contains a postal code. """
COUNTRY = enum.auto()
""" Contains the country name or code. """
def compatible_with(self, ttype: TokenType,
is_full_phrase: bool) -> bool:
""" Check if the given token type can be used with the phrase type.
"""
if self == PhraseType.NONE:
return not is_full_phrase or ttype != TokenType.QUALIFIER
if self == PhraseType.AMENITY:
return ttype in (TokenType.WORD, TokenType.PARTIAL)\
or (is_full_phrase and ttype == TokenType.NEAR_ITEM)\
or (not is_full_phrase and ttype == TokenType.QUALIFIER)
if self == PhraseType.STREET:
return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER)
if self == PhraseType.POSTCODE:
return ttype == TokenType.POSTCODE
if self == PhraseType.COUNTRY:
return ttype == TokenType.COUNTRY
return ttype in (TokenType.WORD, TokenType.PARTIAL)
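# Two concrete consequences of the rules above (illustration only): a STREET
# phrase still accepts housenumber tokens, while a POSTCODE phrase accepts
# nothing but postcode tokens:
#
#   PhraseType.STREET.compatible_with(TokenType.HOUSENUMBER, False)  # True
#   PhraseType.POSTCODE.compatible_with(TokenType.WORD, False)       # False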
@dataclasses.dataclass
class Token(ABC):
""" Base type for tokens.
Specific query analyzers must implement the concrete token class.
"""
penalty: float
token: int
count: int
addr_count: int
lookup_word: str
@abstractmethod
def get_category(self) -> Tuple[str, str]:
""" Return the category restriction for qualifier terms and
category objects.
"""
@dataclasses.dataclass
class TokenRange:
""" Indexes of query nodes over which a token spans.
"""
start: int
end: int
def __lt__(self, other: 'TokenRange') -> bool:
return self.end <= other.start
def __le__(self, other: 'TokenRange') -> bool:
return NotImplemented
def __gt__(self, other: 'TokenRange') -> bool:
return self.start >= other.end
def __ge__(self, other: 'TokenRange') -> bool:
return NotImplemented
def replace_start(self, new_start: int) -> 'TokenRange':
""" Return a new token range with the new start.
"""
return TokenRange(new_start, self.end)
def replace_end(self, new_end: int) -> 'TokenRange':
""" Return a new token range with the new end.
"""
return TokenRange(self.start, new_end)
def split(self, index: int) -> Tuple['TokenRange', 'TokenRange']:
""" Split the span into two spans at the given index.
The index must be within the span.
"""
return self.replace_end(index), self.replace_start(index)
@dataclasses.dataclass
class TokenList:
""" List of all tokens of a given type going from one breakpoint to another.
"""
end: int
ttype: TokenType
tokens: List[Token]
def add_penalty(self, penalty: float) -> None:
""" Add the given penalty to all tokens in the list.
"""
for token in self.tokens:
token.penalty += penalty
@dataclasses.dataclass
class QueryNode:
""" A node of the query representing a break between terms.
"""
btype: BreakType
ptype: PhraseType
starting: List[TokenList] = dataclasses.field(default_factory=list)
def has_tokens(self, end: int, *ttypes: TokenType) -> bool:
""" Check if there are tokens of the given types ending at the
given node.
"""
return any(tl.end == end and tl.ttype in ttypes for tl in self.starting)
def get_tokens(self, end: int, ttype: TokenType) -> Optional[List[Token]]:
""" Get the list of tokens of the given type starting at this node
and ending at the node 'end'. Returns 'None' if no such
tokens exist.
"""
for tlist in self.starting:
if tlist.end == end and tlist.ttype == ttype:
return tlist.tokens
return None
@dataclasses.dataclass
class Phrase:
""" A normalized query part. Phrases may be typed which means that
they then represent a specific part of the address.
"""
ptype: PhraseType
text: str
class QueryStruct:
""" A tokenized search query together with the normalized source
from which the tokens have been parsed.
The query contains a list of nodes that represent the breaks
between words. Tokens span between nodes, which don't necessarily
need to be direct neighbours. Thus the query is represented as a
directed acyclic graph.
When created, a query contains a single node: the start of the
query. Further nodes can be added by appending to 'nodes'.
"""
def __init__(self, source: List[Phrase]) -> None:
self.source = source
self.nodes: List[QueryNode] = \
[QueryNode(BreakType.START, source[0].ptype if source else PhraseType.NONE)]
def num_token_slots(self) -> int:
""" Return the length of the query in vertice steps.
"""
return len(self.nodes) - 1
def add_node(self, btype: BreakType, ptype: PhraseType) -> None:
""" Append a new break node with the given break type.
The phrase type denotes the type for any tokens starting
at the node.
"""
self.nodes.append(QueryNode(btype, ptype))
def add_token(self, trange: TokenRange, ttype: TokenType, token: Token) -> None:
""" Add a token to the query. 'start' and 'end' are the indexes of the
nodes from which to which the token spans. The indexes must exist
and are expected to be in the same phrase.
'ttype' denotes the type of the token and 'token' the token to
be inserted.
If the token type is not compatible with the phrase it should
be added to, then the token is silently dropped.
"""
snode = self.nodes[trange.start]
full_phrase = snode.btype in (BreakType.START, BreakType.PHRASE)\
and self.nodes[trange.end].btype in (BreakType.PHRASE, BreakType.END)
if snode.ptype.compatible_with(ttype, full_phrase):
tlist = snode.get_tokens(trange.end, ttype)
if tlist is None:
snode.starting.append(TokenList(trange.end, ttype, [token]))
else:
tlist.append(token)
def get_tokens(self, trange: TokenRange, ttype: TokenType) -> List[Token]:
""" Get the list of tokens of a given type, spanning the given
nodes. The nodes must exist. If no tokens exist, an
empty list is returned.
"""
return self.nodes[trange.start].get_tokens(trange.end, ttype) or []
def get_partials_list(self, trange: TokenRange) -> List[Token]:
""" Create a list of partial tokens between the given nodes.
The list is composed of the first token of type PARTIAL
going to the subsequent node. Such PARTIAL tokens are
assumed to exist.
"""
return [next(iter(self.get_tokens(TokenRange(i, i+1), TokenType.PARTIAL)))
for i in range(trange.start, trange.end)]
def iter_token_lists(self) -> Iterator[Tuple[int, QueryNode, TokenList]]:
""" Iterator over all token lists in the query.
"""
for i, node in enumerate(self.nodes):
for tlist in node.starting:
yield i, node, tlist
def find_lookup_word_by_id(self, token: int) -> str:
""" Find the first token with the given token ID and return
its lookup word. Returns 'None' if no such token exists.
The function is very slow and must only be used for
debugging.
"""
for node in self.nodes:
for tlist in node.starting:
for t in tlist.tokens:
if t.token == token:
return f"[{tlist.ttype.name[0]}]{t.lookup_word}"
return 'None'
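# --- Illustrative sketch (editor's addition, hypothetical names) -------------
# A minimal sketch of how a query graph is assembled. '_DemoToken' is an
# invented stand-in for the tokenizer-specific Token subclass; real token IDs
# and penalties come from the word table of the database.
@dataclasses.dataclass
class _DemoToken(Token):
    def get_category(self) -> Tuple[str, str]:
        return ('place', 'house')

def _demo_query_graph() -> None:
    query = QueryStruct([Phrase(PhraseType.NONE, 'hauptstr 34')])
    query.add_node(BreakType.WORD, PhraseType.NONE)   # break after 'hauptstr'
    query.add_node(BreakType.END, PhraseType.NONE)    # end of the query
    token = _DemoToken(penalty=0.0, token=1, count=1, addr_count=1,
                       lookup_word='hauptstr')
    query.add_token(TokenRange(0, 1), TokenType.PARTIAL, token)
    assert query.num_token_slots() == 2
    assert query.get_tokens(TokenRange(0, 1), TokenType.PARTIAL) == [token]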
| 14,759 | geocoder.py | osm-search_Nominatim/src/nominatim_api/search/geocoder.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Public interface to the search code.
"""
from typing import List, Any, Optional, Iterator, Tuple, Dict
import itertools
import re
import datetime as dt
import difflib
from ..connection import SearchConnection
from ..types import SearchDetails
from ..results import SearchResult, SearchResults, add_result_details
from ..logging import log
from .token_assignment import yield_token_assignments
from .db_search_builder import SearchBuilder, build_poi_search, wrap_near_search
from .db_searches import AbstractSearch
from .query_analyzer_factory import make_query_analyzer, AbstractQueryAnalyzer
from .query import Phrase, QueryStruct
class ForwardGeocoder:
""" Main class responsible for place search.
"""
def __init__(self, conn: SearchConnection,
params: SearchDetails, timeout: Optional[int]) -> None:
self.conn = conn
self.params = params
self.timeout = dt.timedelta(seconds=timeout or 1000000)
self.query_analyzer: Optional[AbstractQueryAnalyzer] = None
@property
def limit(self) -> int:
""" Return the configured maximum number of search results.
"""
return self.params.max_results
async def build_searches(self,
phrases: List[Phrase]) -> Tuple[QueryStruct, List[AbstractSearch]]:
""" Analyse the query and return the tokenized query and list of
possible searches over it.
"""
if self.query_analyzer is None:
self.query_analyzer = await make_query_analyzer(self.conn)
query = await self.query_analyzer.analyze_query(phrases)
searches: List[AbstractSearch] = []
if query.num_token_slots() > 0:
# 2. Compute all possible search interpretations
log().section('Compute abstract searches')
search_builder = SearchBuilder(query, self.params)
num_searches = 0
for assignment in yield_token_assignments(query):
searches.extend(search_builder.build(assignment))
if num_searches < len(searches):
log().table_dump('Searches for assignment',
_dump_searches(searches, query, num_searches))
num_searches = len(searches)
searches.sort(key=lambda s: (s.penalty, s.SEARCH_PRIO))
return query, searches
async def execute_searches(self, query: QueryStruct,
searches: List[AbstractSearch]) -> SearchResults:
""" Run the abstract searches against the database until a result
is found.
"""
log().section('Execute database searches')
results: Dict[Any, SearchResult] = {}
end_time = dt.datetime.now() + self.timeout
min_ranking = searches[0].penalty + 2.0
prev_penalty = 0.0
for i, search in enumerate(searches):
if search.penalty > prev_penalty and (search.penalty > min_ranking or i > 20):
break
log().table_dump(f"{i + 1}. Search", _dump_searches([search], query))
log().var_dump('Params', self.params)
lookup_results = await search.lookup(self.conn, self.params)
for result in lookup_results:
rhash = (result.source_table, result.place_id,
result.housenumber, result.country_code)
prevresult = results.get(rhash)
if prevresult:
prevresult.accuracy = min(prevresult.accuracy, result.accuracy)
else:
results[rhash] = result
min_ranking = min(min_ranking, result.accuracy * 1.2, 2.0)
log().result_dump('Results', ((r.accuracy, r) for r in lookup_results))
prev_penalty = search.penalty
if dt.datetime.now() >= end_time:
break
return SearchResults(results.values())
def pre_filter_results(self, results: SearchResults) -> SearchResults:
""" Remove results that are significantly worse than the
best match.
"""
if results:
max_ranking = min(r.ranking for r in results) + 0.5
results = SearchResults(r for r in results if r.ranking < max_ranking)
return results
def sort_and_cut_results(self, results: SearchResults) -> SearchResults:
""" Remove badly matching results, sort by ranking and
limit to the configured number of results.
"""
if results:
results.sort(key=lambda r: r.ranking)
min_rank = results[0].rank_search
min_ranking = results[0].ranking
results = SearchResults(r for r in results
if r.ranking + 0.03 * (r.rank_search - min_rank)
< min_ranking + 0.5)
results = SearchResults(results[:self.limit])
return results
def rerank_by_query(self, query: QueryStruct, results: SearchResults) -> None:
""" Adjust the accuracy of the localized result according to how well
they match the original query.
"""
assert self.query_analyzer is not None
qwords = [word for phrase in query.source
for word in re.split('[, ]+', phrase.text) if word]
if not qwords:
return
for result in results:
# Negative importance indicates ordering by distance, which is
# more important than word matching.
if not result.display_name\
or (result.importance is not None and result.importance < 0):
continue
distance = 0.0
norm = self.query_analyzer.normalize_text(' '.join((result.display_name,
result.country_code or '')))
words = set((w for w in norm.split(' ') if w))
if not words:
continue
for qword in qwords:
wdist = max(difflib.SequenceMatcher(a=qword, b=w).quick_ratio() for w in words)
if wdist < 0.5:
distance += len(qword)
else:
distance += (1.0 - wdist) * len(qword)
# Compensate for the fact that country names do not get a
# match penalty yet by the tokenizer.
# Temporary hack that needs to be removed!
if result.rank_address == 4:
distance *= 2
result.accuracy += distance * 0.4 / sum(len(w) for w in qwords)
async def lookup_pois(self, categories: List[Tuple[str, str]],
phrases: List[Phrase]) -> SearchResults:
""" Look up places by category. If phrase is given, a place search
over the phrase will be executed first and places close to the
results returned.
"""
log().function('forward_lookup_pois', categories=categories, params=self.params)
if phrases:
query, searches = await self.build_searches(phrases)
if query:
searches = [wrap_near_search(categories, s) for s in searches[:50]]
results = await self.execute_searches(query, searches)
results = self.pre_filter_results(results)
await add_result_details(self.conn, results, self.params)
log().result_dump('Preliminary Results', ((r.accuracy, r) for r in results))
results = self.sort_and_cut_results(results)
else:
results = SearchResults()
else:
search = build_poi_search(categories, self.params.countries)
results = await search.lookup(self.conn, self.params)
await add_result_details(self.conn, results, self.params)
log().result_dump('Final Results', ((r.accuracy, r) for r in results))
return results
async def lookup(self, phrases: List[Phrase]) -> SearchResults:
""" Look up a single free-text query.
"""
log().function('forward_lookup', phrases=phrases, params=self.params)
results = SearchResults()
if self.params.is_impossible():
return results
query, searches = await self.build_searches(phrases)
if searches:
# Execute SQL until an appropriate result is found.
results = await self.execute_searches(query, searches[:50])
results = self.pre_filter_results(results)
await add_result_details(self.conn, results, self.params)
log().result_dump('Preliminary Results', ((r.accuracy, r) for r in results))
self.rerank_by_query(query, results)
log().result_dump('Results after reranking', ((r.accuracy, r) for r in results))
results = self.sort_and_cut_results(results)
log().result_dump('Final Results', ((r.accuracy, r) for r in results))
return results
# pylint: disable=invalid-name,too-many-locals
def _dump_searches(searches: List[AbstractSearch], query: QueryStruct,
start: int = 0) -> Iterator[Optional[List[Any]]]:
yield ['Penalty', 'Lookups', 'Housenr', 'Postcode', 'Countries',
           'Qualifier', 'Category', 'Rankings']
def tk(tl: List[int]) -> str:
tstr = [f"{query.find_lookup_word_by_id(t)}({t})" for t in tl]
return f"[{','.join(tstr)}]"
def fmt_ranking(f: Any) -> str:
if not f:
return ''
ranks = ','.join((f"{tk(r.tokens)}^{r.penalty:.3g}" for r in f.rankings))
if len(ranks) > 100:
ranks = ranks[:100] + '...'
return f"{f.column}({ranks},def={f.default:.3g})"
def fmt_lookup(l: Any) -> str:
if not l:
return ''
return f"{l.lookup_type}({l.column}{tk(l.tokens)})"
def fmt_cstr(c: Any) -> str:
if not c:
return ''
return f'{c[0]}^{c[1]}'
for search in searches[start:]:
fields = ('lookups', 'rankings', 'countries', 'housenumbers',
'postcodes', 'qualifiers')
if hasattr(search, 'search'):
iters = itertools.zip_longest([f"{search.penalty:.3g}"],
*(getattr(search.search, attr, []) for attr in fields),
getattr(search, 'categories', []),
fillvalue='')
else:
iters = itertools.zip_longest([f"{search.penalty:.3g}"],
*(getattr(search, attr, []) for attr in fields),
[],
fillvalue='')
for penalty, lookup, rank, cc, hnr, pc, qual, cat in iters:
yield [penalty, fmt_lookup(lookup), fmt_cstr(hnr),
fmt_cstr(pc), fmt_cstr(cc), fmt_cstr(qual), fmt_cstr(cat), fmt_ranking(rank)]
yield None
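# A hedged end-to-end sketch. ForwardGeocoder is normally driven through the
# high-level nominatim_api package; the entry point below is an assumption
# about that API and requires a fully imported database:
#
#   import asyncio
#   import nominatim_api as napi
#
#   async def run() -> None:
#       api = napi.NominatimAPIAsync('path/to/project')
#       try:
#           for result in await api.search('Hauptstr 34, Berlin'):
#               print(result.display_name, result.accuracy)
#       finally:
#           await api.close()
#
#   asyncio.run(run())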
| 14,760 | token_assignment.py | osm-search_Nominatim/src/nominatim_api/search/token_assignment.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Create query interpretations where each vertex in the query is assigned
a specific function (expressed as a token type).
"""
from typing import Optional, List, Iterator
import dataclasses
from ..logging import log
from . import query as qmod
# pylint: disable=too-many-return-statements,too-many-branches
@dataclasses.dataclass
class TypedRange:
""" A token range for a specific type of tokens.
"""
ttype: qmod.TokenType
trange: qmod.TokenRange
PENALTY_TOKENCHANGE = {
qmod.BreakType.START: 0.0,
qmod.BreakType.END: 0.0,
qmod.BreakType.PHRASE: 0.0,
qmod.BreakType.WORD: 0.1,
qmod.BreakType.PART: 0.2,
qmod.BreakType.TOKEN: 0.4
}
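# For example: starting a new token type at an ordinary word boundary costs
# 0.1, a change inside a hyphenated word (a PART break) costs 0.2 and a
# change at a break introduced by the tokenizer itself costs 0.4.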
TypedRangeSeq = List[TypedRange]
@dataclasses.dataclass
class TokenAssignment: # pylint: disable=too-many-instance-attributes
""" Representation of a possible assignment of token types
to the tokens in a tokenized query.
"""
penalty: float = 0.0
name: Optional[qmod.TokenRange] = None
address: List[qmod.TokenRange] = dataclasses.field(default_factory=list)
housenumber: Optional[qmod.TokenRange] = None
postcode: Optional[qmod.TokenRange] = None
country: Optional[qmod.TokenRange] = None
near_item: Optional[qmod.TokenRange] = None
qualifier: Optional[qmod.TokenRange] = None
@staticmethod
def from_ranges(ranges: TypedRangeSeq) -> 'TokenAssignment':
""" Create a new token assignment from a sequence of typed spans.
"""
out = TokenAssignment()
for token in ranges:
if token.ttype == qmod.TokenType.PARTIAL:
out.address.append(token.trange)
elif token.ttype == qmod.TokenType.HOUSENUMBER:
out.housenumber = token.trange
elif token.ttype == qmod.TokenType.POSTCODE:
out.postcode = token.trange
elif token.ttype == qmod.TokenType.COUNTRY:
out.country = token.trange
elif token.ttype == qmod.TokenType.NEAR_ITEM:
out.near_item = token.trange
elif token.ttype == qmod.TokenType.QUALIFIER:
out.qualifier = token.trange
return out
class _TokenSequence:
""" Working state used to put together the token assignments.
Represents an intermediate state while traversing the tokenized
query.
"""
def __init__(self, seq: TypedRangeSeq,
direction: int = 0, penalty: float = 0.0) -> None:
self.seq = seq
self.direction = direction
self.penalty = penalty
def __str__(self) -> str:
seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype.name}]' for r in self.seq)
return f'{seq} (dir: {self.direction}, penalty: {self.penalty})'
@property
def end_pos(self) -> int:
""" Return the index of the global end of the current sequence.
"""
return self.seq[-1].trange.end if self.seq else 0
def has_types(self, *ttypes: qmod.TokenType) -> bool:
""" Check if the current sequence contains any typed ranges of
the given types.
"""
return any(s.ttype in ttypes for s in self.seq)
def is_final(self) -> bool:
""" Return true when the sequence cannot be extended by any
form of token anymore.
"""
# Country and category must be the final term for left-to-right
return len(self.seq) > 1 and \
self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.NEAR_ITEM)
def appendable(self, ttype: qmod.TokenType) -> Optional[int]:
""" Check if the give token type is appendable to the existing sequence.
Returns None if the token type is not appendable, otherwise the
new direction of the sequence after adding such a type. The
token is not added.
"""
if ttype == qmod.TokenType.WORD:
return None
if not self.seq:
# Append unconditionally to the empty list
if ttype == qmod.TokenType.COUNTRY:
return -1
if ttype in (qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
return 1
return self.direction
# Name tokens are always acceptable and don't change direction
if ttype == qmod.TokenType.PARTIAL:
# qualifiers cannot appear in the middle of the query. They need
# to be near the next phrase.
if self.direction == -1 \
and any(t.ttype == qmod.TokenType.QUALIFIER for t in self.seq[:-1]):
return None
return self.direction
# Other tokens may only appear once
if self.has_types(ttype):
return None
if ttype == qmod.TokenType.HOUSENUMBER:
if self.direction == 1:
if len(self.seq) == 1 and self.seq[0].ttype == qmod.TokenType.QUALIFIER:
return None
if len(self.seq) > 2 \
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
return None # direction left-to-right: housenumber must come before anything
elif self.direction == -1 \
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
return -1 # force direction right-to-left if after other terms
return self.direction
if ttype == qmod.TokenType.POSTCODE:
if self.direction == -1:
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
return None
return -1
if self.direction == 1:
return None if self.has_types(qmod.TokenType.COUNTRY) else 1
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
return 1
return self.direction
if ttype == qmod.TokenType.COUNTRY:
return None if self.direction == -1 else 1
if ttype == qmod.TokenType.NEAR_ITEM:
return self.direction
if ttype == qmod.TokenType.QUALIFIER:
if self.direction == 1:
if (len(self.seq) == 1
and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.NEAR_ITEM)) \
or (len(self.seq) == 2
and self.seq[0].ttype == qmod.TokenType.NEAR_ITEM
and self.seq[1].ttype == qmod.TokenType.PARTIAL):
return 1
return None
if self.direction == -1:
return -1
tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.NEAR_ITEM else self.seq
if len(tempseq) == 0:
return 1
if len(tempseq) == 1 and self.seq[0].ttype == qmod.TokenType.HOUSENUMBER:
return None
if len(tempseq) > 1 or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
return -1
return 0
return None
def advance(self, ttype: qmod.TokenType, end_pos: int,
btype: qmod.BreakType) -> Optional['_TokenSequence']:
""" Return a new token sequence state with the given token type
extended.
"""
newdir = self.appendable(ttype)
if newdir is None:
return None
if not self.seq:
newseq = [TypedRange(ttype, qmod.TokenRange(0, end_pos))]
new_penalty = 0.0
else:
last = self.seq[-1]
if btype != qmod.BreakType.PHRASE and last.ttype == ttype:
# extend the existing range
newseq = self.seq[:-1] + [TypedRange(ttype, last.trange.replace_end(end_pos))]
new_penalty = 0.0
else:
# start a new range
newseq = list(self.seq) + [TypedRange(ttype,
qmod.TokenRange(last.trange.end, end_pos))]
new_penalty = PENALTY_TOKENCHANGE[btype]
return _TokenSequence(newseq, newdir, self.penalty + new_penalty)
def _adapt_penalty_from_priors(self, priors: int, new_dir: int) -> bool:
if priors >= 2:
if self.direction == 0:
self.direction = new_dir
else:
if priors == 2:
self.penalty += 0.8
else:
return False
return True
def recheck_sequence(self) -> bool:
""" Check that the sequence is a fully valid token assignment
and adapt direction and penalties further if necessary.
This function catches some impossible assignments that need
forward context and can therefore not be excluded when building
the assignment.
"""
# housenumbers may not be further than 2 words from the beginning.
# If there are two words in front, give it a penalty.
hnrpos = next((i for i, tr in enumerate(self.seq)
if tr.ttype == qmod.TokenType.HOUSENUMBER),
None)
if hnrpos is not None:
if self.direction != -1:
priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TokenType.PARTIAL)
if not self._adapt_penalty_from_priors(priors, -1):
return False
if self.direction != 1:
priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TokenType.PARTIAL)
if not self._adapt_penalty_from_priors(priors, 1):
return False
if any(t.ttype == qmod.TokenType.NEAR_ITEM for t in self.seq):
self.penalty += 1.0
return True
def _get_assignments_postcode(self, base: TokenAssignment,
query_len: int) -> Iterator[TokenAssignment]:
""" Yield possible assignments of Postcode searches with an
address component.
"""
assert base.postcode is not None
if (base.postcode.start == 0 and self.direction != -1)\
or (base.postcode.end == query_len and self.direction != 1):
log().comment('postcode search')
# <address>,<postcode> should give preference to address search
if base.postcode.start == 0:
penalty = self.penalty
self.direction = -1 # name searches are only possible backwards
else:
penalty = self.penalty + 0.1
self.direction = 1 # name searches are only possible forwards
yield dataclasses.replace(base, penalty=penalty)
def _get_assignments_address_forward(self, base: TokenAssignment,
query: qmod.QueryStruct) -> Iterator[TokenAssignment]:
""" Yield possible assignments of address searches with
left-to-right reading.
"""
first = base.address[0]
log().comment('first word = name')
yield dataclasses.replace(base, penalty=self.penalty,
name=first, address=base.address[1:])
# To paraphrase:
# * if another name term comes after the first one and before the
# housenumber
# * a qualifier comes after the name
# * the containing phrase is strictly typed
if (base.housenumber and first.end < base.housenumber.start)\
or (base.qualifier and base.qualifier > first)\
or (query.nodes[first.start].ptype != qmod.PhraseType.NONE):
return
penalty = self.penalty
# Penalty for:
# * <name>, <street>, <housenumber> , ...
# * queries that are comma-separated
if (base.housenumber and base.housenumber > first) or len(query.source) > 1:
penalty += 0.25
for i in range(first.start + 1, first.end):
name, addr = first.split(i)
log().comment(f'split first word = name ({i - first.start})')
yield dataclasses.replace(base, name=name, address=[addr] + base.address[1:],
penalty=penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype])
def _get_assignments_address_backward(self, base: TokenAssignment,
query: qmod.QueryStruct) -> Iterator[TokenAssignment]:
""" Yield possible assignments of address searches with
right-to-left reading.
"""
last = base.address[-1]
if self.direction == -1 or len(base.address) > 1:
log().comment('last word = name')
yield dataclasses.replace(base, penalty=self.penalty,
name=last, address=base.address[:-1])
# To paraphrase:
# * if another name term comes before the last one and after the
# housenumber
# * a qualifier comes before the name
# * the containing phrase is strictly typed
if (base.housenumber and last.start > base.housenumber.end)\
or (base.qualifier and base.qualifier < last)\
or (query.nodes[last.start].ptype != qmod.PhraseType.NONE):
return
penalty = self.penalty
if base.housenumber and base.housenumber < last:
penalty += 0.4
if len(query.source) > 1:
penalty += 0.25
for i in range(last.start + 1, last.end):
addr, name = last.split(i)
log().comment(f'split last word = name ({i - last.start})')
yield dataclasses.replace(base, name=name, address=base.address[:-1] + [addr],
penalty=penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype])
def get_assignments(self, query: qmod.QueryStruct) -> Iterator[TokenAssignment]:
""" Yield possible assignments for the current sequence.
This function splits up general name assignments into name
and address and yields all possible variants of that.
"""
base = TokenAssignment.from_ranges(self.seq)
num_addr_tokens = sum(t.end - t.start for t in base.address)
if num_addr_tokens > 50:
return
        # Postcode search (postcode-only search is covered in the next case)
if base.postcode is not None and base.address:
yield from self._get_assignments_postcode(base, query.num_token_slots())
# Postcode or country-only search
if not base.address:
if not base.housenumber and (base.postcode or base.country or base.near_item):
log().comment('postcode/country search')
yield dataclasses.replace(base, penalty=self.penalty)
else:
# <postcode>,<address> should give preference to postcode search
if base.postcode and base.postcode.start == 0:
self.penalty += 0.1
            # Left-to-right reading of the address
if self.direction != -1:
yield from self._get_assignments_address_forward(base, query)
            # Right-to-left reading of the address
if self.direction != 1:
yield from self._get_assignments_address_backward(base, query)
# variant for special housenumber searches
if base.housenumber and not base.qualifier:
yield dataclasses.replace(base, penalty=self.penalty)
def yield_token_assignments(query: qmod.QueryStruct) -> Iterator[TokenAssignment]:
""" Return possible word type assignments to word positions.
The assignments are computed from the concrete tokens listed
in the tokenized query.
The result includes the penalty for transitions from one word type to
another. It does not include penalties for transitions within a
type.
"""
todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PhraseType.NONE else 1)]
while todo:
state = todo.pop()
node = query.nodes[state.end_pos]
for tlist in node.starting:
newstate = state.advance(tlist.ttype, tlist.end, node.btype)
if newstate is not None:
if newstate.end_pos == query.num_token_slots():
if newstate.recheck_sequence():
log().var_dump('Assignment', newstate)
yield from newstate.get_assignments(query)
elif not newstate.is_final():
todo.append(newstate)
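# --- Illustrative sketch (editor's addition, hand-built input) ---------------
# A small sketch of how typed ranges collapse into an assignment. The ranges
# below are hand-built; in production they are produced by traversing the
# query graph as above.
def _demo_from_ranges() -> TokenAssignment:
    ranges = [TypedRange(qmod.TokenType.PARTIAL, qmod.TokenRange(0, 2)),
              TypedRange(qmod.TokenType.POSTCODE, qmod.TokenRange(2, 3))]
    assignment = TokenAssignment.from_ranges(ranges)
    assert assignment.address == [qmod.TokenRange(0, 2)]
    assert assignment.postcode == qmod.TokenRange(2, 3)
    return assignment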
| 14,761 | db_search_fields.py | osm-search_Nominatim/src/nominatim_api/search/db_search_fields.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Data structures for more complex fields in abstract search descriptions.
"""
from typing import List, Tuple, Iterator, Dict, Type
import dataclasses
import sqlalchemy as sa
from ..typing import SaFromClause, SaColumn, SaExpression
from ..utils.json_writer import JsonWriter
from .query import Token
from . import db_search_lookups as lookups
@dataclasses.dataclass
class WeightedStrings:
""" A list of strings together with a penalty.
"""
values: List[str]
penalties: List[float]
def __bool__(self) -> bool:
return bool(self.values)
def __iter__(self) -> Iterator[Tuple[str, float]]:
return iter(zip(self.values, self.penalties))
def get_penalty(self, value: str, default: float = 1000.0) -> float:
""" Get the penalty for the given value. Returns the given default
if the value does not exist.
"""
try:
return self.penalties[self.values.index(value)]
except ValueError:
pass
return default
@dataclasses.dataclass
class WeightedCategories:
""" A list of class/type tuples together with a penalty.
"""
values: List[Tuple[str, str]]
penalties: List[float]
def __bool__(self) -> bool:
return bool(self.values)
def __iter__(self) -> Iterator[Tuple[Tuple[str, str], float]]:
return iter(zip(self.values, self.penalties))
def get_penalty(self, value: Tuple[str, str], default: float = 1000.0) -> float:
""" Get the penalty for the given value. Returns the given default
if the value does not exist.
"""
try:
return self.penalties[self.values.index(value)]
except ValueError:
pass
return default
def sql_restrict(self, table: SaFromClause) -> SaExpression:
""" Return an SQLAlcheny expression that restricts the
class and type columns of the given table to the values
in the list.
Must not be used with an empty list.
"""
assert self.values
if len(self.values) == 1:
return sa.and_(table.c.class_ == self.values[0][0],
table.c.type == self.values[0][1])
return sa.or_(*(sa.and_(table.c.class_ == c, table.c.type == t)
for c, t in self.values))
@dataclasses.dataclass(order=True)
class RankedTokens:
""" List of tokens together with the penalty of using it.
"""
penalty: float
tokens: List[int]
def with_token(self, t: Token, transition_penalty: float) -> 'RankedTokens':
""" Create a new RankedTokens list with the given token appended.
The tokens penalty as well as the given transition penalty
are added to the overall penalty.
"""
return RankedTokens(self.penalty + t.penalty + transition_penalty,
self.tokens + [t.token])
@dataclasses.dataclass
class FieldRanking:
""" A list of rankings to be applied sequentially until one matches.
The matched ranking determines the penalty. If none matches a
default penalty is applied.
"""
column: str
default: float
rankings: List[RankedTokens]
def normalize_penalty(self) -> float:
""" Reduce the default and ranking penalties, such that the minimum
penalty is 0. Return the penalty that was subtracted.
"""
if self.rankings:
min_penalty = min(self.default, min(r.penalty for r in self.rankings))
else:
min_penalty = self.default
if min_penalty > 0.0:
self.default -= min_penalty
for ranking in self.rankings:
ranking.penalty -= min_penalty
return min_penalty
def sql_penalty(self, table: SaFromClause) -> SaColumn:
""" Create an SQL expression for the rankings.
"""
assert self.rankings
rout = JsonWriter().start_array()
for rank in self.rankings:
rout.start_array().value(rank.penalty).next()
rout.start_array()
for token in rank.tokens:
rout.value(token).next()
rout.end_array()
rout.end_array().next()
rout.end_array()
return sa.func.weigh_search(table.c[self.column], rout(), self.default)
@dataclasses.dataclass
class FieldLookup:
""" A list of tokens to be searched for. The column names the database
column to search in and the lookup_type the operator that is applied.
'lookup_all' requires all tokens to match. 'lookup_any' requires
        one of the tokens to match. 'restrict' also requires all tokens
        to match but avoids the use of indexes.
"""
column: str
tokens: List[int]
lookup_type: Type[lookups.LookupType]
def sql_condition(self, table: SaFromClause) -> SaColumn:
""" Create an SQL expression for the given match condition.
"""
return self.lookup_type(table, self.column, self.tokens)
class SearchData:
""" Search fields derived from query and token assignment
to be used with the SQL queries.
"""
penalty: float
lookups: List[FieldLookup] = []
rankings: List[FieldRanking]
housenumbers: WeightedStrings = WeightedStrings([], [])
postcodes: WeightedStrings = WeightedStrings([], [])
countries: WeightedStrings = WeightedStrings([], [])
qualifiers: WeightedCategories = WeightedCategories([], [])
def set_strings(self, field: str, tokens: List[Token]) -> None:
""" Set on of the WeightedStrings properties from the given
token list. Adapt the global penalty, so that the
minimum penalty is 0.
"""
if tokens:
min_penalty = min(t.penalty for t in tokens)
self.penalty += min_penalty
wstrs = WeightedStrings([t.lookup_word for t in tokens],
[t.penalty - min_penalty for t in tokens])
setattr(self, field, wstrs)
def set_qualifiers(self, tokens: List[Token]) -> None:
""" Set the qulaifier field from the given tokens.
"""
if tokens:
categories: Dict[Tuple[str, str], float] = {}
min_penalty = 1000.0
for t in tokens:
min_penalty = min(min_penalty, t.penalty)
cat = t.get_category()
if t.penalty < categories.get(cat, 1000.0):
categories[cat] = t.penalty
self.penalty += min_penalty
self.qualifiers = WeightedCategories(list(categories.keys()),
list(categories.values()))
def set_ranking(self, rankings: List[FieldRanking]) -> None:
""" Set the list of rankings and normalize the ranking.
"""
self.rankings = []
for ranking in rankings:
if ranking.rankings:
self.penalty += ranking.normalize_penalty()
self.rankings.append(ranking)
else:
self.penalty += ranking.default
def lookup_by_names(name_tokens: List[int], addr_tokens: List[int]) -> List[FieldLookup]:
""" Create a lookup list where name tokens are looked up via index
and potential address tokens are used to restrict the search further.
"""
lookup = [FieldLookup('name_vector', name_tokens, lookups.LookupAll)]
if addr_tokens:
lookup.append(FieldLookup('nameaddress_vector', addr_tokens, lookups.Restrict))
return lookup
def lookup_by_any_name(name_tokens: List[int], addr_restrict_tokens: List[int],
addr_lookup_tokens: List[int]) -> List[FieldLookup]:
""" Create a lookup list where name tokens are looked up via index
and only one of the name tokens must be present.
Potential address tokens are used to restrict the search further.
"""
lookup = [FieldLookup('name_vector', name_tokens, lookups.LookupAny)]
if addr_restrict_tokens:
lookup.append(FieldLookup('nameaddress_vector', addr_restrict_tokens, lookups.Restrict))
if addr_lookup_tokens:
lookup.append(FieldLookup('nameaddress_vector', addr_lookup_tokens, lookups.LookupAll))
return lookup
def lookup_by_addr(name_tokens: List[int], addr_tokens: List[int]) -> List[FieldLookup]:
""" Create a lookup list where address tokens are looked up via index
and the name tokens are only used to restrict the search further.
"""
return [FieldLookup('name_vector', name_tokens, lookups.Restrict),
FieldLookup('nameaddress_vector', addr_tokens, lookups.LookupAll)]
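# --- Illustrative sketch (editor's addition, invented token IDs) -------------
# A short sketch of the weighted containers and lookup builders above. The
# token IDs are invented; they would normally come from the word table.
def _demo_search_fields() -> None:
    postcodes = WeightedStrings(['12345', '12346'], [0.0, 0.1])
    assert postcodes.get_penalty('12346') == 0.1
    assert postcodes.get_penalty('99999') == 1000.0   # default for misses
    lookup = lookup_by_names([1, 2], [10])
    assert [(fl.column, fl.lookup_type) for fl in lookup] == \
        [('name_vector', lookups.LookupAll), ('nameaddress_vector', lookups.Restrict)]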
| 14,762 | errors.py | osm-search_Nominatim/src/nominatim_db/errors.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom exception and error classes for Nominatim.
"""
class UsageError(Exception):
""" An error raised because of bad user input. This error will usually
not cause a stack trace to be printed unless debugging is enabled.
"""
| 14,763 | config.py | osm-search_Nominatim/src/nominatim_db/config.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Nominatim configuration accessor.
"""
from typing import Union, Dict, Any, List, Mapping, Optional
import importlib.util
import logging
import os
import sys
from pathlib import Path
import json
import yaml
from dotenv import dotenv_values
from psycopg.conninfo import conninfo_to_dict
from .typing import StrPath
from .errors import UsageError
from . import paths
LOG = logging.getLogger()
CONFIG_CACHE : Dict[str, Any] = {}
def flatten_config_list(content: Any, section: str = '') -> List[Any]:
""" Flatten YAML configuration lists that contain include sections
which are lists themselves.
"""
if not content:
return []
if not isinstance(content, list):
raise UsageError(f"List expected in section '{section}'.")
output = []
for ele in content:
if isinstance(ele, list):
output.extend(flatten_config_list(ele, section))
else:
output.append(ele)
return output
class Configuration:
""" This class wraps access to the configuration settings
for the Nominatim instance in use.
All Nominatim configuration options are prefixed with 'NOMINATIM_' to
avoid conflicts with other environment variables. All settings can
be accessed as properties of the class under the same name as the
setting but with the `NOMINATIM_` prefix removed. In addition, there
are accessor functions that convert the setting values to types
other than string.
"""
def __init__(self, project_dir: Optional[Union[Path, str]],
environ: Optional[Mapping[str, str]] = None) -> None:
self.environ = os.environ if environ is None else environ
self.config_dir = paths.CONFIG_DIR
self._config = dotenv_values(str(self.config_dir / 'env.defaults'))
if project_dir is not None:
self.project_dir: Optional[Path] = Path(project_dir).resolve()
if (self.project_dir / '.env').is_file():
self._config.update(dotenv_values(str(self.project_dir / '.env')))
else:
self.project_dir = None
class _LibDirs:
osm2pgsql: Path
sql = paths.SQLLIB_DIR
data = paths.DATA_DIR
self.lib_dir = _LibDirs()
self._private_plugins: Dict[str, object] = {}
def set_libdirs(self, **kwargs: StrPath) -> None:
""" Set paths to library functions and data.
"""
for key, value in kwargs.items():
setattr(self.lib_dir, key, None if value is None else Path(value))
def __getattr__(self, name: str) -> str:
name = 'NOMINATIM_' + name
if name in self.environ:
return self.environ[name]
return self._config[name] or ''
def get_bool(self, name: str) -> bool:
""" Return the given configuration parameter as a boolean.
Parameters:
name: Name of the configuration parameter with the NOMINATIM_
prefix removed.
Returns:
`True` for values of '1', 'yes' and 'true', `False` otherwise.
"""
return getattr(self, name).lower() in ('1', 'yes', 'true')
def get_int(self, name: str) -> int:
""" Return the given configuration parameter as an int.
Parameters:
name: Name of the configuration parameter with the NOMINATIM_
prefix removed.
Returns:
The configuration value converted to int.
Raises:
ValueError: when the value is not a number.
"""
try:
return int(getattr(self, name))
except ValueError as exp:
LOG.fatal("Invalid setting NOMINATIM_%s. Needs to be a number.", name)
raise UsageError("Configuration error.") from exp
def get_str_list(self, name: str) -> Optional[List[str]]:
""" Return the given configuration parameter as a list of strings.
            The values are assumed to be given as a comma-separated list and
will be stripped before returning them.
Parameters:
name: Name of the configuration parameter with the NOMINATIM_
prefix removed.
Returns:
(List[str]): The comma-split parameter as a list. The
elements are stripped of leading and final spaces before
being returned.
(None): The configuration parameter was unset or empty.
"""
raw = getattr(self, name)
return [v.strip() for v in raw.split(',')] if raw else None
def get_path(self, name: str) -> Optional[Path]:
""" Return the given configuration parameter as a Path.
Parameters:
name: Name of the configuration parameter with the NOMINATIM_
prefix removed.
Returns:
(Path): A Path object of the parameter value.
If a relative path is configured, then the function converts this
into an absolute path with the project directory as root path.
(None): The configuration parameter was unset or empty.
"""
value = getattr(self, name)
if not value:
return None
cfgpath = Path(value)
if not cfgpath.is_absolute():
assert self.project_dir is not None
cfgpath = self.project_dir / cfgpath
return cfgpath.resolve()
def get_libpq_dsn(self) -> str:
""" Get configured database DSN converted into the key/value format
understood by libpq and psycopg.
"""
dsn = self.DATABASE_DSN
def quote_param(param: str) -> str:
key, val = param.split('=')
val = val.replace('\\', '\\\\').replace("'", "\\'")
if ' ' in val:
val = "'" + val + "'"
return key + '=' + val
if dsn.startswith('pgsql:'):
# Old PHP DSN format. Convert before returning.
return ' '.join([quote_param(p) for p in dsn[6:].split(';')])
return dsn
def get_database_params(self) -> Mapping[str, Union[str, int, None]]:
""" Get the configured parameters for the database connection
as a mapping.
"""
dsn = self.DATABASE_DSN
if dsn.startswith('pgsql:'):
return dict((p.split('=', 1) for p in dsn[6:].split(';')))
return conninfo_to_dict(dsn)
def get_import_style_file(self) -> Path:
""" Return the import style file as a path object. Translates the
name of the standard styles automatically into a file in the
config style.
"""
style = getattr(self, 'IMPORT_STYLE')
if style in ('admin', 'street', 'address', 'full', 'extratags'):
return self.config_dir / f'import-{style}.lua'
return self.find_config_file('', 'IMPORT_STYLE')
def get_os_env(self) -> Dict[str, str]:
""" Return a copy of the OS environment with the Nominatim configuration
merged in.
"""
env = {k: v for k, v in self._config.items() if v is not None}
env.update(self.environ)
return env
def load_sub_configuration(self, filename: StrPath,
config: Optional[str] = None) -> Any:
""" Load additional configuration from a file. `filename` is the name
of the configuration file. The file is first searched in the
project directory and then in the global settings directory.
            If `config` is set, then the name of the configuration file can
            additionally be given through a .env configuration option. When
            the option is set, the file is loaded exclusively from the
            configured location: if the name is an absolute path, it is
            taken as is; if it is relative, it is resolved relative to the
            project directory.
The format of the file is determined from the filename suffix.
Currently only files with extension '.yaml' are supported.
YAML files support a special '!include' construct. When the
directive is given, the value is taken to be a filename, the file
is loaded using this function and added at the position in the
configuration tree.
"""
configfile = self.find_config_file(filename, config)
if str(configfile) in CONFIG_CACHE:
return CONFIG_CACHE[str(configfile)]
if configfile.suffix in ('.yaml', '.yml'):
result = self._load_from_yaml(configfile)
elif configfile.suffix == '.json':
with configfile.open('r', encoding='utf-8') as cfg:
result = json.load(cfg)
else:
raise UsageError(f"Config file '{configfile}' has unknown format.")
CONFIG_CACHE[str(configfile)] = result
return result
def load_plugin_module(self, module_name: str, internal_path: str) -> Any:
""" Load a Python module as a plugin.
The module_name may have three variants:
* A name without any '.' is assumed to be an internal module
and will be searched relative to `internal_path`.
* If the name ends in `.py`, module_name is assumed to be a
file name relative to the project directory.
* Any other name is assumed to be an absolute module name.
            In all of these variants the module name must start with a letter.
"""
if not module_name or not module_name[0].isidentifier():
raise UsageError(f'Invalid module name {module_name}')
if '.' not in module_name:
module_name = module_name.replace('-', '_')
full_module = f'{internal_path}.{module_name}'
return sys.modules.get(full_module) or importlib.import_module(full_module)
if module_name.endswith('.py'):
if self.project_dir is None or not (self.project_dir / module_name).exists():
raise UsageError(f"Cannot find module '{module_name}' in project directory.")
if module_name in self._private_plugins:
return self._private_plugins[module_name]
file_path = str(self.project_dir / module_name)
spec = importlib.util.spec_from_file_location(module_name, file_path)
if spec:
module = importlib.util.module_from_spec(spec)
# Do not add to global modules because there is no standard
# module name that Python can resolve.
self._private_plugins[module_name] = module
assert spec.loader is not None
spec.loader.exec_module(module)
return module
return sys.modules.get(module_name) or importlib.import_module(module_name)
def find_config_file(self, filename: StrPath,
config: Optional[str] = None) -> Path:
""" Resolve the location of a configuration file given a filename and
an optional configuration option with the file name.
Raises a UsageError when the file cannot be found or is not
a regular file.
"""
if config is not None:
cfg_value = getattr(self, config)
if cfg_value:
cfg_filename = Path(cfg_value)
if cfg_filename.is_absolute():
cfg_filename = cfg_filename.resolve()
if not cfg_filename.is_file():
LOG.fatal("Cannot find config file '%s'.", cfg_filename)
raise UsageError("Config file not found.")
return cfg_filename
filename = cfg_filename
search_paths = [self.project_dir, self.config_dir]
for path in search_paths:
if path is not None and (path / filename).is_file():
return path / filename
LOG.fatal("Configuration file '%s' not found.\nDirectories searched: %s",
filename, search_paths)
raise UsageError("Config file not found.")
def _load_from_yaml(self, cfgfile: Path) -> Any:
""" Load a YAML configuration file. This installs a special handler that
            allows including other YAML files using the '!include' operator.
"""
yaml.add_constructor('!include', self._yaml_include_representer,
Loader=yaml.SafeLoader)
return yaml.safe_load(cfgfile.read_text(encoding='utf-8'))
def _yaml_include_representer(self, loader: Any, node: yaml.Node) -> Any:
""" Handler for the '!include' operator in YAML files.
When the filename is relative, then the file is first searched in the
project directory and then in the global settings directory.
"""
fname = loader.construct_scalar(node)
if Path(fname).is_absolute():
configfile = Path(fname)
else:
configfile = self.find_config_file(loader.construct_scalar(node))
if configfile.suffix != '.yaml':
LOG.fatal("Format error while reading '%s': only YAML format supported.",
configfile)
raise UsageError("Cannot handle config file format.")
return yaml.safe_load(configfile.read_text(encoding='utf-8'))
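# --- Illustrative sketch (editor's addition) ---------------------------------
# A minimal usage sketch. It assumes an installed nominatim_db package so that
# the bundled 'env.defaults' file can be found; settings come from the
# supplied environment instead of a project directory.
def _demo_configuration() -> None:
    cfg = Configuration(None,
                        environ={'NOMINATIM_DATABASE_DSN': 'pgsql:dbname=nominatim'})
    assert cfg.DATABASE_DSN == 'pgsql:dbname=nominatim'
    assert cfg.get_libpq_dsn() == 'dbname=nominatim'   # converted PHP-style DSN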
| 14,764 | cli.py | osm-search_Nominatim/src/nominatim_db/cli.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Command-line interface to the Nominatim functions for import, update,
database administration and querying.
"""
from typing import Optional, Any
import importlib
import logging
import os
import sys
import argparse
import asyncio
from pathlib import Path
from .config import Configuration
from .errors import UsageError
from . import clicmd
from . import version
from .clicmd.args import NominatimArgs, Subcommand
LOG = logging.getLogger()
class CommandlineParser:
""" Wraps some of the common functions for parsing the command line
and setting up subcommands.
"""
def __init__(self, prog: str, description: Optional[str]):
self.parser = argparse.ArgumentParser(
prog=prog,
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
self.subs = self.parser.add_subparsers(title='available commands',
dest='subcommand')
        # Global arguments that only work if no sub-command is given
self.parser.add_argument('--version', action='store_true',
help='Print Nominatim version and exit')
# Arguments added to every sub-command
self.default_args = argparse.ArgumentParser(add_help=False)
group = self.default_args.add_argument_group('Default arguments')
group.add_argument('-h', '--help', action='help',
help='Show this help message and exit')
group.add_argument('-q', '--quiet', action='store_const', const=0,
dest='verbose', default=1,
help='Print only error messages')
group.add_argument('-v', '--verbose', action='count', default=1,
help='Increase verboseness of output')
group.add_argument('--project-dir', metavar='DIR', default='.',
                           help='Base directory of the Nominatim installation (default: .)')
group.add_argument('-j', '--threads', metavar='NUM', type=int,
help='Number of parallel threads to use')
def nominatim_version_text(self) -> str:
""" Program name and version number as string
"""
text = f'Nominatim version {version.NOMINATIM_VERSION!s}'
if version.GIT_COMMIT_HASH is not None:
text += f' ({version.GIT_COMMIT_HASH})'
return text
def add_subcommand(self, name: str, cmd: Subcommand) -> None:
""" Add a subcommand to the parser. The subcommand must be a class
with a function add_args() that adds the parameters for the
subcommand and a run() function that executes the command.
"""
assert cmd.__doc__ is not None
parser = self.subs.add_parser(name, parents=[self.default_args],
help=cmd.__doc__.split('\n', 1)[0],
description=cmd.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
parser.set_defaults(command=cmd)
cmd.add_args(parser)
def run(self, **kwargs: Any) -> int:
""" Parse the command line arguments of the program and execute the
appropriate subcommand.
"""
args = NominatimArgs()
try:
self.parser.parse_args(args=kwargs.get('cli_args'), namespace=args)
except SystemExit:
return 1
if args.version:
print(self.nominatim_version_text())
return 0
if args.subcommand is None:
self.parser.print_help()
return 1
args.project_dir = Path(args.project_dir).resolve()
if 'cli_args' not in kwargs:
logging.basicConfig(stream=sys.stderr,
format='%(asctime)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=max(4 - args.verbose, 1) * 10)
args.config = Configuration(args.project_dir,
environ=kwargs.get('environ', os.environ))
args.config.set_libdirs(osm2pgsql=kwargs['osm2pgsql_path'])
log = logging.getLogger()
log.warning('Using project directory: %s', str(args.project_dir))
try:
ret = args.command.run(args)
return ret
except UsageError as exception:
if log.isEnabledFor(logging.DEBUG):
raise # use Python's exception printing
log.fatal('FATAL: %s', exception)
# If we get here, then execution has failed in some way.
return 1
# Subcommand classes
#
# Each class needs to implement two functions: add_args() adds the CLI parameters
# for the subcommand, run() executes the subcommand.
#
# The class documentation doubles as the help text for the command. The
# first line is also used in the summary when calling the program without
# a subcommand.
#
# No need to document the functions each time.
# pylint: disable=C0111
class AdminServe:
"""\
Start a simple web server for serving the API.
This command starts a built-in webserver to serve the website
from the current project directory. This webserver is only suitable
for testing and development. Do not use it in production setups!
There are two different webserver implementations for Python available:
falcon (the default) and starlette. You need to make sure the
appropriate Python packages as well as the uvicorn package are
installed to use this function.
By default, the webserver can be accessed at: http://127.0.0.1:8088
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Server arguments')
group.add_argument('--server', default='127.0.0.1:8088',
help='The address the server will listen to.')
group.add_argument('--engine', default='falcon',
choices=('falcon', 'starlette'),
help='Webserver framework to run. (default: falcon)')
def run(self, args: NominatimArgs) -> int:
asyncio.run(self.run_uvicorn(args))
return 0
async def run_uvicorn(self, args: NominatimArgs) -> None:
import uvicorn # pylint: disable=import-outside-toplevel
server_info = args.server.split(':', 1)
host = server_info[0]
if len(server_info) > 1:
if not server_info[1].isdigit():
raise UsageError('Invalid format for --server parameter. Use <host>:<port>')
port = int(server_info[1])
else:
port = 8088
server_module = importlib.import_module(f'nominatim_api.server.{args.engine}.server')
app = server_module.get_application(args.project_dir)
config = uvicorn.Config(app, host=host, port=port)
server = uvicorn.Server(config)
await server.serve()
def get_set_parser() -> CommandlineParser:
"""\
Initializes the parser and adds the various subcommands of the
nominatim CLI.
"""
parser = CommandlineParser('nominatim', nominatim.__doc__)
parser.add_subcommand('import', clicmd.SetupAll())
parser.add_subcommand('freeze', clicmd.SetupFreeze())
parser.add_subcommand('replication', clicmd.UpdateReplication())
parser.add_subcommand('special-phrases', clicmd.ImportSpecialPhrases())
parser.add_subcommand('add-data', clicmd.UpdateAddData())
parser.add_subcommand('index', clicmd.UpdateIndex())
parser.add_subcommand('refresh', clicmd.UpdateRefresh())
parser.add_subcommand('admin', clicmd.AdminFuncs())
try:
exportcmd = importlib.import_module('nominatim_db.clicmd.export')
apicmd = importlib.import_module('nominatim_db.clicmd.api')
convertcmd = importlib.import_module('nominatim_db.clicmd.convert')
parser.add_subcommand('export', exportcmd.QueryExport())
parser.add_subcommand('convert', convertcmd.ConvertDB())
parser.add_subcommand('serve', AdminServe())
parser.add_subcommand('search', apicmd.APISearch())
parser.add_subcommand('reverse', apicmd.APIReverse())
parser.add_subcommand('lookup', apicmd.APILookup())
parser.add_subcommand('details', apicmd.APIDetails())
parser.add_subcommand('status', apicmd.APIStatus())
except ModuleNotFoundError as ex:
if not ex.name or 'nominatim_api' not in ex.name: # pylint: disable=E1135
raise ex
parser.parser.epilog = \
f'\n\nNominatim API package not found (was looking for module: {ex.name}).'\
'\nThe following commands are not available:'\
'\n export, convert, serve, search, reverse, lookup, details, status'\
"\n\nRun 'pip install nominatim-api' to install the package."
return parser
def nominatim(**kwargs: Any) -> int:
"""\
Command-line tools for importing, updating, administrating and
querying the Nominatim database.
"""
return get_set_parser().run(**kwargs)
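The entry point above can also be driven directly from Python. A minimal usage sketch, assuming the nominatim-db package is installed; the osm2pgsql path is a placeholder and is only consulted when a subcommand actually invokes osm2pgsql.

# Hedged usage sketch: calling the CLI entry point programmatically.
# '/usr/bin/osm2pgsql' is a placeholder path.
import sys
from nominatim_db.cli import nominatim

# '--version' returns before any database access happens.
sys.exit(nominatim(cli_args=['--version'],
                   osm2pgsql_path='/usr/bin/osm2pgsql'))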
size: 9,393 | language: Python | extension: .py | total_lines: 196 | avg_line_length: 38.158163 | max_line_length: 93 | alphanum_fraction: 0.636016 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,765 | file_name: paths.py | file_path: osm-search_Nominatim/src/nominatim_db/paths.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Path settings for extra data used by Nominatim.
"""
from pathlib import Path
SQLLIB_DIR = (Path(__file__) / '..' / '..' / '..' / 'lib-sql').resolve()
DATA_DIR = (Path(__file__) / '..' / '..' / '..' / 'data').resolve()
CONFIG_DIR = (Path(__file__) / '..' / '..' / '..' / 'settings').resolve()
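The path arithmetic above relies on resolve() folding the '..' segments; a small illustration with a made-up install location:

# Illustration of the parent-directory resolution used above
# (the absolute path is an example only).
from pathlib import Path

here = Path('/opt/nominatim/src/nominatim_db/paths.py')
print((here / '..' / '..' / '..' / 'data').resolve())
# -> /opt/nominatim/data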
size: 508 | language: Python | extension: .py | total_lines: 13 | avg_line_length: 38 | max_line_length: 73 | alphanum_fraction: 0.611336 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,766 | file_name: version.py | file_path: osm-search_Nominatim/src/nominatim_db/version.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Version information for Nominatim.
"""
from typing import NamedTuple, Optional
# See also https://github.com/PyCQA/pylint/issues/6006
# pylint: disable=useless-import-alias,unused-import
class NominatimVersion(NamedTuple):
""" Version information for Nominatim. We follow semantic versioning.
Major, minor and patch_level refer to the last released version.
The database patch level tracks important changes between releases
and must always be increased when there is a change to the database or code
that requires a migration.
When adding a migration on the development branch, raise the patch level
to 99 to make sure that the migration is applied when updating from a
patch release to the next minor version. Patch releases usually shouldn't
have migrations in them. When they are needed, then make sure that the
migration can be reapplied and set the migration version to the appropriate
patch level when cherry-picking the commit with the migration.
"""
major: int
minor: int
patch_level: int
db_patch_level: int
def __str__(self) -> str:
if self.db_patch_level is None:
return f"{self.major}.{self.minor}.{self.patch_level}"
return f"{self.major}.{self.minor}.{self.patch_level}-{self.db_patch_level}"
def release_version(self) -> str:
""" Return the release version in semantic versioning format.
The release version does not include the database patch version.
"""
return f"{self.major}.{self.minor}.{self.patch_level}"
def parse_version(version: str) -> NominatimVersion:
""" Parse a version string into a version consisting of a tuple of
four ints: major, minor, patch level, database patch level
This is the reverse operation of formatting a `NominatimVersion` with `str()`.
"""
parts = version.split('.')
return NominatimVersion(*[int(x) for x in parts[:2] + parts[2].split('-')])
NOMINATIM_VERSION = parse_version('4.5.0-0')
POSTGRESQL_REQUIRED_VERSION = (9, 6)
POSTGIS_REQUIRED_VERSION = (2, 2)
OSM2PGSQL_REQUIRED_VERSION = (1, 8)
# CMake sets the variable @GIT_HASH@ by executing 'git log'. It is not run
# on every execution of 'make'.
# cmake/tool-installed.tmpl is used to build the binary 'nominatim'. Inside
# there is a call to set the variable value below.
GIT_COMMIT_HASH : Optional[str] = None
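A quick round trip through the helpers above, using the version string defined in this file:

# Round-trip check of the version helpers (values taken from this file).
from nominatim_db.version import NominatimVersion, parse_version

v = parse_version('4.5.0-0')
assert v == NominatimVersion(4, 5, 0, 0)
print(str(v))               # '4.5.0-0'
print(v.release_version())  # '4.5.0'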
size: 2,629 | language: Python | extension: .py | total_lines: 54 | avg_line_length: 43.518519 | max_line_length: 84 | alphanum_fraction: 0.712275 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,767 | file_name: typing.py | file_path: osm-search_Nominatim/src/nominatim_db/typing.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Type definitions for typing annotations.
Complex type definitions are moved here, to keep the source files readable.
"""
from typing import Any, Union, Mapping, TypeVar, Sequence, TYPE_CHECKING
# Generic type variable names do not conform to naming styles; ignore globally here.
# pylint: disable=invalid-name,abstract-method,multiple-statements
# pylint: disable=missing-class-docstring,useless-import-alias
if TYPE_CHECKING:
import os
StrPath = Union[str, 'os.PathLike[str]']
SysEnv = Mapping[str, str]
# psycopg-related types
T_ResultKey = TypeVar('T_ResultKey', int, str)
class DictCursorResult(Mapping[str, Any]):
def __getitem__(self, x: Union[int, str]) -> Any: ...
DictCursorResults = Sequence[DictCursorResult]
# The following typing features require typing_extensions to work
# on all supported Python versions.
# Only require this for type checking but not for normal operations.
if TYPE_CHECKING:
from typing_extensions import (Protocol as Protocol,
Final as Final,
TypedDict as TypedDict)
else:
Protocol = object
Final = 'Final'
TypedDict = dict
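Because the aliases above only exist under TYPE_CHECKING, consuming modules must avoid evaluating them at runtime. A minimal sketch of the intended pattern (read_text_file() is a hypothetical example, not part of the code base):

# Sketch: consuming StrPath without a runtime import, via postponed
# evaluation of annotations.
from __future__ import annotations
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from nominatim_db.typing import StrPath

def read_text_file(path: StrPath) -> str:
    # Accepts plain strings as well as os.PathLike objects.
    with open(path, encoding='utf-8') as fd:
        return fd.read()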
size: 1,375 | language: Python | extension: .py | total_lines: 34 | avg_line_length: 36.352941 | max_line_length: 80 | alphanum_fraction: 0.733835 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,768 | file_name: place_name.py | file_path: osm-search_Nominatim/src/nominatim_db/data/place_name.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Data class for a single name of a place.
"""
from typing import Optional, Dict, Mapping
class PlaceName:
""" Each name and address part of a place is encapsulated in an object of
this class. It saves not only the name proper but also describes the
kind of name with two properties:
* `kind` describes the name of the OSM key used without any suffixes
(i.e. the part after the colon removed)
* `suffix` contains the suffix of the OSM tag, if any. The suffix
is the part of the key after the first colon.
In addition to that, a name may have arbitrary additional attributes.
How attributes are used depends on the sanitizers and token analysers.
The exception is the 'analyzer' attribute. This attribute determines
which token analysis module will be used to finalize the treatment of
names.
"""
def __init__(self, name: str, kind: str, suffix: Optional[str]):
self.name = name
self.kind = kind
self.suffix = suffix
self.attr: Dict[str, str] = {}
def __repr__(self) -> str:
return f"PlaceName(name={self.name!r},kind={self.kind!r},suffix={self.suffix!r})"
def clone(self, name: Optional[str] = None,
kind: Optional[str] = None,
suffix: Optional[str] = None,
attr: Optional[Mapping[str, str]] = None) -> 'PlaceName':
""" Create a deep copy of the place name, optionally with the
given parameters replaced. In the attribute list only the given
keys are updated. The list is not replaced completely.
In particular, the function cannot be used to remove an
attribute from a place name.
"""
newobj = PlaceName(name or self.name,
kind or self.kind,
suffix or self.suffix)
newobj.attr.update(self.attr)
if attr:
newobj.attr.update(attr)
return newobj
def set_attr(self, key: str, value: str) -> None:
""" Add the given property to the name. If the property was already
set, then the value is overwritten.
"""
self.attr[key] = value
def get_attr(self, key: str, default: Optional[str] = None) -> Optional[str]:
""" Return the given property or the value of 'default' if it
is not set.
"""
return self.attr.get(key, default)
def has_attr(self, key: str) -> bool:
""" Check if the given attribute is set.
"""
return key in self.attr
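A short demonstration of the clone() semantics described in the docstring: attributes are merged, never removed. The values are made up; only the class above is assumed.

# clone() merges attributes; it cannot drop them.
from nominatim_db.data.place_name import PlaceName

name = PlaceName('Hauptstraße', 'name', None)
name.set_attr('analyzer', 'street')

copy = name.clone(name='Main Street', attr={'lang': 'en'})
print(copy.name)                  # 'Main Street'
print(copy.get_attr('analyzer'))  # 'street' - kept from the original
print(copy.get_attr('lang'))      # 'en' - newly added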
size: 2,820 | language: Python | extension: .py | total_lines: 62 | avg_line_length: 36.741935 | max_line_length: 89 | alphanum_fraction: 0.628373 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,769 | file_name: place_info.py | file_path: osm-search_Nominatim/src/nominatim_db/data/place_info.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Wrapper around place information the indexer gets from the database and hands to
the tokenizer.
"""
from typing import Optional, Mapping, Any, Tuple
class PlaceInfo:
""" This data class contains all information the tokenizer can access
about a place.
"""
def __init__(self, info: Mapping[str, Any]) -> None:
self._info = info
@property
def name(self) -> Optional[Mapping[str, str]]:
""" A dictionary with the names of the place. Keys and values represent
the full key and value of the corresponding OSM tag. Which tags
are saved as names is determined by the import style.
The property may be None if the place has no names.
"""
return self._info.get('name')
@property
def address(self) -> Optional[Mapping[str, str]]:
""" A dictionary with the address elements of the place. They key
usually corresponds to the suffix part of the key of an OSM
'addr:*' or 'isin:*' tag. There are also some special keys like
`country` or `country_code` which merge OSM keys that contain
the same information. See [Import Styles][1] for details.
The property may be None if the place has no address information.
[1]: ../customize/Import-Styles.md
"""
return self._info.get('address')
@property
def country_code(self) -> Optional[str]:
""" The country code of the country the place is in. Guaranteed
to be a two-letter lower-case string. If the place is not inside
any country, the property is set to None.
"""
return self._info.get('country_code')
@property
def rank_address(self) -> int:
""" The [rank address][1] before any rank correction is applied.
[1]: ../customize/Ranking.md#address-rank
"""
return self._info.get('rank_address', 0)
@property
def centroid(self) -> Optional[Tuple[float, float]]:
""" A center point of the place in WGS84. May be None when the
geometry of the place is unknown.
"""
x, y = self._info.get('centroid_x'), self._info.get('centroid_y')
return None if x is None or y is None else (x, y)
def is_a(self, key: str, value: str) -> bool:
""" Set to True when the place's primary tag corresponds to the given
key and value.
"""
return self._info.get('class') == key and self._info.get('type') == value
def is_country(self) -> bool:
""" Set to True when the place is a valid country boundary.
"""
return self.rank_address == 4 \
and self.is_a('boundary', 'administrative') \
and self.country_code is not None
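A minimal example feeding a hand-built dictionary to PlaceInfo, mirroring the shape the indexer would hand over (the values are invented for the demo):

# Hand-built info dictionary, shaped like the indexer's input.
from nominatim_db.data.place_info import PlaceInfo

place = PlaceInfo({'name': {'name': 'France'},
                   'class': 'boundary', 'type': 'administrative',
                   'rank_address': 4, 'country_code': 'fr'})
print(place.is_a('boundary', 'administrative'))  # True
print(place.is_country())                        # True
print(place.centroid)                            # None - no centroid given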
size: 3,003 | language: Python | extension: .py | total_lines: 67 | avg_line_length: 36.820896 | max_line_length: 81 | alphanum_fraction: 0.629071 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,770 | file_name: country_info.py | file_path: osm-search_Nominatim/src/nominatim_db/data/country_info.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for importing and managing static country information.
"""
from typing import Dict, Any, Iterable, Tuple, Optional, Container, overload
from pathlib import Path
from ..db import utils as db_utils
from ..db.connection import connect, Connection, register_hstore
from ..errors import UsageError
from ..config import Configuration
from ..tokenizer.base import AbstractTokenizer
def _flatten_name_list(names: Any) -> Dict[str, str]:
if names is None:
return {}
if not isinstance(names, dict):
raise UsageError("Expected key-value list for names in country_settings.py")
flat = {}
for prefix, remain in names.items():
if isinstance(remain, str):
flat[prefix] = remain
elif not isinstance(remain, dict):
raise UsageError("Entries in names must be key-value lists.")
else:
for suffix, name in remain.items():
if suffix == 'default':
flat[prefix] = name
else:
flat[f'{prefix}:{suffix}'] = name
return flat
class _CountryInfo:
""" Caches country-specific properties from the configuration file.
"""
def __init__(self) -> None:
self._info: Dict[str, Dict[str, Any]] = {}
def load(self, config: Configuration) -> None:
""" Load the country properties from the configuration files,
if they are not loaded yet.
"""
if not self._info:
self._info = config.load_sub_configuration('country_settings.yaml')
for prop in self._info.values():
# Convert languages into a list for simpler handling.
if 'languages' not in prop:
prop['languages'] = []
elif not isinstance(prop['languages'], list):
prop['languages'] = [x.strip()
for x in prop['languages'].split(',')]
prop['names'] = _flatten_name_list(prop.get('names'))
def items(self) -> Iterable[Tuple[str, Dict[str, Any]]]:
""" Return tuples of (country_code, property dict) as iterable.
"""
return self._info.items()
def get(self, country_code: str) -> Dict[str, Any]:
""" Get country information for the country with the given country code.
"""
return self._info.get(country_code, {})
_COUNTRY_INFO = _CountryInfo()
def setup_country_config(config: Configuration) -> None:
""" Load country properties from the configuration file.
Needs to be called before using any other functions in this
file.
"""
_COUNTRY_INFO.load(config)
@overload
def iterate() -> Iterable[Tuple[str, Dict[str, Any]]]:
...
@overload
def iterate(prop: str) -> Iterable[Tuple[str, Any]]:
...
def iterate(prop: Optional[str] = None) -> Iterable[Tuple[str, Dict[str, Any]]]:
""" Iterate over country code and properties.
When `prop` is None, all countries are returned with their complete
set of properties.
If `prop` is given, then only countries are returned where the
given property is set. The second item of the tuple contains only
the content of the given property.
"""
if prop is None:
return _COUNTRY_INFO.items()
return ((c, p[prop]) for c, p in _COUNTRY_INFO.items() if prop in p)
def setup_country_tables(dsn: str, sql_dir: Path, ignore_partitions: bool = False) -> None:
""" Create and populate the tables with basic static data that provides
the background for geocoding. Data is assumed to not yet exist.
"""
db_utils.execute_file(dsn, sql_dir / 'country_osm_grid.sql.gz')
params = []
for ccode, props in _COUNTRY_INFO.items():
if ccode is not None and props is not None:
if ignore_partitions:
partition = 0
else:
partition = props.get('partition', 0)
lang = props['languages'][0] if len(
props['languages']) == 1 else None
params.append((ccode, props['names'], lang, partition))
with connect(dsn) as conn:
register_hstore(conn)
with conn.cursor() as cur:
cur.execute(
""" CREATE TABLE public.country_name (
country_code character varying(2),
name public.hstore,
derived_name public.hstore,
country_default_language_code text,
partition integer
); """)
cur.executemany(
""" INSERT INTO public.country_name
(country_code, name, country_default_language_code, partition)
VALUES (%s, %s, %s, %s)
""", params)
conn.commit()
def create_country_names(conn: Connection, tokenizer: AbstractTokenizer,
languages: Optional[Container[str]] = None) -> None:
""" Add default country names to search index. `languages` is a comma-
separated list of language codes as used in OSM. If `languages` is not
empty then only name translations for the given languages are added
to the index.
"""
def _include_key(key: str) -> bool:
return ':' not in key or not languages or \
key[key.index(':') + 1:] in languages
register_hstore(conn)
with conn.cursor() as cur:
cur.execute("""SELECT country_code, name FROM country_name
WHERE country_code is not null""")
with tokenizer.name_analyzer() as analyzer:
for code, name in cur:
names = {'countrycode': code}
# country names (only in languages as provided)
if name:
names.update({k : v for k, v in name.items() if _include_key(k)})
analyzer.add_country_names(code, names)
conn.commit()
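To illustrate the name flattening at the top of this file, here is the module-private helper called directly (for demonstration only; it assumes the nominatim-db package and its dependencies are importable):

# Demonstration of _flatten_name_list(): language variants become
# suffixed keys, the 'default' entry loses its suffix.
from nominatim_db.data.country_info import _flatten_name_list

names = {'name': {'default': 'Deutschland', 'en': 'Germany'}}
print(_flatten_name_list(names))
# -> {'name': 'Deutschland', 'name:en': 'Germany'}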
size: 6,181 | language: Python | extension: .py | total_lines: 139 | avg_line_length: 34.604317 | max_line_length: 91 | alphanum_fraction: 0.601898 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,771 | file_name: postcode_format.py | file_path: osm-search_Nominatim/src/nominatim_db/data/postcode_format.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for formatting postcodes according to their country-specific
format.
"""
from typing import Any, Mapping, Optional, Set, Match
import re
from ..errors import UsageError
from . import country_info
class CountryPostcodeMatcher:
""" Matches and formats a postcode according to a format definition
of the given country.
"""
def __init__(self, country_code: str, config: Mapping[str, Any]) -> None:
if 'pattern' not in config:
raise UsageError("Field 'pattern' required for 'postcode' "
f"for country '{country_code}'")
pc_pattern = config['pattern'].replace('d', '[0-9]').replace('l', '[A-Z]')
self.norm_pattern = re.compile(f'\\s*(?:{country_code.upper()}[ -]?)?({pc_pattern})\\s*')
self.pattern = re.compile(pc_pattern)
self.output = config.get('output', r'\g<0>')
def match(self, postcode: str) -> Optional[Match[str]]:
""" Match the given postcode against the postcode pattern for this
matcher. Returns a `re.Match` object if the match was successful
and None otherwise.
"""
# Upper-case, strip spaces and leading country code.
normalized = self.norm_pattern.fullmatch(postcode.upper())
if normalized:
return self.pattern.fullmatch(normalized.group(1))
return None
def normalize(self, match: Match[str]) -> str:
""" Return the default format of the postcode for the given match.
`match` must be a `re.Match` object previously returned by
`match()`
"""
return match.expand(self.output)
class PostcodeFormatter:
""" Container for different postcode formats of the world and
access functions.
"""
def __init__(self) -> None:
# Objects without a country code can't have a postcode by definition.
self.country_without_postcode: Set[Optional[str]] = {None}
self.country_matcher = {}
self.default_matcher = CountryPostcodeMatcher('', {'pattern': '.*'})
for ccode, prop in country_info.iterate('postcode'):
if prop is False:
self.country_without_postcode.add(ccode)
elif isinstance(prop, dict):
self.country_matcher[ccode] = CountryPostcodeMatcher(ccode, prop)
else:
raise UsageError(f"Invalid entry 'postcode' for country '{ccode}'")
def set_default_pattern(self, pattern: str) -> None:
""" Set the postcode match pattern to use, when a country does not
have a specific pattern.
"""
self.default_matcher = CountryPostcodeMatcher('', {'pattern': pattern})
def get_matcher(self, country_code: Optional[str]) -> Optional[CountryPostcodeMatcher]:
""" Return the CountryPostcodeMatcher for the given country.
Returns None if the country doesn't have a postcode and the
default matcher if there is no specific matcher configured for
the country.
"""
if country_code in self.country_without_postcode:
return None
assert country_code is not None
return self.country_matcher.get(country_code, self.default_matcher)
def match(self, country_code: Optional[str], postcode: str) -> Optional[Match[str]]:
""" Match the given postcode against the postcode pattern for this
matcher. Returns a `re.Match` object if the country has a pattern
and the match was successful or None if the match failed.
"""
if country_code in self.country_without_postcode:
return None
assert country_code is not None
return self.country_matcher.get(country_code, self.default_matcher).match(postcode)
def normalize(self, country_code: str, match: Match[str]) -> str:
""" Return the default format of the postcode for the given match.
`match` must be a `re.Match` object previously returned by
`match()`
"""
return self.country_matcher.get(country_code, self.default_matcher).normalize(match)
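A sketch of the matcher in isolation, using a hypothetical five-digit pattern for Germany ('d' stands for a digit in the pattern mini-language, as seen in the constructor above):

# CountryPostcodeMatcher demo with an invented 'ddddd' pattern for 'de'.
from nominatim_db.data.postcode_format import CountryPostcodeMatcher

matcher = CountryPostcodeMatcher('de', {'pattern': 'ddddd'})
for candidate in ('12345', ' de-12345 ', 'DE 12345', '1234'):
    m = matcher.match(candidate)
    print(candidate, '->', matcher.normalize(m) if m else 'no match')
# The first three all normalize to '12345'; '1234' has too few digits.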
size: 4,349 | language: Python | extension: .py | total_lines: 88 | avg_line_length: 40.75 | max_line_length: 97 | alphanum_fraction: 0.651004 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,772 | file_name: special_phrases.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/special_phrases.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'special-phrases' command.
"""
import argparse
import logging
from pathlib import Path
from ..errors import UsageError
from ..db.connection import connect
from ..tools.special_phrases.sp_importer import SPImporter, SpecialPhraseLoader
from ..tools.special_phrases.sp_wiki_loader import SPWikiLoader
from ..tools.special_phrases.sp_csv_loader import SPCsvLoader
from .args import NominatimArgs
LOG = logging.getLogger()
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
class ImportSpecialPhrases:
"""\
Import special phrases.
Special phrases are search terms that narrow down the type of object
that should be searched. For example, you might want to search for
'Hotels in Barcelona'. The OSM wiki has a selection of special phrases
in many languages, which can be imported with this command.
You can also provide your own phrases in a CSV file. The file needs to have
the following five columns:
* phrase - the term expected for searching
* class - the OSM tag key of the object type
* type - the OSM tag value of the object type
* operator - the kind of search to be done (one of: in, near, name, -)
* plural - whether the term is a plural or not (Y/N)
An example file can be found in the Nominatim sources at
'test/testdb/full_en_phrases_test.csv'.
The import can be further configured to ignore specific key/value pairs.
This is particularly useful when importing phrases from the wiki. The
default configuration excludes some very common tags like building=yes.
The configuration can be customized by putting a file `phrase-settings.json`
with custom rules into the project directory or by using the `--config`
option to point to another configuration file.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Input arguments')
group.add_argument('--import-from-wiki', action='store_true',
help='Import special phrases from the OSM wiki to the database')
group.add_argument('--import-from-csv', metavar='FILE',
help='Import special phrases from a CSV file')
group.add_argument('--no-replace', action='store_true',
help='Keep the old phrases and only add the new ones')
def run(self, args: NominatimArgs) -> int:
if args.import_from_wiki:
self.start_import(args, SPWikiLoader(args.config))
if args.import_from_csv:
if not Path(args.import_from_csv).is_file():
LOG.fatal("CSV file '%s' does not exist.", args.import_from_csv)
raise UsageError('Cannot access file.')
self.start_import(args, SPCsvLoader(args.import_from_csv))
return 0
def start_import(self, args: NominatimArgs, loader: SpecialPhraseLoader) -> None:
"""
Create the SPImporter object containing the right
sp loader and then start the import of special phrases.
"""
from ..tokenizer import factory as tokenizer_factory
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
should_replace = not args.no_replace
with connect(args.config.get_libpq_dsn()) as db_connection:
SPImporter(
args.config, db_connection, loader
).import_phrases(tokenizer, should_replace)
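For reference, a sketch of a custom phrase file in the five-column format described in the class docstring. Whether a header row is expected depends on SPCsvLoader, which is not shown here; the example assumes one, matching the sample file mentioned above.

# Writing a hypothetical custom phrase file (header row assumed).
import csv

with open('my_phrases.csv', 'w', newline='', encoding='utf-8') as fd:
    writer = csv.writer(fd)
    writer.writerow(['phrase', 'class', 'type', 'operator', 'plural'])
    writer.writerow(['hotel', 'tourism', 'hotel', '-', 'N'])
    writer.writerow(['hotels', 'tourism', 'hotel', '-', 'Y'])
# Import afterwards with:
#   nominatim special-phrases --import-from-csv my_phrases.csv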
size: 3,790 | language: Python | extension: .py | total_lines: 75 | avg_line_length: 43.613333 | max_line_length: 91 | alphanum_fraction: 0.70165 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,773 | file_name: setup.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/setup.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'import' subcommand.
"""
from typing import Optional
import argparse
import logging
from pathlib import Path
import asyncio
import psutil
from ..errors import UsageError
from ..config import Configuration
from ..db.connection import connect
from ..db import status, properties
from ..tokenizer.base import AbstractTokenizer
from ..version import NOMINATIM_VERSION
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=C0415
LOG = logging.getLogger()
class SetupAll:
"""\
Create a new Nominatim database from an OSM file.
This sub-command sets up a new Nominatim database from scratch, starting
by creating a new database in PostgreSQL. The user running this command
needs superuser rights on the database.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group1 = parser.add_argument_group('Required arguments')
group1.add_argument('--osm-file', metavar='FILE', action='append',
help='OSM file to be imported'
' (repeat for importing multiple files)',
default=None)
group1.add_argument('--continue', dest='continue_at',
choices=['import-from-file', 'load-data', 'indexing', 'db-postprocess'],
help='Continue an import that was interrupted',
default=None)
group2 = parser.add_argument_group('Optional arguments')
group2.add_argument('--osm2pgsql-cache', metavar='SIZE', type=int,
help='Size of cache to be used by osm2pgsql (in MB)')
group2.add_argument('--reverse-only', action='store_true',
help='Do not create tables and indexes for searching')
group2.add_argument('--no-partitions', action='store_true',
help=("Do not partition search indices "
"(speeds up import of single country extracts)"))
group2.add_argument('--no-updates', action='store_true',
help="Do not keep tables that are only needed for "
"updating the database later")
group2.add_argument('--offline', action='store_true',
help="Do not attempt to load any additional data from the internet")
group3 = parser.add_argument_group('Expert options')
group3.add_argument('--ignore-errors', action='store_true',
help='Continue import even when errors in SQL are present')
group3.add_argument('--index-noanalyse', action='store_true',
help='Do not perform analyse operations during index (expert only)')
group3.add_argument('--prepare-database', action='store_true',
help='Create the database but do not import any data')
def run(self, args: NominatimArgs) -> int: # pylint: disable=too-many-statements, too-many-branches
if args.osm_file is None and args.continue_at is None and not args.prepare_database:
raise UsageError("No input files (use --osm-file).")
if args.osm_file is not None and args.continue_at not in ('import-from-file', None):
raise UsageError(f"Cannot use --continue {args.continue_at} and --osm-file together.")
if args.continue_at is not None and args.prepare_database:
raise UsageError(
"Cannot use --continue and --prepare-database together."
)
return asyncio.run(self.async_run(args))
async def async_run(self, args: NominatimArgs) -> int:
from ..data import country_info
from ..tools import database_import, postcodes, freeze
from ..indexer.indexer import Indexer
num_threads = args.threads or psutil.cpu_count() or 1
country_info.setup_country_config(args.config)
if args.prepare_database or args.continue_at is None:
LOG.warning('Creating database')
database_import.setup_database_skeleton(args.config.get_libpq_dsn(),
rouser=args.config.DATABASE_WEBUSER)
if args.prepare_database:
return 0
if args.continue_at in (None, 'import-from-file'):
self._base_import(args)
if args.continue_at in ('import-from-file', 'load-data', None):
LOG.warning('Initialise tables')
with connect(args.config.get_libpq_dsn()) as conn:
database_import.truncate_data_tables(conn)
LOG.warning('Load data into placex table')
await database_import.load_data(args.config.get_libpq_dsn(), num_threads)
LOG.warning("Setting up tokenizer")
tokenizer = self._get_tokenizer(args.continue_at, args.config)
if args.continue_at in ('import-from-file', 'load-data', None):
LOG.warning('Calculate postcodes')
postcodes.update_postcodes(args.config.get_libpq_dsn(),
args.project_dir, tokenizer)
if args.continue_at in \
('import-from-file', 'load-data', 'indexing', None):
LOG.warning('Indexing places')
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer, num_threads)
await indexer.index_full(analyse=not args.index_noanalyse)
LOG.warning('Post-process tables')
with connect(args.config.get_libpq_dsn()) as conn:
await database_import.create_search_indices(conn, args.config,
drop=args.no_updates,
threads=num_threads)
LOG.warning('Create search index for default country names.')
country_info.create_country_names(conn, tokenizer,
args.config.get_str_list('LANGUAGES'))
if args.no_updates:
freeze.drop_update_tables(conn)
tokenizer.finalize_import(args.config)
LOG.warning('Recompute word counts')
tokenizer.update_statistics(args.config, threads=num_threads)
self._finalize_database(args.config.get_libpq_dsn(), args.offline)
return 0
def _base_import(self, args: NominatimArgs) -> None:
from ..tools import database_import, refresh
from ..data import country_info
files = args.get_osm_file_list()
if not files:
raise UsageError("No input files (use --osm-file).")
if args.continue_at in ('import-from-file', None):
# Check if the correct plugins are installed
database_import.check_existing_database_plugins(args.config.get_libpq_dsn())
LOG.warning('Setting up country tables')
country_info.setup_country_tables(args.config.get_libpq_dsn(),
args.config.lib_dir.data,
args.no_partitions)
LOG.warning('Importing OSM data file')
database_import.import_osm_data(files,
args.osm2pgsql_options(0, 1),
drop=args.no_updates,
ignore_errors=args.ignore_errors)
LOG.warning('Importing wikipedia importance data')
data_path = Path(args.config.WIKIPEDIA_DATA_PATH or args.project_dir)
if refresh.import_wikipedia_articles(args.config.get_libpq_dsn(),
data_path) > 0:
LOG.error('Wikipedia importance dump file not found. '
'Calculating importance values of locations will not '
'use Wikipedia importance data.')
LOG.warning('Importing secondary importance raster data')
if refresh.import_secondary_importance(args.config.get_libpq_dsn(),
args.project_dir) != 0:
LOG.error('Secondary importance file not imported. '
'Falling back to default ranking.')
self._setup_tables(args.config, args.reverse_only)
def _setup_tables(self, config: Configuration, reverse_only: bool) -> None:
""" Set up the basic database layout: tables, indexes and functions.
"""
from ..tools import database_import, refresh
with connect(config.get_libpq_dsn()) as conn:
LOG.warning('Create functions (1st pass)')
refresh.create_functions(conn, config, False, False)
LOG.warning('Create tables')
database_import.create_tables(conn, config, reverse_only=reverse_only)
refresh.load_address_levels_from_config(conn, config)
LOG.warning('Create functions (2nd pass)')
refresh.create_functions(conn, config, False, False)
LOG.warning('Create table triggers')
database_import.create_table_triggers(conn, config)
LOG.warning('Create partition tables')
database_import.create_partition_tables(conn, config)
LOG.warning('Create functions (3rd pass)')
refresh.create_functions(conn, config, False, False)
def _get_tokenizer(self, continue_at: Optional[str],
config: Configuration) -> AbstractTokenizer:
""" Set up a new tokenizer or load an already initialised one.
"""
from ..tokenizer import factory as tokenizer_factory
if continue_at in ('import-from-file', 'load-data', None):
# (re)initialise the tokenizer data
return tokenizer_factory.create_tokenizer(config)
# just load the tokenizer
return tokenizer_factory.get_tokenizer_for_db(config)
def _finalize_database(self, dsn: str, offline: bool) -> None:
""" Determine the database date and set the status accordingly.
"""
with connect(dsn) as conn:
properties.set_property(conn, 'database_version', str(NOMINATIM_VERSION))
try:
dbdate = status.compute_database_date(conn, offline)
status.set_status(conn, dbdate)
LOG.info('Database is at %s.', dbdate)
except Exception as exc: # pylint: disable=broad-except
LOG.error('Cannot determine date of database: %s', exc)
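A hedged sketch of driving the import above programmatically; file and binary paths are placeholders, and a prepared PostgreSQL server is assumed:

# Driving 'nominatim import' from Python (paths are placeholders).
from nominatim_db.cli import nominatim

# Equivalent to: nominatim import --osm-file monaco.osm.pbf --no-updates
nominatim(cli_args=['import', '--osm-file', 'monaco.osm.pbf', '--no-updates'],
          osm2pgsql_path='/usr/bin/osm2pgsql')

# If the run was interrupted during indexing, it can be resumed with
# cli_args=['import', '--continue', 'indexing'].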
size: 10,864 | language: Python | extension: .py | total_lines: 190 | avg_line_length: 43.321053 | max_line_length: 103 | alphanum_fraction: 0.60969 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,774 | file_name: refresh.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/refresh.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of 'refresh' subcommand.
"""
from typing import Tuple, Optional
import argparse
import logging
from pathlib import Path
import asyncio
from ..config import Configuration
from ..db.connection import connect, table_exists
from ..tokenizer.base import AbstractTokenizer
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
LOG = logging.getLogger()
def _parse_osm_object(obj: str) -> Tuple[str, int]:
""" Parse the given argument into a tuple of OSM type and ID.
Raises an ArgumentError if the format is not recognized.
"""
if len(obj) < 2 or obj[0].lower() not in 'nrw' or not obj[1:].isdigit():
raise argparse.ArgumentTypeError("Cannot parse OSM ID. Expect format: [N|W|R]<id>.")
return (obj[0].upper(), int(obj[1:]))
class UpdateRefresh:
"""\
Recompute auxiliary data used by the indexing process.
This sub-command updates various static data and functions in the database.
It usually needs to be run after changing various aspects of the
configuration. The configuration documentation will mention the exact
command to use in such cases.
Warning: this command must not be run in parallel with other update
commands like 'replication' or 'add-data'.
"""
def __init__(self) -> None:
self.tokenizer: Optional[AbstractTokenizer] = None
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Data arguments')
group.add_argument('--postcodes', action='store_true',
help='Update postcode centroid table')
group.add_argument('--word-tokens', action='store_true',
help='Clean up search terms')
group.add_argument('--word-counts', action='store_true',
help='Compute frequency of full-word search terms')
group.add_argument('--address-levels', action='store_true',
help='Reimport address level configuration')
group.add_argument('--functions', action='store_true',
help='Update the PL/pgSQL functions in the database')
group.add_argument('--wiki-data', action='store_true',
help='Update Wikipedia/data importance numbers')
group.add_argument('--secondary-importance', action='store_true',
help='Update secondary importance raster data')
group.add_argument('--importance', action='store_true',
help='Recompute place importances (expensive!)')
group.add_argument('--website', action='store_true',
help='DEPRECATED. This function has no function anymore'
' and will be removed in a future version.')
group.add_argument('--data-object', action='append',
type=_parse_osm_object, metavar='OBJECT',
help='Mark the given OSM object as requiring an update'
' (format: [NWR]<id>)')
group.add_argument('--data-area', action='append',
type=_parse_osm_object, metavar='OBJECT',
help='Mark the area around the given OSM object as requiring an update'
' (format: [NWR]<id>)')
group = parser.add_argument_group('Arguments for function refresh')
group.add_argument('--no-diff-updates', action='store_false', dest='diffs',
help='Do not enable code for propagating updates')
group.add_argument('--enable-debug-statements', action='store_true',
help='Enable debug warning statements in functions')
def run(self, args: NominatimArgs) -> int: #pylint: disable=too-many-branches, too-many-statements
from ..tools import refresh, postcodes
from ..indexer.indexer import Indexer
need_function_refresh = args.functions
if args.postcodes:
if postcodes.can_compute(args.config.get_libpq_dsn()):
LOG.warning("Update postcodes centroid")
tokenizer = self._get_tokenizer(args.config)
postcodes.update_postcodes(args.config.get_libpq_dsn(),
args.project_dir, tokenizer)
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer,
args.threads or 1)
asyncio.run(indexer.index_postcodes())
else:
LOG.error("The place table doesn't exist. "
"Postcode updates on a frozen database is not possible.")
if args.word_tokens:
LOG.warning('Updating word tokens')
tokenizer = self._get_tokenizer(args.config)
tokenizer.update_word_tokens()
if args.word_counts:
LOG.warning('Recompute word statistics')
self._get_tokenizer(args.config).update_statistics(args.config,
threads=args.threads or 1)
if args.address_levels:
LOG.warning('Updating address levels')
with connect(args.config.get_libpq_dsn()) as conn:
refresh.load_address_levels_from_config(conn, args.config)
# Attention: must come BEFORE functions
if args.secondary_importance:
with connect(args.config.get_libpq_dsn()) as conn:
# If the table did not exist before, then the importance code
# needs to be enabled.
if not table_exists(conn, 'secondary_importance'):
args.functions = True
LOG.warning('Import secondary importance raster data from %s', args.project_dir)
if refresh.import_secondary_importance(args.config.get_libpq_dsn(),
args.project_dir) > 0:
LOG.fatal('FATAL: Cannot update secondary importance raster data')
return 1
need_function_refresh = True
if args.wiki_data:
data_path = Path(args.config.WIKIPEDIA_DATA_PATH
or args.project_dir)
LOG.warning('Import wikipedia article importance from %s', data_path)
if refresh.import_wikipedia_articles(args.config.get_libpq_dsn(),
data_path) > 0:
LOG.fatal('FATAL: Wikipedia importance file not found in %s', data_path)
return 1
need_function_refresh = True
if need_function_refresh:
LOG.warning('Create functions')
with connect(args.config.get_libpq_dsn()) as conn:
refresh.create_functions(conn, args.config,
args.diffs, args.enable_debug_statements)
self._get_tokenizer(args.config).update_sql_functions(args.config)
# Attention: importance MUST come after wiki data import and after functions.
if args.importance:
LOG.warning('Update importance values for database')
with connect(args.config.get_libpq_dsn()) as conn:
refresh.recompute_importance(conn)
if args.website:
LOG.error('WARNING: Website setup is no longer required. '
'This function will be removed in a future version of Nominatim.')
if args.data_object or args.data_area:
with connect(args.config.get_libpq_dsn()) as conn:
for obj in args.data_object or []:
refresh.invalidate_osm_object(*obj, conn, recursive=False)
for obj in args.data_area or []:
refresh.invalidate_osm_object(*obj, conn, recursive=True)
conn.commit()
return 0
def _get_tokenizer(self, config: Configuration) -> AbstractTokenizer:
if self.tokenizer is None:
from ..tokenizer import factory as tokenizer_factory
self.tokenizer = tokenizer_factory.get_tokenizer_for_db(config)
return self.tokenizer
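The argument parser for --data-object/--data-area behaves as follows (a small demo of the module-private helper above):

# Demo of _parse_osm_object(), the type used for --data-object/--data-area.
import argparse
from nominatim_db.clicmd.refresh import _parse_osm_object

print(_parse_osm_object('N1234'))  # ('N', 1234)
print(_parse_osm_object('w42'))    # ('W', 42) - the prefix is case-insensitive
try:
    _parse_osm_object('X99')
except argparse.ArgumentTypeError as err:
    print(err)  # Cannot parse OSM ID. Expect format: [N|W|R]<id>.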
size: 8,555 | language: Python | extension: .py | total_lines: 153 | avg_line_length: 42.294118 | max_line_length: 102 | alphanum_fraction: 0.607621 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,775 | file_name: export.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/export.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'export' subcommand.
"""
from typing import Optional, List, cast
import logging
import argparse
import asyncio
import csv
import sys
import nominatim_api as napi
from nominatim_api.results import create_from_placex_row, ReverseResult, add_result_details
from nominatim_api.types import LookupDetails
import sqlalchemy as sa # pylint: disable=C0411
from ..errors import UsageError
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
# Needed for SQLAlchemy
# pylint: disable=singleton-comparison
LOG = logging.getLogger()
RANK_RANGE_MAP = {
'country': (4, 4),
'state': (5, 9),
'county': (10, 12),
'city': (13, 16),
'suburb': (17, 21),
'street': (26, 26),
'path': (27, 27)
}
RANK_TO_OUTPUT_MAP = {
4: 'country',
5: 'state', 6: 'state', 7: 'state', 8: 'state', 9: 'state',
10: 'county', 11: 'county', 12: 'county',
13: 'city', 14: 'city', 15: 'city', 16: 'city',
17: 'suburb', 18: 'suburb', 19: 'suburb', 20: 'suburb', 21: 'suburb',
26: 'street', 27: 'path'}
class QueryExport:
"""\
Export places as CSV file from the database.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Output arguments')
group.add_argument('--output-type', default='street',
choices=('country', 'state', 'county',
'city', 'suburb', 'street', 'path'),
help='Type of places to output (default: street)')
group.add_argument('--output-format',
default='street;suburb;city;county;state;country',
help=("Semicolon-separated list of address types "
"(see --output-type). Additionally accepts:"
"placeid,postcode"))
group.add_argument('--language',
help=("Preferred language for output "
"(use local name, if omitted)"))
group = parser.add_argument_group('Filter arguments')
group.add_argument('--restrict-to-country', metavar='COUNTRY_CODE',
help='Export only objects within country')
group.add_argument('--restrict-to-osm-node', metavar='ID', type=int,
dest='node',
help='Export only children of this OSM node')
group.add_argument('--restrict-to-osm-way', metavar='ID', type=int,
dest='way',
help='Export only children of this OSM way')
group.add_argument('--restrict-to-osm-relation', metavar='ID', type=int,
dest='relation',
help='Export only children of this OSM relation')
def run(self, args: NominatimArgs) -> int:
return asyncio.run(export(args))
async def export(args: NominatimArgs) -> int:
""" The actual export as a asynchronous function.
"""
api = napi.NominatimAPIAsync(args.project_dir)
try:
output_range = RANK_RANGE_MAP[args.output_type]
writer = init_csv_writer(args.output_format)
async with api.begin() as conn, api.begin() as detail_conn:
t = conn.t.placex
sql = sa.select(t.c.place_id, t.c.parent_place_id,
t.c.osm_type, t.c.osm_id, t.c.name,
t.c.class_, t.c.type, t.c.admin_level,
t.c.address, t.c.extratags,
t.c.housenumber, t.c.postcode, t.c.country_code,
t.c.importance, t.c.wikipedia, t.c.indexed_date,
t.c.rank_address, t.c.rank_search,
t.c.centroid)\
.where(t.c.linked_place_id == None)\
.where(t.c.rank_address.between(*output_range))
parent_place_id = await get_parent_id(conn, args.node, args.way, args.relation)
if parent_place_id:
taddr = conn.t.addressline
sql = sql.join(taddr, taddr.c.place_id == t.c.place_id)\
.where(taddr.c.address_place_id == parent_place_id)\
.where(taddr.c.isaddress)
if args.restrict_to_country:
sql = sql.where(t.c.country_code == args.restrict_to_country.lower())
results = []
for row in await conn.execute(sql):
result = create_from_placex_row(row, ReverseResult)
if result is not None:
results.append(result)
if len(results) == 1000:
await dump_results(detail_conn, results, writer, args.language)
results = []
if results:
await dump_results(detail_conn, results, writer, args.language)
finally:
await api.close()
return 0
def init_csv_writer(output_format: str) -> 'csv.DictWriter[str]':
fields = output_format.split(';')
writer = csv.DictWriter(sys.stdout, fieldnames=fields, extrasaction='ignore')
writer.writeheader()
return writer
async def dump_results(conn: napi.SearchConnection,
results: List[ReverseResult],
writer: 'csv.DictWriter[str]',
lang: Optional[str]) -> None:
locale = napi.Locales([lang] if lang else None)
await add_result_details(conn, results,
LookupDetails(address_details=True, locales=locale))
for result in results:
data = {'placeid': result.place_id,
'postcode': result.postcode}
for line in (result.address_rows or []):
if line.isaddress and line.local_name:
if line.category[1] == 'postcode':
data['postcode'] = line.local_name
elif line.rank_address in RANK_TO_OUTPUT_MAP:
data[RANK_TO_OUTPUT_MAP[line.rank_address]] = line.local_name
writer.writerow(data)
async def get_parent_id(conn: napi.SearchConnection, node_id: Optional[int],
way_id: Optional[int],
relation_id: Optional[int]) -> Optional[int]:
""" Get the place ID for the given OSM object.
"""
if node_id is not None:
osm_type, osm_id = 'N', node_id
elif way_id is not None:
osm_type, osm_id = 'W', way_id
elif relation_id is not None:
osm_type, osm_id = 'R', relation_id
else:
return None
t = conn.t.placex
sql = sa.select(t.c.place_id).limit(1)\
.where(t.c.osm_type == osm_type)\
.where(t.c.osm_id == osm_id)\
.where(t.c.rank_address > 0)\
.order_by(t.c.rank_address)
for result in await conn.execute(sql):
return cast(int, result[0])
raise UsageError(f'Cannot find a place {osm_type}{osm_id}.')
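A small demonstration of the writer set-up above (importing this module also pulls in the nominatim-api and SQLAlchemy packages, which are assumed to be installed): unknown keys in a row are silently dropped thanks to extrasaction='ignore'.

# init_csv_writer() demo: extra keys in a row are ignored.
from nominatim_db.clicmd.export import init_csv_writer

writer = init_csv_writer('placeid;street;city')  # prints the header line
writer.writerow({'placeid': 42, 'street': 'Hauptstraße',
                 'city': 'Berlin', 'state': 'dropped silently'})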
size: 7,307 | language: Python | extension: .py | total_lines: 160 | avg_line_length: 34.36875 | max_line_length: 91 | alphanum_fraction: 0.575007 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,776 | file_name: add_data.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/add_data.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'add-data' subcommand.
"""
from typing import cast
import argparse
import logging
import asyncio
import psutil
from .args import NominatimArgs
from ..db.connection import connect
from ..tools.freeze import is_frozen
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
LOG = logging.getLogger()
class UpdateAddData:
"""\
Add additional data from a file or an online source.
This command allows adding or updating the search data in the database.
The data can come either from an OSM file or single OSM objects can
directly be downloaded from the OSM API. This function only loads the
data into the database. Afterwards it still needs to be integrated
in the search index. Use the `nominatim index` command for that.
The command can also be used to add external non-OSM data to the
database. At the moment the only supported format is TIGER housenumber
data. See the online documentation at
https://nominatim.org/release-docs/latest/customize/Tiger/
for more information.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group_name = parser.add_argument_group('Source')
group1 = group_name.add_mutually_exclusive_group(required=True)
group1.add_argument('--file', metavar='FILE',
help='Import data from an OSM file or diff file')
group1.add_argument('--diff', metavar='FILE',
help='Import data from an OSM diff file (deprecated: use --file)')
group1.add_argument('--node', metavar='ID', type=int,
help='Import a single node from the API')
group1.add_argument('--way', metavar='ID', type=int,
help='Import a single way from the API')
group1.add_argument('--relation', metavar='ID', type=int,
help='Import a single relation from the API')
group1.add_argument('--tiger-data', metavar='DIR',
help='Add housenumbers from the US TIGER census database')
group2 = parser.add_argument_group('Extra arguments')
group2.add_argument('--use-main-api', action='store_true',
help='Use OSM API instead of Overpass to download objects')
group2.add_argument('--osm2pgsql-cache', metavar='SIZE', type=int,
help='Size of cache to be used by osm2pgsql (in MB)')
group2.add_argument('--socket-timeout', dest='socket_timeout', type=int, default=60,
help='Set timeout for file downloads')
def run(self, args: NominatimArgs) -> int:
from ..tools import add_osm_data
with connect(args.config.get_libpq_dsn()) as conn:
if is_frozen(conn):
print('Database is marked frozen. New data can\'t be added.')
return 1
if args.tiger_data:
return asyncio.run(self._add_tiger_data(args))
osm2pgsql_params = args.osm2pgsql_options(default_cache=1000, default_threads=1)
if args.file or args.diff:
return add_osm_data.add_data_from_file(args.config.get_libpq_dsn(),
cast(str, args.file or args.diff),
osm2pgsql_params)
if args.node:
return add_osm_data.add_osm_object(args.config.get_libpq_dsn(),
'node', args.node,
args.use_main_api,
osm2pgsql_params)
if args.way:
return add_osm_data.add_osm_object(args.config.get_libpq_dsn(),
'way', args.way,
args.use_main_api,
osm2pgsql_params)
if args.relation:
return add_osm_data.add_osm_object(args.config.get_libpq_dsn(),
'relation', args.relation,
args.use_main_api,
osm2pgsql_params)
return 0
async def _add_tiger_data(self, args: NominatimArgs) -> int:
from ..tokenizer import factory as tokenizer_factory
from ..tools import tiger_data
assert args.tiger_data
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
return await tiger_data.add_tiger_data(args.tiger_data,
args.config,
args.threads or psutil.cpu_count() or 1,
tokenizer)
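A hedged sketch of the two-step workflow from the class docstring: load a single object, then integrate it into the search index. The node ID and osm2pgsql path are placeholders.

# add-data followed by index, driven from Python (placeholder values).
from nominatim_db.cli import nominatim

for cmd in (['add-data', '--node', '12345'],  # fetch the object into the db
            ['index']):                       # integrate it into the index
    nominatim(cli_args=cmd, osm2pgsql_path='/usr/bin/osm2pgsql')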
size: 5,138 | language: Python | extension: .py | total_lines: 96 | avg_line_length: 38.822917 | max_line_length: 94 | alphanum_fraction: 0.579765 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam)

id: 14,777 | file_name: api.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/api.py | content:
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Subcommand definitions for API calls from the command line.
"""
from typing import Dict, Any, Optional, Type, Mapping
import argparse
import logging
import json
import sys
import pprint
from functools import reduce
import nominatim_api as napi
from nominatim_api.v1.helpers import zoom_to_rank, deduplicate_results
from nominatim_api.server.content_types import CONTENT_JSON
import nominatim_api.logging as loglib
from ..errors import UsageError
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
LOG = logging.getLogger()
STRUCTURED_QUERY = (
('amenity', 'name and/or type of POI'),
('street', 'housenumber and street'),
('city', 'city, town or village'),
('county', 'county'),
('state', 'state'),
('country', 'country'),
('postalcode', 'postcode')
)
EXTRADATA_PARAMS = (
('addressdetails', 'Include a breakdown of the address into elements'),
('extratags', ("Include additional information if available "
"(e.g. wikipedia link, opening hours)")),
('namedetails', 'Include a list of alternative names')
)
def _add_list_format(parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Other options')
group.add_argument('--list-formats', action='store_true',
help='List supported output formats and exit.')
def _add_api_output_arguments(parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Output formatting')
group.add_argument('--format', type=str, default='jsonv2',
help='Format of result (use --list-formats to see supported formats)')
for name, desc in EXTRADATA_PARAMS:
group.add_argument('--' + name, action='store_true', help=desc)
group.add_argument('--lang', '--accept-language', metavar='LANGS',
help='Preferred language order for presenting search results')
group.add_argument('--polygon-output',
choices=['geojson', 'kml', 'svg', 'text'],
help='Output geometry of results as GeoJSON, KML, SVG or WKT')
group.add_argument('--polygon-threshold', type=float, default = 0.0,
metavar='TOLERANCE',
help=("Simplify output geometry."
"Parameter is difference tolerance in degrees."))
def _get_geometry_output(args: NominatimArgs) -> napi.GeometryFormat:
""" Get the requested geometry output format in a API-compatible
format.
"""
if not args.polygon_output:
return napi.GeometryFormat.NONE
if args.polygon_output == 'geojson':
return napi.GeometryFormat.GEOJSON
if args.polygon_output == 'kml':
return napi.GeometryFormat.KML
if args.polygon_output == 'svg':
return napi.GeometryFormat.SVG
if args.polygon_output == 'text':
return napi.GeometryFormat.TEXT
try:
return napi.GeometryFormat[args.polygon_output.upper()]
except KeyError as exp:
raise UsageError(f"Unknown polygon output format '{args.polygon_output}'.") from exp
def _get_locales(args: NominatimArgs, default: Optional[str]) -> napi.Locales:
""" Get the locales from the language parameter.
"""
if args.lang:
return napi.Locales.from_accept_languages(args.lang)
if default:
return napi.Locales.from_accept_languages(default)
return napi.Locales()
def _get_layers(args: NominatimArgs, default: napi.DataLayer) -> Optional[napi.DataLayer]:
""" Get the list of selected layers as a DataLayer enum.
"""
if not args.layers:
return default
return reduce(napi.DataLayer.__or__,
(napi.DataLayer[s.upper()] for s in args.layers))
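# Editor's note: a minimal sketch of the fold above, assuming napi.DataLayer
# is a combinable flag enum (as its use with `|` elsewhere in this file
# suggests). For args.layers == ['address', 'poi']:
#
#   from functools import reduce
#   combined = reduce(napi.DataLayer.__or__,
#                     (napi.DataLayer[s.upper()] for s in ['address', 'poi']))
#   assert combined == napi.DataLayer.ADDRESS | napi.DataLayer.POI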
def _list_formats(formatter: napi.FormatDispatcher, rtype: Type[Any]) -> int:
for fmt in formatter.list_formats(rtype):
print(fmt)
print('debug')
print('raw')
return 0
def _print_output(formatter: napi.FormatDispatcher, result: Any,
fmt: str, options: Mapping[str, Any]) -> None:
if fmt == 'raw':
pprint.pprint(result)
else:
output = formatter.format_result(result, fmt, options)
if formatter.get_content_type(fmt) == CONTENT_JSON:
# reformat the result, so it is pretty-printed
try:
json.dump(json.loads(output), sys.stdout, indent=4, ensure_ascii=False)
except json.decoder.JSONDecodeError as err:
# Catch the error here, so that data can be debugged,
                # when people are developing custom result formatters.
LOG.fatal("Parsing json failed: %s\nUnformatted output:\n%s", err, output)
else:
sys.stdout.write(output)
sys.stdout.write('\n')
class APISearch:
"""\
Execute a search query.
This command works exactly the same as if calling the /search endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Search/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
group.add_argument('--query',
help='Free-form query string')
for name, desc in STRUCTURED_QUERY:
group.add_argument('--' + name, help='Structured query: ' + desc)
_add_api_output_arguments(parser)
group = parser.add_argument_group('Result limitation')
group.add_argument('--countrycodes', metavar='CC,..',
help='Limit search results to one or more countries')
group.add_argument('--exclude_place_ids', metavar='ID,..',
                           help='List of search objects to be excluded')
group.add_argument('--limit', type=int, default=10,
help='Limit the number of returned results')
group.add_argument('--viewbox', metavar='X1,Y1,X2,Y2',
help='Preferred area to find search results')
group.add_argument('--bounded', action='store_true',
help='Strictly restrict results to viewbox area')
group.add_argument('--no-dedupe', action='store_false', dest='dedupe',
help='Do not remove duplicates from the result list')
_add_list_format(parser)
def run(self, args: NominatimArgs) -> int:
formatter = napi.load_format_dispatcher('v1', args.project_dir)
if args.list_formats:
return _list_formats(formatter, napi.SearchResults)
if args.format in ('debug', 'raw'):
loglib.set_log_output('text')
elif not formatter.supports_format(napi.SearchResults, args.format):
raise UsageError(f"Unsupported format '{args.format}'. "
'Use --list-formats to see supported formats.')
try:
with napi.NominatimAPI(args.project_dir) as api:
params: Dict[str, Any] = {'max_results': args.limit + min(args.limit, 10),
'address_details': True, # needed for display name
'geometry_output': _get_geometry_output(args),
'geometry_simplification': args.polygon_threshold,
'countries': args.countrycodes,
'excluded': args.exclude_place_ids,
'viewbox': args.viewbox,
'bounded_viewbox': args.bounded,
'locales': _get_locales(args, api.config.DEFAULT_LANGUAGE)
}
if args.query:
results = api.search(args.query, **params)
else:
results = api.search_address(amenity=args.amenity,
street=args.street,
city=args.city,
county=args.county,
state=args.state,
postalcode=args.postalcode,
country=args.country,
**params)
except napi.UsageError as ex:
raise UsageError(ex) from ex
if args.dedupe and len(results) > 1:
results = deduplicate_results(results, args.limit)
if args.format == 'debug':
print(loglib.get_and_disable())
return 0
_print_output(formatter, results, args.format,
{'extratags': args.extratags,
'namedetails': args.namedetails,
'addressdetails': args.addressdetails})
return 0
class APIReverse:
"""\
Execute API reverse query.
This command works exactly the same as if calling the /reverse endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Reverse/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
group.add_argument('--lat', type=float,
help='Latitude of coordinate to look up (in WGS84)')
group.add_argument('--lon', type=float,
help='Longitude of coordinate to look up (in WGS84)')
group.add_argument('--zoom', type=int,
help='Level of detail required for the address')
group.add_argument('--layer', metavar='LAYER',
choices=[n.name.lower() for n in napi.DataLayer if n.name],
action='append', required=False, dest='layers',
                           help='Restrict results to the given layer(s) (may be repeated)')
_add_api_output_arguments(parser)
_add_list_format(parser)
def run(self, args: NominatimArgs) -> int:
formatter = napi.load_format_dispatcher('v1', args.project_dir)
if args.list_formats:
return _list_formats(formatter, napi.ReverseResults)
if args.format in ('debug', 'raw'):
loglib.set_log_output('text')
elif not formatter.supports_format(napi.ReverseResults, args.format):
raise UsageError(f"Unsupported format '{args.format}'. "
'Use --list-formats to see supported formats.')
if args.lat is None or args.lon is None:
            raise UsageError("'lat' and 'lon' parameters are required.")
layers = _get_layers(args, napi.DataLayer.ADDRESS | napi.DataLayer.POI)
try:
with napi.NominatimAPI(args.project_dir) as api:
result = api.reverse(napi.Point(args.lon, args.lat),
max_rank=zoom_to_rank(args.zoom or 18),
layers=layers,
address_details=True, # needed for display name
geometry_output=_get_geometry_output(args),
geometry_simplification=args.polygon_threshold,
locales=_get_locales(args, api.config.DEFAULT_LANGUAGE))
except napi.UsageError as ex:
raise UsageError(ex) from ex
if args.format == 'debug':
print(loglib.get_and_disable())
return 0
if result:
_print_output(formatter, napi.ReverseResults([result]), args.format,
{'extratags': args.extratags,
'namedetails': args.namedetails,
'addressdetails': args.addressdetails})
return 0
LOG.error("Unable to geocode.")
return 42
class APILookup:
"""\
Execute API lookup query.
This command works exactly the same as if calling the /lookup endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Lookup/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
group.add_argument('--id', metavar='OSMID',
action='append', dest='ids',
help='OSM id to lookup in format <NRW><id> (may be repeated)')
_add_api_output_arguments(parser)
_add_list_format(parser)
def run(self, args: NominatimArgs) -> int:
formatter = napi.load_format_dispatcher('v1', args.project_dir)
if args.list_formats:
return _list_formats(formatter, napi.ReverseResults)
if args.format in ('debug', 'raw'):
loglib.set_log_output('text')
elif not formatter.supports_format(napi.ReverseResults, args.format):
raise UsageError(f"Unsupported format '{args.format}'. "
'Use --list-formats to see supported formats.')
if args.ids is None:
raise UsageError("'id' parameter required.")
places = [napi.OsmID(o[0], int(o[1:])) for o in args.ids]
try:
with napi.NominatimAPI(args.project_dir) as api:
results = api.lookup(places,
address_details=True, # needed for display name
geometry_output=_get_geometry_output(args),
geometry_simplification=args.polygon_threshold or 0.0,
locales=_get_locales(args, api.config.DEFAULT_LANGUAGE))
except napi.UsageError as ex:
raise UsageError(ex) from ex
if args.format == 'debug':
print(loglib.get_and_disable())
return 0
_print_output(formatter, results, args.format,
{'extratags': args.extratags,
'namedetails': args.namedetails,
'addressdetails': args.addressdetails})
return 0
class APIDetails:
"""\
Execute API details query.
This command works exactly the same as if calling the /details endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Details/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
group.add_argument('--node', '-n', type=int,
help="Look up the OSM node with the given ID.")
group.add_argument('--way', '-w', type=int,
help="Look up the OSM way with the given ID.")
group.add_argument('--relation', '-r', type=int,
help="Look up the OSM relation with the given ID.")
group.add_argument('--place_id', '-p', type=int,
help='Database internal identifier of the OSM object to look up')
group.add_argument('--class', dest='object_class',
                           help=("Class type to disambiguate multiple entries "
"of the same object."))
group = parser.add_argument_group('Output arguments')
group.add_argument('--format', type=str, default='json',
help='Format of result (use --list-formats to see supported formats)')
group.add_argument('--addressdetails', action='store_true',
help='Include a breakdown of the address into elements')
group.add_argument('--keywords', action='store_true',
help='Include a list of name keywords and address keywords')
group.add_argument('--linkedplaces', action='store_true',
                           help='Include details of places that are linked with this one')
group.add_argument('--hierarchy', action='store_true',
help='Include details of places lower in the address hierarchy')
group.add_argument('--group_hierarchy', action='store_true',
help='Group the places by type')
group.add_argument('--polygon_geojson', action='store_true',
help='Include geometry of result')
group.add_argument('--lang', '--accept-language', metavar='LANGS',
help='Preferred language order for presenting search results')
_add_list_format(parser)
def run(self, args: NominatimArgs) -> int:
formatter = napi.load_format_dispatcher('v1', args.project_dir)
if args.list_formats:
return _list_formats(formatter, napi.DetailedResult)
if args.format in ('debug', 'raw'):
loglib.set_log_output('text')
elif not formatter.supports_format(napi.DetailedResult, args.format):
raise UsageError(f"Unsupported format '{args.format}'. "
'Use --list-formats to see supported formats.')
place: napi.PlaceRef
if args.node:
place = napi.OsmID('N', args.node, args.object_class)
elif args.way:
place = napi.OsmID('W', args.way, args.object_class)
elif args.relation:
place = napi.OsmID('R', args.relation, args.object_class)
elif args.place_id is not None:
place = napi.PlaceID(args.place_id)
else:
            raise UsageError('One of the arguments --node/-n, --way/-w, '
                             '--relation/-r or --place_id/-p is required.')
try:
with napi.NominatimAPI(args.project_dir) as api:
locales = _get_locales(args, api.config.DEFAULT_LANGUAGE)
result = api.details(place,
address_details=args.addressdetails,
linked_places=args.linkedplaces,
parented_places=args.hierarchy,
keywords=args.keywords,
geometry_output=napi.GeometryFormat.GEOJSON
if args.polygon_geojson
else napi.GeometryFormat.NONE,
locales=locales)
except napi.UsageError as ex:
raise UsageError(ex) from ex
if args.format == 'debug':
print(loglib.get_and_disable())
return 0
if result:
_print_output(formatter, result, args.format or 'json',
{'locales': locales,
'group_hierarchy': args.group_hierarchy})
return 0
LOG.error("Object not found in database.")
return 42
class APIStatus:
"""
Execute API status query.
This command works exactly the same as if calling the /status endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Status/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('API parameters')
group.add_argument('--format', type=str, default='text',
help='Format of result (use --list-formats to see supported formats)')
_add_list_format(parser)
def run(self, args: NominatimArgs) -> int:
formatter = napi.load_format_dispatcher('v1', args.project_dir)
if args.list_formats:
return _list_formats(formatter, napi.StatusResult)
if args.format in ('debug', 'raw'):
loglib.set_log_output('text')
elif not formatter.supports_format(napi.StatusResult, args.format):
raise UsageError(f"Unsupported format '{args.format}'. "
'Use --list-formats to see supported formats.')
try:
with napi.NominatimAPI(args.project_dir) as api:
status = api.status()
except napi.UsageError as ex:
raise UsageError(ex) from ex
if args.format == 'debug':
print(loglib.get_and_disable())
return 0
_print_output(formatter, status, args.format, {})
return 0
| size: 20,926 | language: Python | extension: .py | total_lines: 404 | avg_line_length: 38.012376 | max_line_length: 100 | alphanum_fraction: 0.581318 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,778 | file_name: convert.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/convert.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'convert' subcommand.
"""
from typing import Set, Any, Union, Optional, Sequence
import argparse
import asyncio
from pathlib import Path
from ..errors import UsageError
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
class WithAction(argparse.Action):
""" Special action that saves a list of flags, given on the command-line
as `--with-foo` or `--without-foo`.
"""
def __init__(self, option_strings: Sequence[str], dest: Any,
default: bool = True, **kwargs: Any) -> None:
if 'nargs' in kwargs:
raise ValueError("nargs not allowed.")
if option_strings is None:
raise ValueError("Positional parameter not allowed.")
self.dest_set = kwargs.pop('dest_set')
full_option_strings = []
for opt in option_strings:
if not opt.startswith('--'):
raise ValueError("short-form options not allowed")
if default:
self.dest_set.add(opt[2:])
full_option_strings.append(f"--with-{opt[2:]}")
full_option_strings.append(f"--without-{opt[2:]}")
super().__init__(full_option_strings, argparse.SUPPRESS, nargs=0, **kwargs)
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None) -> None:
assert option_string
if option_string.startswith('--with-'):
self.dest_set.add(option_string[7:])
if option_string.startswith('--without-'):
self.dest_set.discard(option_string[10:])
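# Editor's sketch of WithAction in use (hypothetical option name 'foo'):
# registering `--foo` with default=True creates the flag pair
# `--with-foo`/`--without-foo` and pre-seeds the destination set.
#
#   opts: Set[str] = set()
#   demo = argparse.ArgumentParser()
#   demo.add_argument('--foo', action=WithAction, dest_set=opts, default=True)
#   demo.parse_args([])                  # opts == {'foo'}
#   demo.parse_args(['--without-foo'])   # opts == set()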
class ConvertDB:
""" Convert an existing database into a different format. (EXPERIMENTAL)
Dump a read-only version of the database in a different format.
At the moment only a SQLite database suitable for reverse lookup
can be created.
"""
def __init__(self) -> None:
self.options: Set[str] = set()
def add_args(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument('--format', default='sqlite',
choices=('sqlite', ),
help='Format of the output database (must be sqlite currently)')
parser.add_argument('--output', '-o', required=True, type=Path,
help='File to write the database to.')
group = parser.add_argument_group('Switches to define database layout'
                                          ' (currently no effect)')
group.add_argument('--reverse', action=WithAction, dest_set=self.options, default=True,
help='Enable/disable support for reverse and lookup API'
' (default: enabled)')
        group.add_argument('--search', action=WithAction, dest_set=self.options, default=False,
                           help='Enable/disable support for search API (default: disabled)')
group.add_argument('--details', action=WithAction, dest_set=self.options, default=True,
help='Enable/disable support for details API (default: enabled)')
def run(self, args: NominatimArgs) -> int:
if args.output.exists():
raise UsageError(f"File '{args.output}' already exists. Refusing to overwrite.")
if args.format == 'sqlite':
from ..tools import convert_sqlite
asyncio.run(convert_sqlite.convert(args.project_dir, args.output, self.options))
return 0
return 1
| size: 3,930 | language: Python | extension: .py | total_lines: 78 | avg_line_length: 40.371795 | max_line_length: 95 | alphanum_fraction: 0.626597 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,779 | file_name: __init__.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/__init__.py |
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Subcommand definitions for the command-line tool.
"""
# mypy and pylint disagree about the style of explicit exports,
# see https://github.com/PyCQA/pylint/issues/6006.
# pylint: disable=useless-import-alias
from .setup import SetupAll as SetupAll
from .replication import UpdateReplication as UpdateReplication
from .index import UpdateIndex as UpdateIndex
from .refresh import UpdateRefresh as UpdateRefresh
from .add_data import UpdateAddData as UpdateAddData
from .admin import AdminFuncs as AdminFuncs
from .freeze import SetupFreeze as SetupFreeze
from .special_phrases import ImportSpecialPhrases as ImportSpecialPhrases
| size: 840 | language: Python | extension: .py | total_lines: 20 | avg_line_length: 40.95 | max_line_length: 73 | alphanum_fraction: 0.824176 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,780 | file_name: replication.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/replication.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'replication' sub-command.
"""
from typing import Optional
import argparse
import datetime as dt
import logging
import socket
import time
import asyncio
from ..db import status
from ..db.connection import connect
from ..errors import UsageError
from .args import NominatimArgs
LOG = logging.getLogger()
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to make pyosmium optional for replication only.
# pylint: disable=C0415
class UpdateReplication:
"""\
Update the database using an online replication service.
    An OSM replication service is an online service that provides regular
    updates (OSM diff files) for the planet or for an extract of it. The OSMF
provides the primary replication service for the full planet at
https://planet.osm.org/replication/ but there are other providers of
extracts of OSM data who provide such a service as well.
    This sub-command allows you to set up such a replication service and to
    download and import updates at regular intervals. You need to call
    '--init' once to set up the process, and again whenever you change the
    replication configuration parameters. Without any arguments, the
    sub-command will go into a loop and
continuously apply updates as they become available. Giving `--once` just
downloads and imports the next batch of updates.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Arguments for initialisation')
group.add_argument('--init', action='store_true',
help='Initialise the update process')
group.add_argument('--no-update-functions', dest='update_functions',
action='store_false',
help="Do not update the trigger function to "
"support differential updates (EXPERT)")
group = parser.add_argument_group('Arguments for updates')
group.add_argument('--check-for-updates', action='store_true',
help='Check if new updates are available and exit')
group.add_argument('--once', action='store_true',
help="Download and apply updates only once. When "
"not set, updates are continuously applied")
group.add_argument('--catch-up', action='store_true',
help="Download and apply updates until no new "
"data is available on the server")
group.add_argument('--no-index', action='store_false', dest='do_index',
help=("Do not index the new data. Only usable "
"together with --once"))
group.add_argument('--osm2pgsql-cache', metavar='SIZE', type=int,
help='Size of cache to be used by osm2pgsql (in MB)')
group = parser.add_argument_group('Download parameters')
group.add_argument('--socket-timeout', dest='socket_timeout', type=int, default=60,
help='Set timeout for file downloads')
def _init_replication(self, args: NominatimArgs) -> int:
from ..tools import replication, refresh
LOG.warning("Initialising replication updates")
with connect(args.config.get_libpq_dsn()) as conn:
replication.init_replication(conn, base_url=args.config.REPLICATION_URL,
socket_timeout=args.socket_timeout)
if args.update_functions:
LOG.warning("Create functions")
refresh.create_functions(conn, args.config, True, False)
return 0
def _check_for_updates(self, args: NominatimArgs) -> int:
from ..tools import replication
with connect(args.config.get_libpq_dsn()) as conn:
return replication.check_for_updates(conn, base_url=args.config.REPLICATION_URL,
socket_timeout=args.socket_timeout)
def _report_update(self, batchdate: dt.datetime,
start_import: dt.datetime,
start_index: Optional[dt.datetime]) -> None:
def round_time(delta: dt.timedelta) -> dt.timedelta:
return dt.timedelta(seconds=int(delta.total_seconds()))
end = dt.datetime.now(dt.timezone.utc)
LOG.warning("Update completed. Import: %s. %sTotal: %s. Remaining backlog: %s.",
round_time((start_index or end) - start_import),
f"Indexing: {round_time(end - start_index)} " if start_index else '',
round_time(end - start_import),
round_time(end - batchdate))
def _compute_update_interval(self, args: NominatimArgs) -> int:
if args.catch_up:
return 0
update_interval = args.config.get_int('REPLICATION_UPDATE_INTERVAL')
# Sanity check to not overwhelm the Geofabrik servers.
if 'download.geofabrik.de' in args.config.REPLICATION_URL\
and update_interval < 86400:
LOG.fatal("Update interval too low for download.geofabrik.de.\n"
"Please check install documentation "
"(https://nominatim.org/release-docs/latest/admin/Update/#"
"setting-up-the-update-process).")
raise UsageError("Invalid replication update interval setting.")
return update_interval
async def _update(self, args: NominatimArgs) -> None:
# pylint: disable=too-many-locals
from ..tools import replication
from ..indexer.indexer import Indexer
from ..tokenizer import factory as tokenizer_factory
update_interval = self._compute_update_interval(args)
params = args.osm2pgsql_options(default_cache=2000, default_threads=1)
params.update(base_url=args.config.REPLICATION_URL,
update_interval=update_interval,
import_file=args.project_dir / 'osmosischange.osc',
max_diff_size=args.config.get_int('REPLICATION_MAX_DIFF'),
indexed_only=not args.once)
if not args.once:
if not args.do_index:
LOG.fatal("Indexing cannot be disabled when running updates continuously.")
raise UsageError("Bad argument '--no-index'.")
recheck_interval = args.config.get_int('REPLICATION_RECHECK_INTERVAL')
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer, args.threads or 1)
dsn = args.config.get_libpq_dsn()
while True:
start = dt.datetime.now(dt.timezone.utc)
state = replication.update(dsn, params, socket_timeout=args.socket_timeout)
with connect(dsn) as conn:
if state is not replication.UpdateState.NO_CHANGES:
status.log_status(conn, start, 'import')
batchdate, _, _ = status.get_status(conn)
conn.commit()
if state is not replication.UpdateState.NO_CHANGES and args.do_index:
index_start = dt.datetime.now(dt.timezone.utc)
await indexer.index_full(analyse=False)
with connect(dsn) as conn:
status.set_indexed(conn, True)
status.log_status(conn, index_start, 'index')
conn.commit()
else:
index_start = None
            if (state is replication.UpdateState.NO_CHANGES
                    and args.catch_up) or update_interval > 40*60:
await indexer.index_full(analyse=False)
if LOG.isEnabledFor(logging.WARNING):
assert batchdate is not None
self._report_update(batchdate, start, index_start)
if args.once or (args.catch_up and state is replication.UpdateState.NO_CHANGES):
break
if state is replication.UpdateState.NO_CHANGES:
LOG.warning("No new changes. Sleeping for %d sec.", recheck_interval)
time.sleep(recheck_interval)
def run(self, args: NominatimArgs) -> int:
socket.setdefaulttimeout(args.socket_timeout)
if args.init:
return self._init_replication(args)
if args.check_for_updates:
return self._check_for_updates(args)
asyncio.run(self._update(args))
return 0
| size: 8,785 | language: Python | extension: .py | total_lines: 160 | avg_line_length: 42.55625 | max_line_length: 92 | alphanum_fraction: 0.628189 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,781 | file_name: admin.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/admin.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'admin' subcommand.
"""
import logging
import argparse
import random
from ..errors import UsageError
from ..db.connection import connect, table_exists
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
LOG = logging.getLogger()
class AdminFuncs:
"""\
Analyse and maintain the database.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Admin tasks')
objs = group.add_mutually_exclusive_group(required=True)
objs.add_argument('--warm', action='store_true',
help='Warm database caches for search and reverse queries')
objs.add_argument('--check-database', action='store_true',
help='Check that the database is complete and operational')
objs.add_argument('--migrate', action='store_true',
help='Migrate the database to a new software version')
objs.add_argument('--analyse-indexing', action='store_true',
help='Print performance analysis of the indexing process')
objs.add_argument('--collect-os-info', action="store_true",
help="Generate a report about the host system information")
objs.add_argument('--clean-deleted', action='store', metavar='AGE',
help='Clean up deleted relations')
group = parser.add_argument_group('Arguments for cache warming')
group.add_argument('--search-only', action='store_const', dest='target',
const='search',
help="Only pre-warm tables for search queries")
group.add_argument('--reverse-only', action='store_const', dest='target',
const='reverse',
help="Only pre-warm tables for reverse queries")
        group = parser.add_argument_group('Arguments for index analysis')
mgroup = group.add_mutually_exclusive_group()
mgroup.add_argument('--osm-id', type=str,
help='Analyse indexing of the given OSM object')
mgroup.add_argument('--place-id', type=int,
help='Analyse indexing of the given Nominatim object')
def run(self, args: NominatimArgs) -> int:
# pylint: disable=too-many-return-statements
if args.warm:
return self._warm(args)
if args.check_database:
LOG.warning('Checking database')
from ..tools import check_database
return check_database.check_database(args.config)
if args.analyse_indexing:
LOG.warning('Analysing performance of indexing function')
from ..tools import admin
admin.analyse_indexing(args.config, osm_id=args.osm_id, place_id=args.place_id)
return 0
if args.migrate:
LOG.warning('Checking for necessary database migrations')
from ..tools import migration
return migration.migrate(args.config, args)
if args.collect_os_info:
LOG.warning("Reporting System Information")
from ..tools import collect_os_info
collect_os_info.report_system_information(args.config)
return 0
if args.clean_deleted:
LOG.warning('Cleaning up deleted relations')
from ..tools import admin
admin.clean_deleted_relations(args.config, age=args.clean_deleted)
return 0
return 1
def _warm(self, args: NominatimArgs) -> int:
try:
import nominatim_api as napi
except ModuleNotFoundError as exp:
raise UsageError("Warming requires nominatim API. "
"Install with 'pip install nominatim-api'.") from exp
LOG.warning('Warming database caches')
api = napi.NominatimAPI(args.project_dir)
try:
if args.target != 'search':
for _ in range(1000):
                    # Coordinates are (lon, lat), matching napi.Point usage elsewhere.
                    api.reverse((random.uniform(-180, 180), random.uniform(-90, 90)),
                                address_details=True)
if args.target != 'reverse':
from ..tokenizer import factory as tokenizer_factory
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
with connect(args.config.get_libpq_dsn()) as conn:
if table_exists(conn, 'search_name'):
words = tokenizer.most_frequent_words(conn, 1000)
else:
words = []
for word in words:
api.search(word)
finally:
api.close()
return 0
| size: 5,108 | language: Python | extension: .py | total_lines: 106 | avg_line_length: 36.216981 | max_line_length: 91 | alphanum_fraction: 0.606627 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,782 | file_name: index.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/index.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'index' subcommand.
"""
import argparse
import asyncio
import psutil
from ..db import status
from ..db.connection import connect
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
class UpdateIndex:
"""\
Reindex all new and modified data.
Indexing is the process of computing the address and search terms for
the places in the database. Every time data is added or changed, indexing
needs to be run. Imports and replication updates automatically take care
    of indexing. For other cases, this command allows you to run indexing manually.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Filter arguments')
group.add_argument('--boundaries-only', action='store_true',
help="""Index only administrative boundaries.""")
group.add_argument('--no-boundaries', action='store_true',
help="""Index everything except administrative boundaries.""")
group.add_argument('--minrank', '-r', type=int, metavar='RANK', default=0,
help='Minimum/starting rank')
group.add_argument('--maxrank', '-R', type=int, metavar='RANK', default=30,
help='Maximum/finishing rank')
def run(self, args: NominatimArgs) -> int:
asyncio.run(self._do_index(args))
if not args.no_boundaries and not args.boundaries_only \
and args.minrank == 0 and args.maxrank == 30:
with connect(args.config.get_libpq_dsn()) as conn:
status.set_indexed(conn, True)
return 0
async def _do_index(self, args: NominatimArgs) -> None:
from ..tokenizer import factory as tokenizer_factory
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
from ..indexer.indexer import Indexer
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer,
args.threads or psutil.cpu_count() or 1)
has_pending = True # run at least once
while has_pending:
if not args.no_boundaries:
await indexer.index_boundaries(args.minrank, args.maxrank)
if not args.boundaries_only:
await indexer.index_by_rank(args.minrank, args.maxrank)
await indexer.index_postcodes()
has_pending = indexer.has_pending()
| size: 2,791 | language: Python | extension: .py | total_lines: 58 | avg_line_length: 39.793103 | max_line_length: 89 | alphanum_fraction: 0.665072 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,783 | file_name: args.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/args.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Provides custom functions over command-line arguments.
"""
from typing import Optional, List, Dict, Any, Sequence, Tuple
import argparse
import logging
from pathlib import Path
from ..errors import UsageError
from ..config import Configuration
from ..typing import Protocol
LOG = logging.getLogger()
class Subcommand(Protocol):
"""
Interface to be implemented by classes implementing a CLI subcommand.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
"""
Fill the given parser for the subcommand with the appropriate
parameters.
"""
def run(self, args: 'NominatimArgs') -> int:
"""
Run the subcommand with the given parsed arguments.
"""
class NominatimArgs:
""" Customized namespace class for the nominatim command line tool
to receive the command-line arguments.
"""
# Basic environment set by root program.
config: Configuration
project_dir: Path
# Global switches
version: bool
subcommand: Optional[str]
command: Subcommand
# Shared parameters
osm2pgsql_cache: Optional[int]
socket_timeout: int
# Arguments added to all subcommands.
verbose: int
threads: Optional[int]
# Arguments to 'add-data'
file: Optional[str]
diff: Optional[str]
node: Optional[int]
way: Optional[int]
relation: Optional[int]
tiger_data: Optional[str]
use_main_api: bool
# Arguments to 'admin'
warm: bool
check_database: bool
migrate: bool
collect_os_info: bool
clean_deleted: str
analyse_indexing: bool
target: Optional[str]
osm_id: Optional[str]
place_id: Optional[int]
# Arguments to 'import'
osm_file: List[str]
continue_at: Optional[str]
reverse_only: bool
no_partitions: bool
no_updates: bool
offline: bool
ignore_errors: bool
index_noanalyse: bool
prepare_database: bool
# Arguments to 'index'
boundaries_only: bool
no_boundaries: bool
minrank: int
maxrank: int
# Arguments to 'export'
output_type: str
output_format: str
output_all_postcodes: bool
language: Optional[str]
restrict_to_country: Optional[str]
# Arguments to 'convert'
output: Path
# Arguments to 'refresh'
postcodes: bool
word_tokens: bool
word_counts: bool
address_levels: bool
functions: bool
wiki_data: bool
secondary_importance: bool
importance: bool
website: bool
diffs: bool
enable_debug_statements: bool
data_object: Sequence[Tuple[str, int]]
data_area: Sequence[Tuple[str, int]]
# Arguments to 'replication'
init: bool
update_functions: bool
check_for_updates: bool
once: bool
catch_up: bool
do_index: bool
# Arguments to 'serve'
server: str
engine: str
    # Arguments to 'special-phrases'
import_from_wiki: bool
import_from_csv: Optional[str]
no_replace: bool
# Arguments to all query functions
format: str
list_formats: bool
addressdetails: bool
extratags: bool
namedetails: bool
lang: Optional[str]
polygon_output: Optional[str]
polygon_threshold: Optional[float]
# Arguments to 'search'
query: Optional[str]
amenity: Optional[str]
street: Optional[str]
city: Optional[str]
county: Optional[str]
state: Optional[str]
country: Optional[str]
postalcode: Optional[str]
countrycodes: Optional[str]
exclude_place_ids: Optional[str]
limit: int
viewbox: Optional[str]
bounded: bool
dedupe: bool
# Arguments to 'reverse'
lat: float
lon: float
zoom: Optional[int]
layers: Optional[Sequence[str]]
# Arguments to 'lookup'
ids: Sequence[str]
# Arguments to 'details'
object_class: Optional[str]
linkedplaces: bool
hierarchy: bool
keywords: bool
polygon_geojson: bool
group_hierarchy: bool
def osm2pgsql_options(self, default_cache: int,
default_threads: int) -> Dict[str, Any]:
""" Return the standard osm2pgsql options that can be derived
from the command line arguments. The resulting dict can be
further customized and then used in `run_osm2pgsql()`.
"""
return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY or self.config.lib_dir.osm2pgsql,
osm2pgsql_cache=self.osm2pgsql_cache or default_cache,
osm2pgsql_style=self.config.get_import_style_file(),
osm2pgsql_style_path=self.config.config_dir,
threads=self.threads or default_threads,
dsn=self.config.get_libpq_dsn(),
flatnode_file=str(self.config.get_path('FLATNODE_FILE') or ''),
tablespaces=dict(slim_data=self.config.TABLESPACE_OSM_DATA,
slim_index=self.config.TABLESPACE_OSM_INDEX,
main_data=self.config.TABLESPACE_PLACE_DATA,
main_index=self.config.TABLESPACE_PLACE_INDEX
)
)
def get_osm_file_list(self) -> Optional[List[Path]]:
""" Return the --osm-file argument as a list of Paths or None
if no argument was given. The function also checks if the files
exist and raises a UsageError if one cannot be found.
"""
if not self.osm_file:
return None
files = [Path(f) for f in self.osm_file]
for fname in files:
if not fname.is_file():
LOG.fatal("OSM file '%s' does not exist.", fname)
raise UsageError('Cannot access file.')
return files
| size: 5,986 | language: Python | extension: .py | total_lines: 186 | avg_line_length: 25.166667 | max_line_length: 92 | alphanum_fraction: 0.650373 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,784 | file_name: freeze.py | file_path: osm-search_Nominatim/src/nominatim_db/clicmd/freeze.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'freeze' subcommand.
"""
import argparse
from ..db.connection import connect
from .args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid potentially unused imports.
# pylint: disable=E0012,C0415
class SetupFreeze:
"""\
Make database read-only.
    About half of the data in the Nominatim database is kept only to be able to
keep the data up-to-date with new changes made in OpenStreetMap. This
command drops all this data and only keeps the part needed for geocoding
itself.
This command has the same effect as the `--no-updates` option for imports.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
pass # No options
def run(self, args: NominatimArgs) -> int:
from ..tools import freeze
with connect(args.config.get_libpq_dsn()) as conn:
freeze.drop_update_tables(conn)
freeze.drop_flatnode_file(args.config.get_path('FLATNODE_FILE'))
return 0
| size: 1,278 | language: Python | extension: .py | total_lines: 33 | avg_line_length: 34.636364 | max_line_length: 78 | alphanum_fraction: 0.728745 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,785 | file_name: sql_preprocessor.py | file_path: osm-search_Nominatim/src/nominatim_db/db/sql_preprocessor.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Preprocessing of SQL files.
"""
from typing import Set, Dict, Any, cast
import jinja2
from .connection import Connection, server_version_tuple, postgis_version_tuple
from ..config import Configuration
from ..db.query_pool import QueryPool
def _get_partitions(conn: Connection) -> Set[int]:
""" Get the set of partitions currently in use.
"""
with conn.cursor() as cur:
cur.execute('SELECT DISTINCT partition FROM country_name')
partitions = set([0])
for row in cur:
partitions.add(row[0])
return partitions
def _get_tables(conn: Connection) -> Set[str]:
""" Return the set of tables currently in use.
"""
with conn.cursor() as cur:
cur.execute("SELECT tablename FROM pg_tables WHERE schemaname = 'public'")
return set((row[0] for row in list(cur)))
def _get_middle_db_format(conn: Connection, tables: Set[str]) -> str:
""" Returns the version of the slim middle tables.
"""
if 'osm2pgsql_properties' not in tables:
return '1'
with conn.cursor() as cur:
cur.execute("SELECT value FROM osm2pgsql_properties WHERE property = 'db_format'")
row = cur.fetchone()
return cast(str, row[0]) if row is not None else '1'
def _setup_tablespace_sql(config: Configuration) -> Dict[str, str]:
""" Returns a dict with tablespace expressions for the different tablespace
kinds depending on whether a tablespace is configured or not.
"""
out = {}
for subset in ('ADDRESS', 'SEARCH', 'AUX'):
for kind in ('DATA', 'INDEX'):
tspace = getattr(config, f'TABLESPACE_{subset}_{kind}')
if tspace:
tspace = f'TABLESPACE "{tspace}"'
out[f'{subset.lower()}_{kind.lower()}'] = tspace
return out
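# Editor's sketch: with, say, TABLESPACE_SEARCH_DATA set to 'ssd' and all
# other tablespace settings left empty, the function above returns
# (hypothetical values):
#
#   {'address_data': '', 'address_index': '',
#    'search_data': 'TABLESPACE "ssd"', 'search_index': '',
#    'aux_data': '', 'aux_index': ''}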
def _setup_postgresql_features(conn: Connection) -> Dict[str, Any]:
""" Set up a dictionary with various optional Postgresql/Postgis features that
depend on the database version.
"""
pg_version = server_version_tuple(conn)
postgis_version = postgis_version_tuple(conn)
pg11plus = pg_version >= (11, 0, 0)
ps3 = postgis_version >= (3, 0)
return {
'has_index_non_key_column': pg11plus,
'spgist_geom' : 'SPGIST' if pg11plus and ps3 else 'GIST'
}
class SQLPreprocessor:
    """ An environment for preprocessing SQL files from the
lib-sql directory.
The preprocessor provides a number of default filters and variables.
The variables may be overwritten when rendering an SQL file.
The preprocessing is currently based on the jinja2 templating library
and follows its syntax.
"""
def __init__(self, conn: Connection, config: Configuration) -> None:
self.env = jinja2.Environment(autoescape=False,
loader=jinja2.FileSystemLoader(str(config.lib_dir.sql)))
db_info: Dict[str, Any] = {}
db_info['partitions'] = _get_partitions(conn)
db_info['tables'] = _get_tables(conn)
db_info['reverse_only'] = 'search_name' not in db_info['tables']
db_info['tablespace'] = _setup_tablespace_sql(config)
db_info['middle_db_format'] = _get_middle_db_format(conn, db_info['tables'])
self.env.globals['config'] = config
self.env.globals['db'] = db_info
self.env.globals['postgres'] = _setup_postgresql_features(conn)
def run_string(self, conn: Connection, template: str, **kwargs: Any) -> None:
""" Execute the given SQL template string on the connection.
The keyword arguments may supply additional parameters
for preprocessing.
"""
sql = self.env.from_string(template).render(**kwargs)
with conn.cursor() as cur:
cur.execute(sql)
conn.commit()
def run_sql_file(self, conn: Connection, name: str, **kwargs: Any) -> None:
""" Execute the given SQL file on the connection. The keyword arguments
may supply additional parameters for preprocessing.
"""
sql = self.env.get_template(name).render(**kwargs)
with conn.cursor() as cur:
cur.execute(sql)
conn.commit()
async def run_parallel_sql_file(self, dsn: str, name: str, num_threads: int = 1,
**kwargs: Any) -> None:
""" Execute the given SQL files using parallel asynchronous connections.
The keyword arguments may supply additional parameters for
preprocessing.
After preprocessing the SQL code is cut at lines containing only
'---'. Each chunk is sent to one of the `num_threads` workers.
"""
sql = self.env.get_template(name).render(**kwargs)
parts = sql.split('\n---\n')
async with QueryPool(dsn, num_threads) as pool:
for part in parts:
await pool.put_query(part, None)
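# Editor's sketch: a template passed to run_parallel_sql_file() could look
# like this (hypothetical SQL); sql.split('\n---\n') yields one chunk per
# statement, each sent to one of the pool's worker connections:
#
#   CREATE INDEX idx_placex_one ON placex (place_id);
#   ---
#   CREATE INDEX idx_placex_two ON placex (osm_id);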
| size: 5,148 | language: Python | extension: .py | total_lines: 112 | avg_line_length: 38.017857 | max_line_length: 94 | alphanum_fraction: 0.642686 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,786 | file_name: properties.py | file_path: osm-search_Nominatim/src/nominatim_db/db/properties.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Query and access functions for the in-database property table.
"""
from typing import Optional, cast
from .connection import Connection, table_exists
def set_property(conn: Connection, name: str, value: str) -> None:
""" Add or replace the property with the given name.
"""
with conn.cursor() as cur:
cur.execute('SELECT value FROM nominatim_properties WHERE property = %s',
(name, ))
if cur.rowcount == 0:
sql = 'INSERT INTO nominatim_properties (value, property) VALUES (%s, %s)'
else:
sql = 'UPDATE nominatim_properties SET value = %s WHERE property = %s'
cur.execute(sql, (value, name))
conn.commit()
def get_property(conn: Connection, name: str) -> Optional[str]:
""" Return the current value of the given property or None if the property
is not set.
"""
if not table_exists(conn, 'nominatim_properties'):
return None
with conn.cursor() as cur:
cur.execute('SELECT value FROM nominatim_properties WHERE property = %s',
(name, ))
if cur.rowcount == 0:
return None
result = cur.fetchone()
assert result is not None
return cast(Optional[str], result[0])
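# Editor's sketch: a typical round trip through the property table
# (hypothetical property name and value):
#
#   with connect(dsn) as conn:
#       set_property(conn, 'demo_property', '42')
#       assert get_property(conn, 'demo_property') == '42'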
| size: 1,477 | language: Python | extension: .py | total_lines: 37 | avg_line_length: 33.351351 | max_line_length: 86 | alphanum_fraction: 0.648951 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,787 | file_name: status.py | file_path: osm-search_Nominatim/src/nominatim_db/db/status.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Access and helper functions for the status and status log table.
"""
from typing import Optional, Tuple
import datetime as dt
import logging
import re
from .connection import Connection, table_exists, execute_scalar
from ..utils.url_utils import get_url
from ..errors import UsageError
LOG = logging.getLogger()
ISODATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
def compute_database_date(conn: Connection, offline: bool = False) -> dt.datetime:
""" Determine the date of the database from the newest object in the
        database.
"""
# If there is a date from osm2pgsql available, use that.
if table_exists(conn, 'osm2pgsql_properties'):
with conn.cursor() as cur:
cur.execute(""" SELECT value FROM osm2pgsql_properties
WHERE property = 'current_timestamp' """)
row = cur.fetchone()
if row is not None:
return dt.datetime.strptime(row[0], "%Y-%m-%dT%H:%M:%SZ")\
.replace(tzinfo=dt.timezone.utc)
if offline:
raise UsageError("Cannot determine database date from data in offline mode.")
# Else, find the node with the highest ID in the database
if table_exists(conn, 'place'):
osmid = execute_scalar(conn, "SELECT max(osm_id) FROM place WHERE osm_type='N'")
else:
osmid = execute_scalar(conn, "SELECT max(osm_id) FROM placex WHERE osm_type='N'")
if osmid is None:
LOG.fatal("No data found in the database.")
raise UsageError("No data found in the database.")
LOG.info("Using node id %d for timestamp lookup", osmid)
# Get the node from the API to find the timestamp when it was created.
node_url = f'https://www.openstreetmap.org/api/0.6/node/{osmid}/1'
data = get_url(node_url)
match = re.search(r'timestamp="((\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}))Z"', data)
if match is None:
LOG.fatal("The node data downloaded from the API does not contain valid data.\n"
"URL used: %s", node_url)
raise UsageError("Bad API data.")
LOG.debug("Found timestamp %s", match.group(1))
return dt.datetime.strptime(match.group(1), ISODATE_FORMAT).replace(tzinfo=dt.timezone.utc)
def set_status(conn: Connection, date: Optional[dt.datetime],
seq: Optional[int] = None, indexed: bool = True) -> None:
""" Replace the current status with the given status. If date is `None`
then only sequence and indexed will be updated as given. Otherwise
the whole status is replaced.
The change will be committed to the database.
"""
assert date is None or date.tzinfo == dt.timezone.utc
with conn.cursor() as cur:
if date is None:
cur.execute("UPDATE import_status set sequence_id = %s, indexed = %s",
(seq, indexed))
else:
cur.execute("TRUNCATE TABLE import_status")
cur.execute("""INSERT INTO import_status (lastimportdate, sequence_id, indexed)
VALUES (%s, %s, %s)""", (date, seq, indexed))
conn.commit()
def get_status(conn: Connection) -> Tuple[Optional[dt.datetime], Optional[int], Optional[bool]]:
""" Return the current status as a triple of (date, sequence, indexed).
If status has not been set up yet, a triple of None is returned.
"""
with conn.cursor() as cur:
cur.execute("SELECT * FROM import_status LIMIT 1")
if cur.rowcount < 1:
return None, None, None
row = cur.fetchone()
assert row
return row.lastimportdate, row.sequence_id, row.indexed
def set_indexed(conn: Connection, state: bool) -> None:
""" Set the indexed flag in the status table to the given state.
"""
with conn.cursor() as cur:
cur.execute("UPDATE import_status SET indexed = %s", (state, ))
conn.commit()
def log_status(conn: Connection, start: dt.datetime,
event: str, batchsize: Optional[int] = None) -> None:
""" Write a new status line to the `import_osmosis_log` table.
"""
with conn.cursor() as cur:
cur.execute("""INSERT INTO import_osmosis_log
(batchend, batchseq, batchsize, starttime, endtime, event)
SELECT lastimportdate, sequence_id, %s, %s, now(), %s FROM import_status""",
(batchsize, start, event))
conn.commit()
| size: 4,638 | language: Python | extension: .py | total_lines: 96 | avg_line_length: 40.46875 | max_line_length: 99 | alphanum_fraction: 0.639823 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,788 | file_name: utils.py | file_path: osm-search_Nominatim/src/nominatim_db/db/utils.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for handling DB accesses.
"""
from typing import IO, Optional, Union
import subprocess
import logging
import gzip
from pathlib import Path
from .connection import get_pg_env
from ..errors import UsageError
LOG = logging.getLogger()
def _pipe_to_proc(proc: 'subprocess.Popen[bytes]',
fdesc: Union[IO[bytes], gzip.GzipFile]) -> int:
assert proc.stdin is not None
chunk = fdesc.read(2048)
while chunk and proc.poll() is None:
try:
proc.stdin.write(chunk)
except BrokenPipeError as exc:
raise UsageError("Failed to execute SQL file.") from exc
chunk = fdesc.read(2048)
return len(chunk)
def execute_file(dsn: str, fname: Path,
ignore_errors: bool = False,
pre_code: Optional[str] = None,
post_code: Optional[str] = None) -> None:
""" Read an SQL file and run its contents against the given database
using psql. Use `pre_code` and `post_code` to run extra commands
before or after executing the file. The commands are run within the
same session, so they may be used to wrap the file execution in a
transaction.
"""
cmd = ['psql']
if not ignore_errors:
cmd.extend(('-v', 'ON_ERROR_STOP=1'))
if not LOG.isEnabledFor(logging.INFO):
cmd.append('--quiet')
with subprocess.Popen(cmd, env=get_pg_env(dsn), stdin=subprocess.PIPE) as proc:
assert proc.stdin is not None
try:
if not LOG.isEnabledFor(logging.INFO):
proc.stdin.write('set client_min_messages to WARNING;'.encode('utf-8'))
if pre_code:
proc.stdin.write((pre_code + ';').encode('utf-8'))
if fname.suffix == '.gz':
with gzip.open(str(fname), 'rb') as fdesc:
remain = _pipe_to_proc(proc, fdesc)
else:
with fname.open('rb') as fdesc:
remain = _pipe_to_proc(proc, fdesc)
if remain == 0 and post_code:
proc.stdin.write((';' + post_code).encode('utf-8'))
finally:
proc.stdin.close()
ret = proc.wait()
if ret != 0 or remain > 0:
raise UsageError("Failed to execute SQL file.")
| size: 2,506 | language: Python | extension: .py | total_lines: 63 | avg_line_length: 31.555556 | max_line_length: 87 | alphanum_fraction: 0.613235 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,789 | file_name: query_pool.py | file_path: osm-search_Nominatim/src/nominatim_db/db/query_pool.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
A connection pool that executes incoming queries in parallel.
"""
from typing import Any, Tuple, Optional
import asyncio
import logging
import time
import psycopg
LOG = logging.getLogger()
QueueItem = Optional[Tuple[psycopg.abc.Query, Any]]
class QueryPool:
""" Pool to run SQL queries in parallel asynchronous execution.
All queries are run in autocommit mode. If parallel execution leads
to a deadlock, then the query is repeated.
        The results of the queries are discarded.
"""
def __init__(self, dsn: str, pool_size: int = 1, **conn_args: Any) -> None:
self.wait_time = 0.0
self.query_queue: 'asyncio.Queue[QueueItem]' = asyncio.Queue(maxsize=2 * pool_size)
self.pool = [asyncio.create_task(self._worker_loop(dsn, **conn_args))
for _ in range(pool_size)]
async def put_query(self, query: psycopg.abc.Query, params: Any) -> None:
""" Schedule a query for execution.
"""
tstart = time.time()
await self.query_queue.put((query, params))
self.wait_time += time.time() - tstart
await asyncio.sleep(0)
async def finish(self) -> None:
""" Wait for all queries to finish and close the pool.
"""
for _ in self.pool:
await self.query_queue.put(None)
tstart = time.time()
await asyncio.wait(self.pool)
self.wait_time += time.time() - tstart
for task in self.pool:
excp = task.exception()
if excp is not None:
raise excp
async def _worker_loop(self, dsn: str, **conn_args: Any) -> None:
conn_args['autocommit'] = True
aconn = await psycopg.AsyncConnection.connect(dsn, **conn_args)
async with aconn:
async with aconn.cursor() as cur:
item = await self.query_queue.get()
while item is not None:
try:
if item[1] is None:
await cur.execute(item[0])
else:
await cur.execute(item[0], item[1])
item = await self.query_queue.get()
except psycopg.errors.DeadlockDetected:
assert item is not None
LOG.info("Deadlock detected (sql = %s, params = %s), retry.",
str(item[0]), str(item[1]))
# item is still valid here, causing a retry
async def __aenter__(self) -> 'QueryPool':
return self
async def __aexit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
await self.finish()
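# Editor's sketch of typical QueryPool usage (hypothetical DSN and table);
# queries are queued and executed by `pool_size` parallel connections:
#
#   async def fill_table(dsn: str) -> None:
#       async with QueryPool(dsn, pool_size=4) as pool:
#           for i in range(1000):
#               await pool.put_query('INSERT INTO demo_table VALUES (%s)', (i,))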
| size: 2,916 | language: Python | extension: .py | total_lines: 68 | avg_line_length: 32.573529 | max_line_length: 91 | alphanum_fraction: 0.583598 | repo_name: osm-search/Nominatim | repo_stars: 3,062 | repo_forks: 711 | repo_open_issues: 96 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |

| id: 14,790 | file_name: connection.py | file_path: osm-search_Nominatim/src/nominatim_db/db/connection.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Specialised connection and cursor functions.
"""
from typing import Optional, Any, Dict, Tuple
import logging
import os
import psycopg
import psycopg.types.hstore
from psycopg import sql as pysql
from ..typing import SysEnv
from ..errors import UsageError
LOG = logging.getLogger()
Cursor = psycopg.Cursor[Any]
Connection = psycopg.Connection[Any]
def execute_scalar(conn: Connection, sql: psycopg.abc.Query, args: Any = None) -> Any:
""" Execute query that returns a single value. The value is returned.
If the query yields more than one row, a ValueError is raised.
"""
with conn.cursor(row_factory=psycopg.rows.tuple_row) as cur:
cur.execute(sql, args)
if cur.rowcount != 1:
raise RuntimeError("Query did not return a single row.")
result = cur.fetchone()
assert result is not None
return result[0]
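# Editor's note: execute_scalar() is for queries that yield exactly one row
# with one value, e.g. (hypothetical use):
#
#   count = execute_scalar(conn, 'SELECT count(*) FROM placex')
#
# A query returning zero or several rows raises RuntimeError instead.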
def table_exists(conn: Connection, table: str) -> bool:
""" Check that a table with the given name exists in the database.
"""
num = execute_scalar(conn,
"""SELECT count(*) FROM pg_tables
WHERE tablename = %s and schemaname = 'public'""", (table, ))
return num == 1 if isinstance(num, int) else False
def table_has_column(conn: Connection, table: str, column: str) -> bool:
""" Check if the table 'table' exists and has a column with name 'column'.
"""
has_column = execute_scalar(conn,
"""SELECT count(*) FROM information_schema.columns
WHERE table_name = %s and column_name = %s""",
(table, column))
return has_column > 0 if isinstance(has_column, int) else False
def index_exists(conn: Connection, index: str, table: Optional[str] = None) -> bool:
""" Check that an index with the given name exists in the database.
If table is not None then the index must relate to the given
table.
"""
with conn.cursor() as cur:
cur.execute("""SELECT tablename FROM pg_indexes
WHERE indexname = %s and schemaname = 'public'""", (index, ))
if cur.rowcount == 0:
return False
if table is not None:
row = cur.fetchone()
if row is None or not isinstance(row[0], str):
return False
return row[0] == table
return True
def drop_tables(conn: Connection, *names: str,
if_exists: bool = True, cascade: bool = False) -> None:
""" Drop one or more tables with the given names.
Set `if_exists` to False if a non-existent table should raise
an exception instead of just being ignored. `cascade` will cause
        dependent objects to be dropped as well.
The caller needs to take care of committing the change.
"""
sql = pysql.SQL('DROP TABLE%s{}%s' % (
' IF EXISTS ' if if_exists else ' ',
' CASCADE' if cascade else ''))
with conn.cursor() as cur:
for name in names:
cur.execute(sql.format(pysql.Identifier(name)))
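# Editor's note: with the defaults, drop_tables(conn, 'place', 'placex')
# issues the following statements (identifiers quoted safely by psycopg):
#
#   DROP TABLE IF EXISTS "place"
#   DROP TABLE IF EXISTS "placex"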
def server_version_tuple(conn: Connection) -> Tuple[int, int]:
""" Return the server version as a tuple of (major, minor).
Converts correctly for pre-10 and post-10 PostgreSQL versions.
"""
version = conn.info.server_version
if version < 100000:
return (int(version / 10000), int((version % 10000) / 100))
return (int(version / 10000), version % 10000)
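# Editor's note, worked examples for the arithmetic above:
#   PostgreSQL 9.6.20 reports 90620  -> (90620 // 10000, (90620 % 10000) // 100) == (9, 6)
#   PostgreSQL 12.4   reports 120004 -> (120004 // 10000, 120004 % 10000)        == (12, 4)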
def postgis_version_tuple(conn: Connection) -> Tuple[int, int]:
""" Return the postgis version installed in the database as a
tuple of (major, minor). Assumes that the PostGIS extension
has been installed already.
"""
version = execute_scalar(conn, 'SELECT postgis_lib_version()')
version_parts = version.split('.')
if len(version_parts) < 2:
raise UsageError(f"Error fetching Postgis version. Bad format: {version}")
return (int(version_parts[0]), int(version_parts[1]))
def register_hstore(conn: Connection) -> None:
""" Register the hstore type with psycopg for the connection.
"""
info = psycopg.types.TypeInfo.fetch(conn, "hstore")
if info is None:
raise RuntimeError('Hstore extension is requested but not installed.')
psycopg.types.hstore.register_hstore(info, conn)
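# Illustrative: after register_hstore(conn), hstore values are returned as
# Python dicts, e.g. execute_scalar(conn, "SELECT 'a=>1'::hstore") == {'a': '1'}.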
def connect(dsn: str, **kwargs: Any) -> Connection:
""" Open a connection to the database using the specialised connection
        factory. The returned psycopg connection object may be used directly
        or as a context manager with 'with'.
"""
try:
return psycopg.connect(dsn, row_factory=psycopg.rows.namedtuple_row, **kwargs)
except psycopg.OperationalError as err:
raise UsageError(f"Cannot connect to database: {err}") from err
# Translation from PG connection string parameters to PG environment variables.
# Derived from https://www.postgresql.org/docs/current/libpq-envars.html.
_PG_CONNECTION_STRINGS = {
'host': 'PGHOST',
'hostaddr': 'PGHOSTADDR',
'port': 'PGPORT',
'dbname': 'PGDATABASE',
'user': 'PGUSER',
'password': 'PGPASSWORD',
'passfile': 'PGPASSFILE',
'channel_binding': 'PGCHANNELBINDING',
'service': 'PGSERVICE',
'options': 'PGOPTIONS',
'application_name': 'PGAPPNAME',
'sslmode': 'PGSSLMODE',
'requiressl': 'PGREQUIRESSL',
'sslcompression': 'PGSSLCOMPRESSION',
'sslcert': 'PGSSLCERT',
'sslkey': 'PGSSLKEY',
'sslrootcert': 'PGSSLROOTCERT',
'sslcrl': 'PGSSLCRL',
'requirepeer': 'PGREQUIREPEER',
'ssl_min_protocol_version': 'PGSSLMINPROTOCOLVERSION',
'ssl_max_protocol_version': 'PGSSLMAXPROTOCOLVERSION',
'gssencmode': 'PGGSSENCMODE',
'krbsrvname': 'PGKRBSRVNAME',
'gsslib': 'PGGSSLIB',
'connect_timeout': 'PGCONNECT_TIMEOUT',
'target_session_attrs': 'PGTARGETSESSIONATTRS',
}
def get_pg_env(dsn: str,
base_env: Optional[SysEnv] = None) -> Dict[str, str]:
""" Return a copy of `base_env` with the environment variables for
PostgreSQL set up from the given database connection string.
If `base_env` is None, then the OS environment is used as a base
environment.
"""
env = dict(base_env if base_env is not None else os.environ)
for param, value in psycopg.conninfo.conninfo_to_dict(dsn).items():
if param in _PG_CONNECTION_STRINGS:
env[_PG_CONNECTION_STRINGS[param]] = str(value)
else:
LOG.error("Unknown connection parameter '%s' ignored.", param)
return env
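# A minimal usage sketch (illustrative only): the returned environment can be
# passed straight to subprocess, so tools like psql or pg_dump pick up the
# connection settings without putting them on the command line:
#
#   import subprocess
#   subprocess.run(['psql', '-c', 'SELECT 1'], env=get_pg_env(dsn), check=True)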
async def run_async_query(dsn: str, query: psycopg.abc.Query) -> None:
""" Open a connection to the database and run a single query
asynchronously.
"""
async with await psycopg.AsyncConnection.connect(dsn) as aconn:
await aconn.execute(query)
| 7,086 | Python | .py | 161 | 37.447205 | 86 | 0.663473 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,791 | collect_os_info.py | osm-search_Nominatim/src/nominatim_db/tools/collect_os_info.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of host system information including software versions, memory,
storage, and database configuration.
"""
import os
import subprocess
import sys
from pathlib import Path
from typing import List, Optional, Union
import psutil
from ..config import Configuration
from ..db.connection import connect, server_version_tuple, execute_scalar
from ..version import NOMINATIM_VERSION
def friendly_memory_string(mem: float) -> str:
"""Create a user friendly string for the amount of memory specified as mem"""
mem_magnitude = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
mag = 0
# determine order of magnitude
while mem > 1000:
mem /= 1000
mag += 1
return f"{mem:.1f} {mem_magnitude[mag]}"
def run_command(cmd: Union[str, List[str]]) -> str:
"""Runs a command using the shell and returns the output from stdout"""
try:
if sys.version_info < (3, 7):
cap_out = subprocess.run(cmd, stdout=subprocess.PIPE, check=False)
else:
cap_out = subprocess.run(cmd, capture_output=True, check=False)
return cap_out.stdout.decode("utf-8")
except FileNotFoundError:
        # non-Linux systems usually end up here
return f"Unknown (unable to find the '{cmd}' command)"
def os_name_info() -> str:
"""Obtain Operating System Name (and possibly the version)"""
os_info = None
# man page os-release(5) details meaning of the fields
if Path("/etc/os-release").is_file():
os_info = from_file_find_line_portion(
"/etc/os-release", "PRETTY_NAME", "=")
# alternative location
elif Path("/usr/lib/os-release").is_file():
os_info = from_file_find_line_portion(
"/usr/lib/os-release", "PRETTY_NAME", "="
)
# fallback on Python's os name
if os_info is None or os_info == "":
os_info = os.name
# if the above is insufficient, take a look at neofetch's approach to OS detection
return os_info
# Note: Intended to be used on informational files like /proc
def from_file_find_line_portion(
filename: str, start: str, sep: str, fieldnum: int = 1
) -> Optional[str]:
"""open filename, finds the line starting with the 'start' string.
Splits the line using separator and returns a "fieldnum" from the split."""
with open(filename, encoding='utf8') as file:
result = ""
for line in file:
if line.startswith(start):
result = line.split(sep)[fieldnum].strip()
return result
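# Example (assuming a typical os-release file): for the line
#   PRETTY_NAME="Ubuntu 22.04 LTS"
# from_file_find_line_portion('/etc/os-release', 'PRETTY_NAME', '=')
# returns '"Ubuntu 22.04 LTS"' (the quotes are kept; only surrounding
# whitespace is stripped).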
def get_postgresql_config(version: int) -> str:
"""Retrieve postgres configuration file"""
try:
with open(f"/etc/postgresql/{version}/main/postgresql.conf", encoding='utf8') as file:
db_config = file.read()
return db_config
except IOError:
return f"**Could not read '/etc/postgresql/{version}/main/postgresql.conf'**"
def report_system_information(config: Configuration) -> None:
"""Generate a report about the host system including software versions, memory,
storage, and database configuration."""
with connect(config.get_libpq_dsn(), dbname='postgres') as conn:
postgresql_ver: str = '.'.join(map(str, server_version_tuple(conn)))
with conn.cursor() as cur:
cur.execute("SELECT datname FROM pg_catalog.pg_database WHERE datname=%s",
(config.get_database_params()['dbname'], ))
nominatim_db_exists = cur.rowcount > 0
if nominatim_db_exists:
with connect(config.get_libpq_dsn()) as conn:
postgis_ver: str = execute_scalar(conn, 'SELECT postgis_lib_version()')
else:
postgis_ver = "Unable to connect to database"
postgresql_config: str = get_postgresql_config(int(float(postgresql_ver)))
# Note: psutil.disk_partitions() is similar to run_command("lsblk")
# Note: run_command("systemd-detect-virt") only works on Linux, on other OSes
# should give a message: "Unknown (unable to find the 'systemd-detect-virt' command)"
# Generates the Markdown report.
report = f"""
**Instructions**
Use this information in your issue report at https://github.com/osm-search/Nominatim/issues
Redirect the output to a file:
$ ./collect_os_info.py > report.md
**Software Environment:**
- Python version: {sys.version}
- Nominatim version: {NOMINATIM_VERSION!s}
- PostgreSQL version: {postgresql_ver}
- PostGIS version: {postgis_ver}
- OS: {os_name_info()}
**Hardware Configuration:**
- RAM: {friendly_memory_string(psutil.virtual_memory().total)}
- number of CPUs: {psutil.cpu_count(logical=False)}
- bare metal/AWS/other cloud service (per systemd-detect-virt(1)):
{run_command("systemd-detect-virt")}
- type and size of disks:
**`df -h` - report file system disk space usage:**
```
{run_command(["df", "-h"])}
```
**lsblk - list block devices:**
```
{run_command("lsblk")}
```
**Postgresql Configuration:**
```
{postgresql_config}
```
**Notes**
Please add any notes about anything above that is incorrect.
"""
print(report)
| 5,455 | Python | .py | 129 | 36.170543 | 95 | 0.661358 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,792 | migration.py | osm-search_Nominatim/src/nominatim_db/tools/migration.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for database migration to newer software versions.
"""
from typing import List, Tuple, Callable, Any
import logging
from ..errors import UsageError
from ..config import Configuration
from ..db import properties
from ..db.connection import connect, Connection,\
table_exists, register_hstore
from ..version import NominatimVersion, NOMINATIM_VERSION, parse_version
from ..tokenizer import factory as tokenizer_factory
from . import refresh
LOG = logging.getLogger()
_MIGRATION_FUNCTIONS : List[Tuple[NominatimVersion, Callable[..., None]]] = []
def migrate(config: Configuration, paths: Any) -> int:
""" Check for the current database version and execute migrations,
        if necessary.
"""
with connect(config.get_libpq_dsn()) as conn:
register_hstore(conn)
if table_exists(conn, 'nominatim_properties'):
db_version_str = properties.get_property(conn, 'database_version')
else:
db_version_str = None
if db_version_str is not None:
db_version = parse_version(db_version_str)
else:
db_version = None
if db_version is None or db_version < (4, 3, 0, 0):
LOG.fatal('Your database version is older than 4.3. '
'Direct migration is not possible.\n'
'You should strongly consider a reimport. If that is not possible\n'
'please upgrade to 4.3 first and then to the newest version.')
raise UsageError('Migration not possible.')
if db_version == NOMINATIM_VERSION:
LOG.warning("Database already at latest version (%s)", db_version_str)
return 0
LOG.info("Detected database version: %s", db_version_str)
for version, func in _MIGRATION_FUNCTIONS:
if db_version < version:
title = func.__doc__ or ''
LOG.warning("Running: %s (%s)", title.split('\n', 1)[0], version)
kwargs = dict(conn=conn, config=config, paths=paths)
func(**kwargs)
conn.commit()
LOG.warning('Updating SQL functions.')
refresh.create_functions(conn, config)
tokenizer = tokenizer_factory.get_tokenizer_for_db(config)
tokenizer.update_sql_functions(config)
properties.set_property(conn, 'database_version', str(NOMINATIM_VERSION))
conn.commit()
return 0
def _migration(major: int, minor: int, patch: int = 0,
dbpatch: int = 0) -> Callable[[Callable[..., None]], Callable[..., None]]:
""" Decorator for a single migration step. The parameters describe the
        version after which the migration is applicable, i.e. before changing
        from the given version to the next, the migration is required.
        All migrations are run in the order in which they are defined in this
        file. Do not run global SQL scripts as migrations, as you cannot be sure
        that these scripts do the same in later versions.
        The SQL functions are always reinstalled in full at the end of the
        migration process, so the migration functions may leave them in a
        temporary state.
"""
def decorator(func: Callable[..., None]) -> Callable[..., None]:
version = NominatimVersion(major, minor, patch, dbpatch)
_MIGRATION_FUNCTIONS.append((version, func))
return func
return decorator
@_migration(4, 4, 99, 0)
def create_postcode_area_lookup_index(conn: Connection, **_: Any) -> None:
""" Create index needed for looking up postcode areas from postocde points.
"""
with conn.cursor() as cur:
cur.execute("""CREATE INDEX IF NOT EXISTS idx_placex_postcode_areas
ON placex USING BTREE (country_code, postcode)
WHERE osm_type = 'R' AND class = 'boundary' AND type = 'postal_code'
""")
@_migration(4, 4, 99, 1)
def create_postcode_parent_index(conn: Connection, **_: Any) -> None:
""" Create index needed for updating postcodes when a parent changes.
"""
if table_exists(conn, 'planet_osm_ways'):
with conn.cursor() as cur:
cur.execute("""CREATE INDEX IF NOT EXISTS
idx_location_postcode_parent_place_id
ON location_postcode USING BTREE (parent_place_id)""")
| 4,616 | Python | .py | 94 | 40.202128 | 91 | 0.644667 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,793 | refresh.py | osm-search_Nominatim/src/nominatim_db/tools/refresh.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for bringing auxiliary data in the database up-to-date.
"""
from typing import MutableSequence, Tuple, Any, Type, Mapping, Sequence, List, cast
import csv
import gzip
import logging
from pathlib import Path
from psycopg import sql as pysql
from ..config import Configuration
from ..db.connection import Connection, connect, postgis_version_tuple,\
drop_tables
from ..db.utils import execute_file
from ..db.sql_preprocessor import SQLPreprocessor
LOG = logging.getLogger()
OSM_TYPE = {'N': 'node', 'W': 'way', 'R': 'relation'}
def _add_address_level_rows_from_entry(rows: MutableSequence[Tuple[Any, ...]],
entry: Mapping[str, Any]) -> None:
""" Converts a single entry from the JSON format for address rank
descriptions into a flat format suitable for inserting into a
PostgreSQL table and adds these lines to `rows`.
"""
countries = entry.get('countries') or (None, )
for key, values in entry['tags'].items():
for value, ranks in values.items():
if isinstance(ranks, list):
rank_search, rank_address = ranks
else:
rank_search = rank_address = ranks
if not value:
value = None
for country in countries:
rows.append((country, key, value, rank_search, rank_address))
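# Example (illustrative): the entry
#   {'countries': ['de'], 'tags': {'place': {'city': [16, 13]}}}
# expands to the single row ('de', 'place', 'city', 16, 13), while a scalar
# rank with an empty value like {'tags': {'boundary': {'': 8}}} yields
# (None, 'boundary', None, 8, 8).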
def load_address_levels(conn: Connection, table: str, levels: Sequence[Mapping[str, Any]]) -> None:
""" Replace the `address_levels` table with the contents of `levels'.
A new table is created any previously existing table is dropped.
The table has the following columns:
country, class, type, rank_search, rank_address
"""
rows: List[Tuple[Any, ...]] = []
for entry in levels:
_add_address_level_rows_from_entry(rows, entry)
drop_tables(conn, table)
with conn.cursor() as cur:
cur.execute(pysql.SQL("""CREATE TABLE {} (
country_code varchar(2),
class TEXT,
type TEXT,
rank_search SMALLINT,
rank_address SMALLINT)
""").format(pysql.Identifier(table)))
cur.executemany(pysql.SQL("INSERT INTO {} VALUES (%s, %s, %s, %s, %s)")
.format(pysql.Identifier(table)), rows)
cur.execute(pysql.SQL('CREATE UNIQUE INDEX ON {} (country_code, class, type)')
.format(pysql.Identifier(table)))
conn.commit()
def load_address_levels_from_config(conn: Connection, config: Configuration) -> None:
""" Replace the `address_levels` table with the content as
defined in the given configuration. Uses the parameter
NOMINATIM_ADDRESS_LEVEL_CONFIG to determine the location of the
configuration file.
"""
cfg = config.load_sub_configuration('', config='ADDRESS_LEVEL_CONFIG')
load_address_levels(conn, 'address_levels', cfg)
def create_functions(conn: Connection, config: Configuration,
enable_diff_updates: bool = True,
enable_debug: bool = False) -> None:
""" (Re)create the PL/pgSQL functions.
"""
sql = SQLPreprocessor(conn, config)
sql.run_sql_file(conn, 'functions.sql',
disable_diff_updates=not enable_diff_updates,
debug=enable_debug)
def import_wikipedia_articles(dsn: str, data_path: Path, ignore_errors: bool = False) -> int:
""" Replaces the wikipedia importance tables with new data.
The import is run in a single transaction so that the new data
        is replaced seamlessly.
Returns 0 if all was well and 1 if the importance file could not
be found. Throws an exception if there was an error reading the file.
"""
if import_importance_csv(dsn, data_path / 'wikimedia-importance.csv.gz') == 0 \
or import_importance_sql(dsn, data_path / 'wikimedia-importance.sql.gz',
ignore_errors) == 0:
return 0
return 1
def import_importance_csv(dsn: str, data_file: Path) -> int:
""" Replace wikipedia importance table with data from a
single CSV file.
The file must be a gzipped CSV and have the following columns:
language, title, importance, wikidata_id
Other columns may be present but will be ignored.
"""
if not data_file.exists():
return 1
# Only import the first occurrence of a wikidata ID.
# This keeps indexes and table small.
wd_done = set()
with connect(dsn) as conn:
drop_tables(conn, 'wikipedia_article', 'wikipedia_redirect', 'wikimedia_importance')
with conn.cursor() as cur:
cur.execute("""CREATE TABLE wikimedia_importance (
language TEXT NOT NULL,
title TEXT NOT NULL,
importance double precision NOT NULL,
wikidata TEXT
) """)
copy_cmd = """COPY wikimedia_importance(language, title, importance, wikidata)
FROM STDIN"""
with gzip.open(str(data_file), 'rt') as fd, cur.copy(copy_cmd) as copy:
for row in csv.DictReader(fd, delimiter='\t', quotechar='|'):
wd_id = int(row['wikidata_id'][1:])
copy.write_row((row['language'],
row['title'],
row['importance'],
None if wd_id in wd_done else row['wikidata_id']))
wd_done.add(wd_id)
cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_title
ON wikimedia_importance (title)""")
cur.execute("""CREATE INDEX IF NOT EXISTS idx_wikimedia_importance_wikidata
ON wikimedia_importance (wikidata)
WHERE wikidata is not null""")
conn.commit()
return 0
def import_importance_sql(dsn: str, data_file: Path, ignore_errors: bool) -> int:
""" Replace wikipedia importance table with data from an SQL file.
"""
if not data_file.exists():
return 1
pre_code = """BEGIN;
DROP TABLE IF EXISTS "wikipedia_article";
DROP TABLE IF EXISTS "wikipedia_redirect";
DROP TABLE IF EXISTS "wikipedia_importance";
"""
post_code = "COMMIT"
execute_file(dsn, data_file, ignore_errors=ignore_errors,
pre_code=pre_code, post_code=post_code)
return 0
def import_secondary_importance(dsn: str, data_path: Path, ignore_errors: bool = False) -> int:
""" Replaces the secondary importance raster data table with new data.
Returns 0 if all was well and 1 if the raster SQL file could not
be found. Throws an exception if there was an error reading the file.
"""
datafile = data_path / 'secondary_importance.sql.gz'
if not datafile.exists():
return 1
with connect(dsn) as conn:
postgis_version = postgis_version_tuple(conn)
if postgis_version[0] < 3:
LOG.error('PostGIS version is too old for using OSM raster data.')
return 2
execute_file(dsn, datafile, ignore_errors=ignore_errors)
return 0
def recompute_importance(conn: Connection) -> None:
""" Recompute wikipedia links and importance for all entries in placex.
        This is a long-running operation that must not be executed in
parallel with updates.
"""
with conn.cursor() as cur:
cur.execute('ALTER TABLE placex DISABLE TRIGGER ALL')
cur.execute("""
UPDATE placex SET (wikipedia, importance) =
(SELECT wikipedia, importance
FROM compute_importance(extratags, country_code, rank_search, centroid))
""")
cur.execute("""
UPDATE placex s SET wikipedia = d.wikipedia, importance = d.importance
FROM placex d
WHERE s.place_id = d.linked_place_id and d.wikipedia is not null
and (s.wikipedia is null or s.importance < d.importance);
""")
cur.execute('ALTER TABLE placex ENABLE TRIGGER ALL')
conn.commit()
def _quote_php_variable(var_type: Type[Any], config: Configuration,
conf_name: str) -> str:
if var_type == bool:
return 'true' if config.get_bool(conf_name) else 'false'
if var_type == int:
return cast(str, getattr(config, conf_name))
if not getattr(config, conf_name):
return 'false'
if var_type == Path:
value = str(config.get_path(conf_name) or '')
else:
value = getattr(config, conf_name)
quoted = value.replace("'", "\\'")
return f"'{quoted}'"
def invalidate_osm_object(osm_type: str, osm_id: int, conn: Connection,
recursive: bool = True) -> None:
""" Mark the given OSM object for reindexing. When 'recursive' is set
to True (the default), then all dependent objects are marked for
reindexing as well.
        'osm_type' must be one of 'N' (node), 'W' (way) or 'R' (relation).
If the given object does not exist, then nothing happens.
"""
assert osm_type in ('N', 'R', 'W')
LOG.warning("Invalidating OSM %s %s%s.",
OSM_TYPE[osm_type], osm_id,
' and its dependent places' if recursive else '')
with conn.cursor() as cur:
if recursive:
sql = """SELECT place_force_update(place_id)
FROM placex WHERE osm_type = %s and osm_id = %s"""
else:
sql = """UPDATE placex SET indexed_status = 2
WHERE osm_type = %s and osm_id = %s"""
cur.execute(sql, (osm_type, osm_id))
| 10,264 | Python | .py | 212 | 37.221698 | 99 | 0.598339 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,794 | add_osm_data.py | osm-search_Nominatim/src/nominatim_db/tools/add_osm_data.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Function to add additional OSM data from a file or the API into the database.
"""
from typing import Any, MutableMapping
from pathlib import Path
import logging
import urllib
from ..db.connection import connect
from ..utils.url_utils import get_url
from .exec_utils import run_osm2pgsql
LOG = logging.getLogger()
def _run_osm2pgsql(dsn: str, options: MutableMapping[str, Any]) -> None:
run_osm2pgsql(options)
# Handle deletions
with connect(dsn) as conn:
with conn.cursor() as cur:
cur.execute('SELECT flush_deleted_places()')
conn.commit()
def add_data_from_file(dsn: str, fname: str, options: MutableMapping[str, Any]) -> int:
""" Adds data from a OSM file to the database. The file may be a normal
OSM file or a diff file in all formats supported by libosmium.
"""
options['import_file'] = Path(fname)
options['append'] = True
_run_osm2pgsql(dsn, options)
# No status update. We don't know where the file came from.
return 0
def add_osm_object(dsn: str, osm_type: str, osm_id: int, use_main_api: bool,
options: MutableMapping[str, Any]) -> int:
""" Add or update a single OSM object from the latest version of the
API.
"""
if use_main_api:
base_url = f'https://www.openstreetmap.org/api/0.6/{osm_type}/{osm_id}'
if osm_type in ('way', 'relation'):
base_url += '/full'
else:
# use Overpass API
if osm_type == 'node':
data = f'node({osm_id});out meta;'
elif osm_type == 'way':
data = f'(way({osm_id});>;);out meta;'
else:
data = f'(rel(id:{osm_id});>;);out meta;'
base_url = 'https://overpass-api.de/api/interpreter?' \
+ urllib.parse.urlencode({'data': data})
options['append'] = True
options['import_data'] = get_url(base_url).encode('utf-8')
_run_osm2pgsql(dsn, options)
return 0
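# For example (illustrative), add_osm_object(dsn, 'way', 1234, False, options)
# fetches '(way(1234);>;);out meta;' from Overpass and applies the result
# with osm2pgsql in append mode.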
| 2,169 | Python | .py | 56 | 32.910714 | 87 | 0.642075 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,795 | postcodes.py | osm-search_Nominatim/src/nominatim_db/tools/postcodes.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for importing, updating and otherwise maintaining the table
of artificial postcode centroids.
"""
from typing import Optional, Tuple, Dict, List, TextIO
from collections import defaultdict
from pathlib import Path
import csv
import gzip
import logging
from math import isfinite
from psycopg import sql as pysql
from ..db.connection import connect, Connection, table_exists
from ..utils.centroid import PointsCentroid
from ..data.postcode_format import PostcodeFormatter, CountryPostcodeMatcher
from ..tokenizer.base import AbstractAnalyzer, AbstractTokenizer
LOG = logging.getLogger()
def _to_float(numstr: str, max_value: float) -> float:
""" Convert the number in string into a float. The number is expected
        to be in the range of [-max_value, max_value]. Otherwise raises a
        ValueError.
"""
num = float(numstr)
if not isfinite(num) or num <= -max_value or num >= max_value:
raise ValueError()
return num
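# Examples (illustrative): _to_float('13.5', 180) == 13.5, while
# _to_float('200', 180) and _to_float('nan', 180) both raise ValueError.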
class _PostcodeCollector:
""" Collector for postcodes of a single country.
"""
def __init__(self, country: str, matcher: Optional[CountryPostcodeMatcher]):
self.country = country
self.matcher = matcher
self.collected: Dict[str, PointsCentroid] = defaultdict(PointsCentroid)
self.normalization_cache: Optional[Tuple[str, Optional[str]]] = None
def add(self, postcode: str, x: float, y: float) -> None:
""" Add the given postcode to the collection cache. If the postcode
already existed, it is overwritten with the new centroid.
"""
if self.matcher is not None:
normalized: Optional[str]
if self.normalization_cache and self.normalization_cache[0] == postcode:
normalized = self.normalization_cache[1]
else:
match = self.matcher.match(postcode)
normalized = self.matcher.normalize(match) if match else None
self.normalization_cache = (postcode, normalized)
if normalized:
self.collected[normalized] += (x, y)
def commit(self, conn: Connection, analyzer: AbstractAnalyzer, project_dir: Path) -> None:
""" Update postcodes for the country from the postcodes selected so far
as well as any externally supplied postcodes.
"""
self._update_from_external(analyzer, project_dir)
to_add, to_delete, to_update = self._compute_changes(conn)
LOG.info("Processing country '%s' (%s added, %s deleted, %s updated).",
self.country, len(to_add), len(to_delete), len(to_update))
with conn.cursor() as cur:
if to_add:
cur.executemany(pysql.SQL(
"""INSERT INTO location_postcode
(place_id, indexed_status, country_code,
postcode, geometry)
VALUES (nextval('seq_place'), 1, {}, %s,
ST_SetSRID(ST_MakePoint(%s, %s), 4326))
""").format(pysql.Literal(self.country)),
to_add)
if to_delete:
cur.execute("""DELETE FROM location_postcode
WHERE country_code = %s and postcode = any(%s)
""", (self.country, to_delete))
if to_update:
cur.executemany(
pysql.SQL("""UPDATE location_postcode
SET indexed_status = 2,
geometry = ST_SetSRID(ST_Point(%s, %s), 4326)
WHERE country_code = {} and postcode = %s
""").format(pysql.Literal(self.country)),
to_update)
def _compute_changes(self, conn: Connection) \
-> Tuple[List[Tuple[str, float, float]], List[str], List[Tuple[float, float, str]]]:
""" Compute which postcodes from the collected postcodes have to be
added or modified and which from the location_postcode table
have to be deleted.
"""
to_update = []
to_delete = []
with conn.cursor() as cur:
cur.execute("""SELECT postcode, ST_X(geometry), ST_Y(geometry)
FROM location_postcode
WHERE country_code = %s""",
(self.country, ))
for postcode, x, y in cur:
pcobj = self.collected.pop(postcode, None)
if pcobj:
newx, newy = pcobj.centroid()
if (x - newx) > 0.0000001 or (y - newy) > 0.0000001:
to_update.append((newx, newy, postcode))
else:
to_delete.append(postcode)
to_add = [(k, *v.centroid()) for k, v in self.collected.items()]
self.collected = defaultdict(PointsCentroid)
return to_add, to_delete, to_update
def _update_from_external(self, analyzer: AbstractAnalyzer, project_dir: Path) -> None:
""" Look for an external postcode file for the active country in
the project directory and add missing postcodes when found.
"""
csvfile = self._open_external(project_dir)
if csvfile is None:
return
try:
reader = csv.DictReader(csvfile)
for row in reader:
if 'postcode' not in row or 'lat' not in row or 'lon' not in row:
LOG.warning("Bad format for external postcode file for country '%s'."
" Ignored.", self.country)
return
postcode = analyzer.normalize_postcode(row['postcode'])
if postcode not in self.collected:
try:
                        # Do the float conversion separately, it might throw
centroid = (_to_float(row['lon'], 180),
_to_float(row['lat'], 90))
self.collected[postcode] += centroid
except ValueError:
LOG.warning("Bad coordinates %s, %s in %s country postcode file.",
row['lat'], row['lon'], self.country)
finally:
csvfile.close()
def _open_external(self, project_dir: Path) -> Optional[TextIO]:
fname = project_dir / f'{self.country}_postcodes.csv'
if fname.is_file():
LOG.info("Using external postcode file '%s'.", fname)
return open(fname, 'r', encoding='utf-8')
fname = project_dir / f'{self.country}_postcodes.csv.gz'
if fname.is_file():
LOG.info("Using external postcode file '%s'.", fname)
return gzip.open(fname, 'rt')
return None
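# A minimal sketch of an external postcode file (hypothetical content for a
# file named <country>_postcodes.csv in the project directory):
#
#   postcode,lat,lon
#   01067,51.0608,13.7242
#   01069,51.0396,13.7427
#
# Additional columns are allowed and ignored; rows with out-of-range
# coordinates are skipped with a warning.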
def update_postcodes(dsn: str, project_dir: Path, tokenizer: AbstractTokenizer) -> None:
""" Update the table of artificial postcodes.
Computes artificial postcode centroids from the placex table,
potentially enhances it with external data and then updates the
postcodes in the table 'location_postcode'.
"""
matcher = PostcodeFormatter()
with tokenizer.name_analyzer() as analyzer:
with connect(dsn) as conn:
# First get the list of countries that currently have postcodes.
# (Doing this before starting to insert, so it is fast on import.)
with conn.cursor() as cur:
cur.execute("SELECT DISTINCT country_code FROM location_postcode")
todo_countries = set((row[0] for row in cur))
# Recompute the list of valid postcodes from placex.
with conn.cursor(name="placex_postcodes") as cur:
cur.execute("""
SELECT cc, pc, ST_X(centroid), ST_Y(centroid)
FROM (SELECT
COALESCE(plx.country_code,
get_country_code(ST_Centroid(pl.geometry))) as cc,
pl.address->'postcode' as pc,
COALESCE(plx.centroid, ST_Centroid(pl.geometry)) as centroid
FROM place AS pl LEFT OUTER JOIN placex AS plx
ON pl.osm_id = plx.osm_id AND pl.osm_type = plx.osm_type
WHERE pl.address ? 'postcode' AND pl.geometry IS NOT null) xx
WHERE pc IS NOT null AND cc IS NOT null
ORDER BY cc, pc""")
collector = None
for country, postcode, x, y in cur:
if collector is None or country != collector.country:
if collector is not None:
collector.commit(conn, analyzer, project_dir)
collector = _PostcodeCollector(country, matcher.get_matcher(country))
todo_countries.discard(country)
collector.add(postcode, x, y)
if collector is not None:
collector.commit(conn, analyzer, project_dir)
# Now handle any countries that are only in the postcode table.
for country in todo_countries:
fmt = matcher.get_matcher(country)
_PostcodeCollector(country, fmt).commit(conn, analyzer, project_dir)
conn.commit()
analyzer.update_postcodes_from_db()
def can_compute(dsn: str) -> bool:
"""
Check that the place table exists so that
postcodes can be computed.
"""
with connect(dsn) as conn:
return table_exists(conn, 'place')
| 9,832 | Python | .py | 195 | 36.902564 | 94 | 0.575328 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,796 | convert_sqlite.py | osm-search_Nominatim/src/nominatim_db/tools/convert_sqlite.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Exporting a Nominatim database to SQLite.
"""
from typing import Set, Any, Optional, Union
import datetime as dt
import logging
from pathlib import Path
import sqlalchemy as sa
import nominatim_api as napi
from nominatim_api.search.query_analyzer_factory import make_query_analyzer
from nominatim_api.typing import SaSelect, SaRow
from nominatim_api.sql.sqlalchemy_types import Geometry, IntArray
LOG = logging.getLogger()
async def convert(project_dir: Optional[Union[str, Path]],
outfile: Path, options: Set[str]) -> None:
""" Export an existing database to sqlite. The resulting database
        will be usable with the Python frontend of Nominatim.
"""
api = napi.NominatimAPIAsync(project_dir)
try:
outapi = napi.NominatimAPIAsync(project_dir,
{'NOMINATIM_DATABASE_DSN': f"sqlite:dbname={outfile}",
'NOMINATIM_DATABASE_RW': '1'})
try:
async with api.begin() as src, outapi.begin() as dest:
writer = SqliteWriter(src, dest, options)
await writer.write()
finally:
await outapi.close()
finally:
await api.close()
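# A minimal usage sketch (illustrative; paths are hypothetical):
#
#   import asyncio
#   asyncio.run(convert('/srv/nominatim-project', Path('nominatim.sqlite'),
#                       {'search'}))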
class SqliteWriter:
""" Worker class which creates a new SQLite database.
"""
def __init__(self, src: napi.SearchConnection,
dest: napi.SearchConnection, options: Set[str]) -> None:
self.src = src
self.dest = dest
self.options = options
async def write(self) -> None:
""" Create the database structure and copy the data from
the source database to the destination.
"""
LOG.warning('Setting up spatialite')
await self.dest.execute(sa.select(sa.func.InitSpatialMetaData(True, 'WGS84')))
await self.create_tables()
await self.copy_data()
if 'search' in self.options:
await self.create_word_table()
await self.create_indexes()
async def create_tables(self) -> None:
""" Set up the database tables.
"""
LOG.warning('Setting up tables')
if 'search' not in self.options:
self.dest.t.meta.remove(self.dest.t.search_name)
else:
await self.create_class_tables()
await self.dest.connection.run_sync(self.dest.t.meta.create_all)
# Convert all Geometry columns to Spatialite geometries
for table in self.dest.t.meta.sorted_tables:
for col in table.c:
if isinstance(col.type, Geometry):
await self.dest.execute(sa.select(
sa.func.RecoverGeometryColumn(table.name, col.name, 4326,
col.type.subtype.upper(), 'XY')))
async def create_class_tables(self) -> None:
""" Set up the table that serve class/type-specific geometries.
"""
sql = sa.text("""SELECT tablename FROM pg_tables
WHERE tablename LIKE 'place_classtype_%'""")
for res in await self.src.execute(sql):
for db in (self.src, self.dest):
sa.Table(res[0], db.t.meta,
sa.Column('place_id', sa.BigInteger),
sa.Column('centroid', Geometry))
async def create_word_table(self) -> None:
""" Create the word table.
This table needs the property information to determine the
            correct format. It therefore needs to be created after all
            other data has been copied.
"""
await make_query_analyzer(self.src)
await make_query_analyzer(self.dest)
src = self.src.t.meta.tables['word']
dest = self.dest.t.meta.tables['word']
await self.dest.connection.run_sync(dest.create)
LOG.warning("Copying word table")
async_result = await self.src.connection.stream(sa.select(src))
async for partition in async_result.partitions(10000):
data = [{k: getattr(r, k) for k in r._fields} for r in partition]
await self.dest.execute(dest.insert(), data)
await self.dest.connection.run_sync(sa.Index('idx_word_woken', dest.c.word_token).create)
async def copy_data(self) -> None:
""" Copy data for all registered tables.
"""
def _getfield(row: SaRow, key: str) -> Any:
value = getattr(row, key)
if isinstance(value, dt.datetime):
if value.tzinfo is not None:
value = value.astimezone(dt.timezone.utc)
return value
for table in self.dest.t.meta.sorted_tables:
LOG.warning("Copying '%s'", table.name)
async_result = await self.src.connection.stream(self.select_from(table.name))
async for partition in async_result.partitions(10000):
data = [{('class_' if k == 'class' else k): _getfield(r, k)
for k in r._fields}
for r in partition]
await self.dest.execute(table.insert(), data)
# Set up a minimal copy of pg_tables used to look up the class tables later.
pg_tables = sa.Table('pg_tables', self.dest.t.meta,
sa.Column('schemaname', sa.Text, default='public'),
sa.Column('tablename', sa.Text))
await self.dest.connection.run_sync(pg_tables.create)
data = [{'tablename': t} for t in self.dest.t.meta.tables]
await self.dest.execute(pg_tables.insert().values(data))
async def create_indexes(self) -> None:
""" Add indexes necessary for the frontend.
"""
# reverse place node lookup needs an extra table to simulate a
# partial index with adaptive buffering.
await self.dest.execute(sa.text(
""" CREATE TABLE placex_place_node_areas AS
SELECT place_id, ST_Expand(geometry,
14.0 * exp(-0.2 * rank_search) - 0.03) as geometry
FROM placex
WHERE rank_address between 5 and 25
and osm_type = 'N'
and linked_place_id is NULL """))
await self.dest.execute(sa.select(
sa.func.RecoverGeometryColumn('placex_place_node_areas', 'geometry',
4326, 'GEOMETRY', 'XY')))
await self.dest.execute(sa.select(sa.func.CreateSpatialIndex(
'placex_place_node_areas', 'geometry')))
# Remaining indexes.
await self.create_spatial_index('country_grid', 'geometry')
await self.create_spatial_index('placex', 'geometry')
await self.create_spatial_index('osmline', 'linegeo')
await self.create_spatial_index('tiger', 'linegeo')
await self.create_index('placex', 'place_id')
await self.create_index('placex', 'parent_place_id')
await self.create_index('placex', 'rank_address')
await self.create_index('addressline', 'place_id')
await self.create_index('postcode', 'place_id')
await self.create_index('osmline', 'place_id')
await self.create_index('tiger', 'place_id')
if 'search' in self.options:
await self.create_spatial_index('postcode', 'geometry')
await self.create_spatial_index('search_name', 'centroid')
await self.create_index('search_name', 'place_id')
await self.create_index('osmline', 'parent_place_id')
await self.create_index('tiger', 'parent_place_id')
await self.create_search_index()
for t in self.dest.t.meta.tables:
if t.startswith('place_classtype_'):
await self.dest.execute(sa.select(
sa.func.CreateSpatialIndex(t, 'centroid')))
async def create_spatial_index(self, table: str, column: str) -> None:
""" Create a spatial index on the given table and column.
"""
await self.dest.execute(sa.select(
sa.func.CreateSpatialIndex(getattr(self.dest.t, table).name, column)))
async def create_index(self, table_name: str, column: str) -> None:
""" Create a simple index on the given table and column.
"""
table = getattr(self.dest.t, table_name)
await self.dest.connection.run_sync(
sa.Index(f"idx_{table}_{column}", getattr(table.c, column)).create)
async def create_search_index(self) -> None:
""" Create the tables and indexes needed for word lookup.
"""
LOG.warning("Creating reverse search table")
rsn = sa.Table('reverse_search_name', self.dest.t.meta,
sa.Column('word', sa.Integer()),
sa.Column('column', sa.Text()),
sa.Column('places', IntArray))
await self.dest.connection.run_sync(rsn.create)
tsrc = self.src.t.search_name
for column in ('name_vector', 'nameaddress_vector'):
sql = sa.select(sa.func.unnest(getattr(tsrc.c, column)).label('word'),
sa.func.ArrayAgg(tsrc.c.place_id).label('places'))\
.group_by('word')
async_result = await self.src.connection.stream(sql)
async for partition in async_result.partitions(100):
data = []
for row in partition:
row.places.sort()
data.append({'word': row.word,
'column': column,
'places': row.places})
await self.dest.execute(rsn.insert(), data)
await self.dest.connection.run_sync(
sa.Index('idx_reverse_search_name_word', rsn.c.word).create)
def select_from(self, table: str) -> SaSelect:
""" Create the SQL statement to select the source columns and rows.
"""
columns = self.src.t.meta.tables[table].c
if table == 'placex':
# SQLite struggles with Geometries that are larger than 5MB,
# so simplify those.
return sa.select(*(c for c in columns if not isinstance(c.type, Geometry)),
sa.func.ST_AsText(columns.centroid).label('centroid'),
sa.func.ST_AsText(
sa.case((sa.func.ST_MemSize(columns.geometry) < 5000000,
columns.geometry),
else_=sa.func.ST_SimplifyPreserveTopology(
columns.geometry, 0.0001)
)).label('geometry'))
sql = sa.select(*(sa.func.ST_AsText(c).label(c.name)
if isinstance(c.type, Geometry) else c for c in columns))
return sql
| 11,157 | Python | .py | 218 | 38.022936 | 97 | 0.580663 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,797 | check_database.py | osm-search_Nominatim/src/nominatim_db/tools/check_database.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of functions that check if the database is complete and functional.
"""
from typing import Callable, Optional, Any, Union, Tuple, Mapping, List
from enum import Enum
from textwrap import dedent
from ..config import Configuration
from ..db.connection import connect, Connection, server_version_tuple,\
index_exists, table_exists, execute_scalar
from ..db import properties
from ..errors import UsageError
from ..tokenizer import factory as tokenizer_factory
from . import freeze
from ..version import NOMINATIM_VERSION, parse_version
CHECKLIST = []
class CheckState(Enum):
""" Possible states of a check. FATAL stops check execution entirely.
"""
OK = 0
FAIL = 1
FATAL = 2
NOT_APPLICABLE = 3
WARN = 4
CheckResult = Union[CheckState, Tuple[CheckState, Mapping[str, Any]]]
CheckFunc = Callable[[Connection, Configuration], CheckResult]
def _check(hint: Optional[str] = None) -> Callable[[CheckFunc], CheckFunc]:
""" Decorator for checks. It adds the function to the list of
checks to execute and adds the code for printing progress messages.
"""
def decorator(func: CheckFunc) -> CheckFunc:
title = (func.__doc__ or '').split('\n', 1)[0].strip()
def run_check(conn: Connection, config: Configuration) -> CheckState:
print(title, end=' ... ')
ret = func(conn, config)
if isinstance(ret, tuple):
ret, params = ret
else:
params = {}
if ret == CheckState.OK:
print('\033[92mOK\033[0m')
elif ret == CheckState.WARN:
print('\033[93mWARNING\033[0m')
if hint:
print('')
print(dedent(hint.format(**params)))
elif ret == CheckState.NOT_APPLICABLE:
print('not applicable')
else:
                print('\033[31mFailed\033[0m')
if hint:
print(dedent(hint.format(**params)))
return ret
CHECKLIST.append(run_check)
return run_check
return decorator
class _BadConnection:
def __init__(self, msg: str) -> None:
self.msg = msg
def close(self) -> None:
""" Dummy function to provide the implementation.
"""
def check_database(config: Configuration) -> int:
""" Run a number of checks on the database and return the status.
"""
try:
conn = connect(config.get_libpq_dsn())
except UsageError as err:
conn = _BadConnection(str(err)) # type: ignore[assignment]
overall_result = 0
for check in CHECKLIST:
ret = check(conn, config)
if ret == CheckState.FATAL:
conn.close()
return 1
if ret in (CheckState.FATAL, CheckState.FAIL):
overall_result = 1
conn.close()
return overall_result
def _get_indexes(conn: Connection) -> List[str]:
indexes = ['idx_place_addressline_address_place_id',
'idx_placex_rank_search',
'idx_placex_rank_address',
'idx_placex_parent_place_id',
'idx_placex_geometry_reverse_lookupplacenode',
'idx_placex_geometry_reverse_lookuppolygon',
'idx_placex_geometry_placenode',
'idx_osmline_parent_place_id',
'idx_osmline_parent_osm_id',
'idx_postcode_id',
'idx_postcode_postcode'
]
# These won't exist if --reverse-only import was used
if table_exists(conn, 'search_name'):
indexes.extend(('idx_search_name_nameaddress_vector',
'idx_search_name_name_vector',
'idx_search_name_centroid'))
    if server_version_tuple(conn) >= (11, 0):
indexes.extend(('idx_placex_housenumber',
'idx_osmline_parent_osm_id_with_hnr'))
# These won't exist if --no-updates import was used
if table_exists(conn, 'place'):
indexes.extend(('idx_location_area_country_place_id',
'idx_place_osm_unique',
'idx_placex_rank_address_sector',
'idx_placex_rank_boundaries_sector'))
return indexes
# CHECK FUNCTIONS
#
# Functions are executed in the order they appear here.
@_check(hint="""\
{error}
Hints:
* Is the database server started?
* Check the NOMINATIM_DATABASE_DSN variable in your local .env
* Try connecting to the database with the same settings
Project directory: {config.project_dir}
Current setting of NOMINATIM_DATABASE_DSN: {config.DATABASE_DSN}
""")
def check_connection(conn: Any, config: Configuration) -> CheckResult:
""" Checking database connection
"""
if isinstance(conn, _BadConnection):
return CheckState.FATAL, dict(error=conn.msg, config=config)
return CheckState.OK
@_check(hint="""\
Database version ({db_version}) doesn't match Nominatim version ({nom_version})
Hints:
* Are you connecting to the correct database?
{instruction}
Check the Migration chapter of the Administration Guide.
Project directory: {config.project_dir}
Current setting of NOMINATIM_DATABASE_DSN: {config.DATABASE_DSN}
""")
def check_database_version(conn: Connection, config: Configuration) -> CheckResult:
""" Checking database_version matches Nominatim software version
"""
if table_exists(conn, 'nominatim_properties'):
db_version_str = properties.get_property(conn, 'database_version')
else:
db_version_str = None
if db_version_str is not None:
db_version = parse_version(db_version_str)
if db_version == NOMINATIM_VERSION:
return CheckState.OK
instruction = (
'Run migrations: nominatim admin --migrate'
if db_version < NOMINATIM_VERSION
else 'You need to upgrade the Nominatim software.'
)
else:
instruction = ''
return CheckState.FATAL, dict(db_version=db_version_str,
nom_version=NOMINATIM_VERSION,
instruction=instruction,
config=config)
@_check(hint="""\
placex table not found
Hints:
* Are you connecting to the correct database?
* Did the import process finish without errors?
Project directory: {config.project_dir}
Current setting of NOMINATIM_DATABASE_DSN: {config.DATABASE_DSN}
""")
def check_placex_table(conn: Connection, config: Configuration) -> CheckResult:
""" Checking for placex table
"""
if table_exists(conn, 'placex'):
return CheckState.OK
return CheckState.FATAL, dict(config=config)
@_check(hint="""placex table has no data. Did the import finish successfully?""")
def check_placex_size(conn: Connection, _: Configuration) -> CheckResult:
""" Checking for placex content
"""
cnt = execute_scalar(conn, 'SELECT count(*) FROM (SELECT * FROM placex LIMIT 100) x')
return CheckState.OK if cnt > 0 else CheckState.FATAL
@_check(hint="""{msg}""")
def check_tokenizer(_: Connection, config: Configuration) -> CheckResult:
""" Checking that tokenizer works
"""
try:
tokenizer = tokenizer_factory.get_tokenizer_for_db(config)
except UsageError:
return CheckState.FAIL, dict(msg="""\
Cannot load tokenizer. Did the import finish successfully?""")
result = tokenizer.check_database(config)
if result is None:
return CheckState.OK
return CheckState.FAIL, dict(msg=result)
@_check(hint="""\
Wikipedia/Wikidata importance tables missing.
Quality of search results may be degraded. Reverse geocoding is unaffected.
See https://nominatim.org/release-docs/latest/admin/Import/#wikipediawikidata-rankings
""")
def check_existance_wikipedia(conn: Connection, _: Configuration) -> CheckResult:
""" Checking for wikipedia/wikidata data
"""
if not table_exists(conn, 'search_name') or not table_exists(conn, 'place'):
return CheckState.NOT_APPLICABLE
if table_exists(conn, 'wikimedia_importance'):
cnt = execute_scalar(conn, 'SELECT count(*) FROM wikimedia_importance')
else:
cnt = execute_scalar(conn, 'SELECT count(*) FROM wikipedia_article')
return CheckState.WARN if cnt == 0 else CheckState.OK
@_check(hint="""\
The indexing didn't finish. {count} entries are not yet indexed.
To index the remaining entries, run: {index_cmd}
""")
def check_indexing(conn: Connection, _: Configuration) -> CheckResult:
""" Checking indexing status
"""
cnt = execute_scalar(conn, 'SELECT count(*) FROM placex WHERE indexed_status > 0')
if cnt == 0:
return CheckState.OK
if freeze.is_frozen(conn):
index_cmd="""\
Database is marked frozen, it cannot be updated.
Low counts of unindexed places are fine."""
return CheckState.WARN, dict(count=cnt, index_cmd=index_cmd)
if index_exists(conn, 'idx_placex_rank_search'):
# Likely just an interrupted update.
index_cmd = 'nominatim index'
else:
# Looks like the import process got interrupted.
index_cmd = 'nominatim import --continue indexing'
return CheckState.FAIL, dict(count=cnt, index_cmd=index_cmd)
@_check(hint="""\
The following indexes are missing:
{indexes}
Rerun the index creation with: nominatim import --continue db-postprocess
""")
def check_database_indexes(conn: Connection, _: Configuration) -> CheckResult:
""" Checking that database indexes are complete
"""
missing = []
for index in _get_indexes(conn):
if not index_exists(conn, index):
missing.append(index)
if missing:
return CheckState.FAIL, dict(indexes='\n '.join(missing))
return CheckState.OK
@_check(hint="""\
At least one index is invalid. That can happen, e.g. when index creation was
disrupted and later restarted. You should delete the affected indices
and recreate them.
Invalid indexes:
{indexes}
""")
def check_database_index_valid(conn: Connection, _: Configuration) -> CheckResult:
""" Checking that all database indexes are valid
"""
with conn.cursor() as cur:
cur.execute(""" SELECT relname FROM pg_class, pg_index
WHERE pg_index.indisvalid = false
AND pg_index.indexrelid = pg_class.oid""")
broken = [c[0] for c in cur]
if broken:
return CheckState.FAIL, dict(indexes='\n '.join(broken))
return CheckState.OK
@_check(hint="""\
{error}
Run TIGER import again: nominatim add-data --tiger-data <DIR>
""")
def check_tiger_table(conn: Connection, config: Configuration) -> CheckResult:
""" Checking TIGER external data table.
"""
if not config.get_bool('USE_US_TIGER_DATA'):
return CheckState.NOT_APPLICABLE
if not table_exists(conn, 'location_property_tiger'):
return CheckState.FAIL, dict(error='TIGER data table not found.')
if execute_scalar(conn, 'SELECT count(*) FROM location_property_tiger') == 0:
return CheckState.FAIL, dict(error='TIGER data table is empty.')
return CheckState.OK
| 11,963 | Python | .py | 278 | 33.863309 | 99 | 0.625614 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,798 | __init__.py | osm-search_Nominatim/src/nominatim_db/tools/__init__.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module with functions for importing, updating Nominatim databases
as well as general maintenance helpers.
"""
| 325 | Python | .py | 10 | 31.5 | 65 | 0.774603 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,799 | replication.py | osm-search_Nominatim/src/nominatim_db/tools/replication.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for updating a database from a replication source.
"""
from typing import ContextManager, MutableMapping, Any, Generator, cast, Iterator
from contextlib import contextmanager
import datetime as dt
from enum import Enum
import logging
import time
import types
import urllib.request as urlrequest
from ..errors import UsageError
from ..db import status
from ..db.connection import Connection, connect, server_version_tuple
from .exec_utils import run_osm2pgsql
try:
from osmium.replication.server import ReplicationServer
from osmium import WriteHandler
from osmium import version as pyo_version
import requests
except ModuleNotFoundError as exc:
logging.getLogger().critical("pyosmium not installed. Replication functions not available.\n"
"To install pyosmium via pip: pip install osmium")
raise UsageError("replication tools not available") from exc
LOG = logging.getLogger()
def init_replication(conn: Connection, base_url: str,
socket_timeout: int = 60) -> None:
""" Set up replication for the server at the given base URL.
"""
LOG.info("Using replication source: %s", base_url)
date = status.compute_database_date(conn)
# margin of error to make sure we get all data
date -= dt.timedelta(hours=3)
with _make_replication_server(base_url, socket_timeout) as repl:
seq = repl.timestamp_to_sequence(date)
if seq is None:
LOG.fatal("Cannot reach the configured replication service '%s'.\n"
"Does the URL point to a directory containing OSM update data?",
base_url)
raise UsageError("Failed to reach replication service")
status.set_status(conn, date=date, seq=seq)
LOG.warning("Updates initialised at sequence %s (%s)", seq, date)
def check_for_updates(conn: Connection, base_url: str,
socket_timeout: int = 60) -> int:
""" Check if new data is available from the replication service at the
given base URL.
"""
_, seq, _ = status.get_status(conn)
if seq is None:
LOG.error("Replication not set up. "
"Please run 'nominatim replication --init' first.")
return 254
with _make_replication_server(base_url, socket_timeout) as repl:
state = repl.get_state_info()
if state is None:
LOG.error("Cannot get state for URL %s.", base_url)
return 253
if state.sequence <= seq:
LOG.warning("Database is up to date.")
return 2
LOG.warning("New data available (%i => %i).", seq, state.sequence)
return 0
class UpdateState(Enum):
""" Possible states after an update has run.
"""
UP_TO_DATE = 0
MORE_PENDING = 2
NO_CHANGES = 3
def update(dsn: str, options: MutableMapping[str, Any],
socket_timeout: int = 60) -> UpdateState:
""" Update database from the next batch of data. Returns the state of
updates according to `UpdateState`.
"""
with connect(dsn) as conn:
startdate, startseq, indexed = status.get_status(conn)
conn.commit()
if startseq is None:
LOG.error("Replication not set up. "
"Please run 'nominatim replication --init' first.")
raise UsageError("Replication not set up.")
assert startdate is not None
if not indexed and options['indexed_only']:
LOG.info("Skipping update. There is data that needs indexing.")
return UpdateState.MORE_PENDING
last_since_update = dt.datetime.now(dt.timezone.utc) - startdate
update_interval = dt.timedelta(seconds=options['update_interval'])
if last_since_update < update_interval:
duration = (update_interval - last_since_update).seconds
LOG.warning("Sleeping for %s sec before next update.", duration)
time.sleep(duration)
if options['import_file'].exists():
options['import_file'].unlink()
# Read updates into file.
with _make_replication_server(options['base_url'], socket_timeout) as repl:
outhandler = WriteHandler(str(options['import_file']))
endseq = repl.apply_diffs(outhandler, startseq + 1,
max_size=options['max_diff_size'] * 1024)
outhandler.close()
if endseq is None:
return UpdateState.NO_CHANGES
with connect(dsn) as conn:
run_osm2pgsql_updates(conn, options)
# Write the current status to the file
endstate = repl.get_state_info(endseq)
status.set_status(conn, endstate.timestamp if endstate else None,
seq=endseq, indexed=False)
conn.commit()
return UpdateState.UP_TO_DATE
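# A minimal driver sketch (illustrative only; 'options' must carry the keys
# used above such as base_url, update_interval, import_file, max_diff_size
# and indexed_only):
#
#   while update(dsn, options) is not UpdateState.NO_CHANGES:
#       pass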
def run_osm2pgsql_updates(conn: Connection, options: MutableMapping[str, Any]) -> None:
""" Run osm2pgsql in append mode.
"""
# Remove any stale deletion marks.
with conn.cursor() as cur:
cur.execute('TRUNCATE place_to_be_deleted')
conn.commit()
# Consume updates with osm2pgsql.
options['append'] = True
options['disable_jit'] = server_version_tuple(conn) >= (11, 0)
run_osm2pgsql(options)
# Handle deletions
with conn.cursor() as cur:
cur.execute('SELECT flush_deleted_places()')
conn.commit()
def _make_replication_server(url: str, timeout: int) -> ContextManager[ReplicationServer]:
""" Returns a ReplicationServer in form of a context manager.
Creates a light wrapper around older versions of pyosmium that did
not support the context manager interface.
"""
if hasattr(ReplicationServer, '__enter__'):
# Patches the open_url function for pyosmium >= 3.2
# where the socket timeout is no longer respected.
def patched_open_url(self: ReplicationServer, url: urlrequest.Request) -> Any:
""" Download a resource from the given URL and return a byte sequence
of the content.
"""
headers = {"User-Agent" : f"Nominatim (pyosmium/{pyo_version.pyosmium_release})"}
if self.session is not None:
return self.session.get(url.get_full_url(),
headers=headers, timeout=timeout or None,
stream=True)
@contextmanager
def _get_url_with_session() -> Iterator[requests.Response]:
with requests.Session() as session:
request = session.get(url.get_full_url(),
headers=headers, timeout=timeout or None,
stream=True)
yield request
return _get_url_with_session()
repl = ReplicationServer(url)
setattr(repl, 'open_url', types.MethodType(patched_open_url, repl))
return cast(ContextManager[ReplicationServer], repl)
@contextmanager
def get_cm() -> Generator[ReplicationServer, None, None]:
yield ReplicationServer(url)
return get_cm()
| 7,276 | Python | .py | 161 | 36.608696 | 97 | 0.651534 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |