# File: nominatim/api/search/icu_tokenizer.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of query analysis for the ICU tokenizer.
"""
from typing import Tuple, Dict, List, Optional, NamedTuple, Iterator, Any, cast
from copy import copy
from collections import defaultdict
import dataclasses
import difflib
from icu import Transliterator
import sqlalchemy as sa
from nominatim.typing import SaRow
from nominatim.api.connection import SearchConnection
from nominatim.api.logging import log
from nominatim.api.search import query as qmod
from nominatim.api.search.query_analyzer_factory import AbstractQueryAnalyzer
DB_TO_TOKEN_TYPE = {
'W': qmod.TokenType.WORD,
'w': qmod.TokenType.PARTIAL,
'H': qmod.TokenType.HOUSENUMBER,
'P': qmod.TokenType.POSTCODE,
'C': qmod.TokenType.COUNTRY
}
class QueryPart(NamedTuple):
""" Normalized and transliterated form of a single term in the query.
When the term came out of a split during the transliteration,
the normalized string is the full word before transliteration.
The word number keeps track of the word before transliteration
        and can be used to identify partially transliterated terms.
"""
token: str
normalized: str
word_number: int
QueryParts = List[QueryPart]
WordDict = Dict[str, List[qmod.TokenRange]]
def yield_words(terms: List[QueryPart], start: int) -> Iterator[Tuple[str, qmod.TokenRange]]:
""" Return all combinations of words in the terms list after the
given position.
"""
total = len(terms)
for first in range(start, total):
word = terms[first].token
yield word, qmod.TokenRange(first, first + 1)
for last in range(first + 1, min(first + 20, total)):
word = ' '.join((word, terms[last].token))
yield word, qmod.TokenRange(first, last + 1)
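# A hedged illustration (names per this module): for three QueryParts with
# tokens 'new', 'york', 'city' and start=0, the generator yields
#   ('new', TokenRange(0, 1)), ('new york', TokenRange(0, 2)),
#   ('new york city', TokenRange(0, 3)), ('york', TokenRange(1, 2)),
#   ('york city', TokenRange(1, 3)), ('city', TokenRange(2, 3)).
# Multi-word combinations are capped at 20 consecutive terms.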
@dataclasses.dataclass
class ICUToken(qmod.Token):
""" Specialised token for ICU tokenizer.
"""
word_token: str
info: Optional[Dict[str, Any]]
def get_category(self) -> Tuple[str, str]:
assert self.info
return self.info.get('class', ''), self.info.get('type', '')
def rematch(self, norm: str) -> None:
""" Check how well the token matches the given normalized string
and add a penalty, if necessary.
"""
if not self.lookup_word:
return
seq = difflib.SequenceMatcher(a=self.lookup_word, b=norm)
distance = 0
for tag, afrom, ato, bfrom, bto in seq.get_opcodes():
if tag == 'delete' and (afrom == 0 or ato == len(self.lookup_word)):
distance += 1
elif tag == 'replace':
distance += max((ato-afrom), (bto-bfrom))
elif tag != 'equal':
distance += abs((ato-afrom) - (bto-bfrom))
self.penalty += (distance/len(self.lookup_word))
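    # Worked example (a sketch, not from the original file): matching
    # lookup_word 'hauptstrasse' against the normalized string 'hauptstr'
    # yields the opcodes ('equal', 0, 8, 0, 8) and ('delete', 8, 12, 8, 8).
    # The delete touches the end of the lookup word, so distance becomes 1
    # and the penalty grows by 1/12, i.e. roughly 0.083.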
@staticmethod
def from_db_row(row: SaRow) -> 'ICUToken':
""" Create a ICUToken from the row of the word table.
"""
count = 1 if row.info is None else row.info.get('count', 1)
penalty = 0.0
if row.type == 'w':
penalty = 0.3
elif row.type == 'H':
penalty = sum(0.1 for c in row.word_token if c != ' ' and not c.isdigit())
if all(not c.isdigit() for c in row.word_token):
penalty += 0.2 * (len(row.word_token) - 1)
if row.info is None:
lookup_word = row.word
else:
lookup_word = row.info.get('lookup', row.word)
if lookup_word:
lookup_word = lookup_word.split('@', 1)[0]
else:
lookup_word = row.word_token
return ICUToken(penalty=penalty, token=row.word_id, count=count,
lookup_word=lookup_word, is_indexed=True,
word_token=row.word_token, info=row.info)
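# Penalty sketch for from_db_row (derived from the rules above): a partial
# ('w') always starts at 0.3; a housenumber token '3a' gets 0.1 for its one
# non-digit character, while a purely alphabetic housenumber token 'ab' gets
# 0.1 + 0.1 plus the all-letter surcharge 0.2 * (2 - 1), i.e. 0.4 in total.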
class ICUQueryAnalyzer(AbstractQueryAnalyzer):
""" Converter for query strings into a tokenized query
        using the tokens created by an ICU tokenizer.
"""
def __init__(self, conn: SearchConnection) -> None:
self.conn = conn
async def setup(self) -> None:
""" Set up static data structures needed for the analysis.
"""
rules = await self.conn.get_property('tokenizer_import_normalisation')
self.normalizer = Transliterator.createFromRules("normalization", rules)
rules = await self.conn.get_property('tokenizer_import_transliteration')
self.transliterator = Transliterator.createFromRules("transliteration", rules)
if 'word' not in self.conn.t.meta.tables:
sa.Table('word', self.conn.t.meta,
sa.Column('word_id', sa.Integer),
sa.Column('word_token', sa.Text, nullable=False),
sa.Column('type', sa.Text, nullable=False),
sa.Column('word', sa.Text),
sa.Column('info', self.conn.t.types.Json))
async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
""" Analyze the given list of phrases and return the
tokenized query.
"""
log().section('Analyze query (using ICU tokenizer)')
normalized = list(filter(lambda p: p.text,
(qmod.Phrase(p.ptype, self.normalize_text(p.text))
for p in phrases)))
query = qmod.QueryStruct(normalized)
log().var_dump('Normalized query', query.source)
if not query.source:
return query
parts, words = self.split_query(query)
log().var_dump('Transliterated query', lambda: _dump_transliterated(query, parts))
for row in await self.lookup_in_db(list(words.keys())):
for trange in words[row.word_token]:
token = ICUToken.from_db_row(row)
if row.type == 'S':
if row.info['op'] in ('in', 'near'):
if trange.start == 0:
query.add_token(trange, qmod.TokenType.CATEGORY, token)
else:
query.add_token(trange, qmod.TokenType.QUALIFIER, token)
if trange.start == 0 or trange.end == query.num_token_slots():
token = copy(token)
token.penalty += 0.1 * (query.num_token_slots())
query.add_token(trange, qmod.TokenType.CATEGORY, token)
else:
query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token)
self.add_extra_tokens(query, parts)
self.rerank_tokens(query, parts)
log().table_dump('Word tokens', _dump_word_tokens(query))
return query
def normalize_text(self, text: str) -> str:
""" Bring the given text into a normalized form. That is the
standardized form search will work with. All information removed
at this stage is inevitably lost.
"""
return cast(str, self.normalizer.transliterate(text))
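    # Example under typical ICU rules (the actual rules are loaded from the
    # database, so the output may differ): normalize_text('Straße') would
    # commonly yield 'strasse', lowercased with diacritics folded.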
def split_query(self, query: qmod.QueryStruct) -> Tuple[QueryParts, WordDict]:
""" Transliterate the phrases and split them into tokens.
Returns the list of transliterated tokens together with their
normalized form and a dictionary of words for lookup together
with their position.
"""
parts: QueryParts = []
phrase_start = 0
words = defaultdict(list)
wordnr = 0
for phrase in query.source:
query.nodes[-1].ptype = phrase.ptype
for word in phrase.text.split(' '):
trans = self.transliterator.transliterate(word)
if trans:
for term in trans.split(' '):
if term:
parts.append(QueryPart(term, word, wordnr))
query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
query.nodes[-1].btype = qmod.BreakType.WORD
wordnr += 1
query.nodes[-1].btype = qmod.BreakType.PHRASE
for word, wrange in yield_words(parts, phrase_start):
words[word].append(wrange)
phrase_start = len(parts)
query.nodes[-1].btype = qmod.BreakType.END
return parts, words
async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
""" Return the token information from the database for the
given word tokens.
"""
t = self.conn.t.meta.tables['word']
return await self.conn.execute(t.select().where(t.c.word_token.in_(words)))
def add_extra_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
""" Add tokens to query that are not saved in the database.
"""
for part, node, i in zip(parts, query.nodes, range(1000)):
if len(part.token) <= 4 and part[0].isdigit()\
and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
ICUToken(0.5, 0, 1, part.token, True, part.token, None))
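    # Sketch of the effect: a term like '3a' (at most 4 characters, starting
    # with a digit) that has no housenumber token from the database yet gets
    # a synthetic HOUSENUMBER token with penalty 0.5 covering exactly one
    # query slot.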
def rerank_tokens(self, query: qmod.QueryStruct, parts: QueryParts) -> None:
""" Add penalties to tokens that depend on presence of other token.
"""
for i, node, tlist in query.iter_token_lists():
if tlist.ttype == qmod.TokenType.POSTCODE:
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.POSTCODE \
and (repl.ttype != qmod.TokenType.HOUSENUMBER
or len(tlist.tokens[0].lookup_word) > 4):
repl.add_penalty(0.39)
elif tlist.ttype == qmod.TokenType.HOUSENUMBER \
and len(tlist.tokens[0].lookup_word) <= 3:
if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.HOUSENUMBER:
repl.add_penalty(0.5 - tlist.tokens[0].penalty)
elif tlist.ttype not in (qmod.TokenType.COUNTRY, qmod.TokenType.PARTIAL):
norm = parts[i].normalized
for j in range(i + 1, tlist.end):
if parts[j - 1].word_number != parts[j].word_number:
norm += ' ' + parts[j].normalized
for token in tlist.tokens:
cast(ICUToken, token).rematch(norm)
def _dump_transliterated(query: qmod.QueryStruct, parts: QueryParts) -> str:
out = query.nodes[0].btype.value
for node, part in zip(query.nodes[1:], parts):
out += part.token + node.btype.value
return out
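# Example output (a sketch): for the two-phrase query 'brown str' / '5' the
# dump reads '<brown str,5>', using the BreakType values '<', ' ', ',' and
# '>' as separators.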
def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
for node in query.nodes:
for tlist in node.starting:
for token in tlist.tokens:
t = cast(ICUToken, token)
yield [tlist.ttype.name, t.token, t.word_token or '',
t.lookup_word or '', t.penalty, t.count, t.info]
async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
""" Create and set up a new query analyzer for a database based
on the ICU tokenizer.
"""
out = ICUQueryAnalyzer(conn)
await out.setup()
return out
# File: nominatim/api/search/geocoder.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Public interface to the search code.
"""
from typing import List, Any, Optional, Iterator, Tuple
import itertools
from nominatim.api.connection import SearchConnection
from nominatim.api.types import SearchDetails
from nominatim.api.results import SearchResults, add_result_details
from nominatim.api.search.token_assignment import yield_token_assignments
from nominatim.api.search.db_search_builder import SearchBuilder, build_poi_search, wrap_near_search
from nominatim.api.search.db_searches import AbstractSearch
from nominatim.api.search.query_analyzer_factory import make_query_analyzer, AbstractQueryAnalyzer
from nominatim.api.search.query import Phrase, QueryStruct
from nominatim.api.logging import log
class ForwardGeocoder:
""" Main class responsible for place search.
"""
def __init__(self, conn: SearchConnection, params: SearchDetails) -> None:
self.conn = conn
self.params = params
self.query_analyzer: Optional[AbstractQueryAnalyzer] = None
@property
def limit(self) -> int:
""" Return the configured maximum number of search results.
"""
return self.params.max_results
async def build_searches(self,
phrases: List[Phrase]) -> Tuple[QueryStruct, List[AbstractSearch]]:
""" Analyse the query and return the tokenized query and list of
possible searches over it.
"""
if self.query_analyzer is None:
self.query_analyzer = await make_query_analyzer(self.conn)
query = await self.query_analyzer.analyze_query(phrases)
searches: List[AbstractSearch] = []
if query.num_token_slots() > 0:
# 2. Compute all possible search interpretations
log().section('Compute abstract searches')
search_builder = SearchBuilder(query, self.params)
num_searches = 0
for assignment in yield_token_assignments(query):
searches.extend(search_builder.build(assignment))
log().table_dump('Searches for assignment',
_dump_searches(searches, query, num_searches))
num_searches = len(searches)
searches.sort(key=lambda s: s.penalty)
return query, searches
async def execute_searches(self, query: QueryStruct,
searches: List[AbstractSearch]) -> SearchResults:
""" Run the abstract searches against the database until a result
is found.
"""
log().section('Execute database searches')
results = SearchResults()
num_results = 0
min_ranking = 1000.0
prev_penalty = 0.0
for i, search in enumerate(searches):
if search.penalty > prev_penalty and (search.penalty > min_ranking or i > 20):
break
log().table_dump(f"{i + 1}. Search", _dump_searches([search], query))
for result in await search.lookup(self.conn, self.params):
results.append(result)
min_ranking = min(min_ranking, result.ranking + 0.5, search.penalty + 0.3)
log().result_dump('Results', ((r.accuracy, r) for r in results[num_results:]))
num_results = len(results)
prev_penalty = search.penalty
if results:
min_ranking = min(r.ranking for r in results)
results = SearchResults(r for r in results if r.ranking < min_ranking + 0.5)
if results:
min_rank = min(r.rank_search for r in results)
results = SearchResults(r for r in results
if r.ranking + 0.05 * (r.rank_search - min_rank)
< min_ranking + 0.5)
results.sort(key=lambda r: r.accuracy - r.calculated_importance())
results = SearchResults(results[:self.limit])
return results
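    # Cut-off logic in short: searches run in order of increasing penalty;
    # the loop stops once the next search is more expensive than the previous
    # one and either exceeds the best ranking seen so far or more than 20
    # searches have already been executed. Survivors are filtered to within
    # 0.5 of the best ranking and truncated to self.limit.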
async def lookup_pois(self, categories: List[Tuple[str, str]],
phrases: List[Phrase]) -> SearchResults:
""" Look up places by category. If phrase is given, a place search
over the phrase will be executed first and places close to the
results returned.
"""
log().function('forward_lookup_pois', categories=categories, params=self.params)
if phrases:
query, searches = await self.build_searches(phrases)
if query:
searches = [wrap_near_search(categories, s) for s in searches[:50]]
results = await self.execute_searches(query, searches)
else:
results = SearchResults()
else:
search = build_poi_search(categories, self.params.countries)
results = await search.lookup(self.conn, self.params)
await add_result_details(self.conn, results, self.params)
log().result_dump('Final Results', ((r.accuracy, r) for r in results))
return results
async def lookup(self, phrases: List[Phrase]) -> SearchResults:
""" Look up a single free-text query.
"""
log().function('forward_lookup', phrases=phrases, params=self.params)
results = SearchResults()
if self.params.is_impossible():
return results
query, searches = await self.build_searches(phrases)
if searches:
# Execute SQL until an appropriate result is found.
results = await self.execute_searches(query, searches[:50])
await add_result_details(self.conn, results, self.params)
log().result_dump('Final Results', ((r.accuracy, r) for r in results))
return results
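# Hedged usage sketch (not from the original file; connection setup and the
# exact SearchDetails fields are assumed):
#
#   geocoder = ForwardGeocoder(conn, SearchDetails(max_results=10))
#   results = await geocoder.lookup([Phrase(PhraseType.NONE, 'brown str 5')])
#
# lookup() tokenizes the phrases, builds abstract searches and executes the
# 50 cheapest of them until an acceptable result is found.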
# pylint: disable=invalid-name,too-many-locals
def _dump_searches(searches: List[AbstractSearch], query: QueryStruct,
start: int = 0) -> Iterator[Optional[List[Any]]]:
yield ['Penalty', 'Lookups', 'Housenr', 'Postcode', 'Countries', 'Qualifier', 'Rankings']
def tk(tl: List[int]) -> str:
tstr = [f"{query.find_lookup_word_by_id(t)}({t})" for t in tl]
return f"[{','.join(tstr)}]"
def fmt_ranking(f: Any) -> str:
if not f:
return ''
ranks = ','.join((f"{tk(r.tokens)}^{r.penalty:.3g}" for r in f.rankings))
if len(ranks) > 100:
ranks = ranks[:100] + '...'
return f"{f.column}({ranks},def={f.default:.3g})"
def fmt_lookup(l: Any) -> str:
if not l:
return ''
return f"{l.lookup_type}({l.column}{tk(l.tokens)})"
def fmt_cstr(c: Any) -> str:
if not c:
return ''
return f'{c[0]}^{c[1]}'
for search in searches[start:]:
fields = ('lookups', 'rankings', 'countries', 'housenumbers',
'postcodes', 'qualifier')
iters = itertools.zip_longest([f"{search.penalty:.3g}"],
*(getattr(search, attr, []) for attr in fields),
fillvalue= '')
for penalty, lookup, rank, cc, hnr, pc, qual in iters:
yield [penalty, fmt_lookup(lookup), fmt_cstr(hnr),
fmt_cstr(pc), fmt_cstr(cc), fmt_cstr(qual), fmt_ranking(rank)]
yield None
# File: nominatim/api/search/db_search_fields.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Data structures for more complex fields in abstract search descriptions.
"""
from typing import List, Tuple, Iterator, cast
import dataclasses
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import ARRAY
from nominatim.typing import SaFromClause, SaColumn, SaExpression
from nominatim.api.search.query import Token
@dataclasses.dataclass
class WeightedStrings:
""" A list of strings together with a penalty.
"""
values: List[str]
penalties: List[float]
def __bool__(self) -> bool:
return bool(self.values)
def __iter__(self) -> Iterator[Tuple[str, float]]:
return iter(zip(self.values, self.penalties))
def get_penalty(self, value: str, default: float = 1000.0) -> float:
""" Get the penalty for the given value. Returns the given default
if the value does not exist.
"""
try:
return self.penalties[self.values.index(value)]
except ValueError:
pass
return default
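# Example: WeightedStrings(['de', 'fr'], [0.0, 0.1]) iterates as ('de', 0.0)
# and ('fr', 0.1); get_penalty('fr') returns 0.1, while get_penalty('it')
# falls back to the default of 1000.0.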
@dataclasses.dataclass
class WeightedCategories:
""" A list of class/type tuples together with a penalty.
"""
values: List[Tuple[str, str]]
penalties: List[float]
def __bool__(self) -> bool:
return bool(self.values)
def __iter__(self) -> Iterator[Tuple[Tuple[str, str], float]]:
return iter(zip(self.values, self.penalties))
def get_penalty(self, value: Tuple[str, str], default: float = 1000.0) -> float:
""" Get the penalty for the given value. Returns the given default
if the value does not exist.
"""
try:
return self.penalties[self.values.index(value)]
except ValueError:
pass
return default
def sql_restrict(self, table: SaFromClause) -> SaExpression:
""" Return an SQLAlcheny expression that restricts the
class and type columns of the given table to the values
in the list.
Must not be used with an empty list.
"""
assert self.values
if len(self.values) == 1:
return sa.and_(table.c.class_ == self.values[0][0],
table.c.type == self.values[0][1])
return sa.or_(*(sa.and_(table.c.class_ == c, table.c.type == t)
for c, t in self.values))
@dataclasses.dataclass(order=True)
class RankedTokens:
""" List of tokens together with the penalty of using it.
"""
penalty: float
tokens: List[int]
def with_token(self, t: Token, transition_penalty: float) -> 'RankedTokens':
""" Create a new RankedTokens list with the given token appended.
            The token's penalty as well as the given transition penalty
are added to the overall penalty.
"""
return RankedTokens(self.penalty + t.penalty + transition_penalty,
self.tokens + [t.token])
@dataclasses.dataclass
class FieldRanking:
""" A list of rankings to be applied sequentially until one matches.
The matched ranking determines the penalty. If none matches a
default penalty is applied.
"""
column: str
default: float
rankings: List[RankedTokens]
def normalize_penalty(self) -> float:
""" Reduce the default and ranking penalties, such that the minimum
penalty is 0. Return the penalty that was subtracted.
"""
if self.rankings:
min_penalty = min(self.default, min(r.penalty for r in self.rankings))
else:
min_penalty = self.default
if min_penalty > 0.0:
self.default -= min_penalty
for ranking in self.rankings:
ranking.penalty -= min_penalty
return min_penalty
def sql_penalty(self, table: SaFromClause) -> SaColumn:
""" Create an SQL expression for the rankings.
"""
assert self.rankings
return sa.func.weigh_search(table.c[self.column],
[f"{{{','.join((str(s) for s in r.tokens))}}}"
for r in self.rankings],
[r.penalty for r in self.rankings],
self.default)
@dataclasses.dataclass
class FieldLookup:
""" A list of tokens to be searched for. The column names the database
column to search in and the lookup_type the operator that is applied.
'lookup_all' requires all tokens to match. 'lookup_any' requires
one of the tokens to match. 'restrict' requires to match all tokens
but avoids the use of indexes.
"""
column: str
tokens: List[int]
lookup_type: str
def sql_condition(self, table: SaFromClause) -> SaColumn:
""" Create an SQL expression for the given match condition.
"""
col = table.c[self.column]
if self.lookup_type == 'lookup_all':
return col.contains(self.tokens)
if self.lookup_type == 'lookup_any':
return cast(SaColumn, col.overlap(self.tokens))
return sa.func.array_cat(col, sa.text('ARRAY[]::integer[]'),
type_=ARRAY(sa.Integer())).contains(self.tokens)
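# Rough translation of the three lookup types to PostgreSQL (a sketch; the
# SQL actually rendered by SQLAlchemy may differ in detail):
#   lookup_all -> name_vector @> ARRAY[...]
#   lookup_any -> name_vector && ARRAY[...]
#   restrict   -> array_cat(name_vector, ARRAY[]::integer[]) @> ARRAY[...]
# The array_cat() wrapper keeps PostgreSQL from using the index on the
# column, which is the point of 'restrict'.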
class SearchData:
""" Search fields derived from query and token assignment
to be used with the SQL queries.
"""
penalty: float
lookups: List[FieldLookup] = []
rankings: List[FieldRanking]
housenumbers: WeightedStrings = WeightedStrings([], [])
postcodes: WeightedStrings = WeightedStrings([], [])
countries: WeightedStrings = WeightedStrings([], [])
qualifiers: WeightedCategories = WeightedCategories([], [])
def set_strings(self, field: str, tokens: List[Token]) -> None:
""" Set on of the WeightedStrings properties from the given
token list. Adapt the global penalty, so that the
minimum penalty is 0.
"""
if tokens:
min_penalty = min(t.penalty for t in tokens)
self.penalty += min_penalty
wstrs = WeightedStrings([t.lookup_word for t in tokens],
[t.penalty - min_penalty for t in tokens])
setattr(self, field, wstrs)
def set_qualifiers(self, tokens: List[Token]) -> None:
""" Set the qulaifier field from the given tokens.
"""
if tokens:
min_penalty = min(t.penalty for t in tokens)
self.penalty += min_penalty
self.qualifiers = WeightedCategories([t.get_category() for t in tokens],
[t.penalty - min_penalty for t in tokens])
def set_ranking(self, rankings: List[FieldRanking]) -> None:
""" Set the list of rankings and normalize the ranking.
"""
self.rankings = []
for ranking in rankings:
if ranking.rankings:
self.penalty += ranking.normalize_penalty()
self.rankings.append(ranking)
else:
self.penalty += ranking.default
# File: nominatim/api/search/db_search_builder.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Conversion from token assignment to an abstract DB search.
"""
from typing import Optional, List, Tuple, Iterator
import heapq
from nominatim.api.types import SearchDetails, DataLayer
from nominatim.api.search.query import QueryStruct, Token, TokenType, TokenRange, BreakType
from nominatim.api.search.token_assignment import TokenAssignment
import nominatim.api.search.db_search_fields as dbf
import nominatim.api.search.db_searches as dbs
from nominatim.api.logging import log
def wrap_near_search(categories: List[Tuple[str, str]],
search: dbs.AbstractSearch) -> dbs.NearSearch:
""" Create a new search that wraps the given search in a search
for near places of the given category.
"""
return dbs.NearSearch(penalty=search.penalty,
categories=dbf.WeightedCategories(categories,
[0.0] * len(categories)),
search=search)
def build_poi_search(category: List[Tuple[str, str]],
countries: Optional[List[str]]) -> dbs.PoiSearch:
""" Create a new search for places by the given category, possibly
        constrained to the given countries.
"""
if countries:
ccs = dbf.WeightedStrings(countries, [0.0] * len(countries))
else:
ccs = dbf.WeightedStrings([], [])
class _PoiData(dbf.SearchData):
penalty = 0.0
qualifiers = dbf.WeightedCategories(category, [0.0] * len(category))
        countries = ccs
return dbs.PoiSearch(_PoiData())
class SearchBuilder:
""" Build the abstract search queries from token assignments.
"""
def __init__(self, query: QueryStruct, details: SearchDetails) -> None:
self.query = query
self.details = details
@property
def configured_for_country(self) -> bool:
""" Return true if the search details are configured to
allow countries in the result.
"""
return self.details.min_rank <= 4 and self.details.max_rank >= 4 \
and self.details.layer_enabled(DataLayer.ADDRESS)
@property
def configured_for_postcode(self) -> bool:
""" Return true if the search details are configured to
allow postcodes in the result.
"""
return self.details.min_rank <= 5 and self.details.max_rank >= 11\
and self.details.layer_enabled(DataLayer.ADDRESS)
@property
def configured_for_housenumbers(self) -> bool:
""" Return true if the search details are configured to
allow addresses in the result.
"""
return self.details.max_rank >= 30 \
and self.details.layer_enabled(DataLayer.ADDRESS)
def build(self, assignment: TokenAssignment) -> Iterator[dbs.AbstractSearch]:
""" Yield all possible abstract searches for the given token assignment.
"""
sdata = self.get_search_data(assignment)
if sdata is None:
return
categories = self.get_search_categories(assignment)
if assignment.name is None:
if categories and not sdata.postcodes:
sdata.qualifiers = categories
categories = None
builder = self.build_poi_search(sdata)
elif assignment.housenumber:
hnr_tokens = self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER)
builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address)
else:
builder = self.build_special_search(sdata, assignment.address,
bool(categories))
else:
builder = self.build_name_search(sdata, assignment.name, assignment.address,
bool(categories))
if categories:
penalty = min(categories.penalties)
categories.penalties = [p - penalty for p in categories.penalties]
for search in builder:
yield dbs.NearSearch(penalty, categories, search)
else:
yield from builder
def build_poi_search(self, sdata: dbf.SearchData) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search query for a simple category search.
This kind of search requires an additional geographic constraint.
"""
if not sdata.housenumbers \
and ((self.details.viewbox and self.details.bounded_viewbox) or self.details.near):
yield dbs.PoiSearch(sdata)
def build_special_search(self, sdata: dbf.SearchData,
address: List[TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for searches that do not involve
a named place.
"""
if sdata.qualifiers:
# No special searches over qualifiers supported.
return
if sdata.countries and not address and not sdata.postcodes \
and self.configured_for_country:
yield dbs.CountrySearch(sdata)
if sdata.postcodes and (is_category or self.configured_for_postcode):
penalty = 0.0 if sdata.countries else 0.1
if address:
sdata.lookups = [dbf.FieldLookup('nameaddress_vector',
[t.token for r in address
for t in self.query.get_partials_list(r)],
'restrict')]
penalty += 0.2
yield dbs.PostcodeSearch(penalty, sdata)
def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[Token],
address: List[TokenRange]) -> Iterator[dbs.AbstractSearch]:
""" Build a simple address search for special entries where the
housenumber is the main name token.
"""
partial_tokens: List[int] = []
for trange in address:
partial_tokens.extend(t.token for t in self.query.get_partials_list(trange))
sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], 'lookup_any'),
dbf.FieldLookup('nameaddress_vector', partial_tokens, 'lookup_all')
]
yield dbs.PlaceSearch(0.05, sdata, sum(t.count for t in hnrs))
def build_name_search(self, sdata: dbf.SearchData,
name: TokenRange, address: List[TokenRange],
is_category: bool) -> Iterator[dbs.AbstractSearch]:
""" Build abstract search queries for simple name or address searches.
"""
if is_category or not sdata.housenumbers or self.configured_for_housenumbers:
ranking = self.get_name_ranking(name)
name_penalty = ranking.normalize_penalty()
if ranking.rankings:
sdata.rankings.append(ranking)
for penalty, count, lookup in self.yield_lookups(name, address):
sdata.lookups = lookup
yield dbs.PlaceSearch(penalty + name_penalty, sdata, count)
def yield_lookups(self, name: TokenRange, address: List[TokenRange])\
-> Iterator[Tuple[float, int, List[dbf.FieldLookup]]]:
""" Yield all variants how the given name and address should best
be searched for. This takes into account how frequent the terms
are and tries to find a lookup that optimizes index use.
"""
penalty = 0.0 # extra penalty currently unused
name_partials = self.query.get_partials_list(name)
exp_name_count = min(t.count for t in name_partials)
addr_partials = []
for trange in address:
addr_partials.extend(self.query.get_partials_list(trange))
addr_tokens = [t.token for t in addr_partials]
partials_indexed = all(t.is_indexed for t in name_partials) \
and all(t.is_indexed for t in addr_partials)
if (len(name_partials) > 3 or exp_name_count < 1000) and partials_indexed:
# Lookup by name partials, use address partials to restrict results.
lookup = [dbf.FieldLookup('name_vector',
[t.token for t in name_partials], 'lookup_all')]
if addr_tokens:
lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'restrict'))
yield penalty, exp_name_count, lookup
return
exp_addr_count = min(t.count for t in addr_partials) if addr_partials else exp_name_count
if exp_addr_count < 1000 and partials_indexed:
# Lookup by address partials and restrict results through name terms.
# Give this a small penalty because lookups in the address index are
# more expensive
yield penalty + exp_addr_count/5000, exp_addr_count,\
[dbf.FieldLookup('name_vector', [t.token for t in name_partials], 'restrict'),
dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all')]
return
        # Partial term too frequent. Try looking up by rare full names first.
name_fulls = self.query.get_tokens(name, TokenType.WORD)
rare_names = list(filter(lambda t: t.count < 1000, name_fulls))
# At this point drop unindexed partials from the address.
# This might yield wrong results, nothing we can do about that.
if not partials_indexed:
addr_tokens = [t.token for t in addr_partials if t.is_indexed]
log().var_dump('before', penalty)
penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed)
log().var_dump('after', penalty)
if rare_names:
# Any of the full names applies with all of the partials from the address
lookup = [dbf.FieldLookup('name_vector', [t.token for t in rare_names], 'lookup_any')]
if addr_tokens:
lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'restrict'))
yield penalty, sum(t.count for t in rare_names), lookup
# To catch remaining results, lookup by name and address
# We only do this if there is a reasonable number of results expected.
if min(exp_name_count, exp_addr_count) < 10000:
if all(t.is_indexed for t in name_partials):
lookup = [dbf.FieldLookup('name_vector',
[t.token for t in name_partials], 'lookup_all')]
else:
# we don't have the partials, try with the non-rare names
non_rare_names = [t.token for t in name_fulls if t.count >= 1000]
if not non_rare_names:
return
lookup = [dbf.FieldLookup('name_vector', non_rare_names, 'lookup_any')]
if addr_tokens:
lookup.append(dbf.FieldLookup('nameaddress_vector', addr_tokens, 'lookup_all'))
yield penalty + 0.1 * max(0, 5 - len(name_partials) - len(addr_tokens)),\
min(exp_name_count, exp_addr_count), lookup
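    # Strategy recap (a reading of the code above): 1) with more than three
    # name partials or an expected name count below 1000, look up by name and
    # restrict by address; 2) else with a rare address partial, look up by
    # address and restrict by name; 3) otherwise fall back to rare full
    # names, adding a name-plus-address lookup when the expected result
    # count stays below 10000.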
def get_name_ranking(self, trange: TokenRange) -> dbf.FieldRanking:
""" Create a ranking expression for a name term in the given range.
"""
name_fulls = self.query.get_tokens(trange, TokenType.WORD)
ranks = [dbf.RankedTokens(t.penalty, [t.token]) for t in name_fulls]
ranks.sort(key=lambda r: r.penalty)
# Fallback, sum of penalty for partials
name_partials = self.query.get_partials_list(trange)
default = sum(t.penalty for t in name_partials) + 0.2
return dbf.FieldRanking('name_vector', default, ranks)
def get_addr_ranking(self, trange: TokenRange) -> dbf.FieldRanking:
""" Create a list of ranking expressions for an address term
for the given ranges.
"""
todo: List[Tuple[int, int, dbf.RankedTokens]] = []
heapq.heappush(todo, (0, trange.start, dbf.RankedTokens(0.0, [])))
ranks: List[dbf.RankedTokens] = []
while todo: # pylint: disable=too-many-nested-blocks
neglen, pos, rank = heapq.heappop(todo)
for tlist in self.query.nodes[pos].starting:
if tlist.ttype in (TokenType.PARTIAL, TokenType.WORD):
if tlist.end < trange.end:
chgpenalty = PENALTY_WORDCHANGE[self.query.nodes[tlist.end].btype]
if tlist.ttype == TokenType.PARTIAL:
penalty = rank.penalty + chgpenalty \
+ max(t.penalty for t in tlist.tokens)
heapq.heappush(todo, (neglen - 1, tlist.end,
dbf.RankedTokens(penalty, rank.tokens)))
else:
for t in tlist.tokens:
heapq.heappush(todo, (neglen - 1, tlist.end,
rank.with_token(t, chgpenalty)))
elif tlist.end == trange.end:
if tlist.ttype == TokenType.PARTIAL:
ranks.append(dbf.RankedTokens(rank.penalty
+ max(t.penalty for t in tlist.tokens),
rank.tokens))
else:
ranks.extend(rank.with_token(t, 0.0) for t in tlist.tokens)
if len(ranks) >= 10:
# Too many variants, bail out and only add
# Worst-case Fallback: sum of penalty of partials
name_partials = self.query.get_partials_list(trange)
default = sum(t.penalty for t in name_partials) + 0.2
ranks.append(dbf.RankedTokens(rank.penalty + default, []))
# Bail out of outer loop
todo.clear()
break
ranks.sort(key=lambda r: len(r.tokens))
default = ranks[0].penalty + 0.3
del ranks[0]
ranks.sort(key=lambda r: r.penalty)
return dbf.FieldRanking('nameaddress_vector', default, ranks)
def get_search_data(self, assignment: TokenAssignment) -> Optional[dbf.SearchData]:
""" Collect the tokens for the non-name search fields in the
assignment.
"""
sdata = dbf.SearchData()
sdata.penalty = assignment.penalty
if assignment.country:
tokens = self.query.get_tokens(assignment.country, TokenType.COUNTRY)
if self.details.countries:
tokens = [t for t in tokens if t.lookup_word in self.details.countries]
if not tokens:
return None
sdata.set_strings('countries', tokens)
elif self.details.countries:
sdata.countries = dbf.WeightedStrings(self.details.countries,
[0.0] * len(self.details.countries))
if assignment.housenumber:
sdata.set_strings('housenumbers',
self.query.get_tokens(assignment.housenumber,
TokenType.HOUSENUMBER))
if assignment.postcode:
sdata.set_strings('postcodes',
self.query.get_tokens(assignment.postcode,
TokenType.POSTCODE))
if assignment.qualifier:
sdata.set_qualifiers(self.query.get_tokens(assignment.qualifier,
TokenType.QUALIFIER))
if assignment.address:
sdata.set_ranking([self.get_addr_ranking(r) for r in assignment.address])
else:
sdata.rankings = []
return sdata
def get_search_categories(self,
assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]:
""" Collect tokens for category search or use the categories
requested per parameter.
Returns None if no category search is requested.
"""
if assignment.category:
tokens = [t for t in self.query.get_tokens(assignment.category,
TokenType.CATEGORY)
if not self.details.categories
or t.get_category() in self.details.categories]
return dbf.WeightedCategories([t.get_category() for t in tokens],
[t.penalty for t in tokens])
if self.details.categories:
return dbf.WeightedCategories(self.details.categories,
[0.0] * len(self.details.categories))
return None
PENALTY_WORDCHANGE = {
BreakType.START: 0.0,
BreakType.END: 0.0,
BreakType.PHRASE: 0.0,
BreakType.WORD: 0.1,
BreakType.PART: 0.2,
BreakType.TOKEN: 0.4
}
# File: nominatim/api/search/__init__.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module for forward search.
"""
# pylint: disable=useless-import-alias
from .geocoder import (ForwardGeocoder as ForwardGeocoder)
from .query import (Phrase as Phrase,
PhraseType as PhraseType)
from .query_analyzer_factory import (make_query_analyzer as make_query_analyzer)
# File: nominatim/api/search/token_assignment.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Create query interpretations where each vertex in the query is assigned
a specific function (expressed as a token type).
"""
from typing import Optional, List, Iterator
import dataclasses
import nominatim.api.search.query as qmod
from nominatim.api.logging import log
# pylint: disable=too-many-return-statements,too-many-branches
@dataclasses.dataclass
class TypedRange:
""" A token range for a specific type of tokens.
"""
ttype: qmod.TokenType
trange: qmod.TokenRange
PENALTY_TOKENCHANGE = {
qmod.BreakType.START: 0.0,
qmod.BreakType.END: 0.0,
qmod.BreakType.PHRASE: 0.0,
qmod.BreakType.WORD: 0.1,
qmod.BreakType.PART: 0.2,
qmod.BreakType.TOKEN: 0.4
}
TypedRangeSeq = List[TypedRange]
@dataclasses.dataclass
class TokenAssignment: # pylint: disable=too-many-instance-attributes
""" Representation of a possible assignment of token types
to the tokens in a tokenized query.
"""
penalty: float = 0.0
name: Optional[qmod.TokenRange] = None
address: List[qmod.TokenRange] = dataclasses.field(default_factory=list)
housenumber: Optional[qmod.TokenRange] = None
postcode: Optional[qmod.TokenRange] = None
country: Optional[qmod.TokenRange] = None
category: Optional[qmod.TokenRange] = None
qualifier: Optional[qmod.TokenRange] = None
@staticmethod
def from_ranges(ranges: TypedRangeSeq) -> 'TokenAssignment':
""" Create a new token assignment from a sequence of typed spans.
"""
out = TokenAssignment()
for token in ranges:
if token.ttype == qmod.TokenType.PARTIAL:
out.address.append(token.trange)
elif token.ttype == qmod.TokenType.HOUSENUMBER:
out.housenumber = token.trange
elif token.ttype == qmod.TokenType.POSTCODE:
out.postcode = token.trange
elif token.ttype == qmod.TokenType.COUNTRY:
out.country = token.trange
elif token.ttype == qmod.TokenType.CATEGORY:
out.category = token.trange
elif token.ttype == qmod.TokenType.QUALIFIER:
out.qualifier = token.trange
return out
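# Example (a sketch): the sequence [PARTIAL 0-2, HOUSENUMBER 2-3,
# POSTCODE 3-4] maps to TokenAssignment(address=[TokenRange(0, 2)],
# housenumber=TokenRange(2, 3), postcode=TokenRange(3, 4)); PARTIAL ranges
# always land in the address list.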
class _TokenSequence:
""" Working state used to put together the token assignements.
Represents an intermediate state while traversing the tokenized
query.
"""
def __init__(self, seq: TypedRangeSeq,
direction: int = 0, penalty: float = 0.0) -> None:
self.seq = seq
self.direction = direction
self.penalty = penalty
def __str__(self) -> str:
seq = ''.join(f'[{r.trange.start} - {r.trange.end}: {r.ttype.name}]' for r in self.seq)
return f'{seq} (dir: {self.direction}, penalty: {self.penalty})'
@property
def end_pos(self) -> int:
""" Return the index of the global end of the current sequence.
"""
return self.seq[-1].trange.end if self.seq else 0
def has_types(self, *ttypes: qmod.TokenType) -> bool:
""" Check if the current sequence contains any typed ranges of
the given types.
"""
return any(s.ttype in ttypes for s in self.seq)
def is_final(self) -> bool:
""" Return true when the sequence cannot be extended by any
form of token anymore.
"""
# Country and category must be the final term for left-to-right
return len(self.seq) > 1 and \
self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.CATEGORY)
def appendable(self, ttype: qmod.TokenType) -> Optional[int]:
""" Check if the give token type is appendable to the existing sequence.
Returns None if the token type is not appendable, otherwise the
new direction of the sequence after adding such a type. The
token is not added.
"""
if ttype == qmod.TokenType.WORD:
return None
if not self.seq:
# Append unconditionally to the empty list
if ttype == qmod.TokenType.COUNTRY:
return -1
if ttype in (qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
return 1
return self.direction
# Name tokens are always acceptable and don't change direction
if ttype == qmod.TokenType.PARTIAL:
return self.direction
# Other tokens may only appear once
if self.has_types(ttype):
return None
if ttype == qmod.TokenType.HOUSENUMBER:
if self.direction == 1:
if len(self.seq) == 1 and self.seq[0].ttype == qmod.TokenType.QUALIFIER:
return None
if len(self.seq) > 2 \
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
return None # direction left-to-right: housenumber must come before anything
elif self.direction == -1 \
or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
return -1 # force direction right-to-left if after other terms
return self.direction
if ttype == qmod.TokenType.POSTCODE:
if self.direction == -1:
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
return None
return -1
if self.direction == 1:
return None if self.has_types(qmod.TokenType.COUNTRY) else 1
if self.has_types(qmod.TokenType.HOUSENUMBER, qmod.TokenType.QUALIFIER):
return 1
return self.direction
if ttype == qmod.TokenType.COUNTRY:
return None if self.direction == -1 else 1
if ttype == qmod.TokenType.CATEGORY:
return self.direction
if ttype == qmod.TokenType.QUALIFIER:
if self.direction == 1:
if (len(self.seq) == 1
and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.CATEGORY)) \
or (len(self.seq) == 2
and self.seq[0].ttype == qmod.TokenType.CATEGORY
and self.seq[1].ttype == qmod.TokenType.PARTIAL):
return 1
return None
if self.direction == -1:
return -1
tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.CATEGORY else self.seq
if len(tempseq) == 0:
return 1
if len(tempseq) == 1 and self.seq[0].ttype == qmod.TokenType.HOUSENUMBER:
return None
if len(tempseq) > 1 or self.has_types(qmod.TokenType.POSTCODE, qmod.TokenType.COUNTRY):
return -1
return 0
return None
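    # Direction semantics used throughout this class (an interpretation of
    # the checks above): 0 = still undecided, 1 = left-to-right reading where
    # housenumber/qualifier lead, -1 = right-to-left reading where the name
    # leads and postcode/country trail.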
def advance(self, ttype: qmod.TokenType, end_pos: int,
btype: qmod.BreakType) -> Optional['_TokenSequence']:
""" Return a new token sequence state with the given token type
extended.
"""
newdir = self.appendable(ttype)
if newdir is None:
return None
if not self.seq:
newseq = [TypedRange(ttype, qmod.TokenRange(0, end_pos))]
new_penalty = 0.0
else:
last = self.seq[-1]
if btype != qmod.BreakType.PHRASE and last.ttype == ttype:
# extend the existing range
newseq = self.seq[:-1] + [TypedRange(ttype, last.trange.replace_end(end_pos))]
new_penalty = 0.0
else:
# start a new range
newseq = list(self.seq) + [TypedRange(ttype,
qmod.TokenRange(last.trange.end, end_pos))]
new_penalty = PENALTY_TOKENCHANGE[btype]
return _TokenSequence(newseq, newdir, self.penalty + new_penalty)
def _adapt_penalty_from_priors(self, priors: int, new_dir: int) -> bool:
if priors == 2:
self.penalty += 1.0
elif priors > 2:
if self.direction == 0:
self.direction = new_dir
else:
return False
return True
def recheck_sequence(self) -> bool:
""" Check that the sequence is a fully valid token assignment
            and adapt direction and penalties further if necessary.
This function catches some impossible assignments that need
            forward context and can therefore not be excluded when building
the assignment.
"""
# housenumbers may not be further than 2 words from the beginning.
# If there are two words in front, give it a penalty.
hnrpos = next((i for i, tr in enumerate(self.seq)
if tr.ttype == qmod.TokenType.HOUSENUMBER),
None)
if hnrpos is not None:
if self.direction != -1:
priors = sum(1 for t in self.seq[:hnrpos] if t.ttype == qmod.TokenType.PARTIAL)
if not self._adapt_penalty_from_priors(priors, -1):
return False
if self.direction != 1:
priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TokenType.PARTIAL)
if not self._adapt_penalty_from_priors(priors, 1):
return False
return True
def get_assignments(self, query: qmod.QueryStruct) -> Iterator[TokenAssignment]:
""" Yield possible assignments for the current sequence.
This function splits up general name assignments into name
and address and yields all possible variants of that.
"""
base = TokenAssignment.from_ranges(self.seq)
# Postcode search (postcode-only search is covered in next case)
if base.postcode is not None and base.address:
if (base.postcode.start == 0 and self.direction != -1)\
or (base.postcode.end == query.num_token_slots() and self.direction != 1):
log().comment('postcode search')
# <address>,<postcode> should give preference to address search
if base.postcode.start == 0:
penalty = self.penalty
else:
penalty = self.penalty + 0.1
yield dataclasses.replace(base, penalty=penalty)
# Postcode or country-only search
if not base.address:
if not base.housenumber and (base.postcode or base.country or base.category):
log().comment('postcode/country search')
yield dataclasses.replace(base, penalty=self.penalty)
else:
# <postcode>,<address> should give preference to postcode search
if base.postcode and base.postcode.start == 0:
self.penalty += 0.1
# Use entire first word as name
if self.direction != -1:
log().comment('first word = name')
yield dataclasses.replace(base, name=base.address[0],
penalty=self.penalty,
address=base.address[1:])
# Use entire last word as name
if self.direction == -1 or (self.direction == 0 and len(base.address) > 1):
log().comment('last word = name')
yield dataclasses.replace(base, name=base.address[-1],
penalty=self.penalty,
address=base.address[:-1])
# variant for special housenumber searches
if base.housenumber:
yield dataclasses.replace(base, penalty=self.penalty)
# Use beginning of first word as name
if self.direction != -1:
first = base.address[0]
if (not base.housenumber or first.end >= base.housenumber.start)\
and (not base.qualifier or first.start >= base.qualifier.end):
for i in range(first.start + 1, first.end):
name, addr = first.split(i)
penalty = self.penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype]
log().comment(f'split first word = name ({i - first.start})')
yield dataclasses.replace(base, name=name, penalty=penalty,
address=[addr] + base.address[1:])
# Use end of last word as name
if self.direction != 1:
last = base.address[-1]
if (not base.housenumber or last.start <= base.housenumber.end)\
and (not base.qualifier or last.end <= base.qualifier.start):
for i in range(last.start + 1, last.end):
addr, name = last.split(i)
penalty = self.penalty + PENALTY_TOKENCHANGE[query.nodes[i].btype]
log().comment(f'split last word = name ({i - last.start})')
yield dataclasses.replace(base, name=name, penalty=penalty,
address=base.address[:-1] + [addr])
def yield_token_assignments(query: qmod.QueryStruct) -> Iterator[TokenAssignment]:
""" Return possible word type assignments to word positions.
The assignments are computed from the concrete tokens listed
in the tokenized query.
The result includes the penalty for transitions from one word type to
another. It does not include penalties for transitions within a
type.
"""
todo = [_TokenSequence([], direction=0 if query.source[0].ptype == qmod.PhraseType.NONE else 1)]
while todo:
state = todo.pop()
node = query.nodes[state.end_pos]
for tlist in node.starting:
newstate = state.advance(tlist.ttype, tlist.end, node.btype)
if newstate is not None:
if newstate.end_pos == query.num_token_slots():
if newstate.recheck_sequence():
log().var_dump('Assignment', newstate)
yield from newstate.get_assignments(query)
elif not newstate.is_final():
todo.append(newstate)
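# Hedged usage sketch (assumes an analyzed 'query: qmod.QueryStruct' from one
# of the tokenizer modules; not part of the original file):
#
#   for assignment in yield_token_assignments(query):
#       print(assignment.penalty, assignment.name, assignment.address)
#
# Assignments are yielded in discovery order, not sorted by penalty.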
# File: nominatim/api/search/query.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Datastructures for a tokenized query.
"""
from typing import List, Tuple, Optional, NamedTuple, Iterator
from abc import ABC, abstractmethod
import dataclasses
import enum
class BreakType(enum.Enum):
""" Type of break between tokens.
"""
START = '<'
""" Begin of the query. """
END = '>'
""" End of the query. """
PHRASE = ','
""" Break between two phrases. """
WORD = ' '
""" Break between words. """
PART = '-'
""" Break inside a word, for example a hyphen or apostrophe. """
TOKEN = '`'
""" Break created as a result of tokenization.
This may happen in languages without spaces between words.
"""
class TokenType(enum.Enum):
""" Type of token.
"""
WORD = enum.auto()
""" Full name of a place. """
PARTIAL = enum.auto()
""" Word term without breaks, does not necessarily represent a full name. """
HOUSENUMBER = enum.auto()
""" Housenumber term. """
POSTCODE = enum.auto()
""" Postal code term. """
COUNTRY = enum.auto()
""" Country name or reference. """
QUALIFIER = enum.auto()
""" Special term used together with name (e.g. _Hotel_ Bellevue). """
CATEGORY = enum.auto()
""" Special term used as searchable object(e.g. supermarket in ...). """
class PhraseType(enum.Enum):
""" Designation of a phrase.
"""
NONE = 0
""" No specific designation (i.e. source is free-form query). """
AMENITY = enum.auto()
""" Contains name or type of a POI. """
STREET = enum.auto()
""" Contains a street name optionally with a housenumber. """
CITY = enum.auto()
""" Contains the postal city. """
COUNTY = enum.auto()
""" Contains the equivalent of a county. """
STATE = enum.auto()
""" Contains a state or province. """
POSTCODE = enum.auto()
""" Contains a postal code. """
COUNTRY = enum.auto()
""" Contains the country name or code. """
def compatible_with(self, ttype: TokenType) -> bool:
""" Check if the given token type can be used with the phrase type.
"""
if self == PhraseType.NONE:
return True
if self == PhraseType.AMENITY:
return ttype in (TokenType.WORD, TokenType.PARTIAL,
TokenType.QUALIFIER, TokenType.CATEGORY)
if self == PhraseType.STREET:
return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER)
if self == PhraseType.POSTCODE:
return ttype == TokenType.POSTCODE
if self == PhraseType.COUNTRY:
return ttype == TokenType.COUNTRY
return ttype in (TokenType.WORD, TokenType.PARTIAL)
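# Example: PhraseType.STREET.compatible_with(TokenType.HOUSENUMBER) is True,
# whereas PhraseType.POSTCODE.compatible_with(TokenType.WORD) is False.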
@dataclasses.dataclass
class Token(ABC):
""" Base type for tokens.
Specific query analyzers must implement the concrete token class.
"""
penalty: float
token: int
count: int
lookup_word: str
is_indexed: bool
@abstractmethod
def get_category(self) -> Tuple[str, str]:
""" Return the category restriction for qualifier terms and
category objects.
"""
class TokenRange(NamedTuple):
""" Indexes of query nodes over which a token spans.
"""
start: int
end: int
def replace_start(self, new_start: int) -> 'TokenRange':
""" Return a new token range with the new start.
"""
return TokenRange(new_start, self.end)
def replace_end(self, new_end: int) -> 'TokenRange':
""" Return a new token range with the new end.
"""
return TokenRange(self.start, new_end)
def split(self, index: int) -> Tuple['TokenRange', 'TokenRange']:
""" Split the span into two spans at the given index.
The index must be within the span.
"""
return self.replace_end(index), self.replace_start(index)
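# Example: TokenRange(2, 5).split(3) returns
# (TokenRange(2, 3), TokenRange(3, 5)).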
@dataclasses.dataclass
class TokenList:
""" List of all tokens of a given type going from one breakpoint to another.
"""
end: int
ttype: TokenType
tokens: List[Token]
def add_penalty(self, penalty: float) -> None:
""" Add the given penalty to all tokens in the list.
"""
for token in self.tokens:
token.penalty += penalty
@dataclasses.dataclass
class QueryNode:
""" A node of the querry representing a break between terms.
"""
btype: BreakType
ptype: PhraseType
starting: List[TokenList] = dataclasses.field(default_factory=list)
def has_tokens(self, end: int, *ttypes: TokenType) -> bool:
""" Check if there are tokens of the given types ending at the
given node.
"""
return any(tl.end == end and tl.ttype in ttypes for tl in self.starting)
def get_tokens(self, end: int, ttype: TokenType) -> Optional[List[Token]]:
""" Get the list of tokens of the given type starting at this node
and ending at the node 'end'. Returns 'None' if no such
tokens exist.
"""
for tlist in self.starting:
if tlist.end == end and tlist.ttype == ttype:
return tlist.tokens
return None
@dataclasses.dataclass
class Phrase:
""" A normalized query part. Phrases may be typed which means that
they then represent a specific part of the address.
"""
ptype: PhraseType
text: str
class QueryStruct:
""" A tokenized search query together with the normalized source
from which the tokens have been parsed.
The query contains a list of nodes that represent the breaks
between words. Tokens span between nodes, which don't necessarily
need to be direct neighbours. Thus the query is represented as a
directed acyclic graph.
When created, a query contains a single node: the start of the
query. Further nodes can be added by appending to 'nodes'.
"""
def __init__(self, source: List[Phrase]) -> None:
self.source = source
self.nodes: List[QueryNode] = \
[QueryNode(BreakType.START, source[0].ptype if source else PhraseType.NONE)]
def num_token_slots(self) -> int:
""" Return the length of the query in vertice steps.
"""
return len(self.nodes) - 1
def add_node(self, btype: BreakType, ptype: PhraseType) -> None:
""" Append a new break node with the given break type.
The phrase type denotes the type for any tokens starting
at the node.
"""
self.nodes.append(QueryNode(btype, ptype))
def add_token(self, trange: TokenRange, ttype: TokenType, token: Token) -> None:
""" Add a token to the query. 'start' and 'end' are the indexes of the
nodes from which to which the token spans. The indexes must exist
and are expected to be in the same phrase.
'ttype' denotes the type of the token and 'token' the token to
be inserted.
If the token type is not compatible with the phrase it should
be added to, then the token is silently dropped.
"""
snode = self.nodes[trange.start]
if snode.ptype.compatible_with(ttype):
tlist = snode.get_tokens(trange.end, ttype)
if tlist is None:
snode.starting.append(TokenList(trange.end, ttype, [token]))
else:
tlist.append(token)
def get_tokens(self, trange: TokenRange, ttype: TokenType) -> List[Token]:
""" Get the list of tokens of a given type, spanning the given
nodes. The nodes must exist. If no tokens exist, an
empty list is returned.
"""
return self.nodes[trange.start].get_tokens(trange.end, ttype) or []
def get_partials_list(self, trange: TokenRange) -> List[Token]:
""" Create a list of partial tokens between the given nodes.
The list is composed of the first token of type PARTIAL
going to the subsequent node. Such PARTIAL tokens are
assumed to exist.
"""
return [next(iter(self.get_tokens(TokenRange(i, i+1), TokenType.PARTIAL)))
for i in range(trange.start, trange.end)]
def iter_token_lists(self) -> Iterator[Tuple[int, QueryNode, TokenList]]:
""" Iterator over all token lists in the query.
"""
for i, node in enumerate(self.nodes):
for tlist in node.starting:
yield i, node, tlist
def find_lookup_word_by_id(self, token: int) -> str:
""" Find the first token with the given token ID and return
its lookup word. Returns 'None' if no such token exists.
The function is very slow and must only be used for
debugging.
"""
for node in self.nodes:
for tlist in node.starting:
for t in tlist.tokens:
if t.token == token:
return f"[{tlist.ttype.name[0]}]{t.lookup_word}"
return 'None'
# File: nominatim/api/search/legacy_tokenizer.py
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of query analysis for the legacy tokenizer.
"""
from typing import Tuple, Dict, List, Optional, Iterator, Any, cast
from copy import copy
from collections import defaultdict
import dataclasses
import sqlalchemy as sa
from nominatim.typing import SaRow
from nominatim.api.connection import SearchConnection
from nominatim.api.logging import log
from nominatim.api.search import query as qmod
from nominatim.api.search.query_analyzer_factory import AbstractQueryAnalyzer
def yield_words(terms: List[str], start: int) -> Iterator[Tuple[str, qmod.TokenRange]]:
""" Return all combinations of words in the terms list after the
given position.
"""
total = len(terms)
for first in range(start, total):
word = terms[first]
yield word, qmod.TokenRange(first, first + 1)
for last in range(first + 1, min(first + 20, total)):
word = ' '.join((word, terms[last]))
yield word, qmod.TokenRange(first, last + 1)
@dataclasses.dataclass
class LegacyToken(qmod.Token):
""" Specialised token for legacy tokenizer.
"""
word_token: str
category: Optional[Tuple[str, str]]
country: Optional[str]
operator: Optional[str]
@property
def info(self) -> Dict[str, Any]:
""" Dictionary of additional propoerties of the token.
Should only be used for debugging purposes.
"""
return {'category': self.category,
'country': self.country,
'operator': self.operator}
def get_category(self) -> Tuple[str, str]:
assert self.category
return self.category
class LegacyQueryAnalyzer(AbstractQueryAnalyzer):
""" Converter for query strings into a tokenized query
using the tokens created by a legacy tokenizer.
"""
def __init__(self, conn: SearchConnection) -> None:
self.conn = conn
async def setup(self) -> None:
""" Set up static data structures needed for the analysis.
"""
self.max_word_freq = int(await self.conn.get_property('tokenizer_maxwordfreq'))
if 'word' not in self.conn.t.meta.tables:
sa.Table('word', self.conn.t.meta,
sa.Column('word_id', sa.Integer),
sa.Column('word_token', sa.Text, nullable=False),
sa.Column('word', sa.Text),
sa.Column('class', sa.Text),
sa.Column('type', sa.Text),
sa.Column('country_code', sa.Text),
sa.Column('search_name_count', sa.Integer),
sa.Column('operator', sa.Text))
async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct:
""" Analyze the given list of phrases and return the
tokenized query.
"""
log().section('Analyze query (using Legacy tokenizer)')
normalized = []
if phrases:
for row in await self.conn.execute(sa.select(*(sa.func.make_standard_name(p.text)
for p in phrases))):
normalized = [qmod.Phrase(p.ptype, r) for r, p in zip(row, phrases) if r]
break
query = qmod.QueryStruct(normalized)
log().var_dump('Normalized query', query.source)
if not query.source:
return query
parts, words = self.split_query(query)
lookup_words = list(words.keys())
log().var_dump('Split query', parts)
log().var_dump('Extracted words', lookup_words)
for row in await self.lookup_in_db(lookup_words):
for trange in words[row.word_token.strip()]:
token, ttype = self.make_token(row)
if ttype == qmod.TokenType.CATEGORY:
if trange.start == 0:
query.add_token(trange, qmod.TokenType.CATEGORY, token)
elif ttype == qmod.TokenType.QUALIFIER:
query.add_token(trange, qmod.TokenType.QUALIFIER, token)
if trange.start == 0 or trange.end == query.num_token_slots():
token = copy(token)
token.penalty += 0.1 * (query.num_token_slots())
query.add_token(trange, qmod.TokenType.CATEGORY, token)
elif ttype != qmod.TokenType.PARTIAL or trange.start + 1 == trange.end:
query.add_token(trange, ttype, token)
self.add_extra_tokens(query, parts)
self.rerank_tokens(query)
log().table_dump('Word tokens', _dump_word_tokens(query))
return query
def split_query(self, query: qmod.QueryStruct) -> Tuple[List[str],
Dict[str, List[qmod.TokenRange]]]:
""" Transliterate the phrases and split them into tokens.
Returns a list of transliterated tokens and a dictionary
of words for lookup together with their position.
"""
parts: List[str] = []
phrase_start = 0
words = defaultdict(list)
for phrase in query.source:
query.nodes[-1].ptype = phrase.ptype
for trans in phrase.text.split(' '):
if trans:
for term in trans.split(' '):
if term:
parts.append(trans)
query.add_node(qmod.BreakType.TOKEN, phrase.ptype)
query.nodes[-1].btype = qmod.BreakType.WORD
query.nodes[-1].btype = qmod.BreakType.PHRASE
for word, wrange in yield_words(parts, phrase_start):
words[word].append(wrange)
phrase_start = len(parts)
query.nodes[-1].btype = qmod.BreakType.END
return parts, words
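    # Illustrative sketch: for a single phrase normalized to 'new york' the
    # function returns parts == ['new', 'york'] and a words dictionary with
    # the lookup keys 'new' (positions 0-1), 'new york' (0-2) and
    # 'york' (1-2).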
async def lookup_in_db(self, words: List[str]) -> 'sa.Result[Any]':
""" Return the token information from the database for the
given word tokens.
"""
t = self.conn.t.meta.tables['word']
sql = t.select().where(t.c.word_token.in_(words + [' ' + w for w in words]))
return await self.conn.execute(sql)
def make_token(self, row: SaRow) -> Tuple[LegacyToken, qmod.TokenType]:
""" Create a LegacyToken from the row of the word table.
Also determines the type of token.
"""
penalty = 0.0
is_indexed = True
rowclass = getattr(row, 'class')
if row.country_code is not None:
ttype = qmod.TokenType.COUNTRY
lookup_word = row.country_code
elif rowclass is not None:
if rowclass == 'place' and row.type == 'house':
ttype = qmod.TokenType.HOUSENUMBER
lookup_word = row.word_token[1:]
elif rowclass == 'place' and row.type == 'postcode':
ttype = qmod.TokenType.POSTCODE
lookup_word = row.word_token[1:]
else:
ttype = qmod.TokenType.CATEGORY if row.operator in ('in', 'near')\
else qmod.TokenType.QUALIFIER
lookup_word = row.word
elif row.word_token.startswith(' '):
ttype = qmod.TokenType.WORD
lookup_word = row.word or row.word_token[1:]
else:
ttype = qmod.TokenType.PARTIAL
lookup_word = row.word_token
penalty = 0.21
if row.search_name_count > self.max_word_freq:
is_indexed = False
return LegacyToken(penalty=penalty, token=row.word_id,
count=row.search_name_count or 1,
lookup_word=lookup_word,
word_token=row.word_token.strip(),
category=(rowclass, row.type) if rowclass is not None else None,
country=row.country_code,
operator=row.operator,
is_indexed=is_indexed),\
ttype
def add_extra_tokens(self, query: qmod.QueryStruct, parts: List[str]) -> None:
""" Add tokens to query that are not saved in the database.
"""
for part, node, i in zip(parts, query.nodes, range(1000)):
if len(part) <= 4 and part.isdigit()\
and not node.has_tokens(i+1, qmod.TokenType.HOUSENUMBER):
query.add_token(qmod.TokenRange(i, i+1), qmod.TokenType.HOUSENUMBER,
LegacyToken(penalty=0.5, token=0, count=1,
lookup_word=part, word_token=part,
category=None, country=None,
operator=None, is_indexed=True))
def rerank_tokens(self, query: qmod.QueryStruct) -> None:
""" Add penalties to tokens that depend on presence of other token.
"""
for _, node, tlist in query.iter_token_lists():
if tlist.ttype == qmod.TokenType.POSTCODE:
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.POSTCODE \
and (repl.ttype != qmod.TokenType.HOUSENUMBER
or len(tlist.tokens[0].lookup_word) > 4):
repl.add_penalty(0.39)
elif tlist.ttype == qmod.TokenType.HOUSENUMBER \
and len(tlist.tokens[0].lookup_word) <= 3:
if any(c.isdigit() for c in tlist.tokens[0].lookup_word):
for repl in node.starting:
if repl.end == tlist.end and repl.ttype != qmod.TokenType.HOUSENUMBER:
repl.add_penalty(0.5 - tlist.tokens[0].penalty)
def _dump_word_tokens(query: qmod.QueryStruct) -> Iterator[List[Any]]:
yield ['type', 'token', 'word_token', 'lookup_word', 'penalty', 'count', 'info']
for node in query.nodes:
for tlist in node.starting:
for token in tlist.tokens:
t = cast(LegacyToken, token)
yield [tlist.ttype.name, t.token, t.word_token or '',
t.lookup_word or '', t.penalty, t.count, t.info]
async def create_query_analyzer(conn: SearchConnection) -> AbstractQueryAnalyzer:
""" Create and set up a new query analyzer for a database based
        on the legacy tokenizer.
"""
out = LegacyQueryAnalyzer(conn)
await out.setup()
return out
| 10,691 | 39.653992 | 94 | py |
Nominatim | Nominatim-master/nominatim/clicmd/special_phrases.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'special-phrases' command.
"""
import argparse
import logging
from pathlib import Path
from nominatim.errors import UsageError
from nominatim.db.connection import connect
from nominatim.tools.special_phrases.sp_importer import SPImporter, SpecialPhraseLoader
from nominatim.tools.special_phrases.sp_wiki_loader import SPWikiLoader
from nominatim.tools.special_phrases.sp_csv_loader import SPCsvLoader
from nominatim.clicmd.args import NominatimArgs
LOG = logging.getLogger()
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid eventually unused imports.
# pylint: disable=E0012,C0415
class ImportSpecialPhrases:
"""\
Import special phrases.
Special phrases are search terms that narrow down the type of object
that should be searched. For example, you might want to search for
'Hotels in Barcelona'. The OSM wiki has a selection of special phrases
in many languages, which can be imported with this command.
You can also provide your own phrases in a CSV file. The file needs to have
the following five columns:
* phrase - the term expected for searching
* class - the OSM tag key of the object type
* type - the OSM tag value of the object type
* operator - the kind of search to be done (one of: in, near, name, -)
* plural - whether the term is a plural or not (Y/N)
An example file can be found in the Nominatim sources at
'test/testdb/full_en_phrases_test.csv'.
The import can be further configured to ignore specific key/value pairs.
This is particularly useful when importing phrases from the wiki. The
default configuration excludes some very common tags like building=yes.
The configuration can be customized by putting a file `phrase-settings.json`
with custom rules into the project directory or by using the `--config`
option to point to another configuration file.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Input arguments')
group.add_argument('--import-from-wiki', action='store_true',
help='Import special phrases from the OSM wiki to the database')
group.add_argument('--import-from-csv', metavar='FILE',
help='Import special phrases from a CSV file')
group.add_argument('--no-replace', action='store_true',
help='Keep the old phrases and only add the new ones')
def run(self, args: NominatimArgs) -> int:
if args.import_from_wiki:
self.start_import(args, SPWikiLoader(args.config))
if args.import_from_csv:
if not Path(args.import_from_csv).is_file():
LOG.fatal("CSV file '%s' does not exist.", args.import_from_csv)
raise UsageError('Cannot access file.')
self.start_import(args, SPCsvLoader(args.import_from_csv))
return 0
def start_import(self, args: NominatimArgs, loader: SpecialPhraseLoader) -> None:
"""
Create the SPImporter object containing the right
sp loader and then start the import of special phrases.
"""
from ..tokenizer import factory as tokenizer_factory
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
should_replace = not args.no_replace
with connect(args.config.get_libpq_dsn()) as db_connection:
SPImporter(
args.config, db_connection, loader
).import_phrases(tokenizer, should_replace)
| 3,842 | 39.882979 | 91 | py |
Nominatim | Nominatim-master/nominatim/clicmd/add_data.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'add-data' subcommand.
"""
from typing import cast
import argparse
import logging
import psutil
from nominatim.clicmd.args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid eventually unused imports.
# pylint: disable=E0012,C0415
LOG = logging.getLogger()
class UpdateAddData:
"""\
Add additional data from a file or an online source.
    This command allows you to add or update the search data in the database.
The data can come either from an OSM file or single OSM objects can
directly be downloaded from the OSM API. This function only loads the
data into the database. Afterwards it still needs to be integrated
in the search index. Use the `nominatim index` command for that.
The command can also be used to add external non-OSM data to the
database. At the moment the only supported format is TIGER housenumber
data. See the online documentation at
https://nominatim.org/release-docs/latest/admin/Import/#installing-tiger-housenumber-data-for-the-us
for more information.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group_name = parser.add_argument_group('Source')
group1 = group_name.add_mutually_exclusive_group(required=True)
group1.add_argument('--file', metavar='FILE',
help='Import data from an OSM file or diff file')
group1.add_argument('--diff', metavar='FILE',
help='Import data from an OSM diff file (deprecated: use --file)')
group1.add_argument('--node', metavar='ID', type=int,
help='Import a single node from the API')
group1.add_argument('--way', metavar='ID', type=int,
help='Import a single way from the API')
group1.add_argument('--relation', metavar='ID', type=int,
help='Import a single relation from the API')
group1.add_argument('--tiger-data', metavar='DIR',
help='Add housenumbers from the US TIGER census database')
group2 = parser.add_argument_group('Extra arguments')
group2.add_argument('--use-main-api', action='store_true',
help='Use OSM API instead of Overpass to download objects')
group2.add_argument('--osm2pgsql-cache', metavar='SIZE', type=int,
help='Size of cache to be used by osm2pgsql (in MB)')
group2.add_argument('--socket-timeout', dest='socket_timeout', type=int, default=60,
help='Set timeout for file downloads')
def run(self, args: NominatimArgs) -> int:
from nominatim.tokenizer import factory as tokenizer_factory
from nominatim.tools import tiger_data, add_osm_data
if args.tiger_data:
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
return tiger_data.add_tiger_data(args.tiger_data,
args.config,
args.threads or psutil.cpu_count() or 1,
tokenizer)
osm2pgsql_params = args.osm2pgsql_options(default_cache=1000, default_threads=1)
if args.file or args.diff:
return add_osm_data.add_data_from_file(args.config.get_libpq_dsn(),
cast(str, args.file or args.diff),
osm2pgsql_params)
if args.node:
return add_osm_data.add_osm_object(args.config.get_libpq_dsn(),
'node', args.node,
args.use_main_api,
osm2pgsql_params)
if args.way:
return add_osm_data.add_osm_object(args.config.get_libpq_dsn(),
'way', args.way,
args.use_main_api,
osm2pgsql_params)
if args.relation:
return add_osm_data.add_osm_object(args.config.get_libpq_dsn(),
'relation', args.relation,
args.use_main_api,
osm2pgsql_params)
return 0
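# Illustrative invocations (file name and ID are hypothetical):
#
#   nominatim add-data --file update.osc.gz
#   nominatim add-data --way 123456
#
# Both only load the data; run 'nominatim index' afterwards as noted in the
# class docstring.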
| 4,735 | 45.431373 | 104 | py |
Nominatim | Nominatim-master/nominatim/clicmd/setup.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'import' subcommand.
"""
from typing import Optional
import argparse
import logging
from pathlib import Path
import psutil
from nominatim.config import Configuration
from nominatim.db.connection import connect
from nominatim.db import status, properties
from nominatim.tokenizer.base import AbstractTokenizer
from nominatim.version import NOMINATIM_VERSION
from nominatim.clicmd.args import NominatimArgs
from nominatim.errors import UsageError
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid eventually unused imports.
# pylint: disable=C0415
LOG = logging.getLogger()
class SetupAll:
"""\
Create a new Nominatim database from an OSM file.
This sub-command sets up a new Nominatim database from scratch starting
with creating a new database in Postgresql. The user running this command
needs superuser rights on the database.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group_name = parser.add_argument_group('Required arguments')
group1 = group_name.add_mutually_exclusive_group(required=True)
group1.add_argument('--osm-file', metavar='FILE', action='append',
help='OSM file to be imported'
' (repeat for importing multiple files)')
group1.add_argument('--continue', dest='continue_at',
choices=['load-data', 'indexing', 'db-postprocess'],
help='Continue an import that was interrupted')
group2 = parser.add_argument_group('Optional arguments')
group2.add_argument('--osm2pgsql-cache', metavar='SIZE', type=int,
help='Size of cache to be used by osm2pgsql (in MB)')
group2.add_argument('--reverse-only', action='store_true',
help='Do not create tables and indexes for searching')
group2.add_argument('--no-partitions', action='store_true',
help=("Do not partition search indices "
"(speeds up import of single country extracts)"))
group2.add_argument('--no-updates', action='store_true',
help="Do not keep tables that are only needed for "
"updating the database later")
group2.add_argument('--offline', action='store_true',
help="Do not attempt to load any additional data from the internet")
group3 = parser.add_argument_group('Expert options')
group3.add_argument('--ignore-errors', action='store_true',
help='Continue import even when errors in SQL are present')
group3.add_argument('--index-noanalyse', action='store_true',
help='Do not perform analyse operations during index (expert only)')
def run(self, args: NominatimArgs) -> int: # pylint: disable=too-many-statements
from ..data import country_info
from ..tools import database_import, refresh, postcodes, freeze
from ..indexer.indexer import Indexer
num_threads = args.threads or psutil.cpu_count() or 1
country_info.setup_country_config(args.config)
if args.continue_at is None:
files = args.get_osm_file_list()
if not files:
raise UsageError("No input files (use --osm-file).")
LOG.warning('Creating database')
database_import.setup_database_skeleton(args.config.get_libpq_dsn(),
rouser=args.config.DATABASE_WEBUSER)
LOG.warning('Setting up country tables')
country_info.setup_country_tables(args.config.get_libpq_dsn(),
args.config.lib_dir.data,
args.no_partitions)
LOG.warning('Importing OSM data file')
database_import.import_osm_data(files,
args.osm2pgsql_options(0, 1),
drop=args.no_updates,
ignore_errors=args.ignore_errors)
LOG.warning('Importing wikipedia importance data')
data_path = Path(args.config.WIKIPEDIA_DATA_PATH or args.project_dir)
if refresh.import_wikipedia_articles(args.config.get_libpq_dsn(),
data_path) > 0:
LOG.error('Wikipedia importance dump file not found. '
'Calculating importance values of locations will not '
'use Wikipedia importance data.')
LOG.warning('Importing secondary importance raster data')
if refresh.import_secondary_importance(args.config.get_libpq_dsn(),
args.project_dir) != 0:
LOG.error('Secondary importance file not imported. '
'Falling back to default ranking.')
self._setup_tables(args.config, args.reverse_only)
if args.continue_at is None or args.continue_at == 'load-data':
LOG.warning('Initialise tables')
with connect(args.config.get_libpq_dsn()) as conn:
database_import.truncate_data_tables(conn)
LOG.warning('Load data into placex table')
database_import.load_data(args.config.get_libpq_dsn(), num_threads)
LOG.warning("Setting up tokenizer")
tokenizer = self._get_tokenizer(args.continue_at, args.config)
if args.continue_at is None or args.continue_at == 'load-data':
LOG.warning('Calculate postcodes')
postcodes.update_postcodes(args.config.get_libpq_dsn(),
args.project_dir, tokenizer)
if args.continue_at is None or args.continue_at in ('load-data', 'indexing'):
LOG.warning('Indexing places')
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer, num_threads)
indexer.index_full(analyse=not args.index_noanalyse)
LOG.warning('Post-process tables')
with connect(args.config.get_libpq_dsn()) as conn:
database_import.create_search_indices(conn, args.config,
drop=args.no_updates,
threads=num_threads)
LOG.warning('Create search index for default country names.')
country_info.create_country_names(conn, tokenizer,
args.config.get_str_list('LANGUAGES'))
if args.no_updates:
freeze.drop_update_tables(conn)
tokenizer.finalize_import(args.config)
LOG.warning('Recompute word counts')
tokenizer.update_statistics()
webdir = args.project_dir / 'website'
LOG.warning('Setup website at %s', webdir)
with connect(args.config.get_libpq_dsn()) as conn:
refresh.setup_website(webdir, args.config, conn)
self._finalize_database(args.config.get_libpq_dsn(), args.offline)
return 0
def _setup_tables(self, config: Configuration, reverse_only: bool) -> None:
""" Set up the basic database layout: tables, indexes and functions.
"""
from ..tools import database_import, refresh
with connect(config.get_libpq_dsn()) as conn:
LOG.warning('Create functions (1st pass)')
refresh.create_functions(conn, config, False, False)
LOG.warning('Create tables')
database_import.create_tables(conn, config, reverse_only=reverse_only)
refresh.load_address_levels_from_config(conn, config)
LOG.warning('Create functions (2nd pass)')
refresh.create_functions(conn, config, False, False)
LOG.warning('Create table triggers')
database_import.create_table_triggers(conn, config)
LOG.warning('Create partition tables')
database_import.create_partition_tables(conn, config)
LOG.warning('Create functions (3rd pass)')
refresh.create_functions(conn, config, False, False)
def _get_tokenizer(self, continue_at: Optional[str],
config: Configuration) -> AbstractTokenizer:
""" Set up a new tokenizer or load an already initialised one.
"""
from ..tokenizer import factory as tokenizer_factory
if continue_at is None or continue_at == 'load-data':
# (re)initialise the tokenizer data
return tokenizer_factory.create_tokenizer(config)
# just load the tokenizer
return tokenizer_factory.get_tokenizer_for_db(config)
def _finalize_database(self, dsn: str, offline: bool) -> None:
""" Determine the database date and set the status accordingly.
"""
with connect(dsn) as conn:
if not offline:
try:
dbdate = status.compute_database_date(conn)
status.set_status(conn, dbdate)
LOG.info('Database is at %s.', dbdate)
except Exception as exc: # pylint: disable=broad-except
LOG.error('Cannot determine date of database: %s', exc)
properties.set_property(conn, 'database_version', str(NOMINATIM_VERSION))
| 9,731 | 45.564593 | 96 | py |
Nominatim | Nominatim-master/nominatim/clicmd/refresh.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of 'refresh' subcommand.
"""
from typing import Tuple, Optional
import argparse
import logging
from pathlib import Path
from nominatim.config import Configuration
from nominatim.db.connection import connect
from nominatim.tokenizer.base import AbstractTokenizer
from nominatim.clicmd.args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid eventually unused imports.
# pylint: disable=E0012,C0415
LOG = logging.getLogger()
def _parse_osm_object(obj: str) -> Tuple[str, int]:
""" Parse the given argument into a tuple of OSM type and ID.
Raises an ArgumentError if the format is not recognized.
"""
if len(obj) < 2 or obj[0].lower() not in 'nrw' or not obj[1:].isdigit():
raise argparse.ArgumentTypeError("Cannot parse OSM ID. Expect format: [N|W|R]<id>.")
return (obj[0].upper(), int(obj[1:]))
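# Example (illustrative): the prefix is case-insensitive,
#
#   >>> _parse_osm_object('w12345')
#   ('W', 12345)
#
# while inputs like 'x99' or a bare number raise argparse.ArgumentTypeError.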
class UpdateRefresh:
"""\
Recompute auxiliary data used by the indexing process.
    This sub-command updates various static data and functions in the database.
It usually needs to be run after changing various aspects of the
configuration. The configuration documentation will mention the exact
command to use in such case.
Warning: the 'update' command must not be run in parallel with other update
commands like 'replication' or 'add-data'.
"""
def __init__(self) -> None:
self.tokenizer: Optional[AbstractTokenizer] = None
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Data arguments')
group.add_argument('--postcodes', action='store_true',
help='Update postcode centroid table')
group.add_argument('--word-tokens', action='store_true',
help='Clean up search terms')
group.add_argument('--word-counts', action='store_true',
help='Compute frequency of full-word search terms')
group.add_argument('--address-levels', action='store_true',
help='Reimport address level configuration')
group.add_argument('--functions', action='store_true',
help='Update the PL/pgSQL functions in the database')
group.add_argument('--wiki-data', action='store_true',
help='Update Wikipedia/data importance numbers')
group.add_argument('--secondary-importance', action='store_true',
help='Update secondary importance raster data')
group.add_argument('--importance', action='store_true',
help='Recompute place importances (expensive!)')
group.add_argument('--website', action='store_true',
help='Refresh the directory that serves the scripts for the web API')
group.add_argument('--data-object', action='append',
type=_parse_osm_object, metavar='OBJECT',
help='Mark the given OSM object as requiring an update'
' (format: [NWR]<id>)')
group.add_argument('--data-area', action='append',
type=_parse_osm_object, metavar='OBJECT',
help='Mark the area around the given OSM object as requiring an update'
' (format: [NWR]<id>)')
group = parser.add_argument_group('Arguments for function refresh')
group.add_argument('--no-diff-updates', action='store_false', dest='diffs',
help='Do not enable code for propagating updates')
group.add_argument('--enable-debug-statements', action='store_true',
help='Enable debug warning statements in functions')
def run(self, args: NominatimArgs) -> int: #pylint: disable=too-many-branches, too-many-statements
from ..tools import refresh, postcodes
from ..indexer.indexer import Indexer
if args.postcodes:
if postcodes.can_compute(args.config.get_libpq_dsn()):
LOG.warning("Update postcodes centroid")
tokenizer = self._get_tokenizer(args.config)
postcodes.update_postcodes(args.config.get_libpq_dsn(),
args.project_dir, tokenizer)
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer,
args.threads or 1)
indexer.index_postcodes()
else:
LOG.error("The place table doesn't exist. "
"Postcode updates on a frozen database is not possible.")
if args.word_tokens:
LOG.warning('Updating word tokens')
tokenizer = self._get_tokenizer(args.config)
tokenizer.update_word_tokens()
if args.word_counts:
LOG.warning('Recompute word statistics')
self._get_tokenizer(args.config).update_statistics()
if args.address_levels:
LOG.warning('Updating address levels')
with connect(args.config.get_libpq_dsn()) as conn:
refresh.load_address_levels_from_config(conn, args.config)
# Attention: must come BEFORE functions
if args.secondary_importance:
with connect(args.config.get_libpq_dsn()) as conn:
# If the table did not exist before, then the importance code
# needs to be enabled.
if not conn.table_exists('secondary_importance'):
args.functions = True
LOG.warning('Import secondary importance raster data from %s', args.project_dir)
if refresh.import_secondary_importance(args.config.get_libpq_dsn(),
args.project_dir) > 0:
                LOG.fatal('FATAL: Cannot update secondary importance raster data')
return 1
if args.functions:
LOG.warning('Create functions')
with connect(args.config.get_libpq_dsn()) as conn:
refresh.create_functions(conn, args.config,
args.diffs, args.enable_debug_statements)
self._get_tokenizer(args.config).update_sql_functions(args.config)
if args.wiki_data:
data_path = Path(args.config.WIKIPEDIA_DATA_PATH
or args.project_dir)
            LOG.warning('Import wikipedia article importance from %s', data_path)
if refresh.import_wikipedia_articles(args.config.get_libpq_dsn(),
data_path) > 0:
LOG.fatal('FATAL: Wikipedia importance dump file not found')
return 1
# Attention: importance MUST come after wiki data import.
if args.importance:
LOG.warning('Update importance values for database')
with connect(args.config.get_libpq_dsn()) as conn:
refresh.recompute_importance(conn)
if args.website:
webdir = args.project_dir / 'website'
LOG.warning('Setting up website directory at %s', webdir)
# This is a little bit hacky: call the tokenizer setup, so that
# the tokenizer directory gets repopulated as well, in case it
# wasn't there yet.
self._get_tokenizer(args.config)
with connect(args.config.get_libpq_dsn()) as conn:
refresh.setup_website(webdir, args.config, conn)
if args.data_object or args.data_area:
with connect(args.config.get_libpq_dsn()) as conn:
for obj in args.data_object or []:
refresh.invalidate_osm_object(*obj, conn, recursive=False)
for obj in args.data_area or []:
refresh.invalidate_osm_object(*obj, conn, recursive=True)
conn.commit()
return 0
def _get_tokenizer(self, config: Configuration) -> AbstractTokenizer:
if self.tokenizer is None:
from ..tokenizer import factory as tokenizer_factory
self.tokenizer = tokenizer_factory.get_tokenizer_for_db(config)
return self.tokenizer
| 8,530 | 45.36413 | 102 | py |
Nominatim | Nominatim-master/nominatim/clicmd/args.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Provides custom functions over command-line arguments.
"""
from typing import Optional, List, Dict, Any, Sequence, Tuple
import argparse
import logging
from functools import reduce
from pathlib import Path
from nominatim.errors import UsageError
from nominatim.config import Configuration
from nominatim.typing import Protocol
import nominatim.api as napi
LOG = logging.getLogger()
class Subcommand(Protocol):
"""
Interface to be implemented by classes implementing a CLI subcommand.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
"""
Fill the given parser for the subcommand with the appropriate
parameters.
"""
def run(self, args: 'NominatimArgs') -> int:
"""
Run the subcommand with the given parsed arguments.
"""
class NominatimArgs:
""" Customized namespace class for the nominatim command line tool
to receive the command-line arguments.
"""
# Basic environment set by root program.
config: Configuration
project_dir: Path
phpcgi_path: Path
# Global switches
version: bool
subcommand: Optional[str]
command: Subcommand
# Shared parameters
osm2pgsql_cache: Optional[int]
socket_timeout: int
# Arguments added to all subcommands.
verbose: int
threads: Optional[int]
# Arguments to 'add-data'
file: Optional[str]
diff: Optional[str]
node: Optional[int]
way: Optional[int]
relation: Optional[int]
tiger_data: Optional[str]
use_main_api: bool
# Arguments to 'admin'
warm: bool
check_database: bool
migrate: bool
collect_os_info: bool
analyse_indexing: bool
target: Optional[str]
osm_id: Optional[str]
place_id: Optional[int]
# Arguments to 'import'
osm_file: List[str]
continue_at: Optional[str]
reverse_only: bool
no_partitions: bool
no_updates: bool
offline: bool
ignore_errors: bool
index_noanalyse: bool
# Arguments to 'index'
boundaries_only: bool
no_boundaries: bool
minrank: int
maxrank: int
# Arguments to 'export'
output_type: str
output_format: str
output_all_postcodes: bool
language: Optional[str]
restrict_to_country: Optional[str]
restrict_to_osm_node: Optional[int]
restrict_to_osm_way: Optional[int]
restrict_to_osm_relation: Optional[int]
# Arguments to 'refresh'
postcodes: bool
word_tokens: bool
word_counts: bool
address_levels: bool
functions: bool
wiki_data: bool
secondary_importance: bool
importance: bool
website: bool
diffs: bool
enable_debug_statements: bool
data_object: Sequence[Tuple[str, int]]
data_area: Sequence[Tuple[str, int]]
# Arguments to 'replication'
init: bool
update_functions: bool
check_for_updates: bool
once: bool
catch_up: bool
do_index: bool
# Arguments to 'serve'
server: str
engine: str
# Arguments to 'special-phrases
import_from_wiki: bool
import_from_csv: Optional[str]
no_replace: bool
# Arguments to all query functions
format: str
addressdetails: bool
extratags: bool
namedetails: bool
lang: Optional[str]
polygon_output: Optional[str]
polygon_threshold: Optional[float]
# Arguments to 'search'
query: Optional[str]
amenity: Optional[str]
street: Optional[str]
city: Optional[str]
county: Optional[str]
state: Optional[str]
country: Optional[str]
postalcode: Optional[str]
countrycodes: Optional[str]
exclude_place_ids: Optional[str]
limit: int
viewbox: Optional[str]
bounded: bool
dedupe: bool
# Arguments to 'reverse'
lat: float
lon: float
zoom: Optional[int]
layers: Optional[Sequence[str]]
# Arguments to 'lookup'
ids: Sequence[str]
# Arguments to 'details'
object_class: Optional[str]
linkedplaces: bool
hierarchy: bool
keywords: bool
polygon_geojson: bool
group_hierarchy: bool
def osm2pgsql_options(self, default_cache: int,
default_threads: int) -> Dict[str, Any]:
""" Return the standard osm2pgsql options that can be derived
from the command line arguments. The resulting dict can be
further customized and then used in `run_osm2pgsql()`.
"""
return dict(osm2pgsql=self.config.OSM2PGSQL_BINARY or self.config.lib_dir.osm2pgsql,
osm2pgsql_cache=self.osm2pgsql_cache or default_cache,
osm2pgsql_style=self.config.get_import_style_file(),
osm2pgsql_style_path=self.config.config_dir,
threads=self.threads or default_threads,
dsn=self.config.get_libpq_dsn(),
flatnode_file=str(self.config.get_path('FLATNODE_FILE') or ''),
tablespaces=dict(slim_data=self.config.TABLESPACE_OSM_DATA,
slim_index=self.config.TABLESPACE_OSM_INDEX,
main_data=self.config.TABLESPACE_PLACE_DATA,
main_index=self.config.TABLESPACE_PLACE_INDEX
)
)
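    # Illustrative sketch (paths and DSN are hypothetical): with default
    # settings the returned dict has the keys osm2pgsql, osm2pgsql_cache,
    # osm2pgsql_style, osm2pgsql_style_path, threads, dsn, flatnode_file and
    # tablespaces, e.g.
    #   {'osm2pgsql_cache': 2000, 'threads': 1,
    #    'dsn': 'dbname=nominatim', 'flatnode_file': '', ...}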
def get_osm_file_list(self) -> Optional[List[Path]]:
""" Return the --osm-file argument as a list of Paths or None
if no argument was given. The function also checks if the files
exist and raises a UsageError if one cannot be found.
"""
if not self.osm_file:
return None
files = [Path(f) for f in self.osm_file]
for fname in files:
if not fname.is_file():
LOG.fatal("OSM file '%s' does not exist.", fname)
raise UsageError('Cannot access file.')
return files
def get_geometry_output(self) -> napi.GeometryFormat:
""" Get the requested geometry output format in a API-compatible
format.
"""
if not self.polygon_output:
return napi.GeometryFormat.NONE
if self.polygon_output == 'geojson':
return napi.GeometryFormat.GEOJSON
if self.polygon_output == 'kml':
return napi.GeometryFormat.KML
if self.polygon_output == 'svg':
return napi.GeometryFormat.SVG
if self.polygon_output == 'text':
return napi.GeometryFormat.TEXT
try:
return napi.GeometryFormat[self.polygon_output.upper()]
except KeyError as exp:
raise UsageError(f"Unknown polygon output format '{self.polygon_output}'.") from exp
def get_locales(self, default: Optional[str]) -> napi.Locales:
""" Get the locales from the language parameter.
"""
if self.lang:
return napi.Locales.from_accept_languages(self.lang)
if default:
return napi.Locales.from_accept_languages(default)
return napi.Locales()
def get_layers(self, default: napi.DataLayer) -> Optional[napi.DataLayer]:
""" Get the list of selected layers as a DataLayer enum.
"""
if not self.layers:
return default
return reduce(napi.DataLayer.__or__,
(napi.DataLayer[s.upper()] for s in self.layers))
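    # Illustrative sketch: with self.layers == ['address', 'poi'] the reduce()
    # above combines the flags into napi.DataLayer.ADDRESS | napi.DataLayer.POI;
    # without any --layer argument the given default is returned unchanged.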
| 7,561 | 28.084615 | 96 | py |
Nominatim | Nominatim-master/nominatim/clicmd/api.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Subcommand definitions for API calls from the command line.
"""
from typing import Mapping, Dict, Any
import argparse
import logging
import json
import sys
from nominatim.tools.exec_utils import run_api_script
from nominatim.errors import UsageError
from nominatim.clicmd.args import NominatimArgs
import nominatim.api as napi
import nominatim.api.v1 as api_output
from nominatim.api.v1.helpers import zoom_to_rank, deduplicate_results
import nominatim.api.logging as loglib
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
LOG = logging.getLogger()
STRUCTURED_QUERY = (
('amenity', 'name and/or type of POI'),
('street', 'housenumber and street'),
('city', 'city, town or village'),
('county', 'county'),
('state', 'state'),
('country', 'country'),
('postalcode', 'postcode')
)
EXTRADATA_PARAMS = (
('addressdetails', 'Include a breakdown of the address into elements'),
('extratags', ("Include additional information if available "
"(e.g. wikipedia link, opening hours)")),
('namedetails', 'Include a list of alternative names')
)
def _add_api_output_arguments(parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Output arguments')
group.add_argument('--format', default='jsonv2',
choices=['xml', 'json', 'jsonv2', 'geojson', 'geocodejson', 'debug'],
help='Format of result')
for name, desc in EXTRADATA_PARAMS:
group.add_argument('--' + name, action='store_true', help=desc)
group.add_argument('--lang', '--accept-language', metavar='LANGS',
help='Preferred language order for presenting search results')
group.add_argument('--polygon-output',
choices=['geojson', 'kml', 'svg', 'text'],
help='Output geometry of results as a GeoJSON, KML, SVG or WKT')
group.add_argument('--polygon-threshold', type=float, default = 0.0,
metavar='TOLERANCE',
help=("Simplify output geometry."
"Parameter is difference tolerance in degrees."))
def _run_api(endpoint: str, args: NominatimArgs, params: Mapping[str, object]) -> int:
script_file = args.project_dir / 'website' / (endpoint + '.php')
if not script_file.exists():
LOG.error("Cannot find API script file.\n\n"
"Make sure to run 'nominatim' from the project directory \n"
"or use the option --project-dir.")
raise UsageError("API script not found.")
return run_api_script(endpoint, args.project_dir,
phpcgi_bin=args.phpcgi_path, params=params)
class APISearch:
"""\
Execute a search query.
This command works exactly the same as if calling the /search endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Search/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
group.add_argument('--query',
help='Free-form query string')
for name, desc in STRUCTURED_QUERY:
group.add_argument('--' + name, help='Structured query: ' + desc)
_add_api_output_arguments(parser)
group = parser.add_argument_group('Result limitation')
group.add_argument('--countrycodes', metavar='CC,..',
help='Limit search results to one or more countries')
group.add_argument('--exclude_place_ids', metavar='ID,..',
help='List of search object to be excluded')
group.add_argument('--limit', type=int, default=10,
help='Limit the number of returned results')
group.add_argument('--viewbox', metavar='X1,Y1,X2,Y2',
help='Preferred area to find search results')
group.add_argument('--bounded', action='store_true',
help='Strictly restrict results to viewbox area')
group = parser.add_argument_group('Other arguments')
group.add_argument('--no-dedupe', action='store_false', dest='dedupe',
help='Do not remove duplicates from the result list')
def run(self, args: NominatimArgs) -> int:
if args.format == 'debug':
loglib.set_log_output('text')
api = napi.NominatimAPI(args.project_dir)
params: Dict[str, Any] = {'max_results': args.limit + min(args.limit, 10),
'address_details': True, # needed for display name
'geometry_output': args.get_geometry_output(),
'geometry_simplification': args.polygon_threshold,
'countries': args.countrycodes,
'excluded': args.exclude_place_ids,
'viewbox': args.viewbox,
'bounded_viewbox': args.bounded
}
if args.query:
results = api.search(args.query, **params)
else:
results = api.search_address(amenity=args.amenity,
street=args.street,
city=args.city,
county=args.county,
state=args.state,
postalcode=args.postalcode,
country=args.country,
**params)
for result in results:
result.localize(args.get_locales(api.config.DEFAULT_LANGUAGE))
if args.dedupe and len(results) > 1:
results = deduplicate_results(results, args.limit)
if args.format == 'debug':
print(loglib.get_and_disable())
return 0
output = api_output.format_result(
results,
args.format,
{'extratags': args.extratags,
'namedetails': args.namedetails,
'addressdetails': args.addressdetails})
if args.format != 'xml':
# reformat the result, so it is pretty-printed
json.dump(json.loads(output), sys.stdout, indent=4, ensure_ascii=False)
else:
sys.stdout.write(output)
sys.stdout.write('\n')
return 0
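# Illustrative invocation (query and project directory are hypothetical):
#
#   nominatim search --query 'Hotels in Barcelona' --limit 5 --format jsonv2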
class APIReverse:
"""\
Execute API reverse query.
This command works exactly the same as if calling the /reverse endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Reverse/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
group.add_argument('--lat', type=float, required=True,
help='Latitude of coordinate to look up (in WGS84)')
group.add_argument('--lon', type=float, required=True,
help='Longitude of coordinate to look up (in WGS84)')
group.add_argument('--zoom', type=int,
help='Level of detail required for the address')
group.add_argument('--layer', metavar='LAYER',
choices=[n.name.lower() for n in napi.DataLayer if n.name],
action='append', required=False, dest='layers',
                           help='Restrict results to the given layer(s) (may be repeated)')
_add_api_output_arguments(parser)
def run(self, args: NominatimArgs) -> int:
if args.format == 'debug':
loglib.set_log_output('text')
api = napi.NominatimAPI(args.project_dir)
result = api.reverse(napi.Point(args.lon, args.lat),
max_rank=zoom_to_rank(args.zoom or 18),
layers=args.get_layers(napi.DataLayer.ADDRESS | napi.DataLayer.POI),
address_details=True, # needed for display name
geometry_output=args.get_geometry_output(),
geometry_simplification=args.polygon_threshold)
if args.format == 'debug':
print(loglib.get_and_disable())
return 0
if result:
result.localize(args.get_locales(api.config.DEFAULT_LANGUAGE))
output = api_output.format_result(
napi.ReverseResults([result]),
args.format,
{'extratags': args.extratags,
'namedetails': args.namedetails,
'addressdetails': args.addressdetails})
if args.format != 'xml':
# reformat the result, so it is pretty-printed
json.dump(json.loads(output), sys.stdout, indent=4, ensure_ascii=False)
else:
sys.stdout.write(output)
sys.stdout.write('\n')
return 0
LOG.error("Unable to geocode.")
return 42
class APILookup:
"""\
Execute API lookup query.
This command works exactly the same as if calling the /lookup endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Lookup/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
group.add_argument('--id', metavar='OSMID',
action='append', required=True, dest='ids',
help='OSM id to lookup in format <NRW><id> (may be repeated)')
_add_api_output_arguments(parser)
def run(self, args: NominatimArgs) -> int:
if args.format == 'debug':
loglib.set_log_output('text')
api = napi.NominatimAPI(args.project_dir)
        places = [napi.OsmID(o[0], int(o[1:])) for o in args.ids]
        results = api.lookup(places,
                             address_details=True, # needed for display name
                             geometry_output=args.get_geometry_output(),
                             geometry_simplification=args.polygon_threshold or 0.0)
        if args.format == 'debug':
            print(loglib.get_and_disable())
            return 0
for result in results:
result.localize(args.get_locales(api.config.DEFAULT_LANGUAGE))
output = api_output.format_result(
results,
args.format,
{'extratags': args.extratags,
'namedetails': args.namedetails,
'addressdetails': args.addressdetails})
if args.format != 'xml':
# reformat the result, so it is pretty-printed
json.dump(json.loads(output), sys.stdout, indent=4, ensure_ascii=False)
else:
sys.stdout.write(output)
sys.stdout.write('\n')
return 0
class APIDetails:
"""\
Execute API details query.
This command works exactly the same as if calling the /details endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Details/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Query arguments')
objs = group.add_mutually_exclusive_group(required=True)
objs.add_argument('--node', '-n', type=int,
help="Look up the OSM node with the given ID.")
objs.add_argument('--way', '-w', type=int,
help="Look up the OSM way with the given ID.")
objs.add_argument('--relation', '-r', type=int,
help="Look up the OSM relation with the given ID.")
objs.add_argument('--place_id', '-p', type=int,
help='Database internal identifier of the OSM object to look up')
group.add_argument('--class', dest='object_class',
help=("Class type to disambiguated multiple entries "
"of the same object."))
group = parser.add_argument_group('Output arguments')
group.add_argument('--addressdetails', action='store_true',
help='Include a breakdown of the address into elements')
group.add_argument('--keywords', action='store_true',
help='Include a list of name keywords and address keywords')
group.add_argument('--linkedplaces', action='store_true',
help='Include a details of places that are linked with this one')
group.add_argument('--hierarchy', action='store_true',
help='Include details of places lower in the address hierarchy')
group.add_argument('--group_hierarchy', action='store_true',
help='Group the places by type')
group.add_argument('--polygon_geojson', action='store_true',
help='Include geometry of result')
group.add_argument('--lang', '--accept-language', metavar='LANGS',
help='Preferred language order for presenting search results')
def run(self, args: NominatimArgs) -> int:
place: napi.PlaceRef
if args.node:
place = napi.OsmID('N', args.node, args.object_class)
elif args.way:
place = napi.OsmID('W', args.way, args.object_class)
elif args.relation:
place = napi.OsmID('R', args.relation, args.object_class)
else:
assert args.place_id is not None
place = napi.PlaceID(args.place_id)
api = napi.NominatimAPI(args.project_dir)
result = api.details(place,
address_details=args.addressdetails,
linked_places=args.linkedplaces,
parented_places=args.hierarchy,
keywords=args.keywords,
geometry_output=napi.GeometryFormat.GEOJSON
if args.polygon_geojson
else napi.GeometryFormat.NONE)
if result:
locales = args.get_locales(api.config.DEFAULT_LANGUAGE)
result.localize(locales)
output = api_output.format_result(
result,
'json',
{'locales': locales,
'group_hierarchy': args.group_hierarchy})
# reformat the result, so it is pretty-printed
json.dump(json.loads(output), sys.stdout, indent=4, ensure_ascii=False)
sys.stdout.write('\n')
return 0
LOG.error("Object not found in database.")
return 42
class APIStatus:
"""
Execute API status query.
This command works exactly the same as if calling the /status endpoint on
the web API. See the online documentation for more details on the
various parameters:
https://nominatim.org/release-docs/latest/api/Status/
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
formats = api_output.list_formats(napi.StatusResult)
group = parser.add_argument_group('API parameters')
group.add_argument('--format', default=formats[0], choices=formats,
help='Format of result')
def run(self, args: NominatimArgs) -> int:
status = napi.NominatimAPI(args.project_dir).status()
print(api_output.format_result(status, args.format, {}))
return 0
| 16,244 | 40.335878 | 97 | py |
Nominatim | Nominatim-master/nominatim/clicmd/admin.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'admin' subcommand.
"""
import logging
import argparse
from nominatim.tools.exec_utils import run_legacy_script
from nominatim.clicmd.args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid eventually unused imports.
# pylint: disable=E0012,C0415
LOG = logging.getLogger()
class AdminFuncs:
"""\
Analyse and maintain the database.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Admin tasks')
objs = group.add_mutually_exclusive_group(required=True)
objs.add_argument('--warm', action='store_true',
help='Warm database caches for search and reverse queries')
objs.add_argument('--check-database', action='store_true',
help='Check that the database is complete and operational')
objs.add_argument('--migrate', action='store_true',
help='Migrate the database to a new software version')
objs.add_argument('--analyse-indexing', action='store_true',
help='Print performance analysis of the indexing process')
objs.add_argument('--collect-os-info', action="store_true",
help="Generate a report about the host system information")
group = parser.add_argument_group('Arguments for cache warming')
group.add_argument('--search-only', action='store_const', dest='target',
const='search',
help="Only pre-warm tables for search queries")
group.add_argument('--reverse-only', action='store_const', dest='target',
const='reverse',
help="Only pre-warm tables for reverse queries")
        group = parser.add_argument_group('Arguments for index analysis')
mgroup = group.add_mutually_exclusive_group()
mgroup.add_argument('--osm-id', type=str,
help='Analyse indexing of the given OSM object')
mgroup.add_argument('--place-id', type=int,
help='Analyse indexing of the given Nominatim object')
def run(self, args: NominatimArgs) -> int:
if args.warm:
return self._warm(args)
if args.check_database:
LOG.warning('Checking database')
from ..tools import check_database
return check_database.check_database(args.config)
if args.analyse_indexing:
LOG.warning('Analysing performance of indexing function')
from ..tools import admin
admin.analyse_indexing(args.config, osm_id=args.osm_id, place_id=args.place_id)
return 0
if args.migrate:
LOG.warning('Checking for necessary database migrations')
from ..tools import migration
return migration.migrate(args.config, args)
if args.collect_os_info:
LOG.warning("Reporting System Information")
from ..tools import collect_os_info
collect_os_info.report_system_information(args.config)
return 0
return 1
def _warm(self, args: NominatimArgs) -> int:
LOG.warning('Warming database caches')
params = ['warm.php']
if args.target == 'reverse':
params.append('--reverse-only')
if args.target == 'search':
params.append('--search-only')
return run_legacy_script(*params, config=args.config)
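# Illustrative invocations of the mutually exclusive admin tasks above:
#
#   nominatim admin --check-database
#   nominatim admin --warm --search-only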
| 3,803 | 40.347826 | 91 | py |
Nominatim | Nominatim-master/nominatim/clicmd/__init__.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Subcommand definitions for the command-line tool.
"""
# mypy and pylint disagree about the style of explicit exports,
# see https://github.com/PyCQA/pylint/issues/6006.
# pylint: disable=useless-import-alias
from nominatim.clicmd.setup import SetupAll as SetupAll
from nominatim.clicmd.replication import UpdateReplication as UpdateReplication
from nominatim.clicmd.api import (APISearch as APISearch,
APIReverse as APIReverse,
APILookup as APILookup,
APIDetails as APIDetails,
APIStatus as APIStatus)
from nominatim.clicmd.index import UpdateIndex as UpdateIndex
from nominatim.clicmd.refresh import UpdateRefresh as UpdateRefresh
from nominatim.clicmd.add_data import UpdateAddData as UpdateAddData
from nominatim.clicmd.admin import AdminFuncs as AdminFuncs
from nominatim.clicmd.freeze import SetupFreeze as SetupFreeze
from nominatim.clicmd.special_phrases import ImportSpecialPhrases as ImportSpecialPhrases
| 1,262 | 45.777778 | 89 | py |
Nominatim | Nominatim-master/nominatim/clicmd/freeze.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'freeze' subcommand.
"""
import argparse
from nominatim.db.connection import connect
from nominatim.clicmd.args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid eventually unused imports.
# pylint: disable=E0012,C0415
class SetupFreeze:
"""\
Make database read-only.
    About half of the data in the Nominatim database is kept only to be able to
keep the data up-to-date with new changes made in OpenStreetMap. This
command drops all this data and only keeps the part needed for geocoding
itself.
This command has the same effect as the `--no-updates` option for imports.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
pass # No options
def run(self, args: NominatimArgs) -> int:
from ..tools import freeze
with connect(args.config.get_libpq_dsn()) as conn:
freeze.drop_update_tables(conn)
freeze.drop_flatnode_file(args.config.get_path('FLATNODE_FILE'))
return 0
| 1,298 | 28.522727 | 78 | py |
Nominatim | Nominatim-master/nominatim/clicmd/index.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'index' subcommand.
"""
import argparse
import psutil
from nominatim.db import status
from nominatim.db.connection import connect
from nominatim.clicmd.args import NominatimArgs
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to avoid eventually unused imports.
# pylint: disable=E0012,C0415
class UpdateIndex:
"""\
Reindex all new and modified data.
Indexing is the process of computing the address and search terms for
the places in the database. Every time data is added or changed, indexing
needs to be run. Imports and replication updates automatically take care
    of indexing. For other cases, this command allows you to run indexing manually.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Filter arguments')
group.add_argument('--boundaries-only', action='store_true',
help="""Index only administrative boundaries.""")
group.add_argument('--no-boundaries', action='store_true',
help="""Index everything except administrative boundaries.""")
group.add_argument('--minrank', '-r', type=int, metavar='RANK', default=0,
help='Minimum/starting rank')
group.add_argument('--maxrank', '-R', type=int, metavar='RANK', default=30,
help='Maximum/finishing rank')
def run(self, args: NominatimArgs) -> int:
from ..indexer.indexer import Indexer
from ..tokenizer import factory as tokenizer_factory
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer,
args.threads or psutil.cpu_count() or 1)
if not args.no_boundaries:
indexer.index_boundaries(args.minrank, args.maxrank)
if not args.boundaries_only:
indexer.index_by_rank(args.minrank, args.maxrank)
if not args.no_boundaries and not args.boundaries_only \
and args.minrank == 0 and args.maxrank == 30:
with connect(args.config.get_libpq_dsn()) as conn:
status.set_indexed(conn, True)
return 0
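# Illustrative invocation (rank values are hypothetical): reindex only
# administrative boundaries up to rank 25:
#
#   nominatim index --boundaries-only --maxrank 25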
| 2,502 | 36.924242 | 89 | py |
Nominatim | Nominatim-master/nominatim/clicmd/replication.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Implementation of the 'replication' sub-command.
"""
from typing import Optional
import argparse
import datetime as dt
import logging
import socket
import time
from nominatim.db import status
from nominatim.db.connection import connect
from nominatim.errors import UsageError
from nominatim.clicmd.args import NominatimArgs
LOG = logging.getLogger()
# Do not repeat documentation of subcommand classes.
# pylint: disable=C0111
# Using non-top-level imports to make pyosmium optional for replication only.
# pylint: disable=C0415
class UpdateReplication:
"""\
Update the database using an online replication service.
An OSM replication service is an online service that provides regular
    updates (OSM diff files) for the planet or for regions of it. The OSMF
provides the primary replication service for the full planet at
https://planet.osm.org/replication/ but there are other providers of
extracts of OSM data who provide such a service as well.
    This sub-command allows you to set up such a replication service and download
and import updates at regular intervals. You need to call '--init' once to
set up the process or whenever you change the replication configuration
parameters. Without any arguments, the sub-command will go into a loop and
continuously apply updates as they become available. Giving `--once` just
downloads and imports the next batch of updates.
"""
def add_args(self, parser: argparse.ArgumentParser) -> None:
group = parser.add_argument_group('Arguments for initialisation')
group.add_argument('--init', action='store_true',
help='Initialise the update process')
group.add_argument('--no-update-functions', dest='update_functions',
action='store_false',
help="Do not update the trigger function to "
"support differential updates (EXPERT)")
group = parser.add_argument_group('Arguments for updates')
group.add_argument('--check-for-updates', action='store_true',
help='Check if new updates are available and exit')
group.add_argument('--once', action='store_true',
help="Download and apply updates only once. When "
"not set, updates are continuously applied")
group.add_argument('--catch-up', action='store_true',
help="Download and apply updates until no new "
"data is available on the server")
group.add_argument('--no-index', action='store_false', dest='do_index',
help=("Do not index the new data. Only usable "
"together with --once"))
group.add_argument('--osm2pgsql-cache', metavar='SIZE', type=int,
help='Size of cache to be used by osm2pgsql (in MB)')
group = parser.add_argument_group('Download parameters')
group.add_argument('--socket-timeout', dest='socket_timeout', type=int, default=60,
help='Set timeout for file downloads')
def _init_replication(self, args: NominatimArgs) -> int:
from ..tools import replication, refresh
LOG.warning("Initialising replication updates")
with connect(args.config.get_libpq_dsn()) as conn:
replication.init_replication(conn, base_url=args.config.REPLICATION_URL,
socket_timeout=args.socket_timeout)
if args.update_functions:
LOG.warning("Create functions")
refresh.create_functions(conn, args.config, True, False)
return 0
def _check_for_updates(self, args: NominatimArgs) -> int:
from ..tools import replication
with connect(args.config.get_libpq_dsn()) as conn:
return replication.check_for_updates(conn, base_url=args.config.REPLICATION_URL,
socket_timeout=args.socket_timeout)
def _report_update(self, batchdate: dt.datetime,
start_import: dt.datetime,
start_index: Optional[dt.datetime]) -> None:
def round_time(delta: dt.timedelta) -> dt.timedelta:
return dt.timedelta(seconds=int(delta.total_seconds()))
end = dt.datetime.now(dt.timezone.utc)
LOG.warning("Update completed. Import: %s. %sTotal: %s. Remaining backlog: %s.",
round_time((start_index or end) - start_import),
f"Indexing: {round_time(end - start_index)} " if start_index else '',
round_time(end - start_import),
round_time(end - batchdate))
def _compute_update_interval(self, args: NominatimArgs) -> int:
if args.catch_up:
return 0
update_interval = args.config.get_int('REPLICATION_UPDATE_INTERVAL')
# Sanity check to not overwhelm the Geofabrik servers.
if 'download.geofabrik.de' in args.config.REPLICATION_URL\
and update_interval < 86400:
LOG.fatal("Update interval too low for download.geofabrik.de.\n"
"Please check install documentation "
"(https://nominatim.org/release-docs/latest/admin/Import-and-Update#"
"setting-up-the-update-process).")
raise UsageError("Invalid replication update interval setting.")
return update_interval
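    # The interval is read from NOMINATIM_REPLICATION_UPDATE_INTERVAL (in
    # seconds); for example, Geofabrik extracts require at least 86400 (one
    # day), which is exactly what the sanity check above enforces.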
def _update(self, args: NominatimArgs) -> None:
# pylint: disable=too-many-locals
from ..tools import replication
from ..indexer.indexer import Indexer
from ..tokenizer import factory as tokenizer_factory
update_interval = self._compute_update_interval(args)
params = args.osm2pgsql_options(default_cache=2000, default_threads=1)
params.update(base_url=args.config.REPLICATION_URL,
update_interval=update_interval,
import_file=args.project_dir / 'osmosischange.osc',
max_diff_size=args.config.get_int('REPLICATION_MAX_DIFF'),
indexed_only=not args.once)
if not args.once:
if not args.do_index:
LOG.fatal("Indexing cannot be disabled when running updates continuously.")
raise UsageError("Bad argument '--no-index'.")
recheck_interval = args.config.get_int('REPLICATION_RECHECK_INTERVAL')
tokenizer = tokenizer_factory.get_tokenizer_for_db(args.config)
indexer = Indexer(args.config.get_libpq_dsn(), tokenizer, args.threads or 1)
dsn = args.config.get_libpq_dsn()
while True:
start = dt.datetime.now(dt.timezone.utc)
state = replication.update(dsn, params, socket_timeout=args.socket_timeout)
with connect(dsn) as conn:
if state is not replication.UpdateState.NO_CHANGES:
status.log_status(conn, start, 'import')
batchdate, _, _ = status.get_status(conn)
conn.commit()
if state is not replication.UpdateState.NO_CHANGES and args.do_index:
index_start = dt.datetime.now(dt.timezone.utc)
indexer.index_full(analyse=False)
with connect(dsn) as conn:
status.set_indexed(conn, True)
status.log_status(conn, index_start, 'index')
conn.commit()
else:
index_start = None
            if (state is replication.UpdateState.NO_CHANGES and args.catch_up) \
                    or update_interval > 40*60:
while indexer.has_pending():
indexer.index_full(analyse=False)
if LOG.isEnabledFor(logging.WARNING):
assert batchdate is not None
self._report_update(batchdate, start, index_start)
if args.once or (args.catch_up and state is replication.UpdateState.NO_CHANGES):
break
if state is replication.UpdateState.NO_CHANGES:
LOG.warning("No new changes. Sleeping for %d sec.", recheck_interval)
time.sleep(recheck_interval)
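        # Loop exits: '--once' always stops after the first pass, '--catch-up'
        # stops as soon as the server reports no more changes; otherwise the
        # loop sleeps and polls indefinitely.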
def run(self, args: NominatimArgs) -> int:
socket.setdefaulttimeout(args.socket_timeout)
if args.init:
return self._init_replication(args)
if args.check_for_updates:
return self._check_for_updates(args)
self._update(args)
return 0
| 8,838 | 42.975124 | 92 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/base.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Abstract class definitions for tokenizers. These base classes are here
mainly for documentation purposes.
"""
from abc import ABC, abstractmethod
from typing import List, Tuple, Dict, Any, Optional, Iterable
from pathlib import Path
from nominatim.config import Configuration
from nominatim.data.place_info import PlaceInfo
from nominatim.typing import Protocol
class AbstractAnalyzer(ABC):
""" The analyzer provides the functions for analysing names and building
the token database.
        Analyzers are instantiated on a per-thread basis. Access to global data
structures must be synchronised accordingly.
"""
def __enter__(self) -> 'AbstractAnalyzer':
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
@abstractmethod
def close(self) -> None:
""" Free all resources used by the analyzer.
"""
@abstractmethod
def get_word_token_info(self, words: List[str]) -> List[Tuple[str, str, int]]:
""" Return token information for the given list of words.
The function is used for testing and debugging only
and does not need to be particularly efficient.
Arguments:
words: A list of words to look up the tokens for.
                       If a word starts with # it is assumed to be a full name,
                       otherwise it is assumed to be a partial term.
Returns:
The function returns the list of all tuples that could be
found for the given words. Each list entry is a tuple of
(original word, word token, word id).
"""
@abstractmethod
def normalize_postcode(self, postcode: str) -> str:
""" Convert the postcode to its standardized form.
This function must yield exactly the same result as the SQL function
`token_normalized_postcode()`.
Arguments:
postcode: The postcode to be normalized.
Returns:
The given postcode after normalization.
"""
@abstractmethod
def update_postcodes_from_db(self) -> None:
""" Update the tokenizer's postcode tokens from the current content
of the `location_postcode` table.
"""
@abstractmethod
def update_special_phrases(self,
phrases: Iterable[Tuple[str, str, str, str]],
should_replace: bool) -> None:
""" Update the tokenizer's special phrase tokens from the given
list of special phrases.
Arguments:
phrases: The new list of special phrases. Each entry is
a tuple of (phrase, class, type, operator).
should_replace: If true, replace the current list of phrases.
When false, just add the given phrases to the
ones that already exist.
"""
@abstractmethod
def add_country_names(self, country_code: str, names: Dict[str, str]) -> None:
""" Add the given names to the tokenizer's list of country tokens.
Arguments:
country_code: two-letter country code for the country the names
refer to.
names: Dictionary of name type to name.
"""
@abstractmethod
def process_place(self, place: PlaceInfo) -> Any:
""" Extract tokens for the given place and compute the
information to be handed to the PL/pgSQL processor for building
the search index.
Arguments:
place: Place information retrieved from the database.
Returns:
A JSON-serialisable structure that will be handed into
the database via the `token_info` field.
"""
class AbstractTokenizer(ABC):
""" The tokenizer instance is the central instance of the tokenizer in
the system. There will only be a single instance of the tokenizer
active at any time.
"""
@abstractmethod
def init_new_db(self, config: Configuration, init_db: bool = True) -> None:
""" Set up a new tokenizer for the database.
The function should copy all necessary data into the project
directory or save it in the property table to make sure that
the tokenizer remains stable over updates.
Arguments:
config: Read-only object with configuration options.
init_db: When set to False, then initialisation of database
tables should be skipped. This option is only required for
migration purposes and can be safely ignored by custom
tokenizers.
TODO: can we move the init_db parameter somewhere else?
"""
@abstractmethod
def init_from_project(self, config: Configuration) -> None:
""" Initialise the tokenizer from an existing database setup.
The function should load all previously saved configuration from
the project directory and/or the property table.
Arguments:
config: Read-only object with configuration options.
"""
@abstractmethod
def finalize_import(self, config: Configuration) -> None:
""" This function is called at the very end of an import when all
data has been imported and indexed. The tokenizer may create
at this point any additional indexes and data structures needed
during query time.
Arguments:
config: Read-only object with configuration options.
"""
@abstractmethod
def update_sql_functions(self, config: Configuration) -> None:
""" Update the SQL part of the tokenizer. This function is called
automatically on migrations or may be called explicitly by the
user through the `nominatim refresh --functions` command.
The tokenizer must only update the code of the tokenizer. The
data structures or data itself must not be changed by this function.
Arguments:
config: Read-only object with configuration options.
"""
@abstractmethod
def check_database(self, config: Configuration) -> Optional[str]:
""" Check that the database is set up correctly and ready for being
queried.
Arguments:
config: Read-only object with configuration options.
Returns:
If an issue was found, return an error message with the
description of the issue as well as hints for the user on
how to resolve the issue. If everything is okay, return `None`.
"""
@abstractmethod
def update_statistics(self) -> None:
""" Recompute any tokenizer statistics necessary for efficient lookup.
This function is meant to be called from time to time by the user
to improve performance. However, the tokenizer must not depend on
it to be called in order to work.
"""
@abstractmethod
def update_word_tokens(self) -> None:
""" Do house-keeping on the tokenizers internal data structures.
Remove unused word tokens, resort data etc.
"""
@abstractmethod
def name_analyzer(self) -> AbstractAnalyzer:
""" Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
be used accordingly:
```
with tokenizer.name_analyzer() as analyzer:
analyser.tokenize()
```
When used outside the with construct, the caller must ensure to
call the close() function before destructing the analyzer.
"""
class TokenizerModule(Protocol):
""" Interface that must be exported by modules that implement their
own tokenizer.
"""
def create(self, dsn: str, data_dir: Path) -> AbstractTokenizer:
""" Factory for new tokenizers.
"""
| 8,402 | 33.438525 | 82 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/icu_tokenizer.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tokenizer implementing normalisation as used before Nominatim 4 but using
libICU instead of the PostgreSQL module.
"""
from typing import Optional, Sequence, List, Tuple, Mapping, Any, cast, \
Dict, Set, Iterable
import itertools
import json
import logging
from pathlib import Path
from textwrap import dedent
from nominatim.db.connection import connect, Connection, Cursor
from nominatim.config import Configuration
from nominatim.db.utils import CopyBuffer
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.data.place_info import PlaceInfo
from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.data.place_name import PlaceName
from nominatim.tokenizer.icu_token_analysis import ICUTokenAnalysis
from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer
DBCFG_TERM_NORMALIZATION = "tokenizer_term_normalization"
LOG = logging.getLogger()
def create(dsn: str, data_dir: Path) -> 'ICUTokenizer':
""" Create a new instance of the tokenizer provided by this module.
"""
return ICUTokenizer(dsn, data_dir)
class ICUTokenizer(AbstractTokenizer):
""" This tokenizer uses libICU to convert names and queries to ASCII.
Otherwise it uses the same algorithms and data structures as the
normalization routines in Nominatim 3.
"""
def __init__(self, dsn: str, data_dir: Path) -> None:
self.dsn = dsn
self.data_dir = data_dir
self.loader: Optional[ICURuleLoader] = None
def init_new_db(self, config: Configuration, init_db: bool = True) -> None:
""" Set up a new tokenizer for the database.
This copies all necessary data in the project directory to make
sure the tokenizer remains stable even over updates.
"""
self.loader = ICURuleLoader(config)
self._install_php(config.lib_dir.php, overwrite=True)
self._save_config()
if init_db:
self.update_sql_functions(config)
self._init_db_tables(config)
def init_from_project(self, config: Configuration) -> None:
""" Initialise the tokenizer from the project directory.
"""
self.loader = ICURuleLoader(config)
with connect(self.dsn) as conn:
self.loader.load_config_from_db(conn)
self._install_php(config.lib_dir.php, overwrite=False)
def finalize_import(self, config: Configuration) -> None:
""" Do any required postprocessing to make the tokenizer data ready
for use.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
def update_sql_functions(self, config: Configuration) -> None:
""" Reimport the SQL functions for this tokenizer.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql')
def check_database(self, config: Configuration) -> None:
""" Check that the tokenizer is set up correctly.
"""
# Will throw an error if there is an issue.
self.init_from_project(config)
def update_statistics(self) -> None:
""" Recompute frequencies for all name words.
"""
with connect(self.dsn) as conn:
if conn.table_exists('search_name'):
with conn.cursor() as cur:
cur.drop_table("word_frequencies")
LOG.info("Computing word frequencies")
cur.execute("""CREATE TEMP TABLE word_frequencies AS
SELECT unnest(name_vector) as id, count(*)
FROM search_name GROUP BY id""")
cur.execute("CREATE INDEX ON word_frequencies(id)")
LOG.info("Update word table with recomputed frequencies")
cur.execute("""UPDATE word
SET info = info || jsonb_build_object('count', count)
FROM word_frequencies WHERE word_id = id""")
cur.drop_table("word_frequencies")
conn.commit()
def _cleanup_housenumbers(self) -> None:
""" Remove unused house numbers.
"""
with connect(self.dsn) as conn:
if not conn.table_exists('search_name'):
return
with conn.cursor(name="hnr_counter") as cur:
cur.execute("""SELECT DISTINCT word_id, coalesce(info->>'lookup', word_token)
FROM word
WHERE type = 'H'
AND NOT EXISTS(SELECT * FROM search_name
WHERE ARRAY[word.word_id] && name_vector)
AND (char_length(coalesce(word, word_token)) > 6
OR coalesce(word, word_token) not similar to '\\d+')
""")
candidates = {token: wid for wid, token in cur}
with conn.cursor(name="hnr_counter") as cur:
cur.execute("""SELECT housenumber FROM placex
WHERE housenumber is not null
AND (char_length(housenumber) > 6
OR housenumber not similar to '\\d+')
""")
for row in cur:
for hnr in row[0].split(';'):
candidates.pop(hnr, None)
LOG.info("There are %s outdated housenumbers.", len(candidates))
LOG.debug("Outdated housenumbers: %s", candidates.keys())
if candidates:
with conn.cursor() as cur:
cur.execute("""DELETE FROM word WHERE word_id = any(%s)""",
(list(candidates.values()), ))
conn.commit()
def update_word_tokens(self) -> None:
""" Remove unused tokens.
"""
LOG.warning("Cleaning up housenumber tokens.")
self._cleanup_housenumbers()
LOG.warning("Tokenizer house-keeping done.")
def name_analyzer(self) -> 'ICUNameAnalyzer':
""" Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
be used accordingly:
```
with tokenizer.name_analyzer() as analyzer:
analyser.tokenize()
```
When used outside the with construct, the caller must ensure to
call the close() function before destructing the analyzer.
Analyzers are not thread-safe. You need to instantiate one per thread.
"""
assert self.loader is not None
return ICUNameAnalyzer(self.dsn, self.loader.make_sanitizer(),
self.loader.make_token_analysis())
def _install_php(self, phpdir: Path, overwrite: bool = True) -> None:
""" Install the php script for the tokenizer.
"""
assert self.loader is not None
php_file = self.data_dir / "tokenizer.php"
if not php_file.exists() or overwrite:
php_file.write_text(dedent(f"""\
<?php
@define('CONST_Max_Word_Frequency', 10000000);
@define('CONST_Term_Normalization_Rules', "{self.loader.normalization_rules}");
@define('CONST_Transliteration', "{self.loader.get_search_rules()}");
require_once('{phpdir}/tokenizer/icu_tokenizer.php');"""), encoding='utf-8')
def _save_config(self) -> None:
""" Save the configuration that needs to remain stable for the given
database as database properties.
"""
assert self.loader is not None
with connect(self.dsn) as conn:
self.loader.save_config_to_db(conn)
def _init_db_tables(self, config: Configuration) -> None:
""" Set up the word table and fill it with pre-computed word
frequencies.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer_tables.sql')
conn.commit()
class ICUNameAnalyzer(AbstractAnalyzer):
""" The ICU analyzer uses the ICU library for splitting names.
Each instance opens a connection to the database to request the
normalization.
"""
def __init__(self, dsn: str, sanitizer: PlaceSanitizer,
token_analysis: ICUTokenAnalysis) -> None:
self.conn: Optional[Connection] = connect(dsn).connection
self.conn.autocommit = True
self.sanitizer = sanitizer
self.token_analysis = token_analysis
self._cache = _TokenCache()
def close(self) -> None:
""" Free all resources used by the analyzer.
"""
if self.conn:
self.conn.close()
self.conn = None
def _search_normalized(self, name: str) -> str:
""" Return the search token transliteration of the given name.
"""
return cast(str, self.token_analysis.search.transliterate(name)).strip()
def _normalized(self, name: str) -> str:
""" Return the normalized version of the given name with all
non-relevant information removed.
"""
return cast(str, self.token_analysis.normalizer.transliterate(name)).strip()
def get_word_token_info(self, words: Sequence[str]) -> List[Tuple[str, str, int]]:
""" Return token information for the given list of words.
            If a word starts with # it is assumed to be a full name,
            otherwise it is assumed to be a partial name.
The function returns a list of tuples with
(original word, word token, word id).
The function is used for testing and debugging only
and not necessarily efficient.
"""
assert self.conn is not None
full_tokens = {}
partial_tokens = {}
for word in words:
if word.startswith('#'):
full_tokens[word] = self._search_normalized(word[1:])
else:
partial_tokens[word] = self._search_normalized(word)
with self.conn.cursor() as cur:
cur.execute("""SELECT word_token, word_id
FROM word WHERE word_token = ANY(%s) and type = 'W'
""", (list(full_tokens.values()),))
full_ids = {r[0]: r[1] for r in cur}
cur.execute("""SELECT word_token, word_id
FROM word WHERE word_token = ANY(%s) and type = 'w'""",
(list(partial_tokens.values()),))
part_ids = {r[0]: r[1] for r in cur}
return [(k, v, full_ids.get(v, None)) for k, v in full_tokens.items()] \
+ [(k, v, part_ids.get(v, None)) for k, v in partial_tokens.items()]
def normalize_postcode(self, postcode: str) -> str:
""" Convert the postcode to a standardized form.
This function must yield exactly the same result as the SQL function
'token_normalized_postcode()'.
"""
return postcode.strip().upper()
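        # e.g. normalize_postcode(' ab1 2cd ') -> 'AB1 2CD'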
def update_postcodes_from_db(self) -> None:
""" Update postcode tokens in the word table from the location_postcode
table.
"""
assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@postcode')
with self.conn.cursor() as cur:
# First get all postcode names currently in the word table.
cur.execute("SELECT DISTINCT word FROM word WHERE type = 'P'")
word_entries = set((entry[0] for entry in cur))
# Then compute the required postcode names from the postcode table.
needed_entries = set()
cur.execute("SELECT country_code, postcode FROM location_postcode")
for cc, postcode in cur:
info = PlaceInfo({'country_code': cc,
'class': 'place', 'type': 'postcode',
'address': {'postcode': postcode}})
address = self.sanitizer.process_names(info)[1]
for place in address:
if place.kind == 'postcode':
if analyzer is None:
postcode_name = place.name.strip().upper()
variant_base = None
else:
postcode_name = analyzer.get_canonical_id(place)
variant_base = place.get_attr("variant")
if variant_base:
needed_entries.add(f'{postcode_name}@{variant_base}')
else:
needed_entries.add(postcode_name)
break
# Now update the word table.
self._delete_unused_postcode_words(word_entries - needed_entries)
self._add_missing_postcode_words(needed_entries - word_entries)
def _delete_unused_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if tokens:
with self.conn.cursor() as cur:
cur.execute("DELETE FROM word WHERE type = 'P' and word = any(%s)",
(list(tokens), ))
def _add_missing_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if not tokens:
return
analyzer = self.token_analysis.analysis.get('@postcode')
terms = []
for postcode_name in tokens:
if '@' in postcode_name:
                term, variant = postcode_name.split('@', 1)
term = self._search_normalized(term)
if analyzer is None:
variants = [term]
else:
variants = analyzer.compute_variants(variant)
if term not in variants:
variants.append(term)
else:
variants = [self._search_normalized(postcode_name)]
terms.append((postcode_name, variants))
if terms:
with self.conn.cursor() as cur:
cur.execute_values("""SELECT create_postcode_word(pc, var)
FROM (VALUES %s) AS v(pc, var)""",
terms)
def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]],
should_replace: bool) -> None:
""" Replace the search index for special phrases with the new phrases.
            If `should_replace` is True, then the previous set of phrases will be
completely replaced. Otherwise the phrases are added to the
already existing ones.
"""
assert self.conn is not None
norm_phrases = set(((self._normalized(p[0]), p[1], p[2], p[3])
for p in phrases))
with self.conn.cursor() as cur:
# Get the old phrases.
existing_phrases = set()
cur.execute("SELECT word, info FROM word WHERE type = 'S'")
for word, info in cur:
existing_phrases.add((word, info['class'], info['type'],
info.get('op') or '-'))
added = self._add_special_phrases(cur, norm_phrases, existing_phrases)
if should_replace:
deleted = self._remove_special_phrases(cur, norm_phrases,
existing_phrases)
else:
deleted = 0
LOG.info("Total phrases: %s. Added: %s. Deleted: %s",
len(norm_phrases), added, deleted)
def _add_special_phrases(self, cursor: Cursor,
new_phrases: Set[Tuple[str, str, str, str]],
existing_phrases: Set[Tuple[str, str, str, str]]) -> int:
""" Add all phrases to the database that are not yet there.
"""
to_add = new_phrases - existing_phrases
added = 0
with CopyBuffer() as copystr:
for word, cls, typ, oper in to_add:
term = self._search_normalized(word)
if term:
copystr.add(term, 'S', word,
json.dumps({'class': cls, 'type': typ,
'op': oper if oper in ('in', 'near') else None}))
added += 1
copystr.copy_out(cursor, 'word',
columns=['word_token', 'type', 'word', 'info'])
return added
def _remove_special_phrases(self, cursor: Cursor,
new_phrases: Set[Tuple[str, str, str, str]],
existing_phrases: Set[Tuple[str, str, str, str]]) -> int:
""" Remove all phrases from the database that are no longer in the
new phrase list.
"""
to_delete = existing_phrases - new_phrases
if to_delete:
cursor.execute_values(
""" DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
WHERE type = 'S' and word = name
and info->>'class' = in_class and info->>'type' = in_type
and ((op = '-' and info->>'op' is null) or op = info->>'op')
""", to_delete)
return len(to_delete)
def add_country_names(self, country_code: str, names: Mapping[str, str]) -> None:
""" Add default names for the given country to the search index.
"""
# Make sure any name preprocessing for country names applies.
info = PlaceInfo({'name': names, 'country_code': country_code,
'rank_address': 4, 'class': 'boundary',
'type': 'administrative'})
self._add_country_full_names(country_code,
self.sanitizer.process_names(info)[0],
internal=True)
def _add_country_full_names(self, country_code: str, names: Sequence[PlaceName],
internal: bool = False) -> None:
""" Add names for the given country from an already sanitized
name list.
"""
assert self.conn is not None
word_tokens = set()
for name in names:
norm_name = self._search_normalized(name.name)
if norm_name:
word_tokens.add(norm_name)
with self.conn.cursor() as cur:
# Get existing names
cur.execute("""SELECT word_token, coalesce(info ? 'internal', false) as is_internal
FROM word
WHERE type = 'C' and word = %s""",
(country_code, ))
# internal/external names
existing_tokens: Dict[bool, Set[str]] = {True: set(), False: set()}
for word in cur:
existing_tokens[word[1]].add(word[0])
# Delete names that no longer exist.
gone_tokens = existing_tokens[internal] - word_tokens
if internal:
gone_tokens.update(existing_tokens[False] & word_tokens)
if gone_tokens:
cur.execute("""DELETE FROM word
USING unnest(%s) as token
WHERE type = 'C' and word = %s
and word_token = token""",
(list(gone_tokens), country_code))
# Only add those names that are not yet in the list.
new_tokens = word_tokens - existing_tokens[True]
if not internal:
new_tokens -= existing_tokens[False]
if new_tokens:
if internal:
sql = """INSERT INTO word (word_token, type, word, info)
(SELECT token, 'C', %s, '{"internal": "yes"}'
FROM unnest(%s) as token)
"""
else:
sql = """INSERT INTO word (word_token, type, word)
(SELECT token, 'C', %s
FROM unnest(%s) as token)
"""
cur.execute(sql, (country_code, list(new_tokens)))
def process_place(self, place: PlaceInfo) -> Mapping[str, Any]:
""" Determine tokenizer information about the given place.
Returns a JSON-serializable structure that will be handed into
the database via the token_info field.
"""
token_info = _TokenInfo()
names, address = self.sanitizer.process_names(place)
if names:
token_info.set_names(*self._compute_name_tokens(names))
if place.is_country():
assert place.country_code is not None
self._add_country_full_names(place.country_code, names)
if address:
self._process_place_address(token_info, address)
return token_info.to_dict()
def _process_place_address(self, token_info: '_TokenInfo',
address: Sequence[PlaceName]) -> None:
for item in address:
if item.kind == 'postcode':
token_info.set_postcode(self._add_postcode(item))
elif item.kind == 'housenumber':
token_info.add_housenumber(*self._compute_housenumber_token(item))
elif item.kind == 'street':
token_info.add_street(self._retrieve_full_tokens(item.name))
elif item.kind == 'place':
if not item.suffix:
token_info.add_place(self._compute_partial_tokens(item.name))
elif not item.kind.startswith('_') and not item.suffix and \
item.kind not in ('country', 'full', 'inclusion'):
token_info.add_address_term(item.kind, self._compute_partial_tokens(item.name))
def _compute_housenumber_token(self, hnr: PlaceName) -> Tuple[Optional[int], Optional[str]]:
""" Normalize the housenumber and return the word token and the
canonical form.
"""
assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@housenumber')
result: Tuple[Optional[int], Optional[str]] = (None, None)
if analyzer is None:
# When no custom analyzer is set, simply normalize and transliterate
norm_name = self._search_normalized(hnr.name)
if norm_name:
result = self._cache.housenumbers.get(norm_name, result)
if result[0] is None:
with self.conn.cursor() as cur:
hid = cur.scalar("SELECT getorcreate_hnr_id(%s)", (norm_name, ))
result = hid, norm_name
self._cache.housenumbers[norm_name] = result
else:
# Otherwise use the analyzer to determine the canonical name.
# Per convention we use the first variant as the 'lookup name', the
# name that gets saved in the housenumber field of the place.
word_id = analyzer.get_canonical_id(hnr)
if word_id:
result = self._cache.housenumbers.get(word_id, result)
if result[0] is None:
variants = analyzer.compute_variants(word_id)
if variants:
with self.conn.cursor() as cur:
hid = cur.scalar("SELECT create_analyzed_hnr_id(%s, %s)",
(word_id, list(variants)))
result = hid, variants[0]
self._cache.housenumbers[word_id] = result
return result
def _compute_partial_tokens(self, name: str) -> List[int]:
""" Normalize the given term, split it into partial words and return
            the token list for them.
"""
assert self.conn is not None
norm_name = self._search_normalized(name)
tokens = []
need_lookup = []
for partial in norm_name.split():
token = self._cache.partials.get(partial)
if token:
tokens.append(token)
else:
need_lookup.append(partial)
if need_lookup:
with self.conn.cursor() as cur:
cur.execute("""SELECT word, getorcreate_partial_word(word)
FROM unnest(%s) word""",
(need_lookup, ))
for partial, token in cur:
assert token is not None
tokens.append(token)
self._cache.partials[partial] = token
return tokens
def _retrieve_full_tokens(self, name: str) -> List[int]:
""" Get the full name token for the given name, if it exists.
The name is only retrieved for the standard analyser.
"""
assert self.conn is not None
norm_name = self._search_normalized(name)
# return cached if possible
if norm_name in self._cache.fulls:
return self._cache.fulls[norm_name]
with self.conn.cursor() as cur:
cur.execute("SELECT word_id FROM word WHERE word_token = %s and type = 'W'",
(norm_name, ))
full = [row[0] for row in cur]
self._cache.fulls[norm_name] = full
return full
def _compute_name_tokens(self, names: Sequence[PlaceName]) -> Tuple[Set[int], Set[int]]:
""" Computes the full name and partial name tokens for the given
            list of names.
"""
assert self.conn is not None
full_tokens: Set[int] = set()
partial_tokens: Set[int] = set()
for name in names:
analyzer_id = name.get_attr('analyzer')
analyzer = self.token_analysis.get_analyzer(analyzer_id)
word_id = analyzer.get_canonical_id(name)
if analyzer_id is None:
token_id = word_id
else:
token_id = f'{word_id}@{analyzer_id}'
full, part = self._cache.names.get(token_id, (None, None))
if full is None:
variants = analyzer.compute_variants(word_id)
if not variants:
continue
with self.conn.cursor() as cur:
cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)",
(token_id, variants))
full, part = cast(Tuple[int, List[int]], cur.fetchone())
self._cache.names[token_id] = (full, part)
assert part is not None
full_tokens.add(full)
partial_tokens.update(part)
return full_tokens, partial_tokens
def _add_postcode(self, item: PlaceName) -> Optional[str]:
""" Make sure the normalized postcode is present in the word table.
"""
assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@postcode')
if analyzer is None:
postcode_name = item.name.strip().upper()
variant_base = None
else:
postcode_name = analyzer.get_canonical_id(item)
variant_base = item.get_attr("variant")
if variant_base:
postcode = f'{postcode_name}@{variant_base}'
else:
postcode = postcode_name
if postcode not in self._cache.postcodes:
term = self._search_normalized(postcode_name)
if not term:
return None
variants = {term}
if analyzer is not None and variant_base:
variants.update(analyzer.compute_variants(variant_base))
with self.conn.cursor() as cur:
cur.execute("SELECT create_postcode_word(%s, %s)",
(postcode, list(variants)))
self._cache.postcodes.add(postcode)
return postcode_name
class _TokenInfo:
""" Collect token information to be sent back to the database.
"""
def __init__(self) -> None:
self.names: Optional[str] = None
self.housenumbers: Set[str] = set()
self.housenumber_tokens: Set[int] = set()
self.street_tokens: Optional[Set[int]] = None
self.place_tokens: Set[int] = set()
self.address_tokens: Dict[str, str] = {}
self.postcode: Optional[str] = None
def _mk_array(self, tokens: Iterable[Any]) -> str:
return f"{{{','.join((str(s) for s in tokens))}}}"
def to_dict(self) -> Dict[str, Any]:
""" Return the token information in database importable format.
"""
out: Dict[str, Any] = {}
if self.names:
out['names'] = self.names
if self.housenumbers:
out['hnr'] = ';'.join(self.housenumbers)
out['hnr_tokens'] = self._mk_array(self.housenumber_tokens)
if self.street_tokens is not None:
out['street'] = self._mk_array(self.street_tokens)
if self.place_tokens:
out['place'] = self._mk_array(self.place_tokens)
if self.address_tokens:
out['addr'] = self.address_tokens
if self.postcode:
out['postcode'] = self.postcode
return out
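        # Illustrative output (all values made up):
        #   {'names': '{1,2,3}', 'hnr': '12;12a', 'hnr_tokens': '{7,8}',
        #    'street': '{9}', 'addr': {'city': '{4,5}'}, 'postcode': '12345'}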
def set_names(self, fulls: Iterable[int], partials: Iterable[int]) -> None:
""" Adds token information for the normalised names.
"""
self.names = self._mk_array(itertools.chain(fulls, partials))
def add_housenumber(self, token: Optional[int], hnr: Optional[str]) -> None:
""" Extract housenumber information from a list of normalised
housenumbers.
"""
if token:
assert hnr is not None
self.housenumbers.add(hnr)
self.housenumber_tokens.add(token)
def add_street(self, tokens: Iterable[int]) -> None:
""" Add addr:street match terms.
"""
if self.street_tokens is None:
self.street_tokens = set()
self.street_tokens.update(tokens)
def add_place(self, tokens: Iterable[int]) -> None:
""" Add addr:place search and match terms.
"""
self.place_tokens.update(tokens)
def add_address_term(self, key: str, partials: Iterable[int]) -> None:
""" Add additional address terms.
"""
if partials:
self.address_tokens[key] = self._mk_array(partials)
def set_postcode(self, postcode: Optional[str]) -> None:
""" Set the postcode to the given one.
"""
self.postcode = postcode
class _TokenCache:
""" Cache for token information to avoid repeated database queries.
This cache is not thread-safe and needs to be instantiated per
analyzer.
"""
def __init__(self) -> None:
self.names: Dict[str, Tuple[int, List[int]]] = {}
self.partials: Dict[str, int] = {}
self.fulls: Dict[str, List[int]] = {}
self.postcodes: Set[str] = set()
self.housenumbers: Dict[str, Tuple[Optional[int], Optional[str]]] = {}
| 31,837 | 38.113022 | 96 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/place_sanitizer.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Handler for cleaning name and address tags in place information before it
is handed to the token analysis.
"""
from typing import Optional, List, Mapping, Sequence, Callable, Any, Tuple
from nominatim.errors import UsageError
from nominatim.config import Configuration
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
from nominatim.tokenizer.sanitizers.base import SanitizerHandler, ProcessInfo
from nominatim.data.place_name import PlaceName
from nominatim.data.place_info import PlaceInfo
class PlaceSanitizer:
""" Controller class which applies sanitizer functions on the place
names and address before they are used by the token analysers.
"""
def __init__(self, rules: Optional[Sequence[Mapping[str, Any]]],
config: Configuration) -> None:
self.handlers: List[Callable[[ProcessInfo], None]] = []
if rules:
for func in rules:
if 'step' not in func:
raise UsageError("Sanitizer rule is missing the 'step' attribute.")
if not isinstance(func['step'], str):
raise UsageError("'step' attribute must be a simple string.")
module: SanitizerHandler = \
config.load_plugin_module(func['step'], 'nominatim.tokenizer.sanitizers')
self.handlers.append(module.create(SanitizerConfig(func)))
def process_names(self, place: PlaceInfo) -> Tuple[List[PlaceName], List[PlaceName]]:
""" Extract a sanitized list of names and address parts from the
given place. The function returns a tuple
(list of names, list of address names)
"""
obj = ProcessInfo(place)
for func in self.handlers:
func(obj)
return obj.names, obj.address
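# Usage sketch (the step names refer to sanitizer modules shipped with
# Nominatim; treat the exact list as illustrative):
#
#   sanitizer = PlaceSanitizer([{'step': 'split-name-list'},
#                               {'step': 'strip-brace-terms'}], config)
#   names, address = sanitizer.process_names(place_info)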
| 2,020 | 36.425926 | 93 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/factory.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for creating a tokenizer or initialising the right one for an
existing database.
A tokenizer is something that is bound to the lifetime of a database. It
can be chosen and configured before the initial import but then needs to
be used consistently when querying and updating the database.
This module provides the functions to create and configure a new tokenizer
as well as instantiating the appropriate tokenizer for updating an existing
database.
A tokenizer usually also includes PHP code for querying. The appropriate PHP
normalizer module is installed, when the tokenizer is created.
"""
from typing import Optional
import logging
import importlib
from pathlib import Path
from nominatim.errors import UsageError
from nominatim.db import properties
from nominatim.db.connection import connect
from nominatim.config import Configuration
from nominatim.tokenizer.base import AbstractTokenizer, TokenizerModule
LOG = logging.getLogger()
def _import_tokenizer(name: str) -> TokenizerModule:
""" Load the tokenizer.py module from project directory.
"""
src_file = Path(__file__).parent / (name + '_tokenizer.py')
if not src_file.is_file():
LOG.fatal("No tokenizer named '%s' available. "
"Check the setting of NOMINATIM_TOKENIZER.", name)
raise UsageError('Tokenizer not found')
return importlib.import_module('nominatim.tokenizer.' + name + '_tokenizer')
def create_tokenizer(config: Configuration, init_db: bool = True,
module_name: Optional[str] = None) -> AbstractTokenizer:
""" Create a new tokenizer as defined by the given configuration.
        The tokenizer data and code are copied into the 'tokenizer' directory
        of the project directory and the tokenizer is loaded from its new location.
"""
if module_name is None:
module_name = config.TOKENIZER
# Create the directory for the tokenizer data
assert config.project_dir is not None
basedir = config.project_dir / 'tokenizer'
if not basedir.exists():
basedir.mkdir()
elif not basedir.is_dir():
LOG.fatal("Tokenizer directory '%s' cannot be created.", basedir)
raise UsageError("Tokenizer setup failed.")
# Import and initialize the tokenizer.
tokenizer_module = _import_tokenizer(module_name)
tokenizer = tokenizer_module.create(config.get_libpq_dsn(), basedir)
tokenizer.init_new_db(config, init_db=init_db)
with connect(config.get_libpq_dsn()) as conn:
properties.set_property(conn, 'tokenizer', module_name)
return tokenizer
def get_tokenizer_for_db(config: Configuration) -> AbstractTokenizer:
""" Instantiate a tokenizer for an existing database.
The function looks up the appropriate tokenizer in the database
and initialises it.
"""
assert config.project_dir is not None
basedir = config.project_dir / 'tokenizer'
if not basedir.is_dir():
# Directory will be repopulated by tokenizer below.
basedir.mkdir()
with connect(config.get_libpq_dsn()) as conn:
name = properties.get_property(conn, 'tokenizer')
if name is None:
LOG.fatal("Tokenizer was not set up properly. Database property missing.")
raise UsageError('Cannot initialize tokenizer.')
tokenizer_module = _import_tokenizer(name)
tokenizer = tokenizer_module.create(config.get_libpq_dsn(), basedir)
tokenizer.init_from_project(config)
return tokenizer
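# Typical call sequence (sketch):
#
#   tokenizer = create_tokenizer(config)       # once, at initial import
#   tokenizer = get_tokenizer_for_db(config)   # afterwards, for updates
#
# Both return a ready-to-use AbstractTokenizer instance.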
| 3,695 | 34.883495 | 82 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/icu_token_analysis.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Container class collecting all components required to transform an OSM name
into a Nominatim token.
"""
from typing import Mapping, Optional, TYPE_CHECKING
from icu import Transliterator
from nominatim.tokenizer.token_analysis.base import Analyzer
if TYPE_CHECKING:
from typing import Any
from nominatim.tokenizer.icu_rule_loader import TokenAnalyzerRule # pylint: disable=cyclic-import
class ICUTokenAnalysis:
""" Container class collecting the transliterators and token analysis
modules for a single Analyser instance.
"""
def __init__(self, norm_rules: str, trans_rules: str,
analysis_rules: Mapping[Optional[str], 'TokenAnalyzerRule']):
self.normalizer = Transliterator.createFromRules("icu_normalization",
norm_rules)
trans_rules += ";[:Space:]+ > ' '"
self.to_ascii = Transliterator.createFromRules("icu_to_ascii",
trans_rules)
self.search = Transliterator.createFromRules("icu_search",
norm_rules + trans_rules)
self.analysis = {name: arules.create(self.normalizer, self.to_ascii)
for name, arules in analysis_rules.items()}
def get_analyzer(self, name: Optional[str]) -> Analyzer:
""" Return the given named analyzer. If no analyzer with that
name exists, return the default analyzer.
"""
return self.analysis.get(name) or self.analysis[None]
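    # Note: get_analyzer('de') falls back to the default analyzer (stored
    # under the key None) when no analyzer with id 'de' has been configured.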
| 1,784 | 39.568182 | 101 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/tokenizer/icu_rule_loader.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper class to create ICU rules from a configuration file.
"""
from typing import Mapping, Any, Dict, Optional
import io
import json
import logging
from icu import Transliterator
from nominatim.config import flatten_config_list, Configuration
from nominatim.db.properties import set_property, get_property
from nominatim.db.connection import Connection
from nominatim.errors import UsageError
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.tokenizer.icu_token_analysis import ICUTokenAnalysis
from nominatim.tokenizer.token_analysis.base import AnalysisModule, Analyzer
import nominatim.data.country_info
LOG = logging.getLogger()
DBCFG_IMPORT_NORM_RULES = "tokenizer_import_normalisation"
DBCFG_IMPORT_TRANS_RULES = "tokenizer_import_transliteration"
DBCFG_IMPORT_ANALYSIS_RULES = "tokenizer_import_analysis_rules"
def _get_section(rules: Mapping[str, Any], section: str) -> Any:
""" Get the section named 'section' from the rules. If the section does
not exist, raise a usage error with a meaningful message.
"""
if section not in rules:
LOG.fatal("Section '%s' not found in tokenizer config.", section)
raise UsageError("Syntax error in tokenizer configuration file.")
return rules[section]
class ICURuleLoader:
""" Compiler for ICU rules from a tokenizer configuration file.
"""
def __init__(self, config: Configuration) -> None:
self.config = config
rules = config.load_sub_configuration('icu_tokenizer.yaml',
config='TOKENIZER_CONFIG')
# Make sure country information is available to analyzers and sanitizers.
nominatim.data.country_info.setup_country_config(config)
self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
self.analysis_rules = _get_section(rules, 'token-analysis')
self._setup_analysis()
# Load optional sanitizer rule set.
self.sanitizer_rules = rules.get('sanitizers', [])
def load_config_from_db(self, conn: Connection) -> None:
""" Get previously saved parts of the configuration from the
database.
"""
rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
if rules is not None:
self.normalization_rules = rules
rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
if rules is not None:
self.transliteration_rules = rules
rules = get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES)
if rules:
self.analysis_rules = json.loads(rules)
else:
self.analysis_rules = []
self._setup_analysis()
def save_config_to_db(self, conn: Connection) -> None:
""" Save the part of the configuration that cannot be changed into
the database.
"""
set_property(conn, DBCFG_IMPORT_NORM_RULES, self.normalization_rules)
set_property(conn, DBCFG_IMPORT_TRANS_RULES, self.transliteration_rules)
set_property(conn, DBCFG_IMPORT_ANALYSIS_RULES, json.dumps(self.analysis_rules))
def make_sanitizer(self) -> PlaceSanitizer:
""" Create a place sanitizer from the configured rules.
"""
return PlaceSanitizer(self.sanitizer_rules, self.config)
def make_token_analysis(self) -> ICUTokenAnalysis:
""" Create a token analyser from the reviouly loaded rules.
"""
return ICUTokenAnalysis(self.normalization_rules,
self.transliteration_rules, self.analysis)
def get_search_rules(self) -> str:
""" Return the ICU rules to be used during search.
The rules combine normalization and transliteration.
"""
# First apply the normalization rules.
rules = io.StringIO()
rules.write(self.normalization_rules)
# Then add transliteration.
rules.write(self.transliteration_rules)
return rules.getvalue()
def get_normalization_rules(self) -> str:
""" Return rules for normalisation of a term.
"""
return self.normalization_rules
def get_transliteration_rules(self) -> str:
""" Return the rules for converting a string into its asciii representation.
"""
return self.transliteration_rules
def _setup_analysis(self) -> None:
""" Process the rules used for creating the various token analyzers.
"""
self.analysis: Dict[Optional[str], TokenAnalyzerRule] = {}
if not isinstance(self.analysis_rules, list):
raise UsageError("Configuration section 'token-analysis' must be a list.")
norm = Transliterator.createFromRules("rule_loader_normalization",
self.normalization_rules)
trans = Transliterator.createFromRules("rule_loader_transliteration",
self.transliteration_rules)
for section in self.analysis_rules:
name = section.get('id', None)
if name in self.analysis:
if name is None:
LOG.fatal("ICU tokenizer configuration has two default token analyzers.")
else:
LOG.fatal("ICU tokenizer configuration has two token "
"analyzers with id '%s'.", name)
raise UsageError("Syntax error in ICU tokenizer config.")
self.analysis[name] = TokenAnalyzerRule(section, norm, trans,
self.config)
@staticmethod
def _cfg_to_icu_rules(rules: Mapping[str, Any], section: str) -> str:
""" Load an ICU ruleset from the given section. If the section is a
simple string, it is interpreted as a file name and the rules are
loaded verbatim from the given file. The filename is expected to be
relative to the tokenizer rule file. If the section is a list then
each line is assumed to be a rule. All rules are concatenated and returned.
"""
content = _get_section(rules, section)
if content is None:
return ''
return ';'.join(flatten_config_list(content, section)) + ';'
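        # e.g. a 'normalization' section given as [":: lower ()", ":: NFC ()"]
        # becomes ':: lower ();:: NFC ();'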
class TokenAnalyzerRule:
""" Factory for a single analysis module. The class saves the configuration
and creates a new token analyzer on request.
"""
def __init__(self, rules: Mapping[str, Any],
normalizer: Any, transliterator: Any,
config: Configuration) -> None:
analyzer_name = _get_section(rules, 'analyzer')
if not analyzer_name or not isinstance(analyzer_name, str):
            raise UsageError("'analyzer' parameter needs to be a simple string")
self._analysis_mod: AnalysisModule = \
config.load_plugin_module(analyzer_name, 'nominatim.tokenizer.token_analysis')
self.config = self._analysis_mod.configure(rules, normalizer,
transliterator)
def create(self, normalizer: Any, transliterator: Any) -> Analyzer:
""" Create a new analyser instance for the given rule.
"""
return self._analysis_mod.create(normalizer, transliterator, self.config)
| 7,603 | 37.598985 | 93 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/legacy_tokenizer.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tokenizer implementing normalisation as used before Nominatim 4.
"""
from typing import Optional, Sequence, List, Tuple, Mapping, Any, Callable, \
cast, Dict, Set, Iterable
from collections import OrderedDict
import logging
from pathlib import Path
import re
import shutil
from textwrap import dedent
from icu import Transliterator
import psycopg2
import psycopg2.extras
from nominatim.db.connection import connect, Connection
from nominatim.config import Configuration
from nominatim.db import properties
from nominatim.db import utils as db_utils
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.data.place_info import PlaceInfo
from nominatim.errors import UsageError
from nominatim.tokenizer.base import AbstractAnalyzer, AbstractTokenizer
DBCFG_NORMALIZATION = "tokenizer_normalization"
DBCFG_MAXWORDFREQ = "tokenizer_maxwordfreq"
LOG = logging.getLogger()
def create(dsn: str, data_dir: Path) -> 'LegacyTokenizer':
""" Create a new instance of the tokenizer provided by this module.
"""
return LegacyTokenizer(dsn, data_dir)
def _install_module(config_module_path: str, src_dir: Path, module_dir: Path) -> str:
""" Copies the PostgreSQL normalisation module into the project
directory if necessary. For historical reasons the module is
saved in the '/module' subdirectory and not with the other tokenizer
data.
The function detects when the installation is run from the
build directory. It doesn't touch the module in that case.
"""
# Custom module locations are simply used as is.
if config_module_path:
LOG.info("Using custom path for database module at '%s'", config_module_path)
return config_module_path
# Compatibility mode for builddir installations.
if module_dir.exists() and src_dir.samefile(module_dir):
LOG.info('Running from build directory. Leaving database module as is.')
return str(module_dir)
# In any other case install the module in the project directory.
if not module_dir.exists():
module_dir.mkdir()
destfile = module_dir / 'nominatim.so'
shutil.copy(str(src_dir / 'nominatim.so'), str(destfile))
destfile.chmod(0o755)
LOG.info('Database module installed at %s', str(destfile))
return str(module_dir)
def _check_module(module_dir: str, conn: Connection) -> None:
""" Try to use the PostgreSQL module to confirm that it is correctly
installed and accessible from PostgreSQL.
"""
with conn.cursor() as cur:
try:
cur.execute("""CREATE FUNCTION nominatim_test_import_func(text)
RETURNS text AS %s, 'transliteration'
LANGUAGE c IMMUTABLE STRICT;
DROP FUNCTION nominatim_test_import_func(text)
""", (f'{module_dir}/nominatim.so', ))
except psycopg2.DatabaseError as err:
LOG.fatal("Error accessing database module: %s", err)
raise UsageError("Database module cannot be accessed.") from err
class LegacyTokenizer(AbstractTokenizer):
""" The legacy tokenizer uses a special PostgreSQL module to normalize
names and queries. The tokenizer thus implements normalization through
calls to the database.
"""
def __init__(self, dsn: str, data_dir: Path) -> None:
self.dsn = dsn
self.data_dir = data_dir
self.normalization: Optional[str] = None
def init_new_db(self, config: Configuration, init_db: bool = True) -> None:
""" Set up a new tokenizer for the database.
This copies all necessary data in the project directory to make
sure the tokenizer remains stable even over updates.
"""
assert config.project_dir is not None
module_dir = _install_module(config.DATABASE_MODULE_PATH,
config.lib_dir.module,
config.project_dir / 'module')
self.normalization = config.TERM_NORMALIZATION
self._install_php(config, overwrite=True)
with connect(self.dsn) as conn:
_check_module(module_dir, conn)
self._save_config(conn, config)
conn.commit()
if init_db:
self.update_sql_functions(config)
self._init_db_tables(config)
def init_from_project(self, config: Configuration) -> None:
""" Initialise the tokenizer from the project directory.
"""
assert config.project_dir is not None
with connect(self.dsn) as conn:
self.normalization = properties.get_property(conn, DBCFG_NORMALIZATION)
if not (config.project_dir / 'module' / 'nominatim.so').exists():
_install_module(config.DATABASE_MODULE_PATH,
config.lib_dir.module,
config.project_dir / 'module')
self._install_php(config, overwrite=False)
def finalize_import(self, config: Configuration) -> None:
""" Do any required postprocessing to make the tokenizer data ready
for use.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_indices.sql')
def update_sql_functions(self, config: Configuration) -> None:
""" Reimport the SQL functions for this tokenizer.
"""
assert config.project_dir is not None
with connect(self.dsn) as conn:
max_word_freq = properties.get_property(conn, DBCFG_MAXWORDFREQ)
modulepath = config.DATABASE_MODULE_PATH or \
str((config.project_dir / 'module').resolve())
sqlp = SQLPreprocessor(conn, config)
sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer.sql',
max_word_freq=max_word_freq,
modulepath=modulepath)
def check_database(self, _: Configuration) -> Optional[str]:
""" Check that the tokenizer is set up correctly.
"""
hint = """\
The Postgresql extension nominatim.so was not correctly loaded.
Error: {error}
Hints:
             * Check the output of the CMake/make installation step
* Does nominatim.so exist?
* Does nominatim.so exist on the database server?
* Can nominatim.so be accessed by the database user?
"""
with connect(self.dsn) as conn:
with conn.cursor() as cur:
try:
out = cur.scalar("SELECT make_standard_name('a')")
except psycopg2.Error as err:
return hint.format(error=str(err))
if out != 'a':
return hint.format(error='Unexpected result for make_standard_name()')
return None
def migrate_database(self, config: Configuration) -> None:
""" Initialise the project directory of an existing database for
use with this tokenizer.
This is a special migration function for updating existing databases
to new software versions.
"""
assert config.project_dir is not None
self.normalization = config.TERM_NORMALIZATION
module_dir = _install_module(config.DATABASE_MODULE_PATH,
config.lib_dir.module,
config.project_dir / 'module')
with connect(self.dsn) as conn:
_check_module(module_dir, conn)
self._save_config(conn, config)
def update_statistics(self) -> None:
""" Recompute the frequency of full words.
"""
with connect(self.dsn) as conn:
if conn.table_exists('search_name'):
with conn.cursor() as cur:
cur.drop_table("word_frequencies")
LOG.info("Computing word frequencies")
cur.execute("""CREATE TEMP TABLE word_frequencies AS
SELECT unnest(name_vector) as id, count(*)
FROM search_name GROUP BY id""")
cur.execute("CREATE INDEX ON word_frequencies(id)")
LOG.info("Update word table with recomputed frequencies")
cur.execute("""UPDATE word SET search_name_count = count
FROM word_frequencies
WHERE word_token like ' %' and word_id = id""")
cur.drop_table("word_frequencies")
conn.commit()
def update_word_tokens(self) -> None:
""" No house-keeping implemented for the legacy tokenizer.
"""
LOG.info("No tokenizer clean-up available.")
def name_analyzer(self) -> 'LegacyNameAnalyzer':
""" Create a new analyzer for tokenizing names and queries
            using this tokenizer. Analyzers are context managers and should
be used accordingly:
```
with tokenizer.name_analyzer() as analyzer:
analyser.tokenize()
```
When used outside the with construct, the caller must ensure to
call the close() function before destructing the analyzer.
Analyzers are not thread-safe. You need to instantiate one per thread.
"""
normalizer = Transliterator.createFromRules("phrase normalizer",
self.normalization)
return LegacyNameAnalyzer(self.dsn, normalizer)
def _install_php(self, config: Configuration, overwrite: bool = True) -> None:
""" Install the php script for the tokenizer.
"""
php_file = self.data_dir / "tokenizer.php"
if not php_file.exists() or overwrite:
php_file.write_text(dedent(f"""\
<?php
@define('CONST_Max_Word_Frequency', {config.MAX_WORD_FREQUENCY});
@define('CONST_Term_Normalization_Rules', "{config.TERM_NORMALIZATION}");
require_once('{config.lib_dir.php}/tokenizer/legacy_tokenizer.php');
"""), encoding='utf-8')
def _init_db_tables(self, config: Configuration) -> None:
""" Set up the word table and fill it with pre-computed word
frequencies.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
sqlp.run_sql_file(conn, 'tokenizer/legacy_tokenizer_tables.sql')
conn.commit()
LOG.warning("Precomputing word tokens")
db_utils.execute_file(self.dsn, config.lib_dir.data / 'words.sql')
def _save_config(self, conn: Connection, config: Configuration) -> None:
""" Save the configuration that needs to remain stable for the given
database as database properties.
"""
assert self.normalization is not None
properties.set_property(conn, DBCFG_NORMALIZATION, self.normalization)
properties.set_property(conn, DBCFG_MAXWORDFREQ, config.MAX_WORD_FREQUENCY)
class LegacyNameAnalyzer(AbstractAnalyzer):
""" The legacy analyzer uses the special Postgresql module for
splitting names.
Each instance opens a connection to the database to request the
normalization.
"""
def __init__(self, dsn: str, normalizer: Any):
self.conn: Optional[Connection] = connect(dsn).connection
self.conn.autocommit = True
self.normalizer = normalizer
psycopg2.extras.register_hstore(self.conn)
self._cache = _TokenCache(self.conn)
def close(self) -> None:
""" Free all resources used by the analyzer.
"""
if self.conn:
self.conn.close()
self.conn = None
def get_word_token_info(self, words: Sequence[str]) -> List[Tuple[str, str, int]]:
""" Return token information for the given list of words.
If a word starts with # it is assumed to be a full name,
otherwise it is a partial name.
The function returns a list of tuples with
(original word, word token, word id).
The function is used for testing and debugging only
and is not necessarily efficient.
"""
assert self.conn is not None
with self.conn.cursor() as cur:
cur.execute("""SELECT t.term, word_token, word_id
FROM word, (SELECT unnest(%s::TEXT[]) as term) t
WHERE word_token = (CASE
WHEN left(t.term, 1) = '#' THEN
' ' || make_standard_name(substring(t.term from 2))
ELSE
make_standard_name(t.term)
END)
and class is null and country_code is null""",
(words, ))
return [(r[0], r[1], r[2]) for r in cur]
def normalize(self, phrase: str) -> str:
""" Normalize the given phrase, i.e. remove all properties that
are irrelevant for search.
"""
return cast(str, self.normalizer.transliterate(phrase))
def normalize_postcode(self, postcode: str) -> str:
""" Convert the postcode to a standardized form.
This function must yield exactly the same result as the SQL function
'token_normalized_postcode()'.
"""
return postcode.strip().upper()
def update_postcodes_from_db(self) -> None:
""" Update postcode tokens in the word table from the location_postcode
table.
"""
assert self.conn is not None
with self.conn.cursor() as cur:
# This finds the rows in location_postcode and word that are
# missing in the other table.
cur.execute("""SELECT * FROM
(SELECT pc, word FROM
(SELECT distinct(postcode) as pc FROM location_postcode) p
FULL JOIN
(SELECT word FROM word
WHERE class ='place' and type = 'postcode') w
ON pc = word) x
WHERE pc is null or word is null""")
to_delete = []
to_add = []
for postcode, word in cur:
if postcode is None:
to_delete.append(word)
else:
to_add.append(postcode)
if to_delete:
cur.execute("""DELETE FROM WORD
WHERE class ='place' and type = 'postcode'
and word = any(%s)
""", (to_delete, ))
if to_add:
cur.execute("""SELECT count(create_postcode_id(pc))
FROM unnest(%s) as pc
""", (to_add, ))
def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]],
should_replace: bool) -> None:
""" Replace the search index for special phrases with the new phrases.
"""
assert self.conn is not None
norm_phrases = set(((self.normalize(p[0]), p[1], p[2], p[3])
for p in phrases))
with self.conn.cursor() as cur:
# Get the old phrases.
existing_phrases = set()
cur.execute("""SELECT word, class, type, operator FROM word
WHERE class != 'place'
OR (type != 'house' AND type != 'postcode')""")
for label, cls, typ, oper in cur:
existing_phrases.add((label, cls, typ, oper or '-'))
to_add = norm_phrases - existing_phrases
to_delete = existing_phrases - norm_phrases
if to_add:
cur.execute_values(
""" INSERT INTO word (word_id, word_token, word, class, type,
search_name_count, operator)
(SELECT nextval('seq_word'), ' ' || make_standard_name(name), name,
class, type, 0,
CASE WHEN op in ('in', 'near') THEN op ELSE null END
FROM (VALUES %s) as v(name, class, type, op))""",
to_add)
if to_delete and should_replace:
cur.execute_values(
""" DELETE FROM word USING (VALUES %s) as v(name, in_class, in_type, op)
WHERE word = name and class = in_class and type = in_type
and ((op = '-' and operator is null) or op = operator)""",
to_delete)
LOG.info("Total phrases: %s. Added: %s. Deleted: %s",
len(norm_phrases), len(to_add), len(to_delete))
def add_country_names(self, country_code: str, names: Mapping[str, str]) -> None:
""" Add names for the given country to the search index.
"""
assert self.conn is not None
with self.conn.cursor() as cur:
cur.execute(
"""INSERT INTO word (word_id, word_token, country_code)
(SELECT nextval('seq_word'), lookup_token, %s
FROM (SELECT DISTINCT ' ' || make_standard_name(n) as lookup_token
FROM unnest(%s)n) y
WHERE NOT EXISTS(SELECT * FROM word
WHERE word_token = lookup_token and country_code = %s))
""", (country_code, list(names.values()), country_code))
def process_place(self, place: PlaceInfo) -> Mapping[str, Any]:
""" Determine tokenizer information about the given place.
Returns a JSON-serialisable structure that will be handed into
the database via the token_info field.
"""
assert self.conn is not None
token_info = _TokenInfo(self._cache)
names = place.name
if names:
token_info.add_names(self.conn, names)
if place.is_country():
assert place.country_code is not None
self.add_country_names(place.country_code, names)
address = place.address
if address:
self._process_place_address(token_info, address)
return token_info.data
def _process_place_address(self, token_info: '_TokenInfo', address: Mapping[str, str]) -> None:
assert self.conn is not None
hnrs = []
addr_terms = []
for key, value in address.items():
if key == 'postcode':
# Make sure the normalized postcode is present in the word table.
if re.search(r'[:,;]', value) is None:
norm_pc = self.normalize_postcode(value)
token_info.set_postcode(norm_pc)
self._cache.add_postcode(self.conn, norm_pc)
elif key in ('housenumber', 'streetnumber', 'conscriptionnumber'):
hnrs.append(value)
elif key == 'street':
token_info.add_street(self.conn, value)
elif key == 'place':
token_info.add_place(self.conn, value)
elif not key.startswith('_') \
and key not in ('country', 'full', 'inclusion'):
addr_terms.append((key, value))
if hnrs:
token_info.add_housenumbers(self.conn, hnrs)
if addr_terms:
token_info.add_address_terms(self.conn, addr_terms)
class _TokenInfo:
""" Collect token information to be sent back to the database.
"""
def __init__(self, cache: '_TokenCache') -> None:
self.cache = cache
self.data: Dict[str, Any] = {}
def add_names(self, conn: Connection, names: Mapping[str, str]) -> None:
""" Add token information for the names of the place.
"""
with conn.cursor() as cur:
# Create the token IDs for all names.
self.data['names'] = cur.scalar("SELECT make_keywords(%s)::text",
(names, ))
def add_housenumbers(self, conn: Connection, hnrs: Sequence[str]) -> None:
""" Extract housenumber information from the address.
"""
if len(hnrs) == 1:
token = self.cache.get_housenumber(hnrs[0])
if token is not None:
self.data['hnr_tokens'] = token
self.data['hnr'] = hnrs[0]
return
# split numbers if necessary
simple_list: List[str] = []
for hnr in hnrs:
simple_list.extend((x.strip() for x in re.split(r'[;,]', hnr)))
if len(simple_list) > 1:
simple_list = list(set(simple_list))
with conn.cursor() as cur:
cur.execute("SELECT * FROM create_housenumbers(%s)", (simple_list, ))
result = cur.fetchone()
assert result is not None
self.data['hnr_tokens'], self.data['hnr'] = result
def set_postcode(self, postcode: str) -> None:
""" Set or replace the postcode token with the given value.
"""
self.data['postcode'] = postcode
def add_street(self, conn: Connection, street: str) -> None:
""" Add addr:street match terms.
"""
def _get_street(name: str) -> Optional[str]:
with conn.cursor() as cur:
return cast(Optional[str],
cur.scalar("SELECT word_ids_from_name(%s)::text", (name, )))
tokens = self.cache.streets.get(street, _get_street)
self.data['street'] = tokens or '{}'
def add_place(self, conn: Connection, place: str) -> None:
""" Add addr:place search and match terms.
"""
def _get_place(name: str) -> Tuple[List[int], List[int]]:
with conn.cursor() as cur:
cur.execute("""SELECT make_keywords(hstore('name' , %s))::text,
word_ids_from_name(%s)::text""",
(name, name))
return cast(Tuple[List[int], List[int]], cur.fetchone())
self.data['place_search'], self.data['place_match'] = \
self.cache.places.get(place, _get_place)
def add_address_terms(self, conn: Connection, terms: Sequence[Tuple[str, str]]) -> None:
""" Add additional address terms.
"""
def _get_address_term(name: str) -> Tuple[List[int], List[int]]:
with conn.cursor() as cur:
cur.execute("""SELECT addr_ids_from_name(%s)::text,
word_ids_from_name(%s)::text""",
(name, name))
return cast(Tuple[List[int], List[int]], cur.fetchone())
tokens = {}
for key, value in terms:
items = self.cache.address_terms.get(value, _get_address_term)
if items[0] or items[1]:
tokens[key] = items
if tokens:
self.data['addr'] = tokens
class _LRU:
""" Least recently used cache that accepts a generator function to
produce the item when there is a cache miss.
"""
def __init__(self, maxsize: int = 128):
self.data: 'OrderedDict[str, Any]' = OrderedDict()
self.maxsize = maxsize
def get(self, key: str, generator: Callable[[str], Any]) -> Any:
""" Get the item with the given key from the cache. If nothing
is found in the cache, generate the value through the
generator function and store it in the cache.
"""
value = self.data.get(key)
if value is not None:
self.data.move_to_end(key)
else:
value = generator(key)
if len(self.data) >= self.maxsize:
self.data.popitem(last=False)
self.data[key] = value
return value
class _TokenCache:
""" Cache for token information to avoid repeated database queries.
This cache is not thread-safe and needs to be instantiated per
analyzer.
"""
def __init__(self, conn: Connection):
# various LRU caches
self.streets = _LRU(maxsize=256)
self.places = _LRU(maxsize=128)
self.address_terms = _LRU(maxsize=1024)
# Look up housenumbers up to 100 and cache them
with conn.cursor() as cur:
cur.execute("""SELECT i, ARRAY[getorcreate_housenumber_id(i::text)]::text
FROM generate_series(1, 100) as i""")
self._cached_housenumbers: Dict[str, str] = {str(r[0]): r[1] for r in cur}
# For postcodes remember the ones that have already been added
self.postcodes: Set[str] = set()
def get_housenumber(self, number: str) -> Optional[str]:
""" Get a housenumber token from the cache.
"""
return self._cached_housenumbers.get(number)
def add_postcode(self, conn: Connection, postcode: str) -> None:
""" Make sure the given postcode is in the database.
"""
if postcode not in self.postcodes:
with conn.cursor() as cur:
cur.execute('SELECT create_postcode_id(%s)', (postcode, ))
self.postcodes.add(postcode)
| 25,781 | 37.423249 | 99 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/base.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Common data types and protocols for sanitizers.
"""
from typing import Optional, List, Mapping, Callable
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
from nominatim.data.place_info import PlaceInfo
from nominatim.data.place_name import PlaceName
from nominatim.typing import Protocol, Final
class ProcessInfo:
""" Container class for information handed into to handler functions.
The 'names' and 'address' members are mutable. A handler must change
them by either modifying the lists in place or replacing the old content
with a new list.
"""
def __init__(self, place: PlaceInfo):
self.place: Final = place
self.names = self._convert_name_dict(place.name)
self.address = self._convert_name_dict(place.address)
@staticmethod
def _convert_name_dict(names: Optional[Mapping[str, str]]) -> List[PlaceName]:
""" Convert a dictionary of names into a list of PlaceNames.
The dictionary key is split into the primary part of the key
and the suffix (the part after an optional colon).
"""
out = []
if names:
for key, value in names.items():
parts = key.split(':', 1)
out.append(PlaceName(value.strip(),
parts[0].strip(),
parts[1].strip() if len(parts) > 1 else None))
return out
class SanitizerHandler(Protocol):
""" Protocol for sanitizer modules.
"""
def create(self, config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
"""
Create a function for sanitizing a place.
Arguments:
config: A dictionary with the additional configuration options
specified in the tokenizer configuration
Return:
The result must be a callable that takes a place description
and transforms name and address as required.
"""
| 2,199 | 32.846154 | 83 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/strip_brace_terms.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
This sanitizer creates additional name variants for names that have
addendums in brackets (e.g. "Halle (Saale)"). The additional variant contains
only the main name part with the bracket part removed.
"""
from typing import Callable
from nominatim.tokenizer.sanitizers.base import ProcessInfo
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
def create(_: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a name processing function that creates additional name variants
for bracket addendums.
"""
def _process(obj: ProcessInfo) -> None:
""" Add variants for names that have a bracket extension.
"""
if obj.names:
new_names = []
for name in (n for n in obj.names if '(' in n.name):
new_name = name.name.split('(')[0].strip()
if new_name:
new_names.append(name.clone(name=new_name))
obj.names.extend(new_names)
return _process
| 1,207 | 33.514286 | 79 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/delete_tags.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer which prevents certain tags from getting into the search index.
It removes tags which match all the properties given below.
Arguments:
type: Define which type of tags should be considered for removal.
There are two types of tags: 'name' and 'address' tags.
Takes a string 'name' or 'address'. (default: 'name')
filter-kind: Define which 'kind' of tags should be removed.
Takes a string or list of strings where each
string is a regular expression. A tag is considered
to be a candidate for removal if its 'kind' property
fully matches any of the given regular expressions.
Note that by default all 'kind' of tags are considered.
suffix: Define the 'suffix' property of the tags which should be
removed. Takes a string or list of strings where each
string is a regular expression. A tag is considered to be a
candidate for removal if its 'suffix' property fully
matches any of the given regular expressions. Note that by
default tags with any suffix value are considered including
those which don't have a suffix at all.
name: Define the 'name' property corresponding to the 'kind' property
of the tag. Takes a string or list of strings where each string
is a regular expression. A tag is considered to be a candidate
for removal if its name fully matches any of the given regular
expressions. Note that by default tags with any 'name' are
considered.
country_code: Define the country code of places whose tags should be
considered for removal. Takes a string or list of strings
where each string is a two-letter lower-case country code.
Note that by default tags of places with any country code
are considered including those which don't have a country
code at all.
rank_address: Define the address rank of places whose tags should be
considered for removal. Takes a string or list of strings
where each string is a number or a range of numbers of the
form <from>-<to>.
Note that default is '0-30', which means that tags of all
places are considered.
See https://nominatim.org/release-docs/latest/customize/Ranking/#address-rank
to learn more about address rank.
"""
from typing import Callable, List, Tuple, Sequence
from nominatim.tokenizer.sanitizers.base import ProcessInfo
from nominatim.data.place_name import PlaceName
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
class _TagSanitizer:
def __init__(self, config: SanitizerConfig) -> None:
self.type = config.get('type', 'name')
self.filter_kind = config.get_filter('filter-kind')
self.country_codes = config.get_string_list('country_code', [])
self.filter_suffix = config.get_filter('suffix')
self.filter_name = config.get_filter('name')
self.allowed_ranks = self._set_allowed_ranks(
config.get_string_list("rank_address", ["0-30"])
)
self.has_country_code = config.get('country_code', None) is not None
def __call__(self, obj: ProcessInfo) -> None:
tags = obj.names if self.type == 'name' else obj.address
if not tags \
or not self.allowed_ranks[obj.place.rank_address] \
or self.has_country_code \
and obj.place.country_code not in self.country_codes:
return
filtered_tags: List[PlaceName] = []
for tag in tags:
if not self.filter_kind(tag.kind) \
or not self.filter_suffix(tag.suffix or '') \
or not self.filter_name(tag.name):
filtered_tags.append(tag)
if self.type == 'name':
obj.names = filtered_tags
else:
obj.address = filtered_tags
def _set_allowed_ranks(self, ranks: Sequence[str]) -> Tuple[bool, ...]:
""" Returns a tuple of 31 boolean values corresponding to the
address ranks 0-30. Value at index 'i' is True if rank 'i'
is present in the ranks or lies in the range of any of the
ranks provided in the sanitizer configuration, otherwise
the value is False.
"""
allowed_ranks = [False] * 31
for rank in ranks:
intvl = [int(x) for x in rank.split('-')]
start, end = intvl[0], intvl[0] if len(intvl) == 1 else intvl[1]
for i in range(start, end + 1):
allowed_ranks[i] = True
return tuple(allowed_ranks)
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function to process removal of certain tags.
"""
return _TagSanitizer(config)
| 5,178 | 39.147287 | 95 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/tag_analyzer_by_language.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
This sanitizer sets the `analyzer` property depending on the
language of the tag. The language is taken from the suffix of the name.
If a name already has an analyzer tagged, then this is kept.
Arguments:
filter-kind: Restrict the names the sanitizer is applied to. The
parameter expects a list of regular expressions which are
matched against the 'kind' of the name. Note that a match
against the full string is expected.
whitelist: Restrict the set of languages that should be tagged.
Expects a list of acceptable suffixes. When unset,
all 2- and 3-letter lower-case codes are accepted.
use-defaults: Configure what happens when the name has no suffix.
When set to 'all', a variant is created for
each of the default languages in the country
the feature is in. When set to 'mono', a variant is
only created, when exactly one language is spoken
in the country. The default is to do nothing with
the default languages of a country.
mode: Define how the variants are created and may be 'replace' or
'append'. When set to 'append' the original name (without
any analyzer tagged) is retained. (default: replace)
"""
from typing import Callable, Dict, Optional, List
from nominatim.data import country_info
from nominatim.tokenizer.sanitizers.base import ProcessInfo
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
class _AnalyzerByLanguage:
""" Processor for tagging the language of names in a place.
"""
def __init__(self, config: SanitizerConfig) -> None:
self.filter_kind = config.get_filter('filter-kind')
self.replace = config.get('mode', 'replace') != 'append'
self.whitelist = config.get('whitelist')
self._compute_default_languages(config.get('use-defaults', 'no'))
def _compute_default_languages(self, use_defaults: str) -> None:
self.deflangs: Dict[Optional[str], List[str]] = {}
if use_defaults in ('mono', 'all'):
for ccode, clangs in country_info.iterate('languages'):
if len(clangs) == 1 or use_defaults == 'all':
if self.whitelist:
self.deflangs[ccode] = [l for l in clangs if l in self.whitelist]
else:
self.deflangs[ccode] = clangs
def _suffix_matches(self, suffix: str) -> bool:
if self.whitelist is None:
return len(suffix) in (2, 3) and suffix.islower()
return suffix in self.whitelist
def __call__(self, obj: ProcessInfo) -> None:
if not obj.names:
return
more_names = []
for name in (n for n in obj.names
if not n.has_attr('analyzer') and self.filter_kind(n.kind)):
if name.suffix:
langs = [name.suffix] if self._suffix_matches(name.suffix) else None
else:
langs = self.deflangs.get(obj.place.country_code)
if langs:
if self.replace:
name.set_attr('analyzer', langs[0])
else:
more_names.append(name.clone(attr={'analyzer': langs[0]}))
more_names.extend(name.clone(attr={'analyzer': l}) for l in langs[1:])
obj.names.extend(more_names)
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function that sets the analyzer property depending on the
language of the tag.
"""
return _AnalyzerByLanguage(config)
| 3,902 | 38.03 | 89 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/clean_housenumbers.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that preprocesses address tags for house numbers. The sanitizer
can be used to
* define which tags are to be considered house numbers (see 'filter-kind')
* split house number lists into individual numbers (see 'delimiters')
Arguments:
delimiters: Define the set of characters to be used for
splitting a list of house numbers into parts. (default: ',;')
filter-kind: Define the address tags that are considered to be a
house number. Either takes a single string or a list of strings,
where each string is a regular expression. An address item
is considered a house number if the 'kind' fully matches any
of the given regular expressions. (default: 'housenumber')
convert-to-name: Define house numbers that should be treated as a name
instead of a house number. Either takes a single string
or a list of strings, where each string is a regular
expression that must match the full house number value.
"""
from typing import Callable, Iterator, List
from nominatim.tokenizer.sanitizers.base import ProcessInfo
from nominatim.data.place_name import PlaceName
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
class _HousenumberSanitizer:
def __init__(self, config: SanitizerConfig) -> None:
self.filter_kind = config.get_filter('filter-kind', ['housenumber'])
self.split_regexp = config.get_delimiter()
self.filter_name = config.get_filter('convert-to-name', 'FAIL_ALL')
def __call__(self, obj: ProcessInfo) -> None:
if not obj.address:
return
new_address: List[PlaceName] = []
for item in obj.address:
if self.filter_kind(item.kind):
if self.filter_name(item.name):
obj.names.append(item.clone(kind='housenumber'))
else:
new_address.extend(item.clone(kind='housenumber', name=n)
for n in self.sanitize(item.name))
else:
# Don't touch other address items.
new_address.append(item)
obj.address = new_address
def sanitize(self, value: str) -> Iterator[str]:
""" Extract housenumbers in a regularized format from an OSM value.
The function works as a generator that yields all valid housenumbers
that can be created from the value.
"""
for hnr in self.split_regexp.split(value):
if hnr:
yield from self._regularize(hnr)
def _regularize(self, hnr: str) -> Iterator[str]:
yield hnr
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a housenumber processing function.
"""
return _HousenumberSanitizer(config)
| 3,085 | 37.098765 | 81 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/config.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Configuration for Sanitizers.
"""
from typing import Sequence, Union, Optional, Pattern, Callable, Any, TYPE_CHECKING
from collections import UserDict
import re
from nominatim.errors import UsageError
# working around missing generics in Python < 3.8
# See https://github.com/python/typing/issues/60#issuecomment-869757075
if TYPE_CHECKING:
_BaseUserDict = UserDict[str, Any]
else:
_BaseUserDict = UserDict
class SanitizerConfig(_BaseUserDict):
""" The `SanitizerConfig` class is a read-only dictionary
with configuration options for the sanitizer.
In addition to the usual dictionary functions, the class provides
accessors to standard sanitizer options that are used by many of the
sanitizers.
"""
def get_string_list(self, param: str, default: Sequence[str] = tuple()) -> Sequence[str]:
""" Extract a configuration parameter as a string list.
Arguments:
param: Name of the configuration parameter.
default: Takes a tuple or list of strings which will
be returned if the parameter is missing in the
sanitizer configuration.
Note that if this default parameter is not
provided then an empty list is returned.
Returns:
If the parameter value is a simple string, it is returned as a
one-item list. If the parameter value does not exist, the given
default is returned. If the parameter value is a list, it is
checked to contain only strings before being returned.
"""
values = self.data.get(param, None)
if values is None:
return list(default)
if isinstance(values, str):
return [values] if values else []
if not isinstance(values, (list, tuple)):
raise UsageError(f"Parameter '{param}' must be string or list of strings.")
if any(not isinstance(value, str) for value in values):
raise UsageError(f"Parameter '{param}' must be string or list of strings.")
return values
def get_bool(self, param: str, default: Optional[bool] = None) -> bool:
""" Extract a configuration parameter as a boolean.
Arguments:
param: Name of the configuration parameter. The parameter must
contain one of the YAML boolean values, otherwise a
UsageError will be raised.
default: Value to return, when the parameter is missing.
When set to `None`, the parameter must be defined.
Returns:
Boolean value of the given parameter.
"""
value = self.data.get(param, default)
if not isinstance(value, bool):
raise UsageError(f"Parameter '{param}' must be a boolean value ('yes' or 'no').")
return value
def get_delimiter(self, default: str = ',;') -> Pattern[str]:
""" Return the 'delimiters' parameter in the configuration as a
compiled regular expression that can be used to split strings on
these delimiters.
Arguments:
default: Delimiters to be used when 'delimiters' parameter
is not explicitly configured.
Returns:
A regular expression pattern which can be used to
split a string. The regular expression makes sure that the
resulting names are stripped and that repeated delimiters
are ignored. It may still create empty fields on occasion. The
code needs to filter those.
"""
delimiter_set = set(self.data.get('delimiters', default))
if not delimiter_set:
raise UsageError("Empty 'delimiter' parameter not allowed for sanitizer.")
return re.compile('\\s*[{}]+\\s*'.format(''.join('\\' + d for d in delimiter_set)))
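# Usage sketch (hypothetical configuration):
#
#   config = SanitizerConfig({'delimiters': ';,'})
#   config.get_delimiter().split('foo; bar ,baz')  # -> ['foo', 'bar', 'baz']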
def get_filter(self, param: str, default: Union[str, Sequence[str]] = 'PASS_ALL'
) -> Callable[[str], bool]:
""" Returns a filter function for the given parameter of the sanitizer
configuration.
The value provided for the parameter in sanitizer configuration
should be a string or list of strings, where each string is a regular
expression. These regular expressions will later be used by the
filter function to filter strings.
Arguments:
param: The parameter for which the filter function
will be created.
default: Defines the behaviour of filter function if
parameter is missing in the sanitizer configuration.
Takes a string ('PASS_ALL' or 'FAIL_ALL') or a list of strings.
Any other string value or an empty list is not allowed
and will raise a ValueError. If the value is 'PASS_ALL', the
filter function lets all strings pass; if the value is
'FAIL_ALL', it lets no strings pass.
If the value provided is a list of strings, each string
is treated as a regular expression. In this case these regular
expressions will be used by the filter function.
By default the filter function lets all strings pass.
Returns:
A filter function that takes a target string as the argument and
returns True if it fully matches any of the regular expressions
otherwise returns False.
"""
filters = self.get_string_list(param) or default
if filters == 'PASS_ALL':
return lambda _: True
if filters == 'FAIL_ALL':
return lambda _: False
if filters and isinstance(filters, (list, tuple)):
regexes = [re.compile(regex) for regex in filters]
return lambda target: any(regex.fullmatch(target) for regex in regexes)
raise ValueError("Default parameter must be a non-empty list or a string value \
('PASS_ALL' or 'FAIL_ALL').")
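# Usage sketch for get_filter() (hypothetical values):
#
#   config = SanitizerConfig({'filter-kind': ['housenumber', 'conscription.*']})
#   keep = config.get_filter('filter-kind')
#   keep('conscriptionnumber')   # True (full regex match)
#   keep('streetnumber')         # False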
| 6,546 | 42.072368 | 93 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/clean_postcodes.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that filters postcodes by their officially allowed pattern.
Arguments:
convert-to-address: If set to 'yes' (the default), then postcodes that do
not conform with their country-specific pattern are
converted to an address component. That means that
the postcode does not take part when computing the
postcode centroids of a country but is still searchable.
When set to 'no', non-conforming postcodes are not
searchable either.
default-pattern: Pattern to use, when there is none available for the
country in question. Warning: will not be used for
objects that have no country assigned. These are always
assumed to have no postcode.
"""
from typing import Callable, Optional, Tuple
from nominatim.data.postcode_format import PostcodeFormatter
from nominatim.tokenizer.sanitizers.base import ProcessInfo
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
class _PostcodeSanitizer:
def __init__(self, config: SanitizerConfig) -> None:
self.convert_to_address = config.get_bool('convert-to-address', True)
self.matcher = PostcodeFormatter()
default_pattern = config.get('default-pattern')
if default_pattern is not None and isinstance(default_pattern, str):
self.matcher.set_default_pattern(default_pattern)
def __call__(self, obj: ProcessInfo) -> None:
if not obj.address:
return
postcodes = ((i, o) for i, o in enumerate(obj.address) if o.kind == 'postcode')
for pos, postcode in postcodes:
formatted = self.scan(postcode.name, obj.place.country_code)
if formatted is None:
if self.convert_to_address:
postcode.kind = 'unofficial_postcode'
else:
obj.address.pop(pos)
else:
postcode.name = formatted[0]
postcode.set_attr('variant', formatted[1])
def scan(self, postcode: str, country: Optional[str]) -> Optional[Tuple[str, str]]:
""" Check the postcode for correct formatting and return the
normalized version. Returns None if the postcode does not
correspond to the official format of the given country.
"""
match = self.matcher.match(country, postcode)
if match is None:
return None
assert country is not None
return self.matcher.normalize(country, match),\
' '.join(filter(lambda p: p is not None, match.groups()))
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function that filters postcodes by their officially allowed pattern.
"""
return _PostcodeSanitizer(config)
| 3,130 | 37.654321 | 87 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/clean_tiger_tags.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that preprocesses tags from the TIGER import.
It makes the following changes:
* remove state reference from tiger:county
"""
from typing import Callable
import re
from nominatim.tokenizer.sanitizers.base import ProcessInfo
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
COUNTY_MATCH = re.compile('(.*), [A-Z][A-Z]')
def _clean_tiger_county(obj: ProcessInfo) -> None:
""" Remove the state reference from tiger:county tags.
This transforms a name like 'Hamilton, AL' into 'Hamilton'.
If no state reference is detected at the end, the name is left as is.
"""
if not obj.address:
return
for item in obj.address:
if item.kind == 'tiger' and item.suffix == 'county':
m = COUNTY_MATCH.fullmatch(item.name)
if m:
item.name = m[1]
# Switch kind and suffix, the split left them reversed.
item.kind = 'county'
item.suffix = 'tiger'
return
def create(_: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function that preprocesses tags from the TIGER import.
"""
return _clean_tiger_county
| 1,390 | 28.595745 | 77 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/sanitizers/split_name_list.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that splits lists of names into their components.
Arguments:
delimiters: Define the set of characters to be used for
splitting the list. (default: ',;')
"""
from typing import Callable
from nominatim.tokenizer.sanitizers.base import ProcessInfo
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a name processing function that splits name values with
multiple values into their components.
"""
regexp = config.get_delimiter()
def _process(obj: ProcessInfo) -> None:
if not obj.names:
return
new_names = []
for name in obj.names:
split_names = regexp.split(name.name)
if len(split_names) == 1:
new_names.append(name)
else:
new_names.extend(name.clone(name=n) for n in split_names if n)
obj.names = new_names
return _process
| 1,208 | 29.225 | 78 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/token_analysis/base.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Common data types and protocols for analysers.
"""
from typing import Mapping, List, Any
from nominatim.typing import Protocol
from nominatim.data.place_name import PlaceName
class Analyzer(Protocol):
""" The `create()` function of an analysis module needs to return an
object that implements the following functions.
"""
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the canonical form of the given name. The canonical ID must
be unique (the same ID must always yield the same variants) and
must be a form from which the variants can be derived.
Arguments:
name: Extended place name description as prepared by
the sanitizers.
Returns:
ID string with a canonical form of the name. The string may
be empty, when the analyzer cannot analyze the name at all,
for example because the character set in use does not match.
"""
def compute_variants(self, canonical_id: str) -> List[str]:
""" Compute the transliterated spelling variants for the given
canonical ID.
Arguments:
canonical_id: ID string previously computed with
`get_canonical_id()`.
Returns:
A list of possible spelling variants. All strings must have
been transformed with the global normalizer and
transliterator ICU rules. Otherwise they cannot be matched
against the input by the query frontend.
The list may be empty, when there are no useful
spelling variants. This may happen when an analyzer usually
only outputs additional variants to the canonical spelling
and there are no such variants.
"""
class AnalysisModule(Protocol):
""" The setup of the token analysis is split into two parts:
configuration and analyser factory. A token analysis module must
therefore implement the two functions here described.
"""
def configure(self, rules: Mapping[str, Any],
normalizer: Any, transliterator: Any) -> Any:
""" Prepare the configuration of the analysis module.
This function should prepare all data that can be shared
between instances of this analyser.
Arguments:
rules: A dictionary with the additional configuration options
as specified in the tokenizer configuration.
normalizer: an ICU Transliterator with the compiled
global normalization rules.
transliterator: an ICU Transliterator with the compiled
global transliteration rules.
Returns:
A data object with configuration data. This will be handed
as is into the `create()` function and may be
used freely by the analysis module as needed.
"""
def create(self, normalizer: Any, transliterator: Any, config: Any) -> Analyzer:
""" Create a new instance of the analyser.
A separate instance of the analyser is created for each thread
when used in multi-threading context.
Arguments:
normalizer: an ICU Transliterator with the compiled normalization
rules.
transliterator: an ICU Transliterator with the compiled
transliteration rules.
config: The object that was returned by the call to configure().
Returns:
A new analyzer instance. This must be an object that implements
the Analyzer protocol.
"""
| 4,057 | 40.835052 | 84 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/token_analysis/generic_mutation.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Creator for mutation variants for the generic token analysis.
"""
from typing import Sequence, Iterable, Iterator, Tuple
import itertools
import logging
import re
from nominatim.errors import UsageError
LOG = logging.getLogger()
def _zigzag(outer: Iterable[str], inner: Iterable[str]) -> Iterator[str]:
return itertools.chain.from_iterable(itertools.zip_longest(outer, inner, fillvalue=''))
class MutationVariantGenerator:
""" Generates name variants by applying a regular expression to the name
and replacing it with one or more variants. When the regular expression
matches more than once, each occurrence is replaced with all replacement
patterns.
"""
def __init__(self, pattern: str, replacements: Sequence[str]):
self.pattern = re.compile(pattern)
self.replacements = replacements
if self.pattern.groups > 0:
LOG.fatal("The mutation pattern %s contains a capturing group. "
"This is not allowed.", pattern)
raise UsageError("Bad mutation pattern in configuration.")
def generate(self, names: Iterable[str]) -> Iterator[str]:
""" Generator function for the name variants. 'names' is an iterable
over a set of names for which the variants are to be generated.
"""
for name in names:
parts = self.pattern.split(name)
if len(parts) == 1:
yield name
else:
for seps in self._fillers(len(parts)):
yield ''.join(_zigzag(parts, seps))
def _fillers(self, num_parts: int) -> Iterator[Tuple[str, ...]]:
""" Returns a generator for strings to join the given number of string
parts in all possible combinations.
"""
return itertools.product(self.replacements, repeat=num_parts - 1)
| 2,073 | 34.758621 | 91 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/token_analysis/postcodes.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Specialized processor for postcodes. Supports a 'lookup' variant of the
token, which produces variants with optional spaces.
"""
from typing import Any, List
from nominatim.tokenizer.token_analysis.generic_mutation import MutationVariantGenerator
from nominatim.data.place_name import PlaceName
### Configuration section
def configure(*_: Any) -> None:
""" All behaviour is currently hard-coded.
"""
return None
### Analysis section
def create(normalizer: Any, transliterator: Any, config: None) -> 'PostcodeTokenAnalysis': # pylint: disable=W0613
""" Create a new token analysis instance for this module.
"""
return PostcodeTokenAnalysis(normalizer, transliterator)
class PostcodeTokenAnalysis:
""" Special normalization and variant generation for postcodes.
This analyser must not be used with anything but postcodes as
it follows some special rules: the canonical ID is the form that
is used for the output. `compute_variants` then needs to ensure that
the generated variants once more follow the standard normalization
and transliteration, so that postcodes are correctly recognised by
the search algorithm.
"""
def __init__(self, norm: Any, trans: Any) -> None:
self.norm = norm
self.trans = trans
self.mutator = MutationVariantGenerator(' ', (' ', ''))
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the standard form of the postcode.
"""
return name.name.strip().upper()
def compute_variants(self, norm_name: str) -> List[str]:
""" Compute the spelling variants for the given normalized postcode.
Takes the canonical form of the postcode, normalizes it using the
standard rules and then creates variants of the result where
all spaces are optional.
"""
# Postcodes follow their own transliteration rules.
# Make sure at this point, that the terms are normalized in a way
# that they are searchable with the standard transliteration rules.
return [self.trans.transliterate(term) for term in
self.mutator.generate([self.norm.transliterate(norm_name)]) if term]
| 2,446 | 36.075758 | 114 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/token_analysis/config_variants.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Parser for configuration for variants.
"""
from typing import Any, Iterator, Tuple, List, Optional, Set, NamedTuple
from collections import defaultdict
import itertools
import re
from nominatim.config import flatten_config_list
from nominatim.errors import UsageError
class ICUVariant(NamedTuple):
""" A single replacement rule for variant creation.
"""
source: str
replacement: str
def get_variant_config(in_rules: Any,
normalizer: Any) -> Tuple[List[Tuple[str, List[str]]], str]:
""" Convert the variant definition from the configuration into
replacement sets.
Returns a tuple containing the replacement set and the list of characters
used in the replacements.
"""
immediate = defaultdict(list)
chars: Set[str] = set()
if in_rules:
vset: Set[ICUVariant] = set()
rules = flatten_config_list(in_rules, 'variants')
vmaker = _VariantMaker(normalizer)
for section in rules:
for rule in (section.get('words') or []):
vset.update(vmaker.compute(rule))
# Intermediate reorder by source. Also compute required character set.
for variant in vset:
if variant.source[-1] == ' ' and variant.replacement[-1] == ' ':
replstr = variant.replacement[:-1]
else:
replstr = variant.replacement
immediate[variant.source].append(replstr)
chars.update(variant.source)
return list(immediate.items()), ''.join(chars)
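# Rule syntax sketch (as parsed by _VariantMaker below; the words are
# illustrative): '<words> => <replacements>' replaces the source spelling,
# '<words> -> <replacements>' keeps the original spelling as well. A '~'
# marks a decomposable word boundary, '^' and '$' anchor the word to the
# start or end of the name:
#
#   road => rd
#   ~street -> st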
class _VariantMaker:
""" Generator for all necessary ICUVariants from a single variant rule.
All text in rules is normalized to make sure the variants match later.
"""
def __init__(self, normalizer: Any) -> None:
self.norm = normalizer
def compute(self, rule: Any) -> Iterator[ICUVariant]:
""" Generator for all ICUVariant tuples from a single variant rule.
"""
parts = re.split(r'(\|)?([=-])>', rule)
if len(parts) != 4:
raise UsageError(f"Syntax error in variant rule: {rule}")
decompose = parts[1] is None
src_terms = [self._parse_variant_word(t) for t in parts[0].split(',')]
repl_terms = (self.norm.transliterate(t).strip() for t in parts[3].split(','))
# If the source should be kept, add a 1:1 replacement
if parts[2] == '-':
for src in src_terms:
if src:
for froms, tos in _create_variants(*src, src[0], decompose):
yield ICUVariant(froms, tos)
for src, repl in itertools.product(src_terms, repl_terms):
if src and repl:
for froms, tos in _create_variants(*src, repl, decompose):
yield ICUVariant(froms, tos)
def _parse_variant_word(self, name: str) -> Optional[Tuple[str, str, str]]:
name = name.strip()
match = re.fullmatch(r'([~^]?)([^~$^]*)([~$]?)', name)
if match is None or (match.group(1) == '~' and match.group(3) == '~'):
raise UsageError(f"Invalid variant word descriptor '{name}'")
norm_name = self.norm.transliterate(match.group(2)).strip()
if not norm_name:
return None
return norm_name, match.group(1), match.group(3)
_FLAG_MATCH = {'^': '^ ',
'$': ' ^',
'': ' '}
def _create_variants(src: str, preflag: str, postflag: str,
repl: str, decompose: bool) -> Iterator[Tuple[str, str]]:
if preflag == '~':
postfix = _FLAG_MATCH[postflag]
# suffix decomposition
src = src + postfix
repl = repl + postfix
yield src, repl
yield ' ' + src, ' ' + repl
if decompose:
yield src, ' ' + repl
yield ' ' + src, repl
elif postflag == '~':
# prefix decomposition
prefix = _FLAG_MATCH[preflag]
src = prefix + src
repl = prefix + repl
yield src, repl
yield src + ' ', repl + ' '
if decompose:
yield src, repl + ' '
yield src + ' ', repl
else:
prefix = _FLAG_MATCH[preflag]
postfix = _FLAG_MATCH[postflag]
yield prefix + src + postfix, prefix + repl + postfix
| 4,506 | 31.192857 | 86 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/token_analysis/housenumbers.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Specialized processor for housenumbers. Analyses common housenumber patterns
and creates variants for them.
"""
from typing import Any, List, cast
import re
from nominatim.data.place_name import PlaceName
from nominatim.tokenizer.token_analysis.generic_mutation import MutationVariantGenerator
RE_NON_DIGIT = re.compile('[^0-9]')
RE_DIGIT_ALPHA = re.compile(r'(\d)\s*([^\d\s␣])')
RE_ALPHA_DIGIT = re.compile(r'([^\s\d␣])\s*(\d)')
RE_NAMED_PART = re.compile(r'[a-z]{4}')
### Configuration section
def configure(*_: Any) -> None:
""" All behaviour is currently hard-coded.
"""
return None
### Analysis section
def create(normalizer: Any, transliterator: Any, config: None) -> 'HousenumberTokenAnalysis': # pylint: disable=W0613
""" Create a new token analysis instance for this module.
"""
return HousenumberTokenAnalysis(normalizer, transliterator)
class HousenumberTokenAnalysis:
""" Detects common housenumber patterns and normalizes them.
"""
def __init__(self, norm: Any, trans: Any) -> None:
self.norm = norm
self.trans = trans
self.mutator = MutationVariantGenerator('␣', (' ', ''))
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the normalized form of the housenumber.
"""
# shortcut for number-only numbers, which make up 90% of the data.
if RE_NON_DIGIT.search(name.name) is None:
return name.name
norm = cast(str, self.trans.transliterate(self.norm.transliterate(name.name)))
# If there is a significant non-numeric part, use as is.
if RE_NAMED_PART.search(norm) is None:
# Otherwise add optional spaces between digits and letters.
(norm_opt, cnt1) = RE_DIGIT_ALPHA.subn(r'\1␣\2', norm)
(norm_opt, cnt2) = RE_ALPHA_DIGIT.subn(r'\1␣\2', norm_opt)
# Avoid creating too many variants per number.
if cnt1 + cnt2 <= 4:
return norm_opt
return norm
def compute_variants(self, norm_name: str) -> List[str]:
""" Compute the spelling variants for the given normalized housenumber.
Generates variants for optional spaces (marked with '␣').
"""
return list(self.mutator.generate([norm_name]))
| 2,487 | 34.042254 | 117 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/token_analysis/generic.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Generic processor for names that creates abbreviation variants.
"""
from typing import Mapping, Dict, Any, Iterable, Iterator, Optional, List, cast
import itertools
import datrie
from nominatim.errors import UsageError
from nominatim.data.place_name import PlaceName
from nominatim.tokenizer.token_analysis.config_variants import get_variant_config
from nominatim.tokenizer.token_analysis.generic_mutation import MutationVariantGenerator
### Configuration section
def configure(rules: Mapping[str, Any], normalizer: Any, _: Any) -> Dict[str, Any]:
""" Extract and preprocess the configuration for this module.
"""
config: Dict[str, Any] = {}
config['replacements'], config['chars'] = get_variant_config(rules.get('variants'),
normalizer)
config['variant_only'] = rules.get('mode', '') == 'variant-only'
# parse mutation rules
config['mutations'] = []
for rule in rules.get('mutations', []):
if 'pattern' not in rule:
raise UsageError("Missing field 'pattern' in mutation configuration.")
if not isinstance(rule['pattern'], str):
raise UsageError("Field 'pattern' in mutation configuration "
"must be a simple text field.")
if 'replacements' not in rule:
raise UsageError("Missing field 'replacements' in mutation configuration.")
if not isinstance(rule['replacements'], list):
raise UsageError("Field 'replacements' in mutation configuration "
"must be a list of texts.")
config['mutations'].append((rule['pattern'], rule['replacements']))
return config
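# Illustrative rule set as it might appear in the ICU tokenizer
# configuration (all values are examples only):
#
#   rules = {'mode': 'variant-only',
#            'variants': [{'words': ['road => rd', 'street => st']}],
#            'mutations': [{'pattern': 'ä', 'replacements': ['ä', 'ae']}]}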
### Analysis section
def create(normalizer: Any, transliterator: Any,
config: Mapping[str, Any]) -> 'GenericTokenAnalysis':
""" Create a new token analysis instance for this module.
"""
return GenericTokenAnalysis(normalizer, transliterator, config)
class GenericTokenAnalysis:
""" Collects the different transformation rules for normalisation of names
and provides the functions to apply the transformations.
"""
def __init__(self, norm: Any, to_ascii: Any, config: Mapping[str, Any]) -> None:
self.norm = norm
self.to_ascii = to_ascii
self.variant_only = config['variant_only']
# Set up datrie
if config['replacements']:
self.replacements = datrie.Trie(config['chars'])
for src, repllist in config['replacements']:
self.replacements[src] = repllist
else:
self.replacements = None
# set up mutation rules
self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']]
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the normalized form of the name. This is the standard form
from which possible variants for the name can be derived.
"""
return cast(str, self.norm.transliterate(name.name)).strip()
def compute_variants(self, norm_name: str) -> List[str]:
""" Compute the spelling variants for the given normalized name
and transliterate the result.
"""
variants = self._generate_word_variants(norm_name)
for mutation in self.mutations:
variants = mutation.generate(variants)
return [name for name in self._transliterate_unique_list(norm_name, variants) if name]
def _transliterate_unique_list(self, norm_name: str,
iterable: Iterable[str]) -> Iterator[Optional[str]]:
seen = set()
if self.variant_only:
seen.add(norm_name)
for variant in map(str.strip, iterable):
if variant not in seen:
seen.add(variant)
yield self.to_ascii.transliterate(variant).strip()
def _generate_word_variants(self, norm_name: str) -> Iterable[str]:
baseform = '^ ' + norm_name + ' ^'
baselen = len(baseform)
partials = ['']
startpos = 0
if self.replacements is not None:
pos = 0
force_space = False
while pos < baselen:
full, repl = self.replacements.longest_prefix_item(baseform[pos:],
(None, None))
if full is not None:
done = baseform[startpos:pos]
partials = [v + done + r
for v, r in itertools.product(partials, repl)
if not force_space or r.startswith(' ')]
if len(partials) > 128:
# If too many variants are produced, they are unlikely
# to be helpful. Only use the original term.
startpos = 0
break
startpos = pos + len(full)
if full[-1] == ' ':
startpos -= 1
force_space = True
pos = startpos
else:
pos += 1
force_space = False
# No variants detected? Fast return.
if startpos == 0:
return (norm_name, )
if startpos < baselen:
return (part[1:] + baseform[startpos:-1] for part in partials)
return (part[1:-1] for part in partials)
| 5,698 | 36.741722 | 94 | py |
Nominatim | Nominatim-master/nominatim/tokenizer/token_analysis/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/indexer/indexer.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Main work horse for indexing (computing addresses) the database.
"""
from typing import Optional, Any, cast
import logging
import time
import psycopg2.extras
from nominatim.tokenizer.base import AbstractTokenizer
from nominatim.indexer.progress import ProgressLogger
from nominatim.indexer import runners
from nominatim.db.async_connection import DBConnection, WorkerPool
from nominatim.db.connection import connect, Connection, Cursor
from nominatim.typing import DictCursorResults
LOG = logging.getLogger()
class PlaceFetcher:
""" Asynchronous connection that fetches place details for processing.
"""
def __init__(self, dsn: str, setup_conn: Connection) -> None:
self.wait_time = 0.0
self.current_ids: Optional[DictCursorResults] = None
self.conn: Optional[DBConnection] = DBConnection(dsn,
cursor_factory=psycopg2.extras.DictCursor)
with setup_conn.cursor() as cur:
# need to fetch those manually because register_hstore cannot
# fetch them on an asynchronous connection below.
hstore_oid = cur.scalar("SELECT 'hstore'::regtype::oid")
hstore_array_oid = cur.scalar("SELECT 'hstore[]'::regtype::oid")
psycopg2.extras.register_hstore(self.conn.conn, oid=hstore_oid,
array_oid=hstore_array_oid)
def close(self) -> None:
""" Close the underlying asynchronous connection.
"""
if self.conn:
self.conn.close()
self.conn = None
def fetch_next_batch(self, cur: Cursor, runner: runners.Runner) -> bool:
""" Send a request for the next batch of places.
If details for the places are required, they will be fetched
asynchronously.
Returns true if there is still data available.
"""
ids = cast(Optional[DictCursorResults], cur.fetchmany(100))
if not ids:
self.current_ids = None
return False
assert self.conn is not None
self.current_ids = runner.get_place_details(self.conn, ids)
return True
def get_batch(self) -> DictCursorResults:
""" Get the next batch of data, previously requested with
`fetch_next_batch`.
"""
assert self.conn is not None
assert self.conn.cursor is not None
if self.current_ids is not None and not self.current_ids:
tstart = time.time()
self.conn.wait()
self.wait_time += time.time() - tstart
self.current_ids = cast(Optional[DictCursorResults],
self.conn.cursor.fetchall())
return self.current_ids if self.current_ids is not None else []
def __enter__(self) -> 'PlaceFetcher':
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
assert self.conn is not None
self.conn.wait()
self.close()
class Indexer:
""" Main indexing routine.
"""
def __init__(self, dsn: str, tokenizer: AbstractTokenizer, num_threads: int):
self.dsn = dsn
self.tokenizer = tokenizer
self.num_threads = num_threads
def has_pending(self) -> bool:
""" Check if any data still needs indexing.
This function must only be used after the import has finished.
Otherwise it will be very expensive.
"""
with connect(self.dsn) as conn:
with conn.cursor() as cur:
cur.execute("SELECT 'a' FROM placex WHERE indexed_status > 0 LIMIT 1")
return cur.rowcount > 0
def index_full(self, analyse: bool = True) -> None:
""" Index the complete database. This will first index boundaries
followed by all other objects. When `analyse` is True, then the
database will be analysed at the appropriate places to
ensure that database statistics are updated.
"""
with connect(self.dsn) as conn:
conn.autocommit = True
def _analyze() -> None:
if analyse:
with conn.cursor() as cur:
cur.execute('ANALYZE')
if self.index_by_rank(0, 4) > 0:
_analyze()
if self.index_boundaries(0, 30) > 100:
_analyze()
if self.index_by_rank(5, 25) > 100:
_analyze()
if self.index_by_rank(26, 30) > 1000:
_analyze()
if self.index_postcodes() > 100:
_analyze()
def index_boundaries(self, minrank: int, maxrank: int) -> int:
""" Index only administrative boundaries within the given rank range.
"""
total = 0
LOG.warning("Starting indexing boundaries using %s threads",
self.num_threads)
with self.tokenizer.name_analyzer() as analyzer:
for rank in range(max(minrank, 4), min(maxrank, 26)):
total += self._index(runners.BoundaryRunner(rank, analyzer))
return total
def index_by_rank(self, minrank: int, maxrank: int) -> int:
""" Index all entries of placex in the given rank range (inclusive)
in order of their address rank.
When rank 30 is requested then also interpolations and
places with address rank 0 will be indexed.
"""
total = 0
maxrank = min(maxrank, 30)
LOG.warning("Starting indexing rank (%i to %i) using %i threads",
minrank, maxrank, self.num_threads)
with self.tokenizer.name_analyzer() as analyzer:
for rank in range(max(1, minrank), maxrank + 1):
total += self._index(runners.RankRunner(rank, analyzer), 20 if rank == 30 else 1)
if maxrank == 30:
total += self._index(runners.RankRunner(0, analyzer))
total += self._index(runners.InterpolationRunner(analyzer), 20)
return total
def index_postcodes(self) -> int:
"""Index the entries of the location_postcode table.
"""
LOG.warning("Starting indexing postcodes using %s threads", self.num_threads)
return self._index(runners.PostcodeRunner(), 20)
def update_status_table(self) -> None:
""" Update the status in the status table to 'indexed'.
"""
with connect(self.dsn) as conn:
with conn.cursor() as cur:
cur.execute('UPDATE import_status SET indexed = true')
conn.commit()
def _index(self, runner: runners.Runner, batch: int = 1) -> int:
""" Index a single rank or table. `runner` describes the SQL to use
for indexing. `batch` describes the number of objects that
should be processed with a single SQL statement
"""
LOG.warning("Starting %s (using batch size %s)", runner.name(), batch)
with connect(self.dsn) as conn:
psycopg2.extras.register_hstore(conn)
with conn.cursor() as cur:
total_tuples = cur.scalar(runner.sql_count_objects())
LOG.debug("Total number of rows: %i", total_tuples)
conn.commit()
progress = ProgressLogger(runner.name(), total_tuples)
if total_tuples > 0:
with conn.cursor(name='places') as cur:
cur.execute(runner.sql_get_objects())
with PlaceFetcher(self.dsn, conn) as fetcher:
with WorkerPool(self.dsn, self.num_threads) as pool:
has_more = fetcher.fetch_next_batch(cur, runner)
while has_more:
places = fetcher.get_batch()
# asynchronously get the next batch
has_more = fetcher.fetch_next_batch(cur, runner)
# And insert the current batch
for idx in range(0, len(places), batch):
part = places[idx:idx + batch]
LOG.debug("Processing places: %s", str(part))
runner.index_places(pool.next_free_worker(), part)
progress.add(len(part))
LOG.info("Wait time: fetcher: %.2fs, pool: %.2fs",
fetcher.wait_time, pool.wait_time)
conn.commit()
return progress.done()
| 8,831 | 35.345679 | 97 | py |
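The indexer above is normally driven from Nominatim's command-line layer. A minimal sketch of that flow, not part of the repository; `config` is assumed to be an already loaded nominatim.config.Configuration and the DSN is a placeholder:

# Illustrative sketch, assuming 'config' is a loaded Configuration object.
from nominatim.indexer.indexer import Indexer
from nominatim.tokenizer import factory as tokenizer_factory

tokenizer = tokenizer_factory.get_tokenizer_for_db(config)
indexer = Indexer('dbname=nominatim', tokenizer, num_threads=4)

indexer.index_full(analyse=True)       # low ranks and boundaries first, then the rest
if not indexer.has_pending():          # only cheap after the import has finished
    indexer.update_status_table()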
Nominatim | Nominatim-master/nominatim/indexer/runners.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Mix-ins that provide the actual commands for the indexer for various indexing
tasks.
"""
from typing import Any, List
import functools
from psycopg2 import sql as pysql
import psycopg2.extras
from nominatim.data.place_info import PlaceInfo
from nominatim.tokenizer.base import AbstractAnalyzer
from nominatim.db.async_connection import DBConnection
from nominatim.typing import Query, DictCursorResult, DictCursorResults, Protocol
# pylint: disable=C0111
def _mk_valuelist(template: str, num: int) -> pysql.Composed:
return pysql.SQL(',').join([pysql.SQL(template)] * num)
def _analyze_place(place: DictCursorResult, analyzer: AbstractAnalyzer) -> psycopg2.extras.Json:
return psycopg2.extras.Json(analyzer.process_place(PlaceInfo(place)))
class Runner(Protocol):
def name(self) -> str: ...
def sql_count_objects(self) -> Query: ...
def sql_get_objects(self) -> Query: ...
def get_place_details(self, worker: DBConnection,
ids: DictCursorResults) -> DictCursorResults: ...
def index_places(self, worker: DBConnection, places: DictCursorResults) -> None: ...
class AbstractPlacexRunner:
""" Returns SQL commands for indexing of the placex table.
"""
SELECT_SQL = pysql.SQL('SELECT place_id FROM placex ')
UPDATE_LINE = "(%s, %s::hstore, %s::hstore, %s::int, %s::jsonb)"
def __init__(self, rank: int, analyzer: AbstractAnalyzer) -> None:
self.rank = rank
self.analyzer = analyzer
@functools.lru_cache(maxsize=1)
def _index_sql(self, num_places: int) -> pysql.Composed:
return pysql.SQL(
""" UPDATE placex
SET indexed_status = 0, address = v.addr, token_info = v.ti,
name = v.name, linked_place_id = v.linked_place_id
FROM (VALUES {}) as v(id, name, addr, linked_place_id, ti)
WHERE place_id = v.id
""").format(_mk_valuelist(AbstractPlacexRunner.UPDATE_LINE, num_places))
def get_place_details(self, worker: DBConnection, ids: DictCursorResults) -> DictCursorResults:
worker.perform("""SELECT place_id, extra.*
FROM placex, LATERAL placex_indexing_prepare(placex) as extra
WHERE place_id IN %s""",
(tuple((p[0] for p in ids)), ))
return []
def index_places(self, worker: DBConnection, places: DictCursorResults) -> None:
values: List[Any] = []
for place in places:
for field in ('place_id', 'name', 'address', 'linked_place_id'):
values.append(place[field])
values.append(_analyze_place(place, self.analyzer))
worker.perform(self._index_sql(len(places)), values)
class RankRunner(AbstractPlacexRunner):
""" Returns SQL commands for indexing one rank within the placex table.
"""
def name(self) -> str:
return f"rank {self.rank}"
def sql_count_objects(self) -> pysql.Composed:
return pysql.SQL("""SELECT count(*) FROM placex
WHERE rank_address = {} and indexed_status > 0
""").format(pysql.Literal(self.rank))
def sql_get_objects(self) -> pysql.Composed:
return self.SELECT_SQL + pysql.SQL(
"""WHERE indexed_status > 0 and rank_address = {}
ORDER BY geometry_sector
""").format(pysql.Literal(self.rank))
class BoundaryRunner(AbstractPlacexRunner):
""" Returns SQL commands for indexing the administrative boundaries
of a certain rank.
"""
def name(self) -> str:
return f"boundaries rank {self.rank}"
def sql_count_objects(self) -> pysql.Composed:
return pysql.SQL("""SELECT count(*) FROM placex
WHERE indexed_status > 0
AND rank_search = {}
AND class = 'boundary' and type = 'administrative'
""").format(pysql.Literal(self.rank))
def sql_get_objects(self) -> pysql.Composed:
return self.SELECT_SQL + pysql.SQL(
"""WHERE indexed_status > 0 and rank_search = {}
and class = 'boundary' and type = 'administrative'
ORDER BY partition, admin_level
""").format(pysql.Literal(self.rank))
class InterpolationRunner:
""" Returns SQL commands for indexing the address interpolation table
location_property_osmline.
"""
def __init__(self, analyzer: AbstractAnalyzer) -> None:
self.analyzer = analyzer
def name(self) -> str:
return "interpolation lines (location_property_osmline)"
def sql_count_objects(self) -> str:
return """SELECT count(*) FROM location_property_osmline
WHERE indexed_status > 0"""
def sql_get_objects(self) -> str:
return """SELECT place_id
FROM location_property_osmline
WHERE indexed_status > 0
ORDER BY geometry_sector"""
def get_place_details(self, worker: DBConnection, ids: DictCursorResults) -> DictCursorResults:
worker.perform("""SELECT place_id, get_interpolation_address(address, osm_id) as address
FROM location_property_osmline WHERE place_id IN %s""",
(tuple((p[0] for p in ids)), ))
return []
@functools.lru_cache(maxsize=1)
def _index_sql(self, num_places: int) -> pysql.Composed:
return pysql.SQL("""UPDATE location_property_osmline
SET indexed_status = 0, address = v.addr, token_info = v.ti
FROM (VALUES {}) as v(id, addr, ti)
WHERE place_id = v.id
""").format(_mk_valuelist("(%s, %s::hstore, %s::jsonb)", num_places))
def index_places(self, worker: DBConnection, places: DictCursorResults) -> None:
values: List[Any] = []
for place in places:
values.extend((place[x] for x in ('place_id', 'address')))
values.append(_analyze_place(place, self.analyzer))
worker.perform(self._index_sql(len(places)), values)
class PostcodeRunner(Runner):
""" Provides the SQL commands for indexing the location_postcode table.
"""
def name(self) -> str:
return "postcodes (location_postcode)"
def sql_count_objects(self) -> str:
return 'SELECT count(*) FROM location_postcode WHERE indexed_status > 0'
def sql_get_objects(self) -> str:
return """SELECT place_id FROM location_postcode
WHERE indexed_status > 0
ORDER BY country_code, postcode"""
def get_place_details(self, worker: DBConnection, ids: DictCursorResults) -> DictCursorResults:
return ids
def index_places(self, worker: DBConnection, places: DictCursorResults) -> None:
worker.perform(pysql.SQL("""UPDATE location_postcode SET indexed_status = 0
WHERE place_id IN ({})""")
.format(pysql.SQL(',').join((pysql.Literal(i[0]) for i in places))))
| 7,344 | 36.284264 | 99 | py |
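The Runner protocol above has four degrees of freedom: counting objects, fetching their ids, optionally fetching details and writing the update. A hypothetical minimal runner modelled on PostcodeRunner; the 'example' table and its columns are assumptions for illustration only:

# Illustrative sketch, not part of the repository.
from psycopg2 import sql as pysql
from nominatim.db.async_connection import DBConnection
from nominatim.typing import DictCursorResults

class ExampleRunner:
    def name(self) -> str:
        return "example table"

    def sql_count_objects(self) -> str:
        return 'SELECT count(*) FROM example WHERE indexed_status > 0'

    def sql_get_objects(self) -> str:
        return 'SELECT place_id FROM example WHERE indexed_status > 0'

    def get_place_details(self, worker: DBConnection,
                          ids: DictCursorResults) -> DictCursorResults:
        return ids  # no extra details needed, same as PostcodeRunner

    def index_places(self, worker: DBConnection,
                     places: DictCursorResults) -> None:
        worker.perform(
            pysql.SQL("UPDATE example SET indexed_status = 0 WHERE place_id IN ({})")
                 .format(pysql.SQL(',').join(pysql.Literal(p[0]) for p in places)))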
Nominatim | Nominatim-master/nominatim/indexer/progress.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helpers for progress logging.
"""
import logging
from datetime import datetime
LOG = logging.getLogger()
INITIAL_PROGRESS = 10
class ProgressLogger:
""" Tracks and prints progress for the indexing process.
`name` is the name of the indexing step being tracked.
`total` sets up the total number of items that need processing.
`log_interval` denotes the interval in seconds at which progress
should be reported.
"""
def __init__(self, name: str, total: int, log_interval: int = 1) -> None:
self.name = name
self.total_places = total
self.done_places = 0
self.rank_start_time = datetime.now()
self.log_interval = log_interval
self.next_info = INITIAL_PROGRESS if LOG.isEnabledFor(logging.WARNING) else total + 1
def add(self, num: int = 1) -> None:
""" Mark `num` places as processed. Print a log message if the
logging is at least info and the log interval has passed.
"""
self.done_places += num
if self.done_places < self.next_info:
return
now = datetime.now()
done_time = (now - self.rank_start_time).total_seconds()
if done_time < 2:
self.next_info = self.done_places + INITIAL_PROGRESS
return
places_per_sec = self.done_places / done_time
eta = (self.total_places - self.done_places) / places_per_sec
LOG.warning("Done %d in %d @ %.3f per second - %s ETA (seconds): %.2f",
self.done_places, int(done_time),
places_per_sec, self.name, eta)
self.next_info += int(places_per_sec) * self.log_interval
def done(self) -> int:
""" Print final statistics about the progress.
"""
rank_end_time = datetime.now()
if rank_end_time == self.rank_start_time:
diff_seconds = 0.0
places_per_sec = float(self.done_places)
else:
diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
places_per_sec = self.done_places / diff_seconds
LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n",
self.done_places, self.total_places, int(diff_seconds),
places_per_sec, self.name)
return self.done_places
| 2,551 | 33.026667 | 93 | py |
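A short usage sketch for the progress logger, assuming warning-level logging is enabled so that the reports are emitted:

# Illustrative sketch, not part of the repository.
import logging
from nominatim.indexer.progress import ProgressLogger

logging.basicConfig(level=logging.WARNING)

progress = ProgressLogger('example step', total=1000)
for _ in range(10):
    # ... process a batch of 100 places here ...
    progress.add(100)
assert progress.done() == 1000   # prints the final statistics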
Nominatim | Nominatim-master/nominatim/indexer/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/utils/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/utils/json_writer.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Streaming JSON encoder.
"""
from typing import Any, TypeVar, Optional, Callable
import io
try:
import ujson as json
except ModuleNotFoundError:
import json # type: ignore[no-redef]
T = TypeVar('T') # pylint: disable=invalid-name
class JsonWriter:
""" JSON encoder that renders the output directly into an output
        stream. This is a very simple writer which produces JSON in
        as compact a form as possible.
The writer does not check for syntactic correctness. It is the
responsibility of the caller to call the write functions in an
order that produces correct JSON.
All functions return the writer object itself so that function
calls can be chained.
"""
def __init__(self) -> None:
self.data = io.StringIO()
self.pending = ''
def __call__(self) -> str:
""" Return the rendered JSON content as a string.
The writer remains usable after calling this function.
"""
if self.pending:
assert self.pending in (']', '}')
self.data.write(self.pending)
self.pending = ''
return self.data.getvalue()
def start_object(self) -> 'JsonWriter':
""" Write the open bracket of a JSON object.
"""
if self.pending:
self.data.write(self.pending)
self.pending = '{'
return self
def end_object(self) -> 'JsonWriter':
""" Write the closing bracket of a JSON object.
"""
assert self.pending in (',', '{', '')
if self.pending == '{':
self.data.write(self.pending)
self.pending = '}'
return self
def start_array(self) -> 'JsonWriter':
""" Write the opening bracket of a JSON array.
"""
if self.pending:
self.data.write(self.pending)
self.pending = '['
return self
def end_array(self) -> 'JsonWriter':
""" Write the closing bracket of a JSON array.
"""
assert self.pending in (',', '[', '')
if self.pending == '[':
self.data.write(self.pending)
self.pending = ']'
return self
def key(self, name: str) -> 'JsonWriter':
""" Write the key string of a JSON object.
"""
assert self.pending
self.data.write(self.pending)
self.data.write(json.dumps(name, ensure_ascii=False))
self.pending = ':'
return self
def value(self, value: Any) -> 'JsonWriter':
""" Write out a value as JSON. The function uses the json.dumps()
function for encoding the JSON. Thus any value that can be
encoded by that function is permissible here.
"""
return self.raw(json.dumps(value, ensure_ascii=False))
def float(self, value: float, precision: int) -> 'JsonWriter':
""" Write out a float value with the given precision.
"""
return self.raw(f"{value:0.{precision}f}")
def next(self) -> 'JsonWriter':
""" Write out a delimiter comma between JSON object or array elements.
"""
if self.pending:
self.data.write(self.pending)
self.pending = ','
return self
def raw(self, raw_json: str) -> 'JsonWriter':
""" Write out the given value as is. This function is useful if
a value is already available in JSON format.
"""
if self.pending:
self.data.write(self.pending)
self.pending = ''
self.data.write(raw_json)
return self
def keyval(self, key: str, value: Any) -> 'JsonWriter':
""" Write out an object element with the given key and value.
This is a shortcut for calling 'key()', 'value()' and 'next()'.
"""
self.key(key)
self.value(value)
return self.next()
def keyval_not_none(self, key: str, value: Optional[T],
transform: Optional[Callable[[T], Any]] = None) -> 'JsonWriter':
""" Write out an object element only if the value is not None.
If 'transform' is given, it must be a function that takes the
value type and returns a JSON encodable type. The transform
function will be called before the value is written out.
"""
if value is not None:
self.key(key)
self.value(transform(value) if transform else value)
self.next()
return self
| 4,693 | 30.293333 | 88 | py |
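Because the writer is purely stream-oriented, the caller is responsible for balancing brackets and commas. A small sketch of correct call ordering, not part of the repository:

# Illustrative sketch of chained writer calls.
from nominatim.utils.json_writer import JsonWriter

out = JsonWriter()
out.start_object()\
   .keyval('name', 'Berlin')\
   .key('centroid').start_array()\
       .float(13.3888599, 7).next().float(52.5170365, 7)\
   .end_array().next()\
   .end_object()

print(out())   # {"name":"Berlin","centroid":[13.3888599,52.5170365]}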
Nominatim | Nominatim-master/nominatim/utils/centroid.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for computation of centroids.
"""
from typing import Tuple, Any
from collections.abc import Collection
class PointsCentroid:
""" Centroid computation from single points using an online algorithm.
More points may be added at any time.
Coordinates are internally treated as a 7-digit fixed-point float
(i.e. in OSM style).
"""
def __init__(self) -> None:
self.sum_x = 0
self.sum_y = 0
self.count = 0
def centroid(self) -> Tuple[float, float]:
""" Return the centroid of all points collected so far.
"""
if self.count == 0:
raise ValueError("No points available for centroid.")
return (float(self.sum_x/self.count)/10000000,
float(self.sum_y/self.count)/10000000)
def __len__(self) -> int:
return self.count
def __iadd__(self, other: Any) -> 'PointsCentroid':
if isinstance(other, Collection) and len(other) == 2:
if all(isinstance(p, (float, int)) for p in other):
x, y = other
self.sum_x += int(x * 10000000)
self.sum_y += int(y * 10000000)
self.count += 1
return self
raise ValueError("Can only add 2-element tuples to centroid.")
| 1,510 | 29.22 | 74 | py |
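Usage sketch, not part of the repository: points are added with `+=` and the running centroid can be read back at any time.

# Illustrative sketch of the online centroid computation.
from nominatim.utils.centroid import PointsCentroid

points = PointsCentroid()
points += (10.0, 4.0)    # any 2-element collection of numbers is accepted
points += (11.0, 5.0)

assert len(points) == 2
assert points.centroid() == (10.5, 4.5)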
Nominatim | Nominatim-master/nominatim/data/postcode_format.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for formatting postcodes according to their country-specific
format.
"""
from typing import Any, Mapping, Optional, Set, Match
import re
from nominatim.errors import UsageError
from nominatim.data import country_info
class CountryPostcodeMatcher:
""" Matches and formats a postcode according to a format definition
of the given country.
"""
def __init__(self, country_code: str, config: Mapping[str, Any]) -> None:
if 'pattern' not in config:
raise UsageError("Field 'pattern' required for 'postcode' "
f"for country '{country_code}'")
pc_pattern = config['pattern'].replace('d', '[0-9]').replace('l', '[A-Z]')
self.norm_pattern = re.compile(f'\\s*(?:{country_code.upper()}[ -]?)?(.*)\\s*')
self.pattern = re.compile(pc_pattern)
self.output = config.get('output', r'\g<0>')
def match(self, postcode: str) -> Optional[Match[str]]:
""" Match the given postcode against the postcode pattern for this
matcher. Returns a `re.Match` object if the match was successful
and None otherwise.
"""
# Upper-case, strip spaces and leading country code.
normalized = self.norm_pattern.fullmatch(postcode.upper())
if normalized:
return self.pattern.fullmatch(normalized.group(1))
return None
def normalize(self, match: Match[str]) -> str:
""" Return the default format of the postcode for the given match.
`match` must be a `re.Match` object previously returned by
`match()`
"""
return match.expand(self.output)
class PostcodeFormatter:
""" Container for different postcode formats of the world and
access functions.
"""
def __init__(self) -> None:
        # Objects without a country code can't have a postcode by definition.
self.country_without_postcode: Set[Optional[str]] = {None}
self.country_matcher = {}
self.default_matcher = CountryPostcodeMatcher('', {'pattern': '.*'})
for ccode, prop in country_info.iterate('postcode'):
if prop is False:
self.country_without_postcode.add(ccode)
elif isinstance(prop, dict):
self.country_matcher[ccode] = CountryPostcodeMatcher(ccode, prop)
else:
raise UsageError(f"Invalid entry 'postcode' for country '{ccode}'")
def set_default_pattern(self, pattern: str) -> None:
""" Set the postcode match pattern to use, when a country does not
have a specific pattern.
"""
self.default_matcher = CountryPostcodeMatcher('', {'pattern': pattern})
def get_matcher(self, country_code: Optional[str]) -> Optional[CountryPostcodeMatcher]:
""" Return the CountryPostcodeMatcher for the given country.
Returns None if the country doesn't have a postcode and the
default matcher if there is no specific matcher configured for
the country.
"""
if country_code in self.country_without_postcode:
return None
assert country_code is not None
return self.country_matcher.get(country_code, self.default_matcher)
def match(self, country_code: Optional[str], postcode: str) -> Optional[Match[str]]:
""" Match the given postcode against the postcode pattern for this
matcher. Returns a `re.Match` object if the country has a pattern
and the match was successful or None if the match failed.
"""
if country_code in self.country_without_postcode:
return None
assert country_code is not None
return self.country_matcher.get(country_code, self.default_matcher).match(postcode)
def normalize(self, country_code: str, match: Match[str]) -> str:
""" Return the default format of the postcode for the given match.
`match` must be a `re.Match` object previously returned by
`match()`
"""
return self.country_matcher.get(country_code, self.default_matcher).normalize(match)
| 4,356 | 36.886957 | 92 | py |
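A matcher can also be built stand-alone, without the country configuration files. In the pattern, 'd' stands for a digit and 'l' for an upper-case letter. A sketch, not part of the repository:

# Illustrative sketch of a hand-built postcode matcher.
from nominatim.data.postcode_format import CountryPostcodeMatcher

matcher = CountryPostcodeMatcher('de', {'pattern': 'ddddd'})

m = matcher.match('de-12345')    # case and country prefix are normalized away
assert m is not None
assert matcher.normalize(m) == '12345'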
Nominatim | Nominatim-master/nominatim/data/place_info.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Wrapper around place information the indexer gets from the database and hands to
the tokenizer.
"""
from typing import Optional, Mapping, Any, Tuple
class PlaceInfo:
""" This data class contains all information the tokenizer can access
about a place.
"""
def __init__(self, info: Mapping[str, Any]) -> None:
self._info = info
@property
def name(self) -> Optional[Mapping[str, str]]:
""" A dictionary with the names of the place. Keys and values represent
the full key and value of the corresponding OSM tag. Which tags
are saved as names is determined by the import style.
The property may be None if the place has no names.
"""
return self._info.get('name')
@property
def address(self) -> Optional[Mapping[str, str]]:
""" A dictionary with the address elements of the place. They key
usually corresponds to the suffix part of the key of an OSM
'addr:*' or 'isin:*' tag. There are also some special keys like
`country` or `country_code` which merge OSM keys that contain
the same information. See [Import Styles][1] for details.
The property may be None if the place has no address information.
[1]: ../customize/Import-Styles.md
"""
return self._info.get('address')
@property
def country_code(self) -> Optional[str]:
""" The country code of the country the place is in. Guaranteed
to be a two-letter lower-case string. If the place is not inside
any country, the property is set to None.
"""
return self._info.get('country_code')
@property
def rank_address(self) -> int:
""" The [rank address][1] before any rank correction is applied.
[1]: ../customize/Ranking.md#address-rank
"""
return self._info.get('rank_address', 0)
@property
def centroid(self) -> Optional[Tuple[float, float]]:
""" A center point of the place in WGS84. May be None when the
geometry of the place is unknown.
"""
x, y = self._info.get('centroid_x'), self._info.get('centroid_y')
return None if x is None or y is None else (x, y)
def is_a(self, key: str, value: str) -> bool:
""" Set to True when the place's primary tag corresponds to the given
key and value.
"""
return self._info.get('class') == key and self._info.get('type') == value
def is_country(self) -> bool:
""" Set to True when the place is a valid country boundary.
"""
return self.rank_address == 4 \
and self.is_a('boundary', 'administrative') \
and self.country_code is not None
| 2,999 | 33.482759 | 81 | py |
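A sketch of how the tokenizer side sees such an object, using a hand-built info mapping of the kind the indexer would pass in (the values are made up for illustration):

# Illustrative sketch, not part of the repository.
from nominatim.data.place_info import PlaceInfo

place = PlaceInfo({'name': {'name': 'Nowhere'},
                   'class': 'place', 'type': 'hamlet',
                   'country_code': 'de', 'rank_address': 20,
                   'centroid_x': 10.0, 'centroid_y': 52.0})

assert place.is_a('place', 'hamlet')
assert place.centroid == (10.0, 52.0)
assert not place.is_country()    # rank_address 20 is not a country boundary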
Nominatim | Nominatim-master/nominatim/data/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/data/country_info.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for importing and managing static country information.
"""
from typing import Dict, Any, Iterable, Tuple, Optional, Container, overload
from pathlib import Path
import psycopg2.extras
from nominatim.db import utils as db_utils
from nominatim.db.connection import connect, Connection
from nominatim.errors import UsageError
from nominatim.config import Configuration
from nominatim.tokenizer.base import AbstractTokenizer
def _flatten_name_list(names: Any) -> Dict[str, str]:
if names is None:
return {}
if not isinstance(names, dict):
raise UsageError("Expected key-value list for names in country_settings.py")
flat = {}
for prefix, remain in names.items():
if isinstance(remain, str):
flat[prefix] = remain
elif not isinstance(remain, dict):
raise UsageError("Entries in names must be key-value lists.")
else:
for suffix, name in remain.items():
if suffix == 'default':
flat[prefix] = name
else:
flat[f'{prefix}:{suffix}'] = name
return flat
class _CountryInfo:
""" Caches country-specific properties from the configuration file.
"""
def __init__(self) -> None:
self._info: Dict[str, Dict[str, Any]] = {}
def load(self, config: Configuration) -> None:
""" Load the country properties from the configuration files,
if they are not loaded yet.
"""
if not self._info:
self._info = config.load_sub_configuration('country_settings.yaml')
for prop in self._info.values():
# Convert languages into a list for simpler handling.
if 'languages' not in prop:
prop['languages'] = []
elif not isinstance(prop['languages'], list):
prop['languages'] = [x.strip()
for x in prop['languages'].split(',')]
prop['names'] = _flatten_name_list(prop.get('names'))
def items(self) -> Iterable[Tuple[str, Dict[str, Any]]]:
""" Return tuples of (country_code, property dict) as iterable.
"""
return self._info.items()
def get(self, country_code: str) -> Dict[str, Any]:
""" Get country information for the country with the given country code.
"""
return self._info.get(country_code, {})
_COUNTRY_INFO = _CountryInfo()
def setup_country_config(config: Configuration) -> None:
""" Load country properties from the configuration file.
Needs to be called before using any other functions in this
file.
"""
_COUNTRY_INFO.load(config)
@overload
def iterate() -> Iterable[Tuple[str, Dict[str, Any]]]:
...
@overload
def iterate(prop: str) -> Iterable[Tuple[str, Any]]:
...
def iterate(prop: Optional[str] = None) -> Iterable[Tuple[str, Dict[str, Any]]]:
""" Iterate over country code and properties.
When `prop` is None, all countries are returned with their complete
set of properties.
If `prop` is given, then only countries are returned where the
given property is set. The second item of the tuple contains only
the content of the given property.
"""
if prop is None:
return _COUNTRY_INFO.items()
return ((c, p[prop]) for c, p in _COUNTRY_INFO.items() if prop in p)
def setup_country_tables(dsn: str, sql_dir: Path, ignore_partitions: bool = False) -> None:
""" Create and populate the tables with basic static data that provides
the background for geocoding. Data is assumed to not yet exist.
"""
db_utils.execute_file(dsn, sql_dir / 'country_osm_grid.sql.gz')
params = []
for ccode, props in _COUNTRY_INFO.items():
if ccode is not None and props is not None:
if ignore_partitions:
partition = 0
else:
partition = props.get('partition', 0)
lang = props['languages'][0] if len(
props['languages']) == 1 else None
params.append((ccode, props['names'], lang, partition))
with connect(dsn) as conn:
with conn.cursor() as cur:
psycopg2.extras.register_hstore(cur)
cur.execute(
""" CREATE TABLE public.country_name (
country_code character varying(2),
name public.hstore,
derived_name public.hstore,
country_default_language_code text,
partition integer
); """)
cur.execute_values(
""" INSERT INTO public.country_name
(country_code, name, country_default_language_code, partition) VALUES %s
""", params)
conn.commit()
def create_country_names(conn: Connection, tokenizer: AbstractTokenizer,
languages: Optional[Container[str]] = None) -> None:
""" Add default country names to search index. `languages` is a comma-
separated list of language codes as used in OSM. If `languages` is not
empty then only name translations for the given languages are added
to the index.
"""
def _include_key(key: str) -> bool:
return ':' not in key or not languages or \
key[key.index(':') + 1:] in languages
with conn.cursor() as cur:
psycopg2.extras.register_hstore(cur)
cur.execute("""SELECT country_code, name FROM country_name
WHERE country_code is not null""")
with tokenizer.name_analyzer() as analyzer:
for code, name in cur:
names = {'countrycode': code}
# country names (only in languages as provided)
if name:
names.update({k : v for k, v in name.items() if _include_key(k)})
analyzer.add_country_names(code, names)
conn.commit()
| 6,230 | 34.403409 | 92 | py |
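Typical read access after the configuration has been loaded. A sketch, not part of the repository; `config` is assumed to be a loaded nominatim.config.Configuration:

# Illustrative sketch of the country property accessors.
from nominatim.data import country_info

country_info.setup_country_config(config)

# All countries with their complete property set:
for ccode, props in country_info.iterate():
    print(ccode, props['languages'])

# Only countries that define a postcode format:
for ccode, postcode_prop in country_info.iterate('postcode'):
    print(ccode, postcode_prop)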
Nominatim | Nominatim-master/nominatim/data/place_name.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Data class for a single name of a place.
"""
from typing import Optional, Dict, Mapping
class PlaceName:
""" Each name and address part of a place is encapsulated in an object of
this class. It saves not only the name proper but also describes the
kind of name with two properties:
* `kind` describes the name of the OSM key used without any suffixes
(i.e. the part after the colon removed)
* `suffix` contains the suffix of the OSM tag, if any. The suffix
is the part of the key after the first colon.
In addition to that, a name may have arbitrary additional attributes.
        How attributes are used depends on the sanitizers and token analysers.
The exception is the 'analyzer' attribute. This attribute determines
which token analysis module will be used to finalize the treatment of
names.
"""
def __init__(self, name: str, kind: str, suffix: Optional[str]):
self.name = name
self.kind = kind
self.suffix = suffix
self.attr: Dict[str, str] = {}
def __repr__(self) -> str:
return f"PlaceName(name={self.name!r},kind={self.kind!r},suffix={self.suffix!r})"
def clone(self, name: Optional[str] = None,
kind: Optional[str] = None,
suffix: Optional[str] = None,
attr: Optional[Mapping[str, str]] = None) -> 'PlaceName':
""" Create a deep copy of the place name, optionally with the
given parameters replaced. In the attribute list only the given
keys are updated. The list is not replaced completely.
            In particular, the function cannot be used to remove an
attribute from a place name.
"""
newobj = PlaceName(name or self.name,
kind or self.kind,
suffix or self.suffix)
newobj.attr.update(self.attr)
if attr:
newobj.attr.update(attr)
return newobj
def set_attr(self, key: str, value: str) -> None:
""" Add the given property to the name. If the property was already
set, then the value is overwritten.
"""
self.attr[key] = value
def get_attr(self, key: str, default: Optional[str] = None) -> Optional[str]:
""" Return the given property or the value of 'default' if it
is not set.
"""
return self.attr.get(key, default)
def has_attr(self, key: str) -> bool:
""" Check if the given attribute is set.
"""
return key in self.attr
| 2,816 | 34.658228 | 89 | py |
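A sketch of the clone semantics, using hypothetical street-name data (not part of the repository):

# Illustrative sketch of deriving a name variant.
from nominatim.data.place_name import PlaceName

name = PlaceName('Hauptstraße', kind='name', suffix='de')
name.set_attr('analyzer', 'street-names')

variant = name.clone(name='Hauptstrasse', attr={'variant': 'yes'})
assert variant.kind == 'name'
assert variant.get_attr('analyzer') == 'street-names'   # attributes are copied
assert not name.has_attr('variant')                     # the original is untouched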
Nominatim | Nominatim-master/nominatim/server/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/server/starlette/server.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Server implementation using the starlette webserver framework.
"""
from typing import Any, Optional, Mapping, Callable, cast, Coroutine
from pathlib import Path
from starlette.applications import Starlette
from starlette.routing import Route
from starlette.exceptions import HTTPException
from starlette.responses import Response
from starlette.requests import Request
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from nominatim.api import NominatimAPIAsync
import nominatim.api.v1 as api_impl
from nominatim.config import Configuration
class ParamWrapper(api_impl.ASGIAdaptor):
""" Adaptor class for server glue to Starlette framework.
"""
def __init__(self, request: Request) -> None:
self.request = request
def get(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.request.query_params.get(name, default=default)
def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.request.headers.get(name, default)
def error(self, msg: str, status: int = 400) -> HTTPException:
return HTTPException(status, detail=msg,
headers={'content-type': self.content_type})
def create_response(self, status: int, output: str) -> Response:
return Response(output, status_code=status, media_type=self.content_type)
def config(self) -> Configuration:
return cast(Configuration, self.request.app.state.API.config)
def _wrap_endpoint(func: api_impl.EndpointFunc)\
-> Callable[[Request], Coroutine[Any, Any, Response]]:
async def _callback(request: Request) -> Response:
return cast(Response, await func(request.app.state.API, ParamWrapper(request)))
return _callback
def get_application(project_dir: Path,
environ: Optional[Mapping[str, str]] = None,
debug: bool = True) -> Starlette:
""" Create a Nominatim falcon ASGI application.
"""
config = Configuration(project_dir, environ)
routes = []
legacy_urls = config.get_bool('SERVE_LEGACY_URLS')
for name, func in api_impl.ROUTES:
endpoint = _wrap_endpoint(func)
routes.append(Route(f"/{name}", endpoint=endpoint))
if legacy_urls:
routes.append(Route(f"/{name}.php", endpoint=endpoint))
middleware = []
if config.get_bool('CORS_NOACCESSCONTROL'):
middleware.append(Middleware(CORSMiddleware, allow_origins=['*']))
async def _shutdown() -> None:
await app.state.API.close()
app = Starlette(debug=debug, routes=routes, middleware=middleware,
on_shutdown=[_shutdown])
app.state.API = NominatimAPIAsync(project_dir, environ)
return app
def run_wsgi() -> Starlette:
""" Entry point for uvicorn.
"""
return get_application(Path('.'), debug=False)
| 3,127 | 31.583333 | 87 | py |
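Besides the `run_wsgi` entry point, the application can also be served programmatically. A sketch, not part of the repository; the project directory is a placeholder:

# Illustrative sketch of serving the Starlette app with uvicorn.
from pathlib import Path
import uvicorn

from nominatim.server.starlette.server import get_application

app = get_application(Path('/srv/nominatim-project'), debug=False)
uvicorn.run(app, host='127.0.0.1', port=8088)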
Nominatim | Nominatim-master/nominatim/server/starlette/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/nominatim/server/falcon/server.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Server implementation using the falcon webserver framework.
"""
from typing import Optional, Mapping, cast, Any
from pathlib import Path
from falcon.asgi import App, Request, Response
from nominatim.api import NominatimAPIAsync
import nominatim.api.v1 as api_impl
from nominatim.config import Configuration
class HTTPNominatimError(Exception):
""" A special exception class for errors raised during processing.
"""
def __init__(self, msg: str, status: int, content_type: str) -> None:
self.msg = msg
self.status = status
self.content_type = content_type
async def nominatim_error_handler(req: Request, resp: Response, #pylint: disable=unused-argument
exception: HTTPNominatimError,
_: Any) -> None:
""" Special error handler that passes message and content type as
per exception info.
"""
resp.status = exception.status
resp.text = exception.msg
resp.content_type = exception.content_type
class ParamWrapper(api_impl.ASGIAdaptor):
""" Adaptor class for server glue to Falcon framework.
"""
def __init__(self, req: Request, resp: Response,
config: Configuration) -> None:
self.request = req
self.response = resp
self._config = config
def get(self, name: str, default: Optional[str] = None) -> Optional[str]:
return cast(Optional[str], self.request.get_param(name, default=default))
def get_header(self, name: str, default: Optional[str] = None) -> Optional[str]:
return cast(Optional[str], self.request.get_header(name, default=default))
def error(self, msg: str, status: int = 400) -> HTTPNominatimError:
return HTTPNominatimError(msg, status, self.content_type)
def create_response(self, status: int, output: str) -> None:
self.response.status = status
self.response.text = output
self.response.content_type = self.content_type
def config(self) -> Configuration:
return self._config
class EndpointWrapper:
""" Converter for server glue endpoint functions to Falcon request handlers.
"""
def __init__(self, func: api_impl.EndpointFunc, api: NominatimAPIAsync) -> None:
self.func = func
self.api = api
async def on_get(self, req: Request, resp: Response) -> None:
""" Implementation of the endpoint.
"""
await self.func(self.api, ParamWrapper(req, resp, self.api.config))
def get_application(project_dir: Path,
environ: Optional[Mapping[str, str]] = None) -> App:
""" Create a Nominatim Falcon ASGI application.
"""
api = NominatimAPIAsync(project_dir, environ)
app = App(cors_enable=api.config.get_bool('CORS_NOACCESSCONTROL'))
app.add_error_handler(HTTPNominatimError, nominatim_error_handler)
legacy_urls = api.config.get_bool('SERVE_LEGACY_URLS')
for name, func in api_impl.ROUTES:
endpoint = EndpointWrapper(func, api)
app.add_route(f"/{name}", endpoint)
if legacy_urls:
app.add_route(f"/{name}.php", endpoint)
return app
def run_wsgi() -> App:
""" Entry point for uvicorn.
Make sure uvicorn is run from the project directory.
"""
return get_application(Path('.'))
| 3,536 | 30.580357 | 96 | py |
Nominatim | Nominatim-master/nominatim/server/falcon/__init__.py | 0 | 0 | 0 | py | |
Nominatim | Nominatim-master/test/python/mock_legacy_word_table.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Legacy word table for testing with functions to prefill and test contents
of the table.
"""
class MockLegacyWordTable:
""" A word table for testing using legacy word table structure.
"""
def __init__(self, conn):
self.conn = conn
with conn.cursor() as cur:
cur.execute("""CREATE TABLE word (word_id INTEGER,
word_token text,
word text,
class text,
type text,
country_code varchar(2),
search_name_count INTEGER,
operator TEXT)""")
conn.commit()
def add_full_word(self, word_id, word, word_token=None):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO word (word_id, word_token, word)
VALUES (%s, %s, %s)
""", (word_id, ' ' + (word_token or word), word))
self.conn.commit()
def add_special(self, word_token, word, cls, typ, oper):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO word (word_token, word, class, type, operator)
VALUES (%s, %s, %s, %s, %s)
""", (word_token, word, cls, typ, oper))
self.conn.commit()
def add_country(self, country_code, word_token):
with self.conn.cursor() as cur:
cur.execute("INSERT INTO word (word_token, country_code) VALUES(%s, %s)",
(word_token, country_code))
self.conn.commit()
def add_postcode(self, word_token, postcode):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO word (word_token, word, class, type)
VALUES (%s, %s, 'place', 'postcode')
""", (word_token, postcode))
self.conn.commit()
def count(self):
with self.conn.cursor() as cur:
return cur.scalar("SELECT count(*) FROM word")
def count_special(self):
with self.conn.cursor() as cur:
return cur.scalar("SELECT count(*) FROM word WHERE class != 'place'")
def get_special(self):
with self.conn.cursor() as cur:
cur.execute("""SELECT word_token, word, class, type, operator
FROM word WHERE class != 'place'""")
result = set((tuple(row) for row in cur))
assert len(result) == cur.rowcount, "Word table has duplicates."
return result
def get_country(self):
with self.conn.cursor() as cur:
cur.execute("""SELECT country_code, word_token
FROM word WHERE country_code is not null""")
result = set((tuple(row) for row in cur))
assert len(result) == cur.rowcount, "Word table has duplicates."
return result
def get_postcodes(self):
with self.conn.cursor() as cur:
cur.execute("""SELECT word FROM word
WHERE class = 'place' and type = 'postcode'""")
return set((row[0] for row in cur))
def get_partial_words(self):
with self.conn.cursor() as cur:
cur.execute("""SELECT word_token, search_name_count FROM word
WHERE class is null and country_code is null
and not word_token like ' %'""")
return set((tuple(row) for row in cur))
| 3,840 | 37.029703 | 85 | py |
Nominatim | Nominatim-master/test/python/cursor.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Specialised psycopg2 cursor with shortcut functions useful for testing.
"""
import psycopg2.extras
class CursorForTesting(psycopg2.extras.DictCursor):
""" Extension to the DictCursor class that provides execution
short-cuts that simplify writing assertions.
"""
def scalar(self, sql, params=None):
""" Execute a query with a single return value and return this value.
Raises an assertion when not exactly one row is returned.
"""
self.execute(sql, params)
assert self.rowcount == 1
return self.fetchone()[0]
def row_set(self, sql, params=None):
""" Execute a query and return the result as a set of tuples.
Fails when the SQL command returns duplicate rows.
"""
self.execute(sql, params)
result = set((tuple(row) for row in self))
assert len(result) == self.rowcount
return result
def table_exists(self, table):
""" Check that a table with the given name exists in the database.
"""
num = self.scalar("""SELECT count(*) FROM pg_tables
WHERE tablename = %s""", (table, ))
return num == 1
def index_exists(self, table, index):
""" Check that an indexwith the given name exists on the given table.
"""
num = self.scalar("""SELECT count(*) FROM pg_indexes
WHERE tablename = %s and indexname = %s""",
(table, index))
return num == 1
def table_rows(self, table, where=None):
""" Return the number of rows in the given table.
"""
if where is None:
return self.scalar('SELECT count(*) FROM ' + table)
return self.scalar('SELECT count(*) FROM {} WHERE {}'.format(table, where))
def execute_values(self, *args, **kwargs):
""" Execute the execute_values() function on the cursor.
"""
psycopg2.extras.execute_values(self, *args, **kwargs)
| 2,221 | 31.676471 | 83 | py |
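A sketch of the cursor in action, not part of the repository; 'test_db' is a placeholder for a throw-away test database:

# Illustrative sketch of the testing cursor helpers.
import psycopg2
from cursor import CursorForTesting

conn = psycopg2.connect('dbname=test_db')
with conn.cursor(cursor_factory=CursorForTesting) as cur:
    cur.execute('CREATE TABLE dummy (id INT)')
    cur.execute_values('INSERT INTO dummy VALUES %s', [(1,), (2,)])
    assert cur.table_exists('dummy')
    assert cur.table_rows('dummy') == 2
    assert cur.row_set('SELECT id FROM dummy') == {(1,), (2,)}
conn.close()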
Nominatim | Nominatim-master/test/python/conftest.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
import itertools
import sys
from pathlib import Path
import psycopg2
import pytest
# always test against the source
SRC_DIR = (Path(__file__) / '..' / '..' / '..').resolve()
sys.path.insert(0, str(SRC_DIR))
from nominatim.config import Configuration
from nominatim.db import connection
from nominatim.db.sql_preprocessor import SQLPreprocessor
import nominatim.tokenizer.factory
import dummy_tokenizer
import mocks
from cursor import CursorForTesting
@pytest.fixture
def src_dir():
return SRC_DIR
@pytest.fixture
def temp_db(monkeypatch):
""" Create an empty database for the test. The database name is also
exported into NOMINATIM_DATABASE_DSN.
"""
name = 'test_nominatim_python_unittest'
conn = psycopg2.connect(database='postgres')
conn.set_isolation_level(0)
with conn.cursor() as cur:
cur.execute('DROP DATABASE IF EXISTS {}'.format(name))
cur.execute('CREATE DATABASE {}'.format(name))
conn.close()
monkeypatch.setenv('NOMINATIM_DATABASE_DSN', 'dbname=' + name)
yield name
conn = psycopg2.connect(database='postgres')
conn.set_isolation_level(0)
with conn.cursor() as cur:
cur.execute('DROP DATABASE IF EXISTS {}'.format(name))
conn.close()
@pytest.fixture
def dsn(temp_db):
return 'dbname=' + temp_db
@pytest.fixture
def temp_db_with_extensions(temp_db):
conn = psycopg2.connect(database=temp_db)
with conn.cursor() as cur:
cur.execute('CREATE EXTENSION hstore; CREATE EXTENSION postgis;')
conn.commit()
conn.close()
return temp_db
@pytest.fixture
def temp_db_conn(temp_db):
""" Connection to the test database.
"""
with connection.connect('dbname=' + temp_db) as conn:
yield conn
@pytest.fixture
def temp_db_cursor(temp_db):
""" Connection and cursor towards the test database. The connection will
be in auto-commit mode.
"""
conn = psycopg2.connect('dbname=' + temp_db)
conn.set_isolation_level(0)
with conn.cursor(cursor_factory=CursorForTesting) as cur:
yield cur
conn.close()
@pytest.fixture
def table_factory(temp_db_cursor):
""" A fixture that creates new SQL tables, potentially filled with
content.
"""
def mk_table(name, definition='id INT', content=None):
temp_db_cursor.execute('CREATE TABLE {} ({})'.format(name, definition))
if content is not None:
temp_db_cursor.execute_values("INSERT INTO {} VALUES %s".format(name), content)
return mk_table
@pytest.fixture
def def_config():
cfg = Configuration(None)
cfg.set_libdirs(module='.', osm2pgsql='.')
return cfg
@pytest.fixture
def project_env(tmp_path):
projdir = tmp_path / 'project'
projdir.mkdir()
cfg = Configuration(projdir)
cfg.set_libdirs(module='.', osm2pgsql='.')
return cfg
@pytest.fixture
def property_table(table_factory, temp_db_conn):
table_factory('nominatim_properties', 'property TEXT, value TEXT')
return mocks.MockPropertyTable(temp_db_conn)
@pytest.fixture
def status_table(table_factory):
""" Create an empty version of the status table and
the status logging table.
"""
table_factory('import_status',
"""lastimportdate timestamp with time zone NOT NULL,
sequence_id integer,
indexed boolean""")
table_factory('import_osmosis_log',
"""batchend timestamp,
batchseq integer,
batchsize bigint,
starttime timestamp,
endtime timestamp,
event text""")
@pytest.fixture
def place_table(temp_db_with_extensions, table_factory):
""" Create an empty version of the place table.
"""
table_factory('place',
"""osm_id int8 NOT NULL,
osm_type char(1) NOT NULL,
class text NOT NULL,
type text NOT NULL,
name hstore,
admin_level smallint,
address hstore,
extratags hstore,
geometry Geometry(Geometry,4326) NOT NULL""")
@pytest.fixture
def place_row(place_table, temp_db_cursor):
""" A factory for rows in the place table. The table is created as a
prerequisite to the fixture.
"""
psycopg2.extras.register_hstore(temp_db_cursor)
idseq = itertools.count(1001)
def _insert(osm_type='N', osm_id=None, cls='amenity', typ='cafe', names=None,
admin_level=None, address=None, extratags=None, geom=None):
temp_db_cursor.execute("INSERT INTO place VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
(osm_id or next(idseq), osm_type, cls, typ, names,
admin_level, address, extratags,
geom or 'SRID=4326;POINT(0 0)'))
return _insert
@pytest.fixture
def placex_table(temp_db_with_extensions, temp_db_conn):
""" Create an empty version of the place table.
"""
return mocks.MockPlacexTable(temp_db_conn)
@pytest.fixture
def osmline_table(temp_db_with_extensions, table_factory):
table_factory('location_property_osmline',
"""place_id BIGINT,
osm_id BIGINT,
parent_place_id BIGINT,
geometry_sector INTEGER,
indexed_date TIMESTAMP,
startnumber INTEGER,
endnumber INTEGER,
partition SMALLINT,
indexed_status SMALLINT,
linegeo GEOMETRY,
interpolationtype TEXT,
address HSTORE,
postcode TEXT,
country_code VARCHAR(2)""")
@pytest.fixture
def sql_preprocessor_cfg(tmp_path, table_factory, temp_db_with_extensions):
table_factory('country_name', 'partition INT', ((0, ), (1, ), (2, )))
cfg = Configuration(None)
cfg.set_libdirs(module='.', osm2pgsql='.', sql=tmp_path)
return cfg
@pytest.fixture
def sql_preprocessor(sql_preprocessor_cfg, temp_db_conn):
return SQLPreprocessor(temp_db_conn, sql_preprocessor_cfg)
@pytest.fixture
def tokenizer_mock(monkeypatch, property_table):
""" Sets up the configuration so that the test dummy tokenizer will be
loaded when the tokenizer factory is used. Also returns a factory
with which a new dummy tokenizer may be created.
"""
monkeypatch.setenv('NOMINATIM_TOKENIZER', 'dummy')
def _import_dummy(*args, **kwargs):
return dummy_tokenizer
monkeypatch.setattr(nominatim.tokenizer.factory, "_import_tokenizer", _import_dummy)
property_table.set('tokenizer', 'dummy')
def _create_tokenizer():
return dummy_tokenizer.DummyTokenizer(None, None)
return _create_tokenizer
| 7,129 | 28.832636 | 95 | py |
Nominatim | Nominatim-master/test/python/dummy_tokenizer.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tokenizer for testing.
"""
from nominatim.data.place_info import PlaceInfo
from nominatim.config import Configuration
def create(dsn, data_dir):
""" Create a new instance of the tokenizer provided by this module.
"""
return DummyTokenizer(dsn, data_dir)
class DummyTokenizer:
def __init__(self, dsn, data_dir):
self.dsn = dsn
self.data_dir = data_dir
self.init_state = None
self.analyser_cache = {}
def init_new_db(self, *args, **kwargs):
assert self.init_state is None
self.init_state = "new"
def init_from_project(self, config):
assert isinstance(config, Configuration)
assert self.init_state is None
self.init_state = "loaded"
@staticmethod
def finalize_import(_):
pass
def name_analyzer(self):
return DummyNameAnalyzer(self.analyser_cache)
class DummyNameAnalyzer:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __init__(self, cache):
self.analyser_cache = cache
cache['countries'] = []
def close(self):
pass
@staticmethod
def normalize_postcode(postcode):
return postcode
@staticmethod
def update_postcodes_from_db():
pass
def update_special_phrases(self, phrases, should_replace):
self.analyser_cache['special_phrases'] = phrases
def add_country_names(self, code, names):
self.analyser_cache['countries'].append((code, names))
@staticmethod
def process_place(place):
assert isinstance(place, PlaceInfo)
return {}
| 1,869 | 21.804878 | 71 | py |
Nominatim | Nominatim-master/test/python/mocks.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Custom mocks for testing.
"""
import itertools
import psycopg2.extras
from nominatim.db import properties
# This must always point to the mock word table for the default tokenizer.
from mock_icu_word_table import MockIcuWordTable as MockWordTable
class MockPlacexTable:
""" A placex table for testing.
"""
def __init__(self, conn):
self.idseq = itertools.count(10000)
self.conn = conn
with conn.cursor() as cur:
cur.execute("""CREATE TABLE placex (
place_id BIGINT,
parent_place_id BIGINT,
linked_place_id BIGINT,
importance FLOAT,
indexed_date TIMESTAMP,
geometry_sector INTEGER,
rank_address SMALLINT,
rank_search SMALLINT,
partition SMALLINT,
indexed_status SMALLINT,
osm_id int8,
osm_type char(1),
class text,
type text,
name hstore,
admin_level smallint,
address hstore,
extratags hstore,
token_info jsonb,
geometry Geometry(Geometry,4326),
wikipedia TEXT,
country_code varchar(2),
housenumber TEXT,
postcode TEXT,
centroid GEOMETRY(Geometry, 4326))""")
cur.execute("CREATE SEQUENCE IF NOT EXISTS seq_place")
conn.commit()
def add(self, osm_type='N', osm_id=None, cls='amenity', typ='cafe', names=None,
admin_level=None, address=None, extratags=None, geom='POINT(10 4)',
country=None, housenumber=None):
with self.conn.cursor() as cur:
psycopg2.extras.register_hstore(cur)
cur.execute("""INSERT INTO placex (place_id, osm_type, osm_id, class,
type, name, admin_level, address,
housenumber,
extratags, geometry, country_code)
VALUES(nextval('seq_place'), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
(osm_type, osm_id or next(self.idseq), cls, typ, names,
admin_level, address, housenumber, extratags, 'SRID=4326;' + geom,
country))
self.conn.commit()
class MockPropertyTable:
""" A property table for testing.
"""
def __init__(self, conn):
self.conn = conn
def set(self, name, value):
""" Set a property in the table to the given value.
"""
properties.set_property(self.conn, name, value)
def get(self, name):
""" Set a property in the table to the given value.
"""
return properties.get_property(self.conn, name)
| 3,474 | 38.488636 | 104 | py |
Nominatim | Nominatim-master/test/python/mock_icu_word_table.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
ICU word table for testing with functions to prefill and test contents
of the table.
"""
class MockIcuWordTable:
""" A word table for testing using legacy word table structure.
"""
def __init__(self, conn):
self.conn = conn
with conn.cursor() as cur:
cur.execute("""CREATE TABLE word (word_id INTEGER,
word_token text NOT NULL,
type text NOT NULL,
word text,
info jsonb)""")
conn.commit()
def add_full_word(self, word_id, word, word_token=None):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO word (word_id, word_token, type, word, info)
VALUES(%s, %s, 'W', %s, '{}'::jsonb)""",
(word_id, word or word_token, word))
self.conn.commit()
def add_special(self, word_token, word, cls, typ, oper):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO word (word_token, type, word, info)
VALUES (%s, 'S', %s,
json_build_object('class', %s,
'type', %s,
'op', %s))
""", (word_token, word, cls, typ, oper))
self.conn.commit()
def add_country(self, country_code, word_token):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO word (word_token, type, word)
VALUES(%s, 'C', %s)""",
(word_token, country_code))
self.conn.commit()
def add_postcode(self, word_token, postcode):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO word (word_token, type, word)
VALUES (%s, 'P', %s)
""", (word_token, postcode))
self.conn.commit()
def add_housenumber(self, word_id, word_tokens, word=None):
with self.conn.cursor() as cur:
if isinstance(word_tokens, str):
# old style without analyser
cur.execute("""INSERT INTO word (word_id, word_token, type)
VALUES (%s, %s, 'H')
""", (word_id, word_tokens))
else:
if word is None:
word = word_tokens[0]
for token in word_tokens:
cur.execute("""INSERT INTO word (word_id, word_token, type, word, info)
VALUES (%s, %s, 'H', %s, jsonb_build_object('lookup', %s))
""", (word_id, token, word, word_tokens[0]))
self.conn.commit()
def count(self):
with self.conn.cursor() as cur:
return cur.scalar("SELECT count(*) FROM word")
def count_special(self):
with self.conn.cursor() as cur:
return cur.scalar("SELECT count(*) FROM word WHERE type = 'S'")
def count_housenumbers(self):
with self.conn.cursor() as cur:
return cur.scalar("SELECT count(*) FROM word WHERE type = 'H'")
def get_special(self):
with self.conn.cursor() as cur:
cur.execute("SELECT word_token, info, word FROM word WHERE type = 'S'")
result = set(((row[0], row[2], row[1]['class'],
row[1]['type'], row[1]['op']) for row in cur))
assert len(result) == cur.rowcount, "Word table has duplicates."
return result
def get_country(self):
with self.conn.cursor() as cur:
cur.execute("SELECT word, word_token FROM word WHERE type = 'C'")
result = set((tuple(row) for row in cur))
assert len(result) == cur.rowcount, "Word table has duplicates."
return result
def get_postcodes(self):
with self.conn.cursor() as cur:
cur.execute("SELECT word FROM word WHERE type = 'P'")
return set((row[0] for row in cur))
def get_partial_words(self):
with self.conn.cursor() as cur:
cur.execute("SELECT word_token, info FROM word WHERE type ='w'")
return set(((row[0], row[1]['count']) for row in cur))
| 4,622 | 36.893443 | 96 | py |
Nominatim | Nominatim-master/test/python/cli/test_cmd_api.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for API access commands of command-line interface wrapper.
"""
import json
import pytest
import nominatim.clicmd.api
import nominatim.api as napi
class TestCliStatusCall:
@pytest.fixture(autouse=True)
def setup_status_mock(self, monkeypatch):
monkeypatch.setattr(napi.NominatimAPI, 'status',
lambda self: napi.StatusResult(200, 'OK'))
def test_status_simple(self, cli_call, tmp_path):
result = cli_call('status', '--project-dir', str(tmp_path))
assert result == 0
def test_status_json_format(self, cli_call, tmp_path, capsys):
result = cli_call('status', '--project-dir', str(tmp_path),
'--format', 'json')
assert result == 0
json.loads(capsys.readouterr().out)
class TestCliDetailsCall:
@pytest.fixture(autouse=True)
def setup_status_mock(self, monkeypatch):
result = napi.DetailedResult(napi.SourceTable.PLACEX, ('place', 'thing'),
napi.Point(1.0, -3.0))
monkeypatch.setattr(napi.NominatimAPI, 'details',
lambda *args, **kwargs: result)
@pytest.mark.parametrize("params", [('--node', '1'),
('--way', '1'),
('--relation', '1'),
('--place_id', '10001')])
def test_details_json_format(self, cli_call, tmp_path, capsys, params):
result = cli_call('details', '--project-dir', str(tmp_path), *params)
assert result == 0
json.loads(capsys.readouterr().out)
class TestCliReverseCall:
@pytest.fixture(autouse=True)
def setup_reverse_mock(self, monkeypatch):
result = napi.ReverseResult(napi.SourceTable.PLACEX, ('place', 'thing'),
napi.Point(1.0, -3.0),
names={'name':'Name', 'name:fr': 'Nom'},
extratags={'extra':'Extra'})
monkeypatch.setattr(napi.NominatimAPI, 'reverse',
lambda *args, **kwargs: result)
def test_reverse_simple(self, cli_call, tmp_path, capsys):
result = cli_call('reverse', '--project-dir', str(tmp_path),
'--lat', '34', '--lon', '34')
assert result == 0
out = json.loads(capsys.readouterr().out)
assert out['name'] == 'Name'
assert 'address' not in out
assert 'extratags' not in out
assert 'namedetails' not in out
@pytest.mark.parametrize('param,field', [('--addressdetails', 'address'),
('--extratags', 'extratags'),
('--namedetails', 'namedetails')])
def test_reverse_extra_stuff(self, cli_call, tmp_path, capsys, param, field):
result = cli_call('reverse', '--project-dir', str(tmp_path),
'--lat', '34', '--lon', '34', param)
assert result == 0
out = json.loads(capsys.readouterr().out)
assert field in out
def test_reverse_format(self, cli_call, tmp_path, capsys):
result = cli_call('reverse', '--project-dir', str(tmp_path),
'--lat', '34', '--lon', '34', '--format', 'geojson')
assert result == 0
out = json.loads(capsys.readouterr().out)
assert out['type'] == 'FeatureCollection'
def test_reverse_language(self, cli_call, tmp_path, capsys):
result = cli_call('reverse', '--project-dir', str(tmp_path),
'--lat', '34', '--lon', '34', '--lang', 'fr')
assert result == 0
out = json.loads(capsys.readouterr().out)
assert out['name'] == 'Nom'
class TestCliLookupCall:
@pytest.fixture(autouse=True)
def setup_lookup_mock(self, monkeypatch):
result = napi.SearchResult(napi.SourceTable.PLACEX, ('place', 'thing'),
napi.Point(1.0, -3.0),
names={'name':'Name', 'name:fr': 'Nom'},
extratags={'extra':'Extra'})
monkeypatch.setattr(napi.NominatimAPI, 'lookup',
lambda *args, **kwargs: napi.SearchResults([result]))
def test_lookup_simple(self, cli_call, tmp_path, capsys):
result = cli_call('lookup', '--project-dir', str(tmp_path),
'--id', 'N34')
assert result == 0
out = json.loads(capsys.readouterr().out)
assert len(out) == 1
assert out[0]['name'] == 'Name'
assert 'address' not in out[0]
assert 'extratags' not in out[0]
assert 'namedetails' not in out[0]
@pytest.mark.parametrize('endpoint, params', [('search', ('--query', 'Berlin')),
('search_address', ('--city', 'Berlin'))
])
def test_search(cli_call, tmp_path, capsys, monkeypatch, endpoint, params):
result = napi.SearchResult(napi.SourceTable.PLACEX, ('place', 'thing'),
napi.Point(1.0, -3.0),
names={'name':'Name', 'name:fr': 'Nom'},
extratags={'extra':'Extra'})
monkeypatch.setattr(napi.NominatimAPI, endpoint,
lambda *args, **kwargs: napi.SearchResults([result]))
result = cli_call('search', '--project-dir', str(tmp_path), *params)
assert result == 0
out = json.loads(capsys.readouterr().out)
assert len(out) == 1
assert out[0]['name'] == 'Name'
assert 'address' not in out[0]
assert 'extratags' not in out[0]
assert 'namedetails' not in out[0]
| 5,980 | 33.976608 | 86 | py |
Nominatim | Nominatim-master/test/python/cli/test_cli.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for command line interface wrapper.
These tests just check that the various command line parameters route to the
correct functionality. They use a lot of monkeypatching to avoid executing
the actual functions.
"""
import importlib
import pytest
import nominatim.indexer.indexer
import nominatim.tools.add_osm_data
import nominatim.tools.freeze
def test_cli_help(cli_call, capsys):
""" Running nominatim tool without arguments prints help.
"""
assert cli_call() == 1
captured = capsys.readouterr()
assert captured.out.startswith('usage:')
def test_cli_version(cli_call, capsys):
""" Running nominatim tool --version prints a version string.
"""
assert cli_call('--version') == 0
captured = capsys.readouterr()
assert captured.out.startswith('Nominatim version')
@pytest.mark.parametrize("name,oid", [('file', 'foo.osm'), ('diff', 'foo.osc')])
def test_cli_add_data_file_command(cli_call, mock_func_factory, name, oid):
mock_run_legacy = mock_func_factory(nominatim.tools.add_osm_data, 'add_data_from_file')
assert cli_call('add-data', '--' + name, str(oid)) == 0
assert mock_run_legacy.called == 1
@pytest.mark.parametrize("name,oid", [('node', 12), ('way', 8), ('relation', 32)])
def test_cli_add_data_object_command(cli_call, mock_func_factory, name, oid):
mock_run_legacy = mock_func_factory(nominatim.tools.add_osm_data, 'add_osm_object')
assert cli_call('add-data', '--' + name, str(oid)) == 0
assert mock_run_legacy.called == 1
def test_cli_add_data_tiger_data(cli_call, cli_tokenizer_mock, mock_func_factory):
mock = mock_func_factory(nominatim.tools.tiger_data, 'add_tiger_data')
assert cli_call('add-data', '--tiger-data', 'somewhere') == 0
assert mock.called == 1
def test_cli_serve_php(cli_call, mock_func_factory):
func = mock_func_factory(nominatim.cli, 'run_php_server')
    assert cli_call('serve') == 0
assert func.called == 1
def test_cli_serve_starlette_custom_server(cli_call, mock_func_factory):
pytest.importorskip("starlette")
mod = pytest.importorskip("uvicorn")
func = mock_func_factory(mod, "run")
    assert cli_call('serve', '--engine', 'starlette', '--server', 'foobar:4545') == 0
assert func.called == 1
assert func.last_kwargs['host'] == 'foobar'
assert func.last_kwargs['port'] == 4545
def test_cli_serve_starlette_custom_server_bad_port(cli_call, mock_func_factory):
pytest.importorskip("starlette")
mod = pytest.importorskip("uvicorn")
func = mock_func_factory(mod, "run")
    assert cli_call('serve', '--engine', 'starlette', '--server', 'foobar:45:45') == 1
@pytest.mark.parametrize("engine", ['falcon', 'starlette'])
def test_cli_serve_uvicorn_based(cli_call, engine, mock_func_factory):
pytest.importorskip(engine)
mod = pytest.importorskip("uvicorn")
func = mock_func_factory(mod, "run")
    assert cli_call('serve', '--engine', engine) == 0
assert func.called == 1
assert func.last_kwargs['host'] == '127.0.0.1'
assert func.last_kwargs['port'] == 8088
def test_cli_export_command(cli_call, mock_run_legacy):
assert cli_call('export', '--output-all-postcodes') == 0
assert mock_run_legacy.called == 1
assert mock_run_legacy.last_args[0] == 'export.php'
@pytest.mark.parametrize("param,value", [('output-type', 'country'),
('output-format', 'street;city'),
('language', 'xf'),
('restrict-to-country', 'us'),
('restrict-to-osm-node', '536'),
('restrict-to-osm-way', '727'),
('restrict-to-osm-relation', '197532')
])
def test_export_parameters(src_dir, tmp_path, param, value, monkeypatch):
(tmp_path / 'admin').mkdir()
(tmp_path / 'admin' / 'export.php').write_text(f"""<?php
exit(strpos(implode(' ', $_SERVER['argv']), '--{param} {value}') >= 0 ? 0 : 10);
""")
monkeypatch.setattr(nominatim.paths, 'PHPLIB_DIR', tmp_path)
assert nominatim.cli.nominatim(module_dir='MODULE NOT AVAILABLE',
osm2pgsql_path='OSM2PGSQL NOT AVAILABLE',
phpcgi_path='/usr/bin/php-cgi',
cli_args=['export', '--' + param, value]) == 0
class TestCliWithDb:
@pytest.fixture(autouse=True)
def setup_cli_call(self, cli_call, temp_db, cli_tokenizer_mock):
self.call_nominatim = cli_call
self.tokenizer_mock = cli_tokenizer_mock
def test_freeze_command(self, mock_func_factory):
mock_drop = mock_func_factory(nominatim.tools.freeze, 'drop_update_tables')
mock_flatnode = mock_func_factory(nominatim.tools.freeze, 'drop_flatnode_file')
assert self.call_nominatim('freeze') == 0
assert mock_drop.called == 1
assert mock_flatnode.called == 1
@pytest.mark.parametrize("params,do_bnds,do_ranks", [
([], 1, 1),
(['--boundaries-only'], 1, 0),
(['--no-boundaries'], 0, 1),
(['--boundaries-only', '--no-boundaries'], 0, 0)])
def test_index_command(self, mock_func_factory, table_factory,
params, do_bnds, do_ranks):
table_factory('import_status', 'indexed bool')
bnd_mock = mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_boundaries')
rank_mock = mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_by_rank')
assert self.call_nominatim('index', *params) == 0
assert bnd_mock.called == do_bnds
assert rank_mock.called == do_ranks
def test_special_phrases_wiki_command(self, mock_func_factory):
func = mock_func_factory(nominatim.clicmd.special_phrases.SPImporter, 'import_phrases')
self.call_nominatim('special-phrases', '--import-from-wiki', '--no-replace')
assert func.called == 1
def test_special_phrases_csv_command(self, src_dir, mock_func_factory):
func = mock_func_factory(nominatim.clicmd.special_phrases.SPImporter, 'import_phrases')
testdata = src_dir / 'test' / 'testdb'
csv_path = str((testdata / 'full_en_phrases_test.csv').resolve())
self.call_nominatim('special-phrases', '--import-from-csv', csv_path)
assert func.called == 1
def test_special_phrases_csv_bad_file(self, src_dir):
testdata = src_dir / 'something349053905.csv'
        assert self.call_nominatim('special-phrases', '--import-from-csv',
                                   str(testdata.resolve())) == 1
| 6,969 | 35.492147 | 95 | py |
Nominatim | Nominatim-master/test/python/cli/conftest.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
import pytest
import nominatim.cli
class MockParamCapture:
""" Mock that records the parameters with which a function was called
as well as the number of calls.
"""
def __init__(self, retval=0):
self.called = 0
self.return_value = retval
self.last_args = None
self.last_kwargs = None
def __call__(self, *args, **kwargs):
self.called += 1
self.last_args = args
self.last_kwargs = kwargs
return self.return_value
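    # Minimal usage sketch (illustrative only, not used by the fixtures):
    #
    #   mock = MockParamCapture(retval=0)
    #   assert mock('arg', key='value') == 0
    #   assert mock.called == 1
    #   assert mock.last_args == ('arg',)
    #   assert mock.last_kwargs == {'key': 'value'}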
class DummyTokenizer:
def __init__(self, *args, **kwargs):
self.update_sql_functions_called = False
self.finalize_import_called = False
self.update_statistics_called = False
self.update_word_tokens_called = False
def update_sql_functions(self, *args):
self.update_sql_functions_called = True
def finalize_import(self, *args):
self.finalize_import_called = True
def update_statistics(self):
self.update_statistics_called = True
def update_word_tokens(self):
self.update_word_tokens_called = True
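    # Tests drive the CLI and then assert on these flags, e.g. (sketch)
    # `assert cli_tokenizer_mock.update_statistics_called` after running
    # `nominatim refresh --word-count`.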
@pytest.fixture
def cli_call(src_dir):
""" Call the nominatim main function with the correct paths set.
Returns a function that can be called with the desired CLI arguments.
"""
def _call_nominatim(*args):
return nominatim.cli.nominatim(module_dir='MODULE NOT AVAILABLE',
osm2pgsql_path='OSM2PGSQL NOT AVAILABLE',
phpcgi_path='/usr/bin/php-cgi',
cli_args=args)
return _call_nominatim
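# Example (sketch): exit codes are passed straight through, so tests can do
#   assert cli_call('--version') == 0
# without a real module, osm2pgsql or php-cgi binary being present.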
@pytest.fixture
def mock_run_legacy(monkeypatch):
mock = MockParamCapture()
monkeypatch.setattr(nominatim.cli, 'run_legacy_script', mock)
return mock
@pytest.fixture
def mock_func_factory(monkeypatch):
def get_mock(module, func):
mock = MockParamCapture()
mock.func_name = func
monkeypatch.setattr(module, func, mock)
return mock
return get_mock
@pytest.fixture
def cli_tokenizer_mock(monkeypatch):
tok = DummyTokenizer()
monkeypatch.setattr(nominatim.tokenizer.factory, 'get_tokenizer_for_db',
lambda *args: tok)
monkeypatch.setattr(nominatim.tokenizer.factory, 'create_tokenizer',
lambda *args: tok)
return tok
| 2,567 | 27.853933 | 80 | py |
Nominatim | Nominatim-master/test/python/cli/test_cmd_admin.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the admin subcommand of the command line interface wrapper.
These tests just check that the various command line parameters route to the
correct functionality. They use a lot of monkeypatching to avoid executing
the actual functions.
"""
import pytest
import nominatim.tools.admin
import nominatim.tools.check_database
import nominatim.tools.migration
import nominatim.clicmd.admin
@pytest.mark.parametrize("params", [('--warm', ),
('--warm', '--reverse-only'),
('--warm', '--search-only')])
def test_admin_command_legacy(cli_call, mock_func_factory, params):
mock_run_legacy = mock_func_factory(nominatim.clicmd.admin, 'run_legacy_script')
assert cli_call('admin', *params) == 0
assert mock_run_legacy.called == 1
def test_admin_command_check_database(cli_call, mock_func_factory):
mock = mock_func_factory(nominatim.tools.check_database, 'check_database')
assert cli_call('admin', '--check-database') == 0
assert mock.called == 1
def test_admin_migrate(cli_call, mock_func_factory):
mock = mock_func_factory(nominatim.tools.migration, 'migrate')
assert cli_call('admin', '--migrate') == 0
assert mock.called == 1
class TestCliAdminWithDb:
@pytest.fixture(autouse=True)
def setup_cli_call(self, cli_call, temp_db, cli_tokenizer_mock):
self.call_nominatim = cli_call
self.tokenizer_mock = cli_tokenizer_mock
@pytest.mark.parametrize("func, params", [('analyse_indexing', ('--analyse-indexing', ))])
def test_analyse_indexing(self, mock_func_factory, func, params):
mock = mock_func_factory(nominatim.tools.admin, func)
assert self.call_nominatim('admin', *params) == 0
assert mock.called == 1
| 1,991 | 31.655738 | 94 | py |
Nominatim | Nominatim-master/test/python/cli/test_cmd_import.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the import command of the command-line interface wrapper.
"""
import pytest
import nominatim.tools.database_import
import nominatim.data.country_info
import nominatim.tools.refresh
import nominatim.tools.postcodes
import nominatim.indexer.indexer
import nominatim.db.properties
class TestCliImportWithDb:
@pytest.fixture(autouse=True)
def setup_cli_call(self, cli_call, temp_db, cli_tokenizer_mock):
self.call_nominatim = cli_call
self.tokenizer_mock = cli_tokenizer_mock
def test_import_missing_file(self):
assert self.call_nominatim('import', '--osm-file', 'sfsafegwedgw.reh.erh') == 1
def test_import_bad_file(self):
assert self.call_nominatim('import', '--osm-file', '.') == 1
@pytest.mark.parametrize('with_updates', [True, False])
def test_import_full(self, mock_func_factory, with_updates, place_table, property_table):
mocks = [
mock_func_factory(nominatim.tools.database_import, 'setup_database_skeleton'),
mock_func_factory(nominatim.data.country_info, 'setup_country_tables'),
mock_func_factory(nominatim.tools.database_import, 'import_osm_data'),
mock_func_factory(nominatim.tools.refresh, 'import_wikipedia_articles'),
mock_func_factory(nominatim.tools.refresh, 'import_secondary_importance'),
mock_func_factory(nominatim.tools.database_import, 'truncate_data_tables'),
mock_func_factory(nominatim.tools.database_import, 'load_data'),
mock_func_factory(nominatim.tools.database_import, 'create_tables'),
mock_func_factory(nominatim.tools.database_import, 'create_table_triggers'),
mock_func_factory(nominatim.tools.database_import, 'create_partition_tables'),
mock_func_factory(nominatim.tools.database_import, 'create_search_indices'),
mock_func_factory(nominatim.data.country_info, 'create_country_names'),
mock_func_factory(nominatim.tools.refresh, 'load_address_levels_from_config'),
mock_func_factory(nominatim.tools.postcodes, 'update_postcodes'),
mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_full'),
mock_func_factory(nominatim.tools.refresh, 'setup_website'),
]
params = ['import', '--osm-file', __file__]
if with_updates:
mocks.append(mock_func_factory(nominatim.tools.freeze, 'drop_update_tables'))
params.append('--no-updates')
cf_mock = mock_func_factory(nominatim.tools.refresh, 'create_functions')
assert self.call_nominatim(*params) == 0
assert self.tokenizer_mock.finalize_import_called
assert cf_mock.called > 1
for mock in mocks:
assert mock.called == 1, "Mock '{}' not called".format(mock.func_name)
def test_import_continue_load_data(self, mock_func_factory):
mocks = [
mock_func_factory(nominatim.tools.database_import, 'truncate_data_tables'),
mock_func_factory(nominatim.tools.database_import, 'load_data'),
mock_func_factory(nominatim.tools.database_import, 'create_search_indices'),
mock_func_factory(nominatim.data.country_info, 'create_country_names'),
mock_func_factory(nominatim.tools.postcodes, 'update_postcodes'),
mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_full'),
mock_func_factory(nominatim.tools.refresh, 'setup_website'),
mock_func_factory(nominatim.db.properties, 'set_property')
]
assert self.call_nominatim('import', '--continue', 'load-data') == 0
assert self.tokenizer_mock.finalize_import_called
for mock in mocks:
assert mock.called == 1, "Mock '{}' not called".format(mock.func_name)
def test_import_continue_indexing(self, mock_func_factory, placex_table,
temp_db_conn):
mocks = [
mock_func_factory(nominatim.tools.database_import, 'create_search_indices'),
mock_func_factory(nominatim.data.country_info, 'create_country_names'),
mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_full'),
mock_func_factory(nominatim.tools.refresh, 'setup_website'),
mock_func_factory(nominatim.db.properties, 'set_property')
]
assert self.call_nominatim('import', '--continue', 'indexing') == 0
for mock in mocks:
assert mock.called == 1, "Mock '{}' not called".format(mock.func_name)
# Calling it again still works for the index
assert self.call_nominatim('import', '--continue', 'indexing') == 0
def test_import_continue_postprocess(self, mock_func_factory):
mocks = [
mock_func_factory(nominatim.tools.database_import, 'create_search_indices'),
mock_func_factory(nominatim.data.country_info, 'create_country_names'),
mock_func_factory(nominatim.tools.refresh, 'setup_website'),
mock_func_factory(nominatim.db.properties, 'set_property')
]
assert self.call_nominatim('import', '--continue', 'db-postprocess') == 0
assert self.tokenizer_mock.finalize_import_called
for mock in mocks:
assert mock.called == 1, "Mock '{}' not called".format(mock.func_name)
| 5,561 | 42.795276 | 93 | py |
Nominatim | Nominatim-master/test/python/cli/test_cmd_replication.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the replication command of the command-line interface wrapper.
"""
import datetime as dt
import time
import pytest
import nominatim.cli
import nominatim.indexer.indexer
import nominatim.tools.replication
import nominatim.tools.refresh
from nominatim.db import status
@pytest.fixture
def tokenizer_mock(monkeypatch):
class DummyTokenizer:
def __init__(self, *args, **kwargs):
self.update_sql_functions_called = False
self.finalize_import_called = False
def update_sql_functions(self, *args):
self.update_sql_functions_called = True
def finalize_import(self, *args):
self.finalize_import_called = True
tok = DummyTokenizer()
monkeypatch.setattr(nominatim.tokenizer.factory, 'get_tokenizer_for_db',
lambda *args: tok)
monkeypatch.setattr(nominatim.tokenizer.factory, 'create_tokenizer',
lambda *args: tok)
return tok
@pytest.fixture
def init_status(temp_db_conn, status_table):
status.set_status(temp_db_conn, date=dt.datetime.now(dt.timezone.utc), seq=1)
@pytest.fixture
def index_mock(mock_func_factory, tokenizer_mock, init_status):
return mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_full')
@pytest.fixture
def update_mock(mock_func_factory, init_status, tokenizer_mock):
return mock_func_factory(nominatim.tools.replication, 'update')
class TestCliReplication:
@pytest.fixture(autouse=True)
def setup_cli_call(self, cli_call, temp_db):
self.call_nominatim = lambda *args: cli_call('replication', *args)
@pytest.fixture(autouse=True)
def setup_update_function(self, monkeypatch):
def _mock_updates(states):
monkeypatch.setattr(nominatim.tools.replication, 'update',
lambda *args, **kwargs: states.pop())
self.update_states = _mock_updates
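        # Usage sketch: states are consumed via pop(), i.e. from the *end* of
        # the list, and a further mocked update() call raises IndexError once
        # the list is empty. The continuous-update tests below rely on that
        # IndexError to break out of the otherwise endless update loop.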
@pytest.mark.parametrize("params,func", [
(('--init',), 'init_replication'),
(('--init', '--no-update-functions'), 'init_replication'),
(('--check-for-updates',), 'check_for_updates')
])
def test_replication_command(self, mock_func_factory, params, func):
func_mock = mock_func_factory(nominatim.tools.replication, func)
if params == ('--init',):
umock = mock_func_factory(nominatim.tools.refresh, 'create_functions')
assert self.call_nominatim(*params) == 0
assert func_mock.called == 1
if params == ('--init',):
assert umock.called == 1
def test_replication_update_bad_interval(self, monkeypatch):
monkeypatch.setenv('NOMINATIM_REPLICATION_UPDATE_INTERVAL', 'xx')
assert self.call_nominatim() == 1
def test_replication_update_bad_interval_for_geofabrik(self, monkeypatch):
monkeypatch.setenv('NOMINATIM_REPLICATION_URL',
'https://download.geofabrik.de/europe/italy-updates')
assert self.call_nominatim() == 1
def test_replication_update_continuous_no_index(self):
assert self.call_nominatim('--no-index') == 1
def test_replication_update_once_no_index(self, update_mock):
assert self.call_nominatim('--once', '--no-index') == 0
assert str(update_mock.last_args[1]['osm2pgsql']).endswith('OSM2PGSQL NOT AVAILABLE')
def test_replication_update_custom_osm2pgsql(self, monkeypatch, update_mock):
monkeypatch.setenv('NOMINATIM_OSM2PGSQL_BINARY', '/secret/osm2pgsql')
assert self.call_nominatim('--once', '--no-index') == 0
assert str(update_mock.last_args[1]['osm2pgsql']) == '/secret/osm2pgsql'
@pytest.mark.parametrize("update_interval", [60, 3600])
def test_replication_catchup(self, placex_table, monkeypatch, index_mock, update_interval):
monkeypatch.setenv('NOMINATIM_REPLICATION_UPDATE_INTERVAL', str(update_interval))
self.update_states([nominatim.tools.replication.UpdateState.NO_CHANGES])
assert self.call_nominatim('--catch-up') == 0
def test_replication_update_custom_threads(self, update_mock):
assert self.call_nominatim('--once', '--no-index', '--threads', '4') == 0
assert update_mock.last_args[1]['threads'] == 4
def test_replication_update_continuous(self, index_mock):
self.update_states([nominatim.tools.replication.UpdateState.UP_TO_DATE,
nominatim.tools.replication.UpdateState.UP_TO_DATE])
with pytest.raises(IndexError):
self.call_nominatim()
assert index_mock.called == 2
def test_replication_update_continuous_no_change(self, mock_func_factory,
index_mock):
self.update_states([nominatim.tools.replication.UpdateState.NO_CHANGES,
nominatim.tools.replication.UpdateState.UP_TO_DATE])
sleep_mock = mock_func_factory(time, 'sleep')
with pytest.raises(IndexError):
self.call_nominatim()
assert index_mock.called == 1
assert sleep_mock.called == 1
assert sleep_mock.last_args[0] == 60
| 5,435 | 33.405063 | 95 | py |
Nominatim | Nominatim-master/test/python/cli/test_cmd_refresh.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the command line interface wrapper for the refresh command.
"""
import pytest
import nominatim.tools.refresh
import nominatim.tools.postcodes
import nominatim.indexer.indexer
class TestRefresh:
@pytest.fixture(autouse=True)
def setup_cli_call(self, cli_call, temp_db, cli_tokenizer_mock):
self.call_nominatim = cli_call
self.tokenizer_mock = cli_tokenizer_mock
@pytest.mark.parametrize("command,func", [
('address-levels', 'load_address_levels_from_config'),
('wiki-data', 'import_wikipedia_articles'),
('importance', 'recompute_importance'),
('website', 'setup_website'),
])
def test_refresh_command(self, mock_func_factory, command, func):
func_mock = mock_func_factory(nominatim.tools.refresh, func)
assert self.call_nominatim('refresh', '--' + command) == 0
assert func_mock.called == 1
def test_refresh_word_count(self):
assert self.call_nominatim('refresh', '--word-count') == 0
assert self.tokenizer_mock.update_statistics_called
def test_refresh_word_tokens(self):
assert self.call_nominatim('refresh', '--word-tokens') == 0
assert self.tokenizer_mock.update_word_tokens_called
def test_refresh_postcodes(self, mock_func_factory, place_table):
func_mock = mock_func_factory(nominatim.tools.postcodes, 'update_postcodes')
idx_mock = mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_postcodes')
assert self.call_nominatim('refresh', '--postcodes') == 0
assert func_mock.called == 1
assert idx_mock.called == 1
def test_refresh_postcodes_no_place_table(self):
# Do nothing without the place table
assert self.call_nominatim('refresh', '--postcodes') == 0
def test_refresh_create_functions(self, mock_func_factory):
func_mock = mock_func_factory(nominatim.tools.refresh, 'create_functions')
assert self.call_nominatim('refresh', '--functions') == 0
assert func_mock.called == 1
assert self.tokenizer_mock.update_sql_functions_called
def test_refresh_wikidata_file_not_found(self, monkeypatch):
monkeypatch.setenv('NOMINATIM_WIKIPEDIA_DATA_PATH', 'gjoiergjeroi345Q')
assert self.call_nominatim('refresh', '--wiki-data') == 1
def test_refresh_secondary_importance_file_not_found(self):
assert self.call_nominatim('refresh', '--secondary-importance') == 1
def test_refresh_secondary_importance_new_table(self, mock_func_factory):
mocks = [mock_func_factory(nominatim.tools.refresh, 'import_secondary_importance'),
mock_func_factory(nominatim.tools.refresh, 'create_functions')]
assert self.call_nominatim('refresh', '--secondary-importance') == 0
assert mocks[0].called == 1
assert mocks[1].called == 1
def test_refresh_importance_computed_after_wiki_import(self, monkeypatch):
calls = []
monkeypatch.setattr(nominatim.tools.refresh, 'import_wikipedia_articles',
lambda *args, **kwargs: calls.append('import') or 0)
monkeypatch.setattr(nominatim.tools.refresh, 'recompute_importance',
lambda *args, **kwargs: calls.append('update'))
assert self.call_nominatim('refresh', '--importance', '--wiki-data') == 0
assert calls == ['import', 'update']
@pytest.mark.parametrize('params', [('--data-object', 'w234'),
('--data-object', 'N23', '--data-object', 'N24'),
('--data-area', 'R7723'),
('--data-area', 'r7723', '--data-area', 'r2'),
('--data-area', 'R9284425', '--data-object', 'n1234567894567')])
def test_refresh_objects(self, params, mock_func_factory):
func_mock = mock_func_factory(nominatim.tools.refresh, 'invalidate_osm_object')
assert self.call_nominatim('refresh', *params) == 0
assert func_mock.called == len(params)/2
@pytest.mark.parametrize('func', ('--data-object', '--data-area'))
@pytest.mark.parametrize('param', ('234', 'a55', 'R 453', 'Rel'))
def test_refresh_objects_bad_param(self, func, param, mock_func_factory):
func_mock = mock_func_factory(nominatim.tools.refresh, 'invalidate_osm_object')
        assert self.call_nominatim('refresh', func, param) == 1
assert func_mock.called == 0
| 4,806 | 39.737288 | 104 | py |
Nominatim | Nominatim-master/test/python/tools/test_refresh_setup_website.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for setting up the website scripts.
"""
import subprocess
import pytest
from nominatim.tools import refresh
@pytest.fixture
def test_script(tmp_path):
(tmp_path / 'php').mkdir()
website_dir = (tmp_path / 'php' / 'website')
website_dir.mkdir()
def _create_file(code):
outfile = website_dir / 'reverse-only-search.php'
outfile.write_text('<?php\n{}\n'.format(code), 'utf-8')
return _create_file
@pytest.fixture
def run_website_script(tmp_path, project_env, temp_db_conn):
project_env.lib_dir.php = tmp_path / 'php'
def _runner():
refresh.setup_website(tmp_path, project_env, temp_db_conn)
proc = subprocess.run(['/usr/bin/env', 'php', '-Cq',
tmp_path / 'search.php'], check=False)
return proc.returncode
return _runner
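# Round-trip sketch (assuming the reverse-only dispatch in setup_website):
# the empty test database has no search_name table, so the generated
# search.php hands over to the reverse-only-search.php stub written by the
# test_script fixture, and that stub's exit() code is what _runner() returns.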
def test_basedir_created(tmp_path, project_env, temp_db_conn):
webdir = tmp_path / 'website'
assert not webdir.exists()
refresh.setup_website(webdir, project_env, temp_db_conn)
assert webdir.exists()
@pytest.mark.parametrize("setting,retval", (('yes', 10), ('no', 20)))
def test_setup_website_check_bool(monkeypatch, test_script, run_website_script,
setting, retval):
monkeypatch.setenv('NOMINATIM_CORS_NOACCESSCONTROL', setting)
test_script('exit(CONST_NoAccessControl ? 10 : 20);')
assert run_website_script() == retval
@pytest.mark.parametrize("setting", (0, 10, 99067))
def test_setup_website_check_int(monkeypatch, test_script, run_website_script, setting):
monkeypatch.setenv('NOMINATIM_LOOKUP_MAX_COUNT', str(setting))
test_script('exit(CONST_Places_Max_ID_count == {} ? 10 : 20);'.format(setting))
assert run_website_script() == 10
def test_setup_website_check_empty_str(monkeypatch, test_script, run_website_script):
monkeypatch.setenv('NOMINATIM_DEFAULT_LANGUAGE', '')
test_script('exit(CONST_Default_Language === false ? 10 : 20);')
assert run_website_script() == 10
def test_setup_website_check_str(monkeypatch, test_script, run_website_script):
monkeypatch.setenv('NOMINATIM_DEFAULT_LANGUAGE', 'ffde 2')
test_script('exit(CONST_Default_Language === "ffde 2" ? 10 : 20);')
assert run_website_script() == 10
def test_relative_log_file(project_env, monkeypatch, test_script, run_website_script):
monkeypatch.setenv('NOMINATIM_LOG_FILE', 'access.log')
expected_file = str(project_env.project_dir / 'access.log')
test_script(f'exit(CONST_Log_File === "{expected_file}" ? 10 : 20);')
assert run_website_script() == 10
def test_variable_with_bracket(project_env, monkeypatch, test_script, run_website_script):
monkeypatch.setenv('NOMINATIM_DATABASE_DSN', 'pgsql:dbname=nominatim;user=foo;password=4{5')
test_script('exit(CONST_Database_DSN === "pgsql:dbname=nominatim;user=foo;password=4{5" ? 10 : 20);')
assert run_website_script() == 10
| 3,149 | 29 | 105 | py |
Nominatim | Nominatim-master/test/python/tools/test_exec_utils.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for tools.exec_utils module.
"""
from pathlib import Path
import subprocess
import pytest
from nominatim.config import Configuration
import nominatim.tools.exec_utils as exec_utils
import nominatim.paths
class TestRunLegacyScript:
@pytest.fixture(autouse=True)
def setup_nominatim_env(self, tmp_path, monkeypatch):
tmp_phplib_dir = tmp_path / 'phplib'
tmp_phplib_dir.mkdir()
(tmp_phplib_dir / 'admin').mkdir()
monkeypatch.setattr(nominatim.paths, 'PHPLIB_DIR', tmp_phplib_dir)
self.phplib_dir = tmp_phplib_dir
self.config = Configuration(tmp_path)
self.config.set_libdirs(module='.', osm2pgsql='default_osm2pgsql',
php=tmp_phplib_dir)
def mk_script(self, code):
codefile = self.phplib_dir / 'admin' / 't.php'
codefile.write_text('<?php\n' + code + '\n')
return 't.php'
@pytest.mark.parametrize("return_code", (0, 1, 15, 255))
def test_run_legacy_return_exit_code(self, return_code):
fname = self.mk_script('exit({});'.format(return_code))
assert return_code == \
exec_utils.run_legacy_script(fname, config=self.config)
def test_run_legacy_return_throw_on_fail(self):
fname = self.mk_script('exit(11);')
with pytest.raises(subprocess.CalledProcessError):
exec_utils.run_legacy_script(fname, config=self.config,
throw_on_fail=True)
def test_run_legacy_return_dont_throw_on_success(self):
fname = self.mk_script('exit(0);')
assert exec_utils.run_legacy_script(fname, config=self.config,
throw_on_fail=True) == 0
def test_run_legacy_use_given_module_path(self):
fname = self.mk_script("exit($_SERVER['NOMINATIM_DATABASE_MODULE_PATH'] == '' ? 0 : 23);")
assert exec_utils.run_legacy_script(fname, config=self.config) == 0
def test_run_legacy_do_not_overwrite_module_path(self, monkeypatch):
monkeypatch.setenv('NOMINATIM_DATABASE_MODULE_PATH', 'other')
fname = self.mk_script(
"exit($_SERVER['NOMINATIM_DATABASE_MODULE_PATH'] == 'other' ? 0 : 1);")
assert exec_utils.run_legacy_script(fname, config=self.config) == 0
def test_run_legacy_default_osm2pgsql_binary(self, monkeypatch):
fname = self.mk_script("exit($_SERVER['NOMINATIM_OSM2PGSQL_BINARY'] == 'default_osm2pgsql' ? 0 : 23);")
assert exec_utils.run_legacy_script(fname, config=self.config) == 0
def test_run_legacy_override_osm2pgsql_binary(self, monkeypatch):
monkeypatch.setenv('NOMINATIM_OSM2PGSQL_BINARY', 'somethingelse')
fname = self.mk_script("exit($_SERVER['NOMINATIM_OSM2PGSQL_BINARY'] == 'somethingelse' ? 0 : 23);")
assert exec_utils.run_legacy_script(fname, config=self.config) == 0
class TestRunApiScript:
@staticmethod
@pytest.fixture(autouse=True)
def setup_project_dir(tmp_path):
webdir = tmp_path / 'website'
webdir.mkdir()
(webdir / 'test.php').write_text("<?php\necho 'OK\n';")
@staticmethod
def test_run_api(tmp_path):
assert exec_utils.run_api_script('test', tmp_path) == 0
@staticmethod
def test_run_api_execution_error(tmp_path):
assert exec_utils.run_api_script('badname', tmp_path) != 0
@staticmethod
def test_run_api_with_extra_env(tmp_path):
extra_env = dict(SCRIPT_FILENAME=str(tmp_path / 'website' / 'test.php'))
assert exec_utils.run_api_script('badname', tmp_path, extra_env=extra_env) == 0
@staticmethod
def test_custom_phpcgi(tmp_path, capfd):
assert exec_utils.run_api_script('test', tmp_path, phpcgi_bin='env',
params={'q' : 'Berlin'}) == 0
captured = capfd.readouterr()
assert '?q=Berlin' in captured.out
@staticmethod
def test_fail_on_error_output(tmp_path):
# Starting PHP 8 the PHP CLI no longer has STDERR defined as constant
php = """
<?php
if(!defined('STDERR')) define('STDERR', fopen('php://stderr', 'wb'));
fwrite(STDERR, 'WARNING'.PHP_EOL);
"""
(tmp_path / 'website' / 'bad.php').write_text(php)
assert exec_utils.run_api_script('bad', tmp_path) == 1
### run_osm2pgsql
def test_run_osm2pgsql(osm2pgsql_options):
osm2pgsql_options['append'] = False
osm2pgsql_options['import_file'] = 'foo.bar'
osm2pgsql_options['tablespaces']['slim_data'] = 'extra'
exec_utils.run_osm2pgsql(osm2pgsql_options)
def test_run_osm2pgsql_disable_jit(osm2pgsql_options):
osm2pgsql_options['append'] = True
osm2pgsql_options['import_file'] = 'foo.bar'
osm2pgsql_options['disable_jit'] = True
exec_utils.run_osm2pgsql(osm2pgsql_options)
| 5,050 | 33.59589 | 111 | py |
Nominatim | Nominatim-master/test/python/tools/test_migration.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for migration functions
"""
import pytest
import psycopg2.extras
from nominatim.tools import migration
from nominatim.errors import UsageError
import nominatim.version
from mock_legacy_word_table import MockLegacyWordTable
class DummyTokenizer:
def update_sql_functions(self, config):
pass
@pytest.fixture
def postprocess_mock(monkeypatch):
monkeypatch.setattr(migration.refresh, 'create_functions', lambda *args: args)
monkeypatch.setattr(migration.tokenizer_factory, 'get_tokenizer_for_db',
lambda *args: DummyTokenizer())
@pytest.fixture
def legacy_word_table(temp_db_conn):
return MockLegacyWordTable(temp_db_conn)
def test_no_migration_old_versions(temp_db_with_extensions, table_factory, def_config):
table_factory('country_name', 'name HSTORE, country_code TEXT')
with pytest.raises(UsageError, match='Migration not possible'):
migration.migrate(def_config, {})
def test_set_up_migration_for_36(temp_db_with_extensions, temp_db_cursor,
table_factory, def_config, monkeypatch,
postprocess_mock):
psycopg2.extras.register_hstore(temp_db_cursor)
# don't actually run any migration, except the property table creation
monkeypatch.setattr(migration, '_MIGRATION_FUNCTIONS',
[((3, 5, 0, 99), migration.add_nominatim_property_table)])
# Use a r/o user name that always exists
monkeypatch.setenv('NOMINATIM_DATABASE_WEBUSER', 'postgres')
table_factory('country_name', 'name HSTORE, country_code TEXT',
(({str(x): 'a' for x in range(200)}, 'gb'),))
assert not temp_db_cursor.table_exists('nominatim_properties')
assert migration.migrate(def_config, {}) == 0
assert temp_db_cursor.table_exists('nominatim_properties')
assert 1 == temp_db_cursor.scalar(""" SELECT count(*) FROM nominatim_properties
WHERE property = 'database_version'""")
def test_already_at_version(def_config, property_table):
property_table.set('database_version',
'{0[0]}.{0[1]}.{0[2]}-{0[3]}'.format(nominatim.version.NOMINATIM_VERSION))
assert migration.migrate(def_config, {}) == 0
def test_run_single_migration(def_config, temp_db_cursor, property_table,
monkeypatch, postprocess_mock):
    oldversion = list(nominatim.version.NOMINATIM_VERSION)
oldversion[0] -= 1
property_table.set('database_version',
'{0[0]}.{0[1]}.{0[2]}-{0[3]}'.format(oldversion))
done = {'old': False, 'new': False}
def _migration(**_):
""" Dummy migration"""
done['new'] = True
def _old_migration(**_):
""" Dummy migration"""
done['old'] = True
oldversion[0] = 0
monkeypatch.setattr(migration, '_MIGRATION_FUNCTIONS',
[(tuple(oldversion), _old_migration),
(nominatim.version.NOMINATIM_VERSION, _migration)])
assert migration.migrate(def_config, {}) == 0
assert done['new']
assert not done['old']
assert property_table.get('database_version') == \
'{0[0]}.{0[1]}.{0[2]}-{0[3]}'.format(nominatim.version.NOMINATIM_VERSION)
###### Tests for specific migrations
#
# Each migration should come with two tests:
# 1. Test that migration from old to new state works as expected.
# 2. Test that the migration can be rerun on the new state without side effects.
@pytest.mark.parametrize('in_attr', ('', 'with time zone'))
def test_import_status_timestamp_change(temp_db_conn, temp_db_cursor,
table_factory, in_attr):
table_factory('import_status',
f"""lastimportdate timestamp {in_attr},
sequence_id integer,
indexed boolean""")
migration.import_status_timestamp_change(temp_db_conn)
temp_db_conn.commit()
assert temp_db_cursor.scalar("""SELECT data_type FROM information_schema.columns
WHERE table_name = 'import_status'
and column_name = 'lastimportdate'""")\
== 'timestamp with time zone'
def test_add_nominatim_property_table(temp_db_conn, temp_db_cursor,
def_config, monkeypatch):
# Use a r/o user name that always exists
monkeypatch.setenv('NOMINATIM_DATABASE_WEBUSER', 'postgres')
assert not temp_db_cursor.table_exists('nominatim_properties')
migration.add_nominatim_property_table(temp_db_conn, def_config)
temp_db_conn.commit()
assert temp_db_cursor.table_exists('nominatim_properties')
def test_add_nominatim_property_table_repeat(temp_db_conn, temp_db_cursor,
def_config, property_table):
assert temp_db_cursor.table_exists('nominatim_properties')
migration.add_nominatim_property_table(temp_db_conn, def_config)
temp_db_conn.commit()
assert temp_db_cursor.table_exists('nominatim_properties')
def test_change_housenumber_transliteration(temp_db_conn, temp_db_cursor,
legacy_word_table, placex_table):
placex_table.add(housenumber='3A')
temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
RETURNS TEXT AS $$ SELECT lower(name) $$ LANGUAGE SQL """)
temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_housenumber_id(lookup_word TEXT)
RETURNS INTEGER AS $$ SELECT 4325 $$ LANGUAGE SQL """)
migration.change_housenumber_transliteration(temp_db_conn)
temp_db_conn.commit()
assert temp_db_cursor.scalar('SELECT housenumber from placex') == '3a'
migration.change_housenumber_transliteration(temp_db_conn)
temp_db_conn.commit()
assert temp_db_cursor.scalar('SELECT housenumber from placex') == '3a'
def test_switch_placenode_geometry_index(temp_db_conn, temp_db_cursor, placex_table):
temp_db_cursor.execute("""CREATE INDEX idx_placex_adminname
ON placex (place_id)""")
migration.switch_placenode_geometry_index(temp_db_conn)
temp_db_conn.commit()
assert temp_db_cursor.index_exists('placex', 'idx_placex_geometry_placenode')
assert not temp_db_cursor.index_exists('placex', 'idx_placex_adminname')
def test_switch_placenode_geometry_index_repeat(temp_db_conn, temp_db_cursor, placex_table):
temp_db_cursor.execute("""CREATE INDEX idx_placex_geometry_placenode
ON placex (place_id)""")
migration.switch_placenode_geometry_index(temp_db_conn)
temp_db_conn.commit()
assert temp_db_cursor.index_exists('placex', 'idx_placex_geometry_placenode')
assert not temp_db_cursor.index_exists('placex', 'idx_placex_adminname')
assert temp_db_cursor.scalar("""SELECT indexdef from pg_indexes
WHERE tablename = 'placex'
and indexname = 'idx_placex_geometry_placenode'
""").endswith('(place_id)')
def test_install_legacy_tokenizer(temp_db_conn, temp_db_cursor, project_env,
property_table, table_factory, monkeypatch,
tmp_path):
table_factory('placex', 'place_id BIGINT')
table_factory('location_property_osmline', 'place_id BIGINT')
# Setting up the tokenizer is problematic
class MiniTokenizer:
def migrate_database(self, config):
pass
monkeypatch.setattr(migration.tokenizer_factory, 'create_tokenizer',
lambda cfg, **kwargs: MiniTokenizer())
migration.install_legacy_tokenizer(temp_db_conn, project_env)
temp_db_conn.commit()
def test_install_legacy_tokenizer_repeat(temp_db_conn, temp_db_cursor,
def_config, property_table):
property_table.set('tokenizer', 'dummy')
migration.install_legacy_tokenizer(temp_db_conn, def_config)
temp_db_conn.commit()
def test_create_tiger_housenumber_index(temp_db_conn, temp_db_cursor, table_factory):
table_factory('location_property_tiger',
'parent_place_id BIGINT, startnumber INT, endnumber INT')
migration.create_tiger_housenumber_index(temp_db_conn)
temp_db_conn.commit()
if temp_db_conn.server_version_tuple() >= (11, 0, 0):
assert temp_db_cursor.index_exists('location_property_tiger',
'idx_location_property_tiger_housenumber_migrated')
migration.create_tiger_housenumber_index(temp_db_conn)
temp_db_conn.commit()
| 8,965 | 36.991525 | 101 | py |
Nominatim | Nominatim-master/test/python/tools/test_sp_wiki_loader.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for methods of the SPWikiLoader class.
"""
import pytest
from nominatim.tools.special_phrases.sp_wiki_loader import SPWikiLoader
@pytest.fixture
def sp_wiki_loader(src_dir, monkeypatch, def_config):
"""
Return an instance of SPWikiLoader.
"""
monkeypatch.setenv('NOMINATIM_LANGUAGES', 'en')
loader = SPWikiLoader(def_config)
def _mock_wiki_content(lang):
xml_test_content = src_dir / 'test' / 'testdata' / 'special_phrases_test_content.txt'
return xml_test_content.read_text()
monkeypatch.setattr('nominatim.tools.special_phrases.sp_wiki_loader._get_wiki_content',
_mock_wiki_content)
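    # The monkeypatch above replaces the module's network fetch with a local
    # fixture file, so generate_phrases() parses canned wiki markup instead of
    # performing any HTTP request (sketch of the mechanism being stubbed).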
return loader
def test_generate_phrases(sp_wiki_loader):
"""
    Test objects returned from the generate_phrases() method.
It should return all SpecialPhrases objects of
the 'en' special phrases.
"""
phrases = list(sp_wiki_loader.generate_phrases())
assert set((p.p_label, p.p_class, p.p_type, p.p_operator) for p in phrases) ==\
{('Zip Line', 'aerialway', 'zip_line', '-'),
('Zip Lines', 'aerialway', 'zip_line', '-'),
('Zip Line in', 'aerialway', 'zip_line', 'in'),
('Zip Lines in', 'aerialway', 'zip_line', 'in'),
('Zip Line near', 'aerialway', 'zip_line', 'near'),
('Animal shelter', 'amenity', 'animal_shelter', '-'),
('Animal shelters', 'amenity', 'animal_shelter', '-'),
('Animal shelter in', 'amenity', 'animal_shelter', 'in'),
('Animal shelters in', 'amenity', 'animal_shelter', 'in'),
('Animal shelter near', 'amenity', 'animal_shelter', 'near'),
('Animal shelters near', 'amenity', 'animal_shelter', 'near'),
('Drinking Water near', 'amenity', 'drinking_water', 'near'),
('Water', 'amenity', 'drinking_water', '-'),
('Water in', 'amenity', 'drinking_water', 'in'),
('Water near', 'amenity', 'drinking_water', 'near'),
('Embassy', 'amenity', 'embassy', '-'),
('Embassys', 'amenity', 'embassy', '-'),
('Embassies', 'amenity', 'embassy', '-')}
| 2,434 | 40.982759 | 93 | py |
Nominatim | Nominatim-master/test/python/tools/conftest.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
import pytest
@pytest.fixture
def osm2pgsql_options(temp_db):
""" A standard set of options for osm2pgsql.
"""
return dict(osm2pgsql='echo',
osm2pgsql_cache=10,
osm2pgsql_style='style.file',
threads=1,
dsn='dbname=' + temp_db,
flatnode_file='',
tablespaces=dict(slim_data='', slim_index='',
main_data='', main_index=''))
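# Note: osm2pgsql='echo' means run_osm2pgsql() spawns the echo binary, so
# tests using this fixture only exercise how the command line and environment
# are assembled, never a real osm2pgsql import (a deliberate stand-in).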
| 670 | 30.952381 | 62 | py |
Nominatim | Nominatim-master/test/python/tools/test_replication.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for replication functionality.
"""
import datetime as dt
import time
import pytest
from osmium.replication.server import OsmosisState
import nominatim.tools.replication
import nominatim.db.status as status
from nominatim.errors import UsageError
OSM_NODE_DATA = """\
<osm version="0.6" generator="OpenStreetMap server" copyright="OpenStreetMap and contributors" attribution="http://www.openstreetmap.org/copyright" license="http://opendatacommons.org/licenses/odbl/1-0/">
<node id="100" visible="true" version="1" changeset="2047" timestamp="2006-01-27T22:09:10Z" user="Foo" uid="111" lat="48.7586670" lon="8.1343060">
</node>
</osm>
"""
@pytest.fixture(autouse=True)
def setup_status_table(status_table):
pass
### init replication
def test_init_replication_bad_base_url(monkeypatch, place_row, temp_db_conn):
place_row(osm_type='N', osm_id=100)
monkeypatch.setattr(nominatim.db.status, "get_url", lambda u: OSM_NODE_DATA)
with pytest.raises(UsageError, match="Failed to reach replication service"):
nominatim.tools.replication.init_replication(temp_db_conn, 'https://test.io')
def test_init_replication_success(monkeypatch, place_row, temp_db_conn, temp_db_cursor):
place_row(osm_type='N', osm_id=100)
monkeypatch.setattr(nominatim.db.status, "get_url", lambda u: OSM_NODE_DATA)
monkeypatch.setattr(nominatim.tools.replication.ReplicationServer,
"timestamp_to_sequence",
lambda self, date: 234)
nominatim.tools.replication.init_replication(temp_db_conn, 'https://test.io')
expected_date = dt.datetime.strptime('2006-01-27T19:09:10', status.ISODATE_FORMAT)\
.replace(tzinfo=dt.timezone.utc)
assert temp_db_cursor.row_set("SELECT * FROM import_status") \
== {(expected_date, 234, True)}
### checking for updates
def test_check_for_updates_empty_status_table(temp_db_conn):
assert nominatim.tools.replication.check_for_updates(temp_db_conn, 'https://test.io') == 254
def test_check_for_updates_seq_not_set(temp_db_conn):
status.set_status(temp_db_conn, dt.datetime.now(dt.timezone.utc))
assert nominatim.tools.replication.check_for_updates(temp_db_conn, 'https://test.io') == 254
def test_check_for_updates_no_state(monkeypatch, temp_db_conn):
status.set_status(temp_db_conn, dt.datetime.now(dt.timezone.utc), seq=345)
monkeypatch.setattr(nominatim.tools.replication.ReplicationServer,
"get_state_info", lambda self: None)
assert nominatim.tools.replication.check_for_updates(temp_db_conn, 'https://test.io') == 253
@pytest.mark.parametrize("server_sequence,result", [(344, 2), (345, 2), (346, 0)])
def test_check_for_updates_no_new_data(monkeypatch, temp_db_conn,
server_sequence, result):
date = dt.datetime.now(dt.timezone.utc)
status.set_status(temp_db_conn, date, seq=345)
monkeypatch.setattr(nominatim.tools.replication.ReplicationServer,
"get_state_info",
lambda self: OsmosisState(server_sequence, date))
assert nominatim.tools.replication.check_for_updates(temp_db_conn, 'https://test.io') == result
### updating
@pytest.fixture
def update_options(tmpdir):
return dict(base_url='https://test.io',
indexed_only=False,
update_interval=3600,
import_file=tmpdir / 'foo.osm',
max_diff_size=1)
def test_update_empty_status_table(dsn):
with pytest.raises(UsageError):
nominatim.tools.replication.update(dsn, {})
def test_update_already_indexed(temp_db_conn, dsn):
status.set_status(temp_db_conn, dt.datetime.now(dt.timezone.utc), seq=34, indexed=False)
assert nominatim.tools.replication.update(dsn, dict(indexed_only=True)) \
== nominatim.tools.replication.UpdateState.MORE_PENDING
def test_update_no_data_no_sleep(monkeypatch, temp_db_conn, dsn, update_options):
date = dt.datetime.now(dt.timezone.utc) - dt.timedelta(days=1)
status.set_status(temp_db_conn, date, seq=34)
monkeypatch.setattr(nominatim.tools.replication.ReplicationServer,
"apply_diffs",
lambda *args, **kwargs: None)
sleeptime = []
monkeypatch.setattr(time, 'sleep', sleeptime.append)
assert nominatim.tools.replication.update(dsn, update_options) \
== nominatim.tools.replication.UpdateState.NO_CHANGES
assert not sleeptime
def test_update_no_data_sleep(monkeypatch, temp_db_conn, dsn, update_options):
date = dt.datetime.now(dt.timezone.utc) - dt.timedelta(minutes=30)
status.set_status(temp_db_conn, date, seq=34)
monkeypatch.setattr(nominatim.tools.replication.ReplicationServer,
"apply_diffs",
lambda *args, **kwargs: None)
sleeptime = []
monkeypatch.setattr(time, 'sleep', sleeptime.append)
assert nominatim.tools.replication.update(dsn, update_options) \
== nominatim.tools.replication.UpdateState.NO_CHANGES
assert len(sleeptime) == 1
assert sleeptime[0] < 3600
assert sleeptime[0] > 0
| 5,398 | 35.234899 | 204 | py |
Nominatim | Nominatim-master/test/python/tools/test_refresh_create_functions.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for creating PL/pgSQL functions for Nominatim.
"""
import pytest
from nominatim.tools.refresh import create_functions
class TestCreateFunctions:
@pytest.fixture(autouse=True)
def init_env(self, sql_preprocessor, temp_db_conn, def_config, tmp_path):
self.conn = temp_db_conn
self.config = def_config
def_config.lib_dir.sql = tmp_path
def write_functions(self, content):
sqlfile = self.config.lib_dir.sql / 'functions.sql'
sqlfile.write_text(content)
def test_create_functions(self, temp_db_cursor):
self.write_functions("""CREATE OR REPLACE FUNCTION test() RETURNS INTEGER
AS $$
BEGIN
RETURN 43;
END;
$$ LANGUAGE plpgsql IMMUTABLE;
""")
create_functions(self.conn, self.config)
assert temp_db_cursor.scalar('SELECT test()') == 43
@pytest.mark.parametrize("dbg,ret", ((True, 43), (False, 22)))
def test_create_functions_with_template(self, temp_db_cursor, dbg, ret):
self.write_functions("""CREATE OR REPLACE FUNCTION test() RETURNS INTEGER
AS $$
BEGIN
{% if debug %}
RETURN 43;
{% else %}
RETURN 22;
{% endif %}
END;
$$ LANGUAGE plpgsql IMMUTABLE;
""")
create_functions(self.conn, self.config, enable_debug=dbg)
assert temp_db_cursor.scalar('SELECT test()') == ret
| 1,991 | 33.344828 | 81 | py |
Nominatim | Nominatim-master/test/python/tools/test_refresh_address_levels.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for functions for importing address ranks.
"""
import json
from pathlib import Path
import pytest
from nominatim.tools.refresh import load_address_levels, load_address_levels_from_config
def test_load_ranks_def_config(temp_db_conn, temp_db_cursor, def_config):
load_address_levels_from_config(temp_db_conn, def_config)
assert temp_db_cursor.table_rows('address_levels') > 0
def test_load_ranks_from_project_dir(project_env, temp_db_conn, temp_db_cursor):
test_file = project_env.project_dir / 'address-levels.json'
test_file.write_text('[{"tags":{"place":{"sea":2}}}]')
load_address_levels_from_config(temp_db_conn, project_env)
assert temp_db_cursor.table_rows('address_levels') == 1
def test_load_ranks_from_broken_file(project_env, temp_db_conn):
test_file = project_env.project_dir / 'address-levels.json'
test_file.write_text('[{"tags":"place":{"sea":2}}}]')
with pytest.raises(json.decoder.JSONDecodeError):
load_address_levels_from_config(temp_db_conn, project_env)
def test_load_ranks_country(temp_db_conn, temp_db_cursor):
load_address_levels(temp_db_conn, 'levels',
[{"tags": {"place": {"village": 14}}},
{"countries": ['de'],
"tags": {"place": {"village": 15}}},
{"countries": ['uk', 'us'],
"tags": {"place": {"village": 16}}}
])
assert temp_db_cursor.row_set('SELECT * FROM levels') == \
set([(None, 'place', 'village', 14, 14),
('de', 'place', 'village', 15, 15),
('uk', 'place', 'village', 16, 16),
('us', 'place', 'village', 16, 16),
])
def test_load_ranks_default_value(temp_db_conn, temp_db_cursor):
load_address_levels(temp_db_conn, 'levels',
[{"tags": {"boundary": {"": 28}}},
{"countries": ['hu'],
"tags": {"boundary": {"": 29}}}
])
assert temp_db_cursor.row_set('SELECT * FROM levels') == \
set([(None, 'boundary', None, 28, 28),
('hu', 'boundary', None, 29, 29),
])
def test_load_ranks_multiple_keys(temp_db_conn, temp_db_cursor):
load_address_levels(temp_db_conn, 'levels',
[{"tags": {"place": {"city": 14},
"boundary": {"administrative2" : 4}}
}])
assert temp_db_cursor.row_set('SELECT * FROM levels') == \
set([(None, 'place', 'city', 14, 14),
(None, 'boundary', 'administrative2', 4, 4),
])
def test_load_ranks_address(temp_db_conn, temp_db_cursor):
load_address_levels(temp_db_conn, 'levels',
[{"tags": {"place": {"city": 14,
"town" : [14, 13]}}
}])
assert temp_db_cursor.row_set('SELECT * FROM levels') == \
set([(None, 'place', 'city', 14, 14),
(None, 'place', 'town', 14, 13),
])
| 3,351 | 35.835165 | 88 | py |
Nominatim | Nominatim-master/test/python/tools/test_freeze.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for freeze functions (removing unused database parts).
"""
from nominatim.tools import freeze
NOMINATIM_RUNTIME_TABLES = [
'country_name', 'country_osm_grid',
'location_postcode', 'location_property_osmline', 'location_property_tiger',
    'placex', 'place_addressline',
'search_name',
'word'
]
NOMINATIM_DROP_TABLES = [
'address_levels',
'location_area', 'location_area_country', 'location_area_large_100',
'location_road_1',
'place', 'planet_osm_nodes', 'planet_osm_rels', 'planet_osm_ways',
'search_name_111',
'wikipedia_article', 'wikipedia_redirect'
]
def test_drop_tables(temp_db_conn, temp_db_cursor, table_factory):
for table in NOMINATIM_RUNTIME_TABLES + NOMINATIM_DROP_TABLES:
table_factory(table)
assert not freeze.is_frozen(temp_db_conn)
freeze.drop_update_tables(temp_db_conn)
for table in NOMINATIM_RUNTIME_TABLES:
assert temp_db_cursor.table_exists(table)
for table in NOMINATIM_DROP_TABLES:
assert not temp_db_cursor.table_exists(table)
assert freeze.is_frozen(temp_db_conn)
def test_drop_flatnode_file_no_file():
freeze.drop_flatnode_file(None)
def test_drop_flatnode_file_file_already_gone(tmp_path):
freeze.drop_flatnode_file(tmp_path / 'something.store')
def test_drop_flatnode_file_delete(tmp_path):
flatfile = tmp_path / 'flatnode.store'
flatfile.write_text('Some content')
freeze.drop_flatnode_file(flatfile)
assert not flatfile.exists()
| 1,704 | 27.416667 | 80 | py |
Nominatim | Nominatim-master/test/python/tools/test_check_database.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for database integrity checks.
"""
import pytest
from nominatim.tools import check_database as chkdb
def test_check_database_unknown_db(def_config, monkeypatch):
monkeypatch.setenv('NOMINATIM_DATABASE_DSN', 'pgsql:dbname=fjgkhughwgh2423gsags')
assert chkdb.check_database(def_config) == 1
def test_check_database_fatal_test(def_config, temp_db):
assert chkdb.check_database(def_config) == 1
def test_check_connection_good(temp_db_conn, def_config):
assert chkdb.check_connection(temp_db_conn, def_config) == chkdb.CheckState.OK
def test_check_connection_bad(def_config):
badconn = chkdb._BadConnection('Error')
assert chkdb.check_connection(badconn, def_config) == chkdb.CheckState.FATAL
def test_check_placex_table_good(table_factory, temp_db_conn, def_config):
table_factory('placex')
assert chkdb.check_placex_table(temp_db_conn, def_config) == chkdb.CheckState.OK
def test_check_placex_table_bad(temp_db_conn, def_config):
assert chkdb.check_placex_table(temp_db_conn, def_config) == chkdb.CheckState.FATAL
def test_check_placex_table_size_good(table_factory, temp_db_conn, def_config):
table_factory('placex', content=((1, ), (2, )))
assert chkdb.check_placex_size(temp_db_conn, def_config) == chkdb.CheckState.OK
def test_check_placex_table_size_bad(table_factory, temp_db_conn, def_config):
table_factory('placex')
assert chkdb.check_placex_size(temp_db_conn, def_config) == chkdb.CheckState.FATAL
def test_check_tokenizer_missing(temp_db_conn, def_config, tmp_path):
def_config.project_dir = tmp_path
assert chkdb.check_tokenizer(temp_db_conn, def_config) == chkdb.CheckState.FAIL
@pytest.mark.parametrize("check_result,state", [(None, chkdb.CheckState.OK),
("Something wrong", chkdb.CheckState.FAIL)])
def test_check_tokenizer(temp_db_conn, def_config, monkeypatch,
check_result, state):
class _TestTokenizer:
@staticmethod
def check_database(_):
return check_result
monkeypatch.setattr(chkdb.tokenizer_factory, 'get_tokenizer_for_db',
lambda *a, **k: _TestTokenizer())
assert chkdb.check_tokenizer(temp_db_conn, def_config) == state
def test_check_indexing_good(table_factory, temp_db_conn, def_config):
table_factory('placex', 'place_id int, indexed_status smallint',
content=((1, 0), (2, 0)))
assert chkdb.check_indexing(temp_db_conn, def_config) == chkdb.CheckState.OK
def test_check_indexing_bad(table_factory, temp_db_conn, def_config):
table_factory('placex', 'place_id int, indexed_status smallint',
content=((1, 0), (2, 2)))
assert chkdb.check_indexing(temp_db_conn, def_config) == chkdb.CheckState.WARN
def test_check_database_indexes_bad(temp_db_conn, def_config):
assert chkdb.check_database_indexes(temp_db_conn, def_config) == chkdb.CheckState.FAIL
def test_check_database_indexes_valid(temp_db_conn, def_config):
assert chkdb.check_database_index_valid(temp_db_conn, def_config) == chkdb.CheckState.OK
def test_check_tiger_table_disabled(temp_db_conn, def_config, monkeypatch):
monkeypatch.setenv('NOMINATIM_USE_US_TIGER_DATA', 'no')
assert chkdb.check_tiger_table(temp_db_conn, def_config) == chkdb.CheckState.NOT_APPLICABLE
def test_check_tiger_table_enabled(temp_db_cursor, temp_db_conn, def_config, monkeypatch):
monkeypatch.setenv('NOMINATIM_USE_US_TIGER_DATA', 'yes')
assert chkdb.check_tiger_table(temp_db_conn, def_config) == chkdb.CheckState.FAIL
temp_db_cursor.execute('CREATE TABLE location_property_tiger (place_id int)')
assert chkdb.check_tiger_table(temp_db_conn, def_config) == chkdb.CheckState.FAIL
temp_db_cursor.execute('INSERT INTO location_property_tiger VALUES (1), (2)')
assert chkdb.check_tiger_table(temp_db_conn, def_config) == chkdb.CheckState.OK
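# A hedged sketch of how the individual checks might be composed; the check
# functions and CheckState values are real (see the tests above), but the
# aggregation logic shown here is an illustrative assumption, not the actual
# body of check_database().
def _example_run_checks(conn, config):
    for check in (chkdb.check_connection, chkdb.check_placex_table,
                  chkdb.check_tokenizer, chkdb.check_indexing):
        state = check(conn, config)
        if state == chkdb.CheckState.FATAL:
            return 1  # a fatal state makes running further checks pointless
    return 0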
| 4,137 | 38.788462 | 95 | py |
Nominatim | Nominatim-master/test/python/tools/test_postcodes.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for functions to maintain the artificial postcode table.
"""
import subprocess
import pytest
from nominatim.tools import postcodes
from nominatim.data import country_info
import dummy_tokenizer
class MockPostcodeTable:
""" A location_postcode table for testing.
"""
def __init__(self, conn):
self.conn = conn
with conn.cursor() as cur:
cur.execute("""CREATE TABLE location_postcode (
place_id BIGINT,
parent_place_id BIGINT,
rank_search SMALLINT,
rank_address SMALLINT,
indexed_status SMALLINT,
indexed_date TIMESTAMP,
country_code varchar(2),
postcode TEXT,
geometry GEOMETRY(Geometry, 4326))""")
cur.execute("""CREATE OR REPLACE FUNCTION token_normalized_postcode(postcode TEXT)
RETURNS TEXT AS $$ BEGIN RETURN postcode; END; $$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION get_country_code(place geometry)
RETURNS TEXT AS $$ BEGIN
RETURN null;
END; $$ LANGUAGE plpgsql;
""")
conn.commit()
def add(self, country, postcode, x, y):
with self.conn.cursor() as cur:
cur.execute("""INSERT INTO location_postcode (place_id, indexed_status,
country_code, postcode,
geometry)
VALUES (nextval('seq_place'), 1, %s, %s,
'SRID=4326;POINT(%s %s)')""",
(country, postcode, x, y))
self.conn.commit()
@property
def row_set(self):
with self.conn.cursor() as cur:
cur.execute("""SELECT country_code, postcode,
ST_X(geometry), ST_Y(geometry)
FROM location_postcode""")
return set((tuple(row) for row in cur))
@pytest.fixture
def tokenizer():
return dummy_tokenizer.DummyTokenizer(None, None)
@pytest.fixture
def postcode_table(def_config, temp_db_conn, placex_table):
country_info.setup_country_config(def_config)
return MockPostcodeTable(temp_db_conn)
@pytest.fixture
def insert_implicit_postcode(placex_table, place_row):
"""
Inserts data into the placex and place table
which can then be used to compute one postcode.
"""
def _insert_implicit_postcode(osm_id, country, geometry, address):
placex_table.add(osm_id=osm_id, country=country, geom=geometry)
place_row(osm_id=osm_id, geom='SRID=4326;'+geometry, address=address)
return _insert_implicit_postcode
def test_postcodes_empty(dsn, postcode_table, place_table,
tmp_path, tokenizer):
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert not postcode_table.row_set
def test_postcodes_add_new(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer):
insert_implicit_postcode(1, 'xx', 'POINT(10 12)', dict(postcode='9486'))
postcode_table.add('yy', '9486', 99, 34)
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('xx', '9486', 10, 12), }
def test_postcodes_replace_coordinates(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer):
insert_implicit_postcode(1, 'xx', 'POINT(10 12)', dict(postcode='AB 4511'))
postcode_table.add('xx', 'AB 4511', 99, 34)
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('xx', 'AB 4511', 10, 12)}
def test_postcodes_replace_coordinates_close(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer):
insert_implicit_postcode(1, 'xx', 'POINT(10 12)', dict(postcode='AB 4511'))
postcode_table.add('xx', 'AB 4511', 10, 11.99999)
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('xx', 'AB 4511', 10, 11.99999)}
def test_postcodes_remove(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer):
insert_implicit_postcode(1, 'xx', 'POINT(10 12)', dict(postcode='AB 4511'))
postcode_table.add('xx', 'badname', 10, 12)
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('xx', 'AB 4511', 10, 12)}
def test_postcodes_ignore_empty_country(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer):
insert_implicit_postcode(1, None, 'POINT(10 12)', dict(postcode='AB 4511'))
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert not postcode_table.row_set
def test_postcodes_remove_all(dsn, postcode_table, place_table,
tmp_path, tokenizer):
postcode_table.add('ch', '5613', 10, 12)
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert not postcode_table.row_set
def test_postcodes_multi_country(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer):
insert_implicit_postcode(1, 'de', 'POINT(10 12)', dict(postcode='54451'))
insert_implicit_postcode(2, 'cc', 'POINT(100 56)', dict(postcode='DD23 T'))
insert_implicit_postcode(3, 'de', 'POINT(10.3 11.0)', dict(postcode='54452'))
insert_implicit_postcode(4, 'cc', 'POINT(10.3 11.0)', dict(postcode='54452'))
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('de', '54451', 10, 12),
('de', '54452', 10.3, 11.0),
('cc', '54452', 10.3, 11.0),
('cc', 'DD23 T', 100, 56)}
@pytest.mark.parametrize("gzipped", [True, False])
def test_postcodes_extern(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer, gzipped):
insert_implicit_postcode(1, 'xx', 'POINT(10 12)', dict(postcode='AB 4511'))
extfile = tmp_path / 'xx_postcodes.csv'
extfile.write_text("postcode,lat,lon\nAB 4511,-4,-1\nCD 4511,-5, -10")
if gzipped:
subprocess.run(['gzip', str(extfile)])
assert not extfile.is_file()
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('xx', 'AB 4511', 10, 12),
('xx', 'CD 4511', -10, -5)}
def test_postcodes_extern_bad_column(dsn, postcode_table, tmp_path,
insert_implicit_postcode, tokenizer):
insert_implicit_postcode(1, 'xx', 'POINT(10 12)', dict(postcode='AB 4511'))
extfile = tmp_path / 'xx_postcodes.csv'
extfile.write_text("postode,lat,lon\nAB 4511,-4,-1\nCD 4511,-5, -10")
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('xx', 'AB 4511', 10, 12)}
def test_postcodes_extern_bad_number(dsn, insert_implicit_postcode,
postcode_table, tmp_path, tokenizer):
insert_implicit_postcode(1, 'xx', 'POINT(10 12)', dict(postcode='AB 4511'))
extfile = tmp_path / 'xx_postcodes.csv'
extfile.write_text("postcode,lat,lon\nXX 4511,-4,NaN\nCD 4511,-5, -10\n34,200,0")
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('xx', 'AB 4511', 10, 12),
('xx', 'CD 4511', -10, -5)}
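# A hedged helper sketch summarising the external file convention exercised
# above: per-country files named <cc>_postcodes.csv (optionally gzipped) with
# the columns postcode,lat,lon. The helper name and the use of the stdlib gzip
# module are illustrative assumptions.
def _example_write_external_postcodes(project_dir, country='xx'):
    import gzip
    with gzip.open(project_dir / f'{country}_postcodes.csv.gz', 'wt') as fd:
        fd.write("postcode,lat,lon\nAB 4511,-4,-1\n")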
def test_can_compute(dsn, table_factory):
assert not postcodes.can_compute(dsn)
table_factory('place')
assert postcodes.can_compute(dsn)
def test_no_placex_entry(dsn, tmp_path, temp_db_cursor, place_row, postcode_table, tokenizer):
    # Rewrite the get_country_code function to verify its execution.
temp_db_cursor.execute("""
CREATE OR REPLACE FUNCTION get_country_code(place geometry)
RETURNS TEXT AS $$ BEGIN
RETURN 'yy';
END; $$ LANGUAGE plpgsql;
""")
place_row(geom='SRID=4326;POINT(10 12)', address=dict(postcode='AB 4511'))
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert postcode_table.row_set == {('yy', 'AB 4511', 10, 12)}
def test_discard_badly_formatted_postcodes(dsn, tmp_path, temp_db_cursor, place_row, postcode_table, tokenizer):
    # Rewrite the get_country_code function to verify its execution.
temp_db_cursor.execute("""
CREATE OR REPLACE FUNCTION get_country_code(place geometry)
RETURNS TEXT AS $$ BEGIN
RETURN 'fr';
END; $$ LANGUAGE plpgsql;
""")
place_row(geom='SRID=4326;POINT(10 12)', address=dict(postcode='AB 4511'))
postcodes.update_postcodes(dsn, tmp_path, tokenizer)
assert not postcode_table.row_set
| 9,265 | 37.769874 | 112 | py |
Nominatim | Nominatim-master/test/python/tools/test_refresh.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test for various refresh functions.
"""
from pathlib import Path
import pytest
from nominatim.tools import refresh
def test_refresh_import_wikipedia_not_existing(dsn):
assert refresh.import_wikipedia_articles(dsn, Path('.')) == 1
def test_refresh_import_secondary_importance_non_existing(dsn):
assert refresh.import_secondary_importance(dsn, Path('.')) == 1
def test_refresh_import_secondary_importance_testdb(dsn, src_dir, temp_db_conn, temp_db_cursor):
temp_db_cursor.execute('CREATE EXTENSION postgis')
if temp_db_conn.postgis_version_tuple()[0] < 3:
assert refresh.import_secondary_importance(dsn, src_dir / 'test' / 'testdb') > 0
else:
temp_db_cursor.execute('CREATE EXTENSION postgis_raster')
assert refresh.import_secondary_importance(dsn, src_dir / 'test' / 'testdb') == 0
assert temp_db_conn.table_exists('secondary_importance')
@pytest.mark.parametrize("replace", (True, False))
def test_refresh_import_wikipedia(dsn, src_dir, table_factory, temp_db_cursor, replace):
if replace:
table_factory('wikipedia_article')
table_factory('wikipedia_redirect')
# use the small wikipedia file for the API testdb
assert refresh.import_wikipedia_articles(dsn, src_dir / 'test' / 'testdb') == 0
assert temp_db_cursor.table_rows('wikipedia_article') > 0
assert temp_db_cursor.table_rows('wikipedia_redirect') > 0
def test_recompute_importance(placex_table, table_factory, temp_db_conn, temp_db_cursor):
temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION compute_importance(extratags HSTORE,
country_code varchar(2),
rank_search SMALLINT,
centroid GEOMETRY,
OUT importance FLOAT,
OUT wikipedia TEXT)
AS $$ SELECT 0.1::float, 'foo'::text $$ LANGUAGE SQL""")
refresh.recompute_importance(temp_db_conn)
@pytest.mark.parametrize('osm_type', ('N', 'W', 'R'))
def test_invalidate_osm_object_simple(placex_table, osm_type, temp_db_conn, temp_db_cursor):
placex_table.add(osm_type=osm_type, osm_id=57283)
refresh.invalidate_osm_object(osm_type, 57283, temp_db_conn, recursive=False)
temp_db_conn.commit()
assert 2 == temp_db_cursor.scalar("""SELECT indexed_status FROM placex
WHERE osm_type = %s and osm_id = %s""",
(osm_type, 57283))
def test_invalidate_osm_object_nonexisting_simple(placex_table, temp_db_conn, temp_db_cursor):
placex_table.add(osm_type='W', osm_id=57283)
refresh.invalidate_osm_object('N', 57283, temp_db_conn, recursive=False)
temp_db_conn.commit()
assert 0 == temp_db_cursor.scalar("""SELECT count(*) FROM placex
WHERE indexed_status > 0""")
@pytest.mark.parametrize('osm_type', ('N', 'W', 'R'))
def test_invalidate_osm_object_recursive(placex_table, osm_type, temp_db_conn, temp_db_cursor):
placex_table.add(osm_type=osm_type, osm_id=57283)
temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION place_force_update(placeid BIGINT)
RETURNS BOOLEAN AS $$
BEGIN
UPDATE placex SET indexed_status = 522
WHERE place_id = placeid;
RETURN TRUE;
END;
$$
LANGUAGE plpgsql;""")
refresh.invalidate_osm_object(osm_type, 57283, temp_db_conn)
temp_db_conn.commit()
assert 522 == temp_db_cursor.scalar("""SELECT indexed_status FROM placex
WHERE osm_type = %s and osm_id = %s""",
(osm_type, 57283))
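# A hedged usage sketch: invalidation only bumps indexed_status, as the
# assertions above show; the function itself does not commit, so the caller
# has to (the `conn` parameter is illustrative).
def _example_invalidate(conn):
    refresh.invalidate_osm_object('N', 57283, conn, recursive=False)
    conn.commit()  # make the new indexed_status visible to other connections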
| 4,188 | 40.068627 | 96 | py |
Nominatim | Nominatim-master/test/python/tools/test_sp_csv_loader.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for methods of the SPCsvLoader class.
"""
import pytest
from nominatim.errors import UsageError
from nominatim.tools.special_phrases.sp_csv_loader import SPCsvLoader
from nominatim.tools.special_phrases.special_phrase import SpecialPhrase
@pytest.fixture
def sp_csv_loader(src_dir):
"""
Return an instance of SPCsvLoader.
"""
csv_path = (src_dir / 'test' / 'testdata' / 'sp_csv_test.csv').resolve()
loader = SPCsvLoader(csv_path)
return loader
def test_generate_phrases(sp_csv_loader):
"""
    Test method generate_phrases().
    It should return the right SpecialPhrase objects.
"""
phrases = list(sp_csv_loader.generate_phrases())
assert len(phrases) == 42
assert len(set(phrases)) == 41
assert SpecialPhrase('Billboard', 'advertising', 'billboard', '-') in phrases
assert SpecialPhrase('Zip Lines', 'aerialway', 'zip_line', '-') in phrases
def test_invalid_csv_file():
    """
    Test method check_csv_validity()
    It should raise an exception when a file with an
    extension other than .csv is given.
"""
loader = SPCsvLoader('test.wrong')
with pytest.raises(UsageError, match='not a csv file'):
next(loader.generate_phrases())
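# A hedged usage sketch: the loader is a generator, so the file is only read
# (and its extension only validated) once iteration starts, which is why the
# test above needs next(). The `csv_path` parameter is illustrative.
def _example_collect_phrases(csv_path):
    return list(SPCsvLoader(csv_path).generate_phrases())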
| 1,453 | 28.08 | 81 | py |
Nominatim | Nominatim-master/test/python/tools/test_admin.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for maintenance and analysis functions.
"""
import pytest
from nominatim.errors import UsageError
from nominatim.tools import admin
from nominatim.tokenizer import factory
@pytest.fixture(autouse=True)
def create_placex_table(project_env, tokenizer_mock, temp_db_cursor, placex_table):
""" All tests in this module require the placex table to be set up.
"""
temp_db_cursor.execute("DROP TYPE IF EXISTS prepare_update_info CASCADE")
temp_db_cursor.execute("""CREATE TYPE prepare_update_info AS (
name HSTORE,
address HSTORE,
rank_address SMALLINT,
country_code TEXT,
class TEXT,
type TEXT,
linked_place_id BIGINT
)""")
temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION placex_indexing_prepare(p placex,
OUT result prepare_update_info)
AS $$
BEGIN
result.address := p.address;
result.name := p.name;
result.class := p.class;
result.type := p.type;
result.country_code := p.country_code;
result.rank_address := p.rank_address;
END;
$$ LANGUAGE plpgsql STABLE;
""")
factory.create_tokenizer(project_env)
def test_analyse_indexing_no_objects(project_env):
with pytest.raises(UsageError):
admin.analyse_indexing(project_env)
@pytest.mark.parametrize("oid", ['1234', 'N123a', 'X123'])
def test_analyse_indexing_bad_osmid(project_env, oid):
with pytest.raises(UsageError):
admin.analyse_indexing(project_env, osm_id=oid)
def test_analyse_indexing_unknown_osmid(project_env):
with pytest.raises(UsageError):
admin.analyse_indexing(project_env, osm_id='W12345674')
def test_analyse_indexing_with_place_id(project_env, temp_db_cursor):
temp_db_cursor.execute("INSERT INTO placex (place_id) VALUES(12345)")
admin.analyse_indexing(project_env, place_id=12345)
def test_analyse_indexing_with_osm_id(project_env, temp_db_cursor):
temp_db_cursor.execute("""INSERT INTO placex (place_id, osm_type, osm_id)
VALUES(9988, 'N', 10000)""")
admin.analyse_indexing(project_env, osm_id='N10000')
| 2,800 | 37.369863 | 90 | py |
Nominatim | Nominatim-master/test/python/tools/test_database_import.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for functions to import a new database.
"""
from pathlib import Path
from contextlib import closing
import pytest
import psycopg2
from nominatim.tools import database_import
from nominatim.errors import UsageError
class TestDatabaseSetup:
DBNAME = 'test_nominatim_python_unittest'
@pytest.fixture(autouse=True)
    def setup_nonexistent_db(self):
conn = psycopg2.connect(database='postgres')
try:
conn.set_isolation_level(0)
with conn.cursor() as cur:
cur.execute(f'DROP DATABASE IF EXISTS {self.DBNAME}')
yield True
with conn.cursor() as cur:
cur.execute(f'DROP DATABASE IF EXISTS {self.DBNAME}')
finally:
conn.close()
@pytest.fixture
def cursor(self):
conn = psycopg2.connect(database=self.DBNAME)
try:
with conn.cursor() as cur:
yield cur
finally:
conn.close()
def conn(self):
return closing(psycopg2.connect(database=self.DBNAME))
def test_setup_skeleton(self):
database_import.setup_database_skeleton(f'dbname={self.DBNAME}')
# Check that all extensions are set up.
with self.conn() as conn:
with conn.cursor() as cur:
cur.execute('CREATE TABLE t (h HSTORE, geom GEOMETRY(Geometry, 4326))')
def test_unsupported_pg_version(self, monkeypatch):
monkeypatch.setattr(database_import, 'POSTGRESQL_REQUIRED_VERSION', (100, 4))
with pytest.raises(UsageError, match='PostgreSQL server is too old.'):
database_import.setup_database_skeleton(f'dbname={self.DBNAME}')
def test_create_db_explicit_ro_user(self):
database_import.setup_database_skeleton(f'dbname={self.DBNAME}',
rouser='postgres')
def test_create_db_missing_ro_user(self):
with pytest.raises(UsageError, match='Missing read-only user.'):
database_import.setup_database_skeleton(f'dbname={self.DBNAME}',
rouser='sdfwkjkjgdugu2;jgsafkljas;')
def test_setup_extensions_old_postgis(self, monkeypatch):
monkeypatch.setattr(database_import, 'POSTGIS_REQUIRED_VERSION', (50, 50))
with pytest.raises(UsageError, match='PostGIS is too old.'):
database_import.setup_database_skeleton(f'dbname={self.DBNAME}')
def test_setup_skeleton_already_exists(temp_db):
with pytest.raises(UsageError):
database_import.setup_database_skeleton(f'dbname={temp_db}')
def test_import_osm_data_simple(table_factory, osm2pgsql_options, capfd):
table_factory('place', content=((1, ), ))
database_import.import_osm_data(Path('file.pbf'), osm2pgsql_options)
captured = capfd.readouterr()
assert '--create' in captured.out
assert '--output gazetteer' in captured.out
assert f'--style {osm2pgsql_options["osm2pgsql_style"]}' in captured.out
assert f'--number-processes {osm2pgsql_options["threads"]}' in captured.out
assert f'--cache {osm2pgsql_options["osm2pgsql_cache"]}' in captured.out
assert 'file.pbf' in captured.out
def test_import_osm_data_multifile(table_factory, tmp_path, osm2pgsql_options, capfd):
table_factory('place', content=((1, ), ))
osm2pgsql_options['osm2pgsql_cache'] = 0
files = [tmp_path / 'file1.osm', tmp_path / 'file2.osm']
for f in files:
f.write_text('test')
database_import.import_osm_data(files, osm2pgsql_options)
captured = capfd.readouterr()
assert 'file1.osm' in captured.out
assert 'file2.osm' in captured.out
def test_import_osm_data_simple_no_data(table_factory, osm2pgsql_options):
table_factory('place')
with pytest.raises(UsageError, match='No data imported'):
database_import.import_osm_data(Path('file.pbf'), osm2pgsql_options)
def test_import_osm_data_simple_ignore_no_data(table_factory, osm2pgsql_options):
table_factory('place')
database_import.import_osm_data(Path('file.pbf'), osm2pgsql_options,
ignore_errors=True)
def test_import_osm_data_drop(table_factory, temp_db_conn, tmp_path, osm2pgsql_options):
table_factory('place', content=((1, ), ))
table_factory('planet_osm_nodes')
flatfile = tmp_path / 'flatfile'
flatfile.write_text('touch')
osm2pgsql_options['flatnode_file'] = str(flatfile.resolve())
database_import.import_osm_data(Path('file.pbf'), osm2pgsql_options, drop=True)
assert not flatfile.exists()
assert not temp_db_conn.table_exists('planet_osm_nodes')
def test_import_osm_data_default_cache(table_factory, osm2pgsql_options, capfd):
table_factory('place', content=((1, ), ))
osm2pgsql_options['osm2pgsql_cache'] = 0
database_import.import_osm_data(Path(__file__), osm2pgsql_options)
captured = capfd.readouterr()
assert f'--cache {osm2pgsql_options["osm2pgsql_cache"]}' in captured.out
@pytest.mark.parametrize("with_search", (True, False))
def test_truncate_database_tables(temp_db_conn, temp_db_cursor, table_factory, with_search):
tables = ['placex', 'place_addressline', 'location_area',
'location_area_country',
'location_property_tiger', 'location_property_osmline',
'location_postcode', 'location_road_23']
if with_search:
tables.append('search_name')
for table in tables:
table_factory(table, content=((1, ), (2, ), (3, )))
assert temp_db_cursor.table_rows(table) == 3
database_import.truncate_data_tables(temp_db_conn)
for table in tables:
assert temp_db_cursor.table_rows(table) == 0
@pytest.mark.parametrize("threads", (1, 5))
def test_load_data(dsn, place_row, placex_table, osmline_table,
temp_db_cursor, threads):
for func in ('precompute_words', 'getorcreate_housenumber_id', 'make_standard_name'):
temp_db_cursor.execute(f"""CREATE FUNCTION {func} (src TEXT)
RETURNS TEXT AS $$ SELECT 'a'::TEXT $$ LANGUAGE SQL
""")
for oid in range(100, 130):
place_row(osm_id=oid)
place_row(osm_type='W', osm_id=342, cls='place', typ='houses',
geom='SRID=4326;LINESTRING(0 0, 10 10)')
database_import.load_data(dsn, threads)
assert temp_db_cursor.table_rows('placex') == 30
assert temp_db_cursor.table_rows('location_property_osmline') == 1
class TestSetupSQL:
@pytest.fixture(autouse=True)
def init_env(self, temp_db, tmp_path, def_config, sql_preprocessor_cfg):
def_config.lib_dir.sql = tmp_path / 'sql'
def_config.lib_dir.sql.mkdir()
self.config = def_config
def write_sql(self, fname, content):
(self.config.lib_dir.sql / fname).write_text(content)
@pytest.mark.parametrize("reverse", [True, False])
def test_create_tables(self, temp_db_conn, temp_db_cursor, reverse):
self.write_sql('tables.sql',
"""CREATE FUNCTION test() RETURNS bool
AS $$ SELECT {{db.reverse_only}} $$ LANGUAGE SQL""")
database_import.create_tables(temp_db_conn, self.config, reverse)
        assert temp_db_cursor.scalar('SELECT test()') == reverse
def test_create_table_triggers(self, temp_db_conn, temp_db_cursor):
self.write_sql('table-triggers.sql',
"""CREATE FUNCTION test() RETURNS TEXT
AS $$ SELECT 'a'::text $$ LANGUAGE SQL""")
database_import.create_table_triggers(temp_db_conn, self.config)
        assert temp_db_cursor.scalar('SELECT test()') == 'a'
def test_create_partition_tables(self, temp_db_conn, temp_db_cursor):
self.write_sql('partition-tables.src.sql',
"""CREATE FUNCTION test() RETURNS TEXT
AS $$ SELECT 'b'::text $$ LANGUAGE SQL""")
database_import.create_partition_tables(temp_db_conn, self.config)
        assert temp_db_cursor.scalar('SELECT test()') == 'b'
@pytest.mark.parametrize("drop", [True, False])
def test_create_search_indices(self, temp_db_conn, temp_db_cursor, drop):
self.write_sql('indices.sql',
"""CREATE FUNCTION test() RETURNS bool
AS $$ SELECT {{drop}} $$ LANGUAGE SQL""")
database_import.create_search_indices(temp_db_conn, self.config, drop)
        assert temp_db_cursor.scalar('SELECT test()') == drop
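# A hedged end-to-end sketch of the import order implied by the functions
# tested above. All function names are real; the exact sequencing and the
# helper itself are illustrative assumptions only.
def _example_full_import(dsn, conn, config, osm_file, osm2pgsql_options):
    database_import.setup_database_skeleton(dsn)
    database_import.import_osm_data(osm_file, osm2pgsql_options)
    database_import.create_tables(conn, config, False)  # False: not reverse-only
    database_import.load_data(dsn, 1)  # one worker thread
    database_import.create_search_indices(conn, config, False)  # keep existing indices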
| 8,715 | 33.587302 | 92 | py |
Nominatim | Nominatim-master/test/python/tools/test_import_special_phrases.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the special phrase import methods
of the SPImporter class.
"""
from shutil import copyfile
import pytest
from nominatim.tools.special_phrases.sp_importer import SPImporter
from nominatim.tools.special_phrases.sp_wiki_loader import SPWikiLoader
from nominatim.tools.special_phrases.special_phrase import SpecialPhrase
from nominatim.errors import UsageError
from cursor import CursorForTesting
@pytest.fixture
def sp_importer(temp_db_conn, def_config, monkeypatch):
"""
Return an instance of SPImporter.
"""
monkeypatch.setenv('NOMINATIM_LANGUAGES', 'en')
loader = SPWikiLoader(def_config)
return SPImporter(def_config, temp_db_conn, loader)
@pytest.fixture
def xml_wiki_content(src_dir):
"""
return the content of the static xml test file.
"""
xml_test_content = src_dir / 'test' / 'testdata' / 'special_phrases_test_content.txt'
return xml_test_content.read_text()
@pytest.fixture
def default_phrases(table_factory):
table_factory('place_classtype_testclasstypetable_to_delete')
table_factory('place_classtype_testclasstypetable_to_keep')
def test_fetch_existing_place_classtype_tables(sp_importer, table_factory):
"""
Check for the fetch_existing_place_classtype_tables() method.
It should return the table just created.
"""
table_factory('place_classtype_testclasstypetable')
sp_importer._fetch_existing_place_classtype_tables()
contained_table = sp_importer.table_phrases_to_delete.pop()
assert contained_table == 'place_classtype_testclasstypetable'
def test_check_sanity_class(sp_importer):
"""
Check for _check_sanity() method.
    If a wrong class or type is given, the sanity check should fail.
    If a good class and type are given, it should succeed.
"""
assert not sp_importer._check_sanity(SpecialPhrase('en', '', 'type', ''))
assert not sp_importer._check_sanity(SpecialPhrase('en', 'class', '', ''))
assert sp_importer._check_sanity(SpecialPhrase('en', 'class', 'type', ''))
def test_load_white_and_black_lists(sp_importer):
"""
    Test that _load_white_and_black_lists() correctly returns
    the black list and the white list and that both are of dict type.
"""
black_list, white_list = sp_importer._load_white_and_black_lists()
assert isinstance(black_list, dict) and isinstance(white_list, dict)
def test_create_place_classtype_indexes(temp_db_with_extensions, temp_db_conn,
table_factory, sp_importer):
"""
    Test that _create_place_classtype_indexes() creates the
    place_id index and centroid index on the right place_classtype table.
"""
phrase_class = 'class'
phrase_type = 'type'
table_name = 'place_classtype_{}_{}'.format(phrase_class, phrase_type)
table_factory(table_name, 'place_id BIGINT, centroid GEOMETRY')
sp_importer._create_place_classtype_indexes('', phrase_class, phrase_type)
assert check_placeid_and_centroid_indexes(temp_db_conn, phrase_class, phrase_type)
def test_create_place_classtype_table(temp_db_conn, placex_table, sp_importer):
"""
    Test that _create_place_classtype_table() creates
the right place_classtype table.
"""
phrase_class = 'class'
phrase_type = 'type'
sp_importer._create_place_classtype_table('', phrase_class, phrase_type)
assert check_table_exist(temp_db_conn, phrase_class, phrase_type)
def test_grant_access_to_web_user(temp_db_conn, table_factory, def_config, sp_importer):
"""
    Test that _grant_access_to_webuser() gives the
    right access to the web user.
"""
phrase_class = 'class'
phrase_type = 'type'
table_name = 'place_classtype_{}_{}'.format(phrase_class, phrase_type)
table_factory(table_name)
sp_importer._grant_access_to_webuser(phrase_class, phrase_type)
assert check_grant_access(temp_db_conn, def_config.DATABASE_WEBUSER, phrase_class, phrase_type)
def test_create_place_classtype_table_and_indexes(
temp_db_conn, def_config, placex_table,
sp_importer):
"""
Test that _create_place_classtype_table_and_indexes()
    creates the right place_classtype tables with place_id and
    centroid indexes and grants access to the web user
for the given set of pairs.
"""
pairs = set([('class1', 'type1'), ('class2', 'type2')])
sp_importer._create_classtype_table_and_indexes(pairs)
for pair in pairs:
assert check_table_exist(temp_db_conn, pair[0], pair[1])
assert check_placeid_and_centroid_indexes(temp_db_conn, pair[0], pair[1])
assert check_grant_access(temp_db_conn, def_config.DATABASE_WEBUSER, pair[0], pair[1])
def test_remove_non_existent_tables_from_db(sp_importer, default_phrases,
temp_db_conn):
"""
    Check for the _remove_non_existent_tables_from_db() method.
    It should remove entries from the word table which are contained
in the words_phrases_to_delete set and not those also contained
in the words_phrases_still_exist set.
place_classtype tables contained in table_phrases_to_delete should
be deleted.
"""
sp_importer.table_phrases_to_delete = {
'place_classtype_testclasstypetable_to_delete'
}
query_tables = """
SELECT table_name
FROM information_schema.tables
WHERE table_schema='public'
AND table_name like 'place_classtype_%';
"""
sp_importer._remove_non_existent_tables_from_db()
# Changes are not committed yet. Use temp_db_conn for checking results.
with temp_db_conn.cursor(cursor_factory=CursorForTesting) as cur:
assert cur.row_set(query_tables) \
== {('place_classtype_testclasstypetable_to_keep', )}
@pytest.mark.parametrize("should_replace", [(True), (False)])
def test_import_phrases(monkeypatch, temp_db_conn, def_config, sp_importer,
placex_table, table_factory, tokenizer_mock,
xml_wiki_content, should_replace):
"""
    Check that the main import_phrases() method executes correctly.
    It should create the place_classtype table, the place_id and centroid indexes,
    grant access to the web user and execute the SQL functions for amenities.
    It should also update the database correctly by deleting or preserving
    existing entries in the database.
"""
    # Add some data to the database before execution in order to test
    # what is deleted and what is preserved.
table_factory('place_classtype_amenity_animal_shelter')
table_factory('place_classtype_wrongclass_wrongtype')
monkeypatch.setattr('nominatim.tools.special_phrases.sp_wiki_loader._get_wiki_content',
lambda lang: xml_wiki_content)
tokenizer = tokenizer_mock()
sp_importer.import_phrases(tokenizer, should_replace)
assert len(tokenizer.analyser_cache['special_phrases']) == 18
class_test = 'aerialway'
type_test = 'zip_line'
assert check_table_exist(temp_db_conn, class_test, type_test)
assert check_placeid_and_centroid_indexes(temp_db_conn, class_test, type_test)
assert check_grant_access(temp_db_conn, def_config.DATABASE_WEBUSER, class_test, type_test)
assert check_table_exist(temp_db_conn, 'amenity', 'animal_shelter')
if should_replace:
assert not check_table_exist(temp_db_conn, 'wrong_class', 'wrong_type')
assert temp_db_conn.table_exists('place_classtype_amenity_animal_shelter')
if should_replace:
assert not temp_db_conn.table_exists('place_classtype_wrongclass_wrongtype')
def check_table_exist(temp_db_conn, phrase_class, phrase_type):
"""
Verify that the place_classtype table exists for the given
phrase_class and phrase_type.
"""
return temp_db_conn.table_exists('place_classtype_{}_{}'.format(phrase_class, phrase_type))
def check_grant_access(temp_db_conn, user, phrase_class, phrase_type):
"""
Check that the web user has been granted right access to the
place_classtype table of the given phrase_class and phrase_type.
"""
table_name = 'place_classtype_{}_{}'.format(phrase_class, phrase_type)
with temp_db_conn.cursor() as temp_db_cursor:
temp_db_cursor.execute("""
SELECT * FROM information_schema.role_table_grants
WHERE table_name='{}'
AND grantee='{}'
AND privilege_type='SELECT'""".format(table_name, user))
return temp_db_cursor.fetchone()
def check_placeid_and_centroid_indexes(temp_db_conn, phrase_class, phrase_type):
"""
Check that the place_id index and centroid index exist for the
place_classtype table of the given phrase_class and phrase_type.
"""
index_prefix = 'idx_place_classtype_{}_{}_'.format(phrase_class, phrase_type)
return (
temp_db_conn.index_exists(index_prefix + 'centroid')
and
temp_db_conn.index_exists(index_prefix + 'place_id')
)
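# A hedged helper sketch pulling together the naming conventions verified by
# the three helpers above; the function itself is illustrative only.
def _example_classtype_names(phrase_class, phrase_type):
    table = 'place_classtype_{}_{}'.format(phrase_class, phrase_type)
    index_prefix = 'idx_{}_'.format(table)
    return table, index_prefix + 'place_id', index_prefix + 'centroid'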
| 9,308 | 37.466942 | 99 | py |
Nominatim | Nominatim-master/test/python/tools/test_tiger_data.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the TIGER data import function.
"""
import tarfile
from textwrap import dedent
import pytest
from nominatim.tools import tiger_data, freeze
from nominatim.errors import UsageError
class MockTigerTable:
def __init__(self, conn):
self.conn = conn
with conn.cursor() as cur:
cur.execute("""CREATE TABLE tiger (linegeo GEOMETRY,
start INTEGER,
stop INTEGER,
interpol TEXT,
token_info JSONB,
postcode TEXT)""")
# We need this table to determine if the database is frozen or not
cur.execute("CREATE TABLE place (number INTEGER)")
def count(self):
with self.conn.cursor() as cur:
return cur.scalar("SELECT count(*) FROM tiger")
def row(self):
with self.conn.cursor() as cur:
cur.execute("SELECT * FROM tiger LIMIT 1")
return cur.fetchone()
@pytest.fixture
def tiger_table(def_config, temp_db_conn, sql_preprocessor,
temp_db_with_extensions, tmp_path):
def_config.lib_dir.sql = tmp_path / 'sql'
def_config.lib_dir.sql.mkdir()
(def_config.lib_dir.sql / 'tiger_import_start.sql').write_text(
"""CREATE OR REPLACE FUNCTION tiger_line_import(linegeo GEOMETRY, start INTEGER,
stop INTEGER, interpol TEXT,
token_info JSONB, postcode TEXT)
RETURNS INTEGER AS $$
INSERT INTO tiger VALUES(linegeo, start, stop, interpol, token_info, postcode)
RETURNING 1
$$ LANGUAGE SQL;""")
(def_config.lib_dir.sql / 'tiger_import_finish.sql').write_text(
"""DROP FUNCTION tiger_line_import (linegeo GEOMETRY, in_startnumber INTEGER,
in_endnumber INTEGER, interpolationtype TEXT,
token_info JSONB, in_postcode TEXT);""")
return MockTigerTable(temp_db_conn)
@pytest.fixture
def csv_factory(tmp_path):
def _mk_file(fname, hnr_from=1, hnr_to=9, interpol='odd', street='Main St',
city='Newtown', state='AL', postcode='12345',
geometry='LINESTRING(-86.466995 32.428956,-86.466923 32.428933)'):
(tmp_path / (fname + '.csv')).write_text(dedent("""\
from;to;interpolation;street;city;state;postcode;geometry
{};{};{};{};{};{};{};{}
""".format(hnr_from, hnr_to, interpol, street, city, state,
postcode, geometry)))
return _mk_file
@pytest.mark.parametrize("threads", (1, 5))
def test_add_tiger_data(def_config, src_dir, tiger_table, tokenizer_mock, threads):
tiger_data.add_tiger_data(str(src_dir / 'test' / 'testdb' / 'tiger'),
def_config, threads, tokenizer_mock())
assert tiger_table.count() == 6213
def test_add_tiger_data_database_frozen(def_config, temp_db_conn, tiger_table, tokenizer_mock,
tmp_path):
freeze.drop_update_tables(temp_db_conn)
with pytest.raises(UsageError) as excinfo:
tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
assert "database frozen" in str(excinfo.value)
assert tiger_table.count() == 0
def test_add_tiger_data_no_files(def_config, tiger_table, tokenizer_mock,
tmp_path):
tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
assert tiger_table.count() == 0
def test_add_tiger_data_bad_file(def_config, tiger_table, tokenizer_mock,
tmp_path):
sqlfile = tmp_path / '1010.csv'
sqlfile.write_text("""Random text""")
tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
assert tiger_table.count() == 0
def test_add_tiger_data_hnr_nan(def_config, tiger_table, tokenizer_mock,
csv_factory, tmp_path):
csv_factory('file1', hnr_from=99)
csv_factory('file2', hnr_from='L12')
csv_factory('file3', hnr_to='12.4')
tiger_data.add_tiger_data(str(tmp_path), def_config, 1, tokenizer_mock())
assert tiger_table.count() == 1
assert tiger_table.row()['start'] == 99
@pytest.mark.parametrize("threads", (1, 5))
def test_add_tiger_data_tarfile(def_config, tiger_table, tokenizer_mock,
tmp_path, src_dir, threads):
tar = tarfile.open(str(tmp_path / 'sample.tar.gz'), "w:gz")
tar.add(str(src_dir / 'test' / 'testdb' / 'tiger' / '01001.csv'))
tar.close()
tiger_data.add_tiger_data(str(tmp_path / 'sample.tar.gz'), def_config, threads,
tokenizer_mock())
assert tiger_table.count() == 6213
def test_add_tiger_data_bad_tarfile(def_config, tiger_table, tokenizer_mock,
tmp_path):
    # Use a name that does not shadow the tarfile module imported above.
    bad_tar = tmp_path / 'sample.tar.gz'
    bad_tar.write_text("""Random text""")
    with pytest.raises(UsageError):
        tiger_data.add_tiger_data(str(bad_tar), def_config, 1, tokenizer_mock())
def test_add_tiger_data_empty_tarfile(def_config, tiger_table, tokenizer_mock,
tmp_path):
tar = tarfile.open(str(tmp_path / 'sample.tar.gz'), "w:gz")
tar.add(__file__)
tar.close()
tiger_data.add_tiger_data(str(tmp_path / 'sample.tar.gz'), def_config, 1,
tokenizer_mock())
assert tiger_table.count() == 0
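# A hedged usage sketch: as the tests above show, add_tiger_data accepts
# either a directory of semicolon-separated CSV files or a .tar.gz archive
# containing them; this thin wrapper is illustrative only.
def _example_import_tiger(data_path, config, tokenizer, threads=1):
    tiger_data.add_tiger_data(str(data_path), config, threads, tokenizer)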
| 5,836 | 35.943038 | 94 | py |
Nominatim | Nominatim-master/test/python/tools/test_add_osm_data.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for functions to add additional data to the database.
"""
from pathlib import Path
import pytest
from nominatim.tools import add_osm_data
class CaptureGetUrl:
def __init__(self, monkeypatch):
self.url = None
monkeypatch.setattr(add_osm_data, 'get_url', self)
def __call__(self, url):
self.url = url
return '<xml></xml>'
@pytest.fixture(autouse=True)
def setup_delete_postprocessing(temp_db_cursor):
temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION flush_deleted_places()
RETURNS INTEGER AS $$ SELECT 1 $$ LANGUAGE SQL""")
def test_import_osm_file_simple(dsn, table_factory, osm2pgsql_options, capfd):
assert add_osm_data.add_data_from_file(dsn, Path('change.osm'), osm2pgsql_options) == 0
captured = capfd.readouterr()
assert '--append' in captured.out
assert '--output gazetteer' in captured.out
assert f'--style {osm2pgsql_options["osm2pgsql_style"]}' in captured.out
assert f'--number-processes {osm2pgsql_options["threads"]}' in captured.out
assert f'--cache {osm2pgsql_options["osm2pgsql_cache"]}' in captured.out
assert 'change.osm' in captured.out
@pytest.mark.parametrize("osm_type", ['node', 'way', 'relation'])
@pytest.mark.parametrize("main_api,url", [(True, 'https://www.openstreetmap.org/api'),
(False, 'https://overpass-api.de/api/interpreter?')])
def test_import_osm_object_main_api(dsn, osm2pgsql_options, monkeypatch,
capfd, osm_type, main_api, url):
get_url_mock = CaptureGetUrl(monkeypatch)
add_osm_data.add_osm_object(dsn, osm_type, 4536, main_api, osm2pgsql_options)
captured = capfd.readouterr()
assert get_url_mock.url.startswith(url)
assert '--append' in captured.out
assert '--output gazetteer' in captured.out
assert f'--style {osm2pgsql_options["osm2pgsql_style"]}' in captured.out
assert f'--number-processes {osm2pgsql_options["threads"]}' in captured.out
assert f'--cache {osm2pgsql_options["osm2pgsql_cache"]}' in captured.out
assert captured.out.endswith(' -\n')
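# A hedged usage sketch: per the parametrization above, the main_api flag
# selects between the main OSM API and Overpass as the download source for
# the object before it is fed to osm2pgsql.
def _example_add_single_object(dsn, osm2pgsql_options):
    add_osm_data.add_osm_object(dsn, 'node', 4536, True, osm2pgsql_options)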
| 2,355 | 36.396825 | 95 | py |
Nominatim | Nominatim-master/test/python/config/test_config_load_module.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test for loading extra Python modules.
"""
from pathlib import Path
import sys
import pytest
from nominatim.config import Configuration
@pytest.fixture
def test_config(src_dir, tmp_path):
""" Create a configuration object with project and config directories
in a temporary directory.
"""
(tmp_path / 'project').mkdir()
(tmp_path / 'config').mkdir()
conf = Configuration(tmp_path / 'project')
conf.config_dir = tmp_path / 'config'
return conf
def test_load_default_module(test_config):
module = test_config.load_plugin_module('version', 'nominatim')
assert isinstance(module.NOMINATIM_VERSION, tuple)
def test_load_default_module_with_hyphen(test_config):
module = test_config.load_plugin_module('place-info', 'nominatim.data')
assert isinstance(module.PlaceInfo, object)
def test_load_plugin_module(test_config, tmp_path):
(tmp_path / 'project' / 'testpath').mkdir()
(tmp_path / 'project' / 'testpath' / 'mymod.py')\
.write_text("def my_test_function():\n return 'gjwitlsSG42TG%'")
module = test_config.load_plugin_module('testpath/mymod.py', 'private.something')
assert module.my_test_function() == 'gjwitlsSG42TG%'
# also test reloading module
(tmp_path / 'project' / 'testpath' / 'mymod.py')\
.write_text("def my_test_function():\n return 'hjothjorhj'")
module = test_config.load_plugin_module('testpath/mymod.py', 'private.something')
assert module.my_test_function() == 'gjwitlsSG42TG%'
def test_load_external_library_module(test_config, tmp_path, monkeypatch):
MODULE_NAME = 'foogurenqodr4'
pythonpath = tmp_path / 'priv-python'
pythonpath.mkdir()
(pythonpath / MODULE_NAME).mkdir()
(pythonpath / MODULE_NAME / '__init__.py').write_text('')
(pythonpath / MODULE_NAME / 'tester.py')\
.write_text("def my_test_function():\n return 'gjwitlsSG42TG%'")
monkeypatch.syspath_prepend(pythonpath)
module = test_config.load_plugin_module(f'{MODULE_NAME}.tester', 'private.something')
assert module.my_test_function() == 'gjwitlsSG42TG%'
# also test reloading module
(pythonpath / MODULE_NAME / 'tester.py')\
.write_text("def my_test_function():\n return 'dfigjreigj'")
module = test_config.load_plugin_module(f'{MODULE_NAME}.tester', 'private.something')
assert module.my_test_function() == 'gjwitlsSG42TG%'
del sys.modules[f'{MODULE_NAME}.tester']
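# A hedged summary sketch of the three lookup styles exercised above; the
# 'mypackage.tester' module name is purely hypothetical.
def _example_plugin_lookups(config):
    config.load_plugin_module('version', 'nominatim')  # name in the default namespace
    config.load_plugin_module('testpath/mymod.py', 'private.something')  # file in the project dir
    config.load_plugin_module('mypackage.tester', 'private.something')  # module on sys.path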
| 2,656 | 31.402439 | 89 | py |
Nominatim | Nominatim-master/test/python/config/test_config.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test for loading dotenv configuration.
"""
from pathlib import Path
import pytest
from nominatim.config import Configuration, flatten_config_list
from nominatim.errors import UsageError
@pytest.fixture
def make_config():
""" Create a configuration object from the given project directory.
"""
def _mk_config(project_dir=None):
return Configuration(project_dir)
return _mk_config
@pytest.fixture
def make_config_path(tmp_path):
""" Create a configuration object with project and config directories
in a temporary directory.
"""
def _mk_config():
(tmp_path / 'project').mkdir()
(tmp_path / 'config').mkdir()
conf = Configuration(tmp_path / 'project')
conf.config_dir = tmp_path / 'config'
return conf
return _mk_config
def test_no_project_dir(make_config):
config = make_config()
assert config.DATABASE_WEBUSER == 'www-data'
@pytest.mark.parametrize("val", ('apache', '"apache"'))
def test_prefer_project_setting_over_default(make_config, val, tmp_path):
envfile = tmp_path / '.env'
envfile.write_text('NOMINATIM_DATABASE_WEBUSER={}\n'.format(val))
config = make_config(tmp_path)
assert config.DATABASE_WEBUSER == 'apache'
def test_prefer_os_environ_over_project_setting(make_config, monkeypatch, tmp_path):
envfile = tmp_path / '.env'
envfile.write_text('NOMINATIM_DATABASE_WEBUSER=apache\n')
monkeypatch.setenv('NOMINATIM_DATABASE_WEBUSER', 'nobody')
config = make_config(tmp_path)
assert config.DATABASE_WEBUSER == 'nobody'
def test_prefer_os_environ_can_unset_project_setting(make_config, monkeypatch, tmp_path):
envfile = tmp_path / '.env'
envfile.write_text('NOMINATIM_DATABASE_WEBUSER=apache\n')
monkeypatch.setenv('NOMINATIM_DATABASE_WEBUSER', '')
config = make_config(tmp_path)
assert config.DATABASE_WEBUSER == ''
def test_get_os_env_add_defaults(make_config, monkeypatch):
config = make_config()
monkeypatch.delenv('NOMINATIM_DATABASE_WEBUSER', raising=False)
assert config.get_os_env()['NOMINATIM_DATABASE_WEBUSER'] == 'www-data'
def test_get_os_env_prefer_os_environ(make_config, monkeypatch):
config = make_config()
monkeypatch.setenv('NOMINATIM_DATABASE_WEBUSER', 'nobody')
assert config.get_os_env()['NOMINATIM_DATABASE_WEBUSER'] == 'nobody'
def test_get_libpq_dsn_convert_default(make_config):
config = make_config()
assert config.get_libpq_dsn() == 'dbname=nominatim'
def test_get_libpq_dsn_convert_php(make_config, monkeypatch):
config = make_config()
monkeypatch.setenv('NOMINATIM_DATABASE_DSN',
'pgsql:dbname=gis;password=foo;host=localhost')
assert config.get_libpq_dsn() == 'dbname=gis password=foo host=localhost'
@pytest.mark.parametrize("val,expect", [('foo bar', "'foo bar'"),
("xy'z", "xy\\'z"),
])
def test_get_libpq_dsn_convert_php_special_chars(make_config, monkeypatch, val, expect):
config = make_config()
monkeypatch.setenv('NOMINATIM_DATABASE_DSN',
'pgsql:dbname=gis;password={}'.format(val))
assert config.get_libpq_dsn() == "dbname=gis password={}".format(expect)
def test_get_libpq_dsn_convert_libpq(make_config, monkeypatch):
config = make_config()
monkeypatch.setenv('NOMINATIM_DATABASE_DSN',
'host=localhost dbname=gis password=foo')
assert config.get_libpq_dsn() == 'host=localhost dbname=gis password=foo'
@pytest.mark.parametrize("value,result",
[(x, True) for x in ('1', 'true', 'True', 'yes', 'YES')] +
[(x, False) for x in ('0', 'false', 'no', 'NO', 'x')])
def test_get_bool(make_config, monkeypatch, value, result):
config = make_config()
monkeypatch.setenv('NOMINATIM_FOOBAR', value)
assert config.get_bool('FOOBAR') == result
def test_get_bool_empty(make_config):
config = make_config()
assert config.DATABASE_MODULE_PATH == ''
assert not config.get_bool('DATABASE_MODULE_PATH')
@pytest.mark.parametrize("value,result", [('0', 0), ('1', 1),
('85762513444', 85762513444)])
def test_get_int_success(make_config, monkeypatch, value, result):
config = make_config()
monkeypatch.setenv('NOMINATIM_FOOBAR', value)
assert config.get_int('FOOBAR') == result
@pytest.mark.parametrize("value", ['1b', 'fg', '0x23'])
def test_get_int_bad_values(make_config, monkeypatch, value):
config = make_config()
monkeypatch.setenv('NOMINATIM_FOOBAR', value)
with pytest.raises(UsageError):
config.get_int('FOOBAR')
def test_get_int_empty(make_config):
config = make_config()
assert config.DATABASE_MODULE_PATH == ''
with pytest.raises(UsageError):
config.get_int('DATABASE_MODULE_PATH')
@pytest.mark.parametrize("value,outlist", [('sd', ['sd']),
('dd,rr', ['dd', 'rr']),
(' a , b ', ['a', 'b'])])
def test_get_str_list_success(make_config, monkeypatch, value, outlist):
config = make_config()
monkeypatch.setenv('NOMINATIM_MYLIST', value)
assert config.get_str_list('MYLIST') == outlist
def test_get_str_list_empty(make_config):
config = make_config()
assert config.get_str_list('LANGUAGES') is None
def test_get_path_empty(make_config):
config = make_config()
assert config.DATABASE_MODULE_PATH == ''
assert not config.get_path('DATABASE_MODULE_PATH')
def test_get_path_absolute(make_config, monkeypatch):
config = make_config()
monkeypatch.setenv('NOMINATIM_FOOBAR', '/dont/care')
result = config.get_path('FOOBAR')
assert isinstance(result, Path)
assert str(result) == '/dont/care'
def test_get_path_relative(make_config, monkeypatch, tmp_path):
config = make_config(tmp_path)
monkeypatch.setenv('NOMINATIM_FOOBAR', 'an/oyster')
result = config.get_path('FOOBAR')
assert isinstance(result, Path)
assert str(result) == str(tmp_path / 'an/oyster')
def test_get_import_style_intern(make_config, src_dir, monkeypatch):
config = make_config()
monkeypatch.setenv('NOMINATIM_IMPORT_STYLE', 'street')
expected = src_dir / 'settings' / 'import-street.lua'
assert config.get_import_style_file() == expected
def test_get_import_style_extern_relative(make_config_path, monkeypatch):
config = make_config_path()
(config.project_dir / 'custom.style').write_text('x')
monkeypatch.setenv('NOMINATIM_IMPORT_STYLE', 'custom.style')
assert str(config.get_import_style_file()) == str(config.project_dir / 'custom.style')
def test_get_import_style_extern_absolute(make_config, tmp_path, monkeypatch):
config = make_config()
cfgfile = tmp_path / 'test.style'
cfgfile.write_text('x')
monkeypatch.setenv('NOMINATIM_IMPORT_STYLE', str(cfgfile))
assert str(config.get_import_style_file()) == str(cfgfile)
def test_load_subconf_from_project_dir(make_config_path):
config = make_config_path()
testfile = config.project_dir / 'test.yaml'
testfile.write_text('cow: muh\ncat: miau\n')
testfile = config.config_dir / 'test.yaml'
testfile.write_text('cow: miau\ncat: muh\n')
rules = config.load_sub_configuration('test.yaml')
assert rules == dict(cow='muh', cat='miau')
def test_load_subconf_from_settings_dir(make_config_path):
config = make_config_path()
testfile = config.config_dir / 'test.yaml'
testfile.write_text('cow: muh\ncat: miau\n')
rules = config.load_sub_configuration('test.yaml')
assert rules == dict(cow='muh', cat='miau')
def test_load_subconf_empty_env_conf(make_config_path, monkeypatch):
monkeypatch.setenv('NOMINATIM_MY_CONFIG', '')
config = make_config_path()
testfile = config.config_dir / 'test.yaml'
testfile.write_text('cow: muh\ncat: miau\n')
rules = config.load_sub_configuration('test.yaml', config='MY_CONFIG')
assert rules == dict(cow='muh', cat='miau')
def test_load_subconf_env_absolute_found(make_config_path, monkeypatch, tmp_path):
monkeypatch.setenv('NOMINATIM_MY_CONFIG', str(tmp_path / 'other.yaml'))
config = make_config_path()
(config.config_dir / 'test.yaml').write_text('cow: muh\ncat: miau\n')
(tmp_path / 'other.yaml').write_text('dog: muh\nfrog: miau\n')
rules = config.load_sub_configuration('test.yaml', config='MY_CONFIG')
assert rules == dict(dog='muh', frog='miau')
def test_load_subconf_env_absolute_not_found(make_config_path, monkeypatch, tmp_path):
monkeypatch.setenv('NOMINATIM_MY_CONFIG', str(tmp_path / 'other.yaml'))
config = make_config_path()
(config.config_dir / 'test.yaml').write_text('cow: muh\ncat: miau\n')
with pytest.raises(UsageError, match='Config file not found.'):
        config.load_sub_configuration('test.yaml', config='MY_CONFIG')
@pytest.mark.parametrize("location", ['project_dir', 'config_dir'])
def test_load_subconf_env_relative_found(make_config_path, monkeypatch, location):
monkeypatch.setenv('NOMINATIM_MY_CONFIG', 'other.yaml')
config = make_config_path()
(config.config_dir / 'test.yaml').write_text('cow: muh\ncat: miau\n')
(getattr(config, location) / 'other.yaml').write_text('dog: bark\n')
rules = config.load_sub_configuration('test.yaml', config='MY_CONFIG')
assert rules == dict(dog='bark')
def test_load_subconf_env_relative_not_found(make_config_path, monkeypatch):
monkeypatch.setenv('NOMINATIM_MY_CONFIG', 'other.yaml')
config = make_config_path()
(config.config_dir / 'test.yaml').write_text('cow: muh\ncat: miau\n')
with pytest.raises(UsageError, match='Config file not found.'):
        config.load_sub_configuration('test.yaml', config='MY_CONFIG')
def test_load_subconf_json(make_config_path):
config = make_config_path()
(config.project_dir / 'test.json').write_text('{"cow": "muh", "cat": "miau"}')
rules = config.load_sub_configuration('test.json')
assert rules == dict(cow='muh', cat='miau')
def test_load_subconf_not_found(make_config_path):
config = make_config_path()
with pytest.raises(UsageError, match='Config file not found.'):
config.load_sub_configuration('test.yaml')
def test_load_subconf_env_unknown_format(make_config_path):
config = make_config_path()
(config.project_dir / 'test.xml').write_text('<html></html>')
with pytest.raises(UsageError, match='unknown format'):
config.load_sub_configuration('test.xml')
def test_load_subconf_include_absolute(make_config_path, tmp_path):
config = make_config_path()
testfile = config.config_dir / 'test.yaml'
testfile.write_text(f'base: !include {tmp_path}/inc.yaml\n')
(tmp_path / 'inc.yaml').write_text('first: 1\nsecond: 2\n')
rules = config.load_sub_configuration('test.yaml')
assert rules == dict(base=dict(first=1, second=2))
@pytest.mark.parametrize("location", ['project_dir', 'config_dir'])
def test_load_subconf_include_relative(make_config_path, tmp_path, location):
config = make_config_path()
testfile = config.config_dir / 'test.yaml'
    testfile.write_text('base: !include inc.yaml\n')
(getattr(config, location) / 'inc.yaml').write_text('first: 1\nsecond: 2\n')
rules = config.load_sub_configuration('test.yaml')
assert rules == dict(base=dict(first=1, second=2))
def test_load_subconf_include_bad_format(make_config_path):
config = make_config_path()
testfile = config.config_dir / 'test.yaml'
    testfile.write_text('base: !include inc.txt\n')
(config.config_dir / 'inc.txt').write_text('first: 1\nsecond: 2\n')
with pytest.raises(UsageError, match='Cannot handle config file format.'):
        config.load_sub_configuration('test.yaml')
def test_load_subconf_include_not_found(make_config_path):
config = make_config_path()
testfile = config.config_dir / 'test.yaml'
    testfile.write_text('base: !include inc.txt\n')
with pytest.raises(UsageError, match='Config file not found.'):
        config.load_sub_configuration('test.yaml')
def test_load_subconf_include_recursive(make_config_path):
config = make_config_path()
testfile = config.config_dir / 'test.yaml'
    testfile.write_text('base: !include inc.yaml\n')
(config.config_dir / 'inc.yaml').write_text('- !include more.yaml\n- upper\n')
(config.config_dir / 'more.yaml').write_text('- the end\n')
rules = config.load_sub_configuration('test.yaml')
assert rules == dict(base=[['the end'], 'upper'])
@pytest.mark.parametrize("content", [[], None])
def test_flatten_config_list_empty(content):
assert flatten_config_list(content) == []
@pytest.mark.parametrize("content", [{'foo': 'bar'}, 'hello world', 3])
def test_flatten_config_list_no_list(content):
with pytest.raises(UsageError):
flatten_config_list(content)
def test_flatten_config_list_allready_flat():
assert flatten_config_list([1, 2, 456]) == [1, 2, 456]
def test_flatten_config_list_nested():
content = [
34,
[{'first': '1st', 'second': '2nd'}, {}],
[[2, 3], [45, [56, 78], 66]],
'end'
]
assert flatten_config_list(content) == \
[34, {'first': '1st', 'second': '2nd'}, {},
2, 3, 45, 56, 78, 66, 'end']
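# A hedged reference sketch: a pure-Python equivalent of the behaviour the
# tests above pin down. The real implementation is flatten_config_list in
# nominatim.config; the exact error message below is an assumption.
def _example_flatten(content):
    # None and the empty list both yield an empty result.
    if not content:
        return []
    if not isinstance(content, list):
        raise UsageError('Configuration error: expected a list.')
    result = []
    for ele in content:
        if isinstance(ele, list):
            # Nested lists are flattened recursively; dicts and scalars
            # are kept as-is.
            result.extend(_example_flatten(ele))
        else:
            result.append(ele)
    return result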
| 13,664 | 29.986395 | 90 | py |
Nominatim | Nominatim-master/test/python/db/test_sql_preprocessor.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for SQL preprocessing.
"""
import pytest
from nominatim.db.sql_preprocessor import SQLPreprocessor
@pytest.fixture
def sql_factory(tmp_path):
def _mk_sql(sql_body):
(tmp_path / 'test.sql').write_text("""
CREATE OR REPLACE FUNCTION test() RETURNS TEXT
AS $$
BEGIN
{}
END;
$$ LANGUAGE plpgsql IMMUTABLE;""".format(sql_body))
return 'test.sql'
return _mk_sql
@pytest.mark.parametrize("expr,ret", [
("'a'", 'a'),
("'{{db.partitions|join}}'", '012'),
("{% if 'country_name' in db.tables %}'yes'{% else %}'no'{% endif %}", "yes"),
("{% if 'xxx' in db.tables %}'yes'{% else %}'no'{% endif %}", "no"),
("'{{db.tablespace.address_data}}'", ""),
("'{{db.tablespace.search_data}}'", 'TABLESPACE "dsearch"'),
("'{{db.tablespace.address_index}}'", 'TABLESPACE "iaddress"'),
("'{{db.tablespace.aux_data}}'", 'TABLESPACE "daux"')
])
def test_load_file_simple(sql_preprocessor_cfg, sql_factory,
temp_db_conn, temp_db_cursor, monkeypatch,
expr, ret):
monkeypatch.setenv('NOMINATIM_TABLESPACE_SEARCH_DATA', 'dsearch')
monkeypatch.setenv('NOMINATIM_TABLESPACE_ADDRESS_INDEX', 'iaddress')
monkeypatch.setenv('NOMINATIM_TABLESPACE_AUX_DATA', 'daux')
sqlfile = sql_factory("RETURN {};".format(expr))
SQLPreprocessor(temp_db_conn, sql_preprocessor_cfg).run_sql_file(temp_db_conn, sqlfile)
assert temp_db_cursor.scalar('SELECT test()') == ret
def test_load_file_with_params(sql_preprocessor, sql_factory, temp_db_conn, temp_db_cursor):
sqlfile = sql_factory("RETURN '{{ foo }} {{ bar }}';")
sql_preprocessor.run_sql_file(temp_db_conn, sqlfile, bar='XX', foo='ZZ')
assert temp_db_cursor.scalar('SELECT test()') == 'ZZ XX'
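# A hedged usage sketch: templates may reference database properties such as
# {{db.partitions}}, {{db.tables}} or {{db.tablespace.*}} (see the
# parametrization above), while keyword arguments to run_sql_file become
# plain template variables.
def _example_run_preprocessor(conn, cfg):
    proc = SQLPreprocessor(conn, cfg)
    proc.run_sql_file(conn, 'test.sql', foo='ZZ', bar='XX')  # fills {{ foo }}/{{ bar }}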
| 2,034 | 34.701754 | 92 | py |
Nominatim | Nominatim-master/test/python/db/test_async_connection.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the classes providing a non-blocking query interface towards PostgreSQL.
"""
from contextlib import closing
import concurrent.futures
import pytest
import psycopg2
from nominatim.db.async_connection import DBConnection, DeadlockHandler
@pytest.fixture
def conn(temp_db):
with closing(DBConnection('dbname=' + temp_db)) as connection:
yield connection
@pytest.fixture
def simple_conns(temp_db):
conn1 = psycopg2.connect('dbname=' + temp_db)
conn2 = psycopg2.connect('dbname=' + temp_db)
yield conn1.cursor(), conn2.cursor()
conn1.close()
conn2.close()
def test_simple_query(conn, temp_db_conn):
conn.connect()
conn.perform('CREATE TABLE foo (id INT)')
conn.wait()
temp_db_conn.table_exists('foo')
def test_wait_for_query(conn):
conn.connect()
conn.perform('SELECT pg_sleep(1)')
assert not conn.is_done()
conn.wait()
def test_bad_query(conn):
conn.connect()
conn.perform('SELECT efasfjsea')
with pytest.raises(psycopg2.ProgrammingError):
conn.wait()
def test_bad_query_ignore(temp_db):
with closing(DBConnection('dbname=' + temp_db, ignore_sql_errors=True)) as conn:
conn.connect()
conn.perform('SELECT efasfjsea')
conn.wait()
def exec_with_deadlock(cur, sql, detector):
with DeadlockHandler(lambda *args: detector.append(1)):
cur.execute(sql)
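# Behavioural assumption, based on the assertions further down: DeadlockHandler
# swallows PostgreSQL's deadlock error and invokes the given callback instead;
# the callback here simply records the event in 'detector'.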
def test_deadlock(simple_conns):
cur1, cur2 = simple_conns
cur1.execute("""CREATE TABLE t1 (id INT PRIMARY KEY, t TEXT);
INSERT into t1 VALUES (1, 'a'), (2, 'b')""")
cur1.connection.commit()
cur1.execute("UPDATE t1 SET t = 'x' WHERE id = 1")
cur2.execute("UPDATE t1 SET t = 'x' WHERE id = 2")
# This is the tricky part of the test. The first SQL command runs into
# a lock and blocks, so we have to run it in a separate thread. When the
# second deadlocking SQL statement is issued, PostgreSQL will abort one of
# the two transactions causing the deadlock. There is no way to tell
# which of the two it picks. Therefore wrap both in a DeadlockHandler and
# expect that exactly one of them triggers.
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
deadlock_check = []
try:
future = executor.submit(exec_with_deadlock, cur2,
"UPDATE t1 SET t = 'y' WHERE id = 1",
deadlock_check)
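# Spin until the worker thread has picked up the task; its UPDATE then
# blocks on the row lock held by cur1's open transaction.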
while not future.running():
pass
exec_with_deadlock(cur1, "UPDATE t1 SET t = 'y' WHERE id = 2",
deadlock_check)
finally:
# Whatever happens, make sure the deadlock gets resolved.
cur1.connection.rollback()
future.result()
assert len(deadlock_check) == 1
| 3,065 | 25.894737 | 84 | py |
Nominatim | Nominatim-master/test/python/db/test_connection.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for specialised connection and cursor classes.
"""
import pytest
import psycopg2
from nominatim.db.connection import connect, get_pg_env
@pytest.fixture
def db(dsn):
with connect(dsn) as conn:
yield conn
def test_connection_table_exists(db, table_factory):
assert not db.table_exists('foobar')
table_factory('foobar')
assert db.table_exists('foobar')
def test_has_column_no_table(db):
assert not db.table_has_column('sometable', 'somecolumn')
@pytest.mark.parametrize('name,result', [('tram', True), ('car', False)])
def test_has_column(db, table_factory, name, result):
table_factory('stuff', 'tram TEXT')
assert db.table_has_column('stuff', name) == result
def test_connection_index_exists(db, table_factory, temp_db_cursor):
assert not db.index_exists('some_index')
table_factory('foobar')
temp_db_cursor.execute('CREATE INDEX some_index ON foobar(id)')
assert db.index_exists('some_index')
assert db.index_exists('some_index', table='foobar')
assert not db.index_exists('some_index', table='bar')
def test_drop_table_existing(db, table_factory):
table_factory('dummy')
assert db.table_exists('dummy')
db.drop_table('dummy')
assert not db.table_exists('dummy')
def test_drop_table_non_existing(db):
db.drop_table('dfkjgjriogjigjgjrdghehtre')
def test_drop_table_non_existing_force(db):
with pytest.raises(psycopg2.ProgrammingError, match='.*does not exist.*'):
db.drop_table('dfkjgjriogjigjgjrdghehtre', if_exists=False)
def test_connection_server_version_tuple(db):
ver = db.server_version_tuple()
assert isinstance(ver, tuple)
assert len(ver) == 2
assert ver[0] > 8
def test_connection_postgis_version_tuple(db, temp_db_with_extensions):
ver = db.postgis_version_tuple()
assert isinstance(ver, tuple)
assert len(ver) == 2
assert ver[0] >= 2
def test_cursor_scalar(db, table_factory):
table_factory('dummy')
with db.cursor() as cur:
assert cur.scalar('SELECT count(*) FROM dummy') == 0
def test_cursor_scalar_many_rows(db):
with db.cursor() as cur:
with pytest.raises(RuntimeError):
cur.scalar('SELECT * FROM pg_tables')
def test_cursor_scalar_no_rows(db, table_factory):
table_factory('dummy')
with db.cursor() as cur:
with pytest.raises(RuntimeError):
cur.scalar('SELECT id FROM dummy')
def test_get_pg_env_add_variable(monkeypatch):
monkeypatch.delenv('PGPASSWORD', raising=False)
env = get_pg_env('user=fooF')
assert env['PGUSER'] == 'fooF'
assert 'PGPASSWORD' not in env
def test_get_pg_env_overwrite_variable(monkeypatch):
monkeypatch.setenv('PGUSER', 'some default')
env = get_pg_env('user=overwriter')
assert env['PGUSER'] == 'overwriter'
def test_get_pg_env_ignore_unknown():
env = get_pg_env('client_encoding=stuff', base_env={})
assert env == {}
| 3,150 | 24.827869 | 78 | py |
Nominatim | Nominatim-master/test/python/db/test_status.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for status table manipulation.
"""
import datetime as dt
import pytest
import nominatim.db.status
from nominatim.errors import UsageError
OSM_NODE_DATA = """\
<osm version="0.6" generator="OpenStreetMap server" copyright="OpenStreetMap and contributors" attribution="http://www.openstreetmap.org/copyright" license="http://opendatacommons.org/licenses/odbl/1-0/">
<node id="45673" visible="true" version="1" changeset="2047" timestamp="2006-01-27T22:09:10Z" user="Foo" uid="111" lat="48.7586670" lon="8.1343060">
</node>
</osm>
"""
def iso_date(date):
return dt.datetime.strptime(date, nominatim.db.status.ISODATE_FORMAT)\
.replace(tzinfo=dt.timezone.utc)
@pytest.fixture(autouse=True)
def setup_status_table(status_table):
pass
def test_compute_database_date_place_empty(place_table, temp_db_conn):
with pytest.raises(UsageError):
nominatim.db.status.compute_database_date(temp_db_conn)
def test_compute_database_date_valid(monkeypatch, place_row, temp_db_conn):
place_row(osm_type='N', osm_id=45673)
requested_url = []
def mock_url(url):
requested_url.append(url)
return OSM_NODE_DATA
monkeypatch.setattr(nominatim.db.status, "get_url", mock_url)
date = nominatim.db.status.compute_database_date(temp_db_conn)
assert requested_url == ['https://www.openstreetmap.org/api/0.6/node/45673/1']
assert date == iso_date('2006-01-27T22:09:10')
def test_compute_database_broken_api(monkeypatch, place_row, temp_db_conn):
place_row(osm_type='N', osm_id=45673)
requested_url = []
def mock_url(url):
requested_url.append(url)
return '<osm version="0.6" generator="OpenStre'
monkeypatch.setattr(nominatim.db.status, "get_url", mock_url)
with pytest.raises(UsageError):
nominatim.db.status.compute_database_date(temp_db_conn)
def test_set_status_empty_table(temp_db_conn, temp_db_cursor):
date = dt.datetime.fromordinal(1000000).replace(tzinfo=dt.timezone.utc)
nominatim.db.status.set_status(temp_db_conn, date=date)
assert temp_db_cursor.row_set("SELECT * FROM import_status") == \
{(date, None, True)}
def test_set_status_filled_table(temp_db_conn, temp_db_cursor):
date = dt.datetime.fromordinal(1000000).replace(tzinfo=dt.timezone.utc)
nominatim.db.status.set_status(temp_db_conn, date=date)
assert temp_db_cursor.table_rows('import_status') == 1
date = dt.datetime.fromordinal(1000100).replace(tzinfo=dt.timezone.utc)
nominatim.db.status.set_status(temp_db_conn, date=date, seq=456, indexed=False)
assert temp_db_cursor.row_set("SELECT * FROM import_status") == \
{(date, 456, False)}
def test_set_status_missing_date(temp_db_conn, temp_db_cursor):
date = dt.datetime.fromordinal(1000000).replace(tzinfo=dt.timezone.utc)
nominatim.db.status.set_status(temp_db_conn, date=date)
assert temp_db_cursor.table_rows('import_status') == 1
nominatim.db.status.set_status(temp_db_conn, date=None, seq=456, indexed=False)
assert temp_db_cursor.row_set("SELECT * FROM import_status") == \
{(date, 456, False)}
def test_get_status_empty_table(temp_db_conn):
assert nominatim.db.status.get_status(temp_db_conn) == (None, None, None)
def test_get_status_success(temp_db_conn):
date = dt.datetime.fromordinal(1000000).replace(tzinfo=dt.timezone.utc)
nominatim.db.status.set_status(temp_db_conn, date=date, seq=667, indexed=False)
assert nominatim.db.status.get_status(temp_db_conn) == \
(date, 667, False)
@pytest.mark.parametrize("old_state", [True, False])
@pytest.mark.parametrize("new_state", [True, False])
def test_set_indexed(temp_db_conn, temp_db_cursor, old_state, new_state):
date = dt.datetime.fromordinal(1000000).replace(tzinfo=dt.timezone.utc)
nominatim.db.status.set_status(temp_db_conn, date=date, indexed=old_state)
nominatim.db.status.set_indexed(temp_db_conn, new_state)
assert temp_db_cursor.scalar("SELECT indexed FROM import_status") == new_state
def test_set_indexed_empty_status(temp_db_conn, temp_db_cursor):
nominatim.db.status.set_indexed(temp_db_conn, True)
assert temp_db_cursor.table_rows("import_status") == 0
def test_log_status(temp_db_conn, temp_db_cursor):
date = dt.datetime.fromordinal(1000000).replace(tzinfo=dt.timezone.utc)
start = dt.datetime.now() - dt.timedelta(hours=1)
nominatim.db.status.set_status(temp_db_conn, date=date, seq=56)
nominatim.db.status.log_status(temp_db_conn, start, 'index')
temp_db_conn.commit()
assert temp_db_cursor.table_rows("import_osmosis_log") == 1
assert temp_db_cursor.scalar("SELECT batchseq FROM import_osmosis_log") == 56
assert temp_db_cursor.scalar("SELECT event FROM import_osmosis_log") == 'index'
| 5,025 | 34.394366 | 204 | py |
Nominatim | Nominatim-master/test/python/db/test_properties.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for property table manipulation.
"""
import pytest
from nominatim.db import properties
@pytest.fixture
def property_factory(property_table, temp_db_cursor):
""" A function fixture that adds a property into the property table.
"""
def _add_property(name, value):
temp_db_cursor.execute("INSERT INTO nominatim_properties VALUES(%s, %s)",
(name, value))
return _add_property
def test_get_property_existing(property_factory, temp_db_conn):
property_factory('foo', 'bar')
assert properties.get_property(temp_db_conn, 'foo') == 'bar'
def test_get_property_unknown(property_factory, temp_db_conn):
property_factory('other', 'bar')
assert properties.get_property(temp_db_conn, 'foo') is None
@pytest.mark.parametrize("prefill", (True, False))
def test_set_property_new(property_factory, temp_db_conn, temp_db_cursor, prefill):
if prefill:
property_factory('something', 'bar')
properties.set_property(temp_db_conn, 'something', 'else')
assert temp_db_cursor.scalar("""SELECT value FROM nominatim_properties
WHERE property = 'something'""") == 'else'
assert properties.get_property(temp_db_conn, 'something') == 'else'
| 1,470 | 29.645833 | 83 | py |
Nominatim | Nominatim-master/test/python/db/test_utils.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for DB utility functions in db.utils.
"""
import json
import pytest
import nominatim.db.utils as db_utils
from nominatim.errors import UsageError
def test_execute_file_success(dsn, temp_db_cursor, tmp_path):
tmpfile = tmp_path / 'test.sql'
tmpfile.write_text('CREATE TABLE test (id INT);\nINSERT INTO test VALUES(56);')
db_utils.execute_file(dsn, tmpfile)
assert temp_db_cursor.row_set('SELECT * FROM test') == {(56, )}
def test_execute_file_bad_file(dsn, tmp_path):
with pytest.raises(FileNotFoundError):
db_utils.execute_file(dsn, tmp_path / 'test2.sql')
def test_execute_file_bad_sql(dsn, tmp_path):
tmpfile = tmp_path / 'test.sql'
tmpfile.write_text('CREATE STABLE test (id INT)')
with pytest.raises(UsageError):
db_utils.execute_file(dsn, tmpfile)
def test_execute_file_bad_sql_ignore_errors(dsn, tmp_path):
tmpfile = tmp_path / 'test.sql'
tmpfile.write_text('CREATE STABLE test (id INT)')
db_utils.execute_file(dsn, tmpfile, ignore_errors=True)
def test_execute_file_with_pre_code(dsn, tmp_path, temp_db_cursor):
tmpfile = tmp_path / 'test.sql'
tmpfile.write_text('INSERT INTO test VALUES(4)')
db_utils.execute_file(dsn, tmpfile, pre_code='CREATE TABLE test (id INT)')
assert temp_db_cursor.row_set('SELECT * FROM test') == {(4, )}
def test_execute_file_with_post_code(dsn, tmp_path, temp_db_cursor):
tmpfile = tmp_path / 'test.sql'
tmpfile.write_text('CREATE TABLE test (id INT)')
db_utils.execute_file(dsn, tmpfile, post_code='INSERT INTO test VALUES(23)')
assert temp_db_cursor.row_set('SELECT * FROM test') == {(23, )}
class TestCopyBuffer:
TABLE_NAME = 'copytable'
@pytest.fixture(autouse=True)
def setup_test_table(self, table_factory):
table_factory(self.TABLE_NAME, 'col_a INT, col_b TEXT')
def table_rows(self, cursor):
return cursor.row_set('SELECT * FROM ' + self.TABLE_NAME)
def test_copybuffer_empty(self):
with db_utils.CopyBuffer() as buf:
buf.copy_out(None, "dummy")
def test_all_columns(self, temp_db_cursor):
with db_utils.CopyBuffer() as buf:
buf.add(3, 'hum')
buf.add(None, 'f\\t')
buf.copy_out(temp_db_cursor, self.TABLE_NAME)
assert self.table_rows(temp_db_cursor) == {(3, 'hum'), (None, 'f\\t')}
def test_selected_columns(self, temp_db_cursor):
with db_utils.CopyBuffer() as buf:
buf.add('foo')
buf.copy_out(temp_db_cursor, self.TABLE_NAME,
columns=['col_b'])
assert self.table_rows(temp_db_cursor) == {(None, 'foo')}
def test_reordered_columns(self, temp_db_cursor):
with db_utils.CopyBuffer() as buf:
buf.add('one', 1)
buf.add(' two ', 2)
buf.copy_out(temp_db_cursor, self.TABLE_NAME,
columns=['col_b', 'col_a'])
assert self.table_rows(temp_db_cursor) == {(1, 'one'), (2, ' two ')}
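# PostgreSQL's COPY text format uses tab as field separator, newline as
# record separator and \N as the NULL marker, so CopyBuffer has to escape
# these in the data. The test below checks that such values survive a
# round trip unchanged.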
def test_special_characters(self, temp_db_cursor):
with db_utils.CopyBuffer() as buf:
buf.add('foo\tbar')
buf.add('sun\nson')
buf.add('\\N')
buf.copy_out(temp_db_cursor, self.TABLE_NAME,
columns=['col_b'])
assert self.table_rows(temp_db_cursor) == {(None, 'foo\tbar'),
(None, 'sun\nson'),
(None, '\\N')}
class TestCopyBufferJson:
TABLE_NAME = 'copytable'
@pytest.fixture(autouse=True)
def setup_test_table(self, table_factory):
table_factory(self.TABLE_NAME, 'col_a INT, col_b JSONB')
def table_rows(self, cursor):
cursor.execute('SELECT * FROM ' + self.TABLE_NAME)
results = {k: v for k, v in cursor}
assert len(results) == cursor.rowcount
return results
def test_json_object(self, temp_db_cursor):
with db_utils.CopyBuffer() as buf:
buf.add(1, json.dumps({'test': 'value', 'number': 1}))
buf.copy_out(temp_db_cursor, self.TABLE_NAME)
assert self.table_rows(temp_db_cursor) == \
{1: {'test': 'value', 'number': 1}}
def test_json_object_special_chars(self, temp_db_cursor):
with db_utils.CopyBuffer() as buf:
buf.add(1, json.dumps({'te\tst': 'va\nlue', 'nu"mber': None}))
buf.copy_out(temp_db_cursor, self.TABLE_NAME)
assert self.table_rows(temp_db_cursor) == \
{1: {'te\tst': 'va\nlue', 'nu"mber': None}}
| 4,824 | 28.968944 | 83 | py |