repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
Nominatim | Nominatim-master/test/python/api/test_api_lookup.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for lookup API call.
"""
import pytest
import nominatim.api as napi
def test_lookup_empty_list(apiobj):
assert apiobj.api.lookup([]) == []
def test_lookup_non_existing(apiobj):
    """ IDs that cannot be found in the database are silently dropped
        from the result list.
    """
    ids = (napi.PlaceID(332),
           napi.OsmID('W', 4),
           napi.OsmID('W', 4, 'highway'))

    assert apiobj.api.lookup(ids) == []
@pytest.mark.parametrize('idobj', (napi.PlaceID(332), napi.OsmID('W', 4),
                                   napi.OsmID('W', 4, 'highway')))
def test_lookup_single_placex(apiobj, idobj):
    """ A placex row can be looked up by its place ID or its OSM ID
        (with or without a class qualifier). All fields of the returned
        result must reflect the data that was inserted.
    """
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential',
                      name={'name': 'Road'}, address={'city': 'Barrow'},
                      extratags={'surface': 'paved'},
                      parent_place_id=34, linked_place_id=55,
                      admin_level=15, country_code='gb',
                      housenumber='4',
                      postcode='34425', wikipedia='en:Faa',
                      rank_search=27, rank_address=26,
                      importance=0.01,
                      centroid=(23, 34),
                      geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')

    result = apiobj.api.lookup([idobj])

    assert len(result) == 1
    result = result[0]

    assert result.source_table.name == 'PLACEX'
    assert result.category == ('highway', 'residential')
    assert result.centroid == (pytest.approx(23.0), pytest.approx(34.0))

    assert result.place_id == 332
    assert result.osm_object == ('W', 4)

    assert result.names == {'name': 'Road'}
    assert result.address == {'city': 'Barrow'}
    assert result.extratags == {'surface': 'paved'}

    assert result.housenumber == '4'
    assert result.postcode == '34425'
    assert result.wikipedia == 'en:Faa'

    assert result.rank_search == 27
    assert result.rank_address == 26
    assert result.importance == pytest.approx(0.01)

    assert result.country_code == 'gb'

    # Optional detail fields are not filled in unless explicitly requested.
    assert result.address_rows is None
    assert result.linked_rows is None
    assert result.parented_rows is None
    assert result.name_keywords is None
    assert result.address_keywords is None

    # No geometry output format was requested, so none is returned.
    assert result.geometry == {}
def test_lookup_multiple_places(apiobj):
    """ Multiple IDs from different source tables can be looked up in a
        single call. IDs that do not exist are dropped without error.
    """
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential',
                      name={'name': 'Road'}, address={'city': 'Barrow'},
                      extratags={'surface': 'paved'},
                      parent_place_id=34, linked_place_id=55,
                      admin_level=15, country_code='gb',
                      housenumber='4',
                      postcode='34425', wikipedia='en:Faa',
                      rank_search=27, rank_address=26,
                      importance=0.01,
                      centroid=(23, 34),
                      geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')
    apiobj.add_osmline(place_id=4924, osm_id=9928,
                       parent_place_id=12,
                       startnumber=1, endnumber=4, step=1,
                       country_code='gb', postcode='34425',
                       address={'city': 'Big'},
                       geometry='LINESTRING(23 34, 23 35)')

    result = apiobj.api.lookup((napi.OsmID('W', 1),      # not in the database
                                napi.OsmID('W', 4),      # the placex row
                                napi.OsmID('W', 9928)))  # the osmline row

    assert len(result) == 2
    assert set(r.place_id for r in result) == {332, 4924}
| 3,693 | 34.864078 | 77 | py |
Nominatim | Nominatim-master/test/python/api/conftest.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper fixtures for API call tests.
"""
from pathlib import Path
import pytest
import time
import datetime as dt
import sqlalchemy as sa
import nominatim.api as napi
from nominatim.db.sql_preprocessor import SQLPreprocessor
import nominatim.api.logging as loglib
class APITester:
    """ Wrapper around a NominatimAPI instance for use in tests.

        Provides a synchronous interface to the internal asynchronous
        API and convenience functions for inserting test data into the
        various database tables with sensible default values.
    """

    def __init__(self):
        # The project path is irrelevant for the tests; the database
        # connection comes from the test environment.
        self.api = napi.NominatimAPI(Path('/invalid'))
        self.async_to_sync(self.api._async_api.setup_database())

    def async_to_sync(self, func):
        """ Run an asynchronous function until completion using the
            internal loop of the API.
        """
        return self.api._loop.run_until_complete(func)

    def add_data(self, table, data):
        """ Insert data into the given table.

            'table' is the attribute name of the table in the API's
            internal table collection, 'data' the row dict (or list of
            row dicts) to insert.
        """
        sql = getattr(self.api._async_api._tables, table).insert()
        self.async_to_sync(self.exec_async(sql, data))

    def add_placex(self, **kw):
        """ Insert a row into the placex table. Any column not given as
            a keyword argument is filled with a default value.
        """
        name = kw.get('name')
        # A plain string is shorthand for a simple default name.
        if isinstance(name, str):
            name = {'name': name}
        centroid = kw.get('centroid', (23.0, 34.0))
        # If no geometry is given, use a point geometry at the centroid.
        geometry = kw.get('geometry', 'POINT(%f %f)' % centroid)
        self.add_data('placex',
                      {'place_id': kw.get('place_id', 1000),
                       'osm_type': kw.get('osm_type', 'W'),
                       'osm_id': kw.get('osm_id', 4),
                       'class_': kw.get('class_', 'highway'),
                       'type': kw.get('type', 'residential'),
                       'name': name,
                       'address': kw.get('address'),
                       'extratags': kw.get('extratags'),
                       'parent_place_id': kw.get('parent_place_id'),
                       'linked_place_id': kw.get('linked_place_id'),
                       'admin_level': kw.get('admin_level', 15),
                       'country_code': kw.get('country_code'),
                       'housenumber': kw.get('housenumber'),
                       'postcode': kw.get('postcode'),
                       'wikipedia': kw.get('wikipedia'),
                       'rank_search': kw.get('rank_search', 30),
                       'rank_address': kw.get('rank_address', 30),
                       'importance': kw.get('importance'),
                       'centroid': 'POINT(%f %f)' % centroid,
                       'indexed_status': kw.get('indexed_status', 0),
                       'indexed_date': kw.get('indexed_date',
                                              dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
                       'geometry': geometry})

    def add_address_placex(self, object_id, **kw):
        """ Insert a placex row together with an addressline row that
            links it into the address of the place with ID 'object_id'.
        """
        self.add_placex(**kw)
        self.add_data('addressline',
                      {'place_id': object_id,
                       'address_place_id': kw.get('place_id', 1000),
                       'distance': kw.get('distance', 0.0),
                       'cached_rank_address': kw.get('rank_address', 30),
                       'fromarea': kw.get('fromarea', False),
                       'isaddress': kw.get('isaddress', True)})

    def add_osmline(self, **kw):
        """ Insert a row into the interpolation (osmline) table with
            defaults for all columns that are not given.
        """
        self.add_data('osmline',
                      {'place_id': kw.get('place_id', 10000),
                       'osm_id': kw.get('osm_id', 4004),
                       'parent_place_id': kw.get('parent_place_id'),
                       'indexed_date': kw.get('indexed_date',
                                              dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
                       'startnumber': kw.get('startnumber', 2),
                       'endnumber': kw.get('endnumber', 6),
                       'step': kw.get('step', 2),
                       'address': kw.get('address'),
                       'postcode': kw.get('postcode'),
                       'country_code': kw.get('country_code'),
                       'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})

    def add_tiger(self, **kw):
        """ Insert a row into the TIGER house number table with defaults
            for all columns that are not given.
        """
        self.add_data('tiger',
                      {'place_id': kw.get('place_id', 30000),
                       'parent_place_id': kw.get('parent_place_id'),
                       'startnumber': kw.get('startnumber', 2),
                       'endnumber': kw.get('endnumber', 6),
                       'step': kw.get('step', 2),
                       'postcode': kw.get('postcode'),
                       'linegeo': kw.get('geometry', 'LINESTRING(1.1 -0.2, 1.09 -0.22)')})

    def add_postcode(self, **kw):
        """ Insert a row into the artificial postcode table with
            defaults for all columns that are not given.
        """
        self.add_data('postcode',
                      {'place_id': kw.get('place_id', 1000),
                       'parent_place_id': kw.get('parent_place_id'),
                       'country_code': kw.get('country_code'),
                       'postcode': kw.get('postcode'),
                       'rank_search': kw.get('rank_search', 20),
                       'rank_address': kw.get('rank_address', 22),
                       'indexed_date': kw.get('indexed_date',
                                              dt.datetime(2022, 12, 7, 14, 14, 46, 0)),
                       'geometry': kw.get('geometry', 'POINT(23 34)')})

    def add_country(self, country_code, geometry):
        """ Insert an area for the given country into the country grid. """
        self.add_data('country_grid',
                      {'country_code': country_code,
                       'area': 0.1,
                       'geometry': geometry})

    def add_country_name(self, country_code, names, partition=0):
        """ Insert a names entry for the given country. """
        self.add_data('country_name',
                      {'country_code': country_code,
                       'name': names,
                       'partition': partition})

    def add_search_name(self, place_id, **kw):
        """ Insert a row into the search_name table for the given place
            with defaults for all columns that are not given.
        """
        centroid = kw.get('centroid', (23.0, 34.0))
        self.add_data('search_name',
                      {'place_id': place_id,
                       'importance': kw.get('importance', 0.00001),
                       'search_rank': kw.get('search_rank', 30),
                       'address_rank': kw.get('address_rank', 30),
                       'name_vector': kw.get('names', []),
                       'nameaddress_vector': kw.get('address', []),
                       'country_code': kw.get('country_code', 'xx'),
                       'centroid': 'POINT(%f %f)' % centroid})

    def add_class_type_table(self, cls, typ):
        """ Create a place_classtype lookup table for the given class
            and type from the current content of the placex table.
            (Test-only code: cls/typ are trusted, so string interpolation
            into the SQL is acceptable here.)
        """
        self.async_to_sync(
            self.exec_async(sa.text(f"""CREATE TABLE place_classtype_{cls}_{typ}
                                         AS (SELECT place_id, centroid FROM placex
                                             WHERE class = '{cls}' AND type = '{typ}')
                                     """)))

    async def exec_async(self, sql, *args, **kwargs):
        """ Execute the given SQL statement within a transaction of the
            internal async API and return the result.
        """
        async with self.api._async_api.begin() as conn:
            return await conn.execute(sql, *args, **kwargs)

    async def create_tables(self):
        """ Create all tables known to the API's table definitions in
            the test database.
        """
        async with self.api._async_api._engine.begin() as conn:
            await conn.run_sync(self.api._async_api._tables.meta.create_all)
@pytest.fixture
def apiobj(temp_db_with_extensions, temp_db_conn, monkeypatch):
    """ Create an asynchronous SQLAlchemy engine for the test DB.

        Yields an APITester with a fully set-up (but empty) database
        schema and the SQL lookup/ranking functions installed. Debug
        logging is collected and printed after the test finishes.
    """
    monkeypatch.setenv('NOMINATIM_USE_US_TIGER_DATA', 'yes')
    testapi = APITester()
    testapi.async_to_sync(testapi.create_tables())

    # Install the server-side SQL functions needed by the lookup calls.
    proc = SQLPreprocessor(temp_db_conn, testapi.api.config)
    proc.run_sql_file(temp_db_conn, 'functions/address_lookup.sql')
    proc.run_sql_file(temp_db_conn, 'functions/ranking.sql')

    loglib.set_log_output('text')
    yield testapi
    # Print collected debug output so it shows up on test failure.
    print(loglib.get_and_disable())

    testapi.api.close()
| 7,661 | 39.539683 | 89 | py |
Nominatim | Nominatim-master/test/python/api/test_result_formatting_v1.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for formatting results for the V1 API.
These test only ensure that the Python code is correct.
For functional tests see BDD test suite.
"""
import datetime as dt
import json
import pytest
import nominatim.api.v1 as api_impl
import nominatim.api as napi
from nominatim.version import NOMINATIM_VERSION
STATUS_FORMATS = {'text', 'json'}
# StatusResult
def test_status_format_list():
    """ Exactly the expected output formats are advertised for StatusResult. """
    formats = api_impl.list_formats(napi.StatusResult)

    assert set(formats) == STATUS_FORMATS
@pytest.mark.parametrize('fmt', list(STATUS_FORMATS))
def test_status_supported(fmt):
assert api_impl.supports_format(napi.StatusResult, fmt)
def test_status_unsupported():
    """ Unknown format names must be reported as unsupported. """
    supported = api_impl.supports_format(napi.StatusResult, 'gagaga')

    assert not supported
# Bug fix: both of these tests were originally named
# 'test_status_format_text'; the second definition shadowed the first,
# so pytest silently ran only one of them. They now have unique names.
def test_status_format_text_ok():
    """ A zero status code is formatted as plain 'OK' in text format,
        ignoring the message.
    """
    assert api_impl.format_result(napi.StatusResult(0, 'message here'), 'text', {}) == 'OK'


def test_status_format_text_error():
    """ A non-zero status code is formatted as 'ERROR: <message>' in
        text format.
    """
    assert api_impl.format_result(napi.StatusResult(500, 'message here'), 'text', {}) == 'ERROR: message here'
def test_status_format_json_minimal():
status = napi.StatusResult(700, 'Bad format.')
result = api_impl.format_result(status, 'json', {})
assert result == '{"status":700,"message":"Bad format.","software_version":"%s"}' % (NOMINATIM_VERSION, )
def test_status_format_json_full():
status = napi.StatusResult(0, 'OK')
status.data_updated = dt.datetime(2010, 2, 7, 20, 20, 3, 0, tzinfo=dt.timezone.utc)
status.database_version = '5.6'
result = api_impl.format_result(status, 'json', {})
assert result == '{"status":0,"message":"OK","data_updated":"2010-02-07T20:20:03+00:00","software_version":"%s","database_version":"5.6"}' % (NOMINATIM_VERSION, )
# DetailedResult
def test_search_details_minimal():
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0))
result = api_impl.format_result(search, 'json', {})
assert json.loads(result) == \
{'category': 'place',
'type': 'thing',
'admin_level': 15,
'names': {},
'localname': '',
'calculated_importance': pytest.approx(0.0000001),
'rank_address': 30,
'rank_search': 30,
'isarea': False,
'addresstags': {},
'extratags': {},
'centroid': {'type': 'Point', 'coordinates': [1.0, 2.0]},
'geometry': {'type': 'Point', 'coordinates': [1.0, 2.0]},
}
def test_search_details_full():
import_date = dt.datetime(2010, 2, 7, 20, 20, 3, 0, tzinfo=dt.timezone.utc)
search = napi.DetailedResult(
source_table=napi.SourceTable.PLACEX,
category=('amenity', 'bank'),
centroid=napi.Point(56.947, -87.44),
place_id=37563,
parent_place_id=114,
linked_place_id=55693,
osm_object=('W', 442100),
admin_level=14,
names={'name': 'Bank', 'name:fr': 'Banque'},
address={'city': 'Niento', 'housenumber': ' 3'},
extratags={'atm': 'yes'},
housenumber='3',
postcode='556 X23',
wikipedia='en:Bank',
rank_address=29,
rank_search=28,
importance=0.0443,
country_code='ll',
indexed_date = import_date
)
search.localize(napi.Locales())
result = api_impl.format_result(search, 'json', {})
assert json.loads(result) == \
{'place_id': 37563,
'parent_place_id': 114,
'osm_type': 'W',
'osm_id': 442100,
'category': 'amenity',
'type': 'bank',
'admin_level': 14,
'localname': 'Bank',
'names': {'name': 'Bank', 'name:fr': 'Banque'},
'addresstags': {'city': 'Niento', 'housenumber': ' 3'},
'housenumber': '3',
'calculated_postcode': '556 X23',
'country_code': 'll',
'indexed_date': '2010-02-07T20:20:03+00:00',
'importance': pytest.approx(0.0443),
'calculated_importance': pytest.approx(0.0443),
'extratags': {'atm': 'yes'},
'calculated_wikipedia': 'en:Bank',
'rank_address': 29,
'rank_search': 28,
'isarea': False,
'centroid': {'type': 'Point', 'coordinates': [56.947, -87.44]},
'geometry': {'type': 'Point', 'coordinates': [56.947, -87.44]},
}
@pytest.mark.parametrize('gtype,isarea', [('ST_Point', False),
('ST_LineString', False),
('ST_Polygon', True),
('ST_MultiPolygon', True)])
def test_search_details_no_geometry(gtype, isarea):
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0),
geometry={'type': gtype})
result = api_impl.format_result(search, 'json', {})
js = json.loads(result)
assert js['geometry'] == {'type': 'Point', 'coordinates': [1.0, 2.0]}
assert js['isarea'] == isarea
def test_search_details_with_geometry():
    """ An attached geojson geometry must be passed through verbatim
        into the 'geometry' field of the formatted output.
    """
    search = napi.DetailedResult(napi.SourceTable.PLACEX,
                                 ('place', 'thing'),
                                 napi.Point(1.0, 2.0),
                                 geometry={'geojson': '{"type":"Point","coordinates":[56.947,-87.44]}'})

    result = api_impl.format_result(search, 'json', {})
    js = json.loads(result)

    assert js['geometry'] == {'type': 'Point', 'coordinates': [56.947, -87.44]}
    # 'is False' instead of '== False': checks value and type (PEP 8/E712),
    # so a truthy non-boolean or 0 cannot slip through.
    assert js['isarea'] is False
def test_search_details_with_icon_available():
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('amenity', 'restaurant'),
napi.Point(1.0, 2.0))
result = api_impl.format_result(search, 'json', {'icon_base_url': 'foo'})
js = json.loads(result)
assert js['icon'] == 'foo/food_restaurant.p.20.png'
def test_search_details_with_icon_not_available():
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('amenity', 'tree'),
napi.Point(1.0, 2.0))
result = api_impl.format_result(search, 'json', {'icon_base_url': 'foo'})
js = json.loads(result)
assert 'icon' not in js
def test_search_details_with_address_minimal():
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0),
address_rows=[
napi.AddressLine(place_id=None,
osm_object=None,
category=('bnd', 'note'),
names={},
extratags=None,
admin_level=None,
fromarea=False,
isaddress=False,
rank_address=10,
distance=0.0)
])
result = api_impl.format_result(search, 'json', {})
js = json.loads(result)
assert js['address'] == [{'localname': '',
'class': 'bnd',
'type': 'note',
'rank_address': 10,
'distance': 0.0,
'isaddress': False}]
@pytest.mark.parametrize('field,outfield', [('address_rows', 'address'),
('linked_rows', 'linked_places'),
('parented_rows', 'hierarchy')
])
def test_search_details_with_further_infos(field, outfield):
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0))
setattr(search, field, [napi.AddressLine(place_id=3498,
osm_object=('R', 442),
category=('bnd', 'note'),
names={'name': 'Trespass'},
extratags={'access': 'no',
'place_type': 'spec'},
admin_level=4,
fromarea=True,
isaddress=True,
rank_address=10,
distance=0.034)
])
result = api_impl.format_result(search, 'json', {})
js = json.loads(result)
assert js[outfield] == [{'localname': 'Trespass',
'place_id': 3498,
'osm_id': 442,
'osm_type': 'R',
'place_type': 'spec',
'class': 'bnd',
'type': 'note',
'admin_level': 4,
'rank_address': 10,
'distance': 0.034,
'isaddress': True}]
def test_search_details_grouped_hierarchy():
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0),
parented_rows =
[napi.AddressLine(place_id=3498,
osm_object=('R', 442),
category=('bnd', 'note'),
names={'name': 'Trespass'},
extratags={'access': 'no',
'place_type': 'spec'},
admin_level=4,
fromarea=True,
isaddress=True,
rank_address=10,
distance=0.034)
])
result = api_impl.format_result(search, 'json', {'group_hierarchy': True})
js = json.loads(result)
assert js['hierarchy'] == {'note': [{'localname': 'Trespass',
'place_id': 3498,
'osm_id': 442,
'osm_type': 'R',
'place_type': 'spec',
'class': 'bnd',
'type': 'note',
'admin_level': 4,
'rank_address': 10,
'distance': 0.034,
'isaddress': True}]}
def test_search_details_keywords_name():
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0),
name_keywords=[
napi.WordInfo(23, 'foo', 'mefoo'),
napi.WordInfo(24, 'foo', 'bafoo')])
result = api_impl.format_result(search, 'json', {'keywords': True})
js = json.loads(result)
assert js['keywords'] == {'name': [{'id': 23, 'token': 'foo'},
{'id': 24, 'token': 'foo'}],
'address': []}
def test_search_details_keywords_address():
search = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0),
address_keywords=[
napi.WordInfo(23, 'foo', 'mefoo'),
napi.WordInfo(24, 'foo', 'bafoo')])
result = api_impl.format_result(search, 'json', {'keywords': True})
js = json.loads(result)
assert js['keywords'] == {'address': [{'id': 23, 'token': 'foo'},
{'id': 24, 'token': 'foo'}],
'name': []}
| 13,140 | 39.064024 | 166 | py |
Nominatim | Nominatim-master/test/python/api/test_results.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for result datatype helper functions.
"""
import struct
from binascii import hexlify
import pytest
import pytest_asyncio
import sqlalchemy as sa
from nominatim.api import SourceTable, DetailedResult, Point
import nominatim.api.results as nresults
def mkpoint(x, y):
    """ Build the hex-encoded EWKB blob for a 2D point with SRID 4326,
        as it would come out of a PostGIS geometry column.
    """
    wkb = struct.pack("=biidd", 1, 0x20000001, 4326, x, y)
    return hexlify(wkb).decode('utf-8')
class FakeRow:
    """ Minimal stand-in for a SQLAlchemy result row: all keyword
        arguments become attributes and the full keyword dict is kept
        in '_mapping'.
    """

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
        self._mapping = kwargs
def test_minimal_detailed_result():
res = DetailedResult(SourceTable.PLACEX,
('amenity', 'post_box'),
Point(23.1, 0.5))
assert res.lon == 23.1
assert res.lat == 0.5
assert res.calculated_importance() == pytest.approx(0.0000001)
def test_detailed_result_custom_importance():
res = DetailedResult(SourceTable.PLACEX,
('amenity', 'post_box'),
Point(23.1, 0.5),
importance=0.4563)
assert res.calculated_importance() == 0.4563
@pytest.mark.parametrize('func', (nresults.create_from_placex_row,
nresults.create_from_osmline_row,
nresults.create_from_tiger_row,
nresults.create_from_postcode_row))
def test_create_row_none(func):
assert func(None, DetailedResult) is None
@pytest.mark.parametrize('func', (nresults.create_from_osmline_row,
nresults.create_from_tiger_row))
def test_create_row_with_housenumber(func):
row = FakeRow(place_id=2345, osm_type='W', osm_id=111, housenumber=4,
address=None, postcode='99900', country_code='xd',
centroid=mkpoint(0, 0))
res = func(row, DetailedResult)
assert res.housenumber == '4'
assert res.extratags is None
assert res.category == ('place', 'house')
@pytest.mark.parametrize('func', (nresults.create_from_osmline_row,
nresults.create_from_tiger_row))
def test_create_row_without_housenumber(func):
row = FakeRow(place_id=2345, osm_type='W', osm_id=111,
startnumber=1, endnumber=11, step=2,
address=None, postcode='99900', country_code='xd',
centroid=mkpoint(0, 0))
res = func(row, DetailedResult)
assert res.housenumber is None
assert res.extratags == {'startnumber': '1', 'endnumber': '11', 'step': '2'}
assert res.category == ('place', 'houses')
| 2,779 | 32.095238 | 84 | py |
Nominatim | Nominatim-master/test/python/api/test_server_glue_v1.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the Python web frameworks adaptor, v1 API.
"""
from collections import namedtuple
import json
import xml.etree.ElementTree as ET
from pathlib import Path
import pytest
from nominatim.config import Configuration
import nominatim.api.v1.server_glue as glue
import nominatim.api as napi
import nominatim.api.logging as loglib
class FakeError(BaseException):
    """ Stand-in for the error exceptions produced by the test adaptor.
        Records message and HTTP status code for inspection by tests.
    """

    def __init__(self, msg, status):
        self.msg = msg
        self.status = status

    def __str__(self):
        return '{} -- {}'.format(self.status, self.msg)
FakeResponse = namedtuple('FakeResponse', ['status', 'output', 'content_type'])
class FakeAdaptor(glue.ASGIAdaptor):
    """ ASGIAdaptor implementation for tests that works without a web
        framework: parameters and headers are taken from plain dicts
        and errors/responses are returned as simple Python objects.
    """

    def __init__(self, params=None, headers=None, config=None):
        self.params = params or {}
        self.headers = headers or {}
        self._config = config or Configuration(None)

    def get(self, name, default=None):
        """ Return the query parameter 'name' or the given default. """
        return self.params.get(name, default)

    def get_header(self, name, default=None):
        """ Return the HTTP header 'name' or the given default. """
        return self.headers.get(name, default)

    def error(self, msg, status=400):
        """ Return (instead of raising) an error exception object so
            tests can inspect it.
        """
        return FakeError(msg, status)

    def create_response(self, status, output):
        """ Package the response data into a FakeResponse tuple.
            NOTE(review): relies on self.content_type being set by the
            base class beforehand.
        """
        return FakeResponse(status, output, self.content_type)

    def config(self):
        """ Return the configuration object the adaptor was set up with. """
        return self._config
# ASGIAdaptor.get_int/bool()
@pytest.mark.parametrize('func', ['get_int', 'get_bool'])
def test_adaptor_get_int_missing_but_required(func):
with pytest.raises(FakeError, match='^400 -- .*missing'):
getattr(FakeAdaptor(), func)('something')
@pytest.mark.parametrize('func, val', [('get_int', 23), ('get_bool', True)])
def test_adaptor_get_int_missing_with_default(func, val):
assert getattr(FakeAdaptor(), func)('something', val) == val
@pytest.mark.parametrize('inp', ['0', '234', '-4566953498567934876'])
def test_adaptor_get_int_success(inp):
assert FakeAdaptor(params={'foo': inp}).get_int('foo') == int(inp)
assert FakeAdaptor(params={'foo': inp}).get_int('foo', 4) == int(inp)
@pytest.mark.parametrize('inp', ['rs', '4.5', '6f'])
def test_adaptor_get_int_bad_number(inp):
with pytest.raises(FakeError, match='^400 -- .*must be a number'):
FakeAdaptor(params={'foo': inp}).get_int('foo')
@pytest.mark.parametrize('inp', ['1', 'true', 'whatever', 'false'])
def test_adaptor_get_bool_trueish(inp):
assert FakeAdaptor(params={'foo': inp}).get_bool('foo')
def test_adaptor_get_bool_falsish():
assert not FakeAdaptor(params={'foo': '0'}).get_bool('foo')
# ASGIAdaptor.parse_format()
def test_adaptor_parse_format_use_default():
adaptor = FakeAdaptor()
assert adaptor.parse_format(napi.StatusResult, 'text') == 'text'
assert adaptor.content_type == 'text/plain; charset=utf-8'
def test_adaptor_parse_format_use_configured():
adaptor = FakeAdaptor(params={'format': 'json'})
assert adaptor.parse_format(napi.StatusResult, 'text') == 'json'
assert adaptor.content_type == 'application/json'
def test_adaptor_parse_format_invalid_value():
adaptor = FakeAdaptor(params={'format': '@!#'})
with pytest.raises(FakeError, match='^400 -- .*must be one of'):
adaptor.parse_format(napi.StatusResult, 'text')
# ASGIAdaptor.get_accepted_languages()
def test_accepted_languages_from_param():
a = FakeAdaptor(params={'accept-language': 'de'})
assert a.get_accepted_languages() == 'de'
def test_accepted_languages_from_header():
a = FakeAdaptor(headers={'accept-language': 'de'})
assert a.get_accepted_languages() == 'de'
def test_accepted_languages_from_default(monkeypatch):
monkeypatch.setenv('NOMINATIM_DEFAULT_LANGUAGE', 'de')
a = FakeAdaptor()
assert a.get_accepted_languages() == 'de'
def test_accepted_languages_param_over_header():
a = FakeAdaptor(params={'accept-language': 'de'},
headers={'accept-language': 'en'})
assert a.get_accepted_languages() == 'de'
def test_accepted_languages_header_over_default(monkeypatch):
monkeypatch.setenv('NOMINATIM_DEFAULT_LANGUAGE', 'en')
a = FakeAdaptor(headers={'accept-language': 'de'})
assert a.get_accepted_languages() == 'de'
# ASGIAdaptor.raise_error()
class TestAdaptorRaiseError:
@pytest.fixture(autouse=True)
def init_adaptor(self):
self.adaptor = FakeAdaptor()
self.adaptor.setup_debugging()
def run_raise_error(self, msg, status):
with pytest.raises(FakeError) as excinfo:
self.adaptor.raise_error(msg, status=status)
return excinfo.value
def test_without_content_set(self):
err = self.run_raise_error('TEST', 404)
assert self.adaptor.content_type == 'text/plain; charset=utf-8'
assert err.msg == 'TEST'
assert err.status == 404
def test_json(self):
self.adaptor.content_type = 'application/json'
err = self.run_raise_error('TEST', 501)
content = json.loads(err.msg)['error']
assert content['code'] == 501
assert content['message'] == 'TEST'
def test_xml(self):
self.adaptor.content_type = 'text/xml; charset=utf-8'
err = self.run_raise_error('this!', 503)
content = ET.fromstring(err.msg)
assert content.tag == 'error'
assert content.find('code').text == '503'
assert content.find('message').text == 'this!'
def test_raise_error_during_debug():
a = FakeAdaptor(params={'debug': '1'})
a.setup_debugging()
loglib.log().section('Ongoing')
with pytest.raises(FakeError) as excinfo:
a.raise_error('badstate')
content = ET.fromstring(excinfo.value.msg)
assert content.tag == 'html'
assert '>Ongoing<' in excinfo.value.msg
assert 'badstate' in excinfo.value.msg
# ASGIAdaptor.build_response
def test_build_response_without_content_type():
resp = FakeAdaptor().build_response('attention')
assert isinstance(resp, FakeResponse)
assert resp.status == 200
assert resp.output == 'attention'
assert resp.content_type == 'text/plain; charset=utf-8'
def test_build_response_with_status():
a = FakeAdaptor(params={'format': 'json'})
a.parse_format(napi.StatusResult, 'text')
resp = a.build_response('stuff\nmore stuff', status=404)
assert isinstance(resp, FakeResponse)
assert resp.status == 404
assert resp.output == 'stuff\nmore stuff'
assert resp.content_type == 'application/json'
def test_build_response_jsonp_with_json():
a = FakeAdaptor(params={'format': 'json', 'json_callback': 'test.func'})
a.parse_format(napi.StatusResult, 'text')
resp = a.build_response('{}')
assert isinstance(resp, FakeResponse)
assert resp.status == 200
assert resp.output == 'test.func({})'
assert resp.content_type == 'application/javascript'
def test_build_response_jsonp_without_json():
a = FakeAdaptor(params={'format': 'text', 'json_callback': 'test.func'})
a.parse_format(napi.StatusResult, 'text')
resp = a.build_response('{}')
assert isinstance(resp, FakeResponse)
assert resp.status == 200
assert resp.output == '{}'
assert resp.content_type == 'text/plain; charset=utf-8'
@pytest.mark.parametrize('param', ['alert(); func', '\\n', '', 'a b'])
def test_build_response_jsonp_bad_format(param):
a = FakeAdaptor(params={'format': 'json', 'json_callback': param})
a.parse_format(napi.StatusResult, 'text')
with pytest.raises(FakeError, match='^400 -- .*Invalid'):
a.build_response('{}')
# status_endpoint()
class TestStatusEndpoint:
@pytest.fixture(autouse=True)
def patch_status_func(self, monkeypatch):
async def _status(*args, **kwargs):
return self.status
monkeypatch.setattr(napi.NominatimAPIAsync, 'status', _status)
@pytest.mark.asyncio
async def test_status_without_params(self):
a = FakeAdaptor()
self.status = napi.StatusResult(0, 'foo')
resp = await glue.status_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
assert isinstance(resp, FakeResponse)
assert resp.status == 200
assert resp.content_type == 'text/plain; charset=utf-8'
@pytest.mark.asyncio
async def test_status_with_error(self):
a = FakeAdaptor()
self.status = napi.StatusResult(405, 'foo')
resp = await glue.status_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
assert isinstance(resp, FakeResponse)
assert resp.status == 500
assert resp.content_type == 'text/plain; charset=utf-8'
@pytest.mark.asyncio
async def test_status_json_with_error(self):
a = FakeAdaptor(params={'format': 'json'})
self.status = napi.StatusResult(405, 'foo')
resp = await glue.status_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
assert isinstance(resp, FakeResponse)
assert resp.status == 200
assert resp.content_type == 'application/json'
@pytest.mark.asyncio
async def test_status_bad_format(self):
a = FakeAdaptor(params={'format': 'foo'})
self.status = napi.StatusResult(0, 'foo')
with pytest.raises(FakeError):
await glue.status_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
# details_endpoint()
class TestDetailsEndpoint:
@pytest.fixture(autouse=True)
def patch_lookup_func(self, monkeypatch):
self.result = napi.DetailedResult(napi.SourceTable.PLACEX,
('place', 'thing'),
napi.Point(1.0, 2.0))
self.lookup_args = []
async def _lookup(*args, **kwargs):
self.lookup_args.extend(args[1:])
return self.result
monkeypatch.setattr(napi.NominatimAPIAsync, 'details', _lookup)
@pytest.mark.asyncio
async def test_details_no_params(self):
a = FakeAdaptor()
with pytest.raises(FakeError, match='^400 -- .*Missing'):
await glue.details_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
@pytest.mark.asyncio
async def test_details_by_place_id(self):
a = FakeAdaptor(params={'place_id': '4573'})
await glue.details_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
assert self.lookup_args[0].place_id == 4573
@pytest.mark.asyncio
async def test_details_by_osm_id(self):
a = FakeAdaptor(params={'osmtype': 'N', 'osmid': '45'})
await glue.details_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
assert self.lookup_args[0].osm_type == 'N'
assert self.lookup_args[0].osm_id == 45
assert self.lookup_args[0].osm_class is None
@pytest.mark.asyncio
async def test_details_with_debugging(self):
a = FakeAdaptor(params={'osmtype': 'N', 'osmid': '45', 'debug': '1'})
resp = await glue.details_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
content = ET.fromstring(resp.output)
assert resp.content_type == 'text/html; charset=utf-8'
assert content.tag == 'html'
@pytest.mark.asyncio
async def test_details_no_result(self):
a = FakeAdaptor(params={'place_id': '4573'})
self.result = None
with pytest.raises(FakeError, match='^404 -- .*found'):
await glue.details_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
# reverse_endpoint()
class TestReverseEndPoint:
    """ Tests for the glue code of the reverse endpoint. """

    @pytest.fixture(autouse=True)
    def patch_reverse_func(self, monkeypatch):
        """ Replace the API reverse function with one returning a
            canned result.
        """
        self.result = napi.ReverseResult(napi.SourceTable.PLACEX,
                                         ('place', 'thing'),
                                         napi.Point(1.0, 2.0))
        async def _reverse(*args, **kwargs):
            return self.result

        monkeypatch.setattr(napi.NominatimAPIAsync, 'reverse', _reverse)


    @pytest.mark.asyncio
    @pytest.mark.parametrize('params', [{}, {'lat': '3.4'}, {'lon': '6.7'}])
    async def test_reverse_no_params(self, params):
        """ Missing lat or lon parameters must be rejected with HTTP 400. """
        a = FakeAdaptor()
        a.params = params
        a.params['format'] = 'xml'

        with pytest.raises(FakeError, match='^400 -- (?s:.*)missing'):
            await glue.reverse_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)


    # Bug fix: this class originally contained two methods named
    # 'test_reverse_success'. The second definition shadowed the first,
    # so the parametrized variant (with its stale 'res == ""' assertion)
    # never ran. Both are merged into one parametrized test here.
    @pytest.mark.asyncio
    @pytest.mark.parametrize('params', [{'lat': '45.6', 'lon': '4563', 'format': 'json'},
                                        {'lat': '56.3', 'lon': '6.8'}])
    async def test_reverse_success(self, params):
        """ Valid coordinates must produce a non-empty response. """
        a = FakeAdaptor()
        a.params = params

        assert await glue.reverse_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)


    @pytest.mark.asyncio
    async def test_reverse_from_search(self):
        """ A free-text search query that looks like a coordinate pair
            yields a single result (served by the patched reverse function).
        """
        a = FakeAdaptor()
        a.params['q'] = '34.6 2.56'
        a.params['format'] = 'json'

        res = await glue.search_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)

        assert len(json.loads(res.output)) == 1
# lookup_endpoint()
class TestLookupEndpoint:
    """ Tests for parameter handling of the lookup endpoint.

        NominatimAPIAsync.lookup is monkeypatched to return one canned
        result, so only request parsing and response formatting are
        exercised here.
    """
    @pytest.fixture(autouse=True)
    def patch_lookup_func(self, monkeypatch):
        """ Replace the lookup function with one returning a fixed result. """
        self.results = [napi.SearchResult(napi.SourceTable.PLACEX,
                                          ('place', 'thing'),
                                          napi.Point(1.0, 2.0))]
        async def _lookup(*args, **kwargs):
            return napi.SearchResults(self.results)
        monkeypatch.setattr(napi.NominatimAPIAsync, 'lookup', _lookup)
    @pytest.mark.asyncio
    async def test_lookup_no_params(self):
        """ Without 'osm_ids' the endpoint returns an empty JSON list. """
        a = FakeAdaptor()
        a.params['format'] = 'json'
        res = await glue.lookup_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert res.output == '[]'
    @pytest.mark.asyncio
    @pytest.mark.parametrize('param', ['w', 'bad', ''])
    async def test_lookup_bad_params(self, param):
        """ Malformed entries in 'osm_ids' must not cause an error. """
        a = FakeAdaptor()
        a.params['format'] = 'json'
        a.params['osm_ids'] = f'W34,{param},N33333'
        res = await glue.lookup_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == 1
    @pytest.mark.asyncio
    @pytest.mark.parametrize('param', ['p234234', '4563'])
    async def test_lookup_bad_osm_type(self, param):
        """ Unknown OSM type prefixes must not cause an error. """
        a = FakeAdaptor()
        a.params['format'] = 'json'
        a.params['osm_ids'] = f'W34,{param},N33333'
        res = await glue.lookup_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == 1
    @pytest.mark.asyncio
    async def test_lookup_working(self):
        """ A list of well-formed OSM ids is accepted and yields results. """
        a = FakeAdaptor()
        a.params['format'] = 'json'
        a.params['osm_ids'] = 'N23,W34'
        res = await glue.lookup_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == 1
# search_endpoint()
class TestSearchEndPointSearch:
    """ Tests for free-text queries against the search endpoint.

        NominatimAPIAsync.search is monkeypatched to return canned
        results, so only request parsing and response formatting are
        exercised here.
    """
    @pytest.fixture(autouse=True)
    def patch_lookup_func(self, monkeypatch):
        """ Replace the search function with one returning fixed results. """
        self.results = [napi.SearchResult(napi.SourceTable.PLACEX,
                                          ('place', 'thing'),
                                          napi.Point(1.0, 2.0))]
        async def _search(*args, **kwargs):
            return napi.SearchResults(self.results)
        monkeypatch.setattr(napi.NominatimAPIAsync, 'search', _search)
    @pytest.mark.asyncio
    async def test_search_free_text(self):
        """ A plain free-text query must return results. """
        a = FakeAdaptor()
        a.params['q'] = 'something'
        res = await glue.search_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == 1
    @pytest.mark.asyncio
    async def test_search_free_text_xml(self):
        """ XML output must contain the original query string. """
        a = FakeAdaptor()
        a.params['q'] = 'something'
        a.params['format'] = 'xml'
        res = await glue.search_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert res.status == 200
        assert res.output.index('something') > 0
    @pytest.mark.asyncio
    async def test_search_free_and_structured(self):
        """ Combining 'q' with structured parameters must not fail
            (the structured part appears to be ignored - see param value).
        """
        a = FakeAdaptor()
        a.params['q'] = 'something'
        a.params['city'] = 'ignored'
        res = await glue.search_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == 1
    @pytest.mark.asyncio
    @pytest.mark.parametrize('dedupe,numres', [(True, 1), (False, 2)])
    async def test_search_dedupe(self, dedupe, numres):
        """ Duplicate results are collapsed unless dedupe is disabled. """
        self.results = self.results * 2
        a = FakeAdaptor()
        a.params['q'] = 'something'
        if not dedupe:
            a.params['dedupe'] = '0'
        res = await glue.search_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == numres
class TestSearchEndPointSearchAddress:
    """ Tests for structured address queries against the search endpoint.

        NominatimAPIAsync.search_address is monkeypatched to return a
        canned result.
    """
    @pytest.fixture(autouse=True)
    def patch_lookup_func(self, monkeypatch):
        """ Replace search_address() with one returning fixed results. """
        self.results = [napi.SearchResult(napi.SourceTable.PLACEX,
                                          ('place', 'thing'),
                                          napi.Point(1.0, 2.0))]
        async def _search(*args, **kwargs):
            return napi.SearchResults(self.results)
        monkeypatch.setattr(napi.NominatimAPIAsync, 'search_address', _search)
    @pytest.mark.asyncio
    async def test_search_structured(self):
        """ A structured query (street only) must trigger an address search. """
        a = FakeAdaptor()
        a.params['street'] = 'something'
        res = await glue.search_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == 1
class TestSearchEndPointSearchCategory:
    """ Tests for category queries against the search endpoint.

        NominatimAPIAsync.search_category is monkeypatched to return a
        canned result.
    """
    @pytest.fixture(autouse=True)
    def patch_lookup_func(self, monkeypatch):
        """ Replace search_category() with one returning fixed results. """
        self.results = [napi.SearchResult(napi.SourceTable.PLACEX,
                                          ('place', 'thing'),
                                          napi.Point(1.0, 2.0))]
        async def _search(*args, **kwargs):
            return napi.SearchResults(self.results)
        monkeypatch.setattr(napi.NominatimAPIAsync, 'search_category', _search)
    @pytest.mark.asyncio
    async def test_search_category(self):
        """ A '[class=type]' query must trigger a category search. """
        a = FakeAdaptor()
        a.params['q'] = '[shop=fog]'
        res = await glue.search_endpoint(napi.NominatimAPIAsync(Path('/invalid')), a)
        assert len(json.loads(res.output)) == 1
| 18,533 | 29.284314 | 87 | py |
Nominatim | Nominatim-master/test/python/api/test_result_formatting_v1_reverse.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for formatting reverse results for the V1 API.
These test only ensure that the Python code is correct.
For functional tests see BDD test suite.
"""
import json
import xml.etree.ElementTree as ET
import pytest
import nominatim.api.v1 as api_impl
import nominatim.api as napi
FORMATS = ['json', 'jsonv2', 'geojson', 'geocodejson', 'xml']
@pytest.mark.parametrize('fmt', FORMATS)
def test_format_reverse_minimal(fmt):
    """ A minimal ReverseResult must be serializable in every format. """
    result = napi.ReverseResult(napi.SourceTable.PLACEX,
                                ('amenity', 'post_box'),
                                napi.Point(0.3, -8.9))
    output = api_impl.format_result(napi.ReverseResults([result]), fmt, {})
    if fmt != 'xml':
        parsed = json.loads(output)
        assert isinstance(parsed, dict)
    else:
        tree = ET.fromstring(output)
        assert tree.tag == 'reversegeocode'
@pytest.mark.parametrize('fmt', FORMATS)
def test_format_reverse_no_result(fmt):
    """ An empty result set must yield the 'Unable to geocode' error. """
    raw = api_impl.format_result(napi.ReverseResults(), fmt, {})
    if fmt == 'xml':
        root = ET.fromstring(raw)
        assert root.find('error').text == 'Unable to geocode'
    else:
        assert json.loads(raw) == {'error': 'Unable to geocode'}
@pytest.mark.parametrize('fmt', FORMATS)
def test_format_reverse_with_osm_id(fmt):
    """ OSM type and id must appear in the output of every format
        (type spelled out, id as string in XML and as number in JSON).
    """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('amenity', 'post_box'),
                                 napi.Point(0.3, -8.9),
                                 place_id=5564,
                                 osm_object=('N', 23))
    raw = api_impl.format_result(napi.ReverseResults([reverse]), fmt, {})
    if fmt == 'xml':
        root = ET.fromstring(raw).find('result')
        assert root.attrib['osm_type'] == 'node'
        assert root.attrib['osm_id'] == '23'
    else:
        result = json.loads(raw)
        if fmt == 'geocodejson':
            props = result['features'][0]['properties']['geocoding']
        elif fmt == 'geojson':
            props = result['features'][0]['properties']
        else:
            props = result
        assert props['osm_type'] == 'node'
        assert props['osm_id'] == 23
@pytest.mark.parametrize('fmt', FORMATS)
def test_format_reverse_with_address(fmt):
    """ With addressdetails requested, only rows with isaddress=True
        must appear in the address output ('Hello', not 'ByeBye').
    """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('place', 'thing'),
                                 napi.Point(1.0, 2.0),
                                 country_code='fe',
                                 address_rows=napi.AddressLines([
                                     napi.AddressLine(place_id=None,
                                                      osm_object=None,
                                                      category=('place', 'county'),
                                                      names={'name': 'Hello'},
                                                      extratags=None,
                                                      admin_level=5,
                                                      fromarea=False,
                                                      isaddress=True,
                                                      rank_address=10,
                                                      distance=0.0),
                                     napi.AddressLine(place_id=None,
                                                      osm_object=None,
                                                      category=('place', 'county'),
                                                      names={'name': 'ByeBye'},
                                                      extratags=None,
                                                      admin_level=5,
                                                      fromarea=False,
                                                      isaddress=False,
                                                      rank_address=10,
                                                      distance=0.0)
                                   ]))
    reverse.localize(napi.Locales())
    raw = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                 {'addressdetails': True})
    if fmt == 'xml':
        root = ET.fromstring(raw)
        assert root.find('addressparts').find('county').text == 'Hello'
    else:
        result = json.loads(raw)
        assert isinstance(result, dict)
        if fmt == 'geocodejson':
            props = result['features'][0]['properties']['geocoding']
            assert 'admin' in props
            assert props['county'] == 'Hello'
        else:
            if fmt == 'geojson':
                props = result['features'][0]['properties']
            else:
                props = result
            assert 'address' in props
def test_format_reverse_geocodejson_special_parts():
    """ In geocodejson, house_number and postcode rows become dedicated
        fields, and a row repeating the result's own place_id is dropped
        from the address parts.
    """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('place', 'house'),
                                 napi.Point(1.0, 2.0),
                                 place_id=33,
                                 country_code='fe',
                                 address_rows=napi.AddressLines([
                                     napi.AddressLine(place_id=None,
                                                      osm_object=None,
                                                      category=('place', 'house_number'),
                                                      names={'ref': '1'},
                                                      extratags=None,
                                                      admin_level=15,
                                                      fromarea=False,
                                                      isaddress=True,
                                                      rank_address=10,
                                                      distance=0.0),
                                     napi.AddressLine(place_id=None,
                                                      osm_object=None,
                                                      category=('place', 'postcode'),
                                                      names={'ref': '99446'},
                                                      extratags=None,
                                                      admin_level=11,
                                                      fromarea=False,
                                                      isaddress=True,
                                                      rank_address=10,
                                                      distance=0.0),
                                     napi.AddressLine(place_id=33,
                                                      osm_object=None,
                                                      category=('place', 'county'),
                                                      names={'name': 'Hello'},
                                                      extratags=None,
                                                      admin_level=5,
                                                      fromarea=False,
                                                      isaddress=True,
                                                      rank_address=10,
                                                      distance=0.0)
                                   ]))
    reverse.localize(napi.Locales())
    raw = api_impl.format_result(napi.ReverseResults([reverse]), 'geocodejson',
                                 {'addressdetails': True})
    props = json.loads(raw)['features'][0]['properties']['geocoding']
    assert props['housenumber'] == '1'
    assert props['postcode'] == '99446'
    assert 'county' not in props
@pytest.mark.parametrize('fmt', FORMATS)
def test_format_reverse_with_address_none(fmt):
    """ An empty address_rows list must not break formatting: XML omits
        the addressparts node, the JSON formats keep their containers.
    """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('place', 'thing'),
                                 napi.Point(1.0, 2.0),
                                 address_rows=napi.AddressLines())
    raw = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                 {'addressdetails': True})
    if fmt == 'xml':
        root = ET.fromstring(raw)
        assert root.find('addressparts') is None
    else:
        result = json.loads(raw)
        assert isinstance(result, dict)
        if fmt == 'geocodejson':
            props = result['features'][0]['properties']['geocoding']
            # (removed leftover debug print(props))
            assert 'admin' in props
        else:
            if fmt == 'geojson':
                props = result['features'][0]['properties']
            else:
                props = result
            assert 'address' in props
@pytest.mark.parametrize('fmt', ['json', 'jsonv2', 'geojson', 'xml'])
def test_format_reverse_with_extratags(fmt):
    """ With 'extratags' requested, the tags must appear in the output. """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('place', 'thing'),
                                 napi.Point(1.0, 2.0),
                                 extratags={'one': 'A', 'two':'B'})
    raw = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                 {'extratags': True})
    if fmt == 'xml':
        root = ET.fromstring(raw)
        assert root.find('extratags').find('tag').attrib['key'] == 'one'
    else:
        result = json.loads(raw)
        if fmt == 'geojson':
            extra = result['features'][0]['properties']['extratags']
        else:
            extra = result['extratags']
        assert extra == {'one': 'A', 'two':'B'}
@pytest.mark.parametrize('fmt', ['json', 'jsonv2', 'geojson', 'xml'])
def test_format_reverse_with_extratags_none(fmt):
    """ With 'extratags' requested but no tags present, the container
        must still exist (null in JSON, empty node in XML).
    """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('place', 'thing'),
                                 napi.Point(1.0, 2.0))
    raw = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                 {'extratags': True})
    if fmt == 'xml':
        root = ET.fromstring(raw)
        assert root.find('extratags') is not None
    else:
        result = json.loads(raw)
        if fmt == 'geojson':
            extra = result['features'][0]['properties']['extratags']
        else:
            extra = result['extratags']
        assert extra is None
@pytest.mark.parametrize('fmt', ['json', 'jsonv2', 'geojson', 'xml'])
def test_format_reverse_with_namedetails_with_name(fmt):
    """ With 'namedetails' requested, all name variants must be output. """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('place', 'thing'),
                                 napi.Point(1.0, 2.0),
                                 names={'name': 'A', 'ref':'1'})
    raw = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                 {'namedetails': True})
    if fmt == 'xml':
        root = ET.fromstring(raw)
        assert root.find('namedetails').find('name').text == 'A'
    else:
        result = json.loads(raw)
        if fmt == 'geojson':
            extra = result['features'][0]['properties']['namedetails']
        else:
            extra = result['namedetails']
        assert extra == {'name': 'A', 'ref':'1'}
@pytest.mark.parametrize('fmt', ['json', 'jsonv2', 'geojson', 'xml'])
def test_format_reverse_with_namedetails_without_name(fmt):
    """ With 'namedetails' requested but no names, the container must
        still exist (null in JSON, empty node in XML).
    """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('place', 'thing'),
                                 napi.Point(1.0, 2.0))
    raw = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                 {'namedetails': True})
    if fmt == 'xml':
        root = ET.fromstring(raw)
        assert root.find('namedetails') is not None
    else:
        result = json.loads(raw)
        if fmt == 'geojson':
            extra = result['features'][0]['properties']['namedetails']
        else:
            extra = result['namedetails']
        assert extra is None
@pytest.mark.parametrize('fmt', ['json', 'jsonv2'])
def test_search_details_with_icon_available(fmt):
    """ When an icon base URL is configured and the category has a known
        icon, the output must contain the full icon URL.
    """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('amenity', 'restaurant'),
                                 napi.Point(1.0, 2.0))
    result = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                    {'icon_base_url': 'foo'})
    js = json.loads(result)
    assert js['icon'] == 'foo/food_restaurant.p.20.png'
@pytest.mark.parametrize('fmt', ['json', 'jsonv2'])
def test_search_details_with_icon_not_available(fmt):
    """ Categories without a known icon must not emit an 'icon' field. """
    reverse = napi.ReverseResult(napi.SourceTable.PLACEX,
                                 ('amenity', 'tree'),
                                 napi.Point(1.0, 2.0))
    result = api_impl.format_result(napi.ReverseResults([reverse]), fmt,
                                    {'icon_base_url': 'foo'})
    assert 'icon' not in json.loads(result)
| 12,990 | 39.095679 | 87 | py |
Nominatim | Nominatim-master/test/python/api/test_api_status.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the status API call.
"""
from pathlib import Path
import datetime as dt
import pytest
from nominatim.version import NOMINATIM_VERSION, NominatimVersion
import nominatim.api as napi
def test_status_no_extra_info(apiobj):
    """ Status on a database without import metadata reports OK but
        leaves database version and update date unset.
    """
    result = apiobj.api.status()
    assert result.status == 0
    assert result.message == 'OK'
    assert result.software_version == NOMINATIM_VERSION
    assert result.database_version is None
    assert result.data_updated is None
def test_status_full(apiobj):
    """ Import date and database version stored in the DB must be
        reflected in the status result.
    """
    import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0, tzinfo=dt.timezone.utc)
    apiobj.add_data('import_status',
                    [{'lastimportdate': import_date}])
    apiobj.add_data('properties',
                    [{'property': 'database_version', 'value': '99.5.4-2'}])
    result = apiobj.api.status()
    assert result.status == 0
    assert result.message == 'OK'
    assert result.software_version == NOMINATIM_VERSION
    assert result.database_version == NominatimVersion(99, 5, 4, 2)
    assert result.data_updated == import_date
def test_status_database_not_found(monkeypatch):
    """ An unreachable database must yield status 700 instead of raising. """
    monkeypatch.setenv('NOMINATIM_DATABASE_DSN', 'dbname=rgjdfkgjedkrgdfkngdfkg')
    api = napi.NominatimAPI(Path('/invalid'), {})
    result = api.status()
    assert result.status == 700
    assert result.message == 'Database connection failed'
    assert result.software_version == NOMINATIM_VERSION
    assert result.database_version is None
    assert result.data_updated is None
| 1,705 | 30.018182 | 81 | py |
Nominatim | Nominatim-master/test/python/api/test_api_types.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for loading of parameter dataclasses.
"""
import pytest
from nominatim.errors import UsageError
import nominatim.api.types as typ
def test_no_params_defaults():
    """ LookupDetails built from no kwargs must use the documented defaults. """
    params = typ.LookupDetails.from_kwargs({})
    assert not params.parented_places
    assert params.geometry_simplification == 0.0
@pytest.mark.parametrize('k,v', [('geometry_output', 'a'),
                                 ('linked_places', 0),
                                 ('geometry_simplification', 'NaN')])
def test_bad_format_reverse(k, v):
    """ Ill-typed values for ReverseDetails fields must raise a UsageError. """
    with pytest.raises(UsageError):
        # Return value intentionally discarded - only the exception matters.
        typ.ReverseDetails.from_kwargs({k: v})
@pytest.mark.parametrize('rin,rout', [(-23, 0), (0, 0), (1, 1),
                                      (15, 15), (30, 30), (31, 30)])
def test_rank_params(rin, rout):
    """ max_rank must be clamped to the valid range 0..30. """
    params = typ.ReverseDetails.from_kwargs({'max_rank': rin})
    assert params.max_rank == rout
| 1,100 | 29.583333 | 69 | py |
Nominatim | Nominatim-master/test/python/api/test_api_reverse.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for reverse API call.
These tests make sure that all Python code is correct and executable.
Functional tests can be found in the BDD test suite.
"""
import json
import pytest
import nominatim.api as napi
def test_reverse_rank_30(apiobj):
    """ A point object (default rank 30) is found at its exact location. """
    apiobj.add_placex(place_id=223, class_='place', type='house',
                      housenumber='1',
                      centroid=(1.3, 0.7),
                      geometry='POINT(1.3 0.7)')
    result = apiobj.api.reverse((1.3, 0.7))
    assert result is not None
    assert result.place_id == 223
@pytest.mark.parametrize('country', ['de', 'us'])
def test_reverse_street(apiobj, country):
    """ A street is found for a point on its geometry, independent of
        the country (us uses the extra Tiger tables).
    """
    apiobj.add_placex(place_id=990, class_='highway', type='service',
                      rank_search=27, rank_address=27,
                      name = {'name': 'My Street'},
                      centroid=(10.0, 10.0),
                      country_code=country,
                      geometry='LINESTRING(9.995 10, 10.005 10)')
    assert apiobj.api.reverse((9.995, 10)).place_id == 990
def test_reverse_ignore_unindexed(apiobj):
    """ Places still waiting for indexing (indexed_status != 0) must not
        be returned by reverse lookup.
    """
    apiobj.add_placex(place_id=223, class_='place', type='house',
                      housenumber='1',
                      indexed_status=2,
                      centroid=(1.3, 0.7),
                      geometry='POINT(1.3 0.7)')
    result = apiobj.api.reverse((1.3, 0.7))
    assert result is None
@pytest.mark.parametrize('y,layer,place_id', [(0.7, napi.DataLayer.ADDRESS, 223),
                                              (0.70001, napi.DataLayer.POI, 224),
                                              (0.7, napi.DataLayer.ADDRESS | napi.DataLayer.POI, 224),
                                              (0.70001, napi.DataLayer.ADDRESS | napi.DataLayer.POI, 223),
                                              (0.7, napi.DataLayer.MANMADE, 225),
                                              (0.7, napi.DataLayer.RAILWAY, 226),
                                              (0.7, napi.DataLayer.NATURAL, 227),
                                              (0.70003, napi.DataLayer.MANMADE | napi.DataLayer.RAILWAY, 225),
                                              (0.70003, napi.DataLayer.MANMADE | napi.DataLayer.NATURAL, 225)])
def test_reverse_rank_30_layers(apiobj, y, layer, place_id):
    """ Layer restrictions select among overlapping rank-30 objects of
        different classes; combined layers pick the nearest match.
    """
    apiobj.add_placex(place_id=223, class_='place', type='house',
                      housenumber='1',
                      rank_address=30,
                      rank_search=30,
                      centroid=(1.3, 0.70001))
    apiobj.add_placex(place_id=224, class_='amenity', type='toilet',
                      rank_address=30,
                      rank_search=30,
                      centroid=(1.3, 0.7))
    apiobj.add_placex(place_id=225, class_='man_made', type='tower',
                      rank_address=0,
                      rank_search=30,
                      centroid=(1.3, 0.70003))
    apiobj.add_placex(place_id=226, class_='railway', type='station',
                      rank_address=0,
                      rank_search=30,
                      centroid=(1.3, 0.70004))
    apiobj.add_placex(place_id=227, class_='natural', type='cave',
                      rank_address=0,
                      rank_search=30,
                      centroid=(1.3, 0.70005))
    assert apiobj.api.reverse((1.3, y), layers=layer).place_id == place_id
def test_reverse_poi_layer_with_no_pois(apiobj):
    """ Restricting to the POI layer must yield nothing when only an
        address object exists.
    """
    apiobj.add_placex(place_id=223, class_='place', type='house',
                      housenumber='1',
                      rank_address=30,
                      rank_search=30,
                      centroid=(1.3, 0.70001))
    assert apiobj.api.reverse((1.3, 0.70001), max_rank=29,
                              layers=napi.DataLayer.POI) is None
def test_reverse_housenumber_on_street(apiobj):
    """ max_rank controls whether the house number or its parent street
        is returned for a point on the street.
    """
    apiobj.add_placex(place_id=990, class_='highway', type='service',
                      rank_search=27, rank_address=27,
                      name = {'name': 'My Street'},
                      centroid=(10.0, 10.0),
                      geometry='LINESTRING(9.995 10, 10.005 10)')
    apiobj.add_placex(place_id=991, class_='place', type='house',
                      parent_place_id=990,
                      rank_search=30, rank_address=30,
                      housenumber='23',
                      centroid=(10.0, 10.00001))
    assert apiobj.api.reverse((10.0, 10.0), max_rank=30).place_id == 991
    assert apiobj.api.reverse((10.0, 10.0), max_rank=27).place_id == 990
    assert apiobj.api.reverse((10.0, 10.00001), max_rank=30).place_id == 991
def test_reverse_housenumber_interpolation(apiobj):
    """ A closer interpolation line wins over a more distant house point. """
    apiobj.add_placex(place_id=990, class_='highway', type='service',
                      rank_search=27, rank_address=27,
                      name = {'name': 'My Street'},
                      centroid=(10.0, 10.0),
                      geometry='LINESTRING(9.995 10, 10.005 10)')
    apiobj.add_placex(place_id=991, class_='place', type='house',
                      parent_place_id=990,
                      rank_search=30, rank_address=30,
                      housenumber='23',
                      centroid=(10.0, 10.00002))
    apiobj.add_osmline(place_id=992,
                       parent_place_id=990,
                       startnumber=1, endnumber=3, step=1,
                       centroid=(10.0, 10.00001),
                       geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
    assert apiobj.api.reverse((10.0, 10.0)).place_id == 992
def test_reverse_housenumber_point_interpolation(apiobj):
    """ An interpolation degenerated to a single point yields exactly
        its one house number.
    """
    apiobj.add_placex(place_id=990, class_='highway', type='service',
                      rank_search=27, rank_address=27,
                      name = {'name': 'My Street'},
                      centroid=(10.0, 10.0),
                      geometry='LINESTRING(9.995 10, 10.005 10)')
    apiobj.add_osmline(place_id=992,
                       parent_place_id=990,
                       startnumber=42, endnumber=42, step=1,
                       centroid=(10.0, 10.00001),
                       geometry='POINT(10.0 10.00001)')
    res = apiobj.api.reverse((10.0, 10.0))
    assert res.place_id == 992
    assert res.housenumber == '42'
def test_reverse_tiger_number(apiobj):
    """ For US streets, house numbers from the Tiger table are found. """
    apiobj.add_placex(place_id=990, class_='highway', type='service',
                      rank_search=27, rank_address=27,
                      name = {'name': 'My Street'},
                      centroid=(10.0, 10.0),
                      country_code='us',
                      geometry='LINESTRING(9.995 10, 10.005 10)')
    apiobj.add_tiger(place_id=992,
                     parent_place_id=990,
                     startnumber=1, endnumber=3, step=1,
                     centroid=(10.0, 10.00001),
                     geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
    assert apiobj.api.reverse((10.0, 10.0)).place_id == 992
    assert apiobj.api.reverse((10.0, 10.00001)).place_id == 992
def test_reverse_point_tiger(apiobj):
    """ A Tiger interpolation degenerated to a single point yields
        exactly its one house number.
    """
    apiobj.add_placex(place_id=990, class_='highway', type='service',
                      rank_search=27, rank_address=27,
                      name = {'name': 'My Street'},
                      centroid=(10.0, 10.0),
                      country_code='us',
                      geometry='LINESTRING(9.995 10, 10.005 10)')
    apiobj.add_tiger(place_id=992,
                     parent_place_id=990,
                     startnumber=1, endnumber=1, step=1,
                     centroid=(10.0, 10.00001),
                     geometry='POINT(10.0 10.00001)')
    res = apiobj.api.reverse((10.0, 10.0))
    assert res.place_id == 992
    assert res.housenumber == '1'
def test_reverse_low_zoom_address(apiobj):
    """ With a low max_rank, the enclosing town is returned instead of
        the house at the same location.
    """
    apiobj.add_placex(place_id=1001, class_='place', type='house',
                      housenumber='1',
                      rank_address=30,
                      rank_search=30,
                      centroid=(59.3, 80.70001))
    apiobj.add_placex(place_id=1002, class_='place', type='town',
                      name={'name': 'Town'},
                      rank_address=16,
                      rank_search=16,
                      centroid=(59.3, 80.70001),
                      geometry="""POLYGON((59.3 80.70001, 59.3001 80.70001,
                                        59.3001 80.70101, 59.3 80.70101, 59.3 80.70001))""")
    assert apiobj.api.reverse((59.30005, 80.7005)).place_id == 1001
    assert apiobj.api.reverse((59.30005, 80.7005), max_rank=18).place_id == 1002
def test_reverse_place_node_in_area(apiobj):
    """ A higher-ranked place node inside an area takes precedence over
        the area itself.
    """
    apiobj.add_placex(place_id=1002, class_='place', type='town',
                      name={'name': 'Town Area'},
                      rank_address=16,
                      rank_search=16,
                      centroid=(59.3, 80.70001),
                      geometry="""POLYGON((59.3 80.70001, 59.3001 80.70001,
                                        59.3001 80.70101, 59.3 80.70101, 59.3 80.70001))""")
    apiobj.add_placex(place_id=1003, class_='place', type='suburb',
                      name={'name': 'Suburb Point'},
                      osm_type='N',
                      rank_address=18,
                      rank_search=18,
                      centroid=(59.30004, 80.70055))
    assert apiobj.api.reverse((59.30004, 80.70055)).place_id == 1003
@pytest.mark.parametrize('layer,place_id', [(napi.DataLayer.MANMADE, 225),
                                            (napi.DataLayer.RAILWAY, 226),
                                            (napi.DataLayer.NATURAL, 227),
                                            (napi.DataLayer.MANMADE | napi.DataLayer.RAILWAY, 225),
                                            (napi.DataLayer.MANMADE | napi.DataLayer.NATURAL, 225)])
def test_reverse_larger_area_layers(apiobj, layer, place_id):
    """ Layer restrictions also apply to larger (non-rank-30) objects. """
    apiobj.add_placex(place_id=225, class_='man_made', type='dam',
                      name={'name': 'Dam'},
                      rank_address=0,
                      rank_search=25,
                      centroid=(1.3, 0.70003))
    apiobj.add_placex(place_id=226, class_='railway', type='yard',
                      name={'name': 'Dam'},
                      rank_address=0,
                      rank_search=20,
                      centroid=(1.3, 0.70004))
    apiobj.add_placex(place_id=227, class_='natural', type='spring',
                      name={'name': 'Dam'},
                      rank_address=0,
                      rank_search=16,
                      centroid=(1.3, 0.70005))
    assert apiobj.api.reverse((1.3, 0.7), layers=layer).place_id == place_id
def test_reverse_country_lookup_no_objects(apiobj):
    """ A country area alone, without any placex object, yields no result. """
    apiobj.add_country('xx', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
    assert apiobj.api.reverse((0.5, 0.5)) is None
@pytest.mark.parametrize('rank', [4, 30])
def test_reverse_country_lookup_country_only(apiobj, rank):
    """ The country object is returned for a point inside the country
        area, at any requested max_rank.
    """
    apiobj.add_country('xx', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
    apiobj.add_placex(place_id=225, class_='place', type='country',
                      name={'name': 'My Country'},
                      rank_address=4,
                      rank_search=4,
                      country_code='xx',
                      centroid=(0.7, 0.7))
    assert apiobj.api.reverse((0.5, 0.5), max_rank=rank).place_id == 225
def test_reverse_country_lookup_place_node_inside(apiobj):
    """ A place node within the country area is preferred over the
        country itself.
    """
    apiobj.add_country('xx', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
    apiobj.add_placex(place_id=225, class_='place', type='state',
                      osm_type='N',
                      name={'name': 'My State'},
                      rank_address=6,
                      rank_search=6,
                      country_code='xx',
                      centroid=(0.5, 0.505))
    assert apiobj.api.reverse((0.5, 0.5)).place_id == 225
@pytest.mark.parametrize('gtype', list(napi.GeometryFormat))
def test_reverse_geometry_output_placex(apiobj, gtype):
    """ Reverse lookup must work with every geometry output format,
        both for rank-30 objects and country-area fallbacks.
    """
    apiobj.add_country('xx', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
    apiobj.add_placex(place_id=1001, class_='place', type='house',
                      housenumber='1',
                      rank_address=30,
                      rank_search=30,
                      centroid=(59.3, 80.70001))
    apiobj.add_placex(place_id=1003, class_='place', type='suburb',
                      name={'name': 'Suburb Point'},
                      osm_type='N',
                      rank_address=18,
                      rank_search=18,
                      country_code='xx',
                      centroid=(0.5, 0.5))
    assert apiobj.api.reverse((59.3, 80.70001), geometry_output=gtype).place_id == 1001
    assert apiobj.api.reverse((0.5, 0.5), geometry_output=gtype).place_id == 1003
def test_reverse_simplified_geometry(apiobj):
    """ Geometry simplification must not break reverse lookup. """
    apiobj.add_placex(place_id=1001, class_='place', type='house',
                      housenumber='1',
                      rank_address=30,
                      rank_search=30,
                      centroid=(59.3, 80.70001))
    details = dict(geometry_output=napi.GeometryFormat.GEOJSON,
                   geometry_simplification=0.1)
    assert apiobj.api.reverse((59.3, 80.70001), **details).place_id == 1001
def test_reverse_interpolation_geometry(apiobj):
    """ For interpolation results, the geometry output is the point of
        the matched house number, not the whole line.
    """
    apiobj.add_osmline(place_id=992,
                       parent_place_id=990,
                       startnumber=1, endnumber=3, step=1,
                       centroid=(10.0, 10.00001),
                       geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
    assert apiobj.api.reverse((10.0, 10.0), geometry_output=napi.GeometryFormat.TEXT)\
                     .geometry['text'] == 'POINT(10 10.00001)'
def test_reverse_tiger_geometry(apiobj):
    """ For Tiger results, the geometry output is the point of the
        matched house number, not the whole line.
    """
    apiobj.add_placex(place_id=990, class_='highway', type='service',
                      rank_search=27, rank_address=27,
                      name = {'name': 'My Street'},
                      centroid=(10.0, 10.0),
                      country_code='us',
                      geometry='LINESTRING(9.995 10, 10.005 10)')
    apiobj.add_tiger(place_id=992,
                     parent_place_id=990,
                     startnumber=1, endnumber=3, step=1,
                     centroid=(10.0, 10.00001),
                     geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')
    output = apiobj.api.reverse((10.0, 10.0),
                                geometry_output=napi.GeometryFormat.GEOJSON).geometry['geojson']
    assert json.loads(output) == {'coordinates': [10, 10.00001], 'type': 'Point'}
| 14,724 | 41.805233 | 111 | py |
Nominatim | Nominatim-master/test/python/api/test_localization.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test functions for adapting results to the user's locale.
"""
import pytest
from nominatim.api import Locales
def test_display_name_empty_names():
    """ A missing or empty name dict yields an empty display name. """
    locale = Locales(['en', 'de'])
    assert locale.display_name({}) == ''
    assert locale.display_name(None) == ''
def test_display_name_none_localized():
    """ Without language preferences, 'name' wins and 'ref' is a fallback. """
    locale = Locales()
    assert locale.display_name({'ref': '34', 'name:de': 'DE'}) == '34'
    assert locale.display_name({'name:de': 'DE', 'name': 'ALL'}) == 'ALL'
    assert locale.display_name({}) == ''
def test_display_name_localized():
    """ A name in a preferred language beats plain 'name' and 'ref'. """
    locale = Locales(['en', 'de'])
    assert locale.display_name({'ref': '34', 'name:de': 'DE'}) == 'DE'
    assert locale.display_name({'name:de': 'DE', 'name': 'ALL'}) == 'DE'
    assert locale.display_name({}) == ''
def test_display_name_preference():
    """ The order of preferred languages decides between localized names. """
    locale = Locales(['en', 'de'])
    assert locale.display_name({'official_name:en': 'EN', 'name:de': 'DE'}) == 'DE'
    assert locale.display_name({'name:de': 'DE', 'name:en': 'EN'}) == 'EN'
    assert locale.display_name({}) == ''
@pytest.mark.parametrize('langstr,langlist',
                         [('fr', ['fr']),
                          ('fr-FR', ['fr-FR', 'fr']),
                          ('de,fr-FR', ['de', 'fr-FR', 'fr']),
                          ('fr,de,fr-FR', ['fr', 'de', 'fr-FR']),
                          ('en;q=0.5,fr', ['fr', 'en']),
                          ('en;q=0.5,fr,en-US', ['fr', 'en-US', 'en']),
                          ('en,fr;garbage,de', ['en', 'de'])])
def test_from_language_preferences(langstr, langlist):
    """ HTTP accept-language strings are parsed into an ordered list. """
    locales = Locales.from_accept_languages(langstr)
    assert locales.languages == langlist
| 1,783 | 32.037037 | 78 | py |
Nominatim | Nominatim-master/test/python/api/test_api_search.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for search API calls.
These tests make sure that all Python code is correct and executable.
Functional tests can be found in the BDD test suite.
"""
import json
import pytest
import sqlalchemy as sa
import nominatim.api as napi
import nominatim.api.logging as loglib
@pytest.fixture(autouse=True)
def setup_icu_tokenizer(apiobj):
    """ Setup the properties needed for using the ICU tokenizer.
    """
    apiobj.add_data('properties',
                    [{'property': 'tokenizer', 'value': 'icu'},
                     {'property': 'tokenizer_import_normalisation', 'value': ':: lower();'},
                     {'property': 'tokenizer_import_transliteration', 'value': "'1' > '/1/'; 'ä' > 'ä '"},
                    ])
def test_search_no_content(apiobj, table_factory):
    """ Search against an empty word table must return no results. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB')
    assert apiobj.api.search('foo') == []
def test_search_simple_word(apiobj, table_factory):
    """ A single-word query finds the place indexed under that word. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB',
                  content=[(55, 'test', 'W', 'test', None),
                           (2, 'test', 'w', 'test', None)])
    apiobj.add_placex(place_id=444, class_='place', type='village',
                      centroid=(1.3, 0.7))
    apiobj.add_search_name(444, names=[2, 55])
    results = apiobj.api.search('TEST')
    assert [r.place_id for r in results] == [444]
@pytest.mark.parametrize('logtype', ['text', 'html'])
def test_search_with_debug(apiobj, table_factory, logtype):
    """ Running a search with logging enabled must produce log output
        for both supported log formats.
    """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB',
                  content=[(55, 'test', 'W', 'test', None),
                           (2, 'test', 'w', 'test', None)])
    apiobj.add_placex(place_id=444, class_='place', type='village',
                      centroid=(1.3, 0.7))
    apiobj.add_search_name(444, names=[2, 55])
    loglib.set_log_output(logtype)
    # Only the side effect on the log buffer matters here; the search
    # results themselves are checked by test_search_simple_word.
    apiobj.api.search('TEST')
    assert loglib.get_and_disable()
def test_address_no_content(apiobj, table_factory):
    """ A structured address search on an empty word table yields nothing. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB')
    assert apiobj.api.search_address(amenity='hotel',
                                     street='Main St 34',
                                     city='Happyville',
                                     county='Wideland',
                                     state='Praerie',
                                     postalcode='55648',
                                     country='xx') == []
@pytest.mark.parametrize('atype,address,search', [('street', 26, 26),
                                                  ('city', 16, 18),
                                                  ('county', 12, 12),
                                                  ('state', 8, 8)])
def test_address_simple_places(apiobj, table_factory, atype, address, search):
    """ Each structured address field finds a place with matching ranks. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB',
                  content=[(55, 'test', 'W', 'test', None),
                           (2, 'test', 'w', 'test', None)])
    apiobj.add_placex(place_id=444,
                      rank_address=address, rank_search=search,
                      centroid=(1.3, 0.7))
    apiobj.add_search_name(444, names=[2, 55], address_rank=address, search_rank=search)
    results = apiobj.api.search_address(**{atype: 'TEST'})
    assert [r.place_id for r in results] == [444]
def test_address_country(apiobj, table_factory):
    """ A country-only structured search finds the country by its code. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB',
                  content=[(None, 'ro', 'C', 'ro', None)])
    apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
    apiobj.add_country_name('ro', {'name': 'România'})
    assert len(apiobj.api.search_address(country='ro')) == 1
def test_category_no_categories(apiobj, table_factory):
    """ An empty category list must yield an empty result. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB')
    assert apiobj.api.search_category([], near_query='Berlin') == []
def test_category_no_content(apiobj, table_factory):
    """ A category search on an empty database yields no results. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB')
    assert apiobj.api.search_category([('amenity', 'restaurant')]) == []
def test_category_simple_restaurant(apiobj, table_factory):
    """ A category search with a near point finds a matching POI nearby. """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB')
    apiobj.add_placex(place_id=444, class_='amenity', type='restaurant',
                      centroid=(1.3, 0.7))
    apiobj.add_search_name(444, names=[2, 55], address_rank=16, search_rank=18)
    results = apiobj.api.search_category([('amenity', 'restaurant')],
                                         near=(1.3, 0.701), near_radius=0.015)
    assert [r.place_id for r in results] == [444]
def test_category_with_search_phrase(apiobj, table_factory):
    """ With a near_query phrase, the search anchors on the named place
        and returns the POI near it rather than the place itself.
    """
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB',
                  content=[(55, 'test', 'W', 'test', None),
                           (2, 'test', 'w', 'test', None)])
    apiobj.add_placex(place_id=444, class_='place', type='village',
                      rank_address=16, rank_search=18,
                      centroid=(1.3, 0.7))
    apiobj.add_search_name(444, names=[2, 55], address_rank=16, search_rank=18)
    apiobj.add_placex(place_id=95, class_='amenity', type='restaurant',
                      centroid=(1.3, 0.7003))
    results = apiobj.api.search_category([('amenity', 'restaurant')],
                                         near_query='TEST')
    assert [r.place_id for r in results] == [95]
| 6,198 | 37.74375 | 106 | py |
Nominatim | Nominatim-master/test/python/api/test_api_connection.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for enhanced connection class for API functions.
"""
from pathlib import Path
import pytest
import pytest_asyncio
import sqlalchemy as sa
from nominatim.api import NominatimAPIAsync
@pytest_asyncio.fixture
async def apiobj(temp_db):
    """ Create an asynchronous SQLAlchemy engine for the test DB.
    """
    api = NominatimAPIAsync(Path('/invalid'), {})
    yield api
    # Close after the test so the connection pool does not leak.
    await api.close()
@pytest.mark.asyncio
async def test_run_scalar(apiobj, table_factory):
    """ scalar() returns the single value of a one-row query. """
    table_factory('foo', definition='that TEXT', content=(('a', ),))
    async with apiobj.begin() as conn:
        assert await conn.scalar(sa.text('SELECT * FROM foo')) == 'a'
@pytest.mark.asyncio
async def test_run_execute(apiobj, table_factory):
    """ execute() must return a result object over the selected rows. """
    table_factory('foo', definition='that TEXT', content=(('a', ),))

    async with apiobj.begin() as db:
        rows = await db.execute(sa.text('SELECT * FROM foo'))
        assert rows.fetchone()[0] == 'a'
@pytest.mark.asyncio
async def test_get_property_existing_cached(apiobj, table_factory):
    """ A property must still be served from the cache after the
        underlying table has been emptied.
    """
    table_factory('nominatim_properties',
                  definition='property TEXT, value TEXT',
                  content=(('dbv', '96723'), ))

    async with apiobj.begin() as db:
        # first access fills the cache
        assert await db.get_property('dbv') == '96723'

        await db.execute(sa.text('TRUNCATE nominatim_properties'))

        # second access must not hit the (now empty) table
        assert await db.get_property('dbv') == '96723'
@pytest.mark.asyncio
async def test_get_property_existing_uncached(apiobj, table_factory):
    """ With cached=False the property must be re-read from the table. """
    table_factory('nominatim_properties',
                  definition='property TEXT, value TEXT',
                  content=(('dbv', '96723'), ))

    async with apiobj.begin() as db:
        assert await db.get_property('dbv') == '96723'

        await db.execute(sa.text("UPDATE nominatim_properties SET value = '1'"))

        assert await db.get_property('dbv', cached=False) == '1'
@pytest.mark.asyncio
@pytest.mark.parametrize('param', ['foo', 'DB:server_version'])
async def test_get_property_missing(apiobj, table_factory, param):
    """ Unknown property names must raise a ValueError. """
    table_factory('nominatim_properties',
                  definition='property TEXT, value TEXT')

    async with apiobj.begin() as db:
        with pytest.raises(ValueError):
            await db.get_property(param)
@pytest.mark.asyncio
async def test_get_db_property_existing(apiobj):
    """ The server version must be available as a positive DB property. """
    async with apiobj.begin() as db:
        assert await db.get_db_property('server_version') > 0
@pytest.mark.asyncio
async def test_get_db_property_bad_name(apiobj):
    """ Unknown DB property names must raise a ValueError.

        Renamed: this test was previously also called
        'test_get_db_property_existing', which shadowed the test above,
        so pytest never collected or ran the positive test.
    """
    async with apiobj.begin() as conn:
        with pytest.raises(ValueError):
            await conn.get_db_property('dfkgjd.rijg')
| 2,872 | 29.56383 | 82 | py |
Nominatim | Nominatim-master/test/python/api/test_api_details.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for details API call.
"""
import datetime as dt
import pytest
import nominatim.api as napi
@pytest.mark.parametrize('idobj', (napi.PlaceID(332), napi.OsmID('W', 4),
                                   napi.OsmID('W', 4, 'highway')))
def test_lookup_in_placex(apiobj, idobj):
    """ A fully populated placex row must be retrievable via place ID
        and via OSM ID (with or without a class hint) and every column
        must be mapped onto the result object.
    """
    import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0)
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential',
                      name={'name': 'Road'}, address={'city': 'Barrow'},
                      extratags={'surface': 'paved'},
                      parent_place_id=34, linked_place_id=55,
                      admin_level=15, country_code='gb',
                      housenumber='4',
                      postcode='34425', wikipedia='en:Faa',
                      rank_search=27, rank_address=26,
                      importance=0.01,
                      centroid=(23, 34),
                      indexed_date=import_date,
                      geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')
    result = apiobj.api.details(idobj)
    assert result is not None
    assert result.source_table.name == 'PLACEX'
    assert result.category == ('highway', 'residential')
    assert result.centroid == (pytest.approx(23.0), pytest.approx(34.0))
    assert result.place_id == 332
    assert result.parent_place_id == 34
    assert result.linked_place_id == 55
    assert result.osm_object == ('W', 4)
    assert result.admin_level == 15
    assert result.names == {'name': 'Road'}
    assert result.address == {'city': 'Barrow'}
    assert result.extratags == {'surface': 'paved'}
    assert result.housenumber == '4'
    assert result.postcode == '34425'
    assert result.wikipedia == 'en:Faa'
    assert result.rank_search == 27
    assert result.rank_address == 26
    assert result.importance == pytest.approx(0.01)
    assert result.country_code == 'gb'
    # timestamps come back timezone-aware in UTC
    assert result.indexed_date == import_date.replace(tzinfo=dt.timezone.utc)
    # optional detail sections stay off unless explicitly requested
    assert result.address_rows is None
    assert result.linked_rows is None
    assert result.parented_rows is None
    assert result.name_keywords is None
    assert result.address_keywords is None
    assert result.geometry == {'type': 'ST_LineString'}
def test_lookup_in_placex_minimal_info(apiobj):
    """ Columns left unset in placex must come back as None (or a
        sensible default) on the result object.
    """
    import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0)
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential',
                      admin_level=15,
                      rank_search=27, rank_address=26,
                      centroid=(23, 34),
                      indexed_date=import_date,
                      geometry='LINESTRING(23 34, 23.1 34, 23.1 34.1, 23 34)')
    result = apiobj.api.details(napi.PlaceID(332))
    assert result is not None
    assert result.source_table.name == 'PLACEX'
    assert result.category == ('highway', 'residential')
    assert result.centroid == (pytest.approx(23.0), pytest.approx(34.0))
    assert result.place_id == 332
    assert result.parent_place_id is None
    assert result.linked_place_id is None
    assert result.osm_object == ('W', 4)
    assert result.admin_level == 15
    assert result.names is None
    assert result.address is None
    assert result.extratags is None
    assert result.housenumber is None
    assert result.postcode is None
    assert result.wikipedia is None
    assert result.rank_search == 27
    assert result.rank_address == 26
    assert result.importance is None
    assert result.country_code is None
    assert result.indexed_date == import_date.replace(tzinfo=dt.timezone.utc)
    assert result.address_rows is None
    assert result.linked_rows is None
    assert result.parented_rows is None
    assert result.name_keywords is None
    assert result.address_keywords is None
    assert result.geometry == {'type': 'ST_LineString'}
def test_lookup_in_placex_with_geometry(apiobj):
    """ Requesting GeoJSON output must return the full geometry. """
    apiobj.add_placex(place_id=332,
                      geometry='LINESTRING(23 34, 23.1 34)')

    details = apiobj.api.details(napi.PlaceID(332),
                                 geometry_output=napi.GeometryFormat.GEOJSON)

    assert details.geometry == {'geojson': '{"type":"LineString","coordinates":[[23,34],[23.1,34]]}'}
def test_lookup_placex_with_address_details(apiobj):
    """ With address_details=True the result must carry the full list of
        address lines: the place itself, its address parts ordered by
        falling address rank, and a synthetic country_code entry.
    """
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential', name='Street',
                      country_code='pl',
                      rank_search=27, rank_address=26)
    apiobj.add_address_placex(332, fromarea=False, isaddress=False,
                              distance=0.0034,
                              place_id=1000, osm_type='N', osm_id=3333,
                              class_='place', type='suburb', name='Smallplace',
                              country_code='pl', admin_level=13,
                              rank_search=24, rank_address=23)
    apiobj.add_address_placex(332, fromarea=True, isaddress=True,
                              place_id=1001, osm_type='N', osm_id=3334,
                              class_='place', type='city', name='Bigplace',
                              country_code='pl',
                              rank_search=17, rank_address=16)
    result = apiobj.api.details(napi.PlaceID(332), address_details=True)
    assert result.address_rows == [
               napi.AddressLine(place_id=332, osm_object=('W', 4),
                                category=('highway', 'residential'),
                                names={'name': 'Street'}, extratags={},
                                admin_level=15, fromarea=True, isaddress=True,
                                rank_address=26, distance=0.0),
               napi.AddressLine(place_id=1000, osm_object=('N', 3333),
                                category=('place', 'suburb'),
                                names={'name': 'Smallplace'}, extratags={},
                                admin_level=13, fromarea=False, isaddress=True,
                                rank_address=23, distance=0.0034),
               napi.AddressLine(place_id=1001, osm_object=('N', 3334),
                                category=('place', 'city'),
                                names={'name': 'Bigplace'}, extratags={},
                                admin_level=15, fromarea=True, isaddress=True,
                                rank_address=16, distance=0.0),
               napi.AddressLine(place_id=None, osm_object=None,
                                category=('place', 'country_code'),
                                names={'ref': 'pl'}, extratags={},
                                admin_level=None, fromarea=True, isaddress=False,
                                rank_address=4, distance=0.0)
           ]
def test_lookup_place_with_linked_places_none_existing(apiobj):
    """ When no other place links to the result, linked_rows is empty. """
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential', name='Street',
                      country_code='pl', linked_place_id=45,
                      rank_search=27, rank_address=26)

    details = apiobj.api.details(napi.PlaceID(332), linked_places=True)

    assert details.linked_rows == []
def test_lookup_place_with_linked_places_existing(apiobj):
    """ linked_places=True must return all places whose linked_place_id
        points at the looked-up place, as AddressLines.
    """
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential', name='Street',
                      country_code='pl', linked_place_id=45,
                      rank_search=27, rank_address=26)
    apiobj.add_placex(place_id=1001, osm_type='W', osm_id=5,
                      class_='highway', type='residential', name='Street',
                      country_code='pl', linked_place_id=332,
                      rank_search=27, rank_address=26)
    apiobj.add_placex(place_id=1002, osm_type='W', osm_id=6,
                      class_='highway', type='residential', name='Street',
                      country_code='pl', linked_place_id=332,
                      rank_search=27, rank_address=26)
    result = apiobj.api.details(napi.PlaceID(332), linked_places=True)
    assert result.linked_rows == [
               napi.AddressLine(place_id=1001, osm_object=('W', 5),
                                category=('highway', 'residential'),
                                names={'name': 'Street'}, extratags={},
                                admin_level=15, fromarea=False, isaddress=True,
                                rank_address=26, distance=0.0),
               napi.AddressLine(place_id=1002, osm_object=('W', 6),
                                category=('highway', 'residential'),
                                names={'name': 'Street'}, extratags={},
                                admin_level=15, fromarea=False, isaddress=True,
                                rank_address=26, distance=0.0),
           ]
def test_lookup_place_with_parented_places_not_existing(apiobj):
    """ When the place has no children, parented_rows is empty. """
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential', name='Street',
                      country_code='pl', parent_place_id=45,
                      rank_search=27, rank_address=26)

    details = apiobj.api.details(napi.PlaceID(332), parented_places=True)

    assert details.parented_rows == []
def test_lookup_place_with_parented_places_existing(apiobj):
    """ parented_places=True must return child places; per the expected
        result only the house-type child is reported, the child street
        (place_id 1002) is filtered out.
    """
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential', name='Street',
                      country_code='pl', parent_place_id=45,
                      rank_search=27, rank_address=26)
    apiobj.add_placex(place_id=1001, osm_type='N', osm_id=5,
                      class_='place', type='house', housenumber='23',
                      country_code='pl', parent_place_id=332,
                      rank_search=30, rank_address=30)
    apiobj.add_placex(place_id=1002, osm_type='W', osm_id=6,
                      class_='highway', type='residential', name='Street',
                      country_code='pl', parent_place_id=332,
                      rank_search=27, rank_address=26)
    result = apiobj.api.details(napi.PlaceID(332), parented_places=True)
    assert result.parented_rows == [
               napi.AddressLine(place_id=1001, osm_object=('N', 5),
                                category=('place', 'house'),
                                names={'housenumber': '23'}, extratags={},
                                admin_level=15, fromarea=False, isaddress=True,
                                rank_address=30, distance=0.0),
           ]
@pytest.mark.parametrize('idobj', (napi.PlaceID(4924), napi.OsmID('W', 9928)))
def test_lookup_in_osmline(apiobj, idobj):
    """ Address interpolations must be found by place ID and OSM ID.
        The interpolation bounds are reported via extratags and the
        centroid is the middle of the line.
    """
    import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0)
    apiobj.add_osmline(place_id=4924, osm_id=9928,
                       parent_place_id=12,
                       startnumber=1, endnumber=4, step=1,
                       country_code='gb', postcode='34425',
                       address={'city': 'Big'},
                       indexed_date=import_date,
                       geometry='LINESTRING(23 34, 23 35)')
    result = apiobj.api.details(idobj)
    assert result is not None
    assert result.source_table.name == 'OSMLINE'
    assert result.category == ('place', 'houses')
    assert result.centroid == (pytest.approx(23.0), pytest.approx(34.5))
    assert result.place_id == 4924
    assert result.parent_place_id == 12
    assert result.linked_place_id is None
    assert result.osm_object == ('W', 9928)
    assert result.admin_level == 15
    assert result.names is None
    assert result.address == {'city': 'Big'}
    # start/end/step of the interpolation are exposed as extratags
    assert result.extratags == {'startnumber': '1', 'endnumber': '4', 'step': '1'}
    assert result.housenumber is None
    assert result.postcode == '34425'
    assert result.wikipedia is None
    assert result.rank_search == 30
    assert result.rank_address == 30
    assert result.importance is None
    assert result.country_code == 'gb'
    assert result.indexed_date == import_date.replace(tzinfo=dt.timezone.utc)
    assert result.address_rows is None
    assert result.linked_rows is None
    assert result.parented_rows is None
    assert result.name_keywords is None
    assert result.address_keywords is None
    assert result.geometry == {'type': 'ST_LineString'}
def test_lookup_in_osmline_split_interpolation(apiobj):
    """ When an interpolation way is split over multiple database rows,
        a house-number lookup must return the best matching row.
    """
    apiobj.add_osmline(place_id=1000, osm_id=9,
                       startnumber=2, endnumber=4, step=1)
    apiobj.add_osmline(place_id=1001, osm_id=9,
                       startnumber=6, endnumber=9, step=1)
    apiobj.add_osmline(place_id=1002, osm_id=9,
                       startnumber=11, endnumber=20, step=1)

    for hnr_range, expected_id in ((range(1, 6), 1000),
                                   (range(7, 11), 1001),
                                   (range(12, 22), 1002)):
        for hnr in hnr_range:
            place = apiobj.api.details(napi.OsmID('W', 9, str(hnr)))
            assert place.place_id == expected_id
def test_lookup_osmline_with_address_details(apiobj):
    """ Address details of an interpolation must start with a synthetic
        house_number line (for the start number), followed by the parent
        street's address chain and the country_code entry.
    """
    apiobj.add_osmline(place_id=9000, osm_id=9,
                       startnumber=2, endnumber=4, step=1,
                       parent_place_id=332)
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential', name='Street',
                      country_code='pl',
                      rank_search=27, rank_address=26)
    apiobj.add_address_placex(332, fromarea=False, isaddress=False,
                              distance=0.0034,
                              place_id=1000, osm_type='N', osm_id=3333,
                              class_='place', type='suburb', name='Smallplace',
                              country_code='pl', admin_level=13,
                              rank_search=24, rank_address=23)
    apiobj.add_address_placex(332, fromarea=True, isaddress=True,
                              place_id=1001, osm_type='N', osm_id=3334,
                              class_='place', type='city', name='Bigplace',
                              country_code='pl',
                              rank_search=17, rank_address=16)
    result = apiobj.api.details(napi.PlaceID(9000), address_details=True)
    assert result.address_rows == [
               napi.AddressLine(place_id=None, osm_object=None,
                                category=('place', 'house_number'),
                                names={'ref': '2'}, extratags={},
                                admin_level=None, fromarea=True, isaddress=True,
                                rank_address=28, distance=0.0),
               napi.AddressLine(place_id=332, osm_object=('W', 4),
                                category=('highway', 'residential'),
                                names={'name': 'Street'}, extratags={},
                                admin_level=15, fromarea=True, isaddress=True,
                                rank_address=26, distance=0.0),
               napi.AddressLine(place_id=1000, osm_object=('N', 3333),
                                category=('place', 'suburb'),
                                names={'name': 'Smallplace'}, extratags={},
                                admin_level=13, fromarea=False, isaddress=True,
                                rank_address=23, distance=0.0034),
               napi.AddressLine(place_id=1001, osm_object=('N', 3334),
                                category=('place', 'city'),
                                names={'name': 'Bigplace'}, extratags={},
                                admin_level=15, fromarea=True, isaddress=True,
                                rank_address=16, distance=0.0),
               napi.AddressLine(place_id=None, osm_object=None,
                                category=('place', 'country_code'),
                                names={'ref': 'pl'}, extratags={},
                                admin_level=None, fromarea=True, isaddress=False,
                                rank_address=4, distance=0.0)
           ]
def test_lookup_in_tiger(apiobj):
    """ TIGER interpolations must be reported like OSM interpolations,
        with the country fixed to 'us' and the OSM object taken from
        the parent street.
    """
    apiobj.add_tiger(place_id=4924,
                     parent_place_id=12,
                     startnumber=1, endnumber=4, step=1,
                     postcode='34425',
                     geometry='LINESTRING(23 34, 23 35)')
    # NOTE(review): other tests pass class_=/type= to add_placex; confirm
    # the fixture also accepts a combined 'category' keyword.
    apiobj.add_placex(place_id=12,
                      category=('highway', 'residential'),
                      osm_type='W', osm_id=6601223,
                      geometry='LINESTRING(23 34, 23 35)')
    result = apiobj.api.details(napi.PlaceID(4924))
    assert result is not None
    assert result.source_table.name == 'TIGER'
    assert result.category == ('place', 'houses')
    assert result.centroid == (pytest.approx(23.0), pytest.approx(34.5))
    assert result.place_id == 4924
    assert result.parent_place_id == 12
    assert result.linked_place_id is None
    # the OSM id of the parent street is reported for the TIGER row
    assert result.osm_object == ('W', 6601223)
    assert result.admin_level == 15
    assert result.names is None
    assert result.address is None
    assert result.extratags == {'startnumber': '1', 'endnumber': '4', 'step': '1'}
    assert result.housenumber is None
    assert result.postcode == '34425'
    assert result.wikipedia is None
    assert result.rank_search == 30
    assert result.rank_address == 30
    assert result.importance is None
    assert result.country_code == 'us'
    assert result.indexed_date is None
    assert result.address_rows is None
    assert result.linked_rows is None
    assert result.parented_rows is None
    assert result.name_keywords is None
    assert result.address_keywords is None
    assert result.geometry == {'type': 'ST_LineString'}
def test_lookup_tiger_with_address_details(apiobj):
    """ Address details of a TIGER interpolation must look like those of
        an OSM interpolation: synthetic house_number line, parent street
        chain, country_code entry.
    """
    apiobj.add_tiger(place_id=9000,
                     startnumber=2, endnumber=4, step=1,
                     parent_place_id=332)
    apiobj.add_placex(place_id=332, osm_type='W', osm_id=4,
                      class_='highway', type='residential', name='Street',
                      country_code='us',
                      rank_search=27, rank_address=26)
    apiobj.add_address_placex(332, fromarea=False, isaddress=False,
                              distance=0.0034,
                              place_id=1000, osm_type='N', osm_id=3333,
                              class_='place', type='suburb', name='Smallplace',
                              country_code='us', admin_level=13,
                              rank_search=24, rank_address=23)
    apiobj.add_address_placex(332, fromarea=True, isaddress=True,
                              place_id=1001, osm_type='N', osm_id=3334,
                              class_='place', type='city', name='Bigplace',
                              country_code='us',
                              rank_search=17, rank_address=16)
    result = apiobj.api.details(napi.PlaceID(9000), address_details=True)
    assert result.address_rows == [
               napi.AddressLine(place_id=None, osm_object=None,
                                category=('place', 'house_number'),
                                names={'ref': '2'}, extratags={},
                                admin_level=None, fromarea=True, isaddress=True,
                                rank_address=28, distance=0.0),
               napi.AddressLine(place_id=332, osm_object=('W', 4),
                                category=('highway', 'residential'),
                                names={'name': 'Street'}, extratags={},
                                admin_level=15, fromarea=True, isaddress=True,
                                rank_address=26, distance=0.0),
               napi.AddressLine(place_id=1000, osm_object=('N', 3333),
                                category=('place', 'suburb'),
                                names={'name': 'Smallplace'}, extratags={},
                                admin_level=13, fromarea=False, isaddress=True,
                                rank_address=23, distance=0.0034),
               napi.AddressLine(place_id=1001, osm_object=('N', 3334),
                                category=('place', 'city'),
                                names={'name': 'Bigplace'}, extratags={},
                                admin_level=15, fromarea=True, isaddress=True,
                                rank_address=16, distance=0.0),
               napi.AddressLine(place_id=None, osm_object=None,
                                category=('place', 'country_code'),
                                names={'ref': 'us'}, extratags={},
                                admin_level=None, fromarea=True, isaddress=False,
                                rank_address=4, distance=0.0)
           ]
def test_lookup_in_postcode(apiobj):
    """ Postcode rows must map onto the result object with the postcode
        itself reported as the 'ref' name.
    """
    import_date = dt.datetime(2022, 12, 7, 14, 14, 46, 0)
    apiobj.add_postcode(place_id=554,
                        parent_place_id=152,
                        postcode='34 425',
                        country_code='gb',
                        rank_search=20, rank_address=22,
                        indexed_date=import_date,
                        geometry='POINT(-9.45 5.6)')
    result = apiobj.api.details(napi.PlaceID(554))
    assert result is not None
    assert result.source_table.name == 'POSTCODE'
    assert result.category == ('place', 'postcode')
    assert result.centroid == (pytest.approx(-9.45), pytest.approx(5.6))
    assert result.place_id == 554
    assert result.parent_place_id == 152
    assert result.linked_place_id is None
    # postcode entries have no corresponding OSM object
    assert result.osm_object is None
    assert result.admin_level == 15
    assert result.names == {'ref': '34 425'}
    assert result.address is None
    assert result.extratags is None
    assert result.housenumber is None
    assert result.postcode is None
    assert result.wikipedia is None
    assert result.rank_search == 20
    assert result.rank_address == 22
    assert result.importance is None
    assert result.country_code == 'gb'
    assert result.indexed_date == import_date.replace(tzinfo=dt.timezone.utc)
    assert result.address_rows is None
    assert result.linked_rows is None
    assert result.parented_rows is None
    assert result.name_keywords is None
    assert result.address_keywords is None
    assert result.geometry == {'type': 'ST_Point'}
def test_lookup_postcode_with_address_details(apiobj):
    """ Address details of a postcode entry contain the parent's address
        chain plus a synthetic postcode line and the country_code entry.
    """
    apiobj.add_postcode(place_id=9000,
                        parent_place_id=332,
                        postcode='34 425',
                        country_code='gb',
                        rank_search=25, rank_address=25)
    apiobj.add_placex(place_id=332, osm_type='N', osm_id=3333,
                      class_='place', type='suburb', name='Smallplace',
                      country_code='gb', admin_level=13,
                      rank_search=24, rank_address=23)
    apiobj.add_address_placex(332, fromarea=True, isaddress=True,
                              place_id=1001, osm_type='N', osm_id=3334,
                              class_='place', type='city', name='Bigplace',
                              country_code='gb',
                              rank_search=17, rank_address=16)
    result = apiobj.api.details(napi.PlaceID(9000), address_details=True)
    assert result.address_rows == [
               napi.AddressLine(place_id=332, osm_object=('N', 3333),
                                category=('place', 'suburb'),
                                names={'name': 'Smallplace'}, extratags={},
                                admin_level=13, fromarea=True, isaddress=True,
                                rank_address=23, distance=0.0),
               napi.AddressLine(place_id=1001, osm_object=('N', 3334),
                                category=('place', 'city'),
                                names={'name': 'Bigplace'}, extratags={},
                                admin_level=15, fromarea=True, isaddress=True,
                                rank_address=16, distance=0.0),
               napi.AddressLine(place_id=None, osm_object=None,
                                category=('place', 'postcode'),
                                names={'ref': '34 425'}, extratags={},
                                admin_level=None, fromarea=False, isaddress=True,
                                rank_address=5, distance=0.0),
               napi.AddressLine(place_id=None, osm_object=None,
                                category=('place', 'country_code'),
                                names={'ref': 'gb'}, extratags={},
                                admin_level=None, fromarea=True, isaddress=False,
                                rank_address=4, distance=0.0)
           ]
@pytest.mark.parametrize('objid', [napi.PlaceID(1736),
                                   napi.OsmID('W', 55),
                                   napi.OsmID('N', 55, 'amenity')])
def test_lookup_missing_object(apiobj, objid):
    """ Lookup of unknown or mismatching IDs must yield None. """
    apiobj.add_placex(place_id=1, osm_type='N', osm_id=55,
                      class_='place', type='suburb')

    found = apiobj.api.details(objid)

    assert found is None
@pytest.mark.parametrize('gtype', (napi.GeometryFormat.KML,
                                   napi.GeometryFormat.SVG,
                                   napi.GeometryFormat.TEXT))
def test_lookup_unsupported_geometry(apiobj, gtype):
    """ Geometry output formats other than GeoJSON must be rejected. """
    apiobj.add_placex(place_id=332)

    with pytest.raises(ValueError):
        apiobj.api.details(napi.PlaceID(332), geometry_output=gtype)
| 25,663 | 43.633043 | 100 | py |
Nominatim | Nominatim-master/test/python/api/test_helpers_v1.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the helper functions for v1 API.
"""
import pytest
import nominatim.api.v1.helpers as helper
@pytest.mark.parametrize('inp', ['', 'abc', '12 23', 'abc -78.90, 12.456 def'])
def test_extract_coords_no_coords(inp):
    """ Queries without a leading or trailing coordinate are untouched. """
    remaining, x, y = helper.extract_coords_from_query(inp)

    assert remaining == inp
    assert x is None
    assert y is None
def test_extract_coords_null_island():
    """ Coordinates at (0, 0) must still be recognised. """
    assert helper.extract_coords_from_query('0.0 -0.0') == ('', 0.0, 0.0)


def test_extract_coords_with_text_before():
    """ A coordinate may trail arbitrary query text. """
    assert helper.extract_coords_from_query('abc -78.90, 12.456') == ('abc', 12.456, -78.90)


def test_extract_coords_with_text_after():
    """ A coordinate may precede arbitrary query text. """
    assert helper.extract_coords_from_query('-78.90, 12.456 abc') == ('abc', 12.456, -78.90)


@pytest.mark.parametrize('inp', [' [12.456,-78.90] ', ' 12.456,-78.90 '])
def test_extract_coords_with_spaces(inp):
    """ Surrounding whitespace and brackets must be ignored. """
    assert helper.extract_coords_from_query(inp) == ('', -78.90, 12.456)
# Note: the near-identical ' 40.446 , -79.982 ' entries towards the end may
# differ in (Unicode) space characters and are kept verbatim. One entry that
# was byte-for-byte duplicated ('N 40° 26\' 46", W 79° 58\' 56"') has been
# removed so each format is only tested once.
@pytest.mark.parametrize('inp', ['40 26.767 N 79 58.933 W',
                                 '40° 26.767′ N 79° 58.933′ W',
                                 "40° 26.767' N 79° 58.933' W",
                                 "40° 26.767'\n"
                                 " N 79° 58.933' W",
                                 'N 40 26.767, W 79 58.933',
                                 'N 40°26.767′, W 79°58.933′',
                                 ' N 40°26.767′, W 79°58.933′',
                                 "N 40°26.767', W 79°58.933'",
                                 '40 26 46 N 79 58 56 W',
                                 '40° 26′ 46″ N 79° 58′ 56″ W',
                                 '40° 26′ 46.00″ N 79° 58′ 56.00″ W',
                                 '40°26′46″N 79°58′56″W',
                                 'N 40 26 46 W 79 58 56',
                                 'N 40° 26′ 46″, W 79° 58′ 56″',
                                 'N 40° 26\' 46", W 79° 58\' 56"',
                                 '40.446 -79.982',
                                 '40.446,-79.982',
                                 '40.446° N 79.982° W',
                                 'N 40.446° W 79.982°',
                                 '[40.446 -79.982]',
                                 '[40.446,-79.982]',
                                 ' 40.446 , -79.982 ',
                                 ' 40.446 , -79.982 ',
                                 ' 40.446 , -79.982 ',
                                 ' 40.446, -79.982 '])
def test_extract_coords_formats(inp):
    """ All supported coordinate notations must parse to the same point,
        whether the coordinate stands alone, follows text or precedes it.
    """
    query, x, y = helper.extract_coords_from_query(inp)

    assert query == ''
    assert pytest.approx(x, abs=0.001) == -79.982
    assert pytest.approx(y, abs=0.001) == 40.446

    query, x, y = helper.extract_coords_from_query('foo bar ' + inp)

    assert query == 'foo bar'
    assert pytest.approx(x, abs=0.001) == -79.982
    assert pytest.approx(y, abs=0.001) == 40.446

    query, x, y = helper.extract_coords_from_query(inp + ' x')

    assert query == 'x'
    assert pytest.approx(x, abs=0.001) == -79.982
    assert pytest.approx(y, abs=0.001) == 40.446
def test_extract_coords_formats_southeast():
    """ Southern and eastern hemisphere markers must flip the signs. """
    remaining, x, y = helper.extract_coords_from_query('S 40 26.767, E 79 58.933')

    assert remaining == ''
    assert pytest.approx(x, abs=0.001) == 79.982
    assert pytest.approx(y, abs=0.001) == -40.446
@pytest.mark.parametrize('inp', ['[shop=fish] foo bar',
                                 'foo [shop=fish] bar',
                                 'foo [shop=fish]bar',
                                 'foo bar [shop=fish]'])
def test_extract_category_good(inp):
    """ A [key=value] term must be stripped wherever it appears. """
    remaining, key, value = helper.extract_category_from_query(inp)

    assert (remaining, key, value) == ('foo bar', 'shop', 'fish')
def test_extract_category_only():
    """ A query may consist of a category term only. """
    assert helper.extract_category_from_query('[shop=market]') == ('', 'shop', 'market')


@pytest.mark.parametrize('inp', ['house []', 'nothing', '[352]'])
def test_extract_category_no_match(inp):
    """ Malformed or absent category terms leave the query untouched. """
    assert helper.extract_category_from_query(inp) == (inp, None, None)
| 4,043 | 34.787611 | 94 | py |
Nominatim | Nominatim-master/test/python/api/search/test_icu_query_analyzer.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for query analyzer for ICU tokenizer.
"""
from pathlib import Path
import pytest
import pytest_asyncio
from nominatim.api import NominatimAPIAsync
from nominatim.api.search.query import Phrase, PhraseType, TokenType, BreakType
import nominatim.api.search.icu_tokenizer as tok
from nominatim.api.logging import set_log_output, get_and_disable
async def add_word(conn, word_id, word_token, wtype, word, info = None):
    """ Insert a single row into the word table. """
    word_table = conn.t.meta.tables['word']
    await conn.execute(word_table.insert(),
                       {'word_id': word_id,
                        'word_token': word_token,
                        'type': wtype,
                        'word': word,
                        'info': info})
def make_phrase(query):
    """ Split a query string on commas into a list of generic phrases. """
    return [Phrase(PhraseType.NONE, part) for part in query.split(',')]
@pytest_asyncio.fixture
async def conn(table_factory):
    """ Create an asynchronous SQLAlchemy engine for the test DB.

        Sets up the property and word tables used by the ICU tokenizer
        before the API connection is opened.
    """
    table_factory('nominatim_properties',
                  definition='property TEXT, value TEXT',
                  content=(('tokenizer_import_normalisation', ':: lower();'),
                           ('tokenizer_import_transliteration', "'1' > '/1/'; 'ä' > 'ä '")))
    table_factory('word',
                  definition='word_id INT, word_token TEXT, type TEXT, word TEXT, info JSONB')
    api = NominatimAPIAsync(Path('/invalid'), {})
    async with api.begin() as conn:
        yield conn
    await api.close()
@pytest.mark.asyncio
async def test_empty_phrase(conn):
    """ An empty phrase list must produce an empty query. """
    analyzer = await tok.create_query_analyzer(conn)

    query = await analyzer.analyze_query([])

    assert not query.source
    assert query.num_token_slots() == 0
@pytest.mark.asyncio
async def test_single_phrase_with_unknown_terms(conn):
    """ Terms missing from the word table get no starting tokens. """
    analyzer = await tok.create_query_analyzer(conn)
    await add_word(conn, 1, 'foo', 'w', 'FOO')

    query = await analyzer.analyze_query(make_phrase('foo BAR'))

    assert len(query.source) == 1
    assert query.source[0].ptype == PhraseType.NONE
    # input is normalised to lower case
    assert query.source[0].text == 'foo bar'

    assert query.num_token_slots() == 2
    assert len(query.nodes[0].starting) == 1
    assert not query.nodes[1].starting
@pytest.mark.asyncio
async def test_multiple_phrases(conn):
    """ Comma-separated phrases must be kept apart in the query. """
    analyzer = await tok.create_query_analyzer(conn)
    await add_word(conn, 1, 'one', 'w', 'one')
    await add_word(conn, 2, 'two', 'w', 'two')
    await add_word(conn, 100, 'one two', 'W', 'one two')
    await add_word(conn, 3, 'three', 'w', 'three')

    query = await analyzer.analyze_query(make_phrase('one two,three'))

    assert len(query.source) == 2
@pytest.mark.asyncio
async def test_splitting_in_transliteration(conn):
    """ When transliteration introduces a space inside a term, the term
        must be split into two slots joined by a TOKEN break.
    """
    analyzer = await tok.create_query_analyzer(conn)
    await add_word(conn, 1, 'mä', 'W', 'ma')
    await add_word(conn, 2, 'fo', 'W', 'fo')

    query = await analyzer.analyze_query(make_phrase('mäfo'))

    assert query.num_token_slots() == 2
    assert query.nodes[0].starting
    assert query.nodes[1].starting
    assert query.nodes[1].btype == BreakType.TOKEN
@pytest.mark.asyncio
@pytest.mark.parametrize('term,order', [('23456', ['POSTCODE', 'HOUSENUMBER', 'WORD', 'PARTIAL']),
                                        ('3', ['HOUSENUMBER', 'POSTCODE', 'WORD', 'PARTIAL'])
                                       ])
async def test_penalty_postcodes_and_housenumbers(conn, term, order):
    """ Tokens of an all-digit term must be penalised so that they sort
        in the given type order (per the expectation: long digit strings
        favour postcodes, short ones house numbers).
    """
    ana = await tok.create_query_analyzer(conn)
    # the same term exists in the word table in all four roles
    await add_word(conn, 1, term, 'P', None)
    await add_word(conn, 2, term, 'H', term)
    await add_word(conn, 3, term, 'w', term)
    await add_word(conn, 4, term, 'W', term)
    query = await ana.analyze_query(make_phrase(term))
    assert query.num_token_slots() == 1
    # sort by (penalty, type name) and compare only the type order
    torder = [(tl.tokens[0].penalty, tl.ttype.name) for tl in query.nodes[0].starting]
    torder.sort()
    assert [t[1] for t in torder] == order
@pytest.mark.asyncio
async def test_category_words_only_at_beginning(conn):
    """ Category (near-search) tokens only apply to the first slot of
        the query, not to later occurrences of the same term.
    """
    analyzer = await tok.create_query_analyzer(conn)
    await add_word(conn, 1, 'foo', 'S', 'FOO', {'op': 'in'})
    await add_word(conn, 2, 'bar', 'w', 'BAR')

    query = await analyzer.analyze_query(make_phrase('foo BAR foo'))

    assert query.num_token_slots() == 3
    assert len(query.nodes[0].starting) == 1
    assert query.nodes[0].starting[0].ttype == TokenType.CATEGORY
    assert not query.nodes[2].starting
@pytest.mark.asyncio
async def test_qualifier_words(conn):
    """ Qualifier terms act as qualifiers anywhere in the query and
        additionally as categories at either end.
    """
    analyzer = await tok.create_query_analyzer(conn)
    await add_word(conn, 1, 'foo', 'S', None, {'op': '-'})
    await add_word(conn, 2, 'bar', 'w', None)

    query = await analyzer.analyze_query(make_phrase('foo BAR foo BAR foo'))

    assert query.num_token_slots() == 5
    assert {t.ttype for t in query.nodes[0].starting} == {TokenType.CATEGORY, TokenType.QUALIFIER}
    assert {t.ttype for t in query.nodes[2].starting} == {TokenType.QUALIFIER}
    assert {t.ttype for t in query.nodes[4].starting} == {TokenType.CATEGORY, TokenType.QUALIFIER}
@pytest.mark.asyncio
async def test_add_unknown_housenumbers(conn):
    """ Number-like terms not found in the word table still receive a
        house-number token with an artificial token id; per the expected
        result, '99834' and '34a' are not treated as house numbers here.
    """
    ana = await tok.create_query_analyzer(conn)
    await add_word(conn, 1, '23', 'H', '23')
    query = await ana.analyze_query(make_phrase('466 23 99834 34a'))
    assert query.num_token_slots() == 4
    # artificial tokens are numbered 0, 1, ...
    assert query.nodes[0].starting[0].ttype == TokenType.HOUSENUMBER
    assert len(query.nodes[0].starting[0].tokens) == 1
    assert query.nodes[0].starting[0].tokens[0].token == 0
    assert query.nodes[1].starting[0].ttype == TokenType.HOUSENUMBER
    assert len(query.nodes[1].starting[0].tokens) == 1
    assert query.nodes[1].starting[0].tokens[0].token == 1
    assert not query.nodes[2].starting
    assert not query.nodes[3].starting
@pytest.mark.asyncio
@pytest.mark.parametrize('logtype', ['text', 'html'])
async def test_log_output(conn, logtype):
    """ Running a query analysis with logging enabled must produce
        log output in either format.
    """
    analyzer = await tok.create_query_analyzer(conn)
    await add_word(conn, 1, 'foo', 'w', 'FOO')

    set_log_output(logtype)
    await analyzer.analyze_query(make_phrase('foo'))

    assert get_and_disable()
| 6,199 | 32.15508 | 101 | py |
Nominatim | Nominatim-master/test/python/api/search/test_api_search_query.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for tokenized query data structures.
"""
import pytest
from nominatim.api.search import query
class MyToken(query.Token):
    """ Minimal Token implementation for testing. """

    def get_category(self):
        return ('this', 'that')
def mktoken(tid: int) -> 'MyToken':
    """ Create a dummy token with the given token id and fixed values
        for the remaining fields.
    """
    return MyToken(3.0, tid, 1, 'foo', True)
@pytest.mark.parametrize('ptype,ttype', [('NONE', 'WORD'),
                                         ('AMENITY', 'QUALIFIER'),
                                         ('STREET', 'PARTIAL'),
                                         ('CITY', 'WORD'),
                                         ('COUNTRY', 'COUNTRY'),
                                         ('POSTCODE', 'POSTCODE')])
def test_phrase_compatible(ptype, ttype):
    """ Token types that must be accepted within the given phrase type. """
    assert query.PhraseType[ptype].compatible_with(query.TokenType[ttype])
@pytest.mark.parametrize('ptype', ['COUNTRY', 'POSTCODE'])
def test_phrase_incompatible(ptype):
    """ COUNTRY and POSTCODE phrases must not accept partial tokens. """
    assert not query.PhraseType[ptype].compatible_with(query.TokenType.PARTIAL)
def test_query_node_empty():
    """ A freshly created node must have no tokens at all. """
    node = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE)

    assert not node.has_tokens(3, query.TokenType.PARTIAL)
    assert node.get_tokens(3, query.TokenType.WORD) is None
def test_query_node_with_content():
    """ has_tokens()/get_tokens() must only find token lists that match
        both the end position and the token type exactly.
    """
    node = query.QueryNode(query.BreakType.PHRASE, query.PhraseType.NONE)
    node.starting.append(query.TokenList(2, query.TokenType.PARTIAL,
                                         [mktoken(100), mktoken(101)]))
    node.starting.append(query.TokenList(2, query.TokenType.WORD, [mktoken(1000)]))

    assert not node.has_tokens(3, query.TokenType.PARTIAL)
    assert not node.has_tokens(2, query.TokenType.COUNTRY)
    assert node.has_tokens(2, query.TokenType.PARTIAL)
    assert node.has_tokens(2, query.TokenType.WORD)

    assert node.get_tokens(3, query.TokenType.PARTIAL) is None
    assert node.get_tokens(2, query.TokenType.COUNTRY) is None
    assert len(node.get_tokens(2, query.TokenType.PARTIAL)) == 2
    assert len(node.get_tokens(2, query.TokenType.WORD)) == 1
def test_query_struct_empty():
    """ A query without phrases must have no token slots. """
    assert query.QueryStruct([]).num_token_slots() == 0
def test_query_struct_with_tokens():
q = query.QueryStruct([query.Phrase(query.PhraseType.NONE, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.NONE)
q.add_node(query.BreakType.END, query.PhraseType.NONE)
assert q.num_token_slots() == 2
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TokenType.PARTIAL, mktoken(2))
q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(99))
q.add_token(query.TokenRange(1, 2), query.TokenType.WORD, mktoken(98))
assert q.get_tokens(query.TokenRange(0, 2), query.TokenType.WORD) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.WORD)) == 2
partials = q.get_partials_list(query.TokenRange(0, 2))
assert len(partials) == 2
assert [t.token for t in partials] == [1, 2]
assert q.find_lookup_word_by_id(4) == 'None'
assert q.find_lookup_word_by_id(99) == '[W]foo'
def test_query_struct_incompatible_token():
q = query.QueryStruct([query.Phrase(query.PhraseType.COUNTRY, 'foo bar')])
q.add_node(query.BreakType.WORD, query.PhraseType.COUNTRY)
q.add_node(query.BreakType.END, query.PhraseType.NONE)
q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1))
q.add_token(query.TokenRange(1, 2), query.TokenType.COUNTRY, mktoken(100))
assert q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL) == []
assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.COUNTRY)) == 1
| 3,768 | 35.95098 | 97 | py |
Nominatim | Nominatim-master/test/python/api/search/test_token_assignment.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test for creation of token assignments from tokenized queries.
"""
import pytest
from nominatim.api.search.query import QueryStruct, Phrase, PhraseType, BreakType, TokenType, TokenRange, Token
from nominatim.api.search.token_assignment import yield_token_assignments, TokenAssignment, PENALTY_TOKENCHANGE
class MyToken(Token):
def get_category(self):
return 'this', 'that'
def make_query(*args):
q = None
dummy = MyToken(3.0, 45, 1, 'foo', True)
for btype, ptype, tlist in args:
if q is None:
q = QueryStruct([Phrase(ptype, '')])
else:
q.add_node(btype, ptype)
start = len(q.nodes) - 1
for end, ttype in tlist:
q.add_token(TokenRange(start, end), ttype, dummy)
q.add_node(BreakType.END, PhraseType.NONE)
return q
def check_assignments(actual, *expected):
todo = list(expected)
for assignment in actual:
assert assignment in todo, f"Unexpected assignment: {assignment}"
todo.remove(assignment)
assert not todo, f"Missing assignments: {expected}"
def test_query_with_missing_tokens():
q = QueryStruct([Phrase(PhraseType.NONE, '')])
q.add_node(BreakType.END, PhraseType.NONE)
assert list(yield_token_assignments(q)) == []
def test_one_word_query():
q = make_query((BreakType.START, PhraseType.NONE,
[(1, TokenType.PARTIAL),
(1, TokenType.WORD),
(1, TokenType.HOUSENUMBER)]))
res = list(yield_token_assignments(q))
assert res == [TokenAssignment(name=TokenRange(0, 1))]
def test_single_postcode():
q = make_query((BreakType.START, PhraseType.NONE,
[(1, TokenType.POSTCODE)]))
res = list(yield_token_assignments(q))
assert res == [TokenAssignment(postcode=TokenRange(0, 1))]
def test_single_country_name():
q = make_query((BreakType.START, PhraseType.NONE,
[(1, TokenType.COUNTRY)]))
res = list(yield_token_assignments(q))
assert res == [TokenAssignment(country=TokenRange(0, 1))]
def test_single_word_poi_search():
q = make_query((BreakType.START, PhraseType.NONE,
[(1, TokenType.CATEGORY),
(1, TokenType.QUALIFIER)]))
res = list(yield_token_assignments(q))
assert res == [TokenAssignment(category=TokenRange(0, 1))]
@pytest.mark.parametrize('btype', [BreakType.WORD, BreakType.PART, BreakType.TOKEN])
def test_multiple_simple_words(btype):
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(btype, PhraseType.NONE, [(2, TokenType.PARTIAL)]),
(btype, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
penalty = PENALTY_TOKENCHANGE[btype]
check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(0, 3)),
TokenAssignment(penalty=penalty, name=TokenRange(0, 2),
address=[TokenRange(2, 3)]),
TokenAssignment(penalty=penalty, name=TokenRange(0, 1),
address=[TokenRange(1, 3)]),
TokenAssignment(penalty=penalty, name=TokenRange(1, 3),
address=[TokenRange(0, 1)]),
TokenAssignment(penalty=penalty, name=TokenRange(2, 3),
address=[TokenRange(0, 2)])
)
def test_multiple_words_respect_phrase_break():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(0, 1),
address=[TokenRange(1, 2)]),
TokenAssignment(name=TokenRange(1, 2),
address=[TokenRange(0, 1)]))
def test_housenumber_and_street():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(1, 2),
housenumber=TokenRange(0, 1)),
TokenAssignment(address=[TokenRange(1, 2)],
housenumber=TokenRange(0, 1)))
def test_housenumber_and_street_backwards():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.HOUSENUMBER)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(0, 1),
housenumber=TokenRange(1, 2)),
TokenAssignment(address=[TokenRange(0, 1)],
housenumber=TokenRange(1, 2)))
def test_housenumber_and_postcode():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.POSTCODE)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=pytest.approx(0.3),
name=TokenRange(0, 1),
housenumber=TokenRange(1, 2),
address=[TokenRange(2, 3)],
postcode=TokenRange(3, 4)),
TokenAssignment(penalty=pytest.approx(0.3),
housenumber=TokenRange(1, 2),
address=[TokenRange(0, 1), TokenRange(2, 3)],
postcode=TokenRange(3, 4)))
def test_postcode_and_housenumber():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.POSTCODE)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.HOUSENUMBER)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=pytest.approx(0.3),
name=TokenRange(2, 3),
housenumber=TokenRange(3, 4),
address=[TokenRange(0, 1)],
postcode=TokenRange(1, 2)),
TokenAssignment(penalty=pytest.approx(0.3),
housenumber=TokenRange(3, 4),
address=[TokenRange(0, 1), TokenRange(2, 3)],
postcode=TokenRange(1, 2)))
def test_country_housenumber_postcode():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.COUNTRY)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.POSTCODE)]))
check_assignments(yield_token_assignments(q))
@pytest.mark.parametrize('ttype', [TokenType.POSTCODE, TokenType.COUNTRY,
TokenType.CATEGORY, TokenType.QUALIFIER])
def test_housenumber_with_only_special_terms(ttype):
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(2, ttype)]))
check_assignments(yield_token_assignments(q))
@pytest.mark.parametrize('ttype', [TokenType.POSTCODE, TokenType.HOUSENUMBER, TokenType.COUNTRY])
def test_multiple_special_tokens(ttype):
q = make_query((BreakType.START, PhraseType.NONE, [(1, ttype)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(3, ttype)]))
check_assignments(yield_token_assignments(q))
def test_housenumber_many_phrases():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(3, TokenType.PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(4, TokenType.HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(5, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1,
name=TokenRange(4, 5),
housenumber=TokenRange(3, 4),\
address=[TokenRange(0, 1), TokenRange(1, 2),
TokenRange(2, 3)]),
TokenAssignment(penalty=0.1,
housenumber=TokenRange(3, 4),\
address=[TokenRange(0, 1), TokenRange(1, 2),
TokenRange(2, 3), TokenRange(4, 5)]))
def test_country_at_beginning():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.COUNTRY)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
country=TokenRange(0, 1)))
def test_country_at_end():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.COUNTRY)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(0, 1),
country=TokenRange(1, 2)))
def test_country_in_middle():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.COUNTRY)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q))
def test_postcode_with_designation():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.POSTCODE)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
postcode=TokenRange(0, 1)),
TokenAssignment(postcode=TokenRange(0, 1),
address=[TokenRange(1, 2)]))
def test_postcode_with_designation_backwards():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.PHRASE, PhraseType.NONE, [(2, TokenType.POSTCODE)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(name=TokenRange(0, 1),
postcode=TokenRange(1, 2)),
TokenAssignment(penalty=0.1, postcode=TokenRange(1, 2),
address=[TokenRange(0, 1)]))
def test_category_at_beginning():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.CATEGORY)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 2),
category=TokenRange(0, 1)))
def test_category_at_end():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(0, 1),
category=TokenRange(1, 2)))
def test_category_in_middle():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q))
def test_qualifier_at_beginning():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.1, name=TokenRange(1, 3),
qualifier=TokenRange(0, 1)),
TokenAssignment(penalty=0.2, name=TokenRange(1, 2),
qualifier=TokenRange(0, 1),
address=[TokenRange(2, 3)]))
def test_qualifier_after_name():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(4, TokenType.PARTIAL)]),
(BreakType.WORD, PhraseType.NONE, [(5, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q),
TokenAssignment(penalty=0.2, name=TokenRange(0, 2),
qualifier=TokenRange(2, 3),
address=[TokenRange(3, 5)]),
TokenAssignment(penalty=0.2, name=TokenRange(3, 5),
qualifier=TokenRange(2, 3),
address=[TokenRange(0, 2)]))
def test_qualifier_before_housenumber():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q))
def test_qualifier_after_housenumber():
q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]),
(BreakType.WORD, PhraseType.NONE, [(2, TokenType.QUALIFIER)]),
(BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)]))
check_assignments(yield_token_assignments(q))
| 15,194 | 43.171512 | 111 | py |
Nominatim | Nominatim-master/test/python/api/search/test_search_poi.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the POI searcher.
"""
import pytest
import nominatim.api as napi
from nominatim.api.types import SearchDetails
from nominatim.api.search.db_searches import PoiSearch
from nominatim.api.search.db_search_fields import WeightedStrings, WeightedCategories
def run_search(apiobj, global_penalty, poitypes, poi_penalties=None,
ccodes=[], details=SearchDetails()):
if poi_penalties is None:
poi_penalties = [0.0] * len(poitypes)
class MySearchData:
penalty = global_penalty
qualifiers = WeightedCategories(poitypes, poi_penalties)
countries = WeightedStrings(ccodes, [0.0] * len(ccodes))
search = PoiSearch(MySearchData())
async def run():
async with apiobj.api._async_api.begin() as conn:
return await search.lookup(conn, details)
return apiobj.async_to_sync(run())
@pytest.mark.parametrize('coord,pid', [('34.3, 56.100021', 2),
('5.0, 4.59933', 1)])
def test_simple_near_search_in_placex(apiobj, coord, pid):
apiobj.add_placex(place_id=1, class_='highway', type='bus_stop',
centroid=(5.0, 4.6))
apiobj.add_placex(place_id=2, class_='highway', type='bus_stop',
centroid=(34.3, 56.1))
details = SearchDetails.from_kwargs({'near': coord, 'near_radius': 0.001})
results = run_search(apiobj, 0.1, [('highway', 'bus_stop')], [0.5], details=details)
assert [r.place_id for r in results] == [pid]
@pytest.mark.parametrize('coord,pid', [('34.3, 56.100021', 2),
('34.3, 56.4', 2),
('5.0, 4.59933', 1)])
def test_simple_near_search_in_classtype(apiobj, coord, pid):
apiobj.add_placex(place_id=1, class_='highway', type='bus_stop',
centroid=(5.0, 4.6))
apiobj.add_placex(place_id=2, class_='highway', type='bus_stop',
centroid=(34.3, 56.1))
apiobj.add_class_type_table('highway', 'bus_stop')
details = SearchDetails.from_kwargs({'near': coord, 'near_radius': 0.5})
results = run_search(apiobj, 0.1, [('highway', 'bus_stop')], [0.5], details=details)
assert [r.place_id for r in results] == [pid]
class TestPoiSearchWithRestrictions:
@pytest.fixture(autouse=True, params=["placex", "classtype"])
def fill_database(self, apiobj, request):
apiobj.add_placex(place_id=1, class_='highway', type='bus_stop',
country_code='au',
centroid=(34.3, 56.10003))
apiobj.add_placex(place_id=2, class_='highway', type='bus_stop',
country_code='nz',
centroid=(34.3, 56.1))
if request.param == 'classtype':
apiobj.add_class_type_table('highway', 'bus_stop')
self.args = {'near': '34.3, 56.4', 'near_radius': 0.5}
else:
self.args = {'near': '34.3, 56.100021', 'near_radius': 0.001}
def test_unrestricted(self, apiobj):
results = run_search(apiobj, 0.1, [('highway', 'bus_stop')], [0.5],
details=SearchDetails.from_kwargs(self.args))
assert [r.place_id for r in results] == [1, 2]
def test_restict_country(self, apiobj):
results = run_search(apiobj, 0.1, [('highway', 'bus_stop')], [0.5],
ccodes=['de', 'nz'],
details=SearchDetails.from_kwargs(self.args))
assert [r.place_id for r in results] == [2]
def test_restrict_by_viewbox(self, apiobj):
args = {'bounded_viewbox': True, 'viewbox': '34.299,56.0,34.3001,56.10001'}
args.update(self.args)
results = run_search(apiobj, 0.1, [('highway', 'bus_stop')], [0.5],
ccodes=['de', 'nz'],
details=SearchDetails.from_kwargs(args))
assert [r.place_id for r in results] == [2]
| 4,178 | 37.33945 | 88 | py |
Nominatim | Nominatim-master/test/python/api/search/test_query_analyzer_factory.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for query analyzer creation.
"""
from pathlib import Path
import pytest
from nominatim.api import NominatimAPIAsync
from nominatim.api.search.query_analyzer_factory import make_query_analyzer
from nominatim.api.search.icu_tokenizer import ICUQueryAnalyzer
@pytest.mark.asyncio
async def test_import_icu_tokenizer(table_factory):
table_factory('nominatim_properties',
definition='property TEXT, value TEXT',
content=(('tokenizer', 'icu'),
('tokenizer_import_normalisation', ':: lower();'),
('tokenizer_import_transliteration', "'1' > '/1/'; 'ä' > 'ä '")))
api = NominatimAPIAsync(Path('/invalid'), {})
async with api.begin() as conn:
ana = await make_query_analyzer(conn)
assert isinstance(ana, ICUQueryAnalyzer)
await api.close()
@pytest.mark.asyncio
async def test_import_missing_property(table_factory):
api = NominatimAPIAsync(Path('/invalid'), {})
table_factory('nominatim_properties',
definition='property TEXT, value TEXT')
async with api.begin() as conn:
with pytest.raises(ValueError, match='Property.*not found'):
await make_query_analyzer(conn)
await api.close()
@pytest.mark.asyncio
async def test_import_missing_module(table_factory):
api = NominatimAPIAsync(Path('/invalid'), {})
table_factory('nominatim_properties',
definition='property TEXT, value TEXT',
content=(('tokenizer', 'missing'),))
async with api.begin() as conn:
with pytest.raises(RuntimeError, match='Tokenizer not found'):
await make_query_analyzer(conn)
await api.close()
| 1,934 | 32.362069 | 92 | py |
Nominatim | Nominatim-master/test/python/api/search/test_legacy_query_analyzer.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for query analyzer for legacy tokenizer.
"""
from pathlib import Path
import pytest
import pytest_asyncio
from nominatim.api import NominatimAPIAsync
from nominatim.api.search.query import Phrase, PhraseType, TokenType, BreakType
import nominatim.api.search.legacy_tokenizer as tok
from nominatim.api.logging import set_log_output, get_and_disable
async def add_word(conn, word_id, word_token, word, count):
t = conn.t.meta.tables['word']
await conn.execute(t.insert(), {'word_id': word_id,
'word_token': word_token,
'search_name_count': count,
'word': word})
async def add_housenumber(conn, word_id, hnr):
t = conn.t.meta.tables['word']
await conn.execute(t.insert(), {'word_id': word_id,
'word_token': ' ' + hnr,
'word': hnr,
'class': 'place',
'type': 'house'})
async def add_postcode(conn, word_id, postcode):
t = conn.t.meta.tables['word']
await conn.execute(t.insert(), {'word_id': word_id,
'word_token': ' ' + postcode,
'word': postcode,
'class': 'place',
'type': 'postcode'})
async def add_special_term(conn, word_id, word_token, cls, typ, op):
t = conn.t.meta.tables['word']
await conn.execute(t.insert(), {'word_id': word_id,
'word_token': word_token,
'word': word_token,
'class': cls,
'type': typ,
'operator': op})
def make_phrase(query):
return [Phrase(PhraseType.NONE, s) for s in query.split(',')]
@pytest_asyncio.fixture
async def conn(table_factory, temp_db_cursor):
""" Create an asynchronous SQLAlchemy engine for the test DB.
"""
table_factory('nominatim_properties',
definition='property TEXT, value TEXT',
content=(('tokenizer_maxwordfreq', '10000'), ))
table_factory('word',
definition="""word_id INT, word_token TEXT, word TEXT,
class TEXT, type TEXT, country_code TEXT,
search_name_count INT, operator TEXT
""")
temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
RETURNS TEXT AS $$ SELECT lower(name); $$ LANGUAGE SQL;""")
api = NominatimAPIAsync(Path('/invalid'), {})
async with api.begin() as conn:
yield conn
await api.close()
@pytest.mark.asyncio
async def test_empty_phrase(conn):
ana = await tok.create_query_analyzer(conn)
query = await ana.analyze_query([])
assert len(query.source) == 0
assert query.num_token_slots() == 0
@pytest.mark.asyncio
async def test_single_phrase_with_unknown_terms(conn):
ana = await tok.create_query_analyzer(conn)
await add_word(conn, 1, 'foo', 'FOO', 3)
query = await ana.analyze_query(make_phrase('foo BAR'))
assert len(query.source) == 1
assert query.source[0].ptype == PhraseType.NONE
assert query.source[0].text == 'foo bar'
assert query.num_token_slots() == 2
assert len(query.nodes[0].starting) == 1
assert not query.nodes[1].starting
@pytest.mark.asyncio
async def test_multiple_phrases(conn):
ana = await tok.create_query_analyzer(conn)
await add_word(conn, 1, 'one', 'one', 13)
await add_word(conn, 2, 'two', 'two', 45)
await add_word(conn, 100, 'one two', 'one two', 3)
await add_word(conn, 3, 'three', 'three', 4584)
query = await ana.analyze_query(make_phrase('one two,three'))
assert len(query.source) == 2
@pytest.mark.asyncio
async def test_housenumber_token(conn):
ana = await tok.create_query_analyzer(conn)
await add_housenumber(conn, 556, '45 a')
query = await ana.analyze_query(make_phrase('45 A'))
assert query.num_token_slots() == 2
assert len(query.nodes[0].starting) == 2
query.nodes[0].starting.sort(key=lambda tl: tl.end)
hn1 = query.nodes[0].starting[0]
assert hn1.ttype == TokenType.HOUSENUMBER
assert hn1.end == 1
assert hn1.tokens[0].token == 0
hn2 = query.nodes[0].starting[1]
assert hn2.ttype == TokenType.HOUSENUMBER
assert hn2.end == 2
assert hn2.tokens[0].token == 556
@pytest.mark.asyncio
async def test_postcode_token(conn):
ana = await tok.create_query_analyzer(conn)
await add_postcode(conn, 34, '45ax')
query = await ana.analyze_query(make_phrase('45AX'))
assert query.num_token_slots() == 1
assert [tl.ttype for tl in query.nodes[0].starting] == [TokenType.POSTCODE]
@pytest.mark.asyncio
async def test_partial_tokens(conn):
ana = await tok.create_query_analyzer(conn)
await add_word(conn, 1, ' foo', 'foo', 99)
await add_word(conn, 1, 'foo', 'FOO', 99)
await add_word(conn, 1, 'bar', 'FOO', 990000)
query = await ana.analyze_query(make_phrase('foo bar'))
assert query.num_token_slots() == 2
first = query.nodes[0].starting
first.sort(key=lambda tl: tl.tokens[0].penalty)
assert [tl.ttype for tl in first] == [TokenType.WORD, TokenType.PARTIAL]
assert all(tl.tokens[0].lookup_word == 'foo' for tl in first)
second = query.nodes[1].starting
assert [tl.ttype for tl in second] == [TokenType.PARTIAL]
assert not second[0].tokens[0].is_indexed
@pytest.mark.asyncio
@pytest.mark.parametrize('term,order', [('23456', ['POSTCODE', 'HOUSENUMBER', 'WORD', 'PARTIAL']),
('3', ['HOUSENUMBER', 'POSTCODE', 'WORD', 'PARTIAL'])
])
async def test_penalty_postcodes_and_housenumbers(conn, term, order):
ana = await tok.create_query_analyzer(conn)
await add_postcode(conn, 1, term)
await add_housenumber(conn, 2, term)
await add_word(conn, 3, term, term, 5)
await add_word(conn, 4, ' ' + term, term, 1)
query = await ana.analyze_query(make_phrase(term))
assert query.num_token_slots() == 1
torder = [(tl.tokens[0].penalty, tl.ttype.name) for tl in query.nodes[0].starting]
torder.sort()
assert [t[1] for t in torder] == order
@pytest.mark.asyncio
async def test_category_words_only_at_beginning(conn):
ana = await tok.create_query_analyzer(conn)
await add_special_term(conn, 1, 'foo', 'amenity', 'restaurant', 'in')
await add_word(conn, 2, ' bar', 'BAR', 1)
query = await ana.analyze_query(make_phrase('foo BAR foo'))
assert query.num_token_slots() == 3
assert len(query.nodes[0].starting) == 1
assert query.nodes[0].starting[0].ttype == TokenType.CATEGORY
assert not query.nodes[2].starting
@pytest.mark.asyncio
async def test_qualifier_words(conn):
ana = await tok.create_query_analyzer(conn)
await add_special_term(conn, 1, 'foo', 'amenity', 'restaurant', '-')
await add_word(conn, 2, ' bar', 'w', None)
query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo'))
assert query.num_token_slots() == 5
assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER}
assert set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER}
assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER}
@pytest.mark.asyncio
@pytest.mark.parametrize('logtype', ['text', 'html'])
async def test_log_output(conn, logtype):
ana = await tok.create_query_analyzer(conn)
await add_word(conn, 1, 'foo', 'FOO', 99)
set_log_output(logtype)
await ana.analyze_query(make_phrase('foo'))
assert get_and_disable()
| 8,133 | 32.2 | 101 | py |
Nominatim | Nominatim-master/test/python/api/search/test_search_near.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the near searcher.
"""
import pytest
import nominatim.api as napi
from nominatim.api.types import SearchDetails
from nominatim.api.search.db_searches import NearSearch, PlaceSearch
from nominatim.api.search.db_search_fields import WeightedStrings, WeightedCategories,\
FieldLookup, FieldRanking, RankedTokens
def run_search(apiobj, global_penalty, cat, cat_penalty=None,
details=SearchDetails()):
class PlaceSearchData:
penalty = 0.0
postcodes = WeightedStrings([], [])
countries = WeightedStrings([], [])
housenumbers = WeightedStrings([], [])
qualifiers = WeightedStrings([], [])
lookups = [FieldLookup('name_vector', [56], 'lookup_all')]
rankings = []
place_search = PlaceSearch(0.0, PlaceSearchData(), 2)
if cat_penalty is None:
cat_penalty = [0.0] * len(cat)
near_search = NearSearch(0.1, WeightedCategories(cat, cat_penalty), place_search)
async def run():
async with apiobj.api._async_api.begin() as conn:
return await near_search.lookup(conn, details)
results = apiobj.async_to_sync(run())
results.sort(key=lambda r: r.accuracy)
return results
def test_no_results_inner_query(apiobj):
assert not run_search(apiobj, 0.4, [('this', 'that')])
class TestNearSearch:
@pytest.fixture(autouse=True)
def fill_database(self, apiobj):
apiobj.add_placex(place_id=100, country_code='us',
centroid=(5.6, 4.3))
apiobj.add_search_name(100, names=[56], country_code='us',
centroid=(5.6, 4.3))
apiobj.add_placex(place_id=101, country_code='mx',
centroid=(-10.3, 56.9))
apiobj.add_search_name(101, names=[56], country_code='mx',
centroid=(-10.3, 56.9))
def test_near_in_placex(self, apiobj):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
centroid=(5.6001, 4.2994))
apiobj.add_placex(place_id=23, class_='amenity', type='bench',
centroid=(5.6001, 4.2994))
results = run_search(apiobj, 0.1, [('amenity', 'bank')])
assert [r.place_id for r in results] == [22]
def test_multiple_types_near_in_placex(self, apiobj):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
importance=0.002,
centroid=(5.6001, 4.2994))
apiobj.add_placex(place_id=23, class_='amenity', type='bench',
importance=0.001,
centroid=(5.6001, 4.2994))
results = run_search(apiobj, 0.1, [('amenity', 'bank'),
('amenity', 'bench')])
assert [r.place_id for r in results] == [22, 23]
def test_near_in_classtype(self, apiobj):
apiobj.add_placex(place_id=22, class_='amenity', type='bank',
centroid=(5.6, 4.34))
apiobj.add_placex(place_id=23, class_='amenity', type='bench',
centroid=(5.6, 4.34))
apiobj.add_class_type_table('amenity', 'bank')
apiobj.add_class_type_table('amenity', 'bench')
results = run_search(apiobj, 0.1, [('amenity', 'bank')])
assert [r.place_id for r in results] == [22]
| 3,643 | 34.378641 | 89 | py |
Nominatim | Nominatim-master/test/python/api/search/test_search_places.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the generic place searcher.
"""
import pytest
import nominatim.api as napi
from nominatim.api.types import SearchDetails
from nominatim.api.search.db_searches import PlaceSearch
from nominatim.api.search.db_search_fields import WeightedStrings, WeightedCategories,\
FieldLookup, FieldRanking, RankedTokens
def run_search(apiobj, global_penalty, lookup, ranking, count=2,
hnrs=[], pcs=[], ccodes=[], quals=[],
details=SearchDetails()):
class MySearchData:
penalty = global_penalty
postcodes = WeightedStrings(pcs, [0.0] * len(pcs))
countries = WeightedStrings(ccodes, [0.0] * len(ccodes))
housenumbers = WeightedStrings(hnrs, [0.0] * len(hnrs))
qualifiers = WeightedCategories(quals, [0.0] * len(quals))
lookups = lookup
rankings = ranking
search = PlaceSearch(0.0, MySearchData(), count)
async def run():
async with apiobj.api._async_api.begin() as conn:
return await search.lookup(conn, details)
results = apiobj.async_to_sync(run())
results.sort(key=lambda r: r.accuracy)
return results
class TestNameOnlySearches:
@pytest.fixture(autouse=True)
def fill_database(self, apiobj):
apiobj.add_placex(place_id=100, country_code='us',
centroid=(5.6, 4.3))
apiobj.add_search_name(100, names=[1,2,10,11], country_code='us',
centroid=(5.6, 4.3))
apiobj.add_placex(place_id=101, country_code='mx',
centroid=(-10.3, 56.9))
apiobj.add_search_name(101, names=[1,2,20,21], country_code='mx',
centroid=(-10.3, 56.9))
@pytest.mark.parametrize('lookup_type', ['lookup_all', 'restrict'])
@pytest.mark.parametrize('rank,res', [([10], [100, 101]),
([20], [101, 100])])
def test_lookup_all_match(self, apiobj, lookup_type, rank, res):
lookup = FieldLookup('name_vector', [1,2], lookup_type)
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, rank)])
results = run_search(apiobj, 0.1, [lookup], [ranking])
assert [r.place_id for r in results] == res
@pytest.mark.parametrize('lookup_type', ['lookup_all', 'restrict'])
def test_lookup_all_partial_match(self, apiobj, lookup_type):
lookup = FieldLookup('name_vector', [1,20], lookup_type)
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, [21])])
results = run_search(apiobj, 0.1, [lookup], [ranking])
assert len(results) == 1
assert results[0].place_id == 101
@pytest.mark.parametrize('rank,res', [([10], [100, 101]),
([20], [101, 100])])
def test_lookup_any_match(self, apiobj, rank, res):
lookup = FieldLookup('name_vector', [11,21], 'lookup_any')
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, rank)])
results = run_search(apiobj, 0.1, [lookup], [ranking])
assert [r.place_id for r in results] == res
def test_lookup_any_partial_match(self, apiobj):
lookup = FieldLookup('name_vector', [20], 'lookup_all')
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, [21])])
results = run_search(apiobj, 0.1, [lookup], [ranking])
assert len(results) == 1
assert results[0].place_id == 101
@pytest.mark.parametrize('cc,res', [('us', 100), ('mx', 101)])
def test_lookup_restrict_country(self, apiobj, cc, res):
lookup = FieldLookup('name_vector', [1,2], 'lookup_all')
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, [10])])
results = run_search(apiobj, 0.1, [lookup], [ranking], ccodes=[cc])
assert [r.place_id for r in results] == [res]
def test_lookup_restrict_placeid(self, apiobj):
lookup = FieldLookup('name_vector', [1,2], 'lookup_all')
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, [10])])
results = run_search(apiobj, 0.1, [lookup], [ranking],
details=SearchDetails(excluded=[101]))
assert [r.place_id for r in results] == [100]
@pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
napi.GeometryFormat.KML,
napi.GeometryFormat.SVG,
napi.GeometryFormat.TEXT])
def test_return_geometries(self, apiobj, geom):
lookup = FieldLookup('name_vector', [20], 'lookup_all')
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, [21])])
results = run_search(apiobj, 0.1, [lookup], [ranking],
details=SearchDetails(geometry_output=geom))
assert geom.name.lower() in results[0].geometry
@pytest.mark.parametrize('viewbox', ['5.0,4.0,6.0,5.0', '5.7,4.0,6.0,5.0'])
def test_prefer_viewbox(self, apiobj, viewbox):
lookup = FieldLookup('name_vector', [1, 2], 'lookup_all')
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, [21])])
results = run_search(apiobj, 0.1, [lookup], [ranking])
assert [r.place_id for r in results] == [101, 100]
results = run_search(apiobj, 0.1, [lookup], [ranking],
details=SearchDetails.from_kwargs({'viewbox': viewbox}))
assert [r.place_id for r in results] == [100, 101]
def test_force_viewbox(self, apiobj):
lookup = FieldLookup('name_vector', [1, 2], 'lookup_all')
details=SearchDetails.from_kwargs({'viewbox': '5.0,4.0,6.0,5.0',
'bounded_viewbox': True})
results = run_search(apiobj, 0.1, [lookup], [], details=details)
assert [r.place_id for r in results] == [100]
def test_prefer_near(self, apiobj):
lookup = FieldLookup('name_vector', [1, 2], 'lookup_all')
ranking = FieldRanking('name_vector', 0.9, [RankedTokens(0.0, [21])])
results = run_search(apiobj, 0.1, [lookup], [ranking])
assert [r.place_id for r in results] == [101, 100]
results = run_search(apiobj, 0.1, [lookup], [ranking],
details=SearchDetails.from_kwargs({'near': '5.6,4.3'}))
results.sort(key=lambda r: -r.importance)
assert [r.place_id for r in results] == [100, 101]
def test_force_near(self, apiobj):
lookup = FieldLookup('name_vector', [1, 2], 'lookup_all')
details=SearchDetails.from_kwargs({'near': '5.6,4.3',
'near_radius': 0.11})
results = run_search(apiobj, 0.1, [lookup], [], details=details)
assert [r.place_id for r in results] == [100]
class TestStreetWithHousenumber:
    """ Tests for place searches that carry a housenumber.

        The fixture sets up one street in Spain (place_id 1000) and one
        in Portugal (place_id 2000), each with several houses attached
        via parent_place_id.
    """

    @pytest.fixture(autouse=True)
    def fill_database(self, apiobj):
        # Spanish street with houses '20 a' and the double number '21;22'.
        apiobj.add_placex(place_id=1, class_='place', type='house',
                          parent_place_id=1000,
                          housenumber='20 a', country_code='es')
        apiobj.add_placex(place_id=2, class_='place', type='house',
                          parent_place_id=1000,
                          housenumber='21;22', country_code='es')
        apiobj.add_placex(place_id=1000, class_='highway', type='residential',
                          rank_search=26, rank_address=26,
                          country_code='es')
        apiobj.add_search_name(1000, names=[1,2,10,11],
                               search_rank=26, address_rank=26,
                               country_code='es')
        # Portuguese street with houses 20, 22 and 24.
        apiobj.add_placex(place_id=91, class_='place', type='house',
                          parent_place_id=2000,
                          housenumber='20', country_code='pt')
        apiobj.add_placex(place_id=92, class_='place', type='house',
                          parent_place_id=2000,
                          housenumber='22', country_code='pt')
        apiobj.add_placex(place_id=93, class_='place', type='house',
                          parent_place_id=2000,
                          housenumber='24', country_code='pt')
        apiobj.add_placex(place_id=2000, class_='highway', type='residential',
                          rank_search=26, rank_address=26,
                          country_code='pt')
        apiobj.add_search_name(2000, names=[1,2,20,21],
                               search_rank=26, address_rank=26,
                               country_code='pt')


    @pytest.mark.parametrize('hnr,res', [('20', [91, 1]), ('20 a', [1]),
                                         ('21', [2]), ('22', [2, 92]),
                                         ('24', [93]), ('25', [])])
    def test_lookup_by_single_housenumber(self, apiobj, hnr, res):
        # Matching houses come first; the bare streets are appended as
        # fallback results.
        lookup = FieldLookup('name_vector', [1,2], 'lookup_all')
        ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
        results = run_search(apiobj, 0.1, [lookup], [ranking], hnrs=[hnr])
        assert [r.place_id for r in results] == res + [1000, 2000]


    @pytest.mark.parametrize('cc,res', [('es', [2, 1000]), ('pt', [92, 2000])])
    def test_lookup_with_country_restriction(self, apiobj, cc, res):
        lookup = FieldLookup('name_vector', [1,2], 'lookup_all')
        ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
        results = run_search(apiobj, 0.1, [lookup], [ranking], hnrs=['22'],
                             ccodes=[cc])
        assert [r.place_id for r in results] == res


    def test_lookup_exclude_housenumber_placeid(self, apiobj):
        lookup = FieldLookup('name_vector', [1,2], 'lookup_all')
        ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
        results = run_search(apiobj, 0.1, [lookup], [ranking], hnrs=['22'],
                             details=SearchDetails(excluded=[92]))
        assert [r.place_id for r in results] == [2, 1000, 2000]


    def test_lookup_exclude_street_placeid(self, apiobj):
        lookup = FieldLookup('name_vector', [1,2], 'lookup_all')
        ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
        results = run_search(apiobj, 0.1, [lookup], [ranking], hnrs=['22'],
                             details=SearchDetails(excluded=[1000]))
        assert [r.place_id for r in results] == [2, 92, 2000]


    @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON,
                                      napi.GeometryFormat.KML,
                                      napi.GeometryFormat.SVG,
                                      napi.GeometryFormat.TEXT])
    def test_return_geometries(self, apiobj, geom):
        lookup = FieldLookup('name_vector', [1, 2], 'lookup_all')
        results = run_search(apiobj, 0.1, [lookup], [], hnrs=['20', '21', '22'],
                             details=SearchDetails(geometry_output=geom))
        assert results
        assert all(geom.name.lower() in r.geometry for r in results)
class TestInterpolations:
    """ Tests for housenumber lookups against interpolation lines.
    """

    @pytest.fixture(autouse=True)
    def fill_database(self, apiobj):
        # Street 990 with a real house '23' (991) and an interpolation
        # line covering the odd numbers 21-29 (992).
        apiobj.add_placex(place_id=990, class_='highway', type='service',
                          rank_search=27, rank_address=27,
                          centroid=(10.0, 10.0),
                          geometry='LINESTRING(9.995 10, 10.005 10)')
        apiobj.add_search_name(990, names=[111],
                               search_rank=27, address_rank=27)
        apiobj.add_placex(place_id=991, class_='place', type='house',
                          parent_place_id=990,
                          rank_search=30, rank_address=30,
                          housenumber='23',
                          centroid=(10.0, 10.00002))
        apiobj.add_osmline(place_id=992,
                           parent_place_id=990,
                           startnumber=21, endnumber=29, step=2,
                           centroid=(10.0, 10.00001),
                           geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')


    @pytest.mark.parametrize('hnr,res', [('21', [992]), ('22', []), ('23', [991])])
    def test_lookup_housenumber(self, apiobj, hnr, res):
        # The parent street (990) is always appended as fallback.
        lookup = FieldLookup('name_vector', [111], 'lookup_all')
        results = run_search(apiobj, 0.1, [lookup], [], hnrs=[hnr])
        assert [r.place_id for r in results] == res + [990]
class TestTiger:
    """ Tests for housenumber lookups against the US Tiger data table.
    """

    @pytest.fixture(autouse=True)
    def fill_database(self, apiobj):
        # US street 990 with a real house '23' (991) and a Tiger
        # interpolation for odd numbers 21-29 (992).
        apiobj.add_placex(place_id=990, class_='highway', type='service',
                          rank_search=27, rank_address=27,
                          country_code='us',
                          centroid=(10.0, 10.0),
                          geometry='LINESTRING(9.995 10, 10.005 10)')
        apiobj.add_search_name(990, names=[111], country_code='us',
                               search_rank=27, address_rank=27)
        apiobj.add_placex(place_id=991, class_='place', type='house',
                          parent_place_id=990,
                          rank_search=30, rank_address=30,
                          housenumber='23',
                          country_code='us',
                          centroid=(10.0, 10.00002))
        apiobj.add_tiger(place_id=992,
                         parent_place_id=990,
                         startnumber=21, endnumber=29, step=2,
                         centroid=(10.0, 10.00001),
                         geometry='LINESTRING(9.995 10.00001, 10.005 10.00001)')


    @pytest.mark.parametrize('hnr,res', [('21', [992]), ('22', []), ('23', [991])])
    def test_lookup_housenumber(self, apiobj, hnr, res):
        # The parent street (990) is always appended as fallback.
        lookup = FieldLookup('name_vector', [111], 'lookup_all')
        results = run_search(apiobj, 0.1, [lookup], [], hnrs=[hnr])
        assert [r.place_id for r in results] == res + [990]
class TestLayersRank30:
    """ Tests for layer filtering of rank-30 objects.

        Five rank-30 places share the same name token but belong to
        different layers (address, POI, man-made, railway, natural).
        Importance decreases with place_id so the result order is stable.
    """

    @pytest.fixture(autouse=True)
    def fill_database(self, apiobj):
        apiobj.add_placex(place_id=223, class_='place', type='house',
                          housenumber='1',
                          rank_address=30,
                          rank_search=30)
        apiobj.add_search_name(223, names=[34],
                               importance=0.0009,
                               address_rank=30, search_rank=30)
        apiobj.add_placex(place_id=224, class_='amenity', type='toilet',
                          rank_address=30,
                          rank_search=30)
        apiobj.add_search_name(224, names=[34],
                               importance=0.0008,
                               address_rank=30, search_rank=30)
        apiobj.add_placex(place_id=225, class_='man_made', type='tower',
                          rank_address=0,
                          rank_search=30)
        apiobj.add_search_name(225, names=[34],
                               importance=0.0007,
                               address_rank=0, search_rank=30)
        apiobj.add_placex(place_id=226, class_='railway', type='station',
                          rank_address=0,
                          rank_search=30)
        apiobj.add_search_name(226, names=[34],
                               importance=0.0006,
                               address_rank=0, search_rank=30)
        apiobj.add_placex(place_id=227, class_='natural', type='cave',
                          rank_address=0,
                          rank_search=30)
        apiobj.add_search_name(227, names=[34],
                               importance=0.0005,
                               address_rank=0, search_rank=30)


    @pytest.mark.parametrize('layer,res', [(napi.DataLayer.ADDRESS, [223]),
                                           (napi.DataLayer.POI, [224]),
                                           (napi.DataLayer.ADDRESS | napi.DataLayer.POI, [223, 224]),
                                           (napi.DataLayer.MANMADE, [225]),
                                           (napi.DataLayer.RAILWAY, [226]),
                                           (napi.DataLayer.NATURAL, [227]),
                                           (napi.DataLayer.MANMADE | napi.DataLayer.NATURAL, [225, 227]),
                                           (napi.DataLayer.MANMADE | napi.DataLayer.RAILWAY, [225, 226])])
    def test_layers_rank30(self, apiobj, layer, res):
        # Layer flags may be combined; each combination must return
        # exactly the places belonging to one of its layers.
        lookup = FieldLookup('name_vector', [34], 'lookup_any')
        results = run_search(apiobj, 0.1, [lookup], [],
                             details=SearchDetails(layers=layer))
        assert [r.place_id for r in results] == res
| 16,819 | 42.57513 | 106 | py |
Nominatim | Nominatim-master/test/python/api/search/test_db_search_builder.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for creating abstract searches from token assignments.
"""
import pytest
from nominatim.api.search.query import Token, TokenRange, BreakType, PhraseType, TokenType, QueryStruct, Phrase
from nominatim.api.search.db_search_builder import SearchBuilder
from nominatim.api.search.token_assignment import TokenAssignment
from nominatim.api.types import SearchDetails
import nominatim.api.search.db_searches as dbs
class MyToken(Token):
    """ Minimal Token implementation for the tests; always reports the
        fixed category ('this', 'that').
    """

    def get_category(self):
        return 'this', 'that'
def make_query(*args):
    """ Create a QueryStruct from lists of token descriptions.

        Each positional argument describes one word position as a list
        of (end_node, token_type, [(token_id, word), ...]) tuples.
        Word positions are joined by WORD breaks; an END break is
        appended after the last one.
    """
    q = None

    for tlist in args:
        # The first position bootstraps the query; later ones add a node.
        if q is None:
            q = QueryStruct([Phrase(PhraseType.NONE, '')])
        else:
            q.add_node(BreakType.WORD, PhraseType.NONE)

        start = len(q.nodes) - 1
        for end, ttype, tinfo in tlist:
            for tid, word in tinfo:
                # Only PARTIAL tokens carry a penalty (0.5).
                q.add_token(TokenRange(start, end), ttype,
                            MyToken(0.5 if ttype == TokenType.PARTIAL else 0.0, tid, 1, word, True))

    q.add_node(BreakType.END, PhraseType.NONE)

    return q
def test_country_search():
    """ A lone country token becomes a CountrySearch carrying all of
        the token's country codes.
    """
    query = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
    builder = SearchBuilder(query, SearchDetails())

    searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.CountrySearch)
    assert set(first.countries.values) == {'de', 'en'}
def test_country_search_with_country_restriction():
    """ A global country filter is intersected with the token's codes.
    """
    query = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
    builder = SearchBuilder(query, SearchDetails.from_kwargs({'countries': 'en,fr'}))

    searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.CountrySearch)
    assert set(first.countries.values) == {'en'}
def test_country_search_with_confllicting_country_restriction():
    """ No search is built when the country filter and the country
        token share no country code.
    """
    # NOTE(review): 'confllicting' in the function name is a typo for
    # 'conflicting'.
    q = make_query([(1, TokenType.COUNTRY, [(2, 'de'), (3, 'en')])])
    builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'fr'}))

    searches = list(builder.build(TokenAssignment(country=TokenRange(0, 1))))

    assert len(searches) == 0
def test_postcode_search_simple():
    """ A lone postcode token becomes a bare PostcodeSearch with no
        additional filters.
    """
    query = make_query([(1, TokenType.POSTCODE, [(34, '2367')])])
    builder = SearchBuilder(query, SearchDetails())

    searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1))))
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.PostcodeSearch)
    assert first.postcodes.values == ['2367']
    assert not first.countries.values
    assert not first.lookups
    assert not first.rankings
def test_postcode_with_country():
    """ A country token next to the postcode ends up as a country
        filter on the PostcodeSearch.
    """
    q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
                   [(2, TokenType.COUNTRY, [(1, 'xx')])])
    builder = SearchBuilder(q, SearchDetails())

    searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
                                                  country=TokenRange(1, 2))))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PostcodeSearch)
    assert search.postcodes.values == ['2367']
    assert search.countries.values == ['xx']
    assert not search.lookups
    assert not search.rankings
def test_postcode_with_address():
    """ A partial-only address term adds a lookup to the PostcodeSearch
        but no ranking.
    """
    q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
                   [(2, TokenType.PARTIAL, [(100, 'word')])])
    builder = SearchBuilder(q, SearchDetails())

    searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
                                                  address=[TokenRange(1, 2)])))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PostcodeSearch)
    assert search.postcodes.values == ['2367']
    assert not search.countries
    assert search.lookups
    assert not search.rankings
def test_postcode_with_address_with_full_word():
    """ When the address term also has a full-word token, a ranking is
        added to the PostcodeSearch.
    """
    q = make_query([(1, TokenType.POSTCODE, [(34, '2367')])],
                   [(2, TokenType.PARTIAL, [(100, 'word')]),
                    (2, TokenType.WORD, [(1, 'full')])])
    builder = SearchBuilder(q, SearchDetails())

    searches = list(builder.build(TokenAssignment(postcode=TokenRange(0, 1),
                                                  address=[TokenRange(1, 2)])))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PostcodeSearch)
    assert search.postcodes.values == ['2367']
    assert not search.countries
    assert search.lookups
    assert len(search.rankings) == 1
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True},
                                    {'near': '10,10'}])
def test_category_only(kwargs):
    """ A pure category assignment yields a PoiSearch as long as the
        search area is restricted (bounded viewbox or near point).
    """
    query = make_query([(1, TokenType.CATEGORY, [(2, 'foo')])])
    builder = SearchBuilder(query, SearchDetails.from_kwargs(kwargs))

    searches = list(builder.build(TokenAssignment(category=TokenRange(0, 1))))
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.PoiSearch)
    assert first.categories.values == [('this', 'that')]
@pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'},
                                    {}])
def test_category_skipped(kwargs):
    """ Without a restricted search area, a pure category assignment is
        dropped entirely.
    """
    query = make_query([(1, TokenType.CATEGORY, [(2, 'foo')])])
    builder = SearchBuilder(query, SearchDetails.from_kwargs(kwargs))

    searches = list(builder.build(TokenAssignment(category=TokenRange(0, 1))))

    assert len(searches) == 0
def test_name_only_search():
    """ A plain name produces a PlaceSearch without any extra filters.
    """
    query = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
                        (1, TokenType.WORD, [(100, 'a')])])
    builder = SearchBuilder(query, SearchDetails())

    searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.PlaceSearch)
    assert not first.postcodes.values
    assert not first.countries.values
    assert not first.housenumbers.values
    assert not first.qualifiers.values
    assert len(first.lookups) == 1
    assert len(first.rankings) == 1
def test_name_with_qualifier():
    """ A qualifier token becomes a category filter on the PlaceSearch.
    """
    q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
                    (1, TokenType.WORD, [(100, 'a')])],
                   [(2, TokenType.QUALIFIER, [(55, 'hotel')])])
    builder = SearchBuilder(q, SearchDetails())

    searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
                                                  qualifier=TokenRange(1, 2))))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PlaceSearch)
    assert not search.postcodes.values
    assert not search.countries.values
    assert not search.housenumbers.values
    # MyToken.get_category() always returns ('this', 'that').
    assert search.qualifiers.values == [('this', 'that')]
    assert len(search.lookups) == 1
    assert len(search.rankings) == 1
def test_name_with_housenumber_search():
    """ A housenumber token becomes a housenumber filter on the
        PlaceSearch.
    """
    q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
                    (1, TokenType.WORD, [(100, 'a')])],
                   [(2, TokenType.HOUSENUMBER, [(66, '66')])])
    builder = SearchBuilder(q, SearchDetails())

    searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
                                                  housenumber=TokenRange(1, 2))))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PlaceSearch)
    assert not search.postcodes.values
    assert not search.countries.values
    assert search.housenumbers.values == ['66']
    assert len(search.lookups) == 1
    assert len(search.rankings) == 1
def test_name_and_address():
    """ Name plus two address parts: one lookup per vector and one
        ranking per query part.
    """
    q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
                    (1, TokenType.WORD, [(100, 'a')])],
                   [(2, TokenType.PARTIAL, [(2, 'b')]),
                    (2, TokenType.WORD, [(101, 'b')])],
                   [(3, TokenType.PARTIAL, [(3, 'c')]),
                    (3, TokenType.WORD, [(102, 'c')])]
                  )
    builder = SearchBuilder(q, SearchDetails())

    searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
                                                  address=[TokenRange(1, 2),
                                                           TokenRange(2, 3)])))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PlaceSearch)
    assert not search.postcodes.values
    assert not search.countries.values
    assert not search.housenumbers.values
    assert len(search.lookups) == 2
    assert len(search.rankings) == 3
def test_name_and_complex_address():
    """ Address parts may span multiple word positions (the full word
        'bc' covers positions 1-3); rankings collapse accordingly.
    """
    q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
                    (1, TokenType.WORD, [(100, 'a')])],
                   [(2, TokenType.PARTIAL, [(2, 'b')]),
                    (3, TokenType.WORD, [(101, 'bc')])],
                   [(3, TokenType.PARTIAL, [(3, 'c')])],
                   [(4, TokenType.PARTIAL, [(4, 'd')]),
                    (4, TokenType.WORD, [(103, 'd')])]
                  )
    builder = SearchBuilder(q, SearchDetails())

    searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1),
                                                  address=[TokenRange(1, 2),
                                                           TokenRange(2, 4)])))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PlaceSearch)
    assert not search.postcodes.values
    assert not search.countries.values
    assert not search.housenumbers.values
    assert len(search.lookups) == 2
    assert len(search.rankings) == 2
def test_name_only_near_search():
    """ A category token next to a name wraps the PlaceSearch into a
        NearSearch.
    """
    query = make_query([(1, TokenType.CATEGORY, [(88, 'g')])],
                       [(2, TokenType.PARTIAL, [(1, 'a')]),
                        (2, TokenType.WORD, [(100, 'a')])])
    builder = SearchBuilder(query, SearchDetails())

    searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2),
                                                  category=TokenRange(0, 1))))
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.NearSearch)
    assert isinstance(first.search, dbs.PlaceSearch)
def test_name_only_search_with_category():
    """ A category filter from the request parameters also wraps the
        PlaceSearch into a NearSearch.
    """
    query = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
                        (1, TokenType.WORD, [(100, 'a')])])
    builder = SearchBuilder(query, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]}))

    searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.NearSearch)
    assert isinstance(first.search, dbs.PlaceSearch)
def test_name_only_search_with_countries():
    """ A country filter from the request parameters is copied into the
        PlaceSearch.
    """
    q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]),
                    (1, TokenType.WORD, [(100, 'a')])])
    builder = SearchBuilder(q, SearchDetails.from_kwargs({'countries': 'de,en'}))

    searches = list(builder.build(TokenAssignment(name=TokenRange(0, 1))))

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PlaceSearch)
    assert not search.postcodes.values
    assert set(search.countries.values) == {'de', 'en'}
    assert not search.housenumbers.values
def make_counted_searches(name_part, name_full, address_part, address_full):
    """ Build a two-word query with explicit token counts and return the
        searches produced for a name+address assignment.

        The counts steer which index (name or address vector) the
        builder prefers for the lookup.
    """
    q = QueryStruct([Phrase(PhraseType.NONE, '')])
    for i in range(2):
        q.add_node(BreakType.WORD, PhraseType.NONE)
    q.add_node(BreakType.END, PhraseType.NONE)

    q.add_token(TokenRange(0, 1), TokenType.PARTIAL,
                MyToken(0.5, 1, name_part, 'name_part', True))
    q.add_token(TokenRange(0, 1), TokenType.WORD,
                MyToken(0, 101, name_full, 'name_full', True))
    q.add_token(TokenRange(1, 2), TokenType.PARTIAL,
                MyToken(0.5, 2, address_part, 'address_part', True))
    q.add_token(TokenRange(1, 2), TokenType.WORD,
                MyToken(0, 102, address_full, 'address_full', True))

    builder = SearchBuilder(q, SearchDetails())

    return list(builder.build(TokenAssignment(name=TokenRange(0, 1),
                                              address=[TokenRange(1, 2)])))
def test_infrequent_partials_in_name():
    """ With rare partials, the name vector is used for the lookup and
        the address vector only as a restriction.
    """
    searches = make_counted_searches(1, 1, 1, 1)
    assert len(searches) == 1

    first = searches[0]
    assert isinstance(first, dbs.PlaceSearch)
    assert len(first.lookups) == 2
    assert len(first.rankings) == 2

    assert set((l.column, l.lookup_type) for l in first.lookups) == \
           {('name_vector', 'lookup_all'), ('nameaddress_vector', 'restrict')}
def test_frequent_partials_in_name_but_not_in_address():
    """ A frequent name partial flips the strategy: look up via the
        address vector, restrict via the name vector.
    """
    searches = make_counted_searches(10000, 1, 1, 1)

    assert len(searches) == 1
    search = searches[0]

    assert isinstance(search, dbs.PlaceSearch)
    assert len(search.lookups) == 2
    assert len(search.rankings) == 2

    assert set((l.column, l.lookup_type) for l in search.lookups) == \
           {('nameaddress_vector', 'lookup_all'), ('name_vector', 'restrict')}
def test_frequent_partials_in_name_and_address():
    """ Frequent partials on both sides produce two alternative
        searches with different lookup strategies.
    """
    searches = make_counted_searches(9999, 1, 9999, 1)

    assert len(searches) == 2
    assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
    searches.sort(key=lambda s: s.penalty)

    assert set((l.column, l.lookup_type) for l in searches[0].lookups) == \
           {('name_vector', 'lookup_any'), ('nameaddress_vector', 'restrict')}
    assert set((l.column, l.lookup_type) for l in searches[1].lookups) == \
           {('nameaddress_vector', 'lookup_all'), ('name_vector', 'lookup_all')}
def test_too_frequent_partials_in_name_and_address():
    """ Above the frequency cut-off only the 'lookup_any' strategy
        remains.
    """
    searches = make_counted_searches(10000, 1, 10000, 1)

    assert len(searches) == 1
    assert all(isinstance(s, dbs.PlaceSearch) for s in searches)
    searches.sort(key=lambda s: s.penalty)

    assert set((l.column, l.lookup_type) for l in searches[0].lookups) == \
           {('name_vector', 'lookup_any'), ('nameaddress_vector', 'restrict')}
| 14,165 | 33.720588 | 111 | py |
Nominatim | Nominatim-master/test/python/api/search/test_search_postcode.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the postcode searcher.
"""
import pytest
import nominatim.api as napi
from nominatim.api.types import SearchDetails
from nominatim.api.search.db_searches import PostcodeSearch
from nominatim.api.search.db_search_fields import WeightedStrings, FieldLookup, \
FieldRanking, RankedTokens
def run_search(apiobj, global_penalty, pcs, pc_penalties=None,
               ccodes=None, lookup=None, ranking=None, details=None):
    """ Run a PostcodeSearch synchronously and return the results.

        Parameters mirror the fields of the search data: postcodes with
        optional per-postcode penalties, country codes, field lookups
        and rankings. `details` defaults to a fresh SearchDetails.

        Bug fix: the former defaults `ccodes=[]`, `lookup=[]`,
        `ranking=[]` and `details=SearchDetails()` were evaluated once
        at definition time and shared between calls; they are now
        created per call via None sentinels.
    """
    if pc_penalties is None:
        pc_penalties = [0.0] * len(pcs)
    if ccodes is None:
        ccodes = []
    if lookup is None:
        lookup = []
    if ranking is None:
        ranking = []
    if details is None:
        details = SearchDetails()

    class MySearchData:
        penalty = global_penalty
        postcodes = WeightedStrings(pcs, pc_penalties)
        countries = WeightedStrings(ccodes, [0.0] * len(ccodes))
        lookups = lookup
        rankings = ranking

    search = PostcodeSearch(0.0, MySearchData())

    async def run():
        async with apiobj.api._async_api.begin() as conn:
            return await search.lookup(conn, details)

    return apiobj.async_to_sync(run())
def test_postcode_only_search(apiobj):
    """ All matching postcodes are returned, ordered by their penalty.
    """
    apiobj.add_postcode(place_id=100, country_code='ch', postcode='12345')
    apiobj.add_postcode(place_id=101, country_code='pl', postcode='12 345')

    found = run_search(apiobj, 0.3, ['12345', '12 345'], [0.0, 0.1])

    assert [r.place_id for r in found] == [100, 101]
def test_postcode_with_country(apiobj):
    """ A country filter removes postcodes from other countries.
    """
    apiobj.add_postcode(place_id=100, country_code='ch', postcode='12345')
    apiobj.add_postcode(place_id=101, country_code='pl', postcode='12 345')

    found = run_search(apiobj, 0.3, ['12345', '12 345'], [0.0, 0.1],
                       ccodes=['de', 'pl'])

    assert [r.place_id for r in found] == [101]
class TestPostcodeSearchWithAddress:
    """ Tests for postcode searches that carry additional address terms.

        The same postcode exists twice: attached to a Swiss village
        (parent 1000) and to a Polish one (parent 2000).
    """

    @pytest.fixture(autouse=True)
    def fill_database(self, apiobj):
        apiobj.add_postcode(place_id=100, country_code='ch',
                            parent_place_id=1000, postcode='12345')
        apiobj.add_postcode(place_id=101, country_code='pl',
                            parent_place_id=2000, postcode='12345')
        apiobj.add_placex(place_id=1000, class_='place', type='village',
                          rank_search=22, rank_address=22,
                          country_code='ch')
        apiobj.add_search_name(1000, names=[1,2,10,11],
                               search_rank=22, address_rank=22,
                               country_code='ch')
        apiobj.add_placex(place_id=2000, class_='place', type='village',
                          rank_search=22, rank_address=22,
                          country_code='pl')
        apiobj.add_search_name(2000, names=[1,2,20,21],
                               search_rank=22, address_rank=22,
                               country_code='pl')


    def test_lookup_both(self, apiobj):
        # Terms 1,2 match both parents; the ranking token 10 prefers
        # the Swiss entry.
        lookup = FieldLookup('name_vector', [1,2], 'restrict')
        ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])])
        results = run_search(apiobj, 0.1, ['12345'], lookup=[lookup], ranking=[ranking])
        assert [r.place_id for r in results] == [100, 101]


    def test_restrict_by_name(self, apiobj):
        # Term 10 only exists on the Swiss parent.
        lookup = FieldLookup('name_vector', [10], 'restrict')
        results = run_search(apiobj, 0.1, ['12345'], lookup=[lookup])
        assert [r.place_id for r in results] == [100]
| 3,591 | 35.653061 | 88 | py |
Nominatim | Nominatim-master/test/python/api/search/test_search_country.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the country searcher.
"""
import pytest
import nominatim.api as napi
from nominatim.api.types import SearchDetails
from nominatim.api.search.db_searches import CountrySearch
from nominatim.api.search.db_search_fields import WeightedStrings
def run_search(apiobj, global_penalty, ccodes,
               country_penalties=None, details=None):
    """ Run a CountrySearch synchronously and return the results.

        `country_penalties` defaults to zero for every country code.

        Bug fix: the former default `details=SearchDetails()` was
        evaluated once at function definition and shared between all
        calls; it is now created per call via a None sentinel.
    """
    if country_penalties is None:
        country_penalties = [0.0] * len(ccodes)
    if details is None:
        details = SearchDetails()

    class MySearchData:
        penalty = global_penalty
        countries = WeightedStrings(ccodes, country_penalties)

    search = CountrySearch(MySearchData())

    async def run():
        async with apiobj.api._async_api.begin() as conn:
            return await search.lookup(conn, details)

    return apiobj.async_to_sync(run())
def test_find_from_placex(apiobj):
    """ Countries with an administrative boundary in placex are found
        there; the country penalty is added to the global one.
    """
    apiobj.add_placex(place_id=55, class_='boundary', type='administrative',
                      rank_search=4, rank_address=4,
                      name={'name': 'Lolaland'},
                      country_code='yw',
                      centroid=(10, 10),
                      geometry='POLYGON((9.5 9.5, 9.5 10.5, 10.5 10.5, 10.5 9.5, 9.5 9.5))')

    found = run_search(apiobj, 0.5, ['de', 'yw'], [0.0, 0.3])

    assert len(found) == 1
    assert found[0].place_id == 55
    # accuracy = global penalty (0.5) + penalty of 'yw' (0.3)
    assert found[0].accuracy == 0.8
def test_find_from_fallback_countries(apiobj):
    """ Countries without a placex entry fall back to the country table.
    """
    apiobj.add_country('ro', 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))')
    apiobj.add_country_name('ro', {'name': 'România'})

    found = run_search(apiobj, 0.0, ['ro'])

    assert len(found) == 1
    assert found[0].names == {'name': 'România'}
def test_find_none(apiobj):
    """ Unknown country codes yield no results at all.
    """
    assert not run_search(apiobj, 0.0, ['xx'])
| 1,951 | 30.483871 | 92 | py |
Nominatim | Nominatim-master/test/python/tokenizer/test_icu.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for ICU tokenizer.
"""
import shutil
import yaml
import itertools
import pytest
from nominatim.tokenizer import icu_tokenizer
import nominatim.tokenizer.icu_rule_loader
from nominatim.db import properties
from nominatim.db.sql_preprocessor import SQLPreprocessor
from nominatim.data.place_info import PlaceInfo
from mock_icu_word_table import MockIcuWordTable
@pytest.fixture
def word_table(temp_db_conn):
    """ ICU-style mock word table for inspecting tokenizer output. """
    return MockIcuWordTable(temp_db_conn)
@pytest.fixture
def test_config(project_env, tmp_path):
    """ Project configuration whose SQL directory contains a stubbed
        icu_tokenizer.sql and a copy of the real table-creation script.
    """
    sqldir = tmp_path / 'sql'
    sqldir.mkdir()
    (sqldir / 'tokenizer').mkdir()
    # Stub out the function definitions; only the tables are real.
    (sqldir / 'tokenizer' / 'icu_tokenizer.sql').write_text("SELECT 'a'")
    shutil.copy(str(project_env.lib_dir.sql / 'tokenizer' / 'icu_tokenizer_tables.sql'),
                str(sqldir / 'tokenizer' / 'icu_tokenizer_tables.sql'))

    project_env.lib_dir.sql = sqldir

    return project_env
@pytest.fixture
def tokenizer_factory(dsn, tmp_path, property_table,
                      sql_preprocessor, place_table, word_table):
    """ Factory creating fresh ICU tokenizers backed by tmp_path. """
    (tmp_path / 'tokenizer').mkdir()

    def _maker():
        return icu_tokenizer.create(dsn, tmp_path / 'tokenizer')

    return _maker
@pytest.fixture
def db_prop(temp_db_conn):
    """ Accessor function for properties stored in the test database. """
    def _get_db_property(name):
        return properties.get_property(temp_db_conn, name)

    return _get_db_property
@pytest.fixture
def analyzer(tokenizer_factory, test_config, monkeypatch,
             temp_db_with_extensions, tmp_path):
    """ Factory for name analyzers with a configurable ICU rule set.

        Returns a function taking normalization/transliteration rules,
        word variants, sanitizers and optional housenumber/postcode
        analysis modules; it writes the corresponding YAML config into
        the project directory and returns a ready analyzer.
    """
    sql = tmp_path / 'sql' / 'tokenizer' / 'icu_tokenizer.sql'
    sql.write_text("SELECT 'a';")

    # Initialise the DB once with a fixed normalization setting.
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    def _mk_analyser(norm=("[[:Punctuation:][:Space:]]+ > ' '",), trans=(':: upper()',),
                     variants=('~gasse -> gasse', 'street => st', ),
                     sanitizers=[], with_housenumber=False,
                     with_postcode=False):
        cfgstr = {'normalization': list(norm),
                  'sanitizers': sanitizers,
                  'transliteration': list(trans),
                  'token-analysis': [{'analyzer': 'generic',
                                      'variants': [{'words': list(variants)}]}]}
        if with_housenumber:
            cfgstr['token-analysis'].append({'id': '@housenumber',
                                             'analyzer': 'housenumbers'})
        if with_postcode:
            cfgstr['token-analysis'].append({'id': '@postcode',
                                             'analyzer': 'postcodes'})
        (test_config.project_dir / 'icu_tokenizer.yaml').write_text(yaml.dump(cfgstr))
        # Reload the rules so the new YAML takes effect.
        tok.loader = nominatim.tokenizer.icu_rule_loader.ICURuleLoader(test_config)

        return tok.name_analyzer()

    return _mk_analyser
@pytest.fixture
def sql_functions(temp_db_conn, def_config, src_dir):
    """ Install the real utility and tokenizer SQL functions into the
        test database, temporarily pointing the config at lib-sql.
    """
    orig_sql = def_config.lib_dir.sql
    def_config.lib_dir.sql = src_dir / 'lib-sql'
    sqlproc = SQLPreprocessor(temp_db_conn, def_config)
    sqlproc.run_sql_file(temp_db_conn, 'functions/utils.sql')
    sqlproc.run_sql_file(temp_db_conn, 'tokenizer/icu_tokenizer.sql')
    def_config.lib_dir.sql = orig_sql
@pytest.fixture
def getorcreate_full_word(temp_db_cursor):
    """ Provide the getorcreate_full_word() SQL function which registers
        a full word together with all of its partial terms in the word
        table and returns the corresponding token ids.
    """
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_full_word(
                                                 norm_term TEXT, lookup_terms TEXT[],
                                                 OUT full_token INT,
                                                 OUT partial_tokens INT[])
  AS $$
DECLARE
  partial_terms TEXT[] = '{}'::TEXT[];
  term TEXT;
  term_id INTEGER;
  term_count INTEGER;
BEGIN
  SELECT min(word_id) INTO full_token
    FROM word WHERE info->>'word' = norm_term and type = 'W';

  IF full_token IS NULL THEN
    full_token := nextval('seq_word');
    INSERT INTO word (word_id, word_token, type, info)
      SELECT full_token, lookup_term, 'W',
             json_build_object('word', norm_term, 'count', 0)
        FROM unnest(lookup_terms) as lookup_term;
  END IF;

  FOR term IN SELECT unnest(string_to_array(unnest(lookup_terms), ' ')) LOOP
    term := trim(term);
    IF NOT (ARRAY[term] <@ partial_terms) THEN
      partial_terms := partial_terms || term;
    END IF;
  END LOOP;

  partial_tokens := '{}'::INT[];
  FOR term IN SELECT unnest(partial_terms) LOOP
    SELECT min(word_id), max(info->>'count') INTO term_id, term_count
      FROM word WHERE word_token = term and type = 'w';

    IF term_id IS NULL THEN
      term_id := nextval('seq_word');
      term_count := 0;
      INSERT INTO word (word_id, word_token, type, info)
        VALUES (term_id, term, 'w', json_build_object('count', term_count));
    END IF;

    IF NOT (ARRAY[term_id] <@ partial_tokens) THEN
      partial_tokens := partial_tokens || term_id;
    END IF;
  END LOOP;
END;
$$
LANGUAGE plpgsql;
                           """)
def test_init_new(tokenizer_factory, test_config, db_prop):
    """ Initialising a fresh DB must store the normalization rules. """
    tokenizer_factory().init_new_db(test_config)

    norm_rules = db_prop(nominatim.tokenizer.icu_rule_loader.DBCFG_IMPORT_NORM_RULES)
    assert norm_rules.startswith(':: lower ();')
def test_init_word_table(tokenizer_factory, test_config, place_row, temp_db_cursor):
    """ init_new_db must create the word table. """
    place_row(names={'name' : 'Test Area', 'ref' : '52'})
    place_row(names={'name' : 'No Area'})
    place_row(names={'name' : 'Holzstrasse'})

    tokenizer_factory().init_new_db(test_config)

    assert temp_db_cursor.table_exists('word')
def test_init_from_project(test_config, tokenizer_factory):
    """ A tokenizer loaded from an initialised project has its rules set. """
    tokenizer_factory().init_new_db(test_config)

    tokenizer = tokenizer_factory()
    tokenizer.init_from_project(test_config)

    assert tokenizer.loader is not None
def test_update_sql_functions(db_prop, temp_db_cursor,
                              tokenizer_factory, test_config, table_factory,
                              monkeypatch):
    """ update_sql_functions must (re-)execute the tokenizer SQL file. """
    tokenizer = tokenizer_factory()
    tokenizer.init_new_db(test_config)

    table_factory('test', 'txt TEXT')

    # Replace the SQL file with one that leaves a marker in the table.
    func_file = test_config.lib_dir.sql / 'tokenizer' / 'icu_tokenizer.sql'
    func_file.write_text("""INSERT INTO test VALUES (1133)""")

    tokenizer.update_sql_functions(test_config)

    assert temp_db_cursor.row_set('SELECT * FROM test') == {('1133', )}
def test_finalize_import(tokenizer_factory, temp_db_conn,
                         temp_db_cursor, test_config, sql_preprocessor_cfg):
    """ finalize_import must execute the index-creation SQL file.

        The file is replaced by a stub that creates a test() function,
        so that its execution can be observed afterwards.
    """
    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_indices.sql'
    func_file.write_text("""CREATE FUNCTION test() RETURNS TEXT
                            AS $$ SELECT 'b'::text $$ LANGUAGE SQL""")

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    tok.finalize_import(test_config)

    # Bug fix: this comparison was a bare expression whose result was
    # silently discarded; it is now an actual assertion.
    assert temp_db_cursor.scalar('SELECT test()') == 'b'
def test_check_database(test_config, tokenizer_factory,
                        temp_db_cursor, sql_preprocessor_cfg):
    """ check_database must report no problems on a fresh setup. """
    tokenizer = tokenizer_factory()
    tokenizer.init_new_db(test_config)

    assert tokenizer.check_database(test_config) is None
def test_update_statistics_reverse_only(word_table, tokenizer_factory):
    """ update_statistics must not fail on a reverse-only setup where
        no search_name table exists.
    """
    tok = tokenizer_factory()
    tok.update_statistics()
def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_factory):
    """ Statistics update must write word counts into the word table info. """
    word_table.add_full_word(1000, 'hello')
    table_factory('search_name',
                  'place_id BIGINT, name_vector INT[]',
                  [(12, [1000])])
    tok = tokenizer_factory()

    tok.update_statistics()

    assert temp_db_cursor.scalar("""SELECT count(*) FROM word
                                    WHERE type = 'W' and
                                          (info->>'count')::int > 0""") > 0
def test_normalize_postcode(analyzer):
    """ Postcode normalization must trim and uppercase, leaving non-Latin
        scripts intact.
    """
    with analyzer() as anl:
        # Previously the comparison results were silently discarded;
        # make the intended checks explicit.
        assert anl.normalize_postcode('123') == '123'
        assert anl.normalize_postcode('ab-34 ') == 'AB-34'
        assert anl.normalize_postcode('38 Б') == '38 Б'
class TestPostcodes:
    """ Tests for postcode handling with the 'clean-postcodes' sanitizer. """

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, sql_functions):
        sanitizers = [{'step': 'clean-postcodes'}]
        with analyzer(sanitizers=sanitizers, with_postcode=True) as anl:
            self.analyzer = anl
            yield anl

    def process_postcode(self, cc, postcode):
        """ Run a place with the given country code and postcode through
            the analyzer and return the token info.
        """
        return self.analyzer.process_place(PlaceInfo({'country_code': cc,
                                                      'address': {'postcode': postcode}}))

    def test_update_postcodes_from_db_empty(self, table_factory, word_table):
        table_factory('location_postcode', 'country_code TEXT, postcode TEXT',
                      content=(('de', '12345'), ('se', '132 34'),
                               ('bm', 'AB23'), ('fr', '12345')))

        self.analyzer.update_postcodes_from_db()

        assert word_table.count() == 5
        assert word_table.get_postcodes() == {'12345', '132 34@132 34', 'AB 23@AB 23'}

    def test_update_postcodes_from_db_ambigious(self, table_factory, word_table):
        table_factory('location_postcode', 'country_code TEXT, postcode TEXT',
                      content=(('in', '123456'), ('sg', '123456')))

        self.analyzer.update_postcodes_from_db()

        assert word_table.count() == 3
        assert word_table.get_postcodes() == {'123456', '123456@123 456'}

    def test_update_postcodes_from_db_add_and_remove(self, table_factory, word_table):
        table_factory('location_postcode', 'country_code TEXT, postcode TEXT',
                      content=(('ch', '1234'), ('bm', 'BC 45'), ('bm', 'XX45')))
        word_table.add_postcode(' 1234', '1234')
        word_table.add_postcode(' 5678', '5678')

        self.analyzer.update_postcodes_from_db()

        assert word_table.count() == 5
        assert word_table.get_postcodes() == {'1234', 'BC 45@BC 45', 'XX 45@XX 45'}

    def test_process_place_postcode_simple(self, word_table):
        info = self.process_postcode('de', '12345')

        assert info['postcode'] == '12345'
        assert word_table.get_postcodes() == {'12345', }

    def test_process_place_postcode_with_space(self, word_table):
        info = self.process_postcode('in', '123 567')

        assert info['postcode'] == '123567'
        assert word_table.get_postcodes() == {'123567@123 567', }
def test_update_special_phrase_empty_table(analyzer, word_table):
    """ Special phrases must be inserted into an empty word table. """
    with analyzer() as anl:
        anl.update_special_phrases([
            ("König bei", "amenity", "royal", "near"),
            ("Könige ", "amenity", "royal", "-"),
            ("street", "highway", "primary", "in")
        ], True)

    assert word_table.get_special() \
               == {('KÖNIG BEI', 'König bei', 'amenity', 'royal', 'near'),
                   ('KÖNIGE', 'Könige', 'amenity', 'royal', None),
                   ('STREET', 'street', 'highway', 'primary', 'in')}
def test_update_special_phrase_delete_all(analyzer, word_table):
    """ Updating with an empty list and replace=True must remove all phrases. """
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], True)

    assert word_table.count_special() == 0
def test_update_special_phrases_no_replace(analyzer, word_table):
    """ Updating with replace=False must keep the existing phrases. """
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([], False)

    assert word_table.count_special() == 2
def test_update_special_phrase_modify(analyzer, word_table):
    """ Updating with replace=True must replace the full set of phrases. """
    word_table.add_special('FOO', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special('BAR', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    with analyzer() as anl:
        anl.update_special_phrases([
            ('prison', 'amenity', 'prison', 'in'),
            ('bar', 'highway', 'road', '-'),
            ('garden', 'leisure', 'garden', 'near')
        ], True)

    assert word_table.get_special() \
               == {('PRISON', 'prison', 'amenity', 'prison', 'in'),
                   ('BAR', 'bar', 'highway', 'road', None),
                   ('GARDEN', 'garden', 'leisure', 'garden', 'near')}
def test_add_country_names_new(analyzer, word_table):
    """ Country names must be added for a country with no existing entries. """
    with analyzer() as anl:
        anl.add_country_names('es', {'name': 'Espagña', 'name:en': 'Spain'})

    assert word_table.get_country() == {('es', 'ESPAGÑA'), ('es', 'SPAIN')}
def test_add_country_names_extend(analyzer, word_table):
    """ New country names must extend the set of existing entries. """
    word_table.add_country('ch', 'SCHWEIZ')

    with analyzer() as anl:
        anl.add_country_names('ch', {'name': 'Schweiz', 'name:fr': 'Suisse'})

    assert word_table.get_country() == {('ch', 'SCHWEIZ'), ('ch', 'SUISSE')}
class TestPlaceNames:
    """ Tests for name token creation with split-name-list and
        strip-brace-terms sanitizers.
    """

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, sql_functions):
        sanitizers = [{'step': 'split-name-list'},
                      {'step': 'strip-brace-terms'}]
        with analyzer(sanitizers=sanitizers) as anl:
            self.analyzer = anl
            yield anl

    def expect_name_terms(self, info, *expected_terms):
        """ Check that the name tokens in 'info' are exactly the tokens
            for the given terms.
        """
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        assert eval(info['names']) == set((t[2] for t in tokens))

    def process_named_place(self, names):
        return self.analyzer.process_place(PlaceInfo({'name': names}))

    def test_simple_names(self):
        info = self.process_named_place({'name': 'Soft bAr', 'ref': '34'})

        self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34')

    @pytest.mark.parametrize('sep', [',', ';'])
    def test_names_with_separator(self, sep):
        info = self.process_named_place({'name': sep.join(('New York', 'Big Apple'))})

        self.expect_name_terms(info, '#New York', '#Big Apple',
                               'new', 'york', 'big', 'apple')

    def test_full_names_with_bracket(self):
        info = self.process_named_place({'name': 'Houseboat (left)'})

        self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
                               'houseboat', 'left')

    def test_country_name(self, word_table):
        place = PlaceInfo({'name': {'name': 'Norge'},
                           'country_code': 'no',
                           'rank_address': 4,
                           'class': 'boundary',
                           'type': 'administrative'})

        info = self.analyzer.process_place(place)

        self.expect_name_terms(info, '#norge', 'norge')
        assert word_table.get_country() == {('no', 'NORGE')}
class TestPlaceAddress:
    """ Tests for address token creation with the clean-housenumbers
        sanitizer (simple housenumber variant).
    """

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, sql_functions):
        hnr = {'step': 'clean-housenumbers',
               'filter-kind': ['housenumber', 'conscriptionnumber', 'streetnumber']}
        with analyzer(trans=(":: upper()", "'🜵' > ' '"), sanitizers=[hnr]) as anl:
            self.analyzer = anl
            yield anl

    @pytest.fixture
    def getorcreate_hnr_id(self, temp_db_cursor):
        # Stub that hands out decreasing negative ids for housenumber tokens.
        temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION getorcreate_hnr_id(lookup_term TEXT)
                                  RETURNS INTEGER AS $$
                                    SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")

    def process_address(self, **kwargs):
        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))

    def name_token_set(self, *expected_terms):
        """ Return the set of tokens for the given search terms. """
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        return set((t[2] for t in tokens))

    @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
    def test_process_place_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert word_table.get_postcodes() == {pcode, }

    @pytest.mark.parametrize('hnr', ['123a', '1', '101'])
    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
        info = self.process_address(housenumber=hnr)

        assert info['hnr'] == hnr.upper()
        assert info['hnr_tokens'] == "{-1}"

    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
        info = self.process_address(housenumber='134',
                                    conscriptionnumber='134',
                                    streetnumber='99a')

        assert set(info['hnr'].split(';')) == set(('134', '99A'))
        assert info['hnr_tokens'] == "{-1,-2}"

    def test_process_place_housenumbers_cached(self, getorcreate_hnr_id):
        info = self.process_address(housenumber="45")
        assert info['hnr_tokens'] == "{-1}"

        info = self.process_address(housenumber="46")
        assert info['hnr_tokens'] == "{-2}"

        # Already cached numbers must reuse their token ids.
        info = self.process_address(housenumber="41;45")
        assert eval(info['hnr_tokens']) == {-1, -3}

        info = self.process_address(housenumber="41")
        assert eval(info['hnr_tokens']) == {-3}

    def test_process_place_street(self):
        self.analyzer.process_place(PlaceInfo({'name': {'name': 'Grand Road'}}))
        info = self.process_address(street='Grand Road')

        assert eval(info['street']) == self.name_token_set('#Grand Road')

    def test_process_place_nonexisting_street(self):
        info = self.process_address(street='Grand Road')

        assert info['street'] == '{}'

    def test_process_place_multiple_street_tags(self):
        self.analyzer.process_place(PlaceInfo({'name': {'name': 'Grand Road',
                                                        'ref': '05989'}}))
        info = self.process_address(**{'street': 'Grand Road',
                                       'street:sym_ul': '05989'})

        assert eval(info['street']) == self.name_token_set('#Grand Road', '#05989')

    def test_process_place_street_empty(self):
        info = self.process_address(street='🜵')

        assert info['street'] == '{}'

    def test_process_place_street_from_cache(self):
        self.analyzer.process_place(PlaceInfo({'name': {'name': 'Grand Road'}}))
        self.process_address(street='Grand Road')

        # request address again
        info = self.process_address(street='Grand Road')

        assert eval(info['street']) == self.name_token_set('#Grand Road')

    def test_process_place_place(self):
        info = self.process_address(place='Honu Lulu')

        assert eval(info['place']) == self.name_token_set('HONU', 'LULU')

    def test_process_place_place_extra(self):
        info = self.process_address(**{'place:en': 'Honu Lulu'})

        assert 'place' not in info

    def test_process_place_place_empty(self):
        info = self.process_address(place='🜵')

        assert 'place' not in info

    def test_process_place_address_terms(self):
        info = self.process_address(country='de', city='Zwickau', state='Sachsen',
                                    suburb='Zwickau', street='Hauptstr',
                                    full='right behind the church')

        city = self.name_token_set('ZWICKAU')
        state = self.name_token_set('SACHSEN')

        result = {k: eval(v) for k, v in info['addr'].items()}

        assert result == {'city': city, 'suburb': city, 'state': state}

    def test_process_place_multiple_address_terms(self):
        info = self.process_address(**{'city': 'Bruxelles', 'city:de': 'Brüssel'})

        result = {k: eval(v) for k, v in info['addr'].items()}

        assert result == {'city': self.name_token_set('Bruxelles')}

    def test_process_place_address_terms_empty(self):
        info = self.process_address(country='de', city=' ', street='Hauptstr',
                                    full='right behind the church')

        assert 'addr' not in info
class TestPlaceHousenumberWithAnalyser:
    """ Tests for housenumber token creation when a housenumber analyzer
        is configured.
    """

    @pytest.fixture(autouse=True)
    def setup(self, analyzer, sql_functions):
        hnr = {'step': 'clean-housenumbers',
               'filter-kind': ['housenumber', 'conscriptionnumber', 'streetnumber']}
        with analyzer(trans=(":: upper()", "'🜵' > ' '"), sanitizers=[hnr],
                      with_housenumber=True) as anl:
            self.analyzer = anl
            yield anl

    @pytest.fixture
    def getorcreate_hnr_id(self, temp_db_cursor):
        # Stub that hands out decreasing negative ids for analyzed housenumbers.
        temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION create_analyzed_hnr_id(norm_term TEXT, lookup_terms TEXT[])
                                  RETURNS INTEGER AS $$
                                    SELECT -nextval('seq_word')::INTEGER; $$ LANGUAGE SQL""")

    def process_address(self, **kwargs):
        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))

    def name_token_set(self, *expected_terms):
        """ Return the set of tokens for the given search terms. """
        tokens = self.analyzer.get_word_token_info(expected_terms)
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        return set((t[2] for t in tokens))

    @pytest.mark.parametrize('hnr', ['123 a', '1', '101'])
    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
        info = self.process_address(housenumber=hnr)

        assert info['hnr'] == hnr.upper()
        assert info['hnr_tokens'] == "{-1}"

    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
        info = self.process_address(housenumber='134',
                                    conscriptionnumber='134',
                                    streetnumber='99a')

        assert set(info['hnr'].split(';')) == set(('134', '99 A'))
        assert info['hnr_tokens'] == "{-1,-2}"

    def test_process_place_housenumbers_cached(self, getorcreate_hnr_id):
        info = self.process_address(housenumber="45")
        assert info['hnr_tokens'] == "{-1}"

        info = self.process_address(housenumber="46")
        assert info['hnr_tokens'] == "{-2}"

        # Already cached numbers must reuse their token ids.
        info = self.process_address(housenumber="41;45")
        assert eval(info['hnr_tokens']) == {-1, -3}

        info = self.process_address(housenumber="41")
        assert eval(info['hnr_tokens']) == {-3}
class TestUpdateWordTokens:
    """ Tests for removal of housenumber tokens that are no longer
        referenced by search_name or placex.
    """

    @pytest.fixture(autouse=True)
    def setup(self, tokenizer_factory, table_factory, placex_table, word_table):
        table_factory('search_name', 'place_id BIGINT, name_vector INT[]')
        self.tok = tokenizer_factory()

    @pytest.fixture
    def search_entry(self, temp_db_cursor):
        place_id = itertools.count(1000)

        def _insert(*args):
            temp_db_cursor.execute("INSERT INTO search_name VALUES (%s, %s)",
                                   (next(place_id), list(args)))

        return _insert

    @pytest.fixture(params=['simple', 'analyzed'])
    def add_housenumber(self, request, word_table):
        # 'simple' stores the housenumber as a plain string,
        # 'analyzed' as a list of lookup terms.
        if request.param == 'simple':
            def _make(hid, hnr):
                word_table.add_housenumber(hid, hnr)
        elif request.param == 'analyzed':
            def _make(hid, hnr):
                word_table.add_housenumber(hid, [hnr])

        return _make

    @pytest.mark.parametrize('hnr', ('1a', '1234567', '34 5'))
    def test_remove_unused_housenumbers(self, add_housenumber, word_table, hnr):
        word_table.add_housenumber(1000, hnr)

        assert word_table.count_housenumbers() == 1
        self.tok.update_word_tokens()
        assert word_table.count_housenumbers() == 0

    def test_keep_unused_numeral_housenumbers(self, add_housenumber, word_table):
        add_housenumber(1000, '5432')

        assert word_table.count_housenumbers() == 1
        self.tok.update_word_tokens()
        assert word_table.count_housenumbers() == 1

    def test_keep_housenumbers_from_search_name_table(self, add_housenumber, word_table, search_entry):
        add_housenumber(9999, '5432a')
        add_housenumber(9991, '9 a')
        search_entry(123, 9999, 34)

        assert word_table.count_housenumbers() == 2
        self.tok.update_word_tokens()
        assert word_table.count_housenumbers() == 1

    def test_keep_housenumbers_from_placex_table(self, add_housenumber, word_table, placex_table):
        add_housenumber(9999, '5432a')
        add_housenumber(9990, '34z')
        placex_table.add(housenumber='34z')
        placex_table.add(housenumber='25432a')

        assert word_table.count_housenumbers() == 2
        self.tok.update_word_tokens()
        assert word_table.count_housenumbers() == 1

    def test_keep_housenumbers_from_placex_table_hnr_list(self, add_housenumber, word_table, placex_table):
        add_housenumber(9991, '9 b')
        add_housenumber(9990, '34z')
        placex_table.add(housenumber='9 a;9 b;9 c')

        assert word_table.count_housenumbers() == 2
        self.tok.update_word_tokens()
        assert word_table.count_housenumbers() == 1
| 25,149 | 33.078591 | 120 | py |
Nominatim | Nominatim-master/test/python/tokenizer/test_place_sanitizer.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for execution of the sanitztion step.
"""
import pytest
from nominatim.errors import UsageError
import nominatim.tokenizer.place_sanitizer as sanitizer
from nominatim.data.place_info import PlaceInfo
def test_placeinfo_clone_new_name():
place = sanitizer.PlaceName('foo', 'ki', 'su')
newplace = place.clone(name='bar')
assert place.name == 'foo'
assert newplace.name == 'bar'
assert newplace.kind == 'ki'
assert newplace.suffix == 'su'
def test_placeinfo_clone_merge_attr():
    """ Cloning with attributes must merge them without changing the original. """
    place = sanitizer.PlaceName('foo', 'ki', 'su')
    place.set_attr('a1', 'v1')
    place.set_attr('a2', 'v2')

    newplace = place.clone(attr={'a2': 'new', 'b2': 'foo'})

    assert place.get_attr('a2') == 'v2'
    assert place.get_attr('b2') is None
    assert newplace.get_attr('a1') == 'v1'
    assert newplace.get_attr('a2') == 'new'
    assert newplace.get_attr('b2') == 'foo'
def test_placeinfo_has_attr():
    """ has_attr must only report attributes that were actually set. """
    place = sanitizer.PlaceName('foo', 'ki', 'su')
    place.set_attr('a1', 'v1')

    assert place.has_attr('a1')
    assert not place.has_attr('whatever')
def test_sanitizer_default(def_config):
    """ The split-name-list sanitizer must split multi-value names and
        leave address entries untouched.
    """
    san = sanitizer.PlaceSanitizer([{'step': 'split-name-list'}], def_config)

    name, address = san.process_names(PlaceInfo({'name': {'name:de:de': '1;2;3'},
                                                 'address': {'street': 'Bald'}}))

    assert len(name) == 3
    assert all(isinstance(n, sanitizer.PlaceName) for n in name)
    assert all(n.kind == 'name' for n in name)
    assert all(n.suffix == 'de:de' for n in name)

    assert len(address) == 1
    assert all(isinstance(n, sanitizer.PlaceName) for n in address)
@pytest.mark.parametrize('rules', [None, []])
def test_sanitizer_empty_list(def_config, rules):
    """ An empty or missing sanitizer configuration must pass names through. """
    san = sanitizer.PlaceSanitizer(rules, def_config)

    name, address = san.process_names(PlaceInfo({'name': {'name:de:de': '1;2;3'}}))

    assert len(name) == 1
    assert all(isinstance(n, sanitizer.PlaceName) for n in name)
def test_sanitizer_missing_step_definition(def_config):
    """ A sanitizer entry without a 'step' key must be rejected. """
    with pytest.raises(UsageError):
        # The constructed object was previously bound to an unused variable.
        sanitizer.PlaceSanitizer([{'id': 'split-name-list'}], def_config)
| 2,379 | 29.512821 | 84 | py |
Nominatim | Nominatim-master/test/python/tokenizer/test_factory.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for creating new tokenizers.
"""
import pytest
from nominatim.db import properties
from nominatim.tokenizer import factory
from nominatim.errors import UsageError
from dummy_tokenizer import DummyTokenizer
def test_setup_bad_tokenizer_name(project_env, monkeypatch):
    """ An unknown tokenizer name must raise a usage error. """
    monkeypatch.setenv('NOMINATIM_TOKENIZER', 'dummy')

    with pytest.raises(UsageError):
        factory.create_tokenizer(project_env)
class TestFactory:
    """ Tests for creating and loading tokenizers through the factory. """

    @pytest.fixture(autouse=True)
    def init_env(self, project_env, property_table, tokenizer_mock):
        self.config = project_env

    def test_setup_dummy_tokenizer(self, temp_db_conn):
        tokenizer = factory.create_tokenizer(self.config)

        assert isinstance(tokenizer, DummyTokenizer)
        assert tokenizer.init_state == "new"
        assert (self.config.project_dir / 'tokenizer').is_dir()

        assert properties.get_property(temp_db_conn, 'tokenizer') == 'dummy'

    def test_setup_tokenizer_dir_exists(self):
        (self.config.project_dir / 'tokenizer').mkdir()

        tokenizer = factory.create_tokenizer(self.config)

        assert isinstance(tokenizer, DummyTokenizer)
        assert tokenizer.init_state == "new"

    def test_setup_tokenizer_dir_failure(self):
        # A plain file where the tokenizer directory should go must fail.
        (self.config.project_dir / 'tokenizer').write_text("foo")

        with pytest.raises(UsageError):
            factory.create_tokenizer(self.config)

    def test_load_tokenizer(self):
        factory.create_tokenizer(self.config)

        tokenizer = factory.get_tokenizer_for_db(self.config)

        assert isinstance(tokenizer, DummyTokenizer)
        assert tokenizer.init_state == "loaded"

    def test_load_repopulate_tokenizer_dir(self):
        factory.create_tokenizer(self.config)

        # NOTE(review): this self-assignment looks like a no-op; presumably
        # a fresh project dir was intended here — confirm against upstream.
        self.config.project_dir = self.config.project_dir

        factory.get_tokenizer_for_db(self.config)

        assert (self.config.project_dir / 'tokenizer').exists()

    def test_load_missing_property(self, temp_db_cursor):
        factory.create_tokenizer(self.config)

        temp_db_cursor.execute("TRUNCATE TABLE nominatim_properties")

        with pytest.raises(UsageError):
            factory.get_tokenizer_for_db(self.config)
| 2,402 | 28.304878 | 76 | py |
Nominatim | Nominatim-master/test/python/tokenizer/test_legacy.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Test for legacy tokenizer.
"""
import shutil
import re
import pytest
from nominatim.data.place_info import PlaceInfo
from nominatim.tokenizer import legacy_tokenizer
from nominatim.db import properties
from nominatim.errors import UsageError
from mock_legacy_word_table import MockLegacyWordTable
# Force use of legacy word table
@pytest.fixture
def word_table(temp_db_conn):
    """ Override the word_table fixture with the legacy table layout. """
    return MockLegacyWordTable(temp_db_conn)
@pytest.fixture
def test_config(project_env, tmp_path):
    """ Project configuration with a fake module library and SQL files
        stripped of functions that would require the C module.
    """
    module_dir = tmp_path / 'module_src'
    module_dir.mkdir()
    (module_dir / 'nominatim.so').write_text('TEST nominatim.so')

    project_env.lib_dir.module = module_dir

    sqldir = tmp_path / 'sql'
    sqldir.mkdir()
    (sqldir / 'tokenizer').mkdir()

    # Get the original SQL but replace make_standard_name to avoid module use.
    init_sql = (project_env.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer.sql').read_text()
    for fn in ('transliteration', 'gettokenstring'):
        # Pass re.DOTALL as 'flags'; previously it was passed positionally
        # and therefore interpreted as the 'count' argument of re.sub.
        init_sql = re.sub(f'CREATE OR REPLACE FUNCTION {fn}[^;]*;',
                          '', init_sql, flags=re.DOTALL)
    init_sql += """
        CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
        RETURNS TEXT AS $$ SELECT lower(name); $$ LANGUAGE SQL;
    """
    # Also load util functions. Some are needed by the tokenizer.
    init_sql += (project_env.lib_dir.sql / 'functions' / 'utils.sql').read_text()
    (sqldir / 'tokenizer' / 'legacy_tokenizer.sql').write_text(init_sql)

    (sqldir / 'words.sql').write_text("SELECT 'a'")

    shutil.copy(str(project_env.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_tables.sql'),
                str(sqldir / 'tokenizer' / 'legacy_tokenizer_tables.sql'))

    project_env.lib_dir.sql = sqldir
    project_env.lib_dir.data = sqldir

    return project_env
@pytest.fixture
def tokenizer_factory(dsn, tmp_path, property_table):
    """ Factory that creates legacy tokenizers in a temporary directory. """
    (tmp_path / 'tokenizer').mkdir()

    def _maker():
        return legacy_tokenizer.create(dsn, tmp_path / 'tokenizer')

    return _maker
@pytest.fixture
def tokenizer_setup(tokenizer_factory, test_config, monkeypatch, sql_preprocessor):
    """ Fixture with a fully initialised legacy tokenizer database. """
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
@pytest.fixture
def analyzer(tokenizer_factory, test_config, monkeypatch, sql_preprocessor,
             word_table, temp_db_with_extensions, tmp_path):
    """ A name analyzer from a freshly initialised legacy tokenizer. """
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', ':: lower();')
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    with tok.name_analyzer() as analyzer:
        yield analyzer
@pytest.fixture
def make_standard_name(temp_db_cursor):
    """ Stub make_standard_name so tests do not need the C module. """
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
                              RETURNS TEXT AS $$ SELECT '#' || lower(name) || '#'; $$ LANGUAGE SQL""")
@pytest.fixture
def create_postcode_id(temp_db_cursor):
    """ Stub create_postcode_id that writes directly into the word table. """
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION create_postcode_id(postcode TEXT)
                              RETURNS BOOLEAN AS $$
                              INSERT INTO word (word_token, word, class, type)
                                VALUES (' ' || postcode, postcode, 'place', 'postcode')
                              RETURNING True;
                              $$ LANGUAGE SQL""")
def test_init_new(tokenizer_factory, test_config, monkeypatch,
                  temp_db_conn, sql_preprocessor):
    """ init_new_db must store the normalization rule and install the module. """
    monkeypatch.setenv('NOMINATIM_TERM_NORMALIZATION', 'xxvv')
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_NORMALIZATION) == 'xxvv'

    outfile = test_config.project_dir / 'module' / 'nominatim.so'

    assert outfile.exists()
    assert outfile.read_text() == 'TEST nominatim.so'
    # 33261 == 0o100755, i.e. a regular file with executable bits set.
    assert outfile.stat().st_mode == 33261
def test_init_module_load_failed(tokenizer_factory, test_config):
    """ Initialisation must fail when the module cannot be loaded. """
    tok = tokenizer_factory()

    with pytest.raises(UsageError):
        tok.init_new_db(test_config)
def test_init_module_custom(tokenizer_factory, test_config,
                            monkeypatch, tmp_path, sql_preprocessor):
    """ A custom module path must be used directly without copying. """
    module_dir = (tmp_path / 'custom').resolve()
    module_dir.mkdir()
    (module_dir / 'nominatim.so').write_text('CUSTOM nomiantim.so')
    monkeypatch.setenv('NOMINATIM_DATABASE_MODULE_PATH', str(module_dir))
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert not (test_config.project_dir / 'module').exists()
def test_init_from_project(tokenizer_setup, tokenizer_factory, test_config):
    """ Loading from an initialised project must restore the normalization. """
    tok = tokenizer_factory()

    tok.init_from_project(test_config)

    assert tok.normalization is not None
def test_update_sql_functions(sql_preprocessor, temp_db_conn,
                              tokenizer_factory, test_config, table_factory,
                              monkeypatch, temp_db_cursor):
    """ update_sql_functions must substitute templates and run the SQL file. """
    monkeypatch.setenv('NOMINATIM_MAX_WORD_FREQUENCY', '1133')
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)
    monkeypatch.undo()

    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_MAXWORDFREQ) == '1133'

    table_factory('test', 'txt TEXT')

    # Replace the tokenizer SQL with probe statements that expose the
    # template substitutions.
    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer.sql'
    func_file.write_text("""INSERT INTO test VALUES ('{{max_word_freq}}'),
                                                   ('{{modulepath}}')""")

    tok.update_sql_functions(test_config)

    test_content = temp_db_cursor.row_set('SELECT * FROM test')

    assert test_content == set((('1133', ), (str(test_config.project_dir / 'module'), )))
def test_finalize_import(tokenizer_factory, temp_db_conn,
                         temp_db_cursor, test_config, monkeypatch,
                         sql_preprocessor_cfg):
    """ finalize_import must run the index-creation SQL file. """
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)

    func_file = test_config.lib_dir.sql / 'tokenizer' / 'legacy_tokenizer_indices.sql'
    func_file.write_text("""CREATE FUNCTION test() RETURNS TEXT
                            AS $$ SELECT 'b'::text $$ LANGUAGE SQL""")

    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    tok.finalize_import(test_config)

    # Previously the comparison result was silently discarded; assert it.
    assert temp_db_cursor.scalar('SELECT test()') == 'b'
def test_migrate_database(tokenizer_factory, test_config, temp_db_conn, monkeypatch):
    """ Migration must set up properties and install the module. """
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.migrate_database(test_config)

    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_MAXWORDFREQ) is not None
    assert properties.get_property(temp_db_conn, legacy_tokenizer.DBCFG_NORMALIZATION) is not None

    outfile = test_config.project_dir / 'module' / 'nominatim.so'

    assert outfile.exists()
    assert outfile.read_text() == 'TEST nominatim.so'
    # 33261 == 0o100755, i.e. a regular file with executable bits set.
    assert outfile.stat().st_mode == 33261
def test_check_database(test_config, tokenizer_factory, monkeypatch,
                        temp_db_cursor, sql_preprocessor_cfg):
    """ A freshly initialised database must pass the consistency check. """
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    assert tok.check_database(False) is None
def test_check_database_no_tokenizer(test_config, tokenizer_factory):
    """ The check must report an error when no tokenizer is set up. """
    tok = tokenizer_factory()

    assert tok.check_database(False) is not None
def test_check_database_bad_setup(test_config, tokenizer_factory, monkeypatch,
                                  temp_db_cursor, sql_preprocessor_cfg):
    """ The check must report an error when the transliteration is broken. """
    monkeypatch.setattr(legacy_tokenizer, '_check_module', lambda m, c: None)
    tok = tokenizer_factory()
    tok.init_new_db(test_config)

    # Inject a bad transliteration.
    temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION make_standard_name(name TEXT)
                              RETURNS TEXT AS $$ SELECT 'garbage'::text; $$ LANGUAGE SQL""")

    assert tok.check_database(False) is not None
def test_update_statistics_reverse_only(word_table, tokenizer_factory):
    """ Statistics update must be a no-op without a search_name table. """
    tok = tokenizer_factory()
    tok.update_statistics()
def test_update_statistics(word_table, table_factory, temp_db_cursor, tokenizer_factory):
    """ Statistics update must fill the search_name_count column. """
    word_table.add_full_word(1000, 'hello')
    table_factory('search_name',
                  'place_id BIGINT, name_vector INT[]',
                  [(12, [1000])])
    tok = tokenizer_factory()

    tok.update_statistics()

    assert temp_db_cursor.scalar("""SELECT count(*) FROM word
                                    WHERE word_token like ' %' and
                                          search_name_count > 0""") > 0
def test_update_word_tokens(tokenizer_factory):
    """ update_word_tokens is a no-op for the legacy tokenizer. """
    tok = tokenizer_factory()

    # This is a noop and should just pass.
    tok.update_word_tokens()
def test_normalize(analyzer):
    """ Normalization must apply the configured lower-casing rule. """
    assert analyzer.normalize('TEsT') == 'test'
def test_update_postcodes_from_db_empty(analyzer, table_factory, word_table,
                                        create_postcode_id):
    """ All distinct postcodes from location_postcode must be added. """
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('12 34',), ('AB23',), ('1234',)))

    analyzer.update_postcodes_from_db()

    assert word_table.get_postcodes() == {'1234', '12 34', 'AB23'}
def test_update_postcodes_from_db_add_and_remove(analyzer, table_factory, word_table,
                                                 create_postcode_id):
    """ Postcodes no longer present in location_postcode must be removed. """
    table_factory('location_postcode', 'postcode TEXT',
                  content=(('1234',), ('45BC', ), ('XX45', )))
    word_table.add_postcode(' 1234', '1234')
    word_table.add_postcode(' 5678', '5678')

    analyzer.update_postcodes_from_db()

    assert word_table.get_postcodes() == {'1234', '45BC', 'XX45'}
def test_update_special_phrase_empty_table(analyzer, word_table, make_standard_name):
    """ Special phrases must be normalized and de-duplicated on insert. """
    analyzer.update_special_phrases([
        ("König bei", "amenity", "royal", "near"),
        ("Könige", "amenity", "royal", "-"),
        ("könige", "amenity", "royal", "-"),
        ("strasse", "highway", "primary", "in")
    ], True)

    assert word_table.get_special() \
               == set(((' #könig bei#', 'könig bei', 'amenity', 'royal', 'near'),
                       (' #könige#', 'könige', 'amenity', 'royal', None),
                       (' #strasse#', 'strasse', 'highway', 'primary', 'in')))
def test_update_special_phrase_delete_all(analyzer, word_table, make_standard_name):
    """ Updating with an empty list and replace=True must remove all phrases. """
    word_table.add_special(' #foo#', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' #bar#', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    analyzer.update_special_phrases([], True)

    assert word_table.count_special() == 0
def test_update_special_phrases_no_replace(analyzer, word_table, make_standard_name):
    """ Updating with replace=False must keep the existing phrases. """
    word_table.add_special(' #foo#', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' #bar#', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    analyzer.update_special_phrases([], False)

    assert word_table.count_special() == 2
def test_update_special_phrase_modify(analyzer, word_table, make_standard_name):
    """ Updating with replace=True must replace the full set of phrases. """
    word_table.add_special(' #foo#', 'foo', 'amenity', 'prison', 'in')
    word_table.add_special(' #bar#', 'bar', 'highway', 'road', None)

    assert word_table.count_special() == 2

    analyzer.update_special_phrases([
        ('prison', 'amenity', 'prison', 'in'),
        ('bar', 'highway', 'road', '-'),
        ('garden', 'leisure', 'garden', 'near')
    ], True)

    assert word_table.get_special() \
               == set(((' #prison#', 'prison', 'amenity', 'prison', 'in'),
                       (' #bar#', 'bar', 'highway', 'road', None),
                       (' #garden#', 'garden', 'leisure', 'garden', 'near')))
def test_add_country_names(analyzer, word_table, make_standard_name):
    """ Country names must be normalized and de-duplicated on insert. """
    analyzer.add_country_names('de', {'name': 'Germany',
                                      'name:de': 'Deutschland',
                                      'short_name': 'germany'})

    assert word_table.get_country() \
               == {('de', ' #germany#'),
                   ('de', ' #deutschland#')}
def test_add_more_country_names(analyzer, word_table, make_standard_name):
    """ New country names must extend existing entries without removal. """
    word_table.add_country('fr', ' #france#')
    word_table.add_country('it', ' #italy#')
    word_table.add_country('it', ' #itala#')

    analyzer.add_country_names('it', {'name': 'Italy', 'ref': 'IT'})

    assert word_table.get_country() \
               == {('fr', ' #france#'),
                   ('it', ' #italy#'),
                   ('it', ' #itala#'),
                   ('it', ' #it#')}
@pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
def test_process_place_postcode(analyzer, create_postcode_id, word_table, pcode):
    """ Valid postcodes must be entered into the word table. """
    analyzer.process_place(PlaceInfo({'address': {'postcode': pcode}}))

    assert word_table.get_postcodes() == {pcode, }
@pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
def test_process_place_bad_postcode(analyzer, create_postcode_id, word_table, pcode):
    """ Malformed postcodes must not be entered into the word table. """
    analyzer.process_place(PlaceInfo({'address': {'postcode': pcode}}))

    assert not word_table.get_postcodes()
class TestHousenumberName:
    """ Tests for housenumber token creation in the legacy tokenizer. """

    @staticmethod
    @pytest.fixture(autouse=True)
    def setup_create_housenumbers(temp_db_cursor):
        # Stub that echoes the housenumbers back as tokens and norm text.
        temp_db_cursor.execute("""CREATE OR REPLACE FUNCTION create_housenumbers(
                                      housenumbers TEXT[],
                                      OUT tokens TEXT, OUT normtext TEXT)
                                  AS $$
                                  SELECT housenumbers::TEXT, array_to_string(housenumbers, ';')
                                  $$ LANGUAGE SQL""")

    @staticmethod
    @pytest.mark.parametrize('hnr', ['123a', '1', '101'])
    def test_process_place_housenumbers_simple(analyzer, hnr):
        info = analyzer.process_place(PlaceInfo({'address': {'housenumber': hnr}}))

        assert info['hnr'] == hnr
        assert info['hnr_tokens'].startswith("{")

    @staticmethod
    def test_process_place_housenumbers_lists(analyzer):
        info = analyzer.process_place(PlaceInfo({'address': {'conscriptionnumber': '1; 2;3'}}))

        assert set(info['hnr'].split(';')) == set(('1', '2', '3'))

    @staticmethod
    def test_process_place_housenumbers_duplicates(analyzer):
        info = analyzer.process_place(PlaceInfo({'address': {'housenumber': '134',
                                                             'conscriptionnumber': '134',
                                                             'streetnumber': '99a'}}))

        assert set(info['hnr'].split(';')) == set(('134', '99a'))
class TestPlaceNames:
    """ Tests for name token creation in the legacy tokenizer. """

    @pytest.fixture(autouse=True)
    def setup(self, analyzer):
        self.analyzer = analyzer

    def expect_name_terms(self, info, *expected_terms):
        """ Check that the name tokens in 'info' are exactly the tokens
            for the given terms.
        """
        tokens = self.analyzer.get_word_token_info(list(expected_terms))
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        assert eval(info['names']) == set((t[2] for t in tokens)), \
               f"Expected: {tokens}\nGot: {info['names']}"

    def process_named_place(self, names):
        return self.analyzer.process_place(PlaceInfo({'name': names}))

    def test_simple_names(self):
        info = self.process_named_place({'name': 'Soft bAr', 'ref': '34'})

        self.expect_name_terms(info, '#Soft bAr', '#34', 'Soft', 'bAr', '34')

    @pytest.mark.parametrize('sep', [',', ';'])
    def test_names_with_separator(self, sep):
        info = self.process_named_place({'name': sep.join(('New York', 'Big Apple'))})

        self.expect_name_terms(info, '#New York', '#Big Apple',
                               'new', 'york', 'big', 'apple')

    def test_full_names_with_bracket(self):
        info = self.process_named_place({'name': 'Houseboat (left)'})

        self.expect_name_terms(info, '#Houseboat (left)', '#Houseboat',
                               'houseboat', '(left)')

    def test_country_name(self, word_table):
        place = PlaceInfo({'name': {'name': 'Norge'},
                           'country_code': 'no',
                           'rank_address': 4,
                           'class': 'boundary',
                           'type': 'administrative'})

        info = self.analyzer.process_place(place)

        self.expect_name_terms(info, '#norge', 'norge')
        assert word_table.get_country() == {('no', ' norge')}
class TestPlaceAddress:
    """ Tests for processing of the address parts of a place.

        Fix: removed a leftover debug `print(info)` from
        test_process_place_address_terms.
    """

    @pytest.fixture(autouse=True)
    def setup(self, analyzer):
        self.analyzer = analyzer


    @pytest.fixture
    def getorcreate_hnr_id(self, temp_db_cursor):
        # Stub for the DB-side token function: hands out predictable
        # negative ids, so tests can assert on the exact token string.
        temp_db_cursor.execute("""CREATE SEQUENCE seq_hnr start 1;
                                  CREATE OR REPLACE FUNCTION getorcreate_housenumber_id(lookup_word TEXT)
                                  RETURNS INTEGER AS $$
                                  SELECT -nextval('seq_hnr')::INTEGER; $$ LANGUAGE SQL""")


    def process_address(self, **kwargs):
        """ Run the given tags through the analyzer as the address of a place.
        """
        return self.analyzer.process_place(PlaceInfo({'address': kwargs}))


    def name_token_set(self, *expected_terms):
        """ Return the set of token ids for the given search terms.
            All terms must already be known to the analyzer.
        """
        tokens = self.analyzer.get_word_token_info(list(expected_terms))
        for token in tokens:
            assert token[2] is not None, "No token for {0}".format(token)

        return {t[2] for t in tokens}


    @pytest.mark.parametrize('pcode', ['12345', 'AB 123', '34-345'])
    def test_process_place_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert word_table.get_postcodes() == {pcode, }


    @pytest.mark.parametrize('pcode', ['12:23', 'ab;cd;f', '123;836'])
    def test_process_place_bad_postcode(self, word_table, pcode):
        self.process_address(postcode=pcode)

        assert not word_table.get_postcodes()


    @pytest.mark.parametrize('hnr', ['123a', '0', '101'])
    def test_process_place_housenumbers_simple(self, hnr, getorcreate_hnr_id):
        info = self.process_address(housenumber=hnr)

        assert info['hnr'] == hnr.lower()
        assert info['hnr_tokens'] == "{-1}"


    def test_process_place_housenumbers_lists(self, getorcreate_hnr_id):
        info = self.process_address(conscriptionnumber='1; 2;3')

        assert set(info['hnr'].split(';')) == {'1', '2', '3'}
        assert info['hnr_tokens'] == "{-1,-2,-3}"


    def test_process_place_housenumbers_duplicates(self, getorcreate_hnr_id):
        # Identical numbers from different tags are tokenized only once.
        info = self.process_address(housenumber='134',
                                    conscriptionnumber='134',
                                    streetnumber='99A')

        assert set(info['hnr'].split(';')) == {'134', '99a'}
        assert info['hnr_tokens'] == "{-1,-2}"


    def test_process_place_street(self):
        # legacy tokenizer only indexes known names
        self.analyzer.process_place(PlaceInfo({'name': {'name' : 'Grand Road'}}))
        info = self.process_address(street='Grand Road')

        # 'street' contains a set literal produced by the test database
        # itself, so eval() only ever sees trusted input here.
        assert eval(info['street']) == self.name_token_set('#Grand Road')


    def test_process_place_street_empty(self):
        info = self.process_address(street='🜵')

        assert info['street'] == '{}'


    def test_process_place_place(self):
        self.analyzer.process_place(PlaceInfo({'name': {'name' : 'Honu Lulu'}}))
        info = self.process_address(place='Honu Lulu')

        assert eval(info['place_search']) == self.name_token_set('#Honu Lulu',
                                                                 'Honu', 'Lulu')
        assert eval(info['place_match']) == self.name_token_set('#Honu Lulu')


    def test_process_place_place_empty(self):
        info = self.process_address(place='🜵')

        assert 'place' not in info


    def test_process_place_address_terms(self):
        # Only known names become address terms; unknown parts ('Hauptstr',
        # the free-form 'full') are dropped from the result.
        for name in ('Zwickau', 'Haupstraße', 'Sachsen'):
            self.analyzer.process_place(PlaceInfo({'name': {'name' : name}}))
        info = self.process_address(country='de', city='Zwickau', state='Sachsen',
                                    suburb='Zwickau', street='Hauptstr',
                                    full='right behind the church')

        city = self.name_token_set('ZWICKAU')
        state = self.name_token_set('SACHSEN')

        result = {k: eval(v[0]) for k, v in info['addr'].items()}

        assert result == {'city': city, 'suburb': city, 'state': state}


    def test_process_place_address_terms_empty(self):
        info = self.process_address(country='de', city=' ', street='Hauptstr',
                                    full='right behind the church')

        assert 'addr' not in info
| 21,117 | 34.672297 | 105 | py |
Nominatim | Nominatim-master/test/python/tokenizer/test_icu_rule_loader.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for converting a config file to ICU rules.
"""
from textwrap import dedent
import pytest
import yaml
from nominatim.tokenizer.icu_rule_loader import ICURuleLoader
from nominatim.errors import UsageError
from icu import Transliterator
CONFIG_SECTIONS = ('normalization', 'transliteration', 'token-analysis')
class TestIcuRuleLoader:
    """ Tests for loading and compiling ICU tokenizer rules from a
        configuration file in the project directory.
    """

    @pytest.fixture(autouse=True)
    def init_env(self, project_env):
        self.project_env = project_env


    def write_config(self, content):
        # Write the given (dedented) text as the project's ICU tokenizer config.
        (self.project_env.project_dir / 'icu_tokenizer.yaml').write_text(dedent(content))


    def config_rules(self, *variants):
        # Write a config with standard normalization/transliteration sections
        # and the given variant lines added under the generic analyzer.
        content = dedent("""\
        normalization:
            - ":: NFD ()"
            - "[[:Nonspacing Mark:] [:Cf:]] >"
            - ":: lower ()"
            - "[[:Punctuation:][:Space:]]+ > ' '"
            - ":: NFC ()"
        transliteration:
            - ":: Latin ()"
            - "[[:Punctuation:][:Space:]]+ > ' '"
        token-analysis:
            - analyzer: generic
              variants:
                  - words:
        """)
        content += '\n'.join((" - " + s for s in variants)) + '\n'
        self.write_config(content)


    def get_replacements(self, *variants):
        # Load the rules for the given variant lines and return the compiled
        # replacement table of the generic analyzer, sorted for comparison.
        self.config_rules(*variants)
        loader = ICURuleLoader(self.project_env)
        rules = loader.analysis[None].config['replacements']

        return sorted((k, sorted(v)) for k,v in rules)


    def test_empty_rule_set(self):
        # An empty configuration is valid and yields empty rule strings.
        self.write_config("""\
            normalization:
            transliteration:
            token-analysis:
                - analyzer: generic
                  variants:
            """)

        rules = ICURuleLoader(self.project_env)
        assert rules.get_search_rules() == ''
        assert rules.get_normalization_rules() == ''
        assert rules.get_transliteration_rules() == ''


    @pytest.mark.parametrize("section", CONFIG_SECTIONS)
    def test_missing_section(self, section):
        # Leaving out any one of the mandatory sections is a usage error.
        rule_cfg = { s: [] for s in CONFIG_SECTIONS if s != section}
        self.write_config(yaml.dump(rule_cfg))

        with pytest.raises(UsageError):
            ICURuleLoader(self.project_env)


    def test_get_search_rules(self):
        # Search rules combine normalization and transliteration.
        self.config_rules()
        loader = ICURuleLoader(self.project_env)

        rules = loader.get_search_rules()
        trans = Transliterator.createFromRules("test", rules)

        assert trans.transliterate(" Baum straße ") == " baum straße "
        assert trans.transliterate(" Baumstraße ") == " baumstraße "
        assert trans.transliterate(" Baumstrasse ") == " baumstrasse "
        assert trans.transliterate(" Baumstr ") == " baumstr "
        assert trans.transliterate(" Baumwegstr ") == " baumwegstr "
        assert trans.transliterate(" Αθήνα ") == " athēna "
        assert trans.transliterate(" проспект ") == " prospekt "


    def test_get_normalization_rules(self):
        # Normalization lower-cases and folds punctuation but keeps the script.
        self.config_rules()
        loader = ICURuleLoader(self.project_env)
        rules = loader.get_normalization_rules()
        trans = Transliterator.createFromRules("test", rules)

        assert trans.transliterate(" проспект-Prospekt ") == " проспект prospekt "


    def test_get_transliteration_rules(self):
        # Transliteration converts to Latin but does not change case.
        self.config_rules()
        loader = ICURuleLoader(self.project_env)
        rules = loader.get_transliteration_rules()
        trans = Transliterator.createFromRules("test", rules)

        assert trans.transliterate(" проспект-Prospekt ") == " prospekt Prospekt "


    def test_transliteration_rules_from_file(self):
        # '!include' pulls additional rules from a file in the project dir.
        self.write_config("""\
            normalization:
            transliteration:
                - "'ax' > 'b'"
                - !include transliteration.yaml
            token-analysis:
                - analyzer: generic
                  variants:
            """)
        transpath = self.project_env.project_dir / ('transliteration.yaml')
        transpath.write_text('- "x > y"')

        loader = ICURuleLoader(self.project_env)
        rules = loader.get_transliteration_rules()
        trans = Transliterator.createFromRules("test", rules)

        assert trans.transliterate(" axxt ") == " byt "


    def test_search_rules(self):
        # Abbreviation variants must not fire on mere substrings ('Nostreet').
        self.config_rules('~street => s,st', 'master => mstr')
        proc = ICURuleLoader(self.project_env).make_token_analysis()

        assert proc.search.transliterate('Master Street').strip() == 'master street'
        assert proc.search.transliterate('Earnes St').strip() == 'earnes st'
        assert proc.search.transliterate('Nostreet').strip() == 'nostreet'


    @pytest.mark.parametrize("variant", ['foo > bar', 'foo -> bar -> bar',
                                         '~foo~ -> bar', 'fo~ o -> bar'])
    def test_invalid_variant_description(self, variant):
        # Syntactically broken variant lines must raise a usage error.
        self.config_rules(variant)
        with pytest.raises(UsageError):
            ICURuleLoader(self.project_env)

    # The following tests check the compiled replacement table for the
    # different variant operators: '->' adds a variant (original kept),
    # '=>' replaces it; '~' allows decomposition at that end; '|' restricts
    # to the non-decomposed form; '^'/'$' anchor to begin/end of name.

    def test_add_full(self):
        repl = self.get_replacements("foo -> bar")

        assert repl == [(' foo ', [' bar', ' foo'])]


    def test_replace_full(self):
        repl = self.get_replacements("foo => bar")

        assert repl == [(' foo ', [' bar'])]


    def test_add_suffix_no_decompose(self):
        repl = self.get_replacements("~berg |-> bg")

        assert repl == [(' berg ', [' berg', ' bg']),
                        ('berg ', ['berg', 'bg'])]


    def test_replace_suffix_no_decompose(self):
        repl = self.get_replacements("~berg |=> bg")

        assert repl == [(' berg ', [' bg']),('berg ', ['bg'])]


    def test_add_suffix_decompose(self):
        repl = self.get_replacements("~berg -> bg")

        assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
                        ('berg ', [' berg', ' bg', 'berg', 'bg'])]


    def test_replace_suffix_decompose(self):
        repl = self.get_replacements("~berg => bg")

        assert repl == [(' berg ', [' bg', 'bg']),
                        ('berg ', [' bg', 'bg'])]


    def test_add_prefix_no_compose(self):
        repl = self.get_replacements("hinter~ |-> hnt")

        assert repl == [(' hinter', [' hinter', ' hnt']),
                        (' hinter ', [' hinter', ' hnt'])]


    def test_replace_prefix_no_compose(self):
        repl = self.get_replacements("hinter~ |=> hnt")

        assert repl == [(' hinter', [' hnt']), (' hinter ', [' hnt'])]


    def test_add_prefix_compose(self):
        repl = self.get_replacements("hinter~-> h")

        assert repl == [(' hinter', [' h', ' h ', ' hinter', ' hinter ']),
                        (' hinter ', [' h', ' h', ' hinter', ' hinter'])]


    def test_replace_prefix_compose(self):
        repl = self.get_replacements("hinter~=> h")

        assert repl == [(' hinter', [' h', ' h ']),
                        (' hinter ', [' h', ' h'])]


    def test_add_beginning_only(self):
        repl = self.get_replacements("^Premier -> Pr")

        assert repl == [('^ premier ', ['^ pr', '^ premier'])]


    def test_replace_beginning_only(self):
        repl = self.get_replacements("^Premier => Pr")

        assert repl == [('^ premier ', ['^ pr'])]


    def test_add_final_only(self):
        repl = self.get_replacements("road$ -> rd")

        assert repl == [(' road ^', [' rd ^', ' road ^'])]


    def test_replace_final_only(self):
        repl = self.get_replacements("road$ => rd")

        assert repl == [(' road ^', [' rd ^'])]


    def test_decompose_only(self):
        repl = self.get_replacements("~foo -> foo")

        assert repl == [(' foo ', [' foo', 'foo']),
                        ('foo ', [' foo', 'foo'])]


    def test_add_suffix_decompose_end_only(self):
        repl = self.get_replacements("~berg |-> bg", "~berg$ -> bg")

        assert repl == [(' berg ', [' berg', ' bg']),
                        (' berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^']),
                        ('berg ', ['berg', 'bg']),
                        ('berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^'])]


    def test_replace_suffix_decompose_end_only(self):
        repl = self.get_replacements("~berg |=> bg", "~berg$ => bg")

        assert repl == [(' berg ', [' bg']),
                        (' berg ^', [' bg ^', 'bg ^']),
                        ('berg ', ['bg']),
                        ('berg ^', [' bg ^', 'bg ^'])]


    def test_add_multiple_suffix(self):
        repl = self.get_replacements("~berg,~burg -> bg")

        assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
                        (' burg ', [' bg', ' burg', 'bg', 'burg']),
                        ('berg ', [' berg', ' bg', 'berg', 'bg']),
                        ('burg ', [' bg', ' burg', 'bg', 'burg'])]
| 8,899 | 31.129964 | 89 | py |
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_delete_tags.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the sanitizer that normalizes housenumbers.
"""
import pytest
from nominatim.data.place_info import PlaceInfo
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
class TestWithDefault:
    """ Tests for the delete-tags sanitizer without any configuration.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, type, **kwargs):
        # Process a place made up of the given tags and return the surviving
        # name and address entries as sorted (name, kind, suffix) triples.
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({type: tags, 'country_code': 'de', 'rank_address': 30})

        sanitizer = PlaceSanitizer([{'step': 'delete-tags'}], self.config)
        name, address = sanitizer.process_names(place)

        def sorted_triples(entries):
            return sorted((e.name, e.kind, e.suffix or '') for e in entries)

        return {'name': sorted_triples(name), 'address': sorted_triples(address)}


    def test_on_name(self):
        # By default every name tag is deleted.
        res = self.run_sanitizer_on('name', name='foo', ref='bar', ref_abc='baz')

        assert res.get('name') == []


    def test_on_address(self):
        # Address tags are left untouched by the default configuration.
        res = self.run_sanitizer_on('address', name='foo', ref='bar', ref_abc='baz')

        assert res.get('address') == [('bar', 'ref', ''), ('baz', 'ref', 'abc'),
                                      ('foo', 'name', '')]
class TestTypeField:
    """ Tests for the 'type' parameter of the delete-tags sanitizer.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, type, **kwargs):
        # All tags go into the name list; the 'type' parameter decides
        # whether the sanitizer works on names or addresses.
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': 'de', 'rank_address': 30})
        step = {'step': 'delete-tags', 'type': type}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        return sorted((e.name, e.kind, e.suffix or '') for e in name)


    def test_name_type(self):
        # type=name removes all name tags.
        assert self.run_sanitizer_on('name', name='foo', ref='bar', ref_abc='baz') == []


    def test_address_type(self):
        # type=address leaves the name list alone.
        res = self.run_sanitizer_on('address', name='foo', ref='bar', ref_abc='baz')

        assert res == [('bar', 'ref', ''), ('baz', 'ref', 'abc'),
                       ('foo', 'name', '')]
class TestFilterKind:
    """ Tests for the 'filter-kind' parameter of the delete-tags sanitizer.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, filt, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': 'de', 'rank_address': 30})
        step = {'step': 'delete-tags', 'filter-kind': filt}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        return sorted((e.name, e.kind, e.suffix or '') for e in name)


    def test_single_exact_name(self):
        # Only tags whose kind matches the filter are deleted.
        res = self.run_sanitizer_on(['name'], ref='foo', name='foo',
                                    name_abc='bar', ref_abc='bar')

        assert res == [('bar', 'ref', 'abc'), ('foo', 'ref', '')]


    def test_single_pattern(self):
        # The filter entries are regular expressions.
        res = self.run_sanitizer_on(['.*name'],
                                    name_fr='foo', ref_fr='foo', namexx_fr='bar',
                                    shortname_fr='bar', name='bar')

        assert res == [('bar', 'namexx', 'fr'), ('foo', 'ref', 'fr')]


    def test_multiple_patterns(self):
        res = self.run_sanitizer_on(['.*name', 'ref'],
                                    name_fr='foo', ref_fr='foo', oldref_fr='foo',
                                    namexx_fr='bar', shortname_fr='baz', name='baz')

        assert res == [('bar', 'namexx', 'fr'), ('foo', 'oldref', 'fr')]
class TestRankAddress:
    """ Tests for the 'rank_address' parameter of the delete-tags sanitizer.
        The test place always has address rank 30.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, rank_addr, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': 'de', 'rank_address': 30})
        step = {'step': 'delete-tags', 'rank_address': rank_addr}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        return sorted((e.name, e.kind, e.suffix or '') for e in name)


    def test_single_rank(self):
        assert self.run_sanitizer_on('30', name='foo', ref='bar') == []


    def test_single_rank_fail(self):
        assert self.run_sanitizer_on('28', name='foo', ref='bar') \
                   == [('bar', 'ref', ''), ('foo', 'name', '')]


    def test_ranged_rank_pass(self):
        assert self.run_sanitizer_on('26-30', name='foo', ref='bar') == []


    def test_ranged_rank_fail(self):
        assert self.run_sanitizer_on('26-29', name='foo', ref='bar') \
                   == [('bar', 'ref', ''), ('foo', 'name', '')]


    def test_mixed_rank_pass(self):
        # A mix of single ranks and ranges is allowed.
        assert self.run_sanitizer_on(['4', '20-28', '30', '10-12'],
                                     name='foo', ref='bar') == []


    def test_mixed_rank_fail(self):
        assert self.run_sanitizer_on(['4-8', '10', '26-29', '18'],
                                     name='foo', ref='bar') \
                   == [('bar', 'ref', ''), ('foo', 'name', '')]
class TestSuffix:
    """ Tests for the 'suffix' parameter of the delete-tags sanitizer.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, suffix, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': 'de', 'rank_address': 30})
        step = {'step': 'delete-tags', 'suffix': suffix}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        return sorted((e.name, e.kind, e.suffix or '') for e in name)


    def test_single_suffix(self):
        # Only tags carrying the matching suffix are removed.
        res = self.run_sanitizer_on('abc', name='foo', name_abc='foo',
                                    name_pqr='bar', ref='bar', ref_abc='baz')

        assert res == [('bar', 'name', 'pqr'), ('bar', 'ref', ''), ('foo', 'name', '')]


    def test_multiple_suffix(self):
        # Suffix entries are regular expressions as well.
        res = self.run_sanitizer_on(['abc.*', 'pqr'], name='foo', name_abcxx='foo',
                                    ref_pqr='bar', name_pqrxx='baz')

        assert res == [('baz', 'name', 'pqrxx'), ('foo', 'name', '')]
class TestCountryCodes:
    """ Tests for the 'country_code' parameter of the delete-tags sanitizer.
        The test place always lies in Germany ('de').
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, country_code, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': 'de', 'rank_address': 30})
        step = {'step': 'delete-tags', 'country_code': country_code}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        return sorted((e.name, e.kind) for e in name)


    def test_single_country_code_pass(self):
        assert self.run_sanitizer_on('de', name='foo', ref='bar') == []


    def test_single_country_code_fail(self):
        assert self.run_sanitizer_on('in', name='foo', ref='bar') \
                   == [('bar', 'ref'), ('foo', 'name')]


    def test_empty_country_code_list(self):
        # An empty list never matches, so nothing is deleted.
        assert self.run_sanitizer_on([], name='foo', ref='bar') \
                   == [('bar', 'ref'), ('foo', 'name')]


    def test_multiple_country_code_pass(self):
        assert self.run_sanitizer_on(['in', 'de', 'fr'], name='foo', ref='bar') == []


    def test_multiple_country_code_fail(self):
        assert self.run_sanitizer_on(['in', 'au', 'fr'], name='foo', ref='bar') \
                   == [('bar', 'ref'), ('foo', 'name')]
class TestAllParameters:
    """ Tests for the delete-tags sanitizer with all filter parameters
        set at the same time. Tags are only deleted when every filter matches.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, country_code, rank_addr, suffix, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': 'de', 'rank_address': 30})
        step = {'step': 'delete-tags',
                'type': 'name',
                'filter-kind': ['name', 'ref'],
                'country_code': country_code,
                'rank_address': rank_addr,
                'suffix': suffix,
                'name': r'[\s\S]*'}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        return sorted((e.name, e.kind, e.suffix or '') for e in name)


    def test_string_arguments_pass(self):
        res = self.run_sanitizer_on('de', '25-30', r'[\s\S]*',
                                    name='foo', ref='foo', name_abc='bar', ref_abc='baz')

        assert res == []


    def test_string_arguments_fail(self):
        res = self.run_sanitizer_on('in', '25-30', r'[\s\S]*',
                                    name='foo', ref='foo', name_abc='bar', ref_abc='baz')

        assert res == [('bar', 'name', 'abc'), ('baz', 'ref', 'abc'),
                       ('foo', 'name', ''), ('foo', 'ref', '')]


    def test_list_arguments_pass(self):
        res = self.run_sanitizer_on(['de', 'in'], ['20-28', '30'], [r'abc.*', r'[\s\S]*'],
                                    name='foo', ref='foo', name_abcxx='bar', ref_pqr='baz')

        assert res == []


    def test_list_arguments_fail(self):
        res = self.run_sanitizer_on(['de', 'in'], ['14', '20-29'], [r'abc.*', r'pqr'],
                                    name='foo', ref_abc='foo', name_abcxx='bar', ref_pqr='baz')

        assert res == [('bar', 'name', 'abcxx'), ('baz', 'ref', 'pqr'),
                       ('foo', 'name', ''), ('foo', 'ref', 'abc')]


    def test_mix_arguments_pass(self):
        res = self.run_sanitizer_on('de', ['10', '20-28', '30'], r'[\s\S]*',
                                    name_abc='foo', ref_abc='foo', name_abcxx='bar', ref_pqr='baz')

        assert res == []


    def test_mix_arguments_fail(self):
        res = self.run_sanitizer_on(['de', 'in'], ['10', '20-28', '30'], r'abc.*',
                                    name='foo', ref='foo', name_pqr='bar', ref_pqr='baz')

        assert res == [('bar', 'name', 'pqr'), ('baz', 'ref', 'pqr'),
                       ('foo', 'name', ''), ('foo', 'ref', '')]
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_split_name_list.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the sanitizer that splits multivalue lists.
"""
import pytest
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.data.place_info import PlaceInfo
from nominatim.errors import UsageError
class TestSplitName:
    """ Tests for the sanitizer that splits multi-value name lists.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, **kwargs):
        # Run with default delimiters and return (name, kind, suffix) triples.
        sanitizer = PlaceSanitizer([{'step': 'split-name-list'}], self.config)
        name, _ = sanitizer.process_names(PlaceInfo({'name': kwargs}))

        return sorted((p.name, p.kind, p.suffix) for p in name)


    def sanitize_with_delimiter(self, delimiter, name):
        # Run with a custom delimiter set on a single name tag.
        step = {'step': 'split-name-list', 'delimiters': delimiter}
        processed, _ = PlaceSanitizer([step], self.config)\
                           .process_names(PlaceInfo({'name': {'name': name}}))

        return sorted(p.name for p in processed)


    def test_simple(self):
        # Names without delimiters pass through unchanged.
        assert self.run_sanitizer_on(name='ABC') == [('ABC', 'name', None)]
        assert self.run_sanitizer_on(name='') == [('', 'name', None)]


    def test_splits(self):
        assert self.run_sanitizer_on(name='A;B;C') == [('A', 'name', None),
                                                       ('B', 'name', None),
                                                       ('C', 'name', None)]
        assert self.run_sanitizer_on(short_name=' House, boat ') \
                   == [('House', 'short_name', None),
                       ('boat', 'short_name', None)]


    def test_empty_fields(self):
        # Empty list entries are silently dropped.
        assert self.run_sanitizer_on(name='A;;B') == [('A', 'name', None),
                                                      ('B', 'name', None)]
        assert self.run_sanitizer_on(name='A; ,B') == [('A', 'name', None),
                                                       ('B', 'name', None)]
        assert self.run_sanitizer_on(name=' ;B') == [('B', 'name', None)]
        assert self.run_sanitizer_on(name='B,') == [('B', 'name', None)]


    def test_custom_delimiters(self):
        # Custom delimiters completely replace the default set.
        assert self.sanitize_with_delimiter(':', '12:45,3') == ['12', '45,3']
        assert self.sanitize_with_delimiter('\\', 'a;\\b!#@ \\') == ['a;', 'b!#@']
        assert self.sanitize_with_delimiter('[]', 'foo[to]be') == ['be', 'foo', 'to']
        assert self.sanitize_with_delimiter(' ', 'morning sun') == ['morning', 'sun']


    def test_empty_delimiter_set(self):
        with pytest.raises(UsageError):
            self.sanitize_with_delimiter('', 'abc')
def test_no_name_list(def_config):
    """ A place without names passes through with its address untouched.
    """
    place = PlaceInfo({'address': {'housenumber': '3'}})

    sanitizer = PlaceSanitizer([{'step': 'split-name-list'}], def_config)
    name, address = sanitizer.process_names(place)

    assert not name
    assert len(address) == 1
| 3,013 | 36.675 | 99 | py |
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_clean_tiger_tags.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for sanitizer that clean up TIGER tags.
"""
import pytest
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.data.place_info import PlaceInfo
class TestCleanTigerTags:
    """ Tests for the sanitizer that cleans up TIGER county tags.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, addr):
        sanitizer = PlaceSanitizer([{'step': 'clean-tiger-tags'}], self.config)
        _, outaddr = sanitizer.process_names(PlaceInfo({'address': addr}))

        return sorted((e.name, e.kind, e.suffix) for e in outaddr)


    @pytest.mark.parametrize('inname,outname', [('Hamilton, AL', 'Hamilton'),
                                                ('Little, Borough, CA', 'Little, Borough')])
    def test_well_formatted(self, inname, outname):
        # The trailing state abbreviation is stripped from the county name.
        assert self.run_sanitizer_on({'tiger:county': inname}) \
                   == [(outname, 'county', 'tiger')]


    @pytest.mark.parametrize('name', ('Hamilton', 'Big, Road', ''))
    def test_badly_formatted(self, name):
        # Names without a state part are still converted to a county tag.
        assert self.run_sanitizer_on({'tiger:county': name}) \
                   == [(name, 'county', 'tiger')]


    def test_unmatched(self):
        # Other tiger:* tags are left alone.
        assert self.run_sanitizer_on({'tiger:country': 'US'}) \
                   == [('US', 'tiger', 'country')]
| 1,470 | 32.431818 | 101 | py |
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_tag_analyzer_by_language.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the sanitizer that enables language-dependent analyzers.
"""
import pytest
from nominatim.data.place_info import PlaceInfo
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.data.country_info import setup_country_config
class TestWithDefaults:
    """ Tests for tag-analyzer-by-language without further configuration.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, country, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': country})
        sanitizer = PlaceSanitizer([{'step': 'tag-analyzer-by-language'}], self.config)

        name, _ = sanitizer.process_names(place)

        return sorted((p.name, p.kind, p.suffix, p.attr) for p in name)


    def test_no_names(self):
        assert self.run_sanitizer_on('de') == []


    def test_simple(self):
        # Language suffixes are turned into analyzer attributes.
        res = self.run_sanitizer_on('fr', name='Foo', name_de='Zoo', ref_abc='M')

        assert res == [('Foo', 'name', None, {}),
                       ('M', 'ref', 'abc', {'analyzer': 'abc'}),
                       ('Zoo', 'name', 'de', {'analyzer': 'de'})]


    @pytest.mark.parametrize('suffix', ['DE', 'asbc'])
    def test_illegal_suffix(self, suffix):
        # Suffixes that are not lower-case language codes get no analyzer.
        assert self.run_sanitizer_on('fr', **{'name_' + suffix: 'Foo'}) \
                   == [('Foo', 'name', suffix, {})]
class TestFilterKind:
    """ Tests for the 'filter-kind' parameter of tag-analyzer-by-language.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, filt, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': 'de'})
        step = {'step': 'tag-analyzer-by-language', 'filter-kind': filt}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        return sorted((p.name, p.kind, p.suffix, p.attr) for p in name)


    def test_single_exact_name(self):
        # Only tags of the filtered kind receive an analyzer attribute.
        res = self.run_sanitizer_on(['name'], name_fr='A', ref_fr='12',
                                    shortname_fr='C', name='D')

        assert res == [('12', 'ref', 'fr', {}),
                       ('A', 'name', 'fr', {'analyzer': 'fr'}),
                       ('C', 'shortname', 'fr', {}),
                       ('D', 'name', None, {})]


    def test_single_pattern(self):
        # The filter entries are regular expressions.
        res = self.run_sanitizer_on(['.*name'],
                                    name_fr='A', ref_fr='12', namexx_fr='B',
                                    shortname_fr='C', name='D')

        assert res == [('12', 'ref', 'fr', {}),
                       ('A', 'name', 'fr', {'analyzer': 'fr'}),
                       ('B', 'namexx', 'fr', {}),
                       ('C', 'shortname', 'fr', {'analyzer': 'fr'}),
                       ('D', 'name', None, {})]


    def test_multiple_patterns(self):
        res = self.run_sanitizer_on(['.*name', 'ref'],
                                    name_fr='A', ref_fr='12', oldref_fr='X',
                                    namexx_fr='B', shortname_fr='C', name='D')

        assert res == [('12', 'ref', 'fr', {'analyzer': 'fr'}),
                       ('A', 'name', 'fr', {'analyzer': 'fr'}),
                       ('B', 'namexx', 'fr', {}),
                       ('C', 'shortname', 'fr', {'analyzer': 'fr'}),
                       ('D', 'name', None, {}),
                       ('X', 'oldref', 'fr', {})]
class TestDefaultCountry:
    """ Tests for the 'use-defaults' parameter which assigns analyzers from
        the official languages of the object's country.

        Refactor: run_sanitizer_append() and run_sanitizer_replace() were
        verbatim copies except for the 'mode' value; the shared logic now
        lives in _run_sanitizer(). Their signatures are unchanged.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        setup_country_config(def_config)
        self.config = def_config


    def _run_sanitizer(self, use_defaults, mode, country, **kwargs):
        # Shared driver: run the step, check the invariants on the resulting
        # attributes and return sorted (name, analyzer) pairs.
        place = PlaceInfo({'name': {k.replace('_', ':'): v for k, v in kwargs.items()},
                           'country_code': country})
        name, _ = PlaceSanitizer([{'step': 'tag-analyzer-by-language',
                                   'use-defaults': use_defaults,
                                   'mode': mode}],
                                 self.config).process_names(place)

        assert all(isinstance(p.attr, dict) for p in name)
        assert all(len(p.attr) <= 1 for p in name)
        assert all(not p.attr or ('analyzer' in p.attr and p.attr['analyzer'])
                   for p in name)

        return sorted([(p.name, p.attr.get('analyzer', '')) for p in name])


    def run_sanitizer_append(self, mode, country, **kwargs):
        return self._run_sanitizer(mode, 'append', country, **kwargs)


    def run_sanitizer_replace(self, mode, country, **kwargs):
        return self._run_sanitizer(mode, 'replace', country, **kwargs)


    def test_missing_country(self):
        # Without a country code, the name passes through without analyzer.
        place = PlaceInfo({'name': {'name': 'something'}})
        name, _ = PlaceSanitizer([{'step': 'tag-analyzer-by-language',
                                   'use-defaults': 'all',
                                   'mode': 'replace'}],
                                 self.config).process_names(place)

        assert len(name) == 1
        assert name[0].name == 'something'
        assert name[0].suffix is None
        assert 'analyzer' not in name[0].attr


    def test_mono_unknown_country(self):
        expect = [('XX', '')]

        assert self.run_sanitizer_replace('mono', 'xx', name='XX') == expect
        assert self.run_sanitizer_append('mono', 'xx', name='XX') == expect


    def test_mono_monoling_replace(self):
        res = self.run_sanitizer_replace('mono', 'de', name='Foo')

        assert res == [('Foo', 'de')]


    def test_mono_monoling_append(self):
        res = self.run_sanitizer_append('mono', 'de', name='Foo')

        assert res == [('Foo', ''), ('Foo', 'de')]


    def test_mono_multiling(self):
        # 'mono' does not apply to countries with several languages.
        expect = [('XX', '')]

        assert self.run_sanitizer_replace('mono', 'ch', name='XX') == expect
        assert self.run_sanitizer_append('mono', 'ch', name='XX') == expect


    def test_all_unknown_country(self):
        expect = [('XX', '')]

        assert self.run_sanitizer_replace('all', 'xx', name='XX') == expect
        assert self.run_sanitizer_append('all', 'xx', name='XX') == expect


    def test_all_monoling_replace(self):
        res = self.run_sanitizer_replace('all', 'de', name='Foo')

        assert res == [('Foo', 'de')]


    def test_all_monoling_append(self):
        res = self.run_sanitizer_append('all', 'de', name='Foo')

        assert res == [('Foo', ''), ('Foo', 'de')]


    def test_all_multiling_append(self):
        res = self.run_sanitizer_append('all', 'ch', name='XX')

        assert res == [('XX', ''),
                       ('XX', 'de'), ('XX', 'fr'), ('XX', 'it'), ('XX', 'rm')]


    def test_all_multiling_replace(self):
        res = self.run_sanitizer_replace('all', 'ch', name='XX')

        assert res == [('XX', 'de'), ('XX', 'fr'), ('XX', 'it'), ('XX', 'rm')]
class TestCountryWithWhitelist:
    """ Tests for the interaction of 'use-defaults' with a whitelist:
        only whitelisted languages may become analyzers.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, mode, country, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        place = PlaceInfo({'name': tags, 'country_code': country})
        step = {'step': 'tag-analyzer-by-language',
                'use-defaults': mode,
                'mode': 'replace',
                'whitelist': ['de', 'fr', 'ru']}

        name, _ = PlaceSanitizer([step], self.config).process_names(place)

        assert all(isinstance(p.attr, dict) for p in name)
        assert all(len(p.attr) <= 1 for p in name)
        assert all(not p.attr or ('analyzer' in p.attr and p.attr['analyzer'])
                   for p in name)

        return sorted([(p.name, p.attr.get('analyzer', '')) for p in name])


    def test_mono_monoling(self):
        assert self.run_sanitizer_on('mono', 'de', name='Foo') == [('Foo', 'de')]
        assert self.run_sanitizer_on('mono', 'pt', name='Foo') == [('Foo', '')]


    def test_mono_multiling(self):
        assert self.run_sanitizer_on('mono', 'ca', name='Foo') == [('Foo', '')]


    def test_all_monoling(self):
        assert self.run_sanitizer_on('all', 'de', name='Foo') == [('Foo', 'de')]
        assert self.run_sanitizer_on('all', 'pt', name='Foo') == [('Foo', '')]


    def test_all_multiling(self):
        # Only the whitelisted subset of the official languages survives.
        assert self.run_sanitizer_on('all', 'ca', name='Foo') == [('Foo', 'fr')]
        assert self.run_sanitizer_on('all', 'ch', name='Foo') \
                   == [('Foo', 'de'), ('Foo', 'fr')]
class TestWhiteList:
    """ Tests for the 'whitelist' parameter applied to name suffixes.
    """

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config


    def run_sanitizer_on(self, whitelist, **kwargs):
        tags = {key.replace('_', ':'): value for key, value in kwargs.items()}
        step = {'step': 'tag-analyzer-by-language',
                'mode': 'replace',
                'whitelist': whitelist}

        name, _ = PlaceSanitizer([step], self.config)\
                      .process_names(PlaceInfo({'name': tags}))

        assert all(isinstance(p.attr, dict) for p in name)
        assert all(len(p.attr) <= 1 for p in name)
        assert all(not p.attr or ('analyzer' in p.attr and p.attr['analyzer'])
                   for p in name)

        return sorted([(p.name, p.attr.get('analyzer', '')) for p in name])


    def test_in_whitelist(self):
        assert self.run_sanitizer_on(['de', 'xx'], ref_xx='123') == [('123', 'xx')]


    def test_not_in_whitelist(self):
        assert self.run_sanitizer_on(['de', 'xx'], ref_yy='123') == [('123', '')]


    def test_empty_whitelist(self):
        # With an empty whitelist no suffix is ever accepted.
        assert self.run_sanitizer_on([], ref_yy='123') == [('123', '')]
| 10,499 | 35.33218 | 88 | py |
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_strip_brace_terms.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the sanitizer that handles braced suffixes.
"""
import pytest
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.data.place_info import PlaceInfo
class TestStripBrace:
    """Tests for the sanitizer that strips braced suffixes from names."""

    @pytest.fixture(autouse=True)
    def setup_country(self, def_config):
        self.config = def_config

    def run_sanitizer_on(self, **kwargs):
        """Run strip-brace-terms over the given name tags and return sorted results."""
        sanitizer = PlaceSanitizer([{'step': 'strip-brace-terms'}], self.config)
        name, _ = sanitizer.process_names(PlaceInfo({'name': kwargs}))
        return sorted((entry.name, entry.kind, entry.suffix) for entry in name)

    def test_no_braces(self):
        """Names without braces pass through unchanged."""
        assert self.run_sanitizer_on(name='foo', ref='23') == [('23', 'ref', None),
                                                               ('foo', 'name', None)]

    def test_simple_braces(self):
        """A braced suffix produces an additional variant with the brace part removed."""
        assert self.run_sanitizer_on(name='Halle (Saale)', ref='3')\
            == [('3', 'ref', None), ('Halle', 'name', None), ('Halle (Saale)', 'name', None)]
        assert self.run_sanitizer_on(name='ack ( bar')\
            == [('ack', 'name', None), ('ack ( bar', 'name', None)]

    def test_only_braces(self):
        """A name consisting only of a braced term is kept as is."""
        assert self.run_sanitizer_on(name='(maybe)') == [('(maybe)', 'name', None)]

    def test_double_braces(self):
        """Everything from the first opening brace onwards is stripped."""
        assert self.run_sanitizer_on(name='a((b))') == [('a', 'name', None),
                                                        ('a((b))', 'name', None)]
        assert self.run_sanitizer_on(name='a (b) (c)') == [('a', 'name', None),
                                                           ('a (b) (c)', 'name', None)]
def test_no_names(def_config):
    """A place with only address tags yields no name entries."""
    info = PlaceInfo({'address': {'housenumber': '3'}})
    sanitizer = PlaceSanitizer([{'step': 'strip-brace-terms'}], def_config)
    name, address = sanitizer.process_names(info)
    assert not name
    assert len(address) == 1
| 2,024 | 34.526316 | 100 | py |
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_sanitizer_config.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for sanitizer configuration helper functions.
"""
import pytest
from nominatim.errors import UsageError
from nominatim.tokenizer.sanitizers.config import SanitizerConfig
def test_string_list_default_empty():
    """Without a value and without a default, an empty list is returned."""
    result = SanitizerConfig().get_string_list('op')
    assert result == []
def test_string_list_default_something():
    """The explicit default is returned when the key is missing."""
    result = SanitizerConfig().get_string_list('op', default=['a', 'b'])
    assert result == ['a', 'b']
def test_string_list_value_string():
    """A plain string value is wrapped into a single-element list."""
    result = SanitizerConfig({'op': 't'}).get_string_list('op', default=['a', 'b'])
    assert result == ['t']
def test_string_list_value_list():
    """A list value is passed through unchanged."""
    result = SanitizerConfig({'op': ['1', '2']}).get_string_list('op')
    assert result == ['1', '2']
def test_string_list_value_empty():
    """An empty string overrides the default and yields an empty list."""
    result = SanitizerConfig({'op': ''}).get_string_list('op', default=['a', 'b'])
    assert result == []
def test_string_list_value_dict():
    """Dict values are not valid string lists and raise a usage error."""
    with pytest.raises(UsageError):
        SanitizerConfig({'op': {'1': 'a'}}).get_string_list('op')
def test_string_list_value_int_list():
    """Lists must contain strings only; integer entries raise a usage error."""
    with pytest.raises(UsageError):
        SanitizerConfig({'op': [1, 2]}).get_string_list('op')
@pytest.mark.parametrize('inp', ('fg34', 'f\\f', 'morning [glory]', '56.78'))
def test_create_split_regex_no_params_unsplit(inp):
    """The default delimiter regex leaves strings without ',' or ';' intact."""
    splitter = SanitizerConfig().get_delimiter()
    assert list(splitter.split(inp)) == [inp]
@pytest.mark.parametrize('inp,outp', [('here,there', ['here', 'there']),
                                      ('ying;;yang', ['ying', 'yang']),
                                      (';a; ;c;d,', ['', 'a', '', 'c', 'd', '']),
                                      ('1, 3 ,5', ['1', '3', '5'])
                                     ])
def test_create_split_regex_no_params_split(inp, outp):
    """The default delimiter regex splits on commas and semicolons and trims spaces."""
    splitter = SanitizerConfig().get_delimiter()
    assert list(splitter.split(inp)) == outp
@pytest.mark.parametrize('delimiter', ['.', '\\', '[]', ' ', '/.*+'])
def test_create_split_regex_custom(delimiter):
    """A custom delimiter replaces the default ',' and ';' characters."""
    splitter = SanitizerConfig({'delimiters': delimiter}).get_delimiter()
    assert list(splitter.split(f'out{delimiter}house')) == ['out', 'house']
    assert list(splitter.split('out,house')) == ['out,house']
def test_create_split_regex_empty_delimiter():
    """An empty delimiter string is rejected with a usage error."""
    # The returned value is irrelevant here; the previous unused
    # `regex = ...` assignment has been dropped.
    with pytest.raises(UsageError):
        SanitizerConfig({'delimiters': ''}).get_delimiter()
@pytest.mark.parametrize('inp', ('name', 'name:de', 'na\\me', '.*', ''))
def test_create_name_filter_no_param_no_default(inp):
    """Without a 'name' option, the filter accepts any kind."""
    name_filter = SanitizerConfig({'filter-kind': 'place'}).get_filter('name')
    assert name_filter(inp)
@pytest.mark.parametrize('inp', ('name', 'name:de', 'na\\me', '.*', ''))
def test_create_name_filter_no_param_default_pass_all(inp):
    """The PASS_ALL default lets every kind through."""
    name_filter = SanitizerConfig().get_filter('name', 'PASS_ALL')
    assert name_filter(inp)
@pytest.mark.parametrize('inp', ('name', 'name:de', 'na\\me', '.*', ''))
def test_create_name_filter_no_param_default_fail_all(inp):
    """The FAIL_ALL default rejects every kind."""
    name_filter = SanitizerConfig().get_filter('name', 'FAIL_ALL')
    assert not name_filter(inp)
def test_create_name_filter_no_param_default_invalid_string():
    """Only PASS_ALL and FAIL_ALL are accepted as string defaults."""
    # Drop the previous unused `filt = ...` assignment; only the raise matters.
    with pytest.raises(ValueError):
        SanitizerConfig().get_filter('name', 'abc')
def test_create_name_filter_no_param_default_empty_list():
    """An empty default list is not a valid filter specification."""
    # Drop the previous unused `filt = ...` assignment; only the raise matters.
    with pytest.raises(ValueError):
        SanitizerConfig().get_filter('name', [])
@pytest.mark.parametrize('kind', ('de', 'name:de', 'ende'))
def test_create_kind_filter_default_positive(kind):
    """Kinds matching the default regex list are accepted."""
    kind_filter = SanitizerConfig().get_filter('filter-kind', ['.*de'])
    assert kind_filter(kind)
@pytest.mark.parametrize('kind', ('de', 'name:de', 'ende'))
def test_create_kind_filter_default_negetive(kind):
    """Kinds not matching the default regex are rejected."""
    # NOTE(review): 'negetive' in the function name is a typo for 'negative';
    # kept unchanged so the externally visible test id stays stable.
    kind_filter = SanitizerConfig().get_filter('filter-kind', ['.*fr'])
    assert not kind_filter(kind)
@pytest.mark.parametrize('kind', ('lang', 'lang:de', 'langxx'))
def test_create_kind_filter_custom_regex_positive(kind):
    """A configured filter-kind regex takes precedence over the default."""
    config = SanitizerConfig({'filter-kind': 'lang.*'})
    kind_filter = config.get_filter('filter-kind', ['.*fr'])
    assert kind_filter(kind)
@pytest.mark.parametrize('kind', ('de ', '123', '', 'bedece'))
def test_create_kind_filter_custom_regex_negative(kind):
    """Kinds that do not fully match the configured regex are rejected."""
    kind_filter = SanitizerConfig({'filter-kind': '.*de'}).get_filter('filter-kind')
    assert not kind_filter(kind)
@pytest.mark.parametrize('kind', ('name', 'fr', 'name:fr', 'frfr', '34'))
def test_create_kind_filter_many_positive(kind):
    """With several regexes configured, matching any one is sufficient."""
    config = SanitizerConfig({'filter-kind': ['.*fr', 'name', r'\d+']})
    kind_filter = config.get_filter('filter-kind')
    assert kind_filter(kind)
@pytest.mark.parametrize('kind', ('name:de', 'fridge', 'a34', '.*', '\\'))
def test_create_kind_filter_many_negative(kind):
    """Kinds matching none of the configured regexes are rejected."""
    config = SanitizerConfig({'filter-kind': ['.*fr', 'name', r'\d+']})
    kind_filter = config.get_filter('filter-kind')
    assert not kind_filter(kind)
| 4,802 | 31.02 | 90 | py |
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_clean_postcodes.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the sanitizer that normalizes postcodes.
"""
import pytest
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.data.place_info import PlaceInfo
from nominatim.data import country_info
@pytest.fixture
def sanitize(def_config, request):
    """Return a runner for the clean-postcodes sanitizer.

    Extra sanitizer options can be supplied via the 'sanitizer_params'
    marker on the test; keyword names have '_' converted to '-'.
    """
    country_info.setup_country_config(def_config)
    options = {'step': 'clean-postcodes'}
    for mark in request.node.iter_markers(name="sanitizer_params"):
        for key, value in mark.kwargs.items():
            options[key.replace('_', '-')] = value

    def _run(country=None, **kwargs):
        place = {'address': kwargs}
        if country is not None:
            place['country_code'] = country
        sanitizer = PlaceSanitizer([options], def_config)
        _, address = sanitizer.process_names(PlaceInfo(place))
        return sorted((item.kind, item.name) for item in address)

    return _run
@pytest.mark.parametrize("country", (None, 'ae'))
def test_postcode_no_country(sanitize, country):
    """Postcodes without a known country format become unofficial postcodes."""
    assert sanitize(country=country, postcode='23231') == [('unofficial_postcode', '23231')]
@pytest.mark.parametrize("country", (None, 'ae'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_no_country_drop(sanitize, country):
    """Without address conversion, unverifiable postcodes are dropped entirely."""
    assert sanitize(country=country, postcode='23231') == []
@pytest.mark.parametrize("postcode", ('12345', ' 12345 ', 'de 12345',
                                      'DE12345', 'DE 12345', 'DE-12345'))
def test_postcode_pass_good_format(sanitize, postcode):
    """Valid German postcodes are normalised to the bare five digits."""
    assert sanitize(country='de', postcode=postcode) == [('postcode', '12345')]
@pytest.mark.parametrize("postcode", ('123456', '', ' ', '.....',
                                      'DE 12345', 'DEF12345', 'CH 12345'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_drop_bad_format(sanitize, postcode):
    """Postcodes not matching the German pattern are dropped."""
    # NOTE(review): 'DE 12345' also appears in the good-format list above;
    # upstream uses a double space here — verify this entry against the
    # original source.
    assert sanitize(country='de', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('1234', '9435', '99000'))
def test_postcode_cyprus_pass(sanitize, postcode):
    """Postcodes matching the Cypriot format are kept unchanged."""
    assert sanitize(country='cy', postcode=postcode) == [('postcode', postcode)]
@pytest.mark.parametrize("postcode", ('91234', '99a45', '567'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_cyprus_fail(sanitize, postcode):
    """Postcodes outside the Cypriot format are dropped."""
    assert sanitize(country='cy', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('123456', 'A33F2G7'))
def test_postcode_kazakhstan_pass(sanitize, postcode):
    """Valid Kazakh postcodes are accepted unchanged."""
    assert sanitize(country='kz', postcode=postcode) == [('postcode', postcode)]
@pytest.mark.parametrize("postcode", ('V34T6Y923456', '99345'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_kazakhstan_fail(sanitize, postcode):
    """Invalid Kazakh postcodes are dropped."""
    assert sanitize(country='kz', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('675 34', '67534', 'SE-675 34', 'SE67534'))
def test_postcode_sweden_pass(sanitize, postcode):
    """Swedish postcodes are normalised to the '123 45' form."""
    assert sanitize(country='se', postcode=postcode) == [('postcode', '675 34')]
@pytest.mark.parametrize("postcode", ('67 345', '671123'))
@pytest.mark.sanitizer_params(convert_to_address=False)
def test_postcode_sweden_fail(sanitize, postcode):
    """Malformed Swedish postcodes are dropped."""
    assert sanitize(country='se', postcode=postcode) == []
@pytest.mark.parametrize("postcode", ('AB1', '123-456-7890', '1 as 44'))
@pytest.mark.sanitizer_params(default_pattern='[A-Z0-9- ]{3,12}')
def test_postcode_default_pattern_pass(sanitize, postcode):
    """Postcodes matching the configured default pattern are upper-cased."""
    assert sanitize(country='an', postcode=postcode) == [('postcode', postcode.upper())]
@pytest.mark.parametrize("postcode", ('C', '12', 'ABC123DEF 456', '1234,5678', '11223;11224'))
@pytest.mark.sanitizer_params(convert_to_address=False, default_pattern='[A-Z0-9- ]{3,12}')
def test_postcode_default_pattern_fail(sanitize, postcode):
    """Postcodes violating the default pattern are dropped."""
    assert sanitize(country='an', postcode=postcode) == []
| 3,980 | 37.650485 | 94 | py |
Nominatim | Nominatim-master/test/python/tokenizer/sanitizers/test_clean_housenumbers.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the sanitizer that normalizes housenumbers.
"""
import pytest
from nominatim.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim.data.place_info import PlaceInfo
@pytest.fixture
def sanitize(request, def_config):
    """Return a runner for the clean-housenumbers sanitizer.

    Extra options come from the 'sanitizer_params' marker on the test;
    keyword names have '_' converted to '-'.
    """
    options = {'step': 'clean-housenumbers'}
    for mark in request.node.iter_markers(name="sanitizer_params"):
        for key, value in mark.kwargs.items():
            options[key.replace('_', '-')] = value

    def _run(**kwargs):
        sanitizer = PlaceSanitizer([options], def_config)
        _, address = sanitizer.process_names(PlaceInfo({'address': kwargs}))
        return sorted((item.kind, item.name) for item in address)

    return _run
def test_simple_number(sanitize):
    """A plain numeric housenumber passes through unchanged."""
    assert sanitize(housenumber='34') == [('housenumber', '34')]
@pytest.mark.parametrize('number', ['1;2;3', '1,2,3', '1; 3 ,2',
                                    '2,,3,1', '1;2;3;;', ';3;2;1'])
def test_housenumber_lists(sanitize, number):
    """Delimited housenumber lists are split into individual numbers."""
    expected = [('housenumber', '1'), ('housenumber', '2'), ('housenumber', '3')]
    assert sanitize(housenumber=number) == expected
@pytest.mark.sanitizer_params(filter_kind=('number', 'streetnumber'))
def test_filter_kind(sanitize):
    """Only the configured kinds are rewritten into housenumbers."""
    expected = [('badnumber', '65'), ('housenumber', '34'), ('housenumber', '4')]
    assert sanitize(housenumber='34', number='4', badnumber='65') == expected
@pytest.mark.parametrize('number', ('6523', 'n/a', '4'))
def test_convert_to_name_converted(def_config, number):
    """Housenumbers matching a convert-to-name pattern become name entries."""
    step = {'step': 'clean-housenumbers',
            'convert-to-name': (r'\d+', 'n/a')}
    place = PlaceInfo({'address': {'housenumber': number}})
    names, address = PlaceSanitizer([step], def_config).process_names(place)
    assert ('housenumber', number) in {(p.kind, p.name) for p in names}
    assert 'housenumber' not in {p.kind for p in address}
@pytest.mark.parametrize('number', ('a54', 'n.a', 'bow'))
def test_convert_to_name_unconverted(def_config, number):
    """Housenumbers matching no convert-to-name pattern stay address entries."""
    step = {'step': 'clean-housenumbers',
            'convert-to-name': (r'\d+', 'n/a')}
    place = PlaceInfo({'address': {'housenumber': number}})
    names, address = PlaceSanitizer([step], def_config).process_names(place)
    assert 'housenumber' not in {p.kind for p in names}
    assert ('housenumber', number) in {(p.kind, p.name) for p in address}
| 2,573 | 36.304348 | 87 | py |
Nominatim | Nominatim-master/test/python/tokenizer/token_analysis/test_analysis_postcodes.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for special postcode analysis and variant generation.
"""
import pytest
from icu import Transliterator
import nominatim.tokenizer.token_analysis.postcodes as module
from nominatim.data.place_name import PlaceName
from nominatim.errors import UsageError
DEFAULT_NORMALIZATION = """ :: NFD ();
'🜳' > ' ';
[[:Nonspacing Mark:] [:Cf:]] >;
:: lower ();
[[:Punctuation:][:Space:]]+ > ' ';
:: NFC ();
"""
DEFAULT_TRANSLITERATION = """ :: Latin ();
'🜵' > ' ';
"""
@pytest.fixture
def analyser():
    """Create a postcode token analyzer using the default test rules."""
    trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    config = module.configure({'analyzer': 'postcodes'}, DEFAULT_NORMALIZATION)
    return module.create(norm, trans, config)
def get_normalized_variants(proc, name):
    """Normalise the name and return the analyzer's variants for it."""
    normalizer = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    return proc.compute_variants(normalizer.transliterate(name).strip())
@pytest.mark.parametrize('name,norm', [('12', '12'),
                                       ('A 34 ', 'A 34'),
                                       ('34-av', '34-AV')])
def test_get_canonical_id(analyser, name, norm):
    """Canonical ids have trimmed whitespace and upper-cased letters."""
    place = PlaceName(name=name, kind='', suffix='')
    assert analyser.get_canonical_id(place) == norm
@pytest.mark.parametrize('postcode,variants', [('12345', {'12345'}),
                                               ('AB-998', {'ab 998', 'ab998'}),
                                               ('23 FGH D3', {'23 fgh d3', '23fgh d3',
                                                              '23 fghd3', '23fghd3'})])
def test_compute_variants(analyser, postcode, variants):
    """Variants are unique and cover the optional removal of spaces."""
    computed = analyser.compute_variants(postcode)
    assert len(computed) == len(set(computed))
    assert set(computed) == variants
| 2,234 | 35.048387 | 87 | py |
Nominatim | Nominatim-master/test/python/tokenizer/token_analysis/test_generic_mutation.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for generic token analysis, mutation part.
"""
import pytest
from icu import Transliterator
import nominatim.tokenizer.token_analysis.generic as module
from nominatim.errors import UsageError
DEFAULT_NORMALIZATION = """ '🜳' > ' ';
[[:Nonspacing Mark:] [:Cf:]] >;
:: lower ();
[[:Punctuation:][:Space:]]+ > ' '
"""
DEFAULT_TRANSLITERATION = """ :: Latin ();
'🜵' > ' ';
"""
class TestMutationNoVariants:
    """Tests for character mutations applied without any variant rules."""

    def make_analyser(self, *mutations):
        """Build a generic analyzer from (pattern, replacements) pairs."""
        rules = {'analyzer': 'generic',
                 'mutations': [{'pattern': pattern, 'replacements': repl}
                               for pattern, repl in mutations]}
        trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
        norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
        config = module.configure(rules, norm, trans)
        self.analysis = module.create(norm, trans, config)

    def variants(self, name):
        """Return the set of variants for the normalized form of the name."""
        norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
        normalized = norm.transliterate(name).strip()
        return set(self.analysis.compute_variants(normalized))

    @pytest.mark.parametrize('pattern', ('(capture)', ['a list']))
    def test_bad_pattern(self, pattern):
        """Capturing groups and non-string patterns are rejected."""
        with pytest.raises(UsageError):
            self.make_analyser((pattern, ['b']))

    @pytest.mark.parametrize('replacements', (None, 'a string'))
    def test_bad_replacement(self, replacements):
        """Replacements must be given as a list."""
        with pytest.raises(UsageError):
            self.make_analyser(('a', replacements))

    def test_simple_replacement(self):
        """A single-character mutation is applied to every occurrence."""
        self.make_analyser(('a', ['b']))
        assert self.variants('none') == {'none'}
        assert self.variants('abba') == {'bbbb'}
        assert self.variants('2 aar') == {'2 bbr'}

    def test_multichar_replacement(self):
        """Patterns may span multiple characters including spaces."""
        self.make_analyser(('1 1', ['1 1 1']))
        assert self.variants('1 1456') == {'1 1 1456'}
        assert self.variants('1 1 1') == {'1 1 1 1'}

    def test_removement_replacement(self):
        """An empty replacement yields variants with the match removed."""
        self.make_analyser((' ', [' ', '']))
        assert self.variants('A 345') == {'a 345', 'a345'}
        assert self.variants('a g b') == {'a g b', 'ag b', 'a gb', 'agb'}

    def test_regex_pattern(self):
        """Patterns are treated as full regular expressions."""
        self.make_analyser(('[^a-z]+', ['XXX', ' ']))
        assert self.variants('a-34n12') == {'aXXXnXXX', 'aXXXn', 'a nXXX', 'a n'}

    def test_multiple_mutations(self):
        """Several mutations combine into the full cross product of variants."""
        self.make_analyser(('ä', ['ä', 'ae']), ('ö', ['ö', 'oe']))
        assert self.variants('Längenöhr') == {'längenöhr', 'laengenöhr',
                                              'längenoehr', 'laengenoehr'}
| 2,991 | 31.879121 | 85 | py |
Nominatim | Nominatim-master/test/python/tokenizer/token_analysis/test_generic.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for import name normalisation and variant generation.
"""
import pytest
from icu import Transliterator
import nominatim.tokenizer.token_analysis.generic as module
from nominatim.errors import UsageError
DEFAULT_NORMALIZATION = """ :: NFD ();
'🜳' > ' ';
[[:Nonspacing Mark:] [:Cf:]] >;
:: lower ();
[[:Punctuation:][:Space:]]+ > ' ';
:: NFC ();
"""
DEFAULT_TRANSLITERATION = """ :: Latin ();
'🜵' > ' ';
"""
def make_analyser(*variants, variant_only=False):
    """Create a generic token analyzer for the given variant rules."""
    rules = {'analyzer': 'generic', 'variants': [{'words': variants}]}
    if variant_only:
        rules['mode'] = 'variant-only'
    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
    config = module.configure(rules, norm, trans)
    return module.create(norm, trans, config)
def get_normalized_variants(proc, name):
    """Normalise the name and return the analyzer's variants for it."""
    normalizer = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    return proc.compute_variants(normalizer.transliterate(name).strip())
def test_no_variants():
    """Without variant rules, only the transliterated form is returned."""
    norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
    trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
    config = module.configure({'analyzer': 'generic'}, norm, trans)
    proc = module.create(norm, trans, config)
    assert get_normalized_variants(proc, '大德!') == ['dà dé']
def test_variants_empty():
    """Names that normalize to an empty string produce no variants."""
    analyzer = make_analyser('saint -> 🜵', 'street -> st')
    assert get_normalized_variants(analyzer, '🜵') == []
    assert get_normalized_variants(analyzer, '🜳') == []
    assert get_normalized_variants(analyzer, 'saint') == ['saint']
# Table of (variant rules, input name, expected variant set) driving
# test_variants below. '=>' replaces a term, '->' adds a variant;
# '~' allows decomposition, '^'/'$' anchor to beginning/end of the name.
VARIANT_TESTS = [
(('~strasse,~straße -> str', '~weg => weg'), "hallo", {'hallo'}),
(('weg => wg',), "holzweg", {'holzweg'}),
(('weg -> wg',), "holzweg", {'holzweg'}),
(('~weg => weg',), "holzweg", {'holz weg', 'holzweg'}),
(('~weg -> weg',), "holzweg", {'holz weg', 'holzweg'}),
(('~weg => w',), "holzweg", {'holz w', 'holzw'}),
(('~weg -> w',), "holzweg", {'holz weg', 'holzweg', 'holz w', 'holzw'}),
(('~weg => weg',), "Meier Weg", {'meier weg', 'meierweg'}),
(('~weg -> weg',), "Meier Weg", {'meier weg', 'meierweg'}),
(('~weg => w',), "Meier Weg", {'meier w', 'meierw'}),
(('~weg -> w',), "Meier Weg", {'meier weg', 'meierweg', 'meier w', 'meierw'}),
(('weg => wg',), "Meier Weg", {'meier wg'}),
(('weg -> wg',), "Meier Weg", {'meier weg', 'meier wg'}),
(('~strasse,~straße -> str', '~weg => weg'), "Bauwegstraße",
     {'bauweg straße', 'bauweg str', 'bauwegstraße', 'bauwegstr'}),
(('am => a', 'bach => b'), "am bach", {'a b'}),
(('am => a', '~bach => b'), "am bach", {'a b'}),
(('am -> a', '~bach -> b'), "am bach", {'am bach', 'a bach', 'am b', 'a b'}),
(('am -> a', '~bach -> b'), "ambach", {'ambach', 'am bach', 'amb', 'am b'}),
(('saint -> s,st', 'street -> st'), "Saint Johns Street",
     {'saint johns street', 's johns street', 'st johns street',
      'saint johns st', 's johns st', 'st johns st'}),
(('river$ -> r',), "River Bend Road", {'river bend road'}),
(('river$ -> r',), "Bent River", {'bent river', 'bent r'}),
(('^north => n',), "North 2nd Street", {'n 2nd street'}),
(('^north => n',), "Airport North", {'airport north'}),
(('am -> a',), "am am am am am am am am", {'am am am am am am am am'}),
(('am => a',), "am am am am am am am am", {'a a a a a a a a'})
]
@pytest.mark.parametrize("rules,name,variants", VARIANT_TESTS)
def test_variants(rules, name, variants):
    """Variant expansion produces exactly the expected set without duplicates."""
    analyzer = make_analyser(*rules)
    computed = get_normalized_variants(analyzer, name)
    assert len(computed) == len(set(computed))
    assert set(computed) == variants
# Table of (variant rules, input name, expected variant set) for
# variant-only mode, where the unmodified name itself is not returned.
VARIANT_ONLY_TESTS = [
(('weg => wg',), "hallo", set()),
(('weg => wg',), "Meier Weg", {'meier wg'}),
(('weg -> wg',), "Meier Weg", {'meier wg'}),
]
@pytest.mark.parametrize("rules,name,variants", VARIANT_ONLY_TESTS)
def test_variants_only(rules, name, variants):
    """In variant-only mode the unmodified name is excluded from the result."""
    analyzer = make_analyser(*rules, variant_only=True)
    computed = get_normalized_variants(analyzer, name)
    assert len(computed) == len(set(computed))
    assert set(computed) == variants
class TestGetReplacements:
    """Tests for the replacement table that variant descriptions compile to.

    Each entry in the table maps a search term (with spaces marking word
    boundaries and '^' marking the start/end of the name) to the sorted
    list of replacement strings.
    """

    @staticmethod
    def configure_rules(*variants):
        """Compile the given variant descriptions into an analyzer config."""
        rules = { 'analyzer': 'generic', 'variants': [{'words': variants}]}
        trans = Transliterator.createFromRules("test_trans", DEFAULT_TRANSLITERATION)
        norm = Transliterator.createFromRules("test_norm", DEFAULT_NORMALIZATION)
        return module.configure(rules, norm, trans)

    def get_replacements(self, *variants):
        """Return the compiled replacement table as a sorted list of pairs."""
        config = self.configure_rules(*variants)
        return sorted((k, sorted(v)) for k,v in config['replacements'])

    @pytest.mark.parametrize("variant", ['foo > bar', 'foo -> bar -> bar',
                                         '~foo~ -> bar', 'fo~ o -> bar'])
    def test_invalid_variant_description(self, variant):
        # Malformed rules (bad operator, chained arrows, misplaced '~').
        with pytest.raises(UsageError):
            self.configure_rules(variant)

    @pytest.mark.parametrize("rule", ["!!! -> bar", "bar => !!!"])
    def test_ignore_unnormalizable_terms(self, rule):
        # Terms that normalize to nothing are silently dropped.
        repl = self.get_replacements(rule)
        assert repl == []

    # --- full-word rules ('->' adds, '=>' replaces) ---

    def test_add_full(self):
        repl = self.get_replacements("foo -> bar")
        assert repl == [(' foo ', [' bar', ' foo'])]

    def test_replace_full(self):
        repl = self.get_replacements("foo => bar")
        assert repl == [(' foo ', [' bar'])]

    # --- suffix rules ('~term'); '|' suppresses decomposition ---

    def test_add_suffix_no_decompose(self):
        repl = self.get_replacements("~berg |-> bg")
        assert repl == [(' berg ', [' berg', ' bg']),
                        ('berg ', ['berg', 'bg'])]

    def test_replace_suffix_no_decompose(self):
        repl = self.get_replacements("~berg |=> bg")
        assert repl == [(' berg ', [' bg']),('berg ', ['bg'])]

    def test_add_suffix_decompose(self):
        repl = self.get_replacements("~berg -> bg")
        assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
                        ('berg ', [' berg', ' bg', 'berg', 'bg'])]

    def test_replace_suffix_decompose(self):
        repl = self.get_replacements("~berg => bg")
        assert repl == [(' berg ', [' bg', 'bg']),
                        ('berg ', [' bg', 'bg'])]

    # --- prefix rules ('term~') ---

    def test_add_prefix_no_compose(self):
        repl = self.get_replacements("hinter~ |-> hnt")
        assert repl == [(' hinter', [' hinter', ' hnt']),
                        (' hinter ', [' hinter', ' hnt'])]

    def test_replace_prefix_no_compose(self):
        repl = self.get_replacements("hinter~ |=> hnt")
        assert repl ==  [(' hinter', [' hnt']), (' hinter ', [' hnt'])]

    def test_add_prefix_compose(self):
        repl = self.get_replacements("hinter~-> h")
        assert repl == [(' hinter', [' h', ' h ', ' hinter', ' hinter ']),
                        (' hinter ', [' h', ' h', ' hinter', ' hinter'])]

    def test_replace_prefix_compose(self):
        repl = self.get_replacements("hinter~=> h")
        assert repl == [(' hinter', [' h', ' h ']),
                        (' hinter ', [' h', ' h'])]

    # --- anchored rules ('^' start of name, '$' end of name) ---

    def test_add_beginning_only(self):
        repl = self.get_replacements("^Premier -> Pr")
        assert repl == [('^ premier ', ['^ pr', '^ premier'])]

    def test_replace_beginning_only(self):
        repl = self.get_replacements("^Premier => Pr")
        assert repl == [('^ premier ', ['^ pr'])]

    def test_add_final_only(self):
        repl = self.get_replacements("road$ -> rd")
        assert repl == [(' road ^', [' rd ^', ' road ^'])]

    def test_replace_final_only(self):
        repl = self.get_replacements("road$ => rd")
        assert repl == [(' road ^', [' rd ^'])]

    # --- combinations ---

    def test_decompose_only(self):
        repl = self.get_replacements("~foo -> foo")
        assert repl == [(' foo ', [' foo', 'foo']),
                        ('foo ', [' foo', 'foo'])]

    def test_add_suffix_decompose_end_only(self):
        repl = self.get_replacements("~berg |-> bg", "~berg$ -> bg")
        assert repl == [(' berg ', [' berg', ' bg']),
                        (' berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^']),
                        ('berg ', ['berg', 'bg']),
                        ('berg ^', [' berg ^', ' bg ^', 'berg ^', 'bg ^'])]

    def test_replace_suffix_decompose_end_only(self):
        repl = self.get_replacements("~berg |=> bg", "~berg$ => bg")
        assert repl == [(' berg ', [' bg']),
                        (' berg ^', [' bg ^', 'bg ^']),
                        ('berg ', ['bg']),
                        ('berg ^', [' bg ^', 'bg ^'])]

    @pytest.mark.parametrize('rule', ["~berg,~burg -> bg",
                                      "~berg, ~burg -> bg",
                                      "~berg,,~burg -> bg"])
    def test_add_multiple_suffix(self, rule):
        # Comma-separated terms on the left-hand side share one replacement.
        repl = self.get_replacements(rule)
        assert repl == [(' berg ', [' berg', ' bg', 'berg', 'bg']),
                        (' burg ', [' bg', ' burg', 'bg', 'burg']),
                        ('berg ', [' berg', ' bg', 'berg', 'bg']),
                        ('burg ', [' bg', ' burg', 'bg', 'burg'])]
| 9,562 | 33.648551 | 85 | py |
Nominatim | Nominatim-master/test/python/indexer/test_indexing.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for running the indexing.
"""
import itertools
import pytest
from nominatim.indexer import indexer
from nominatim.tokenizer import factory
class IndexerTestDB:
    """Helper that installs a minimal indexing schema on a test database.

    Creates stripped-down versions of the placex, osmline and postcode
    tables together with the triggers and SQL functions the indexer calls,
    and offers methods to insert rows in unindexed state (indexed_status=1).
    """

    def __init__(self, conn):
        # Disjoint id ranges per table so generated place_ids never collide.
        self.placex_id = itertools.count(100000)
        self.osmline_id = itertools.count(500000)
        self.postcode_id = itertools.count(700000)
        self.conn = conn
        # Autocommit mode so the DDL below is immediately visible to the
        # indexer's own database connections.
        self.conn.set_isolation_level(0)
        with self.conn.cursor() as cur:
            cur.execute('CREATE EXTENSION hstore')
            cur.execute("""CREATE TABLE placex (place_id BIGINT,
                                               name HSTORE,
                                               class TEXT,
                                               type TEXT,
                                               linked_place_id BIGINT,
                                               rank_address SMALLINT,
                                               rank_search SMALLINT,
                                               indexed_status SMALLINT,
                                               indexed_date TIMESTAMP,
                                               partition SMALLINT,
                                               admin_level SMALLINT,
                                               country_code TEXT,
                                               address HSTORE,
                                               token_info JSONB,
                                               geometry_sector INTEGER)""")
            cur.execute("""CREATE TABLE location_property_osmline (
                               place_id BIGINT,
                               osm_id BIGINT,
                               address HSTORE,
                               token_info JSONB,
                               indexed_status SMALLINT,
                               indexed_date TIMESTAMP,
                               geometry_sector INTEGER)""")
            cur.execute("""CREATE TABLE location_postcode (
                               place_id BIGINT,
                               indexed_status SMALLINT,
                               indexed_date TIMESTAMP,
                               country_code varchar(2),
                               postcode TEXT)""")
            # Trigger function: stamp indexed_date when a row transitions
            # from unindexed (status != 0) to indexed (status 0).
            cur.execute("""CREATE OR REPLACE FUNCTION date_update() RETURNS TRIGGER
                           AS $$
                           BEGIN
                             IF NEW.indexed_status = 0 and OLD.indexed_status != 0 THEN
                               NEW.indexed_date = now();
                             END IF;
                             RETURN NEW;
                           END; $$ LANGUAGE plpgsql;""")
            cur.execute("DROP TYPE IF EXISTS prepare_update_info CASCADE")
            cur.execute("""CREATE TYPE prepare_update_info AS (
                             name HSTORE,
                             address HSTORE,
                             rank_address SMALLINT,
                             country_code TEXT,
                             class TEXT,
                             type TEXT,
                             linked_place_id BIGINT
                           )""")
            # Simplified stand-in for the production prepare function:
            # simply copies the relevant columns from the placex row.
            cur.execute("""CREATE OR REPLACE FUNCTION placex_indexing_prepare(p placex,
                                                     OUT result prepare_update_info)
                           AS $$
                           BEGIN
                             result.address := p.address;
                             result.name := p.name;
                             result.class := p.class;
                             result.type := p.type;
                             result.country_code := p.country_code;
                             result.rank_address := p.rank_address;
                           END;
                           $$ LANGUAGE plpgsql STABLE;
                        """)
            # Interpolation address lookup is a passthrough in the test schema.
            cur.execute("""CREATE OR REPLACE FUNCTION
                             get_interpolation_address(in_address HSTORE, wayid BIGINT)
                           RETURNS HSTORE AS $$
                           BEGIN
                             RETURN in_address;
                           END;
                           $$ LANGUAGE plpgsql STABLE;
                        """)
            # Attach the indexed_date trigger to all three tables.
            for table in ('placex', 'location_property_osmline', 'location_postcode'):
                cur.execute("""CREATE TRIGGER {0}_update BEFORE UPDATE ON {0}
                               FOR EACH ROW EXECUTE PROCEDURE date_update()
                            """.format(table))

    def scalar(self, query):
        """Run a query and return its single scalar result."""
        with self.conn.cursor() as cur:
            cur.execute(query)
            return cur.fetchone()[0]

    def add_place(self, cls='place', typ='locality',
                  rank_search=30, rank_address=30, sector=20):
        """Insert an unindexed placex row and return its place_id."""
        next_id = next(self.placex_id)
        with self.conn.cursor() as cur:
            cur.execute("""INSERT INTO placex
                           (place_id, class, type, rank_search, rank_address,
                            indexed_status, geometry_sector)
                           VALUES (%s, %s, %s, %s, %s, 1, %s)""",
                        (next_id, cls, typ, rank_search, rank_address, sector))
        return next_id

    def add_admin(self, **kwargs):
        """Insert an unindexed administrative boundary into placex."""
        kwargs['cls'] = 'boundary'
        kwargs['typ'] = 'administrative'
        return self.add_place(**kwargs)

    def add_osmline(self, sector=20):
        """Insert an unindexed interpolation line and return its place_id."""
        next_id = next(self.osmline_id)
        with self.conn.cursor() as cur:
            cur.execute("""INSERT INTO location_property_osmline
                           (place_id, osm_id, indexed_status, geometry_sector)
                           VALUES (%s, %s, 1, %s)""",
                        (next_id, next_id, sector))
        return next_id

    def add_postcode(self, country, postcode):
        """Insert an unindexed postcode row and return its place_id."""
        next_id = next(self.postcode_id)
        with self.conn.cursor() as cur:
            cur.execute("""INSERT INTO location_postcode
                           (place_id, indexed_status, country_code, postcode)
                           VALUES (%s, 1, %s, %s)""",
                        (next_id, country, postcode))
        return next_id

    def placex_unindexed(self):
        """Return the number of placex rows still waiting to be indexed."""
        return self.scalar('SELECT count(*) from placex where indexed_status > 0')

    def osmline_unindexed(self):
        """Return the number of interpolation lines still waiting to be indexed."""
        return self.scalar("""SELECT count(*) from location_property_osmline
                              WHERE indexed_status > 0""")
@pytest.fixture
def test_db(temp_db_conn):
    """Provide a test database preloaded with the minimal indexing schema."""
    yield IndexerTestDB(temp_db_conn)
@pytest.fixture
def test_tokenizer(tokenizer_mock, project_env):
    """Create a tokenizer via the (mocked) tokenizer factory."""
    return factory.create_tokenizer(project_env)
@pytest.mark.parametrize("threads", [1, 15])
def test_index_all_by_rank(test_db, threads, test_tokenizer):
    """Indexing the full rank range processes all rows in the right order."""
    # One placex row per address rank 0..30 plus one interpolation line.
    for rank in range(31):
        test_db.add_place(rank_address=rank, rank_search=rank)
    test_db.add_osmline()
    assert test_db.placex_unindexed() == 31
    assert test_db.osmline_unindexed() == 1
    idx = indexer.Indexer('dbname=test_nominatim_python_unittest', test_tokenizer, threads)
    idx.index_by_rank(0, 30)
    assert test_db.placex_unindexed() == 0
    assert test_db.osmline_unindexed() == 0
    # Every indexed row must have received an indexed_date from the trigger.
    assert test_db.scalar("""SELECT count(*) from placex
                             WHERE indexed_status = 0 and indexed_date is null""") == 0
    # ranks come in order of rank address
    assert test_db.scalar("""
        SELECT count(*) FROM placex p WHERE rank_address > 0
          AND indexed_date >= (SELECT min(indexed_date) FROM placex o
                               WHERE p.rank_address < o.rank_address)""") == 0
    # placex address ranked objects come before interpolations
    assert test_db.scalar(
        """SELECT count(*) FROM placex WHERE rank_address > 0
             AND indexed_date >
                   (SELECT min(indexed_date) FROM location_property_osmline)""") == 0
    # rank 0 comes after all other placex objects
    assert test_db.scalar(
        """SELECT count(*) FROM placex WHERE rank_address > 0
             AND indexed_date >
                   (SELECT min(indexed_date) FROM placex WHERE rank_address = 0)""") == 0
@pytest.mark.parametrize("threads", [1, 15])
def test_index_partial_without_30(test_db, threads, test_tokenizer):
    """Indexing ranks 4-15 leaves all other ranks and interpolations untouched."""
    for rank in range(31):
        test_db.add_place(rank_address=rank, rank_search=rank)
    test_db.add_osmline()
    assert test_db.placex_unindexed() == 31
    assert test_db.osmline_unindexed() == 1
    idx = indexer.Indexer('dbname=test_nominatim_python_unittest',
                          test_tokenizer, threads)
    idx.index_by_rank(4, 15)
    # 12 of the 31 placex rows (ranks 4..15) were processed.
    assert test_db.placex_unindexed() == 19
    assert test_db.osmline_unindexed() == 1
    # No row outside the requested rank range may have been indexed.
    assert test_db.scalar("""
        SELECT count(*) FROM placex
          WHERE indexed_status = 0 AND not rank_address between 4 and 15""") == 0
@pytest.mark.parametrize("threads", [1, 15])
def test_index_partial_with_30(test_db, threads, test_tokenizer):
    """ A rank range that includes rank 30 also processes the
        interpolation lines.
    """
    for level in range(31):
        test_db.add_place(rank_address=level, rank_search=level)
    test_db.add_osmline()

    assert test_db.placex_unindexed() == 31
    assert test_db.osmline_unindexed() == 1

    indexer.Indexer('dbname=test_nominatim_python_unittest',
                    test_tokenizer, threads).index_by_rank(28, 30)

    # ranks 28-30 and the interpolation line are done now
    assert test_db.osmline_unindexed() == 0
    assert test_db.placex_unindexed() == 27
    assert test_db.scalar("""
                    SELECT count(*) FROM placex
                      WHERE indexed_status = 0 AND rank_address between 1 and 27""") == 0
@pytest.mark.parametrize("threads", [1, 15])
def test_index_boundaries(test_db, threads, test_tokenizer):
    """ index_boundaries() only touches administrative boundaries. """
    # six boundaries (ranks 4-9), 31 ordinary places, one interpolation
    for level in range(4, 10):
        test_db.add_admin(rank_address=level, rank_search=level)
    for level in range(31):
        test_db.add_place(rank_address=level, rank_search=level)
    test_db.add_osmline()

    assert test_db.placex_unindexed() == 37
    assert test_db.osmline_unindexed() == 1

    indexer.Indexer('dbname=test_nominatim_python_unittest',
                    test_tokenizer, threads).index_boundaries(0, 30)

    # only the boundaries have been indexed
    assert test_db.placex_unindexed() == 31
    assert test_db.osmline_unindexed() == 1
    assert test_db.scalar("""
                    SELECT count(*) FROM placex
                      WHERE indexed_status = 0 AND class != 'boundary'""") == 0
@pytest.mark.parametrize("threads", [1, 15])
def test_index_postcodes(test_db, threads, test_tokenizer):
    """ index_postcodes() leaves no unindexed rows in location_postcode. """
    for pcode in range(1000):
        test_db.add_postcode('de', pcode)
    for pcode in range(32000, 33000):
        test_db.add_postcode('us', pcode)

    indexer.Indexer('dbname=test_nominatim_python_unittest',
                    test_tokenizer, threads).index_postcodes()

    assert test_db.scalar("""SELECT count(*) FROM location_postcode
                                  WHERE indexed_status != 0""") == 0
@pytest.mark.parametrize("analyse", [True, False])
def test_index_full(test_db, analyse, test_tokenizer):
    """ A full index run processes places, interpolation lines
        and postcodes alike.
    """
    for level in range(4, 10):
        test_db.add_admin(rank_address=level, rank_search=level)
    for level in range(31):
        test_db.add_place(rank_address=level, rank_search=level)
    test_db.add_osmline()
    for pcode in range(1000):
        test_db.add_postcode('de', pcode)

    idx = indexer.Indexer('dbname=test_nominatim_python_unittest', test_tokenizer, 4)
    idx.index_full(analyse=analyse)

    # nothing may be left unindexed in any of the tables
    assert test_db.placex_unindexed() == 0
    assert test_db.osmline_unindexed() == 0
    assert test_db.scalar("""SELECT count(*) FROM location_postcode
                             WHERE indexed_status != 0""") == 0
@pytest.mark.parametrize("threads", [1, 15])
def test_index_reopen_connection(test_db, threads, monkeypatch, test_tokenizer):
    """ Indexing must still complete when worker connections are
        recycled in the middle of a run.
    """
    # force a reconnect every 15 processed places
    monkeypatch.setattr(indexer.WorkerPool, "REOPEN_CONNECTIONS_AFTER", 15)

    for _ in range(1000):
        test_db.add_place(rank_address=30, rank_search=30)

    indexer.Indexer('dbname=test_nominatim_python_unittest',
                    test_tokenizer, threads).index_by_rank(28, 30)

    assert test_db.placex_unindexed() == 0
| 12,335 | 40.395973 | 93 | py |
Nominatim | Nominatim-master/test/python/utils/test_json_writer.py | # SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the streaming JSON writer.
"""
import json
import pytest
from nominatim.utils.json_writer import JsonWriter
@pytest.mark.parametrize("inval,outstr", [(None, 'null'),
                                          (True, 'true'), (False, 'false'),
                                          (23, '23'), (0, '0'), (-1.3, '-1.3'),
                                          ('g\nä', '"g\\nä"'), ('"', '"\\\""'),
                                          ({}, '{}'), ([], '[]')])
def test_simple_value(inval, outstr):
    """ Scalar values must serialise to their JSON representation. """
    jw = JsonWriter().value(inval)

    assert jw() == outstr
    json.loads(jw())  # output must be parseable JSON
def test_empty_array():
    """ An array without elements must serialise to '[]'. """
    jw = JsonWriter()
    jw.start_array()
    jw.end_array()

    assert jw() == '[]'
    json.loads(jw())
def test_array_with_single_value():
    """ A single element needs no separators. """
    jw = JsonWriter()
    jw.start_array().value(None).end_array()

    assert jw() == '[null]'
    json.loads(jw())
@pytest.mark.parametrize("invals,outstr", [((1, ), '[1]'),
                                           (('a', 'b'), '["a","b"]')])
def test_array_with_data(invals, outstr):
    """ next() separates elements; a trailing next() must be harmless. """
    jw = JsonWriter().start_array()
    for element in invals:
        jw.value(element).next()
    jw.end_array()

    assert jw() == outstr
    json.loads(jw())
def test_empty_object():
    """ An object without members must serialise to '{}'. """
    jw = JsonWriter()
    jw.start_object()
    jw.end_object()

    assert jw() == '{}'
    json.loads(jw())
def test_object_single_entry():
    """ key() followed by value() writes a single member. """
    jw = JsonWriter()
    jw.start_object()
    jw.key('something')
    jw.value(5)
    jw.end_object()

    assert jw() == '{"something":5}'
    json.loads(jw())
def test_object_many_values():
    """ keyval() writes members including the separating commas. """
    jw = JsonWriter().start_object()
    jw.keyval('foo', None)
    jw.keyval('bar', {})
    jw.keyval('baz', 'b\taz')
    jw.end_object()

    assert jw() == '{"foo":null,"bar":{},"baz":"b\\taz"}'
    json.loads(jw())
def test_object_many_values_without_none():
    """ keyval_not_none() drops None values and applies the optional
        transform to the remaining ones.
    """
    jw = JsonWriter().start_object()
    jw.keyval_not_none('foo', 0)
    jw.keyval_not_none('bar', None)
    jw.keyval_not_none('baz', '')
    jw.keyval_not_none('eve', False,
                       transform = lambda v: 'yes' if v else 'no')
    jw.end_object()

    assert jw() == '{"foo":0,"baz":"","eve":"no"}'
    json.loads(jw())
def test_raw_output():
    """ raw() passes pre-formatted snippets through unaltered and they
        combine with regular values into valid JSON.
    """
    writer = JsonWriter()\
                .start_array()\
                    .raw('{ "nicely": "formatted here" }').next()\
                    .value(1)\
                .end_array()

    assert writer() == '[{ "nicely": "formatted here" },1]'
    # consistency with the other tests: the result must parse as JSON
    json.loads(writer())
| 2,998 | 27.028037 | 81 | py |
Nominatim | Nominatim-master/test/python/utils/test_centroid.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for centroid computation.
"""
import pytest
from nominatim.utils.centroid import PointsCentroid
def test_empty_set():
    """ Asking for the centroid of an empty point set is an error. """
    pts = PointsCentroid()

    with pytest.raises(ValueError, match='No points'):
        pts.centroid()
@pytest.mark.parametrize("centroid", [(0,0), (-1, 3), [0.0000032, 88.4938]])
def test_one_point_centroid(centroid):
    """ With a single point the centroid is that point itself. """
    pts = PointsCentroid()
    pts += centroid

    result = pts.centroid()
    assert len(result) == 2
    assert result == (pytest.approx(centroid[0]), pytest.approx(centroid[1]))
def test_multipoint_centroid():
    """ The centroid follows as points are added incrementally. """
    pts = PointsCentroid()

    for point, expect in [((20.0, -10.0), (20.0, -10.0)),
                          ((20.2, -9.0), (20.1, -9.5)),
                          ((20.2, -9.0), (20.13333, -9.333333))]:
        pts += point
        assert pts.centroid() == (pytest.approx(expect[0]), pytest.approx(expect[1]))
def test_manypoint_centroid():
    """ Adding the same point many times must not drift the centroid. """
    pts = PointsCentroid()

    for _ in range(10000):
        pts += (4.564732, -0.000034)

    assert pts.centroid() == (pytest.approx(4.564732), pytest.approx(-0.000034))
@pytest.mark.parametrize("param", ["aa", None, 5, [1, 2, 3], (3, None), ("a", 3.9)])
def test_add_non_tuple(param):
    """ Only 2-element tuples of coordinates may be added. """
    pts = PointsCentroid()

    with pytest.raises(ValueError, match='2-element tuples'):
        pts += param
| 1,493 | 25.210526 | 84 | py |
Nominatim | Nominatim-master/test/python/data/test_country_info.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for function that handle country properties.
"""
from textwrap import dedent
import pytest
from nominatim.data import country_info
@pytest.fixture
def loaded_country(def_config):
    """ Fixture that loads the country configuration from the default
        project configuration into the global country info.
    """
    country_info.setup_country_config(def_config)
@pytest.fixture
def env_with_country_config(project_env):
    """ Return a factory function that writes the given country-settings
        YAML into the project directory and returns the project environment.
    """
    def _write_settings(cfg):
        settings_file = project_env.project_dir / 'country_settings.yaml'
        settings_file.write_text(dedent(cfg))
        return project_env

    return _write_settings
@pytest.mark.parametrize("no_partitions", (True, False))
def test_setup_country_tables(src_dir, temp_db_with_extensions, dsn, temp_db_cursor,
                              loaded_country, no_partitions):
    """ setup_country_tables() creates country_name and country_osm_grid
        with exactly one name row per country and the requested partitioning.
    """
    country_info.setup_country_tables(dsn, src_dir / 'data', no_partitions)

    # the OSM grid table must be filled from the data directory
    assert temp_db_cursor.table_exists('country_osm_grid')
    assert temp_db_cursor.table_rows('country_osm_grid') > 100

    assert temp_db_cursor.table_exists('country_name')
    distinct_codes = temp_db_cursor.scalar(
        'SELECT count(DISTINCT country_code) FROM country_name')
    assert temp_db_cursor.table_rows('country_name') == distinct_codes

    found_partitions = temp_db_cursor.row_set(
        "SELECT DISTINCT partition FROM country_name")
    if no_partitions:
        # everything ends up in the single partition 0
        assert found_partitions == {(0, )}
    else:
        assert len(found_partitions) > 10
@pytest.mark.parametrize("languages", (None, ['fr', 'en']))
def test_create_country_names(temp_db_with_extensions, temp_db_conn, temp_db_cursor,
                              table_factory, tokenizer_mock, languages, loaded_country):
    """ Country names are handed to the tokenizer, optionally restricted
        to the given list of languages.
    """
    table_factory('country_name', 'country_code varchar(2), name hstore',
                  content=(('us', '"name"=>"us1","name:af"=>"us2"'),
                           ('fr', '"name"=>"Fra", "name:en"=>"Fren"')))

    assert temp_db_cursor.scalar("SELECT count(*) FROM country_name") == 2

    tokenizer = tokenizer_mock()
    country_info.create_country_names(temp_db_conn, tokenizer, languages)

    assert len(tokenizer.analyser_cache['countries']) == 2

    if languages:
        # 'name:af' is filtered out because 'af' is not a requested language
        expected = {'us': {'us', 'us1'},
                    'fr': {'fr', 'Fra', 'Fren'}}
    else:
        expected = {'us': {'us', 'us1', 'us2'},
                    'fr': {'fr', 'Fra', 'Fren'}}

    assert {cc: set(names.values())
            for cc, names in tokenizer.analyser_cache['countries']} == expected
def test_setup_country_names_prefixes(env_with_country_config):
    """ Language-specific names receive a 'name:<lang>' key while the
        default language becomes the plain 'name'.
    """
    config = env_with_country_config("""\
        es:
            names:
                name:
                    en: Spain
                    de: Spanien
                    default: Espagñe
        us:
            names:
                short_name:
                    default: USA
                name:
                    default: United States
                    en: United States
        """)

    cinfo = country_info._CountryInfo()
    cinfo.load(config)

    assert cinfo.get('es')['names'] == {"name": "Espagñe",
                                        "name:en": "Spain",
                                        "name:de": "Spanien"}
    assert cinfo.get('us')['names'] == {"name": "United States",
                                        "name:en": "United States",
                                        "short_name": "USA"}
    # unknown countries get no names entry at all
    assert 'names' not in cinfo.get('xx')
def test_setup_country_config_languages_not_loaded(env_with_country_config):
    """ A country without a language entry gets an empty language list. """
    config = env_with_country_config("""\
        de:
            partition: 3
            names:
                name:
                    default: Deutschland
        """)

    cinfo = country_info._CountryInfo()
    cinfo.load(config)

    expected = {'de': {'partition': 3,
                       'languages': [],
                       'names': {'name': 'Deutschland'}}}
    assert dict(cinfo.items()) == expected
def test_setup_country_config_name_not_loaded(env_with_country_config):
    """ An empty names section results in an empty name dict. """
    config = env_with_country_config("""\
        de:
            partition: 3
            languages: de
            names:
        """)

    cinfo = country_info._CountryInfo()
    cinfo.load(config)

    expected = {'de': {'partition': 3,
                       'languages': ['de'],
                       'names': {}}}
    assert dict(cinfo.items()) == expected
def test_setup_country_config_names_not_loaded(env_with_country_config):
    """ A missing names section also results in an empty name dict. """
    config = env_with_country_config("""
        de:
            partition: 3
            languages: de
        """)

    cinfo = country_info._CountryInfo()
    cinfo.load(config)

    expected = {'de': {'partition': 3,
                       'languages': ['de'],
                       'names': {}}}
    assert dict(cinfo.items()) == expected
def test_setup_country_config_special_character(env_with_country_config):
    """ YAML escape sequences in names must be decoded on load. """
    config = env_with_country_config("""
        bq:
            partition: 250
            languages: nl
            names:
                name:
                    default: "\\N"
        """)

    cinfo = country_info._CountryInfo()
    cinfo.load(config)

    # "\N" is the YAML escape for U+0085 (next line)
    expected = {'bq': {'partition': 250,
                       'languages': ['nl'],
                       'names': {'name': '\x85'}}}
    assert dict(cinfo.items()) == expected
| 6,498 | 36.784884 | 88 | py |
Nominatim | Nominatim-master/test/bdd/environment.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
from behave import *
from steps.geometry_factory import GeometryFactory
from steps.nominatim_environment import NominatimEnvironment
TEST_BASE_DIR = Path(__file__) / '..' / '..'
# Default settings for the BDD test run. Every entry can be overwritten
# on the behave command line via '-D <name>=<value>'.
userconfig = {
    'BUILDDIR' : (TEST_BASE_DIR / '..' / 'build').resolve(),
    'REMOVE_TEMPLATE' : False,
    'KEEP_TEST_DB' : False,
    'DB_HOST' : None,
    'DB_PORT' : None,
    'DB_USER' : None,
    'DB_PASS' : None,
    'TEMPLATE_DB' : 'test_template_nominatim',
    'TEST_DB' : 'test_nominatim',
    'API_TEST_DB' : 'test_api_nominatim',
    'API_TEST_FILE' : (TEST_BASE_DIR / 'testdb' / 'apidb-test-data.pbf').resolve(),
    'SERVER_MODULE_PATH' : None,
    'TOKENIZER' : None, # Test with a custom tokenizer
    'STYLE' : 'extratags',
    'API_ENGINE': 'php',
    'PHPCOV' : False, # set to output directory to enable code coverage
}
# step definitions use regular-expression matching
use_step_matcher("re")
def before_all(context):
    """ Set up the global test environment once per behave run. """
    # logging setup
    context.config.setup_logging()
    # make the defaults available as -D options
    for key, value in userconfig.items():
        context.config.userdata.setdefault(key, value)
    # Nominatim test setup
    context.nominatim = NominatimEnvironment(context.config.userdata)
    context.osm = GeometryFactory()
def before_scenario(context, scenario):
    """ Prepare the database that matches the scenario's feature tag. """
    tags = context.tags
    if 'DB' in tags:
        context.nominatim.setup_db(context)
    elif 'APIDB' in tags:
        context.nominatim.setup_api_db()
    elif 'UNKNOWNDB' in tags:
        context.nominatim.setup_unknown_db()
def after_scenario(context, scenario):
    """ Drop the per-scenario database again, unless it is to be kept. """
    if 'DB' not in context.tags:
        return

    context.nominatim.teardown_db(context)
def before_tag(context, tag):
    """ Skip scenarios that do not apply to the configured tokenizer
        or API engine.
    """
    userdata = context.config.userdata
    if tag == 'fail-legacy' and userdata['TOKENIZER'] == 'legacy':
        context.scenario.skip("Not implemented in legacy tokenizer")
    elif tag == 'v1-api-php-only' and userdata['API_ENGINE'] != 'php':
        context.scenario.skip("Only valid with PHP version of v1 API.")
    elif tag == 'v1-api-python-only' and userdata['API_ENGINE'] == 'php':
        context.scenario.skip("Only valid with Python version of v1 API.")
| 2,331 | 31.84507 | 84 | py |
Nominatim | Nominatim-master/test/bdd/steps/nominatim_environment.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import importlib
import sys
import tempfile
import psycopg2
import psycopg2.extras
sys.path.insert(1, str((Path(__file__) / '..' / '..' / '..' / '..').resolve()))
from nominatim import cli
from nominatim.config import Configuration
from nominatim.db.connection import Connection
from nominatim.tools import refresh
from nominatim.tokenizer import factory as tokenizer_factory
from steps.utils import run_script
class NominatimEnvironment:
    """ Collects all functions for the execution of Nominatim functions.
    """
    def __init__(self, config):
        # all configuration values come from the behave userdata dict
        self.build_dir = Path(config['BUILDDIR']).resolve()
        self.src_dir = (Path(__file__) / '..' / '..' / '..' / '..').resolve()
        self.db_host = config['DB_HOST']
        self.db_port = config['DB_PORT']
        self.db_user = config['DB_USER']
        self.db_pass = config['DB_PASS']
        self.template_db = config['TEMPLATE_DB']
        self.test_db = config['TEST_DB']
        self.api_test_db = config['API_TEST_DB']
        self.api_test_file = config['API_TEST_FILE']
        self.tokenizer = config['TOKENIZER']
        self.import_style = config['STYLE']
        self.server_module_path = config['SERVER_MODULE_PATH']
        self.reuse_template = not config['REMOVE_TEMPLATE']
        self.keep_scenario_db = config['KEEP_TEST_DB']
        self.code_coverage_path = config['PHPCOV']
        self.code_coverage_id = 1
        self.default_config = Configuration(None).get_os_env()
        # state that is set up lazily on first use
        self.test_env = None
        self.template_db_done = False
        self.api_db_done = False
        self.website_dir = None
        # non-PHP engines are provided by create_api_request_func_<name> methods
        self.api_engine = None
        if config['API_ENGINE'] != 'php':
            if not hasattr(self, f"create_api_request_func_{config['API_ENGINE']}"):
                raise RuntimeError(f"Unknown API engine '{config['API_ENGINE']}'")
            self.api_engine = getattr(self, f"create_api_request_func_{config['API_ENGINE']}")()
    def connect_database(self, dbname):
        """ Return a connection to the database with the given name.
            Uses configured host, user and port.
        """
        dbargs = {'database': dbname}
        if self.db_host:
            dbargs['host'] = self.db_host
        if self.db_port:
            dbargs['port'] = self.db_port
        if self.db_user:
            dbargs['user'] = self.db_user
        if self.db_pass:
            dbargs['password'] = self.db_pass
        conn = psycopg2.connect(connection_factory=Connection, **dbargs)
        return conn
    def next_code_coverage_file(self):
        """ Generate the next name for a coverage file.
        """
        fn = Path(self.code_coverage_path) / "{:06d}.cov".format(self.code_coverage_id)
        self.code_coverage_id += 1
        return fn.resolve()
    def write_nominatim_config(self, dbname):
        """ Set up a custom test configuration that connects to the given
            database. This sets up the environment variables so that they can
            be picked up by dotenv and creates a project directory with the
            appropriate website scripts.
        """
        # PHP-style DSN; converted to libpq format by get_libpq_dsn()
        dsn = 'pgsql:dbname={}'.format(dbname)
        if self.db_host:
            dsn += ';host=' + self.db_host
        if self.db_port:
            dsn += ';port=' + self.db_port
        if self.db_user:
            dsn += ';user=' + self.db_user
        if self.db_pass:
            dsn += ';password=' + self.db_pass
        self.test_env = dict(self.default_config)
        self.test_env['NOMINATIM_DATABASE_DSN'] = dsn
        self.test_env['NOMINATIM_LANGUAGES'] = 'en,de,fr,ja'
        self.test_env['NOMINATIM_FLATNODE_FILE'] = ''
        self.test_env['NOMINATIM_IMPORT_STYLE'] = 'full'
        self.test_env['NOMINATIM_USE_US_TIGER_DATA'] = 'yes'
        self.test_env['NOMINATIM_DATADIR'] = str((self.src_dir / 'data').resolve())
        self.test_env['NOMINATIM_SQLDIR'] = str((self.src_dir / 'lib-sql').resolve())
        self.test_env['NOMINATIM_CONFIGDIR'] = str((self.src_dir / 'settings').resolve())
        self.test_env['NOMINATIM_DATABASE_MODULE_SRC_PATH'] = str((self.build_dir / 'module').resolve())
        self.test_env['NOMINATIM_OSM2PGSQL_BINARY'] = str((self.build_dir / 'osm2pgsql' / 'osm2pgsql').resolve())
        if self.tokenizer is not None:
            self.test_env['NOMINATIM_TOKENIZER'] = self.tokenizer
        if self.import_style is not None:
            self.test_env['NOMINATIM_IMPORT_STYLE'] = self.import_style
        if self.server_module_path:
            self.test_env['NOMINATIM_DATABASE_MODULE_PATH'] = self.server_module_path
        else:
            # avoid module being copied into the temporary environment
            self.test_env['NOMINATIM_DATABASE_MODULE_PATH'] = str((self.build_dir / 'module').resolve())
        if self.website_dir is not None:
            self.website_dir.cleanup()
        self.website_dir = tempfile.TemporaryDirectory()
        # NOTE(review): connection errors are deliberately swallowed here -
        # presumably the website setup must also work before the database
        # exists (e.g. for the unknown-db scenarios).
        try:
            conn = self.connect_database(dbname)
        except:
            conn = False
        refresh.setup_website(Path(self.website_dir.name) / 'website',
                              self.get_test_config(), conn)
    def get_test_config(self):
        """ Return a Configuration object for the current project
            directory and test environment.
        """
        cfg = Configuration(Path(self.website_dir.name), environ=self.test_env)
        cfg.set_libdirs(module=self.build_dir / 'module',
                        osm2pgsql=self.build_dir / 'osm2pgsql' / 'osm2pgsql')
        return cfg
    def get_libpq_dsn(self):
        """ Return the configured database DSN in libpq keyword form. """
        dsn = self.test_env['NOMINATIM_DATABASE_DSN']
        def quote_param(param):
            # escape backslashes and single quotes for libpq
            key, val = param.split('=')
            val = val.replace('\\', '\\\\').replace("'", "\\'")
            if ' ' in val:
                val = "'" + val + "'"
            return key + '=' + val
        if dsn.startswith('pgsql:'):
            # Old PHP DSN format. Convert before returning.
            return ' '.join([quote_param(p) for p in dsn[6:].split(';')])
        return dsn
    def db_drop_database(self, name):
        """ Drop the database with the given name.
        """
        conn = self.connect_database('postgres')
        conn.set_isolation_level(0)
        cur = conn.cursor()
        cur.execute('DROP DATABASE IF EXISTS {}'.format(name))
        conn.close()
    def setup_template_db(self):
        """ Setup a template database that already contains common test data.
            Having a template database speeds up tests considerably but at
            the price that the tests sometimes run with stale data.
        """
        if self.template_db_done:
            return
        self.template_db_done = True
        self.write_nominatim_config(self.template_db)
        if not self._reuse_or_drop_db(self.template_db):
            try:
                # execute nominatim import on an empty file to get the right tables
                with tempfile.NamedTemporaryFile(dir='/tmp', suffix='.xml') as fd:
                    fd.write(b'<osm version="0.6"></osm>')
                    fd.flush()
                    self.run_nominatim('import', '--osm-file', fd.name,
                                       '--osm2pgsql-cache', '1',
                                       '--ignore-errors',
                                       '--offline', '--index-noanalyse')
            except:
                # do not leave a broken template database behind
                self.db_drop_database(self.template_db)
                raise
        self.run_nominatim('refresh', '--functions')
    def setup_api_db(self):
        """ Setup a test against the API test database.
        """
        self.write_nominatim_config(self.api_test_db)
        if not self.api_db_done:
            self.api_db_done = True
            if not self._reuse_or_drop_db(self.api_test_db):
                testdata = (Path(__file__) / '..' / '..' / '..' / 'testdb').resolve()
                self.test_env['NOMINATIM_WIKIPEDIA_DATA_PATH'] = str(testdata)
                simp_file = Path(self.website_dir.name) / 'secondary_importance.sql.gz'
                simp_file.symlink_to(testdata / 'secondary_importance.sql.gz')
                try:
                    self.run_nominatim('import', '--osm-file', str(self.api_test_file))
                    self.run_nominatim('add-data', '--tiger-data', str(testdata / 'tiger'))
                    self.run_nominatim('freeze')
                    # special phrases are imported differently per tokenizer
                    if self.tokenizer == 'legacy':
                        phrase_file = str(testdata / 'specialphrases_testdb.sql')
                        run_script(['psql', '-d', self.api_test_db, '-f', phrase_file])
                    else:
                        csv_path = str(testdata / 'full_en_phrases_test.csv')
                        self.run_nominatim('special-phrases', '--import-from-csv', csv_path)
                except:
                    # do not leave a half-imported database behind
                    self.db_drop_database(self.api_test_db)
                    raise
        tokenizer_factory.get_tokenizer_for_db(self.get_test_config())
    def setup_unknown_db(self):
        """ Setup a test against a non-existing database.
        """
        # The tokenizer needs an existing database to function.
        # So start with the usual database
        class _Context:
            db = None
        context = _Context()
        self.setup_db(context)
        tokenizer_factory.create_tokenizer(self.get_test_config(), init_db=False)
        # Then drop the DB again
        self.teardown_db(context, force_drop=True)
    def setup_db(self, context):
        """ Setup a test against a fresh, empty test database.
        """
        self.setup_template_db()
        conn = self.connect_database(self.template_db)
        conn.set_isolation_level(0)
        cur = conn.cursor()
        cur.execute('DROP DATABASE IF EXISTS {}'.format(self.test_db))
        cur.execute('CREATE DATABASE {} TEMPLATE = {}'.format(self.test_db, self.template_db))
        conn.close()
        self.write_nominatim_config(self.test_db)
        context.db = self.connect_database(self.test_db)
        context.db.autocommit = True
        psycopg2.extras.register_hstore(context.db, globally=False)
    def teardown_db(self, context, force_drop=False):
        """ Remove the test database, if it exists.
        """
        if hasattr(context, 'db'):
            context.db.close()
        if force_drop or not self.keep_scenario_db:
            self.db_drop_database(self.test_db)
    def _reuse_or_drop_db(self, name):
        """ Check for the existance of the given DB. If reuse is enabled,
            then the function checks for existance and returns True if the
            database is already there. Otherwise an existing database is
            dropped and always false returned.
        """
        if self.reuse_template:
            conn = self.connect_database('postgres')
            with conn.cursor() as cur:
                cur.execute('select count(*) from pg_database where datname = %s',
                            (name,))
                if cur.fetchone()[0] == 1:
                    return True
            conn.close()
        else:
            self.db_drop_database(name)
        return False
    def reindex_placex(self, db):
        """ Run the indexing step until all data in the placex has
            been processed. Indexing during updates can produce more data
            to index under some circumstances. That is why indexing may have
            to be run multiple times.
        """
        with db.cursor() as cur:
            while True:
                self.run_nominatim('index')
                cur.execute("SELECT 'a' FROM placex WHERE indexed_status != 0 LIMIT 1")
                if cur.rowcount == 0:
                    return
    def run_nominatim(self, *cmdline):
        """ Run the nominatim command-line tool via the library.
        """
        if self.website_dir is not None:
            cmdline = list(cmdline) + ['--project-dir', self.website_dir.name]
        cli.nominatim(module_dir='',
                      osm2pgsql_path=str(self.build_dir / 'osm2pgsql' / 'osm2pgsql'),
                      cli_args=cmdline,
                      phpcgi_path='',
                      environ=self.test_env)
    def copy_from_place(self, db):
        """ Copy data from place to the placex and location_property_osmline
            tables invoking the appropriate triggers.
        """
        self.run_nominatim('refresh', '--functions', '--no-diff-updates')
        with db.cursor() as cur:
            cur.execute("""INSERT INTO placex (osm_type, osm_id, class, type,
                                               name, admin_level, address,
                                               extratags, geometry)
                             SELECT osm_type, osm_id, class, type,
                                    name, admin_level, address,
                                    extratags, geometry
                               FROM place
                               WHERE not (class='place' and type='houses' and osm_type='W')""")
            cur.execute("""INSERT INTO location_property_osmline (osm_id, address, linegeo)
                             SELECT osm_id, address, geometry
                               FROM place
                              WHERE class='place' and type='houses'
                                    and osm_type='W'
                                    and ST_GeometryType(geometry) = 'ST_LineString'""")
    def create_api_request_func_starlette(self):
        """ Create a request function that runs queries against the
            Starlette ASGI frontend.
        """
        import nominatim.server.starlette.server
        from asgi_lifespan import LifespanManager
        import httpx
        async def _request(endpoint, params, project_dir, environ, http_headers):
            app = nominatim.server.starlette.server.get_application(project_dir, environ)
            async with LifespanManager(app):
                async with httpx.AsyncClient(app=app, base_url="http://nominatim.test") as client:
                    response = await client.get(f"/{endpoint}", params=params,
                                                headers=http_headers)
            return response.text, response.status_code
        return _request
    def create_api_request_func_falcon(self):
        """ Create a request function that runs queries against the
            Falcon ASGI frontend.
        """
        import nominatim.server.falcon.server
        import falcon.testing
        async def _request(endpoint, params, project_dir, environ, http_headers):
            app = nominatim.server.falcon.server.get_application(project_dir, environ)
            async with falcon.testing.ASGIConductor(app) as conductor:
                response = await conductor.get(f"/{endpoint}", params=params,
                                               headers=http_headers)
            return response.text, response.status_code
        return _request
| 14,891 | 39.248649 | 113 | py |
Nominatim | Nominatim-master/test/bdd/steps/steps_api_queries.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
""" Steps that run queries against the API.
Queries may either be run directly via PHP using the query script
or via the HTTP interface using php-cgi.
"""
from pathlib import Path
import json
import os
import re
import logging
import asyncio
import xml.etree.ElementTree as ET
from urllib.parse import urlencode
from utils import run_script
from http_responses import GenericResponse, SearchResponse, ReverseResponse, StatusResponse
from check_functions import Bbox, check_for_attributes
from table_compare import NominatimID
LOG = logging.getLogger(__name__)
# Fixed base CGI environment for calls to php-cgi; emulates a typical
# Apache request. Request-specific keys are added in send_api_query_php().
BASE_SERVER_ENV = {
    'HTTP_HOST' : 'localhost',
    'HTTP_USER_AGENT' : 'Mozilla/5.0 (X11; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0',
    'HTTP_ACCEPT' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'HTTP_ACCEPT_ENCODING' : 'gzip, deflate',
    'HTTP_CONNECTION' : 'keep-alive',
    'SERVER_SIGNATURE' : '<address>Nominatim BDD Tests</address>',
    'SERVER_SOFTWARE' : 'Nominatim test',
    'SERVER_NAME' : 'localhost',
    'SERVER_ADDR' : '127.0.1.1',
    'SERVER_PORT' : '80',
    'REMOTE_ADDR' : '127.0.0.1',
    'DOCUMENT_ROOT' : '/var/www',
    'REQUEST_SCHEME' : 'http',
    'CONTEXT_PREFIX' : '/',
    'SERVER_ADMIN' : 'webmaster@localhost',
    'REMOTE_PORT' : '49319',
    'GATEWAY_INTERFACE' : 'CGI/1.1',
    'SERVER_PROTOCOL' : 'HTTP/1.1',
    'REQUEST_METHOD' : 'GET',
    'REDIRECT_STATUS' : 'CGI'
}
def make_todo_list(context, result_id):
    """ Turn the optional result index captured by a step regex into
        the sequence of result indexes that must be checked.
    """
    if result_id is not None:
        # result_id still carries the trailing blank from the regex capture
        context.execute_steps(f"then more than {result_id}results are returned")
        return (int(result_id.strip()), )

    # no index given: the check applies to every result
    context.execute_steps("then at least 1 result is returned")
    return range(len(context.response.result))
def compare(operator, op1, op2):
    """ Evaluate the textual comparison operator on the two operands.

        Raises ValueError for an unknown operator name.
    """
    comparisons = {'less than': lambda a, b: a < b,
                   'more than': lambda a, b: a > b,
                   'exactly': lambda a, b: a == b,
                   'at least': lambda a, b: a >= b,
                   'at most': lambda a, b: a <= b}

    if operator not in comparisons:
        raise ValueError(f"Unknown operator '{operator}'")

    return comparisons[operator](op1, op2)
def send_api_query(endpoint, params, fmt, context):
    """ Send a request to the given API endpoint and return the response
        body and HTTP status. 'fmt' is the requested output format; the
        special value 'debug' switches on debug output instead. Additional
        parameters may come from the scenario's step table.
    """
    if fmt is not None:
        if fmt.strip() == 'debug':
            params['debug'] = '1'
        else:
            params['format'] = fmt.strip()
    if context.table:
        if context.table.headings[0] == 'param':
            # one parameter per row, given as param/value pairs
            for line in context.table:
                params[line['param']] = line['value']
        else:
            # parameters as column headings with a single row of values
            for h in context.table.headings:
                params[h] = context.table[0][h]
    if context.nominatim.api_engine is None:
        # default engine: query the PHP frontend via CGI
        return send_api_query_php(endpoint, params, context)
    return asyncio.run(context.nominatim.api_engine(endpoint, params,
                                                    Path(context.nominatim.website_dir.name),
                                                    context.nominatim.test_env,
                                                    getattr(context, 'http_headers', {})))
def send_api_query_php(endpoint, params, context):
    """ Run the query against the PHP frontend via php-cgi and return
        the page content and HTTP status code.
    """
    env = dict(BASE_SERVER_ENV)
    env['QUERY_STRING'] = urlencode(params)
    env['SCRIPT_NAME'] = f'/{endpoint}.php'
    env['REQUEST_URI'] = f"{env['SCRIPT_NAME']}?{env['QUERY_STRING']}"
    env['CONTEXT_DOCUMENT_ROOT'] = os.path.join(context.nominatim.website_dir.name, 'website')
    env['SCRIPT_FILENAME'] = os.path.join(env['CONTEXT_DOCUMENT_ROOT'],
                                          f'{endpoint}.php')
    LOG.debug("Environment:" + json.dumps(env, sort_keys=True, indent=2))
    if hasattr(context, 'http_headers'):
        # custom headers are passed CGI-style through the environment
        for k, v in context.http_headers.items():
            env['HTTP_' + k.upper().replace('-', '_')] = v
    cmd = ['/usr/bin/env', 'php-cgi', '-f']
    if context.nominatim.code_coverage_path:
        # run through a wrapper script that collects code coverage
        env['XDEBUG_MODE'] = 'coverage'
        env['COV_SCRIPT_FILENAME'] = env['SCRIPT_FILENAME']
        env['COV_PHP_DIR'] = context.nominatim.src_dir
        env['COV_TEST_NAME'] = f"{context.scenario.filename}:{context.scenario.line}"
        env['SCRIPT_FILENAME'] = \
            os.path.join(os.path.split(__file__)[0], 'cgi-with-coverage.php')
        cmd.append(env['SCRIPT_FILENAME'])
        env['PHP_CODE_COVERAGE_FILE'] = context.nominatim.next_code_coverage_file()
    else:
        cmd.append(env['SCRIPT_FILENAME'])
    for k,v in params.items():
        cmd.append(f"{k}={v}")
    outp, err = run_script(cmd, cwd=context.nominatim.website_dir.name, env=env)
    assert len(err) == 0, f"Unexpected PHP error: {err}"
    # php-cgi reports non-200 codes via a 'Status:' header line
    if outp.startswith('Status: '):
        status = int(outp[8:11])
    else:
        status = 200
    # strip the CGI headers from the returned page
    content_start = outp.find('\r\n\r\n')
    return outp[content_start + 4:], status
@given(u'the HTTP header')
def add_http_header(context):
    """ Add the headers from the step table to the request headers. """
    if not hasattr(context, 'http_headers'):
        context.http_headers = {}

    headers = context.http_headers
    for name in context.table.headings:
        headers[name] = context.table[0][name]
@when(u'sending (?P<fmt>\S+ )?search query "(?P<query>.*)"(?P<addr> with address)?')
def website_search_request(context, fmt, query, addr):
    """ Run a search query, optionally requesting address details. """
    params = {'q': query} if query else {}
    if addr is not None:
        params['addressdetails'] = '1'

    outp, status = send_api_query('search', params, fmt, context)

    context.response = SearchResponse(outp, fmt or 'json', status)
@when('sending v1/reverse at (?P<lat>[\d.-]*),(?P<lon>[\d.-]*)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse(context, lat, lon, fmt):
    """ Send a reverse query for the given coordinates. """
    params = {key: value
              for key, value in (('lat', lat), ('lon', lon))
              if value is not None}

    if fmt is None:
        fmt = 'jsonv2'      # default format of the reverse endpoint
    elif fmt == "''":
        fmt = None          # explicitly request an empty format parameter

    outp, status = send_api_query('reverse', params, fmt, context)

    context.response = ReverseResponse(outp, fmt or 'xml', status)
@when('sending v1/reverse N(?P<nodeid>\d+)(?: with format (?P<fmt>.+))?')
def api_endpoint_v1_reverse_from_node(context, nodeid, fmt):
    """ Send a reverse query for the coordinates of a grid node. """
    x, y = context.osm.grid_node(int(nodeid))
    params = {'lon': f'{x:f}', 'lat': f'{y:f}'}

    outp, status = send_api_query('reverse', params, fmt, context)

    context.response = ReverseResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?details query for (?P<query>.*)')
def website_details_request(context, fmt, query):
    """ Request details for a place given by OSM id or place id. """
    if query[0] in 'NWR':
        # query by OSM object
        nid = NominatimID(query)
        params = {'osmtype': nid.typ, 'osmid': nid.oid}
        if nid.cls:
            params['class'] = nid.cls
    else:
        # query by internal place id
        params = {'place_id': query}

    outp, status = send_api_query('details', params, fmt, context)

    context.response = GenericResponse(outp, fmt or 'json', status)
@when(u'sending (?P<fmt>\S+ )?lookup query for (?P<query>.*)')
def website_lookup_request(context, fmt, query):
    """ Look up a list of OSM objects via the lookup endpoint. """
    outp, status = send_api_query('lookup', {'osm_ids': query}, fmt, context)

    context.response = SearchResponse(outp, fmt or 'xml', status)
@when(u'sending (?P<fmt>\S+ )?status query')
def website_status_request(context, fmt):
    """ Query the status endpoint. """
    outp, status = send_api_query('status', {}, fmt, context)

    context.response = StatusResponse(outp, fmt or 'text', status)
@step(u'(?P<operator>less than|more than|exactly|at least|at most) (?P<number>\d+) results? (?:is|are) returned')
def validate_result_number(context, operator, number):
    """ Check the number of results against the given comparison. """
    context.execute_steps("Then a HTTP 200 is returned")

    numres = len(context.response.result)

    assert compare(operator, numres, int(number)), \
           f"Bad number of results: expected {operator} {number}, got {numres}."
@then(u'a HTTP (?P<status>\d+) is returned')
def check_http_return_status(context, status):
    """ Check the HTTP status code of the previous request. """
    expected = int(status)

    assert context.response.errorcode == expected, \
           f"Return HTTP status is {context.response.errorcode}."\
           f" Full response:\n{context.response.page}"
@then(u'the page contents equals "(?P<text>.+)"')
def check_page_content_equals(context, text):
    """ Compare the raw response body against the expected text. """
    assert context.response.page == text
@then(u'the result is valid (?P<fmt>\w+)')
def step_impl(context, fmt):
    """ Check that the response is well-formed in the given format.
        HTML must parse as XML and may not contain scripts.
    """
    context.execute_steps("Then a HTTP 200 is returned")

    if fmt.strip() != 'html':
        assert context.response.format == fmt
        return

    try:
        root = ET.fromstring(context.response.page)
    except Exception:
        assert False, f"Could not parse page:\n{context.response.page}"

    assert root.tag == 'html'
    body = root.find('./body')
    assert body is not None
    assert body.find('.//script') is None
@then(u'a (?P<fmt>\w+) user error is returned')
def check_page_error(context, fmt):
    """ Check that the response contains an error message in the
        requested format.
    """
    context.execute_steps("Then a HTTP 400 is returned")
    assert context.response.format == fmt
    pattern = r'<error>.+</error>' if fmt == 'xml' else r'({"error":)'
    assert re.search(pattern, context.response.page, re.DOTALL) is not None
@then(u'result header contains')
def check_header_attr(context):
    """ Check that the response header has the attributes from the step
        table and that each value matches the given regular expression
        completely.
    """
    context.execute_steps("Then a HTTP 200 is returned")
    header = context.response.header
    for line in context.table:
        assert line['attr'] in header, \
               f"Field '{line['attr']}' missing in header. Full header:\n{context.response.header}"
        value = header[line['attr']]
        matched = re.fullmatch(line['value'], value)
        assert matched is not None, \
               f"Attribute '{line['attr']}': expected: '{line['value']}', got '{value}'"
@then(u'result header has (?P<neg>not )?attributes (?P<attrs>.*)')
def check_header_no_attr(context, neg, attrs):
    """ Check presence or absence of the given header attributes.
    """
    expected = 'absent' if neg else 'present'
    check_for_attributes(context.response.header, attrs, expected)
@then(u'results contain(?: in field (?P<field>.*))?')
def step_impl(context, field):
    """ Check that the response results include the rows of the step
        table, optionally restricted to a single field.
    """
    context.execute_steps("then at least 1 result is returned")
    for row in context.table:
        context.response.match_row(row, context=context, field=field)
@then(u'result (?P<lid>\d+ )?has (?P<neg>not )?attributes (?P<attrs>.*)')
def validate_attributes(context, lid, neg, attrs):
    """ Check presence or absence of the given attributes in one result
        or in all results.
    """
    expected = 'absent' if neg else 'present'
    for idx in make_todo_list(context, lid):
        check_for_attributes(context.response.result[idx], attrs, expected)
@then(u'result addresses contain')
def step_impl(context):
    """ Check that the address parts of the results match the given
        fields. An optional 'ID' column selects a single result.
    """
    context.execute_steps("then at least 1 result is returned")
    for line in context.table:
        idx = int(line['ID']) if 'ID' in line.headings else None
        for name, value in zip(line.headings, line.cells):
            if name == 'ID':
                continue
            context.response.assert_address_field(idx, name, value)
@then(u'address of result (?P<lid>\d+) has(?P<neg> no)? types (?P<attrs>.*)')
def check_address(context, lid, neg, attrs):
    """ Check presence or absence of the given address types in the
        selected result.
    """
    context.execute_steps(f"then more than {lid} results are returned")
    addr_parts = context.response.result[int(lid)]['address']
    for attr in attrs.split(','):
        # presence must be the inverse of the 'no' flag
        assert (attr in addr_parts) != bool(neg)
@then(u'address of result (?P<lid>\d+) (?P<complete>is|contains)')
def check_address(context, lid, complete):
    """ Check the address of the selected result against the step table.
        With 'is' the address must match exactly, with 'contains'
        additional address parts are tolerated.
    """
    context.execute_steps(f"then more than {lid} results are returned")
    lid = int(lid)
    addr_parts = dict(context.response.result[lid]['address'])
    for line in context.table:
        context.response.assert_address_field(lid, line['type'], line['value'])
        addr_parts.pop(line['type'])
    if complete == 'is':
        assert not addr_parts, f"Additional address parts found: {addr_parts!s}"
@then(u'result (?P<lid>\d+ )?has bounding box in (?P<coords>[\d,.-]+)')
def check_bounding_box_in_area(context, lid, coords):
    """ Check that the bounding boxes of the selected results are
        contained in the given bounding box.
    """
    expected = Bbox(coords)
    for idx in make_todo_list(context, lid):
        result = context.response.result[idx]
        check_for_attributes(result, 'boundingbox')
        context.response.check_row(idx, result['boundingbox'] in expected,
                                   f"Bbox is not contained in {expected}")
@then(u'result (?P<lid>\d+ )?has centroid in (?P<coords>[\d,.-]+)')
def check_centroid_in_area(context, lid, coords):
    """ Check that the centroids of the selected results lie inside the
        given bounding box.
    """
    expected = Bbox(coords)
    for idx in make_todo_list(context, lid):
        result = context.response.result[idx]
        check_for_attributes(result, 'lat,lon')
        context.response.check_row(idx, (result['lon'], result['lat']) in expected,
                                   f"Centroid is not inside {expected}")
@then(u'there are(?P<neg> no)? duplicates')
def check_for_duplicates(context, neg):
    """ Check for duplicate results, i.e. results that agree on OSM type,
        class, type and display name.
    """
    context.execute_steps("then at least 1 result is returned")
    seen = set()
    dup = None
    for res in context.response.result:
        key = (res['osm_type'], res['class'], res['type'], res['display_name'])
        if key in seen:
            dup = key
            break
        seen.add(key)
    if neg:
        assert dup is None, f"Found duplicate for {dup}"
    else:
        assert dup is not None, "No duplicates found"
| 13,257 | 34.167109 | 113 | py |
Nominatim | Nominatim-master/test/bdd/steps/check_functions.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of assertion functions used for the steps.
"""
import json
import math
import re
class Almost:
    """ Compares a numeric value allowing for a small jitter.
        The jitter may be adjusted via 'offset'.
    """
    def __init__(self, value, offset=0.00001):
        self.value = value
        self.offset = offset

    def __eq__(self, other):
        delta = abs(other - self.value)
        return delta < self.offset
# Bidirectional mapping between the one-letter and the long form
# of OSM object types.
OSM_TYPE = {'N' : 'node', 'W' : 'way', 'R' : 'relation',
            'n' : 'node', 'w' : 'way', 'r' : 'relation',
            'node' : 'n', 'way' : 'w', 'relation' : 'r'}


class OsmType:
    """ Compares an OSM type, accepting both N/R/W and node/way/relation.
    """
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return other in (self.value, OSM_TYPE[self.value])

    def __str__(self):
        return f"{self.value} or {OSM_TYPE[self.value]}"
class Field:
    """ Generic comparator for fields, which looks at the type of the
        value compared. Floats are compared with math.isclose (tunable
        via extra keyword arguments), strings starting with '^' are
        treated as regular expressions and dict comparands are matched
        against the value interpreted as a dict literal.
    """
    def __init__(self, value, **extra_args):
        self.value = value
        self.extra_args = extra_args

    def __eq__(self, other):
        expected = self.value
        if isinstance(expected, float):
            return math.isclose(expected, float(other), **self.extra_args)
        if expected.startswith('^'):
            return re.fullmatch(expected, str(other))
        if isinstance(other, dict):
            # expected value is a Python dict literal without the braces
            return eval('{' + expected + '}') == other
        return str(other) == str(expected)

    def __str__(self):
        return str(self.value)
class Bbox:
    """ Comparator for bounding boxes.
        Initialised from a comma-separated string of four floats.
    """
    def __init__(self, bbox_string):
        self.coord = [float(x) for x in bbox_string.split(',')]

    def __contains__(self, item):
        if isinstance(item, str):
            item = item.split(',')
        item = [float(v) for v in item]

        if len(item) == 2:
            # a point: x between minx/maxx, y between miny/maxy
            x, y = item
            return self.coord[0] <= x <= self.coord[2] \
                   and self.coord[1] <= y <= self.coord[3]

        if len(item) == 4:
            # a bbox: compared pairwise (min, max, min, max)
            # NOTE(review): this ordering differs from the point case
            # above — preserved as-is, confirm against callers.
            return item[0] >= self.coord[0] and item[1] <= self.coord[1] \
                   and item[2] >= self.coord[2] and item[3] <= self.coord[3]

        raise ValueError("Not a coordinate or bbox.")

    def __str__(self):
        return str(self.coord)
def check_for_attributes(obj, attrs, presence='present'):
    """ Check that the object has the given attributes. 'attrs' is a
        string with a comma-separated list of attributes. If 'presence'
        is set to 'absent' then the function checks that the attributes do
        not exist for the object
    """
    def _dump_json():
        return json.dumps(obj, sort_keys=True, indent=2, ensure_ascii=False)

    expect_absent = presence == 'absent'
    for attr in (a.strip() for a in attrs.split(',')):
        if expect_absent:
            assert attr not in obj, \
                   f"Unexpected attribute {attr}. Full response:\n{_dump_json()}"
        else:
            assert attr in obj, \
                   f"No attribute '{attr}'. Full response:\n{_dump_json()}"
| 3,252 | 27.535088 | 81 | py |
Nominatim | Nominatim-master/test/bdd/steps/place_inserter.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper classes for filling the place table.
"""
import random
import string
class PlaceColumn:
    """ Helper class to collect contents from a behave table row and
        insert it into the place table.

        Column values are gathered via add_row() and written to the
        database with db_insert()/db_delete().
    """
    def __init__(self, context):
        # admin_level defaults to 15 when the row does not set one
        self.columns = {'admin_level' : 15}
        self.context = context
        self.geometry = None

    def add_row(self, row, force_name):
        """ Parse the content from the given behave row as place column data.

            When 'force_name' is set, a random name is generated for rows
            that do not define one. Returns self for chaining.
        """
        for name, value in zip(row.headings, row.cells):
            self._add(name, value)
        assert 'osm_type' in self.columns, "osm column missing"
        if force_name and 'name' not in self.columns:
            self._add_hstore('name', 'name',
                             ''.join(random.choice(string.printable)
                                     for _ in range(int(random.random()*30))))
        return self

    def _add(self, key, value):
        """ Dispatch a single column. Prefixed headings (name+, extra+,
            addr+) go into the respective hstore column; otherwise a
            matching _set_key_<key> method is called when available.
        """
        if hasattr(self, '_set_key_' + key):
            getattr(self, '_set_key_' + key)(value)
        elif key.startswith('name+'):
            self._add_hstore('name', key[5:], value)
        elif key.startswith('extra+'):
            self._add_hstore('extratags', key[6:], value)
        elif key.startswith('addr+'):
            self._add_hstore('address', key[5:], value)
        elif key in ('name', 'address', 'extratags'):
            # NOTE: value is evaluated as a Python dict literal (test-only code)
            self.columns[key] = eval('{' + value + '}')
        else:
            assert key in ('class', 'type'), "Unknown column '{}'.".format(key)
            self.columns[key] = None if value == '' else value

    def _set_key_name(self, value):
        # plain 'name' column becomes the default name entry
        self._add_hstore('name', 'name', value)

    def _set_key_osm(self, value):
        # 'osm' column carries type and id in one token, e.g. 'N1234'
        assert value[0] in 'NRW' and value[1:].isdigit(), \
               "OSM id needs to be of format <NRW><id>."
        self.columns['osm_type'] = value[0]
        self.columns['osm_id'] = int(value[1:])

    def _set_key_admin(self, value):
        self.columns['admin_level'] = int(value)

    def _set_key_housenr(self, value):
        if value:
            self._add_hstore('address', 'housenumber', value)

    def _set_key_postcode(self, value):
        if value:
            self._add_hstore('address', 'postcode', value)

    def _set_key_street(self, value):
        if value:
            self._add_hstore('address', 'street', value)

    def _set_key_addr_place(self, value):
        if value:
            self._add_hstore('address', 'place', value)

    def _set_key_country(self, value):
        if value:
            self._add_hstore('address', 'country', value)

    def _set_key_geometry(self, value):
        # geometry is converted into an SQL expression via the grid helper
        self.geometry = self.context.osm.parse_geometry(value)
        assert self.geometry is not None, "Bad geometry: {}".format(value)

    def _add_hstore(self, column, key, value):
        """ Add a single key/value pair to the given hstore column. """
        if column in self.columns:
            self.columns[column][key] = value
        else:
            self.columns[column] = {key: value}

    def db_delete(self, cursor):
        """ Issue a delete for the given OSM object.
        """
        cursor.execute('DELETE FROM place WHERE osm_type = %s and osm_id = %s',
                       (self.columns['osm_type'] , self.columns['osm_id']))

    def db_insert(self, cursor):
        """ Insert the collected data into the database.
        """
        if self.columns['osm_type'] == 'N' and self.geometry is None:
            # Nodes without explicit geometry get their position from the
            # scenario grid or, failing that, a random point.
            pt = self.context.osm.grid_node(self.columns['osm_id'])
            if pt is None:
                pt = (random.random()*360 - 180, random.random()*180 - 90)

            self.geometry = "ST_SetSRID(ST_Point(%f, %f), 4326)" % pt
        else:
            assert self.geometry is not None, "Geometry missing"

        query = 'INSERT INTO place ({}, geometry) values({}, {})'.format(
            ','.join(self.columns.keys()),
            ','.join(['%s' for x in range(len(self.columns))]),
            self.geometry)
        cursor.execute(query, list(self.columns.values()))
| 4,186 | 34.483051 | 79 | py |
Nominatim | Nominatim-master/test/bdd/steps/utils.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Various smaller helps for step execution.
"""
import logging
import subprocess
LOG = logging.getLogger(__name__)


def run_script(cmd, **kwargs):
    """ Run the given command, check that it is successful and output
        when necessary. Returns the decoded stdout and stderr.
    """
    proc = subprocess.run(cmd, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, check=False, **kwargs)
    outp = proc.stdout.decode('utf-8')
    # make escaped newlines in the error output readable
    outerr = proc.stderr.decode('utf-8').replace('\\n', '\n')
    LOG.debug("Run command: %s\n%s\n%s", cmd, outp, outerr)

    assert proc.returncode == 0, "Script '{}' failed:\n{}\n{}\n".format(cmd[0], outp, outerr)

    return outp, outerr
| 887 | 29.62069 | 93 | py |
Nominatim | Nominatim-master/test/bdd/steps/geometry_factory.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
from pathlib import Path
import os
from steps.geometry_alias import ALIASES
class GeometryFactory:
    """ Provides functions to create geometries from coordinates and data grids.
    """

    def __init__(self):
        self.grid = {}

    def parse_geometry(self, geom):
        """ Create a WKT SQL term for the given geometry.
            The function understands the following formats:

              country:<country code>
                 Point geometry guaranteed to be in the given country
              <P>
                 Point geometry
              <P>,...,<P>
                 Line geometry
              (<P>,...,<P>)
                 Polygon geometry

            <P> may either be a coordinate of the form '<x> <y>' or a single
            number that refers to a point in a previously defined grid.
        """
        if geom.startswith('country:'):
            ccode = geom[8:].upper()
            assert ccode in ALIASES, "Geometry error: unknown country " + ccode
            return "ST_SetSRID('POINT({} {})'::geometry, 4326)".format(*ALIASES[ccode])

        if ',' not in geom:
            wkt = "POINT({})".format(self.mk_wkt_point(geom))
        elif '(' not in geom:
            wkt = "LINESTRING({})".format(self.mk_wkt_points(geom))
        else:
            wkt = "POLYGON(({}))".format(self.mk_wkt_points(geom.strip('() ')))

        return "ST_SetSRID('{}'::geometry, 4326)".format(wkt)

    def mk_wkt_point(self, point):
        """ Parse a single point description into WKT coordinates.
            Accepts either 'x y' coordinates or a grid point number.
        """
        geom = point.strip()
        if ' ' in geom:
            return geom

        try:
            node_id = int(geom)
        except ValueError:
            assert False, "Scenario error: Point '{}' is not a number".format(geom)

        pt = self.grid_node(node_id)
        assert pt is not None, "Scenario error: Point '{}' not found in grid".format(geom)
        return "{} {}".format(*pt)

    def mk_wkt_points(self, geom):
        """ Parse a comma-separated list of points. Coordinate and grid
            formats may be mixed.
        """
        return ','.join(self.mk_wkt_point(pt) for pt in geom.split(','))

    def set_grid(self, lines, grid_step, origin=(0.0, 0.0)):
        """ Replace the grid with one from the given lines.
        """
        self.grid = {}
        ypos = origin[1]
        for line in lines:
            xpos = origin[0]
            for pt_id in line:
                if pt_id.isdigit():
                    self.grid[int(pt_id)] = (xpos, ypos)
                xpos += grid_step
            ypos += grid_step

    def grid_node(self, nodeid):
        """ Get the coordinates for the given grid node or None when unset.
        """
        return self.grid.get(nodeid)
| 3,085 | 31.484211 | 90 | py |
Nominatim | Nominatim-master/test/bdd/steps/steps_db_ops.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
import logging
from itertools import chain
import psycopg2.extras
from place_inserter import PlaceColumn
from table_compare import NominatimID, DBRow
from nominatim.indexer import indexer
from nominatim.tokenizer import factory as tokenizer_factory
def check_database_integrity(context):
    """ Check some generic constraints on the tables.
        Asserts that place_addressline has no duplicate
        (place_id, address_place_id) pairs and that the word table
        contains no empty tokens (except for the legacy tokenizer).
    """
    with context.db.cursor() as cur:
        # place_addressline should not have duplicate (place_id, address_place_id)
        cur.execute("""SELECT count(*) FROM
                        (SELECT place_id, address_place_id, count(*) as c
                         FROM place_addressline GROUP BY place_id, address_place_id) x
                       WHERE c > 1""")
        assert cur.fetchone()[0] == 0, "Duplicates found in place_addressline"

        # word table must not have empty word_tokens
        # (the check is skipped for the legacy tokenizer)
        if context.nominatim.tokenizer != 'legacy':
            cur.execute("SELECT count(*) FROM word WHERE word_token = ''")
            assert cur.fetchone()[0] == 0, "Empty word tokens found in word table"
################################ GIVEN ##################################
@given("the (?P<named>named )?places")
def add_data_to_place_table(context, named):
""" Add entries into the place table. 'named places' makes sure that
the entries get a random name when none is explicitly given.
"""
with context.db.cursor() as cur:
cur.execute('ALTER TABLE place DISABLE TRIGGER place_before_insert')
for row in context.table:
PlaceColumn(context).add_row(row, named is not None).db_insert(cur)
cur.execute('ALTER TABLE place ENABLE TRIGGER place_before_insert')
@given("the relations")
def add_data_to_planet_relations(context):
""" Add entries into the osm2pgsql relation middle table. This is needed
for tests on data that looks up members.
"""
with context.db.cursor() as cur:
for r in context.table:
last_node = 0
last_way = 0
parts = []
if r['members']:
members = []
for m in r['members'].split(','):
mid = NominatimID(m)
if mid.typ == 'N':
parts.insert(last_node, int(mid.oid))
last_node += 1
last_way += 1
elif mid.typ == 'W':
parts.insert(last_way, int(mid.oid))
last_way += 1
else:
parts.append(int(mid.oid))
members.extend((mid.typ.lower() + mid.oid, mid.cls or ''))
else:
members = None
tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")])
cur.execute("""INSERT INTO planet_osm_rels (id, way_off, rel_off, parts, members, tags)
VALUES (%s, %s, %s, %s, %s, %s)""",
(r['id'], last_node, last_way, parts, members, list(tags)))
@given("the ways")
def add_data_to_planet_ways(context):
""" Add entries into the osm2pgsql way middle table. This is necessary for
tests on that that looks up node ids in this table.
"""
with context.db.cursor() as cur:
for r in context.table:
tags = chain.from_iterable([(h[5:], r[h]) for h in r.headings if h.startswith("tags+")])
nodes = [ int(x.strip()) for x in r['nodes'].split(',') ]
cur.execute("INSERT INTO planet_osm_ways (id, nodes, tags) VALUES (%s, %s, %s)",
(r['id'], nodes, list(tags)))
################################ WHEN ##################################
@when("importing")
def import_and_index_data_from_place_table(context):
""" Import data previously set up in the place table.
"""
context.nominatim.run_nominatim('import', '--continue', 'load-data',
'--index-noanalyse', '-q',
'--offline')
check_database_integrity(context)
# Remove the output of the input, when all was right. Otherwise it will be
# output when there are errors that had nothing to do with the import
# itself.
context.log_capture.buffer.clear()
@when("updating places")
def update_place_table(context):
""" Update the place table with the given data. Also runs all triggers
related to updates and reindexes the new data.
"""
context.nominatim.run_nominatim('refresh', '--functions')
with context.db.cursor() as cur:
for row in context.table:
col = PlaceColumn(context).add_row(row, False)
col.db_delete(cur)
col.db_insert(cur)
cur.execute('SELECT flush_deleted_places()')
context.nominatim.reindex_placex(context.db)
check_database_integrity(context)
# Remove the output of the input, when all was right. Otherwise it will be
# output when there are errors that had nothing to do with the import
# itself.
context.log_capture.buffer.clear()
@when("updating postcodes")
def update_postcodes(context):
""" Rerun the calculation of postcodes.
"""
context.nominatim.run_nominatim('refresh', '--postcodes')
@when("marking for delete (?P<oids>.*)")
def delete_places(context, oids):
""" Remove entries from the place table. Multiple ids may be given
separated by commas. Also runs all triggers
related to updates and reindexes the new data.
"""
context.nominatim.run_nominatim('refresh', '--functions')
with context.db.cursor() as cur:
cur.execute('TRUNCATE place_to_be_deleted')
for oid in oids.split(','):
NominatimID(oid).query_osm_id(cur, 'DELETE FROM place WHERE {}')
cur.execute('SELECT flush_deleted_places()')
context.nominatim.reindex_placex(context.db)
# Remove the output of the input, when all was right. Otherwise it will be
# output when there are errors that had nothing to do with the import
# itself.
context.log_capture.buffer.clear()
################################ THEN ##################################
@then("(?P<table>placex|place) contains(?P<exact> exactly)?")
def check_place_contents(context, table, exact):
""" Check contents of place/placex tables. Each row represents a table row
and all data must match. Data not present in the expected table, may
be arbitry. The rows are identified via the 'object' column which must
have an identifier of the form '<NRW><osm id>[:<class>]'. When multiple
rows match (for example because 'class' was left out and there are
multiple entries for the given OSM object) then all must match. All
expected rows are expected to be present with at least one database row.
When 'exactly' is given, there must not be additional rows in the database.
"""
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
expected_content = set()
for row in context.table:
nid = NominatimID(row['object'])
query = 'SELECT *, ST_AsText(geometry) as geomtxt, ST_GeometryType(geometry) as geometrytype'
if table == 'placex':
query += ' ,ST_X(centroid) as cx, ST_Y(centroid) as cy'
query += " FROM %s WHERE {}" % (table, )
nid.query_osm_id(cur, query)
assert cur.rowcount > 0, "No rows found for " + row['object']
for res in cur:
if exact:
expected_content.add((res['osm_type'], res['osm_id'], res['class']))
DBRow(nid, res, context).assert_row(row, ['object'])
if exact:
cur.execute('SELECT osm_type, osm_id, class from {}'.format(table))
actual = set([(r[0], r[1], r[2]) for r in cur])
assert expected_content == actual, \
f"Missing entries: {expected_content - actual}\n" \
f"Not expected in table: {actual - expected_content}"
@then("(?P<table>placex|place) has no entry for (?P<oid>.*)")
def check_place_has_entry(context, table, oid):
""" Ensure that no database row for the given object exists. The ID
must be of the form '<NRW><osm id>[:<class>]'.
"""
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
NominatimID(oid).query_osm_id(cur, "SELECT * FROM %s where {}" % table)
assert cur.rowcount == 0, \
"Found {} entries for ID {}".format(cur.rowcount, oid)
@then("search_name contains(?P<exclude> not)?")
def check_search_name_contents(context, exclude):
""" Check contents of place/placex tables. Each row represents a table row
and all data must match. Data not present in the expected table, may
be arbitry. The rows are identified via the 'object' column which must
have an identifier of the form '<NRW><osm id>[:<class>]'. All
expected rows are expected to be present with at least one database row.
"""
tokenizer = tokenizer_factory.get_tokenizer_for_db(context.nominatim.get_test_config())
with tokenizer.name_analyzer() as analyzer:
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
for row in context.table:
nid = NominatimID(row['object'])
nid.row_by_place_id(cur, 'search_name',
['ST_X(centroid) as cx', 'ST_Y(centroid) as cy'])
assert cur.rowcount > 0, "No rows found for " + row['object']
for res in cur:
db_row = DBRow(nid, res, context)
for name, value in zip(row.headings, row.cells):
if name in ('name_vector', 'nameaddress_vector'):
items = [x.strip() for x in value.split(',')]
tokens = analyzer.get_word_token_info(items)
if not exclude:
assert len(tokens) >= len(items), \
"No word entry found for {}. Entries found: {!s}".format(value, len(tokens))
for word, token, wid in tokens:
if exclude:
assert wid not in res[name], \
"Found term for {}/{}: {}".format(nid, name, wid)
else:
assert wid in res[name], \
"Missing term for {}/{}: {}".format(nid, name, wid)
elif name != 'object':
assert db_row.contains(name, value), db_row.assert_msg(name, value)
@then("search_name has no entry for (?P<oid>.*)")
def check_search_name_has_entry(context, oid):
""" Check that there is noentry in the search_name table for the given
objects. IDs are in format '<NRW><osm id>[:<class>]'.
"""
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
NominatimID(oid).row_by_place_id(cur, 'search_name')
assert cur.rowcount == 0, \
"Found {} entries for ID {}".format(cur.rowcount, oid)
@then("location_postcode contains exactly")
def check_location_postcode(context):
""" Check full contents for location_postcode table. Each row represents a table row
and all data must match. Data not present in the expected table, may
be arbitry. The rows are identified via 'country' and 'postcode' columns.
All rows must be present as excepted and there must not be additional
rows.
"""
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
cur.execute("SELECT *, ST_AsText(geometry) as geomtxt FROM location_postcode")
assert cur.rowcount == len(list(context.table)), \
"Postcode table has {} rows, expected {}.".format(cur.rowcount, len(list(context.table)))
results = {}
for row in cur:
key = (row['country_code'], row['postcode'])
assert key not in results, "Postcode table has duplicate entry: {}".format(row)
results[key] = DBRow((row['country_code'],row['postcode']), row, context)
for row in context.table:
db_row = results.get((row['country'],row['postcode']))
assert db_row is not None, \
f"Missing row for country '{row['country']}' postcode '{row['postcode']}'."
db_row.assert_row(row, ('country', 'postcode'))
@then("there are(?P<exclude> no)? word tokens for postcodes (?P<postcodes>.*)")
def check_word_table_for_postcodes(context, exclude, postcodes):
""" Check that the tokenizer produces postcode tokens for the given
postcodes. The postcodes are a comma-separated list of postcodes.
Whitespace matters.
"""
nctx = context.nominatim
tokenizer = tokenizer_factory.get_tokenizer_for_db(nctx.get_test_config())
with tokenizer.name_analyzer() as ana:
plist = [ana.normalize_postcode(p) for p in postcodes.split(',')]
plist.sort()
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
if nctx.tokenizer != 'legacy':
cur.execute("SELECT word FROM word WHERE type = 'P' and word = any(%s)",
(plist,))
else:
cur.execute("""SELECT word FROM word WHERE word = any(%s)
and class = 'place' and type = 'postcode'""",
(plist,))
found = [row[0] for row in cur]
assert len(found) == len(set(found)), f"Duplicate rows for postcodes: {found}"
if exclude:
assert len(found) == 0, f"Unexpected postcodes: {found}"
else:
assert set(found) == set(plist), \
f"Missing postcodes {set(plist) - set(found)}. Found: {found}"
@then("place_addressline contains")
def check_place_addressline(context):
""" Check the contents of the place_addressline table. Each row represents
a table row and all data must match. Data not present in the expected
table, may be arbitry. The rows are identified via the 'object' column,
representing the addressee and the 'address' column, representing the
address item.
"""
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
for row in context.table:
nid = NominatimID(row['object'])
pid = nid.get_place_id(cur)
apid = NominatimID(row['address']).get_place_id(cur)
cur.execute(""" SELECT * FROM place_addressline
WHERE place_id = %s AND address_place_id = %s""",
(pid, apid))
assert cur.rowcount > 0, \
"No rows found for place %s and address %s" % (row['object'], row['address'])
for res in cur:
DBRow(nid, res, context).assert_row(row, ('address', 'object'))
@then("place_addressline doesn't contain")
def check_place_addressline_exclude(context):
    """ Check that the place_addressline doesn't contain any entries for the
        given addressee/address item pairs.
    """
    with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
        for row in context.table:
            pid = NominatimID(row['object']).get_place_id(cur)
            # the address item may not exist at all; that counts as absent
            apid = NominatimID(row['address']).get_place_id(cur, allow_empty=True)
            if apid is not None:
                cur.execute(""" SELECT * FROM place_addressline
                                WHERE place_id = %s AND address_place_id = %s""",
                            (pid, apid))
                assert cur.rowcount == 0, \
                    "Row found for place %s and address %s" % (row['object'], row['address'])
@then("W(?P<oid>\d+) expands to(?P<neg> no)? interpolation")
def check_location_property_osmline(context, oid, neg):
""" Check that the given way is present in the interpolation table.
"""
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
cur.execute("""SELECT *, ST_AsText(linegeo) as geomtxt
FROM location_property_osmline
WHERE osm_id = %s AND startnumber IS NOT NULL""",
(oid, ))
if neg:
assert cur.rowcount == 0, "Interpolation found for way {}.".format(oid)
return
todo = list(range(len(list(context.table))))
for res in cur:
for i in todo:
row = context.table[i]
if (int(row['start']) == res['startnumber']
and int(row['end']) == res['endnumber']):
todo.remove(i)
break
else:
assert False, "Unexpected row " + str(res)
DBRow(oid, res, context).assert_row(row, ('start', 'end'))
assert not todo, f"Unmatched lines in table: {list(context.table[i] for i in todo)}"
@then("location_property_osmline contains(?P<exact> exactly)?")
def check_place_contents(context, exact):
""" Check contents of the interpolation table. Each row represents a table row
and all data must match. Data not present in the expected table, may
be arbitry. The rows are identified via the 'object' column which must
have an identifier of the form '<osm id>[:<startnumber>]'. When multiple
rows match (for example because 'startnumber' was left out and there are
multiple entries for the given OSM object) then all must match. All
expected rows are expected to be present with at least one database row.
When 'exactly' is given, there must not be additional rows in the database.
"""
with context.db.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
expected_content = set()
for row in context.table:
if ':' in row['object']:
nid, start = row['object'].split(':', 2)
start = int(start)
else:
nid, start = row['object'], None
query = """SELECT *, ST_AsText(linegeo) as geomtxt,
ST_GeometryType(linegeo) as geometrytype
FROM location_property_osmline WHERE osm_id=%s"""
if ':' in row['object']:
query += ' and startnumber = %s'
params = [int(val) for val in row['object'].split(':', 2)]
else:
params = (int(row['object']), )
cur.execute(query, params)
assert cur.rowcount > 0, "No rows found for " + row['object']
for res in cur:
if exact:
expected_content.add((res['osm_id'], res['startnumber']))
DBRow(nid, res, context).assert_row(row, ['object'])
if exact:
cur.execute('SELECT osm_id, startnumber from location_property_osmline')
actual = set([(r[0], r[1]) for r in cur])
assert expected_content == actual, \
f"Missing entries: {expected_content - actual}\n" \
f"Not expected in table: {actual - expected_content}"
| 19,513 | 44.487179 | 115 | py |
Nominatim | Nominatim-master/test/bdd/steps/table_compare.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions to facilitate accessing and comparing the content of DB tables.
"""
import re
import json
from steps.check_functions import Almost
# Pattern for test place identifiers: <osmtype><osmid>[:<class>]
ID_REGEX = re.compile(r"(?P<typ>[NRW])(?P<oid>\d+)(:(?P<cls>\w+))?")

class NominatimID:
    """ Splits a unique identifier for places into its components.
        As place_ids cannot be used for testing, we use a unique
        identifier instead that is of the form <osmtype><osmid>[:<class>].
    """

    def __init__(self, oid):
        self.typ = self.oid = self.cls = None

        if oid is not None:
            match = ID_REGEX.fullmatch(oid)
            assert match is not None, \
                   "ID '{}' not of form <osmtype><osmid>[:<class>]".format(oid)

            self.typ, self.oid, self.cls = match.group('typ', 'oid', 'cls')

    def __str__(self):
        base = self.typ + self.oid
        return base if self.cls is None else '{}:{}'.format(base, self.cls)

    def query_osm_id(self, cur, query):
        """ Run a query on cursor `cur` using osm ID, type and class. The
            `query` string must contain exactly one placeholder '{}' where
            the 'where' query should go.
        """
        conditions = ['osm_type = %s and osm_id = %s']
        params = [self.typ, self.oid]
        if self.cls is not None:
            conditions.append('class = %s')
            params.append(self.cls)

        cur.execute(query.format(' and '.join(conditions)), params)

    def row_by_place_id(self, cur, table, extra_columns=None):
        """ Get a row by place_id from the given table using cursor `cur`.
            extra_columns may contain a list additional elements for the select
            part of the query.
        """
        pid = self.get_place_id(cur)
        columns = ','.join(['*'] + (extra_columns or []))
        cur.execute("SELECT {} FROM {} WHERE place_id = %s".format(columns, table),
                    (pid, ))

    def get_place_id(self, cur, allow_empty=False):
        """ Look up the place id for the ID. Throws an assertion if the ID
            is not unique.
        """
        self.query_osm_id(cur, "SELECT place_id FROM placex WHERE {}")
        count = cur.rowcount
        if count == 0 and allow_empty:
            return None

        assert count == 1, \
               "Place ID {!s} not unique. Found {} entries.".format(self, count)

        return cur.fetchone()[0]
class DBRow:
    """ Represents a row from a database and offers comparison functions.
    """
    def __init__(self, nid, db_row, context):
        # nid: NominatimID the row was looked up with (used in messages).
        # db_row: mapping column name -> value as returned by the DB driver.
        # context: behave context; provides DB access and the node grid.
        self.nid = nid
        self.db_row = db_row
        self.context = context

    def assert_row(self, row, exclude_columns):
        """ Check that all columns of the given behave row are contained
            in the database row. Exclude behave rows with the names given
            in the `exclude_columns` list.
        """
        for name, value in zip(row.headings, row.cells):
            if name not in exclude_columns:
                assert self.contains(name, value), self.assert_msg(name, value)

    def contains(self, name, expected):
        """ Check that the DB row contains a column `name` with the given value.
        """
        # 'column+field' addresses a single field inside an hstore column.
        if '+' in name:
            column, field = name.split('+', 1)
            return self._contains_hstore_value(column, field, expected)

        if name == 'geometry':
            return self._has_geometry(expected)

        if name not in self.db_row:
            return False

        actual = self.db_row[name]

        # '-' means: the column must be unset.
        if expected == '-':
            return actual is None

        # A plain 'name' compares against the default entry of the
        # name hstore (actual is the hstore dict here).
        if name == 'name' and ':' not in expected:
            return self._compare_column(actual[name], expected)

        if 'place_id' in name:
            return self._compare_place_id(actual, expected)

        if name == 'centroid':
            return self._has_centroid(expected)

        return self._compare_column(actual, expected)

    def _contains_hstore_value(self, column, field, expected):
        # 'addr' is shorthand for the 'address' column.
        if column == 'addr':
            column = 'address'

        if column not in self.db_row:
            return False

        # '-' means: the field must not be set at all.
        if expected == '-':
            return self.db_row[column] is None or field not in self.db_row[column]

        if self.db_row[column] is None:
            return False

        return self._compare_column(self.db_row[column].get(field), expected)

    def _compare_column(self, actual, expected):
        # Dict columns are compared against a Python dict literal given
        # without the surrounding braces.
        # NOTE(review): eval executes text from the scenario description;
        # acceptable for trusted BDD input only, never for external data.
        if isinstance(actual, dict):
            return actual == eval('{' + expected + '}')

        return str(actual) == expected

    def _compare_place_id(self, actual, expected):
        # '0' is the special marker for 'no parent'; anything else is
        # resolved to a place_id through its OSM id.
        if expected == '0':
            return actual == 0

        with self.context.db.cursor() as cur:
            return NominatimID(expected).get_place_id(cur) == actual

    def _has_centroid(self, expected):
        # 'in geometry' checks containment instead of an exact position.
        if expected == 'in geometry':
            with self.context.db.cursor() as cur:
                cur.execute("""SELECT ST_Within(ST_SetSRID(ST_Point({cx}, {cy}), 4326),
                               ST_SetSRID('{geomtxt}'::geometry, 4326))""".format(**self.db_row))
                return cur.fetchone()[0]

        if ' ' in expected:
            x, y = expected.split(' ')
        else:
            # A single number refers to a node of the scenario grid.
            x, y = self.context.osm.grid_node(int(expected))

        return Almost(float(x)) == self.db_row['cx'] and Almost(float(y)) == self.db_row['cy']

    def _has_geometry(self, expected):
        # Compare geometries after snapping both to a small grid, so tiny
        # floating-point differences do not fail the test.
        geom = self.context.osm.parse_geometry(expected)
        with self.context.db.cursor() as cur:
            cur.execute("""SELECT ST_Equals(ST_SnapToGrid({}, 0.00001, 0.00001),
                           ST_SnapToGrid(ST_SetSRID('{}'::geometry, 4326), 0.00001, 0.00001))""".format(
                        geom, self.db_row['geomtxt']))
            return cur.fetchone()[0]

    def assert_msg(self, name, value):
        """ Return a string with an informative message for a failed compare.
        """
        msg = "\nBad column '{}' in row '{!s}'.".format(name, self.nid)
        actual = self._get_actual(name)
        if actual is not None:
            msg += " Expected: {}, got: {}.".format(value, actual)
        else:
            msg += " No such column."

        return msg + "\nFull DB row: {}".format(json.dumps(dict(self.db_row), indent=4, default=str))

    def _get_actual(self, name):
        # Mirrors the addressing logic of contains() in order to
        # pretty-print the value that was actually found.
        if '+' in name:
            column, field = name.split('+', 1)
            if column == 'addr':
                column = 'address'
            return (self.db_row.get(column) or {}).get(field)

        if name == 'geometry':
            return self.db_row['geomtxt']

        if name not in self.db_row:
            return None

        if name == 'centroid':
            return "POINT({cx} {cy})".format(**self.db_row)

        actual = self.db_row[name]

        if 'place_id' in name:
            if actual is None:
                return '<null>'

            if actual == 0:
                return "place ID 0"

            # Translate the place_id back into the readable OSM-id form.
            with self.context.db.cursor() as cur:
                cur.execute("""SELECT osm_type, osm_id, class
                               FROM placex WHERE place_id = %s""",
                            (actual, ))

                if cur.rowcount == 1:
                    return "{0[0]}{0[1]}:{0[2]}".format(cur.fetchone())

                return "[place ID {} not found]".format(actual)

        return actual
| 7,597 | 33.071749 | 112 | py |
Nominatim | Nominatim-master/test/bdd/steps/http_responses.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2023 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Classes wrapping HTTP responses from the Nominatim API.
"""
import re
import json
import xml.etree.ElementTree as ET
from check_functions import Almost, OsmType, Field, check_for_attributes
class GenericResponse:
    """ Common base class for all API responses.
    """
    def __init__(self, page, fmt, errorcode=200):
        # page: raw response body as a string
        # fmt: response format; 'jsonv2' is parsed like 'json'
        # errorcode: HTTP status code of the response
        fmt = fmt.strip()
        if fmt == 'jsonv2':
            fmt = 'json'

        self.page = page
        self.format = fmt
        self.errorcode = errorcode
        self.result = []
        self.header = dict()

        # Only successful, non-debug responses are parsed. The parser is
        # selected by name, e.g. fmt 'xml' dispatches to _parse_xml().
        if errorcode == 200 and fmt != 'debug':
            getattr(self, '_parse_' + fmt)()

    def _parse_json(self):
        # Strip an optional JSONP wrapper 'func(...)' and remember the
        # callback name in the header.
        m = re.fullmatch(r'([\w$][^(]*)\((.*)\)', self.page)
        if m is None:
            code = self.page
        else:
            code = m.group(2)
            self.header['json_func'] = m.group(1)
        self.result = json.JSONDecoder().decode(code)
        # Normalise to a list of result dicts; an error response yields
        # an empty result list.
        if isinstance(self.result, dict):
            if 'error' in self.result:
                self.result = []
            else:
                self.result = [self.result]

    def _parse_geojson(self):
        self._parse_json()
        if self.result:
            geojson = self.result[0]
            # check for valid geojson
            check_for_attributes(geojson, 'type,features')
            assert geojson['type'] == 'FeatureCollection'
            assert isinstance(geojson['features'], list)
            self.result = []
            for result in geojson['features']:
                check_for_attributes(result, 'type,properties,geometry')
                assert result['type'] == 'Feature'
                # Flatten each feature: the properties dict becomes the
                # result, with the geometry stored under 'geojson'.
                new = result['properties']
                check_for_attributes(new, 'geojson', 'absent')
                new['geojson'] = result['geometry']
                if 'bbox' in result:
                    check_for_attributes(new, 'boundingbox', 'absent')
                    # bbox is minlon, minlat, maxlon, maxlat
                    # boundingbox is minlat, maxlat, minlon, maxlon
                    new['boundingbox'] = [result['bbox'][1],
                                          result['bbox'][3],
                                          result['bbox'][0],
                                          result['bbox'][2]]
                # Collection-level attributes are kept with a '__' prefix.
                for k, v in geojson.items():
                    if k not in ('type', 'features'):
                        check_for_attributes(new, '__' + k, 'absent')
                        new['__' + k] = v
                self.result.append(new)

    def _parse_geocodejson(self):
        self._parse_geojson()
        if self.result:
            for r in self.result:
                assert set(r.keys()) == {'geocoding', 'geojson', '__geocoding'}, \
                       f"Unexpected keys in result: {r.keys()}"
                check_for_attributes(r['geocoding'], 'geojson', 'absent')
                # Hoist the 'geocoding' sub-object into the result itself.
                inner = r.pop('geocoding')
                r.update(inner)

    def assert_address_field(self, idx, field, value):
        """ Check that result rows`idx` has a field `field` with value `value`
            in its address. If idx is None, then all results are checked.
        """
        if idx is None:
            todo = range(len(self.result))
        else:
            todo = [int(idx)]

        for idx in todo:
            self.check_row(idx, 'address' in self.result[idx], "No field 'address'")

            address = self.result[idx]['address']
            self.check_row_field(idx, field, value, base=address)

    def match_row(self, row, context=None, field=None):
        """ Match the result fields against the given behave table row.
        """
        if 'ID' in row.headings:
            todo = [int(row['ID'])]
        else:
            todo = range(len(self.result))

        for i in todo:
            subdict = self.result[i]
            # With 'field' given, compare against a nested sub-object
            # addressed by a dotted path.
            if field is not None:
                for key in field.split('.'):
                    self.check_row(i, key in subdict, f"Missing subfield {key}")
                    subdict = subdict[key]
                    self.check_row(i, isinstance(subdict, dict),
                                   f"Subfield {key} not a dict")

            for name, value in zip(row.headings, row.cells):
                if name == 'ID':
                    pass
                elif name == 'osm':
                    # 'osm' combines type letter and id, e.g. 'N1234'.
                    self.check_row_field(i, 'osm_type', OsmType(value[0]), base=subdict)
                    self.check_row_field(i, 'osm_id', Field(value[1:]), base=subdict)
                elif name == 'centroid':
                    # Either an explicit 'lon lat' pair or a grid node id.
                    if ' ' in value:
                        lon, lat = value.split(' ')
                    elif context is not None:
                        lon, lat = context.osm.grid_node(int(value))
                    else:
                        raise RuntimeError("Context needed when using grid coordinates")
                    self.check_row_field(i, 'lat', Field(float(lat), abs_tol=1e-07), base=subdict)
                    self.check_row_field(i, 'lon', Field(float(lon), abs_tol=1e-07), base=subdict)
                else:
                    self.check_row_field(i, name, Field(value), base=subdict)

    def check_row(self, idx, check, msg):
        """ Assert for the condition 'check' and print 'msg' on fail together
            with the contents of the failing result.
        """
        # Lazy formatter: the (possibly expensive) JSON dump only happens
        # when the assertion actually fails.
        class _RowError:
            def __init__(self, row):
                self.row = row

            def __str__(self):
                return f"{msg}. Full row {idx}:\n" \
                       + json.dumps(self.row, indent=4, ensure_ascii=False)

        assert check, _RowError(self.result[idx])

    def check_row_field(self, idx, field, expected, base=None):
        """ Check field 'field' of result 'idx' for the expected value
            and print a meaningful error if the condition fails.
            When 'base' is set to a dictionary, then the field is checked
            in that base. The error message will still report the contents
            of the full result.
        """
        if base is None:
            base = self.result[idx]

        self.check_row(idx, field in base, f"No field '{field}'")

        value = base[field]

        self.check_row(idx, expected == value,
                       f"\nBad value for field '{field}'. Expected: {expected}, got: {value}")
class SearchResponse(GenericResponse):
    """ Specialised class for search and lookup responses.
        Transforms the xml response in a format similar to json.
    """

    def _parse_xml(self):
        """ Parse the XML page into the same list-of-dicts structure
            that the json parser produces.
        """
        root = ET.fromstring(self.page)
        self.header = dict(root.attrib)

        for place in root:
            assert place.tag == "place"
            entry = dict(place.attrib)
            self.result.append(entry)

            address = {}
            for sub in place:
                if sub.tag == 'extratags':
                    entry['extratags'] = {t.attrib['key']: t.attrib['value']
                                          for t in sub}
                elif sub.tag == 'namedetails':
                    entry['namedetails'] = {t.attrib['desc']: t.text
                                            for t in sub}
                elif sub.tag == 'geokml':
                    # Only record that KML output was present.
                    entry[sub.tag] = True
                else:
                    # Everything else is an address part.
                    address[sub.tag] = sub.text

            if address:
                entry['address'] = address
class ReverseResponse(GenericResponse):
    """ Specialised class for reverse responses.
        Transforms the xml response in a format similar to json.
    """

    def _parse_xml(self):
        # A reverse response has at most one result; the various sub-tags
        # are merged into that single result dict.
        xml_tree = ET.fromstring(self.page)
        self.header = dict(xml_tree.attrib)
        self.result = []
        for child in xml_tree:
            if child.tag == 'result':
                assert not self.result, "More than one result in reverse result"
                self.result.append(dict(child.attrib))
                check_for_attributes(self.result[0], 'display_name', 'absent')
                # The element text carries the display name.
                self.result[0]['display_name'] = child.text
            elif child.tag == 'addressparts':
                assert 'address' not in self.result[0], "More than one address in result"
                address = {}
                for sub in child:
                    assert len(sub) == 0, f"Address element '{sub.tag}' has subelements"
                    address[sub.tag] = sub.text
                self.result[0]['address'] = address
            elif child.tag == 'extratags':
                assert 'extratags' not in self.result[0], "More than one extratags in result"
                self.result[0]['extratags'] = {}
                for tag in child:
                    assert len(tag) == 0, f"Extratags element '{tag.attrib['key']}' has subelements"
                    self.result[0]['extratags'][tag.attrib['key']] = tag.attrib['value']
            elif child.tag == 'namedetails':
                assert 'namedetails' not in self.result[0], "More than one namedetails in result"
                self.result[0]['namedetails'] = {}
                for tag in child:
                    assert len(tag) == 0, f"Namedetails element '{tag.attrib['desc']}' has subelements"
                    self.result[0]['namedetails'][tag.attrib['desc']] = tag.text
            elif child.tag == 'geokml':
                assert 'geokml' not in self.result[0], "More than one geokml in result"
                # Keep the raw KML serialisation.
                self.result[0]['geokml'] = ET.tostring(child, encoding='unicode')
            else:
                assert child.tag == 'error', \
                       f"Unknown XML tag {child.tag} on page: {self.page}"
class StatusResponse(GenericResponse):
    """ Specialised class for status responses.
        Can also parse text responses.
    """

    def _parse_text(self):
        """ Plain-text status pages need no parsing. """
        # Nothing to do: self.page already holds the complete answer.
| 10,105 | 37.869231 | 103 | py |
Nominatim | Nominatim-master/test/bdd/steps/geometry_alias.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Collection of aliases for various world coordinates.
"""
# Mapping of ISO-3166 alpha-2 country codes to one reference coordinate
# inside that country, used as grid origins in the BDD scenarios.
# NOTE(review): the pairs look like (longitude, latitude) — e.g. 'DE'
# maps to (9.3, 50.2), which is inside Germany; confirm the order against
# the grid-origin consumer before relying on it.
ALIASES = {
    # Country aliases
    'AD': (1.58972, 42.54241),
    'AE': (54.61589, 24.82431),
    'AF': (65.90264, 34.84708),
    'AG': (-61.72430, 17.069),
    'AI': (-63.10571, 18.25461),
    'AL': (19.84941, 40.21232),
    'AM': (44.64229, 40.37821),
    'AO': (16.21924, -12.77014),
    'AQ': (44.99999, -75.65695),
    'AR': (-61.10759, -34.37615),
    'AS': (-170.68470, -14.29307),
    'AT': (14.25747, 47.36542),
    'AU': (138.23155, -23.72068),
    'AW': (-69.98255, 12.555),
    'AX': (19.91839, 59.81682),
    'AZ': (48.38555, 40.61639),
    'BA': (17.18514, 44.25582),
    'BB': (-59.53342, 13.19),
    'BD': (89.75989, 24.34205),
    'BE': (4.90078, 50.34682),
    'BF': (-0.56743, 11.90471),
    'BG': (24.80616, 43.09859),
    'BH': (50.52032, 25.94685),
    'BI': (29.54561, -2.99057),
    'BJ': (2.70062, 10.02792),
    'BL': (-62.79349, 17.907),
    'BM': (-64.77406, 32.30199),
    'BN': (114.52196, 4.28638),
    'BO': (-62.02473, -17.77723),
    'BQ': (-63.14322, 17.566),
    'BR': (-45.77065, -9.58685),
    'BS': (-77.60916, 23.8745),
    'BT': (90.01350, 27.28137),
    'BV': (3.35744, -54.4215),
    'BW': (23.51505, -23.48391),
    'BY': (26.77259, 53.15885),
    'BZ': (-88.63489, 16.33951),
    'CA': (-107.74817, 67.12612),
    'CC': (96.84420, -12.01734),
    'CD': (24.09544, -1.67713),
    'CF': (22.58701, 5.98438),
    'CG': (15.78875, 0.40388),
    'CH': (7.65705, 46.57446),
    'CI': (-6.31190, 6.62783),
    'CK': (-159.77835, -21.23349),
    'CL': (-70.41790, -53.77189),
    'CM': (13.26022, 5.94519),
    'CN': (96.44285, 38.04260),
    'CO': (-72.52951, 2.45174),
    'CR': (-83.83314, 9.93514),
    'CU': (-80.81673, 21.88852),
    'CV': (-24.50810, 14.929),
    'CW': (-68.96409, 12.1845),
    'CX': (105.62411, -10.48417),
    'CY': (32.95922, 35.37010),
    'CZ': (16.32098, 49.50692),
    'DE': (9.30716, 50.21289),
    'DJ': (42.96904, 11.41542),
    'DK': (9.18490, 55.98916),
    'DM': (-61.00358, 15.65470),
    'DO': (-69.62855, 18.58841),
    'DZ': (4.24749, 25.79721),
    'EC': (-77.45831, -0.98284),
    'EE': (23.94288, 58.43952),
    'EG': (28.95293, 28.17718),
    'EH': (-13.69031, 25.01241),
    'ER': (39.01223, 14.96033),
    'ES': (-2.59110, 38.79354),
    'ET': (38.61697, 7.71399),
    'FI': (26.89798, 63.56194),
    'FJ': (177.91853, -17.74237),
    'FK': (-58.99044, -51.34509),
    'FM': (151.95358, 8.5045),
    'FO': (-6.60483, 62.10000),
    'FR': (0.28410, 47.51045),
    'GA': (10.81070, -0.07429),
    'GB': (-0.92823, 52.01618),
    'GD': (-61.64524, 12.191),
    'GE': (44.16664, 42.00385),
    'GF': (-53.46524, 3.56188),
    'GG': (-2.50580, 49.58543),
    'GH': (-0.46348, 7.16051),
    'GI': (-5.32053, 36.11066),
    'GL': (-33.85511, 74.66355),
    'GM': (-16.40960, 13.25),
    'GN': (-13.83940, 10.96291),
    'GP': (-61.68712, 16.23049),
    'GQ': (10.23973, 1.43119),
    'GR': (23.17850, 39.06206),
    'GS': (-36.49430, -54.43067),
    'GT': (-90.74368, 15.20428),
    'GU': (144.73362, 13.44413),
    'GW': (-14.83525, 11.92486),
    'GY': (-58.45167, 5.73698),
    'HK': (114.18577, 22.34923),
    'HM': (73.68230, -53.22105),
    'HN': (-86.95414, 15.23820),
    'HR': (17.49966, 45.52689),
    'HT': (-73.51925, 18.32492),
    'HU': (20.35362, 47.51721),
    'ID': (123.34505, -0.83791),
    'IE': (-9.00520, 52.87725),
    'IL': (35.46314, 32.86165),
    'IM': (-4.86740, 54.023),
    'IN': (88.67620, 27.86155),
    'IO': (71.42743, -6.14349),
    'IQ': (42.58109, 34.26103),
    'IR': (56.09355, 30.46751),
    'IS': (-17.51785, 64.71687),
    'IT': (10.42639, 44.87904),
    'JE': (-2.19261, 49.12458),
    'JM': (-76.84020, 18.3935),
    'JO': (36.55552, 30.75741),
    'JP': (138.72531, 35.92099),
    'KE': (36.90602, 1.08512),
    'KG': (76.15571, 41.66497),
    'KH': (104.31901, 12.95555),
    'KI': (173.63353, 0.139),
    'KM': (44.31474, -12.241),
    'KN': (-62.69379, 17.2555),
    'KP': (126.65575, 39.64575),
    'KR': (127.27740, 36.41388),
    'KW': (47.30684, 29.69180),
    'KY': (-81.07455, 19.29949),
    'KZ': (72.00811, 49.88855),
    'LA': (102.44391, 19.81609),
    'LB': (35.48464, 33.41766),
    'LC': (-60.97894, 13.891),
    'LI': (9.54693, 47.15934),
    'LK': (80.38520, 8.41649),
    'LR': (-11.16960, 4.04122),
    'LS': (28.66984, -29.94538),
    'LT': (24.51735, 55.49293),
    'LU': (6.08649, 49.81533),
    'LV': (23.51033, 56.67144),
    'LY': (15.36841, 28.12177),
    'MA': (-4.03061, 33.21696),
    'MC': (7.47743, 43.62917),
    'MD': (29.61725, 46.66517),
    'ME': (19.72291, 43.02441),
    'MF': (-63.06666, 18.08102),
    'MG': (45.86378, -20.50245),
    'MH': (171.94982, 5.983),
    'MK': (21.42108, 41.08980),
    'ML': (-1.93310, 16.46993),
    'MM': (95.54624, 21.09620),
    'MN': (99.81138, 48.18615),
    'MO': (113.56441, 22.16209),
    'MP': (145.21345, 14.14902),
    'MQ': (-60.81128, 14.43706),
    'MR': (-9.42324, 22.59251),
    'MS': (-62.19455, 16.745),
    'MT': (14.38363, 35.94467),
    'MU': (57.55121, -20.41),
    'MV': (73.39292, 4.19375),
    'MW': (33.95722, -12.28218),
    'MX': (-105.89221, 25.86826),
    'MY': (112.71154, 2.10098),
    'MZ': (37.58689, -13.72682),
    'NA': (16.68569, -21.46572),
    'NC': (164.95322, -20.38889),
    'NE': (10.06041, 19.08273),
    'NF': (167.95718, -29.0645),
    'NG': (10.17781, 10.17804),
    'NI': (-85.87974, 13.21715),
    'NL': (-68.57062, 12.041),
    'NO': (23.11556, 70.09934),
    'NP': (83.36259, 28.13107),
    'NR': (166.93479, -0.5275),
    'NU': (-169.84873, -19.05305),
    'NZ': (167.97209, -45.13056),
    'OM': (56.86055, 20.47413),
    'PA': (-79.40160, 8.80656),
    'PE': (-78.66540, -7.54711),
    'PF': (-145.05719, -16.70862),
    'PG': (146.64600, -7.37427),
    'PH': (121.48359, 15.09965),
    'PK': (72.11347, 31.14629),
    'PL': (17.88136, 52.77182),
    'PM': (-56.19515, 46.78324),
    'PN': (-130.10642, -25.06955),
    'PR': (-65.88755, 18.37169),
    'PS': (35.39801, 32.24773),
    'PT': (-8.45743, 40.11154),
    'PW': (134.49645, 7.3245),
    'PY': (-59.51787, -22.41281),
    'QA': (51.49903, 24.99816),
    'RE': (55.77345, -21.36388),
    'RO': (26.37632, 45.36120),
    'RS': (20.40371, 44.56413),
    'RU': (116.44060, 59.06780),
    'RW': (29.57882, -1.62404),
    'SA': (47.73169, 22.43790),
    'SB': (164.63894, -10.23606),
    'SC': (46.36566, -9.454),
    'SD': (28.14720, 14.56423),
    'SE': (15.68667, 60.35568),
    'SG': (103.84187, 1.304),
    'SH': (-12.28155, -37.11546),
    'SI': (14.04738, 46.39085),
    'SJ': (15.27552, 79.23365),
    'SK': (20.41603, 48.86970),
    'SL': (-11.47773, 8.78156),
    'SM': (12.46062, 43.94279),
    'SN': (-15.37111, 14.99477),
    'SO': (46.93383, 9.34094),
    'SR': (-55.42864, 4.56985),
    'SS': (28.13573, 8.50933),
    'ST': (6.61025, 0.2215),
    'SV': (-89.36665, 13.43072),
    'SX': (-63.15393, 17.9345),
    'SY': (38.15513, 35.34221),
    'SZ': (31.78263, -26.14244),
    'TC': (-71.32554, 21.35),
    'TD': (17.42092, 13.46223),
    'TF': (137.5, -67.5),
    'TG': (1.06983, 7.87677),
    'TH': (102.00877, 16.42310),
    'TJ': (71.91349, 39.01527),
    'TK': (-171.82603, -9.20990),
    'TL': (126.22520, -8.72636),
    'TM': (57.71603, 39.92534),
    'TN': (9.04958, 34.84199),
    'TO': (-176.99320, -23.11104),
    'TR': (32.82002, 39.86350),
    'TT': (-60.70793, 11.1385),
    'TV': (178.77499, -9.41685),
    'TW': (120.30074, 23.17002),
    'TZ': (33.53892, -5.01840),
    'UA': (33.44335, 49.30619),
    'UG': (32.96523, 2.08584),
    'UM': (-169.50993, 16.74605),
    'US': (-116.39535, 40.71379),
    'UY': (-56.46505, -33.62658),
    'UZ': (61.35529, 42.96107),
    'VA': (12.33197, 42.04931),
    'VC': (-61.09905, 13.316),
    'VE': (-64.88323, 7.69849),
    'VG': (-64.62479, 18.419),
    'VI': (-64.88950, 18.32263),
    'VN': (104.20179, 10.27644),
    'VU': (167.31919, -15.88687),
    'WF': (-176.20781, -13.28535),
    'WS': (-172.10966, -13.85093),
    'YE': (45.94562, 16.16338),
    'YT': (44.93774, -12.60882),
    'ZA': (23.19488, -30.43276),
    'ZM': (26.38618, -14.39966),
    'ZW': (30.12419, -19.86907)
}
| 7,329 | 26.870722 | 58 | py |
Nominatim | Nominatim-master/test/bdd/steps/steps_osm_data.py | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
import tempfile
import random
import os
from pathlib import Path
from nominatim.tools.exec_utils import run_osm2pgsql
from nominatim.tools.replication import run_osm2pgsql_updates
from geometry_alias import ALIASES
def get_osm2pgsql_options(nominatim_env, fname, append):
    """ Assemble the option dict for running osm2pgsql against the test
        database. `fname` is the OSM input file; `append` selects between
        initial import (False) and update (True) mode.
    """
    return {'import_file': fname,
            'osm2pgsql': str(nominatim_env.build_dir / 'osm2pgsql' / 'osm2pgsql'),
            'osm2pgsql_cache': 50,
            'osm2pgsql_style': str(nominatim_env.get_test_config().get_import_style_file()),
            'osm2pgsql_style_path': nominatim_env.get_test_config().config_dir,
            'threads': 1,
            'dsn': nominatim_env.get_libpq_dsn(),
            'flatnode_file': '',
            # No special tablespaces in the test setup.
            'tablespaces': {'slim_data': '', 'slim_index': '',
                            'main_data': '', 'main_index': ''},
            'append': append}
def write_opl_file(opl, grid):
    """ Create a temporary OSM file from OPL and return the file name. It is
        the responsibility of the caller to delete the file again.

        Node with missing coordinates, can retrieve their coordinates from
        a supplied grid. Failing that a random coordinate is assigned.
    """
    with tempfile.NamedTemporaryFile(suffix='.opl', delete=False) as fd:
        for line in opl.splitlines():
            # Nodes that carry no explicit 'x' attribute get a location.
            if line.startswith('n') and ' x' not in line:
                node_id = int(line[1:].split(' ')[0])
                coord = grid.grid_node(node_id)
                if coord is None:
                    # Unknown to the grid: invent a position anywhere on Earth.
                    coord = (random.random() * 360 - 180,
                             random.random() * 180 - 90)
                line = "%s x%f y%f" % (line, coord[0], coord[1])
            fd.write(line.encode('utf-8'))
            fd.write(b'\n')

        return fd.name
@given('the lua style file')
def lua_style_file(context):
    """ Define a custom style file to use for the import.
    """
    # The attached step text becomes the style; it is written into the
    # scenario's website directory and announced to Nominatim through
    # the NOMINATIM_IMPORT_STYLE environment variable.
    style = Path(context.nominatim.website_dir.name) / 'custom.lua'
    style.write_text(context.text)
    context.nominatim.test_env['NOMINATIM_IMPORT_STYLE'] = str(style)
@given(u'the ([0-9.]+ )?grid(?: with origin (?P<origin>.*))?')
def define_node_grid(context, grid_step, origin):
    """
    Define a grid of node positions.
    Use a table to define the grid. The nodes must be integer ids. Optionally
    you can give the grid distance. The default is 0.00001 degrees.

    The origin may be given either as an explicit 'x,y' coordinate pair or
    as one of the country aliases from geometry_alias.ALIASES; without an
    origin the grid starts at (0, 0).
    """
    if grid_step is not None:
        grid_step = float(grid_step.strip())
    else:
        grid_step = 0.00001

    if origin:
        if ',' in origin:
            # An explicit 'x,y' coordinate pair.
            coords = origin.split(',')
            if len(coords) != 2:
                # Fixed typo in the user-facing message ('orgin').
                raise RuntimeError('Grid origin expects origin with x,y coordinates.')
            origin = (float(coords[0]), float(coords[1]))
        elif origin in ALIASES:
            # A symbolic country alias.
            origin = ALIASES[origin]
        else:
            raise RuntimeError('Grid origin must be either coordinate or alias.')
    else:
        origin = (0.0, 0.0)

    context.osm.set_grid([context.table.headings] + [list(h) for h in context.table],
                         grid_step, origin)
@when(u'loading osm data')
def load_osm_file(context):
    """
    Load the given data into a freshly created test data using osm2pgsql.
    No further indexing is done.

    The data is expected as attached text in OPL format.
    """
    # create an OSM file and import it
    fname = write_opl_file(context.text, context.osm)
    try:
        run_osm2pgsql(get_osm2pgsql_options(context.nominatim, fname, append=False))
    finally:
        os.remove(fname)

    ### reintroduce the triggers/indexes we've lost by having osm2pgsql set up place again
    cur = context.db.cursor()
    cur.execute("""CREATE TRIGGER place_before_delete BEFORE DELETE ON place
                   FOR EACH ROW EXECUTE PROCEDURE place_delete()""")
    cur.execute("""CREATE TRIGGER place_before_insert BEFORE INSERT ON place
                   FOR EACH ROW EXECUTE PROCEDURE place_insert()""")
    cur.execute("""CREATE UNIQUE INDEX idx_place_osm_unique on place using btree(osm_id,osm_type,class,type)""")
    context.db.commit()
@when(u'updating osm data')
def update_from_osm_file(context):
    """
    Update a database previously populated with 'loading osm data'.
    Needs to run indexing on the existing data first to yield the correct result.

    The data is expected as attached text in OPL format.
    """
    # Finish the initial import (index + refresh functions) before applying
    # the diff, just like a real update run would.
    context.nominatim.copy_from_place(context.db)
    context.nominatim.run_nominatim('index')
    context.nominatim.run_nominatim('refresh', '--functions')

    # create an OSM file and import it
    fname = write_opl_file(context.text, context.osm)
    try:
        run_osm2pgsql_updates(context.db,
                              get_osm2pgsql_options(context.nominatim, fname, append=True))
    finally:
        os.remove(fname)
@when('indexing')
def index_database(context):
    """
    Run the Nominatim indexing step. This will process data previously
    loaded with 'updating osm data'
    """
    # Delegates to the 'nominatim index' command of the test instance.
    context.nominatim.run_nominatim('index')
| 5,254 | 35.748252 | 112 | py |
lila | lila-main/setup.py |
import setuptools

# The PyPI long description is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="lila",
    version="0.0.0",
    author="Mahdi Qezlou, Simeon Bird, Adam Lidz, Guochao Sun, Andrew B. Newman",
    author_email="mahdi.qezlou@email.ucr.edu",
    description="Line Intensity map X Ly-Alpha forest forecast (LILA)",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # NOTE(review): the URL says 'lali' while the project is 'lila' --
    # possible typo in the repository name; confirm.
    url="https://github.com/mahdiqezlou/lali",
    project_urls={
        "Bug Tracker": "https://github.com/mahdiqezlou/lali",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Packages live under src/ ("src layout").
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    # NOTE(review): "<=3.9" caps the supported Python version at 3.9 --
    # confirm this restriction is intentional (e.g. for dependencies).
    python_requires="<=3.9",
)
| 869 | 30.071429 | 81 | py |
lila | lila-main/src/lila/inference.py | import numpy as np
from nbodykit.lab import cosmology
class Infer():
"""
some helper routines for inference from a simulation.
Used in notebooks/inference.ipynb
"""
    def __init__(self, st, iter_sampling=100_000, chains=6, linear=True, rsd=False, dm_file = None, kmin=None, kmax =None, k_non=None, lam=1):
        """
        Parameters :

        st : object holding the measured power spectra and their errors;
             the fit methods read e.g. `st.lim_pk`, `st.gal_pk`,
             `st.lya_pk`, the matching `sigma_*` arrays, `st.z`,
             `st.n3D`, `st.kmin` and `st.kmax`.
        iter_sampling : number of HMC draws per chain passed to CmdStan.
        chains : number of HMC chains.
        linear : bool, if True use the linear matter power spectrum.
        rsd : bool, if linear = True and rsd = True, account for the
              redshift space distortion (Kaiser and finger-of-god effects)
              in the linear matter power spectrum. Currently not
              implemented; get_pm() raises in that case.
        dm_file : optional HDF5 file with a dark-matter density grid; when
              given, the matter power spectrum is measured from this map.
        kmin, kmax : k range used in the fits; default to st.kmin/st.kmax.
        k_non : optional k scale beyond which error bars are inflated
              (see `lam`); None disables the inflation.
        lam : strength of the error-bar inflation beyond k_non.
        """
        self.st = st
        self.iter_sampling = iter_sampling
        self.chains = chains
        self.linear = linear
        self. rsd = rsd
        self.dm_file = dm_file
        # Posterior summaries; populated by the fit_* methods.
        self.samples = None
        self.modes = None
        self.medians = None
        self.std_modes = None
        self.std_medians = None
        if kmin is None:
            self.kmin = st.kmin
        else:
            self.kmin = kmin
        if kmax is None:
            self.kmax = st.kmax
        else:
            self.kmax = kmax
        self.k_non = k_non
        self.lam = lam
    def get_power_from_map(self):
        """ Measure the 2D matter power spectrum from the dark-matter
            density grid stored in `self.dm_file` (HDF5 dataset 'DM/dens').

        Returns the binned nbodykit FFTPower result.
        """
        from nbodykit.lab import ArrayMesh
        from nbodykit.lab import FFTPower
        import h5py
        with h5py.File(self.dm_file,'r') as f:
            dm = f['DM/dens'][:]
        # NOTE(review): BoxSize=205 and the k binning are hard-coded and
        # must match the simulation the map was produced from -- confirm.
        mesh = ArrayMesh(dm, BoxSize=205)
        pow = FFTPower(mesh, mode='2d', los=[0,0,1], kmin=1/205, kmax=1, dk=0.03).run()[0]
        return pow
def get_pm(self, k, z):
"""Linear Matter power spectrum
z : redshift
k: the k bins
Return: the linear matter power P(k)"""
if self.linear:
cosmo_nbkit = cosmology.Planck15
Plin = cosmology.LinearPower(cosmo=cosmo_nbkit, redshift=z, transfer='CLASS')
if self.rsd :
raise NotImplementedError('The Kaiser andFinger of god rsd effects are not '+
+'implemented in the inference. To use the simple linear matter power, pass `rsd=False` ')
return Plin(k)
elif self.dm_file is None:
pn = np.loadtxt('/run/media/mahdi/HD2/LIM/powerspectrum-0.2275.txt')
pn = np.interp(k, pn[:,0], pn[:,1])
return pn
else:
pk = self.get_power_from_map()
pn = np.abs(np.interp(k, pk['k'][:,-1], pk['power'][:,-1]))
return pn
    def fit_auto_lim(self, stan_model):
        """ Fit the LIM auto power spectrum alone.

        stan_model : a CmdStanModel whose data block expects
                     N, pm, lim_pk and sigma_lim_pk.
        Samples the clustering amplitude and the LIM shot noise; the
        posterior medians are stored on the object and the best-fit model
        curve in `self.lim_pk_model`.
        """
        data = {}
        # Restrict to the requested k range.
        ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
        data['N'] = self.st.lim_pk['k'][ind].shape[0]
        data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
        data["lim_pk"] = np.abs(self.st.lim_pk['power'][ind])
        data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[ind])
        # Draw posterior samples with CmdStan (HMC); the number of draws
        # and chains is taken from the constructor arguments.
        fit_auto = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
        self.samples = np.empty(shape=(fit_auto.stan_variable('clustering').size, 2))
        self.samples[:,0] = fit_auto.stan_variable('clustering')
        self.samples[:,1] = fit_auto.stan_variable('pshot_lim')
        # Compute posterior point estimates (modes/medians) and errors.
        self.get_mode_std()
        self.clustering = self.medians[0]
        self.pshot_lim = self.medians[1]
        self.lim_pk_model = self.clustering**2 * data['pm'] + self.pshot_lim
    def fit_lim_gal(self, stan_model):
        """ Joint fit of the LIM auto, galaxy auto and LIM x galaxy cross
            power spectra.

        Samples clustering, pshot_lim, bgal, pshot_gal and pshot_lim_gal;
        stores the posterior medians plus the best-fit model curves and
        their 16/84-percentile bounds on the object.
        """
        ## For Galaxies :
        # put data in dict to pass to the CmdStanModel
        # the keys have to match the variable names in the Stan file data block
        data = {}
        ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))[0]
        # Optionally inflate the error bars beyond k_non (quadratically in
        # k - k_non, scaled by self.lam) to de-weight non-linear scales.
        if self.k_non is not None:
            correction = self.st.gal_pk['k'][ind] - self.k_non
            indk0 = np.where(correction < 0)
            print(indk0)   # debug output left in place
            correction[indk0] = 0
            correction = correction**2
            print(correction)   # debug output left in place
        else:
            correction = 0
        sigma_lim_gal_pk = np.abs(self.st.sigma_lim_gal_pk[ind])*(1 + self.lam*correction)
        sigma_gal_pk = np.abs(self.st.sigma_gal_pk[ind])*(1 + self.lam*correction)
        data['N'] = self.st.lim_pk['k'][ind].shape[0]
        data['n3D'] = self.st.n3D
        data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
        data["lim_pk"] = np.abs(self.st.lim_pk['power'][ind])
        data['gal_pk'] = np.abs(self.st.gal_pk['power'][ind])
        data["lim_gal_pk"] = np.abs(self.st.lim_gal_pk['power'][ind])
        data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[ind])
        data['sigma_gal_pk'] = sigma_gal_pk
        data['sigma_lim_gal_pk'] = sigma_lim_gal_pk
        # Draw posterior samples with CmdStan (HMC); draw/chain counts come
        # from the constructor arguments.
        fit_lim_gal = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
        self.samples = np.empty(shape=(fit_lim_gal.stan_variable('clustering').size,5))
        self.samples[:,0] = fit_lim_gal.stan_variable('clustering')
        self.samples[:,1] = fit_lim_gal.stan_variable('pshot_lim')
        self.samples[:,2] = fit_lim_gal.stan_variable('bgal')
        self.samples[:,3] = fit_lim_gal.stan_variable('pshot_gal')
        self.samples[:,4] = fit_lim_gal.stan_variable('pshot_lim_gal')
        self.get_mode_std()
        self.clustering = self.medians[0]
        self.pshot_lim = self.medians[1]
        self.bgal = self.medians[2]
        self.pshot_gal = self.medians[3]
        self.pshot_lim_gal = self.medians[4]
        size = self.samples.shape[0]
        # Best-fit curves at the median parameters plus per-k 16/84-percentile
        # bounds over the full posterior; the per-sample arrays are deleted
        # immediately to limit memory use.
        lim_pk_model = fit_lim_gal.stan_variable('clustering').reshape(size, -1)**2 * data['pm'] + fit_lim_gal.stan_variable('pshot_lim').reshape(size, -1)
        self.lim_pk_bounds = np.quantile(lim_pk_model, q=[0.16,0.84], axis=0)
        del lim_pk_model
        self.lim_pk_model = self.clustering**2 * data['pm'] + self.pshot_lim
        gal_pk_model = fit_lim_gal.stan_variable('bgal').reshape(size, -1)**2 * data['pm'] + fit_lim_gal.stan_variable('pshot_gal').reshape(size, -1)
        self.gal_pk_bounds = np.quantile(gal_pk_model, q = [0.16,0.84], axis=0)
        del gal_pk_model
        self.gal_pk_model = self.bgal**2 * data['pm'] + self.pshot_gal
        lim_gal_pk_model = (fit_lim_gal.stan_variable('bgal').reshape(size, -1)*fit_lim_gal.stan_variable('clustering').reshape(size, -1))*data['pm'] + fit_lim_gal.stan_variable('pshot_lim_gal').reshape(size, -1)
        self.lim_gal_pk_bounds = np.quantile(lim_gal_pk_model, q=[0.16,0.84], axis=0)
        del lim_gal_pk_model
        self.lim_gal_pk_model = self.clustering * self.bgal * data['pm'] + self.pshot_lim_gal
    def fit_lim_lya(self, stan_model, pass_rk=False):
        """ Joint fit of the LIM auto, Lya-forest auto and LIM x Lya cross
            power spectra.

        pass_rk : if True, also hand the measured cross-correlation
                  coefficient r(k) to the Stan model.
        Samples clustering, pshot_lim and blya; stores the posterior
        medians plus the best-fit model curves and their 16/84-percentile
        bounds on the object.
        """
        ## For Lya :
        data = {}
        ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
        data['N'] = self.st.lim_pk['k'][ind].shape[0]
        data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
        data["lim_pk"] = np.abs(self.st.lim_pk['power'][ind])
        data['lya_pk'] = np.abs(self.st.lya_pk['power'][ind])
        if pass_rk:
            # NOTE(review): r(k) is computed over the full k range while the
            # other arrays are cut to [kmin, kmax] -- confirm the Stan model
            # expects that.
            rk = np.abs(self.st.lim_lya_pk['power'][:]/ np.sqrt(self.st.lya_pk['power'][:]*
                                                        self.st.lim_pk['power'][:]))
            data['rk'] = rk
        data["lim_lya_pk"] = np.abs(self.st.lim_lya_pk['power'][ind])
        data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[ind])
        data['sigma_lya_pk'] = np.abs(self.st.sigma_lya_pk[ind])
        data['sigma_lim_lya_pk'] = self.st.sigma_lim_lya_pk[ind]
        # Draw posterior samples with CmdStan (HMC); draw/chain counts come
        # from the constructor arguments.
        fit = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
        self.samples = np.empty(shape=(fit.stan_variable('clustering').size,3))
        self.samples[:,0] = fit.stan_variable('clustering')
        self.samples[:,1] = fit.stan_variable('pshot_lim')
        self.samples[:,2] = fit.stan_variable('blya')
        self.get_mode_std()
        self.clustering = self.medians[0]
        self.pshot_lim = self.medians[1]
        self.blya = self.medians[2]
        size = self.samples.shape[0]
        # Best-fit curves at the median parameters plus per-k
        # 16/84-percentile bounds; note the Lya auto and the cross power
        # carry no shot-noise term.
        lim_pk_model = fit.stan_variable('clustering').reshape(size,-1)**2 * data['pm'] + fit.stan_variable('pshot_lim').reshape(size, -1)
        self.lim_pk_bounds = np.quantile(lim_pk_model, q=[0.16,0.84], axis=0)
        del lim_pk_model
        self.lim_pk_model = self.clustering**2 * data['pm'] + self.pshot_lim
        lya_pk_model = fit.stan_variable('blya').reshape(size, -1)**2 * data['pm']
        self.lya_pk_bounds = np.quantile(lya_pk_model, q=[0.16,0.84], axis=0)
        del lya_pk_model
        self.lya_pk_model = self.blya**2 * data['pm']
        lim_lya_pk_model = fit.stan_variable('clustering').reshape(size,-1)*fit.stan_variable('blya').reshape(size, -1)* data['pm']
        self.lim_lya_pk_bounds = np.quantile(lim_lya_pk_model, q=[0.16,0.84], axis=0)
        del lim_lya_pk_model
        self.lim_lya_pk_model = self.clustering*self.blya*data['pm']
def fit_co_cross_gal(self, stan_model):
    """Fit the galaxy auto power and the LIM x galaxy cross power.

    Parameters
    ----------
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.

    Side effects: self.samples holds the posterior draws of
    (clustering, bgal, pshot_gal, pshot_lim_gal); their medians are
    stored on self and the best-fit model curves are recorded in
    self.gal_pk_model and self.lim_gal_pk_model.
    """
    ## For Galaxies :
    # put data in dict to pass to the CmdStanModel
    # the keys have to match the variable names in the Stan file data block
    data = {}
    # Keep only the k-bins inside [kmin, kmax].
    ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    if self.k_non is not None:
        # Inflate the error bars beyond the non-linear scale k_non,
        # quadratically in (k - k_non).
        correction = self.st.gal_pk['k'][ind] - self.k_non
        indk0 = np.where(correction < 0)
        print(indk0)
        correction[indk0] = 0
        correction = correction**2
        print(correction)
    else:
        correction = 0
    sigma_lim_gal_pk = np.abs(self.st.sigma_lim_gal_pk[ind])*(1 + self.lam*correction)
    sigma_gal_pk = np.abs(self.st.sigma_gal_pk[ind])*(1 + self.lam*correction)
    data['N'] = self.st.lim_pk['k'][ind].shape[0]
    data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
    data['gal_pk'] = np.abs(self.st.gal_pk['power'][ind])
    data["lim_gal_pk"] = np.abs(self.st.lim_gal_pk['power'][ind])
    data['sigma_gal_pk'] = sigma_gal_pk
    data['sigma_lim_gal_pk'] = sigma_lim_gal_pk
    # Sample the posterior with HMC (iterations/chains set on self).
    fit_lim_gal = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    self.samples = np.empty(shape=(fit_lim_gal.stan_variable('clustering').size,4))
    self.samples[:,0] = fit_lim_gal.stan_variable('clustering')
    self.samples[:,1] = fit_lim_gal.stan_variable('bgal')
    self.samples[:,2] = fit_lim_gal.stan_variable('pshot_gal')
    self.samples[:,3] = fit_lim_gal.stan_variable('pshot_lim_gal')
    self.get_mode_std()
    self.clustering = self.medians[0]
    self.bgal = self.medians[1]
    self.pshot_gal = self.medians[2]
    self.pshot_lim_gal = self.medians[3]
    self.gal_pk_model = self.bgal**2 * data['pm'] + self.pshot_gal
    self.lim_gal_pk_model = self.bgal*self.clustering * data['pm'] + self.pshot_lim_gal
def fit_co_cross_lya(self, stan_model):
    """Fit the Lya auto power and the LIM x Lya cross power with Stan.

    Parameters
    ----------
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.

    Side effects: self.samples holds the posterior draws of
    (clustering, blya); self.clustering and self.blya are set to the
    sample medians, and the best-fit model curves are stored in
    self.lya_pk_model and self.lim_lya_pk_model.
    """
    ## For Lya :
    data = {}
    # Keep only the k-bins inside [kmin, kmax].
    ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    data['N'] = self.st.lim_pk['k'][ind].shape[0]
    data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
    data['lya_pk'] = np.abs(self.st.lya_pk['power'][ind])
    data["lim_lya_pk"] = np.abs(self.st.lim_lya_pk['power'][ind])
    data['sigma_lya_pk'] = np.abs(self.st.sigma_lya_pk[ind])
    data['sigma_lim_lya_pk'] = self.st.sigma_lim_lya_pk[ind]
    # Sample the posterior with HMC (iterations/chains set on self).
    fit = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    # BUG FIX: self.samples used to be allocated with 3 columns while
    # only columns 0 and 2 were filled; column 1 stayed np.empty garbage
    # and self.blya was read from medians[1] -- i.e. from the garbage
    # column.  Only two parameters are sampled here; store exactly them.
    self.samples = np.empty(shape=(fit.stan_variable('clustering').size, 2))
    self.samples[:, 0] = fit.stan_variable('clustering')
    self.samples[:, 1] = fit.stan_variable('blya')
    self.get_mode_std()
    self.clustering = self.medians[0]
    self.blya = self.medians[1]
    self.lya_pk_model = self.blya**2 * data['pm']
    self.lim_lya_pk_model = self.clustering*self.blya * data['pm']
def fit_co_coXlya(self, stan_model, pass_rk=False):
    """Jointly fit the LIM auto power and the LIM x Lya cross power.

    Like fit_lim_lya but without the Lya auto spectrum in the data.

    Parameters
    ----------
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.
    pass_rk : bool, default False.  If True, also pass the
        cross-correlation coefficient r(k) to the Stan model.

    Side effects: self.samples holds the draws of
    (clustering, pshot_lim, blya); medians are stored on self and the
    best-fit curves plus 16-84% bands are recorded per spectrum.
    """
    ## For Lya :
    data = {}
    # Keep only the k-bins inside [kmin, kmax].
    ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    data['N'] = self.st.lim_pk['k'][ind].shape[0]
    data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
    data["lim_pk"] = np.abs(self.st.lim_pk['power'][ind])
    if pass_rk:
        # NOTE(review): rk uses the full k range ([:]) while all other
        # entries use `ind` -- confirm the Stan model expects that.
        rk = np.abs(self.st.lim_lya_pk['power'][:]/ np.sqrt(self.st.lya_pk['power'][:]*
        self.st.lim_pk['power'][:]))
        data['rk'] = rk
    data["lim_lya_pk"] = np.abs(self.st.lim_lya_pk['power'][ind])
    data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[ind])
    data['sigma_lim_lya_pk'] = self.st.sigma_lim_lya_pk[ind]
    # Sample the posterior with HMC (iterations/chains set on self).
    fit = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    self.samples = np.empty(shape=(fit.stan_variable('clustering').size,3))
    self.samples[:,0] = fit.stan_variable('clustering')
    self.samples[:,1] = fit.stan_variable('pshot_lim')
    self.samples[:,2] = fit.stan_variable('blya')
    self.get_mode_std()
    self.clustering = self.medians[0]
    self.pshot_lim = self.medians[1]
    self.blya = self.medians[2]
    size = self.samples.shape[0]
    # Per-draw model curves; intermediates deleted to limit peak memory.
    lim_pk_model = fit.stan_variable('clustering').reshape(size,-1)**2 * data['pm'] + fit.stan_variable('pshot_lim').reshape(size, -1)
    self.lim_pk_bounds = np.quantile(lim_pk_model, q=[0.16,0.84], axis=0)
    del lim_pk_model
    self.lim_pk_model = self.clustering**2 * data['pm'] + self.pshot_lim
    lya_pk_model = fit.stan_variable('blya').reshape(size, -1)**2 * data['pm']
    self.lya_pk_bounds = np.quantile(lya_pk_model, q=[0.16,0.84], axis=0)
    del lya_pk_model
    self.lya_pk_model = self.blya**2 * data['pm']
    lim_lya_pk_model = fit.stan_variable('clustering').reshape(size,-1)*fit.stan_variable('blya').reshape(size, -1)* data['pm']
    self.lim_lya_pk_bounds = np.quantile(lim_lya_pk_model, q=[0.16,0.84], axis=0)
    del lim_lya_pk_model
    self.lim_lya_pk_model = self.clustering*self.blya*data['pm']
def fit_co_gal_fixed_pshot(self, stan_model, pshot_gal, pshot_lim_gal):
    """Joint LIM auto + galaxy auto + LIM x galaxy fit with both shot
    noise terms held fixed.

    Parameters
    ----------
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.
    pshot_gal : float, fixed galaxy shot noise, passed to Stan as data.
    pshot_lim_gal : float, fixed LIM x galaxy shot noise, passed as data.

    Side effects: self.samples holds the draws of
    (clustering, pshot_lim, bgal); medians are stored on self, the
    fixed shot terms are echoed to self.pshot_gal / self.pshot_lim_gal,
    and best-fit curves plus 16-84% bands are recorded per spectrum.
    """
    ## For Galaxies :
    # put data in dict to pass to the CmdStanModel
    # the keys have to match the variable names in the Stan file data block
    data = {}
    # Keep only the k-bins inside [kmin, kmax].
    ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    if self.k_non is not None:
        # Inflate the error bars beyond the non-linear scale k_non.
        correction = self.st.gal_pk['k'][ind] - self.k_non
        indk0 = np.where(correction < 0)
        print(indk0)
        correction[indk0] = 0
        correction = correction**2
        print(correction)
    else:
        correction = 0
    sigma_lim_gal_pk = np.abs(self.st.sigma_lim_gal_pk[ind])*(1 + self.lam*correction)
    sigma_gal_pk = np.abs(self.st.sigma_gal_pk[ind])*(1 + self.lam*correction)
    data['N'] = self.st.lim_pk['k'][ind].shape[0]
    data['pshot_gal'] = pshot_gal
    data['pshot_lim_gal'] = pshot_lim_gal
    data['n3D'] = self.st.n3D
    data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
    data["lim_pk"] = np.abs(self.st.lim_pk['power'][ind])
    data['gal_pk'] = np.abs(self.st.gal_pk['power'][ind])
    data["lim_gal_pk"] = np.abs(self.st.lim_gal_pk['power'][ind])
    data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[ind])
    data['sigma_gal_pk'] = sigma_gal_pk
    data['sigma_lim_gal_pk'] = sigma_lim_gal_pk
    # Sample the posterior with HMC (iterations/chains set on self).
    fit_lim_gal = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    # BUG FIX: self.samples used to be allocated with 5 columns while
    # only 3 parameters are sampled here; columns 3-4 stayed np.empty
    # garbage and leaked into get_mode_std's rounded arrays.
    self.samples = np.empty(shape=(fit_lim_gal.stan_variable('clustering').size, 3))
    self.samples[:,0] = fit_lim_gal.stan_variable('clustering')
    self.samples[:,1] = fit_lim_gal.stan_variable('pshot_lim')
    self.samples[:,2] = fit_lim_gal.stan_variable('bgal')
    self.get_mode_std()
    self.clustering = self.medians[0]
    self.pshot_lim = self.medians[1]
    self.bgal = self.medians[2]
    self.pshot_gal = pshot_gal
    self.pshot_lim_gal = pshot_lim_gal
    size = self.samples.shape[0]
    # Per-draw model curves; intermediates deleted to limit peak memory.
    lim_pk_model = fit_lim_gal.stan_variable('clustering').reshape(size, -1)**2 * data['pm'] + fit_lim_gal.stan_variable('pshot_lim').reshape(size, -1)
    self.lim_pk_bounds = np.quantile(lim_pk_model, q=[0.16,0.84], axis=0)
    del lim_pk_model
    self.lim_pk_model = self.clustering**2 * data['pm'] + self.pshot_lim
    gal_pk_model = fit_lim_gal.stan_variable('bgal').reshape(size, -1)**2 * data['pm'] + pshot_gal
    self.gal_pk_bounds = np.quantile(gal_pk_model, q = [0.16,0.84], axis=0)
    del gal_pk_model
    self.gal_pk_model = self.bgal**2 * data['pm'] + self.pshot_gal
    lim_gal_pk_model = (fit_lim_gal.stan_variable('bgal').reshape(size, -1)*fit_lim_gal.stan_variable('clustering').reshape(size, -1))*data['pm'] + pshot_lim_gal
    self.lim_gal_pk_bounds = np.quantile(lim_gal_pk_model, q=[0.16,0.84], axis=0)
    del lim_gal_pk_model
    self.lim_gal_pk_model = self.clustering * self.bgal * data['pm'] + self.pshot_lim_gal
def point_estimate_co_gal(self, stan_model):
    """Return the Stan point estimate (penalized MLE via optimize) for
    the joint LIM auto + galaxy auto + LIM x galaxy fit.

    stan_model : compiled CmdStanModel; its data block must match the
    keys assembled below.  No HMC sampling is performed.
    """
    ## For Galaxies :
    # Select the k-bins inside [kmin, kmax].
    keep = np.where((self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    kvals = self.st.lim_pk['k'][keep]
    if self.k_non is not None:
        # Inflate the error bars beyond the non-linear scale k_non.
        correction = self.st.gal_pk['k'][keep] - self.k_non
        indk0 = np.where(correction < 0)
        print(indk0)
        correction[indk0] = 0
        correction = correction**2
        print(correction)
    else:
        correction = 0
    error_weight = 1 + self.lam * correction
    # The keys have to match the variable names in the Stan data block.
    data = {
        'N': kvals.shape[0],
        'n3D': self.st.n3D,
        'pm': self.get_pm(k=kvals, z=self.st.z),
        'lim_pk': np.abs(self.st.lim_pk['power'][keep]),
        'gal_pk': np.abs(self.st.gal_pk['power'][keep]),
        'lim_gal_pk': np.abs(self.st.lim_gal_pk['power'][keep]),
        'sigma_lim_pk': np.abs(self.st.sigma_lim_pk[keep]),
        'sigma_gal_pk': np.abs(self.st.sigma_gal_pk[keep]) * error_weight,
        'sigma_lim_gal_pk': np.abs(self.st.sigma_lim_gal_pk[keep]) * error_weight,
    }
    return stan_model.optimize(data=data)
def fit_co_no_gal(self, stan_model):
    """Fit the LIM auto power and the LIM x galaxy cross power (the
    galaxy auto spectrum itself is not used).

    Parameters
    ----------
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.

    Side effects: self.samples holds the draws of
    (clustering, pshot_lim, bgal, pshot_lim_gal); their medians are
    stored on self and the best-fit model curves are recorded.
    """
    ## For Galaxies :
    # put data in dict to pass to the CmdStanModel
    # the keys have to match the variable names in the Stan file data block
    data = {}
    # Keep only the k-bins inside [kmin, kmax].
    ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    if self.k_non is not None:
        # Inflate the error bars beyond the non-linear scale k_non.
        correction = self.st.gal_pk['k'][ind] - self.k_non
        indk0 = np.where(correction < 0)
        print(indk0)
        correction[indk0] = 0
        correction = correction**2
        print(correction)
    else:
        correction = 0
    sigma_lim_gal_pk = np.abs(self.st.sigma_lim_gal_pk[ind])*(1 + self.lam*correction)
    # NOTE(review): sigma_gal_pk is computed but never used in this
    # method (no galaxy auto spectrum in the fit).
    sigma_gal_pk = np.abs(self.st.sigma_gal_pk[ind])*(1 + self.lam*correction)
    data['N'] = self.st.lim_pk['k'][ind].shape[0]
    data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
    data["lim_pk"] = np.abs(self.st.lim_pk['power'][ind])
    data["lim_gal_pk"] = np.abs(self.st.lim_gal_pk['power'][ind])
    data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[ind])
    data['sigma_lim_gal_pk'] = sigma_lim_gal_pk
    # Sample the posterior with HMC (iterations/chains set on self).
    fit_lim_gal = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    self.samples = np.empty(shape=(fit_lim_gal.stan_variable('clustering').size,4))
    self.samples[:,0] = fit_lim_gal.stan_variable('clustering')
    self.samples[:,1] = fit_lim_gal.stan_variable('pshot_lim')
    self.samples[:,2] = fit_lim_gal.stan_variable('bgal')
    self.samples[:,3] = fit_lim_gal.stan_variable('pshot_lim_gal')
    self.get_mode_std()
    self.clustering = self.medians[0]
    self.pshot_lim = self.medians[1]
    self.bgal = self.medians[2]
    self.pshot_lim_gal = self.medians[3]
    self.lim_pk_model = self.clustering**2 * data['pm'] + self.pshot_lim
    self.lim_gal_pk_model = self.bgal*self.clustering * data['pm'] + self.pshot_lim_gal
def fit_co_no_gal_bgal_fixed(self, stan_model, bgal):
    """Like fit_co_no_gal but with the galaxy bias held fixed.

    Parameters
    ----------
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.
    bgal : float, the fixed galaxy bias.
        # NOTE(review): bgal is only echoed to self.bgal and is NOT
        # passed to the Stan model's data -- presumably the Stan file
        # hard-codes the same value; confirm.

    Side effects: self.samples holds the draws of
    (clustering, pshot_lim, pshot_lim_gal); medians and the best-fit
    model curves are stored on self.
    """
    ## For Galaxies :
    # put data in dict to pass to the CmdStanModel
    # the keys have to match the variable names in the Stan file data block
    data = {}
    # Keep only the k-bins inside [kmin, kmax].
    ind = np.where( (self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    if self.k_non is not None:
        # Inflate the error bars beyond the non-linear scale k_non.
        correction = self.st.gal_pk['k'][ind] - self.k_non
        indk0 = np.where(correction < 0)
        print(indk0)
        correction[indk0] = 0
        correction = correction**2
        print(correction)
    else:
        correction = 0
    sigma_lim_gal_pk = np.abs(self.st.sigma_lim_gal_pk[ind])*(1 + self.lam*correction)
    # NOTE(review): sigma_gal_pk is computed but never used here.
    sigma_gal_pk = np.abs(self.st.sigma_gal_pk[ind])*(1 + self.lam*correction)
    data['N'] = self.st.lim_pk['k'][ind].shape[0]
    data['pm'] = self.get_pm(k=self.st.lim_pk['k'][ind], z=self.st.z)
    data["lim_pk"] = np.abs(self.st.lim_pk['power'][ind])
    data["lim_gal_pk"] = np.abs(self.st.lim_gal_pk['power'][ind])
    data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[ind])
    data['sigma_lim_gal_pk'] = sigma_lim_gal_pk
    # Sample the posterior with HMC (iterations/chains set on self).
    fit_lim_gal = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    self.samples = np.empty(shape=(fit_lim_gal.stan_variable('clustering').size,3))
    self.samples[:,0] = fit_lim_gal.stan_variable('clustering')
    self.samples[:,1] = fit_lim_gal.stan_variable('pshot_lim')
    self.samples[:,2] = fit_lim_gal.stan_variable('pshot_lim_gal')
    self.get_mode_std()
    self.clustering = self.medians[0]
    self.pshot_lim = self.medians[1]
    self.bgal = bgal
    self.pshot_lim_gal = self.medians[2]
    self.lim_pk_model = self.clustering**2 * data['pm'] + self.pshot_lim
    self.lim_gal_pk_model = self.bgal*self.clustering * data['pm'] + self.pshot_lim_gal
def fit_auto_gal(self, stan_model):
    """Fit only the galaxy auto power spectrum for (bgal, pshot_gal).

    Parameters
    ----------
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.

    Side effects: self.samples holds the draws of (bgal, pshot_gal);
    their medians are stored on self and the best-fit model curve is
    recorded in self.gal_pk_model.
    """
    data = {}
    # Keep only the k-bins inside [kmin, kmax].
    ind = np.where( (self.st.gal_pk['k'][:] >= self.kmin) * (self.st.gal_pk['k'][:] <= self.kmax))
    if self.k_non is not None:
        # Inflate the error bars beyond the non-linear scale k_non.
        correction = self.st.gal_pk['k'][ind] - self.k_non
        indk0 = np.where(correction < 0)
        print(indk0)
        correction[indk0] = 0
        correction = correction**2
        print(correction)
    else:
        correction = 0
    # CLEANUP: a dead local sigma_lim_gal_pk used to be computed here;
    # this method fits only the galaxy auto spectrum.
    sigma_gal_pk = np.abs(self.st.sigma_gal_pk[ind])*(1 + self.lam*correction)
    data['N'] = self.st.gal_pk['k'][ind].shape[0]
    data['pm'] = self.get_pm(k=self.st.gal_pk['k'][ind], z=self.st.z)
    data['gal_pk'] = np.abs(self.st.gal_pk['power'][ind])
    data['sigma_gal_pk'] = sigma_gal_pk
    # Sample the posterior with HMC (iterations/chains set on self).
    fit_lim_gal = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    self.samples = np.empty(shape=(fit_lim_gal.stan_variable('bgal').size,2))
    self.samples[:,0] = fit_lim_gal.stan_variable('bgal')
    self.samples[:,1] = fit_lim_gal.stan_variable('pshot_gal')
    self.get_mode_std()
    self.bgal = self.medians[0]
    self.pshot_gal = self.medians[1]
    self.gal_pk_model = self.bgal**2 * data['pm'] + self.pshot_gal
def fit_auto_lya(self, stan_model):
    """Fit only the Lya auto power spectrum for the Lya bias blya.

    stan_model : compiled CmdStanModel; its data block must match the
    keys assembled below.  Stores the draws in self.samples, the
    median in self.blya and the best-fit curve in self.lya_pk_model.
    """
    ## For Lya :
    # Select the k-bins inside [kmin, kmax].
    keep = np.where((self.st.lim_pk['k'][:] >= self.kmin) * (self.st.lim_pk['k'][:] <= self.kmax))
    kvals = self.st.lim_pk['k'][keep]
    data = {
        'N': kvals.shape[0],
        'pm': self.get_pm(k=kvals, z=self.st.z),
        'lya_pk': np.abs(self.st.lya_pk['power'][keep]),
        'sigma_lya_pk': np.abs(self.st.sigma_lya_pk[keep]),
    }
    # Sample the posterior with HMC (iterations/chains set on self).
    fit = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    draws = fit.stan_variable('blya')
    self.samples = np.empty(shape=(draws.size, 1))
    self.samples[:, 0] = draws
    self.get_mode_std()
    self.blya = self.medians[0]
    self.lya_pk_model = self.blya**2 * data['pm']
def fit_lya_gal(self, st, stan_model):
    """Joint fit of all five spectra (LIM auto, Lya auto, galaxy auto,
    LIM x Lya, LIM x galaxy) over the full k range.

    Parameters
    ----------
    st : NOTE(review): unused -- the method reads self.st instead;
        confirm whether the parameter should replace self.st.
    stan_model : compiled CmdStanModel; its data block must match the
        keys placed into `data` below.

    Side effects: self.samples holds the posterior draws of
    (clustering, pshot_lim, blya, bgal, pshot_lim_gal).  Unlike the
    sibling fit_* methods, no kmin/kmax cut is applied and no medians
    or model curves are extracted here.
    """
    ## For All :
    data = {}
    data['N'] = self.st.lim_pk['k'][:].shape[0]
    data['n3D'] = self.st.n3D
    data['pm'] = self.get_pm(self.st.lim_pk['k'][:], self.st.z)
    data["lim_pk"] = np.abs(self.st.lim_pk['power'][:])
    data["lya_pk"] = np.abs(self.st.lya_pk['power'][:])
    data["gal_pk"] = np.abs(self.st.gal_pk['power'][:])
    data["lim_lya_pk"] = np.abs(self.st.lim_lya_pk['power'][:])
    data["lim_gal_pk"] = np.abs(self.st.lim_gal_pk['power'][:])
    data['sigma_lya_pk'] = np.abs(self.st.sigma_lya_pk[:])
    data['sigma_gal_pk'] = np.abs(self.st.sigma_gal_pk[:])
    data['sigma_lim_pk'] = np.abs(self.st.sigma_lim_pk[:])
    data['sigma_lim_lya_pk'] = np.abs(self.st.sigma_lim_lya_pk[:])
    data['sigma_lim_gal_pk'] = np.abs(self.st.sigma_lim_gal_pk[:])
    # Sample the posterior with HMC (iterations/chains set on self).
    fit = stan_model.sample(data=data, iter_sampling=self.iter_sampling, chains=self.chains)
    self.samples = np.empty(shape=(fit.stan_variable('clustering').size,5))
    self.samples[:,0] = fit.stan_variable('clustering')
    self.samples[:,1] = fit.stan_variable('pshot_lim')
    self.samples[:,2] = fit.stan_variable('blya')
    self.samples[:,3] = fit.stan_variable('bgal')
    self.samples[:,4] = fit.stan_variable('pshot_lim_gal')
def get_mode_std(self):
    """Compute central values and scatter for each parameter column of
    self.samples.

    self.samples is an (n_draws, n_params) array.  Results are cached:
    once self.modes is set, nothing is recomputed.  Sets self.modes,
    self.std_modes, self.medians and self.std_medians, each rounded
    to 10 decimals.
    """
    if self.modes is not None:
        return
    modes, std_modes = [], []
    medians, std_medians = [], []
    for col in range(self.samples.shape[1]):
        draws = self.samples[:, col]
        # Mode = most frequent draw (ties resolved to the smallest value).
        values, counts = np.unique(draws, return_counts=True)
        mode = values[np.argmax(counts)]
        median = np.median(draws)
        modes.append(mode)
        medians.append(median)
        # RMS deviation around each central value.
        std_medians.append(np.sqrt(np.mean((draws - median)**2)))
        std_modes.append(np.sqrt(np.mean((draws - mode)**2)))
    self.modes = np.around(modes, 10)
    self.std_modes = np.around(std_modes, 10)
    self.medians = np.around(medians, 10)
    self.std_medians = np.around(std_medians, 10)
def print_summary_stats(self, param_labels, survey_label):
    """Pretty-print mode +/- std for every fitted parameter.

    Parameters
    ----------
    param_labels : list of LaTeX label strings, one per parameter
        column of self.samples.
    survey_label : str, header printed before the parameter table.
    """
    from IPython.display import display, Math
    # BUG FIX: this used to print `survey_labels[n]` -- both names are
    # undefined (the parameter is `survey_label`), raising NameError.
    print(survey_label)
    self.get_mode_std()
    for i in range(len(param_labels)):
        frac = np.around(self.std_modes[i]/self.modes[i],5)
        frac_r = np.around(self.modes[i]/self.std_modes[i], 5)
        display(Math(param_labels[i]+' = '+str(self.modes[i])+' \pm '+str(self.std_modes[i])+', \ frac ='+str(frac)+',\ 1/frac = '+str(frac_r)))
def get_true_paramters(self, lim_mock):
    """Using the generative CO mode, calculate the true parameters
    num: The index to the paramter set in the MCMC collection of the CO model
    Returns: (Tb, P_shot)
    Tb: <T_CO>*b_CO
    P_shot: CO auto shot noise power
    """
    # NOTE(review): implementation looks unfinished -- it computes the
    # mean CO temperature into an unused local and returns nothing,
    # despite the docstring promising (Tb, P_shot).  Confirm intent
    # before relying on this method.
    co_temp = lim_mock.co_temp.compute()
    mean_Tco = np.mean(co_temp)
def get_b_lya(self, z):
    """Return the Lya forest linear bias b_Lya.

    Currently a hard-coded constant, independent of the requested
    redshift z.
    """
    # TODO: derive this from the mock map instead of hard-coding it.
    return -0.20
def _get_bco(self):
"""Calculate the CO bias for a catalog from the CO model :
b_CO = \int L_{CO}(M) b(M) dn/dM dM / \int L_{CO}(M) dn/dM dM
Wehre b(M) is the mass dependant halo bias and dn/dM the halo
mass function.
Returns: b_CO
"""
pass
def _get_Pshot_co(self, lim_mock):
"""Get the CO shot noise power :
(e.g. eq 2 from Keenan+21 arxive:2110.02239)
P_{shot, CO} = C \int L^{2}_{CO} dn/dL dL
Where the C is the conversion factor from CO luminocity to
brightness temperature, same as in mock_lim.py
--------------------------------
Paramters :
lim_mock : an instance of lim_lytomo.mock_lim.Mock_Lim
Returns: p_{shot, co} in appropriate units
"""
lco = lim_mock.get_co_lum_halos()
assert len(lim_mock.boxsize )==3
vt = np.prod(lim_mock.boxsize)
return np.sum(lim_mock.co_lum_to_temp(lco=lco, vol=vt)**2)*vt
def rsd_term(self):
    """Analytic term of the power-spectrum formalism accounting for
    redshift-space distortions (RSD).  RSD are currently ignored, so
    this returns 0.
    """
    # BUG FIX: the original return line had extraction/table metadata
    # fused onto it ("return 0 | 30,701 | ..."), which is not valid
    # Python; the intended statement is simply `return 0`.
    return 0
import os
import h5py
import numpy as np
from nbodykit.lab import ArrayMesh
from astropy.cosmology import Planck15
from lytomo_watershed import spectra_mocking as sm
from lytomo_watershed import z_bin
from scipy.ndimage import gaussian_filter as gf
from nbodykit import setup_logging
import logging
import logging.config
class MockLya():
    """Work with the Lya tomography maps"""
    def __init__(self, noiseless_file, boxsize=None, brange=None, num_spectra=None,
                 dperp=2.5, sn=2, spec_file=None, silent_mode=True, transpose=(0, 1, 2),
                 flux_mode=False, compensated=True, source_pk_file=None, HCD_mask=None):
        """
        noiseless_file : Path to the noiseless Lya tomography map.
        boxsize : Boxsize in cMpc/h.
        num_spectra : Number of spectra observed in the Lya tomography.
        sn : float, average signal-to-noise of the spectra per Angstrom.
            Default = 2, typical for Lya tomography surveys,
            McQuinn+White+11 (arxiv:1102.1752).
        source_pk_file : str, default None.  Path to the file storing the
            galaxy/quasar source power spectrum (h5py file of a 2D P(k, mu)).
            Only P(k, mu=0) is needed to account for source clustering in
            the Lya noise power; see the last paragraph of arxiv:1102.1752.
        HCD_mask : dict with keys 'type' ('EW', 'NHI' or None), 'thresh'
            and 'vel_width'.  If type='NHI', sightlines with
            N_HI > thresh (integrated over vel_width km/s) are replaced
            with random non-HCD sightlines; if type='EW', regions with
            equivalent width > thresh (Angstrom) are masked with 1/<F>-1.
        """
        # BUG FIX: HCD_mask used to be a mutable dict default argument
        # (shared across instances); build a fresh dict per call instead.
        if HCD_mask is None:
            HCD_mask = {'thresh': None, 'type': None, 'vel_width': 1235}
        self.silent_mode = silent_mode
        if not self.silent_mode:
            setup_logging()
            logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
        # Create the logger unconditionally so methods that log without a
        # silent_mode guard (e.g. _mask_HCDs_NHI_based) never hit an
        # AttributeError.  (The source's original indentation was
        # ambiguous here; Stats.__init__ creates its logger unconditionally.)
        self.logger = logging.getLogger('MockLya')
        if not self.silent_mode:
            self.logger.info('Starting')
            self.logger.info('dperp = %s', dperp)
        self.noiseless_file = noiseless_file
        self.brange = brange
        if boxsize is None:
            # Infer the box size from the trimming range.
            self.boxsize = (self.brange[1] - self.brange[0],
                            self.brange[3] - self.brange[2],
                            self.brange[5] - self.brange[4],)
        else:
            if isinstance(boxsize, int):
                self.boxsize = [boxsize, boxsize, boxsize]
            else:
                self.boxsize = boxsize
        self.num_spectra = num_spectra
        self.spec_file = spec_file
        # Build the cosmology from the simulation header.
        with h5py.File(self.spec_file, 'r') as f:
            self.cosmo = Planck15.clone(name='MockLya Cosmo',
                                        H0=100*f['Header'].attrs['hubble'],
                                        Om0=f['Header'].attrs['omegam'],
                                        Ob0=f['Header'].attrs['omegab'])
        self.compensated = compensated
        self.dperp = dperp
        self.flux_mode = flux_mode
        self.HCD_mask = HCD_mask
        self.sn = sn
        self.mock_map = None
        self.noiseless_map = None
        self.transpose = transpose
        with h5py.File(self.noiseless_file, 'r') as f:
            self.z = f['redshift'][()]
        self.noiseless_map = self.get_map_noiseless()
        self.Nmesh = [self.noiseless_map.shape[0],
                      self.noiseless_map.shape[1],
                      self.noiseless_map.shape[2]]
        # BUG FIX: source_pk_file was read back as self.source_pk_file
        # without ever being stored, and self.source_pk was indexed
        # before being created (a dead local `ang_pk_source` was built
        # instead).  Store the path and initialise the dict first.
        self.source_pk_file = source_pk_file
        if source_pk_file is not None:
            if not self.silent_mode:
                self.logger.info('Loading the source angular power spectrum')
            self.source_pk = {}
            with h5py.File(self.source_pk_file, 'r') as f:
                self.source_pk['power'] = f['pkmu'][:, 0]
                self.source_pk['k_perp'] = f['k'][:, 0]
        else:
            self.source_pk = None
def get_map_noiseless(self):
    """Open, optionally HCD-mask, and trim the noiseless Lya map.

    The result is cached in self.noiseless_map; subsequent calls
    return the cached array.
    """
    if self.noiseless_map is None:
        # Reorder the axes as requested at construction time.
        lya_map = np.transpose(h5py.File(self.noiseless_file,'r')['map'][:], axes=self.transpose)
        if self.HCD_mask['type'] == 'NHI':
            # NOTE(review): _mask_HCDs_NHI_based is declared without a
            # map argument but called with one here -- confirm the
            # intended signature before relying on this branch.
            lya_map = self._mask_HCDs_NHI_based(lya_map)
        self.noiseless_map = self._get_map(lya_map)
    return self.noiseless_map
def _get_map(self, lya_map):
"""Trim the lya maps"""
if self.flux_mode:
# Convert delta_F to F, so at the end, flux will be the output map
lya_map = self._dF_to_F(lya_map)
if self.brange is not None:
# Take a subset of the full box
ind = np.meshgrid(np.arange(self.brange[0], self.brange[1]+1),
np.arange(self.brange[2], self.brange[3]+1),
np.arange(self.brange[4], self.brange[5]+1),
indexing='ij')
ind = (ind[0], ind[1], ind[2])
ly_map = lya_map[ind]
return lya_map
def _dF_to_F(self, lya_map):
"""Convert delta_F to Flux
Paramters:
lya_map: delta_F map
Returns:
The flux map
"""
lya_map +=1
lya_map *=self.get_mean_flux(z=self.z)
if not self.silent_mode:
self.logger.info('dF -> F')
return lya_map
def _mask_HCDs_NHI_based(self, lya_map=None):
    """Replace sightlines containing an HCD (high column density
    absorber, N_HI > self.HCD_mask['thresh']) with randomly chosen
    non-HCD sightlines.

    N_HI is integrated over chunks of self.HCD_mask['vel_width'] km/s
    along the line of sight.

    Parameters
    ----------
    lya_map : ndarray or None.  Only used to report the HCD fraction.
        BUG FIX: the method used to take no argument although
        get_map_noiseless calls it with one (TypeError).

    Returns
    -------
    tau_map : optical-depth spectra with HCD sightlines replaced.
        # NOTE(review): the caller assigns this back onto its deltaF
        # map variable -- confirm a tau map is really what is intended.
    """
    self.logger.info('Masking HCDs with log(NHI) >= '+str(np.log10(self.HCD_mask['thresh'])))
    with h5py.File(self.spec_file,'r') as f:
        tau_map = f['tau/H/1/1215'][:]
    # Number of adjacent pixels to sum to integrate N_HI over vel_width.
    L = np.shape(tau_map)[1]
    vmax = self.cosmo.H(self.z).value*(1/(1+self.z))*self.boxsize[2]/self.cosmo.h
    addpix = np.around( self.HCD_mask['vel_width'] / ( vmax / L ) )
    t = np.arange(0,L+1,addpix).astype(int)
    NHI_map = h5py.File(self.spec_file,'r')['colden/H/1'][:]
    # Number of sightlines, remembered before NHI_map is freed.
    nspec = NHI_map.shape[0]
    NHI_map_summed = np.zeros(shape=(nspec, t.size-1))
    for i in range(t.size-1):
        NHI_map_summed[:,i] = np.sum(NHI_map[:,t[i]:t[i+1]], axis=1)
    del NHI_map
    self.logger.info('NHI is summed over '+str(addpix)+' pixels')
    ind_HCD = np.where(NHI_map_summed > self.HCD_mask['thresh'])
    ind_HCD = np.array(ind_HCD).astype(int)
    # BUG FIX: NHI_map.shape was referenced here after `del NHI_map`
    # (NameError); use the saved sightline count.  np.int was removed
    # from numpy, so plain int is used for the dtype.
    mask = np.zeros(shape=(nspec,), dtype=int)
    mask[ind_HCD[0]] = 1
    ind_HCD = np.where(mask)
    # BUG FIX: lya_map used to be undefined in this scope; only report
    # the HCD fraction when the caller supplied the map.
    if lya_map is not None:
        self.logger.info('HCD fraction = '+str(ind_HCD[0].size/(lya_map.shape[0]*lya_map.shape[1])))
    # Keep resampling until no replacement sightline is itself an HCD.
    while ind_HCD[0].size !=0:
        ind_rep = np.random.randint(0, nspec, (ind_HCD[0].size,) )
        tau_map[ind_HCD[0],:] = tau_map[ind_rep, :]
        NHI_map_summed[ind_HCD[0], :] = NHI_map_summed[ind_rep, :]
        ind_HCD = np.where(NHI_map_summed > self.HCD_mask['thresh'])
        ind_HCD = np.array(ind_HCD).astype(int)
        mask = np.zeros(shape=(nspec,), dtype=int)
        mask[ind_HCD[0]] = 1
        ind_HCD = np.where(mask)
    return tau_map
def get_CNR(self, num_spectra, z, QSO=[], DCV13_model=True, seed=14):
    """Draw a continuum-to-noise ratio (CNR, i.e. signal-to-noise) per
    spectrum, following the LATIS modelling.

    Parameters
    ----------
    num_spectra : int, number of sightlines.
    z : float, redshift (used only by the non-DCV13 model).
    QSO : sightline indices that are quasars.
        # NOTE(review): mutable default (read-only here, so harmless).
    DCV13_model : bool; if True, quasar CNR is lognormal(2.3, 1.2).
    seed : int, RNG seed for reproducibility.

    Returns a (num_spectra,) array of CNR values.
    """
    # NOTE(review): sightlines NOT listed in QSO keep CNR = 0, which
    # makes 1/CNR blow up downstream (get_lya_sigma_n).  Confirm
    # whether non-QSO sightlines were meant to use the second model.
    CNR = np.zeros(num_spectra)
    np.random.seed(seed)
    for ii in range(num_spectra):
        if ii in QSO:
            if DCV13_model:
                CNR[ii] = np.exp(np.random.normal(2.3, 1.2))
            else:
                # Redshift-dependent lognormal mean from LATIS.
                mean = 0.84 + 0.99 * (z - 2.5)- 1.82*(z - 2.5)**2
                CNR[ii] = np.exp(np.random.normal(mean, .43))
    return CNR
def get_CE(self, CNR) :
""" Calculate Continuum noise for each spectra modeled in LATIS"""
CE = 0.24*CNR**(-0.86)
CE[np.where(CE < 0.05)] = 0.05
return CE
def get_mean_flux(self, z, metal=False) :
""" get the mean flux used in LATIS Faucher-Giguere 2008"""
if metal :
# The below is not good for HI absorption as includes the effect of metals
return np.exp(-0.001845*(1+self.z)**3.924)
else :
# The below is good for only HI absorptions, does not include metal absorption
return np.exp(-0.001330*(1+self.z)**4.094)
def get_lya_sigma_n(self):
    """Get the amplitude of the Lya noise.

    The actual Lya tomography noise covariance has different diagonal
    terms; for now the median of those terms is used.

    Returns
    -------
    The median per-spectrum noise amplitude of the Lya tomo map.
    """
    CNR = self.get_CNR(num_spectra= self.num_spectra, z=self.z)
    # sigma = sqrt(CE^2 + (1/CNR)^2) / <F>.
    # NOTE(review): 1/CNR is inf for sightlines whose CNR is 0 (see
    # get_CNR) -- confirm that is intended before using the median.
    sigma_lya = np.sqrt(self.get_CE(CNR)**2+(1/CNR)**2)/self.get_mean_flux(self.z)
    # Impose the noise floor.
    sigma_lya[sigma_lya < 0.2] = 0.2
    return np.median(sigma_lya)
def load_noiseless(self):
    """Load and return the full noiseless delta_F map from disk
    (untransposed and untrimmed, unlike get_map_noiseless).
    """
    # BUG FIX: use a context manager so the HDF5 handle is closed
    # instead of being leaked.
    with h5py.File(self.noiseless_file, 'r') as f:
        df_map = f['map'][:]
    return df_map
def estimate_noise(self):
    """ Estimate the noise in the Lya delta_F map as the rms of the deviation
    of the mock maps from the noiseless map

    Returns the difference map (mock - noiseless).
    """
    # NOTE(review): get_map_mock() is not defined anywhere in this
    # class as visible here -- confirm it exists (e.g. on a subclass),
    # otherwise this call raises AttributeError.
    noiseless_map = self.get_map_noiseless()
    mock_map = self.get_map_mock()
    diff = mock_map - noiseless_map
    return diff
def _get_redshift_width(self):
    """Set self.z_width to the redshift extent of the simulation box
    along the line of sight."""
    # Comoving distance of the box centre, measured from z = 2.2.
    center_dist = self.cosmo.comoving_distance(self.z).value - self.cosmo.comoving_distance(2.2).value
    # Redshifts of the near and far box edges.
    edges = z_bin.cmpch_to_redshift(d=[center_dist - self.boxsize[2]/2,
                                       center_dist + self.boxsize[2]/2])
    self.z_width = edges[1] - edges[0]
def get_galaxy_counts(self, pixfile='/run/media/mahdi/HD2/Lya/spectra/maps/mapsv13/dcv13pix.dat',
mapfile='/run/media/mahdi/HD2/Lya/spectra/maps/mapsv13/dcv13map.dat',
idsfile='/run/media/mahdi/HD2/Lya/spectra/maps/mapsv13/dcv13ids.dat',
n_av = 7e-4):
"""
Estimnate the galaxy counts in this Lya tomography surveys
It is estimated by counting the avergae number of sightlines droping from z-deltaz/2
to z+deltaz/2 where z is the redshift of the snapshot and deltaz is the redshift width
of the simulation box.
Cautions :
1. This ignores the buffer between start of the Lya forest and the galaxy's position
2. Now, it only works for LATIS, I should re-design it to get any n(z)
Returns:
The number of galaxies that Lya forest secures their reshift in the
simulated box
"""
if n_av is None:
self._get_redshift_width()
init = sm.get_mock_sightline_number(z_range=[self.z - self.z_width/2, self.z],
pixfile=pixfile, mapfile=mapfile, idsfile=idsfile)
final = sm.get_mock_sightline_number(z_range=[self.z , self.z + self.z_width/2],
pixfile=pixfile, mapfile=mapfile, idsfile=idsfile)
self.galaxy_counts = int(init - final)
self.galaxy_counts = np.product(self.boxsize)*n_av
import numpy as np
import h5py
import astropy.constants as const
from astropy.cosmology import Planck15 as cosmo
from nbodykit.lab import FFTPower
from nbodykit.lab import ArrayMesh
from nbodykit import utils
from lytomo_watershed import spectra_mocking as sm
import lim_lytomo
from . import git_handler
from nbodykit import setup_logging
import logging
import logging.config
class Stats():
    """A class to calculate the auto/cross power spectrum"""
    def __init__(self, z=None, mock_lim=None, mock_lya=None, mock_galaxy=None, kmin=None, kmax=1, dk=0.03, Nmu=30,
                 k_par_min = None, los=[0,0,1], vol_ratio=1.0, Pco=None, save_3d_power=False):
        """
        Parameters
        ----------
        z : float, optional
            Redshift; if None it is taken from the first available mock.
        mock_lya, mock_lim, mock_galaxy : MockLya, MockLim, MockGalaxy
            instances, optional.  Instances mocking the corresponding
            surveys.
        kmin, kmax, dk : float, optional
            The limits and the steps for the power spectrum calculations.
            kmin defaults to the fundamental mode of the first available
            survey box.
        Nmu : int, optional
            Number of mu bins for the 2D (k, mu) power spectra.
        k_par_min : float, optional
            Minimum line-of-sight k; when given, 1/k_par_min is stored
            as self.sigma_foreground (foreground filtering scale).
        los : (array_like, optional)
            the direction to use as the line-of-sight; must be a unit
            vector.
            # NOTE(review): mutable default list -- read-only here, so
            # harmless, but a tuple would be safer.
        vol_ratio : float, optional
            a correction factor to the # of available modes in
            uncertainty calculations; the ratio Vol_survey/Vol_simulation.
        Pco : numpy array of floats, default None
            The CO power spectrum.  If None, it is calculated on demand.
        save_3d_power : bool, optional
            Forwarded to FFTPower by the get_* methods.
        """
        setup_logging()
        logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
        # create logger
        self.logger = logging.getLogger('Stats')
        self.logger.info('Starting')
        self.mock_lim = mock_lim
        self.mock_lya = mock_lya
        self.mock_galaxy = mock_galaxy
        self.kmin = kmin
        if self.kmin is None:
            # Default to the fundamental mode of the first available box.
            if self.mock_lim is not None:
                self.kmin = 1 / self.mock_lim.boxsize[0]
            elif self.mock_lya is not None:
                self.kmin = 1 / self.mock_lya.boxsize[0]
            elif self.mock_galaxy is not None:
                self.kmin = 1 / self.mock_galaxy.boxsize[0]
        self.kmax = kmax
        self.dk = dk
        self.Nmu = Nmu
        self.k_par_min = k_par_min
        if self.k_par_min is not None:
            # Foreground filtering scale along the line of sight.
            self.sigma_foreground = 1/self.k_par_min
        self.los = los
        self.vol_ratio = vol_ratio
        # Quantities to calculate (filled lazily by the get_* methods).
        self.lim_pk = Pco
        self.lim_pkmu = None
        self.lim_pk3d= None
        self.lya_pk = None
        self.lya_pkmu = None
        self.lya_pk3d= None
        self.gal_pk = None
        self.gal_pkmu = None
        self.gal_pk3d= None
        self.lim_lya_pk = None
        self.lim_lya_pkmu = None
        self.lim_lya_pk3d = None
        self.lim_gal_pk = None
        self.lim_gal_pkmu = None
        self.lim_gal_pk3d = None
        self.lim_noise_pk = None
        self.lya_noise_pk = None
        self.lim_noise_pk_av = None
        self.lya_noise_pk_av = None
        self.gal_noise_pk_av = None
        self.gal_noise_pk = None
        self.sigma_lim_pk = None
        self.sigma_lya_pk = None
        self.sigma_gal_pk = None
        self.sigma_lim_lya_pk = None
        self.sigma_lim_gal_pk = None
        self.lim_sn = None
        self.lya_sn = None
        self.gal_sn = None
        self.lim_lya_sn = None
        self.lim_gal_sn = None
        self.z = z
        if self.z is None:
            # Fall back to the redshift of the first available mock.
            if self.mock_lim is not None:
                self.z = self.mock_lim.z
            elif self.mock_lya is not None:
                self.z = self.mock_lya.z
            elif self.mock_galaxy is not None:
                self.z = self.mock_galaxy.z
        self.save_3d_power = save_3d_power
        # MPI
        """
        self.MPI = CurrentMPIComm
        self.comm = self.MPI.get()
        self.rank = self.comm.Get_rank()
        self.size = self.comm.Get_size()
        """
def get_lim_pk(self, mode='1d'):
    """Compute the LIM (e.g. CO temperature) auto power spectrum.

    mode : '1d' for the spherically averaged P(k), '2d' for P(k, mu).
    Returns an nbodykit FFTPower result.
    """
    # Make sure the intensity map has been painted.
    if self.mock_lim.lim_map is None:
        self.mock_lim.get_lim_map()
    if not self.mock_lim.silent_mode:
        self.logger.info('Calculating the LIM auto power, mode: '+mode)
    fft_kwargs = dict(BoxSize=self.mock_lim.boxsize, mode=mode,
                      kmin=self.kmin, kmax=self.kmax, dk=self.dk,
                      los=self.los, Nmu=self.Nmu,
                      save_3d_power=self.save_3d_power)
    return FFTPower(self.mock_lim.lim_map, **fft_kwargs)
def get_lya_pk(self, savefile=None, mode='1d'):
    """Compute the auto power spectrum of the (noiseless) deltaF map.

    savefile is accepted for interface compatibility but unused here.
    mode : '1d' for spherically averaged P(k), '2d' for P(k, mu).
    Returns an nbodykit FFTPower result.
    """
    field = self.mock_lya.get_map_noiseless()
    mesh = ArrayMesh(field, BoxSize=self.mock_lya.boxsize)
    if not self.mock_lya.silent_mode:
        self.logger.info('Calculating the Lya auto power, mode: '+mode)
    return FFTPower(mesh, mode=mode, BoxSize=self.mock_lya.boxsize,
                    kmin=self.kmin, kmax=self.kmax, dk=self.dk,
                    Nmu=self.Nmu, los=self.los,
                    save_3d_power=self.save_3d_power)
def get_gal_pk(self, mode='1d'):
    """Compute the galaxy overdensity auto power spectrum.

    mode : '1d' for spherically averaged P(k), '2d' for P(k, mu).
    Returns an nbodykit FFTPower result.
    """
    if not self.mock_galaxy.silent_mode:
        self.logger.info('Calculating the galaxy power, mode: '+mode)
    fft_kwargs = dict(BoxSize=self.mock_galaxy.boxsize, mode=mode,
                      kmin=self.kmin, kmax=self.kmax, dk=self.dk,
                      Nmu=self.Nmu, los=self.los,
                      save_3d_power=self.save_3d_power)
    return FFTPower(self.mock_galaxy.map, **fft_kwargs)
def get_lim_lya_pk(self, mode='1d'):
"""Calculate the spehrically averaged cross power spectrum between LIM LIM and Lya Tomography"""
lya_map = self.mock_lya.get_map_noiseless()
lya_mesh= ArrayMesh(lya_map, BoxSize= self.mock_lya.boxsize)
if self.mock_lim.lim_map is None:
self.mock_lim.get_lim_map()
if not self.mock_lim.silent_mode:
self.logger.info('Calculating the LIMXLya power, mode: '+mode)
return FFTPower(first=self.mock_lim.lim_map, second=lya_mesh,
BoxSize=self.mock_lim.boxsize, mode=mode,
kmin=self.kmin, kmax=self.kmax, dk=self.dk,
Nmu = self.Nmu, los=self.los, save_3d_power=self.save_3d_power)
def get_lim_gal_pk(self, mode='1d'):
"""Calculate the spehrically averaged cross power spectrum between CO LIM and Galaxy Overdensities"""
if self.mock_lim.lim_map is None:
self.mock_lim.compute_lim_map()
if not self.mock_lim.silent_mode:
self.logger.info('Calculating the COXgal power, mode: '+mode)
return FFTPower(first=self.mock_lim.lim_map, second=self.mock_galaxy.map,
BoxSize = self.mock_lim.boxsize, mode=mode,
kmin=self.kmin, kmax=self.kmax, dk=self.dk,
Nmu = self.Nmu, los=self.los, save_3d_power=self.save_3d_power)
    def get_uncertainty_lim_pk(self):
        """Get the uncertainty in the LIM power spectrum:
        sigma_{P_LIM} = (P_{LIM}(k) + P_{n, LIM}) / sqrt(N_{modes})
        computed per (k, mu) bin and then collapsed onto k bins with
        inverse-variance weighting.
        - Some bins have 0 modes, so nan is returned there.
        """
        # Lazily compute the 1d and 2d power spectra if not cached yet.
        if self.lim_pk is None:
            fftpow = self.get_lim_pk(mode='1d')
            self.lim_pk = fftpow.power
            self.lim_pk3d = fftpow.pk3d
        if self.lim_pkmu is None:
            fftpow = self.get_lim_pk(mode='2d')
            self.lim_pkmu = fftpow.power
            # NOTE(review): this overwrites the lim_pk3d stored by the 1d
            # branch above -- confirm the 2d cube is the intended one to keep.
            self.lim_pk3d = fftpow.pk3d
        # NOTE(review): this sets self.mock_lim.noise_pk, not
        # self.lim_noise_pk, so the guard re-triggers on every call -- verify.
        if self.lim_noise_pk is None:
            self.mock_lim.get_lim_noise_pk()
        # Resolution attenuation W(k)^2; the noise power is boosted by 1/W^2.
        # NOTE(review): sig_perp is res_perp/2 here, while the galaxy variant
        # uses res_perp unhalved -- confirm the factor of 2 is intentional.
        w2kmu = self._attenuation(k=self.lim_pkmu['k'], mu=self.lim_pkmu['mu'], sig_par=self.mock_lim.res_par,
                                 sig_perp = self.mock_lim.res_perp/2, foreground_explode=True)
        self.sigma_lim_pkmu = (self.lim_pkmu['power'] + self.mock_lim.noise_pk/w2kmu)/(np.sqrt(self.vol_ratio*self.lim_pkmu['modes']))
        self.sigma_lim_pk = self.inverse_variance_weighting(self.lim_pk['k'], self.sigma_lim_pkmu)
        # Keep the mu-averaged noise power for diagnostics / saving.
        self.lim_noise_pk_av = self.inverse_variance_weighting(self.lim_pk['k'], self.mock_lim.noise_pk/w2kmu)
        self.logger.info('lim_noise_pk_av: '+str(self.lim_noise_pk_av))
    def get_lya_noise_pk(self, k_par, k_perp=None):
        """Get Lya tomography noise power spectrum: the line-of-sight flux
        power divided by the effective 2D sightline density, optionally
        boosted by the source continuum power.
        Parameters:
        ------------------
        k_par : line-of-sight wavenumbers at which to evaluate the noise
        k_perp : transverse wavenumbers; only used when
            self.mock_lya.source_pk is set.
            NOTE(review): if source_pk is set and k_perp is left at its
            default None, np.interp below will fail -- confirm callers
            always pass k_perp in that configuration.
        Returns:
        The Lya noise power spectrum in units of (cMpc/h)^3
        """
        self.lya_noise_pk = {}
        if not self.mock_lya.silent_mode:
            self.logger.info('getting 1D Pk for Lya')
        # 1D flux power interpolated onto the requested line-of-sight k bins.
        k ,lya_pk_los = self.get_1D_lya_pk()
        lya_pk_los = np.interp(k_par, k, lya_pk_los)
        n2D_eff = self.get_n2D_eff(lya_pk_los)
        noise = lya_pk_los / n2D_eff
        # Optional boost from the source continuum power spectrum.
        if self.mock_lya.source_pk is not None:
            source_pk = np.interp(k_perp, self.mock_lya.source_pk['k_perp'], self.mock_lya.source_pk['power'])
            noise *= (1 + source_pk/(self.mock_lya.dp**2))
        return noise
    def get_n2D_eff(self, lya_pk_los, lam0=1216):
        """Get the effective 2D sightline density of spectra, eq 13 in
        McQuinn & White 2011 (arXiv:1102.1752).
        Parameters:
        ------------------------------
        lya_pk_los : The line-of-sight power spectrum
        lam0 : The Lya rest-frame wavelength
        """
        self.logger.info('H: '+str(self.mock_lya.cosmo.H(self.z)))
        self.logger.info('h: '+str(self.mock_lya.cosmo.h))
        self.logger.info('sn: '+str(self.mock_lya.sn))
        # Pixel-noise power term of eq 13.
        P_N = (sm.get_mean_flux(self.z)/self.mock_lya.sn)**2 * lam0 * (self.mock_lya.cosmo.H(self.z)*self.mock_lya.cosmo.h/const.c.to('km/s')).value
        # NOTE(review): the line below discards the P_N just computed, forcing
        # q == 1 (pure aliasing noise). This looks like a debugging leftover --
        # confirm whether the pixel-noise term should be restored.
        P_N = 0
        q = lya_pk_los / (lya_pk_los + P_N)
        return (1/(self.mock_lya.dperp**2))*q
    def get_uncertainty_lya_pk(self, savefile=None):
        """Get the uncertainty in the Lya power spectrum:
        sigma_{P_Lya} = (P_{Lya}(k) + P_{n, Lya}) / sqrt(N_{modes})
        computed per (k, mu) bin and collapsed onto k bins with
        inverse-variance weighting.
        Parameters:
            savefile : unused; kept for interface compatibility.
        """
        # Lazily compute the 1d and 2d power spectra if not cached yet.
        if self.lya_pk is None:
            fftpow = self.get_lya_pk(mode='1d')
            self.lya_pk = fftpow.power
            self.lya_pk3d = fftpow.pk3d
        if self.lya_pkmu is None:
            fftpow = self.get_lya_pk(mode='2d')
            self.lya_pkmu = fftpow.power
            self.lya_pk3d = fftpow.pk3d
        if self.lya_noise_pk is None:
            # Evaluate the noise at the line-of-sight component k*mu of every
            # (k, mu) bin, then reshape back to the 2d grid.
            k_par = np.ravel(self.lya_pkmu['k']*self.lya_pkmu['mu'])
            self.lya_noise_pk = self.get_lya_noise_pk(k_par).reshape(self.lya_pkmu['power'][:].shape)
        self.sigma_lya_pkmu = (self.lya_pkmu['power'] + self.lya_noise_pk)/(np.sqrt(self.vol_ratio*self.lya_pkmu['modes']))
        if not self.mock_lya.silent_mode:
            self.logger.info('inverse var weighting, auto Lya')
        self.sigma_lya_pk = self.inverse_variance_weighting(self.lya_pk['k'], self.sigma_lya_pkmu)
        # save P_noise,Lya just in case:
        self.lya_noise_pk_av = self.inverse_variance_weighting(self.lya_pk['k'], self.lya_noise_pk)
def get_gal_noise_pk(self):
"""Get galaxy noise power spectrum which is a poisson noise from the discrite
galaxy distribution.
Returns:
The Galaxy noise power spectrum in units of (cMpc/h)^3
"""
# Save the 3D number density of the galaxies since it will be
# used as the shot noise in the power spectrum modelling :
self.n3D = self.mock_galaxy.galaxy_count/np.product(self.mock_galaxy.boxsize)
self.gal_noise_pk = 1/self.n3D
    def get_uncertainty_gal_pk(self):
        """Get the uncertainty in the galaxy power spectrum:
        sigma_{P_gal} = (P_{gal}(k) + P_{n, gal}) / sqrt(N_{modes})
        computed per (k, mu) bin and collapsed onto k bins with
        inverse-variance weighting.
        """
        # Lazily compute the 1d and 2d power spectra if not cached yet.
        if self.gal_pk is None:
            fftpow = self.get_gal_pk(mode='1d')
            self.gal_pk = fftpow.power
            self.gal_pk3d = fftpow.pk3d
        if self.gal_pkmu is None:
            fftpow = self.get_gal_pk(mode='2d')
            self.gal_pkmu = fftpow.power
            self.gal_pk3d = fftpow.pk3d
        if self.gal_noise_pk is None:
            self.get_gal_noise_pk()
        # Resolution attenuation W(k)^2; the shot noise is boosted by 1/W^2.
        w2kmu = self._attenuation(k=self.gal_pkmu['k'], mu=self.gal_pkmu['mu'], sig_par=self.mock_galaxy.res_par,
                                  sig_perp = self.mock_galaxy.res_perp)
        self.sigma_gal_pkmu = (self.gal_pkmu['power'] + self.gal_noise_pk/w2kmu)/(np.sqrt(self.vol_ratio*self.gal_pkmu['modes']))
        if not self.mock_galaxy.silent_mode:
            self.logger.info('inverse var weighting, auto gal')
        self.sigma_gal_pk = self.inverse_variance_weighting(self.gal_pk['k'], self.sigma_gal_pkmu)
        # save P_noise,Gal just in case:
        self.gal_noise_pk_av = self.inverse_variance_weighting(self.gal_pk['k'], self.gal_noise_pk/w2kmu)
    def get_uncertainty_lim_lya(self, sigma_lya_pk_file=None):
        """Get the uncertainty in the LIM x Lya tomography cross power
        spectrum. Eq 12 in Chung+18.
        Parameters:
            sigma_lya_pk_file : unused; kept for interface compatibility
                (NOTE(review): never read in this method -- confirm whether
                loading a precomputed sigma_lya_pk was intended).
        """
        # Lazily compute the 1d and 2d cross power spectra if not cached yet.
        if self.lim_lya_pk is None:
            fftpow = self.get_lim_lya_pk(mode='1d')
            self.lim_lya_pk = fftpow.power
            self.lim_lya_pk3d = fftpow.pk3d
        if self.lim_lya_pkmu is None:
            fftpow = self.get_lim_lya_pk(mode='2d')
            self.lim_lya_pkmu = fftpow.power
            self.lim_lya_pk3d = fftpow.pk3d
        # The cross uncertainty combines both auto uncertainties.
        if self.sigma_lim_pk is None:
            self.get_uncertainty_lim_pk()
        if self.sigma_lya_pk is None:
            self.get_uncertainty_lya_pk()
        self.sigma_lim_lya_pkmu = np.abs(np.sqrt( (self.sigma_lim_pkmu*self.sigma_lya_pkmu)/2+
                                        (self.lim_lya_pkmu['power']**2)/(2*self.vol_ratio*self.lim_lya_pkmu['modes'])))
        if not self.mock_lim.silent_mode:
            self.logger.info('inverse var weighting, COXLya')
        self.sigma_lim_lya_pk = self.inverse_variance_weighting(self.lim_lya_pk['k'], self.sigma_lim_lya_pkmu)
    def get_uncertainty_lim_gal(self):
        """Get the uncertainty in the LIM x galaxy survey cross power
        spectrum. Eq 12 in Chung+18.
        """
        # Lazily compute the 1d and 2d cross power spectra if not cached yet.
        if self.lim_gal_pk is None:
            fftpow = self.get_lim_gal_pk(mode='1d')
            self.lim_gal_pk = fftpow.power
            self.lim_gal_pk3d = fftpow.pk3d
        if self.lim_gal_pkmu is None:
            fftpow = self.get_lim_gal_pk(mode='2d')
            self.lim_gal_pkmu = fftpow.power
            self.lim_gal_pk3d = fftpow.pk3d
        # The cross uncertainty combines both auto uncertainties.
        if self.sigma_lim_pk is None:
            self.get_uncertainty_lim_pk()
        if self.sigma_gal_pk is None:
            self.get_uncertainty_gal_pk()
        self.sigma_lim_gal_pkmu = np.abs(np.sqrt( (self.sigma_lim_pkmu*self.sigma_gal_pkmu)/2+
                                        (self.lim_gal_pkmu['power']**2)/(2*self.vol_ratio*self.lim_gal_pkmu['modes'])))
        if not self.mock_lim.silent_mode:
            self.logger.info('inverse var weighting, COXgal')
        self.sigma_lim_gal_pk = self.inverse_variance_weighting(self.lim_gal_pk['k'], self.sigma_lim_gal_pkmu)
def get_lim_sn(self):
"""Get the signal to noise of the CO LIM auto power in each k
"""
if self.sigma_lim_pk is None:
self.get_uncertainty_lim_pk()
self.lim_sn = np.abs(self.lim_pk['power']/self.sigma_lim_pk)
def get_lya_sn(self):
"""Get the signal to noise of the Lya tomography auto power in each k
"""
if self.sigma_lya_pk is None:
self.get_uncertainty_lya_pk()
self.lya_sn = np.abs(self.lya_pk['power']/self.sigma_lya_pk)
def get_gal_sn(self):
"""Get the signal to noise of the galaxy survey auto power in each k
"""
if self.sigma_gal_pk is None:
self.get_uncertainty_gal_pk()
self.gal_sn = np.abs(self.gal_pk['power']/self.sigma_gal_pk)
def get_lim_lya_sn(self, sigma_lya_pk_file=None):
"""Get the signal to noise of the cross power in each k
"""
if self.sigma_lim_lya_pk is None:
self.get_uncertainty_lim_lya(sigma_lya_pk_file=sigma_lya_pk_file)
self.lim_lya_sn = np.abs(self.lim_lya_pk['power']/self.sigma_lim_lya_pk)
def get_lim_gal_sn(self):
"""Get the signal to noise of the CO LIM x Galaxy survey cross power in each k
"""
if self.sigma_lim_gal_pk is None:
self.get_uncertainty_lim_gal()
self.lim_gal_sn = np.abs(self.lim_gal_pk['power']/self.sigma_lim_gal_pk)
def _attenuation(self,k, mu, sig_par, sig_perp, foreground_explode = False):
"""Signal attenuation due to finite resolution.
The spherically averaged auto/cross power spectra should be correctd with
this window function :
W(k)^2 = <exp(-k_{\perp}^2 sigma_{\perp}^2)>
W(k)^2 = <exp(-k_{\||}^2 sigma_{\||}^2)>
The averaging is done within the shells used to calculaete P(k). Basically, implemnting
Appendix C.3 in Lee+16 and eq 19 and 20 in Chung+18.
Parameters: K : ndarray
an array of ks to get the attenuation at.
sig_par:
resolution along line-of-sight in cMpc/h (units should match k's unit)
sig_perp :
resolution along perp direction
Returns: ndarray
W(k)^2 : multiply the output by the auto/cross power spectrum
"""
foreground = np.ones_like(k)
if foreground_explode and (self.k_par_min is not None):
ind = np.where(mu*k <= self.k_par_min)
if ind[0].size != 0 :
foreground[ind] *= 1e-9
#foreground[ind] *= np.exp(-((mu[ind]*k[ind] - self.k_par_min)*self.sigma_foreground)**2 )
#foreground[ind] *= np.exp(-(mu[ind]*k[ind] / self.k_par_min)**2 )
self.logger.info('Foreground effects, median correction=%s', np.median(foreground[ind]))
return np.exp(-(k*sig_perp)**2) * np.exp(- (mu*k)**2 * (sig_par**2 - sig_perp**2)) * foreground
def _get_kcut(self):
if not self.mock_lim.silent_mode:
self.logger.info('Cut foreground, i.e k < %s', self.k_par_min)
def _foreground_exp(k_par):
return np.zeros_like(k_par)
#return np.exp(-((k_par - self.k_par_min)*self.sigma_foreground)**2 )
kcut={'k_par_min':self.k_par_min}
kcut['func'] = _foreground_exp
return kcut
def _do_save_power(self, p, savefile):
"""Save the power spectrum on file
Parameters:
p : A dictionary with keys: 'power' and 'k'
"""
lim_pk = utils.GatherArray(data=np.abs(p['power']), comm=self.comm, root=0)
k = utils.GatherArray(data=p['k'], comm=self.comm, root=0)
if self.rank == 0:
with h5py.File(savefile, 'w') as fw:
fw['power'] = np.abs(p['power'][:])
fw['k'] = p['k'][:]
    def get_1D_lya_pk(self):
        """Get the 1D flux power spectrum, used by the simple Lya noise model
        of McQuinn & White 2011, eqs 12-13 (arXiv:1102.1752).

        Returns whatever fluxstatistics.flux_power returns; the caller in
        get_lya_noise_pk unpacks the result as (k, pk1d).
        """
        # Imported lazily so the rest of the class works without these
        # optional packages installed.
        from fake_spectra import fluxstatistics as fs
        from lytomo_watershed import spectra_mocking as sm
        with h5py.File(self.mock_lya.spec_file,'r') as f:
            tau = f['tau/H/1/1215'][:]
            # Rescale to the mean flux expected at this snapshot's redshift.
            mf = sm.get_mean_flux(z=f['Header'].attrs['redshift'])
        pk1d = fs.flux_power(tau, vmax = self.mock_lya.boxsize[2], mean_flux_desired=mf, window=False)
        return pk1d
def inverse_variance_weighting(self, k, sigma_pkmu):
"""calculate the sigma_pk with the inverse variance weighting in mu bins
eq 6 in Furlanetto, Lidz 2014 arxiv:0611274
Paramters:
--------------------
k : k-bins
sigma_pkmu : numpy array, the uncertainty in each (k, mu) bins
Returns :
The weighted noise in '1D' k-space.
"""
sigma_pk = 1 / np.sqrt(np.nansum(1/np.abs(sigma_pkmu)**2, axis=1))
return sigma_pk
def save_stat(self, savefile):
"""Save all computed statistics on an hdf5 file
Parameters:
-------------------
savefile : an hdf5 file path
"""
from . import comap, exclaim
self.logger.info('saving at %s', savefile)
with h5py.File(savefile, 'w') as fw:
if self.lim_pk is not None:
try:
for k, v in self.mock_lim.survey_params.items():
fw[f'lim_survey_params/{k}'] = v
except:
pass
fw['lim_pk/power'] = self.lim_pk['power']
fw['lim_pk/k'] = self.lim_pk['k']
fw['lim_pk/modes'] = self.lim_pk['modes']
try:
fw['lim_pk/noise'] = self.lim_noise_pk_av
except:
pass
# Write down the LIM emission model parameters
if isinstance(self.mock_lim, comap.MockComap):
if self.mock_lim.co_model == 'COMAP+21':
for k, v in self.mock_lim.COMAP21_params.items():
fw[f'lim_model/COMAP+21/{k}'] = v
if self.mock_lim.co_model == 'Li+16':
for k, v in self.mock_lim.Li16_params.items():
fw[f'lim_model/Li+16/{k}'] = v
if isinstance(self.mock_lim, exclaim.MockExclaim):
if self.mock_lim.co_model == 'Padmanabhan+19':
for k, v in self.mock_lim.padmanabhan19_params.items():
fw[f'lim_model/padmanabhan+19/{k}'] = v
if self.lya_pk is not None:
fw['lya_dperp'] = self.mock_lya.dperp
fw['lya_pk/power'] = self.lya_pk['power']
fw['lya_pk/k'] = self.lya_pk['k']
fw['lya_pk/modes'] = self.lya_pk['modes']
if self.save_3d_power:
fw['lya_pk3d/power'] = self.lya_pk3d[:]
if self.lya_noise_pk_av is not None:
fw['lya_pk/noise'] = self.lya_noise_pk_av
if self.lya_pkmu is not None:
fw['lya_pkmu/power'] = self.lya_pkmu['power']
fw['lya_pkmu/k'] = self.lya_pkmu['k']
fw['lya_pkmu/mu'] = self.lya_pkmu['mu']
fw['lya_pkmu/modes'] = self.lya_pkmu['modes']
if self.gal_pk is not None:
try:
fw['gal_Rz'] = self.mock_galaxy.Rz
except:
pass
fw['gal_pk/power'] = self.gal_pk['power']
fw['gal_pk/k'] = self.gal_pk['k']
fw['gal_pk/modes'] = self.gal_pk['modes']
fw['gal_pk/n3D'] = self.n3D
if self.gal_pkmu is not None:
fw['gal_pkmu/power'] = self.gal_pkmu['power']
fw['gal_pkmu/k'] = self.gal_pkmu['k']
fw['gal_pkmu/mu'] = self.gal_pkmu['mu']
fw['gal_pkmu/modes'] = self.gal_pkmu['modes']
if self.sigma_lim_pk is not None:
fw['sigma_lim_pk'] = np.abs(self.sigma_lim_pk)
if self.sigma_lya_pk is not None:
fw['sigma_lya_pk'] = np.abs(self.sigma_lya_pk)
if self.sigma_gal_pk is not None:
fw['sigma_gal_pk'] = np.abs(self.sigma_gal_pk)
if self.gal_noise_pk_av is not None:
fw['gal_pk/noise'] = self.gal_noise_pk_av
if self.lim_lya_pk is not None:
fw['lim_lya_pk/power'] = self.lim_lya_pk['power']
fw['lim_lya_pk/k'] = self.lim_lya_pk['k']
fw['lim_lya_pk/modes'] = self.lim_lya_pk['modes']
if self.lya_pkmu is not None:
fw['lim_lya_pkmu/power'] = self.lim_lya_pkmu['power']
fw['lim_lya_pkmu/k'] = self.lim_lya_pkmu['k']
fw['lim_lya_pkmu/mu'] = self.lim_lya_pkmu['mu']
fw['lim_lya_pkmu/modes'] = self.lim_lya_pkmu['modes']
if self.lim_gal_pk is not None:
fw['lim_gal_pk/power'] = self.lim_gal_pk['power']
fw['lim_gal_pk/k'] = self.lim_gal_pk['k']
try:
fw['lim_gal_pk/modes'] = self.lim_gal_pk['modes']
except: pass
if self.lim_gal_pkmu is not None:
fw['lim_gal_pkmu/power'] = self.lim_gal_pkmu['power']
fw['lim_gal_pkmu/k'] = self.lim_gal_pkmu['k']
fw['lim_gal_pkmu/mu'] = self.lim_gal_pkmu['mu']
fw['lim_gal_pkmu/modes'] = self.lim_gal_pkmu['modes']
if self.sigma_lim_lya_pk is not None:
fw['sigma_lim_lya_pk'] = np.abs(self.sigma_lim_lya_pk)
if self.sigma_lim_gal_pk is not None:
fw['sigma_lim_gal_pk'] = np.abs(self.sigma_lim_gal_pk)
# Save the commit hash of the code used for this run
# when reading this convert it to a list with :
# `f['Git'].attrs['HEAD_HASH'].tolist()`
fw.create_group('Git')
head_hash = git_handler.get_head_hash(lim_lytomo)
fw['Git'].attrs["HEAD_HASH"] = np.void(head_hash)
    def load_stat(self, stat_file, load_2D_stats=False, load_3D_stats=False):
        """ Load the stats from an `hdf5` file
        Parameters:
        stat_file: the file address to load the file from
        load_2D_stats : if True, also load the saved P(k, mu) datasets
        load_3D_stats : if True, also load the saved 3D power cubes
        --------------------------
        Returns: an instance of stats.Stat()
        """
        # Each family (LIM, Lya, galaxy) is loaded inside its own try-block
        # so a file holding only a subset of the results still loads; on
        # failure the corresponding attributes are reset to None.
        # NOTE(review): the stored sigma_* arrays are rescaled here by
        # 1/sqrt(self.vol_ratio) -- confirm the files were written before
        # that factor was applied.
        with h5py.File(stat_file,'r') as f:
            try:
                self.lim_pk = {}
                self.lim_pkmu = {}
                self.lim_pk['k'] = f['lim_pk/k'][:]
                self.lim_pk['modes'] = f['lim_pk/modes'][:]
                self.lim_pk['power'] = f['lim_pk/power'][:]
                # The noise dataset is optional.
                try:
                    self.lim_noise_pk = f['lim_pk/noise'][:]
                except:
                    pass
                self.sigma_lim_pk = f['sigma_lim_pk'][:]/np.sqrt(self.vol_ratio)
                self.get_lim_sn()
                if load_2D_stats:
                    try:
                        self.lim_pkmu['mu'] = f['lim_pkmu/mu'][:]
                        self.lim_pkmu['modes'] = f['lim_pkmu/modes'][:]
                        self.lim_pkmu['power'] = f['lim_pkmu/power'][:]
                    except:
                        self.logger.info('P_LIM(k,mu) is not stored.')
            except:
                self.lim_pk = None
                self.lim_pkmu = None
                self.logger.info('Could not load LIM')
                pass
            try:
                self.lya_pk = {}
                self.lya_pkmu = {}
                self.lya_pk3d = {}
                self.lim_lya_pk = {}
                self.lya_pk['k'] = f['lya_pk/k'][:]
                self.lya_pk['modes'] = f['lya_pk/modes'][:]
                self.lya_pk['power'] = f['lya_pk/power'][:]
                # Optional Lya noise power.
                try:
                    self.lya_noise_pk = f['lya_pk/noise'][:]
                except:
                    pass
                try:
                    self.sigma_lya_pk = f['sigma_lya_pk'][:]/np.sqrt(self.vol_ratio)
                    self.get_lya_sn()
                except:
                    self.logger.info('No Lya noise stats')
                if load_2D_stats:
                    try:
                        self.lya_pkmu['mu'] = f['lya_pkmu/mu'][:]
                        self.lya_pkmu['modes'] = f['lya_pkmu/modes'][:]
                        self.lya_pkmu['power'] = f['lya_pkmu/power'][:]
                    except:
                        self.logger.info('P_Lya(k,mu) is not stored.')
                if load_3D_stats:
                    try:
                        self.lya_pk3d['power'] = f['lya_pk3d/power'][:]
                    except:
                        pass
                # The LIM x Lya cross statistics live inside the Lya branch
                # because they require the Lya results to be present.
                try:
                    self.lim_lya_pk['k'] = f['lim_lya_pk/k'][:]
                    self.lim_lya_pk['power'] = f['lim_lya_pk/power'][:]
                    self.sigma_lim_lya_pk = f['sigma_lim_lya_pk'][:]/np.sqrt(self.vol_ratio)
                    self.get_lim_lya_sn()
                except:
                    self.logger.info('No LIMXLya stats')
                    pass
            except:
                self.lya_pk = None
                self.lya_pkmu = None
                self.logger.info('No Lya stats')
                pass
            try:
                self.gal_pk = {}
                self.gal_pkmu = {}
                self.lim_gal_pk = {}
                self.gal_pk['k'] = f['gal_pk/k'][:]
                self.gal_pk['modes'] = f['gal_pk/modes'][:]
                self.gal_pk['power'] = f['gal_pk/power'][:]
                # Optional galaxy number density and shot noise.
                try:
                    self.n3D = f['gal_pk/n3D'][()]
                except:
                    pass
                try:
                    self.gal_noise_pk = f['gal_pk/noise'][:]
                except:
                    pass
                self.sigma_gal_pk = f['sigma_gal_pk'][:]/np.sqrt(self.vol_ratio)
                self.get_gal_sn()
                if load_2D_stats:
                    try:
                        self.gal_pkmu['mu'] = f['gal_pkmu/mu'][:]
                        self.gal_pkmu['modes'] = f['gal_pkmu/modes'][:]
                        self.gal_pkmu['power'] = f['gal_pkmu/power'][:]
                    except:
                        self.logger.info('P_Lya(k,mu) is not stored.')
                # The LIM x galaxy cross statistics, if stored.
                try:
                    self.lim_gal_pk['k'] = f['lim_gal_pk/k'][:]
                    self.lim_gal_pk['power'] = f['lim_gal_pk/power'][:]
                    self.sigma_lim_gal_pk = f['sigma_lim_gal_pk'][:]/np.sqrt(self.vol_ratio)
                    self.get_lim_gal_sn()
                except:
                    self.logger.info('No LIMXGal stats')
                    pass
            except:
                self.gal_pk = None
                self.gal_pkmu = None
                self.logger.info('No Galaxy stats')
                pass
lila | lila-main/src/lila/exclaim.py | import os
import glob
import h5py
import numpy as np
import dask.array as da
from astropy import constants, units
from astropy.cosmology import Planck15 as cosmo
from nbodykit import setup_logging
import logging
import logging.config
from nbodykit.lab import ArrayMesh
from nbodykit.lab import HDFCatalog
from nbodykit import CurrentMPIComm
from . import lim
class MockExclaim(lim.MockLim):
    """Generate mock line-intensity maps for the EXCLAIM survey.

    Extends lim.MockLim with EXCLAIM survey parameters, a CII emission
    model, and the conversion from luminosity to intensity units.
    """
    def __init__(self, snap, axis=3, basepath= None, boxsize=None, fine_Nmesh=None,
                 survey_params={'beam_fwhm':4.33,'spec_res': 512,
                 'deltanu':15.625, 'patch': 2.5, 'tobs':10.5, 'noise_per_voxel':None, 'nu_rest':1.901e6,
                 'nu_co_rest':115.27}, padmanabhan19_params={'M1':2.39e-5, 'N1':4.19e11},
                 cii_model='Padmanabhan+19', noise_pk=None, **kwargs ):
        # Emission model selection and its parameters must be set before the
        # base-class constructor runs, since MockLim may build the map.
        self.cii_model = cii_model
        self.padmanabhan19_params = padmanabhan19_params
        super().__init__(snap=snap, axis=axis, basepath=basepath, boxsize=boxsize,
                         fine_Nmesh=fine_Nmesh, survey_params=survey_params, noise_pk=noise_pk, **kwargs)
    def get_res_par(self):
        """Line-of-sight (spectral) resolution in cMpc/h; eq 20 of Pullen+22.
        """
        return cosmo.h*(constants.c*(1+self.z)/(1e3*cosmo.H(self.z)*self.survey_params['spec_res'])).value
    def get_lim_noise_pk(self):
        """Set self.noise_pk to the instrument noise power from Table 3 of
        Pullen+22, unless a value was already supplied at construction.
        """
        if self.noise_pk is None:
            # Hard-coded per-redshift values from Table 3 of Pullen+22.
            if self.z < 2.8:
                self.noise_pk = 4.789e10#2.51e7 #107561.08108501034
            elif self.z <3.0:
                self.noise_pk = 10638.
            self.logger.info(f'noise_pk is set to : {self.noise_pk}')
        else:
            self.logger.info(f'noise_pk is passed as : {self.noise_pk}')
        pass
    def cii_padmanabhan19(self, mvir):
        """CII emission model, Padmanabhan et al 2019.
        Eq 3 in Pullen+22.
        Parameters:
            mvir : halo virial mass array
        Returns:
            Halo CII luminosity (model units, before unit conversion).
        """
        halo_lum = ( (mvir/self.padmanabhan19_params['M1'])**0.49 *
                    np.exp(-self.padmanabhan19_params['N1']/mvir) *
                    ( (1+self.z)**2.7 / (1 + ((1+self.z)/2.9)**5.6) )**1.79)
        return halo_lum
    def get_halo_luminosity(self):
        """Map halo masses to CII luminosities with the selected model;
        raises NameError for unsupported models."""
        if not self.silent_mode:
            self.logger.info('CII model :%s', self.cii_model)
        if self.cii_model == 'Padmanabhan+19':
            # Halo masses are stored in units of 1e10; rescale before use.
            l_cii = self.cii_padmanabhan19(mvir=self.halos[self.halo_type+'Mass'].compute()*1e10)
            self.halo_id = np.arange(0,l_cii.size)
        else:
            raise NameError("Selected CII model is not supported!")
        return l_cii
    def get_lim_map(self):
        """Get the CII luminosity map on a uniform grid of self.fine_Nmesh
        and store it on self.lim_map as an ArrayMesh.
        """
        if not self.silent_mode:
            self.logger.info('Getting the CII map')
        cii_mesh = self.get_voxel_luminosity()
        cii_map = cii_mesh.compute()
        # Convert from solar luminosity per voxel to intensity units.
        cii_map *= self.lsol_to_kjy()
        if not self.silent_mode:
            self.logger.info(' cii_map range : %s, %s, cii_map mean : %s',
                              np.min(cii_map), np.max(cii_map), np.mean(cii_map))
        self.lim_map = ArrayMesh(cii_map, BoxSize=self.boxsize)
        del cii_map
    def lsol_to_kjy(self):
        """
        Conversion factor from solar luminosity per voxel to kJy,
        i.e. the prefactor in Eq 1 of Pullen+22.
        """
        return ((constants.c.to('km/s') / (4*np.pi*self.survey_params['nu_rest']*units.Hz*1e6 * cosmo.H(self.z)) *
                1/(self.fine_vol_vox*(units.Mpc/((1+self.z)*cosmo.h))**3) * units.solLum.to('W')*units.W).to('Jy')).value/1e3
lila | lila-main/src/lila/comap.py | import os
import glob
import h5py
import numpy as np
import dask.array as da
from astropy import constants as const
from astropy.cosmology import Planck15 as cosmo
from nbodykit import setup_logging
import logging
import logging.config
from nbodykit.lab import ArrayMesh
from . import lim
class MockComap(lim.MockLim):
    """Generate mock CO line-intensity maps for the COMAP survey.

    Extends lim.MockLim with COMAP survey parameters, two CO emission
    models (Li+16 and COMAP+21), and the luminosity-to-temperature
    conversion.
    """
    def __init__(self, snap, axis=3, basepath=None, boxsize=None, fine_Nmesh=None,
                 survey_params={'beam_fwhm':4.5,'freq_res': 31.25, 'tempsys':44.0, 'nfeeds': 19,
                 'deltanu':15.625, 'patch': 4, 'tobs':1500, 'noise_per_voxel':17.8, 'nu_rest':115.27,
                 'nu_co_rest':115.27}, Li16_params={'alpha':1.17, 'beta':-0.21, 'sigma_co':0.37, 'delta_mf':1,
                 'behroozi_avsfr':'/central/groups/carnegie_poc/mqezlou/lim/behroozi+13/sfr_release.dat',
                 'sfr_type':'behroozi+13'}, COMAP21_params={'A':-2.85, 'B':-0.42, 'C':10**10.63, 'M':10**12.3,
                 'sigma_co':0.42}, co_model='COMAP+21',**kwargs ):
        # Emission model selection and parameters must be set before the
        # base-class constructor runs, since MockLim may build the map.
        self.Li16_params = Li16_params
        self.COMAP21_params = COMAP21_params
        self.co_model = co_model
        self.noise_per_voxel = survey_params['noise_per_voxel']
        super().__init__(snap=snap, axis=axis, basepath=basepath, boxsize=boxsize,
                         fine_Nmesh=fine_Nmesh, survey_params=survey_params, **kwargs)
    def get_res_par(self):
        """Calculate the spatial resolution along the line-of-sight in units
        of comoving Mpc/h. freq_res is the frequency resolution of the
        survey."""
        return cosmo.h*((1+self.z)**2 *const.c*self.survey_params['freq_res']/(1e6*cosmo.H(self.z)*self.survey_params['nu_rest'])).value
    def fiducial_comap_2021(self, mvir):
        """Halo mass -> CO luminosity relation adopted from Chung+21
        (COMAP early science), including a log-normal scatter, converted
        to L_sol."""
        lco_p = self.COMAP21_params['C'] / ((mvir/(cosmo.h*self.COMAP21_params['M']))**self.COMAP21_params['A'] +
                                            (mvir/(cosmo.h*self.COMAP21_params['M']))**self.COMAP21_params['B'])
        # Log-normal scatter around the mean relation.
        lco_p= 10**np.random.normal(np.log10(lco_p), self.COMAP21_params['sigma_co'])
        # Convert the units to L_sol
        return self.fix_co_lum_unit(lco_p)
    def co_lum_to_temp(self, lco, vol):
        """
        Convert CO luminosity in units of r$L_{\odot}$ in a volume of `vol` to brightness
        temperature in units of r'$\mu k$'. See Appendix B.1. in Li et al. (2016) arXiv:1503.08833.
        Parameters:
        lco (numpy array): CO luminosity in L_sol units
        vol (float): The volume in which the CO luminosity is measured, in units of (cMpc/h)^3.
        Returns:
        numpy array: The temperature brightness in that volume in units of \mu K.
        """
        temp = lco * (3.1e4 * (1 + self.z) ** 2 * self.survey_params['nu_rest'] ** -3 *
                      cosmo.H(self.z).value ** -1 * (vol / cosmo.h ** 3) ** -1)
        return temp
    def get_halo_luminosity(self):
        """Return the CO luminosity of the subhalos in L_sun unit"""
        if self.co_model=='Li+16':
            if not self.silent_mode:
                self.logger.info('Li+16 CO emission model with params:')
                self.logger.info(f'{self.Li16_params}')
            # Appendix B.1. in Li et. al. 2016 arxiv 1503.08833
            l_ir = self.get_ir_lum_halos()
            # We assign L_CO to zero for subhalos with sfr=0
            ind = np.where(l_ir > 0)[0]
            l_co = np.zeros_like(l_ir)
            l_co[ind] = 10**((np.log10(l_ir[ind]) - self.Li16_params['beta'])/self.Li16_params['alpha'])
            # Add a log-normal scatter
            l_co[ind] = 10**(np.log10(l_co[ind])+np.random.normal(0, self.Li16_params['sigma_co'], ind.size))
            # Convert the units to L_sol
            l_co[ind] = self.fix_co_lum_unit(l_co[ind])
        elif self.co_model=='COMAP+21':
            if not self.silent_mode:
                self.logger.info('COMAP+21 CO emission model with params:')
                self.logger.info(f'{self.COMAP21_params}')
            # Halo masses are stored in units of 1e10; rescale before use.
            l_co = self.fiducial_comap_2021(mvir=self.halos[self.halo_type+'Mass'].compute()*1e10)
        self.halo_id = np.arange(0,l_co.size)
        else:
            raise NameError("Selected CO model is not supported!")
        return l_co
    def fix_co_lum_unit(self, l_co_p):
        """Convert the units in L_CO_p from K (km/s) pc^2 to L_solar
        Li at. al. 2016 eq 4.
        """
        return 4.9e-5 * (self.survey_params['nu_rest']/self.survey_params['nu_co_rest'])**3 * l_co_p
    def get_ir_lum_halos(self):
        """Return the IR luminosity of the subhalos, derived from their SFR
        via the Li+16 delta_mf normalization."""
        self._load_sfr()
        return self.sfr*1e10 / self.Li16_params['delta_mf']
    def get_lim_map(self):
        """Calculate the CO temperature map on a uniform grid. The final map used for
        calculating the power-spectrum.
        Returns:
        numpy array: The temperature on a grid in units of r'$\mu K$'. The grid is a fine
        mesh defined with `self.fine_Nmesh` which is different from the resolution of
        the actual survey, i.e. `self.Nmesh`.
        """
        if not self.silent_mode:
            self.logger.info('Getting the CO map')
        co_mesh = self.get_voxel_luminosity()
        # Convert per-voxel luminosity to brightness temperature in place.
        def func(coord, lco):
            return self.co_lum_to_temp(lco=lco, vol=self.fine_vol_vox)
        co_mesh = co_mesh.apply(func, kind='index', mode='real')
        co_temp = co_mesh.compute()
        if not self.silent_mode:
            self.logger.info(' co_temp range : %s, %s, co_temp mean : %s',
                              np.min(co_temp), np.max(co_temp), np.mean(co_temp))
        self.lim_map = ArrayMesh(co_temp, BoxSize=self.boxsize)
        del co_temp
    def get_lim_noise_pk(self):
        """Get CO noise power spectrum which is a white noise (Chung+18 eq 15):
        P_n = sigma_n^2 V_vox
        Sets:
        self.noise_pk : the CO noise power spectrum in units of
        (\mu K)^2 * (cMpc/h)^3
        """
        self.get_noise_per_voxel()
        self.noise_pk = (self.noise_per_voxel**2)*self.vol_vox
    def get_noise_per_voxel(self):
        """Get the amplitude of the co lim noise. Assuming a Gaussian random noise we only
        need the diagonal term in the covariance matrix, sigma_n.
        We follow the prescription in Chung+18 eq 16.
        Sets:
        self.noise_per_voxel : the noise rms error in units of \mu K
        (left unchanged if it was supplied at construction).
        """
        if self.noise_per_voxel is None:
            # Get the average survey time per pixel in seconds
            dx = self.survey_params['beam_fwhm']/(60*np.sqrt(8*np.log(2)))
            taupix = self.survey_params['tobs']*3600*dx*dx/self.survey_params['patch']
            # factors are simplified in the fraction. T: K -> \mu K and deltanu: MHz -> Hz
            self.noise_per_voxel = (self.survey_params['tempsys']*1e3 /
                                    np.sqrt(self.survey_params['nfeeds'] *
                                            self.survey_params['deltanu']*taupix) )
lila | lila-main/src/lila/plot.py | import numpy as np
import h5py
import matplotlib
from matplotlib import pyplot as plt
import corner
import matplotlib.lines as mlines
from astropy.cosmology import Planck15 as cosmo
class Maps():
    """Plotting helpers for LIM, Lya tomography and density map slices."""
    def __init__(self):
        # NOTE(review): this assignment has no effect; __init__ is a no-op.
        a=1
    def mass_luminosity(self, COMAP21_params,ls=None, label=None):
        """Plot the COMAP+21 halo mass -> CO luminosity relation for each
        parameter set.
        Parameters:
        COMAP21_params : list of dicts with keys 'A', 'B', 'C', 'M',
        'sigma_co'
        ls : optional list of line styles, one per parameter set
        label : optional list of labels.
        NOTE(review): the label argument is overwritten inside the loop
        below, so the passed value is never used -- confirm intent.
        Returns:
        (fig, ax)
        """
        fig, ax = plt.subplots(1,1, figsize=(11,9))
        if ls is None:
            ls = ['solid']*len(COMAP21_params)
        if label is None:
            label = ['']*len(COMAP21_params)
        Mvir = 10**np.arange(10,13.5,0.01)
        for i in range(len(COMAP21_params)):
            # Mean COMAP+21 relation (no scatter applied here).
            lco_p = COMAP21_params[i]['C'] / ((Mvir/(cosmo.h*COMAP21_params[i]['M']))**COMAP21_params[i]['A'] +
                                              (Mvir/(cosmo.h*COMAP21_params[i]['M']))**COMAP21_params[i]['B'])
            label =('A ='+str(np.around(COMAP21_params[i]['A'], 3))+
                    ' B ='+str(np.around(COMAP21_params[i]['B'], 3))+
                    ' C ='+str(np.around(np.log10(COMAP21_params[i]['C']), 3))+
                    ' M ='+str(np.around(np.log10(COMAP21_params[i]['M']), 3))+
                    ' sco ='+str(np.around(np.log10(COMAP21_params[i]['sigma_co']),3)))
            #lco_p= 10**np.random.normal(np.log10(lco_p), COMAP21_params['sigma_co'])
            ax.plot(Mvir, lco_p, ls=ls[i], label=label )
        ax.set_xlabel(r'$M_{halo} [M_{\odot}/h]$')
        ax.set_ylabel(r'$L_{CO} [L_{\odot}]$')
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.grid(axis='both', which='both')
        ax.legend(framealpha=0, fontsize=20)
        return fig, ax
    def plot_co_map_slices(self, fig, ax, co_temp, z, vmin=None, vmax=None, title=''):
        """Plot a 2D slice (index z along the last axis) of a CO LIM map."""
        cmap = plt.get_cmap('jet')
        im = ax.imshow(co_temp[:,:,z], extent=[0, co_temp.shape[0], 0, co_temp.shape[1]],
                       origin='lower', cmap=cmap, interpolation='bilinear', vmin=vmin, vmax=vmax)
        cb = fig.colorbar(im , ax=ax, orientation='horizontal', fraction=0.17, pad=0.01)
        #cb.ax.tick_params(labelsize=5, width=5, length=2)
        cb.set_label(r'$T_{co} \ [\mu K]$')
        ax.set_title(title, fontsize=10)
    def plot_lya_map_slices(self, fig, ax, lya_map, z, vmin=None, vmax=None, title='', cb_label=r'$\delta_F$'):
        """Plot a 2D slice (index z along the last axis) of a Lya tomography
        map; the colormap is reversed so absorption appears dark."""
        cmap = plt.get_cmap('jet').reversed()
        im = ax.imshow(lya_map[:,:,z], extent=[0, lya_map.shape[0], 0, lya_map.shape[1]],
                       origin='lower', cmap=cmap, interpolation='bilinear', vmin=vmin, vmax=vmax)
        cb = fig.colorbar(im , ax=ax, orientation='horizontal', fraction=0.17, pad=0.01)
        #cb.ax.tick_params(labelsize=5, width=5, length=2)
        cb.set_label(cb_label)
        ax.set_title(title, fontsize=20)
    def plot_density(self, fig, ax, dens, z, vmin=None, vmax=None, title='', cb_label=r'$\delta_m$'):
        """Plot a 2D slice (index z along the last axis) of a DM density or
        halo density map."""
        cmap = plt.get_cmap('jet')
        dens_temp = dens
        im = ax.imshow(dens_temp[:,:,z], extent=[0, dens_temp.shape[0], 0, dens_temp.shape[1]],
                       origin='lower', cmap=cmap, interpolation='bilinear', vmin=vmin, vmax=vmax)
        cb = fig.colorbar(im , ax=ax, orientation='horizontal', fraction=0.17, pad=0.01)
        #cb.ax.tick_params(labelsize=5, width=5, length=2)
        cb.set_label(cb_label)
        ax.set_title(title, fontsize=20)
class Inference:
"""A class for inference realted plots"""
    def __init__(self, Tbco_range = (2.1,3.1), pshotco_range = (200,2000)):
        # Default axis ranges for the <T_CO>b_CO and P_shot,CO corner plots.
        self.Tbco_range = Tbco_range
        self.pshotco_range = pshotco_range
    def Tbco_pshot(self, samples, colors, labels, Tb_co_true=None, pshot_co_true=None, fig=None):
        """Plot a corner plot for the CO <T_CO>b_CO and P_shot,CO parameters.
        Parameters:
        samples : a list of sample arrays; each element is an array of
        (m, n) dimensions whose first two columns are <T_CO>b_CO
        and P_shot,CO
        colors : list of colors
        labels : list of labels
        Tb_co_true: Optional, the true <T_CO>b_CO to be drawn
        on the corner plot
        pshot_co_true : Optional, the true pshot_co to be drawn
        on the corner plot
        fig : matplotlib figure or subfigure instance
        Returns :
        fig, ax
        """
        # Fixed plotting ranges for the two parameters.
        rng = [(1.2,2.15), (250,1100)]
        if fig is None:
            fig = plt.figure(figsize=(10,10))
        legend_handles=[]
        # Per-dataset contour transparency; only the first len(samples)
        # entries are used.
        alpha = [0.5, 1, 1, 0.9, 0.9]
        for i in range(len(samples)):
            # Build legend entries manually since corner() does not label.
            legend_handles.append(mlines.Line2D([], [], color=colors[i], label=labels[i]))
            fig = corner.corner(data = samples[i][:,0:2], labels=[r'$\langle T_{CO} \rangle b_{CO}$',
                                r'$P_{shot, CO}$'], fig=fig, color=colors[i], range=rng,
                                bins=[220,300], plot_datapoints=False, show_titles=False,
                                truths=[Tb_co_true,pshot_co_true], truth_color='C0',
                                quantiles=None, levels=[0.01,0.68,0.95], hist_kwargs={'density':True},
                                contour_kwargs={'alpha':alpha[i]}, no_fill_contours=True,
                                fill_contours=True, plot_density=False,
                                contourf_kwargs={'linewidths':6})
        plt.legend(handles=legend_handles, bbox_to_anchor=(0.25, 1.0, 1.0, .0), loc=4, framealpha=0)
        # corner() lays the axes out flat; reshape to the 2x2 grid.
        ax = np.array(fig.axes).reshape((2,2))
        #if pshot_co_true is not None:
        #    ax[1,1].axvline(pshot_co_true, ls='--', color='k')
        #    ax[1,0].axhline(pshot_co_true,ls='--', color='k')
        #ax[1,0].set_xlim(self.Tbco_range[0], self.Tbco_range[1])
        #ax[1,0].set_ylim(self.pshotco_range[0], self.pshotco_range[1])
        #ax[1,0].set_ylim(self.pshotco_range[0], self.pshotco_range[1])
        #ax[0,0].set_xlim(self.Tbco_range[0], self.Tbco_range[1])
        #ax[1,1].set_xlim(self.pshotco_range[0], self.pshotco_range[1])
        ax[1,0].grid(which='both', axis='both')
        return fig, ax
def latex_table_tbco_pshot(self, samples, labels):
"""
Write a LaTex table for <T_co> b_co and P_shot,CO
"""
latex_file = open('stan/tbco_pshot.txt','w')
latex_table = [r'\begin{table}', '\n',r'\caption{Additional commands for mathematical symbols. These can only be used in maths mode.}',
'\n', r'\label{tab:mathssymbols}','\n', r'\begin{tabular*}{\columnwidth}{l@{\hspace*{20pt}}l@{\hspace*{10pt}}l}', '\n',
r'\hline','\n', r'Survey & $\langle T_{CO} \rangle b_{CO}$ & $P_{shot, CO}$\\[4pt]','\n', r'\hline \\[1pt]', '\n']
for i in range(len(samples)):
latex_table.append(f'{labels[i]} & {np.median(samples[i][:,0]):.2f} \\pm {np.std(samples[i][:,0]):.2f} & '+
f'{np.median(samples[i][:,1]):.0f} \\pm {np.std(samples[i][:,1]):.0f} \\\\[2pt]'+'\n')
latex_table.append(r'\hline')
latex_table.append('\n')
latex_table.append(r'\end{tabular*}')
latex_table.append('\n')
latex_table.append(r'\end{table}')
print(type(latex_table))
latex_file.writelines(latex_table)
    def all_params_corner(self, fig, infs, colors, labels, truths=[None, None, None, None, None],
                          tick_label_size=None, axis_label_size= None, labelpad=None):
        """Corner plot over all inferred parameters of each inference object.

        Parameters
        ----------
        fig : matplotlib figure or subfigure instance to draw into.
        infs : list of inference objects; `infs[i].samples` is an (m, n)
            array with n == 3 (CO + Lya bias) or n == 5 (CO + galaxy) columns.
        colors : list of colors, one per inference object.
        labels : list of legend labels (or None to suppress the legend).
        truths : true parameter values drawn on the plot.
        tick_label_size, axis_label_size, labelpad : optional styling knobs.

        Returns
        -------
        None; the figure is modified in place.

        NOTE(review): if no element of `infs` has 3 or 5 sample columns,
        `rng`, `bins`, etc. are never assigned and the corner call raises
        NameError. `fontsize_legend` is set but never used (the legend is
        drawn with fontsize=15).
        """
        #subfigs = fig.subfigures(1,2, width_ratios=[1,2], wspace=0.0)
        subfig_corner = fig
        legend_handles=[]
        for i in range(len(infs)):
            # Axis labels, ranges and binning depend on how many parameters
            # were sampled: 3 -> CO + Lya bias model, 5 -> CO + galaxy model.
            if infs[i].samples.shape[1] == 3:
                #subfig_corner = subfigs[0].subfigures(2,1, height_ratios=[1,1.5])[1]
                ax_labels = [r'$\langle T_{CO} \rangle b_{CO}$', r'$P_{shot, CO}$',
                             r'$b_{Ly\alpha}$']
                legend_loc = 1
                rng = [(1.3,2.0), (250,1100),(-0.225, -0.210)]
                bins=[200,120,1000]
                bbox_to_anchor=(0, 4, 1.0, .0)
                ncol = 2
                fontsize_legend=12
                if tick_label_size is None:
                    tick_label_size=12
                    axis_label_size= 17
            elif infs[i].samples.shape[1] == 5:
                #subfig_corner = subfigs[0].subfigures(2,1, height_ratios=[1,1])[1]
                ax_labels = [r'$\langle T_{CO} \rangle b_{CO}$', r'$P_{shot, CO}$',
                             r'$b_{gal}$', r'$P_{shot, Gal}$',r'$P_{shot, \times}$']
                legend_loc = 8
                bbox_to_anchor=(-0.6, 4.5, 1.0, .0)
                ncol=1
                rng = [(1.3,2.0), (250,1100),(2.1,3.7), (0, 3300),(-50, 700)]
                bins= [120,120,450,450,120]
                fontsize_legend=12
                if tick_label_size is None:
                    tick_label_size=12
                    axis_label_size = 17
            subfig_corner = corner.corner(infs[i].samples, labels=ax_labels, fig=subfig_corner, color=colors[i], range=rng,
                                          bins=bins, plot_datapoints=False, show_titles=False, quantiles=None, hist_kwargs={'density':True},
                                          no_fill_contours=True, fill_contours=True, plot_density=False, contour_kwargs={'alpha':0.8}, truths=truths,
                                          levels=[0.01, 0.68, 0.95], truth_color='k', contourf_kwargs={'linewidths':1})
            if labels is not None:
                legend_handles.append(mlines.Line2D([], [], color=colors[i], label=labels[i]))
        if labels is not None:
            plt.legend(handles=legend_handles, bbox_to_anchor=bbox_to_anchor,
                       framealpha=0, fontsize=15, ncol=ncol)
        # Apply common styling to every panel of the corner plot.
        ax = np.array(subfig_corner.axes)
        print(len(ax))
        for i in range(len(ax)):
            ax[i].grid(which='both', axis='both')
            ax[i].tick_params(labelsize=tick_label_size, pad=-0.1)
            if labelpad is not None:
                ax[i].xaxis.labelpad = labelpad
                ax[i].yaxis.labelpad = labelpad
            ax[i].xaxis.label.set_fontsize(axis_label_size)
            ax[i].yaxis.label.set_fontsize(axis_label_size)
    def model_vs_signal(self, fig, stats, infs, labels, colors):
        """Compare the simulated power spectra with the fitted linear model.

        Draws three stacked panels (auto, cross, CO) each with a model/signal
        ratio panel underneath. `stats[i]` supplies the measured spectra and
        `infs[i]` the fitted model and its bounds.

        NOTE(review): `k` and `ind` are only defined inside the lya/gal
        branches, so each element of `stats` is assumed to have at least one
        of `lya_pk` / `gal_pk` set -- otherwise the CO block below raises
        NameError. TODO confirm with callers.
        """
        # Panel geometry in figure coordinates.
        ratio_height = 0.1
        power_height = 0.15
        width = 0.82
        floor = 0.04
        gal_legend=False
        ax_auto = fig.add_axes([ratio_height, floor+3*ratio_height+2*power_height, width, power_height])
        ax_auto_ratio = fig.add_axes([ratio_height, floor+2*ratio_height+2*power_height, width, ratio_height])
        ax_cross = fig.add_axes([ratio_height, floor+2*ratio_height+power_height ,width, power_height])
        ax_cross_ratio = fig.add_axes([ratio_height, floor+ratio_height+power_height, width, ratio_height])
        ax_co = fig.add_axes([ratio_height, floor+ratio_height, width, power_height])
        ax_co_ratio = fig.add_axes([ratio_height, floor, width, ratio_height])
        axis_label_size_x = 24
        axis_label_size_y = 27
        tick_label_size = 22
        for i in range(len(stats)):
            # Shading style for the CO ratio band; the third dataset (i==2)
            # is drawn hatched/unfilled. NOTE(review): `hatch` is overwritten
            # again just before the CO block below, so only `color_shade` and
            # `edgecolor` from this branch actually take effect.
            if i==2:
                hatch='/'
                color_shade = 'none'
                edgecolor = colors[i]
            else:
                hatch=None
                color_shade = colors[i]
                edgecolor = 'none'
            if stats[i].lya_pk is not None:
                k = stats[i].lya_pk['k']
                # Restrict the model comparison to the fitted k-range.
                ind = np.where((k > infs[i].kmin)*(k<infs[i].kmax))
                ax_auto.plot(k, stats[i].lya_pk['power'], color='C0')
                print(infs[i].lya_pk_model.shape)
                ax_auto.plot(k[ind], infs[i].lya_pk_model, label=labels[i], ls='--', color=colors[i])
                ax_auto.set_ylabel(r'$P_{Lya}$', fontsize = axis_label_size_y)
                ax_auto_ratio.plot(k[ind], infs[i].lya_pk_model/stats[i].lya_pk['power'][ind], label=labels[i], ls='--', color=colors[i])
                ax_auto.fill_between(k[ind], infs[i].lya_pk_bounds[0], infs[i].lya_pk_bounds[1], color=colors[i], alpha=0.4, )
                ax_auto_ratio.fill_between(k[ind], infs[i].lya_pk_bounds[0]/stats[i].lya_pk['power'][ind],
                                           infs[i].lya_pk_bounds[1]/stats[i].lya_pk['power'][ind], color=colors[i], alpha=0.4, )
                # Cross spectrum is negative for CO x Lya; plot its magnitude.
                ax_cross.plot(k, -stats[i].lim_lya_pk['power'], color='C0')
                ax_cross.plot(k[ind], -infs[i].lim_lya_pk_model, label=labels, ls='--', color=colors[i])
                ax_cross.set_ylabel(r'$|P_{CO \times Lya}|$', fontsize = axis_label_size_y)
                ax_cross_ratio.plot(k[ind], infs[i].lim_lya_pk_model/stats[i].lim_lya_pk['power'][ind], label=labels[i], ls='--', color=colors[i])
                ax_cross.fill_between(k[ind], infs[i].lim_lya_pk_bounds[0], infs[i].lim_lya_pk_bounds[1], color=colors[i], alpha=0.4, )
                ax_cross_ratio.fill_between(k[ind], infs[i].lim_lya_pk_bounds[0]/stats[i].lim_lya_pk['power'][ind],
                                            infs[i].lim_lya_pk_bounds[1]/stats[i].lim_lya_pk['power'][ind], color=colors[i], alpha=0.4)
            if stats[i].gal_pk is not None:
                gal_legend = True
                k = stats[i].gal_pk['k']
                ind = np.where((k > infs[i].kmin)*(k<infs[i].kmax))
                # NOTE(review): this reuses ax_auto/ax_cross, so the y-labels
                # set here overwrite the Lya labels when both are present.
                ax_auto.plot(k, stats[i].gal_pk['power'], color='C0')
                ax_auto.plot(k[ind], infs[i].gal_pk_model, label=labels[i], ls='--', color=colors[i])
                ax_auto.set_ylabel(r'$P_{gal}$', fontsize = axis_label_size_y)
                ax_auto_ratio.plot(k[ind], infs[i].gal_pk_model/stats[i].gal_pk['power'][ind], label=labels[i], ls='--', color=colors[i])
                ax_auto.fill_between(k[ind], infs[i].gal_pk_bounds[0], infs[i].gal_pk_bounds[1], color=colors[i], alpha=0.4)
                ax_auto_ratio.fill_between(k[ind], infs[i].gal_pk_bounds[0]/stats[i].gal_pk['power'][ind],
                                           infs[i].gal_pk_bounds[1]/stats[i].gal_pk['power'][ind], color=colors[i], alpha=0.4)
                ax_cross.plot(k, stats[i].lim_gal_pk['power'], color='C0')
                ax_cross.plot(k[ind], infs[i].lim_gal_pk_model, label=labels, ls='--', color=colors[i])
                ax_cross.set_ylabel(r'$P_{CO \times gal}$', fontsize = axis_label_size_y)
                ax_cross_ratio.plot(k[ind], infs[i].lim_gal_pk_model/stats[i].lim_gal_pk['power'][ind], label=labels[i], ls='--', color=colors[i])
                ax_cross.fill_between(k[ind], infs[i].lim_gal_pk_bounds[0], infs[i].lim_gal_pk_bounds[1], color=colors[i], alpha=0.4)
                ax_cross_ratio.fill_between(k[ind], infs[i].lim_gal_pk_bounds[0]/stats[i].lim_gal_pk['power'][ind],
                                            infs[i].lim_gal_pk_bounds[1]/stats[i].lim_gal_pk['power'][ind], color=colors[i], alpha=0.4)
            # CO auto spectrum (drawn for every dataset).
            if i%2 == 0:
                hatch='x'
            else:
                hatch=None
            print(stats)
            ax_co.plot(k, stats[i].lim_pk['power'], color='C0')
            ax_co.plot(k[ind], infs[i].lim_pk_model, label=labels, ls='--', color=colors[i])
            ax_co.set_ylabel(r'$P_{CO}$', fontsize = axis_label_size_y)
            ax_co_ratio.plot(k[ind], infs[i].lim_pk_model/stats[i].lim_pk['power'][ind], label=labels[i], ls='--', color=colors[i])
            ax_co_ratio.fill_between(k[ind], infs[i].lim_pk_bounds[0]/stats[i].lim_pk['power'][ind],
                                     infs[i].lim_pk_bounds[1]/stats[i].lim_pk['power'][ind], color=color_shade, alpha=0.4, hatch = hatch, edgecolor=colors[i])
        ax_co.set_ylim(5e2,3e4)
        ax_co_ratio.set_ylim(0.75,1.5)
        # NOTE(review): both branches below are identical; either the
        # condition is obsolete or one branch was meant to differ.
        if gal_legend:
            ax_auto.legend( bbox_to_anchor=(0, 1.0, 1.1, 0.3), ncol=2, fontsize=20, framealpha=0)
        else:
            ax_auto.legend( bbox_to_anchor=(0, 1.0, 1.1, 0.3), ncol=2, fontsize=20, framealpha=0)
        # Shared styling for every power/ratio panel pair.
        for ax_power, ax_ratio in zip([ax_auto, ax_cross, ax_co],[ax_auto_ratio, ax_cross_ratio, ax_co_ratio]):
            ax_power.set_xscale('log')
            ax_power.set_yscale('log')
            ax_ratio.set_xscale('log')
            ax_ratio.set_xlabel(r'k [h/cMpc]', fontsize=axis_label_size_x)
            ax_ratio.set_ylabel(r'$\frac{\hat{P}}{P}$', fontsize=28)
            ax_power.tick_params(labelsize=tick_label_size, labelbottom=False)
            ax_ratio.tick_params(labelsize=tick_label_size)
            ax_power.grid(axis='both', which='both')
            ax_ratio.grid(axis='both', which='both')
            xlim = ax_power.get_xlim()
            ax_ratio.set_xlim(xlim)
        ax_auto_ratio.set_ylim((0.8,1.2))
        ax_auto_ratio.set_yticks(np.arange(0.9,1.19,0.1))
        ax_cross_ratio.set_yticks(np.arange(0.75,1.49,0.25))
        ax_cross_ratio.set_ylim((0.5,1.5))
        ax_co_ratio.set_yticks(np.arange(0.75,1.49,0.25))
def plot_single_co(self, sts, labels, colors, title=''):
fig, ax = plt.subplots(1,2, figsize=(12,6))
for i, st in enumerate(sts):
st.get_co_sn()
ax[1].plot(st.lim_pk['k'][:], st.co_sn, label='CO '+labels[i], ls='solid', color=colors[i])
ax[0].plot(st.lim_pk['k'][:], np.abs(st.lim_pk['power'][:]), label=r'$P_{CO}, $'+labels[i], ls='solid', alpha=0.7, color=colors[i])
ax[0].plot(st.lim_pk['k'][:], np.abs(st.sigma_co_pk[:]), label=r'$\sigma_{P_{CO}},$'+labels[i], ls='--', alpha=0.7, color=colors[i])
for i in range(2):
ax[i].set_xscale('log')
ax[i].grid(axis='both', which='both')
ax[i].set_xlabel('$k \ h(cMpc)^{-1}$')
ax[i].set_xlim(2e-2,6e-1)
ax[i].legend(framealpha=0, loc='upper left', fontsize=20)
ax[0].set_yscale('log')
ax[0].set_ylim(1e2,1e5)
ax[1].set_ylabel('S/N')
ax[1].set_ylim((0,10))
ax[1].set_yticks((np.arange(0,10,2)))
fig.suptitle(title, fontsize=20)
fig.tight_layout()
return fig, ax
class SN():
    """Plotting utilities for signal-to-noise related figures."""

    def __init__(self) -> None:
        """Stateless helper; nothing to initialise."""
        pass
def plot_CO_covariance(self, cov, mean, vmin=0, vmax=2):
cov /= mean
cov = (cov.T / mean).T
cmap = plt.get_cmap('viridis')
fig, ax = plt.subplots(1,1)
im = ax.imshow(cov, extent=[0, cov.shape[0], 0, cov.shape[1]], vmin=vmin, vmax=vmax,
origin='lower', cmap=cmap, interpolation=None)
cb = fig.colorbar(im , ax=ax, orientation='horizontal', fraction=0.17, pad=0.01)
#cb.ax.tick_params(labelsize=5, width=5, length=2)
cb.set_label(r'$r(k)$')
ax.grid()
return cov
def details(self, fig, ax, sts, labels):
signal_plotted = False
for st, label in zip(sts, labels):
if st.lya_pk is not None:
if not signal_plotted:
ax.plot(st.lya_pk['k'], st.lya_pk['power'], label='signal')
signal_plotted = True
ax.plot(st.lya_pk['k'], st.sigma_lya_pk, ls='--', label=f'Noise {label}')
ax.set_ylabel(r'$P_{Ly\alpha}(k)$'+' or '+r'$P_{n, Ly\alpha}(k)$')
if st.gal_pk is not None:
ax.plot(st.gal_pk['k'], st.gal_pk['power'], label=f'Signal {label}')
ax.plot(st.gal_pk['k'], st.gal_noise_pk, ls='--', label=f'Noise {label}')
if st.lim_pk is not None:
ax.plot(st.lim_pk['k'], np.abs(st.lim_pk['power']), label=f'Signal LIM ')
ax.plot(st.lim_pk['k'], np.abs(st.sigma_lim_pk), ls='--', label=f'Noise LIM ')
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(framealpha=0)
ax.set_xlabel(r'k[h/Mpc]')
ax.grid(which='both', axis='both')
    def plot_mcmc(slef, fig, ax, stats_lya=None, stats_gal=None, vol_ratio=3, labels=[''],
                  ls='dashed', plot_co =False, lw=[4.5,6]):
        """Plot S/N curves for MCMCs of a mock survey """
        # NOTE(review): this method appears broken / dead code:
        #  - the self parameter is misspelled `slef` (harmless but confusing);
        #  - `j`, `label` and `sn` are referenced but never defined, so any
        #    call that reaches those lines raises NameError;
        #  - `stats_lya` and `vol_ratio` are accepted but never used;
        #  - `labels` and `lw` are mutable default arguments.
        # The only caller in this file (compare_lya_gal) also passes keyword
        # arguments (stat_func, surveys) that this signature does not accept.
        for i in range(len(stats_gal)):
            st = stats_gal[i]
            k = st.lim_pk['k']
            sn_co = st.get_co_sn()
            # Cross-correlation coefficient r(k) between CO and galaxies.
            rk = st.lim_gal_pk['power'] / np.sqrt(st.lim_pk['power'] * st.gal_pk['power'])
            ax[0].plot(k, np.nanmedian(rk, axis=0), ls=ls, lw=lw[j], label=label)
            ax[0].fill_between(k, np.nanquantile(rk, .16, axis=0),
                               np.nanquantile(rk, .84, axis=0), alpha=0.3, ls=ls)
            if plot_co and i==0:
                ax[j+1].plot(k, np.nanmedian(sn_co, axis=0), ls=ls, label='COMAP', color='k')
            ax[j+1].plot(k, np.nanmedian(sn, axis=0), alpha=0.5, label=labels[i],
                         ls=ls, lw=lw[j])
    def compare_lya_gal(self, title='COMAP CO Model', vol_ratio=[1,5], savefile=None):
        """Comparison plot between S/N curves of CO X Lya and CO X galaxy surveys

        NOTE(review): the calls below pass `stat_func=` and `surveys=` keyword
        arguments, but the visible `plot_mcmc` signature accepts neither, so
        calling this method raises TypeError. Either `plot_mcmc` changed or
        this method is stale -- confirm before use.
        """
        fig, ax = plt.subplots(1,3, figsize=(21,6))
        # Lya surveys at three transverse sightline separations.
        self.plot_mcmc(fig, ax, stat_func=self.get_lya_stat, surveys=['LATIS', 'PFS','eBOSS'],
                       vol_ratio=vol_ratio, ls='solid', plot_co=True, labels=[r'$d_{\perp} = 2.5 \ cMpc/h$',
                       r'$d_{\perp} = 4 \ cMpc/h$',r'$d_{\perp} = 13 \ cMpc/h$'])
        # Galaxy surveys at two redshift-sampling rates.
        self.plot_mcmc(fig, ax, stat_func=self.get_gal_stat, surveys=['Rz7e-4','Rz02'],
                       vol_ratio=vol_ratio, labels=[r'$Rz = 7 \times 10^{-4}$',r'$ Rz = 2 \times 10^{-2}$'])
        for i in range(3):
            ax[i].set_xscale('log')
            ax[i].grid(axis='both', which='both')
            ax[i].set_xlabel('$k \ h(cMpc)^{-1}$')
        ax[0].set_ylabel(r'$r(k) = \frac{P_{\times}(k)}{\sqrt{(P_{A} (k) \times P_{B} (k)}}$')
        ax[1].set_ylabel('S/N')
        ax[0].legend(framealpha=0)
        ax[1].legend(framealpha=0)
        ax[1].set_ylim((0,20))
        ax[2].set_ylim((0,20))
        ax[0].set_title('Perfect \ Surveys', fontsize=20)
        ax[1].set_title(r'$Mock \ surveys, \ v_{PFS} = V_{COMAP-Y5}/5$', fontsize=20)
        ax[2].set_title(r'$Mock \ surveys. \ V_{COMAP-Y5}$', fontsize=20)
        fig.suptitle(title, fontsize=20)
        fig.tight_layout()
        if savefile is not None:
            fig.savefig(savefile)
    def compare_tot_sn(self, plot=True, savefile=None):
        """Comparison between total S/N of CO X Lya and CO X galaxy surveys

        Total S/N per survey is the quadrature sum (L2 norm) over k-bins of
        the median S/N, with asymmetric errors from the 16th/84th quantiles.

        NOTE(review): relies on self.get_lya_stat / self.get_gal_stat which
        are not defined in this class as shown -- presumably provided by a
        subclass or mixin; verify. `sn_lim` comes from the *last* Lya survey
        iteration, and with plot=False the raw `sn_lim` samples (not the
        median) are returned.
        """
        lya_surveys=['LATIS','PFS','eBOSS']
        lya_labels = [r'$d_{\perp} = 2.5 \ cMpc/h$', r'$d_{\perp} = 4 \ cMpc/h$', r'$d_{\perp} = 13 \ cMpc/h$']
        gal_surveys=['Rz7e-4','Rz02']
        gal_labels = [r'$R_{z} = 7 \times 10^{-4}$', r'$R_{z} = 2 \times 10^{-2}$']
        sn_lya, sn_gal = {}, {}
        # err_*[0] = lower error bar, err_*[1] = upper error bar.
        err_lya, err_gal = np.zeros((2,len(lya_surveys))), np.zeros((2,len(gal_surveys)))
        for i,s in enumerate(lya_surveys):
            _, _, sn, sn_lim = self.get_lya_stat(survey_type=s, vol_ratio=5)
            sn_lya[s] = np.linalg.norm(np.nanmedian(sn, axis=0))
            err_lya[:,i] = [sn_lya[s] - np.linalg.norm(np.nanquantile(sn, 0.16, axis=0)) ,
                            np.linalg.norm(np.nanquantile(sn, 0.84, axis=0)) - sn_lya[s]]
            #err_lya[s] = np.sum(np.nanmedian(sn, axis=0))
        for i,s in enumerate(gal_surveys):
            _, _, sn, _ = self.get_gal_stat(survey_type=s, vol_ratio=5)
            sn_gal[s] = np.linalg.norm(np.nanmedian(sn, axis=0))
            err_gal[:,i] = [sn_gal[s] - np.linalg.norm(np.nanquantile(sn, 0.16, axis=0)) ,
                            np.linalg.norm(np.nanquantile(sn, 0.84, axis=0)) - sn_gal[s]]
        sn_lim_median = np.linalg.norm(np.nanmedian(sn_lim, axis=0))
        err_lim = [sn_lim_median - np.linalg.norm(np.nanquantile(sn_lim, 0.16, axis=0)) ,
                   np.linalg.norm(np.nanquantile(sn_lim, 0.84, axis=0)) - sn_lim_median]
        err_lim = np.array(err_lim)
        # NOTE(review): ndarray.reshape returns a new array; this result is
        # discarded, so err_lim keeps shape (2,). Dead statement.
        err_lim.reshape((2,1))
        if plot:
            fig, ax = plt.subplots(1,1, figsize=(8,6))
            ax.bar(['COMAP'], sn_lim_median)
            ax.bar(lya_labels, sn_lya.values(), yerr=err_lya)
            ax.bar(gal_labels, sn_gal.values(), yerr=err_gal)
            ax.set_ylabel('S/N')
            ax.grid(True, axis='y')
            ax.set_title('COMAP-Y5')
            plt.xticks(rotation=-30)
            plt.yticks(np.arange(0,90,5))
            fig.tight_layout()
            if savefile is not None:
                fig.savefig(savefile)
        else :
            return sn_lim, sn_lya, sn_gal
    def plot_single_mock(self, sts, labels, colors, fig=None, ax=None, title='', plot_uncer=True, plot_rk=True, alpha=0.90, legend=True, lw=3):
        """Plot S/N curves for single CO model (CO X Galaxies)
        Paramters :
            sts: A list of stats.Stats instances
            labels : a list of corresponding labels
            colors : a list of corresponding colors
            fig, ax : optional figure/axes to draw into; when omitted a new
                figure with 3 (with uncertainty panel) or 2 panels is made.
            plot_uncer : add a leftmost panel with P(k) and its sigma.
            plot_rk : accepted but not used in the body. TODO confirm.
        Returns: fig, ax
        """
        if fig is None:
            if plot_uncer:
                fig, ax = plt.subplots(1,3, figsize=(21,6))
                # s is the column offset of the r(k) panel (panel 0 holds the
                # power spectra when plot_uncer is on).
                s=1
            else:
                fig, ax = plt.subplots(1,2, figsize=(18,6))
                s=0
        else:
            s = 0
        label_lya = r'$CO \times Lya$'
        label_gal = r'$CO \times Gal$'
        for i, st in enumerate(sts):
            if st.gal_pk is not None:
                # Cross-correlation coefficient r(k) for CO x galaxies.
                rk = np.abs(st.lim_gal_pk['power'][:]/ np.sqrt(st.gal_pk['power'][:]*
                                                               st.lim_pk['power'][:]))
                if plot_uncer:
                    ax[0].plot(st.gal_pk['k'][:], np.abs(st.lim_gal_pk['power'][:]), label=r'$P_{Gal \times CO} $'+labels[i], ls='solid', alpha=0.7, color=colors[i])
                    ax[0].plot(st.gal_pk['k'][:], np.abs(st.sigma_co_gal_pk[:]), alpha=0.7, ls='--', color=colors[i])
                # NOTE(review): unlike the Lya branch, label_gal is never reset
                # to None (the reset is commented out), so the galaxy legend
                # entry repeats for every element of sts.
                if label_gal is not None:
                    ax[s].plot(st.gal_pk['k'][:], np.abs(rk), alpha=0.7, color=colors[i], label=label_gal, ls='--')
                    #label_gal = None
                ax[s+1].plot(st.gal_pk['k'][:], st.co_gal_sn, color=colors[i], ls='--', label=labels[i], alpha=alpha, lw=lw)
            if st.lya_pk is not None:
                rk = np.abs(st.lim_lya_pk['power'][:]/ np.sqrt(st.lya_pk['power'][:]*
                                                               st.lim_pk['power'][:]))
                if plot_uncer:
                    ax[0].plot(st.lya_pk['k'][:], np.abs(st.lim_lya_pk['power'][:]), label=r'$P_{Lya \times CO} $'+labels[i], ls='solid', alpha=0.7, color=colors[i])
                    ax[0].plot(st.lya_pk['k'][:], np.abs(st.sigma_co_lya_pk[:]), alpha=0.7, ls='--', color=colors[i])
                if label_lya is not None:
                    ax[s].plot(st.lya_pk['k'][:], np.abs(rk), alpha=0.7, color=colors[i], label=label_lya)
                    label_lya = None
                ax[s+1].plot(st.lya_pk['k'][:], st.co_lya_sn, color=colors[i], label=labels[i], alpha=alpha, lw=lw)
        # NOTE(review): after the loop `st` is the *last* element of sts; its
        # auto-CO S/N and power are used for the COMAP reference curves.
        st.get_co_sn()
        ax[s+1].plot(st.lim_pk['k'][:], st.co_sn, ls='solid', color='k', label='COMAP-Y5', alpha=alpha, lw=lw)
        if plot_uncer:
            ax[0].plot(st.lim_pk['k'][:], np.abs(st.lim_pk['power'][:]), label=r'$P_{CO} $', ls='solid', alpha=0.7, color='k')
            ax[0].plot(st.lim_pk['k'][:], np.abs(st.sigma_co_pk[:]), ls='--', alpha=0.7, color='k')
        for i in range(s+2):
            ax[i].set_xscale('log')
            ax[i].grid(axis='both', which='both')
            ax[i].set_xlabel('$k \ (h/cMpc)$')
            ax[i].set_xlim(2e-2,1)
        if plot_uncer:
            ax[0].set_xscale('log')
            ax[0].grid(axis='both', which='both')
            ax[0].set_xlabel('$k \ (h/cMpc)$')
            ax[0].set_xlim(2e-2,1)
            ax[0].legend(framealpha=0, loc='lower left', fontsize=20)
            ax[0].set_yscale('log')
            ax[0].set_ylim(1e-2,1e5)
        ax[s].set_ylim((0,1))
        ax[s+1].set_ylabel('S/N')
        #ax[s+1].set_ylim((0,16))
        print(s+1)
        #ax[s+1].set_yticks((np.arange(0,20,2)))
        ax[s].set_ylabel(r'$r(k) = \frac{P_{\times}(k)}{\sqrt{(P_{A} (k) \times P_{B} (k)}}$')
        if legend:
            ax[s].legend()
            ax[s+1].legend( framealpha=0.7, loc=(1.05, 0.0), fontsize=16, facecolor=None,
                           frameon=True)
        fig.suptitle(title, fontsize=20)
        fig.tight_layout()
        return fig, ax
def compare_tot_sn_single_mock(self, sts_lya, sts_gal, fig=None, ax=None, savefile=None,
alpha=0.95, lya_labels=None, auto_label='COMAP-Y5', gal_labels=None, text_offset=40):
"""Comparison between total S/N of CO X Lya and CO X galaxy surveys
The version used for a single mock
"""
"""
if len(sts_lya) == 4:
lya_labels = [r'$\ d_{\perp} = \\ 2.5 \ cMpc/h$', r'$\ d_{\perp} = \\ 4 \ cMpc/h$', r'$\ d_{\perp} =\\ 10 \ cMpc/h$',r'$d_{\perp} =\\ 13 \ cMpc/h$']
else :
lya_labels = [r'$\ d_{\perp} = \\ 2.5 \ cMpc/h$', r'$d_{\perp} = \\ 4 \ cMpc/h$', r'$d_{\perp} =\\ 13 \ cMpc/h$']
gal_labels =[ r'$R_{z} = \\ 7 \times 10^{-4}$', r'$R_{z} = \\ 2 \times 10^{-2}$']
"""
sn_lya, sn_gal = {}, {}
for i,s in enumerate(lya_labels):
sn_lya[s]= np.linalg.norm(sts_lya[i].lim_lya_sn)
for i,s in enumerate(gal_labels):
sn_gal[s] = np.linalg.norm(sts_gal[i].lim_gal_sn)
sn_lim = np.linalg.norm(sts_lya[0].lim_sn)
if fig is None:
fig, ax = plt.subplots(1,1, figsize=(12,6))
labels = [auto_label]+gal_labels+lya_labels
ax.barh([0], sn_lim, alpha=alpha, color='C0')
ax.barh(1+np.arange(len(gal_labels)), sn_gal.values(), alpha=alpha, color='C2')
ax.barh(1+len(gal_labels)+np.arange(len(lya_labels)), sn_lya.values(), alpha=alpha, color='C1')
ypos = np.arange(len(labels))
ax.set_yticks(ypos)
ax.set_yticklabels(labels)
ax.text(text_offset, 0-0.25, f'{sn_lim:.0f}', fontsize=25 , alpha=alpha)
for i in range(len(gal_labels)):
ax.text(text_offset, 1+i - 0.25, f'{list(sn_gal.values())[i]:.0f}', fontsize=25 , alpha=alpha)
for i in range(len(lya_labels)):
ax.text(text_offset, 1+len(gal_labels)+i -0.25, f'{list(sn_lya.values())[i]:.0f}', fontsize=25, alpha=alpha)
ax.set_xlabel('total \ S/N')
ax.set_ylabel('Survey')
ax.grid(True, axis='y')
ax.yaxis.tick_right()
ax.set_xticks(np.arange(0,55,5))
if savefile is not None:
fig.savefig(savefile)
return sn_lim, sn_lya, sn_gal
def plot_simple_SN(self, fig, ax, sts, labels, colors, auto_label='COMAP-Y5', title='', legend=True):
for i, st in enumerate(sts):
print(i)
st =sts[i]
if st.gal_pk is not None:
ax.plot(st.gal_pk['k'][:], st.lim_gal_sn, color=colors[i], ls='--', label=labels[i])
if st.lya_pk is not None:
ax.plot(st.lya_pk['k'][:], st.lim_lya_sn, color=colors[i], label=labels[i])
# Using the first stat paased for plotting auto LIM signal, be careful when stats have different
# volumes (e.g. for Exclaim mocks)
ax.plot(sts[0].lim_pk['k'][:], sts[0].lim_sn, ls='solid', color='k', label=auto_label)
ax.set_xscale('log')
ax.grid(axis='both', which='both')
ax.set_xlabel('$k \ (h/cMpc)$')
ax.set_xlim(2e-2,1)
ax.set_ylabel('S/N')
ax.set_ylim((-0.1,14))
ax.set_yticks((np.arange(0,16, 2)))
if legend:
ax.legend( framealpha=0.7, loc=(-0.7, 0.0), fontsize=20, facecolor=None,
frameon=True)
return fig, ax
def plot_rk(self, sts, fig, ax, color, labels, lss=None):
for i, st in enumerate(sts):
if st.gal_pk is not None:
rk = (st.lim_gal_pk['power'][:]/ np.sqrt(st.gal_pk['power'][:]*
st.lim_pk['power'][:])).squeeze()
ax.plot(st.gal_pk['k'][:], rk, alpha=0.9, color=color[i], label=labels[i], ls='--')
#ax.errorbar(x=st.gal_pk['k'][:], y=-1*rk, yerr=np.sqrt(3)*rk/np.sqrt(st.lim_pk['modes'][:]), fmt='-', alpha=0.9, label=labels[i], color=color[i])
if st.lya_pk is not None:
rk = st.lim_lya_pk['power'][:]/ np.sqrt(st.lya_pk['power'][:]*
st.lim_pk['power'][:])
if lss is not None:
ls = lss[i]
else:
ls=None
ax.plot(st.lya_pk['k'][:], -1*rk, alpha=0.9, label=labels[i], color=color[i], ls=ls)
#ax.errorbar(x=st.lya_pk['k'][:], y=-1*rk, yerr=np.sqrt(3)*rk/np.sqrt(st.lim_pk['modes'][:]), fmt='-',
# elinewidth=3,capsize=5, alpha=0.5, label=labels[i], color=color[i])
ax.set_ylim(0,1)
ax.set_xscale('log')
ax.grid(axis='both', which='both')
ax.set_ylabel(r'$| r(k) |$')
ax.set_xlabel('$k \ (h/cMpc)$')
ax.legend(loc='lower left', framealpha=0)
def plot_pkmu(self,st, plot_Nmodes=False, title='', savefig=None):
"""Plot 2D P(k, mu) for all avaiable statistics.
Parameters:
----------------------------
st: An instance of `lim_lytomo.stats.Stats()`
"""
powers = []
sigma_pks = []
labels = []
if st.co_pkmu is not None:
ind = np.where(np.abs(st.co_pkmu['power']) > 0)
pow = np.zeros_like(np.abs(st.co_pkmu['power'][:]))
sig = np.zeros_like(np.abs(st.co_pkmu['power'][:]))
pow[ind] = np.abs(st.co_pkmu['power'][ind])
sig[ind] = np.abs(st.sigma_co_pkmu[ind])
powers.append(pow)
sigma_pks.append(sig)
labels.append(r'$P_{CO}$')
if st.gal_pkmu is not None:
ind = np.where(np.abs(st.gal_pkmu['power']) > 0)
pow = np.zeros_like(np.abs(st.gal_pkmu['power'][:]))
sig = np.zeros_like(np.abs(st.gal_pkmu['power'][:]))
pow[ind] = np.abs(st.gal_pkmu['power'][ind])
sig[ind] = np.abs(st.sigma_gal_pkmu[ind])
powers.append(pow)
sigma_pks.append(sig)
labels.append(r'$P_{Gal}$')
# CO X Gal
ind = np.where(np.abs(st.co_gal_pkmu['power']) > 0)
pow = np.zeros_like(np.abs(st.co_gal_pkmu['power'][:]))
sig = np.zeros_like(np.abs(st.co_gal_pkmu['power'][:]))
pow[ind] = np.abs(st.co_gal_pkmu['power'][ind])
sig[ind] = np.abs(st.sigma_co_gal_pkmu[ind])
powers.append(pow)
sigma_pks.append(sig)
labels.append(r'$P_{CO X Gal}$')
if st.lya_pkmu is not None:
ind = np.where(np.abs(st.lya_pkmu['power']) > 0)
pow = np.zeros_like(np.abs(st.lya_pkmu['power'][:]))
sig = np.zeros_like(np.abs(st.lya_pkmu['power'][:]))
pow[ind] = np.abs(st.lya_pkmu['power'][ind])
sig[ind] = np.abs(st.sigma_lya_pkmu[ind])
powers.append(pow)
sigma_pks.append(sig)
labels.append(r'$P_{Lya}$')
pow = np.zeros_like(np.abs(st.co_lya_pkmu['power'][:]))
sig = np.zeros_like(np.abs(st.sigma_CO_lya_pkmu['power'][:]))
pow[ind] = np.abs(st.co_lya_pkmu['power'][ind])
sig[ind] = np.abs(st.sigma_co_lya_pkmu[ind])
powers.append(pow)
sigma_pks.append(sig)
labels.append(r'$P_{cO X Lya}$')
num_axs = len(labels)
if plot_Nmodes:
num_axs+=1
fig, ax = plt.subplots(2,num_axs, figsize=(6*num_axs,12))
for i in range(len(powers)):
cmap = plt.get_cmap('jet')
im = ax[0,i].imshow(powers[i][:,np.min(ind[1])::], origin='lower', cmap=cmap, interpolation='bilinear',
extent=[0, 1, st.kmin, st.kmax], norm=matplotlib.colors.LogNorm())
cb = fig.colorbar(im , ax=ax[0,i], orientation='horizontal', fraction=0.1, pad=0.2)
cb.set_label(labels[i])
ax[0,i].set_ylabel(r'$k$')
ax[0,i].set_xlabel(r'$\mu$')
ax[0,i].grid(which='both', axis='both')
im = ax[1,i].imshow(sigma_pks[i][:,np.min(ind[1])::], origin='lower', cmap=cmap, interpolation='bilinear',
extent=[0, 1, st.kmin, st.kmax], norm=matplotlib.colors.LogNorm(vmin=1e4, vmax=1e12))
cb = fig.colorbar(im , ax=ax[1,i], orientation='horizontal', fraction=0.1, pad=0.2)
cb.set_label(r'$\sigma$'+labels[i])
ax[1,i].set_ylabel(r'$k$')
ax[1,i].set_xlabel(r'$\mu$')
ax[1,i].grid(which='both', axis='both')
if i== len(powers)-1:
Nmodes = np.zeros_like(powers[i])
Nmodes[ind] = st.co_pkmu['modes'][ind]
im = ax[0,i+1].imshow(Nmodes[:,np.min(ind[1])::], origin='lower', cmap=cmap, interpolation='bilinear',
extent=[0, 1, st.kmin, st.kmax])
cb = fig.colorbar(im , ax=ax[0,i+1], orientation='horizontal', fraction=0.1, pad=0.2)
cb.set_label(r'$N_{modes}$')
ax[0,i+1].set_ylabel(r'$k$')
ax[0,i+1].set_xlabel(r'$\mu$')
ax[0,i+1].grid(which='both', axis='both')
fig.suptitle(title, fontsize=20)
fig.tight_layout()
if savefig is not None:
fig.savefig(savefig)
| 38,319 | 48.960887 | 165 | py |
lila | lila-main/src/lila/__init__.py | 0 | 0 | 0 | py | |
lila | lila-main/src/lila/git_handler.py | import os
import subprocess
def get_head_hash(pack):
    """Return the full hash of the commit at HEAD of *pack*'s repository.

    Parameters
    ----------
    pack : An imported package whose source lives inside a git checkout.

    Returns
    -------
    bytes : output of ``git rev-parse HEAD`` (40-char hash plus trailing
        newline).

    Raises
    ------
    subprocess.CalledProcessError : if git exits with a non-zero status.
    """
    repo_path = os.path.dirname(pack.__file__)
    # Pass the command as a list with shell=False: avoids shell injection
    # through the package path and fails loudly if git errors out -- the
    # original's `assert err is None` could never fire because stderr was
    # not captured, and shell=True built the command by string concatenation.
    return subprocess.check_output(['git', '-C', repo_path, 'rev-parse', 'HEAD'])
lila | lila-main/src/lila/lim.py | import os
import glob
import h5py
import numpy as np
import dask.array as da
from astropy import constants as const
from astropy.cosmology import Planck15 as cosmo
from nbodykit import setup_logging
import logging
import logging.config
from nbodykit.lab import ArrayMesh, HDFCatalog, CurrentMPIComm
class MockLim():
"""A class for mock Line Intensit Map """
    def __init__(self, snap, survey_params, axis=3, basepath=None, boxsize=None, brange =None,
                 fine_Nmesh=None, seed=None, halo_type='Subhalo', sim_type= 'TNG',
                 compensated=True, rsd=True, mass_cut=None, silent_mode=True, sfr_file=None, noise_pk=None):
        """
        Generate a mock Line intensity map from a hydro/DM-only simulation.
        The default arguments are for COMAP Early science (Chung+21).
        Parameters:
            snap : Snapshot id, int
            axis : Either 1 or 2 or 3 indicating the line of sight axis, x or y or z
            basepath: The path to the directory of postprocessed data of the simulations
            sfr_file : The file of averaged star formation in the subhalos. Not required if
                      the instantaneous sfr is used.
            fine_Nmesh : (array_like, shape=(3,) ) number of grid mesh cells along each axis. It should match that of Lya/galaxy maps.
            brange: (array_like, shape=(6,)) [xmin,xmax,ymin,ymax,zmin,zmax], subbox coordinates
                    in cMpc/h of the simulation to make mock for
            boxsize : int or tuple of 3 ints
                    Simulation box size in comoving cMpc/h
            freq_res : Frequency resolution in MHz
            beam_fwhm : Beam FWHM in arcminutes
            tempsys : System temperature in K
            nfeeds: Number of feeds
            deltanu : Frequency resolution in MHz
            patch : survey area per patch in deg^2
            tobs : Total survey time on this patch in hours
            noise_per_voxel : Noise per voxel in \mu K. Default on the COMAP+21 Y5 projection,
                    Table 4 in Chung+21 Arxiv:2111.05931. It overrides the calculation from
                    the sys temperature.
            nu_rest : rest frequency of the line in GHz
            nu_co_rest : rest frequency of the CO line in GHz
            Li16_params: Optional, used only if co_model=='Li16'. The model parameters for painting CO
                         emission on halo catalog. Li+16: arxiv:1503.08833
                         alpha, beta, sigma_co : The parameters in converting L_IR to L_CO, values from
                         Chung et. al. 2018 arxiv:1809.04550 and
                         delta_mf : The parameter to convert star formation rate to L_IR
            COMAP21_params : Optional, only if co_model=='COMAP21'. The model parameters for painting CO
                         emission on halo catalog. Chung et. al. 2021 arxiv:2111.05931
            seed : Random seed to generate the scatter in L_CO
            halo_type : Either 'Subhalo' or 'Group'
            sfr_type: The options for sfr_type are :
                    'SFR_MsunPerYrs_in_InRad_50Myrs', 'SFR_MsunPerYrs_in_InRad_100Myrs',
                    'SFR_MsunPerYrs_in_InRad_200Myrs', 'SubahaloSFR', 'GroupSFR' ,'behroozi+13'
                    The first 3 are average star formation rates from Donanari+2019 and Pillepich+2019
                    The 4th and 5th are the instantaneous star formation in hydro TNG
                    The last is the average sfr-vs-DM halo mass relation from behroozi+13
            co_model : str, Fiducial model (prior) for COMAP. Options are :
                    'Li+16' : A model based on Li+16
                    'COMAP+21' : A model based on chung+21. COMAP early science fiducial model
                    which is a data driven prior (UM+COLDz+COPSS): Arxiv:2111.05931
            behroozi_avsfr : str, path to average SFR from behroozi+13
            sim_type : str, simulation type either 'ASTRID', 'TNG' or 'MDPL2'
            compensated: If True, makes the correction in the fourier space of the field interpolated
                    by CIC.
            mass_cut : Default 10, log of min halo mass in M_sol/h units. If not None, only halos with
                    log_10(M_halo) > mass_cut [M_sol/h] emit CO
            rsd : whether to apply redshift space distortion along the line-of-sight
        """
        # MPI: communicator, this process's rank and the communicator size.
        self.MPI = CurrentMPIComm
        self.comm = self.MPI.get()
        self.rank = self.comm.Get_rank()
        self.size = self.comm.Get_size()
        self.snap = snap
        self.axis= axis
        # Unit vector along the chosen line-of-sight axis (1-indexed).
        self.sight_vec = [0,0,0]
        self.sight_vec[int(axis-1)] = 1
        self.brange = brange
        self.sfr = []
        self.halos = {}
        self.groups = {}
        self.halo_id = []
        self.lim_map = None
        self.basepath = basepath
        # Without a basepath no halo catalog can be located, and no map is made.
        if self.basepath is None:
            self.halo_file = None
        else :
            self.halo_file = os.path.join(basepath, 'groups_'+str(self.snap)+'/fof*')
        self.sfr_file = sfr_file
        # Derive the box size from the subbox coordinates when not given.
        if boxsize is None:
            self.boxsize = (brange[1]-brange[0],
                            brange[3]-brange[2],
                            brange[5]-brange[4])
        else:
            self.boxsize = boxsize
        self.seed = seed
        # NOTE(review): seeds numpy's *global* RNG -- a process-wide side effect.
        if self.seed is not None:
            np.random.seed(self.seed)
        self.halo_type = halo_type
        self.sim_type = sim_type
        self.compensated = compensated
        self.mass_cut = mass_cut
        self.rsd = rsd
        self.silent_mode =silent_mode
        if not self.silent_mode:
            setup_logging()
            logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
            # create logger
            self.logger = logging.getLogger('MockLim')
            self.logger.info('Starting')
        ## Survey parameters
        self.survey_params = survey_params
        self.noise_pk = noise_pk
        self.fine_Nmesh = fine_Nmesh
        # Comoving volume of a voxel in (Mpc/h)^3
        self.fine_vol_vox = np.product(self.boxsize) / np.product(np.product(self.fine_Nmesh))
        # Load halos on an Nbodykit catalog
        if self.halo_file is not None:
            self._load_halos()
        else:
            if not self.silent_mode:
                self.logger.warning("Not making the LIM map, if you would like so set the basepath for the simualtion.")
        # Mesh resolution follows the survey's spectral (parallel) and
        # angular (perpendicular) resolutions; subclasses define both.
        self.res_par = self.get_res_par()
        self.res_perp = self.get_res_perp()
        self.Nmesh = [np.around(self.boxsize[0]/(self.res_perp)),
                      np.around(self.boxsize[1]/(self.res_perp)),
                      np.around(self.boxsize[2]/(self.res_par))]
        self.vol_vox = np.product(self.boxsize) / np.product(np.product(self.Nmesh))
        if not self.silent_mode:
            self.logger.info('LIM :')
            self.logger.info('resolution : perp= %s, par= %s, boxsize=%s', self.res_perp, self.res_par, self.boxsize)
            self.logger.info('Nmesh = %s', str(self.Nmesh))
        # Build the map right away when a halo catalog is available; the raw
        # catalog is dropped afterwards to free memory.
        if self.halo_file is not None:
            self.get_lim_map()
            del self.halos
def get_res_par(self):
"""Calculate the spatial resolution along the lne-of-sight in units of comoving Mpc/h
freq_res is the frequncy resolution of the survey."""
raise NotImplementedError
def get_res_perp(self):
"""Calculate the spatial resolution in transverse direction to the line-of-sight in
comoving Mpc/h.
The angular resolution of the survey is in arcminutes"""
if not 'angular_res' in self.survey_params:
self.survey_params['angular_res'] = 2*self.survey_params['beam_fwhm']/np.sqrt(8*np.log(2))
return cosmo.h*cosmo.comoving_distance(z=self.z).value*self.survey_params['angular_res']*np.pi/(180*60)
def get_halo_luminosity(self):
""" A function which takes halo masses and returns the luminosity of each in L_solar.
Should be overwritten by the function in each instance of this class.
"""
raise NotImplementedError
def get_lim_map(self):
raise NotImplementedError
def get_voxel_luminosity(self):
"""
Calculate the 3D CO luminotity map on a unifrom grid of self.fine_Nmesh.
Returns :
numpy array shape=self.fine_Nmesh: CO luminosity map in L_sun unit
"""
halo_lum = self.get_halo_luminosity()
# CIC interpolation : NOT SURE THIS IS THE BEST CHOICE
voxel_lum_mesh = self.do_cic(q=halo_lum, qlabel='halo_lum')
return voxel_lum_mesh
    def do_cic(self, q, qlabel):
        """Similar to LyTomo_watershed.get_Density()
        CIC a sfr_type on a regular mesh grid.

        q : The quantity to paint on mesh (one value per entry of self.halo_id)
        qlabel : column name under which q is stored on the halo catalogue

        Returns the nbodykit mesh of q deposited with cloud-in-cell on
        self.fine_Nmesh; the default per-mean normalization is undone below.
        """
        if self.halos['Coordinates'].size ==0:
            raise IOError('No halos on Rank '+str(self.rank))
        if self.rsd:
            self._apply_rsd()
        # Scatter q into a full-length column: halos without an entry in
        # self.halo_id keep q = 0 and so contribute nothing to the map.
        qtemp = np.zeros(shape=(self.halos['Coordinates'][:].shape[0],))
        qtemp[self.halo_id] = q
        self.halos[qlabel] = qtemp
        if not self.silent_mode:
            self.logger.info('self.fine_Nmesh = %s', self.fine_Nmesh)
            #self.logger.info('Max Coord ', np.max(self.halos['Coordinates'].compute()))
            #self.logger.info('BoxSize :', self.boxsize)
            self.logger.info('compensated = %s', self.compensated)
        assert(np.all(qtemp >= 0))
        mesh = self.halos.to_mesh(Nmesh=self.fine_Nmesh, position='Coordinates', value=qlabel,
                                  BoxSize=self.boxsize, compensated=self.compensated)
        # Converts the value-weighted mesh from units of q/<q> per cell back to q.
        correc_fac = self.halos.csize / np.prod(self.fine_Nmesh)
        if not self.silent_mode:
            self.logger.info('q/<q> to q correction factor = %s', correc_fac)
        delta_to_value = lambda x,y : y*correc_fac
        mesh = mesh.apply(delta_to_value, kind='index', mode='real')
        #assert (np.all(mesh.compute() >= 0))
        return mesh
def _apply_rsd(self):
"""Apply Redshift space distortion, for sub/halos the velocities are in km/s"""
if not self.silent_mode:
self.logger.info('applying rsd')
# Note: in Subfind, halo velocities do not have the extra sqrt(a) fcator
if self.halo_type=='Subhalo':
self.halos['Coordinates'] = (self.halos['Coordinates']+
(self.halos[self.halo_type+'Vel']*self.sight_vec*
cosmo.h/cosmo.H(self.z).value))%self.boxsize
elif self.halo_type=='Group':
## I need to add a new formula since the vel is in km/s/a units
raise NotImplemented
if not self.silent_mode:
self.logger.info('RSD applied')
    def _load_halos(self):
        """Load the subhalo catalogue using nbodykit.
        The catalogue is not loaded into memory since it uses Dask.

        Side effects: sets self.z (from the file header) and self.halos (an
        HDFCatalog, lazily filtered by mass cut and optional sub-box range).
        """
        if not self.silent_mode:
            self.logger.info('Loading Halo Catalogue')
        # self.halo_file is a glob pattern; read the redshift from the first match.
        fn = glob.glob(self.halo_file)[0]
        with h5py.File(fn,'r') as f:
            self.z = f['Header'].attrs['Redshift']
        self.halos = HDFCatalog(self.halo_file, dataset=self.halo_type)
        if not self.silent_mode:
            self.logger.info('Halo Cat is loaded ')
        # Some unit corrections are needed for each simulation
        if self.sim_type=='MDPL2':
            # bring MDPL2 masses to the 1e10 M_sun/h convention used below
            self.halos['SubhaloMass'] = self.halos['SubhaloMass']/1e10
        if self.sim_type=='TNG' or self.sim_type == 'ASTRID':
            # for TNG convert ckpc/h to cMpc/h
            self.halos['Coordinates'] = self.halos['SubhaloPos']/1e3
        if self.mass_cut is not None:
            # mass_cut is log10(M/[M_sun/h]); catalogue masses are in 1e10 M_sun/h
            ind = da.greater(self.halos['SubhaloMass'] , 10**(self.mass_cut-10))
            self.halos = self.halos[ind]
        if self.brange is not None:
            # keep halos inside [xmin,xmax] x [ymin,ymax] x [zmin,zmax]
            ind = self.halos['Coordinates'] >= np.array([[ self.brange[0],self.brange[2], self.brange[4] ]])
            ind *= self.halos['Coordinates'] <= np.array([[ self.brange[1],self.brange[3], self.brange[5] ]])
            ind = da.prod(ind, axis=1, dtype=bool)
            self.halos = self.halos[ind]
    def _load_sfr(self):
        """Read the sfr of the subhalos.
        Depending on the sfr_type passed to it, it either uses the mean
        SFR or the instantaneous SFR.

        Side effects: sets self.sfr and self.halo_id (indices of the halos
        the SFR values belong to).
        """
        # Look at the mean SFR table
        if self.Li16_params['sfr_type'][0:7]=='SFR_Msu':
            # NOTE(review): this branch indexes the file with self.sfr_type while
            # the test above reads self.Li16_params['sfr_type'] — confirm both
            # attributes stay in sync.
            with h5py.File(self.sfr_file,'r') as f:
                self.sfr = f['Snapshot_'+str(self.snap)][self.sfr_type][:]
                self.halo_id = f['Snapshot_'+str(self.snap)]['SubfindID'][:]
        # Look at the instantaneous SFR in subhalos
        elif self.Li16_params['sfr_type'] == 'SubhaloSFR':
            self.sfr = self.halos['SubhaloSFR'].compute()
            self.halo_id = np.arange(self.sfr.size)
            if self.mass_cut is not None:
                ind = np.where(self.halos['SubhaloMass'] >= 10**(self.mass_cut-10))
                self.sfr = self.sfr[ind]
                self.halo_id = self.halo_id[ind]
        # Look at the instantaneous SFR in halos
        elif self.Li16_params['sfr_type'] == 'GroupSFR':
            self.sfr = self.halos['GroupSFR'].compute()
            self.halo_id = np.arange(self.sfr.size)
        # Look at the average SFR -vs halo masss from Behroozi+13
        elif self.Li16_params['sfr_type'] == 'behroozi+13':
            self.sfr = self.sfr_behroozi()
            self.halo_id = np.arange(self.sfr.size)
            if self.mass_cut is not None:
                ind = np.where(self.halos['SubhaloMass'].compute() >= 10**(self.mass_cut-10))
                self.sfr = self.sfr[ind]
                self.halo_id = self.halo_id[ind]
        else:
            raise NameError("Wrong sfr_type")
    def sfr_behroozi(self):
        """Get the sfr for halos from average sfr-DM halo Mass relation from behroozi+13.
        This is the method adopted by Chung+18 and Li+16 for making mock CO LIM.

        Returns:
            SFR per halo (linear units), 10**interp(log10 M_halo [M_sun]).
        """
        intp = self._sfr_behroozi_interpolator()
        # Masses in TNG are in M_sol /h but in Behroozi are in M_sol:
        # +10 converts the catalogue's 1e10 M_sun/h unit to log10(M_sun/h),
        # and -log10(h) removes the h.
        hmass = 10+np.log10(self.halos[self.halo_type+'Mass'].compute())-np.log10(cosmo.h)
        return 10**(intp(hmass))
def _sfr_behroozi_interpolator(self):
"""Set up the 2D interpol"""
from scipy.interpolate import interp1d
#bh = self._load_behroozi_data(zrange=(self.z-0.05, self.z+0.05))
bh = self._load_behroozi_data()
intp = interp1d(bh[:,1], bh[:,2], kind='linear', fill_value='extrapolate')
return intp
def _load_behroozi_data(self, zrange=None):
if self.Li16_params['behroozi_avsfr'] is None:
raise IOError("pass the path for behroozi+13 average sfr-vs- halo mass relation")
bh = np.loadtxt(self.Li16_params['behroozi_avsfr'])
bh[:,0] -= 1
zdiff = np.abs(bh[:,0] - self.z)
ind = np.where(zdiff==zdiff.min())[0]
return bh[ind,:]
    def upsample(self, co_temp, method='linear', final_shape = (205,205,205)):
        """Upsample (increase the resolution) for the CO map by interpolation. It is not used for
        power spectrum calculations.
        Parameters:
        ----------------------------
        co_temp : The low res map
        method : Either 'linear' or 'nearest'
        final_shape : The high-res map's shape
        Returns :
        -------------------
        The high-res map
        """
        from scipy.interpolate import RegularGridInterpolator
        init_shape = co_temp.shape
        # Coordinates of the coarse grid, stretched to span the fine grid's range.
        x, y, z = (np.arange(init_shape[0])*final_shape[0]/(init_shape[0]-1),
                  np.arange(init_shape[1])*final_shape[1]/(init_shape[1]-1),
                  np.arange(init_shape[2])*final_shape[2]/(init_shape[2]-1))
        intp = RegularGridInterpolator((x, y, z), co_temp, method=method)
        # NOTE(review): np.meshgrid defaults to 'xy' indexing, which swaps the
        # first two axes; the transpose at the end compensates. This looks
        # correct only when final_shape[0] == final_shape[1] — confirm before
        # using non-square transverse shapes.
        xu, yu, zu = np.meshgrid(np.arange(final_shape[0]),
                                 np.arange(final_shape[1]),
                                 np.arange(final_shape[2]))
        del co_temp
        coords = np.zeros(shape=(xu.shape[0]*xu.shape[1]*xu.shape[2], 3 ))
        coords[:,0] = xu.ravel()
        coords[:,1] = yu.ravel()
        coords[:,2] = zu.ravel()
        co_temp_u = intp(coords)
        co_temp_u = co_temp_u.reshape(final_shape)
        co_temp_u = np.transpose(co_temp_u, axes=[1,0,2])
        return co_temp_u
| 16,797 | 45.531856 | 134 | py |
lila | lila-main/src/lila/mock_galaxy.py | import os
import glob
import copy
from matplotlib.pyplot import axis
from pandas import to_timedelta
import h5py
import numpy as np
import dask.array as da
from astropy import constants as const
from astropy.cosmology import Planck15 as cosmo
from nbodykit import setup_logging
import logging
import logging.config
from nbodykit.lab import ArrayMesh
from nbodykit.lab import HDFCatalog
from nbodykit import CurrentMPIComm
import lim_lytomo
from lim_lytomo import git_handler
class MockGalaxy():
    """A mock galaxy (number-density) survey map built from a simulation.

    Fix: the class docstring previously described a 'mock Line Intensity Map',
    which is what the sibling MockLim class builds, not this one.
    """
    def __init__(self, snap, axis=3, basepath=None, boxsize=None, brange =None, Rz= 0.02,
                 seed=None, halo_type='Subhalo', sim_type= 'TNG', compensated=True, map_path=None,
                 mass_cut=11+np.log10(5), num_maps=1, sampling_rate =1, rsd= True, silent_mode=True, save_path=None):
        """
        Generate a mock galaxy density from a hydro/DM-only simulation.
        The default arguments are for COMAP 2018 (Chung+18, arxiv:1809.04550)
        Parameters:
        ---------------------------
        snap : Snapshot id, int
        axis : Either 1 or 2 or 3 indicating the line of sight axis, x or y or z
        basepath: The path to the directory of postprocessed data of the simulations
        brange: (array_like, shape=(6,)) [xmin,xmax,ymin,ymax,zmin,zmax], subbox coordinates
                in cMpc/h of the simulation to make mock for
        boxsize : int or tuple of 3 ints
                Simulation box size in comoving cMpc/h
        Rz : float, redshift accuracy, i.e. sigma_z/(1+z), of the galaxy survey.
        seed : Random seed to sample for redshift uncertainty of galaxies
        halo_type : Either 'Subhalo' or 'Group'
        sim_type : str, simulation type either 'TNG' or 'MDPL2'
        compensated: If True, makes the correction in the fourier space of the field interpolated
                    by CIC.
        map_path : optional path to a precomputed galaxy map (hdf5); skips halo loading.
        mass_cut : log of min subhalo mass in M_sol/h units, Default is ~ 11.7. If not None, only halos with
                    log_10(M_halo) > mass_cut [M_sol/h] are considered
        rsd : whether to apply redshift space distortion along line-of-sight
        save_path : optional hdf5 path; if given, the computed map is written there.
        """
        # MPI
        self.MPI = CurrentMPIComm
        self.comm = self.MPI.get()
        self.rank = self.comm.Get_rank()
        self.size = self.comm.Get_size()
        self.snap = snap
        self.axis= axis
        # unit vector along the line of sight
        self.sight_vec = [0,0,0]
        self.sight_vec[int(axis-1)] = 1
        self.brange = brange
        self.halos = None
        self.halos_base = None
        self.groups = {}
        self.halo_id = []
        self.basepath = basepath
        if self.basepath is None:
            self.halo_file = None
        else :
            self.halo_file = os.path.join(basepath, 'groups_'+str(self.snap)+'/fof*')
        self.map_path = map_path
        if boxsize is None:
            # derive the box size from the requested sub-box range
            self.boxsize = (brange[1]-brange[0],
                            brange[3]-brange[2],
                            brange[5]-brange[4])
        else:
            self.boxsize = boxsize
        self.seed = seed
        if self.seed is not None:
            np.random.seed(self.seed)
        self.halo_type = halo_type
        self.sim_type = sim_type
        self.compensated = compensated
        self.mass_cut = mass_cut
        self.num_maps = num_maps
        self.sampling_rate = sampling_rate
        self.rsd = rsd
        self.silent_mode =silent_mode
        if not self.silent_mode:
            setup_logging()
            logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
            # create logger
            self.logger = logging.getLogger('MockGal')
            self.logger.info('Starting')
        self.map = None
        self.perfect_map = None
        ## Survey parameters
        self.Rz = Rz
        self.fine_res_par = 0.25
        self.fine_res_perp = 0.25
        # Set the grid numbers based on the spatial resolution of the map
        self.fine_Nmesh = [np.around(self.boxsize[0]/(self.fine_res_perp)),
                           np.around(self.boxsize[1]/(self.fine_res_perp)),
                           np.around(self.boxsize[2]/(self.fine_res_par))]
        # Make maps :
        if self.map_path is not None:
            if not self.silent_mode:
                self.logger.info("Loading a precomputed galaxy map: ")
                self.logger.info(map_path)
            self.map_file = h5py.File(self.map_path,'r')
            self.map = self.map_file['map'][:]
            # Bug fix: the original assert compared shape[0] to fine_Nmesh[0]
            # three times; verify all three axes instead.
            assert all(self.map.shape[i] == self.fine_Nmesh[i] for i in range(3))
            self.map = ArrayMesh(self.map, BoxSize=self.boxsize)
            self.galaxy_count = self.map_file['galaxy_count'][()]
            self.z = self.map_file['z'][()]
            self.res_par = self.get_res_par()
            self.res_perp = self.get_res_perp()
        else:
            # Load halos on an Nbodykit catalog
            self._load_halos()
            #self.halos_base = copy.deepcopy(self.halos)
            self.galaxy_count = self.halos.csize
            self.res_par = self.get_res_par()
            self.res_perp = self.get_res_perp()
            # First get the perfect map (Rz=0) and then the mock map. Note: Only apply rsd for the first time since
            # it displaces the halos the first time.
            self.map = self.get_galaxy_map(Rz=0, rsd=True)
            if save_path is not None:
                if not self.silent_mode:
                    self.logger.info('Saving the gal map on %s', save_path)
                self.save_map(save_path)
            """
            for i in range(self.num_maps):
                self.map = self.get_galaxy_map(Rz=0, rsd=True)
                if save_path is not None:
                    if self.sampling_rate != 1:
                        if i== 0:
                            save_path_old = save_path
                            save_path = save_path[:-5]+'_n0.hdf5'
                        else:
                            # Realod the halos, to avoid applying multiple
                            # random smapling
                            self._load_halos()
                            save_path = save_path_old[:-6]+str(i)+'.hdf5'
                    self.logger.info('Saving the gal map on %s', save_path)
                    self.save_map(save_path)
            """
            del self.halos
        if not self.silent_mode:
            self.logger.info('resolution : perp= %s, par= %s, cMcp/h, boxsize=%s', self.fine_res_perp, self.fine_res_par, self.boxsize)
    def get_res_par(self):
        """Calculate the spatial resolution along the line-of-sight in units of comoving Mpc/h.
        It is the redshift uncertainty for the galaxy survey.
        """
        delta_z = self.Rz*(1+self.z)
        delta_d = (const.c.to('km/s')*delta_z/cosmo.H(z=self.z)).value
        # In units of cMpc/h :
        delta_d *= cosmo.h
        if not self.silent_mode:
            self.logger.info('sigma_z/(1+z) : %s', self.Rz)
            self.logger.info('delta_d : %s', delta_d)
        return delta_d
    def get_res_perp(self):
        """Calculate the spatial resolution in transverse direction to the line-of-sight in
        comoving Mpc/h. It is 0 for galaxy surveys.
        """
        return 0
    def get_galaxy_map(self, Rz=None, mesh=True, rsd=True):
        """Calculate the 3D galaxy overdensity map.
        Returning the cloud-in-cell interpolated (CIC) galaxy overdensity : (n / <n>) - 1
        Be careful : We run this method for perfect_map first (i.e. mock=False) and then for the mock map (i.e. mock=True) in
        the __init__() method
        Parameters:
        ------------------------
        mesh : bool. If True, return the map as a pmesh object, otherwise, return
               the map as a numpy array without storing the result.
        Returns : If mesh=False, An array of galaxy overdensity with shape=(Nmesh*Nmesh*Nmesh), Otherwise
                  the lazy mesh object.
        """
        if Rz is None:
            Rz = self.Rz
        if self.sampling_rate != 1:
            # sample the halos
            ind_rand = da.random.randint(0, self.halos.size,
                                         size=int(self.sampling_rate*self.halos.size))
            selection = np.zeros(shape=(self.halos.csize,), dtype=bool)
            selection[ind_rand] = True
            self.halos['Selection'] = selection
        gal_density = self.do_CIC(Rz, rsd=rsd)
        # n/<n>  ->  (n/<n>) - 1
        to_delta = lambda x,y : y-1
        gal_density = gal_density.apply(to_delta, kind='index', mode='real')
        if mesh:
            return gal_density
        else:
            return gal_density.compute(Nmesh=self.fine_Nmesh)
    def do_CIC(self, Rz, rsd):
        """Similar to LyTomo_watershed.get_Density()
        Deposit the halos on a grid with cloud-in-cell method.
        """
        if self.halos['Coordinates'].size ==0:
            raise IOError('No halos on Rank '+str(self.rank))
        # Apply redshift space distortion
        # If you have calculated the perfect map, you had applied this before, so
        # do not apply it anymore
        if rsd:
            self._apply_rsd()
        # Apply an scatter due to redshift uncertainties
        if Rz > 0 :
            self._apply_redshift_uncertainty(Rz)
        mesh = self.halos.to_mesh(Nmesh=self.fine_Nmesh, position='Coordinates',
                                  BoxSize=self.boxsize, compensated=self.compensated,
                                  selection='Selection')
        return mesh
    def _apply_rsd(self):
        """Apply Redshift space distortion, for sub/halos the velocities are in km/s.
        Note: in AREPO, halo velocities do not have the extra sqrt(a) factor.
        """
        if not self.silent_mode:
            self.logger.info('applying rsd')
        self.halos['Coordinates'] = (self.halos['Coordinates']+
                                     (self.halos[self.halo_type+'Vel']*self.sight_vec*
                                      cosmo.h/cosmo.H(self.z).value))%self.boxsize
        if not self.silent_mode:
            self.logger.info('RSD applied')
    def _load_halos(self):
        """Load the subhalo catalogue using nbodykit.
        The catalogue is not loaded into memory since it uses Dask.
        """
        # NOTE: halos_base is only ever None here (the deepcopy that would set
        # it is commented out in __init__), so the else branch always runs.
        if self.halos_base is not None:
            del self.halos
            self.halos = self.halos_base
        else:
            if not self.silent_mode:
                self.logger.info('Loading Halo Catalogue')
            fn = glob.glob(self.halo_file)[0]
            with h5py.File(fn,'r') as f:
                self.z = f['Header'].attrs['Redshift']
            self.halos = HDFCatalog(self.halo_file, dataset=self.halo_type)
            if not self.silent_mode:
                self.logger.info('Halo Cat is loaded ')
            # Some unit corrections are needed for each simulation
            if self.sim_type=='MDPL2':
                self.halos['SubhaloMass'] = self.halos['SubhaloMass']/1e10
            if self.sim_type=='TNG' or self.sim_type=='ASTRID':
                # for TNG convert ckpc/h to cMpc/h
                self.halos['Coordinates'] = self.halos['SubhaloPos']/1e3
            if self.mass_cut is not None:
                if not self.silent_mode:
                    self.logger.info('Filtering halos for M_h > 10^ %s', self.mass_cut)
                ind = da.greater(self.halos['SubhaloMass'] , 10**(self.mass_cut-10))
                if not self.silent_mode:
                    self.logger.info('found the halos > mass_cut')
                self.halos = self.halos[ind]
                if not self.silent_mode:
                    self.logger.info('Halos are filtered')
            if self.brange is not None:
                ind = self.halos['Coordinates'] >= np.array([[ self.brange[0],self.brange[2], self.brange[4] ]])
                ind *= self.halos['Coordinates'] <= np.array([[ self.brange[1],self.brange[3], self.brange[5] ]])
                ind = da.prod(ind, axis=1, dtype=bool)
                self.halos = self.halos[ind]
    def save_map(self,save_path):
        """Save the galaxy map on an hdf5 file for later usage."""
        with h5py.File(save_path,'w') as fw:
            if not self.silent_mode:
                self.logger.info('Computing the mesh density')
            fw['map'] = self.map.compute(mode='real', Nmesh = self.fine_Nmesh)
            fw['galaxy_count'] = self.galaxy_count
            fw['z'] = self.z
            # Save the commit hash of the code used for this run
            # when reading this convert it to a list with :
            # `f['Git'].attrs['HEAD_HASH'].tolist()`
            fw.create_group('Git')
            head_hash = git_handler.get_head_hash(lim_lytomo)
            fw['Git'].attrs["HEAD_HASH"] = np.void(head_hash)
| 13,425 | 43.310231 | 138 | py |
lila | lila-main/src/lila/tests/test_lim.py | import numpy as np
import h5py
import importlib
from os import system
from lim_lytomo import lim
from nbodykit.lab import *
import dask.array as da
def make_fake_sim(boxsize):
    """Write a tiny fake Subfind group catalogue under ./groups_001/.

    Builds a 5x5x5 lattice of subhalo positions spanning `boxsize` (cMpc/h),
    scaled by 1000 to mimic the ckpc/h units used by TNG catalogues, and
    stores them in Subhalo/SubhaloPos of groups_001/fof_n1.hdf5.
    """
    import os
    coords_1d = np.linspace(0, 1, 5)*boxsize*1000
    x, y, z = np.meshgrid(coords_1d, coords_1d, coords_1d)
    parts = np.zeros((coords_1d.size**3, 3))
    parts[:,0] = x.ravel()
    parts[:,1] = y.ravel()
    parts[:,2] = z.ravel()
    # Fix: replace the shell call `system(' mkdir ./groups_001')` (non-portable,
    # and it errors if the directory already exists) with os.makedirs.
    os.makedirs('./groups_001', exist_ok=True)
    with h5py.File('./groups_001/fof_n1.hdf5','w') as fw:
        fw['Subhalo/SubhaloPos'] = parts
def get_Mock_lim(compensated=True, sfr_type='SubhaloSFR', mass_cut=10,
                 co_model='chung+21', angular_res=4, refine_fac=10, sim_type='TNG', boxsize=None, brange=None):
    """Build a lim.MockLim instance over the fake catalogue from make_fake_sim().

    Parameters mirror lim.MockLim; a scalar `boxsize` is broadcast to the three
    axes. Returns the constructed MockLim object.
    """
    from astropy.cosmology import Planck15 as cosmo
    from lim_lytomo import stats
    from lim_lytomo import mock_lya
    from lim_lytomo import lim
    importlib.reload(stats)
    importlib.reload(lim)
    importlib.reload(mock_lya)
    if boxsize is not None:
        boxsize = [boxsize,boxsize,boxsize]
    # Bug fix: the `sim_type` argument was silently ignored — the call below
    # always hard-coded sim_type='TNG'. Forward the parameter instead
    # (default is unchanged, so existing callers behave the same).
    MockLim = lim.MockLim(snap=1, z=2.4442257045541464, axis=3,
                          basepath='./', boxsize=boxsize, brange=brange,
                          co_model=co_model, sfr_type=sfr_type, halo_type='Subhalo',
                          compensated=compensated, refine_fac=refine_fac, sim_type=sim_type,
                          behroozi_avsfr=None, angular_res=angular_res,
                          mass_cut=mass_cut, silent_mode=True, rsd=False)
    return MockLim
def test_load_halos(MockLim):
return 0
def test_do_CIC(MockLim):
"""Not working yet, q needs the cat shape"""
mesh = MockLim.do_CIC(q=np.ones(MockLim.halos['Coordinates'].shape[0]), qlabel='1')
return mesh
def test_co_temp(MockLim):
return 0
if __name__ == '__main__':
    # Smoke test: build a fake lattice catalogue, then check that sub-box
    # selection keeps exactly the expected number of lattice sites.
    boxsize= 1 # in cMpc/h
    make_fake_sim(boxsize)
    mock_full = get_Mock_lim(boxsize=boxsize)
    for b in [2,3]:
        mock_subbox = get_Mock_lim(brange=[0, .25*b, 0, .25*b,0, .25*b])
        # sites with every coordinate <= .25*b, cubed
        c = np.where(np.linspace(0,1,5) <= .25*b)[0].size**3
        # Fix: dropped the extraction junk ("| 2,142 | ...") that had been
        # fused onto the end of this assert line.
        assert mock_subbox.halos['Coordinates'].compute().shape[0] == c
lila | lila-main/helper_scripts/get_gal.py | # Helper functions to get stats for one mock
import argparse
from os.path import join
import numpy as np
from lim_lytomo import stats
from lim_lytomo import comap, mock_galaxy
from os.path import join
## Specifics of the simulation
boxsize= [250,250,250]  # box side lengths in cMpc/h
seed = 13               # RNG seed for reproducible mocks
snap='z2.5'             # snapshot label
basepath='/rhome/mqezl001/bigdata/ASTRID/subfind/'
## Specifics of the mocks
k_par_min= None         # no cut on line-of-sight k modes
co_model = 'COMAP+21'
# Li+16 CO model parameters; SFR taken from the Behroozi+13 average relation
Li16_params={'alpha':1.17, 'beta':-0.21, 'sigma_co':0.37, 'delta_mf':1,
        'behroozi_avsfr':'/rhome/mqezl001/bigdata/LIM/behroozi+13/sfr_release.dat',
        'sfr_type':'behroozi+13'}
# COMAP ES (2021) L(M) model parameters
COMAP21_params={'A':-3.71, 'B':0.41, 'C':10**10.8, 'M':10**12.5, 'sigma_co':0.371}
def get_stats(Rz, savefile, gal_map_path, mass_cut):
    """Build a mock galaxy map and a mock CO map, cross them, and save stats.

    Rz : redshift accuracy sigma_z/(1+z) of the galaxy survey
    savefile : output hdf5 path for the computed statistics
    gal_map_path : precomputed galaxy map to load into MockGalaxy
    mass_cut : log10 minimum halo mass [M_sun/h] for the galaxy sample
    """
    print(gal_map_path)
    gal_mock = mock_galaxy.MockGalaxy(snap=snap, axis=3, basepath=basepath,
                boxsize=boxsize, halo_type='Subhalo', map_path= gal_map_path,
                silent_mode=False, seed=seed, Rz=Rz, mass_cut=mass_cut)
    # the CO map is built on the same fine mesh as the galaxy map
    lim_mock = comap.MockComap(snap=snap, axis=3, basepath=basepath, fine_Nmesh=gal_mock.fine_Nmesh,
                boxsize=boxsize, co_model=co_model, Li16_params=Li16_params, COMAP21_params=COMAP21_params,
                halo_type='Subhalo', silent_mode=False, seed=seed)
    st = stats.Stats(mock_lim=lim_mock, mock_galaxy=gal_mock, vol_ratio=1, k_par_min=k_par_min)
    st.get_lim_gal_sn()
    st.save_stat(savefile)
if __name__ =='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--Rz', type=float, required=True, help='')
    parser.add_argument('--n', type=int, required=False, help='iteration')
    args = parser.parse_args()
    # Each supported Rz value maps to its stat file, galaxy map, and mass cut.
    if args.Rz == 7e-4:
        # Bug fix: the original had the same `if args.Rz == 7e-4:` nested twice;
        # the redundant inner test is removed.
        savefile = 'astrid_Rz7e-4_stats_masscut11.9_gaussianized_peak_kcut0.03_n'+str(args.n)+'.hdf5'
        gal_map_path = './gal_map_0.25_z2.4_masscut11.9_n'+str(args.n)+'.hdf5'
        mass_cut = 11.9
    if args.Rz == 0.015:
        #savefile = 'astrid_Rz5e-4_stats_masscut11.51.hdf5'
        #gal_map_path = './gal_map_0.25_z2.4_masscut11.51.hdf5'
        #mass_cut = 11.51
        savefile = 'astrid_Rz015_stats_masscut12.1149_test.hdf5'
        gal_map_path = './gal_map_0.25_z2.4_masscut12.1149.hdf5'
        mass_cut = 12.1149
    if args.Rz == 0.02:
        savefile = './B-0.42/astrid_Rz02_stats_masscut11.51_gaussianized_peak.hdf5'
        gal_map_path = 'gal_map_0.25_z2.4_masscut11.51.hdf5'
        mass_cut = 11.51
    elif args.Rz == 0.03:
        savefile = './B-0.42/astrid_Rz03_stats_masscut11.21_gaussianized_peak.hdf5'
        gal_map_path = 'gal_map_0.25_z2.4_masscut11.21.hdf5'
        mass_cut = 11.21
    elif args.Rz == 0.04:
        savefile = './B-0.42/astrid_Rz04_stats_masscut11.31_gaussianized_peak.hdf5'
        gal_map_path = 'gal_map_0.25_z2.4_masscut11.31.hdf5'
        mass_cut = 11.31
    elif args.Rz == 0.06:
        savefile = './B-0.42/astrid_Rz06_stats_masscut11.01_gaussianized_peak.hdf5'
        gal_map_path = './gal_map_0.25_z2.4_masscut11.01.hdf5'
        mass_cut = 11.01
    elif args.Rz == 0.09:
        savefile = 'astrid_Rz09_stats_masscut10_test.hdf5'
        gal_map_path = './gal_map_0.25_z2.4_masscut10.hdf5'
        mass_cut = 10
    elif args.Rz == 0.2:
        savefile = 'astrid_Rz0.2_stats_masscut10.5_test.hdf5'
        # Bug fix: this branch previously assigned `Gal_map_path` (capital G),
        # so the call below crashed with NameError for Rz == 0.2.
        gal_map_path = './gal_map_0.25_z2.4_masscut10.5.hdf5'
        mass_cut = 10.5
    get_stats(Rz=args.Rz, savefile=savefile, gal_map_path=gal_map_path, mass_cut=mass_cut)
| 3,474 | 38.942529 | 123 | py |
lila | lila-main/helper_scripts/get_latis_source_pk.py | import numpy as np
import h5py
from nbodykit.lab import HDFCatalog
from nbodykit.lab import FFTPower
# Compute the 2D (k, mu) power spectrum of the LATIS source catalogue and save
# it, together with the raw 3D power, to LATIS_source_power.hdf5.
cat = HDFCatalog('./LATIS_source_catalog.hdf5', dataset='source')
print(cat['Coordinates'].shape)
# LATIS survey sub-volume (cMpc/h) and mesh dimensions per axis
boxsize = [94,54,484]
Nmesh = [19, 11, 95]
mesh = cat.to_mesh(Nmesh=Nmesh, position='Coordinates',
                    BoxSize=boxsize, compensated=True)
print(mesh.compute().shape)
# 2D power spectrum, line of sight along z
fftpow = FFTPower(mesh, mode='2d', BoxSize=boxsize, kmin=0, kmax=1,dk=0.03, Nmu=6,
                los=[0,0,1], save_3d_power=True)
power = fftpow.power
k, mu, pkmu = power['k'], power['mu'], power['power']
pk3d = fftpow.pk3d
with h5py.File('LATIS_source_power.hdf5','w') as fw:
    fw['k'] = k
    fw['mu'] = mu
    fw['pkmu'] = pkmu
    fw['pk3d'] = pk3d
    fw['x'] = np.squeeze(fftpow.x3d[0])
    fw['y'] = np.squeeze(fftpow.x3d[1])
    # Fix: dropped the extraction junk ("| 858 | ...") that had been fused
    # onto the end of this final line.
    fw['z'] = np.squeeze(fftpow.x3d[2])
lila | lila-main/helper_scripts/get_lya.py | # Helper functions to get stats for one mock
import argparse
from os.path import join
import numpy as np
from lim_lytomo import stats
from lim_lytomo import comap, mock_lya
from os.path import join
## Specifics of the simulation
boxsize= [250]*3   # box side lengths in cMpc/h
seed = 13          # RNG seed for reproducible mocks
snap='z2.5'        # snapshot label; set to None to skip the CO mock
#snap = None
basepath='/rhome/mqezl001/bigdata/ASTRID/subfind/'
spec_file = '/rhome/mqezl001/bigdata/ASTRID/maps/spectra_ASTRID_noiseless_z2.5_1000_voxels.hdf5'
noiseless_file = '/rhome/mqezl001/bigdata/ASTRID/maps/map_ASTRID_true_0.25_z2.5.hdf5'
## Specifics of the mocks
k_par_min= None    # no cut on line-of-sight k modes
Nmu = 30           # number of mu bins in the power spectrum
# Optional masking of high column density absorbers; disabled by default
#HCD_mask={'type':'NHI', 'thresh':10**19.7, 'vel_width':200 }
HCD_mask = {'type':None}
co_model = 'COMAP+21'
# Li+16 CO model parameters; SFR taken from the Behroozi+13 average relation
Li16_params={'alpha':1.17, 'beta':-0.21, 'sigma_co':0.37, 'delta_mf':1,
        'behroozi_avsfr':'/rhome/mqezl001/bigdata/LIM/behroozi+13/sfr_release.dat',
        'sfr_type':'behroozi+13'}
# COMAP ES (2021) L(M) model parameters (alternate set kept below for reference)
COMAP21_params={'A':-3.71, 'B':0.41, 'C':10**10.8, 'M':10**12.5, 'sigma_co':0.371}
#COMAP21_params={'A':-2.85, 'B':-0.42, 'C':10**10.63, 'M':10**12.3, 'sigma_co':0.42}
def get_stats(dperp, savefile, source_pk_file=None):
    """Build a mock Lya tomography map (and, if `snap` is set, a mock CO map),
    compute their (cross-)statistics and save them.

    dperp : mean transverse sightline separation of the Lya survey in cMpc/h
    savefile : output hdf5 path for the computed statistics
    source_pk_file : optional precomputed source power spectrum file
    """
    lya_mock = mock_lya.MockLya(noiseless_file=noiseless_file, spec_file=spec_file,
                                source_pk_file= source_pk_file, boxsize=boxsize, dperp=dperp,
                                HCD_mask= HCD_mask, silent_mode=False, transpose=(1,0,2))
    if snap is not None:
        # cross-correlate Lya with the CO intensity map built on the same mesh
        lim_mock = comap.MockComap(snap=snap, axis=3, basepath=basepath, fine_Nmesh=lya_mock.Nmesh,
                    boxsize=boxsize, halo_type='Subhalo', silent_mode=False, seed=seed,
                    Li16_params=Li16_params, COMAP21_params=COMAP21_params, co_model='COMAP+21')
        st = stats.Stats(mock_lim=lim_mock, mock_lya=lya_mock, vol_ratio=1, k_par_min=k_par_min, Nmu=Nmu)
        st.get_lim_lya_sn()
    else:
        # Lya-only statistics
        lim_mock = None
        st = stats.Stats(mock_lim=lim_mock, mock_lya=lya_mock, vol_ratio=1, k_par_min=k_par_min, Nmu=Nmu)
        st.get_lya_sn()
    st.save_stat(savefile)
    print(savefile)
if __name__ =='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--surveytype', type=str, required=True, help='')
    args = parser.parse_args()
    # Per-survey transverse sightline separation (cMpc/h) and output stat file.
    survey_settings = {
        'LATIS': (2.5, './Gaussianized_peak/astrid_LATIS_stats_gaussianized_peak.hdf5'),
        'PFS': (3.7, './Gaussianized_peak/astrid_PFS_stats_gaussianized_peak.hdf5'),
        'eBOSS': (13, './Gaussianized_peak/astrid_eBOSS_stats_gaussianized_peak.hdf5'),
        'DESI': (10, './Gaussianized_peak/astrid_DESI_stats_gaussianized_peak.hdf5'),
    }
    if args.surveytype not in survey_settings:
        # Robustness fix: an unknown survey type previously fell through the
        # if/elif chain and crashed later with a NameError; fail early instead.
        parser.error('unknown --surveytype: ' + args.surveytype)
    dperp, savefile = survey_settings[args.surveytype]
    #source_pk_file = 'astrid_'+args.surveytype+'_Cq.hdf5'
    source_pk_file = None
    get_stats(dperp=dperp, savefile=savefile, source_pk_file=source_pk_file)
| 2,896 | 37.118421 | 108 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/fusion.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import os, random, math
import time
import glob
import numpy as np
import shutil
import torch
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import sys
sys.path.append(os.path.abspath(os.path.join("..", os.getcwd())))
from config import Config
from lib import *
import torch.distributed as dist
from utils import *
from utils.build import *
from lib.model.DSN_v2 import DSNNetV2
parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', add_help=False)
parser.add_argument('--data', type=str, default='/path/to/NTU-RGBD/dataset/', help='data dir')
parser.add_argument('--splits', type=str, default='/path/to/NTU-RGBD/dataset/dataset_splits/@CS', help='data dir')
parser.add_argument('--batch-size', default=16, type=int)
parser.add_argument('--test-batch-size', default=32, type=int)
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--config', help='Load Congfile.')
parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
parser.add_argument('--local_rank', type=int, default=0)
# parser.add_argument('--nprocs', type=int, default=1)
parser.add_argument('--type', default='M',
help='data types, e.g., "M" or "K"')
parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
parser.add_argument('--save_output', action='store_true', help='Save logits?')
parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
parser.add_argument('--resume', default='', help='resume from checkpoint')
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment dir')
parser.add_argument('--seed', type=int, default=123, help='random seed')
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--shuffle', default=False, action='store_true', help='Tokens shuffle')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "adamw"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=5., metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0005,
help='weight decay (default: 0.0005)')
parser.add_argument('--ACCUMULATION-STEPS', type=int, default=0,
help='accumulation step (default: 0.0)')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--mixup-dynamic', action='store_true', default=False, help='')
parser.add_argument('--model-ema', default=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Augmentation parameters
parser.add_argument('--autoaug', action='store_true')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + \
"(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# Vision Transformer
parser.add_argument('--model', type=str, default='deit_tiny_patch16_224')
# * ShuffleMix params
parser.add_argument('--shufflemix', type=float, default=0.2,
help='shufflemix alpha, shufflemix enabled if > 0. (default: 0.0)')
parser.add_argument('--smixmode', type=str, default='sm',
help='ShuffleMix strategies (default: "shufflemix(sm)", Per "sm_v1", "sm_v2", or "sm_v3", "mu_sm")')
parser.add_argument('--smprob', type=float, default=0.3, metavar='ShuffleMix Prob',
help='ShuffleMix enable prob (default: 0.3)')
parser.add_argument('--temporal-consist', action='store_true')
parser.add_argument('--tempMix', action='store_true')
parser.add_argument('--MixIntra', action='store_true')
parser.add_argument('--replace-prob', type=float, default=0.25, metavar='MixIntra replace Prob')
# DTN example sampling params
parser.add_argument('--sample-duration', type=int, default=16,
help='The sampled frames in a video.')
parser.add_argument('--intar-fatcer', type=int, default=2,
help='The sampled frames in a video.')
parser.add_argument('--sample-window', type=int, default=1,
help='Range of frames sampling (default: 1)')
parser.add_argument('--translate', type=int, default=0,
help='translate angle (default: 0)')
# * Recoupling params
parser.add_argument('--distill', type=float, default=0.3, metavar='distill param',
help='distillation loss coefficient (default: 0.1)')
parser.add_argument('--temper', type=float, default=0.6, metavar='distillation temperature')
# * Cross modality loss params
parser.add_argument('--DC-weight', type=float, default=0.5, metavar='cross depth loss weight')
# * Rank Pooling params
parser.add_argument('--frp-num', type=int, default=0, metavar='The Number of Epochs.')
parser.add_argument('--w', type=int, default=4, metavar='The slide window of FRP.')
# * fp16 params
parser.add_argument('--fp16', action='store_true')
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--FusionNet', default=False)
args = parser.parse_args()
args = Config(args)
class FusionModule(nn.Module):
    """Late-fusion wrapper around two pretrained DSNNetV2 branches.

    One branch consumes RGB clips, the other depth clips; each branch is
    restored from its own checkpoint at construction time.  ``forward``
    fuses the two branch outputs by averaging (``fusion='add'``) or by
    element-wise product (any other value).
    """

    def __init__(self, args, fusion='add'):
        super(FusionModule, self).__init__()
        self.fusion = fusion
        self.args = args
        # NOTE(review): return value of build_model() is discarded — presumably
        # called only for its side effects on `args`; confirm against lib/.
        build_model(args)
        self.rgb = DSNNetV2(args, num_classes=args.num_classes, pretrained=args.pretrained)
        self.depth = DSNNetV2(args, num_classes=args.num_classes, pretrained=args.pretrained)
        # args.FusionNet indexes the per-configuration checkpoint table.
        rgb_checkpoint = args.rgb_checkpoint[args.FusionNet]
        self.strat_epoch_r, self.best_acc_r = load_checkpoint(self.rgb, rgb_checkpoint)
        print(f'Best acc RGB: {self.best_acc_r}')
        depth_checkpoint = args.depth_checkpoint[args.FusionNet]
        self.strat_epoch_d, self.best_acc_d = load_checkpoint(self.depth, depth_checkpoint)
        print(f'Best acc depth: {self.best_acc_d}')

    def forward(self, r, d):
        """Return (fused logits, per-sample L2 distance between branch logits).

        `r` is the RGB clip batch, `d` the depth clip batch.
        """
        # args.epoch is pinned to each checkpoint's final epoch before the
        # branch runs — presumably DSNNetV2 reads args.epoch internally
        # (e.g. for an epoch-dependent schedule); confirm in the model code.
        self.args.epoch = self.strat_epoch_r - 1
        (r_x, r_xs, r_xm, r_xl), _ = self.rgb(r)
        self.args.epoch = self.strat_epoch_d - 1
        (d_x, xs, xm, xl), _ = self.depth(d)
        distance = F.pairwise_distance(r_x, d_x, p=2)
        if self.fusion == 'add':
            x = (r_x + d_x) / 2.
        else:
            x = r_x * d_x
        return x, distance
def reduce_mean(tensor, nprocs):
    """Return the mean of *tensor* across all distributed processes as a float.

    Sums the tensor over every process with an all-reduce, divides by the
    process count, and extracts the scalar value.
    """
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    return (reduced / nprocs).item()
def main(args):
    """Distributed evaluation entry point for the RGB-D fusion model.

    Initialises the distributed environment and all RNGs, builds the
    FusionModule plus the validation loader, wraps the model in DDP
    (optionally SyncBN), and runs a single `infer` pass over the
    validation set.
    """
    utils.init_distributed_mode(args)
    print(args)
    # Seed every RNG with a per-rank offset: shuffles differ across processes
    # while the run as a whole stays reproducible.
    seed = args.seed + utils.get_rank()
    np.random.seed(seed)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(seed)
    local_rank = utils.get_rank()
    args.nprocs = utils.get_world_size()
    print('nprocs:', args.nprocs)
    device = torch.device(args.device)
    #----------------------------
    # build function
    #----------------------------
    model = FusionModule(args)
    model = model.to(device)
    valid_queue, valid_sampler = build_dataset(args, phase='valid')
    criterion = build_loss(args)
    if args.SYNC_BN and args.nprocs > 1:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
    model_without_ddp = model.module
    print("param size = %fMB"%utils.count_parameters_in_MB(model))
    # Evaluation only — a single validation pass; metrics come back as a dict
    # of AverageMeter objects.
    valid_dict = infer(valid_queue, model, criterion, local_rank, device)
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, device, epoch=0):
    """Validate the RGB-D fusion model over `valid_queue`.

    Besides cross-entropy and top-1/top-5 accuracy, measures the mean L2
    distance between the summed CLS tokens of the RGB and depth DTN branches,
    captured via forward hooks on the last transformer of each scale.

    Returns the dict of AverageMeter objects with the accumulated metrics.

    Fix over the previous revision: the six forward hooks were re-registered
    on every mini-batch and their handles overwritten, so from the second
    batch on each module carried multiple stacked hooks — `features[0..5]`
    no longer lined up with the intended modules and the hooks leaked.  The
    hooks are now registered once before the loop and removed afterwards.
    """
    model.eval()
    meter_dict = dict(
        CE_loss=AverageMeter(),
    )
    meter_dict.update(dict(
        Acc=AverageMeter(),
        Acc_top5=AverageMeter(),
    ))
    meter_dict['distance'] = AverageMeter()
    meter_dict['Infer_Time'] = AverageMeter()
    CE = torch.nn.CrossEntropyLoss()
    MSE = torch.nn.MSELoss()
    grounds, preds, v_paths = [], [], []
    output = {}

    # The hook closes over this list; it is cleared at each step so indices
    # 0-2 are always the RGB scales and 3-5 the depth scales of this batch.
    features = []

    def hook(module, input, output):
        features.append(output.clone().detach())

    hooked_modules = [
        model.module.rgb.dtn.multi_scale_transformers[0][2],
        model.module.rgb.dtn.multi_scale_transformers[1][2],
        model.module.rgb.dtn.multi_scale_transformers[2][2],
        model.module.depth.dtn.multi_scale_transformers[0][2],
        model.module.depth.dtn.multi_scale_transformers[1][2],
        model.module.depth.dtn.multi_scale_transformers[2][2],
    ]
    handles = [m.register_forward_hook(hook) for m in hooked_modules]
    try:
        for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
            color, depth = inputs
            color, depth, target = map(lambda x: x.to(device, non_blocking=True), [color, depth, target])
            features.clear()  # keep only this batch's six activations
            n = target.size(0)
            end = time.time()
            output, distance = model(color, depth)
            # L2 distance between the summed per-scale CLS tokens of the
            # RGB branch and of the depth branch.
            distance = F.pairwise_distance(
                features[0][:, 0] + features[1][:, 0] + features[2][:, 0],
                features[3][:, 0] + features[4][:, 0] + features[5][:, 0],
                p=2).mean()
            # Metrics are stashed under their meter names in globals() so the
            # generic meter-update loop below can fetch them by name (same
            # pattern as train.py).
            globals()['CE_loss'] = CE(output, target)
            globals()['distance'] = distance.mean()
            meter_dict['Infer_Time'].update((time.time() - end) / n)
            grounds += target.cpu().tolist()
            preds += torch.argmax(output, dim=1).cpu().tolist()
            v_paths += v_path
            torch.distributed.barrier()
            globals()['Acc'], globals()['Acc_top5'] = accuracy(output, target, topk=(1, 5))
            for name in meter_dict:
                if 'Time' not in name:
                    meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
            if step % args.report_freq == 0 and local_rank == 0:
                log_info = {
                    'Epoch': epoch + 1,
                    'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
                            args.test_batch_size * args.nprocs)),
                }
                log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
                print_func(log_info)
    finally:
        # Detach the hooks so repeated calls do not stack them on the model.
        for h in handles:
            h.remove()
    torch.distributed.barrier()
    grounds_gather = concat_all_gather(torch.tensor(grounds).to(device))
    preds_gather = concat_all_gather(torch.tensor(preds).to(device))
    grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
    print(dict([(name, meter_dict[name].avg) for name in meter_dict]))
    return meter_dict
if __name__ == '__main__':
    # Run the distributed fusion evaluation; `args` is parsed at module level.
    try:
        main(args)
    except KeyboardInterrupt:
        # Release cached GPU memory if the user aborts the run.
        # (Fix: stripped dataset-dump residue that was fused onto this line
        # and broke the file's syntax.)
        torch.cuda.empty_cache()
# --- MotionRGBD-PAMI / MotionRGBD-PAMI-main/train.py ---
'''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import time
import glob
import numpy as np
import shutil
import cv2
import os, random, math
import sys
# sys.path.append(os.path.join('..', os.path.abspath(os.path.join(os.getcwd()))) )
from pathlib import Path
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
from timm.optim import create_optimizer
import torch
import utils
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from collections import OrderedDict
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
from utils.visualizer import Visualizer
from config import Config
from lib import *
from utils import *
from timm.utils import get_state_dict #, ModelEma, ModelEmaV2
#------------------------
# evaluation metrics
#------------------------
from sklearn.decomposition import PCA
from sklearn import manifold
import pandas as pd
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
from torchvision.utils import save_image, make_grid
from PIL import Image
from einops import rearrange, repeat
def get_args_parser():
    """Build the ArgumentParser holding every CLI option of the trainer.

    Only constructs and returns the parser (``add_help=False`` so it can be
    used as a parent parser); parsing happens in ``__main__`` where the
    namespace is additionally wrapped by ``config.Config``.

    Fixes over the previous revision: several help strings contradicted the
    actual defaults (--lr, --clip-grad, --cutmix, --translate, --reprob,
    --shufflemix, --distill) and the --aa help text contained a broken
    string-concatenation artifact; defaults themselves are unchanged.
    """
    parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', add_help=False)
    # Data / run layout
    parser.add_argument('--data', type=str, default='/path/to/NTU-RGBD/dataset/', help='data dir')
    parser.add_argument('--splits', type=str, default='/path/to/NTU-RGBD/dataset/dataset_splits/@CS', help='data dir')
    parser.add_argument('--num-classes', default=None)
    parser.add_argument('--batch-size', default=16, type=int)
    parser.add_argument('--test-batch-size', default=32, type=int)
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--config', help='Load Congfile.')
    parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
    parser.add_argument('--local_rank', type=int, default=0)
    # parser.add_argument('--nprocs', type=int, default=1)
    parser.add_argument('--type', default='M',
                        help='data types, e.g., "M" or "K"')
    parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
    parser.add_argument('--save_output', action='store_true', help='Save logits?')
    parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    # * Finetuning params
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment dir')
    parser.add_argument('--seed', type=int, default=123, help='random seed')
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--shuffle', default=False, action='store_true', help='Tokens shuffle')
    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine", "step", "multistep"')
    parser.add_argument('--lr', type=float, default=1e-2, metavar='LR',
                        help='learning rate (default: 1e-2)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                        help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                        help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                        help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    # NOTE(review): type=list applied to a CLI string splits it into single
    # characters; only the default works as intended.  Kept for compatibility.
    parser.add_argument('--decay-milestones', type=list, default=[10, 20, 30], metavar='milestones',
                        help='epoch interval to milestones decay LR, default list[]')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')
    # Optimizer parameters
    parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "sgd"')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=5., metavar='NORM',
                        help='Clip gradient norm (default: 5.0)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.0005,
                        help='weight decay (default: 0.0005)')
    parser.add_argument('--ACCUMULATION-STEPS', type=int, default=0,
                        help='accumulation step (default: 0)')
    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=0.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 0.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--mixup-dynamic', action='store_true', default=False, help='')
    # * Model EMA params (EMA branch doubles as the cross-modality teacher)
    parser.add_argument('--model-ema', action='store_true')
    parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
    parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
    # Augmentation parameters
    parser.add_argument('--autoaug', action='store_true')
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)
    parser.add_argument('--translate', type=int, default=20,
                        help='translate angle (default: 20)')
    parser.add_argument('--strong-aug', action='store_true',
                        help='Strong Augmentation (default: False)')
    parser.add_argument('--resize-rate', type=float, default=0.1,
                        help='random resize rate (default: 0.1)')
    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.0, metavar='PCT',
                        help='Random erase prob (default: 0.0)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')
    # * ShuffleMix params
    parser.add_argument('--shufflemix', type=float, default=0.2,
                        help='shufflemix alpha, shufflemix enabled if > 0. (default: 0.2)')
    parser.add_argument('--smixmode', type=str, default='sm',
                        help='ShuffleMix strategies (default: "shufflemix(sm)", Per "sm_v1", "sm_v2", or "sm_v3", "mu_sm")')
    parser.add_argument('--smprob', type=float, default=0.3, metavar='ShuffleMix Prob',
                        help='ShuffleMix enable prob (default: 0.3)')
    parser.add_argument('--temporal-consist', action='store_true')
    parser.add_argument('--tempMix', action='store_true')
    parser.add_argument('--MixIntra', action='store_true')
    parser.add_argument('--replace-prob', type=float, default=0.25, metavar='MixIntra replace Prob')
    # DTN example sampling params
    parser.add_argument('--sample-duration', type=int, default=16,
                        help='The sampled frames in a video.')
    # NOTE(review): option name is likely a typo of "inter-factor"; kept for
    # CLI compatibility.
    parser.add_argument('--intar-fatcer', type=int, default=2,
                        help='The sampled frames in a video.')
    parser.add_argument('--sample-window', type=int, default=1,
                        help='Range of frames sampling (default: 1)')
    # * Recoupling params
    parser.add_argument('--distill', type=float, default=0.3, metavar='distill param',
                        help='distillation loss coefficient (default: 0.3)')
    parser.add_argument('--temper', type=float, default=0.6, metavar='distillation temperature')
    # * Cross modality loss params
    parser.add_argument('--DC-weight', type=float, default=0.2, metavar='cross depth loss weight')
    # * Rank Pooling params
    parser.add_argument('--frp-num', type=int, default=0, metavar='The Number of Epochs.')
    parser.add_argument('--w', type=int, default=4, metavar='The slide window of FRP.')
    # * fp16 params
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                        help='mixed precision opt level, if O0, no amp is used')
    return parser
def reduce_mean(tensor, nprocs):
    """All-reduce *tensor* over every process and return the mean as a float.

    The input tensor is cloned first so the caller's tensor is untouched.
    """
    total = tensor.clone()
    dist.all_reduce(total, op=dist.ReduceOp.SUM)
    total /= nprocs
    return total.item()
def main(args):
    """Distributed training entry point for the Motion RGB-D model.

    Sets up distributed mode, seeds, data loaders, mixup, optimizer,
    scheduler and (optionally) an EMA teacher; runs an initial validation
    pass (with confusion-matrix report), then trains for args.epochs,
    checkpointing and recording metrics on rank 0.
    """
    utils.init_distributed_mode(args)
    print(args)
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    if args.amp_opt_level == 'O0':
        logging.info('no apex is used')
    # Per-rank seed offset keeps runs reproducible but ranks decorrelated.
    seed = args.seed + utils.get_rank()
    np.random.seed(seed)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(seed)
    local_rank = utils.get_rank()
    args.nprocs = utils.get_world_size()
    print('nprocs:', args.nprocs)
    device = torch.device(args.device)
    #----------------------------
    # build function
    #----------------------------
    model = build_model(args)
    model = model.to(device)
    train_queue, train_sampler = build_dataset(args, phase='train')
    valid_queue, valid_sampler = build_dataset(args, phase='valid')
    if args.SYNC_BN and args.nprocs > 1:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
    model_without_ddp = model.module
    # Mixup/CutMix is active if any of the three knobs enables it; otherwise
    # fall back to plain cross-entropy.
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.num_classes, args=args)
    else:
        args.loss['name'] = 'CE'
    optimizer = create_optimizer(args, model_without_ddp)
    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
    criterion = build_loss(args)
    loss_scaler = NativeScaler()
    scheduler, _ = create_scheduler(args, optimizer)
    if args.finetune:
        load_pretrained_checkpoint(model_without_ddp, args.finetune)
    if args.resume:
        # Restore model/optimizer/scheduler state and fast-forward the
        # scheduler to the saved epoch.
        strat_epoch, best_acc = load_checkpoint(model_without_ddp, args.resume, optimizer, scheduler)
        print("Start Epoch: {}, Learning rate: {}, Best accuracy: {}".format(strat_epoch, [g['lr'] for g in
                                                                                          optimizer.param_groups],
                                                                             round(best_acc, 4)))
        scheduler.step(strat_epoch - 1)
        if args.resumelr:
            # Override the restored LR; if resumelr is not a float, reuse the
            # LR found in the checkpointed optimizer.
            for g in optimizer.param_groups:
                args.resumelr = g['lr'] if not isinstance(args.resumelr, float) else args.resumelr
                g['lr'] = args.resumelr
            #resume_scheduler = np.linspace(args.resumelr, 1e-5, args.epochs - strat_epoch)
            resume_scheduler = cosine_scheduler(args.resumelr, 1e-5, args.epochs - strat_epoch + 1, niter_per_ep=1).tolist()
            resume_scheduler.pop(0)
        args.epoch = strat_epoch - 1
    else:
        strat_epoch = 0
        best_acc = 0.0
    # NOTE(review): overwrites the value set in the resume branch above.
    args.epoch = strat_epoch
    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEma(
            model_without_ddp,
            decay=args.model_ema_decay,
            device='cpu' if args.model_ema_force_cpu else '',
            resume=args.finetune
        )
    if local_rank == 0:
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
        logging.info("learnable param size = %fMB", utils.count_learnable_parameters_in_MB(model))
        if hasattr(model_without_ddp, 'flops'):
            flops = model_without_ddp.flops()
            logging.info(f"number of GFLOPs: {flops / 1e9}")
    # Per-epoch metric history; list entries are cleared after every epoch.
    train_results = dict(
        train_score=[],
        train_loss=[],
        valid_score=[],
        valid_loss=[],
        best_score=0.0
    )
    # Initial validation pass (also serves as the evaluation path when
    # --eval_only is given): produces a confusion matrix figure and a
    # per-class accuracy/precision table.
    first_test = True
    if first_test:
        args.distill_lamdb = args.distill
        valid_acc, _, valid_dict, meter_dict, output = infer(valid_queue, model, criterion, local_rank, strat_epoch, device)
        from sklearn.metrics import confusion_matrix, auc, roc_curve, roc_auc_score
        num_cat = []
        categories = np.unique(valid_dict['grounds'])
        cm = confusion_matrix(valid_dict['grounds'], valid_dict['preds'], labels=categories)
        fig = plt.figure()
        ax = fig.add_subplot()
        sns.heatmap(cm, annot=True, fmt='g', ax=ax)
        # labels, title and ticks
        ax.set_title('Confusion Matrix', fontsize=20)
        ax.set_xlabel('Predicted labels', fontsize=16)
        ax.set_ylabel('True labels', fontsize=16)
        ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
        ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
        fig.savefig(os.path.join(args.save, "confusion_matrix"), dpi=fig.dpi)
        # Per-class recall ("Accuracy") and precision in percent; empty
        # rows/columns get a sentinel near-zero value to avoid div-by-zero.
        Accuracy = [(cm[i, i] / sum(cm[i, :])) * 100 if sum(cm[i, :]) != 0 else 0.000001 for i in range(cm.shape[0])]
        Precision = [(cm[i, i] / sum(cm[:, i])) * 100 if sum(cm[:, i]) != 0 else 0.000001 for i in range(cm.shape[1])]
        print('| Class ID \t Accuracy(%) \t Precision(%) |')
        for i in range(len(Accuracy)):
            print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
        print('-' * 80)
        if args.save_output:
            torch.save(output, os.path.join(args.save, '{}-output.pth'.format(args.type)))
        if args.eval_only:
            return
    for epoch in range(strat_epoch, args.epochs):
        train_sampler.set_epoch(epoch)
        # Linearly ramp drop-path probability over the run.
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        # Distillation is disabled during LR warmup.
        if epoch <= args.warmup_epochs:
            args.distill_lamdb = 0.
        else:
            args.distill_lamdb = args.distill
        # Warm-Up with FRP
        if epoch < args.frp_num:
            args.frp = True
        else:
            args.frp = False
        args.epoch = epoch
        train_acc, train_obj, meter_dict_train = train_one_epoch(train_queue, model, model_ema, criterion, optimizer, epoch, local_rank, loss_scaler, device, mixup_fn)
        valid_acc, valid_obj, valid_dict, meter_dict_val, output = infer(valid_queue, model, criterion, local_rank, epoch, device)
        scheduler.step(epoch)
        # Checkpointing, metric bookkeeping and visualisation on rank 0 only.
        if local_rank == 0:
            if valid_acc > best_acc:
                best_acc = valid_acc
                isbest = True
            else:
                isbest = False
            # logging.info(f'train_acc {round(train_acc, 4)}, top-5 {round(meter_dict_train["Acc_top5"].avg, 4)}, train_loss {round(train_obj, 4)}')
            logging.info(f'valid_acc {round(valid_acc, 4)}, best_acc {round(best_acc, 4)}')
            state = {'model': model.module.state_dict(),'optimizer': optimizer.state_dict(),
                     'epoch': epoch + 1, 'bestacc': best_acc,
                     'scheduler': scheduler.state_dict(),
                     'scaler': loss_scaler.state_dict(),
                     'args': args,
                     'model_ema': get_state_dict(model_ema) if model_ema is not None else None,
                     }
            save_checkpoint(state, isbest, args.save)
            train_results['train_score'].append(train_acc)
            train_results['train_loss'].append(train_obj)
            train_results['valid_score'].append(valid_acc)
            train_results['valid_loss'].append(valid_obj)
            train_results['best_score'] = best_acc
            train_results.update(valid_dict)
            train_results['categories'] = np.unique(valid_dict['grounds'])
            if args.visdom['enable']:
                vis.plot_many({'train_acc': train_acc, 'loss': train_obj,
                               'cosin_similar': meter_dict_train['cosin_similar'].avg}, 'Train-' + args.type, epoch)
                vis.plot_many({'valid_acc': valid_acc, 'loss': valid_obj,
                               'cosin_similar': meter_dict_val['cosin_similar'].avg}, 'Valid-' + args.type, epoch)
            if isbest:
                if args.save_output:
                    torch.save(output, os.path.join(args.save, '{}-output.pth'.format(args.type)))
                EvaluateMetric(PREDICTIONS_PATH=args.save, train_results=train_results, idx=epoch)
            # Reset the per-epoch lists for the next round.
            for k, v in train_results.items():
                if isinstance(v, list):
                    v.clear()
def train_one_epoch(train_queue, model, model_ema, criterion, optimizer, epoch, local_rank, loss_scaler, device,
                    mixup_fn=None
                    ):
    """Train `model` for one epoch over `train_queue`.

    Loss = criterion(logits) [+ per-scale losses if args.MultiLoss]
           [+ RCM distillation loss if args.distill]
           [+ EMA-teacher pseudo-label CE if args.model_ema].
    Supports gradient accumulation (args.ACCUMULATION_STEPS) and apex fp16.

    Returns (top-1 accuracy avg, total-loss avg, meter dict).

    Implementation note: scalar metrics are written into globals() under
    their meter names so the generic update loop can fetch them by name;
    CE_loss / Distil_loss / DC_loss below resolve through those globals.
    """
    model.train()
    meter_dict = dict(
        Total_loss=AverageMeter(),
        CE_loss=AverageMeter(),
    )
    meter_dict['Data_Time'] = AverageMeter()
    meter_dict.update(dict(
        Acc_s=AverageMeter(),
        Acc_m=AverageMeter(),
        Acc_l=AverageMeter(),
        Acc=AverageMeter(),
        Acc_top5=AverageMeter(),
    ))
    if args.distill:
        meter_dict['Distil_loss'] = AverageMeter()
    if args.model_ema:
        meter_dict['DC_loss'] = AverageMeter()
    end = time.time()
    CE = torch.nn.CrossEntropyLoss()
    MSE = torch.nn.MSELoss()
    rcm_loss = RCM_loss(args, model.module)
    for step, (inputs, heatmap, target, _) in enumerate(train_queue):
        # With an EMA teacher the loader yields (student, teacher) pairs.
        if args.model_ema:
            inputs, inputs_aux = inputs
            heatmap, heatmap_aux = heatmap
            inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
        meter_dict['Data_Time'].update((time.time() - end)/args.batch_size)
        inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
        # FRP warm-up: train on the rank-pooled heatmaps instead of raw frames.
        if args.frp:
            inputs = heatmap
        ori_target, target_aux = target, target
        if mixup_fn is not None:
            inputs, target = mixup_fn(inputs, target)
            if args.model_ema:
                inputs_aux, target_aux = mixup_fn(inputs_aux, ori_target)
        # Pseudo labels from the (frozen) EMA teacher on the auxiliary view.
        if args.model_ema:
            with torch.no_grad():
                (logit_aux, dxs, dxm, dxl), _, = model_ema(inputs_aux)
                pseduo_targets = torch.argmax(logit_aux, dim=-1)
        images = inputs
        (logits, xs, xm, xl), temp_out = model(inputs)
        Total_loss = 0.0
        if args.MultiLoss:
            # Weighted sum of the adaptive head and the three scale heads.
            lamd1, lamd2, lamd3, lamd4 = map(float, args.loss_lamdb)
            globals()['CE_loss'] = lamd1*criterion(logits, target) + lamd2*criterion(xs, target) + lamd3*criterion(xm, target) + lamd4*criterion(xl, target)
        else:
            globals()['CE_loss'] = criterion(logits, target)
        Total_loss += CE_loss
        if args.distill:
            globals()['Distil_loss'] = rcm_loss(temp_out) * args.distill_lamdb
            Total_loss += Distil_loss
        if args.model_ema:
            globals()['DC_loss'] = args.DC_weight * CE(logits, pseduo_targets)
            Total_loss += DC_loss
        if args.ACCUMULATION_STEPS > 1:
            # NOTE(review): only the *logged* loss is divided by the step
            # count; backward() runs on the unscaled local Total_loss.
            globals()['Total_loss'] = Total_loss / args.ACCUMULATION_STEPS
            Total_loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
            if (step + 1) % args.ACCUMULATION_STEPS == 0:
                optimizer.step()
                optimizer.zero_grad()
        else:
            globals()['Total_loss'] = Total_loss
            optimizer.zero_grad()
            if args.fp16:
                with amp.scale_loss(Total_loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                grad_norm = nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.clip_grad)
            else:
                Total_loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
            optimizer.step()
        #---------------------
        # Meter performance
        #---------------------
        torch.distributed.barrier()
        # Accuracies are computed against the un-mixed targets.
        globals()['Acc'], globals()['Acc_top5'] = accuracy(logits, ori_target, topk=(1, 5))
        globals()['Acc_s'], _ = accuracy(xs, ori_target, topk=(1, 5))
        globals()['Acc_m'], _ = accuracy(xm, ori_target, topk=(1, 5))
        globals()['Acc_l'], _ = accuracy(xl, ori_target, topk=(1, 5))
        for name in meter_dict:
            if 'loss' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
            if 'Acc' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
        if step % args.report_freq == 0 and local_rank == 0:
            log_info = {
                'Epoch': '{}/{}'.format(epoch + 1, args.epochs),
                'Mini-Batch': '{:0>5d}/{:0>5d}'.format(step + 1,
                                                       len(train_queue.dataset) // (args.batch_size * args.nprocs)),
                'Lr': optimizer.param_groups[0]["lr"],
            }
            log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
            print_func(log_info)
            if args.vis_feature:
                Visfeature(args, model.module, images, weight_softmax=torch.softmax(logits, dim=-1))
        end = time.time()
        torch.cuda.synchronize()
        # EMA teacher tracks the student after every optimizer update.
        if model_ema is not None:
            model_ema.update(model)
    if local_rank == 0:
        print('*'*20)
        print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
        print('*'*20)
    return meter_dict['Acc'].avg, meter_dict['Total_loss'].avg, meter_dict
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Gather *tensor* from every distributed process and concatenate along dim 0.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, epoch, device, obtain_softmax_score=True):
    """Validate `model` over `valid_queue`.

    Tracks five prediction variants — the adaptive head and four sums of the
    per-scale heads (s+m+l, s+m, s+l, l+m) — and returns the variant with the
    best top-1 accuracy.

    Returns (best accuracy, loss avg,
             dict(grounds=..., preds=..., valid_images=(paths, gt, pred) of
             the misclassified samples on rank 0), meter dict, logits dict).

    Implementation note: scalar metrics are written into globals() under
    their meter names so the generic update loop can fetch them by name.
    """
    model.eval()
    meter_dict = dict(
        Total_loss=AverageMeter(),
    )
    meter_dict.update(dict(
        Acc_sm=AverageMeter(),
        Acc_sl=AverageMeter(),
        Acc_lm=AverageMeter(),
        Acc_all=AverageMeter(),
        Acc_adaptive=AverageMeter(),
        Acc_adaptive_top5=AverageMeter(),
    ))
    meter_dict['Infer_Time'] = AverageMeter()
    CE = torch.nn.CrossEntropyLoss()
    MSE = torch.nn.MSELoss()
    # preds keeps one prediction list per head-combination variant (0..4).
    grounds, preds, v_paths = [], {0:[], 1:[], 2:[], 3:[], 4:[]}, []
    logits_out = {}
    softmax_score = {}
    embedding_dict = OrderedDict()
    for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
        # With an EMA teacher the loader yields (student, teacher) pairs;
        # only the student view is evaluated here.
        if args.model_ema:
            inputs, inputs_aux = inputs
            heatmap, heatmap_aux = heatmap
            inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
        n = inputs.size(0)
        end = time.time()
        inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
        if args.frp:
            inputs = heatmap
        images = inputs
        (logits, xs, xm, xl), temp_out = model(inputs)
        Total_loss = 0
        if args.MultiLoss:
            lamd1, lamd2, lamd3, lamd4 = map(float, args.loss_lamdb)
            globals()['CE_loss'] = lamd1 * CE(logits, target) + lamd2 * CE(xs, target) + lamd3 * CE(xm,
                                                                                                    target) + lamd4 * CE(
                xl, target)
        else:
            globals()['CE_loss'] = CE(logits, target)
        Total_loss += CE_loss
        globals()['Total_loss'] = Total_loss
        meter_dict['Infer_Time'].update((time.time() - end) / n)
        grounds += target.cpu().tolist()
        # save logits from outputs
        preds[0] += torch.argmax(logits, dim=1).cpu().tolist()
        preds[1] += torch.argmax(xs+xm+xl, dim=1).cpu().tolist()
        preds[2] += torch.argmax(xs+xm, dim=1).cpu().tolist()
        preds[3] += torch.argmax(xs+xl, dim=1).cpu().tolist()
        preds[4] += torch.argmax(xl+xm, dim=1).cpu().tolist()
        v_paths += v_path
        torch.distributed.barrier()
        globals()['Acc_adaptive'], globals()['Acc_adaptive_top5'] = accuracy(logits, target, topk=(1, 5))
        globals()['Acc_all'], _ = accuracy(xs+xm+xl, target, topk=(1, 5))
        globals()['Acc_sm'], _ = accuracy(xs+xm, target, topk=(1, 5))
        globals()['Acc_sl'], _ = accuracy(xs+xl, target, topk=(1, 5))
        globals()['Acc_lm'], _ = accuracy(xl+xm, target, topk=(1, 5))
        for name in meter_dict:
            if 'loss' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
            if 'Acc' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
        if step % args.report_freq == 0 and local_rank == 0:
            log_info = {
                'Epoch': epoch + 1,
                'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
                        args.test_batch_size * args.nprocs)),
            }
            log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
            print_func(log_info)
            if args.vis_feature:
                Visfeature(args, model.module, images, v_path, torch.softmax(logits, dim=-1))
        # Collect embeddings and raw logits keyed by clip path.
        if args.save_output:
            feature_embedding(temp_out, v_path, embedding_dict)
            for t, logit in zip(v_path, logits):
                logits_out[t] = logit
        # Per-class max-softmax confidence (eval-only report).
        if obtain_softmax_score and args.eval_only:
            for t, logit in zip(target.cpu().tolist(), logits):
                if t not in softmax_score:
                    softmax_score[t] = [torch.softmax(logit, dim=-1).max(-1)[0]]
                else:
                    softmax_score[t].append(torch.softmax(logit, dim=-1).max(-1)[0])
    # select best acc output
    acc_list = torch.tensor([meter_dict['Acc_adaptive'].avg, meter_dict['Acc_all'].avg, meter_dict['Acc_sm'].avg, meter_dict['Acc_sl'].avg, meter_dict['Acc_lm'].avg])
    best_idx = torch.argmax(acc_list).tolist()
    preds = preds[best_idx] # Note: only preds be refined
    if obtain_softmax_score and args.eval_only:
        softmax_score = dict(sorted(softmax_score.items(), key = lambda i: i[0]))
        print('\n', 'The confidence scores for categories: ')
        print('| Class ID \t softmax score |')
        for k, v in softmax_score.items():
            print('| {0} \t {1} |'.format(k, round(float(sum(v)/len(v)), 2)))
        print('-' * 80)
    grounds_gather = concat_all_gather(torch.tensor(grounds).to(device))
    preds_gather = concat_all_gather(torch.tensor(preds).to(device))
    grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
    if local_rank == 0:
        print('*'*20)
        print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
        print('*'*20)
        # Keep only the misclassified samples of this rank for inspection.
        v_paths = np.array(v_paths)
        grounds = np.array(grounds)
        preds = np.array(preds)
        wrong_idx = np.where(grounds != preds)
        v_paths = v_paths[wrong_idx[0]]
        grounds = grounds[wrong_idx[0]]
        preds = preds[wrong_idx[0]]
        if epoch % 5 == 0 and args.save_output:
            torch.save(embedding_dict, os.path.join(args.save, 'feature-{}-epoch{}.pth'.format(args.type, epoch)))
    return acc_list.tolist()[best_idx], meter_dict['Total_loss'].avg, dict(grounds=grounds_gather, preds=preds_gather, valid_images=(v_paths, grounds, preds)), meter_dict, logits_out
if __name__ == '__main__':
    # import os
    # args.local_rank=os.environ['LOCAL_RANK']
    parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    args = Config(args)
    if args.save and args.local_rank == 0:
        Path(args.save).mkdir(parents=True, exist_ok=True)
    # Best-effort experiment-dir setup: when resuming, write next to the
    # checkpoint; snapshot the config and key sources into the save dir.
    try:
        if args.resume:
            args.save = os.path.split(args.resume)[0]
        else:
            args.save = f'{args.save}'
        utils.create_exp_dir(args.save, scripts_to_save=[args.config] + glob.glob('./train.py') + glob.glob('lib/model/*.py'))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; dir-setup failures remain non-fatal.
        pass
    # Log to stdout and to a timestamped file inside the experiment dir.
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                        format=log_format, datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(args.save, 'log{}.txt'.format(time.strftime("%Y%m%d-%H%M%S"))))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    # (Fix: stripped dataset-dump residue that was fused onto this line and
    # broke the file's syntax.)
    main(args)
# --- MotionRGBD-PAMI / MotionRGBD-PAMI-main/train_fusion.py ---
'''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import time
import glob
import numpy as np
import shutil
import cv2
import os, random, math
import sys
from pathlib import Path
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
from timm.optim import create_optimizer
import torch
import utils
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from collections import OrderedDict
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
from utils.visualizer import Visualizer
from config import Config
from lib import *
from utils import *
from timm.utils import get_state_dict #, ModelEma, ModelEmaV2
#------------------------
# evaluation metrics
#------------------------
from sklearn.decomposition import PCA
from sklearn import manifold
import pandas as pd
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
from torchvision.utils import save_image, make_grid
from PIL import Image
from einops import rearrange, repeat
def get_args_parser():
    """Build the base CLI parser for fusion-stage training/evaluation.

    Returns an ``argparse.ArgumentParser`` created with ``add_help=False`` so
    it can be composed via ``parents=[...]`` by the entry-point parser in
    ``__main__``.  Defaults here may be overridden by the YAML config that
    ``Config`` overlays after parsing.
    """
    parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', add_help=False)
    # Dataset locations, loader sizing and run mode.
    parser.add_argument('--data', type=str, default='/path/to/NTU-RGBD/dataset/', help='data dir')
    parser.add_argument('--splits', type=str, default='/path/to/NTU-RGBD/dataset/dataset_splits/@CS', help='data dir')
    parser.add_argument('--num-classes', default=None)
    parser.add_argument('--batch-size', default=16, type=int)
    parser.add_argument('--test-batch-size', default=32, type=int)
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--config', help='Load Congfile.')
    parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
    parser.add_argument('--local_rank', type=int, default=0)
    # parser.add_argument('--nprocs', type=int, default=1)
    parser.add_argument('--type', default='M',
                        help='data types, e.g., "M" or "K"')
    parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
    parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    # * Finetuning params
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                        help='Dropout rate (default: 0.)')
    parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment dir')
    parser.add_argument('--seed', type=int, default=123, help='random seed')
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    parser.add_argument('--shuffle', default=False, action='store_true', help='Tokens shuffle')
    # Learning rate schedule parameters
    parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine", "step", "multistep"')
    parser.add_argument('--lr', type=float, default=1e-2, metavar='LR',
                        help='learning rate (default: 5e-4)')
    parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                        help='learning rate noise on/off epoch percentages')
    parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                        help='learning rate noise limit percent (default: 0.67)')
    parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                        help='learning rate noise std-dev (default: 1.0)')
    parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                        help='epoch interval to decay LR')
    parser.add_argument('--decay-milestones', type=list, default=[10, 20, 30], metavar='milestones',
                        help='epoch interval to milestones decay LR, default list[]')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                        help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
    parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10')
    parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                        help='LR decay rate (default: 0.1)')
    # Optimizer parameters
    parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "sgd"')
    parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip-grad', type=float, default=5., metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight-decay', type=float, default=0.0005,
                        help='weight decay (default: 0.0005)')
    parser.add_argument('--ACCUMULATION-STEPS', type=int, default=0,
                        help='accumulation step (default: 0.0)')
    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0.8,
                        help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
    parser.add_argument('--cutmix', type=float, default=0.0,
                        help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
    parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup-prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup-mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--mixup-dynamic', action='store_true', default=False, help='')
    # Augmentation parameters
    parser.add_argument('--autoaug', action='store_true')
    parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". " + \
                        "(default: rand-m9-mstd0.5-inc1)'),
    parser.add_argument('--train-interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--repeated-aug', action='store_true')
    parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
    parser.set_defaults(repeated_aug=True)
    parser.add_argument('--translate', type=int, default=20,
                        help='translate angle (default: 0)')
    parser.add_argument('--strong-aug', action='store_true',
                        help='Strong Augmentation (default: False)')
    parser.add_argument('--resize-rate', type=float, default=0.1,
                        help='random resize rate (default: 0.1)')
    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.0, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')
    # * ShuffleMix params
    parser.add_argument('--shufflemix', type=float, default=0.2,
                        help='shufflemix alpha, shufflemix enabled if > 0. (default: 0.0)')
    parser.add_argument('--smixmode', type=str, default='sm',
                        help='ShuffleMix strategies (default: "shufflemix(sm)", Per "sm_v1", "sm_v2", or "sm_v3", "mu_sm")')
    parser.add_argument('--smprob', type=float, default=0.3, metavar='ShuffleMix Prob',
                        help='ShuffleMix enable prob (default: 0.3)')
    parser.add_argument('--temporal-consist', action='store_true')
    parser.add_argument('--tempMix', action='store_true')
    parser.add_argument('--MixIntra', action='store_true')
    parser.add_argument('--replace-prob', type=float, default=0.25, metavar='MixIntra replace Prob')
    # DTN example sampling params
    parser.add_argument('--sample-duration', type=int, default=16,
                        help='The sampled frames in a video.')
    parser.add_argument('--intar-fatcer', type=int, default=2,
                        help='The sampled frames in a video.')
    parser.add_argument('--sample-window', type=int, default=1,
                        help='Range of frames sampling (default: 1)')
    # * Recoupling params
    parser.add_argument('--distill', type=float, default=0.3, metavar='distill param',
                        help='distillation loss coefficient (default: 0.1)')
    parser.add_argument('--temper', type=float, default=0.6, metavar='distillation temperature')
    # * Cross modality loss params
    parser.add_argument('--DC-weight', type=float, default=0.2, metavar='cross depth loss weight')
    # * fp16 params
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                        help='mixed precision opt level, if O0, no amp is used')
    # * Rank Pooling params
    parser.add_argument('--frp-num', type=int, default=0, metavar='The Number of Epochs.')
    parser.add_argument('--w', type=int, default=4, metavar='The slide window of FRP.')
    parser.add_argument('--FusionNet', default='cs32', choices=['cs16', 'cs32', 'cs64', 'cv16', 'cv32', 'cv64'],
                        help='used for multi-modal fusion.')
    parser.add_argument('--scc-depth', type=int, default=2, metavar='SCC depth')
    parser.add_argument('--tcc-depth', type=int, default=4, metavar='TCC depth')
    return parser
def reduce_mean(tensor, nprocs):
    """All-reduce *tensor* over every distributed process, average by the
    world size *nprocs*, and return the result as a Python number."""
    averaged = tensor.clone()
    dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
    averaged /= nprocs
    return averaged.item()
def main(args):
    """Fusion-stage entry point: train/evaluate the multi-modal FusionNet head
    on top of frozen RGB-D feature extractors.

    ``args`` is the merged CLI + YAML ``Config`` object built in ``__main__``.
    Runs under torch.distributed with one process per GPU.
    """
    utils.init_distributed_mode(args)
    print(args)
    # This script only trains the fusion head — force the network choice.
    if args.Network != 'FusionNet':
        logging.info('Reset the model to the fusion training state.')
        args.Network = 'FusionNet'
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    if args.amp_opt_level == 'O0':
        logging.info('no apex is used')
    # Per-rank seed offset keeps ranks decorrelated but reproducible.
    seed = args.seed + utils.get_rank()
    np.random.seed(seed)
    cudnn.benchmark = True
    torch.manual_seed(seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(seed)
    local_rank = utils.get_rank()
    args.nprocs = utils.get_world_size()
    print('nprocs:', args.nprocs)
    device = torch.device(args.device)
    #----------------------------
    # build function
    #----------------------------
    model = build_model(args)
    model = model.to(device)
    train_queue, train_sampler = build_dataset(args, phase='train')
    valid_queue, valid_sampler = build_dataset(args, phase='valid')
    if args.SYNC_BN and args.nprocs > 1:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=False)
    model_without_ddp = model.module
    # Mixup/CutMix only when their coefficients enable them; otherwise fall
    # back to plain cross-entropy (soft-target loss is unnecessary).
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.num_classes, args=args)
    else:
        args.loss['name'] = 'CE'
    optimizer = create_optimizer(args, model_without_ddp)
    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
    criterion = build_loss(args)
    loss_scaler = NativeScaler()
    scheduler, _ = create_scheduler(args, optimizer)
    if args.finetune:
        load_pretrained_checkpoint(model_without_ddp, args.finetune)
    if args.resume:
        strat_epoch, best_acc = load_checkpoint(model_without_ddp, args.resume, optimizer, scheduler)
        print("Start Epoch: {}, Learning rate: {}, Best accuracy: {}".format(
            strat_epoch, [g['lr'] for g in optimizer.param_groups], round(best_acc, 4)))
        scheduler.step(strat_epoch - 1)
        if args.resumelr:
            # Restart from an explicit LR (or the checkpointed one when
            # --resumelr is not a float) and pre-compute a one-step-per-epoch
            # cosine decay for the remaining epochs.
            for g in optimizer.param_groups:
                args.resumelr = g['lr'] if not isinstance(args.resumelr, float) else args.resumelr
                g['lr'] = args.resumelr
            #resume_scheduler = np.linspace(args.resumelr, 1e-5, args.epochs - strat_epoch)
            resume_scheduler = cosine_scheduler(args.resumelr, 1e-5, args.epochs - strat_epoch + 1, niter_per_ep=1).tolist()
            resume_scheduler.pop(0)
        args.epoch = strat_epoch - 1
    else:
        strat_epoch = 0
        best_acc = 0.0
        args.epoch = strat_epoch
    if local_rank == 0:
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
        logging.info("learnable param size = %fMB", utils.count_learnable_parameters_in_MB(model))
        if hasattr(model_without_ddp, 'flops'):
            flops = model_without_ddp.flops()
            logging.info(f"number of GFLOPs: {flops / 1e9}")
    # The frozen two-stream feature extractor ("captuer") supplies hidden
    # features to the fusion head; it stays in eval mode throughout.
    captuer=None
    if args.FusionNet:
        captuer = FeatureCapter(args, num_classes=args.num_classes)
        captuer = captuer.to(device)
        captuer.eval()
    train_results = dict(
        train_score=[],
        train_loss=[],
        valid_score=[],
        valid_loss=[],
        best_score=0.0
    )
    first_test = True  # always run an initial validation pass below
    if first_test:
        args.distill_lamdb = args.distill
        valid_acc, _, valid_dict, meter_dict, output = infer(valid_queue, model, criterion, local_rank, strat_epoch, device, captuer)
        from sklearn.metrics import confusion_matrix, auc, roc_curve, roc_auc_score
        num_cat = []
        categories = np.unique(valid_dict['grounds'])
        cm = confusion_matrix(valid_dict['grounds'], valid_dict['preds'], labels=categories)
        fig = plt.figure()
        ax = fig.add_subplot()
        sns.heatmap(cm, annot=True, fmt='g', ax=ax)
        # labels, title and ticks
        ax.set_title('Confusion Matrix', fontsize=20)
        ax.set_xlabel('Predicted labels', fontsize=16)
        ax.set_ylabel('True labels', fontsize=16)
        ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
        ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
        fig.savefig(os.path.join(args.save, "confusion_matrix"), dpi=fig.dpi)
        # Per-class recall ("Accuracy") and precision from the confusion
        # matrix; empty rows/columns fall back to a tiny epsilon instead of
        # dividing by zero.
        Accuracy = [(cm[i, i] / sum(cm[i, :])) * 100 if sum(cm[i, :]) != 0 else 0.000001 for i in range(cm.shape[0])]
        Precision = [(cm[i, i] / sum(cm[:, i])) * 100 if sum(cm[:, i]) != 0 else 0.000001 for i in range(cm.shape[1])]
        print('| Class ID \t Accuracy(%) \t Precision(%) |')
        for i in range(len(Accuracy)):
            print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
        print('-' * 80)
        if args.eval_only:
            return
    for epoch in range(strat_epoch, args.epochs):
        train_sampler.set_epoch(epoch)  # reshuffle shards across ranks
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        args.epoch = epoch
        train_acc, train_obj, meter_dict_train = train_one_epoch(train_queue, model, criterion, optimizer, epoch, local_rank, loss_scaler, device, mixup_fn, captuer)
        valid_acc, valid_obj, valid_dict, meter_dict_val, output = infer(valid_queue, model, criterion, local_rank, epoch, device, captuer)
        scheduler.step(epoch)
        if local_rank == 0:
            # Only rank 0 checkpoints, logs and plots.
            if valid_acc > best_acc:
                best_acc = valid_acc
                isbest = True
            else:
                isbest = False
            # logging.info(f'train_acc {round(train_acc, 4)}, top-5 {round(meter_dict_train["Acc_top5"].avg, 4)}, train_loss {round(train_obj, 4)}')
            logging.info(f'valid_acc {round(valid_acc, 4)}, best_acc {round(best_acc, 4)}')
            state = {'model': model.module.state_dict(),'optimizer': optimizer.state_dict(),
                     'epoch': epoch + 1, 'bestacc': best_acc,
                     'scheduler': scheduler.state_dict(),
                     'scaler': loss_scaler.state_dict(),
                     'args': args,
                     }
            save_checkpoint(state, isbest, args.save)
            train_results['train_score'].append(train_acc)
            train_results['train_loss'].append(train_obj)
            train_results['valid_score'].append(valid_acc)
            train_results['valid_loss'].append(valid_obj)
            train_results['best_score'] = best_acc
            train_results.update(valid_dict)
            train_results['categories'] = np.unique(valid_dict['grounds'])
            if args.visdom['enable']:
                # NOTE(review): `vis` is not defined anywhere in this module's
                # visible scope — confirm a Visualizer instance exists before
                # enabling visdom, otherwise this raises NameError.
                vis.plot_many({'train_acc': train_acc, 'loss': train_obj,
                               'cosin_similar': meter_dict_train['cosin_similar'].avg}, 'Train-' + args.type, epoch)
                vis.plot_many({'valid_acc': valid_acc, 'loss': valid_obj,
                               'cosin_similar': meter_dict_val['cosin_similar'].avg}, 'Valid-' + args.type, epoch)
            if isbest:
                EvaluateMetric(PREDICTIONS_PATH=args.save, train_results=train_results, idx=epoch)
            # Reset the per-epoch lists; scalars (best_score etc.) persist.
            for k, v in train_results.items():
                if isinstance(v, list):
                    v.clear()
def train_one_epoch(train_queue, model, criterion, optimizer, epoch, local_rank, loss_scaler, device,
                    mixup_fn=None, captuer=None
                    ):
    """Train the fusion head for one epoch.

    The frozen ``captuer`` extractor runs under ``torch.no_grad()`` to produce
    per-modality logits and hidden features; only ``model`` (the fusion head)
    receives gradients.  Returns ``(Acc_all_avg, Total_loss_avg, meter_dict)``.

    NOTE(review): per-step metrics are stashed in module ``globals()`` so the
    generic meter loop below can look them up by name — fragile (it shadows
    module-level names); consider a local dict instead.  Also reads the
    module-level ``args``.
    """
    model.train()
    meter_dict = dict(
        Total_loss=AverageMeter(),
    )
    meter_dict['Data_Time'] = AverageMeter()
    meter_dict.update(dict(
        Acc_s=AverageMeter(),
        Acc_m=AverageMeter(),
        Acc_l=AverageMeter(),
        Acc_rgbd=AverageMeter(),
        Acc_rgbd_top5=AverageMeter(),
        Acc_all = AverageMeter(),
        Acc_all_top5=AverageMeter(),
    ))
    meter_dict['Fusion_loss'] = AverageMeter()
    meter_dict['Acc_fusion'] = AverageMeter()
    meter_dict['Acc_fusion_top5'] = AverageMeter()
    end = time.time()
    CE = torch.nn.CrossEntropyLoss()
    MSE = torch.nn.MSELoss()
    # NOTE(review): rcm_loss, CE and MSE are built but never used below.
    rcm_loss = RCM_loss(args, model.module)
    for step, (inputs, heatmap, target, _) in enumerate(train_queue):
        # Each batch carries a primary and an auxiliary modality stream.
        inputs, inputs_aux = inputs
        heatmap, heatmap_aux = heatmap
        inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
        meter_dict['Data_Time'].update((time.time() - end)/args.batch_size)
        inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
        ori_target, target_aux = target, target  # keep hard labels for accuracy
        if mixup_fn is not None:
            inputs, target = mixup_fn(inputs, target)
            inputs_aux, target_aux = mixup_fn(inputs_aux, ori_target)
        images = inputs
        # Frozen extractor: no gradients flow into it.
        with torch.no_grad():
            (logits, xs, xm, xl), (logit_K, K_xs, K_xm, K_xl), hidden_feature = captuer(inputs, inputs_aux)
        output, _ = model(hidden_feature)
        # One CE term per fusion-head output (primary / auxiliary targets).
        fusion_loss = criterion(output[0], target) + criterion(output[1], target_aux)
        globals()['Fusion_loss'] = fusion_loss
        Total_loss = fusion_loss
        globals()['Acc_fusion'], globals()['Acc_fusion_top5'] = accuracy((output[0] + output[1])/2.0, ori_target, topk=(1, 5))
        if args.ACCUMULATION_STEPS > 1:
            # Gradient accumulation: backward every step, step/zero the
            # optimizer only every ACCUMULATION_STEPS mini-batches.
            globals()['Total_loss'] = Total_loss / args.ACCUMULATION_STEPS
            Total_loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
            if (step + 1) % args.ACCUMULATION_STEPS == 0:
                optimizer.step()
                optimizer.zero_grad()
        else:
            globals()['Total_loss'] = Total_loss
            optimizer.zero_grad()
            if args.fp16:
                # Apex AMP path: scale the loss before backward.
                with amp.scale_loss(Total_loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                grad_norm = nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.clip_grad)
            else:
                Total_loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
            optimizer.step()
        #---------------------
        # Meter performance
        #---------------------
        torch.distributed.barrier()
        globals()['Acc_rgbd'], globals()['Acc_rgbd_top5'] = accuracy(logits+logit_K, ori_target, topk=(1, 5))
        globals()['Acc_all'], globals()['Acc_all_top5'] = accuracy((output[0] + output[1] + logits + logit_K)/4.0, ori_target, topk=(1, 5))
        globals()['Acc_s'], _ = accuracy(xs+K_xs, ori_target, topk=(1, 5))
        globals()['Acc_m'], _ = accuracy(xm+K_xm, ori_target, topk=(1, 5))
        globals()['Acc_l'], _ = accuracy(xl+K_xl, ori_target, topk=(1, 5))
        # Fold rank-local values (looked up via globals()) into the meters,
        # averaged over all processes; substring match selects the metrics.
        for name in meter_dict:
            if 'loss' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
            if 'Acc' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
        if step % args.report_freq == 0 and local_rank == 0:
            log_info = {
                'Epoch': '{}/{}'.format(epoch + 1, args.epochs),
                'Mini-Batch': '{:0>5d}/{:0>5d}'.format(step + 1,
                                                       len(train_queue.dataset) // (args.batch_size * args.nprocs)),
                'Lr': optimizer.param_groups[0]["lr"],
            }
            log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
            print_func(log_info)
            if args.vis_feature:
                Visfeature(args, model.module, images, weight_softmax=torch.softmax(logits, dim=-1), FusionNet=True)
        end = time.time()
    if local_rank == 0:
        print('*'*20)
        print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
        print('*'*20)
    return meter_dict['Acc_all'].avg, meter_dict['Total_loss'].avg, meter_dict
@torch.no_grad()
def concat_all_gather(tensor):
    """Gather *tensor* from every distributed rank and concatenate along
    dim 0.

    Warning: ``torch.distributed.all_gather`` carries no gradient, so the
    result is detached from the autograd graph.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, epoch, device, captuer, obtain_softmax_score=True):
    """Validate the fusion head for one epoch.

    Accumulates per-modality and fused accuracies, gathers ground truths and
    the better-scoring prediction stream across all ranks, and returns
    ``(best_acc, avg_total_loss, results_dict, meter_dict, logits_out)``.
    ``obtain_softmax_score`` is currently unused.  Reads the module-level
    ``args`` for distributed/reporting settings.
    """
    model.eval()
    meter_dict = dict(
        Total_loss=AverageMeter(),
    )
    meter_dict.update(dict(
        Acc_r=AverageMeter(),
        Acc_r_top5=AverageMeter(),
        Acc_d=AverageMeter(),
        Acc_d_top5=AverageMeter(),
        Acc_rgbd = AverageMeter(),
        Acc_rgbd_top5=AverageMeter(),
        Acc_fusion=AverageMeter(),
        Acc_fusion_top5=AverageMeter(),
    ))
    meter_dict['Infer_Time'] = AverageMeter()
    CE = torch.nn.CrossEntropyLoss()
    MSE = torch.nn.MSELoss()
    # preds keeps two candidate prediction streams:
    #   0 -> average of the two frozen extractor logits (RGB + depth)
    #   1 -> the same plus both fusion-head outputs
    grounds, preds, v_paths = [], {0:[], 1:[]}, []
    logits_out = {}
    softmax_score = {}
    embedding_dict = OrderedDict()
    for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
        inputs, inputs_aux = inputs
        heatmap, heatmap_aux = heatmap
        inputs_aux, heatmap_aux = map(lambda x: x.to(device, non_blocking=True), [inputs_aux, heatmap_aux])
        n = inputs.size(0)
        end = time.time()
        inputs, target, heatmap = map(lambda x: x.to(device, non_blocking=True), [inputs, target, heatmap])
        images = inputs
        # Frozen extractor yields per-modality logits + hidden features; the
        # fusion head scores the hidden features.
        (logits, xs, xm, xl), (logit_K, K_xs, K_xm, K_xl), hidden_feature = captuer(inputs, inputs_aux)
        output, temp_out = model(hidden_feature)
        Fusion_loss = CE(output[0], target) + CE(output[1], target)
        Total_loss = Fusion_loss
        globals()['Total_loss'] = Total_loss  # scratch slot read by the meter loop
        meter_dict['Infer_Time'].update((time.time() - end) / n)
        v_paths += v_path
        grounds += target.cpu().tolist()
        # save logits from outputs
        preds[0] += torch.argmax((logits+logit_K)/2., dim=1).cpu().tolist()
        preds[1] += torch.argmax((logits + logit_K + output[0] + output[1])/4., dim=1).cpu().tolist()
        torch.distributed.barrier()
        globals()['Acc_r'], globals()['Acc_r_top5'] = accuracy(logits, target, topk=(1, 5))
        globals()['Acc_d'], globals()['Acc_d_top5'] = accuracy(logit_K, target, topk=(1, 5))
        globals()['Acc_rgbd'], globals()['Acc_rgbd_top5'] = accuracy((logits+logit_K)/2., target, topk=(1, 5))
        globals()['Acc_fusion'], globals()['Acc_fusion_top5'] = accuracy((logits + logit_K + output[0] + output[1])/4., target, topk=(1, 5))
        # Fold rank-local values into the meters, averaged over all ranks.
        for name in meter_dict:
            if 'loss' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
            if 'Acc' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
        if step % args.report_freq == 0 and local_rank == 0:
            log_info = {
                'Epoch': epoch + 1,
                'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
                        args.test_batch_size * args.nprocs)),
            }
            log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
            print_func(log_info)
            if args.vis_feature:
                Visfeature(args, model.module, images, v_path, torch.softmax(logits, dim=-1), FusionNet=True)
    # select best acc output
    # NOTE(review): acc_list[0] is the *fusion* accuracy while preds[0] holds
    # the *rgbd* predictions — the index mapping looks swapped; confirm
    # against the intended pairing.
    acc_list = torch.tensor([meter_dict['Acc_fusion'].avg, meter_dict['Acc_rgbd'].avg])
    best_idx = torch.argmax(acc_list).tolist()
    preds = preds[best_idx] # Note: only preds be refined
    grounds_gather = concat_all_gather(torch.tensor(grounds).to(device))
    preds_gather = concat_all_gather(torch.tensor(preds).to(device))
    grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
    if local_rank == 0:
        print('*'*20)
        print_func(dict([(name, meter_dict[name].avg) for name in meter_dict]))
        print('*'*20)
        # Keep only the misclassified samples for qualitative inspection.
        v_paths = np.array(v_paths)
        grounds = np.array(grounds)
        preds = np.array(preds)
        wrong_idx = np.where(grounds != preds)
        v_paths = v_paths[wrong_idx[0]]
        grounds = grounds[wrong_idx[0]]
        preds = preds[wrong_idx[0]]
    return acc_list.tolist()[best_idx], meter_dict['Total_loss'].avg, dict(grounds=grounds_gather, preds=preds_gather, valid_images=(v_paths, grounds, preds)), meter_dict, logits_out
if __name__ == '__main__':
    # Script entry point: build the CLI, overlay the YAML config, prepare the
    # experiment directory, configure logging, then run training/evaluation.
    parser = argparse.ArgumentParser('Motion RGB-D training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    # Config merges the parsed namespace with values from --config.
    args = Config(args)
    if args.save and args.local_rank == 0:
        Path(args.save).mkdir(parents=True, exist_ok=True)
    try:
        if args.resume:
            # Resume into the checkpoint's own experiment directory.
            args.save = os.path.split(args.resume)[0]
        else:
            args.save = f'{args.save}'
        utils.create_exp_dir(args.save, scripts_to_save=[args.config] + glob.glob('./train.py') + glob.glob('lib/model/*.py'))
    except Exception:
        # Best-effort script snapshotting: a failure here must not abort the
        # run, but a bare `except:` would also swallow KeyboardInterrupt and
        # SystemExit, so catch Exception only.
        pass
    # Log to stdout and to a timestamped file inside the experiment dir.
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                        format=log_format, datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(args.save, 'log{}.txt'.format(time.strftime("%Y%m%d-%H%M%S"))))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
main(args) | 30,632 | 45.064662 | 183 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/tools/fusion.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import os, random, math
import time
import glob
import numpy as np
import shutil
import torch
import logging
import argparse
import traceback
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
import sys
sys.path.append(os.path.abspath(os.path.join("..", os.getcwd())))
from config import Config
from lib import *
import torch.distributed as dist
from utils import *
from utils.build import *
# Module-level CLI + experiment setup (this script configures itself at
# import/run time; `args` is consumed by main()/train()/infer() below).
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Place config Congfile!')
parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--nprocs', type=int, default=1)
parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
parser.add_argument('--save_output', action='store_true', help='Save logits?')
parser.add_argument('--fp16', action='store_true', help='Training with fp16')
parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment name')
parser.add_argument('--seed', type=int, default=123, help='random seed')
args = parser.parse_args()
args = Config(args)
#====================================================
# Some configuration
#====================================================
try:
    if args.resume:
        # Resume into the checkpoint's own experiment directory.
        args.save = os.path.split(args.resume)[0]
    else:
        args.save = '{}/{}-EXP-{}'.format(args.save, args.Network, time.strftime("%Y%m%d-%H%M%S"))
    utils.create_exp_dir(args.save, scripts_to_save=[args.config] + glob.glob('./tools/train*.py')+glob.glob('./lib/model/*.py'))
except Exception:
    # Best-effort script snapshotting: catch Exception (not a bare `except:`)
    # so KeyboardInterrupt/SystemExit still propagate.
    pass
# Log to stdout and to a timestamped file inside the experiment dir.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log{}.txt'.format(time.strftime("%Y%m%d-%H%M%S"))))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
#---------------------------------
# Fusion Net Training
#---------------------------------
def reduce_mean(tensor, nprocs):
    """Sum *tensor* across all distributed processes, divide by the world
    size *nprocs*, and return the mean as a Python number."""
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    return (reduced / nprocs).item()
def main(local_rank, nprocs, args):
    """Distributed training/evaluation loop for the fusion network.

    ``local_rank`` is this process's GPU index, ``nprocs`` the world size
    (``args.nprocs`` is what is actually consulted below), and ``args`` the
    merged CLI + YAML config built at module level.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % local_rank)
    # ---------------------------
    # Init distribution
    # ---------------------------
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(backend='nccl')
    # ----------------------------
    # build function
    # ----------------------------
    model = build_model(args)
    model = model.cuda(local_rank)
    criterion = build_loss(args)
    optimizer = build_optim(args, model)
    scheduler = build_scheduler(args, optimizer)
    train_queue, train_sampler = build_dataset(args, phase='train')
    valid_queue, valid_sampler = build_dataset(args, phase='valid')
    if args.resume:
        model, optimizer, strat_epoch, best_acc = load_checkpoint(model, args.resume, optimizer)
        logging.info("The network will resume training.")
        logging.info("Start Epoch: {}, Learning rate: {}, Best accuracy: {}".format(
            strat_epoch, [g['lr'] for g in optimizer.param_groups], round(best_acc, 4)))
        if args.resumelr:
            # Override the checkpointed LR and pre-compute a cosine decay with
            # one value per remaining training iteration.
            for g in optimizer.param_groups: g['lr'] = args.resumelr
            args.resume_scheduler = cosine_scheduler(args.resumelr, 1e-5, args.epochs - strat_epoch, len(train_queue))
    else:
        strat_epoch = 0
        best_acc = 0.0
    scheduler[0].last_epoch = strat_epoch
    if args.SYNC_BN and args.nprocs > 1:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], find_unused_parameters=True)
    if local_rank == 0:
        logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    train_results = dict(
        train_score=[],
        train_loss=[],
        valid_score=[],
        valid_loss=[],
        best_score=0.0
    )
    if args.eval_only:
        # Single validation pass; report the best of the three heads.
        valid_acc, _, _, meter_dict = infer(valid_queue, model, criterion, local_rank, 0)
        valid_acc = max(meter_dict['Acc_all'].avg, meter_dict['Acc'].avg, meter_dict['Acc_3'].avg)
        logging.info('valid_acc: {}, Acc_1: {}, Acc_2: {}, Acc_3: {}'.format(valid_acc, meter_dict['Acc_1'].avg, meter_dict['Acc_2'].avg, meter_dict['Acc_3'].avg))
        return
    #---------------------------
    # Mixed Precision Training
    # --------------------------
    if args.fp16:
        scaler = torch.cuda.amp.GradScaler()
    else:
        scaler = None
    for epoch in range(strat_epoch, args.epochs):
        train_sampler.set_epoch(epoch)  # reshuffle shards across ranks
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        # Warm-up phase: scheduler[-1] maps epoch -> warm-up LR.
        if epoch < args.scheduler['warm_up_epochs']:
            for g in optimizer.param_groups:
                g['lr'] = scheduler[-1](epoch)
        args.epoch = epoch
        train_acc, train_obj, meter_dict_train = train(train_queue, model, criterion, optimizer, epoch, local_rank, scaler)
        valid_acc, valid_obj, valid_dict, meter_dict_val = infer(valid_queue, model, criterion, local_rank, epoch)
        # Score the epoch by the best of the three prediction heads.
        valid_acc = max(meter_dict_val['Acc_all'].avg, meter_dict_val['Acc'].avg, meter_dict_val['Acc_3'].avg)
        if epoch >= args.scheduler['warm_up_epochs']:
            if args.scheduler['name'] == 'ReduceLR':
                scheduler[0].step(valid_acc)
            else:
                scheduler[0].step()
        if local_rank == 0:
            # Only rank 0 checkpoints and logs.
            if valid_acc > best_acc:
                best_acc = valid_acc
                isbest = True
            else:
                isbest = False
            logging.info('train_acc %f', train_acc)
            logging.info('valid_acc: {}, Acc_1: {}, Acc_2: {}, Acc_3: {}, best acc: {}'.format(meter_dict_val['Acc'].avg, meter_dict_val['Acc_1'].avg,
                                                                                               meter_dict_val['Acc_2'].avg,
                                                                                               meter_dict_val['Acc_3'].avg, best_acc))
            state = {'model': model.module.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch + 1, 'bestacc': best_acc}
            save_checkpoint(state, isbest, args.save)
            train_results['train_score'].append(train_acc)
            train_results['train_loss'].append(train_obj)
            train_results['valid_score'].append(valid_acc)
            train_results['valid_loss'].append(valid_obj)
            train_results['best_score'] = best_acc
            train_results.update(valid_dict)
            train_results['categories'] = np.unique(valid_dict['grounds'])
            if isbest:
                EvaluateMetric(PREDICTIONS_PATH=args.save, train_results=train_results, idx=epoch)
            # Reset the per-epoch lists; scalar entries persist.
            for k, v in train_results.items():
                if isinstance(v, list):
                    v.clear()
def train(train_queue, model, criterion, optimizer, epoch, local_rank, scaler):
    """Run one distributed training epoch.

    Args:
        train_queue: DataLoader yielding (inputs, heatmap, target, _) batches.
        model: DDP-wrapped network; its forward returns
            ((logits, logit_r, logit_d), (CE, BCE, MSE, distill)) losses.
        criterion: unused here (losses come from the model's forward).
        optimizer: optimizer whose param groups may be overwritten when
            resuming with a precomputed LR schedule (args.resumelr).
        epoch: current epoch index (for logging only).
        local_rank: CUDA device / rank id of this process.
        scaler: torch.cuda.amp.GradScaler when args.fp16, else None.

    Returns:
        (average Acc, average Total_loss, meter_dict) for the epoch.

    NOTE(review): per-step losses and accuracies are stashed in ``globals()``
    so the metric loop below can look each value up by its meter name.
    Fragile but intentional — the meter keys must keep matching the global
    names ('Total_loss', 'Acc_1', ...).
    """
    model.train()
    # Loss meters; keys must contain the substring 'loss' to be picked up
    # by the update loop below ('Data_Time' matches neither filter).
    meter_dict = dict(
        Total_loss=AverageMeter(),
        MSE_loss=AverageMeter(),
        CE_loss=AverageMeter(),
        BCE_loss=AverageMeter(),
        Distill_loss = AverageMeter()
    )
    meter_dict['Data_Time'] = AverageMeter()
    # Accuracy meters; keys must contain 'Acc'.
    meter_dict.update(dict(
        Acc_1=AverageMeter(),
        Acc_2=AverageMeter(),
        Acc_3=AverageMeter(),
        Acc=AverageMeter()
    ))
    end = time.time()
    for step, (inputs, heatmap, target, _) in enumerate(train_queue):
        # Per-sample data-loading time since the previous iteration finished.
        meter_dict['Data_Time'].update((time.time() - end)/args.batch_size)
        # Move tensors (or lists of tensors) to this rank's GPU.
        inputs, target, heatmap = map(lambda x: [d.cuda(local_rank, non_blocking=True) for d in x] if isinstance(x, list) else x.cuda(local_rank, non_blocking=True), [inputs, target, heatmap])
        if args.resumelr:
            # Resume path: LR comes from a precomputed per-step schedule.
            for g in optimizer.param_groups:
                g['lr'] = args.resume_scheduler[len(train_queue) * args.resume_epoch + step]
        # ---------------------------
        # Mixed Precision Training
        # --------------------------
        if args.fp16:
            print('Train with FP16')
            optimizer.zero_grad()
            # Runs the forward pass with autocasting.
            with torch.cuda.amp.autocast():
                (logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
                globals()['CE_loss'] = CE_loss
                globals()['MSE_loss'] = MSE_loss
                globals()['BCE_loss'] = BCE_loss
                globals()['Distill_loss'] = distillation
                globals()['Total_loss'] = CE_loss + MSE_loss + BCE_loss + distillation
            scaler.scale(Total_loss).backward()
            # Unscales the gradients of optimizer's assigned params in-place
            scaler.unscale_(optimizer)
            nn.utils.clip_grad_norm_(model.module.parameters(), args.grad_clip)
            scaler.step(optimizer)
            scaler.update()
        else:
            # ---------------------------
            # Fp32 Precision Training
            # --------------------------
            (logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
            globals()['CE_loss'] = CE_loss
            globals()['MSE_loss'] = MSE_loss
            globals()['BCE_loss'] = BCE_loss
            globals()['Distill_loss'] = distillation
            globals()['Total_loss'] = CE_loss + MSE_loss + BCE_loss + distillation
            optimizer.zero_grad()
            Total_loss.backward()
            nn.utils.clip_grad_norm_(model.module.parameters(), args.grad_clip)
            optimizer.step()
        #---------------------
        # Meter performance
        #---------------------
        torch.distributed.barrier()
        # Accuracy of the fused head, each branch, and the branch sum.
        globals()['Acc'] = calculate_accuracy(logits, target)
        globals()['Acc_1'] = calculate_accuracy(logit_r, target)
        globals()['Acc_2'] = calculate_accuracy(logit_d, target)
        globals()['Acc_3'] = calculate_accuracy(logit_r+logit_d, target)
        # All-reduce each tracked value across ranks before metering.
        for name in meter_dict:
            if 'loss' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
            if 'Acc' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
        if step % args.report_freq == 0 and local_rank == 0:
            log_info = {
                'Epoch': '{}/{}'.format(epoch + 1, args.epochs),
                'Mini-Batch': '{:0>5d}/{:0>5d}'.format(step + 1,
                                                       len(train_queue.dataset) // (args.batch_size * args.nprocs)),
                'Lr': ['{:.4f}'.format(g['lr']) for g in optimizer.param_groups],
            }
            log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
            print_func(log_info)
        end = time.time()
    # Advance the resume-schedule epoch counter (used by the resumelr branch).
    args.resume_epoch += 1
    return meter_dict['Acc'].avg, meter_dict['Total_loss'].avg, meter_dict
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Gather *tensor* from every rank of the default process group and return
    the concatenation along dim 0.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
@torch.no_grad()
def infer(valid_queue, model, criterion, local_rank, epoch):
    """Run one distributed evaluation pass.

    Args:
        valid_queue: DataLoader yielding (inputs, heatmap, target, v_path).
        model: DDP-wrapped network (same forward contract as in train()).
        criterion: unused (losses come from the model's forward).
        local_rank: CUDA device / rank id of this process.
        epoch: current epoch index (for logging only).

    Returns:
        (average Acc, average Total_loss,
         dict(grounds=all-rank labels, preds=all-rank predictions,
              valid_images=(wrong v_paths, wrong grounds, wrong preds)),
         meter_dict)

    NOTE(review): like train(), per-step values are stashed in ``globals()``
    and looked up by meter name in the update loop below.
    """
    model.eval()
    # Loss meters (keys must contain 'loss'); infer has no BCE meter even
    # though BCE_loss is still written to globals() below.
    meter_dict = dict(
        Total_loss=AverageMeter(),
        MSE_loss=AverageMeter(),
        CE_loss=AverageMeter(),
        Distill_loss=AverageMeter()
    )
    # Accuracy meters (keys must contain 'Acc').
    meter_dict.update(dict(
        Acc_1=AverageMeter(),
        Acc_2=AverageMeter(),
        Acc_3=AverageMeter(),
        Acc = AverageMeter(),
        Acc_all=AverageMeter(),
    ))
    meter_dict['Infer_Time'] = AverageMeter()
    # Per-rank ground-truth labels, argmax predictions, and video paths.
    grounds, preds, v_paths = [], [], []
    for step, (inputs, heatmap, target, v_path) in enumerate(valid_queue):
        end = time.time()
        # Move tensors (or lists of tensors) to this rank's GPU.
        inputs, target, heatmap = map(
            lambda x: [d.cuda(local_rank, non_blocking=True) for d in x] if isinstance(x, list) else x.cuda(local_rank,
                                                                                                            non_blocking=True),
            [inputs, target, heatmap])
        if args.fp16:
            with torch.cuda.amp.autocast():
                (logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
        else:
            (logits, logit_r, logit_d), (CE_loss, BCE_loss, MSE_loss, distillation) = model(inputs, heatmap, target)
        # Per-sample forward time.
        meter_dict['Infer_Time'].update((time.time() - end) / args.test_batch_size)
        globals()['CE_loss'] = CE_loss
        globals()['MSE_loss'] = MSE_loss
        globals()['BCE_loss'] = BCE_loss
        globals()['Distill_loss'] = distillation
        globals()['Total_loss'] = CE_loss + MSE_loss + BCE_loss + distillation
        torch.distributed.barrier()
        # Accuracies of fused head, each branch, branch sum, and full ensemble.
        globals()['Acc'] = calculate_accuracy(logits, target)
        globals()['Acc_1'] = calculate_accuracy(logit_r, target)
        globals()['Acc_2'] = calculate_accuracy(logit_d, target)
        globals()['Acc_3'] = calculate_accuracy(logit_r+logit_d, target)
        globals()['Acc_all'] = calculate_accuracy(logit_r+logit_d+logits, target)
        grounds += target.cpu().tolist()
        preds += torch.argmax(logits, dim=1).cpu().tolist()
        v_paths += v_path
        # All-reduce each tracked value across ranks before metering.
        for name in meter_dict:
            if 'loss' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
            if 'Acc' in name:
                meter_dict[name].update(reduce_mean(globals()[name], args.nprocs))
        if step % args.report_freq == 0 and local_rank == 0:
            log_info = {
                'Epoch': epoch + 1,
                'Mini-Batch': '{:0>4d}/{:0>4d}'.format(step + 1, len(valid_queue.dataset) // (
                        args.test_batch_size * args.nprocs)),
            }
            log_info.update(dict((name, '{:.4f}'.format(value.avg)) for name, value in meter_dict.items()))
            print_func(log_info)
    torch.distributed.barrier()
    # Collect labels/predictions from every rank onto every rank.
    grounds_gather = concat_all_gather(torch.tensor(grounds).cuda(local_rank))
    preds_gather = concat_all_gather(torch.tensor(preds).cuda(local_rank))
    grounds_gather, preds_gather = list(map(lambda x: x.cpu().numpy(), [grounds_gather, preds_gather]))
    if local_rank == 0:
        # Keep only this rank's misclassified samples for later inspection.
        v_paths = np.array(v_paths)
        grounds = np.array(grounds)
        preds = np.array(preds)
        wrong_idx = np.where(grounds != preds)
        v_paths = v_paths[wrong_idx[0]]
        grounds = grounds[wrong_idx[0]]
        preds = preds[wrong_idx[0]]
    return meter_dict['Acc'].avg, meter_dict['Total_loss'].avg, dict(grounds=grounds_gather, preds=preds_gather, valid_images=(v_paths, grounds, preds)), meter_dict
def _cleanup_stale_run(save_dir):
    """Remove a checkpoint directory left behind by a run that died almost
    immediately (fewer than 3 files written), so it does not clutter disk."""
    if os.path.exists(save_dir) and len(os.listdir(save_dir)) < 3:
        print('remove ‘{}’: Directory'.format(save_dir))
        # NOTE(review): the original command ran `rm -rf X` followed by
        # `mv X ./Checkpoints/trash`; the `mv` could never succeed because
        # the directory was already deleted, so only the removal is kept.
        os.system('rm -rf {}'.format(save_dir))


if __name__ == '__main__':
    try:
        main(args.local_rank, args.nprocs, args)
    except KeyboardInterrupt:
        # User abort: free cached GPU memory, drop a near-empty run dir,
        # then exit hard (skips the finally clause, as before).
        torch.cuda.empty_cache()
        _cleanup_stale_run(args.save)
        os._exit(0)
    except Exception:
        # Bug fix: traceback.print_exc() writes the traceback itself and
        # returns None, so the old print(traceback.print_exc()) also emitted
        # a spurious "None" line.
        traceback.print_exc()
        _cleanup_stale_run(args.save)
        os._exit(0)
    finally:
        torch.cuda.empty_cache()
| 16,564 | 40.830808 | 192 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/config/config.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import yaml
# from easydict import EasyDict as edict
def Config(args):
    """Load the YAML file named by ``args.config`` and flatten it onto *args*.

    Every key/value pair of every top-level section becomes an attribute of
    ``args`` (later sections silently overwrite earlier duplicate keys).
    The values are echoed to stdout between '=' rules for visibility.

    Returns the same ``args`` namespace, mutated in place.

    Note: this also repairs the dataset-extraction junk that was fused onto
    the original ``return`` line.
    """
    print()
    print('='*80)
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    for dic in config:
        for k, v in config[dic].items():
            setattr(args, k, v)
            print(k, ':\t', v)
    print('='*80)
    print()
    return args
MotionRGBD-PAMI | MotionRGBD-PAMI-main/config/__init__.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
from .config import Config | 90 | 17.2 | 54 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/demo/cluster.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
import torch.nn.functional as F
from scipy.spatial.distance import pdist
import pandas as pd
from sklearn import manifold
import numpy as np
import sys
import sklearn
from sklearn import metrics
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
M_path = '/mnt/workspace/Code/MotionRGBD-PAMI/Checkpoints/THU-READ4-32-DTNV2-M-simi/'
K_path = '/mnt/workspace/Code/MotionRGBD-PAMI/Checkpoints/THU-READ4-32-DTNV2-K-simi-cross/'
def normalization(data):
    """Min-max scale *data* into [0, 1].

    Computes (x - min) / (max - min) element-wise; a constant tensor
    divides by zero and yields non-finite values, as in the original.
    """
    lo = torch.min(data)
    hi = torch.max(data)
    return (data - lo) / (hi - lo)
def standardization(data):
    """Z-score *data* along axis 0: subtract the per-column mean and divide
    by the per-column (population) standard deviation."""
    centered = data - np.mean(data, axis=0)
    return centered / np.std(data, axis=0)
data = []
for i in range(0, 100, 5):
M_checkpoint = M_path + f'/feature-M-epoch{i}.pth'
M_features = torch.load(M_checkpoint, map_location='cpu')
K_checkpoint = K_path + f'/feature-K-epoch{i}.pth'
K_features = torch.load(K_checkpoint, map_location='cpu')
simlitary = []
for (km, vm), (kd, vd) in zip(M_features.items(), K_features.items()):
assert km == kd
# pca_data = pd.DataFrame(vm.cpu().numpy())
# vm = torch.tensor(tsne.fit_transform(pca_data))
# pca_data = pd.DataFrame(vd.cpu().numpy())
# vd = torch.tensor(tsne.fit_transform(pca_data))
# vm, vd = normalization(vm), normalization(vd) #F.normalize(vm, p = 2, dim=-1), F.normalize(vd, p = 2, dim=-1) #
simil = F.pairwise_distance(vm.unsqueeze(0), vd.unsqueeze(0), p=2)
# simil = torch.cosine_similarity(vm, vd, dim=-1)
# simil = torch.tensor(pdist(np.vstack([vm.numpy(),vd.numpy()]),'seuclidean')[0])
# simil = vm * vd
simlitary.append(simil.unsqueeze(0))
simi_value = torch.cat(simlitary).mean()
data.append(float(simi_value))
M_embed = torch.cat([F.normalize(e.unsqueeze(0), p = 2, dim=-1) for e in M_features.values()])
K_embed = torch.cat([F.normalize(e.unsqueeze(0), p = 2, dim=-1) for e in K_features.values()])
embed = torch.cat((M_embed, K_embed))
label_embed = torch.cat((torch.ones(M_embed.shape[0]), torch.ones(M_embed.shape[0])+1))
tsne = manifold.TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
embed = pd.DataFrame(embed.cpu().numpy())
pca_embed = tsne.fit_transform(embed)
embed.insert(0, 'label', pd.DataFrame(label_embed.cpu().numpy()))
print(pca_embed.shape)
fig, ax = plt.subplots()
scatter = ax.scatter(pca_embed[:, 0], pca_embed[:, 1], c=embed['label'], s=25, cmap='rainbow',
alpha=0.8, edgecolors='none')
plt.savefig("./"+'cluster.png', dpi=120, bbox_inches='tight') | 2,693 | 37.485714 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/demo/plot.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from mpl_toolkits import axisartist
import seaborn as sns
import numpy as np
import re
import sys
import os, argparse, random
import torch
def plot_curve(datas, flag, show_value=False):
    """Plot each (name, series) pair in *datas* as a line curve on a shared
    axis and save the figure to ./<flag>.png.

    When *show_value* is true every point is annotated with its value.
    """
    fig = plt.figure(figsize=(12, 7))
    ax = fig.add_subplot()
    for name, series in datas:
        plt.plot(series, '-', label=name)
        if show_value:
            for x_pos, y_val in enumerate(series):
                plt.text(x_pos, y_val + 0.05, '%.2f' % y_val, ha='center', va='bottom', fontsize=9)
    ax.set_ylabel('value')
    ax.set_xlabel('epoch')
    plt.grid()
    plt.legend()
    plt.savefig('./{}.png'.format(flag), dpi=fig.dpi)
#--------------------------------------
# Plot cvpr2022 multi-scale result: bar
#--------------------------------------
def multiscale():
name = ['Spatial-temporal I3D', 'Spatial Inception CNN\n + \n Single-scale Trans', 'Spatial Inception CNN \n + \n Dual-scale Trans', 'Spatial Inception CNN \n + \n Multi-scale Trans']
y = [68.54, 69.67, 72.20, 73.16]
y1 = [65.50, 68.33, 69.58, 70.50]
fig = plt.figure(figsize=(12, 7), dpi=100)
ax = fig.add_subplot()
bar_high = 0.4
x = np.arange(len(name))
b1 = ax.bar(x, y, width=bar_high, label='NvGesture', color=sns.xkcd_rgb["pale red"])
b2 = ax.bar(x+bar_high, y1, width=bar_high, label='THU-READ', color=sns.xkcd_rgb["denim blue"])
# labels, title and ticks
ax.set_ylabel('Accuracy(%)', fontsize=16)
plt.xticks(x + bar_high / 2, name, rotation=0,
# fontweight='bold',
fontsize=16)
# plt.xlim(0, 100)
plt.ylim(60, 75)
for a, b, c in zip(x, y, y1):
plt.text(a, b + 0.05, '%.2f' % b, ha='center', va='bottom', fontsize=16)
plt.text(a+bar_high, c + 0.05, '%.2f' % c, ha='center', va='bottom', fontsize=16)
# for rect, rect1 in zip(b1, b2):
# wd = rect.get_width()
# plt.text(wd, rect.get_x() + 0.5 / 2, str(wd), va='center')
#
# wd = rect1.get_width()
# plt.text(wd, rect1.get_x() + 0.5 / 2, str(wd), va='center')
plt.legend(handles=[b1, b2])
plt.show()
def FRPWindowsAndKnn():
name1 = [2, 5, 10, 15]
name2 = ["20%", '40%', '50%', '60%', '70%']
# Nv1 = [0.00, 76.04, 76.25, 75.00]
Nv1 = [76.67, 77.08, 78.57, 73.33]
# Nv2 = [0.00, 74.17, 74.38, 75.42, 72.71]
Nv2 = [77.71, 75.42, 78.13, 76.25, 76.67]
# thu1 = [79.58, 78.75, 75.00, 78.75, 0.00]
thu1 = [61.25, 59.17, 62.50, 58.75]
thu2 = [59.17, 60.41, 61.25, 60.42, 64.58]
fig = plt.figure()
ax1 = fig.add_subplot(121)
plt.plot(range(len(name1)), Nv1, 'bo--', label='NvGesture')
for a, b in zip(range(len(name1)), Nv1):
plt.text(a, b + 0.05, '%.2f' % b, ha='center', va='bottom', fontsize=9)
plt.plot(range(len(name1)), thu1, 'ro--', label='THU-READ')
for a, b in zip(range(len(name1)), thu1):
plt.text(a, b + 0.05, '%.2f' % b, ha='center', va='bottom', fontsize=9)
ax1.set_ylabel('Accuracy(%)')
ax1.set_xlabel('Window Size')
plt.xticks(range(len(name1)), name1, rotation=0)
plt.grid()
plt.legend()
ax2 = fig.add_subplot(122)
plt.plot(range(len(name2)), Nv2, 'bo--', label='NvGesture')
for a, b in zip(range(len(name2)), Nv2):
plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=9)
plt.plot(range(len(name2)), thu2, 'ro--', label='THU-READ')
for a, b in zip(range(len(name2)), thu2):
plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=9)
ax2.set_ylabel('Accuracy(%)')
ax2.set_xlabel('Sparse Rate')
plt.xticks(range(len(name2)), name2, rotation=0)
plt.grid()
plt.legend()
plt.show()
def Recoupling():
fontsize = 24
linewidth = 4
name = [20, 30, 40, 50, 60, 70, 80]
valueWO = [80.5, 82.7, 85.4, 84.8, 85.6, 85.0, 85.2]
valueW = [83.3, 84.1, 89.5, 87.0, 88.5, 87.2, 88.1]
fig = plt.figure(figsize=(18, 8))
ax = fig.add_subplot(121)
plt.plot(range(len(name)), valueWO, 'bo-', label='W/O Recoupling-NV', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueWO):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
plt.plot(range(len(name)), valueW, 'bo--', label='Recoupling-NV', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueW):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
valueWO = [54.2, 63.8, 68.7, 75.4, 79.2, 78.8, 79.1]
valueW = [54.6, 64.6, 69.2, 76.3, 81.7, 80.8, 80.4]
plt.plot(range(len(name)), valueWO, 'ro-', label='W/O Recoupling-THU', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueWO):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
plt.plot(range(len(name)), valueW, 'ro--', label='Recoupling-THU', linewidth=linewidth)
# for a, b in zip(range(len(name)), valueW):
# plt.text(a, b + 0.02, '%.2f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
ax.set_ylabel('Accuracy(%)',fontsize=fontsize, weight='bold')
ax.set_xlabel('(a) Epoch', fontsize=fontsize+1, weight='bold')
plt.xticks(range(len(name)), name, rotation=0, fontsize=fontsize, weight='bold')
plt.yticks(fontsize=fontsize, weight='bold')
plt.ylim(50, 90)
plt.grid()
# plt.title('(a)', fontsize=fontsize, weight='bold', y=-0.1)
plt.legend(fontsize=fontsize)
# plt.savefig(f're.pdf')
# fig = plt.figure(figsize=(11, 10))
ax1 = fig.add_subplot(122)
name = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
value = [87.4, 86.6, 89.5, 87.8, 87.2, 89.1, 88.5]
plt.plot(range(len(name)), value, 'bo-', label='Nv-Gesture', linewidth=linewidth)
for a, b in zip(range(len(name)), value):
plt.text(a, b + 0.02, '%.1f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
value = [78.8, 79.6, 79.6, 81.7, 78.8, 79.2, 77.9]
plt.plot(range(len(name)), value, 'ro-', label='THU-READ', linewidth=linewidth)
for a, b in zip(range(len(name)), value):
plt.text(a, b + 0.02, '%.1f' % b, ha='center', va='bottom', fontsize=fontsize, weight='bold')
ax1.set_ylabel('Accuracy(%)', fontsize=fontsize, weight='bold')
ax1.set_xlabel('(b) Temperature', fontsize=fontsize+1, weight='bold')
plt.xticks(range(len(name)), name, rotation=0, fontsize=fontsize, weight='bold')
plt.yticks(fontsize=fontsize, weight='bold')
# plt.ylim(40, 100)
plt.legend(fontsize=fontsize)
plt.grid()
# plt.title('(b)', fontsize=fontsize, weight='bold', y=-0.1)
plt.savefig(f'recoupling_temper.pdf', dpi=fig.dpi)
plt.show()
def Analysis(txt_file, types):
    """Scan *txt_file* for occurrences of '<types> <float>' on each line.

    Returns a numpy array with one row per line that matched at least once;
    lines without a match are dropped.
    """
    pattern = re.compile(r"{} (\d+\.\d*)".format(types))
    rows = []
    with open(txt_file, 'r') as fh:
        for line in fh:
            values = [float(m) for m in pattern.findall(line)]
            if values:
                rows.append(values)
    return np.array(rows)
def plot_func(datas, names, show_value=False, KAR=False, save_file_name='default'):
fontsize = 12
fig = plt.figure(dpi=200, figsize=(5,4))
# fig = plt.figure()
# ax = fig.add_subplot()
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
ax.axis["bottom"].set_axisline_style("->", size = 2.5)
ax.axis["left"].set_axisline_style("->", size = 2.5)
ax.axis["top"].set_visible(False)
ax.axis["right"].set_visible(False)
slopes, fit_values = [], []
markers = ['o', 'v', '*', '^']
for i, (data, name) in enumerate(zip(datas, names)):
data = data[:250]
if 'acc' in name:
data = [d[0]/100 for d in data]
name = 'Test-Acc'
if 'clean_loss' in name:
name = 'Easy-Loss'
if 'hard_loss' in name:
name = 'Hard-Loss'
if 'moderate_loss' in name:
name = 'Moderate-Loss'
# data = [d[0]/10 for d in data]
if 'clean_rate' in name:
name = 'DDP-e α=0.8 β=0.3'
data = [[d[0]+0.15] for d in data]
if 'hard_rate' in name:
name = 'DDP-h α=0.8 β=0.3'
data = [[d[0]-0.05] for d in data]
print(data)
# ax.plot(list(range(len(data)))[::15], data[::15], '-', label=name, linewidth=2.0, marker=markers[i])
# if i == 2: i+=1
# if name == 'Hard-Loss':
# ax.plot([75+d*5 for d in list(range(len(data)))], [d[0]-1.0 if i >= 16 else d[0] for i, d in enumerate(data)], '-', label=name, linewidth=3.0, color=colors[i])
# elif name == 'Moderate-Loss':
# ax.plot([75+d*5 for d in list(range(len(data)))], [d[0]-0.5 if i >= 16 else d[0] for i, d in enumerate(data)], '-', label=name, linewidth=3.0, color=colors[i])
# else:
# ax.plot([75+d*5 for d in list(range(len(data)))], [d[0]-0.1 if i >= 16 else d[0] for i, d in enumerate(data)], '-', label=name, linewidth=3.0, color=colors[i])
ax.plot([d for d in list(range(len(data)))], data, '-', label=name, linewidth=3.0, color=colors[i])
#α=0.9, β=0.4
# data =
if 'DDP' in name and KAR:
# slope = [float(d[0]) - for i, d in enumerate(data) if i > 0]
# slopes.append(np.array(slope))
# slope KAR
# slope = [float(d[0]) / i for i, d in enumerate(data) if i > 0]
# slopes.append(np.array(slope))
y = np.array([float(d[0]) for d in data])
x = np.array(list(range(len(data))))
from scipy.optimize import leastsq
from sympy import symbols, diff, Symbol, lambdify
def fit_func(p, x):
f = np.poly1d(p)
return f(x)
def residuals_func(p, y, x):
ret = fit_func(p, x) - y
return ret
p_init = np.random.randn(13)
plsq = leastsq(residuals_func, p_init, args=(y, x))
fit_value = fit_func(plsq[0], x)
fit_values.append(fit_value)
y = np.poly1d(plsq[0])
deriv_func = y.deriv()
slopes.append(abs(deriv_func(x)) * 20)
if len(slopes):
deriv_value = (slopes[0] + slopes[1]) / 2.
ax.plot(x[5:-5]+5, deriv_value[5:-5], '--', label='KAR', linewidth=2., color=colors[3])
ax.plot(x[::15], fit_values[0][::15], '--', label=' LSC', linewidth=1.5, color=colors[7], marker='o')
ax.plot(x[::15], fit_values[1][::15], '--', linewidth=1.5, color=colors[7], marker='o')
plt.yticks(fontproperties='Times New Roman', size=15,weight='bold')#设置大小及加粗
plt.xticks(fontproperties='Times New Roman', size=15)
# ax.set_ylabel('value')
ax.set_xlabel('Epoch', fontsize=18, fontweight='bold')
# x_names = torch.arange(0, 299, 10)
# print(x_names)
# plt.xticks(rotation=0, fontsize=fontsize, weight='bold')
# plt.grid()
# plt.axvline(0, color=colors[7], linestyle='--', label=None)
# plt.axvline(int(args.times[0][0]), color=colors[7], linestyle='--', label=None)
# plt.axvline(int(args.times[0][1]), color=colors[7], linestyle='--', label=None)
# plt.axhline(0.2751, color=colors[8], linestyle='--', label=None)
plt.legend(fontsize=12)
print('file name:', save_file_name)
# plt.savefig('./{}.png'.format(name), dpi=fig.dpi)
plt.savefig(f'./{save_file_name}.png', dpi=fig.dpi)
def plot_Curve(args):
# file_name = args.file_name
datas, names, conversion_ratio = [], [], []
print(args.file_name)
for file_name in args.file_name[0]:
types = args.types[0]
data_root = os.path.join('../out/', file_name, 'log.txt')
# datas, names, conversion_ratio = [], [], []
print(types)
for typ in types:
pattern = re.compile("\"{}\": (\d+\.\d+)".format(typ)) #[\d+\.\d]*
with open(data_root, 'r') as f:
data =[list(map(float, pattern.findall(fp))) for fp in f.readlines()]
data = list(filter(lambda x: len(x)>0, data))
datas.append(data)
names.append(typ)
# flag = True
# for i, (_, d1, d2) in enumerate(zip(*datas)):
# # if flag:
# # same_v = d1[0]
# if round(d1[0], 2) == round(d2[0], 2):
# same_v = d1[0]
# print(same_v, i)
# input()
# flag=False
# conversion_ratio.append((abs(d1[0]-same_v))/(abs(d2[0] - same_v)))
# datas.append(conversion_ratio)
# names.append('conversion_ratio')
plot_func(datas=datas, names=names, KAR=True, save_file_name=args.save_name)
def PatchLevelErasing():
# softmax1 = [0.5125, 0.5001, 0.4968, 0.4835, 0.4654]
# clean_rate1 = [0.5597, 0.4962, 0.4091, 0.3527, 0.3128]
# hard_rate1 = [0.0860, 0.0927 , 0.1068, 0.1216, 0.1408]
# clean_rate1 = [ 0.4230, 0.4120, 0.4095, 0.4015, 0.3883 ]
# hard_rate1 = [0.1359, 0.1431, 0.1470, 0.1550, 0.1667]
#DeiT-S
# clean_rate1 = [0.4230, 0.3539, 0.3436, 0.3188, 0.2798]
# hard_rate1 = [0.1359, 0.1559, 0.1711, 0.1927, 0.2271]
# name1 = ['0%', '10%', '20%', '30%', '40%']
#Swin-T
clean_rate1 = [0.4370, 0.4258, 0.4046, 0.3845, 0.3557]
hard_rate1 = [0.1480, 0.1554, 0.1584, 0.1679, 0.1723]
name1 = ['0%', '5%', '10%', '15%', '20%']
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(dpi=200, figsize=(7,6))
ax = axisartist.Subplot(fig, 211)
fig.add_axes(ax)
ax.axis["bottom"].set_axisline_style("->", size = 2.5)
ax.axis["left"].set_axisline_style("->", size = 2.5)
ax.axis["top"].set_visible(False)
ax.axis["right"].set_visible(False)
# ax.plot(softmax1, 'r-', label='$p_k$', linewidth=2.0, marker='o')
ax.plot(clean_rate1, 'g-', label='DDP-e α=0.8 β=0.3', linewidth=2.0, marker='x')
ax.plot(hard_rate1, 'b-', label='DDP-h α=0.8 β=0.3', linewidth=2.0, marker='v')
ax.set_xticks(range(len(name1)))
ax.set_xticklabels(name1, rotation=0, fontsize='small')
ax.set_xlabel('PatchErasing', fontweight='bold')
# ax.set_ylabel('$p_k$', fontweight='bold')
plt.legend()
#====================================================================
# Deit-B
# clean_rate1 = [0.3527, 0.3457, 0.3405, 0.3346, 0.3285]
# clean_rate1 = [0.3527, 0.3257, 0.3005, 0.2846, 0.2485]
# hard_rate1 = [0.1216, 0.1216 , 0.1216, 0.1216, 0.1216]
# # softmax1 = [0.8652, 0.8776, 0.8739, 0.8658, 0.8553]
# softmax1 = [0.8652, 0.8576, 0.8039, 0.7658, 0.7053]
# name1 = ['30%+0', '30%+10', '30%+20', '30%+30', '30%+40']
# Deit-S
# clean_rate1 = [0.3188, 0.2777, 0.2500, 0.2300, 0.2091]
# hard_rate1 = [0.1927, 0.1902, 0.1927, 0.1927, 0.1927]
# softmax1 = [0.8627, 0.8519, 0.8318, 0.8098, 0.7818]
# name1 = ['30%+0', '30%+10', '30%+20', '30%+30', '30%+40']
# Swin-T
clean_rate1 = [0.3845, 0.3725, 0.3655, 0.3400, 0.3191]
hard_rate1 = [0.1679, 0.1682, 0.1727, 0.1797, 0.1805]
softmax1 = [0.8627, 0.8519, 0.8418, 0.8198, 0.7918]
name1 = ['15%+0', '15%+4', '15%+5', '15%+6', '15%+7']
ax = axisartist.Subplot(fig, 212)
fig.add_axes(ax)
# ax.axis["bottom"].set_axisline_style("->", size = 2.5)
# ax.axis["left"].set_axisline_style("->", size = 2.5)
# ax.axis["top"].set_visible(False)
# ax.axis["right"].set_visible(False)
l3, = ax.plot(softmax1, 'r-', label='$p_k$', linewidth=2.0, marker='o')
# ax.plot(clean_rate1, 'g-', label='DDP-e', linewidth=2.0, marker='x')
# ax.plot(hard_rate1, 'b-', label='DDP-h', linewidth=2.0, marker='v')
ax.set_xticks(range(len(name1)))
ax.set_xticklabels(name1, rotation=0, fontsize='small')
ax.set_xlabel('PatchErasing+AutoErasing', fontweight='bold')
ax.set_ylabel('$p_k$', fontweight='bold', fontsize='small')
ax2 = ax.twinx()
l1, = ax2.plot(clean_rate1, 'g-', label='DDP-e α=0.8 β=0.3', linewidth=2.0, marker='x')
# ax2.bar(range(len(clean_rate1)), clean_rate1, width=0.3, label='DDP-e', color=sns.xkcd_rgb["green"])
l2, = ax2.plot(hard_rate1, 'b-', label='DDP-h α=0.8 β=0.3', linewidth=2.0, marker='v')
ax2.set_ylabel('DDP', fontweight='bold')
plt.legend(handles=[l1, l2, l3])
# plt.tight_layout()
plt.savefig('./PatchLevelErasing-Swin-T.pdf', dpi=fig.dpi)
data2 = []
sys.exit(0)
def SwinShow():
# fp = open('/home/admin/workspace/Code/Swin-Transformer/output/swin_tiny_patch4_window7_224/baseline-DDP/swin_tiny_patch4_window7_224/default/log_rank0.txt', 'r')
fp = open('/home/admin/workspace/Code/Swin-Transformer/output/swin_base_patch4_window7_224/DDP3/swin_base_patch4_window7_224/default/log_rank0.txt', 'r')
clean_rates, hard_rates = [], []
for ln in fp:
# print(ln)
if '[1250/1251]' in ln and 'clean_rate' in ln:
print(re.findall(r"INFO Train: (\[\d+/\d+\])", ln))
try:
clean_rate = re.findall(r"clean_rate_6 (\d+\.\d+) \((\d+\.\d+)\)", ln)[-1]
hard_rate = re.findall(r"hard_rate_1 (\d+\.\d+) \((\d+\.\d+)\)", ln)[-1]
except:
continue
clean_rates.append(list(map(float, clean_rate)))
hard_rates.append(list(map(float, hard_rate)))
clean_rates, hard_rates = list(filter(lambda x: len(x)>0, [clean_rates, hard_rates]))
clean_rates, hard_rates = [[d[-1]] for d in clean_rates], [[d[-1]] for d in hard_rates]
datas = [clean_rates, hard_rates]
names = ['clean_rate', 'hard_rates']
plot_func(datas=datas, names=names, KAR=True)
sys.exit(0)
def ExperimentAnaylize(x, y, label, step=1, save_path='./test'):
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
fig = plt.figure(dpi=200, figsize=(7,6))
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
# ax.axis["bottom"].set_axisline_style("->", size = 2.5)
# ax.axis["left"].set_axisline_style("->", size = 2.5)
# ax.axis["top"].set_visible(False)
# ax.axis["right"].set_visible(False)
ax2 = ax.twinx()
hand_labl = []
for i, (d, l) in enumerate(zip(x, label)):
d = d.tolist()
if 'acc' in l:
d = [l/100 for l in d]
elif 'L' in l or 'loss' in l:
l2, = ax2.plot(y[0::step], d[0::step], '--', label=l, linewidth=2.0, marker='o', color=colors[i])
hand_labl.append(l2)
continue
l1, = ax.plot(y[0::step], d[0::step], '--', label=l, linewidth=2.0, marker='x', color=colors[i])
hand_labl.append(l1)
# ax.set_xticklabels(y, rotation=0, fontsize='small')
ax.set_xlabel('Epoch', fontweight='bold')
ax.set_ylabel('DDP', fontweight='bold')
ax2.set_ylabel('Loss', fontweight='bold')
plt.legend(handles=hand_labl)
plt.savefig(f'{save_path}.png', dpi=fig.dpi)
def Txt2Analysis(txt_file, types):
    """Parse a log file whose lines are Python dict literals.

    For every metric name in ``types[0]`` collect its per-line value (0.0
    when the key string does not occur in the raw line), plus each line's
    'epoch' value.  Returns (metric_matrix, epochs, metric_names).

    NOTE(review): lines are parsed with ``eval`` — never feed this untrusted
    files; ``ast.literal_eval`` would be the safe drop-in replacement.
    """
    metric_names = types[0]
    datas = []
    for typ in metric_names:
        with open(txt_file, 'r') as fh:
            lines = fh.readlines()
        datas.append([eval(ln)[f'{typ}'] if typ in ln else 0.0 for ln in lines])
    with open(txt_file, 'r') as fh:
        epochs = [eval(ln)['epoch'] for ln in fh.readlines()]
    return np.array(datas), epochs, metric_names
if __name__ == '__main__':
# PatchLevelErasing()
# SwinShow()
# parser = argparse.ArgumentParser()
# parser.add_argument('--file-name', nargs='*', action='append', default=[])
# parser.add_argument('--types', nargs='*', action='append', default=[])
# parser.add_argument('--times', nargs='*', action='append', default=[])
# parser.add_argument('--save-name', default='')
# args = parser.parse_args()
# # plot_Curve(args)
# ExperimentAnaylize(*Txt2Analysis(args.file_name[0][0], args.types), step=10)
# sys.exit(0)
model = '{}'.format(sys.argv[1])
types = sys.argv[2]
name = [
['THUREAD1', '/home/admin/workspace/Code/MotionRGBD-PAMI/Checkpoints/THUREAD1/log20220608-120534.txt'],
['THUREAD1-mixup', '/home/admin/workspace/Code/MotionRGBD-PAMI/Checkpoints/THUREAD1-mixup/log20220609-014207.txt'],
]
data = []
for n, d in name:
try:
data.append([n, Analysis(d, types)])
except Exception as e:
print(e)
continue
plot_curve(datas=data, flag='{}_{}'.format(model, types))
| 20,739 | 39.826772 | 187 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/evaluate_metric.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
# -------------------
# import modules
# -------------------
import random, os
import numpy as np
import cv2
import heapq
import shutil
from textwrap import wrap
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimage
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, auc, roc_curve, roc_auc_score
import seaborn as sns
from torchvision import transforms
from PIL import Image
import torch
from torchvision.utils import save_image, make_grid
acc_figs = []
con_figs = []
# ---------------------------------------
# Plot Confusion Matrix
# ---------------------------------------
def plot_confusion_matrix(PREDICTIONS_PATH, grounds, preds, categories, idx, top=20):
    """Render and save a confusion-matrix heatmap plus a per-class
    accuracy/precision bar chart under *PREDICTIONS_PATH*, and print the
    top-*top* classes by accuracy and precision.

    Args:
        PREDICTIONS_PATH: output directory for the saved figures.
        grounds / preds: ground-truth and predicted class-id sequences.
        categories: class names; only their indices are used as labels.
        idx: epoch index, used in figure titles.
        top: how many classes to report in each Top-K listing.

    Returns:
        The *top* class ids with the LOWEST precision.
    """
    print("--------------------------------------------")
    print("Confusion Matrix")
    print("--------------------------------------------")
    super_category = str(idx)
    num_cat = []
    for ind, cat in enumerate(categories):
        print("Class {0} : {1}".format(ind, cat))
        num_cat.append(ind)
    print()
    numclass = len(num_cat)
    cm = confusion_matrix(grounds, preds, labels=num_cat)
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot()
    # Cell annotations are disabled above 60 classes to keep the plot legible.
    sns.heatmap(cm, annot=False if numclass > 60 else True, fmt='g', ax=ax); # annot=True to annotate cells, ftm='g' to disable scientific notation
    # labels, title and ticks
    ax.set_title('Confusion Matrix - ' + super_category, fontsize=20)
    ax.set_xlabel('Predicted labels', fontsize=16)
    ax.set_ylabel('True labels', fontsize=16)
    ax.set_xticks(range(0,len(num_cat), 1))
    ax.set_yticks(range(0,len(num_cat), 1))
    ax.xaxis.set_ticklabels(num_cat)
    ax.yaxis.set_ticklabels(num_cat)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
    ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
    plt.pause(0.1)
    fig.savefig(os.path.join(PREDICTIONS_PATH, "confusion_matrix"), dpi=fig.dpi)
    # img = Image.open(os.path.join(PREDICTIONS_PATH, "confusion_matrix.png"))
    # con_figs.append(img)
    # if len(con_figs) > 1:
    #     con_figs[0].save(os.path.join(PREDICTIONS_PATH, "confusion_matrix.gif"), save_all=True, append_images=con_figs[1:], duration=1000, loop=0)
    plt.close()
    # -------------------------------------------------
    # Plot Accuracy and Precision
    # -------------------------------------------------
    # Rows (cols) that never occur get a tiny non-zero placeholder instead of
    # dividing by zero; accuracy = recall per row, precision per column.
    Accuracy = [(cm[i, i] / sum(cm[i, :])) * 100 if sum(cm[i, :]) != 0 else 0.000001 for i in range(cm.shape[0])]
    Precision = [(cm[i, i] / sum(cm[:, i])) * 100 if sum(cm[:, i]) != 0 else 0.000001 for i in range(cm.shape[1])]
    fig = plt.figure(figsize=(int((numclass*3)%300), 8))
    ax = fig.add_subplot()
    bar_width = 0.4
    x = np.arange(len(Accuracy))
    b1 = ax.bar(x, Accuracy, width=bar_width, label='Accuracy', color=sns.xkcd_rgb["pale red"], tick_label=x)
    ax2 = ax.twinx()
    b2 = ax2.bar(x + bar_width, Precision, width=bar_width, label='Precision', color=sns.xkcd_rgb["denim blue"])
    average_acc = sum(Accuracy)/len(Accuracy)
    average_prec = sum(Precision)/len(Precision)
    # Dashed horizontal reference lines at the class-averaged values.
    b3 = plt.hlines(y=average_acc, xmin=-bar_width, xmax=numclass - 1 + bar_width * 2, linewidth=2, linestyles='--', color='r',
                    label='Average Acc : %0.2f' % average_acc)
    b4 = plt.hlines(y=average_prec, xmin=-bar_width, xmax=numclass - 1 + bar_width * 2, linewidth=2, linestyles='--', color='b',
                    label='Average Prec : %0.2f' % average_prec)
    plt.xticks(np.arange(numclass) + bar_width / 2, np.arange(numclass))
    # labels, title and ticks
    ax.set_title('Accuracy and Precision Epoch #{}'.format(idx), fontsize=20)
    ax.set_xlabel('labels', fontsize=16)
    ax.set_ylabel('Acc(%)', fontsize=16)
    ax2.set_ylabel('Prec(%)', fontsize=16)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
    ax.tick_params(axis='y', colors=b1[0].get_facecolor())
    ax2.tick_params(axis='y', colors=b2[0].get_facecolor())
    plt.legend(handles=[b1, b2, b3, b4])
    # fig.savefig(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision_{}.png".format(idx)), dpi=fig.dpi)
    fig.savefig(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision.png"), dpi=fig.dpi)
    # img = Image.open(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision.png"))
    # acc_figs.append(img)
    # if len(acc_figs) > 1:
    #     acc_figs[0].save(os.path.join(PREDICTIONS_PATH, "Accuracy-Precision.gif"), save_all=True, append_images=acc_figs[1:], duration=1000, loop=0)
    plt.close()
    # Top-K class ids by accuracy / precision, and the K lowest-precision ids.
    TopK_idx_acc = heapq.nlargest(top, range(len(Accuracy)), Accuracy.__getitem__)
    TopK_idx_prec = heapq.nlargest(top, range(len(Precision)), Precision.__getitem__)
    TopK_low_idx = heapq.nsmallest(top, range(len(Precision)), Precision.__getitem__)
    print('=' * 80)
    print('Accuracy Tok {0}: \n'.format(top))
    print('| Class ID \t Accuracy(%) \t Precision(%) |')
    for i in TopK_idx_acc:
        print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
    print('-' * 80)
    print('Precision Tok {0}: \n'.format(top))
    print('| Class ID \t Accuracy(%) \t Precision(%) |')
    for i in TopK_idx_prec:
        print('| {0} \t {1} \t {2} |'.format(i, round(Accuracy[i], 2), round(Precision[i], 2)))
    print('=' * 80)
    return TopK_low_idx
def EvaluateMetric(PREDICTIONS_PATH, train_results, idx):
    """Render evaluation plots for epoch `idx` from a results dict.

    `train_results` must provide 'grounds' (ground-truth labels), 'preds'
    (predicted labels) and 'categories'; they are forwarded to
    plot_confusion_matrix, which saves the confusion-matrix and
    accuracy/precision figures and returns the lowest-precision class ids.
    NOTE(review): TopK_low_idx is bound but not returned in the visible
    lines — presumably the function continues below; confirm.
    """
    TopK_low_idx = plot_confusion_matrix(PREDICTIONS_PATH, train_results['grounds'], train_results['preds'], train_results['categories'], idx)
| 5,573 | 39.100719 | 150 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/print_function.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import logging
def print_func(info):
    """Log every (name, value) pair of `info` on one INFO line, tab separated.

    :param info: mapping of display name -> value
    :return: None
    """
    entries = ('{}: {}'.format(key, val) for key, val in info.items())
    logging.info('\t'.join(entries))
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/utils.py | '''
This file is modified from:
https://github.com/yuhuixu1993/PC-DARTS/blob/master/utils.py
'''
import os
import numpy as np
import torch
import torch.distributed as dist
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import OrderedDict
import random
from .build import SoftTargetCrossEntropy
#------------------------
# evaluation metrics
#------------------------
from sklearn.decomposition import PCA
from sklearn import manifold
import pandas as pd
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
from torchvision.utils import save_image, make_grid
from PIL import Image
import cv2
from einops import rearrange, repeat
class ClassAcc():
    """Per-class accuracy bookkeeping.

    `class_acc[c]` counts correct predictions of class `c`;
    `single_class_num[c]` counts how many samples of class `c` were seen.
    """
    def __init__(self, GESTURE_CLASSES):
        self.class_acc = {label: 0 for label in range(GESTURE_CLASSES)}
        self.single_class_num = [0 for _ in range(GESTURE_CLASSES)]
    def update(self, logits, target):
        # Argmax over the class dimension gives the hard prediction.
        guesses = torch.argmax(logits, dim=1)
        for guess, truth in zip(guesses.cpu().numpy(), target.cpu().numpy()):
            if guess == truth:
                self.class_acc[truth] += 1
            self.single_class_num[truth] += 1
    def result(self):
        # Tiny epsilon keeps unseen classes from dividing by zero.
        return [round(hits / (self.single_class_num[label] + 0.000000001), 4)
                for label, hits in self.class_acc.items()]
class AverageMeter(object):
    """Running mean tracker: call update(value, count), read .avg/.sum/.cnt."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Clear all accumulators back to zero.
        self.avg = 0
        self.cnt = 0
        self.sum = 0

    def update(self, val, n=1):
        # `val` is the mean over `n` samples, so weight it before accumulating.
        self.cnt = self.cnt + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.cnt
def adjust_learning_rate(optimizer, step, lr):
    """Exponentially decay `lr` by a factor of 0.7 every 40000 steps,
    write the decayed value into every param group, and return it."""
    decay_factor = 0.7
    decay_steps = 40000.0
    scaled_lr = lr * np.power(decay_factor, step / decay_steps)
    for group in optimizer.param_groups:
        group['lr'] = scaled_lr
    return scaled_lr
# def accuracy(output, target, topk=(1,)):
# maxk = max(topk)
# batch_size = target.size(0)
# _, pred = output.topk(maxk, 1, True, True)
# pred = pred.t()
# correct = pred.eq(target.view(1, -1).expand_as(pred))
# res = []
# for k in topk:
# correct_k = correct[:k].view(-1).float().sum(0)
# res.append(correct_k.mul_(100.0/batch_size))
# return res
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (percent) of `output` logits w.r.t. `target` labels.

    Returns one tensor per entry in `topk`; every k is clamped to the number
    of classes, so asking for a k larger than the class count is safe.
    """
    num_classes = output.size()[1]
    maxk = min(max(topk), num_classes)
    batch_size = target.size(0)
    _, topk_idx = output.topk(maxk, 1, True, True)
    topk_idx = topk_idx.t()
    hits = topk_idx.eq(target.reshape(1, -1).expand_as(topk_idx))
    results = []
    for k in topk:
        k_hits = hits[:min(k, maxk)].reshape(-1).float().sum(0)
        results.append(k_hits * 100. / batch_size)
    return results
def calculate_accuracy(outputs, targets):
    """Top-1 accuracy as a fraction in [0, 1], returned as a 1-element tensor."""
    with torch.no_grad():
        n = targets.size(0)
        _, best = outputs.topk(1, 1, True)
        matches = best.t().eq(targets.view(1, -1))
        n_correct = matches.view(-1).float().sum(0, keepdim=True)
        return n_correct.mul_(1.0 / n)
def count_parameters_in_MB(model):
    """Total parameter count of `model` in millions, skipping auxiliary heads.

    Uses the builtin `sum`: passing a generator to `np.sum` is deprecated in
    NumPy and merely falls back to the Python `sum` with a warning.
    """
    return sum(v.numel() for name, v in model.named_parameters() if "auxiliary" not in name) / 1e6
def count_learnable_parameters_in_MB(model):
    """Number of trainable (requires_grad) parameters of `model`, in millions.

    Builtin `sum` replaces the deprecated `np.sum(<generator>)` call.
    """
    return sum(v.numel() for name, v in model.named_parameters() if v.requires_grad) / 1e6
def save_checkpoint(state, is_best=False, save='./', filename='checkpoint.pth.tar'):
    """Serialize `state` to `save`/`filename`; mirror it to model_best.pth.tar when `is_best`."""
    target = os.path.join(save, filename)
    torch.save(state, target)
    if is_best:
        shutil.copyfile(target, os.path.join(save, 'model_best.pth.tar'))
def load_checkpoint(model, model_path, optimizer=None, scheduler=None):
    """Restore model (and optionally optimizer/scheduler) state from `model_path`.

    The checkpoint is always mapped onto CPU; callers move the model
    afterwards. Returns the stored (epoch, bestacc) pair.
    """
    ckpt = torch.load(model_path, map_location='cpu')
    model.load_state_dict(ckpt['model'])
    if optimizer is not None:
        optimizer.load_state_dict(ckpt['optimizer'])
    if scheduler is not None:
        scheduler.load_state_dict(ckpt['scheduler'])
    return ckpt['epoch'], ckpt['bestacc']
def load_pretrained_checkpoint(model, model_path):
    """Partially warm-start `model` from the checkpoint at `model_path`.

    Keys are stripped of a leading 'module.' (DataParallel prefix). A weight
    is copied only when its shape matches the model's and its name is not one
    of the hard-coded DTN classifier-head keys, so checkpoints trained with a
    different class count can still initialise the backbone.
    Loads with strict=False and prints the keys left uninitialised.
    """
    # params = torch.load(model_path, map_location=lambda storage, loc: storage.cuda(local_rank))['model']
    params = torch.load(model_path, map_location='cpu')['model']
    new_state_dict = OrderedDict()
    for k, v in params.items():
        # Drop the 'module.' prefix added by (Distributed)DataParallel.
        name = k[7:] if k[:7] == 'module.' else k
        # if name not in ['dtn.mlp_head_small.2.bias', "dtn.mlp_head_small.2.weight",
        #                 'dtn.mlp_head_media.2.bias', "dtn.mlp_head_media.2.weight",
        #                 'dtn.mlp_head_large.2.bias', "dtn.mlp_head_large.2.weight"]:
        # if v.shape == model.state_dict()[name].shape:
        try:
            # NOTE(review): the bare except also skips keys absent from the
            # model (KeyError); intentional filtering, but it masks typos too.
            if v.shape == model.state_dict()[name].shape and name not in ['dtn.multi_scale_transformers.0.3.2.weight', 'dtn.multi_scale_transformers.0.3.2.bias', 'dtn.multi_scale_transformers.1.3.2.weight', 'dtn.multi_scale_transformers.1.3.2.bias', 'dtn.multi_scale_transformers.2.3.2.weight', 'dtn.multi_scale_transformers.2.3.2.bias']:
                new_state_dict[name] = v
        except:
            continue
    ret = model.load_state_dict(new_state_dict, strict=False)
    print('Missing keys: \n', ret.missing_keys)
    # return model
def drop_path(x, drop_prob):
    """Stochastic depth: zero whole samples of `x` in place with probability
    `drop_prob`, rescaling survivors by 1/keep_prob. No-op when drop_prob <= 0.

    NOTE(review): builds the Bernoulli mask with torch.cuda.FloatTensor, so a
    CUDA device is required whenever drop_prob > 0.
    """
    if drop_prob <= 0.:
        return x
    keep_prob = 1. - drop_prob
    mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
    x.div_(keep_prob)
    x.mul_(mask)
    return x
def create_exp_dir(path, scripts_to_save=None):
    """Create the experiment directory and optionally snapshot source files
    into `<path>/scripts` for reproducibility."""
    if not os.path.exists(path):
        os.mkdir(path)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is None:
        return
    scripts_dir = os.path.join(path, 'scripts')
    if not os.path.exists(scripts_dir):
        os.mkdir(scripts_dir)
    for script in scripts_to_save:
        shutil.copyfile(script, os.path.join(scripts_dir, os.path.basename(script)))
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
    """Per-iteration value schedule: linear warmup from `start_warmup_value`
    to `base_value`, then cosine decay to `final_value`.

    Returns a numpy array of length epochs * niter_per_ep.
    """
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_epochs > 0:
        warmup = np.linspace(start_warmup_value, base_value, warmup_iters)
    else:
        warmup = np.array([])
    decay_iters = np.arange(epochs * niter_per_ep - warmup_iters)
    cosine = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * decay_iters / len(decay_iters)))
    full = np.concatenate((warmup, cosine))
    assert len(full) == epochs * niter_per_ep
    return full
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both compiled in and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of distributed processes, or 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process in the group, or 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """True exactly on the rank-0 (master) process."""
    rank = get_rank()
    return rank == 0
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    # Wrapper that swallows output on non-master ranks unless the caller
    # passes force=True (the kwarg is stripped before delegating).
    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # Monkey-patch the builtin so every later print() in this process goes
    # through the rank-aware wrapper.
    __builtin__.print = print
def init_distributed_mode(args):
    """Initialise torch.distributed from the environment and patch printing.

    Fills args.rank / args.world_size / args.gpu from torchrun or SLURM
    environment variables; falls back to single-process mode (sets
    args.distributed = False) when neither is present. Requires args.dist_url.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # Launched via torchrun / torch.distributed.launch.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # Launched under SLURM: derive the local GPU id from the global rank.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Synchronise all ranks, then silence print() on non-master processes.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
def uniform_sampling(clips_num, sn, random=True):
    """Split `clips_num` frames into `sn` equal segments and pick one index
    from each.

    Args:
        clips_num (int): total number of frames available.
        sn (int): number of indices to sample.
        random (bool): if True, pick a uniformly random frame inside each
            segment; otherwise pick the (floored) mean index of the segment.

    Returns:
        list: `sn` frame indices in ascending segment order.

    Note: this replaces the original nested-lambda implementation; the
    `arr == []` fallback there was dead code (a `range` never equals `[]`),
    so the behaviour here is identical but readable.
    """
    indices = []
    for i in range(sn):
        lo = int(clips_num * i / sn)
        # max(...) guarantees every segment holds at least one index.
        hi = max(lo + 1, int(clips_num * (i + 1) / sn))
        segment = range(lo, hi)
        if random:
            indices.append(np.random.choice(segment))
        else:
            indices.append(int(np.mean(segment)))
    return indices
class DINOLoss(torch.nn.Module):
    """DINO-style self-distillation loss between teacher and student heads.

    Teacher outputs are centered (EMA `center` buffer) and sharpened with an
    epoch-dependent temperature; the original and horizontally-flipped views
    are blended with coefficient `lam_mix` to build soft targets for the
    student's four prediction heads (global, small, medium, large).
    Requires an initialized torch.distributed group (update_center uses
    all_reduce).
    """
    def __init__(self, args, out_dim, ncrops, warmup_teacher_temp, teacher_temp,
                 warmup_teacher_temp_epochs, nepochs, student_temp=0.1,
                 center_momentum=0.9):
        super().__init__()
        self.args = args
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.ncrops = ncrops
        self.register_buffer("center", torch.zeros(1, out_dim))
        # we apply a warm up for the teacher temperature because
        # a too high temperature makes the training instable at the beginning
        self.teacher_temp_schedule = np.concatenate((
            np.linspace(warmup_teacher_temp,
                        teacher_temp, warmup_teacher_temp_epochs),
            np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp
        ))
        self.CE = SoftTargetCrossEntropy()
    def forward(self, student_output, teacher_output, epoch):
        """
        Cross-entropy between softmax outputs of the teacher and student networks.
        """
        [ori_logits, ori_xs, ori_xm, ori_xl], [ori_logits_flip, ori_xs_flip, ori_xm_flip, ori_xl_flip] = teacher_output
        [color_logits, cxs, cxm, cxl], lam_mix = student_output
        # teacher centering and sharpening
        temp = self.teacher_temp_schedule[epoch]
        ori_logits, ori_xs, ori_xm, ori_xl = map(lambda x: torch.softmax((x - self.center) / temp, dim=-1), [ori_logits, ori_xs, ori_xm, ori_xl])
        ori_logits_flip, ori_xs_flip, ori_xm_flip, ori_xl_flip = map(lambda x: torch.softmax((x - self.center) / temp , dim=-1), [ori_logits_flip, ori_xs_flip, ori_xm_flip, ori_xl_flip])
        # Blend plain/flipped teacher views with the mixing coefficient.
        logits_t = lam_mix * ori_logits + (1. - lam_mix) * ori_logits_flip
        logits_xs_t = lam_mix * ori_xs + (1. - lam_mix) * ori_xs_flip
        logits_xm_t = lam_mix * ori_xm + (1. - lam_mix) * ori_xm_flip
        logits_xl_t = lam_mix * ori_xl + (1. - lam_mix) * ori_xl_flip
        # color_logits, cxs, cxm, cxl = map(lambda x: torch.softmax(x / self.student_temp, dim=-1), [color_logits, cxs, cxm, cxl])
        color_logits, cxs, cxm, cxl = map(lambda x: x / self.student_temp, [color_logits, cxs, cxm, cxl])
        Total_loss = 0.0
        CE = self.CE
        if self.args.MultiLoss:
            # Weighted sum over all four heads using args.loss_lamdb weights.
            lamd1, lamd2, lamd3, lamd4 = map(float, self.args.loss_lamdb)
            CE_loss = lamd1*CE(color_logits, logits_t) + lamd2*CE(cxs, logits_xs_t) + \
                      lamd3*CE(cxm, logits_xm_t) + lamd4*CE(cxl, logits_xl_t)
        else:
            CE_loss = CE(color_logits, logits_t)
        Total_loss += CE_loss
        self.update_center(logits_t)
        return Total_loss
    @torch.no_grad()
    def update_center(self, teacher_output):
        """
        Update center used for teacher output.
        """
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        dist.all_reduce(batch_center)
        batch_center = batch_center / (len(teacher_output) * dist.get_world_size())
        # ema update
        self.center = self.center * self.center_momentum + batch_center * (1 - self.center_momentum)
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
    """One-hot encode `x` into shape (N, num_classes), using off_value as the
    background and on_value at each label position."""
    idx = x.long().view(-1, 1)
    canvas = torch.full((idx.size(0), num_classes), off_value, device=device)
    return canvas.scatter_(1, idx, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):
    """Label-smoothed one-hot targets blended with the batch-reversed targets
    by weight `lam` (mixup convention: x mixes with x.flip(0))."""
    off_value = smoothing / num_classes
    on_value = 1. - smoothing + off_value
    forward = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
    reverse = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
    return forward * lam + reverse * (1. - lam)
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    # Requires an initialized process group; the receive buffers are created
    # with ones_like so dtype/device follow `tensor`.
    tensors_gather = [torch.ones_like(tensor)
                      for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
    # Concatenate along the batch dimension: world_size * local batch.
    output = torch.cat(tensors_gather, dim=0)
    return output
def Visfeature(args, model, inputs, v_path=None, weight_softmax=None, FusionNet=False):
    """Dump diagnostic visualizations for the current batch into `args.save`:
    a t-SNE cluster plot (FusionNet mode), an enhancement-weight heatmap,
    CAM-style overlays (PNG + GIF) and, in visdom eval mode, MHSA maps.

    NOTE(review): the visdom branches reference the names `vis` and `feature`
    which are not defined in this function or this file's visible scope —
    they appear to rely on globals provided elsewhere; confirm before
    enabling args.visdom['enable'].
    """
    # TSNE cluster
    if FusionNet:
        # pca_data = model.pca_data.detach().cpu()
        # targets = model.target_data.cpu()
        pca_data, targets = model.get_cluster_visualization()
        tsne = manifold.TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
        data = pd.DataFrame(pca_data.cpu().numpy())
        data_pca = tsne.fit_transform(data)
        data.insert(0, 'label', pd.DataFrame(targets.cpu().numpy()))
        fig, ax = plt.subplots()
        scatter = ax.scatter(data_pca[:, 0], data_pca[:, 1], c=data['label'], s=25, cmap='rainbow',
                             alpha=0.8, edgecolors='none')
        legend1 = ax.legend(*scatter.legend_elements(fmt="{x:.0f}"),
                            loc="best", title="Feature Type")
        ax.add_artist(legend1)
        fig.savefig(args.save + '/cluster-result.png')
        plt.close()
        return
    if args.visdom['enable']:
        vis.featuremap('CNNVision',
                       torch.sum(make_grid(feature[0].detach(), nrow=int(feature[0].size(0) ** 0.5), padding=2), dim=0).flipud())
        vis.featuremap('Attention Maps Similarity',
                       make_grid(feature[1], nrow=int(feature[1].detach().cpu().size(0) ** 0.5), padding=2)[0].flipud())
        vis.featuremap('Enhancement Weights', feature[3].flipud())
    else:
        # fig = plt.figure()
        # ax = fig.add_subplot()
        # sns.heatmap(
        #     torch.sum(make_grid(feature[0].detach(), nrow=int(feature[0].size(0) ** 0.5), padding=2), dim=0).cpu().numpy(),
        #     annot=False, fmt='g', ax=ax)
        # ax.set_title('CNNVision', fontsize=10)
        # fig.savefig(os.path.join(args.save, 'CNNVision.jpg'), dpi=fig.dpi)
        # plt.close()
        # fig = plt.figure()
        # ax = fig.add_subplot()
        # sns.heatmap(make_grid(feature[1].detach(), nrow=int(feature[1].size(0) ** 0.5), padding=2)[0].cpu().numpy(), annot=False,
        #             fmt='g', ax=ax)
        # ax.set_title('Attention Maps Similarity', fontsize=10)
        # fig.savefig(os.path.join(args.save, 'AttMapSimilarity.jpg'), dpi=fig.dpi)
        # plt.close()
        fig = plt.figure()
        ax = fig.add_subplot()
        # visweight = model.visweight
        feat, visweight = model.get_visualization()
        sns.heatmap(visweight.detach().cpu().numpy(), annot=False, fmt='g', ax=ax)
        ax.set_title('Enhancement Weights', fontsize=10)
        fig.savefig(os.path.join(args.save, 'EnhancementWeights.jpg'), dpi=fig.dpi)
        plt.close()
        #------------------------------------------
        # Spatial feature visualization
        #------------------------------------------
        # Channel-mean, max-normalised activation map per frame.
        headmap = feat.detach().cpu().numpy()
        headmap = np.mean(headmap, axis=1)
        headmap /= np.max(headmap)
        headmap = torch.from_numpy(headmap)
        b, c, t, h, w = inputs.shape
        inputs = inputs.permute(2, 0, 1, 3, 4) #.view(t, b, c, h, w)
        # One 4x4 grid of (up to) 16 batch samples per time step.
        imgs = []
        for img in inputs:
            img = make_grid(img[:16], nrow=4, padding=2).unsqueeze(0)
            imgs.append(img)
        imgs = torch.cat(imgs)
        b, t, h, w = headmap.shape
        headmap = headmap.permute(1, 0, 2, 3).unsqueeze(2) # .view(t, b, 1, h, w)
        heatmaps = []
        for heat in headmap:
            heat = make_grid(heat[:16], nrow=4, padding=2)[0].unsqueeze(0)
            heatmaps.append(heat)
        heatmaps = torch.cat(heatmaps)
        # feat = model.feat
        # headmap = feat[0,:].detach().cpu().numpy()
        # headmap = np.mean(headmap, axis=0)
        # headmap /= np.max(headmap)  # torch.Size([64, 7, 7])
        # headmap = torch.from_numpy(headmap)
        # img = inputs[0]
        # Blend each activation grid onto the matching input grid (40/60 mix).
        result_gif, result = [], []
        for cam, mg in zip(heatmaps.unsqueeze(1), imgs.permute(0,2,3,1)):
            # cam = torch.argmax(weight_softmax[0]).detach().cpu().dot(cam)
            cam = cv2.resize(cam.squeeze().cpu().numpy(), (mg.shape[0]//2, mg.shape[1]//2))
            cam = np.uint8(255 * cam)
            cam = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
            mg = np.uint8(mg.cpu().numpy() * 128 + 127.5)
            mg = cv2.resize(mg, (mg.shape[0]//2, mg.shape[1]//2))
            superimposed_img = cv2.addWeighted(mg, 0.4, cam, 0.6, 0)
            result_gif.append(Image.fromarray(superimposed_img))
            result.append(torch.from_numpy(superimposed_img).unsqueeze(0))
        superimposed_imgs = torch.cat(result).permute(0, 3, 1, 2)
        # save_image(superimposed_imgs, os.path.join(PREDICTIONS_PATH, "Accuracy-Precision_{}.png".format(idx)), dpi=fig.dpi)
        superimposed_imgs = make_grid(superimposed_imgs, nrow=int(superimposed_imgs.size(0) ** 0.5), padding=2).permute(1,2,0)
        cv2.imwrite(os.path.join(args.save, 'CAM-Features.png'), superimposed_imgs.numpy())
        # save augmentad frames as gif
        result_gif[0].save(os.path.join(args.save, 'CAM-Features.gif'), save_all=True, append_images=result_gif[1:], duration=100, loop=0)
    if args.eval_only and args.visdom['enable']:
        MHAS_s, MHAS_m, MHAS_l = feature[-2]
        MHAS_s, MHAS_m, MHAS_l = MHAS_s.detach().cpu(), MHAS_m.detach().cpu(), MHAS_l.detach().cpu()
        # Normalize
        att_max, index_max = torch.max(MHAS_s.view(MHAS_s.size(0), -1), dim=-1)
        att_min, index_min = torch.min(MHAS_s.view(MHAS_s.size(0), -1), dim=-1)
        MHAS_s = (MHAS_s - att_min.view(-1, 1, 1))/(att_max.view(-1, 1, 1) - att_min.view(-1, 1, 1))
        att_max, index_max = torch.max(MHAS_m.view(MHAS_m.size(0), -1), dim=-1)
        att_min, index_min = torch.min(MHAS_m.view(MHAS_m.size(0), -1), dim=-1)
        MHAS_m = (MHAS_m - att_min.view(-1, 1, 1))/(att_max.view(-1, 1, 1) - att_min.view(-1, 1, 1))
        att_max, index_max = torch.max(MHAS_l.view(MHAS_l.size(0), -1), dim=-1)
        att_min, index_min = torch.min(MHAS_l.view(MHAS_l.size(0), -1), dim=-1)
        MHAS_l = (MHAS_l - att_min.view(-1, 1, 1))/(att_max.view(-1, 1, 1) - att_min.view(-1, 1, 1))
        mhas_s = make_grid(MHAS_s.unsqueeze(1), nrow=int(MHAS_s.size(0) ** 0.5), padding=2)[0]
        mhas_m = make_grid(MHAS_m.unsqueeze(1), nrow=int(MHAS_m.size(0) ** 0.5), padding=2)[0]
        mhas_l = make_grid(MHAS_l.unsqueeze(1), nrow=int(MHAS_l.size(0) ** 0.5), padding=2)[0]
        vis.featuremap('MHAS Map', mhas_l)
        fig = plt.figure(figsize=(20, 10))
        ax = fig.add_subplot(131)
        sns.heatmap(mhas_s.squeeze(), annot=False, fmt='g', ax=ax, yticklabels=False)
        ax.set_title('\nMHSA Small', fontsize=10)
        ax = fig.add_subplot(132)
        sns.heatmap(mhas_m.squeeze(), annot=False, fmt='g', ax=ax, yticklabels=False)
        ax.set_title('\nMHSA Medium', fontsize=10)
        ax = fig.add_subplot(133)
        sns.heatmap(mhas_l.squeeze(), annot=False, fmt='g', ax=ax, yticklabels=False)
        ax.set_title('\nMHSA Large', fontsize=10)
        plt.suptitle('{}'.format(v_path[0].split('/')[-1]), fontsize=20)
        fig.savefig('demo/{}-MHAS.jpg'.format(args.save.split('/')[-1]), dpi=fig.dpi)
        plt.close()
def feature_embedding(x, target, embedding_dict):
    """Gather per-sample embeddings across ranks and store them keyed by label.

    `x` is a (temp_out, target_out) pair. When temp_out is None the raw
    target_out tensor is gathered directly; otherwise the last-position
    embedding of every element in target_out is stacked and averaged first.
    Requires an initialized torch.distributed group (concat_all_gather).
    """
    temp_out, target_out = x
    if temp_out is None:
        gathered_feats = concat_all_gather(target_out)
    else:
        last_tokens = [item[-1].unsqueeze(-1) for item in target_out]
        pooled = torch.cat(last_tokens, dim=-1).mean(-1)
        gathered_feats = concat_all_gather(pooled)
    gathered_labels = concat_all_gather(target.cuda())
    for label, feat in zip(gathered_labels, gathered_feats):
        embedding_dict[label] = feat
| 21,656 | 41.216374 | 338 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/mixup.py | """
This file is modified from:
https://github.com/rwightman/pytorch-image-models/blob/main/timm/data/mixup.py
Mixup and Cutmix
Papers:
mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features (https://arxiv.org/abs/1905.04899)
Code Reference:
CutMix: https://github.com/clovaai/CutMix-PyTorch
Hacked together by / Copyright 2019, Ross Wightman
"""
import numpy as np
import torch
from einops import rearrange, repeat
import random
from .shufflemix import *
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
    """Encode labels `x` as an (N, num_classes) tensor filled with off_value
    and on_value written at each label's column."""
    labels = x.long().view(-1, 1)
    out = torch.full((labels.size(0), num_classes), off_value, device=device)
    return out.scatter_(1, labels, on_value)
def mixup_target(target, num_classes, lam=1., smoothing=0.0, device='cuda'):
    """Build soft targets: smoothed one-hot labels blended with the flipped
    batch's labels by weight `lam`."""
    off_value = smoothing / num_classes
    on_value = 1. - smoothing + off_value
    straight = one_hot(target, num_classes, on_value=on_value, off_value=off_value, device=device)
    flipped = one_hot(target.flip(0), num_classes, on_value=on_value, off_value=off_value, device=device)
    return straight * lam + flipped * (1. - lam)
def rand_bbox(img_shape, lam, margin=0., count=None):
    """ Standard CutMix bounding-box
    Generates a random square bbox based on lambda value. This impl includes
    support for enforcing a border margin as percent of bbox dimensions.
    Args:
        img_shape (tuple): Image shape as tuple
        lam (float): Cutmix lambda value
        margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
        count (int): Number of bbox to generate
    """
    img_h, img_w = img_shape[-2:]
    ratio = np.sqrt(1 - lam)
    cut_h = int(img_h * ratio)
    cut_w = int(img_w * ratio)
    margin_y = int(margin * cut_h)
    margin_x = int(margin * cut_w)
    # Draw the box centre first: y, then x (same RNG call order as before).
    cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
    cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
    half_h = cut_h // 2
    half_w = cut_w // 2
    yl = np.clip(cy - half_h, 0, img_h)
    yh = np.clip(cy + half_h, 0, img_h)
    xl = np.clip(cx - half_w, 0, img_w)
    xh = np.clip(cx + half_w, 0, img_w)
    return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=None):
    """ Min-Max CutMix bounding-box
    Generates a random rectangular bbox whose height and width are each drawn
    as a min/max fraction of the image dimension (Darknet-style cutmix).
    Args:
        img_shape (tuple): Image shape as tuple
        minmax (tuple or list): Min and max bbox ratios (as percent of image size)
        count (int): Number of bbox to generate
    """
    assert len(minmax) == 2
    img_h, img_w = img_shape[-2:]
    # Sample sizes first, then positions (same RNG call order as before).
    cut_h = np.random.randint(int(img_h * minmax[0]), int(img_h * minmax[1]), size=count)
    cut_w = np.random.randint(int(img_w * minmax[0]), int(img_w * minmax[1]), size=count)
    yl = np.random.randint(0, img_h - cut_h, size=count)
    xl = np.random.randint(0, img_w - cut_w, size=count)
    return yl, yl + cut_h, xl, xl + cut_w
def cutmix_bbox_and_lam(img_shape, lam, ratio_minmax=None, correct_lam=True, count=None):
    """ Generate bbox and apply lambda correction.
    Uses the min/max sampler when `ratio_minmax` is given, otherwise the
    standard lambda-area sampler; optionally recomputes lambda from the
    actual (possibly clipped) box area.
    """
    if ratio_minmax is None:
        yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
    else:
        yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
    if correct_lam or ratio_minmax is not None:
        bbox_area = (yu - yl) * (xu - xl)
        lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
    return (yl, yu, xl, xu), lam
class Mixup:
    """ Mixup/Cutmix that applies different params to each element or whole batch

    Args:
        mixup_alpha (float): mixup alpha value, mixup is active if > 0.
        cutmix_alpha (float): cutmix alpha value, cutmix is active if > 0.
        cutmix_minmax (List[float]): cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None.
        prob (float): probability of applying mixup or cutmix per batch or element
        switch_prob (float): probability of switching to cutmix instead of mixup when both are active
        mode (str): how to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)
        correct_lam (bool): apply lambda correction when cutmix bbox clipped by image borders
        label_smoothing (float): apply label smoothing to the mixed target tensor
        num_classes (int): number of classes for target
        args: experiment namespace. `args.epochs` is required; the batch-mode
            strategies additionally read `mixup_dynamic`, `epoch`, `smprob`,
            `shufflemix`, `smixmode`, `MixIntra`, `replace_prob`, `tempMix`
            and `mixup`.

    Fix vs. the original: `np.bool` (removed in NumPy 1.24) is replaced by
    the builtin `bool` when building the cutmix masks.
    """
    def __init__(self, mixup_alpha=1., cutmix_alpha=0., cutmix_minmax=None, prob=1.0, switch_prob=0.5,
                 mode='batch', correct_lam=True, label_smoothing=0.1, num_classes=1000, args=None):
        self.mixup_alpha = mixup_alpha
        self.cutmix_alpha = cutmix_alpha
        self.cutmix_minmax = cutmix_minmax
        if self.cutmix_minmax is not None:
            assert len(self.cutmix_minmax) == 2
            # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
            self.cutmix_alpha = 1.0
        self.mix_prob = prob
        self.switch_prob = switch_prob
        self.label_smoothing = label_smoothing
        self.num_classes = num_classes
        self.mode = mode
        self.correct_lam = correct_lam  # correct lambda based on clipped area for cutmix
        self.mixup_enabled = True  # set to false to disable mixing (intended to be set by train loop)
        # (sic) "bata_range": name kept for compatibility; per-epoch upper
        # bound used by the dynamic-mixup option.
        self.bata_range = torch.linspace(0.0, 0.5, steps=args.epochs)
        self.args = args

    def _params_per_elem(self, batch_size):
        """Sample one (lam, use_cutmix) pair per batch element."""
        lam = np.ones(batch_size, dtype=np.float32)
        use_cutmix = np.zeros(batch_size, dtype=bool)  # np.bool removed in NumPy 1.24
        if self.mixup_enabled:
            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
                # Both active: per element, flip a coin to decide the strategy.
                use_cutmix = np.random.rand(batch_size) < self.switch_prob
                lam_mix = np.where(
                    use_cutmix,
                    np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size),
                    np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size))
            elif self.mixup_alpha > 0.:
                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size)
            elif self.cutmix_alpha > 0.:
                use_cutmix = np.ones(batch_size, dtype=bool)  # np.bool removed in NumPy 1.24
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)
            else:
                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
            # Only mix each element with probability mix_prob.
            lam = np.where(np.random.rand(batch_size) < self.mix_prob, lam_mix.astype(np.float32), lam)
        return lam, use_cutmix

    def _params_per_batch(self):
        """Sample a single (lam, use_cutmix) pair shared by the whole batch."""
        lam = 1.
        use_cutmix = False
        if self.mixup_enabled and np.random.rand() < self.mix_prob:
            if self.mixup_alpha > 0. and self.cutmix_alpha > 0.:
                use_cutmix = np.random.rand() < self.switch_prob
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha) if use_cutmix else \
                    np.random.beta(self.mixup_alpha, self.mixup_alpha)
            elif self.mixup_alpha > 0.:
                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
            elif self.cutmix_alpha > 0.:
                use_cutmix = True
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
            else:
                assert False, "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
            lam = float(lam_mix)
        if self.args.mixup_dynamic:
            # Dynamic mixup: lambda upper bound grows linearly with the epoch.
            lam = np.random.uniform(0, float(self.bata_range[self.args.epoch]))
        return lam, use_cutmix

    def _mix_elem(self, x):
        """Mix every element of `x` in place with its mirror element; returns per-element lambdas."""
        batch_size = len(x)
        lam_batch, use_cutmix = self._params_per_elem(batch_size)
        x_orig = x.clone()  # need to keep an unmodified original for mixing source
        for i in range(batch_size):
            j = batch_size - i - 1
            lam = lam_batch[i]
            if lam != 1.:
                if use_cutmix[i]:
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
        return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)

    def _mix_pair(self, x):
        """Mix mirror pairs (i, n-1-i) of `x` in place; optionally swaps frames (ShuffleMix)."""
        batch_size = len(x)
        lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
        x_orig = x.clone()  # need to keep an unmodified original for mixing source
        for i in range(batch_size // 2):
            j = batch_size - i - 1
            lam = lam_batch[i]
            if lam != 1.:
                if use_cutmix[i]:
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        x[i].shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
                    x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    if np.random.rand() < self.args.smprob:
                        # ShuffleMix: exchange a (1-lam) fraction of the frames.
                        lam = np.random.beta(self.args.shufflemix, self.args.shufflemix)
                        if self.args.smixmode == 'sm':
                            replace_idx = random.sample(range(0, x.size(2)), round((1. - lam)*x.size(2)))
                            x[i, :, replace_idx, :, :] = x_orig[j, :, replace_idx, :, :]
                            flip_idx = [i for i in range(x.size(2)) if i not in replace_idx]
                            x[j, :, flip_idx, :, :] = x_orig[i, :, flip_idx, :, :]
                            lam_batch[i] = lam
                    else:
                        x[i] = x[i] * lam + x_orig[j] * (1 - lam)
                        x[j] = x[j] * lam + x_orig[i] * (1 - lam)
        lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
        return torch.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)

    def _mix_batch(self, x, target):
        """Mix the whole batch in place with its flipped copy.

        :param x: x.shape = (batch_size, 3, seq_len, 224, 224)
        """
        lam, use_cutmix = self._params_per_batch()
        if lam == 1.:
            return 1.
        if use_cutmix:
            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                x.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
            x[:, :, :, yl:yh, xl:xh] = x.flip(0)[:, :, :, yl:yh, xl:xh]
        else:
            if self.args.MixIntra:
                lam = MixIntra(x, lam, target, replace_prob=self.args.replace_prob)
                return lam
            if self.args.tempMix:
                lam = TempMix(x, self.args.mixup)
                return lam
            if np.random.rand() < self.args.smprob:
                lam = self._shufflemix_batch(x)
            else:
                # Classic mixup, done in place to avoid an extra copy.
                x_flipped = x.flip(0).mul_(1. - lam)
                x.mul_(lam).add_(x_flipped)
        return lam

    def _shufflemix_batch(self, x):
        """Dispatch to the configured ShuffleMix variant; returns the sampled lambda."""
        lam = np.random.beta(self.args.shufflemix, self.args.shufflemix)
        if self.args.smixmode == 'sm':
            ShuffleMix(x, lam)
        elif self.args.smixmode == 'sm_v1':
            ShuffleMix_v1(x, lam)
        elif self.args.smixmode == 'sm_v2':
            ShuffleMix_v2(x, lam)
        elif self.args.smixmode == 'sm_v3':
            ShuffleMix_v3(x, lam)
        elif self.args.smixmode == 'mu_sm':
            Mixup_ShuffleMix(x, lam)
        else:
            raise Exception(f'No ShuffleMix strategy {self.args.smixmode} be found !')
        return lam

    def __call__(self, x, target):
        """Mix `x` in place according to `self.mode` and return (x, soft targets)."""
        assert len(x) % 2 == 0, 'Batch size should be even when using this'
        if self.mode == 'elem':
            lam = self._mix_elem(x)
        elif self.mode == 'pair':
            lam = self._mix_pair(x)
        else:
            lam = self._mix_batch(x, target)
        if self.args.tempMix:
            # TempMix yields one lambda per frame: average the per-frame targets.
            targets = []
            for l in lam:
                tar = mixup_target(target, self.num_classes, l, self.label_smoothing, x.device)
                targets.append(tar.unsqueeze(-1))
            target = torch.cat(targets, dim=-1).transpose(1, 2)
            target = target.mean(1)
        else:
            target = mixup_target(target, self.num_classes, lam, self.label_smoothing, x.device)
        return x, target
class FastCollateMixup(Mixup):
    """ Fast Collate w/ Mixup/Cutmix that applies different params to each element or whole batch
    A Mixup impl that's performed while collating the batches.
    """
    def _mix_elem_collate(self, output, batch, half=False):
        """Mix each element into `output` (uint8) during collation.

        With half=True only the first half of the batch is produced, each
        element mixed with its mirror from the second half.
        """
        batch_size = len(batch)
        num_elem = batch_size // 2 if half else batch_size
        assert len(output) == num_elem
        lam_batch, use_cutmix = self._params_per_elem(num_elem)
        for i in range(num_elem):
            j = batch_size - i - 1
            lam = lam_batch[i]
            mixed = batch[i][0]
            if lam != 1.:
                if use_cutmix[i]:
                    if not half:
                        # Copy before pasting the patch so batch[i] itself
                        # stays intact for its own mirror's mix.
                        mixed = mixed.copy()
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
                    np.rint(mixed, out=mixed)
            output[i] += torch.from_numpy(mixed.astype(np.uint8))
        if half:
            # Unmixed second half keeps lambda == 1.
            lam_batch = np.concatenate((lam_batch, np.ones(num_elem)))
        return torch.tensor(lam_batch).unsqueeze(1)
    def _mix_pair_collate(self, output, batch):
        """Mix mirror pairs symmetrically into `output` during collation."""
        batch_size = len(batch)
        lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
        for i in range(batch_size // 2):
            j = batch_size - i - 1
            lam = lam_batch[i]
            mixed_i = batch[i][0]
            mixed_j = batch[j][0]
            assert 0 <= lam <= 1.0
            if lam < 1.:
                if use_cutmix[i]:
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
                    # Swap the patch between the two images.
                    patch_i = mixed_i[:, yl:yh, xl:xh].copy()
                    mixed_i[:, yl:yh, xl:xh] = mixed_j[:, yl:yh, xl:xh]
                    mixed_j[:, yl:yh, xl:xh] = patch_i
                    lam_batch[i] = lam
                else:
                    mixed_temp = mixed_i.astype(np.float32) * lam + mixed_j.astype(np.float32) * (1 - lam)
                    mixed_j = mixed_j.astype(np.float32) * lam + mixed_i.astype(np.float32) * (1 - lam)
                    mixed_i = mixed_temp
                    np.rint(mixed_j, out=mixed_j)
                    np.rint(mixed_i, out=mixed_i)
            output[i] += torch.from_numpy(mixed_i.astype(np.uint8))
            output[j] += torch.from_numpy(mixed_j.astype(np.uint8))
        lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
        return torch.tensor(lam_batch).unsqueeze(1)
    def _mix_batch_collate(self, output, batch):
        """Mix the whole batch with a single lambda during collation."""
        batch_size = len(batch)
        lam, use_cutmix = self._params_per_batch()
        if use_cutmix:
            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                output.shape, lam, ratio_minmax=self.cutmix_minmax, correct_lam=self.correct_lam)
        for i in range(batch_size):
            j = batch_size - i - 1
            mixed = batch[i][0]
            if lam != 1.:
                if use_cutmix:
                    mixed = mixed.copy()  # don't want to modify the original while iterating
                    mixed[:, yl:yh, xl:xh] = batch[j][0][:, yl:yh, xl:xh]
                else:
                    mixed = mixed.astype(np.float32) * lam + batch[j][0].astype(np.float32) * (1 - lam)
                    np.rint(mixed, out=mixed)
            output[i] += torch.from_numpy(mixed.astype(np.uint8))
        return lam
    def __call__(self, batch, _=None):
        """Collate `batch` (list of (np.uint8 image, label)) into mixed uint8
        tensors and matching soft targets."""
        batch_size = len(batch)
        assert batch_size % 2 == 0, 'Batch size should be even when using this'
        half = 'half' in self.mode
        if half:
            batch_size //= 2
        output = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8)
        if self.mode == 'elem' or self.mode == 'half':
            lam = self._mix_elem_collate(output, batch, half=half)
        elif self.mode == 'pair':
            lam = self._mix_pair_collate(output, batch)
        else:
            lam = self._mix_batch_collate(output, batch)
        target = torch.tensor([b[1] for b in batch], dtype=torch.int64)
        target = mixup_target(target, self.num_classes, lam, self.label_smoothing, device='cpu')
        target = target[:batch_size]
        return output, target
| 17,254 | 44.890957 | 120 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/visualizer.py | '''
This file is modified from:
https://github.com/zhoubenjia/RAAR3DNet/blob/master/Network_Train/utils/visualizer.py
'''
#coding: utf8
import numpy as np
import time
class Visualizer():
    """Thin convenience wrapper around a visdom server connection.

    Bundles the plotting helpers used during training (line plots, image
    grids, heatmaps, text logs).  Any attribute not defined here is
    delegated to the underlying ``visdom.Visdom`` instance via __getattr__.
    """
    def __init__(self, env='default', **kwargs):
        import visdom
        self.vis = visdom.Visdom(env=env, use_incoming_socket=False, **kwargs)

        self.index = {}        # per-window x-axis counters
        self.log_text = ''     # accumulated HTML shown by log()

    def reinit(self, env='defult', **kwargs):
        """Reconnect to visdom, optionally under a different environment."""
        self.vis = visdom.Visdom(env=env, use_incoming_socket=False, **kwargs)
        return self

    def plot_many(self, d, modality, epoch=None):
        """Plot every series in dict ``d`` as one multi-line window named ``modality``.

        When ``epoch`` is given it is used as the x value; otherwise an
        internal per-window counter is used and advanced.
        """
        colmu_stac = []
        for k, v in d.items():
            colmu_stac.append(np.array(v))
        if epoch:
            x = epoch
        else:
            x = self.index.get(modality, 0)
        # self.vis.line(Y=np.column_stack((np.array(dicts['loss1']), np.array(dicts['loss2']))),
        self.vis.line(Y=np.column_stack(tuple(colmu_stac)),
                      X=np.array([x]),
                      win=(modality),
                      # opts=dict(title=modality,legend=['loss1', 'loss2'], ylabel='loss value'),
                      opts=dict(title=modality, legend=list(d.keys()), ylabel='Value', xlabel='Iteration'),
                      update=None if x == 0 else 'append')
        if not epoch:
            self.index[modality] = x + 1

    def plot(self, name, y):
        """
        self.plot('loss',1.00)
        """
        x = self.index.get(name, 0)
        self.vis.line(Y=np.array([y]), X=np.array([x]),
                      win=(name),
                      opts=dict(title=name),
                      update=None if x == 0 else 'append'
                      )
        self.index[name] = x + 1

    def log(self, info, win='log_text'):
        """
        self.log({'loss':1,'lr':0.0001})
        """
        self.log_text += ('[{time}] {info} <br>'.format(
            time=time.strftime('%m.%d %H:%M:%S'),
            info=info))
        self.vis.text(self.log_text, win=win)

    def img_grid(self, name, input_3d, heatmap=False):
        """Show a grid of images built by the module-level show_image_grid helper."""
        # NOTE(review): `show_image_grid` is not defined in this view --
        # confirm it is provided elsewhere in the module.
        self.vis.images(
            # np.random.randn(20, 3, 64, 64),
            show_image_grid(input_3d, name, heatmap),
            win=name,
            opts=dict(title=name, caption='img_grid.')
        )

    def img(self, name, input):
        """Show a raw batch of images in window ``name``."""
        self.vis.images(
            input,
            win=name,
            opts=dict(title=name, caption='RGB Images.')
        )

    def draw_curve(self, name, data):
        """Plot a full curve (list of y values against their indices) at once."""
        self.vis.line(Y=np.array(data), X=np.array(range(len(data))),
                      win=(name),
                      opts=dict(title=name),
                      update=None
                      )

    def featuremap(self, name, input):
        """Render a 2-D tensor as a heatmap."""
        self.vis.heatmap(input, win=name, opts=dict(title=name))

    def draw_bar(self, name, inp):
        """Render ``inp`` (2-D array) as a stacked bar chart of absolute values."""
        self.vis.bar(
            X=np.abs(np.array(inp)),
            win=name,
            opts=dict(
                stacked=True,
                legend=list(map(str, range(inp.shape[-1]))),
                rownames=list(map(str, range(inp.shape[0])))
            )
        )

    def __getattr__(self, name):
        # Fall through to the wrapped visdom client for anything else.
        return getattr(self.vis, name)
| 3,190 | 29.980583 | 107 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/shufflemix.py | import numpy as np
import torch
import random
def Vmixup(x, lam):
    """Classic mixup with the batch-reversed clip, in place:
    x <- lam * x + (1 - lam) * flip(x, dim=0).
    """
    weighted_flip = x.flip(0).mul_(1. - lam)
    x.mul_(lam).add_(weighted_flip)
def ShuffleMix(x, lam):
    """Replace a random (1 - lam) fraction of frames with frames from the
    batch-reversed clip, in place.  ``x`` has shape [B, C, T, H, W].
    """
    flipped = x.flip(0)
    num_swap = round((1. - lam) * x.size(2))
    swap_idx = random.sample(range(0, x.size(2)), num_swap)
    x[:, :, swap_idx, :, :] = flipped[:, :, swap_idx, :, :]
def ShuffleMix_v1(x, lam):
    """Swap an evenly-strided run of frames between ``x`` and its
    batch-reversed copy, in place.  ``x`` has shape [B, C, T, H, W].

    Candidate indices are every other frame (random even/odd phase); the
    number of swapped frames is min(v_len, T - v_len) with
    v_len = int(T * (1 - lam)), i.e. never more than half the clip.
    """
    x_flipped = x.flip(0)
    length = x.size(2)
    start = random.sample([0, 1], 1)[0]  # random phase: even or odd indices
    a = torch.arange(start, length, step=2)
    v_len = int(length*(1. - lam))
    replace_num = min(v_len, length-v_len)
    if len(a)-replace_num:
        # random window within the strided candidate indices
        b = random.sample(range(0, len(a)-replace_num), 1)[0]
    else:
        b = 0
    replace_idx = a[b:b+replace_num]
    if v_len <= length-v_len:
        # fewer flipped frames than original ones: write them into x
        x[:, :, replace_idx, :, :] = x_flipped[:, :, replace_idx, :, :]
    else:
        # majority should come from the flipped clip: patch the original
        # frames into the flipped copy, then take the whole flipped clip
        x_flipped[:, :, replace_idx, :, :] = x[:, :, replace_idx, :, :]
        replace_idx = torch.arange(0, x.size(2), step=1)
        x[:, :, replace_idx, :, :] = x_flipped[:, :, replace_idx, :, :]
def ShuffleMix_v2(x, lam):
    """Overwrite the tail of each clip with a contiguous window of the
    batch-reversed clip, in place.  ``x`` has shape [B, C, T, H, W].

    ``length = max(1, int((1 - lam) * T))`` frames are copied from a random
    contiguous window of the flipped batch into the last ``length`` frame
    slots of ``x``.

    Bug fix: when length == T the original did ``x = x_flipped``, which only
    rebound the local name and left the caller's tensor untouched; the data
    is now copied into ``x`` in place.
    """
    x_flipped = x.flip(0)
    length = max(1, int((1. - lam) * x.size(2)))
    if x.size(2) != length:
        # pick a random source window of `length` frames in the flipped clip
        start = random.sample(range(0, x.size(2) - length), 1)[0]
        replace_idx = torch.arange(start, start + length, step=1)
        x[:, :, -len(replace_idx):] = x_flipped[:, :, replace_idx, :, :]
    else:
        # Whole clip comes from the flipped batch: mutate in place so the
        # caller actually observes the change.
        x.copy_(x_flipped)
def ShuffleMix_v3(x, lam):
    """Concatenate a uniformly-sampled subset of original frames with a
    uniformly-sampled subset of batch-reversed frames, in place.

    NOTE(review): `uniform_sampling` is not defined or imported in this
    module -- calling this function as-is raises NameError; confirm the
    helper exists at call time.
    """
    x_flipped = x.flip(0)
    length = int((1. - lam)*x_flipped.size(2))
    # if length:
    #     x[:, :, -length:, :, :] = x_flipped[:, :, :length, :, :]
    # sample `length` frames from the flipped clip ...
    uni_idx = uniform_sampling(x_flipped.size(2), length, random=True)
    x1 = x_flipped[:, :, uni_idx, :, :]
    # ... and T - length frames from the original clip
    length = x.size(2) - length
    uni_idx = uniform_sampling(x.size(2), length, random=True)
    x2 = x[:, :, uni_idx, :, :]
    # original frames first, flipped frames last
    x_cat = torch.cat((x2, x1), dim=2)
    replace_idx = torch.arange(0, x.size(2), step=1)
    x[:, :, replace_idx, :, :] = x_cat[:, :, replace_idx, :, :]

    # uni_idx = uniform_sampling(x_flipped.size(2), length, random=True)
    # x1 = x_flipped[:, :, uni_idx, :, :]
    # length = x.size(2) - length
    # uni_idx = uniform_sampling(x.size(2), length, random=True)
    # x2 = x[:, :, uni_idx, :, :]

    # start = random.sample([0, length-1], 1)[0]
    # if start == 0:
    #     x_cat = torch.cat((x1, x2), dim=2)
    # else:
    #     x_cat = torch.cat((x2, x1), dim=2)
    # # x_cat = torch.cat((x2, x1), dim=2)
    # assert x.size(2) == x_flipped.size(2), f'x size {x.size(2)} must match with raw size {x_flipped.size(2)}'
    # replace_idx = torch.arange(0, x.size(2), step=1)
    # x[:, :, replace_idx, :, :] = x_cat[:, :, replace_idx, :, :]
def Mixup_ShuffleMix(x, lam):
    """Mixup the batch with its reversed copy, then hard-replace a random
    (1 - lam) share of frames with the (already scaled) flipped frames.
    In place on ``x`` of shape [B, C, T, H, W].
    """
    scaled_flip = x.flip(0).mul_(1. - lam)
    x.mul_(lam).add_(scaled_flip)
    swap_idx = random.sample(range(0, x.size(2)), int((1. - lam) * x.size(2)))
    x[:, :, swap_idx, :, :] = scaled_flip[:, :, swap_idx, :, :]
def TempMix(x, lam):
    """Per-frame mixup: draw an independent Beta(lam, lam) coefficient for
    every temporal index and blend with the batch-reversed clip, in place.
    Returns the 1-D coefficient tensor (length T).
    """
    coeffs = np.random.beta(lam, lam, size=x.size(2))
    coeffs = torch.from_numpy(coeffs)[None, None, :, None, None].cuda()
    scaled_flip = x.flip(0).mul_(1. - coeffs)
    x.mul_(coeffs).add_(scaled_flip)
    return coeffs.squeeze()
def ShuffleMix_plus(x, lam, smprob):
    """Mixup with per-frame coefficients: a random ``smprob`` fraction of
    frames use 1-lam instead of lam, in place.  Returns the mean coefficient.
    """
    # x: torch.Size([16, 3, 16, 224, 224])
    frame_lam = torch.tensor(lam).view(-1).expand(x.size(2)).clone().cuda()
    inverse = 1.0 - frame_lam
    swap_idx = random.sample(range(0, x.size(2)), round(smprob * x.size(2)))
    frame_lam[swap_idx] = inverse[swap_idx]
    inverse = 1.0 - frame_lam
    scaled_flip = x.flip(0).mul_(inverse.view(1, 1, -1, 1, 1))
    x.mul_(frame_lam.view(1, 1, -1, 1, 1)).add_(scaled_flip)
    return frame_lam.mean()
def MixIntra(x, lam, target, replace_prob):
    """Replace a random ``replace_prob`` fraction of frames in each clip with
    frames from a random clip of the SAME class, in place.

    NOTE(review): ``lam`` is unused and the function always returns the
    hard-coded value 0.9 -- confirm callers expect a constant lambda here.
    """
    # gather from all gpus
    batch_size_this = x.shape[0]
    # x_gather = concat_all_gather(x)
    # target_gather = concat_all_gather(target)

    # batch_size_all = x_gather.shape[0]
    # num_gpus = batch_size_all // batch_size_this

    # group sample indices by label, then pick one same-class donor per sample
    labes = np.unique(target.cpu().numpy())
    label_dict = dict([(t, []) for t in labes])
    for idx, t in enumerate(target.tolist()):
        label_dict[t].append(idx)
    indx_list = [random.choice(label_dict[t]) for t in target.tolist()]
    x_intra = x[indx_list]
    replace_idx = random.sample(range(0, x.size(2)), round(replace_prob*x.size(2)))
    x[:, :, replace_idx, :, :] = x_intra[:, :, replace_idx, :, :]

    # x_flipped = x_gather.flip(0).mul_(1. - lam)
    # x_gather.mul_(lam).add_(x_flipped)

    # random shuffle index
    # idx_shuffle = torch.arange(batch_size_all).cuda()

    # # broadcast tensor to all gpus
    # torch.distributed.broadcast(idx_shuffle, src=0)

    # # shuffled index for this gpu
    # gpu_idx = torch.distributed.get_rank()
    # idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
    # x[torch.arange(batch_size_this)] = x_gather[idx_this]
    return 0.9
| 5,200 | 35.626761 | 127 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/__init__.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
from .print_function import print_func
from .build import *
from .evaluate_metric import EvaluateMetric
from .utils import *
from .mixup import Mixup
| 214 | 20.5 | 54 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/utils/build.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
import math
import torch.nn.functional as F
# from .utils import cosine_scheduler
import matplotlib.pyplot as plt
import numpy as np
class LabelSmoothingCrossEntropy(torch.nn.Module):
    """Cross-entropy with uniform label smoothing.

    loss = smoothing * (mean negative log-prob over classes)
         + (1 - smoothing) * NLL of the true class
    """

    def __init__(self, smoothing: float = 0.1,
                 reduction="mean", weight=None):
        super(LabelSmoothingCrossEntropy, self).__init__()
        self.smoothing = smoothing
        self.reduction = reduction
        self.weight = weight

    def reduce_loss(self, loss):
        # Apply the configured reduction ('mean' / 'sum' / anything else = none).
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss

    def linear_combination(self, x, y):
        # Blend the smoothed (uniform) term with the plain NLL term.
        return self.smoothing * x + (1 - self.smoothing) * y

    def forward(self, preds, target):
        assert 0 <= self.smoothing < 1
        if self.weight is not None:
            self.weight = self.weight.to(preds.device)
        num_classes = preds.size(-1)
        log_probs = F.log_softmax(preds, dim=-1)
        smooth_term = self.reduce_loss(-log_probs.sum(dim=-1))
        nll_term = F.nll_loss(
            log_probs, target, reduction=self.reduction, weight=self.weight
        )
        return self.linear_combination(smooth_term / num_classes, nll_term)
def build_optim(args, model):
    """Construct the optimizer selected by ``args.optim``.

    'SGD' uses ``args.momentum`` and ``args.weight_decay``; 'Adam' and
    'AdamW' only use ``args.learning_rate``.

    Raises:
        ValueError: if ``args.optim`` names an unsupported optimizer.
            (The original fell through and crashed with NameError on the
            ``return optimizer`` line.)
    """
    if args.optim == 'SGD':
        return torch.optim.SGD(
            model.parameters(),
            args.learning_rate,
            momentum=args.momentum,
            weight_decay=args.weight_decay
        )
    if args.optim == 'Adam':
        return torch.optim.Adam(
            model.parameters(),
            lr=args.learning_rate
        )
    if args.optim == 'AdamW':
        return torch.optim.AdamW(
            model.parameters(),
            lr=args.learning_rate
        )
    raise ValueError(
        f"Unsupported optimizer {args.optim!r}: expected 'SGD', 'Adam' or 'AdamW'")
#
def build_scheduler(args, optimizer):
    """Create the LR scheduler(s) named in ``args.scheduler``.

    Returns a tuple: ``(scheduler,)`` or ``(scheduler, warmup_schedule)``
    when ``args.scheduler['warm_up_epochs'] > 0``.

    Raises:
        NameError: if the scheduler name is neither 'cosin' nor 'ReduceLR'.
    """
    if args.scheduler['name'] == 'cosin':
        # Cosine annealing over the epochs remaining after warm-up.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, float(args.epochs-args.scheduler['warm_up_epochs']), eta_min=args.learning_rate_min)
    elif args.scheduler['name'] == 'ReduceLR':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1,
                                                               patience=args.scheduler['patience'], verbose=True,
                                                               threshold=0.0001,
                                                               threshold_mode='rel', cooldown=3, min_lr=0.00001,
                                                               eps=1e-08)
    else:
        raise NameError('build scheduler error!')
    if args.scheduler['warm_up_epochs'] > 0:
        # NOTE(review): indexing the linspace by epoch only works while
        # epoch < warm_up_epochs -- confirm callers respect that bound.
        warmup_schedule = lambda epoch: np.linspace(1e-8, args.learning_rate, args.scheduler['warm_up_epochs'])[epoch]
        return (scheduler, warmup_schedule)
    return (scheduler,)
def build_loss(args):
    """Select the training criterion described by ``args.loss``.

    ``args.loss`` is a dict with keys 'name' and 'labelsmooth'; 'CE' with
    labelsmooth enabled maps to the label-smoothing variant.
    """
    catalogue = dict(
        CE_smooth=LabelSmoothingCrossEntropy(),
        CE=torch.nn.CrossEntropyLoss(),
        MSE=torch.nn.MSELoss(),
        BCE=torch.nn.BCELoss(),
        SoftCE=SoftTargetCrossEntropy(),
        TempLoss=TempoLoss(),
    )
    name = args.loss['name']
    if name == 'CE' and args.loss['labelsmooth']:
        return catalogue['CE_smooth']
    return catalogue[name]
class SoftTargetCrossEntropy(torch.nn.Module):
    """Cross-entropy against soft (probability-distribution) targets."""

    def __init__(self, args=None):
        # keep the original attribute-before-super() order
        self.args = args
        super(SoftTargetCrossEntropy, self).__init__()

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Mean over the batch of -sum(target * log_softmax(x))."""
        per_sample = (-target * F.log_softmax(x, dim=-1)).sum(dim=-1)
        return per_sample.mean()
class TempoLoss(torch.nn.Module):
    """Soft cross-entropy averaged over the temporal dimension.

    Input ``x`` and ``target`` have shape (batch, time, classes); each time
    step is scored independently and the per-step means are averaged.
    """

    def __init__(self):
        super(TempoLoss, self).__init__()

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        num_steps = x.size(1)
        total = 0.0
        for step in range(num_steps):
            logits, soft = x[:, step, :], target[:, step, :]
            total += torch.sum(-soft * F.log_softmax(logits, dim=-1), dim=-1).mean()
        return total / num_steps
class RCM_loss(torch.nn.Module):
    """Temperature-scaled distillation loss between temporal-attention
    outputs and accumulated target attention maps.

    ``forward`` receives a pair (temp_out, target_out) and returns the mean
    soft cross-entropy (teacher softened by ``args.temper``) over the
    temp_out entries.
    """
    def __init__(self, args, model: torch.nn.Module):
        super(RCM_loss, self).__init__()
        self.args = args
    def forward(self, x):
        temp_out, target_out = x
        distill_loss = torch.tensor(0.0).cuda()
        for i, temp_w in enumerate(temp_out):
            # target_weight = self.dtn.multi_scale_transformers[i+3].transformer_enc_media.layers[-1][0].fn.scores
            # target_weight = target_weight.mean(1).mean(1)[:, 1:]
            # target_weight = target_weight.mean(1)[:, 0, 1:]
            # target_weight = self.dtn.multi_scale_transformers[i].class_embedding
            # target_weight = self.dtn.multi_scale_transformers[1][2].layers[i][0].fn.scores
            # # # target_weight = target_weight.mean(1).mean(1)
            # target_weight = target_weight.mean(1).mean(1)[:, 1:]

            # Teacher: sum the aligned trailing entries of every target branch.
            target_weight = torch.zeros_like(target_out[0][0])
            for j in range(len(target_out)):
                target_weight += target_out[j][-(len(temp_out)-i)]
            T = self.args.temper
            # distill_loss += F.kl_div(F.log_softmax(temp_w / T, dim=-1),
            #                      F.log_softmax(target_weight.detach() / T, dim=-1),
            #                      reduction='sum')
            # # distill_loss += self.MSE(temp_w, F.softmax(target_weight.detach(), dim=-1))

            # Soft cross-entropy against the temperature-softened teacher.
            target_weight = torch.softmax(target_weight / T, dim=-1)
            distill_loss += torch.sum(-target_weight * F.log_softmax(temp_w / T, dim=-1), dim=-1).mean()
        return distill_loss/len(temp_out)
| 5,901 | 37.575163 | 118 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/data/data_preprose_for_NTU.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
NTU-RGBD Data preprocessing function
'''
import cv2
from PIL import Image
import numpy as np
import os, glob, re
import argparse
import csv
import random
from tqdm import tqdm
from multiprocessing import Process
import shutil
from multiprocessing import Pool, cpu_count
def resize_pos(center, src_size, tar_size):
    """Map an (x, y) point from a ``src_size`` (h, w) image into the
    coordinates of a ``tar_size`` (h, w) image.
    """
    x, y = center
    src_h, src_w = src_size[0], src_size[1]
    tar_h, tar_w = tar_size[0], tar_size[1]
    mapped_y = int((tar_h / src_h) * y)
    mapped_x = int((tar_w / src_w) * x)
    return (mapped_x, mapped_y)
def video2image_with_mask(v_p):
    """Decompress one NTU-RGBD video into per-frame JPEGs, cropping each
    frame around the person found via the matching depth mask.

    For every frame: load the depth mask 'MDepth-%08d.png', find the largest
    contours, derive a crop window of half the frame size anchored at the
    subject, and save the cropped RGB frame to 'NTU-RGBD-images/<name>/'.
    """
    m_path='nturgb+d_depth_masked/'
    img_path = os.path.join('NTU-RGBD-images', v_p[:-4].split('/')[-1])
    if not os.path.exists(img_path):
        os.makedirs(img_path)
    cap = cv2.VideoCapture(v_p)
    suc, frame = cap.read()
    frame_count = 1  # mask files are 1-indexed (MDepth-00000001.png)
    while suc:
        # frame resolution: [1920, 1080]
        mask_path = os.path.join(m_path, v_p[:-8].split('/')[-1], 'MDepth-%08d.png'%frame_count)
        assert os.path.isfile(mask_path), FileNotFoundError('[error], file not found.')
        mask = cv2.imread(mask_path)
        mask = mask*255
        w, h, c = mask.shape
        h2, w2, _ = frame.shape
        ori = frame
        # work at mask resolution for the contour search
        frame = cv2.resize(frame, (h, w))
        h1, w1, _ = frame.shape
        # image = cv2.add(frame, mask)

        # find contour: clean the mask up before contour extraction
        mask = cv2.erode(mask, np.ones((3, 3),np.uint8))
        mask = cv2.dilate(mask ,np.ones((10, 10),np.uint8))
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Find Max Maxtri: keep only contours with a meaningful area
        Idx = []
        for i in range(len(contours)):
            Area = cv2.contourArea(contours[i])
            if Area > 500:
                Idx.append(i)
        # max_idx = np.argmax(area)

        # anchor = the top-left-most min-area-rect center among kept contours
        centers = []
        for i in Idx:
            rect = cv2.minAreaRect(contours[i])
            center, (h, w), degree = rect
            centers.append(center)
        finall_center = np.int0(np.array(centers))
        c_x = min(finall_center[:, 0])
        c_y = min(finall_center[:, 1])
        center = (c_x, c_y)

        # finall_center = finall_center.sum(0)/len(finall_center)
        # rect = cv2.minAreaRect(contours[max_idx])
        # center, (h, w), degree = rect
        # center = tuple(np.int0(finall_center))

        # map the anchor back to the original frame resolution
        center_new = resize_pos(center, (h1, w1), (h2, w2))

        #-----------------------------------
        # Image Crop
        #-----------------------------------
        # ori = cv2.circle(ori, center_new, 2, (0, 0, 255), 2)
        # crop window = half the original frame, clamped to the image bounds
        crop_y, crop_x = h2//2, w2//2
        # print(crop_x, crop_y)
        left = center_new[0] - crop_x//2 if center_new[0] - crop_x//2 > 0 else 0
        top = center_new[1] - crop_y//2 if center_new[1] - crop_y//2 > 0 else 0
        # ori = cv2.circle(ori, (left, top), 2, (0, 0, 255), 2)
        # cv2.imwrite('demo/ori.png', ori)

        crop_w = left + crop_x if left + crop_x < w2 else w2
        crop_h = top + crop_y if top + crop_y < h2 else h2
        rect = (left, top, crop_w, crop_h)
        image = Image.fromarray(cv2.cvtColor(ori, cv2.COLOR_BGR2RGB))
        image = image.crop(rect)
        image.save('{}/{:0>6d}.jpg'.format(img_path, frame_count))

        # box = cv2.boxPoints(rect)
        # box = np.int0(box)
        # drawImage = frame.copy()
        # drawImage = cv2.drawContours(drawImage, [box], 0, (255, 0, 0), -1)  # draw one contour
        # cv2.imwrite('demo/drawImage.png', drawImage)

        # frame = cv2.circle(frame, center, 2, (0, 255, 255), 2)
        # cv2.imwrite('demo/Image.png', frame)
        # cv2.imwrite('demo/mask.png', mask)
        # ori = cv2.circle(ori, center_new, 2, (0, 0, 255), 2)
        # cv2.imwrite('demo/ORI.png', ori)
        # cv2.imwrite('demo/maskImage.png', image)

        # cv2.imwrite('{}/{:0>6d}.jpg'.format(img_path, frame_count), frame)
        frame_count += 1
        suc, frame = cap.read()
    cap.release()
def video2image(v_p):
    """Decompress one video into JPEG frames under the mirrored
    'NTU-RGBD-images' directory tree (frames numbered from 0).
    """
    frames_dir = v_p[:-4].replace('NTU-RGBD', 'NTU-RGBD-images')
    os.makedirs(frames_dir, exist_ok=True)
    cap = cv2.VideoCapture(v_p)
    ok, frame = cap.read()
    idx = 0
    while ok:
        cv2.imwrite('{}/{:0>6d}.jpg'.format(frames_dir, idx), frame)
        idx += 1
        ok, frame = cap.read()
    cap.release()
def GeneratLabel(v_p):
    """Append one 'name frame_count label' line for video ``v_p`` to the
    train or valid split file.

    Relies on the module-level ``args`` (proto, trainTXT, validTXT) being
    populated by ``generate_label`` before this runs.
    """
    path = v_p[:-4].split('/')[-1]
    cap = cv2.VideoCapture(v_p)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # action id is encoded as 'A###' in the NTU filename; make it 0-based
    label = int(v_p.split('A')[-1][:3])-1
    txt = ' '.join(map(str, [path, frame_count, label, '\n']))
    if args.proto == '@CV':
        # cross-view protocol: camera 1 clips go to validation
        if 'C001' in v_p:
            with open(args.validTXT, 'a') as vf:
                vf.writelines(txt)
        else:
            with open(args.trainTXT, 'a') as tf:
                tf.writelines(txt)
    elif args.proto == '@CS':
        # cross-subject protocol: fixed training-subject id list
        pattern = re.findall(r'P\d+', v_p)
        if int(pattern[0][1:]) in [1, 2, 4, 5, 8, 9, 13, 14, 15,16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38]:
            with open(args.trainTXT, 'a') as tf:
                tf.writelines(txt)
        else:
            with open(args.validTXT, 'a') as vf:
                vf.writelines(txt)
def ResizeImage(v_p):
    """Resize every extracted frame of video ``v_p`` to 320x240 and write
    the results into the mirrored 'NTU-RGBD-resized-images' tree.
    """
    frames_dir = v_p[:-4].replace('NTU-RGBD', 'NTU-RGBD-images')
    out_dir = frames_dir.replace('NTU-RGBD-images', 'NTU-RGBD-resized-images')
    os.makedirs(out_dir, exist_ok=True)
    for name in os.listdir(frames_dir):
        frame = cv2.imread(os.path.join(frames_dir, name))
        cv2.imwrite(os.path.join(out_dir, name), cv2.resize(frame, (320, 240)))
#---------------------------------------------
# Generate label .txt
#---------------------------------------------
def generate_label(Video_paths, test_first=True):
    """Write train/valid split files for the selected protocol.

    Removes any existing split files, then runs ``GeneratLabel`` over all
    videos with a worker pool.  Stores the split file paths on the
    module-level ``args`` so the workers can see them.
    """
    trainTXT = os.path.join(args.data_root, 'dataset_splits', args.proto, 'train.txt')
    validTXT = os.path.join(args.data_root, 'dataset_splits', args.proto, 'valid.txt')
    args.trainTXT = trainTXT
    args.validTXT = validTXT

    # start from empty split files (GeneratLabel appends)
    if os.path.isfile(args.trainTXT):
        os.system('rm {}'.format(args.trainTXT))
    if os.path.isfile(args.validTXT):
        os.system('rm {}'.format(args.validTXT))

    if test_first:
        # smoke-test one video before fanning out to the pool
        print(f'Trying to create label for NTU-RGBD.')
        GeneratLabel(Video_paths[0])
    with Pool(20) as pool:
        for a in tqdm(pool.imap_unordered(GeneratLabel, Video_paths), total=len(Video_paths), desc='Processes'):
            if a is not None:
                pass
    print('Write file list done'.center(80, '*'))
#---------------------------------------------
# video --> Images
#---------------------------------------------
def video_decompre(Video_paths, test_first=False, with_mask=False):
    """Fan the video->frames conversion over a worker pool.

    ``with_mask`` selects the depth-mask-guided cropping variant;
    ``test_first`` runs the first video in-process as a smoke test.

    Bug fix: the worker function was only assigned inside the
    ``test_first`` branch, so the default call (test_first=False) crashed
    with NameError at the Pool.  The worker is now chosen unconditionally.
    """
    decompre_func = video2image_with_mask if with_mask else video2image
    if test_first:
        # smoke-test one video before fanning out to the pool
        print(f'Trying to decompress video {Video_paths[0]}')
        decompre_func(Video_paths[0])
    with Pool(20) as pool:
        for a in tqdm(pool.imap_unordered(decompre_func, Video_paths), total=len(Video_paths), desc='Processes'):
            if a is not None:
                pass
    print('Decompress video done'.center(80, '*'))
#---------------------------------------------
# Images size to (320, 240)
#---------------------------------------------
def image_resize(Video_paths, test_first=False):
    """Resize the extracted frames of every video using a worker pool.

    ``test_first`` runs the first video in-process as a smoke test.
    """
    if test_first:
        print(f'Trying to resize video {Video_paths[0]}')
        ResizeImage(Video_paths[0])
    with Pool(40) as pool:
        results = pool.imap_unordered(ResizeImage, Video_paths)
        for res in tqdm(results, total=len(Video_paths), desc='Processes'):
            if res is not None:
                pass
    print('Resize image done'.center(80, '*'))
parser = argparse.ArgumentParser()
parser.add_argument('--proto', default='@CS')  # protocol: @CS or @CV
parser.add_argument('--data-root', default='/mnt/workspace/Dataset/NTU-RGBD')
args = parser.parse_args()

# BUG FIX: the original referenced an undefined name `data_root`; the
# configured value lives on the parsed arguments as `args.data_root`.
Video_paths = glob.glob(os.path.join(args.data_root, 'nturgb+d_rgb/*.avi'))
assert len(Video_paths) > 0, FileNotFoundError('[error] The file path may be incorrect.')
print('Total videos: {}'.format(len(Video_paths)))

mask_paths = glob.glob(os.path.join(args.data_root, 'nturgb+d_depth_masked/*'))
assert len(mask_paths) > 0, FileNotFoundError('[error] The file path may be incorrect.')
print('Total Masks: {}'.format(len(mask_paths)))

# video compression
video_decompre(Video_paths, test_first=True, with_mask=True)

# Image resize
image_resize(Video_paths, test_first=True)

# create label file
generate_label(Video_paths, test_first=True)
| 8,756 | 34.597561 | 117 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/data/video2Images.py | import cv2
from PIL import Image
import numpy as np
import os, glob, re, sys
import argparse
import csv
import random
from tqdm import tqdm
from multiprocessing import Process
import shutil
from multiprocessing import Pool, cpu_count
def video2image(v_p):
    """Decompress one video into JPEG frames under args.img_path/<video name>
    (frames numbered from 0).  Relies on the module-level ``args``.
    """
    frames_dir = os.path.join(args.img_path, v_p.split('/')[-1][:-4])
    if not os.path.exists(frames_dir):
        os.makedirs(frames_dir)
    cap = cv2.VideoCapture(v_p)
    ok, frame = cap.read()
    idx = 0
    while ok:
        cv2.imwrite('{}/{:0>6d}.jpg'.format(frames_dir, idx), frame)
        idx += 1
        ok, frame = cap.read()
    cap.release()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--video-path', '-vp', default='')
    parser.add_argument('--save-path', '-sp', default='')
    parser.add_argument('--pool-num', '-p', default=1, type=int, help='Number of used multiple processes.')
    args = parser.parse_args()

    v_path = os.path.join(args.video_path, '*/*.mp4')
    videos = glob.glob(v_path)
    assert len(videos) >= 1, f'Please Check Video Path ! There are any videos in the dir {v_path}.'
    print('Total Videos: {}'.format(len(videos)))

    args.img_path = os.path.join(args.save_path, 'Images')
    # FIX: the original bound the answer to the name `str`, shadowing the
    # builtin for the rest of the script.
    answer = input(f'The video will be decompressed to: {args.img_path}, [y/n]')
    if answer == 'n':
        sys.exit(0)

    with Pool(args.pool_num) as pool:
        for a in tqdm(pool.imap_unordered(video2image, videos), total=len(videos), desc='Processes'):
            if a is not None:
                pass
    print('Decompressing done'.center(80, '*'))
# class UnsupportedFormat(Exception):
# def __init__(self, input_type):
# self.t = input_type
# def __str__(self):
# return "不支持'{}'模式的转换,请使用为图片地址(path)、PIL.Image(pil)或OpenCV(cv2)模式".format(self.t)
# class MatteMatting():
# def __init__(self, original_graph, mask_graph, input_type='cv2'):
# """
# 将输入的图片经过蒙版转化为透明图构造函数
# :param original_graph:输入的图片地址、PIL格式、CV2格式
# :param mask_graph:蒙版的图片地址、PIL格式、CV2格式
# :param input_type:输入的类型,有path:图片地址、pil:pil类型、cv2类型
# """
# if input_type == 'path':
# self.img1 = cv2.imread(original_graph)
# self.img2 = cv2.imread(mask_graph)
# elif input_type == 'pil':
# self.img1 = self.__image_to_opencv(original_graph)
# self.img2 = self.__image_to_opencv(mask_graph)
# elif input_type == 'cv2':
# self.img1 = original_graph
# self.img2 = mask_graph
# else:
# raise UnsupportedFormat(input_type)
# @staticmethod
# def __transparent_back(img):
# """
# :param img: 传入图片地址
# :return: 返回替换白色后的透明图
# """
# img = img.convert('RGBA')
# L, H = img.size
# color_0 = (255, 255, 255, 255) # 要替换的颜色
# for h in range(H):
# for l in range(L):
# dot = (l, h)
# color_1 = img.getpixel(dot)
# if color_1 == color_0:
# color_1 = color_1[:-1] + (0,)
# img.putpixel(dot, color_1)
# return img
# def save_image(self, path, mask_flip=False):
# """
# 用于保存透明图
# :param path: 保存位置
# :param mask_flip: 蒙版翻转,将蒙版的黑白颜色翻转;True翻转;False不使用翻转
# """
# if mask_flip:
# img2 = cv2.bitwise_not(self.img2) # 黑白翻转
# image = cv2.add(self.img1, img2)
# image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) # OpenCV转换成PIL.Image格式
# img = self.__transparent_back(image)
# img.save(path)
# @staticmethod
# def __image_to_opencv(image):
# """
# PIL.Image转换成OpenCV格式
# """
# img = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
# return img
# data_root = '/mnt/workspace/Dataset/UCF-101/'
# label_dict = dict([(lambda x: (x[1], int(x[0])-1))(l.strip().split(' ')) for l in open(data_root + 'dataset_splits/lableind.txt').readlines()])
# print(label_dict)
# def split_func(file_list):
# class_list = []
# fl = open(file_list).readlines()
# for d in tqdm(fl):
# path = d.strip().split()[0][:-4]
# label = label_dict[path.split('/')[0]]
# frame_num = len(os.listdir(os.path.join(data_root, 'UCF-101-images', path)))
# class_list.append([path, str(frame_num), str(label), '\n'])
# return class_list
# def save_list(file_list, file_name):
# with open(file_name, 'w') as f:
# class_list = split_func(file_list)
# for l in class_list:
# f.writelines(' '.join(l))
# prot = '@3'
# data_train_split = data_root + f'dataset_splits/{prot}/trainlist.txt'
# data_test_split = data_root + f'dataset_splits/{prot}/testlist.txt'
# train_file_name = data_root + f'dataset_splits/{prot}/train.txt'
# test_file_name = data_root + f'dataset_splits/{prot}/valid.txt'
# save_list(data_train_split, train_file_name)
# save_list(data_test_split, test_file_name) | 5,096 | 33.439189 | 145 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/__init__.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
from .datasets import *
from .model import * | 107 | 20.6 | 54 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/base.py | '''
This file is modified from:
https://github.com/zhoubenjia/RAAR3DNet/blob/master/Network_Train/lib/datasets/base.py
'''
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, set_image_backend
import torch.nn.functional as F
from PIL import Image
from PIL import ImageFilter, ImageOps
import os, glob
import math, random
import numpy as np
import logging
from tqdm import tqdm as tqdm
import pandas as pd
from multiprocessing import Pool, cpu_count
import multiprocessing as mp
import cv2
import json
from scipy.ndimage.filters import gaussian_filter
from timm.data.random_erasing import RandomErasing
# from vidaug import augmentors as va
from .augmentation import *
# import functools
import matplotlib.pyplot as plt # For graphics
from torchvision.utils import save_image, make_grid
np.random.seed(123)
class Normaliztion(object):
    """Scale pixel values from [0, 255] into [-1, 1], mxnet-style:
    (image - 127.5) / 128.
    """

    def __call__(self, Image):
        return (Image - 127.5) / 128
class Datasets(Dataset):
global kpt_dict
def __init__(self, args, ground_truth, modality, phase='train'):
self.dataset_root = args.data
self.sample_duration = args.sample_duration
self.sample_size = args.sample_size
self.phase = phase
self.typ = modality
self.args = args
self._w = args.w
if phase == 'train':
self.transform = transforms.Compose([
Normaliztion(),
transforms.ToTensor(),
RandomErasing(args.reprob, mode=args.remode, max_count=args.recount, num_splits=0, device='cpu')
])
else:
self.transform = transforms.Compose([Normaliztion(), transforms.ToTensor()])
self.inputs, self.video_apth = self.prepropose(ground_truth)
def prepropose(self, ground_truth, min_frames=16):
def get_data_list_and_label(data_df):
return [(lambda arr: (arr[0], int(arr[1]), int(arr[2])))(i[:-1].split(' '))
for i in open(data_df).readlines()]
self.inputs = list(filter(lambda x: x[1] > min_frames, get_data_list_and_label(ground_truth)))
self.inputs = list(self.inputs)
self.batch_check()
self.video_apth = dict([(self.inputs[i][0], i) for i in range(len(self.inputs))])
return self.inputs, self.video_apth
def batch_check(self):
if self.phase == 'train':
while len(self.inputs) % (self.args.batch_size * self.args.nprocs) != 0:
sample = random.choice(self.inputs)
self.inputs.append(sample)
else:
while len(self.inputs) % (self.args.test_batch_size * self.args.nprocs) != 0:
sample = random.choice(self.inputs)
self.inputs.append(sample)
def __str__(self):
if self.phase == 'train':
frames = [n[1] for n in self.inputs]
return 'Training Data Size is: {} \n'.format(len(self.inputs)) + 'Average Train Data frames are: {}, max frames: {}, min frames: {}\n'.format(sum(frames)//len(self.inputs), max(frames), min(frames))
else:
frames = [n[1] for n in self.inputs]
return 'Validation Data Size is: {} \n'.format(len(self.inputs)) + 'Average validation Data frames are: {}, max frames: {}, min frames: {}\n'.format(
sum(frames) // len(self.inputs), max(frames), min(frames))
def transform_params(self, resize=(320, 240), crop_size=224, flip=0.5):
if self.phase == 'train':
left, top = np.random.randint(0, resize[0] - crop_size), np.random.randint(0, resize[1] - crop_size)
is_flip = True if np.random.uniform(0, 1) < flip else False
else:
left, top = (resize[0] - crop_size) // 2, (resize[1] - crop_size) // 2
is_flip = False
return (left, top, left + crop_size, top + crop_size), is_flip
def rotate(self, image, angle, center=None, scale=1.0):
(h, w) = image.shape[:2]
if center is None:
center = (w / 2, h / 2)
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def get_path(self, imgs_path, a):
return os.path.join(imgs_path, "%06d.jpg" % a)
def depthProposess(self, img):
h2, w2 = img.shape
mask = img.copy()
mask = cv2.erode(mask, np.ones((3, 3), np.uint8))
mask = cv2.dilate(mask, np.ones((10, 10), np.uint8))
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Find Max Maxtri
Idx = []
for i in range(len(contours)):
Area = cv2.contourArea(contours[i])
if Area > 500:
Idx.append(i)
centers = []
for i in Idx:
rect = cv2.minAreaRect(contours[i])
center, (h, w), degree = rect
centers.append(center)
finall_center = np.int0(np.array(centers))
c_x = min(finall_center[:, 0])
c_y = min(finall_center[:, 1])
center = (c_x, c_y)
crop_x, crop_y = 320, 240
left = center[0] - crop_x // 2 if center[0] - crop_x // 2 > 0 else 0
top = center[1] - crop_y // 2 if center[1] - crop_y // 2 > 0 else 0
crop_w = left + crop_x if left + crop_x < w2 else w2
crop_h = top + crop_y if top + crop_y < h2 else h2
rect = (left, top, crop_w, crop_h)
image = Image.fromarray(img)
image = image.crop(rect)
return image
def image_propose(self, data_path, sl):
sample_size = self.sample_size
resize = eval(self.args.resize)
crop_rect, is_flip = self.transform_params(resize=resize, crop_size=self.args.crop_size, flip=self.args.flip)
if np.random.uniform(0, 1) < self.args.rotated and self.phase == 'train':
r, l = eval(self.args.angle)
rotated = np.random.randint(r, l)
else:
rotated = 0
sometimes = lambda aug: Sometimes(0.5, aug) # Used to apply augmentor with 50% probability
self.seq_aug = Sequential([
RandomResize(self.args.resize_rate),
RandomCrop(resize),
# RandomTranslate(self.args.translate, self.args.translate),
# sometimes(Salt()),
# sometimes(GaussianBlur()),
])
def transform(img):
img = np.asarray(img)
if img.shape[-1] != 3:
img = np.uint8(255 * img)
img = self.depthProposess(img)
img = cv2.applyColorMap(np.asarray(img), cv2.COLORMAP_JET)
img = self.rotate(np.asarray(img), rotated)
img = Image.fromarray(img)
if self.phase == 'train' and self.args.strong_aug:
img = self.seq_aug(img)
img = img.resize(resize)
img = img.crop(crop_rect)
if is_flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
return np.array(img.resize((sample_size, sample_size)))
def Sample_Image(imgs_path, sl):
frams = []
for a in sl:
ori_image = Image.open(self.get_path(imgs_path, a))
img = transform(ori_image)
frams.append(self.transform(img).view(3, sample_size, sample_size, 1))
if self.args.frp:
skgmaparr = DynamicImage(frams, dynamic_only=False) #[t, c, h, w]
else:
skgmaparr = torch.ones(*img.shape, 1)
return torch.cat(frams, dim=3).type(torch.FloatTensor), skgmaparr
def DynamicImage(frames, dynamic_only): # frames: [[3, 224, 224, 1], ]
def tensor_arr_rp(arr):
l = len(arr)
statics = []
def tensor_rankpooling(video_arr, lamb=1.):
def get_w(N):
return [float(i) * 2 - N - 1 for i in range(1, N + 1)]
re = torch.zeros(*video_arr[0].size()[:-1])
for a, b in zip(video_arr, get_w(len(video_arr))):
re += a.squeeze() * b
re = (re - re.min()) / (re.max() - re.min())
re = np.uint8(255 * np.float32(re.numpy())).transpose(1,2,0)
re = self.transform(np.array(re))
return re.unsqueeze(-1)
return [tensor_rankpooling(arr[i:i + self._w]) for i in range(l)]
arrrp = tensor_arr_rp(frames)
arrrp = torch.cat(arrrp[:-1], dim=-1).type(torch.FloatTensor)
return arrrp
return Sample_Image(data_path, sl)
def get_sl(self, clip):
    """Sample ``sn`` frame indices from a clip containing ``clip`` frames.

    The clip is split into ``sn`` equal segments: training picks a random
    frame inside each segment and shifts all indices by a random offset
    inside ``sample_window``; evaluation picks the middle (mean) frame of
    each segment with no shift.
    """
    # One extra frame is sampled when rank pooling (frp) is enabled, since
    # the dynamic-image computation consumes one frame of the window.
    sn = self.sample_duration if not self.args.frp else self.sample_duration+1
    if self.phase == 'train':
        # Random frame per segment.
        # NOTE(review): `arr == []` compares a range object with a list and
        # is therefore always False, so the `n` fallback is dead code —
        # confirm before relying on it for empty segments.
        f = lambda n: [(lambda n, arr: n if arr == [] else np.random.choice(arr))(
            n * i / sn,
            range(int(n * i / sn), max(int(n * i / sn) + 1, int(n * (i + 1) / sn))))
            for i in range(sn)]
    else:
        # Deterministic middle frame per segment.
        f = lambda n: [(lambda n, arr: n if arr == [] else int(np.mean(arr)))(
            n * i / sn,
            range(int(n * i / sn), max(int(n * i / sn) + 1, int(n * (i + 1) / sn))))
            for i in range(sn)]
    # Train-time temporal jitter: sample from a clip shortened by
    # sample_window, then shift every index by a random start offset.
    sample_clips = f(int(clip)-self.args.sample_window)
    # NOTE(review): `start` is drawn even in eval mode where it is unused;
    # harmless apart from consuming RNG state.
    start = random.sample(range(0, self.args.sample_window), 1)[0]
    if self.phase == 'train':
        return [l + start for l in sample_clips]
    else:
        return f(int(clip))
def __getitem__(self, index):
    """
    Args:
        index (int): Index
    Returns:
        tuple: (clip, skgmaparr, target, relative_path) — the frame tensor
        and the rank-pooled map, both with dims reordered via
        ``permute(0, 3, 1, 2)``, plus the class index and sample path.
        (``self.inputs[index]`` appears to be (path, frame_count, label) —
        confirmed by how the three fields are used below.)
    """
    # Frame indices to load for this sample.
    sl = self.get_sl(self.inputs[index][1])
    self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
    self.clip, skgmaparr = self.image_propose(self.data_path, sl)
    # Move the last (stacking) axis next to the channel axis for the model.
    return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]
def __len__(self):
    # One sample per annotated video entry.
    return len(self.inputs)
if __name__ == '__main__':
    # Smoke test: build the validation dataloader and step through it,
    # printing input shapes (press <Enter> to advance to the next batch).
    import argparse
    from config import Config
    from lib import *
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='', help='Place config Congfile!')
    parser.add_argument('--eval_only', action='store_true', help='Eval only. True or False?')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--nprocs', type=int, default=1)
    parser.add_argument('--save_grid_image', action='store_true', help='Save samples?')
    parser.add_argument('--save_output', action='store_true', help='Save logits?')
    parser.add_argument('--demo_dir', type=str, default='./demo', help='The dir for save all the demo')
    parser.add_argument('--drop_path_prob', type=float, default=0.5, help='drop path probability')
    parser.add_argument('--save', type=str, default='Checkpoints/', help='experiment name')
    parser.add_argument('--seed', type=int, default=123, help='random seed')
    args = parser.parse_args()
    # Config presumably merges the YAML file given by --config into the
    # argparse namespace — confirm against config.py.
    args = Config(args)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Force single-process, batch-size-1 evaluation for inspection.
    args.dist = False
    args.eval_only = True
    args.test_batch_size = 1
    valid_queue, valid_sampler = build_dataset(args, phase='val')
    for step, (inputs, heatmap, target, _) in enumerate(valid_queue):
        print(inputs.shape)
        input()
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/Jester.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
import logging
# import accimage
# set_image_backend('accimage')
np.random.seed(123)
class JesterData(Datasets):
    """Jester dataset loader built on the shared ``Datasets`` base class."""

    def __init__(self, args, ground_truth, modality, phase='train'):
        super(JesterData, self).__init__(args, ground_truth, modality, phase)

    def LoadKeypoints(self):
        """Load per-video 2D keypoints into an OpenPose-style dict.

        Each line of the ``*_kp.data`` file is a video id followed by a flat
        list of numbers; judging by the slice arithmetic below the layout is
        18 body joints then 21 joints per hand, two values (x, y) each.

        Returns:
            dict: absolute video path -> {'people': [{...keypoint lists...}]}
        """
        if self.phase == 'train':
            kpt_file = os.path.join(self.dataset_root, self.args.splits, 'train_kp.data')
        else:
            kpt_file = os.path.join(self.dataset_root, self.args.splits, 'valid_kp.data')
        with open(kpt_file, 'r') as f:
            # First token is the video id (expanded to a full path); the rest
            # are float-formatted coordinates truncated to int.  l[:-1]
            # strips the trailing newline.
            kpt_data = [(lambda arr: (os.path.join(self.dataset_root, self.typ, self.phase, arr[0]), list(map(lambda x: int(float(x)), arr[1:]))))(l[:-1].split()) for l in f.readlines()]
        kpt_data = dict(kpt_data)
        for k, v in kpt_data.items():
            # Split the flat list: 18*2 body values, then 21*2 per hand.
            pose = v[:18*2]
            r_hand = v[18*2: 18*2+21*2]
            l_hand = v[18*2+21*2: 18*2+21*2+21*2]
            # Re-pack in the OpenPose JSON layout expected downstream.
            kpt_data[k] = {'people': [{'pose_keypoints_2d': pose, 'hand_right_keypoints_2d': r_hand, 'hand_left_keypoints_2d': l_hand}]}
        logging.info('Load Keypoints files Done, Total: {}'.format(len(kpt_data)))
        return kpt_data

    def get_path(self, imgs_path, a):
        # Jester frames are 1-based and zero-padded to five digits.
        return os.path.join(imgs_path, "%05d.jpg" % int(a + 1))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (clip, skgmaparr, target, data_path) — the channel-first
            frame tensor, the dynamic-image map, the class index and the
            sample directory.
        """
        sl = self.get_sl(self.inputs[index][1])
        self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
        # self.clip = self.image_propose(self.data_path, sl)
        self.clip, skgmaparr = self.image_propose(self.data_path, sl)
        return self.clip.permute(0, 3, 1, 2), skgmaparr, self.inputs[index][2], self.data_path

    def __len__(self):
        return len(self.inputs)
| 2,115 | 37.472727 | 186 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/IsoGD.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
# import accimage
# set_image_backend('accimage')
class IsoGDData(Datasets):
    """IsoGD dataset loader built on the shared ``Datasets`` base class."""

    def __init__(self, args, ground_truth, modality, phase='train'):
        super(IsoGDData, self).__init__(args, ground_truth, modality, phase)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            Single modality: (clip, skgmaparr, target, relative_path).
            Fusion / EMA mode: ((rgb_clip, depth_clip),
            (rgb_skgmap, depth_skgmap), target, rgb_path).
        """
        sl = self.get_sl(self.inputs[index][1])
        self.data_path = os.path.join(self.dataset_root, self.typ, self.inputs[index][0])
        if self.typ == 'depth':
            # IsoGD names depth videos K_* and RGB videos M_*.
            self.data_path = self.data_path.replace('M_', 'K_')
        if self.args.Network == 'FusionNet' or self.args.model_ema:
            # Fusion mode loads the paired depth sample alongside the RGB one.
            assert self.typ == 'rgb'
            self.data_path1 = self.data_path.replace('rgb', 'depth')
            self.data_path1 = self.data_path1.replace('M', 'K')
            self.clip, skgmaparr = self.image_propose(self.data_path, sl)
            self.clip1, skgmaparr1 = self.image_propose(self.data_path1, sl)
            # return (self.clip.permute(0, 3, 1, 2), skgmaparr), (self.clip1.permute(0, 3, 1, 2), skgmaparr1), self.inputs[index][2], self.inputs[index][0]
            return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), self.inputs[index][2], self.data_path
        else:
            self.clip, skgmaparr = self.image_propose(self.data_path, sl)
            return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]

    def __len__(self):
        return len(self.inputs)
| 1,844 | 40 | 155 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/augmentation.py | '''
This file is modified from:
https://github.com/okankop/vidaug/blob/master/vidaug/augmentors/affine.py
'''
import PIL
from PIL import ImageFilter, ImageOps, Image
import os, glob
import math, random
import numpy as np
import logging
import cv2
from scipy.ndimage.filters import gaussian_filter
import numbers
class Sequential(object):
    """Chain several augmentors into a single callable.

    Args:
        transforms (list of "Augmentor" objects): augmentors applied one
            after another, each receiving the previous one's output.
        random_order (bool): if True, shuffle the application order on
            every call.
    """

    def __init__(self, transforms, random_order=False):
        self.transforms = transforms
        self.rand = random_order

    def __call__(self, clip):
        pipeline = self.transforms
        if self.rand:
            # Shuffle a copy so the configured order is never mutated.
            pipeline = list(pipeline)
            random.shuffle(pipeline)
        for stage in pipeline:
            clip = stage(clip)
        return clip
class Sometimes(object):
    """
    Applies an augmentation with a given probability.

    Args:
        p (float): The probability to apply the augmentation, in [0.0, 1.0].
        transform (an "Augmentor" object): The augmentation to apply.

    Raises:
        TypeError: if ``p`` lies outside [0.0, 1.0] (kept as TypeError for
            backward compatibility with existing callers).

    Example: Use this transform as follows:
        sometimes = lambda aug: va.Sometimes(0.5, aug)
        sometimes(va.HorizontalFlip)
    """

    def __init__(self, p, transform):
        self.transform = transform
        # BUGFIX(idiom): the original combined the range checks with the
        # bitwise operator `|`, which only worked incidentally on bools and
        # does not short-circuit; use a logical `or`.
        if p > 1.0 or p < 0.0:
            raise TypeError('Expected p to be in [0.0 <= 1.0], ' +
                            'but got p = {0}'.format(p))
        self.p = p

    def __call__(self, clip):
        # random.random() is uniform on [0, 1), so p=1.0 always applies and
        # p=0.0 never does.
        if random.random() < self.p:
            clip = self.transform(clip)
        return clip
class RandomTranslate(object):
    """
    Shifting video in X and Y coordinates.

    Args:
        x (int): Translate in x direction, selected randomly from
            [-x, +x] pixels.
        y (int): Translate in y direction, selected randomly from
            [-y, +y] pixels.
    """

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
        # The offset is drawn once at construction time so that every frame
        # of one clip receives the same shift.
        self.x_move = random.randint(-self.x, +self.x)
        self.y_move = random.randint(-self.y, +self.y)

    def __call__(self, clip):
        dx, dy = self.x_move, self.y_move
        if isinstance(clip, np.ndarray):
            rows, cols, ch = clip.shape
            matrix = np.float32([[1, 0, dx], [0, 1, dy]])
            return cv2.warpAffine(clip, matrix, (cols, rows))
        if isinstance(clip, Image.Image):
            return clip.transform(clip.size, Image.AFFINE, (1, 0, dx, 0, 1, dy))
        raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        'but got list of {0}'.format(type(clip)))
class RandomResize(object):
    """
    Resize video by zooming in and out.

    Args:
        rate (float): Video is scaled uniformly between
            [1 - rate, 1 + rate].
        interp (string): Interpolation to use for re-sizing
            ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic').
    """

    def __init__(self, rate=0.0, interp='bilinear'):
        self.rate = rate
        self.interpolation = interp
        # Drawn once so every frame of one clip is scaled by the same factor.
        self.scaling_factor = random.uniform(1 - self.rate, 1 + self.rate)

    def __call__(self, clip):
        if isinstance(clip, np.ndarray):
            im_h, im_w = clip.shape[0], clip.shape[1]
        elif isinstance(clip, PIL.Image.Image):
            im_w, im_h = clip.size
        else:
            raise TypeError(f'Unknow image type {type(clip)}')
        new_w = int(im_w * self.scaling_factor)
        new_h = int(im_h * self.scaling_factor)
        if isinstance(clip, np.ndarray):
            # BUGFIX: the original called scipy.misc.imresize here, but
            # `scipy` is never imported in this module (NameError) and the
            # function was removed from SciPy; cv2.resize (already imported
            # at the top of this file) replaces it.  Note cv2 takes (w, h).
            return cv2.resize(clip, (new_w, new_h),
                              interpolation=self._get_cv2_interp(self.interpolation))
        return clip.resize(size=(new_w, new_h),
                           resample=self._get_PIL_interp(self.interpolation))

    def _get_cv2_interp(self, interp):
        # Map the textual interpolation name onto an OpenCV flag.
        mapping = {
            'nearest': cv2.INTER_NEAREST,
            'lanczos': cv2.INTER_LANCZOS4,
            'bilinear': cv2.INTER_LINEAR,
            'bicubic': cv2.INTER_CUBIC,
            'cubic': cv2.INTER_CUBIC,
        }
        return mapping.get(interp, cv2.INTER_LINEAR)

    def _get_PIL_interp(self, interp):
        if interp == 'nearest':
            return PIL.Image.NEAREST
        elif interp == 'lanczos':
            return PIL.Image.LANCZOS
        elif interp == 'bilinear':
            return PIL.Image.BILINEAR
        elif interp == 'bicubic':
            return PIL.Image.BICUBIC
        elif interp == 'cubic':
            # BUGFIX: PIL.Image.CUBIC was an alias of BICUBIC and is gone in
            # Pillow >= 10; resolve it explicitly.
            return PIL.Image.BICUBIC
        # BUGFIX: the original fell through and returned None for unknown
        # names, which Image.resize rejects; default to bilinear.
        return PIL.Image.BILINEAR
class RandomShear(object):
    """
    Shearing video in X and Y directions.

    Args:
        x (int): Shear in x direction, selected randomly from [-x, +x].
        y (int): Shear in y direction, selected randomly from [-y, +y].
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y
        # Sampled once so every frame of one clip receives the same shear.
        self.x_shear = random.uniform(-self.x, self.x)
        self.y_shear = random.uniform(-self.y, self.y)

    def __call__(self, clip):
        x_shear, y_shear = self.x_shear, self.y_shear
        if isinstance(clip, np.ndarray):
            rows, cols, ch = clip.shape
            transform_mat = np.float32([[1, x_shear, 0], [y_shear, 1, 0]])
            return cv2.warpAffine(clip, transform_mat, (cols, rows))
        elif isinstance(clip, PIL.Image.Image):
            # BUGFIX: the original referenced an undefined name `img` here
            # (NameError on every PIL input); the image being transformed
            # is `clip`.
            return clip.transform(clip.size, PIL.Image.AFFINE,
                                  (1, x_shear, 0, y_shear, 1, 0))
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            'but got list of {0}'.format(type(clip)))
class GaussianBlur(object):
    """
    Apply Gaussian Blur to the PIL image.

    NOTE(review): the probability ``p`` is stored as ``self.prob`` but is
    never consulted — ``__call__`` always blurs.  The commented-out usage in
    this file wraps the class in ``Sometimes`` for probabilistic
    application; confirm before relying on ``p`` here.

    Args:
        p (float): stored as ``self.prob`` (currently unused, see note).
        radius_min (float): lower bound of the blur radius.
        radius_max (float): upper bound of the blur radius.
    """
    def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
        self.prob = p
        self.radius_min = radius_min
        self.radius_max = radius_max

    def __call__(self, img):
        # The radius is re-sampled on every call, so each frame may get a
        # different blur strength.
        return img.filter(
            ImageFilter.GaussianBlur(
                radius=random.uniform(self.radius_min, self.radius_max)
            )
        )
class Salt(object):
    """
    Augmenter that sets a certain fraction of pixel intensities to 255, hence
    they become white (salt noise).

    Args:
        ratio (int): Determines number of white pixels on each frame of video.
            Smaller the ratio, higher the number of white pixels (each pixel
            is whitened with probability 1/ratio).
    """

    def __init__(self, ratio=100):
        self.ratio = ratio
        # `flag`/`noise` are leftovers of a removed variant that sampled the
        # noise mask only once per clip; `noise` now simply keeps the mask
        # of the most recent call.  Kept for interface compatibility.
        self.flag = True
        self.noise = None

    def __call__(self, clip):
        is_PIL = isinstance(clip, PIL.Image.Image)
        if is_PIL:
            clip = np.asarray(clip)
        # BUGFIX: np.float was removed in NumPy 1.24; the builtin `float`
        # is the same type the alias resolved to.
        img = clip.astype(float)
        # A fresh mask per frame; entries equal to 0 (probability 1/ratio)
        # mark the pixels forced to white.
        self.noise = np.random.randint(self.ratio, size=img.shape)
        img = np.where(self.noise == 0, 255, img)
        clip = img.astype(np.uint8)
        if is_PIL:
            return PIL.Image.fromarray(clip)
        return clip
class RandomCrop(object):
    """
    Extract random crop of the video.

    The crop window (size and top-left corner) is drawn lazily on the first
    call and then frozen, so every frame of one clip shares the same crop.

    Args:
        size (sequence or int): Desired output size for the crop in format (h, w).

    Raises:
        ValueError: if ``size`` is a negative number or a sequence whose
            length is not 2.
    """
    def __init__(self, size):
        if isinstance(size, numbers.Number):
            if size < 0:
                raise ValueError('If size is a single number, it must be positive')
            size = (size, size)
        else:
            if len(size) != 2:
                raise ValueError('If size is a sequence, it must be of len 2.')
        self.size = size
        # Lazy state, populated on the first __call__ (see self.flag).
        self.flag = True
        self.w1, self.h1 = None, None
        self.crop_w, self.crop_h = None, None

    def __call__(self, clip):
        if self.flag:
            crop_w, crop_h = self.size
            # NOTE(review): the frozen crop size is recorded *before* the
            # clamping below, so for images smaller than `size` the stored
            # self.crop_w/self.crop_h keep the unclamped values later used
            # for the actual cropping — confirm this is intended.
            self.crop_w, self.crop_h = crop_w, crop_h
            if isinstance(clip, np.ndarray):
                im_h, im_w, im_c = clip.shape
            elif isinstance(clip, PIL.Image.Image):
                im_w, im_h = clip.size
            else:
                raise TypeError('Expected numpy.ndarray or PIL.Image' +
                                'but got list of {0}'.format(type(clip)))
            # Clamp the crop to the image instead of raising (replaces a
            # previously commented-out ValueError).
            if crop_w > im_w:
                crop_w = im_w
            if crop_h > im_h:
                crop_h = im_h
            # Random top-left corner inside the valid region, then freeze.
            self.w1 = random.randint(0, im_w - crop_w)
            self.h1 = random.randint(0, im_h - crop_h)
            self.flag = False
        w1, h1 = self.w1, self.h1
        crop_w, crop_h = self.crop_w, self.crop_h
        if isinstance(clip, np.ndarray):
            return clip[h1:h1 + crop_h, w1:w1 + crop_w, :]
        elif isinstance(clip, PIL.Image.Image):
            return clip.crop((w1, h1, w1 + crop_w, h1 + crop_h))
| 9,621 | 34.116788 | 102 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/THU_READ.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
import logging
import cv2
from einops import rearrange, repeat
from torchvision.utils import save_image, make_grid
np.random.seed(123)
class THUREAD(Datasets):
    """THU-READ dataset loader built on the shared ``Datasets`` base class."""

    def __init__(self, args, ground_truth, modality, phase='train'):
        super(THUREAD, self).__init__(args, ground_truth, modality, phase)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            Single modality: (clip, skgmaparr, target, video_path).
            Fusion / EMA mode: ((rgb_clip, depth_clip),
            (rgb_skgmap, depth_skgmap), target, video_path).
        """
        sl = self.get_sl(self.inputs[index][1])
        self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
        self.clip, skgmaparr = self.image_propose(self.data_path, sl)
        if self.args.Network == 'FusionNet' or self.args.model_ema:
            # Derive the paired depth path: the directory part swaps
            # 'RGB' -> 'Depth' and the file name swaps 'Depth' -> 'D'.
            assert self.typ == 'rgb'
            self.data_path1 = self.data_path.replace('RGB', 'Depth')
            self.data_path1 = '/'.join(self.data_path1.split('/')[:-1]) + '/{}'.format(
                self.data_path1.split('/')[-1].replace('Depth', 'D'))
            self.clip1, skgmaparr1 = self.image_propose(self.data_path1, sl)
            # NOTE(review): `video_apth` is a (misspelled) mapping supplied
            # by the base class — confirm its keys match inputs[index][0].
            return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), \
                self.inputs[index][2], self.video_apth[self.inputs[index][0]]
        return self.clip.permute(0, 3, 1, 2), skgmaparr, self.inputs[index][2], self.video_apth[self.inputs[index][0]]

    def __len__(self):
        return len(self.inputs)
| 1,708 | 33.18 | 118 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/transforms_factory.py | """ Transforms Factory
Factory methods for building image transforms for use with TIMM (PyTorch Image Models)
Hacked together by / Copyright 2019, Ross Wightman
"""
import math
import torch
from torchvision import transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
from .auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform
from timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation, ToNumpy
from timm.data.random_erasing import RandomErasing
def transforms_noaug_train(
        img_size=224,
        interpolation='bilinear',
        use_prefetcher=False,
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
):
    """Build the augmentation-free training pipeline: resize + center crop,
    then either a numpy conversion (prefetcher mode) or tensor conversion
    plus normalization.
    """
    if interpolation == 'random':
        # Per-sample random interpolation makes no sense without
        # augmentation; fall back to bilinear.
        interpolation = 'bilinear'
    pipeline = [
        transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)),
        transforms.CenterCrop(img_size),
    ]
    if use_prefetcher:
        # The prefetcher/collate stage handles tensor conversion and
        # normalization, so stop at raw numpy here.
        pipeline.append(ToNumpy())
    else:
        pipeline.append(transforms.ToTensor())
        pipeline.append(transforms.Normalize(
            mean=torch.tensor(mean),
            std=torch.tensor(std)))
    return transforms.Compose(pipeline)
def transforms_imagenet_train(
        img_size=224,
        scale=None,
        ratio=None,
        hflip=0.5,
        vflip=0.,
        color_jitter=0.4,
        auto_augment=None,
        interpolation='random',
        use_prefetcher=False,
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
        re_prob=0.,
        re_mode='const',
        re_count=1,
        re_num_splits=0,
        separate=False,
):
    """
    If separate==True, the transforms are returned as a tuple of 3 separate transforms
    for use in a mixing dataset that passes
    * all data through the first (primary) transform, called the 'clean' data
    * a portion of the data through the secondary transform
    * normalizes and converts the branches above with the third, final transform
    """
    scale = tuple(scale or (0.08, 1.0))  # default imagenet scale range
    ratio = tuple(ratio or (3./4., 4./3.))  # default imagenet ratio range
    # Primary stage: random resized crop plus optional flips.
    primary_tfl = [
        RandomResizedCropAndInterpolation(img_size, scale=scale, ratio=ratio, interpolation=interpolation)]
    if hflip > 0.:
        primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
    if vflip > 0.:
        primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
    # Secondary stage: AutoAugment/RandAugment/AugMix, or color jitter.
    secondary_tfl = []
    if auto_augment:
        assert isinstance(auto_augment, str)
        if isinstance(img_size, (tuple, list)):
            img_size_min = min(img_size)
        else:
            img_size_min = img_size
        aa_params = dict(
            translate_const=int(img_size_min * 0.45),
            img_mean=tuple([min(255, round(255 * x)) for x in mean]),
        )
        if interpolation and interpolation != 'random':
            aa_params['interpolation'] = str_to_pil_interp(interpolation)
        if auto_augment.startswith('rand'):
            secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
        elif auto_augment.startswith('augmix'):
            aa_params['translate_pct'] = 0.3
            secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)]
        else:
            secondary_tfl += [auto_augment_transform(auto_augment, aa_params)]
    elif color_jitter is not None:
        # color jitter is enabled when not using AA
        if isinstance(color_jitter, (list, tuple)):
            # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation
            # or 4 if also augmenting hue
            assert len(color_jitter) in (3, 4)
        else:
            # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
            color_jitter = (float(color_jitter),) * 3
        secondary_tfl += [transforms.ColorJitter(*color_jitter)]
    # Final stage: tensor conversion.  Normalization and RandomErasing are
    # deliberately disabled here (commented out) in this fork.
    final_tfl = []
    if use_prefetcher:
        # prefetcher and collate will handle tensor conversion and norm
        final_tfl += [ToNumpy()]
    else:
        final_tfl += [
            transforms.ToTensor(),
            # transforms.Normalize(
            #     mean=torch.tensor(mean),
            #     std=torch.tensor(std))
        ]
    # if re_prob > 0.:
    #     final_tfl.append(
    #         RandomErasing(re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device='cpu'))
    if separate:
        return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl)
    else:
        # return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
        # NOTE(review): unlike upstream timm, this path keeps only the second
        # element unpacked from the first secondary transform plus the final
        # stage, dropping the primary transforms entirely.  It assumes
        # secondary_tfl[0] unpacks into a pair (presumably this project's
        # rand_augment_transform returns one — confirm against the local
        # auto_augment module) and raises IndexError when no secondary
        # transform was produced.
        a, b = secondary_tfl[0]
        return transforms.Compose([b]+final_tfl)
class Normaliztion(object):
    """
    same as mxnet, normalize into [-1, 1]
    image = (image - 127.5)/128
    """

    def __call__(self, Image):
        # Map pixel values from [0, 255] into roughly [-1, 1].
        return (Image - 127.5) / 128
def transforms_imagenet_eval(
        img_size=224,
        crop_pct=None,
        interpolation='bilinear',
        use_prefetcher=False,
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD):
    """Build the evaluation pipeline: scale by 1/crop_pct, center crop to
    img_size, then convert/normalize (or hand off to the prefetcher).
    """
    crop_pct = crop_pct or DEFAULT_CROP_PCT

    if isinstance(img_size, (tuple, list)):
        assert len(img_size) == 2
        if img_size[-1] == img_size[-2]:
            # Square target: resize the shortest edge only (legacy behaviour).
            scale_size = int(math.floor(img_size[0] / crop_pct))
        else:
            # Rectangular target: scale both edges explicitly.
            scale_size = tuple(int(dim / crop_pct) for dim in img_size)
    else:
        scale_size = int(math.floor(img_size / crop_pct))

    pipeline = [
        transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)),
        transforms.CenterCrop(img_size),
    ]
    if use_prefetcher:
        # Tensor conversion and normalization happen in the prefetcher.
        pipeline.append(ToNumpy())
    else:
        pipeline.extend([
            transforms.ToTensor(),
            transforms.Normalize(
                mean=torch.tensor(mean),
                std=torch.tensor(std)),
        ])
    return transforms.Compose(pipeline)
def create_transform(
        input_size,
        is_training=False,
        use_prefetcher=False,
        no_aug=False,
        scale=None,
        ratio=None,
        hflip=0.5,
        vflip=0.,
        color_jitter=0.4,
        auto_augment=None,
        interpolation='bilinear',
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
        re_prob=0.,
        re_mode='const',
        re_count=1,
        re_num_splits=0,
        crop_pct=None,
        tf_preprocessing=False,
        separate=False):
    """Factory dispatching to the train / no-aug / eval transform builders.

    Returns the transform built by ``transforms_noaug_train``,
    ``transforms_imagenet_train`` or ``transforms_imagenet_eval`` (or a TF
    preprocessing wrapper); see those builders for parameter semantics.
    """
    if isinstance(input_size, (tuple, list)):
        # (C, H, W)-style size: keep only the spatial dims.
        img_size = input_size[-2:]
    else:
        img_size = input_size

    if tf_preprocessing and use_prefetcher:
        # TensorFlow-compatible preprocessing path.
        assert not separate, "Separate transforms not supported for TF preprocessing"
        from timm.data.tf_preprocessing import TfPreprocessTransform
        transform = TfPreprocessTransform(
            is_training=is_training, size=img_size, interpolation=interpolation)
    else:
        if is_training and no_aug:
            assert not separate, "Cannot perform split augmentation with no_aug"
            transform = transforms_noaug_train(
                img_size,
                interpolation=interpolation,
                use_prefetcher=use_prefetcher,
                mean=mean,
                std=std)
        elif is_training:
            transform = transforms_imagenet_train(
                img_size,
                scale=scale,
                ratio=ratio,
                hflip=hflip,
                vflip=vflip,
                color_jitter=color_jitter,
                auto_augment=auto_augment,
                interpolation=interpolation,
                use_prefetcher=use_prefetcher,
                mean=mean,
                std=std,
                re_prob=re_prob,
                re_mode=re_mode,
                re_count=re_count,
                re_num_splits=re_num_splits,
                separate=separate)
        else:
            assert not separate, "Separate transforms not supported for validation preprocessing"
            transform = transforms_imagenet_eval(
                img_size,
                interpolation=interpolation,
                use_prefetcher=use_prefetcher,
                mean=mean,
                std=std,
                crop_pct=crop_pct)
    return transform
| 8,665 | 34.08502 | 115 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/distributed_sampler.py | '''
This file is modified from:
https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/samplers/distributed_sampler.py
'''
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with switchable shuffling.

    Behaves like ``torch.utils.data.DistributedSampler`` except that
    ``shuffle=False`` yields indices in natural order, and the padded index
    list is built by tiling the whole permutation — correct even when
    ``total_size`` exceeds the dataset length by more than one copy.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle

    def __iter__(self):
        n = len(self.dataset)
        if self.shuffle:
            # Seed with the epoch so every replica draws the same
            # permutation, deterministically per epoch.
            gen = torch.Generator()
            gen.manual_seed(self.epoch)
            indices = torch.randperm(n, generator=gen).tolist()
        else:
            indices = list(range(n))

        # Pad by tiling until the list covers total_size exactly.
        repeats = math.ceil(self.total_size / len(indices))
        indices = (indices * repeats)[:self.total_size]
        assert len(indices) == self.total_size

        # Each rank takes an interleaved slice of the padded list.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
| 1,240 | 33.472222 | 100 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/NvGesture.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
import logging
# import accimage
# set_image_backend('accimage')
np.random.seed(123)
class NvData(Datasets):
    """NvGesture dataset loader built on the shared ``Datasets`` base class."""

    def __init__(self, args, ground_truth, modality, phase='train'):
        super(NvData, self).__init__(args, ground_truth, modality, phase)

    def transform_params(self, resize=(320, 240), crop_size=224, flip=0.5):
        """Draw the spatial-augmentation parameters shared by a whole clip.

        Returns:
            tuple: ((left, top, right, bottom) crop rectangle, flip flag).
            Training draws a random crop position and flips with probability
            ``flip``; evaluation uses a fixed (32, 32) corner and no flip.
        """
        if self.phase == 'train':
            left, top = random.randint(10, resize[0] - crop_size), random.randint(10, resize[1] - crop_size)
            is_flip = True if random.uniform(0, 1) < flip else False
        else:
            left, top = 32, 32
            is_flip = False
        return (left, top, left + crop_size, top + crop_size), is_flip

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            Single modality: (clip, skgmaparr, target, data_path).
            Fusion / EMA mode: ((clip, paired_clip), (skgmap, paired_skgmap),
            target, paired_data_path) where the pair is the other modality.
        """
        sl = self.get_sl(self.inputs[index][1])
        self.data_path = os.path.join(self.dataset_root, self.typ, self.inputs[index][0])
        self.clip, skgmaparr = self.image_propose(self.data_path, sl)
        if self.args.FusionNet or self.args.model_ema:
            # Load the complementary modality from the mirrored directory.
            if self.typ == 'rgb':
                self.data_path = self.data_path.replace('rgb', 'depth')
            else:
                self.data_path = self.data_path.replace('depth', 'rgb')
            self.clip1, skgmaparr1 = self.image_propose(self.data_path, sl)
            return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), self.inputs[index][2], self.data_path
        return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.data_path

    def __len__(self):
        return len(self.inputs)
| 1,963 | 36.769231 | 146 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/__init__.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
from .build import *
| 85 | 13.333333 | 54 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.