| id (int64, 0-458k) | file_name (string, 4-119) | file_path (string, 14-227) | content (string, 24-9.96M) | size (int64, 24-9.96M) | language (1 class) | extension (14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (12 classes) | repo_extraction_date (433 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 14,800 | admin.py | osm-search_Nominatim/src/nominatim_db/tools/admin.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for database analysis and maintenance.
"""
from typing import Optional, Tuple, Any, cast
import logging
import psycopg
from psycopg.types.json import Json
from ..typing import DictCursorResult
from ..config import Configuration
from ..db.connection import connect, Cursor, register_hstore
from ..errors import UsageError
from ..tokenizer import factory as tokenizer_factory
from ..data.place_info import PlaceInfo
LOG = logging.getLogger()
def _get_place_info(cursor: Cursor, osm_id: Optional[str],
place_id: Optional[int]) -> DictCursorResult:
sql = """SELECT place_id, extra.*
FROM placex, LATERAL placex_indexing_prepare(placex) as extra
"""
values: Tuple[Any, ...]
if osm_id:
osm_type = osm_id[0].upper()
if osm_type not in 'NWR' or not osm_id[1:].isdigit():
LOG.fatal('OSM ID must be of form <N|W|R><id>. Got: %s', osm_id)
raise UsageError("OSM ID parameter badly formatted")
sql += ' WHERE placex.osm_type = %s AND placex.osm_id = %s'
values = (osm_type, int(osm_id[1:]))
elif place_id is not None:
sql += ' WHERE placex.place_id = %s'
values = (place_id, )
else:
LOG.fatal("No OSM object given to index.")
raise UsageError("OSM object not found")
cursor.execute(sql + ' LIMIT 1', values)
if cursor.rowcount < 1:
LOG.fatal("OSM object %s not found in database.", osm_id)
raise UsageError("OSM object not found")
return cast(DictCursorResult, cursor.fetchone())
def analyse_indexing(config: Configuration, osm_id: Optional[str] = None,
place_id: Optional[int] = None) -> None:
""" Analyse indexing of a single Nominatim object.
"""
with connect(config.get_libpq_dsn()) as conn:
register_hstore(conn)
with conn.cursor(row_factory=psycopg.rows.dict_row) as cur:
place = _get_place_info(cur, osm_id, place_id)
cur.execute("update placex set indexed_status = 2 where place_id = %s",
(place['place_id'], ))
cur.execute("""SET auto_explain.log_min_duration = '0';
SET auto_explain.log_analyze = 'true';
SET auto_explain.log_nested_statements = 'true';
LOAD 'auto_explain';
SET client_min_messages = LOG;
SET log_min_messages = FATAL""")
tokenizer = tokenizer_factory.get_tokenizer_for_db(config)
# Enable printing of messages.
conn.add_notice_handler(lambda diag: print(diag.message_primary))
with tokenizer.name_analyzer() as analyzer:
cur.execute("""UPDATE placex
SET indexed_status = 0, address = %s, token_info = %s,
name = %s, linked_place_id = %s
WHERE place_id = %s""",
(place['address'],
Json(analyzer.process_place(PlaceInfo(place))),
place['name'], place['linked_place_id'], place['place_id']))
# we do not want to keep the results
conn.rollback()
def clean_deleted_relations(config: Configuration, age: str) -> None:
""" Clean deleted relations older than a given age
"""
with connect(config.get_libpq_dsn()) as conn:
with conn.cursor() as cur:
try:
cur.execute("""SELECT place_force_delete(p.place_id)
FROM import_polygon_delete d, placex p
WHERE p.osm_type = d.osm_type AND p.osm_id = d.osm_id
AND age(p.indexed_date) > %s::interval""",
(age, ))
except psycopg.DataError as exc:
raise UsageError('Invalid PostgreSQL time interval format') from exc
conn.commit()
| 4,196 | Python | .py | 87 | 36.91954 | 89 | 0.586797 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
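A note on the `admin.py` record above: `_get_place_info` accepts OSM objects in the compact `<N|W|R><id>` form. A minimal standalone sketch of that validation; `parse_osm_id` is a hypothetical name, not part of Nominatim's API:

```python
# Standalone sketch of the OSM-ID check in _get_place_info above.
def parse_osm_id(osm_id: str) -> tuple:
    """Split an OSM ID like 'W1234' into ('W', 1234) or raise ValueError."""
    osm_type = osm_id[0].upper() if osm_id else ''
    if osm_type not in 'NWR' or not osm_id[1:].isdigit():
        raise ValueError(f"OSM ID must be of form <N|W|R><id>. Got: {osm_id}")
    return osm_type, int(osm_id[1:])

assert parse_osm_id('W1234') == ('W', 1234)
assert parse_osm_id('n5') == ('N', 5)   # lower-case type letters are accepted
```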
| 14,801 | exec_utils.py | osm-search_Nominatim/src/nominatim_db/tools/exec_utils.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for executing external programs.
"""
from typing import Any, Mapping, List, Optional
import logging
import os
import re
import subprocess
import shutil
from ..db.connection import get_pg_env
from ..errors import UsageError
from ..version import OSM2PGSQL_REQUIRED_VERSION
LOG = logging.getLogger()
def run_osm2pgsql(options: Mapping[str, Any]) -> None:
""" Run osm2pgsql with the given options.
"""
_check_osm2pgsql_version(options['osm2pgsql'])
env = get_pg_env(options['dsn'])
cmd = [_find_osm2pgsql_cmd(options['osm2pgsql']),
'--append' if options['append'] else '--create',
'--slim',
'--log-progress', 'true',
'--number-processes', '1' if options['append'] else str(options['threads']),
'--cache', str(options['osm2pgsql_cache']),
'--style', str(options['osm2pgsql_style'])
]
if str(options['osm2pgsql_style']).endswith('.lua'):
env['LUA_PATH'] = ';'.join((str(options['osm2pgsql_style_path'] / '?.lua'),
os.environ.get('LUAPATH', ';')))
cmd.extend(('--output', 'flex'))
for flavour in ('data', 'index'):
if options['tablespaces'][f"main_{flavour}"]:
env[f"NOMINATIM_TABLESPACE_PLACE_{flavour.upper()}"] = \
options['tablespaces'][f"main_{flavour}"]
else:
cmd.extend(('--output', 'gazetteer', '--hstore', '--latlon'))
cmd.extend(_mk_tablespace_options('main', options))
if options['flatnode_file']:
cmd.extend(('--flat-nodes', options['flatnode_file']))
cmd.extend(_mk_tablespace_options('slim', options))
if options.get('disable_jit', False):
env['PGOPTIONS'] = '-c jit=off -c max_parallel_workers_per_gather=0'
if 'import_data' in options:
cmd.extend(('-r', 'xml', '-'))
elif isinstance(options['import_file'], list):
for fname in options['import_file']:
cmd.append(str(fname))
else:
cmd.append(str(options['import_file']))
subprocess.run(cmd, cwd=options.get('cwd', '.'),
input=options.get('import_data'),
env=env, check=True)
def _mk_tablespace_options(ttype: str, options: Mapping[str, Any]) -> List[str]:
cmds: List[str] = []
for flavour in ('data', 'index'):
if options['tablespaces'][f"{ttype}_{flavour}"]:
cmds.extend((f"--tablespace-{ttype}-{flavour}",
options['tablespaces'][f"{ttype}_{flavour}"]))
return cmds
def _find_osm2pgsql_cmd(cmdline: Optional[str]) -> str:
if cmdline is not None:
return cmdline
in_path = shutil.which('osm2pgsql')
if in_path is None:
raise UsageError('osm2pgsql executable not found. Please install osm2pgsql first.')
return str(in_path)
def _check_osm2pgsql_version(cmdline: Optional[str]) -> None:
cmd = [_find_osm2pgsql_cmd(cmdline), '--version']
result = subprocess.run(cmd, capture_output=True, check=True)
if not result.stderr:
raise UsageError("osm2pgsql does not print version information.")
verinfo = result.stderr.decode('UTF-8')
match = re.search(r'osm2pgsql version (\d+)\.(\d+)', verinfo)
if match is None:
raise UsageError(f"No version information found in output: {verinfo}")
if (int(match[1]), int(match[2])) < OSM2PGSQL_REQUIRED_VERSION:
raise UsageError(f"osm2pgsql is too old. Found version {match[1]}.{match[2]}. "
f"Need at least version {'.'.join(map(str, OSM2PGSQL_REQUIRED_VERSION))}.")
| 3,826 | Python | .py | 84 | 38.119048 | 100 | 0.625471 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
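`_check_osm2pgsql_version` in the record above relies on Python tuple comparison for the "too old" test. A small sketch of that parse-and-compare step; the required version used here is an invented value, not Nominatim's actual constant:

```python
import re

REQUIRED = (1, 8)   # invented for illustration; the real value lives in nominatim_db.version

def version_from_output(text: str) -> tuple:
    """Extract (major, minor) from osm2pgsql's --version output."""
    match = re.search(r'osm2pgsql version (\d+)\.(\d+)', text)
    if match is None:
        raise ValueError(f"No version information found in output: {text}")
    return int(match[1]), int(match[2])

# Lexicographic tuple comparison gives the same semantics as the code above.
assert version_from_output('osm2pgsql version 1.11.0 (64 bit)') >= REQUIRED
assert version_from_output('osm2pgsql version 1.2.1') < REQUIRED
```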
| 14,802 | tiger_data.py | osm-search_Nominatim/src/nominatim_db/tools/tiger_data.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for importing tiger data and handling tarball and directory files
"""
from typing import Any, TextIO, List, Union, cast, Iterator, Dict
import csv
import io
import logging
import os
import tarfile
from psycopg.types.json import Json
from ..config import Configuration
from ..db.connection import connect
from ..db.sql_preprocessor import SQLPreprocessor
from ..errors import UsageError
from ..db.query_pool import QueryPool
from ..data.place_info import PlaceInfo
from ..tokenizer.base import AbstractTokenizer
from . import freeze
LOG = logging.getLogger()
class TigerInput:
""" Context manager that goes through Tiger input files which may
either be in a directory or gzipped together in a tar file.
"""
def __init__(self, data_dir: str) -> None:
self.tar_handle = None
self.files: List[Union[str, tarfile.TarInfo]] = []
if data_dir.endswith('.tar.gz'):
try:
self.tar_handle = tarfile.open(data_dir) # pylint: disable=consider-using-with
except tarfile.ReadError as err:
LOG.fatal("Cannot open '%s'. Is this a tar file?", data_dir)
raise UsageError("Cannot open Tiger data file.") from err
self.files = [i for i in self.tar_handle.getmembers() if i.name.endswith('.csv')]
LOG.warning("Found %d CSV files in tarfile with path %s", len(self.files), data_dir)
else:
files = os.listdir(data_dir)
self.files = [os.path.join(data_dir, i) for i in files if i.endswith('.csv')]
LOG.warning("Found %d CSV files in path %s", len(self.files), data_dir)
if not self.files:
LOG.warning("Tiger data import selected but no files found at %s", data_dir)
def __enter__(self) -> 'TigerInput':
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
if self.tar_handle:
self.tar_handle.close()
self.tar_handle = None
def __bool__(self) -> bool:
return bool(self.files)
def get_file(self, fname: Union[str, tarfile.TarInfo]) -> TextIO:
""" Return a file handle to the next file to be processed.
Raises an IndexError if there is no file left.
"""
if self.tar_handle is not None:
extracted = self.tar_handle.extractfile(fname)
assert extracted is not None
return io.TextIOWrapper(extracted)
return open(cast(str, fname), encoding='utf-8')
def __iter__(self) -> Iterator[Dict[str, Any]]:
""" Iterate over the lines in each file.
"""
for fname in self.files:
fd = self.get_file(fname)
yield from csv.DictReader(fd, delimiter=';')
async def add_tiger_data(data_dir: str, config: Configuration, threads: int,
tokenizer: AbstractTokenizer) -> int:
""" Import tiger data from directory or tar file `data dir`.
"""
dsn = config.get_libpq_dsn()
with connect(dsn) as conn:
if freeze.is_frozen(conn):
raise UsageError("Tiger cannot be imported when database frozen (Github issue #3048)")
with TigerInput(data_dir) as tar:
if not tar:
return 1
with connect(dsn) as conn:
sql = SQLPreprocessor(conn, config)
sql.run_sql_file(conn, 'tiger_import_start.sql')
    # Read the files and then handle each file's lines as
    # SQL queries spread over <threads - 1> worker connections.
place_threads = max(1, threads - 1)
async with QueryPool(dsn, place_threads, autocommit=True) as pool:
with tokenizer.name_analyzer() as analyzer:
lines = 0
for row in tar:
try:
address = dict(street=row['street'], postcode=row['postcode'])
args = ('SRID=4326;' + row['geometry'],
int(row['from']), int(row['to']), row['interpolation'],
Json(analyzer.process_place(PlaceInfo({'address': address}))),
analyzer.normalize_postcode(row['postcode']))
except ValueError:
continue
await pool.put_query(
"""SELECT tiger_line_import(%s::GEOMETRY, %s::INT,
%s::INT, %s::TEXT, %s::JSONB, %s::TEXT)""",
args)
lines += 1
if lines == 1000:
print('.', end='', flush=True)
lines = 0
print('', flush=True)
LOG.warning("Creating indexes on Tiger data")
with connect(dsn) as conn:
sql = SQLPreprocessor(conn, config)
sql.run_sql_file(conn, 'tiger_import_finish.sql')
return 0
| 5,086 | Python | .py | 112 | 34.910714 | 98 | 0.59272 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
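`TigerInput` above dispatches between a directory of CSV files and a gzipped tar archive. A reduced sketch of the tar branch only, assuming a hypothetical archive path:

```python
import csv
import io
import tarfile

def iter_tar_csv_rows(tar_path: str):
    """Yield dict rows from every CSV member of a tar file (';' separated)."""
    with tarfile.open(tar_path) as tar:
        for member in tar.getmembers():
            if not member.name.endswith('.csv'):
                continue
            extracted = tar.extractfile(member)
            if extracted is not None:               # None for directories etc.
                yield from csv.DictReader(io.TextIOWrapper(extracted),
                                          delimiter=';')

# Hypothetical usage:
# for row in iter_tar_csv_rows('tiger2024.tar.gz'):
#     print(row['street'], row['postcode'])
```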
| 14,803 | database_import.py | osm-search_Nominatim/src/nominatim_db/tools/database_import.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for setting up and importing a new Nominatim database.
"""
from typing import Tuple, Optional, Union, Sequence, MutableMapping, Any
import logging
import os
import subprocess
import asyncio
from pathlib import Path
import psutil
import psycopg
from psycopg import sql as pysql
from ..errors import UsageError
from ..config import Configuration
from ..db.connection import connect, get_pg_env, Connection, server_version_tuple,\
postgis_version_tuple, drop_tables, table_exists, execute_scalar
from ..db.sql_preprocessor import SQLPreprocessor
from ..db.query_pool import QueryPool
from .exec_utils import run_osm2pgsql
from ..version import POSTGRESQL_REQUIRED_VERSION, POSTGIS_REQUIRED_VERSION
LOG = logging.getLogger()
def _require_version(module: str, actual: Tuple[int, int], expected: Tuple[int, int]) -> None:
""" Compares the version for the given module and raises an exception
if the actual version is too old.
"""
if actual < expected:
LOG.fatal('Minimum supported version of %s is %d.%d. '
'Found version %d.%d.',
module, expected[0], expected[1], actual[0], actual[1])
raise UsageError(f'{module} is too old.')
def _require_loaded(extension_name: str, conn: Connection) -> None:
""" Check that the given extension is loaded. """
with conn.cursor() as cur:
cur.execute('SELECT * FROM pg_extension WHERE extname = %s', (extension_name, ))
if cur.rowcount <= 0:
LOG.fatal('Required module %s is not loaded.', extension_name)
raise UsageError(f'{extension_name} is not loaded.')
def check_existing_database_plugins(dsn: str) -> None:
""" Check that the database has the required plugins installed."""
with connect(dsn) as conn:
_require_version('PostgreSQL server',
server_version_tuple(conn),
POSTGRESQL_REQUIRED_VERSION)
_require_version('PostGIS',
postgis_version_tuple(conn),
POSTGIS_REQUIRED_VERSION)
_require_loaded('hstore', conn)
def setup_database_skeleton(dsn: str, rouser: Optional[str] = None) -> None:
""" Create a new database for Nominatim and populate it with the
essential extensions.
        The function fails when the database already exists or the PostgreSQL or
        PostGIS versions are too old.
Uses `createdb` to create the database.
If 'rouser' is given, then the function also checks that the user
with that given name exists.
Requires superuser rights by the caller.
"""
proc = subprocess.run(['createdb'], env=get_pg_env(dsn), check=False)
if proc.returncode != 0:
raise UsageError('Creating new database failed.')
with connect(dsn) as conn:
_require_version('PostgreSQL server',
server_version_tuple(conn),
POSTGRESQL_REQUIRED_VERSION)
if rouser is not None:
cnt = execute_scalar(conn, 'SELECT count(*) FROM pg_user where usename = %s',
(rouser, ))
if cnt == 0:
LOG.fatal("Web user '%s' does not exist. Create it with:\n"
"\n createuser %s", rouser, rouser)
raise UsageError('Missing read-only user.')
# Create extensions.
with conn.cursor() as cur:
cur.execute('CREATE EXTENSION IF NOT EXISTS hstore')
cur.execute('CREATE EXTENSION IF NOT EXISTS postgis')
postgis_version = postgis_version_tuple(conn)
if postgis_version[0] >= 3:
cur.execute('CREATE EXTENSION IF NOT EXISTS postgis_raster')
conn.commit()
_require_version('PostGIS',
postgis_version_tuple(conn),
POSTGIS_REQUIRED_VERSION)
def import_osm_data(osm_files: Union[Path, Sequence[Path]],
options: MutableMapping[str, Any],
drop: bool = False, ignore_errors: bool = False) -> None:
""" Import the given OSM files. 'options' contains the list of
default settings for osm2pgsql.
"""
options['import_file'] = osm_files
options['append'] = False
options['threads'] = 1
if not options['flatnode_file'] and options['osm2pgsql_cache'] == 0:
# Make some educated guesses about cache size based on the size
# of the import file and the available memory.
mem = psutil.virtual_memory()
fsize = 0
if isinstance(osm_files, list):
for fname in osm_files:
fsize += os.stat(str(fname)).st_size
else:
fsize = os.stat(str(osm_files)).st_size
options['osm2pgsql_cache'] = int(min((mem.available + mem.cached) * 0.75,
fsize * 2) / 1024 / 1024) + 1
run_osm2pgsql(options)
with connect(options['dsn']) as conn:
if not ignore_errors:
with conn.cursor() as cur:
cur.execute('SELECT true FROM place LIMIT 1')
if cur.rowcount == 0:
raise UsageError('No data imported by osm2pgsql.')
if drop:
drop_tables(conn, 'planet_osm_nodes')
conn.commit()
if drop and options['flatnode_file']:
Path(options['flatnode_file']).unlink()
def create_tables(conn: Connection, config: Configuration, reverse_only: bool = False) -> None:
""" Create the set of basic tables.
When `reverse_only` is True, then the main table for searching will
be skipped and only reverse search is possible.
"""
sql = SQLPreprocessor(conn, config)
sql.env.globals['db']['reverse_only'] = reverse_only
sql.run_sql_file(conn, 'tables.sql')
def create_table_triggers(conn: Connection, config: Configuration) -> None:
""" Create the triggers for the tables. The trigger functions must already
have been imported with refresh.create_functions().
"""
sql = SQLPreprocessor(conn, config)
sql.run_sql_file(conn, 'table-triggers.sql')
def create_partition_tables(conn: Connection, config: Configuration) -> None:
""" Create tables that have explicit partitioning.
"""
sql = SQLPreprocessor(conn, config)
sql.run_sql_file(conn, 'partition-tables.src.sql')
def truncate_data_tables(conn: Connection) -> None:
""" Truncate all data tables to prepare for a fresh load.
"""
with conn.cursor() as cur:
cur.execute('TRUNCATE placex')
cur.execute('TRUNCATE place_addressline')
cur.execute('TRUNCATE location_area')
cur.execute('TRUNCATE location_area_country')
cur.execute('TRUNCATE location_property_tiger')
cur.execute('TRUNCATE location_property_osmline')
cur.execute('TRUNCATE location_postcode')
if table_exists(conn, 'search_name'):
cur.execute('TRUNCATE search_name')
cur.execute('DROP SEQUENCE IF EXISTS seq_place')
cur.execute('CREATE SEQUENCE seq_place start 100000')
cur.execute("""SELECT tablename FROM pg_tables
WHERE tablename LIKE 'location_road_%'""")
for table in [r[0] for r in list(cur)]:
cur.execute('TRUNCATE ' + table)
conn.commit()
_COPY_COLUMNS = pysql.SQL(',').join(map(pysql.Identifier,
('osm_type', 'osm_id', 'class', 'type',
'name', 'admin_level', 'address',
'extratags', 'geometry')))
async def load_data(dsn: str, threads: int) -> None:
""" Copy data into the word and placex table.
"""
placex_threads = max(1, threads - 1)
progress = asyncio.create_task(_progress_print())
async with QueryPool(dsn, placex_threads + 1) as pool:
# Copy data from place to placex in <threads - 1> chunks.
for imod in range(placex_threads):
await pool.put_query(
pysql.SQL("""INSERT INTO placex ({columns})
SELECT {columns} FROM place
WHERE osm_id % {total} = {mod}
AND NOT (class='place'
and (type='houses' or type='postcode'))
AND ST_IsValid(geometry)
""").format(columns=_COPY_COLUMNS,
total=pysql.Literal(placex_threads),
mod=pysql.Literal(imod)), None)
        # Interpolations need to be copied separately
await pool.put_query("""
INSERT INTO location_property_osmline (osm_id, address, linegeo)
SELECT osm_id, address, geometry FROM place
WHERE class='place' and type='houses' and osm_type='W'
and ST_GeometryType(geometry) = 'ST_LineString' """, None)
progress.cancel()
async with await psycopg.AsyncConnection.connect(dsn) as aconn:
await aconn.execute('ANALYSE')
async def _progress_print() -> None:
while True:
try:
await asyncio.sleep(1)
except asyncio.CancelledError:
print('', flush=True)
break
print('.', end='', flush=True)
async def create_search_indices(conn: Connection, config: Configuration,
drop: bool = False, threads: int = 1) -> None:
""" Create tables that have explicit partitioning.
"""
# If index creation failed and left an index invalid, they need to be
# cleaned out first, so that the script recreates them.
with conn.cursor() as cur:
cur.execute("""SELECT relname FROM pg_class, pg_index
WHERE pg_index.indisvalid = false
AND pg_index.indexrelid = pg_class.oid""")
bad_indices = [row[0] for row in list(cur)]
for idx in bad_indices:
LOG.info("Drop invalid index %s.", idx)
cur.execute(pysql.SQL('DROP INDEX {}').format(pysql.Identifier(idx)))
conn.commit()
sql = SQLPreprocessor(conn, config)
await sql.run_parallel_sql_file(config.get_libpq_dsn(),
'indices.sql', min(8, threads), drop=drop)
| 10,605 | Python | .py | 217 | 37.976959 | 95 | 0.608595 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
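The cache heuristic in `import_osm_data` above caps the osm2pgsql cache at the smaller of twice the input size and three quarters of the free memory (available plus page cache). A worked example with invented numbers:

```python
# Worked example of the cache-size heuristic above; all numbers invented.
available = 8 * 1024**3      # bytes reported as available RAM
cached = 2 * 1024**3         # bytes of page cache
fsize = 3 * 1024**3          # size of the OSM import file

cache_mb = int(min((available + cached) * 0.75, fsize * 2) / 1024 / 1024) + 1
# min(7.5 GiB, 6 GiB) = 6 GiB, i.e. 6144 MB, plus one
assert cache_mb == 6145
```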
| 14,804 | freeze.py | osm-search_Nominatim/src/nominatim_db/tools/freeze.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for removing unnecessary data from the database.
"""
from typing import Optional
from pathlib import Path
from psycopg import sql as pysql
from ..db.connection import Connection, drop_tables, table_exists
UPDATE_TABLES = [
'address_levels',
'gb_postcode',
'import_osmosis_log',
'import_polygon_%',
'location_area%',
'location_road%',
'place',
'planet_osm_%',
'search_name_%',
'us_postcode',
'wikipedia_%'
]
def drop_update_tables(conn: Connection) -> None:
""" Drop all tables only necessary for updating the database from
OSM replication data.
"""
parts = (pysql.SQL("(tablename LIKE {})").format(pysql.Literal(t)) for t in UPDATE_TABLES)
with conn.cursor() as cur:
cur.execute(pysql.SQL("SELECT tablename FROM pg_tables WHERE ")
+ pysql.SQL(' or ').join(parts))
tables = [r[0] for r in cur]
drop_tables(conn, *tables, cascade=True)
conn.commit()
def drop_flatnode_file(fpath: Optional[Path]) -> None:
""" Remove the flatnode file if it exists.
"""
if fpath and fpath.exists():
fpath.unlink()
def is_frozen(conn: Connection) -> bool:
""" Returns true if database is in a frozen state
"""
return table_exists(conn, 'place') is False
| 1,512 | Python | .py | 46 | 28.521739 | 94 | 0.671703 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
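`drop_update_tables` above assembles one `WHERE` clause out of the `UPDATE_TABLES` LIKE patterns. The same assembly shown with plain strings; the original's `pysql.Literal` composition is the safe variant for anything user-supplied:

```python
# Plain-string illustration of the WHERE clause built in drop_update_tables.
patterns = ['import_polygon_%', 'planet_osm_%', 'place']   # subset of UPDATE_TABLES
clause = ' or '.join(f"(tablename LIKE '{p}')" for p in patterns)
print('SELECT tablename FROM pg_tables WHERE ' + clause)
# SELECT tablename FROM pg_tables WHERE (tablename LIKE 'import_polygon_%')
#   or (tablename LIKE 'planet_osm_%') or (tablename LIKE 'place')
```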
| 14,805 | sp_wiki_loader.py | osm-search_Nominatim/src/nominatim_db/tools/special_phrases/sp_wiki_loader.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module containing the SPWikiLoader class.
"""
from typing import Iterable
import re
import logging
from ...config import Configuration
from ...utils.url_utils import get_url
from .special_phrase import SpecialPhrase
LOG = logging.getLogger()
def _get_wiki_content(lang: str) -> str:
"""
Request and return the wiki page's content
corresponding to special phrases for a given lang.
    Example of a requested URL:
https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/EN
"""
url = 'https://wiki.openstreetmap.org/wiki/Special:Export/Nominatim/Special_Phrases/' \
+ lang.upper()
return get_url(url)
class SPWikiLoader:
"""
Handles loading of special phrases from the wiki.
"""
def __init__(self, config: Configuration) -> None:
self.config = config
        # Compile the regex here once to improve performance.
self.occurence_pattern = re.compile(
r'\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([\-YN])'
)
# Hack around a bug where building=yes was imported with quotes into the wiki
self.type_fix_pattern = re.compile(r'\"|"')
self.languages = self.config.get_str_list('LANGUAGES') or \
['af', 'ar', 'br', 'ca', 'cs', 'de', 'en', 'es',
'et', 'eu', 'fa', 'fi', 'fr', 'gl', 'hr', 'hu',
'ia', 'is', 'it', 'ja', 'mk', 'nl', 'no', 'pl',
'ps', 'pt', 'ru', 'sk', 'sl', 'sv', 'uk', 'vi',
'lv', 'tr']
def generate_phrases(self) -> Iterable[SpecialPhrase]:
""" Download the wiki pages for the configured languages
and extract the phrases from the page.
"""
for lang in self.languages:
LOG.warning('Importing phrases for lang: %s...', lang)
loaded_xml = _get_wiki_content(lang)
# One match will be of format [label, class, type, operator, plural]
matches = self.occurence_pattern.findall(loaded_xml)
for match in matches:
yield SpecialPhrase(match[0],
match[1],
self.type_fix_pattern.sub('', match[2]),
match[3])
| 2,566 | Python | .py | 58 | 34.327586 | 91 | 0.558847 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
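The `occurence_pattern` regex above pulls label, class, type, operator and plural flag out of one row of the wiki's phrase table. A quick demonstration on an invented wiki-markup line; `SpecialPhrase` later strips the captured whitespace:

```python
import re

pattern = re.compile(
    r'\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([^\|]+) *\|\| *([\-YN])')

line = '| Zoo || tourism || zoo || - || N'        # invented example row
match = pattern.search(line)
assert match is not None
# One match is of format [label, class, type, operator, plural]:
assert match.groups() == ('Zoo ', 'tourism ', 'zoo ', '- ', 'N')
```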
| 14,806 | importer_statistics.py | osm-search_Nominatim/src/nominatim_db/tools/special_phrases/importer_statistics.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Contains the class which handles statistics for the
import of special phrases.
"""
import logging
LOG = logging.getLogger()
class SpecialPhrasesImporterStatistics():
"""
Class handling statistics of the import
process of special phrases.
"""
def __init__(self) -> None:
self._intialize_values()
def _intialize_values(self) -> None:
"""
Set all counts for the global
import to 0.
"""
self.tables_created = 0
self.tables_deleted = 0
self.tables_ignored = 0
self.invalids = 0
def notify_one_phrase_invalid(self) -> None:
"""
Add +1 to the count of invalid entries
fetched from the wiki.
"""
self.invalids += 1
def notify_one_table_created(self) -> None:
"""
Add +1 to the count of created tables.
"""
self.tables_created += 1
def notify_one_table_deleted(self) -> None:
"""
Add +1 to the count of deleted tables.
"""
self.tables_deleted += 1
def notify_one_table_ignored(self) -> None:
"""
Add +1 to the count of ignored tables.
"""
self.tables_ignored += 1
def notify_import_done(self) -> None:
"""
Print stats for the whole import process
and reset all values.
"""
LOG.info('====================================================================')
LOG.info('Final statistics of the import:')
LOG.info('- %s phrases were invalid.', self.invalids)
if self.invalids > 0:
LOG.info(' Those invalid phrases have been skipped.')
LOG.info('- %s tables were ignored as they already exist on the database',
self.tables_ignored)
LOG.info('- %s tables were created', self.tables_created)
LOG.info('- %s tables were deleted from the database', self.tables_deleted)
if self.tables_deleted > 0:
LOG.info(' They were deleted as they are not valid anymore.')
if self.invalids > 0:
LOG.warning('%s phrases were invalid and have been skipped during the whole process.',
self.invalids)
self._intialize_values()
| 2,507 | Python | .py | 69 | 28.057971 | 98 | 0.576781 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,807 | special_phrase.py | osm-search_Nominatim/src/nominatim_db/tools/special_phrases/special_phrase.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module containing the class SpecialPhrase.
This class is a model used to transfer a special phrase through
the process of load and importation.
"""
from typing import Any
class SpecialPhrase:
"""
Model representing a special phrase.
"""
def __init__(self, p_label: str, p_class: str, p_type: str, p_operator: str) -> None:
self.p_label = p_label.strip()
self.p_class = p_class.strip()
self.p_type = p_type.strip()
        # Needed because some operators in the wiki are not written in English
p_operator = p_operator.strip().lower()
self.p_operator = '-' if p_operator not in ('near', 'in') else p_operator
def __eq__(self, other: Any) -> bool:
if not isinstance(other, SpecialPhrase):
return False
return self.p_label == other.p_label \
and self.p_class == other.p_class \
and self.p_type == other.p_type \
and self.p_operator == other.p_operator
def __hash__(self) -> int:
return hash((self.p_label, self.p_class, self.p_type, self.p_operator))
| 1,322 | Python | .py | 32 | 34.875 | 89 | 0.635019 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
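A quick check of the operator normalisation in `SpecialPhrase` above: everything is stripped, operators are lower-cased, and anything other than `near` or `in` collapses to `-`. The import path is taken from the record's `file_path` and assumes the package is installed:

```python
from nominatim_db.tools.special_phrases.special_phrase import SpecialPhrase

p = SpecialPhrase(' Zoo ', 'tourism', 'zoo', ' NEAR ')
assert (p.p_label, p.p_operator) == ('Zoo', 'near')
assert SpecialPhrase('x', 'c', 't', 'Y').p_operator == '-'   # unknown operator
```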
| 14,808 | sp_csv_loader.py | osm-search_Nominatim/src/nominatim_db/tools/special_phrases/sp_csv_loader.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module containing the SPCsvLoader class.
The class loads phrases from a CSV file.
"""
from typing import Iterable
import csv
import os
from ...errors import UsageError
from .special_phrase import SpecialPhrase
class SPCsvLoader:
"""
Handles loading of special phrases from external csv file.
"""
def __init__(self, csv_path: str) -> None:
self.csv_path = csv_path
def generate_phrases(self) -> Iterable[SpecialPhrase]:
""" Open and parse the given csv file.
Create the corresponding SpecialPhrases.
"""
self._check_csv_validity()
with open(self.csv_path, encoding='utf-8') as fd:
reader = csv.DictReader(fd, delimiter=',')
for row in reader:
yield SpecialPhrase(row['phrase'], row['class'], row['type'], row['operator'])
def _check_csv_validity(self) -> None:
"""
Check that the csv file has the right extension.
"""
_, extension = os.path.splitext(self.csv_path)
if extension != '.csv':
raise UsageError(f'The file {self.csv_path} is not a csv file.')
| 1,370 | Python | .py | 37 | 30.810811 | 94 | 0.648036 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
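`SPCsvLoader.generate_phrases` above expects the column headers `phrase`, `class`, `type` and `operator`. A hypothetical input file and the equivalent parsing step on an in-memory string:

```python
import csv
import io

sample = """phrase,class,type,operator
Zoos,tourism,zoo,-
Hotels in,tourism,hotel,in
"""

for row in csv.DictReader(io.StringIO(sample)):
    print(row['phrase'], row['class'], row['type'], row['operator'])
```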
| 14,809 | sp_importer.py | osm-search_Nominatim/src/nominatim_db/tools/special_phrases/sp_importer.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Module containing the class handling the import
of the special phrases.
Phrases are analyzed and imported into the database.
Phrases already present in the database that are no longer
valid are removed.
"""
from typing import Iterable, Tuple, Mapping, Sequence, Optional, Set
import logging
import re
from psycopg.sql import Identifier, SQL
from ...typing import Protocol
from ...config import Configuration
from ...db.connection import Connection, drop_tables, index_exists
from .importer_statistics import SpecialPhrasesImporterStatistics
from .special_phrase import SpecialPhrase
from ...tokenizer.base import AbstractTokenizer
LOG = logging.getLogger()
def _classtype_table(phrase_class: str, phrase_type: str) -> str:
""" Return the name of the table for the given class and type.
"""
return f'place_classtype_{phrase_class}_{phrase_type}'
class SpecialPhraseLoader(Protocol):
""" Protocol for classes implementing a loader for special phrases.
"""
def generate_phrases(self) -> Iterable[SpecialPhrase]:
""" Generates all special phrase terms this loader can produce.
"""
class SPImporter():
# pylint: disable-msg=too-many-instance-attributes
"""
    Class handling the import of special phrases into the database.
    Takes a special phrase loader which loads the phrases from an external source.
"""
def __init__(self, config: Configuration, conn: Connection,
sp_loader: SpecialPhraseLoader) -> None:
self.config = config
self.db_connection = conn
self.sp_loader = sp_loader
self.statistics_handler = SpecialPhrasesImporterStatistics()
self.black_list, self.white_list = self._load_white_and_black_lists()
self.sanity_check_pattern = re.compile(r'^\w+$')
# This set will contain all existing phrases to be added.
# It contains tuples with the following format: (label, class, type, operator)
self.word_phrases: Set[Tuple[str, str, str, str]] = set()
        # This set will contain all existing place_classtype tables which don't match any
        # special phrases class/type on the wiki.
self.table_phrases_to_delete: Set[str] = set()
def import_phrases(self, tokenizer: AbstractTokenizer, should_replace: bool) -> None:
"""
Iterate through all SpecialPhrases extracted from the
loader and import them into the database.
        If should_replace is set to True, only the loaded phrases
        will be kept in the database. All other phrases already
in the database will be removed.
"""
        LOG.warning('Special phrases import starting')
self._fetch_existing_place_classtype_tables()
# Store pairs of class/type for further processing
class_type_pairs = set()
for phrase in self.sp_loader.generate_phrases():
result = self._process_phrase(phrase)
if result:
class_type_pairs.add(result)
self._create_classtype_table_and_indexes(class_type_pairs)
if should_replace:
self._remove_non_existent_tables_from_db()
self.db_connection.commit()
with tokenizer.name_analyzer() as analyzer:
analyzer.update_special_phrases(self.word_phrases, should_replace)
LOG.warning('Import done.')
self.statistics_handler.notify_import_done()
def _fetch_existing_place_classtype_tables(self) -> None:
"""
Fetch existing place_classtype tables.
Fill the table_phrases_to_delete set of the class.
"""
query = """
SELECT table_name
FROM information_schema.tables
WHERE table_schema='public'
AND table_name like 'place_classtype_%';
"""
with self.db_connection.cursor() as db_cursor:
db_cursor.execute(SQL(query))
for row in db_cursor:
self.table_phrases_to_delete.add(row[0])
def _load_white_and_black_lists(self) \
-> Tuple[Mapping[str, Sequence[str]], Mapping[str, Sequence[str]]]:
"""
        Load white and black lists from phrase-settings.json.
"""
settings = self.config.load_sub_configuration('phrase-settings.json')
return settings['blackList'], settings['whiteList']
def _check_sanity(self, phrase: SpecialPhrase) -> bool:
"""
Check sanity of given inputs in case somebody added garbage in the wiki.
If a bad class/type is detected the system will exit with an error.
"""
        class_matches = self.sanity_check_pattern.findall(phrase.p_class)
        type_matches = self.sanity_check_pattern.findall(phrase.p_type)
        if not class_matches or not type_matches:
LOG.warning("Bad class/type: %s=%s. It will not be imported",
phrase.p_class, phrase.p_type)
return False
return True
def _process_phrase(self, phrase: SpecialPhrase) -> Optional[Tuple[str, str]]:
"""
Processes the given phrase by checking black and white list
and sanity.
Return the class/type pair corresponding to the phrase.
"""
# blacklisting: disallow certain class/type combinations
if phrase.p_class in self.black_list.keys() \
and phrase.p_type in self.black_list[phrase.p_class]:
return None
# whitelisting: if class is in whitelist, allow only tags in the list
if phrase.p_class in self.white_list.keys() \
and phrase.p_type not in self.white_list[phrase.p_class]:
return None
# sanity check, in case somebody added garbage in the wiki
if not self._check_sanity(phrase):
self.statistics_handler.notify_one_phrase_invalid()
return None
self.word_phrases.add((phrase.p_label, phrase.p_class,
phrase.p_type, phrase.p_operator))
return (phrase.p_class, phrase.p_type)
def _create_classtype_table_and_indexes(self,
class_type_pairs: Iterable[Tuple[str, str]]) -> None:
"""
Create table place_classtype for each given pair.
Also create indexes on place_id and centroid.
"""
LOG.warning('Create tables and indexes...')
sql_tablespace = self.config.TABLESPACE_AUX_DATA
if sql_tablespace:
sql_tablespace = ' TABLESPACE ' + sql_tablespace
with self.db_connection.cursor() as db_cursor:
db_cursor.execute("CREATE INDEX idx_placex_classtype ON placex (class, type)")
for pair in class_type_pairs:
phrase_class = pair[0]
phrase_type = pair[1]
table_name = _classtype_table(phrase_class, phrase_type)
if table_name in self.table_phrases_to_delete:
self.statistics_handler.notify_one_table_ignored()
                # Remove this table from the ones to delete as it matches a
                # class/type still present in the special phrases on the wiki.
                self.table_phrases_to_delete.remove(table_name)
                # So there is no need to create the table and indexes.
continue
# Table creation
self._create_place_classtype_table(sql_tablespace, phrase_class, phrase_type)
# Indexes creation
self._create_place_classtype_indexes(sql_tablespace, phrase_class, phrase_type)
# Grant access on read to the web user.
self._grant_access_to_webuser(phrase_class, phrase_type)
self.statistics_handler.notify_one_table_created()
with self.db_connection.cursor() as db_cursor:
db_cursor.execute("DROP INDEX idx_placex_classtype")
def _create_place_classtype_table(self, sql_tablespace: str,
phrase_class: str, phrase_type: str) -> None:
"""
        Create the table place_classtype for the given phrase_class/phrase_type
        if it doesn't exist.
"""
table_name = _classtype_table(phrase_class, phrase_type)
with self.db_connection.cursor() as cur:
cur.execute(SQL("""CREATE TABLE IF NOT EXISTS {} {} AS
SELECT place_id AS place_id,
st_centroid(geometry) AS centroid
FROM placex
WHERE class = %s AND type = %s
""").format(Identifier(table_name), SQL(sql_tablespace)),
(phrase_class, phrase_type))
def _create_place_classtype_indexes(self, sql_tablespace: str,
phrase_class: str, phrase_type: str) -> None:
"""
Create indexes on centroid and place_id for the place_classtype table.
"""
index_prefix = f'idx_place_classtype_{phrase_class}_{phrase_type}_'
base_table = _classtype_table(phrase_class, phrase_type)
# Index on centroid
if not index_exists(self.db_connection, index_prefix + 'centroid'):
with self.db_connection.cursor() as db_cursor:
db_cursor.execute(SQL("CREATE INDEX {} ON {} USING GIST (centroid) {}")
.format(Identifier(index_prefix + 'centroid'),
Identifier(base_table),
SQL(sql_tablespace)))
# Index on place_id
if not index_exists(self.db_connection, index_prefix + 'place_id'):
with self.db_connection.cursor() as db_cursor:
db_cursor.execute(SQL("CREATE INDEX {} ON {} USING btree(place_id) {}")
.format(Identifier(index_prefix + 'place_id'),
Identifier(base_table),
SQL(sql_tablespace)))
def _grant_access_to_webuser(self, phrase_class: str, phrase_type: str) -> None:
"""
Grant access on read to the table place_classtype for the webuser.
"""
table_name = _classtype_table(phrase_class, phrase_type)
with self.db_connection.cursor() as db_cursor:
db_cursor.execute(SQL("""GRANT SELECT ON {} TO {}""")
.format(Identifier(table_name),
Identifier(self.config.DATABASE_WEBUSER)))
def _remove_non_existent_tables_from_db(self) -> None:
"""
        Remove special phrases which no longer exist on the wiki.
Delete the place_classtype tables.
"""
LOG.warning('Cleaning database...')
# Delete place_classtype tables corresponding to class/type which
# are not on the wiki anymore.
drop_tables(self.db_connection, *self.table_phrases_to_delete)
for _ in self.table_phrases_to_delete:
self.statistics_handler.notify_one_table_deleted()
| 11,360 | Python | .py | 221 | 39.60181 | 97 | 0.617536 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
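The black-/whitelist logic in `_process_phrase` above reads: a blacklisted class/type pair is always rejected, and a class appearing in the whitelist only admits the listed types. A standalone sketch with invented lists (the real ones come from `phrase-settings.json`):

```python
black_list = {'amenity': ['parking_entrance']}        # invented example data
white_list = {'highway': ['bus_stop', 'rest_area']}

def is_allowed(p_class: str, p_type: str) -> bool:
    if p_class in black_list and p_type in black_list[p_class]:
        return False          # explicitly disallowed combination
    if p_class in white_list and p_type not in white_list[p_class]:
        return False          # class restricted to whitelisted types only
    return True

assert is_allowed('tourism', 'zoo')
assert not is_allowed('amenity', 'parking_entrance')
assert not is_allowed('highway', 'primary')
```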
| 14,810 | url_utils.py | osm-search_Nominatim/src/nominatim_db/utils/url_utils.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper functions for accessing URLs.
"""
from typing import IO
import logging
import urllib.request as urlrequest
from ..version import NOMINATIM_VERSION
LOG = logging.getLogger()
def get_url(url: str) -> str:
""" Get the contents from the given URL and return it as a UTF-8 string.
This version makes sure that an appropriate user agent is sent.
"""
headers = {"User-Agent": f"Nominatim/{NOMINATIM_VERSION!s}"}
try:
request = urlrequest.Request(url, headers=headers)
with urlrequest.urlopen(request) as response: # type: IO[bytes]
return response.read().decode('utf-8')
except Exception:
LOG.fatal('Failed to load URL: %s', url)
raise
| 927 | Python | .py | 26 | 31.692308 | 76 | 0.705357 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,811 | centroid.py | osm-search_Nominatim/src/nominatim_db/utils/centroid.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for computation of centroids.
"""
from typing import Tuple, Any
from collections.abc import Collection
class PointsCentroid:
""" Centroid computation from single points using an online algorithm.
More points may be added at any time.
Coordinates are internally treated as a 7-digit fixed-point float
(i.e. in OSM style).
"""
def __init__(self) -> None:
self.sum_x = 0
self.sum_y = 0
self.count = 0
def centroid(self) -> Tuple[float, float]:
""" Return the centroid of all points collected so far.
"""
if self.count == 0:
raise ValueError("No points available for centroid.")
return (float(self.sum_x/self.count)/10000000,
float(self.sum_y/self.count)/10000000)
def __len__(self) -> int:
return self.count
def __iadd__(self, other: Any) -> 'PointsCentroid':
if isinstance(other, Collection) and len(other) == 2:
if all(isinstance(p, (float, int)) for p in other):
x, y = other
self.sum_x += int(x * 10000000)
self.sum_y += int(y * 10000000)
self.count += 1
return self
raise ValueError("Can only add 2-element tuples to centroid.")
| 1,514 | Python | .py | 39 | 31.205128 | 74 | 0.617747 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
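A short usage sketch for `PointsCentroid` above, importing it under the path shown in the record's `file_path` (assumes the package is installed):

```python
from nominatim_db.utils.centroid import PointsCentroid

c = PointsCentroid()
c += (10.0, 20.0)          # points are added via the += operator
c += (12.0, 22.0)
assert len(c) == 2
assert c.centroid() == (11.0, 21.0)   # exact thanks to fixed-point sums
```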
| 14,812 | indexer.py | osm-search_Nominatim/src/nominatim_db/indexer/indexer.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Main work horse for indexing (computing addresses) the database.
"""
from typing import cast, List, Any, Optional
import logging
import time
import psycopg
from ..db.connection import connect, execute_scalar
from ..db.query_pool import QueryPool
from ..tokenizer.base import AbstractTokenizer
from .progress import ProgressLogger
from . import runners
LOG = logging.getLogger()
class Indexer:
""" Main indexing routine.
"""
def __init__(self, dsn: str, tokenizer: AbstractTokenizer, num_threads: int):
self.dsn = dsn
self.tokenizer = tokenizer
self.num_threads = num_threads
def has_pending(self) -> bool:
""" Check if any data still needs indexing.
This function must only be used after the import has finished.
Otherwise it will be very expensive.
"""
with connect(self.dsn) as conn:
with conn.cursor() as cur:
cur.execute("SELECT 'a' FROM placex WHERE indexed_status > 0 LIMIT 1")
return cur.rowcount > 0
async def index_full(self, analyse: bool = True) -> None:
""" Index the complete database. This will first index boundaries
followed by all other objects. When `analyse` is True, then the
database will be analysed at the appropriate places to
ensure that database statistics are updated.
"""
with connect(self.dsn) as conn:
conn.autocommit = True
def _analyze() -> None:
if analyse:
with conn.cursor() as cur:
cur.execute('ANALYZE')
while True:
if await self.index_by_rank(0, 4) > 0:
_analyze()
if await self.index_boundaries(0, 30) > 100:
_analyze()
if await self.index_by_rank(5, 25) > 100:
_analyze()
if await self.index_by_rank(26, 30) > 1000:
_analyze()
if await self.index_postcodes() > 100:
_analyze()
if not self.has_pending():
break
async def index_boundaries(self, minrank: int, maxrank: int) -> int:
""" Index only administrative boundaries within the given rank range.
"""
total = 0
LOG.warning("Starting indexing boundaries using %s threads",
self.num_threads)
minrank = max(minrank, 4)
maxrank = min(maxrank, 25)
        # Precompute the number of rows to process for each rank
with connect(self.dsn) as conn:
hstore_info = psycopg.types.TypeInfo.fetch(conn, "hstore")
if hstore_info is None:
raise RuntimeError('Hstore extension is requested but not installed.')
psycopg.types.hstore.register_hstore(hstore_info)
with conn.cursor() as cur:
cur = conn.execute(""" SELECT rank_search, count(*)
FROM placex
WHERE rank_search between %s and %s
AND class = 'boundary' and type = 'administrative'
AND indexed_status > 0
GROUP BY rank_search""",
(minrank, maxrank))
total_tuples = {row.rank_search: row.count for row in cur}
with self.tokenizer.name_analyzer() as analyzer:
for rank in range(minrank, maxrank + 1):
total += await self._index(runners.BoundaryRunner(rank, analyzer),
total_tuples=total_tuples.get(rank, 0))
return total
async def index_by_rank(self, minrank: int, maxrank: int) -> int:
""" Index all entries of placex in the given rank range (inclusive)
in order of their address rank.
When rank 30 is requested then also interpolations and
places with address rank 0 will be indexed.
"""
total = 0
maxrank = min(maxrank, 30)
LOG.warning("Starting indexing rank (%i to %i) using %i threads",
minrank, maxrank, self.num_threads)
        # Precompute the number of rows to process for each rank
with connect(self.dsn) as conn:
hstore_info = psycopg.types.TypeInfo.fetch(conn, "hstore")
if hstore_info is None:
raise RuntimeError('Hstore extension is requested but not installed.')
psycopg.types.hstore.register_hstore(hstore_info)
with conn.cursor() as cur:
cur = conn.execute(""" SELECT rank_address, count(*)
FROM placex
WHERE rank_address between %s and %s
AND indexed_status > 0
GROUP BY rank_address""",
(minrank, maxrank))
total_tuples = {row.rank_address: row.count for row in cur}
with self.tokenizer.name_analyzer() as analyzer:
for rank in range(max(1, minrank), maxrank + 1):
if rank >= 30:
batch = 20
elif rank >= 26:
batch = 5
else:
batch = 1
total += await self._index(runners.RankRunner(rank, analyzer),
batch=batch, total_tuples=total_tuples.get(rank, 0))
if maxrank == 30:
total += await self._index(runners.RankRunner(0, analyzer))
total += await self._index(runners.InterpolationRunner(analyzer), batch=20)
return total
async def index_postcodes(self) -> int:
"""Index the entries of the location_postcode table.
"""
LOG.warning("Starting indexing postcodes using %s threads", self.num_threads)
return await self._index(runners.PostcodeRunner(), batch=20)
def update_status_table(self) -> None:
""" Update the status in the status table to 'indexed'.
"""
with connect(self.dsn) as conn:
with conn.cursor() as cur:
cur.execute('UPDATE import_status SET indexed = true')
conn.commit()
async def _index(self, runner: runners.Runner, batch: int = 1,
total_tuples: Optional[int] = None) -> int:
""" Index a single rank or table. `runner` describes the SQL to use
for indexing. `batch` describes the number of objects that
should be processed with a single SQL statement.
`total_tuples` may contain the total number of rows to process.
When not supplied, the value will be computed using the
            appropriate runner function.
"""
LOG.warning("Starting %s (using batch size %s)", runner.name(), batch)
if total_tuples is None:
total_tuples = self._prepare_indexing(runner)
progress = ProgressLogger(runner.name(), total_tuples)
if total_tuples > 0:
async with await psycopg.AsyncConnection.connect(
self.dsn, row_factory=psycopg.rows.dict_row) as aconn,\
QueryPool(self.dsn, self.num_threads, autocommit=True) as pool:
fetcher_time = 0.0
tstart = time.time()
async with aconn.cursor(name='places') as cur:
query = runner.index_places_query(batch)
params: List[Any] = []
num_places = 0
async for place in cur.stream(runner.sql_get_objects()):
fetcher_time += time.time() - tstart
params.extend(runner.index_places_params(place))
num_places += 1
if num_places >= batch:
LOG.debug("Processing places: %s", str(params))
await pool.put_query(query, params)
progress.add(num_places)
params = []
num_places = 0
tstart = time.time()
if num_places > 0:
await pool.put_query(runner.index_places_query(num_places), params)
LOG.info("Wait time: fetcher: %.2fs, pool: %.2fs",
fetcher_time, pool.wait_time)
return progress.done()
def _prepare_indexing(self, runner: runners.Runner) -> int:
with connect(self.dsn) as conn:
hstore_info = psycopg.types.TypeInfo.fetch(conn, "hstore")
if hstore_info is None:
raise RuntimeError('Hstore extension is requested but not installed.')
psycopg.types.hstore.register_hstore(hstore_info)
total_tuples = execute_scalar(conn, runner.sql_count_objects())
LOG.debug("Total number of rows: %i", total_tuples)
return cast(int, total_tuples)
| 9,382 | Python | .py | 186 | 35.473118 | 95 | 0.553417 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
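The core of `Indexer._index` above is a batch-accumulation loop: parameters are collected until `batch` places have been seen, then flushed as one query. A reduced, database-free sketch of that pattern (`send` stands in for `pool.put_query`):

```python
def process(places, batch=3, send=print):
    """Collect per-place params and flush them in groups of `batch`."""
    params, num_places = [], 0
    for place in places:
        params.append(place['place_id'])
        num_places += 1
        if num_places >= batch:
            send(f'flush {num_places} places', params)
            params, num_places = [], 0
    if num_places > 0:                      # final partial batch
        send(f'flush {num_places} places', params)

process([{'place_id': i} for i in range(7)])   # flushes 3, 3, then 1
```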
| 14,813 | runners.py | osm-search_Nominatim/src/nominatim_db/indexer/runners.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Mix-ins that provide the actual commands for the indexer for various indexing
tasks.
"""
from typing import Any, Sequence
from psycopg import sql as pysql
from psycopg.abc import Query
from psycopg.rows import DictRow
from psycopg.types.json import Json
from ..typing import Protocol
from ..data.place_info import PlaceInfo
from ..tokenizer.base import AbstractAnalyzer
# pylint: disable=C0111
def _mk_valuelist(template: str, num: int) -> pysql.Composed:
return pysql.SQL(',').join([pysql.SQL(template)] * num)
def _analyze_place(place: DictRow, analyzer: AbstractAnalyzer) -> Json:
return Json(analyzer.process_place(PlaceInfo(place)))
class Runner(Protocol):
def name(self) -> str: ...
def sql_count_objects(self) -> Query: ...
def sql_get_objects(self) -> Query: ...
def index_places_query(self, batch_size: int) -> Query: ...
def index_places_params(self, place: DictRow) -> Sequence[Any]: ...
SELECT_SQL = pysql.SQL("""SELECT place_id, extra.*
FROM (SELECT * FROM placex {}) as px,
LATERAL placex_indexing_prepare(px) as extra """)
UPDATE_LINE = "(%s, %s::hstore, %s::hstore, %s::int, %s::jsonb)"
class AbstractPlacexRunner:
""" Returns SQL commands for indexing of the placex table.
"""
def __init__(self, rank: int, analyzer: AbstractAnalyzer) -> None:
self.rank = rank
self.analyzer = analyzer
def index_places_query(self, batch_size: int) -> Query:
return pysql.SQL(
""" UPDATE placex
SET indexed_status = 0, address = v.addr, token_info = v.ti,
name = v.name, linked_place_id = v.linked_place_id
FROM (VALUES {}) as v(id, name, addr, linked_place_id, ti)
WHERE place_id = v.id
""").format(_mk_valuelist(UPDATE_LINE, batch_size))
def index_places_params(self, place: DictRow) -> Sequence[Any]:
return (place['place_id'],
place['name'],
place['address'],
place['linked_place_id'],
_analyze_place(place, self.analyzer))
class RankRunner(AbstractPlacexRunner):
""" Returns SQL commands for indexing one rank within the placex table.
"""
def name(self) -> str:
return f"rank {self.rank}"
def sql_count_objects(self) -> pysql.Composed:
return pysql.SQL("""SELECT count(*) FROM placex
WHERE rank_address = {} and indexed_status > 0
""").format(pysql.Literal(self.rank))
def sql_get_objects(self) -> pysql.Composed:
return SELECT_SQL.format(pysql.SQL(
"""WHERE placex.indexed_status > 0 and placex.rank_address = {}
ORDER BY placex.geometry_sector
""").format(pysql.Literal(self.rank)))
class BoundaryRunner(AbstractPlacexRunner):
""" Returns SQL commands for indexing the administrative boundaries
of a certain rank.
"""
def name(self) -> str:
return f"boundaries rank {self.rank}"
def sql_count_objects(self) -> Query:
return pysql.SQL("""SELECT count(*) FROM placex
WHERE indexed_status > 0
AND rank_search = {}
AND class = 'boundary' and type = 'administrative'
""").format(pysql.Literal(self.rank))
def sql_get_objects(self) -> Query:
return SELECT_SQL.format(pysql.SQL(
"""WHERE placex.indexed_status > 0 and placex.rank_search = {}
and placex.class = 'boundary' and placex.type = 'administrative'
ORDER BY placex.partition, placex.admin_level
""").format(pysql.Literal(self.rank)))
class InterpolationRunner:
""" Returns SQL commands for indexing the address interpolation table
location_property_osmline.
"""
def __init__(self, analyzer: AbstractAnalyzer) -> None:
self.analyzer = analyzer
def name(self) -> str:
return "interpolation lines (location_property_osmline)"
def sql_count_objects(self) -> Query:
return """SELECT count(*) FROM location_property_osmline
WHERE indexed_status > 0"""
def sql_get_objects(self) -> Query:
return """SELECT place_id, get_interpolation_address(address, osm_id) as address
FROM location_property_osmline
WHERE indexed_status > 0
ORDER BY geometry_sector"""
def index_places_query(self, batch_size: int) -> Query:
return pysql.SQL("""UPDATE location_property_osmline
SET indexed_status = 0, address = v.addr, token_info = v.ti
FROM (VALUES {}) as v(id, addr, ti)
WHERE place_id = v.id
""").format(_mk_valuelist("(%s, %s::hstore, %s::jsonb)", batch_size))
def index_places_params(self, place: DictRow) -> Sequence[Any]:
return (place['place_id'], place['address'],
_analyze_place(place, self.analyzer))
class PostcodeRunner(Runner):
""" Provides the SQL commands for indexing the location_postcode table.
"""
def name(self) -> str:
return "postcodes (location_postcode)"
def sql_count_objects(self) -> Query:
return 'SELECT count(*) FROM location_postcode WHERE indexed_status > 0'
def sql_get_objects(self) -> Query:
return """SELECT place_id FROM location_postcode
WHERE indexed_status > 0
ORDER BY country_code, postcode"""
def index_places_query(self, batch_size: int) -> Query:
return pysql.SQL("""UPDATE location_postcode SET indexed_status = 0
WHERE place_id IN ({})""")\
.format(pysql.SQL(',').join((pysql.Placeholder() for _ in range(batch_size))))
def index_places_params(self, place: DictRow) -> Sequence[Any]:
return (place['place_id'], )
| 6,283 | Python | .py | 127 | 39.251969 | 98 | 0.609465 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
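`_mk_valuelist` above simply repeats the `UPDATE_LINE` placeholder template once per row in the batch. The same expansion shown with plain strings instead of psycopg's `sql.Composed` objects:

```python
UPDATE_LINE = "(%s, %s::hstore, %s::hstore, %s::int, %s::jsonb)"

def mk_valuelist(template: str, num: int) -> str:
    return ','.join([template] * num)

print(mk_valuelist(UPDATE_LINE, 2))
# (%s, %s::hstore, %s::hstore, %s::int, %s::jsonb),(%s, %s::hstore, %s::hstore, %s::int, %s::jsonb)
```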
| 14,814 | progress.py | osm-search_Nominatim/src/nominatim_db/indexer/progress.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helpers for progress logging.
"""
import logging
from datetime import datetime
LOG = logging.getLogger()
INITIAL_PROGRESS = 10
class ProgressLogger:
""" Tracks and prints progress for the indexing process.
`name` is the name of the indexing step being tracked.
`total` sets up the total number of items that need processing.
`log_interval` denotes the interval in seconds at which progress
should be reported.
"""
def __init__(self, name: str, total: int, log_interval: int = 1) -> None:
self.name = name
self.total_places = total
self.done_places = 0
self.rank_start_time = datetime.now()
self.log_interval = log_interval
self.next_info = INITIAL_PROGRESS if LOG.isEnabledFor(logging.WARNING) else total + 1
def add(self, num: int = 1) -> None:
""" Mark `num` places as processed. Print a log message if the
logging is at least info and the log interval has passed.
"""
self.done_places += num
if self.done_places < self.next_info:
return
now = datetime.now()
done_time = (now - self.rank_start_time).total_seconds()
if done_time < 2:
self.next_info = self.done_places + INITIAL_PROGRESS
return
places_per_sec = self.done_places / done_time
eta = (self.total_places - self.done_places) / places_per_sec
LOG.warning("Done %d in %d @ %.3f per second - %s ETA (seconds): %.2f",
self.done_places, int(done_time),
places_per_sec, self.name, eta)
self.next_info += int(places_per_sec) * self.log_interval
def done(self) -> int:
""" Print final statistics about the progress.
"""
rank_end_time = datetime.now()
if rank_end_time == self.rank_start_time:
diff_seconds = 0.0
places_per_sec = float(self.done_places)
else:
diff_seconds = (rank_end_time - self.rank_start_time).total_seconds()
places_per_sec = self.done_places / diff_seconds
LOG.warning("Done %d/%d in %d @ %.3f per second - FINISHED %s\n",
self.done_places, self.total_places, int(diff_seconds),
places_per_sec, self.name)
return self.done_places
| 2,555 | Python | .py | 59 | 34.932203 | 93 | 0.619911 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
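The ETA in `ProgressLogger.add` above is plain rate arithmetic. A worked example with invented numbers:

```python
# 2000 of 10000 places done after 40 seconds (numbers invented):
done_places, total_places, done_time = 2000, 10000, 40.0
places_per_sec = done_places / done_time               # 50.0 places/s
eta = (total_places - done_places) / places_per_sec    # remaining / rate
assert eta == 160.0                                    # seconds to go
```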
| 14,815 | icu_tokenizer.py | osm-search_Nominatim/src/nominatim_db/tokenizer/icu_tokenizer.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tokenizer implementing normalisation as used before Nominatim 4 but using
libICU instead of the PostgreSQL module.
"""
from typing import Optional, Sequence, List, Tuple, Mapping, Any, cast, \
Dict, Set, Iterable
import itertools
import logging
from pathlib import Path
from psycopg.types.json import Jsonb
from psycopg import sql as pysql
from ..db.connection import connect, Connection, Cursor, server_version_tuple,\
drop_tables, table_exists, execute_scalar
from ..config import Configuration
from ..db.sql_preprocessor import SQLPreprocessor
from ..data.place_info import PlaceInfo
from ..data.place_name import PlaceName
from .icu_rule_loader import ICURuleLoader
from .place_sanitizer import PlaceSanitizer
from .icu_token_analysis import ICUTokenAnalysis
from .base import AbstractAnalyzer, AbstractTokenizer
DBCFG_TERM_NORMALIZATION = "tokenizer_term_normalization"
LOG = logging.getLogger()
WORD_TYPES = (('country_names', 'C'),
              ('postcodes', 'P'),
              ('full_word', 'W'),
              ('housenumbers', 'H'))
def create(dsn: str, data_dir: Path) -> 'ICUTokenizer':
""" Create a new instance of the tokenizer provided by this module.
"""
return ICUTokenizer(dsn, data_dir)
class ICUTokenizer(AbstractTokenizer):
""" This tokenizer uses libICU to convert names and queries to ASCII.
Otherwise it uses the same algorithms and data structures as the
normalization routines in Nominatim 3.
"""
def __init__(self, dsn: str, data_dir: Path) -> None:
self.dsn = dsn
self.data_dir = data_dir
self.loader: Optional[ICURuleLoader] = None
def init_new_db(self, config: Configuration, init_db: bool = True) -> None:
""" Set up a new tokenizer for the database.
This copies all necessary data in the project directory to make
sure the tokenizer remains stable even over updates.
"""
self.loader = ICURuleLoader(config)
self._save_config()
if init_db:
self.update_sql_functions(config)
self._setup_db_tables(config)
self._create_base_indices(config, 'word')
def init_from_project(self, config: Configuration) -> None:
""" Initialise the tokenizer from the project directory.
"""
self.loader = ICURuleLoader(config)
with connect(self.dsn) as conn:
self.loader.load_config_from_db(conn)
def finalize_import(self, config: Configuration) -> None:
""" Do any required postprocessing to make the tokenizer data ready
for use.
"""
self._create_lookup_indices(config, 'word')
def update_sql_functions(self, config: Configuration) -> None:
""" Reimport the SQL functions for this tokenizer.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
sqlp.run_sql_file(conn, 'tokenizer/icu_tokenizer.sql')
def check_database(self, config: Configuration) -> None:
""" Check that the tokenizer is set up correctly.
"""
# Will throw an error if there is an issue.
self.init_from_project(config)
def update_statistics(self, config: Configuration, threads: int = 2) -> None:
""" Recompute frequencies for all name words.
"""
with connect(self.dsn) as conn:
if not table_exists(conn, 'search_name'):
return
with conn.cursor() as cur:
cur.execute('ANALYSE search_name')
if threads > 1:
cur.execute(pysql.SQL('SET max_parallel_workers_per_gather TO {}')
.format(pysql.Literal(min(threads, 6),)))
if server_version_tuple(conn) < (12, 0):
LOG.info('Computing word frequencies')
drop_tables(conn, 'word_frequencies', 'addressword_frequencies')
cur.execute("""CREATE TEMP TABLE word_frequencies AS
SELECT unnest(name_vector) as id, count(*)
FROM search_name GROUP BY id""")
cur.execute('CREATE INDEX ON word_frequencies(id)')
cur.execute("""CREATE TEMP TABLE addressword_frequencies AS
SELECT unnest(nameaddress_vector) as id, count(*)
FROM search_name GROUP BY id""")
cur.execute('CREATE INDEX ON addressword_frequencies(id)')
cur.execute("""CREATE OR REPLACE FUNCTION word_freq_update(wid INTEGER,
INOUT info JSONB)
AS $$
DECLARE rec RECORD;
BEGIN
IF info is null THEN
info = '{}'::jsonb;
END IF;
FOR rec IN SELECT count FROM word_frequencies WHERE id = wid
LOOP
info = info || jsonb_build_object('count', rec.count);
END LOOP;
FOR rec IN SELECT count FROM addressword_frequencies WHERE id = wid
LOOP
info = info || jsonb_build_object('addr_count', rec.count);
END LOOP;
IF info = '{}'::jsonb THEN
info = null;
END IF;
END;
$$ LANGUAGE plpgsql IMMUTABLE;
""")
LOG.info('Update word table with recomputed frequencies')
drop_tables(conn, 'tmp_word')
cur.execute("""CREATE TABLE tmp_word AS
SELECT word_id, word_token, type, word,
word_freq_update(word_id, info) as info
FROM word
""")
drop_tables(conn, 'word_frequencies', 'addressword_frequencies')
else:
LOG.info('Computing word frequencies')
drop_tables(conn, 'word_frequencies')
cur.execute("""
CREATE TEMP TABLE word_frequencies AS
WITH word_freq AS MATERIALIZED (
SELECT unnest(name_vector) as id, count(*)
FROM search_name GROUP BY id),
addr_freq AS MATERIALIZED (
SELECT unnest(nameaddress_vector) as id, count(*)
FROM search_name GROUP BY id)
SELECT coalesce(a.id, w.id) as id,
(CASE WHEN w.count is null THEN '{}'::JSONB
ELSE jsonb_build_object('count', w.count) END
||
CASE WHEN a.count is null THEN '{}'::JSONB
ELSE jsonb_build_object('addr_count', a.count) END) as info
FROM word_freq w FULL JOIN addr_freq a ON a.id = w.id;
""")
cur.execute('CREATE UNIQUE INDEX ON word_frequencies(id) INCLUDE(info)')
cur.execute('ANALYSE word_frequencies')
LOG.info('Update word table with recomputed frequencies')
drop_tables(conn, 'tmp_word')
cur.execute("""CREATE TABLE tmp_word AS
SELECT word_id, word_token, type, word,
(CASE WHEN wf.info is null THEN word.info
ELSE coalesce(word.info, '{}'::jsonb) || wf.info
END) as info
FROM word LEFT JOIN word_frequencies wf
ON word.word_id = wf.id
""")
drop_tables(conn, 'word_frequencies')
with conn.cursor() as cur:
cur.execute('SET max_parallel_workers_per_gather TO 0')
sqlp = SQLPreprocessor(conn, config)
sqlp.run_string(conn,
'GRANT SELECT ON tmp_word TO "{{config.DATABASE_WEBUSER}}"')
conn.commit()
self._create_base_indices(config, 'tmp_word')
self._create_lookup_indices(config, 'tmp_word')
self._move_temporary_word_table('tmp_word')
def _cleanup_housenumbers(self) -> None:
""" Remove unused house numbers.
"""
with connect(self.dsn) as conn:
if not table_exists(conn, 'search_name'):
return
with conn.cursor(name="hnr_counter") as cur:
cur.execute("""SELECT DISTINCT word_id, coalesce(info->>'lookup', word_token)
FROM word
WHERE type = 'H'
AND NOT EXISTS(SELECT * FROM search_name
WHERE ARRAY[word.word_id] && name_vector)
AND (char_length(coalesce(word, word_token)) > 6
OR coalesce(word, word_token) not similar to '\\d+')
""")
candidates = {token: wid for wid, token in cur}
with conn.cursor(name="hnr_counter") as cur:
cur.execute("""SELECT housenumber FROM placex
WHERE housenumber is not null
AND (char_length(housenumber) > 6
OR housenumber not similar to '\\d+')
""")
for row in cur:
for hnr in row[0].split(';'):
candidates.pop(hnr, None)
LOG.info("There are %s outdated housenumbers.", len(candidates))
LOG.debug("Outdated housenumbers: %s", candidates.keys())
if candidates:
with conn.cursor() as cur:
cur.execute("""DELETE FROM word WHERE word_id = any(%s)""",
(list(candidates.values()), ))
conn.commit()
def update_word_tokens(self) -> None:
""" Remove unused tokens.
"""
LOG.warning("Cleaning up housenumber tokens.")
self._cleanup_housenumbers()
LOG.warning("Tokenizer house-keeping done.")
def name_analyzer(self) -> 'ICUNameAnalyzer':
""" Create a new analyzer for tokenizing names and queries
using this tokenizer. Analyzers are context managers and should
be used accordingly:
```
with tokenizer.name_analyzer() as analyzer:
analyser.tokenize()
```
When used outside the with construct, the caller must make sure to
call the close() function before destroying the analyzer.
Analyzers are not thread-safe. You need to instantiate one per thread.
"""
assert self.loader is not None
return ICUNameAnalyzer(self.dsn, self.loader.make_sanitizer(),
self.loader.make_token_analysis())
def most_frequent_words(self, conn: Connection, num: int) -> List[str]:
""" Return a list of the `num` most frequent full words
in the database.
"""
with conn.cursor() as cur:
cur.execute("""SELECT word, sum((info->>'count')::int) as count
FROM word WHERE type = 'W'
GROUP BY word
ORDER BY count DESC LIMIT %s""", (num,))
return list(s[0].split('@')[0] for s in cur)
def _save_config(self) -> None:
""" Save the configuration that needs to remain stable for the given
database as database properties.
"""
assert self.loader is not None
with connect(self.dsn) as conn:
self.loader.save_config_to_db(conn)
def _setup_db_tables(self, config: Configuration) -> None:
""" Set up the word table and fill it with pre-computed word
frequencies.
"""
with connect(self.dsn) as conn:
drop_tables(conn, 'word')
sqlp = SQLPreprocessor(conn, config)
sqlp.run_string(conn, """
CREATE TABLE word (
word_id INTEGER,
word_token text NOT NULL,
type text NOT NULL,
word text,
info jsonb
) {{db.tablespace.search_data}};
GRANT SELECT ON word TO "{{config.DATABASE_WEBUSER}}";
DROP SEQUENCE IF EXISTS seq_word;
CREATE SEQUENCE seq_word start 1;
GRANT SELECT ON seq_word to "{{config.DATABASE_WEBUSER}}";
""")
conn.commit()
def _create_base_indices(self, config: Configuration, table_name: str) -> None:
""" Set up the word table and fill it with pre-computed word
frequencies.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
sqlp.run_string(conn,
"""CREATE INDEX idx_{{table_name}}_word_token ON {{table_name}}
USING BTREE (word_token) {{db.tablespace.search_index}}""",
table_name=table_name)
for name, ctype in WORD_TYPES:
sqlp.run_string(conn,
"""CREATE INDEX idx_{{table_name}}_{{idx_name}} ON {{table_name}}
USING BTREE (word) {{db.tablespace.address_index}}
WHERE type = '{{column_type}}'
""",
table_name=table_name, idx_name=name,
column_type=ctype)
conn.commit()
def _create_lookup_indices(self, config: Configuration, table_name: str) -> None:
""" Create additional indexes used when running the API.
"""
with connect(self.dsn) as conn:
sqlp = SQLPreprocessor(conn, config)
# Index required for details lookup.
sqlp.run_string(conn, """
CREATE INDEX IF NOT EXISTS idx_{{table_name}}_word_id
ON {{table_name}} USING BTREE (word_id) {{db.tablespace.search_index}}
""",
table_name=table_name)
conn.commit()
def _move_temporary_word_table(self, old: str) -> None:
""" Rename all tables and indexes used by the tokenizer.
"""
with connect(self.dsn) as conn:
drop_tables(conn, 'word')
with conn.cursor() as cur:
cur.execute(f"ALTER TABLE {old} RENAME TO word")
for idx in ('word_token', 'word_id'):
cur.execute(f"""ALTER INDEX idx_{old}_{idx}
RENAME TO idx_word_{idx}""")
for name, _ in WORD_TYPES:
cur.execute(f"""ALTER INDEX idx_{old}_{name}
RENAME TO idx_word_{name}""")
conn.commit()
class ICUNameAnalyzer(AbstractAnalyzer):
""" The ICU analyzer uses the ICU library for splitting names.
Each instance opens a connection to the database to request the
normalization.
"""
def __init__(self, dsn: str, sanitizer: PlaceSanitizer,
token_analysis: ICUTokenAnalysis) -> None:
self.conn: Optional[Connection] = connect(dsn)
self.conn.autocommit = True
self.sanitizer = sanitizer
self.token_analysis = token_analysis
self._cache = _TokenCache()
def close(self) -> None:
""" Free all resources used by the analyzer.
"""
if self.conn:
self.conn.close()
self.conn = None
def _search_normalized(self, name: str) -> str:
""" Return the search token transliteration of the given name.
"""
return cast(str, self.token_analysis.search.transliterate(name)).strip()
def _normalized(self, name: str) -> str:
""" Return the normalized version of the given name with all
non-relevant information removed.
"""
return cast(str, self.token_analysis.normalizer.transliterate(name)).strip()
def get_word_token_info(self, words: Sequence[str]) -> List[Tuple[str, str, int]]:
""" Return token information for the given list of words.
If a word starts with # it is assumed to be a full name
otherwise it is assumed to be a partial name.
The function returns a list of tuples with
(original word, word token, word id).
The function is used for testing and debugging only
and is not necessarily efficient.
"""
assert self.conn is not None
full_tokens = {}
partial_tokens = {}
for word in words:
if word.startswith('#'):
full_tokens[word] = self._search_normalized(word[1:])
else:
partial_tokens[word] = self._search_normalized(word)
with self.conn.cursor() as cur:
cur.execute("""SELECT word_token, word_id
FROM word WHERE word_token = ANY(%s) and type = 'W'
""", (list(full_tokens.values()),))
full_ids = {r[0]: r[1] for r in cur}
cur.execute("""SELECT word_token, word_id
FROM word WHERE word_token = ANY(%s) and type = 'w'""",
(list(partial_tokens.values()),))
part_ids = {r[0]: r[1] for r in cur}
return [(k, v, full_ids.get(v, None)) for k, v in full_tokens.items()] \
+ [(k, v, part_ids.get(v, None)) for k, v in partial_tokens.items()]
def normalize_postcode(self, postcode: str) -> str:
""" Convert the postcode to a standardized form.
This function must yield exactly the same result as the SQL function
'token_normalized_postcode()'.
"""
return postcode.strip().upper()
def update_postcodes_from_db(self) -> None:
""" Update postcode tokens in the word table from the location_postcode
table.
"""
assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@postcode')
with self.conn.cursor() as cur:
# First get all postcode names currently in the word table.
cur.execute("SELECT DISTINCT word FROM word WHERE type = 'P'")
word_entries = set((entry[0] for entry in cur))
# Then compute the required postcode names from the postcode table.
needed_entries = set()
cur.execute("SELECT country_code, postcode FROM location_postcode")
for cc, postcode in cur:
info = PlaceInfo({'country_code': cc,
'class': 'place', 'type': 'postcode',
'address': {'postcode': postcode}})
address = self.sanitizer.process_names(info)[1]
for place in address:
if place.kind == 'postcode':
if analyzer is None:
postcode_name = place.name.strip().upper()
variant_base = None
else:
postcode_name = analyzer.get_canonical_id(place)
variant_base = place.get_attr("variant")
if variant_base:
needed_entries.add(f'{postcode_name}@{variant_base}')
else:
needed_entries.add(postcode_name)
break
# Now update the word table.
self._delete_unused_postcode_words(word_entries - needed_entries)
self._add_missing_postcode_words(needed_entries - word_entries)
def _delete_unused_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if tokens:
with self.conn.cursor() as cur:
cur.execute("DELETE FROM word WHERE type = 'P' and word = any(%s)",
(list(tokens), ))
def _add_missing_postcode_words(self, tokens: Iterable[str]) -> None:
assert self.conn is not None
if not tokens:
return
analyzer = self.token_analysis.analysis.get('@postcode')
terms = []
for postcode_name in tokens:
if '@' in postcode_name:
term, variant = postcode_name.split('@', 1)  # names contain at most one '@'
term = self._search_normalized(term)
if analyzer is None:
variants = [term]
else:
variants = analyzer.compute_variants(variant)
if term not in variants:
variants.append(term)
else:
variants = [self._search_normalized(postcode_name)]
terms.append((postcode_name, variants))
if terms:
with self.conn.cursor() as cur:
cur.executemany("""SELECT create_postcode_word(%s, %s)""", terms)
def update_special_phrases(self, phrases: Iterable[Tuple[str, str, str, str]],
should_replace: bool) -> None:
""" Replace the search index for special phrases with the new phrases.
If `should_replace` is True, then the previous set of phrases will be
completely replaced. Otherwise the phrases are added to the
already existing ones.
"""
assert self.conn is not None
norm_phrases = set(((self._normalized(p[0]), p[1], p[2], p[3])
for p in phrases))
with self.conn.cursor() as cur:
# Get the old phrases.
existing_phrases = set()
cur.execute("SELECT word, info FROM word WHERE type = 'S'")
for word, info in cur:
existing_phrases.add((word, info['class'], info['type'],
info.get('op') or '-'))
added = self._add_special_phrases(cur, norm_phrases, existing_phrases)
if should_replace:
deleted = self._remove_special_phrases(cur, norm_phrases,
existing_phrases)
else:
deleted = 0
LOG.info("Total phrases: %s. Added: %s. Deleted: %s",
len(norm_phrases), added, deleted)
def _add_special_phrases(self, cursor: Cursor,
new_phrases: Set[Tuple[str, str, str, str]],
existing_phrases: Set[Tuple[str, str, str, str]]) -> int:
""" Add all phrases to the database that are not yet there.
"""
to_add = new_phrases - existing_phrases
added = 0
with cursor.copy('COPY word(word_token, type, word, info) FROM STDIN') as copy:
for word, cls, typ, oper in to_add:
term = self._search_normalized(word)
if term:
copy.write_row((term, 'S', word,
Jsonb({'class': cls, 'type': typ,
'op': oper if oper in ('in', 'near') else None})))
added += 1
return added
def _remove_special_phrases(self, cursor: Cursor,
new_phrases: Set[Tuple[str, str, str, str]],
existing_phrases: Set[Tuple[str, str, str, str]]) -> int:
""" Remove all phrases from the database that are no longer in the
new phrase list.
"""
to_delete = existing_phrases - new_phrases
if to_delete:
cursor.executemany(
""" DELETE FROM word
WHERE type = 'S' and word = %s
and info->>'class' = %s and info->>'type' = %s
and %s = coalesce(info->>'op', '-')
""", to_delete)
return len(to_delete)
def add_country_names(self, country_code: str, names: Mapping[str, str]) -> None:
""" Add default names for the given country to the search index.
"""
# Make sure any name preprocessing for country names applies.
info = PlaceInfo({'name': names, 'country_code': country_code,
'rank_address': 4, 'class': 'boundary',
'type': 'administrative'})
self._add_country_full_names(country_code,
self.sanitizer.process_names(info)[0],
internal=True)
def _add_country_full_names(self, country_code: str, names: Sequence[PlaceName],
internal: bool = False) -> None:
""" Add names for the given country from an already sanitized
name list.
"""
assert self.conn is not None
word_tokens = set()
for name in names:
norm_name = self._search_normalized(name.name)
if norm_name:
word_tokens.add(norm_name)
with self.conn.cursor() as cur:
# Get existing names
cur.execute("""SELECT word_token, coalesce(info ? 'internal', false) as is_internal
FROM word
WHERE type = 'C' and word = %s""",
(country_code, ))
# internal/external names
existing_tokens: Dict[bool, Set[str]] = {True: set(), False: set()}
for word in cur:
existing_tokens[word[1]].add(word[0])
# Delete names that no longer exist.
gone_tokens = existing_tokens[internal] - word_tokens
if internal:
gone_tokens.update(existing_tokens[False] & word_tokens)
if gone_tokens:
cur.execute("""DELETE FROM word
USING unnest(%s::text[]) as token
WHERE type = 'C' and word = %s
and word_token = token""",
(list(gone_tokens), country_code))
# Only add those names that are not yet in the list.
new_tokens = word_tokens - existing_tokens[True]
if not internal:
new_tokens -= existing_tokens[False]
if new_tokens:
if internal:
sql = """INSERT INTO word (word_token, type, word, info)
(SELECT token, 'C', %s, '{"internal": "yes"}'
FROM unnest(%s::text[]) as token)
"""
else:
sql = """INSERT INTO word (word_token, type, word)
(SELECT token, 'C', %s
FROM unnest(%s::text[]) as token)
"""
cur.execute(sql, (country_code, list(new_tokens)))
def process_place(self, place: PlaceInfo) -> Mapping[str, Any]:
""" Determine tokenizer information about the given place.
Returns a JSON-serializable structure that will be handed into
the database via the token_info field.
"""
token_info = _TokenInfo()
names, address = self.sanitizer.process_names(place)
if names:
token_info.set_names(*self._compute_name_tokens(names))
if place.is_country():
assert place.country_code is not None
self._add_country_full_names(place.country_code, names)
if address:
self._process_place_address(token_info, address)
return token_info.to_dict()
def _process_place_address(self, token_info: '_TokenInfo',
address: Sequence[PlaceName]) -> None:
for item in address:
if item.kind == 'postcode':
token_info.set_postcode(self._add_postcode(item))
elif item.kind == 'housenumber':
token_info.add_housenumber(*self._compute_housenumber_token(item))
elif item.kind == 'street':
token_info.add_street(self._retrieve_full_tokens(item.name))
elif item.kind == 'place':
if not item.suffix:
token_info.add_place(itertools.chain(*self._compute_name_tokens([item])))
elif not item.kind.startswith('_') and not item.suffix and \
item.kind not in ('country', 'full', 'inclusion'):
token_info.add_address_term(item.kind,
itertools.chain(*self._compute_name_tokens([item])))
def _compute_housenumber_token(self, hnr: PlaceName) -> Tuple[Optional[int], Optional[str]]:
""" Normalize the housenumber and return the word token and the
canonical form.
"""
assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@housenumber')
result: Tuple[Optional[int], Optional[str]] = (None, None)
if analyzer is None:
# When no custom analyzer is set, simply normalize and transliterate
norm_name = self._search_normalized(hnr.name)
if norm_name:
result = self._cache.housenumbers.get(norm_name, result)
if result[0] is None:
hid = execute_scalar(self.conn, "SELECT getorcreate_hnr_id(%s)", (norm_name, ))
result = hid, norm_name
self._cache.housenumbers[norm_name] = result
else:
# Otherwise use the analyzer to determine the canonical name.
# Per convention we use the first variant as the 'lookup name', the
# name that gets saved in the housenumber field of the place.
word_id = analyzer.get_canonical_id(hnr)
if word_id:
result = self._cache.housenumbers.get(word_id, result)
if result[0] is None:
variants = analyzer.compute_variants(word_id)
if variants:
hid = execute_scalar(self.conn, "SELECT create_analyzed_hnr_id(%s, %s)",
(word_id, list(variants)))
result = hid, variants[0]
self._cache.housenumbers[word_id] = result
return result
def _retrieve_full_tokens(self, name: str) -> List[int]:
""" Get the full name token for the given name, if it exists.
The name is only retrieved for the standard analyser.
"""
assert self.conn is not None
norm_name = self._search_normalized(name)
# return cached if possible
if norm_name in self._cache.fulls:
return self._cache.fulls[norm_name]
with self.conn.cursor() as cur:
cur.execute("SELECT word_id FROM word WHERE word_token = %s and type = 'W'",
(norm_name, ))
full = [row[0] for row in cur]
self._cache.fulls[norm_name] = full
return full
def _compute_name_tokens(self, names: Sequence[PlaceName]) -> Tuple[Set[int], Set[int]]:
""" Computes the full name and partial name tokens for the given
dictionary of names.
"""
assert self.conn is not None
full_tokens: Set[int] = set()
partial_tokens: Set[int] = set()
for name in names:
analyzer_id = name.get_attr('analyzer')
analyzer = self.token_analysis.get_analyzer(analyzer_id)
word_id = analyzer.get_canonical_id(name)
if analyzer_id is None:
token_id = word_id
else:
token_id = f'{word_id}@{analyzer_id}'
full, part = self._cache.names.get(token_id, (None, None))
if full is None:
variants = analyzer.compute_variants(word_id)
if not variants:
continue
with self.conn.cursor() as cur:
cur.execute("SELECT * FROM getorcreate_full_word(%s, %s)",
(token_id, variants))
full, part = cast(Tuple[int, List[int]], cur.fetchone())
self._cache.names[token_id] = (full, part)
assert part is not None
full_tokens.add(full)
partial_tokens.update(part)
return full_tokens, partial_tokens
def _add_postcode(self, item: PlaceName) -> Optional[str]:
""" Make sure the normalized postcode is present in the word table.
"""
assert self.conn is not None
analyzer = self.token_analysis.analysis.get('@postcode')
if analyzer is None:
postcode_name = item.name.strip().upper()
variant_base = None
else:
postcode_name = analyzer.get_canonical_id(item)
variant_base = item.get_attr("variant")
if variant_base:
postcode = f'{postcode_name}@{variant_base}'
else:
postcode = postcode_name
if postcode not in self._cache.postcodes:
term = self._search_normalized(postcode_name)
if not term:
return None
variants = {term}
if analyzer is not None and variant_base:
variants.update(analyzer.compute_variants(variant_base))
with self.conn.cursor() as cur:
cur.execute("SELECT create_postcode_word(%s, %s)",
(postcode, list(variants)))
self._cache.postcodes.add(postcode)
return postcode_name
class _TokenInfo:
""" Collect token information to be sent back to the database.
"""
def __init__(self) -> None:
self.names: Optional[str] = None
self.housenumbers: Set[str] = set()
self.housenumber_tokens: Set[int] = set()
self.street_tokens: Optional[Set[int]] = None
self.place_tokens: Set[int] = set()
self.address_tokens: Dict[str, str] = {}
self.postcode: Optional[str] = None
def _mk_array(self, tokens: Iterable[Any]) -> str:
return f"{{{','.join((str(s) for s in tokens))}}}"
def to_dict(self) -> Dict[str, Any]:
""" Return the token information in database importable format.
"""
out: Dict[str, Any] = {}
if self.names:
out['names'] = self.names
if self.housenumbers:
out['hnr'] = ';'.join(self.housenumbers)
out['hnr_tokens'] = self._mk_array(self.housenumber_tokens)
if self.street_tokens is not None:
out['street'] = self._mk_array(self.street_tokens)
if self.place_tokens:
out['place'] = self._mk_array(self.place_tokens)
if self.address_tokens:
out['addr'] = self.address_tokens
if self.postcode:
out['postcode'] = self.postcode
return out
def set_names(self, fulls: Iterable[int], partials: Iterable[int]) -> None:
""" Adds token information for the normalised names.
"""
self.names = self._mk_array(itertools.chain(fulls, partials))
def add_housenumber(self, token: Optional[int], hnr: Optional[str]) -> None:
""" Extract housenumber information from a list of normalised
housenumbers.
"""
if token:
assert hnr is not None
self.housenumbers.add(hnr)
self.housenumber_tokens.add(token)
def add_street(self, tokens: Iterable[int]) -> None:
""" Add addr:street match terms.
"""
if self.street_tokens is None:
self.street_tokens = set()
self.street_tokens.update(tokens)
def add_place(self, tokens: Iterable[int]) -> None:
""" Add addr:place search and match terms.
"""
self.place_tokens.update(tokens)
def add_address_term(self, key: str, partials: Iterable[int]) -> None:
""" Add additional address terms.
"""
array = self._mk_array(partials)
if len(array) > 2:
self.address_tokens[key] = array
def set_postcode(self, postcode: Optional[str]) -> None:
""" Set the postcode to the given one.
"""
self.postcode = postcode
class _TokenCache:
""" Cache for token information to avoid repeated database queries.
This cache is not thread-safe and needs to be instantiated per
analyzer.
"""
def __init__(self) -> None:
self.names: Dict[str, Tuple[int, List[int]]] = {}
self.partials: Dict[str, int] = {}
self.fulls: Dict[str, List[int]] = {}
self.postcodes: Set[str] = set()
self.housenumbers: Dict[str, Tuple[Optional[int], Optional[str]]] = {}
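For orientation, a hand-written sketch of the payload that `_TokenInfo.to_dict()` produces for `process_place()` (all ids and values below are invented for illustration):

# Hypothetical token_info structure handed to the database:
# {
#     'names': '{123,124,125}',      # full and partial name token ids
#     'hnr': '12;12a',               # normalised housenumbers
#     'hnr_tokens': '{987,988}',
#     'street': '{55}',              # addr:street match tokens
#     'place': '{77}',               # addr:place tokens
#     'addr': {'city': '{41,42}'},   # further address parts by kind
#     'postcode': '99734',
# }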
| 37,951 | Python | .py | 751 | 34.83755 | 102 | 0.53064 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,816 | place_sanitizer.py | osm-search_Nominatim/src/nominatim_db/tokenizer/place_sanitizer.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Handler for cleaning name and address tags in place information before it
is handed to the token analysis.
"""
from typing import Optional, List, Mapping, Sequence, Callable, Any, Tuple
from ..errors import UsageError
from ..config import Configuration
from .sanitizers.config import SanitizerConfig
from .sanitizers.base import SanitizerHandler, ProcessInfo
from ..data.place_name import PlaceName
from ..data.place_info import PlaceInfo
class PlaceSanitizer:
""" Controller class which applies sanitizer functions on the place
names and address before they are used by the token analysers.
"""
def __init__(self, rules: Optional[Sequence[Mapping[str, Any]]],
config: Configuration) -> None:
self.handlers: List[Callable[[ProcessInfo], None]] = []
if rules:
for func in rules:
if 'step' not in func:
raise UsageError("Sanitizer rule is missing the 'step' attribute.")
if not isinstance(func['step'], str):
raise UsageError("'step' attribute must be a simple string.")
module: SanitizerHandler = \
config.load_plugin_module(func['step'], 'nominatim_db.tokenizer.sanitizers')
self.handlers.append(module.create(SanitizerConfig(func)))
def process_names(self, place: PlaceInfo) -> Tuple[List[PlaceName], List[PlaceName]]:
""" Extract a sanitized list of names and address parts from the
given place. The function returns a tuple
(list of names, list of address names)
"""
obj = ProcessInfo(place)
for func in self.handlers:
func(obj)
return obj.names, obj.address
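A hedged usage sketch: `config` is assumed to be a loaded Configuration, and the step names are assumed to resolve to the sanitizer modules shown later in this section:

from nominatim_db.tokenizer.place_sanitizer import PlaceSanitizer
from nominatim_db.data.place_info import PlaceInfo

rules = [{'step': 'strip-brace-terms'},
         {'step': 'tag-japanese'}]
sanitizer = PlaceSanitizer(rules, config)
names, address = sanitizer.process_names(
    PlaceInfo({'name': {'name': 'Halle (Saale)'}, 'country_code': 'de'}))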
| 1,957 | Python | .py | 42 | 38.928571 | 96 | 0.674895 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,817 | factory.py | osm-search_Nominatim/src/nominatim_db/tokenizer/factory.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Functions for creating a tokenizer or initialising the right one for an
existing database.
A tokenizer is something that is bound to the lifetime of a database. It
can be chosen and configured before the initial import but then needs to
be used consistently when querying and updating the database.
This module provides the functions to create and configure a new tokenizer
as well as instantiating the appropriate tokenizer for updating an existing
database.
"""
from typing import Optional
import logging
import importlib
from pathlib import Path
from ..errors import UsageError
from ..db import properties
from ..db.connection import connect
from ..config import Configuration
from ..tokenizer.base import AbstractTokenizer, TokenizerModule
LOG = logging.getLogger()
def _import_tokenizer(name: str) -> TokenizerModule:
""" Load the tokenizer.py module from project directory.
"""
src_file = Path(__file__).parent / (name + '_tokenizer.py')
if not src_file.is_file():
LOG.fatal("No tokenizer named '%s' available. "
"Check the setting of NOMINATIM_TOKENIZER.", name)
raise UsageError('Tokenizer not found')
return importlib.import_module('nominatim_db.tokenizer.' + name + '_tokenizer')
def create_tokenizer(config: Configuration, init_db: bool = True,
module_name: Optional[str] = None) -> AbstractTokenizer:
""" Create a new tokenizer as defined by the given configuration.
The tokenizer data and code are copied into the 'tokenizer' directory
of the project directory and the tokenizer is loaded from its new location.
"""
if module_name is None:
module_name = config.TOKENIZER
# Create the directory for the tokenizer data
assert config.project_dir is not None
basedir = config.project_dir / 'tokenizer'
if not basedir.exists():
basedir.mkdir()
elif not basedir.is_dir():
LOG.fatal("Tokenizer directory '%s' cannot be created.", basedir)
raise UsageError("Tokenizer setup failed.")
# Import and initialize the tokenizer.
tokenizer_module = _import_tokenizer(module_name)
tokenizer = tokenizer_module.create(config.get_libpq_dsn(), basedir)
tokenizer.init_new_db(config, init_db=init_db)
with connect(config.get_libpq_dsn()) as conn:
properties.set_property(conn, 'tokenizer', module_name)
return tokenizer
def get_tokenizer_for_db(config: Configuration) -> AbstractTokenizer:
""" Instantiate a tokenizer for an existing database.
The function looks up the appropriate tokenizer in the database
and initialises it.
"""
assert config.project_dir is not None
basedir = config.project_dir / 'tokenizer'
if not basedir.is_dir():
# Directory will be repopulated by tokenizer below.
basedir.mkdir()
with connect(config.get_libpq_dsn()) as conn:
name = properties.get_property(conn, 'tokenizer')
if name is None:
LOG.fatal("Tokenizer was not set up properly. Database property missing.")
raise UsageError('Cannot initialize tokenizer.')
tokenizer_module = _import_tokenizer(name)
tokenizer = tokenizer_module.create(config.get_libpq_dsn(), basedir)
tokenizer.init_from_project(config)
return tokenizer
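A minimal sketch of the two entry points, assuming `config` is a fully loaded Configuration with a valid project directory:

from nominatim_db.tokenizer import factory as tokenizer_factory

tokenizer = tokenizer_factory.create_tokenizer(config)      # once, at import
tokenizer = tokenizer_factory.get_tokenizer_for_db(config)  # for later runs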
| 3,521 | Python | .py | 77 | 40.714286 | 83 | 0.730567 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,818 | icu_rule_loader.py | osm-search_Nominatim/src/nominatim_db/tokenizer/icu_rule_loader.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Helper class to create ICU rules from a configuration file.
"""
from typing import Mapping, Any, Dict, Optional
import io
import json
import logging
from icu import Transliterator
from ..config import flatten_config_list, Configuration
from ..db.properties import set_property, get_property
from ..db.connection import Connection
from ..errors import UsageError
from .place_sanitizer import PlaceSanitizer
from .icu_token_analysis import ICUTokenAnalysis
from .token_analysis.base import AnalysisModule, Analyzer
from ..data import country_info
LOG = logging.getLogger()
DBCFG_IMPORT_NORM_RULES = "tokenizer_import_normalisation"
DBCFG_IMPORT_TRANS_RULES = "tokenizer_import_transliteration"
DBCFG_IMPORT_ANALYSIS_RULES = "tokenizer_import_analysis_rules"
def _get_section(rules: Mapping[str, Any], section: str) -> Any:
""" Get the section named 'section' from the rules. If the section does
not exist, raise a usage error with a meaningful message.
"""
if section not in rules:
LOG.fatal("Section '%s' not found in tokenizer config.", section)
raise UsageError("Syntax error in tokenizer configuration file.")
return rules[section]
class ICURuleLoader:
""" Compiler for ICU rules from a tokenizer configuration file.
"""
def __init__(self, config: Configuration) -> None:
self.config = config
rules = config.load_sub_configuration('icu_tokenizer.yaml',
config='TOKENIZER_CONFIG')
# Make sure country information is available to analyzers and sanitizers.
country_info.setup_country_config(config)
self.normalization_rules = self._cfg_to_icu_rules(rules, 'normalization')
self.transliteration_rules = self._cfg_to_icu_rules(rules, 'transliteration')
self.analysis_rules = _get_section(rules, 'token-analysis')
self._setup_analysis()
# Load optional sanitizer rule set.
self.sanitizer_rules = rules.get('sanitizers', [])
def load_config_from_db(self, conn: Connection) -> None:
""" Get previously saved parts of the configuration from the
database.
"""
rules = get_property(conn, DBCFG_IMPORT_NORM_RULES)
if rules is not None:
self.normalization_rules = rules
rules = get_property(conn, DBCFG_IMPORT_TRANS_RULES)
if rules is not None:
self.transliteration_rules = rules
rules = get_property(conn, DBCFG_IMPORT_ANALYSIS_RULES)
if rules:
self.analysis_rules = json.loads(rules)
else:
self.analysis_rules = []
self._setup_analysis()
def save_config_to_db(self, conn: Connection) -> None:
""" Save the part of the configuration that cannot be changed into
the database.
"""
set_property(conn, DBCFG_IMPORT_NORM_RULES, self.normalization_rules)
set_property(conn, DBCFG_IMPORT_TRANS_RULES, self.transliteration_rules)
set_property(conn, DBCFG_IMPORT_ANALYSIS_RULES, json.dumps(self.analysis_rules))
def make_sanitizer(self) -> PlaceSanitizer:
""" Create a place sanitizer from the configured rules.
"""
return PlaceSanitizer(self.sanitizer_rules, self.config)
def make_token_analysis(self) -> ICUTokenAnalysis:
""" Create a token analyser from the reviouly loaded rules.
"""
return ICUTokenAnalysis(self.normalization_rules,
self.transliteration_rules, self.analysis)
def get_search_rules(self) -> str:
""" Return the ICU rules to be used during search.
The rules combine normalization and transliteration.
"""
# First apply the normalization rules.
rules = io.StringIO()
rules.write(self.normalization_rules)
# Then add transliteration.
rules.write(self.transliteration_rules)
return rules.getvalue()
def get_normalization_rules(self) -> str:
""" Return rules for normalisation of a term.
"""
return self.normalization_rules
def get_transliteration_rules(self) -> str:
""" Return the rules for converting a string into its asciii representation.
"""
return self.transliteration_rules
def _setup_analysis(self) -> None:
""" Process the rules used for creating the various token analyzers.
"""
self.analysis: Dict[Optional[str], TokenAnalyzerRule] = {}
if not isinstance(self.analysis_rules, list):
raise UsageError("Configuration section 'token-analysis' must be a list.")
norm = Transliterator.createFromRules("rule_loader_normalization",
self.normalization_rules)
trans = Transliterator.createFromRules("rule_loader_transliteration",
self.transliteration_rules)
for section in self.analysis_rules:
name = section.get('id', None)
if name in self.analysis:
if name is None:
LOG.fatal("ICU tokenizer configuration has two default token analyzers.")
else:
LOG.fatal("ICU tokenizer configuration has two token "
"analyzers with id '%s'.", name)
raise UsageError("Syntax error in ICU tokenizer config.")
self.analysis[name] = TokenAnalyzerRule(section, norm, trans,
self.config)
@staticmethod
def _cfg_to_icu_rules(rules: Mapping[str, Any], section: str) -> str:
""" Load an ICU ruleset from the given section. If the section is a
simple string, it is interpreted as a file name and the rules are
loaded verbatim from the given file. The filename is expected to be
relative to the tokenizer rule file. If the section is a list then
each line is assumed to be a rule. All rules are concatenated and returned.
"""
content = _get_section(rules, section)
if content is None:
return ''
return ';'.join(flatten_config_list(content, section)) + ';'
class TokenAnalyzerRule:
""" Factory for a single analysis module. The class saves the configuration
and creates a new token analyzer on request.
"""
def __init__(self, rules: Mapping[str, Any],
normalizer: Any, transliterator: Any,
config: Configuration) -> None:
analyzer_name = _get_section(rules, 'analyzer')
if not analyzer_name or not isinstance(analyzer_name, str):
raise UsageError("'analyzer' parameter needs to be simple string")
self._analysis_mod: AnalysisModule = \
config.load_plugin_module(analyzer_name, 'nominatim_db.tokenizer.token_analysis')
self.config = self._analysis_mod.configure(rules, normalizer,
transliterator)
def create(self, normalizer: Any, transliterator: Any) -> Analyzer:
""" Create a new analyser instance for the given rule.
"""
return self._analysis_mod.create(normalizer, transliterator, self.config)
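To make the rule flow concrete, a sketch that feeds the loader's combined search rules into an ICU transliterator (`config` is assumed to be a loaded Configuration; the printed result depends on the configured rule files):

from icu import Transliterator
from nominatim_db.tokenizer.icu_rule_loader import ICURuleLoader

loader = ICURuleLoader(config)
search = Transliterator.createFromRules('sketch-search',
                                        loader.get_search_rules())
print(search.transliterate('Straße'))  # e.g. 'strasse' under typical rules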
| 7,503 | Python | .py | 150 | 40.253333 | 93 | 0.65362 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,819 | base.py | osm-search_Nominatim/src/nominatim_db/tokenizer/base.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Abstract class definitions for tokenizers. These base classes are here
mainly for documentation purposes.
"""
from abc import ABC, abstractmethod
from typing import List, Tuple, Dict, Any, Optional, Iterable
from pathlib import Path
from ..typing import Protocol
from ..config import Configuration
from ..db.connection import Connection
from ..data.place_info import PlaceInfo
class AbstractAnalyzer(ABC):
""" The analyzer provides the functions for analysing names and building
the token database.
Analyzers are instantiated on a per-thread base. Access to global data
structures must be synchronised accordingly.
"""
def __enter__(self) -> 'AbstractAnalyzer':
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.close()
@abstractmethod
def close(self) -> None:
""" Free all resources used by the analyzer.
"""
@abstractmethod
def get_word_token_info(self, words: List[str]) -> List[Tuple[str, str, int]]:
""" Return token information for the given list of words.
The function is used for testing and debugging only
and does not need to be particularly efficient.
Arguments:
words: A list of words to look up the tokens for.
If a word starts with # it is assumed to be a full name
otherwise it is assumed to be a partial term.
Returns:
The function returns the list of all tuples that could be
found for the given words. Each list entry is a tuple of
(original word, word token, word id).
"""
@abstractmethod
def normalize_postcode(self, postcode: str) -> str:
""" Convert the postcode to its standardized form.
This function must yield exactly the same result as the SQL function
`token_normalized_postcode()`.
Arguments:
postcode: The postcode to be normalized.
Returns:
The given postcode after normalization.
"""
@abstractmethod
def update_postcodes_from_db(self) -> None:
""" Update the tokenizer's postcode tokens from the current content
of the `location_postcode` table.
"""
@abstractmethod
def update_special_phrases(self,
phrases: Iterable[Tuple[str, str, str, str]],
should_replace: bool) -> None:
""" Update the tokenizer's special phrase tokens from the given
list of special phrases.
Arguments:
phrases: The new list of special phrases. Each entry is
a tuple of (phrase, class, type, operator).
should_replace: If true, replace the current list of phrases.
When false, just add the given phrases to the
ones that already exist.
"""
@abstractmethod
def add_country_names(self, country_code: str, names: Dict[str, str]) -> None:
""" Add the given names to the tokenizer's list of country tokens.
Arguments:
country_code: two-letter country code for the country the names
refer to.
names: Dictionary of name type to name.
"""
@abstractmethod
def process_place(self, place: PlaceInfo) -> Any:
""" Extract tokens for the given place and compute the
information to be handed to the PL/pgSQL processor for building
the search index.
Arguments:
place: Place information retrieved from the database.
Returns:
A JSON-serialisable structure that will be handed into
the database via the `token_info` field.
"""
class AbstractTokenizer(ABC):
""" The tokenizer instance is the central instance of the tokenizer in
the system. There will only be a single instance of the tokenizer
active at any time.
"""
@abstractmethod
def init_new_db(self, config: Configuration, init_db: bool = True) -> None:
""" Set up a new tokenizer for the database.
The function should copy all necessary data into the project
directory or save it in the property table to make sure that
the tokenizer remains stable over updates.
Arguments:
config: Read-only object with configuration options.
init_db: When set to False, then initialisation of database
tables should be skipped. This option is only required for
migration purposes and can be safely ignored by custom
tokenizers.
"""
@abstractmethod
def init_from_project(self, config: Configuration) -> None:
""" Initialise the tokenizer from an existing database setup.
The function should load all previously saved configuration from
the project directory and/or the property table.
Arguments:
config: Read-only object with configuration options.
"""
@abstractmethod
def finalize_import(self, config: Configuration) -> None:
""" This function is called at the very end of an import when all
data has been imported and indexed. The tokenizer may create
at this point any additional indexes and data structures needed
during query time.
Arguments:
config: Read-only object with configuration options.
"""
@abstractmethod
def update_sql_functions(self, config: Configuration) -> None:
""" Update the SQL part of the tokenizer. This function is called
automatically on migrations or may be called explicitly by the
user through the `nominatim refresh --functions` command.
The tokenizer must only update the code of the tokenizer. The
data structures or data itself must not be changed by this function.
Arguments:
config: Read-only object with configuration options.
"""
@abstractmethod
def check_database(self, config: Configuration) -> Optional[str]:
""" Check that the database is set up correctly and ready for being
queried.
Arguments:
config: Read-only object with configuration options.
Returns:
If an issue was found, return an error message with the
description of the issue as well as hints for the user on
how to resolve the issue. If everything is okay, return `None`.
"""
@abstractmethod
def update_statistics(self, config: Configuration, threads: int = 1) -> None:
""" Recompute any tokenizer statistics necessary for efficient lookup.
This function is meant to be called from time to time by the user
to improve performance. However, the tokenizer must not depend on
it to be called in order to work.
"""
@abstractmethod
def update_word_tokens(self) -> None:
""" Do house-keeping on the tokenizers internal data structures.
Remove unused word tokens, resort data etc.
"""
@abstractmethod
def name_analyzer(self) -> AbstractAnalyzer:
""" Create a new analyzer for tokenizing names and queries
using this tokenizer. Analyzers are context managers and should
be used accordingly:
```
with tokenizer.name_analyzer() as analyzer:
analyser.tokenize()
```
When used outside the with construct, the caller must make sure to
call the close() function before destroying the analyzer.
"""
@abstractmethod
def most_frequent_words(self, conn: Connection, num: int) -> List[str]:
""" Return a list of the most frequent full words in the database.
Arguments:
conn: Open connection to the database which may be used to
retrieve the words.
num: Maximum number of words to return.
"""
class TokenizerModule(Protocol):
""" Interface that must be exported by modules that implement their
own tokenizer.
"""
def create(self, dsn: str, data_dir: Path) -> AbstractTokenizer:
""" Factory for new tokenizers.
"""
| 8,789 | Python | .py | 187 | 36.251337 | 82 | 0.63156 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,820 | icu_token_analysis.py | osm-search_Nominatim/src/nominatim_db/tokenizer/icu_token_analysis.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Container class collecting all components required to transform an OSM name
into a Nominatim token.
"""
from typing import Mapping, Optional, TYPE_CHECKING
from icu import Transliterator
from .token_analysis.base import Analyzer
if TYPE_CHECKING:
from typing import Any
from .icu_rule_loader import TokenAnalyzerRule # pylint: disable=cyclic-import
class ICUTokenAnalysis:
""" Container class collecting the transliterators and token analysis
modules for a single Analyser instance.
"""
def __init__(self, norm_rules: str, trans_rules: str,
analysis_rules: Mapping[Optional[str], 'TokenAnalyzerRule']):
self.normalizer = Transliterator.createFromRules("icu_normalization",
norm_rules)
trans_rules += ";[:Space:]+ > ' '"
self.to_ascii = Transliterator.createFromRules("icu_to_ascii",
trans_rules)
self.search = Transliterator.createFromRules("icu_search",
norm_rules + trans_rules)
self.analysis = {name: arules.create(self.normalizer, self.to_ascii)
for name, arules in analysis_rules.items()}
def get_analyzer(self, name: Optional[str]) -> Analyzer:
""" Return the given named analyzer. If no analyzer with that
name exists, return the default analyzer.
"""
return self.analysis.get(name) or self.analysis[None]
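A toy sketch of the transliterators built here; the rule strings are invented minimal examples, not Nominatim's shipped rule sets, and the exact output depends on the ICU version:

from nominatim_db.tokenizer.icu_token_analysis import ICUTokenAnalysis

analysis = ICUTokenAnalysis(':: Lower;', ':: Latin-ASCII;', {})
print(analysis.normalizer.transliterate('Åland'))   # expected: 'åland'
print(analysis.search.transliterate('Åland   Øy'))  # expected: 'aland oy' --
                                                    # normalised, transliterated,
                                                    # spaces collapsed by the
                                                    # appended [:Space:] rule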
| 1,750 | Python | .py | 36 | 38.666667 | 82 | 0.643234 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,821 | tag_japanese.py | osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/tag_japanese.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
This sanitizer maps OSM data to Japanese block addresses.
It combines block_number and housenumber into a single housenumber,
and quarter and neighbourhood into a single place.
"""
from typing import Callable
from typing import List, Optional
from .base import ProcessInfo
from .config import SanitizerConfig
from ...data.place_name import PlaceName
def create(_: SanitizerConfig) -> Callable[[ProcessInfo], None]:
"""Set up the sanitizer
"""
return tag_japanese
def recombine_housenumber(
new_address: List[PlaceName],
tmp_housenumber: Optional[str],
tmp_blocknumber: Optional[str]
) -> List[PlaceName]:
""" Recombine the tag of housenumber by using housenumber and blocknumber
"""
if tmp_blocknumber and tmp_housenumber:
new_address.append(
PlaceName(
kind='housenumber',
name=f'{tmp_blocknumber}-{tmp_housenumber}',
suffix=''
)
)
elif tmp_blocknumber:
new_address.append(
PlaceName(
kind='housenumber',
name=tmp_blocknumber,
suffix=''
)
)
elif tmp_housenumber:
new_address.append(
PlaceName(
kind='housenumber',
name=tmp_housenumber,
suffix=''
)
)
return new_address
def recombine_place(
new_address: List[PlaceName],
tmp_neighbourhood: Optional[str],
tmp_quarter: Optional[str]
) -> List[PlaceName]:
""" Recombine the tag of place by using neighbourhood and quarter
"""
if tmp_neighbourhood and tmp_quarter:
new_address.append(
PlaceName(
kind='place',
name=f'{tmp_quarter}{tmp_neighbourhood}',
suffix=''
)
)
elif tmp_neighbourhood:
new_address.append(
PlaceName(
kind='place',
name=tmp_neighbourhood,
suffix=''
)
)
elif tmp_quarter:
new_address.append(
PlaceName(
kind='place',
name=tmp_quarter,
suffix=''
)
)
return new_address
def tag_japanese(obj: ProcessInfo) -> None:
"""Recombine kind of address
"""
if obj.place.country_code != 'jp':
return
tmp_housenumber = None
tmp_blocknumber = None
tmp_neighbourhood = None
tmp_quarter = None
new_address = []
for item in obj.address:
if item.kind == 'housenumber':
tmp_housenumber = item.name
elif item.kind == 'block_number':
tmp_blocknumber = item.name
elif item.kind == 'neighbourhood':
tmp_neighbourhood = item.name
elif item.kind == 'quarter':
tmp_quarter = item.name
else:
new_address.append(item)
new_address = recombine_housenumber(new_address, tmp_housenumber, tmp_blocknumber)
new_address = recombine_place(new_address, tmp_neighbourhood, tmp_quarter)
obj.address = [item for item in new_address if item.name is not None]
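A worked example of the recombination (input values invented):

# obj.address before:  block_number='3', housenumber='2',
#                      quarter='銀座', neighbourhood='四丁目'
# obj.address after:   housenumber='3-2'   (blocknumber-housenumber)
#                      place='銀座四丁目'   (quarter + neighbourhood)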
| 3,356 | Python | .py | 108 | 22.87963 | 86 | 0.60142 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,822 | tag_analyzer_by_language.py | osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/tag_analyzer_by_language.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
This sanitizer sets the `analyzer` property depending on the
language of the tag. The language is taken from the suffix of the name.
If a name already has an analyzer tagged, then this is kept.
Arguments:
filter-kind: Restrict the names the sanitizer should be applied to
the given tags. The parameter expects a list of
regular expressions which are matched against 'kind'.
Note that a match against the full string is expected.
whitelist: Restrict the set of languages that should be tagged.
Expects a list of acceptable suffixes. When unset,
all 2- and 3-letter lower-case codes are accepted.
use-defaults: Configure what happens when the name has no suffix.
When set to 'all', a variant is created for
each of the default languages in the country
the feature is in. When set to 'mono', a variant is
only created, when exactly one language is spoken
in the country. The default is to do nothing with
the default languages of a country.
mode: Define how the variants are created and may be 'replace' or
'append'. When set to 'append' the original name (without
any analyzer tagged) is retained. (default: replace)
"""
from typing import Callable, Dict, Optional, List
from ...data import country_info
from .base import ProcessInfo
from .config import SanitizerConfig
class _AnalyzerByLanguage:
""" Processor for tagging the language of names in a place.
"""
def __init__(self, config: SanitizerConfig) -> None:
self.filter_kind = config.get_filter('filter-kind')
self.replace = config.get('mode', 'replace') != 'append'
self.whitelist = config.get('whitelist')
self._compute_default_languages(config.get('use-defaults', 'no'))
def _compute_default_languages(self, use_defaults: str) -> None:
self.deflangs: Dict[Optional[str], List[str]] = {}
if use_defaults in ('mono', 'all'):
for ccode, clangs in country_info.iterate('languages'):
if len(clangs) == 1 or use_defaults == 'all':
if self.whitelist:
self.deflangs[ccode] = [l for l in clangs if l in self.whitelist]
else:
self.deflangs[ccode] = clangs
def _suffix_matches(self, suffix: str) -> bool:
if self.whitelist is None:
return len(suffix) in (2, 3) and suffix.islower()
return suffix in self.whitelist
def __call__(self, obj: ProcessInfo) -> None:
if not obj.names:
return
more_names = []
for name in (n for n in obj.names
if not n.has_attr('analyzer') and self.filter_kind(n.kind)):
if name.suffix:
langs = [name.suffix] if self._suffix_matches(name.suffix) else None
else:
langs = self.deflangs.get(obj.place.country_code)
if langs:
if self.replace:
name.set_attr('analyzer', langs[0])
else:
more_names.append(name.clone(attr={'analyzer': langs[0]}))
more_names.extend(name.clone(attr={'analyzer': l}) for l in langs[1:])
obj.names.extend(more_names)
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function that sets the analyzer property depending on the
language of the tag.
"""
return _AnalyzerByLanguage(config)
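For illustration, the expected behaviour on some invented names:

# name:de = 'München'  -> suffix 'de' matches, so the name is tagged
#                         via set_attr('analyzer', 'de')
# name    = 'München' in a country with languages=['de'] and
#           use-defaults: mono -> also tagged with analyzer 'de'
# With mode: append, the original untagged name is kept and tagged
# clones are appended instead of replacing it.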
| 3,839 | Python | .py | 76 | 40.157895 | 89 | 0.626471 | osm-search/Nominatim | 3,062 | 711 | 96 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,823 | config.py | osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/config.py |
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Configuration for Sanitizers.
"""
from typing import Sequence, Union, Optional, Pattern, Callable, Any, TYPE_CHECKING
from collections import UserDict
import re
from ...errors import UsageError
# working around missing generics in Python < 3.8
# See https://github.com/python/typing/issues/60#issuecomment-869757075
if TYPE_CHECKING:
_BaseUserDict = UserDict[str, Any]
else:
_BaseUserDict = UserDict
class SanitizerConfig(_BaseUserDict):
""" The `SanitizerConfig` class is a read-only dictionary
with configuration options for the sanitizer.
In addition to the usual dictionary functions, the class provides
accessors to standard sanitizer options that are used by many of the
sanitizers.
"""
def get_string_list(self, param: str, default: Sequence[str] = tuple()) -> Sequence[str]:
""" Extract a configuration parameter as a string list.
Arguments:
param: Name of the configuration parameter.
default: Takes a tuple or list of strings which will
be returned if the parameter is missing in the
sanitizer configuration.
Note that if this default parameter is not
provided then an empty list is returned.
Returns:
If the parameter value is a simple string, it is returned as a
one-item list. If the parameter value does not exist, the given
default is returned. If the parameter value is a list, it is
checked to contain only strings before being returned.
"""
values = self.data.get(param, None)
if values is None:
return list(default)
if isinstance(values, str):
return [values] if values else []
if not isinstance(values, (list, tuple)):
raise UsageError(f"Parameter '{param}' must be string or list of strings.")
if any(not isinstance(value, str) for value in values):
raise UsageError(f"Parameter '{param}' must be string or list of strings.")
return values
def get_bool(self, param: str, default: Optional[bool] = None) -> bool:
""" Extract a configuration parameter as a boolean.
Arguments:
param: Name of the configuration parameter. The parameter must
contain one of the yaml boolean values or an
UsageError will be raised.
default: Value to return, when the parameter is missing.
When set to `None`, the parameter must be defined.
Returns:
Boolean value of the given parameter.
"""
value = self.data.get(param, default)
if not isinstance(value, bool):
raise UsageError(f"Parameter '{param}' must be a boolean value ('yes' or 'no').")
return value
def get_delimiter(self, default: str = ',;') -> Pattern[str]:
""" Return the 'delimiters' parameter in the configuration as a
compiled regular expression that can be used to split strings on
these delimiters.
Arguments:
default: Delimiters to be used when 'delimiters' parameter
is not explicitly configured.
Returns:
A regular expression pattern which can be used to
split a string. The regular expression makes sure that the
resulting names are stripped and that repeated delimiters
are ignored. It may still create empty fields on occasion. The
code needs to filter those.
"""
delimiter_set = set(self.data.get('delimiters', default))
if not delimiter_set:
raise UsageError("Empty 'delimiter' parameter not allowed for sanitizer.")
return re.compile('\\s*[{}]+\\s*'.format(''.join('\\' + d for d in delimiter_set)))
def get_filter(self, param: str, default: Union[str, Sequence[str]] = 'PASS_ALL'
) -> Callable[[str], bool]:
""" Returns a filter function for the given parameter of the sanitizer
configuration.
The value provided for the parameter in sanitizer configuration
should be a string or list of strings, where each string is a regular
expression. These regular expressions will later be used by the
filter function to filter strings.
Arguments:
param: The parameter for which the filter function
will be created.
            default: Defines the behaviour of the filter function if the
                     parameter is missing in the sanitizer configuration.
                     Takes a string ('PASS_ALL' or 'FAIL_ALL') or a list of
                     strings. Any other string value or an empty list is not
                     allowed and will raise a ValueError. If the value is
                     'PASS_ALL', the filter function lets all strings pass;
                     if it is 'FAIL_ALL', the filter function lets no string
                     pass. If the value is a list of strings, each string is
                     treated as a regular expression which the filter
                     function will then match against.
                     By default the filter function lets all strings pass.
        Returns:
            A filter function that takes a target string as the argument and
            returns True if it fully matches any of the regular expressions,
            otherwise returns False.
"""
filters = self.get_string_list(param) or default
if filters == 'PASS_ALL':
return lambda _: True
if filters == 'FAIL_ALL':
return lambda _: False
if filters and isinstance(filters, (list, tuple)):
regexes = [re.compile(regex) for regex in filters]
return lambda target: any(regex.fullmatch(target) for regex in regexes)
        raise ValueError("Default parameter must be a non-empty list or a "
                         "string value ('PASS_ALL' or 'FAIL_ALL').")
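# Illustrative sketch (not part of the original file): how the accessors
# behave for a hand-built configuration. The dictionary below is a
# hypothetical example, not a shipped sanitizer configuration.
if __name__ == '__main__':
    cfg = SanitizerConfig({'delimiters': ',;', 'whitelist': ['foo.*'],
                           'enabled': True})
    assert cfg.get_string_list('whitelist') == ['foo.*']
    assert cfg.get_bool('enabled') is True
    assert cfg.get_delimiter().split('foo , bar;;baz') == ['foo', 'bar', 'baz']
    assert cfg.get_filter('whitelist')('foobar')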
| 6,579
|
Python
|
.py
| 121
| 41.272727
| 93
| 0.612943
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,824
|
strip_brace_terms.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/strip_brace_terms.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
This sanitizer creates additional name variants for names that have
addendums in brackets (e.g. "Halle (Saale)"). The additional variant contains
only the main name part with the bracket part removed.
"""
from typing import Callable
from .base import ProcessInfo
from .config import SanitizerConfig
def create(_: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a name processing function that creates additional name variants
for bracket addendums.
"""
def _process(obj: ProcessInfo) -> None:
""" Add variants for names that have a bracket extension.
"""
if obj.names:
new_names = []
for name in (n for n in obj.names if '(' in n.name):
new_name = name.name.split('(')[0].strip()
if new_name:
new_names.append(name.clone(name=new_name))
obj.names.extend(new_names)
return _process
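# Illustrative sketch (not part of the original file): feeding a single name
# through the sanitizer. Assumes PlaceInfo accepts a plain mapping with a
# 'name' key; run via `python -m` so the relative import resolves.
if __name__ == '__main__':
    from ...data.place_info import PlaceInfo
    proc = ProcessInfo(PlaceInfo({'name': {'name': 'Halle (Saale)'}}))
    create(SanitizerConfig())(proc)
    print([n.name for n in proc.names])   # ['Halle (Saale)', 'Halle']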
| 1,151
|
Python
|
.py
| 29
| 33.827586
| 79
| 0.671441
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,825
|
clean_postcodes.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/clean_postcodes.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that filters postcodes by their officially allowed pattern.
Arguments:
convert-to-address: If set to 'yes' (the default), then postcodes that do
not conform with their country-specific pattern are
converted to an address component. That means that
the postcode does not take part when computing the
postcode centroids of a country but is still searchable.
When set to 'no', non-conforming postcodes are not
searchable either.
default-pattern: Pattern to use, when there is none available for the
country in question. Warning: will not be used for
objects that have no country assigned. These are always
assumed to have no postcode.
"""
from typing import Callable, Optional, Tuple
from ...data.postcode_format import PostcodeFormatter
from .base import ProcessInfo
from .config import SanitizerConfig
class _PostcodeSanitizer:
def __init__(self, config: SanitizerConfig) -> None:
self.convert_to_address = config.get_bool('convert-to-address', True)
self.matcher = PostcodeFormatter()
default_pattern = config.get('default-pattern')
if default_pattern is not None and isinstance(default_pattern, str):
self.matcher.set_default_pattern(default_pattern)
def __call__(self, obj: ProcessInfo) -> None:
if not obj.address:
return
postcodes = ((i, o) for i, o in enumerate(obj.address) if o.kind == 'postcode')
for pos, postcode in postcodes:
formatted = self.scan(postcode.name, obj.place.country_code)
if formatted is None:
if self.convert_to_address:
postcode.kind = 'unofficial_postcode'
else:
obj.address.pop(pos)
else:
postcode.name = formatted[0]
postcode.set_attr('variant', formatted[1])
def scan(self, postcode: str, country: Optional[str]) -> Optional[Tuple[str, str]]:
""" Check the postcode for correct formatting and return the
normalized version. Returns None if the postcode does not
correspond to the official format of the given country.
"""
match = self.matcher.match(country, postcode)
if match is None:
return None
assert country is not None
return self.matcher.normalize(country, match),\
' '.join(filter(lambda p: p is not None, match.groups()))
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function that filters postcodes by their officially allowed pattern.
"""
return _PostcodeSanitizer(config)
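# Illustrative sketch (not part of the original file): how this sanitizer
# might be enabled in the ICU tokenizer configuration. The YAML below is a
# hypothetical snippet, including the pattern value; consult the shipped
# settings/icu_tokenizer.yaml for the authoritative syntax.
#
#   sanitizers:
#     - step: clean-postcodes
#       convert-to-address: yes
#       default-pattern: "[A-Z0-9- ]{3,12}"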
| 3,067
|
Python
|
.py
| 61
| 39.672131
| 87
| 0.640107
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,826
|
delete_tags.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/delete_tags.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer which prevents certain tags from getting into the search index.
It removes tags which match all the properties given below.
Arguments:
type: Define which type of tags should be considered for removal.
          There are two types of tags: 'name' and 'address' tags.
Takes a string 'name' or 'address'. (default: 'name')
filter-kind: Define which 'kind' of tags should be removed.
Takes a string or list of strings where each
string is a regular expression. A tag is considered
to be a candidate for removal if its 'kind' property
fully matches any of the given regular expressions.
Note that by default all 'kind' of tags are considered.
suffix: Define the 'suffix' property of the tags which should be
removed. Takes a string or list of strings where each
string is a regular expression. A tag is considered to be a
candidate for removal if its 'suffix' property fully
matches any of the given regular expressions. Note that by
default tags with any suffix value are considered including
those which don't have a suffix at all.
name: Define the 'name' property corresponding to the 'kind' property
of the tag. Takes a string or list of strings where each string
is a regular expression. A tag is considered to be a candidate
for removal if its name fully matches any of the given regular
expressions. Note that by default tags with any 'name' are
considered.
country_code: Define the country code of places whose tags should be
                  considered for removal. Takes a string or list of strings
where each string is a two-letter lower-case country code.
Note that by default tags of places with any country code
are considered including those which don't have a country
code at all.
rank_address: Define the address rank of places whose tags should be
considered for removal. Takes a string or list of strings
                  where each string is a number or a range of numbers of the
                  form <from>-<to>.
Note that default is '0-30', which means that tags of all
places are considered.
See https://nominatim.org/release-docs/latest/customize/Ranking/#address-rank
to learn more about address rank.
"""
from typing import Callable, List, Tuple, Sequence
from ...data.place_name import PlaceName
from .base import ProcessInfo
from .config import SanitizerConfig
class _TagSanitizer:
def __init__(self, config: SanitizerConfig) -> None:
self.type = config.get('type', 'name')
self.filter_kind = config.get_filter('filter-kind')
self.country_codes = config.get_string_list('country_code', [])
self.filter_suffix = config.get_filter('suffix')
self.filter_name = config.get_filter('name')
self.allowed_ranks = self._set_allowed_ranks(
config.get_string_list("rank_address", ["0-30"])
)
self.has_country_code = config.get('country_code', None) is not None
def __call__(self, obj: ProcessInfo) -> None:
tags = obj.names if self.type == 'name' else obj.address
if not tags \
or not self.allowed_ranks[obj.place.rank_address] \
or self.has_country_code \
and obj.place.country_code not in self.country_codes:
return
filtered_tags: List[PlaceName] = []
for tag in tags:
if not self.filter_kind(tag.kind) \
or not self.filter_suffix(tag.suffix or '') \
or not self.filter_name(tag.name):
filtered_tags.append(tag)
if self.type == 'name':
obj.names = filtered_tags
else:
obj.address = filtered_tags
def _set_allowed_ranks(self, ranks: Sequence[str]) -> Tuple[bool, ...]:
""" Returns a tuple of 31 boolean values corresponding to the
address ranks 0-30. Value at index 'i' is True if rank 'i'
is present in the ranks or lies in the range of any of the
ranks provided in the sanitizer configuration, otherwise
the value is False.
"""
allowed_ranks = [False] * 31
for rank in ranks:
intvl = [int(x) for x in rank.split('-')]
start, end = intvl[0], intvl[0] if len(intvl) == 1 else intvl[1]
for i in range(start, end + 1):
allowed_ranks[i] = True
return tuple(allowed_ranks)
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function to process removal of certain tags.
"""
return _TagSanitizer(config)
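# Illustrative sketch (not part of the original file): how 'rank_address'
# ranges are expanded into the lookup tuple used above.
if __name__ == '__main__':
    sanitizer = _TagSanitizer(SanitizerConfig({'rank_address': ['4-9', '25']}))
    assert sanitizer.allowed_ranks[4] and sanitizer.allowed_ranks[9]
    assert sanitizer.allowed_ranks[25]
    assert not sanitizer.allowed_ranks[3] and not sanitizer.allowed_ranks[10]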
| 5,243
|
Python
|
.py
| 97
| 42.319588
| 96
| 0.626475
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,827
|
split_name_list.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/split_name_list.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that splits lists of names into their components.
Arguments:
delimiters: Define the set of characters to be used for
splitting the list. (default: ',;')
"""
from typing import Callable
from .base import ProcessInfo
from .config import SanitizerConfig
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a name processing function that splits name values with
multiple values into their components.
"""
regexp = config.get_delimiter()
def _process(obj: ProcessInfo) -> None:
if not obj.names:
return
new_names = []
for name in obj.names:
split_names = regexp.split(name.name)
if len(split_names) == 1:
new_names.append(name)
else:
new_names.extend(name.clone(name=n) for n in split_names if n)
obj.names = new_names
return _process
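# Illustrative sketch (not part of the original file): splitting a delimited
# name list. Assumes PlaceInfo accepts a plain mapping with a 'name' key;
# run via `python -m` so the relative import resolves.
if __name__ == '__main__':
    from ...data.place_info import PlaceInfo
    proc = ProcessInfo(PlaceInfo({'name': {'name': 'Foo;Bar, Baz'}}))
    create(SanitizerConfig())(proc)
    print(sorted(n.name for n in proc.names))   # ['Bar', 'Baz', 'Foo']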
| 1,152
|
Python
|
.py
| 32
| 29.78125
| 78
| 0.663971
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,828
|
base.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/base.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Common data types and protocols for sanitizers.
"""
from typing import Optional, List, Mapping, Callable
from ...typing import Protocol, Final
from ...data.place_info import PlaceInfo
from ...data.place_name import PlaceName
from .config import SanitizerConfig
class ProcessInfo:
""" Container class for information handed into to handler functions.
The 'names' and 'address' members are mutable. A handler must change
them by either modifying the lists place or replacing the old content
with a new list.
"""
def __init__(self, place: PlaceInfo):
self.place: Final = place
self.names = self._convert_name_dict(place.name)
self.address = self._convert_name_dict(place.address)
@staticmethod
def _convert_name_dict(names: Optional[Mapping[str, str]]) -> List[PlaceName]:
""" Convert a dictionary of names into a list of PlaceNames.
The dictionary key is split into the primary part of the key
and the suffix (the part after an optional colon).
"""
out = []
if names:
for key, value in names.items():
parts = key.split(':', 1)
out.append(PlaceName(value.strip(),
parts[0].strip(),
parts[1].strip() if len(parts) > 1 else None))
return out
class SanitizerHandler(Protocol):
""" Protocol for sanitizer modules.
"""
def create(self, config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
"""
Create a function for sanitizing a place.
Arguments:
config: A dictionary with the additional configuration options
specified in the tokenizer configuration
Return:
The result must be a callable that takes a place description
and transforms name and address as required.
"""
| 2,152
|
Python
|
.py
| 51
| 33.921569
| 83
| 0.644636
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,829
|
clean_tiger_tags.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/clean_tiger_tags.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that preprocesses tags from the TIGER import.
It makes the following changes:
* remove state reference from tiger:county
"""
from typing import Callable
import re
from .base import ProcessInfo
from .config import SanitizerConfig
COUNTY_MATCH = re.compile('(.*), [A-Z][A-Z]')
def _clean_tiger_county(obj: ProcessInfo) -> None:
""" Remove the state reference from tiger:county tags.
This transforms a name like 'Hamilton, AL' into 'Hamilton'.
If no state reference is detected at the end, the name is left as is.
"""
if not obj.address:
return
for item in obj.address:
if item.kind == 'tiger' and item.suffix == 'county':
m = COUNTY_MATCH.fullmatch(item.name)
if m:
item.name = m[1]
# Switch kind and suffix, the split left them reversed.
item.kind = 'county'
item.suffix = 'tiger'
return
def create(_: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a function that preprocesses tags from the TIGER import.
"""
return _clean_tiger_county
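# Illustrative sketch (not part of the original file): the county pattern in
# isolation.
if __name__ == '__main__':
    match = COUNTY_MATCH.fullmatch('Hamilton, AL')
    print(match[1] if match else None)         # Hamilton
    print(COUNTY_MATCH.fullmatch('Hamilton'))  # None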
| 1,334
|
Python
|
.py
| 36
| 31.666667
| 77
| 0.673137
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,830
|
clean_housenumbers.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/sanitizers/clean_housenumbers.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Sanitizer that preprocesses address tags for house numbers. The sanitizer
makes it possible to
* define which tags are to be considered house numbers (see 'filter-kind')
* split house number lists into individual numbers (see 'delimiters')
Arguments:
delimiters: Define the set of characters to be used for
splitting a list of house numbers into parts. (default: ',;')
filter-kind: Define the address tags that are considered to be a
house number. Either takes a single string or a list of strings,
where each string is a regular expression. An address item
is considered a house number if the 'kind' fully matches any
of the given regular expressions. (default: 'housenumber')
convert-to-name: Define house numbers that should be treated as a name
instead of a house number. Either takes a single string
or a list of strings, where each string is a regular
expression that must match the full house number value.
"""
from typing import Callable, Iterator, List
from ...data.place_name import PlaceName
from .base import ProcessInfo
from .config import SanitizerConfig
class _HousenumberSanitizer:
def __init__(self, config: SanitizerConfig) -> None:
self.filter_kind = config.get_filter('filter-kind', ['housenumber'])
self.split_regexp = config.get_delimiter()
self.filter_name = config.get_filter('convert-to-name', 'FAIL_ALL')
def __call__(self, obj: ProcessInfo) -> None:
if not obj.address:
return
new_address: List[PlaceName] = []
for item in obj.address:
if self.filter_kind(item.kind):
if self.filter_name(item.name):
obj.names.append(item.clone(kind='housenumber'))
else:
new_address.extend(item.clone(kind='housenumber', name=n)
for n in self.sanitize(item.name))
else:
# Don't touch other address items.
new_address.append(item)
obj.address = new_address
def sanitize(self, value: str) -> Iterator[str]:
""" Extract housenumbers in a regularized format from an OSM value.
The function works as a generator that yields all valid housenumbers
that can be created from the value.
"""
for hnr in self.split_regexp.split(value):
if hnr:
yield from self._regularize(hnr)
def _regularize(self, hnr: str) -> Iterator[str]:
yield hnr
def create(config: SanitizerConfig) -> Callable[[ProcessInfo], None]:
""" Create a housenumber processing function.
"""
return _HousenumberSanitizer(config)
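# Illustrative sketch (not part of the original file): splitting a house
# number list with the default configuration. Assumes PlaceInfo accepts a
# plain mapping; run via `python -m` so the relative import resolves.
if __name__ == '__main__':
    from ...data.place_info import PlaceInfo
    proc = ProcessInfo(PlaceInfo({'address': {'housenumber': '3;5;7'}}))
    create(SanitizerConfig())(proc)
    print([a.name for a in proc.address])   # ['3', '5', '7']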
| 3,022
|
Python
|
.py
| 62
| 39.290323
| 81
| 0.648878
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,831
|
generic_mutation.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/token_analysis/generic_mutation.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Creator for mutation variants for the generic token analysis.
"""
from typing import Sequence, Iterable, Iterator, Tuple
import itertools
import logging
import re
from ...errors import UsageError
LOG = logging.getLogger()
def _zigzag(outer: Iterable[str], inner: Iterable[str]) -> Iterator[str]:
return itertools.chain.from_iterable(itertools.zip_longest(outer, inner, fillvalue=''))
class MutationVariantGenerator:
""" Generates name variants by applying a regular expression to the name
and replacing it with one or more variants. When the regular expression
matches more than once, each occurrence is replaced with all replacement
patterns.
"""
def __init__(self, pattern: str, replacements: Sequence[str]):
self.pattern = re.compile(pattern)
self.replacements = replacements
if self.pattern.groups > 0:
LOG.fatal("The mutation pattern %s contains a capturing group. "
"This is not allowed.", pattern)
raise UsageError("Bad mutation pattern in configuration.")
def generate(self, names: Iterable[str]) -> Iterator[str]:
""" Generator function for the name variants. 'names' is an iterable
over a set of names for which the variants are to be generated.
"""
for name in names:
parts = self.pattern.split(name)
if len(parts) == 1:
yield name
else:
for seps in self._fillers(len(parts)):
yield ''.join(_zigzag(parts, seps))
def _fillers(self, num_parts: int) -> Iterator[Tuple[str, ...]]:
""" Returns a generator for strings to join the given number of string
parts in all possible combinations.
"""
return itertools.product(self.replacements, repeat=num_parts - 1)
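# Illustrative sketch (not part of the original file): each match of the
# pattern is combined with every replacement.
if __name__ == '__main__':
    gen = MutationVariantGenerator('-', (' ', ''))
    print(sorted(gen.generate(['a-b'])))   # ['a b', 'ab']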
| 2,070
|
Python
|
.py
| 46
| 37.717391
| 91
| 0.669151
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,832
|
config_variants.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/token_analysis/config_variants.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Parser for configuration for variants.
"""
from typing import Any, Iterator, Tuple, List, Optional, Set, NamedTuple
from collections import defaultdict
import itertools
import re
from ...config import flatten_config_list
from ...errors import UsageError
class ICUVariant(NamedTuple):
""" A single replacement rule for variant creation.
"""
source: str
replacement: str
def get_variant_config(in_rules: Any,
normalizer: Any) -> Tuple[List[Tuple[str, List[str]]], str]:
""" Convert the variant definition from the configuration into
replacement sets.
        Returns a tuple containing the replacement set and a string of all
        characters that appear in the replacement sources.
"""
immediate = defaultdict(list)
chars: Set[str] = set()
if in_rules:
vset: Set[ICUVariant] = set()
rules = flatten_config_list(in_rules, 'variants')
vmaker = _VariantMaker(normalizer)
for section in rules:
for rule in (section.get('words') or []):
vset.update(vmaker.compute(rule))
# Intermediate reorder by source. Also compute required character set.
for variant in vset:
if variant.source[-1] == ' ' and variant.replacement[-1] == ' ':
replstr = variant.replacement[:-1]
else:
replstr = variant.replacement
immediate[variant.source].append(replstr)
chars.update(variant.source)
return list(immediate.items()), ''.join(chars)
class _VariantMaker:
""" Generator for all necessary ICUVariants from a single variant rule.
All text in rules is normalized to make sure the variants match later.
"""
def __init__(self, normalizer: Any) -> None:
self.norm = normalizer
def compute(self, rule: Any) -> Iterator[ICUVariant]:
""" Generator for all ICUVariant tuples from a single variant rule.
"""
parts = re.split(r'(\|)?([=-])>', rule)
if len(parts) != 4:
raise UsageError(f"Syntax error in variant rule: {rule}")
decompose = parts[1] is None
src_terms = [self._parse_variant_word(t) for t in parts[0].split(',')]
repl_terms = (self.norm.transliterate(t).strip() for t in parts[3].split(','))
# If the source should be kept, add a 1:1 replacement
if parts[2] == '-':
for src in src_terms:
if src:
for froms, tos in _create_variants(*src, src[0], decompose):
yield ICUVariant(froms, tos)
for src, repl in itertools.product(src_terms, repl_terms):
if src and repl:
for froms, tos in _create_variants(*src, repl, decompose):
yield ICUVariant(froms, tos)
def _parse_variant_word(self, name: str) -> Optional[Tuple[str, str, str]]:
name = name.strip()
match = re.fullmatch(r'([~^]?)([^~$^]*)([~$]?)', name)
if match is None or (match.group(1) == '~' and match.group(3) == '~'):
raise UsageError(f"Invalid variant word descriptor '{name}'")
norm_name = self.norm.transliterate(match.group(2)).strip()
if not norm_name:
return None
return norm_name, match.group(1), match.group(3)
_FLAG_MATCH = {'^': '^ ',
'$': ' ^',
'': ' '}
def _create_variants(src: str, preflag: str, postflag: str,
repl: str, decompose: bool) -> Iterator[Tuple[str, str]]:
if preflag == '~':
postfix = _FLAG_MATCH[postflag]
# suffix decomposition
src = src + postfix
repl = repl + postfix
yield src, repl
yield ' ' + src, ' ' + repl
if decompose:
yield src, ' ' + repl
yield ' ' + src, repl
elif postflag == '~':
# prefix decomposition
prefix = _FLAG_MATCH[preflag]
src = prefix + src
repl = prefix + repl
yield src, repl
yield src + ' ', repl + ' '
if decompose:
yield src, repl + ' '
yield src + ' ', repl
else:
prefix = _FLAG_MATCH[preflag]
postfix = _FLAG_MATCH[postflag]
yield prefix + src + postfix, prefix + repl + postfix
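# Illustrative sketch (not part of the original file): expanding a single
# suffix-decomposition rule, with an identity object standing in for the ICU
# normalizer. Assumes flatten_config_list passes a plain list of rule
# dictionaries through unchanged.
if __name__ == '__main__':
    class _IdentityNorm:
        def transliterate(self, text):
            return text
    replacements, chars = get_variant_config([{'words': ['~strasse => str']}],
                                             _IdentityNorm())
    for source, repls in sorted(replacements):
        print(repr(source), sorted(repls))
    # ' strasse ' [' str', 'str']
    # 'strasse ' [' str', 'str']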
| 4,496
|
Python
|
.py
| 108
| 32.842593
| 86
| 0.591921
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,833
|
housenumbers.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/token_analysis/housenumbers.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Specialized processor for housenumbers. Analyses common housenumber patterns
and creates variants for them.
"""
from typing import Any, List, cast
import re
from ...data.place_name import PlaceName
from .generic_mutation import MutationVariantGenerator
RE_NON_DIGIT = re.compile('[^0-9]')
RE_DIGIT_ALPHA = re.compile(r'(\d)\s*([^\d\s␣])')
RE_ALPHA_DIGIT = re.compile(r'([^\s\d␣])\s*(\d)')
RE_NAMED_PART = re.compile(r'[a-z]{4}')
### Configuration section
def configure(*_: Any) -> None:
""" All behaviour is currently hard-coded.
"""
return None
### Analysis section
def create(normalizer: Any, transliterator: Any, config: None) -> 'HousenumberTokenAnalysis': # pylint: disable=W0613
""" Create a new token analysis instance for this module.
"""
return HousenumberTokenAnalysis(normalizer, transliterator)
class HousenumberTokenAnalysis:
""" Detects common housenumber patterns and normalizes them.
"""
def __init__(self, norm: Any, trans: Any) -> None:
self.norm = norm
self.trans = trans
self.mutator = MutationVariantGenerator('␣', (' ', ''))
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the normalized form of the housenumber.
"""
# shortcut for number-only numbers, which make up 90% of the data.
if RE_NON_DIGIT.search(name.name) is None:
return name.name
norm = cast(str, self.trans.transliterate(self.norm.transliterate(name.name)))
# If there is a significant non-numeric part, use as is.
if RE_NAMED_PART.search(norm) is None:
# Otherwise add optional spaces between digits and letters.
(norm_opt, cnt1) = RE_DIGIT_ALPHA.subn(r'\1␣\2', norm)
(norm_opt, cnt2) = RE_ALPHA_DIGIT.subn(r'\1␣\2', norm_opt)
# Avoid creating too many variants per number.
if cnt1 + cnt2 <= 4:
return norm_opt
return norm
def compute_variants(self, norm_name: str) -> List[str]:
""" Compute the spelling variants for the given normalized housenumber.
Generates variants for optional spaces (marked with '␣').
"""
return list(self.mutator.generate([norm_name]))
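# Illustrative sketch (not part of the original file): identity objects stand
# in for the ICU normalizer and transliterator.
if __name__ == '__main__':
    class _Identity:
        def transliterate(self, text):
            return text
    analysis = create(_Identity(), _Identity(), configure())
    canonical = analysis.get_canonical_id(PlaceName('3 a', 'housenumber', None))
    print(canonical)                                      # 3␣a
    print(sorted(analysis.compute_variants(canonical)))   # ['3 a', '3a']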
| 2,462
|
Python
|
.py
| 56
| 37.928571
| 117
| 0.667647
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,834
|
postcodes.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/token_analysis/postcodes.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Specialized processor for postcodes. Supports a 'lookup' variant of the
token, which produces variants with optional spaces.
"""
from typing import Any, List
from ...data.place_name import PlaceName
from .generic_mutation import MutationVariantGenerator
### Configuration section
def configure(*_: Any) -> None:
""" All behaviour is currently hard-coded.
"""
return None
### Analysis section
def create(normalizer: Any, transliterator: Any, config: None) -> 'PostcodeTokenAnalysis': # pylint: disable=W0613
""" Create a new token analysis instance for this module.
"""
return PostcodeTokenAnalysis(normalizer, transliterator)
class PostcodeTokenAnalysis:
""" Special normalization and variant generation for postcodes.
This analyser must not be used with anything but postcodes as
        it follows some special rules: the canonical ID is the form that
is used for the output. `compute_variants` then needs to ensure that
the generated variants once more follow the standard normalization
and transliteration, so that postcodes are correctly recognised by
the search algorithm.
"""
def __init__(self, norm: Any, trans: Any) -> None:
self.norm = norm
self.trans = trans
self.mutator = MutationVariantGenerator(' ', (' ', ''))
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the standard form of the postcode.
"""
return name.name.strip().upper()
def compute_variants(self, norm_name: str) -> List[str]:
""" Compute the spelling variants for the given normalized postcode.
Takes the canonical form of the postcode, normalizes it using the
standard rules and then creates variants of the result where
all spaces are optional.
"""
# Postcodes follow their own transliteration rules.
# Make sure at this point, that the terms are normalized in a way
# that they are searchable with the standard transliteration rules.
return [self.trans.transliterate(term) for term in
self.mutator.generate([self.norm.transliterate(norm_name)]) if term]
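# Illustrative sketch (not part of the original file): identity objects stand
# in for the ICU normalizer and transliterator.
if __name__ == '__main__':
    class _Identity:
        def transliterate(self, text):
            return text
    analysis = create(_Identity(), _Identity(), configure())
    canonical = analysis.get_canonical_id(PlaceName(' ec1r 3hf ', 'postcode', None))
    print(canonical)                                      # EC1R 3HF
    print(sorted(analysis.compute_variants(canonical)))   # ['EC1R 3HF', 'EC1R3HF']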
| 2,409
|
Python
|
.py
| 51
| 41.254902
| 114
| 0.706911
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,835
|
generic.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/token_analysis/generic.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Generic processor for names that creates abbreviation variants.
"""
from typing import Mapping, Dict, Any, Iterable, Iterator, Optional, List, cast
import itertools
import datrie
from ...errors import UsageError
from ...data.place_name import PlaceName
from .config_variants import get_variant_config
from .generic_mutation import MutationVariantGenerator
### Configuration section
def configure(rules: Mapping[str, Any], normalizer: Any, _: Any) -> Dict[str, Any]:
""" Extract and preprocess the configuration for this module.
"""
config: Dict[str, Any] = {}
config['replacements'], config['chars'] = get_variant_config(rules.get('variants'),
normalizer)
config['variant_only'] = rules.get('mode', '') == 'variant-only'
# parse mutation rules
config['mutations'] = []
for rule in rules.get('mutations', []):
if 'pattern' not in rule:
raise UsageError("Missing field 'pattern' in mutation configuration.")
if not isinstance(rule['pattern'], str):
raise UsageError("Field 'pattern' in mutation configuration "
"must be a simple text field.")
if 'replacements' not in rule:
raise UsageError("Missing field 'replacements' in mutation configuration.")
if not isinstance(rule['replacements'], list):
raise UsageError("Field 'replacements' in mutation configuration "
"must be a list of texts.")
config['mutations'].append((rule['pattern'], rule['replacements']))
return config
### Analysis section
def create(normalizer: Any, transliterator: Any,
config: Mapping[str, Any]) -> 'GenericTokenAnalysis':
""" Create a new token analysis instance for this module.
"""
return GenericTokenAnalysis(normalizer, transliterator, config)
class GenericTokenAnalysis:
""" Collects the different transformation rules for normalisation of names
and provides the functions to apply the transformations.
"""
def __init__(self, norm: Any, to_ascii: Any, config: Mapping[str, Any]) -> None:
self.norm = norm
self.to_ascii = to_ascii
self.variant_only = config['variant_only']
# Set up datrie
if config['replacements']:
self.replacements = datrie.Trie(config['chars'])
for src, repllist in config['replacements']:
self.replacements[src] = repllist
else:
self.replacements = None
# set up mutation rules
self.mutations = [MutationVariantGenerator(*cfg) for cfg in config['mutations']]
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the normalized form of the name. This is the standard form
from which possible variants for the name can be derived.
"""
return cast(str, self.norm.transliterate(name.name)).strip()
def compute_variants(self, norm_name: str) -> List[str]:
""" Compute the spelling variants for the given normalized name
and transliterate the result.
"""
variants = self._generate_word_variants(norm_name)
for mutation in self.mutations:
variants = mutation.generate(variants)
return [name for name in self._transliterate_unique_list(norm_name, variants) if name]
def _transliterate_unique_list(self, norm_name: str,
iterable: Iterable[str]) -> Iterator[Optional[str]]:
seen = set()
if self.variant_only:
seen.add(norm_name)
for variant in map(str.strip, iterable):
if variant not in seen:
seen.add(variant)
yield self.to_ascii.transliterate(variant).strip()
def _generate_word_variants(self, norm_name: str) -> Iterable[str]:
baseform = '^ ' + norm_name + ' ^'
baselen = len(baseform)
partials = ['']
startpos = 0
if self.replacements is not None:
pos = 0
force_space = False
while pos < baselen:
full, repl = self.replacements.longest_prefix_item(baseform[pos:],
(None, None))
if full is not None:
done = baseform[startpos:pos]
partials = [v + done + r
for v, r in itertools.product(partials, repl)
if not force_space or r.startswith(' ')]
if len(partials) > 128:
# If too many variants are produced, they are unlikely
# to be helpful. Only use the original term.
startpos = 0
break
startpos = pos + len(full)
if full[-1] == ' ':
startpos -= 1
force_space = True
pos = startpos
else:
pos += 1
force_space = False
# No variants detected? Fast return.
if startpos == 0:
return (norm_name, )
if startpos < baselen:
return (part[1:] + baseform[startpos:-1] for part in partials)
return (part[1:-1] for part in partials)
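# Illustrative sketch (not part of the original file): a single mutation rule
# with identity objects standing in for the ICU transliterators.
if __name__ == '__main__':
    class _Identity:
        def transliterate(self, text):
            return text
    cfg = configure({'mutations': [{'pattern': 'ä', 'replacements': ['ä', 'ae']}]},
                    _Identity(), None)
    analysis = create(_Identity(), _Identity(), cfg)
    print(sorted(analysis.compute_variants('gäste')))   # ['gaeste', 'gäste']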
| 5,620
|
Python
|
.py
| 119
| 35.445378
| 94
| 0.587934
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,836
|
base.py
|
osm-search_Nominatim/src/nominatim_db/tokenizer/token_analysis/base.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Common data types and protocols for analysers.
"""
from typing import Mapping, List, Any
from ...typing import Protocol
from ...data.place_name import PlaceName
class Analyzer(Protocol):
""" The `create()` function of an analysis module needs to return an
object that implements the following functions.
"""
def get_canonical_id(self, name: PlaceName) -> str:
""" Return the canonical form of the given name. The canonical ID must
be unique (the same ID must always yield the same variants) and
must be a form from which the variants can be derived.
Arguments:
name: Extended place name description as prepared by
the sanitizers.
Returns:
ID string with a canonical form of the name. The string may
be empty, when the analyzer cannot analyze the name at all,
for example because the character set in use does not match.
"""
def compute_variants(self, canonical_id: str) -> List[str]:
""" Compute the transliterated spelling variants for the given
canonical ID.
Arguments:
canonical_id: ID string previously computed with
`get_canonical_id()`.
Returns:
A list of possible spelling variants. All strings must have
been transformed with the global normalizer and
transliterator ICU rules. Otherwise they cannot be matched
against the input by the query frontend.
                The list may be empty when there are no useful spelling
                variants. This may happen when an analyzer usually only
                outputs additional variants to the canonical spelling and
                there are no such variants for the given name.
"""
class AnalysisModule(Protocol):
""" The setup of the token analysis is split into two parts:
configuration and analyser factory. A token analysis module must
therefore implement the two functions here described.
"""
def configure(self, rules: Mapping[str, Any],
normalizer: Any, transliterator: Any) -> Any:
""" Prepare the configuration of the analysis module.
This function should prepare all data that can be shared
between instances of this analyser.
Arguments:
rules: A dictionary with the additional configuration options
as specified in the tokenizer configuration.
normalizer: an ICU Transliterator with the compiled
global normalization rules.
transliterator: an ICU Transliterator with the compiled
global transliteration rules.
Returns:
A data object with configuration data. This will be handed
as is into the `create()` function and may be
used freely by the analysis module as needed.
"""
def create(self, normalizer: Any, transliterator: Any, config: Any) -> Analyzer:
""" Create a new instance of the analyser.
A separate instance of the analyser is created for each thread
when used in multi-threading context.
Arguments:
normalizer: an ICU Transliterator with the compiled normalization
rules.
transliterator: an ICU Transliterator with the compiled
transliteration rules.
config: The object that was returned by the call to configure().
Returns:
A new analyzer instance. This must be an object that implements
the Analyzer protocol.
"""
| 4,095
|
Python
|
.py
| 80
| 38.175
| 84
| 0.617654
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,837
|
paths.py
|
osm-search_Nominatim/packaging/nominatim-db/extra_src/nominatim_db/paths.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Path settings for extra data used by Nominatim.
"""
from pathlib import Path
DATA_DIR = (Path(__file__) / '..' / 'resources').resolve()
SQLLIB_DIR = (DATA_DIR / 'lib-sql')
CONFIG_DIR = (DATA_DIR / 'settings')
| 425
|
Python
|
.py
| 13
| 31.615385
| 58
| 0.708029
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,838
|
paths.py
|
osm-search_Nominatim/packaging/nominatim-api/extra_src/paths.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Path settings for extra data used by Nominatim.
"""
from pathlib import Path
DATA_DIR = None
SQLLIB_DIR = None
CONFIG_DIR = (Path(__file__) / '..' / 'resources' / 'settings').resolve()
| 401
|
Python
|
.py
| 13
| 29.769231
| 73
| 0.72093
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,839
|
paths-py-no-php.tmpl
|
osm-search_Nominatim/cmake/paths-py-no-php.tmpl
|
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Path settings for extra data used by Nominatim (installed version).
"""
from pathlib import Path
SQLLIB_DIR = (Path('@NOMINATIM_LIBDIR@') / 'lib-sql').resolve()
DATA_DIR = Path('@NOMINATIM_DATADIR@').resolve()
CONFIG_DIR = Path('@NOMINATIM_CONFIGDIR@').resolve()
| 475
|
Python
|
.py
| 13
| 35.461538
| 67
| 0.737527
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,840
|
mk_install_instructions.py
|
osm-search_Nominatim/docs/mk_install_instructions.py
|
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2024 by the Nominatim developer community.
from pathlib import Path
import mkdocs_gen_files
VAGRANT_PATH = Path(__file__, '..', '..', 'vagrant').resolve()
for infile in VAGRANT_PATH.glob('Install-on-*.sh'):
outfile = f"admin/{infile.stem}.md"
title = infile.stem.replace('-', ' ')
with mkdocs_gen_files.open(outfile, "w") as outfd, infile.open() as infd:
print("#", title, file=outfd)
has_empty = False
for line in infd:
line = line.rstrip()
docpos = line.find('#DOCS:')
if docpos >= 0:
line = line[docpos + 6:]
elif line == '#' or line.startswith('#!'):
line = ''
elif line.startswith('# '):
line = line[2:]
if line or not has_empty:
print(line, file=outfd)
has_empty = not bool(line)
mkdocs_gen_files.set_edit_path(outfile, "docs/mk_install_instructions.py")
| 1,080
|
Python
|
.py
| 27
| 31.703704
| 78
| 0.575382
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,841
|
create-manpage.py
|
osm-search_Nominatim/man/create-manpage.py
|
import sys
import os
from pathlib import Path
sys.path.append(str(Path(__file__, '..', '..', 'src').resolve()))
from nominatim_db.cli import get_set_parser
def get_parser():
parser = get_set_parser()
return parser.parser
| 233
|
Python
|
.py
| 8
| 26.625
| 65
| 0.705882
|
osm-search/Nominatim
| 3,062
| 711
| 96
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,842
|
setup.py
|
selfspy_selfspy/setup.py
|
import os
import platform
# dep_link = []
if platform.system() == 'Darwin':
req_file = 'osx-requirements.txt'
elif platform.system() == "Windows":
req_file = "win-requirements.txt"
else:
req_file = 'requirements.txt'
#dep_link = ['http://python-xlib.svn.sourceforge.net/viewvc/python-xlib/tags/xlib_0_15rc1/?view=tar#egg=pyxlib']
with open(os.path.join(os.path.dirname(__file__), req_file)) as f:
requires = list(f.readlines())
print('"%s"' % requires)
from setuptools import setup
setup(name="selfspy",
version='0.3.0',
packages=['selfspy'],
author="David Fendrich",
# author_email='',
description=''.join("""
Log everything you do on the computer, for statistics,
future reference and all-around fun!
""".strip().split('\n')),
install_requires=requires,
#dependency_links=dep_link,
entry_points=dict(console_scripts=['selfspy=selfspy:main',
'selfstats=selfspy.stats:main']))
| 1,016
|
Python
|
.py
| 27
| 31.444444
| 116
| 0.640244
|
selfspy/selfspy
| 2,404
| 231
| 72
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,843
|
com.github.gurgeh.selfspy.plist
|
selfspy_selfspy/com.github.gurgeh.selfspy.plist
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.github.gurgeh.selfspy</string>
<key>ProgramArguments</key>
<array>
<string>/usr/local/bin/selfspy</string>
</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
| 400
|
Python
|
.py
| 15
| 23.133333
| 52
| 0.642487
|
selfspy/selfspy
| 2,404
| 231
| 72
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,844
|
models.py
|
selfspy_selfspy/selfspy/models.py
|
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import zlib
import json
import re
import datetime
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import (
Index, Column, Boolean, Integer, Unicode, DateTime, Binary, ForeignKey,
create_engine
)
from sqlalchemy.orm import sessionmaker, relationship, backref
def initialize(fname):
engine = create_engine('sqlite:///%s' % fname)
Base.metadata.create_all(engine)
return sessionmaker(bind=engine)
ENCRYPTER = None
Base = declarative_base()
class SpookMixin(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
created_at = Column(DateTime, default=datetime.datetime.now, index=True)
class Process(SpookMixin, Base):
name = Column(Unicode, index=True, unique=True)
def __init__(self, name):
self.name = name
def __repr__(self):
return "<Process '%s'>" % self.name
class Window(SpookMixin, Base):
title = Column(Unicode, index=True)
process_id = Column(Integer, ForeignKey('process.id'), nullable=False, index=True)
process = relationship("Process", backref=backref('windows'))
def __init__(self, title, process_id):
self.title = title
self.process_id = process_id
def __repr__(self):
return "<Window '%s'>" % (repr(self.title))
class Geometry(SpookMixin, Base):
xpos = Column(Integer, nullable=False)
ypos = Column(Integer, nullable=False)
width = Column(Integer, nullable=False)
height = Column(Integer, nullable=False)
Index('idx_geo', 'xpos', 'ypos', 'width', 'height')
def __init__(self, x, y, width, height):
self.xpos = x
self.ypos = y
self.width = width
self.height = height
def __repr__(self):
return "<Geometry (%d, %d), (%d, %d)>" % (self.xpos, self.ypos, self.width, self.height)
class Click(SpookMixin, Base):
button = Column(Integer, nullable=False)
press = Column(Boolean, nullable=False)
x = Column(Integer, nullable=False)
y = Column(Integer, nullable=False)
nrmoves = Column(Integer, nullable=False)
process_id = Column(Integer, ForeignKey('process.id'), nullable=False, index=True)
process = relationship("Process", backref=backref('clicks'))
window_id = Column(Integer, ForeignKey('window.id'), nullable=False)
window = relationship("Window", backref=backref('clicks'))
geometry_id = Column(Integer, ForeignKey('geometry.id'), nullable=False)
geometry = relationship("Geometry", backref=backref('clicks'))
def __init__(self, button, press, x, y, nrmoves, process_id, window_id, geometry_id):
self.button = button
self.press = press
self.x = x
self.y = y
self.nrmoves = nrmoves
self.process_id = process_id
self.window_id = window_id
self.geometry_id = geometry_id
def __repr__(self):
return "<Click (%d, %d), (%d, %d, %d)>" % (self.x, self.y, self.button, self.press, self.nrmoves)
def pad(s, padnum):
ls = len(s)
if ls % padnum == 0:
return s
return s + '\0' * (padnum - (ls % padnum))
def maybe_encrypt(s, other_encrypter=None):
if other_encrypter is not None:
s = pad(s, 8)
s = other_encrypter.encrypt(s)
elif ENCRYPTER:
s = pad(s, 8)
s = ENCRYPTER.encrypt(s)
return s
def maybe_decrypt(s, other_encrypter=None):
if other_encrypter is not None:
s = other_encrypter.decrypt(s)
elif ENCRYPTER:
s = ENCRYPTER.decrypt(s)
return s
class Keys(SpookMixin, Base):
text = Column(Binary, nullable=False)
started = Column(DateTime, nullable=False)
process_id = Column(Integer, ForeignKey('process.id'), nullable=False, index=True)
process = relationship("Process", backref=backref('keys'))
window_id = Column(Integer, ForeignKey('window.id'), nullable=False)
window = relationship("Window", backref=backref('keys'))
geometry_id = Column(Integer, ForeignKey('geometry.id'), nullable=False)
geometry = relationship("Geometry", backref=backref('keys'))
nrkeys = Column(Integer, index=True)
keys = Column(Binary)
timings = Column(Binary)
def __init__(self, text, keys, timings, nrkeys, started, process_id, window_id, geometry_id):
ztimings = zlib.compress(json.dumps(timings))
self.encrypt_text(text)
self.encrypt_keys(keys)
self.nrkeys = nrkeys
self.timings = ztimings
self.started = started
self.process_id = process_id
self.window_id = window_id
self.geometry_id = geometry_id
def encrypt_text(self, text, other_encrypter=None):
ztext = maybe_encrypt(text, other_encrypter=other_encrypter)
self.text = ztext
def encrypt_keys(self, keys, other_encrypter=None):
zkeys = maybe_encrypt(zlib.compress(json.dumps(keys)),
other_encrypter=other_encrypter)
self.keys = zkeys
def decrypt_text(self):
return maybe_decrypt(self.text)
def decrypt_humanreadable(self):
return self.to_humanreadable(self.decrypt_text())
def decrypt_keys(self):
keys = maybe_decrypt(self.keys)
return json.loads(zlib.decompress(keys))
def to_humanreadable(self, text):
        backrex = re.compile(r"\<\[Backspace\]x?(\d+)?\>", re.IGNORECASE)
matches = backrex.search(text)
while matches is not None:
backspaces = matches.group(1)
try:
deletechars = int(backspaces)
except TypeError:
deletechars = 1
newstart = matches.start() - deletechars
if newstart < 0:
newstart = 0
text = (text[:newstart] + text[matches.end():])
matches = backrex.search(text)
return text
def load_timings(self):
return json.loads(zlib.decompress(self.timings))
def __repr__(self):
return "<Keys %s>" % self.nrkeys
| 6,721
|
Python
|
.py
| 159
| 35.672956
| 105
| 0.663743
|
selfspy/selfspy
| 2,404
| 231
| 72
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,845
|
sniff_cocoa.py
|
selfspy_selfspy/selfspy/sniff_cocoa.py
|
# Copyright 2012 Bjarte Johansen
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
from Foundation import NSObject
from AppKit import NSApplication, NSApp, NSWorkspace
from Cocoa import (
NSEvent, NSFlagsChanged,
NSKeyDown, NSKeyUp, NSKeyDownMask, NSKeyUpMask,
NSLeftMouseDown, NSLeftMouseUpMask, NSLeftMouseDownMask,
NSRightMouseDown, NSRightMouseUpMask, NSRightMouseDownMask,
NSMouseMoved, NSMouseMovedMask,
NSScrollWheel, NSScrollWheelMask,
NSFlagsChangedMask,
NSAlternateKeyMask, NSCommandKeyMask, NSControlKeyMask,
NSShiftKeyMask, NSAlphaShiftKeyMask,
NSApplicationActivationPolicyProhibited
)
from Quartz import (
CGWindowListCopyWindowInfo,
kCGWindowListExcludeDesktopElements,
kCGWindowListOptionOnScreenOnly,
kCGNullWindowID
)
from PyObjCTools import AppHelper
import config as cfg
import signal
import time
FORCE_SCREEN_CHANGE = 10
WAIT_ANIMATION = 1
class Sniffer:
def __init__(self):
self.key_hook = lambda x: True
self.mouse_button_hook = lambda x: True
self.mouse_move_hook = lambda x: True
self.screen_hook = lambda x: True
self.last_check_windows = time.time()
def createAppDelegate(self):
sc = self
class AppDelegate(NSObject):
def applicationDidFinishLaunching_(self, notification):
mask = (NSKeyDownMask
| NSKeyUpMask
| NSLeftMouseDownMask
| NSLeftMouseUpMask
| NSRightMouseDownMask
| NSRightMouseUpMask
| NSMouseMovedMask
| NSScrollWheelMask
| NSFlagsChangedMask)
NSEvent.addGlobalMonitorForEventsMatchingMask_handler_(mask, sc.handler)
def applicationWillResignActive(self, notification):
self.applicationWillTerminate_(notification)
return True
def applicationShouldTerminate_(self, notification):
self.applicationWillTerminate_(notification)
return True
def applicationWillTerminate_(self, notification):
                # Need to release the lock here: when the application
                # terminates, it does not run the rest of the original
                # main, only the code that has crossed the PyObjC bridge.
if cfg.LOCK.is_locked():
cfg.LOCK.release()
print("Exiting")
return None
return AppDelegate
def run(self):
NSApplication.sharedApplication()
delegate = self.createAppDelegate().alloc().init()
NSApp().setDelegate_(delegate)
NSApp().setActivationPolicy_(NSApplicationActivationPolicyProhibited)
self.workspace = NSWorkspace.sharedWorkspace()
def handler(signal, frame):
AppHelper.stopEventLoop()
signal.signal(signal.SIGINT, handler)
AppHelper.runEventLoop()
def cancel(self):
AppHelper.stopEventLoop()
def handler(self, event):
try:
check_windows = False
event_type = event.type()
todo = lambda: None
if (
time.time() - self.last_check_windows > FORCE_SCREEN_CHANGE and
event_type != NSKeyUp
):
self.last_check_windows = time.time()
check_windows = True
loc = NSEvent.mouseLocation()
if event_type == NSLeftMouseDown:
check_windows = True
todo = lambda: self.mouse_button_hook(1, loc.x, loc.y)
elif event_type == NSRightMouseDown:
check_windows = True
todo = lambda: self.mouse_button_hook(3, loc.x, loc.y)
elif event_type == NSScrollWheel:
if event.deltaY() > 0:
todo = lambda: self.mouse_button_hook(4, loc.x, loc.y)
elif event.deltaY() < 0:
todo = lambda: self.mouse_button_hook(5, loc.x, loc.y)
if event.deltaX() > 0:
todo = lambda: self.mouse_button_hook(6, loc.x, loc.y)
elif event.deltaX() < 0:
todo = lambda: self.mouse_button_hook(7, loc.x, loc.y)
elif event_type == NSKeyDown:
flags = event.modifierFlags()
modifiers = [] # OS X api doesn't care it if is left or right
if flags & NSControlKeyMask:
modifiers.append('Ctrl')
if flags & NSAlternateKeyMask:
modifiers.append('Alt')
if flags & NSCommandKeyMask:
modifiers.append('Cmd')
if flags & (NSShiftKeyMask | NSAlphaShiftKeyMask):
modifiers.append('Shift')
character = event.charactersIgnoringModifiers()
# these two get a special case because I am unsure of
# their unicode value
if event.keyCode() == 36:
character = "Enter"
elif event.keyCode() == 51:
character = "Backspace"
todo = lambda: self.key_hook(event.keyCode(),
modifiers,
keycodes.get(character,
character),
event.isARepeat())
elif event_type == NSMouseMoved:
todo = lambda: self.mouse_move_hook(loc.x, loc.y)
elif event_type == NSFlagsChanged:
# Register leaving this window after animations are done
# approx (1 second)
self.last_check_windows = (time.time() - FORCE_SCREEN_CHANGE +
WAIT_ANIMATION)
check_windows = True
if check_windows:
activeApps = self.workspace.runningApplications()
for app in activeApps:
if app.isActive():
app_name = app.localizedName()
options = kCGWindowListOptionOnScreenOnly | kCGWindowListExcludeDesktopElements
windowList = CGWindowListCopyWindowInfo(options,
kCGNullWindowID)
windowListLowPrio = [
w for w in windowList
if w['kCGWindowLayer'] or not w.get('kCGWindowName', u'')
]
windowList = [
w for w in windowList
if not w['kCGWindowLayer'] and w.get('kCGWindowName', u'')
]
windowList = windowList + windowListLowPrio
for window in windowList:
if window['kCGWindowOwnerName'] == app_name:
geometry = window['kCGWindowBounds']
self.screen_hook(window['kCGWindowOwnerName'],
window.get('kCGWindowName', u''),
geometry['X'],
geometry['Y'],
geometry['Width'],
geometry['Height'])
break
break
todo()
except (SystemExit, KeyboardInterrupt):
AppHelper.stopEventLoop()
return
except:
AppHelper.stopEventLoop()
raise
# Cocoa does not provide a good api to get the keycodes, therefore we
# have to provide our own.
keycodes = {
u"\u0009": "Tab",
u"\u001b": "Escape",
u"\uf700": "Up",
u"\uF701": "Down",
u"\uF702": "Left",
u"\uF703": "Right",
u"\uF704": "F1",
u"\uF705": "F2",
u"\uF706": "F3",
u"\uF707": "F4",
u"\uF708": "F5",
u"\uF709": "F6",
u"\uF70A": "F7",
u"\uF70B": "F8",
u"\uF70C": "F9",
u"\uF70D": "F10",
u"\uF70E": "F11",
u"\uF70F": "F12",
u"\uF710": "F13",
u"\uF711": "F14",
u"\uF712": "F15",
u"\uF713": "F16",
u"\uF714": "F17",
u"\uF715": "F18",
u"\uF716": "F19",
u"\uF717": "F20",
u"\uF718": "F21",
u"\uF719": "F22",
u"\uF71A": "F23",
u"\uF71B": "F24",
u"\uF71C": "F25",
u"\uF71D": "F26",
u"\uF71E": "F27",
u"\uF71F": "F28",
u"\uF720": "F29",
u"\uF721": "F30",
u"\uF722": "F31",
u"\uF723": "F32",
u"\uF724": "F33",
u"\uF725": "F34",
u"\uF726": "F35",
u"\uF727": "Insert",
u"\uF728": "Delete",
u"\uF729": "Home",
u"\uF72A": "Begin",
u"\uF72B": "End",
u"\uF72C": "PageUp",
u"\uF72D": "PageDown",
u"\uF72E": "PrintScreen",
u"\uF72F": "ScrollLock",
u"\uF730": "Pause",
u"\uF731": "SysReq",
u"\uF732": "Break",
u"\uF733": "Reset",
u"\uF734": "Stop",
u"\uF735": "Menu",
u"\uF736": "User",
u"\uF737": "System",
u"\uF738": "Print",
u"\uF739": "ClearLine",
u"\uF73A": "ClearDisplay",
u"\uF73B": "InsertLine",
u"\uF73C": "DeleteLine",
u"\uF73D": "InsertChar",
u"\uF73E": "DeleteChar",
u"\uF73F": "Prev",
u"\uF740": "Next",
u"\uF741": "Select",
u"\uF742": "Execute",
u"\uF743": "Undo",
u"\uF744": "Redo",
u"\uF745": "Find",
u"\uF746": "Help",
u"\uF747": "ModeSwitch"
}
| 10,236
|
Python
|
.py
| 258
| 27.341085
| 103
| 0.538508
|
selfspy/selfspy
| 2,404
| 231
| 72
|
GPL-3.0
|
9/5/2024, 5:11:58 PM (Europe/Amsterdam)
|
14,846
|
sniff_win.py
|
selfspy_selfspy/selfspy/sniff_win.py
|
# -*- coding: utf-8 -*-
# Copyright 2012 Morten Linderud
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import pyHook
import pythoncom
import sys
import threading
import ctypes
class SnifferThread(threading.Thread):
def __init__(self, hook):
threading.Thread.__init__(self)
self.daemon = True
self.encoding = sys.stdin.encoding
self.key_hook = lambda x: True
self.mouse_button_hook = lambda x: True
self.mouse_move_hook = lambda x: True
self.screen_hook = lambda x: True
self.remap = {
248: u"\xf8",
216: u"\xd8",
230: u"\xe6",
198: u"\xc6",
229: u"\xe5",
197: u"\xc5"
}
self.hm = hook
def run(self):
self.hm.KeyDown = self.KeyboardEvent
self.hm.MouseAllButtonsDown = self.MouseButtons
self.hm.MouseMove = self.MouseMove
self.hm.HookKeyboard()
self.hm.HookMouse()
pythoncom.PumpMessages()
def MouseButtons(self, event):
loc = event.Position
if event.MessageName == "mouse right down":
self.mouse_button_hook(3, loc[0], loc[1],)
if event.MessageName == "mouse left down":
self.mouse_button_hook(1, loc[0], loc[1])
if event.MessageName == "mouse middle down":
self.mouse_button_hook(2, loc[0], loc[1])
try:
string_event = event.WindowName.decode(self.encoding)
except AttributeError:
string_event = ""
self.screen_hook(str(event.Window), string_event, loc[0], loc[1], 0, 0)
return True
def MouseMove(self, event):
loc = event.Position
if event.MessageName == "mouse move":
self.mouse_move_hook(loc[0], loc[1])
if event.MessageName == "mouse wheel":
if event.Wheel == -1:
self.mouse_button_hook(5, loc[0], loc[1],)
elif event.Wheel == 1:
self.mouse_button_hook(4, loc[0], loc[1],)
return True
def KeyboardEvent(self, event):
modifiers = []
if event.Key in ["Lshift", "Rshift"]:
modifiers.append('Shift')
elif event.Key in ["Lmenu", "Rmenu"]:
modifiers.append('Alt')
elif event.Key in ["Rcontrol", "Lcontrol"]:
modifiers.append('Ctrl')
elif event.Key in ["Rwin", "Lwin"]:
modifiers.append('Super')
if event.Ascii in self.remap.keys():
string = self.remap[event.Ascii]
else:
string = unicode(chr(event.Ascii))
self.key_hook(str(event.Ascii), modifiers, string, False)
window_name = event.WindowName or ''
self.screen_hook(str(event.Window), window_name.decode(self.encoding), 0, 0, 0, 0)
return True
class Sniffer:
"""Winning!"""
def __init__(self):
self.encoding = sys.stdin.encoding
self.key_hook = lambda x: True
self.mouse_button_hook = lambda x: True
self.mouse_move_hook = lambda x: True
self.screen_hook = lambda x: True
self.remap = {
248: u"\xf8",
216: u"\xd8",
230: u"\xe6",
198: u"\xc6",
229: u"\xe5",
197: u"\xc5"
}
def run(self):
try:
self.hm = pyHook.HookManager()
self.thread = SnifferThread(self.hm)
# pythoncom.PumpMessages needs to be in the same thread as the events
self.thread.mouse_button_hook = self.mouse_button_hook
self.thread.mouse_move_hook = self.mouse_move_hook
self.thread.screen_hook = self.screen_hook
self.thread.key_hook = self.key_hook
self.thread.start()
while True:
self.thread.join(100)
except:
self.cancel()
def cancel(self):
ctypes.windll.user32.PostQuitMessage(0)
self.hm.UnhookKeyboard()
self.hm.UnhookMouse()
del self.thread
del self.hm
| 4,695 | Python | .py | 121 | 29.371901 | 90 | 0.589074 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,847 | check_password.py | selfspy_selfspy/selfspy/check_password.py |
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import os
DIGEST_NAME = 'password.digest'
MAGIC_STRING = '\xc5\x7fdh\x05\xf6\xc5=\xcfh\xafv\xc0\xf4\x13i*.O\xf6\xc2\x8d\x0f\x87\xdb\x9f\xc2\x88\xac\x95\xf8\xf0\xf4\x96\xe9\x82\xd1\xca[\xe5\xa32\xa0\x03\nD\x12\n\x1dr\xbc\x03\x9bE\xd3q6\x89Cwi\x10\x92\xdf(#\x8c\x87\x1b3\xd6\xd4\x8f\xde)\xbe\x17\xbf\xe4\xae\xb73\\\xcb\x7f\xd3\xc4\x89\xd0\x88\x07\x90\xd8N,\xbd\xbd\x93j\xc7\xa3\xec\xf3P\xff\x11\xde\xc9\xd6 \x98\xe8\xbc\xa0|\x83\xe90Nw\xe4=\xb53\x08\xf0\x14\xaa\xf9\x819,X~\x8e\xf7mB\x13\xe9;\xde\x9e\x10\xba\x19\x95\xd4p\xa7\xd2\xa9o\xbdF\xcd\x83\xec\xc5R\x17":K\xceAiX\xc1\xe8\xbe\xb8\x04m\xbefA8\x99\xee\x00\x93\xb4\x00\xb3\xd4\x8f\x00@Q\xe9\xd5\xdd\xff\x8d\x93\xe3w6\x8ctRQK\xa9\x97a\xc1UE\xdfv\xda\x15\xf5\xccA)\xec^]AW\x17/h)\x12\x89\x15\x0e#8"\x7f\x16\xd6e\x91\xa6\xd8\xea \xb9\xdb\x93W\xce9\xf2a\xe7\xa7T=q'
def check(data_dir, decrypter, read_only=False):
fname = os.path.join(data_dir, DIGEST_NAME)
if os.path.exists(fname):
if decrypter is None:
return False
f = open(fname, 'rb')
s = f.read()
f.close()
return decrypter.decrypt(s) == MAGIC_STRING
else:
if decrypter is not None:
if read_only:
return False
else:
s = decrypter.encrypt(MAGIC_STRING)
f = open(fname, 'wb')
f.write(s)
f.close()
return True
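# Usage sketch (illustrative): 'check' pairs with the Blowfish encrypter that
# selfspy builds elsewhere from an MD5 of the password; the first call writes
# password.digest, later calls verify against it. The path and password below
# are made-up examples.
#
#     import hashlib
#     from Crypto.Cipher import Blowfish
#     encrypter = Blowfish.new(hashlib.md5('secret').digest())
#     check('/tmp/selfspy-data', encrypter)   # creates the digest file
#     check('/tmp/selfspy-data', encrypter)   # -> True, password matches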
| 2,083 | Python | .py | 34 | 54.617647 | 771 | 0.691328 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,848 | config.py | selfspy_selfspy/selfspy/config.py |
#!/usr/bin/env python
# Copyright 2012 Bjarte Johansen
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
DATA_DIR = '~/.selfspy'
DBNAME = 'selfspy.sqlite'
LOCK_FILE = 'selfspy.pid'
LOCK = None
| 795 | Python | .py | 17 | 45.411765 | 70 | 0.774611 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,849 | __init__.py | selfspy_selfspy/selfspy/__init__.py |
#!/usr/bin/env python
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import argparse
import ConfigParser
from lockfile import LockFile
import hashlib
from Crypto.Cipher import Blowfish
from selfspy.activity_store import ActivityStore
from selfspy.password_dialog import get_password
from selfspy import check_password
from selfspy import config as cfg
def parse_config():
conf_parser = argparse.ArgumentParser(description=__doc__, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
conf_parser.add_argument("-c", "--config",
help="Config file with defaults. Command line parameters will override those given in the config file. The config file must start with a \"[Defaults]\" section, followed by [argument]=[value] on each line.", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
defaults = {}
if args.config:
if not os.path.exists(args.config):
raise EnvironmentError("Config file %s doesn't exist." % args.config)
config = ConfigParser.SafeConfigParser()
config.read([args.config])
defaults = dict(config.items('Defaults'))
else:
if os.path.exists(os.path.expanduser('~/.selfspy/selfspy.conf')):
config = ConfigParser.SafeConfigParser()
config.read([os.path.expanduser('~/.selfspy/selfspy.conf')])
defaults = dict(config.items('Defaults'))
parser = argparse.ArgumentParser(description='Monitor your computer activities and store them in an encrypted database for later analysis or disaster recovery.', parents=[conf_parser])
parser.set_defaults(**defaults)
    parser.add_argument('-p', '--password', help='Encryption password. If you want to keep your database unencrypted, specify -p "" here. If you don\'t specify a password in the command line arguments or in a config file, a dialog will pop up, asking for the password. The most secure option is to use neither the command line nor a config file, but instead type the password in on startup.')
parser.add_argument('-d', '--data-dir', help='Data directory for selfspy, where the database is stored. Remember that Selfspy must have read/write access. Default is %s' % cfg.DATA_DIR, default=cfg.DATA_DIR)
parser.add_argument('-n', '--no-text', action='store_true', help='Do not store what you type. This will make your database smaller and less sensitive to security breaches. Process name, window titles, window geometry, mouse clicks, number of keys pressed and key timings will still be stored, but not the actual letters. Key timings are stored to enable activity calculation in selfstats. If this switch is used, you will never be asked for password.')
parser.add_argument('-r', '--no-repeat', action='store_true', help='Do not store special characters as repeated characters.')
parser.add_argument('--change-password', action="store_true", help='Change the password used to encrypt the keys columns and exit.')
return parser.parse_args()
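# Example of the config file layout parse_config reads (illustrative values;
# the exact option spelling in the file is an assumption, mirroring the
# argparse destinations):
#
#     [Defaults]
#     data_dir = ~/.selfspy
#     no_text = True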
def make_encrypter(password):
if password == "":
encrypter = None
else:
encrypter = Blowfish.new(hashlib.md5(password).digest())
return encrypter
def main():
try:
args = vars(parse_config())
except EnvironmentError as e:
print str(e)
sys.exit(1)
args['data_dir'] = os.path.expanduser(args['data_dir'])
def check_with_encrypter(password):
encrypter = make_encrypter(password)
return check_password.check(args['data_dir'], encrypter)
try:
os.makedirs(args['data_dir'])
except OSError:
pass
lockname = os.path.join(args['data_dir'], cfg.LOCK_FILE)
cfg.LOCK = LockFile(lockname)
if cfg.LOCK.is_locked():
print '%s is locked! I am probably already running.' % lockname
print 'If you can find no selfspy process running, it is a stale lock and you can safely remove it.'
print 'Shutting down.'
sys.exit(1)
if args['no_text']:
args['password'] = ""
if args['password'] is None:
args['password'] = get_password(verify=check_with_encrypter)
encrypter = make_encrypter(args['password'])
if not check_password.check(args['data_dir'], encrypter):
print 'Password failed'
sys.exit(1)
if args['change_password']:
new_password = get_password(message="New Password: ")
new_encrypter = make_encrypter(new_password)
print 'Re-encrypting your keys...'
astore = ActivityStore(os.path.join(args['data_dir'], cfg.DBNAME),
encrypter,
store_text=(not args['no_text']),
repeat_char=(not args['no_repeat']))
astore.change_password(new_encrypter)
# delete the old password.digest
os.remove(os.path.join(args['data_dir'], check_password.DIGEST_NAME))
check_password.check(args['data_dir'], new_encrypter)
# don't assume we want the logger to run afterwards
print 'Exiting...'
sys.exit(0)
astore = ActivityStore(os.path.join(args['data_dir'], cfg.DBNAME),
encrypter,
store_text=(not args['no_text']),
repeat_char=(not args['no_repeat']))
cfg.LOCK.acquire()
try:
astore.run()
except SystemExit:
astore.close()
except KeyboardInterrupt:
pass
    # On OS X this has to be released in sniff_cocoa
cfg.LOCK.release()
if __name__ == '__main__':
main()
| 6,254 | Python | .py | 115 | 46.686957 | 456 | 0.680806 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,850 | sniff_x.py | selfspy_selfspy/selfspy/sniff_x.py |
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
# This file is loosely based on examples/record_demo.py in python-xlib
import sys
from Xlib import X, XK, display
from Xlib.ext import record
from Xlib.error import XError
from Xlib.protocol import rq
def state_to_idx(state): # this could be a dict, but I might want to extend it.
if state == 1:
return 1
if state == 128:
return 4
if state == 129:
return 5
return 0
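# Quick illustration of the mapping above: state 1 (Shift) selects keymap
# column 1, state 128 (Mod5, typically AltGr) column 4, and state 129
# (Shift+Mod5) column 5; any other modifier state falls back to column 0.
assert state_to_idx(1) == 1
assert state_to_idx(128) == 4 and state_to_idx(129) == 5
assert state_to_idx(0) == 0 and state_to_idx(4) == 0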
class Sniffer:
def __init__(self):
self.keysymdict = {}
for name in dir(XK):
if name.startswith("XK_"):
self.keysymdict[getattr(XK, name)] = name[3:]
self.key_hook = lambda x: True
self.mouse_button_hook = lambda x: True
self.mouse_move_hook = lambda x: True
self.screen_hook = lambda x: True
self.contextEventMask = [X.KeyPress, X.MotionNotify]
self.the_display = display.Display()
self.record_display = display.Display()
self.keymap = self.the_display._keymap_codes
self.atom_NET_WM_NAME = self.the_display.intern_atom('_NET_WM_NAME')
self.atom_UTF8_STRING = self.the_display.intern_atom('UTF8_STRING')
def run(self):
# Check if the extension is present
if not self.record_display.has_extension("RECORD"):
print "RECORD extension not found"
sys.exit(1)
else:
print "RECORD extension present"
# Create a recording context; we only want key and mouse events
self.ctx = self.record_display.record_create_context(
0,
[record.AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': tuple(self.contextEventMask),
'errors': (0, 0),
'client_started': False,
'client_died': False,
}])
# Enable the context; this only returns after a call to record_disable_context,
# while calling the callback function in the meantime
self.record_display.record_enable_context(self.ctx, self.processevents)
# Finally free the context
self.record_display.record_free_context(self.ctx)
def cancel(self):
self.the_display.record_disable_context(self.ctx)
self.the_display.flush()
def processevents(self, reply):
if reply.category != record.FromServer:
return
if reply.client_swapped:
print "* received swapped protocol data, cowardly ignored"
return
if not len(reply.data) or ord(reply.data[0]) < 2:
# not an event
return
cur_class, cur_window, cur_name = self.get_cur_window()
if cur_class:
cur_geo = self.get_geometry(cur_window)
if cur_geo:
self.screen_hook(cur_class,
cur_name,
cur_geo.x,
cur_geo.y,
cur_geo.width,
cur_geo.height)
data = reply.data
while len(data):
ef = rq.EventField(None)
event, data = ef.parse_binary_value(data, self.record_display.display, None, None)
if event.type in [X.KeyPress]:
# X.KeyRelease, we don't log this anyway
self.key_hook(*self.key_event(event))
elif event.type in [X.ButtonPress]:
# X.ButtonRelease we don't log this anyway.
self.mouse_button_hook(*self.button_event(event))
elif event.type == X.MotionNotify:
self.mouse_move_hook(event.root_x, event.root_y)
elif event.type == X.MappingNotify:
self.the_display.refresh_keyboard_mapping()
newkeymap = self.the_display._keymap_codes
print 'Change keymap!', newkeymap == self.keymap
self.keymap = newkeymap
def get_key_name(self, keycode, state):
state_idx = state_to_idx(state)
cn = self.keymap[keycode][state_idx]
if cn < 256:
return chr(cn).decode('latin1')
else:
return self.lookup_keysym(cn)
def key_event(self, event):
flags = event.state
modifiers = []
if flags & X.ControlMask:
modifiers.append('Ctrl')
if flags & X.Mod1Mask: # Mod1 is the alt key
modifiers.append('Alt')
if flags & X.Mod4Mask: # Mod4 should be super/windows key
modifiers.append('Super')
if flags & X.ShiftMask:
modifiers.append('Shift')
return (event.detail,
modifiers,
self.get_key_name(event.detail, event.state),
event.sequence_number == 1)
def button_event(self, event):
return event.detail, event.root_x, event.root_y
def lookup_keysym(self, keysym):
if keysym in self.keysymdict:
return self.keysymdict[keysym]
return "[%d]" % keysym
def get_wm_name(self, win):
"""
Custom method to query for _NET_WM_NAME first, before falling back to
python-xlib's method, which (currently) only queries WM_NAME with
type=STRING."""
# Alternatively, we could also try WM_NAME with "UTF8_STRING" and
# "COMPOUND_TEXT", but _NET_WM_NAME should be good.
d = win.get_full_property(self.atom_NET_WM_NAME, self.atom_UTF8_STRING)
if d is None or d.format != 8:
# Fallback.
r = win.get_wm_name()
if r:
return r.decode('latin1') # WM_NAME with type=STRING.
else:
# Fixing utf8 issue on Ubuntu (https://github.com/gurgeh/selfspy/issues/133)
# Thanks to https://github.com/gurgeh/selfspy/issues/133#issuecomment-142943681
try:
return d.value.decode('utf8')
except UnicodeError:
return d.value.encode('utf8').decode('utf8')
def get_cur_window(self):
i = 0
cur_class = None
cur_window = None
cur_name = None
while i < 10:
try:
cur_window = self.the_display.get_input_focus().focus
cur_class = None
cur_name = None
while cur_class is None:
if type(cur_window) is int:
return None, None, None
cur_name = self.get_wm_name(cur_window)
cur_class = cur_window.get_wm_class()
if cur_class:
cur_class = cur_class[1]
if not cur_class:
cur_window = cur_window.query_tree().parent
except XError:
i += 1
continue
break
cur_class = cur_class or ''
cur_name = cur_name or ''
return cur_class.decode('latin1'), cur_window, cur_name
def get_geometry(self, cur_window):
i = 0
geo = None
while i < 10:
try:
geo = cur_window.get_geometry()
break
except XError:
i += 1
return geo
| 8,152 | Python | .py | 192 | 30.463542 | 94 | 0.56321 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,851 | stats.py | selfspy_selfspy/selfspy/stats.py |
#!/usr/bin/env python
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
import datetime
import time
import argparse
import ConfigParser
from collections import Counter
from Crypto.Cipher import Blowfish
import hashlib
import config as cfg
from selfspy import check_password
from selfspy.password_dialog import get_password
from selfspy.period import Period
from selfspy import models
import codecs
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
ACTIVE_SECONDS = 180
PERIOD_LOOKUP = {'s': 'seconds', 'm': 'minutes', 'h': 'hours', 'd': 'days', 'w': 'weeks'}
ACTIVITY_ACTIONS = {'active', 'periods', 'pactive', 'tactive', 'ratios'}
SUMMARY_ACTIONS = ACTIVITY_ACTIONS.union({'pkeys', 'tkeys', 'key_freqs', 'clicks', 'ratios'})
PROCESS_ACTIONS = {'pkeys', 'pactive'}
WINDOW_ACTIONS = {'tkeys', 'tactive'}
BUTTON_MAP = [('button1', 'left'),
('button2', 'middle'),
('button3', 'right'),
('button4', 'up'),
('button5', 'down')]
def pretty_seconds(secs):
secs = int(secs)
active = False
outs = ''
days = secs / (3600 * 24)
if days:
active = True
outs += '%d days, ' % days
secs -= days * (3600 * 24)
hours = secs / 3600
if hours:
active = True
if active:
outs += '%dh ' % hours
secs -= hours * 3600
minutes = secs / 60
if minutes:
active = True
if active:
outs += '%dm ' % minutes
secs -= minutes * 60
outs += '%ds' % secs
return outs
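# Worked example (Python 2 integer division, matching the code above):
assert pretty_seconds(90061) == '1 days, 1h 1m 1s'  # 1 day + 1h + 1m + 1s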
def make_time_string(dates, clock):
now = datetime.datetime.now()
now2 = datetime.datetime.now()
if dates is None:
dates = []
if isinstance(dates, list) and len(dates) > 0:
if type(dates[0]) is str:
datesstr = " ".join(dates)
else:
            print '%s is of incompatible type list of %s.' % (
                dates[0], str(type(dates[0])))
            sys.exit(1)
elif isinstance(dates, basestring):
datesstr = dates
else:
datesstr = now.strftime('%Y %m %d')
dates = datesstr.split() # any whitespace
if len(dates) > 3:
print 'Max three arguments to date', dates
sys.exit(1)
try:
dates = [int(d) for d in dates]
if len(dates) == 3:
now = now.replace(year=dates[0])
if len(dates) >= 2:
now = now.replace(month=dates[-2])
if len(dates) >= 1:
now = now.replace(day=dates[-1])
if len(dates) == 2:
if now > now2:
now = now.replace(year=now.year - 1)
if len(dates) == 1:
if now > now2:
m = now.month - 1
if m:
now = now.replace(month=m)
else:
now = now.replace(year=now.year - 1, month=12)
except ValueError:
print 'Malformed date', dates
sys.exit(1)
if clock:
try:
hour, minute = [int(v) for v in clock.split(':')]
except ValueError:
print 'Malformed clock', clock
sys.exit(1)
now = now.replace(hour=hour, minute=minute, second=0)
if now > now2:
now -= datetime.timedelta(days=1)
return now.strftime('%Y-%m-%d %H:%M'), now
def make_period(q, period, who, start, prop):
    if isinstance(period, list) and len(period) > 0:
        if type(period[0]) is str:
            periodstr = "".join(period)
        else:
            print '%s is of incompatible type list of %s.' % (who, str(type(period[0])))
            sys.exit(1)
    elif isinstance(period, basestring):
        periodstr = period.translate(None, " \t")
    else:
        print '%s is of incompatible type %s.' % (who, str(type(period)))
        sys.exit(1)
    pmatch = re.match(r"(\d+)([" + "".join(PERIOD_LOOKUP.keys()) + "]?)", periodstr)
    if pmatch is None:
print '%s has an unrecognizable format: %s' % (who, periodstr)
sys.exit(1)
period = [pmatch.group(1)]+([pmatch.group(2)] if pmatch.group(2) else [])
d = {}
val = int(period[0])
if len(period) == 1:
d['hours'] = val
else:
if period[1] not in PERIOD_LOOKUP:
print '--limit unit "%s" not one of %s' % (period[1], PERIOD_LOOKUP.keys())
sys.exit(1)
d[PERIOD_LOOKUP[period[1]]] = val
if start:
return q.filter(prop <= start + datetime.timedelta(**d))
else:
start = datetime.datetime.now() - datetime.timedelta(**d)
return q.filter(prop >= start), start
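# Illustration (assumed CLI usage): '--back 3 d' reaches this function as
# period=['3', 'd'], which joins to '3d' and parses to
# datetime.timedelta(days=3); with start=None, rows newer than three days are
# kept and the computed start is returned alongside the filtered query.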
def create_times(row):
current_time = time.mktime(row.created_at.timetuple())
abs_times = [current_time]
for t in row.load_timings():
current_time -= t
abs_times.append(current_time)
abs_times.reverse()
return abs_times
class Selfstats:
def __init__(self, db_name, args):
self.args = args
self.session_maker = models.initialize(db_name)
self.inmouse = False
self.check_needs()
def do(self):
if self.need_summary:
self.calc_summary()
self.show_summary()
else:
self.show_rows()
def check_needs(self):
self.need_text = False
self.need_activity = False
self.need_timings = False
self.need_keys = False
self.need_humanreadable = False
self.need_summary = False
self.need_process = any(self.args[k] for k in PROCESS_ACTIONS)
self.need_window = any(self.args[k] for k in WINDOW_ACTIONS)
if self.args['body'] is not None:
self.need_text = True
if self.args['showtext']:
self.need_text = True
cutoff = [self.args[k] for k in ACTIVITY_ACTIONS if self.args[k]]
if cutoff:
if any(c != cutoff[0] for c in cutoff):
                print 'You must give the same time argument to the different parameters in the --active family when you use several in the same query.'
sys.exit(1)
self.need_activity = cutoff[0]
self.need_timings = True
if self.args['key_freqs']:
self.need_keys = True
if self.args['human_readable']:
self.need_humanreadable = True
if any(self.args[k] for k in SUMMARY_ACTIONS):
self.need_summary = True
def maybe_reg_filter(self, q, name, names, table, source_prop, target_prop):
if self.args[name] is not None:
ids = []
try:
reg = re.compile(self.args[name], re.I)
except re.error, e:
print 'Error in regular expression', str(e)
sys.exit(1)
for x in self.session.query(table).all():
if reg.search(x.__getattribute__(source_prop)):
ids.append(x.id)
if not self.inmouse:
print '%d %s matched' % (len(ids), names)
if ids:
q = q.filter(target_prop.in_(ids))
else:
return q, False
return q, True
def filter_prop(self, prop, startprop):
self.session = self.session_maker()
q = self.session.query(prop).order_by(prop.id)
if self.args['date'] or self.args['clock']:
s, start = make_time_string(self.args['date'], self.args['clock'])
q = q.filter(prop.created_at >= s)
if self.args['limit'] is not None:
q = make_period(q, self.args['limit'], '--limit', start, startprop)
elif self.args['id'] is not None:
q = q.filter(prop.id >= self.args['id'])
if self.args['limit'] is not None:
q = q.filter(prop.id < self.args['id'] + int(self.args['limit'][0]))
elif self.args['back'] is not None:
q, start = make_period(q, self.args['back'], '--back', None, startprop)
if self.args['limit'] is not None:
q = make_period(q, self.args['limit'], '--limit', start, startprop)
q, found = self.maybe_reg_filter(q, 'process', 'process(es)', models.Process, 'name', prop.process_id)
if not found:
return None
q, found = self.maybe_reg_filter(q, 'title', 'title(s)', models.Window, 'title', prop.window_id)
if not found:
return None
return q
def filter_keys(self):
q = self.filter_prop(models.Keys, models.Keys.started)
if q is None:
return
if self.args['min_keys'] is not None:
q = q.filter(models.Keys.nrkeys >= self.args['min_keys'])
if self.args['body']:
try:
bodrex = re.compile(self.args['body'], re.I)
except re.error, e:
print 'Error in regular expression', str(e)
sys.exit(1)
for x in q.all():
                if self.need_humanreadable:
body = x.decrypt_humanreadable()
else:
body = x.decrypt_text()
if bodrex.search(body):
yield x
else:
for x in q:
yield x
def filter_clicks(self):
self.inmouse = True
q = self.filter_prop(models.Click, models.Click.created_at)
if q is None:
return
for x in q:
yield x
def show_rows(self):
fkeys = self.filter_keys()
rows = 0
print '<RowID> <Starting date and time> <Duration> <Process> <Window title> <Number of keys pressed>',
if self.args['showtext'] and self.need_humanreadable:
print '<Decrypted Human Readable text>'
elif self.args['showtext']:
print '<Decrypted text>'
else:
print
for row in fkeys:
rows += 1
print row.id, row.started, pretty_seconds((row.created_at - row.started).total_seconds()), row.process.name, '"%s"' % row.window.title, row.nrkeys,
if self.args['showtext']:
if self.need_humanreadable:
print row.decrypt_humanreadable().decode('utf8')
else:
print row.decrypt_text().decode('utf8')
else:
print
print rows, 'rows'
def calc_summary(self):
def updict(d1, d2, activity_times, sub=None):
if sub is not None:
if sub not in d1:
d1[sub] = {}
d1 = d1[sub]
for key, val in d2.items():
if key not in d1:
d1[key] = 0
d1[key] += val
if self.need_activity:
if 'activity' not in d1:
d1['activity'] = Period(self.need_activity, time.time())
d1['activity'].extend(activity_times)
sumd = {}
processes = {}
windows = {}
timings = []
keys = Counter()
for row in self.filter_keys():
d = {'nr': 1,
'keystrokes': len(row.load_timings())}
if self.need_activity:
timings = create_times(row)
if self.need_process:
updict(processes, d, timings, sub=row.process.name)
if self.need_window:
updict(windows, d, timings, sub=row.window.title)
updict(sumd, d, timings)
if self.args['key_freqs']:
keys.update(row.decrypt_keys())
for click in self.filter_clicks():
d = {'noscroll_clicks': click.button not in [4, 5],
'clicks': 1,
'button%d' % click.button: 1,
'mousings': click.nrmoves}
if self.need_activity:
timings = [time.mktime(click.created_at.timetuple())]
if self.need_process:
updict(processes, d, timings, sub=click.process.name)
if self.need_window:
updict(windows, d, timings, sub=click.window.title)
updict(sumd, d, timings)
self.processes = processes
self.windows = windows
self.summary = sumd
if self.args['key_freqs']:
self.summary['key_freqs'] = keys
def show_summary(self):
print '%d keystrokes in %d key sequences,' % (self.summary.get('keystrokes', 0), self.summary.get('nr', 0)),
print '%d clicks (%d excluding scroll),' % (self.summary.get('clicks', 0), self.summary.get('noscroll_clicks', 0)),
print '%d mouse movements' % (self.summary.get('mousings', 0))
print
if self.need_activity:
act = self.summary.get('activity')
if act:
act = act.calc_total()
else:
act = 0
print 'Total time active:',
print pretty_seconds(act)
print
if self.args['clicks']:
print 'Mouse clicks:'
for key, name in BUTTON_MAP:
print self.summary.get(key, 0), name
print
if self.args['key_freqs']:
print 'Key frequencies:'
for key, val in self.summary['key_freqs'].most_common():
print key, val
print
if self.args['pkeys']:
print 'Processes sorted by keystrokes:'
pdata = self.processes.items()
pdata.sort(key=lambda x: x[1].get('keystrokes', 0), reverse=True)
for name, data in pdata:
print name, data.get('keystrokes', 0)
print
if self.args['tkeys']:
print 'Window titles sorted by keystrokes:'
wdata = self.windows.items()
wdata.sort(key=lambda x: x[1].get('keystrokes', 0), reverse=True)
for name, data in wdata:
print name, data.get('keystrokes', 0)
print
if self.args['pactive']:
print 'Processes sorted by activity:'
for p in self.processes.values():
p['active_time'] = int(p['activity'].calc_total())
pdata = self.processes.items()
pdata.sort(key=lambda x: x[1]['active_time'], reverse=True)
for name, data in pdata:
print '%s, %s' % (name, pretty_seconds(data['active_time']))
print
if self.args['tactive']:
print 'Window titles sorted by activity:'
for w in self.windows.values():
w['active_time'] = int(w['activity'].calc_total())
wdata = self.windows.items()
wdata.sort(key=lambda x: x[1]['active_time'], reverse=True)
for name, data in wdata:
print '%s, %s' % (name, pretty_seconds(data['active_time']))
print
if self.args['periods']:
if 'activity' in self.summary:
print 'Active periods:'
for t1, t2 in self.summary['activity'].times:
d1 = datetime.datetime.fromtimestamp(t1).replace(microsecond=0)
d2 = datetime.datetime.fromtimestamp(t2).replace(microsecond=0)
print '%s - %s' % (d1.isoformat(' '), str(d2.time()).split('.')[0])
else:
print 'No active periods.'
print
if self.args['ratios']:
def tryget(prop):
return float(max(1, self.summary.get(prop, 1)))
mousings = tryget('mousings')
clicks = tryget('clicks')
keys = tryget('keystrokes')
print 'Keys / Clicks: %.1f' % (keys / clicks)
print 'Active seconds / Keys: %.1f' % (act / keys)
print
print 'Mouse movements / Keys: %.1f' % (mousings / keys)
print 'Mouse movements / Clicks: %.1f' % (mousings / clicks)
print
def parse_config():
conf_parser = argparse.ArgumentParser(description=__doc__, add_help=False,
formatter_class=argparse.RawDescriptionHelpFormatter)
conf_parser.add_argument("-c", "--config",
help="""Config file with defaults. Command line parameters will override those given in the config file. Options to selfspy goes in the "[Defaults]" section, followed by [argument]=[value] on each line. Options specific to selfstats should be in the "[Selfstats]" section, though "password" and "data-dir" are still read from "[Defaults]".""", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
defaults = {}
if args.config:
if not os.path.exists(args.config):
raise EnvironmentError("Config file %s doesn't exist." % args.config)
config = ConfigParser.SafeConfigParser()
config.read([args.config])
defaults = dict(config.items('Defaults') + config.items("Selfstats"))
parser = argparse.ArgumentParser(description="""Calculate statistics on selfspy data. Per default it will show non-text information that matches the filter. Adding '-s' means also show text. Adding any of the summary options will show those summaries over the given filter instead of the listing. Multiple summary options can be given to print several summaries over the same filter. If you give arguments that need to access text / keystrokes, you will be asked for the decryption password.""", epilog="""See the README file or http://gurgeh.github.com/selfspy for examples.""", parents=[conf_parser])
parser.set_defaults(**defaults)
    parser.add_argument('-p', '--password', help='Decryption password. Only needed if selfstats needs to access text / keystrokes data. If your database is not encrypted, specify -p="" here. If you don\'t specify a password in the command line arguments or in a config file, and the statistics you ask for require a password, a dialog will pop up asking for the password. If you give your password on the command line, remember that it will most likely be stored in plain text in your shell history.')
parser.add_argument('-d', '--data-dir', help='Data directory for selfspy, where the database is stored. Remember that Selfspy must have read/write access. Default is %s' % cfg.DATA_DIR, default=cfg.DATA_DIR)
    parser.add_argument('-s', '--showtext', action='store_true', help='Also show the text column. This switch is ignored if at least one of the summary options is used. Requires password.')
parser.add_argument('-D', '--date', nargs='+', help='Which date to start the listing or summarizing from. If only one argument is given (--date 13) it is interpreted as the closest date in the past on that day. If two arguments are given (--date 03 13) it is interpreted as the closest date in the past on that month and that day, in that order. If three arguments are given (--date 2012 03 13) it is interpreted as YYYY MM DD')
    parser.add_argument('-C', '--clock', type=str, help='Time to start the listing or summarizing from. Given in 24 hour format as --clock 13:25. If no --date is given, interpret the time as today if that results in a time in the past, otherwise as yesterday.')
parser.add_argument('-i', '--id', type=int, help='Which row ID to start the listing or summarizing from. If --date and/or --clock is given, this option is ignored.')
parser.add_argument('-b', '--back', nargs='+', type=str, help='--back <period> [<unit>] Start the listing or summary this much back in time. Use this as an alternative to --date, --clock and --id. If any of those are given, this option is ignored. <unit> is either "s" (seconds), "m" (minutes), "h" (hours), "d" (days) or "w" (weeks). If no unit is given, it is assumed to be hours.')
parser.add_argument('-l', '--limit', help='--limit <period> [<unit>]. If the start is given in --date/--clock, the limit is a time period given by <unit>. <unit> is either "s" (seconds), "m" (minutes), "h" (hours), "d" (days) or "w" (weeks). If no unit is given, it is assumed to be hours. If the start is given with --id, limit has no unit and means that the maximum row ID is --id + --limit.', nargs='+', type=str)
parser.add_argument('-m', '--min-keys', type=int, metavar='nr', help='Only allow entries with at least <nr> keystrokes')
parser.add_argument('-T', '--title', type=str, metavar='regexp', help='Only allow entries where a search for this <regexp> in the window title matches something. All regular expressions are case insensitive.')
parser.add_argument('-P', '--process', type=str, metavar='regexp', help='Only allow entries where a search for this <regexp> in the process matches something.')
parser.add_argument('-B', '--body', type=str, metavar='regexp', help='Only allow entries where a search for this <regexp> in the body matches something. Do not use this filter when summarizing ratios or activity, as it has no effect on mouse clicks. Requires password.')
parser.add_argument('--clicks', action='store_true', help='Summarize number of mouse button clicks for all buttons.')
parser.add_argument('--key-freqs', action='store_true', help='Summarize a table of absolute and relative number of keystrokes for each used key during the time period. Requires password.')
parser.add_argument('--human-readable', action='store_true', help='This modifies the --body entry and honors backspace.')
parser.add_argument('--active', type=int, metavar='seconds', nargs='?', const=ACTIVE_SECONDS, help='Summarize total time spent active during the period. The optional argument gives how many seconds after each mouse click (including scroll up or down) or keystroke that you are considered active. Default is %d.' % ACTIVE_SECONDS)
parser.add_argument('--ratios', type=int, metavar='seconds', nargs='?', const=ACTIVE_SECONDS, help='Summarize the ratio between different metrics in the given period. "Clicks" will not include up or down scrolling. The optional argument is the "seconds" cutoff for calculating active use, like --active.')
parser.add_argument('--periods', type=int, metavar='seconds', nargs='?', const=ACTIVE_SECONDS, help='List active time periods. Optional argument works same as for --active.')
parser.add_argument('--pactive', type=int, metavar='seconds', nargs='?', const=ACTIVE_SECONDS, help='List processes, sorted by time spent active in them. Optional argument works same as for --active.')
parser.add_argument('--tactive', type=int, metavar='seconds', nargs='?', const=ACTIVE_SECONDS, help='List window titles, sorted by time spent active in them. Optional argument works same as for --active.')
parser.add_argument('--pkeys', action='store_true', help='List processes sorted by number of keystrokes.')
parser.add_argument('--tkeys', action='store_true', help='List window titles sorted by number of keystrokes.')
return parser.parse_args()
def make_encrypter(password):
if password == "":
encrypter = None
else:
encrypter = Blowfish.new(hashlib.md5(password).digest())
return encrypter
def main():
try:
args = vars(parse_config())
except EnvironmentError as e:
print str(e)
sys.exit(1)
args['data_dir'] = os.path.expanduser(args['data_dir'])
def check_with_encrypter(password):
encrypter = make_encrypter(password)
return check_password.check(args['data_dir'], encrypter, read_only=True)
ss = Selfstats(os.path.join(args['data_dir'], cfg.DBNAME), args)
if ss.need_text or ss.need_keys:
if args['password'] is None:
args['password'] = get_password(verify=check_with_encrypter)
models.ENCRYPTER = make_encrypter(args['password'])
if not check_password.check(args['data_dir'], models.ENCRYPTER, read_only=True):
print 'Password failed'
sys.exit(1)
ss.do()
if __name__ == '__main__':
main()
| 24,374 | Python | .py | 472 | 41.432203 | 606 | 0.60306 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,852 | password_dialog.py | selfspy_selfspy/selfspy/password_dialog.py |
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import sys
import getpass
from Tkinter import Tk, StringVar
from tkSimpleDialog import Dialog
def get_password(verify=None, message=None):
if (not verify):
pw = get_user_password(verify, message)
else:
pw = get_keyring_password(verify)
        if pw is None:
pw = get_user_password(verify, message)
return pw
def get_user_password(verify, message=None, force_save=False):
if sys.stdin.isatty():
pw = get_tty_password(verify, message, force_save)
else:
pw = get_tk_password(verify, message, force_save)
return pw
def get_keyring_password(verify, message=None):
pw = None
try:
import keyring
usr = getpass.getuser()
pw = keyring.get_password('Selfspy', usr)
if pw is not None:
if (not verify) or not verify(pw):
                print 'The keyring password is not valid. Please input the correct one.'
pw = get_user_password(verify, message, force_save=True)
except ImportError:
print 'keyring library not found'
return pw
def set_keyring_password(password):
try:
import keyring
usr = getpass.getuser()
keyring.set_password('Selfspy', usr, password)
except ImportError:
print 'Unable to save password to keyring (library not found)'
except NameError:
pass
except:
print 'Unable to save password to keyring'
def get_tty_password(verify, message=None, force_save=False):
verified = False
for i in xrange(3):
if message:
pw = getpass.getpass(message)
else:
pw = getpass.getpass()
if (not verify) or verify(pw):
verified = True
break
if not verified:
print 'Password failed'
sys.exit(1)
if not force_save:
while True:
store = raw_input("Do you want to store the password in the keychain [Y/N]: ")
if store.lower() in ['n', 'y']:
break
save_to_keychain = store.lower() == 'y'
else:
save_to_keychain = True
if save_to_keychain:
set_keyring_password(pw)
return pw
def get_tk_password(verify, message=None, force_save=False):
root = Tk()
root.withdraw()
if message is None:
message = 'Password'
while True:
dialog_info = PasswordDialog(title='Selfspy encryption password',
prompt=message,
parent=root)
pw, save_to_keychain = dialog_info.result
if pw is None:
return ""
if (not verify) or verify(pw):
break
if save_to_keychain or force_save:
set_keyring_password(pw)
return pw
class PasswordDialog(Dialog):
def __init__(self, title, prompt, parent):
self.prompt = prompt
Dialog.__init__(self, parent, title)
def body(self, master):
from Tkinter import Label
from Tkinter import Entry
from Tkinter import Checkbutton
from Tkinter import IntVar
from Tkinter import W
self.checkVar = IntVar()
Label(master, text=self.prompt).grid(row=0, sticky=W)
self.e1 = Entry(master)
self.e1.grid(row=0, column=1)
        self.cb = Checkbutton(master, text="Save to keychain", variable=self.checkVar)
        # grid() is the geometry manager for this dialog; mixing in pack() on
        # the same master would make Tk hang while the two managers fight.
        self.cb.grid(row=1, columnspan=2, sticky=W)
self.e1.configure(show='*')
def apply(self):
self.result = (self.e1.get(), self.checkVar.get() == 1)
if __name__ == '__main__':
print get_password()
| 4,290 | Python | .py | 117 | 29.153846 | 90 | 0.640901 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,853 | activity_store.py | selfspy_selfspy/selfspy/activity_store.py |
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import time
from datetime import datetime
NOW = datetime.now
import sqlalchemy
import platform
if platform.system() == 'Darwin':
from selfspy import sniff_cocoa as sniffer
elif platform.system() == 'Windows':
from selfspy import sniff_win as sniffer
else:
from selfspy import sniff_x as sniffer
from selfspy import models
from selfspy.models import Process, Window, Geometry, Click, Keys
SKIP_MODIFIERS = {"", "Shift_L", "Control_L", "Super_L", "Alt_L", "Super_R", "Control_R", "Shift_R", "[65027]"} # [65027] is AltGr in X for some ungodly reason.
SCROLL_BUTTONS = {4, 5, 6, 7}
SCROLL_COOLOFF = 10 # seconds
class Display:
def __init__(self):
self.proc_id = None
self.win_id = None
self.geo_id = None
class KeyPress:
def __init__(self, key, time, is_repeat):
self.key = key
self.time = time
self.is_repeat = is_repeat
class ActivityStore:
def __init__(self, db_name, encrypter=None, store_text=True, repeat_char=True):
self.session_maker = models.initialize(db_name)
models.ENCRYPTER = encrypter
self.store_text = store_text
self.repeat_char = repeat_char
self.curtext = u""
self.key_presses = []
self.mouse_path = []
self.current_window = Display()
self.last_scroll = {button: 0 for button in SCROLL_BUTTONS}
self.last_key_time = time.time()
self.last_commit = time.time()
self.started = NOW()
self.last_screen_change = None
def trycommit(self):
self.last_commit = time.time()
for _ in range(1000):
try:
self.session.commit()
break
except sqlalchemy.exc.OperationalError:
time.sleep(1)
except:
self.session.rollback()
def run(self):
self.session = self.session_maker()
self.sniffer = sniffer.Sniffer()
self.sniffer.screen_hook = self.got_screen_change
self.sniffer.key_hook = self.got_key
self.sniffer.mouse_button_hook = self.got_mouse_click
self.sniffer.mouse_move_hook = self.got_mouse_move
self.sniffer.run()
def got_screen_change(self, process_name, window_name, win_x, win_y, win_width, win_height):
"""Receives a screen change and stores any changes.
If the process or window has changed it will also store any queued pressed keys.
Keyword arguments:
process_name -- the name of the process running the current window
window_name -- the name of the window
win_x -- the x position of the window
win_y -- the y position of the window
win_width -- the width of the window
win_height -- the height of the window"""
# skip the event if same arguments as last time are passed
args = [process_name, window_name, win_x, win_y, win_width, win_height]
if self.last_screen_change == args:
return
self.last_screen_change = args
cur_process = self.session.query(
Process
).filter_by(
name=process_name
).scalar()
if not cur_process:
cur_process = Process(process_name)
self.session.add(cur_process)
cur_geometry = self.session.query(
Geometry
).filter_by(
xpos=win_x,
ypos=win_y,
width=win_width,
height=win_height
).scalar()
if not cur_geometry:
cur_geometry = Geometry(win_x, win_y, win_width, win_height)
self.session.add(cur_geometry)
cur_window = self.session.query(Window).filter_by(title=window_name,
process_id=cur_process.id).scalar()
if not cur_window:
cur_window = Window(window_name, cur_process.id)
self.session.add(cur_window)
if not (self.current_window.proc_id == cur_process.id
and self.current_window.win_id == cur_window.id):
self.trycommit()
self.store_keys() # happens before as these keypresses belong to the previous window
self.current_window.proc_id = cur_process.id
self.current_window.win_id = cur_window.id
self.current_window.geo_id = cur_geometry.id
def filter_many(self):
specials_in_row = 0
lastpress = None
newpresses = []
for press in self.key_presses:
key = press.key
if specials_in_row and key != lastpress.key:
if specials_in_row > 1:
lastpress.key = '%s]x%d>' % (lastpress.key[:-2], specials_in_row)
newpresses.append(lastpress)
specials_in_row = 0
if len(key) > 1:
specials_in_row += 1
lastpress = press
else:
newpresses.append(press)
if specials_in_row:
if specials_in_row > 1:
lastpress.key = '%s]x%d>' % (lastpress.key[:-2], specials_in_row)
newpresses.append(lastpress)
self.key_presses = newpresses
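    # Illustration of the compression above: three consecutive '<[Backspace]>'
    # presses collapse into a single '<[Backspace]x3>' entry, while runs of
    # ordinary single-character presses pass through unchanged.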
def store_keys(self):
""" Stores the current queued key-presses """
if self.repeat_char:
self.filter_many()
if self.key_presses:
keys = [press.key for press in self.key_presses]
timings = [press.time for press in self.key_presses]
add = lambda count, press: count + (0 if press.is_repeat else 1)
nrkeys = reduce(add, self.key_presses, 0)
curtext = u""
if not self.store_text:
keys = []
else:
curtext = ''.join(keys)
self.session.add(Keys(curtext.encode('utf8'),
keys,
timings,
nrkeys,
self.started,
self.current_window.proc_id,
self.current_window.win_id,
self.current_window.geo_id))
self.trycommit()
self.started = NOW()
self.key_presses = []
self.last_key_time = time.time()
def got_key(self, keycode, state, string, is_repeat):
""" Receives key-presses and queues them for storage.
keycode is the code sent by the keyboard to represent the pressed key
state is the list of modifier keys pressed, each modifier key should be represented
with capital letters and optionally followed by an underscore and location
specifier, i.e: SHIFT or SHIFT_L/SHIFT_R, ALT, CTRL
string is the string representation of the key press
        is_repeat is True if the current key is a repeat sent by the keyboard """
now = time.time()
if string in SKIP_MODIFIERS:
return
if len(state) > 1 or (len(state) == 1 and state[0] != "Shift"):
string = '<[%s: %s]>' % (' '.join(state), string)
elif len(string) > 1:
string = '<[%s]>' % string
self.key_presses.append(KeyPress(string, now - self.last_key_time, is_repeat))
self.last_key_time = now
def store_click(self, button, x, y):
""" Stores incoming mouse-clicks """
self.session.add(Click(button,
True,
x, y,
len(self.mouse_path),
self.current_window.proc_id,
self.current_window.win_id,
self.current_window.geo_id))
self.mouse_path = []
self.trycommit()
def got_mouse_click(self, button, x, y):
""" Receives mouse clicks and sends them for storage.
        Mouse buttons: left: 1, middle: 2, right: 3, scroll up: 4, down: 5, left: 6, right: 7
        x, y are the coordinates of the click"""
if button in [4, 5, 6, 7]:
if time.time() - self.last_scroll[button] < SCROLL_COOLOFF:
return
self.last_scroll[button] = time.time()
self.store_click(button, x, y)
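    # Note on the cooloff above: scroll events (buttons 4-7) are rate limited,
    # so a burst of wheel ticks stores at most one click per button every
    # SCROLL_COOLOFF seconds.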
def got_mouse_move(self, x, y):
""" Queues mouse movements.
        x, y are the new coordinates of the mouse after the move"""
self.mouse_path.append([x, y])
def close(self):
""" stops the sniffer and stores the latest keys. To be used on shutdown of program"""
self.sniffer.cancel()
self.store_keys()
def change_password(self, new_encrypter):
self.session = self.session_maker()
keys = self.session.query(Keys).all()
for k in keys:
dtext = k.decrypt_text()
dkeys = k.decrypt_keys()
k.encrypt_text(dtext, new_encrypter)
k.encrypt_keys(dkeys, new_encrypter)
self.session.commit()
| 9,764 | Python | .py | 219 | 33.219178 | 161 | 0.581937 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,854 | period.py | selfspy_selfspy/selfspy/period.py |
# Copyright 2012 David Fendrich
# This file is part of Selfspy
# Selfspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Selfspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
import bisect
class Period:
def __init__(self, cutoff, maxtime):
self.times = []
self.cutoff = cutoff
self.maxtime = maxtime
def append(self, time):
ltimes = len(self.times)
end = min(time + self.cutoff, self.maxtime)
def check_in(i):
if self.times[i][0] <= time <= self.times[i][1]:
self.times[i] = (self.times[i][0], max(end, self.times[i][1]))
return True
return False
        def maybe_merge(i):
            # use the live list length; an insert may have grown it since
            # ltimes was computed
            if len(self.times) > i + 1:
if self.times[i][1] >= self.times[i + 1][0]:
self.times[i] = (self.times[i][0], self.times[i + 1][1])
self.times.pop(i + 1)
if ltimes == 0:
self.times.append((time, end))
return
i = bisect.bisect(self.times, (time,))
if i >= 1 and check_in(i - 1):
maybe_merge(i - 1)
elif i < ltimes and check_in(i):
maybe_merge(i)
else:
self.times.insert(i, (time, end))
maybe_merge(i)
def extend(self, times):
for time in times:
self.append(time)
def calc_total(self):
return sum(t2 - t1 for t1, t2 in self.times)
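# Usage sketch (illustrative): with a 180 second cutoff, keystrokes 60 seconds
# apart merge into one active period, while one an hour later opens a new one.
if __name__ == '__main__':
    p = Period(180, 10 ** 6)
    p.extend([0, 60])   # one period: (0, 240)
    p.append(3600)      # a second period: (3600, 3780)
    assert p.calc_total() == 420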
| 1,938 | Python | .py | 47 | 32.787234 | 78 | 0.600959 | selfspy/selfspy | 2,404 | 231 | 72 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,855 | setup.py | translate_pootle/setup.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import re
import sys
from distutils import log
from distutils.command.build import build as DistutilsBuild
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from pkg_resources import parse_version, require
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
from pootle import __version__
from pootle.constants import DJANGO_MINIMUM_REQUIRED_VERSION
from pootle.core.utils import version
README_FILE = 'README.rst'
def check_pep440_versions():
if require('setuptools')[0].parsed_version < parse_version('18.5'):
exit("setuptools %s is incompatible with Pootle. Please upgrade "
"using:\n"
"'pip install --upgrade setuptools'"
% require('setuptools')[0].version)
if require('pip')[0].parsed_version < parse_version('6.0'):
exit("pip %s is incompatible with Pootle. Please upgrade "
"using:\n"
"'pip install --upgrade pip'" % require('pip')[0].version)
def parse_requirements(file_name, recurse=False):
"""Parses a pip requirements file and returns a list of packages.
Use the result of this function in the ``install_requires`` field.
Copied from cburgmer/pdfserver.
"""
requirements = []
for line in open(file_name, 'r').read().split('\n'):
# Ignore comments, blank lines and included requirements files
if re.match(r'(\s*#)|(\s*$)|'
'((--allow-external|--allow-unverified) .*$)', line):
continue
if re.match(r'-r .*$', line):
if recurse:
requirements.extend(parse_requirements(
'requirements/' +
re.sub(r'-r\s*(.*[.]txt)$', r'\1', line), recurse))
continue
if re.match(r'^\s*-e\s+', line):
requirements.append(re.sub(
r'''\s*-e\s+ # -e marker
.* # URL
\#egg= # egg marker
([^\d]*)- # \1 dep name
([\.\d]* # \2 M.N.*
((a|b|rc|dev)+\d*)* # (optional) devN
)$''',
r'\1==\2', line, flags=re.VERBOSE))
log.warn("Pootle requires a non-PyPI dependency, when using pip "
"ensure you use the --process-dependency-links option.")
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
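# Illustration (hypothetical input): an editable requirement line such as
#
#     -e git+https://github.com/translate/translate.git#egg=translate-2.0.0
#
# is rewritten by the substitution above into the pinned form
# 'translate==2.0.0', and a warning about --process-dependency-links is logged.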
def parse_dependency_links(file_name, recurse=False):
dependency_links = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'^\s*-e\s+', line):
dependency_links.append(re.sub(r'\s*-e\s+', '', line))
if re.match(r'-r .*$', line):
if recurse:
dependency_links.extend(parse_dependency_links(
'requirements/' +
re.sub(r'-r\s*(.*[.]txt)$', r'\1', line), recurse))
continue
return dependency_links
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--tb=short', 'tests/']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
class PootleBuildMo(DistutilsBuild):
description = "compile Gettext PO files into MO"
user_options = [
('all', None,
"compile all language (don't use LINGUAS file)"),
('lang=', 'l',
"specify a language to compile"),
('check', None,
"check for errors"),
]
boolean_options = ['all']
po_path_base = os.path.join('pootle', 'locale')
_langs = []
def initialize_options(self):
self.all = False
self.lang = None
self.check = False
def finalize_options(self):
if self.all and self.lang is not None:
raise DistutilsOptionError(
"Can't use --all and --lang together"
)
if self.lang is not None:
self._langs = [self.lang]
elif self.all:
for lang in os.listdir(self.po_path_base):
if (os.path.isdir(os.path.join(self.po_path_base, lang)) and
lang != "templates"):
self._langs.append(lang)
else:
for lang in open(os.path.join('pootle', 'locale', 'LINGUAS')):
self._langs.append(lang.rstrip())
def build_mo(self):
"""Compile .mo files from available .po files"""
import subprocess
import gettext
from translate.storage import factory
        error_occurred = False
for lang in self._langs:
lang = lang.rstrip()
po_path = os.path.join('pootle', 'locale', lang)
mo_path = os.path.join('pootle', 'locale', lang, 'LC_MESSAGES')
if not os.path.exists(mo_path):
os.makedirs(mo_path)
for po, mo in (('pootle.po', 'django.mo'),
('pootle_js.po', 'djangojs.mo')):
po_filename = os.path.join(po_path, po)
mo_filename = os.path.join(mo_path, mo)
if not os.path.exists(po_filename):
log.warn("%s: missing file %s", lang, po_filename)
continue
if not os.path.exists(mo_path):
os.makedirs(mo_path)
log.info("compiling %s", lang)
if self.check:
command = ['msgfmt', '-c', '--strict',
'-o', mo_filename, po_filename]
else:
command = ['msgfmt', '--strict',
'-o', mo_filename, po_filename]
try:
subprocess.check_call(command, stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError:
                    error_occurred = True
except Exception as e:
log.warn("%s: skipping, running msgfmt failed: %s",
lang, e)
try:
store = factory.getobject(po_filename)
gettext.c2py(store.getheaderplural()[1])
except Exception:
log.warn("%s: invalid plural header in %s",
lang, po_filename)
        if error_occurred:
sys.exit(1)
def run(self):
self.build_mo()
class PootleUpdateReadme(Command):
"""Process a README file for branching"""
user_options = [
('write', 'w', 'Overwrite the %s file' % README_FILE),
('branch', 'b', 'Rewrite using branch rewriting (default)'),
('release', 'r', 'Rewrite using release or tag rewriting'),
# --check - to see that README is what is expected
]
description = "Update the %s file" % README_FILE
def initialize_options(self):
self.write = False
self.branch = True
self.release = False
def finalize_options(self):
if self.release:
self.branch = False
def run(self):
new_readme = parse_long_description(README_FILE, self.release)
if self.write:
with open(README_FILE, 'w') as readme:
readme.write(new_readme)
else:
print(new_readme)
class BuildChecksTemplatesCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import django
import codecs
from pootle.apps.pootle_checks.constants import (
CHECK_NAMES, EXCLUDED_FILTERS)
from translate.filters.checks import (TeeChecker, StandardChecker,
StandardUnitChecker)
from translate.lang.factory import get_all_languages
try:
from docutils.core import publish_parts
except ImportError:
from distutils.errors import DistutilsModuleError
raise DistutilsModuleError("Please install the docutils library.")
from pootle import syspath_override # noqa
django.setup()
def get_check_description(name, filterfunc):
"""Get a HTML snippet for a specific quality check description.
The quality check description is extracted from the check function
docstring (which uses reStructuredText) and rendered using docutils
to get the HTML snippet.
"""
# Provide a header with an anchor to refer to.
description = ('\n<h3 id="%s">%s</h3>\n\n' %
(name, unicode(CHECK_NAMES[name])))
# Clean the leading whitespace on each docstring line so it gets
# properly rendered.
docstring = "\n".join(line.strip()
for line in filterfunc.__doc__.split("\n"))
# Render the reStructuredText in the docstring into HTML.
description += publish_parts(docstring, writer_name="html")["body"]
return description
print("Regenerating Translate Toolkit quality checks descriptions")
# Get a checker with the Translate Toolkit checks. Note that filters
# that are not used in Pootle are excluded.
checkerclasses = [StandardChecker, StandardUnitChecker]
# Also include language-specific checks
checkerclasses.extend([type(lang.checker)
for lang in get_all_languages()
if lang.checker is not None])
fd = TeeChecker(
checkerclasses=checkerclasses
).getfilters(excludefilters=EXCLUDED_FILTERS)
docs = sorted(
get_check_description(name, f) for name, f in fd.items()
)
# Output the quality checks descriptions to the HTML file.
templates_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pootle", "templates"
)
filename = os.path.join(templates_dir, "help/_ttk_quality_checks.html")
with codecs.open(filename, "w", "utf-8") as f:
f.write(u"\n".join(docs))
print("Checks templates written to %r" % (filename))
def parse_long_description(filename, tag=False):
def reduce_header_level():
        # PyPI doesn't like the title to be underlined with "="
if tag:
readme_lines[1] = readme_lines[1].replace("=", "-")
def adjust_installation_command():
extra_options = []
if dependency_links:
extra_options += ["--process-dependency-links"]
if version.is_prerelease():
extra_options += ["--pre"]
for ln, line in enumerate(readme_lines):
if re.match(r'^\s*pip install\s+.*\s+Pootle$', line):
if extra_options:
readme_lines[ln] = (
" pip install %s Pootle\n" % " ".join(extra_options))
else:
readme_lines[ln] = " pip install Pootle\n"
def replace_urls():
from pootle.core.utils import version
branch = version.get_git_branch()
branch_escape = None
if branch is not None:
branch_escape = branch.replace('/', '%2F')
for ln, line in enumerate(readme_lines):
for pattern, replace, rewrite_type in (
# Release Notes
                (r'releases/[0-9]\.[0-9]\.[0-9]\.html',
'releases/%s.html' % version.get_main_version(),
'all'),
# Adjust docs away from /latest/
('/pootle/en/latest/',
'/pootle/en/%s/' % version.get_rtd_version(),
'branch'),
# Coverage - Codecov for branches
('codecov.io/gh/translate/pootle/branch/master',
'codecov.io/gh/translate/pootle/branch/%s' % branch_escape,
'branch'),
('shields.io/codecov/c/github/translate/pootle/master',
'shields.io/codecov/c/github/translate/pootle/%s' %
branch_escape,
'branch'),
# Coverage - Coveralls for tags
('codecov.io/gh/translate/pootle/branch/master',
'coveralls.io/github/translate/pootle?branch=%s' %
version.get_version(),
'tag'),
('shields.io/codecov/c/github/translate/pootle/master',
'shields.io/coveralls/translate/pootle/%s' %
version.get_version(),
'tag'),
# Travis - change only the badge, can't link to branch
('travis/translate/pootle/master',
'travis/translate/pootle/%s' % version.get_git_branch(),
'branch'),
('travis/translate/pootle/master',
'travis/translate/pootle/%s' % version.get_version(),
'tag'),
# Landscape
('landscape.io/github/translate/pootle/master',
'landscape.io/github/translate/pootle/%s' %
version.get_git_branch(),
'branch'),
# Requires.io
('requires/github/translate/pootle/master',
'requires/github/translate/pootle/%s' %
version.get_git_branch(),
'branch'),
                (r'requirements/\?branch=master',
'requirements/?branch=%s' % branch_escape,
'branch'),
('https://img.shields.io/requires/.*',
'https://requires.io/github/translate/'
'pootle/requirements.svg?tag=%s'
% version.get_version(),
'tag'),
                (r'requirements/\?branch=master',
'requirements/?tag=%s' % version.get_version(),
'tag'),
):
if ((rewrite_type == 'tag' and tag)
or (rewrite_type == 'branch'
and not tag
and branch is not None)
or rewrite_type == 'all'):
readme_lines[ln] = re.sub(pattern, replace, readme_lines[ln])
filename = os.path.join(os.path.dirname(__file__), filename)
with open(filename) as f:
readme_lines = f.readlines()
reduce_header_level()
adjust_installation_command()
replace_urls()
return "".join(readme_lines)
check_pep440_versions()
dependency_links = []
install_requires = parse_requirements('requirements/base.txt')
dependency_links += parse_dependency_links('requirements/base.txt')
tests_require = parse_requirements('requirements/tests.txt')
dependency_links += parse_dependency_links('requirements/tests.txt')
extras_require = {}
extras_require['dev'] = parse_requirements('requirements/dev.txt', recurse=True)
dependency_links += parse_dependency_links('requirements/dev.txt', recurse=True)
# Database dependencies
extras_require['mysql'] = parse_requirements('requirements/_db_mysql.txt')
dependency_links += parse_dependency_links('requirements/_db_mysql.txt')
extras_require['postgresql'] = parse_requirements('requirements/_db_postgresql.txt')
dependency_links += parse_dependency_links('requirements/_db_postgresql.txt')
# Pootle FS plugins
extras_require['git'] = parse_requirements('requirements/_pootle_fs_git.txt')
dependency_links += parse_dependency_links('requirements/_pootle_fs_git.txt')
# Elasticsearch
extras_require['es1'] = parse_requirements('requirements/_es_1.txt')
dependency_links += parse_dependency_links('requirements/_es_1.txt')
extras_require['es2'] = parse_requirements('requirements/_es_2.txt')
dependency_links += parse_dependency_links('requirements/_es_2.txt')
extras_require['es5'] = parse_requirements('requirements/_es_5.txt')
dependency_links += parse_dependency_links('requirements/_es_5.txt')
# Testing
extras_require['travis'] = parse_requirements('requirements/travis.txt',
recurse=True)
dependency_links += parse_dependency_links('requirements/travis.txt',
recurse=True)
extras_require['appveyor'] = parse_requirements('requirements/appveyor.txt',
recurse=True)
dependency_links += parse_dependency_links('requirements/appveyor.txt',
recurse=True)
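# Note (illustrative, not part of the original file): the extras declared
# above enable installs such as "pip install Pootle[mysql]" or
# "pip install Pootle[postgresql,git]", which pull in the matching
# requirements/_*.txt pins.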
setup(
name="Pootle",
version=__version__,
description="An online collaborative localization tool.",
long_description=parse_long_description(README_FILE, tag=True),
author="Translate",
author_email="dev@translate.org.za",
license="GNU General Public License 3 or later (GPLv3+)",
url="http://pootle.translatehouse.org",
download_url="https://github.com/translate/pootle/releases/tag/" +
__version__,
install_requires=install_requires,
dependency_links=dependency_links,
tests_require=tests_require,
extras_require=extras_require,
platforms=["any"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: %s"
% ".".join(map(str, DJANGO_MINIMUM_REQUIRED_VERSION[:2])),
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: "
"GNU General Public License v3 or later (GPLv3+)",
"Operating System :: OS Independent",
"Operating System :: Unix",
"Programming Language :: JavaScript",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Localization",
"Topic :: Text Processing :: Linguistic"
],
zip_safe=False,
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'pootle = pootle.runner:main',
],
'pytest11': [
'pytest_pootle = pytest_pootle.plugin',
]
},
cmdclass={
'build_checks_templates': BuildChecksTemplatesCommand,
'build_mo': PootleBuildMo,
'update_readme': PootleUpdateReadme,
'test': PyTest,
},
)
| 18,915 | Python | .py | 426 | 33.06338 | 84 | 0.575045 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,856 | run_coveralls.py | translate_pootle/run_coveralls.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
# This file was cribbed from https://github.com/brechtm/citeproc-py
import os
from distutils.sysconfig import get_python_lib
from subprocess import call
if __name__ == '__main__':
# chdir to the site-packages directory so the report lists relative paths
orig_dir = os.getcwd()
dot_coverage_path = os.path.join(orig_dir, '.coverage')
os.chdir(get_python_lib())
try:
os.remove('.coverage')
except OSError:
pass
os.symlink(dot_coverage_path, '.coverage')
# create a report from the coverage data
    if 'TRAVIS' in os.environ:
        # exit successfully even if coveralls fails, so that a reporting
        # hiccup does not break the CI build
        call('coveralls')
        raise SystemExit(0)
else:
rc = call(['coverage', 'report'])
raise SystemExit(rc)
| 1,014 | Python | .py | 29 | 30.758621 | 77 | 0.690816 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,857 | manage.py | translate_pootle/manage.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import sys
import warnings
from django.core.management import execute_from_command_line
from pootle import syspath_override # noqa
from pootle.core.log import cmd_log
if __name__ == "__main__":
warnings.warn("Deprecated. Use 'pootle' command instead", DeprecationWarning)
cmd_log(*sys.argv)
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
execute_from_command_line()
| 705 | Python | .py | 19 | 35 | 81 | 0.756241 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,858 | .pylint-travisrc | translate_pootle/.pylint-travisrc |
# Travis specific pylint configuration #
# ######################################
#
# For Travis we only care to fail if a test fails, so no fancy reporting
#
# Adding new enabled checks:
# 1. They must pass
# 2. They must pass reliably, no false positives
# 3. Prefer a checker over an individual check
# 4. We don't include anything that the devs can't agree on in terms of linting
[MASTER]
ignore=migrations,.eggs,.git,.tox
[MESSAGES CONTROL]
disable=all
# For checkers and individual checks see
# https://pylint.readthedocs.io/en/latest/features.html
enable=reimported, logging, stdlib, string, unneeded-not, undefined-loop-variable, old-octal-literal, print-statement
dummy-variables-rgx=__|^.*[^_]_$
[REPORTS]
msg-template="{path}:{line}: {msg_id} {msg} ({symbol})"
output-format=colorized
reports=no
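# Usage sketch (assumed invocation):
#   pylint --rcfile=.pylint-travisrc pootle/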
| 812 | Python | .py | 22 | 35.727273 | 117 | 0.73028 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,859 | conf.py | translate_pootle/docs/conf.py |
# -*- coding: utf-8 -*-
#
# Pootle documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import os
import re
import sys
from django import __version__ as dj_version_actual
from pootle.core.utils import version as pootle_version
from translate.__version__ import sver as ttk_version_actual
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('_ext'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'jsonlexer',
'pootle_docs',
'removed',
'sphinx.ext.coverage',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pootle'
copyright = u'2004-2017'
author = u'Pootle contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pootle_version.get_docs_version()
# The full version, including alpha/beta/rc tags.
from pootle import __version__
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
'_themes/README.rst',
'specs'
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
#todo_include_todos = False
# Build a reference icons.rst.inc file that we can include for inline
# icons.
icons_dir = "../pootle/static/images/sprite"
icons_inc_file_name = "icons.rst.inc"
build_icons_inc = False
if not os.path.isfile(os.path.join(os.curdir, icons_inc_file_name)):
build_icons_inc = True
else:
icon_inc_mtime = os.stat(os.path.join(os.curdir,
icons_inc_file_name)).st_mtime
for icon_image in os.listdir(icons_dir):
if os.stat(os.path.join(icons_dir,
icon_image)).st_mtime > icon_inc_mtime:
build_icons_inc = True
if build_icons_inc:
with open(icons_inc_file_name, "w") as icons_txt_file:
for icon_image in os.listdir(icons_dir):
icon_name = icon_image[:icon_image.rfind(".")]
print(".. |icon:" + icon_name + "| " + \
"image:: /" + icons_dir + "/" + icon_image, file=icons_txt_file)
print(" :alt: " + \
icon_name.replace("-", " ").replace("_", " ") + \
" icon", file=icons_txt_file)
print(file=icons_txt_file)
# Files to include at the end of every .rst file
rst_epilog = """
.. include:: /%s
""" % icons_inc_file_name
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx-bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'nosidebar': True,
'link_color': '#489b7',
'link_color_hover': '#489b7',
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/pootle_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../pootle/static/favicon/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pootledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Pootle.tex', u'Pootle Documentation',
u'Pootle contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pootle', u'Pootle Documentation',
[u'Translate.org.za'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pootle', u'Pootle Documentation',
u'Pootle contributors', 'Pootle', 'Online translation tool.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# -- Options for Intersphinx ---------------------------------------------
from pootle.checks import DJANGO_MINIMUM_REQUIRED_VERSION
django_ver = ".".join(map(str, DJANGO_MINIMUM_REQUIRED_VERSION[:2]))
intersphinx_mapping = {
'python': ('https://docs.python.org/2.7', None),
'django': ('https://docs.djangoproject.com/en/%s' % django_ver,
'https://docs.djangoproject.com/en/%s/_objects/' % django_ver),
'toolkit': ('http://docs.translatehouse.org/projects/translate-toolkit/en/latest/', None),
'virtualenvwrapper': ('https://virtualenvwrapper.readthedocs.io/en/latest/', None),
'pip': ('https://pip.pypa.io/en/stable/', None),
}
# -- Options for External links -------------------------------------------------
extlinks = {
# :role: (URL, prefix)
'issue': ('https://github.com/translate/pootle/issues/%s', 'issue '),
'man': ('http://linux.die.net/man/1/%s', ''),
'wiki': ('http://translate.sourceforge.net/wiki/%s', ''),
'wp': ('https://en.wikipedia.org/wiki/%s', ''),
}
# -- Dependency versions ----
install_options = set()
requirements_dir = '../requirements'
for requirement in os.listdir(requirements_dir):
with open(os.path.join(requirements_dir, requirement)) as req:
for line in req.readlines():
if re.match(r'^\s*-e\s+', line):
install_options.add("--process-dependency-links")
break
if pootle_version.is_prerelease():
install_options.add("--pre")
if install_options:
install_options_string = " ".join(install_options)
else:
install_options_string = "\\"
rst_prolog = """
.. |django_ver| replace:: %s
.. |ttk_ver| replace:: %s
.. |--process-dependency-links --pre| replace:: %s
""" % (dj_version_actual, ttk_version_actual, install_options_string)
| 14,845 | Python | .py | 347 | 40.170029 | 94 | 0.698943 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,860 | jsonlexer.py | translate_pootle/docs/_ext/jsonlexer.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
"""Sphinx extension to enable JSON code highlighting."""
def setup(app):
# enable Pygments json lexer
try:
import pygments
        # compare parsed version components; a plain string comparison
        # would mis-order releases like '1.10' and '1.5'
        if tuple(int(p) for p in pygments.__version__.split('.')[:2]) >= (1, 5):
# use JSON lexer included in recent versions of Pygments
from pygments.lexers import JsonLexer
else:
# use JSON lexer from pygments-json if installed
from pygson.json_lexer import JSONLexer as JsonLexer
except ImportError:
pass # not fatal if we have old (or no) Pygments and no pygments-json
else:
app.add_lexer('json', JsonLexer())
return {"parallel_read_safe": True}
| 937 | Python | .py | 24 | 33.375 | 78 | 0.682068 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,861 | pootle_docs.py | translate_pootle/docs/_ext/pootle_docs.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
"""Sphinx extension with custom stuff for Pootle docs."""
from sphinx import addnodes
from sphinx.domains.std import Cmdoption
def setup(app):
# Django :xxx: roles for intersphinx cross-references
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
app.add_object_type(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; django-admin command",
parse_node=parse_django_admin_node,
)
app.add_directive('django-admin-option', Cmdoption)
return {"parallel_read_safe": True}
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env.ref_context['std:program'] = command
signode += addnodes.desc_name(sig, sig)
return command
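# Illustrative reST usage for the objects registered above (hypothetical
# command and option names):
#
#   .. django-admin:: sync_stores
#
#   .. django-admin-option:: --force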
| 1,102 | Python | .py | 30 | 32.133333 | 77 | 0.702068 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,862 | removed.py | translate_pootle/docs/_ext/removed.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
"""Directove to describe a removal in a specific version.
Copied from: sphinx/directives/other.py
"""
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx import addnodes
from sphinx.util.nodes import set_source_info
versionlabels = {
'removed': 'Removed in version %s',
}
class VersionChange(Directive):
"""
Directive to describe a change/addition/deprecation in a specific version.
"""
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
node = addnodes.versionmodified()
node.document = self.state.document
set_source_info(self, node)
node['type'] = self.name
node['version'] = self.arguments[0]
text = versionlabels[self.name] % self.arguments[0]
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1],
self.lineno+1)
para = nodes.paragraph(self.arguments[1], '', *inodes,
translatable=False)
set_source_info(self, para)
node.append(para)
else:
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
content.source = node[0].source
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content,
translatable=False))
node[0].insert(0, nodes.inline('', '%s: ' % text,
classes=['versionmodified']))
else:
para = nodes.paragraph('', '',
nodes.inline('', '%s.' % text,
classes=['versionmodified']),
translatable=False)
node.append(para)
env = self.state.document.settings.env
# XXX should record node.source as well
env.note_versionchange(node['type'], node['version'], node, node.line)
return [node] + messages
def setup(app):
app.add_directive('removed', VersionChange)
return {"parallel_read_safe": True}
| 2,832 | Python | .py | 67 | 31.074627 | 78 | 0.579005 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,863 | apache-wsgi.py | translate_pootle/docs/server/apache-wsgi.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import site
import sys
# You probably will need to change these paths to match your deployment,
# most likely because of the Python version you are using.
ALLDIRS = [
'/var/www/pootle/env/lib/python2.7/site-packages',
'/var/www/pootle/env/lib/python2.7/site-packages/pootle/apps',
]
# Remember original sys.path.
prev_sys_path = list(sys.path)
# Add each new site-packages directory.
for directory in ALLDIRS:
site.addsitedir(directory)
# Reorder sys.path so the new directories are at the front.
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
# Set the Pootle settings module as DJANGO_SETTINGS_MODULE.
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
# Set the WSGI application.
def application(environ, start_response):
"""Wrapper for Django's WSGIHandler().
    This allows picking up values specified by SetEnv in the Apache
    configuration, or interposing other changes to that environment,
    such as installing middleware.
"""
try:
os.environ['POOTLE_SETTINGS'] = environ['POOTLE_SETTINGS']
except KeyError:
pass
from django.core.wsgi import get_wsgi_application
_wsgi_application = get_wsgi_application()
return _wsgi_application(environ, start_response)
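# Illustrative Apache configuration (assumed, not part of this file): the
# POOTLE_SETTINGS lookup above picks up a directive such as
#
#   SetEnv POOTLE_SETTINGS /var/www/pootle/custom_settings.conf
#
# from the enclosing VirtualHost before Django's WSGI handler is created.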
| 1,407 | Python | .py | 39 | 32.487179 | 72 | 0.735793 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,864 | plugin.py | translate_pootle/pytest_pootle/plugin.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from pkgutil import iter_modules
import pytest
from . import fixtures
from .fixtures import (
core as fixtures_core, formats as fixtures_formats,
models as fixtures_models, pootle_fs as fixtures_fs)
from .fixtures.core import (
management as fixtures_core_management, utils as fixtures_core_utils)
def _load_fixtures(*modules):
for mod in modules:
path = mod.__path__
prefix = '%s.' % mod.__name__
for loader_, name, is_pkg in iter_modules(path, prefix):
if not is_pkg:
yield name
def pytest_addoption(parser):
parser.addoption(
"--debug-tests",
action="store",
default="",
help="Debug tests to a given file")
parser.addoption(
"--force-migration",
action="store_true",
default=False,
help="Force migration before test run")
parser.addoption(
"--memusage",
action="store_true",
default=False,
help="Run memusage tests")
def pytest_configure(config):
# register an additional marker
config.addinivalue_line(
"markers", "pootle_vfolders: requires special virtual folder projects")
config.addinivalue_line(
"markers", "pootle_memusage: memory usage tests")
pytest_plugins = tuple(
_load_fixtures(
fixtures,
fixtures_core,
fixtures_core_management,
fixtures_core_utils,
fixtures_formats,
fixtures_models,
fixtures_fs))
for plugin in pytest_plugins:
config.pluginmanager.import_plugin(plugin)
def pytest_runtest_setup(item):
marker = item.get_marker("pootle_memusage")
skip_memtests = (
(marker is not None
and not (item.config.getoption("--memusage")
or not item._request.getfixturevalue("memusage"))))
if skip_memtests:
pytest.skip("test requires memusage flag and dj.debug.memusage")
| 2,236 | Python | .py | 63 | 28.47619 | 79 | 0.653401 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,865 | search.py | translate_pootle/pytest_pootle/search.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from itertools import groupby
from django.db.models import Max
from django.urls import resolve
from pootle.core.dateparse import parse_datetime
from pootle.core.url_helpers import split_pootle_path
from pootle_checks.utils import get_category_id
from pootle_misc.util import get_date_interval
from pootle_store.constants import ALLOWED_SORTS, SIMPLY_SORTED
from pootle_store.models import Unit
from pootle_store.unit.filters import UnitSearchFilter, UnitTextSearch
from pootle_store.unit.results import GroupedResults, StoreResults
def get_max_and_order_fields(sort_by):
if sort_by[0] == '-':
max_field = sort_by[1:]
sort_order = '-sort_by_field'
else:
max_field = sort_by
sort_order = 'sort_by_field'
return max_field, sort_order
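# Illustrative behaviour of the helper above:
#   get_max_and_order_fields("creation_time")  -> ("creation_time", "sort_by_field")
#   get_max_and_order_fields("-creation_time") -> ("creation_time", "-sort_by_field")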
def calculate_search_results(kwargs, user):
pootle_path = kwargs["pootle_path"]
category = kwargs.get("category")
checks = kwargs.get("checks")
offset = kwargs.get("offset", 0)
limit = kwargs.get("count", 9)
modified_since = kwargs.get("modified-since")
month = kwargs.get("month")
search = kwargs.get("search")
sfields = kwargs.get("sfields")
soptions = kwargs.get("soptions", [])
sort = kwargs.get("sort", None)
vfolder = kwargs.get("vfolder", None)
language_code, project_code, dir_path_, filename = (
split_pootle_path(kwargs["pootle_path"]))
uids = [
int(x)
for x
in kwargs.get("uids", "").split(",")
if x]
unit_filter = kwargs.get("filter")
if modified_since:
modified_since = parse_datetime(modified_since)
if month:
month = get_date_interval(month)
path_kwargs = {
k: v
for k, v
in resolve(pootle_path).kwargs.items()
if k in [
"language_code",
"project_code",
"dir_path",
"filename"]}
qs = (
Unit.objects.get_translatable(user=user, **path_kwargs)
.order_by("store", "index"))
if vfolder is not None:
qs = qs.filter(store__vfolders=vfolder)
# if "filter" is present in request vars...
if unit_filter:
# filter the results accordingly
qs = UnitSearchFilter().filter(
qs,
unit_filter,
user=user,
checks=checks,
category=get_category_id(category))
# filter by modified
if modified_since:
qs = qs.filter(change__submitted_on__gt=modified_since).distinct()
if month is not None:
qs = qs.filter(
change__submitted_on__gte=month[0],
change__submitted_on__lte=month[1]).distinct()
# sort results
if unit_filter in ["my-suggestions", "user-suggestions"]:
sort_on = "suggestions"
elif unit_filter in ["my-submissions", "user-submissions"]:
sort_on = "submissions"
else:
sort_on = "units"
sort_by = ALLOWED_SORTS[sort_on].get(sort, None)
if sort_by is not None:
# filtered sort
if sort_on in SIMPLY_SORTED:
qs = qs.order_by(sort_by, "store__pootle_path", "index")
else:
max_field, sort_order = get_max_and_order_fields(sort_by)
qs = (
qs.annotate(sort_by_field=Max(max_field))
.order_by(sort_order, "store__pootle_path", "index"))
# text search
if search and sfields:
qs = UnitTextSearch(qs).search(
search,
[sfields],
"exact" in soptions)
find_unit = (
not offset
and language_code
and project_code
and filename
and uids)
start = offset
total = qs.count()
    if find_unit:
        # find the uid in the Store
        uid_list = list(qs.values_list("pk", flat=True))
        unit_index = uid_list.index(uids[0])
        start = int(unit_index / (2 * limit)) * (2 * limit)
    # end must be defined whether or not a specific unit was looked up
    end = min(start + (2 * limit), total)
unit_groups = []
units_by_path = groupby(
qs.values(*GroupedResults.select_fields)[start:end],
lambda x: x["store__pootle_path"])
for pootle_path, units in units_by_path:
unit_groups.append(
{pootle_path: StoreResults(units).data})
total = qs.count()
return total, start, min(end, total), unit_groups
| 4,667 | Python | .py | 127 | 28.76378 | 78 | 0.609013 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,866 | factories.py | translate_pootle/pytest_pootle/factories.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from hashlib import md5
import factory
from django.utils import timezone
from django.utils.encoding import force_bytes
import pootle_store
from pootle.core.delegate import wordcount
from pootle.core.utils.timezone import make_aware
class SubmissionFactory(factory.django.DjangoModelFactory):
creation_time = make_aware(timezone.now())
class Meta(object):
model = 'pootle_statistics.Submission'
class UserFactory(factory.django.DjangoModelFactory):
username = factory.Sequence(lambda n: 'foo%s' % n)
email = factory.LazyAttribute(lambda o: '%s@example.org' % o.username)
class Meta(object):
model = 'accounts.User'
class LegalPageFactory(factory.django.DjangoModelFactory):
title = factory.Sequence(lambda n: 'title%s' % n)
virtual_path = factory.Sequence(lambda n: '/foo/bar%s' % n)
class Meta(object):
model = 'staticpages.LegalPage'
class AgreementFactory(factory.django.DjangoModelFactory):
user = factory.SubFactory(UserFactory)
document = factory.SubFactory(LegalPageFactory)
class Meta(object):
model = 'staticpages.Agreement'
class DirectoryFactory(factory.django.DjangoModelFactory):
@factory.lazy_attribute
def pootle_path(self):
if self.parent is None:
return "/"
return (
"%s/%s"
% (self.parent.pootle_path.rstrip("/"),
self.name))
class Meta(object):
model = 'pootle_app.Directory'
django_get_or_create = ("name", "parent")
obsolete = False
class LanguageDBFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'pootle_language.Language'
django_get_or_create = ("code", )
nplurals = 2
@factory.lazy_attribute
def code(self):
from pootle_language.models import Language
        # returns an incrementing index relative to the existing languages
return 'language%s' % (Language.objects.count() - 1)
@factory.lazy_attribute
def fullname(self):
from pootle_language.models import Language
        # returns an incrementing index relative to the existing languages
return 'Language %s' % (Language.objects.count() - 1)
@factory.lazy_attribute
def specialchars(self):
from pootle_language.models import Language
return "" if (Language.objects.count() - 1) % 2 == 0 else u"ñ\u200c€"
class ProjectDBFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'pootle_project.Project'
django_get_or_create = ("code", )
@factory.lazy_attribute
def code(self):
from pootle_project.models import Project
        # returns an incrementing index relative to the existing projects
return 'project%s' % Project.objects.count()
@factory.lazy_attribute
def fullname(self):
from pootle_project.models import Project
        # returns an incrementing index relative to the existing projects
return 'Project %s' % Project.objects.count()
checkstyle = "standard"
class StoreDBFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'pootle_store.Store'
django_get_or_create = ("pootle_path", )
parent = factory.LazyAttribute(
lambda s: s.translation_project.directory)
obsolete = False
@factory.lazy_attribute
def pootle_path(self):
return (
"%s/%s"
% (self.translation_project.pootle_path.rstrip("/"),
self.name))
@factory.lazy_attribute
def name(self):
# returns an incrementing index relative to the tp
return 'store%s.po' % self.translation_project.stores.count()
class TranslationProjectFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'pootle_translationproject.TranslationProject'
class UnitDBFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'pootle_store.Unit'
state = pootle_store.constants.UNTRANSLATED
@factory.lazy_attribute
def index(self):
# returns an incrementing index relative to the store
return self.store.unit_set.count()
@factory.lazy_attribute
def unitid(self):
return self.source_f
@factory.lazy_attribute
def unitid_hash(self):
return md5(force_bytes(self.unitid)).hexdigest()
@factory.lazy_attribute
def source_f(self):
return (
"%s Source %s %s%s"
% (pootle_store.util.get_state_name(self.state).capitalize(),
self.store.pootle_path,
self.index, "%s."))
@factory.lazy_attribute
def target_f(self):
state_name = pootle_store.util.get_state_name(self.state)
endings = [" ", "", "%d", "\t"]
if state_name in ["translated", "fuzzy", "obsolete"]:
# make half fail checks
if not self.index % 2:
ending = endings[self.index % 4]
else:
ending = "%s."
return (
"%s Target %s %s%s"
% (state_name.capitalize(),
self.store.pootle_path,
self.index,
ending))
return ""
@factory.lazy_attribute
def target_wordcount(self):
from pootle_store.models import Unit
counter = wordcount.get(Unit)
return counter.count_words(self.target_f)
class VirtualFolderDBFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'virtualfolder.VirtualFolder'
priority = 2
is_public = True
@factory.lazy_attribute
def name(self):
from virtualfolder.models import VirtualFolder
return 'virtualfolder%s' % VirtualFolder.objects.count()
class AnnouncementFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'staticpages.StaticPage'
active = True
class SuggestionFactory(factory.django.DjangoModelFactory):
class Meta(object):
model = 'pootle_store.Suggestion'
| 6,277 | Python | .py | 159 | 31.993711 | 77 | 0.670251 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,867 | utils.py | translate_pootle/pytest_pootle/utils.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
"""Random utilities for tests."""
import io
import sys
import time
import types
from contextlib import contextmanager
from datetime import datetime
from uuid import uuid4
from translate.storage.factory import getclass
from pootle.core.plugin.delegate import Getter, Provider
STRING_STORE = """
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\\n"
"Report-Msgid-Bugs-To: \\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
"X-Generator: Pootle Tests\\n"
%(x_pootle_headers)s
%(units)s
"""
STRING_POOTLE_HEADERS = """
"X-Pootle-Path: %(pootle_path)s\\n"
"X-Pootle-Revision: %(revision)s\\n"
"""
STRING_UNIT = """
#: %(src)s
msgid "%(src)s"
msgstr "%(target)s"
"""
FUZZY_STRING_UNIT = """
#: %(src)s
#, fuzzy
msgid "%(src)s"
msgstr "%(target)s"
"""
def setup_store(pootle_path):
from pootle.core.url_helpers import split_pootle_path
from pootle_translationproject.models import TranslationProject
from .factories import StoreDBFactory
(lang_code, proj_code,
dir_path, filename) = split_pootle_path(pootle_path)
tp = TranslationProject.objects.get(
language__code=lang_code, project__code=proj_code)
return StoreDBFactory(
translation_project=tp, parent=tp.directory, name=filename)
def create_store(pootle_path=None, store_revision=None, units=None):
_units = []
for src, target, is_fuzzy in units or []:
if is_fuzzy:
_units.append(FUZZY_STRING_UNIT % {"src": src, "target": target})
else:
_units.append(STRING_UNIT % {"src": src, "target": target})
units = "\n\n".join(_units)
x_pootle_headers = ""
if pootle_path and store_revision:
x_pootle_headers = (STRING_POOTLE_HEADERS.strip()
% {"pootle_path": pootle_path,
"revision": store_revision})
string_store = STRING_STORE % {"x_pootle_headers": x_pootle_headers,
"units": units}
io_store = io.BytesIO(string_store.encode('utf-8'))
return getclass(io_store)(io_store.read())
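# Illustrative call (hypothetical unit data): build an in-memory PO store
# with one translated and one fuzzy unit:
#
#   store = create_store(
#       pootle_path="/language0/project0/store0.po",
#       store_revision=3,
#       units=[("Hello", "Bonjour", False), ("Bye", "Au revoir", True)])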
def get_test_uids(offset=0, count=1, pootle_path="^/language0/"):
"""Returns a list translated unit uids from ~middle of
translated units dataset
"""
from pootle_store.constants import TRANSLATED
from pootle_store.models import Unit
units = Unit.objects.filter(
store__pootle_path__regex=pootle_path).filter(state=TRANSLATED)
begin = (units.count() / 2) + offset
return list(units[begin: begin + count].values_list("pk", flat=True))
def items_equal(left, right):
"""Returns `True` if items in `left` list are equal to items in
`right` list.
"""
return sorted(left) == sorted(right)
def create_api_request(rf, method='get', url='/', data='', user=None,
encode_as_json=True):
"""Convenience function to create and setup fake requests."""
content_type = 'application/x-www-form-urlencoded'
if data and encode_as_json:
from pootle.core.utils.json import jsonify
content_type = 'application/json'
data = jsonify(data)
request_method = getattr(rf, method.lower())
request = request_method(url, data=data, content_type=content_type)
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
if user is not None:
request.user = user
return request
def update_store(store, units=None, store_revision=None,
user=None, submission_type=None, resolve_conflict=None):
from pootle_store.models import POOTLE_WINS
if resolve_conflict is None:
resolve_conflict = POOTLE_WINS
store.update(
store=create_store(units=units),
store_revision=store_revision,
user=user, submission_type=submission_type,
resolve_conflict=resolve_conflict)
def get_translated_storefile(store, pootle_path=None):
"""Returns file store with added translations for untranslated units."""
storeclass = store.syncer.file_class
filestore = store.syncer.convert(storeclass)
for unit in filestore.units:
if not unit.istranslated():
unit.target = "Translation of %s" % unit.source
path = pootle_path if pootle_path is not None else store.pootle_path
filestore.updateheader(add=True, X_Pootle_Path=path)
filestore.updateheader(add=True,
X_Pootle_Revision=store.get_max_unit_revision())
return filestore
def add_store_fs(store, fs_path, synced=False):
from pootle_fs.models import StoreFS
if synced:
return StoreFS.objects.create(
store=store,
path=fs_path,
last_sync_hash=uuid4().hex,
last_sync_revision=store.get_max_unit_revision())
return StoreFS.objects.create(
store=store,
path=fs_path)
def log_test_start(debug_logger):
debug_logger.debug(
"\n%s\nTESTS START: %s\nTESTS: py.test %s\n%s",
"=" * 80,
datetime.now(),
" ".join(sys.argv[1:]),
"=" * 80)
def log_test_timing(debug_logger, timings, name, start):
from django.db import connection
time_taken = time.time() - start
timings["tests"][name] = dict(
slow_queries=[
q for q
in connection.queries
if float(q["time"]) > 0],
query_count=len(connection.queries),
timing=time_taken)
debug_logger.debug(
"{: <70} {: <10} {: <10}".format(
*(name,
round(time_taken, 4),
len(connection.queries))))
def log_test_report(debug_logger, timings):
debug_logger.debug(
"%s\nTESTS END: %s",
"=" * 80,
datetime.now())
total_time = time.time() - timings["start"]
total_queries = sum(
t["query_count"]
for t
in timings["tests"].values())
if total_queries:
avg_query_time = total_time / total_queries
debug_logger.debug(
"TESTS AVERAGE query time: %s",
avg_query_time)
debug_logger.debug(
"TESTS TOTAL test time: %s",
total_time)
debug_logger.debug(
"TESTS TOTAL queries: %s",
total_queries)
debug_logger.debug("%s\n" % ("=" * 80))
@contextmanager
def suppress_getter(getter):
_orig_get = getter.get
_orig_connect = getter.connect
temp_getter = Getter()
def suppressed_get(self, *args, **kwargs):
return temp_getter.get(*args, **kwargs)
def suppressed_connect(self, func, *args, **kwargs):
return temp_getter.connect(func, *args, **kwargs)
getter.get = types.MethodType(suppressed_get, getter)
getter.connect = types.MethodType(suppressed_connect, getter)
try:
yield
finally:
getter.get = _orig_get
getter.connect = _orig_connect
@contextmanager
def suppress_provider(provider):
_orig_gather = provider.gather
_orig_connect = provider.connect
temp_provider = Provider()
def suppressed_gather(self, *args, **kwargs):
return temp_provider.gather(*args, **kwargs)
def suppressed_connect(self, func, *args, **kwargs):
return temp_provider.connect(func, *args, **kwargs)
provider.gather = types.MethodType(suppressed_gather, provider)
provider.connect = types.MethodType(suppressed_connect, provider)
try:
yield
finally:
provider.gather = _orig_gather
provider.connect = _orig_connect
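# Usage sketch (assumed delegate): temporarily detach all receivers from a
# getter within a test, so calls fall back to a fresh, unconnected Getter:
#
#   from pootle.core.delegate import formats
#
#   with suppress_getter(formats):
#       ...  # formats.get(...) now hits the temporary Getter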
| 7,742 | Python | .py | 209 | 30.703349 | 77 | 0.655836 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,868 | suite.py | translate_pootle/pytest_pootle/suite.py |
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
def view_context_test(ctx, **assertions):
for k, v in assertions.items():
if k == "check_categories":
for i, cat in enumerate(ctx[k]):
assert v[i] == cat
elif k == "checks" and ctx["page"] == "translate":
for _k, _v in ctx[k].items():
for i, check in enumerate(v[_k]["checks"]):
for __k, __v in check.items():
assert _v["checks"][i][__k] == __v
elif k in ["translation_states", "checks"] and ctx["page"] == "browse":
for i, cat in enumerate(ctx[k]):
for _k, _v in cat.items():
assert str(ctx[k][i][_k]) == str(_v)
elif k == "search_form":
assert ctx[k].as_p() == v.as_p()
elif k == "table":
for tk in ["id", "fields", "headings"]:
assert ctx[k][tk] == v[tk]
assert list(ctx[k]["rows"]) == list(v["rows"])
else:
assert ctx[k] == v
| 1,271 | Python | .py | 29 | 33.655172 | 79 | 0.51371 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,869 | env.py | translate_pootle/pytest_pootle/env.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from translate.storage.factory import getclass
class PootleTestEnv(object):
methods = (
"redis", "case_sensitive_schema", "formats", "site_root",
"languages", "suggestion_states", "site_matrix", "system_users",
"permissions", "site_permissions", "tps", "templates",
"disabled_project", "subdirs", "submissions", "announcements",
"terminology", "fs", "vfolders", "complex_po")
def setup(self, **kwargs):
for method in self.methods:
should_setup = (
method not in kwargs
or kwargs[method])
if should_setup:
getattr(self, "setup_%s" % method)()
def setup_formats(self):
from pootle.core.delegate import formats
formats.get().initialize()
def setup_complex_po(self):
import pytest_pootle
from pytest_pootle.factories import StoreDBFactory
from pootle_translationproject.models import TranslationProject
po_file = os.path.join(
os.path.dirname(pytest_pootle.__file__),
*("data", "po", "complex.po"))
with open(po_file) as f:
ttk = getclass(f)(f.read())
tp = TranslationProject.objects.get(
project__code="project0",
language__code="language0")
store = StoreDBFactory(
parent=tp.directory,
translation_project=tp,
name="complex.po")
store.update(ttk)
def setup_suggestion_states(self):
from pootle_store.models import SuggestionState
for state in ["pending", "accepted", "rejected"]:
SuggestionState.objects.get_or_create(name=state)
def setup_announcements(self):
from pytest_pootle.factories import AnnouncementFactory
from pootle_project.models import Project
from pootle_language.models import Language
from pootle_translationproject.models import TranslationProject
for language in Language.objects.all():
AnnouncementFactory(
title="Language announcement for: %s" % language,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/languages/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/%s" % language.code)
for project in Project.objects.all():
AnnouncementFactory(
title="Project announcement for: %s" % project,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/projects/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/projects/%s" % project.code)
for tp in TranslationProject.objects.all():
AnnouncementFactory(
title="TP announcement for: %s" % tp,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/tps/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/%s/%s"
% (tp.language.code, tp.project.code))
def setup_case_sensitive_schema(self):
from django.db import connection
from django.apps import apps
from pootle.core.utils.db import set_mysql_collation_for_column
cursor = connection.cursor()
# Language
set_mysql_collation_for_column(
apps,
cursor,
"pootle_language.Language",
"code",
"utf8_general_ci",
"varchar(50)")
# Project
set_mysql_collation_for_column(
apps,
cursor,
"pootle_project.Project",
"code",
"utf8_bin",
"varchar(255)")
# Directory
set_mysql_collation_for_column(
apps,
cursor,
"pootle_app.Directory",
"pootle_path",
"utf8_bin",
"varchar(255)")
set_mysql_collation_for_column(
apps,
cursor,
"pootle_app.Directory",
"name",
"utf8_bin",
"varchar(255)")
# Store
set_mysql_collation_for_column(
apps,
cursor,
"pootle_store.Store",
"pootle_path",
"utf8_bin",
"varchar(255)")
set_mysql_collation_for_column(
apps,
cursor,
"pootle_store.Store",
"name",
"utf8_bin",
"varchar(255)")
# Word.Stem
set_mysql_collation_for_column(
apps,
cursor,
"pootle_word.Stem",
"root",
"utf8_bin",
"varchar(255)")
def setup_permissions(self):
from django.contrib.contenttypes.models import ContentType
from .fixtures.models.permission import _require_permission
args = {
'app_label': 'pootle_app',
'model': 'directory'}
pootle_content_type = ContentType.objects.get(**args)
_require_permission(
'view',
'Can access a project',
pootle_content_type)
_require_permission(
'hide',
'Cannot access a project',
pootle_content_type)
_require_permission(
'suggest',
'Can make a suggestion',
pootle_content_type)
_require_permission(
'translate',
'Can submit translations',
pootle_content_type)
_require_permission(
'review',
'Can review translations',
pootle_content_type)
_require_permission(
'administrate',
'Can administrate a TP',
pootle_content_type)
def _setup_project_fs(self, project):
from pootle_fs.utils import FSPlugin
from pytest_pootle.utils import add_store_fs
project.config["pootle_fs.fs_type"] = "localfs"
project.config["pootle_fs.translation_mappings"] = {
"default": "/<language_code>/<dir_path>/<filename>.<ext>"}
project.config["pootle_fs.fs_url"] = "/tmp/path/for/setup"
plugin = FSPlugin(project)
for store in plugin.resources.stores:
add_store_fs(
store=store,
fs_path=plugin.get_fs_path(store.pootle_path),
synced=True)
def setup_fs(self):
from pootle_project.models import Project
for i in range(0, 2):
project = Project.objects.get(code="project%s" % i)
self._setup_project_fs(project)
self._setup_project_fs(
Project.objects.get(code="terminology"))
self._setup_project_fs(
Project.objects.get(code="disabled_project0"))
def setup_languages(self):
from .fixtures.models.language import _require_language
_require_language('en', 'English')
def setup_redis(self):
from pootle.core.models import Revision
Revision.initialize(force=True)
def setup_system_users(self):
from django.contrib.auth import get_user_model
from .fixtures.models.user import TEST_USERS, _require_user
users = {
user.username: user
for user
in get_user_model().objects.all()}
for username, user_params in TEST_USERS.items():
user_params["email"] = "%s@email.test" % username
TEST_USERS[username]["user"] = (
users.get(username)
or _require_user(username=username, **user_params))
def setup_site_permissions(self):
from django.contrib.auth import get_user_model
from pootle_app.models import Directory, PermissionSet
User = get_user_model()
nobody = User.objects.get_nobody_user()
default = User.objects.get_default_user()
from django.contrib.auth.models import Permission
view = Permission.objects.get(codename="view")
suggest = Permission.objects.get(codename="suggest")
translate = Permission.objects.get(codename="translate")
criteria = {
'user': nobody,
'directory': Directory.objects.root}
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions.set([view, suggest])
permission_set.save()
criteria['user'] = default
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions.set([view, suggest, translate])
permission_set.save()
def setup_site_root(self):
from pytest_pootle.factories import DirectoryFactory
DirectoryFactory(
name="projects",
parent=DirectoryFactory(parent=None, name=""))
def setup_site_matrix(self):
from pytest_pootle.factories import ProjectDBFactory, LanguageDBFactory
from pootle_format.models import Format
from pootle_language.models import Language
# add 2 languages
for i_ in range(0, 2):
LanguageDBFactory()
source_language = Language.objects.get(code="en")
po = Format.objects.get(name="po")
for i_ in range(0, 2):
# add 2 projects
project = ProjectDBFactory(
source_language=source_language)
project.filetypes.add(po)
def setup_terminology(self):
import pytest_pootle
from pytest_pootle.factories import (
ProjectDBFactory, StoreDBFactory, TranslationProjectFactory)
from pootle_language.models import Language
source_language = Language.objects.get(code="en")
terminology = ProjectDBFactory(code="terminology",
checkstyle="terminology",
fullname="Terminology",
source_language=source_language)
term_file = os.path.join(
os.path.dirname(pytest_pootle.__file__),
*("data", "po", "terminology.po"))
with open(term_file) as f:
term_ttk = getclass(f)(f.read())
for language in Language.objects.all():
tp = TranslationProjectFactory(
project=terminology, language=language)
if language.code not in ["language0", "language1"]:
continue
store = StoreDBFactory(
parent=tp.directory,
translation_project=tp,
name="terminology.po")
store.update(term_ttk)
def setup_disabled_project(self):
from pytest_pootle.factories import (DirectoryFactory,
ProjectDBFactory,
TranslationProjectFactory)
from pootle.core.contextmanagers import keep_data
from pootle_format.models import Format
from pootle_language.models import Language
with keep_data():
source_language = Language.objects.get(code="en")
project = ProjectDBFactory(code="disabled_project0",
fullname="Disabled Project 0",
source_language=source_language)
project.filetypes.add(Format.objects.get(name="po"))
project.disabled = True
project.save()
language = Language.objects.get(code="language0")
tp = TranslationProjectFactory(project=project, language=language)
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_stores(tp, n=(1, 1))
subdir0 = DirectoryFactory(name="subdir0", parent=tp.directory, tp=tp)
self._add_stores(tp, n=(1, 1), parent=subdir0)
def setup_subdirs(self):
from pytest_pootle.factories import DirectoryFactory
from pootle.core.contextmanagers import keep_data
from pootle_translationproject.models import TranslationProject
with keep_data():
for tp in TranslationProject.objects.all():
subdir0 = DirectoryFactory(
name="subdir0", parent=tp.directory, tp=tp)
subdir1 = DirectoryFactory(
name="subdir1", parent=subdir0, tp=tp)
self._add_stores(tp, n=(2, 1), parent=subdir0)
self._add_stores(tp, n=(1, 1), parent=subdir1)
def setup_submissions(self):
from django.contrib.auth import get_user_model
from django.utils import timezone
from pootle.core.contextmanagers import bulk_operations
from pootle_data.models import TPChecksData, TPData
from pootle_score.models import UserTPScore
from pootle_statistics.models import SubmissionTypes
from pootle_store.constants import UNTRANSLATED
from pootle_store.models import Unit, UnitChange
from pootle_translationproject.contextmanagers import update_tp_after
from pootle_translationproject.models import TranslationProject
year_ago = timezone.now() - relativedelta(years=1)
units = Unit.objects.all()
units.update(creation_time=year_ago)
User = get_user_model()
admin = User.objects.get(username="admin")
member = User.objects.get(username="member")
member2 = User.objects.get(username="member2")
UnitChange.objects.bulk_create(
UnitChange(unit_id=unit_id, changed_with=SubmissionTypes.SYSTEM)
for unit_id
in units.filter(state__gt=UNTRANSLATED).values_list("id", flat=True))
tps = TranslationProject.objects.exclude(
language__code="templates").select_related(
"language", "project__source_language").all()
bulk_pootle = bulk_operations(
models=(
get_user_model(),
UserTPScore,
TPData,
TPChecksData))
with bulk_pootle:
for tp in tps:
with update_tp_after(tp):
self._add_subs_to_stores(
tp.stores, admin, member, member2)
def _add_subs_to_stores(self, stores, admin, member, member2):
for store in stores.select_related("data", "parent"):
self._add_subs_to_store(store, admin, member, member2)
def _add_subs_to_store(self, store, admin, member, member2):
from django.utils import timezone
# from pootle_store.contextmanagers import update_store_after
year_ago = timezone.now() - relativedelta(years=1)
units = store.unit_set.select_related("change").all()
for unit in units:
self._add_submissions(
unit, year_ago, admin, member, member2)
def setup_templates(self):
from pootle.core.contextmanagers import keep_data
from pootle.core.signals import update_data
from pootle_project.models import Project
from pootle_translationproject.contextmanagers import update_tp_after
from pytest_pootle.factories import (
LanguageDBFactory, TranslationProjectFactory)
tps = []
with keep_data():
templates = LanguageDBFactory(code="templates")
for project in Project.objects.all():
# add a TP to the project for each language
tp = TranslationProjectFactory(project=project, language=templates)
# As there are no files on the FS we currently have to unobsolete
# the directory
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_template_stores(tp)
tps.append(tp)
for tp in tps:
with update_tp_after(tp):
for store in tp.stores.all():
update_data.send(
store.__class__,
instance=store)
def setup_tps(self):
from pootle.core.contextmanagers import keep_data
from pootle_project.models import Project
from pootle_language.models import Language
from pytest_pootle.factories import TranslationProjectFactory
with keep_data():
for project in Project.objects.select_related("source_language").all():
for language in Language.objects.exclude(code="en"):
# add a TP to the project for each language
tp = TranslationProjectFactory(
project=project, language=language)
# As there are no files on the FS we currently have to
# unobsolete the directory
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_stores(tp)
def _add_template_stores(self, tp, n=(3, 2), parent=None):
from pytest_pootle.factories import StoreDBFactory, UnitDBFactory
for i_ in range(0, n[0]):
# add 3 stores
store = StoreDBFactory(translation_project=tp)
store.filetype = tp.project.filetype_tool.choose_filetype(store.name)
store.save()
# add 4 template units (empty target) to each store
for i_ in range(0, 4):
UnitDBFactory(store=store, target="")
def _add_stores(self, tp, n=(3, 2), parent=None):
from pytest_pootle.factories import StoreDBFactory, UnitDBFactory
from pootle_store.constants import UNTRANSLATED, TRANSLATED, FUZZY, OBSOLETE
for i_ in range(0, n[0]):
# add 3 stores
if parent is None:
store = StoreDBFactory(translation_project=tp)
else:
store = StoreDBFactory(translation_project=tp, parent=parent)
store.filetype = tp.project.filetype_tool.choose_filetype(store.name)
store.save()
# add 8 units to each store
for state in [UNTRANSLATED, TRANSLATED, FUZZY, OBSOLETE]:
for i_ in range(0, n[1]):
UnitDBFactory(store=store, state=state)
def _update_submission_times(self, unit, update_time, last_update=None):
submissions = unit.submission_set.all()
if last_update:
submissions = submissions.exclude(
creation_time__lte=last_update)
submissions.update(creation_time=update_time)
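# Sketch of the timeline that _add_submissions (below) builds per unit,
# inferred from its body: the unit is created a year ago; member adds a
# suggestion at first_modified; admin rejects it (for originally untranslated
# units) or accepts it 7 days later; member2 adds a second suggestion 7 days
# after that; finally the unit is re-fuzzied, obsoleted or re-edited to match
# its original state.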
def _add_submissions(self, unit, created, admin, member, member2):
from pootle.core.delegate import review
from pootle_store.constants import UNTRANSLATED, FUZZY, OBSOLETE
from pootle_store.models import Suggestion, Unit, UnitChange
from django.utils import timezone
original_state = unit.state
unit.created = created
first_modified = created + relativedelta(months=unit.index, days=10)
# add suggestion at first_modified
suggestion_review = review.get(Suggestion)
suggestion, created_ = suggestion_review().add(
unit,
"Suggestion for %s" % (unit.target or unit.source),
user=member)
self._update_submission_times(unit, first_modified, created)
# accept the suggestion 7 days later if not untranslated
next_time = first_modified + timedelta(days=7)
if original_state == UNTRANSLATED:
suggestion_review([suggestion], reviewer=admin).reject()
else:
Unit.objects.filter(pk=unit.pk).update(mtime=next_time)
UnitChange.objects.filter(
unit_id=unit.pk).update(
reviewed_on=next_time,
reviewed_by=admin,
submitted_on=next_time)
suggestion_review([suggestion], reviewer=admin).accept()
self._update_submission_times(
unit, next_time, first_modified)
# add another suggestion as different user 7 days later
suggestion2_, created_ = suggestion_review().add(
unit,
"Suggestion 2 for %s" % (unit.target or unit.source),
user=member2)
self._update_submission_times(
unit,
first_modified + timedelta(days=14),
next_time)
# mark FUZZY
if original_state == FUZZY:
unit.markfuzzy()
# mark OBSOLETE
elif original_state == OBSOLETE:
unit.makeobsolete()
elif unit.target:
# Re-edit units with translations, adding some submissions
# of SubmissionTypes.EDIT_TYPES
old_target = unit.target
current_time = timezone.now() - timedelta(days=14)
unit.__class__.objects.filter(id=unit.id).update(
target_f="Updated %s" % old_target,
mtime=current_time)
unit.change.save()
def setup_vfolders(self):
from pytest_pootle.factories import VirtualFolderDBFactory
from django.db import connection
from django.apps import apps
from pootle.core.utils.db import set_mysql_collation_for_column
from pootle_language.models import Language
from pootle_project.models import Project
cursor = connection.cursor()
# VirtualFolder
set_mysql_collation_for_column(
apps,
cursor,
"virtualfolder.VirtualFolder",
"name",
"utf8_bin",
"varchar(70)")
project0 = Project.objects.get(code="project0")
language0 = Language.objects.get(code="language0")
VirtualFolderDBFactory(filter_rules="store0.po")
VirtualFolderDBFactory(filter_rules="store1.po")
vf = VirtualFolderDBFactory(
all_languages=True,
is_public=False,
filter_rules="store0.po")
vf.projects.add(project0)
vf.save()
vf = VirtualFolderDBFactory(
all_languages=True,
is_public=False,
filter_rules="store1.po")
vf.projects.add(project0)
vf.save()
vf = VirtualFolderDBFactory(
filter_rules="subdir0/store4.po")
vf.languages.add(language0)
vf.projects.add(project0)
vf.save()
| size: 23,700 | language: Python | extension: .py | total_lines: 530 | avg_line_length: 32.403774 | max_line_length: 84 | alphanum_fraction: 0.593402 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,870 | file_name: utils.py | file_path: translate_pootle/pytest_pootle/fs/utils.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
def parse_fs_action_args(action):
command_args = []
plugin_kwargs = {}
if action.endswith("_force"):
action = action[:-6]
command_args.append("--force")
plugin_kwargs["force"] = True
elif action.startswith("resolve"):
if action.endswith("_overwrite"):
command_args.append("--overwrite")
plugin_kwargs["merge"] = False
else:
plugin_kwargs["merge"] = True
if "pootle" in action:
command_args.append("--pootle-wins")
plugin_kwargs["pootle_wins"] = True
else:
plugin_kwargs["pootle_wins"] = False
action = "resolve"
return action, command_args, plugin_kwargs
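# Illustrative examples (not part of the original module) that follow
# directly from the branches above:
# parse_fs_action_args("add_force")
# -> ("add", ["--force"], {"force": True})
# parse_fs_action_args("resolve_pootle_overwrite")
# -> ("resolve", ["--overwrite", "--pootle-wins"],
#     {"merge": False, "pootle_wins": True})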
| size: 988 | language: Python | extension: .py | total_lines: 27 | avg_line_length: 29.740741 | max_line_length: 77 | alphanum_fraction: 0.626694 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,871 | file_name: signals.py | file_path: translate_pootle/pytest_pootle/fixtures/signals.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from contextlib import contextmanager
import pytest
from pootle.core.contextmanagers import keep_data
from pootle.core.signals import update_data
from pootle.core.utils.timezone import localdate
class UpdateUnitTest(object):
def __init__(self, runner):
self.runner = runner
self.unit = runner.unit
def __enter__(self):
self.original = self._get_unit_data(self.unit)
def __exit__(self, *args):
self.unit.refresh_from_db()
self.unit.store.data.refresh_from_db()
self.unit.store.translation_project.data.refresh_from_db()
self._test(
self.original,
self._get_unit_data(self.unit))
def _get_unit_data(self, unit):
store = unit.store
tp = store.translation_project
data = {}
data["unit_revision"] = unit.revision
data["checks"] = list(unit.qualitycheck_set.filter(name="xmltags"))
data["store_data"] = {
k: getattr(store.data, k)
for k
in ["translated_words",
"critical_checks",
"max_unit_revision",
"total_words",
"pending_suggestions"]}
store_score = unit.store.user_scores.get(
user__username="member",
date=localdate())
data["store_score"] = {
k: getattr(store_score, k)
for k
in ["translated",
"suggested",
"reviewed",
"score"]}
tp_score = tp.user_scores.get(
user__username="member",
date=localdate())
data["tp_score"] = {
k: getattr(tp_score, k)
for k
in ["translated",
"suggested",
"reviewed",
"score"]}
data["store_checks_data"] = {
cd.name: cd.count
for cd in store.check_data.all()}
data["tp_checks_data"] = {
cd.name: cd.count
for cd in tp.check_data.all()}
data["tp_data"] = {
k: getattr(tp.data, k)
for k
in ["translated_words",
"critical_checks",
"max_unit_revision",
"total_words",
"pending_suggestions"]}
data["dir_revision"] = list(
store.parent.revisions.filter(
key__in=["stats", "checks"]))
data["tp_dir_revision"] = list(
tp.directory.revisions.filter(
key__in=["stats", "checks"]))
return data
def _test(self, original, updated):
for test_type in ["revisions", "data", "check_data", "scores"]:
getattr(self.runner, "_test_%s" % test_type)(original, updated)
@pytest.fixture
def update_unit_test():
return UpdateUnitTest
@contextmanager
def _no_update_data():
with keep_data(signals=(update_data, )):
yield
@pytest.fixture
def no_update_data():
return _no_update_data
| size: 3,264 | language: Python | extension: .py | total_lines: 93 | avg_line_length: 25.451613 | max_line_length: 77 | alphanum_fraction: 0.56038 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,872 | file_name: pootle_terminology.py | file_path: translate_pootle/pytest_pootle/fixtures/pootle_terminology.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import pytest
from translate.storage.factory import getclass
def pytest_generate_tests(metafunc):
import pytest_pootle
if 'terminology_units' in metafunc.fixturenames:
term_file = os.path.join(
os.path.dirname(pytest_pootle.__file__),
*("data", "po", "terminology.po"))
with open(term_file) as f:
_terms = [x.source for x in getclass(f)(f.read()).units[1:]]
metafunc.parametrize("terminology_units", _terms)
@pytest.fixture
def terminology_project():
from pootle_project.models import Project
return Project.objects.get(code="terminology")
@pytest.fixture
def terminology0(language0, terminology_project):
from pootle_translationproject.models import TranslationProject
return TranslationProject.objects.get(
language=language0,
project__code="terminology")
| size: 1,154 | language: Python | extension: .py | total_lines: 29 | avg_line_length: 34.965517 | max_line_length: 77 | alphanum_fraction: 0.725314 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,873 | file_name: getters.py | file_path: translate_pootle/pytest_pootle/fixtures/getters.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from contextlib import contextmanager
import pytest
from pytest_pootle.utils import suppress_getter
from pootle.core.delegate import context_data, tp_tool, wordcount
@contextmanager
def _no_wordcount():
with suppress_getter(wordcount):
yield
@pytest.fixture
def no_wordcount():
return _no_wordcount
@contextmanager
def _no_context_data():
with suppress_getter(context_data):
yield
@pytest.fixture
def no_context_data():
return _no_context_data
@contextmanager
def _no_tp_tool():
with suppress_getter(tp_tool):
yield
@pytest.fixture
def no_tp_tool():
return _no_tp_tool
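# Hypothetical usage of any of these fixtures: the returned context manager
# silences the corresponding delegate getter for the duration of the block.
# def test_without_wordcount(no_wordcount):
#     with no_wordcount():
#         ...  # runs with the wordcount getter suppressed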
| size: 907 | language: Python | extension: .py | total_lines: 32 | avg_line_length: 25.375 | max_line_length: 77 | alphanum_fraction: 0.756977 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,874 | file_name: import_export_fixtures.py | file_path: translate_pootle/pytest_pootle/fixtures/import_export_fixtures.py |
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import shutil
from collections import OrderedDict
import pytest
from django.urls import reverse
from .models import store
FILE_IMPORT_FAIL_TESTS = OrderedDict()
FILE_IMPORT_FAIL_TESTS["revision_header_missing.po"] = "MissingPootleRevError"
FILE_IMPORT_FAIL_TESTS["revision_header_invalid.po"] = "MissingPootleRevError"
FILE_IMPORT_FAIL_TESTS["path_header_missing.po"] = "MissingPootlePathError"
FILE_IMPORT_FAIL_TESTS["path_header_invalid.po"] = "FileImportError"
@pytest.fixture(params=FILE_IMPORT_FAIL_TESTS.keys())
def file_import_failure(request):
from import_export import exceptions
return (
request.param,
getattr(
exceptions, FILE_IMPORT_FAIL_TESTS[request.param]))
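# Worked example of one parametrization, following the mapping above: with
# request.param == "path_header_missing.po" the fixture returns
# ("path_header_missing.po", import_export.exceptions.MissingPootlePathError).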
@pytest.fixture
def ts_directory(po_directory, request, tmpdir, settings):
"""Sets up a tmp directory for TS files. Although it doesnt use the
po_directory fixture it calls it first to ensure FS is true to the DB
when fixture is run
"""
import pytest_pootle
ts_dir = str(tmpdir.mkdir("ts"))
# Adjust locations
settings.POOTLE_TRANSLATION_DIRECTORY = ts_dir
shutil.copytree(
os.path.join(
os.path.dirname(pytest_pootle.__file__),
"data", "ts", "tutorial"),
os.path.join(
settings.POOTLE_TRANSLATION_DIRECTORY,
"tutorial"))
def _cleanup():
for f in tmpdir.listdir():
f.remove()
request.addfinalizer(_cleanup)
return settings.POOTLE_TRANSLATION_DIRECTORY
@pytest.fixture
def en_tutorial_ts(english_tutorial, ts_directory):
"""Require the en/tutorial/tutorial.ts store."""
from pootle_format.models import Format
english_tutorial.project.filetypes.add(
Format.objects.get(name="ts"))
return store._require_store(english_tutorial,
ts_directory,
'tutorial.ts')
@pytest.fixture(
params=[
"language0_project0", "templates_project0", "en_terminology"])
def import_tps(request):
"""List of required translation projects for import tests."""
from pootle_translationproject.models import TranslationProject
language_code, project_code = request.param.split('_')
return TranslationProject.objects.get(
language__code=language_code,
project__code=project_code)
@pytest.fixture
def exported_tp_view_response(client, request_users, tp0):
from import_export.utils import TPTMXExporter
user = request_users["user"]
client.login(
username=user.username,
password=request_users["password"])
kwargs = {
"project_code": tp0.project.code,
"language_code": tp0.language.code,
"dir_path": ""}
exporter = TPTMXExporter(tp0)
exporter.export()
response = client.get(reverse('pootle-tp-browse', kwargs=kwargs))
return response
| size: 3,130 | language: Python | extension: .py | total_lines: 79 | avg_line_length: 33.56962 | max_line_length: 78 | alphanum_fraction: 0.703704 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,875 | file_name: jsondata.py | file_path: translate_pootle/pytest_pootle/fixtures/jsondata.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
import pytest
JSON_OBJECTS = (
3,
"four",
u"five ☠",
"six \n seven ",
[9, 10],
(11, 12, 13),
OrderedDict(foo="bar", foo2="baz"),
[1, "two", OrderedDict(three=3)])
@pytest.fixture(params=JSON_OBJECTS)
def json_objects(request):
return request.param
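# Hypothetical consumer sketch: a test taking `json_objects` runs once per
# object above, e.g.
# def test_json_roundtrip(json_objects):
#     import json
#     assert json.loads(json.dumps(json_objects)) is not None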
| size: 604 | language: Python | extension: .py | total_lines: 21 | avg_line_length: 25.761905 | max_line_length: 77 | alphanum_fraction: 0.693241 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,876 | file_name: statistics.py | file_path: translate_pootle/pytest_pootle/fixtures/statistics.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
@pytest.fixture
def anon_submission_unit(nobody, store0):
anon = nobody
unit = store0.units.first()
old_target = unit.target
unit.target_f = "Updated %s" % old_target
unit.save(user=anon)
@pytest.fixture
def quality_check_submission(admin):
from pootle_store.constants import TRANSLATED
from pootle_store.models import QualityCheck
# create a sub with quality check info
qc_filter = dict(
unit__state=TRANSLATED,
unit__store__translation_project__project__disabled=False,
unit__store__obsolete=False)
qc = QualityCheck.objects.filter(**qc_filter).first()
unit = qc.unit
unit.toggle_qualitycheck(qc.id, True, admin)
return unit.submission_set.filter(quality_check__gt=0).first()
| size: 1,049 | language: Python | extension: .py | total_lines: 28 | avg_line_length: 33.535714 | max_line_length: 77 | alphanum_fraction: 0.728079 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,877 | file_name: search.py | file_path: translate_pootle/pytest_pootle/fixtures/search.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
import pytest
UNITS_TEXT_SEARCH_TESTS = OrderedDict()
UNITS_TEXT_SEARCH_TESTS["case:Translated (source)"] = {
"text": "Translated",
"sfields": ["source"]}
UNITS_TEXT_SEARCH_TESTS["case:Translated (source/target)"] = {
"text": "Translated",
"sfields": ["source", "target"]}
UNITS_TEXT_SEARCH_TESTS["Suggestion for Translated (target)"] = {
"text": "suggestion for translated",
"sfields": ["target"]}
UNITS_TEXT_SEARCH_TESTS["suggestion for TRANSLATED (target)"] = {
"text": "suggestion for TRANSLATED",
"sfields": ["target"]}
UNITS_TEXT_SEARCH_TESTS["suggestion for translated (source)"] = {
"text": "suggestion for translated",
"sfields": ["source"],
"empty": True}
UNITS_TEXT_SEARCH_TESTS["case:Translated (source_f/target_f)"] = {
"text": "Translated",
"sfields": ["source_f", "target_f"]}
UNITS_TEXT_SEARCH_TESTS["Suggestion for Translated (target_f)"] = {
"text": "suggestion for translated",
"sfields": ["target_f"]}
UNITS_TEXT_SEARCH_TESTS["suggestion for TRANSLATED (target_f)"] = {
"text": "suggestion for TRANSLATED",
"sfields": ["target_f"]}
UNITS_TEXT_SEARCH_TESTS["suggestion for translated (source_f)"] = {
"text": "suggestion for translated",
"sfields": ["source_f"],
"empty": True}
UNITS_TEXT_SEARCH_TESTS["suggestion for translated (source/target)"] = {
"text": "suggestion for translated",
"sfields": ["target", "source"]}
UNITS_TEXT_SEARCH_TESTS["exact: suggestion for translated (target)"] = {
"text": "Suggestion for Translated",
"sfields": ["target"]}
UNITS_TEXT_SEARCH_TESTS["exact: suggestion for translated (source/target)"] = {
"text": "Suggestion for Translated",
"sfields": ["target", "source"]}
UNITS_TEXT_SEARCH_TESTS["suggestion translated for (target)"] = {
"text": "suggestion translated for",
"sfields": ["target"]}
UNITS_TEXT_SEARCH_TESTS["exact: suggestion translated for (target)"] = {
"text": "suggestion translated for",
"sfields": ["target"],
"empty": True}
UNITS_TEXT_SEARCH_TESTS["FOO (notes)"] = {
"text": "FOO",
"sfields": ["notes"],
"empty": True}
UNITS_TEXT_SEARCH_TESTS["FOO BAR"] = {
"sfields": ["target", "source"],
"empty": True}
# it is not entirely clear whether this should pass or fail
UNITS_TEXT_SEARCH_TESTS["suggestion for translated FOO (target)"] = {
"text": "suggestion translated for FOO",
"sfields": ["target"],
"empty": True}
UNITS_CONTRIB_SEARCH_TESTS = [
"suggestions",
"FOO",
"my_suggestions",
"user_suggestions",
"user_suggestions_accepted",
"user_suggestions_rejected",
"my_submissions",
"user_submissions",
"my_submissions_overwritten",
"user_submissions_overwritten"]
UNITS_STATE_SEARCH_TESTS = [
"all",
"translated",
"untranslated",
"fuzzy",
"incomplete",
"FOO"]
UNITS_CHECKS_SEARCH_TESTS = [
"checks:foo",
"category:foo",
"category:critical",
"checks:endpunc",
"checks:endpunc,printf",
"checks:endpunc,foo"]
@pytest.fixture(params=UNITS_STATE_SEARCH_TESTS)
def units_state_searches(request):
return request.param
@pytest.fixture(params=UNITS_CHECKS_SEARCH_TESTS)
def units_checks_searches(request):
from pootle_checks.utils import get_category_id
check_type, check_data = request.param.split(":")
if check_type == "category":
return check_type, get_category_id(check_data)
return check_type, check_data.split(",")
@pytest.fixture(params=UNITS_CONTRIB_SEARCH_TESTS)
def units_contributor_searches(request):
return request.param
@pytest.fixture(params=UNITS_TEXT_SEARCH_TESTS.keys())
def units_text_searches(request):
text = request.param
if text.startswith("case:"):
text = text[5:]  # "case:" is five characters long
case = True
else:
case = False
if text.startswith("exact:"):
text = text[6:]
exact = True
else:
exact = False
test = UNITS_TEXT_SEARCH_TESTS[request.param]
test["text"] = test.get("text", text)
test["empty"] = test.get("empty", False)
test["case"] = case
test["exact"] = exact
return test
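# Worked example, derived from the code above: for the key
# "case:Translated (source)" the fixture returns
# {"text": "Translated", "sfields": ["source"],
#  "empty": False, "case": True, "exact": False}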
| size: 4,422 | language: Python | extension: .py | total_lines: 124 | avg_line_length: 31.564516 | max_line_length: 79 | alphanum_fraction: 0.672816 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,878 | file_name: cache.py | file_path: translate_pootle/pytest_pootle/fixtures/cache.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
@pytest.fixture(scope='session', autouse=True)
def delete_pattern():
"""Adds the no-op `delete_pattern()` method to `LocMemCache`."""
from django.core.cache.backends.locmem import LocMemCache
LocMemCache.delete_pattern = lambda x, y: 0
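# Effect (a sketch, assuming tests run against LocMemCache): code written
# for django-redis can now call
# caches["default"].delete_pattern("foo*")
# and gets a harmless 0 back instead of an AttributeError.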
| size: 540 | language: Python | extension: .py | total_lines: 13 | avg_line_length: 39.384615 | max_line_length: 77 | alphanum_fraction: 0.748092 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,879 | file_name: dt.py | file_path: translate_pootle/pytest_pootle/fixtures/dt.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from datetime import datetime, timedelta
import pytest
import pytz
from pootle.core.utils.timezone import localdate, make_aware
@pytest.fixture
def today():
return localdate()
@pytest.fixture
def yesterday(today):
return today - timedelta(days=1)
@pytest.fixture
def dt_today(today):
return make_aware(
datetime.combine(
today,
datetime.min.time())).astimezone(
pytz.timezone("UTC"))
@pytest.fixture
def dt_yesterday(yesterday):
return make_aware(
datetime.combine(
yesterday,
datetime.min.time())).astimezone(
pytz.timezone("UTC"))
| size: 931 | language: Python | extension: .py | total_lines: 31 | avg_line_length: 25.064516 | max_line_length: 77 | alphanum_fraction: 0.696288 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,880 | file_name: mock.py | file_path: translate_pootle/pytest_pootle/fixtures/mock.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
"""Monkeypatching fixtures."""
from _pytest.monkeypatch import MonkeyPatch
mp = MonkeyPatch()
class FakeJob(object):
id = 'FAKE_JOB_ID'
mp.setattr('rq.get_current_job', lambda: FakeJob())
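# Effect (a sketch): once this module is imported, any code asking rq for
# its current job sees the fake, e.g.
# import rq
# rq.get_current_job().id  # == 'FAKE_JOB_ID'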
| size: 475 | language: Python | extension: .py | total_lines: 13 | avg_line_length: 34.615385 | max_line_length: 77 | alphanum_fraction: 0.751101 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,881 | file_name: site.py | file_path: translate_pootle/pytest_pootle/fixtures/site.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import functools
import inspect
import os
import shutil
import tempfile
import time
import pytest
from pytest_pootle.env import PootleTestEnv
@pytest.fixture(autouse=True)
def test_timing(request, log_timings):
from django.db import reset_queries
if not request.config.getoption("--debug-tests"):
return
from django.conf import settings
settings.DEBUG = True
reset_queries()
start = time.time()
request.addfinalizer(
functools.partial(
log_timings,
request.node.name,
start))
@pytest.fixture
def po_test_dir(request, tmpdir):
po_dir = str(tmpdir.mkdir("po"))
def rm_po_dir():
if os.path.exists(po_dir):
shutil.rmtree(po_dir)
request.addfinalizer(rm_po_dir)
return po_dir
@pytest.fixture
def po_directory(request, po_test_dir, settings):
"""Sets up a tmp directory for PO files."""
translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY
# Adjust locations
settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir
def _cleanup():
settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory
request.addfinalizer(_cleanup)
@pytest.fixture(scope='session')
def tests_use_db(request):
return bool(
[item for item in request.node.items
if item.get_marker('django_db')])
@pytest.fixture(scope='session')
def tests_use_vfolders(request):
return bool(
[item for item in request.node.items
if item.get_marker('pootle_vfolders')])
@pytest.fixture(scope='session')
def tests_use_migration(request, tests_use_db):
force_migration = request.config.getoption("--force-migration")
return bool(
force_migration
or (tests_use_db
and [item for item in request.node.items
if item.get_marker('django_migration')]))
@pytest.fixture(autouse=True, scope='session')
def debug_utils(request):
class _TraceEvent(object):
def __init__(self, *args, **kwargs):
self.stack = inspect.stack()[2]
self.args = args
self.kwargs = kwargs
def __str__(self):
return ", ".join(
[self.stack[1],
str(self.stack[2]),
self.stack[3],
str(self.args),
str(self.kwargs)])
class _Trace(object):
debug = False
_called = ()
def __call__(self, *args, **kwargs):
self._called += (_TraceEvent(*args, **kwargs), )
def __iter__(self):
for event in self._called:
yield event
self._called = ()
def __str__(self):
return "\n".join(str(item) for item in self._called)
@pytest.fixture(autouse=True, scope='session')
def setup_db_if_needed(request, tests_use_db):
"""Sets up the site DB only if tests requested to use the DB (autouse)."""
if tests_use_db and not request.config.getvalue('reuse_db'):
return request.getfixturevalue('post_db_setup')
@pytest.fixture(scope='session')
def post_db_setup(translations_directory, django_db_setup, django_db_blocker,
tests_use_db, tests_use_vfolders, request):
"""Sets up the site DB for the test session."""
if tests_use_db:
with django_db_blocker.unblock():
PootleTestEnv().setup(
vfolders=tests_use_vfolders)
@pytest.fixture(scope='session')
def django_db_use_migrations(tests_use_migration):
return tests_use_migration
@pytest.fixture
def no_projects():
from pootle_project.models import Project
Project.objects.all().delete()
@pytest.fixture
def no_vfolders():
from virtualfolder.models import VirtualFolder
VirtualFolder.objects.all().delete()
@pytest.fixture
def no_permissions():
from django.contrib.auth.models import Permission
Permission.objects.all().delete()
@pytest.fixture
def no_permission_sets():
from pootle_app.models import PermissionSet
PermissionSet.objects.all().delete()
@pytest.fixture
def no_submissions():
from pootle_statistics.models import Submission
Submission.objects.all().delete()
@pytest.fixture
def no_users(no_units):
from django.contrib.auth import get_user_model
User = get_user_model()
User.objects.all().delete()
@pytest.fixture
def no_units():
from pootle_store.models import Unit
Unit.objects.all().delete()
@pytest.fixture
def no_templates_tps(templates):
templates.translationproject_set.all().delete()
@pytest.fixture
def no_extra_users():
from django.contrib.auth import get_user_model
User = get_user_model()
User.objects.exclude(
username__in=["system", "default", "nobody"]).delete()
@pytest.fixture(autouse=True, scope="session")
def translations_directory(request):
"""used by PootleEnv"""
from django.conf import settings
settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp()
def rm_tmp_dir():
shutil.rmtree(settings.POOTLE_TRANSLATION_DIRECTORY)
request.addfinalizer(rm_tmp_dir)
@pytest.fixture
def clear_cache():
"""Currently tests only use one cache so this clears all"""
from django.core.cache import caches
from django_redis import get_redis_connection
get_redis_connection('default').flushdb()
get_redis_connection('lru').flushdb()
get_redis_connection('redis').flushdb()
caches["exports"].clear()
@pytest.fixture(scope="session")
def test_fs():
"""A convenience fixture for retrieving data from test files"""
import pytest_pootle
class TestFs(object):
def path(self, path):
return os.path.join(
os.path.dirname(pytest_pootle.__file__),
path)
def open(self, paths, *args, **kwargs):
if isinstance(paths, (list, tuple)):
paths = os.path.join(*paths)
return open(self.path(paths), *args, **kwargs)
return TestFs()
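# Hypothetical usage, relying only on the API defined above:
# def test_terminology_file(test_fs):
#     with test_fs.open(("data", "po", "terminology.po")) as f:
#         assert f.read()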
@pytest.fixture
def media_test_dir(request, settings, tmpdir):
media_dir = str(tmpdir.mkdir("media"))
settings.MEDIA_ROOT = media_dir
def rm_media_dir():
if os.path.exists(media_dir):
shutil.rmtree(media_dir)
request.addfinalizer(rm_media_dir)
return media_dir
@pytest.fixture(scope="session")
def export_dir(request):
export_dir = tempfile.mkdtemp()
def rm_export_dir():
if os.path.exists(export_dir):
shutil.rmtree(export_dir)
request.addfinalizer(rm_export_dir)
return export_dir
@pytest.fixture
def cd_export_dir(request, export_dir):
curdir = os.path.abspath(os.curdir)
os.chdir(export_dir)
def cd_curdir():
os.chdir(curdir)
request.addfinalizer(cd_curdir)
| size: 7,026 | language: Python | extension: .py | total_lines: 198 | avg_line_length: 29.282828 | max_line_length: 78 | alphanum_fraction: 0.674878 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,882 | file_name: debug.py | file_path: translate_pootle/pytest_pootle/fixtures/debug.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import functools
import importlib
import logging
import time
import pytest
from pytest_pootle import utils
logger = logging.getLogger("POOTLE_DEBUG")
@pytest.fixture(scope="session")
def memusage():
try:
dj_debug = importlib.import_module("dj.debug")
except ImportError:
return
return getattr(dj_debug, 'memusage', None)
@pytest.fixture(scope="session")
def log_timings(request, timings):
return functools.partial(
utils.log_test_timing,
logger,
timings)
@pytest.fixture(scope="session")
def timings(request):
debug_tests = request.config.getoption("--debug-tests")
if not debug_tests:
return
if debug_tests != "-":
logger.addHandler(logging.FileHandler(debug_tests))
utils.log_test_start(logger)
timings = dict(start=time.time(), tests={})
request.addfinalizer(
functools.partial(
utils.log_test_report,
logger,
timings))
return timings
| size: 1,268 | language: Python | extension: .py | total_lines: 42 | avg_line_length: 25.5 | max_line_length: 77 | alphanum_fraction: 0.702058 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,883 | file_name: revision.py | file_path: translate_pootle/pytest_pootle/fixtures/revision.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
@pytest.fixture(autouse=True)
def revision(request, clear_cache):
"""Sets up the cached revision counter for each test call."""
from pootle.core.models import Revision
from pootle_store.models import Unit
if request.node.get_marker('django_db'):
Revision.set(Unit.max_revision())
| size: 596 | language: Python | extension: .py | total_lines: 15 | avg_line_length: 36.933333 | max_line_length: 77 | alphanum_fraction: 0.747405 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,884 | file_name: contributors.py | file_path: translate_pootle/pytest_pootle/fixtures/contributors.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
import pytest
CONTRIBUTORS_KWARGS = dict(
noargs={},
projects=dict(project_codes=[u"project0"]),
languages=dict(language_codes=[u"language0"]),
projects_and_languages=dict(
project_codes=[u"project0"],
language_codes=[u"language0"]),
since=dict(since="2000-11-10"),
until=dict(until="2000-11-10"),
since_and_until=dict(since="2000-11-10", until="2000-11-10"),
sort_by=dict(sort_by="contributions"))
CONTRIBUTORS_WITH_EMAIL = OrderedDict((
('admin', {
'username': 'admin',
'full_name': '',
'email': '',
}),
('member', {
'username': 'member',
'full_name': '',
'email': 'member@membership.us',
}),
('funkymember', {
'username': 'funkymember',
'full_name': 'Funky " member with <> and @ and stuff',
'email': 'funky_member@membership.dk',
}),
('fullmember', {
'username': 'fullmember',
'full_name': 'Just a member',
'email': 'full_member@membership.fr',
}),
('comma_member', {
'username': 'comma_member',
'full_name': 'Member, with comma',
'email': 'comma_member@membership.de',
}),
))
@pytest.fixture
def default_contributors_kwargs():
return OrderedDict(
(("include_anon", False),
("since", None),
("until", None),
("project_codes", None),
("language_codes", None),
("sort_by", "username"),
("mailmerge", False)))
@pytest.fixture(params=CONTRIBUTORS_KWARGS)
def contributors_kwargs(request):
return CONTRIBUTORS_KWARGS[request.param]
@pytest.fixture
def dummy_contributors(request, default_contributors_kwargs):
from pootle.core.delegate import contributors
from pootle.core.plugin import getter
from pootle_statistics.utils import Contributors
orig_receivers = contributors.receivers
receivers_cache = contributors.sender_receivers_cache.copy()
contributors.receivers = []
contributors.sender_receivers_cache.clear()
class DummyContributors(Contributors):
@property
def contributors(self):
# Hack the output to get back our kwargs.
_result_kwargs = OrderedDict()
for k in default_contributors_kwargs.keys():
_result_kwargs[k] = dict(
full_name=k,
contributions=getattr(
self, k, default_contributors_kwargs[k]))
return _result_kwargs
@getter(contributors, weak=False)
def get_dummy_contribs_(**kwargs_):
return DummyContributors
def _reset_contributors():
contributors.receivers = orig_receivers
contributors.sender_receivers_cache = receivers_cache
request.addfinalizer(_reset_contributors)
@pytest.fixture
def dummy_email_contributors(request):
from pootle.core.delegate import contributors
from pootle.core.plugin import getter
from pootle_statistics.utils import Contributors
orig_receivers = contributors.receivers
receivers_cache = contributors.sender_receivers_cache.copy()
contributors.receivers = []
contributors.sender_receivers_cache.clear()
class DummyContributors(Contributors):
@property
def contributors(self):
return OrderedDict(
sorted(CONTRIBUTORS_WITH_EMAIL.items(),
key=lambda x: str.lower(x[1]['username'])))
@getter(contributors, weak=False)
def get_dummy_contribs_(**kwargs_):
return DummyContributors
def _reset_contributors():
contributors.receivers = orig_receivers
contributors.sender_receivers_cache = receivers_cache
request.addfinalizer(_reset_contributors)
| size: 4,058 | language: Python | extension: .py | total_lines: 109 | avg_line_length: 30.165138 | max_line_length: 77 | alphanum_fraction: 0.6557 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,885 | file_name: data.py | file_path: translate_pootle/pytest_pootle/fixtures/data.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
STATS_DATA = [
"max_unit_revision",
"max_unit_mtime",
"last_submission",
"last_created_unit",
"pending_suggestions",
"total_words",
"fuzzy_words",
"translated_words"]
@pytest.fixture
def stats_data_dict(request):
return STATS_DATA
@pytest.fixture(params=STATS_DATA)
def stats_data_types(request):
return request.param
| size: 654 | language: Python | extension: .py | total_lines: 23 | avg_line_length: 25.391304 | max_line_length: 77 | alphanum_fraction: 0.725962 | repo_name: translate/pootle | repo_stars: 1,486 | repo_forks: 288 | repo_open_issues: 526 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| id: 14,886 | file_name: views.py | file_path: translate_pootle/pytest_pootle/fixtures/views.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import functools
import urllib
from collections import OrderedDict
from datetime import timedelta
import pytest
from dateutil.relativedelta import relativedelta
from pytest_pootle.fixtures.models.user import TEST_USERS
from pytest_pootle.utils import create_store, get_test_uids
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django.utils import timezone
DAY_AGO = (timezone.now() - timedelta(days=1))
MONTH_AGO = (timezone.now() - relativedelta(months=1))
TWO_MONTHS_AGO = (timezone.now() - relativedelta(months=2))
SEVEN_MONTHS_AGO = (timezone.now() - relativedelta(months=7))
BAD_VIEW_TESTS = OrderedDict(
(("/foo/bar", dict(code=301, location="/foo/bar/")),
("/foo/bar/", {}),
("/projects", dict(code=301, location="/projects/")),
("/projects/project0",
dict(code=301, location="/projects/project0/")),
("/projects/project0/foo.po", {}),
("/projects/projectfoo",
dict(code=301, location="/projects/projectfoo/")),
("/projects/projectfoo/", {}),
("/language0/projectfoo",
dict(code=301, location="/language0/projectfoo/")),
("/language0/projectfoo/", {}),
("/language0/project0",
dict(code=301, location="/language0/project0/")),
("/projects/project0/subdir0/foo.po", {}),
# these may not be correct, but they reflect current behaviour
("/language0/project0/foo/",
dict(code=302, location="/language0/project0/")),
("/language0/project0/foo",
dict(code=302, location="/language0/project0/")),
("/language0/project0/subdir0",
dict(code=302, location="/language0/project0/")),
("/projects/PROJECT0/", {}),
("/language0/PROJECT0/", {}),
("/language0/PROJECT0/subdir0/", {}),
("/language0/PROJECT0/store0.po", {}),
("/LANGUAGE0/",
dict(code=301, location="/language0/")),
("/LANGUAGE0/foo/",
dict(code=301, location="/language0/foo/")),
("/LANGUAGE0/project0/",
dict(code=301, location="/language0/project0/")),
("/LANGUAGE0/project0/subdir0/",
dict(code=301, location="/language0/project0/subdir0/")),
("/LANGUAGE0/project0/store0.po",
dict(code=301, location="/language0/project0/store0.po")),
("/xhr/units/1/edit/", dict(code=400)),
("/xhr/units/?path=/%s" % ("BAD" * 800),
dict(ajax=True, code=400)),
("/xhr/units?filter=translated&"
"path=/",
dict(ajax=True))))
GET_UNITS_TESTS = OrderedDict(
(("default_path", {}),
("root_path", dict(path="/")),
("projects_path", dict(path="/projects/")),
("project_path", dict(path="/projects/project0/")),
("bad_project_path", dict(path="/projects/FOO/")),
("state_translated",
{"filter": "translated"}),
("state_translated_continued",
{"filter": "translated",
"uids": functools.partial(get_test_uids, count=9),
"offset": 10}),
("state_untranslated",
{"filter": "untranslated"}),
("state_untranslated",
{"filter": "untranslated",
"offset": 100000}),
("state_incomplete",
{"filter": "incomplete"}),
("state_fuzzy",
{"filter": "fuzzy"}),
("sort_units_oldest",
{"sort_by_param": "oldest"}),
("filter_from_uid",
{"path": "/language0/project0/store0.po",
"uids": functools.partial(get_test_uids,
pootle_path="/language0/project0/store0.po"),
"filter": "all"}),
("filter_from_uid_sort_priority",
{"uids": functools.partial(get_test_uids,
pootle_path="/language0/project0/store0.po"),
"filter": "all",
"sort": "priority"}),
("translated_by_member",
{"filter": "translated",
"user": "member"}),
("translated_by_member_FOO",
{"filter": "translated",
"user": "member_FOO"}),
("modified_last_month",
{"filter": "translated",
"modified-since": MONTH_AGO.isoformat()}),
("modified_last_calendar_month",
{"filter": "translated",
"month": MONTH_AGO.strftime("%Y-%m")}),
("modified_calendar_month_7_month_ago",
{"filter": "translated",
"month": SEVEN_MONTHS_AGO.strftime("%Y-%m")}),
("modified_last_two_months",
{"modified_since": TWO_MONTHS_AGO.isoformat()}),
("modified_last_day",
{"modified_since": DAY_AGO.isoformat()}),
("filter_suggestions",
{"filter": "suggestions"}),
("filter_user_suggestions",
{"filter": "user-suggestions"}),
("filter_user_suggestions_accepted",
{"filter": "user-suggestions-accepted"}),
("filter_user_suggestions_rejected",
{"filter": "user-suggestions-rejected"}),
("filter_user_submissions",
{"filter": "user-submissions"}),
("filter_user_submissions_overwritten",
{"filter": "user-submissions-overwritten"}),
("filter_search_empty",
{"search": "SEARCH_NOT_EXIST",
"sfields": "source"}),
("filter_search_untranslated",
{"search": "untranslated",
"sfields": "source"}),
("filter_search_sfields_multi",
{"search": "SEARCH_NOT_EXIST",
"sfields": "source,target"}),
("sort_user_suggestion_newest",
{"sort": "newest",
"filter": "user-suggestions"}),
("sort_user_suggestion_oldest",
{"sort": "oldest",
"filter": "user-suggestions"}),
("checks_foo",
{"filter": "checks",
"checks": "foo"}),
("checks_endpunc",
{"filter": "checks",
"checks": ["endpunc"]}),
("checks_category_critical",
{"filter": "checks",
"category": "critical"})))
GET_VFOLDER_UNITS_TESTS = OrderedDict(
(("path_vfolder",
{"path": "/++vfolder/virtualfolder0/language0/project0/translate/"}), ))
LANGUAGE_VIEW_TESTS = OrderedDict(
(("browse", {}),
("translate", {})))
PROJECT_VIEW_TESTS = OrderedDict(
(("browse", {}),
("browse_directory",
{"dir_path": "subdir0/"}),
("browse_store",
{"filename": "store0.po"}),
("browse_directory_store",
{"dir_path": "subdir0/",
"filename": "store3.po"}),
("translate", {}),
("translate_directory",
{"dir_path": "subdir0/"}),
("translate_store",
{"filename": "store0.po"}),
("translate_directory_store",
{"dir_path": "subdir0/",
"filename": "store3.po"})))
TP_VIEW_TESTS = OrderedDict(
(("browse", {}),
("browse_directory",
{"dir_path": "subdir0/"}),
("browse_store",
{"filename": "store0.po"}),
("browse_directory_store",
{"dir_path": "subdir0/",
"filename": "store3.po"}),
("translate", {}),
("translate_directory",
{"dir_path": "subdir0/"}),
("translate_store",
{"filename": "store0.po"}),
("translate_directory_store",
{"dir_path": "subdir0/",
"filename": "store3.po"}),
("translate_no_vfolders_in_subdir",
{"dir_path": "subdir0/subdir1/"})))
VFOLDER_VIEW_TESTS = OrderedDict(
(("translate_vfolder",
{"dir_path": ""}),
("translate_vfolder_subdir",
{"dir_path": "subdir0/"})))
DISABLED_PROJECT_URL_PARAMS = OrderedDict(
(("project", {
"view_name": "pootle-project",
"project_code": "disabled_project0",
"dir_path": "",
"filename": ""}),
("tp", {
"view_name": "pootle-tp",
"project_code": "disabled_project0",
"language_code": "language0",
"dir_path": ""}),
("tp_subdir", {
"view_name": "pootle-tp",
"project_code": "disabled_project0",
"language_code": "language0",
"dir_path": "subdir0/"}),
("tp_store", {
"view_name": "pootle-tp-store",
"project_code": "disabled_project0",
"language_code": "language0",
"dir_path": "",
"filename": "store0.po"}),
("tp_subdir_store", {
"view_name": "pootle-tp-store",
"project_code": "disabled_project0",
"language_code": "language0",
"dir_path": "subdir0/",
"filename": "store1.po"})))
@pytest.fixture(params=GET_UNITS_TESTS.keys())
def get_units_views(request, client, request_users):
params = GET_UNITS_TESTS[request.param].copy()
params["path"] = params.get("path", "/language0/")
user = request_users["user"]
if user.username != "nobody":
client.login(
username=user.username,
password=request_users["password"])
if "uids" in params and callable(params["uids"]):
params["uids"] = ",".join(str(uid) for uid in params["uids"]())
url_params = urllib.urlencode(params, True)
response = client.get(
"%s?%s"
% (reverse("pootle-xhr-units"),
url_params),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
params["pootle_path"] = params["path"]
return user, params, url_params, response
@pytest.fixture(params=PROJECT_VIEW_TESTS.keys())
def project_views(request, client, request_users, settings):
from pootle_project.models import Project
test_kwargs = PROJECT_VIEW_TESTS[request.param].copy()
user = request_users["user"]
client.login(
username=user.username,
password=request_users["password"])
test_type = request.param.split("_")[0]
project = Project.objects.get(code="project0")
kwargs = {"project_code": project.code, "dir_path": "", "filename": ""}
kwargs.update(test_kwargs)
view_name = "pootle-project-%s" % test_type
response = client.get(reverse(view_name, kwargs=kwargs))
return test_type, project, response.wsgi_request, response, kwargs
@pytest.fixture(params=TP_VIEW_TESTS.keys())
def tp_views(request, client, request_users, settings):
from pootle_translationproject.models import TranslationProject
tp_view_test_names = request.param
user = request_users["user"]
test_type = tp_view_test_names.split("_")[0]
tp = TranslationProject.objects.all()[0]
tp_view = "pootle-tp"
kwargs = {
"project_code": tp.project.code,
"language_code": tp.language.code,
"dir_path": "",
"filename": ""}
test_kwargs = TP_VIEW_TESTS[request.param].copy()
kwargs.update(test_kwargs)
if kwargs.get("filename"):
tp_view = "%s-store" % tp_view
else:
del kwargs["filename"]
view_name = "%s-%s" % (tp_view, test_type)
if user.username != "nobody":
client.login(
username=user.username,
password=request_users["password"])
response = client.get(reverse(view_name, kwargs=kwargs))
kwargs["filename"] = kwargs.get("filename", "")
return test_type, tp, response.wsgi_request, response, kwargs
@pytest.fixture(params=LANGUAGE_VIEW_TESTS.keys())
def language_views(request, client):
from pootle_language.models import Language
test_type = request.param.split("_")[0]
language = Language.objects.get(code="language0")
kwargs = {"language_code": language.code}
kwargs.update(LANGUAGE_VIEW_TESTS[request.param])
view_name = "pootle-language-%s" % test_type
response = client.get(reverse(view_name, kwargs=kwargs))
return test_type, language, response.wsgi_request, response, kwargs
@pytest.fixture(params=BAD_VIEW_TESTS.keys())
def bad_views(request, client):
test = dict(code=404)
test.update(BAD_VIEW_TESTS[request.param])
if test.get("ajax"):
response = client.get(request.param, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
else:
response = client.get(request.param)
return (
request.param,
response,
test)
@pytest.fixture(params=[
("member", "member", {}),
# member doesn't have administrate permission, so cannot set member2 as uploader
("member", "member2", {"user_id": ""}),
("admin", "member2", {}),
])
def tp_uploads(request, client):
from pootle.core.delegate import language_team
from pootle_language.models import Language
from pootle_translationproject.models import TranslationProject
from pootle_store.models import Store
from django.contrib.auth import get_user_model
submitter_name, uploader_name, errors = request.param
uploader = get_user_model().objects.get(username=uploader_name)
tp = TranslationProject.objects.all()[0]
store = Store.objects.filter(parent=tp.directory)[0]
kwargs = {
"project_code": tp.project.code,
"language_code": tp.language.code,
"dir_path": "",
"filename": store.name}
password = TEST_USERS[submitter_name]['password']
language_team.get(Language)(tp.language).add_member(uploader, "submitter")
client.login(username=submitter_name, password=password)
updated_units = [
(unit.source_f, "%s UPDATED" % unit.target_f, False)
for unit in store.units
]
updated_store = create_store(store.pootle_path, "0", updated_units)
uploaded_file = SimpleUploadedFile(
store.name,
str(updated_store),
"text/x-gettext-translation"
)
response = client.post(
reverse("pootle-tp-store-browse", kwargs=kwargs),
{
'name': '',
'file': uploaded_file,
'user_id': uploader.id
}
)
return tp, response.wsgi_request, response, kwargs, errors
@pytest.fixture(params=("browse", "translate"))
def view_types(request):
"""List of possible view types."""
return request.param
@pytest.fixture(params=DISABLED_PROJECT_URL_PARAMS.keys())
def dp_view_urls(request, view_types):
"""List of url params required for disabled project tests."""
kwargs = DISABLED_PROJECT_URL_PARAMS[request.param].copy()
view_name = kwargs.pop("view_name")
view_name = "%s-%s" % (view_name, view_types)
return reverse(view_name, kwargs=kwargs)
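# Worked example, derived from the dicts above: for param "tp" with
# view_types == "browse" this reverses "pootle-tp-browse" with kwargs
# project_code="disabled_project0", language_code="language0", dir_path="".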
@pytest.fixture(params=VFOLDER_VIEW_TESTS.keys())
def vfolder_views(request, client, request_users, settings, tp0):
vfolder0 = tp0.stores.filter(
vfolders__isnull=False)[0].vfolders.first()
test_kwargs = VFOLDER_VIEW_TESTS[request.param].copy()
tp_view_test_names = request.param
user = request_users["user"]
test_type = tp_view_test_names.split("_")[0]
tp_view = "pootle-vfolder-tp"
kwargs = {
"vfolder_name": vfolder0.name,
"project_code": tp0.project.code,
"language_code": tp0.language.code,
"dir_path": "",
"filename": ""}
kwargs.update(test_kwargs)
del kwargs["filename"]
view_name = "%s-%s" % (tp_view, test_type)
if user.username != "nobody":
client.login(
username=user.username,
password=request_users["password"])
response = client.get(reverse(view_name, kwargs=kwargs))
kwargs["filename"] = kwargs.get("filename", "")
return test_type, tp0, response.wsgi_request, response, kwargs
@pytest.fixture(params=GET_VFOLDER_UNITS_TESTS.keys())
def get_vfolder_units_views(request, client, request_users):
from virtualfolder.models import VirtualFolder
params = GET_VFOLDER_UNITS_TESTS[request.param].copy()
params["path"] = params.get("path", "/language0/")
vfolder0 = VirtualFolder.objects.first()
user = request_users["user"]
if user.username != "nobody":
client.login(
username=user.username,
password=request_users["password"])
url_params = urllib.urlencode(params, True)
response = client.get(
"%s?%s"
% (reverse("vfolder-pootle-xhr-units",
kwargs=dict(vfolder_name=vfolder0.name)),
url_params),
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
params["pootle_path"] = params["path"]
return user, vfolder0, params, url_params, response
| 15,942 | Python | .py | 410 | 32.482927 | 84 | 0.626696 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,887 | formats.py | translate_pootle/pytest_pootle/fixtures/models/formats.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
@pytest.fixture
def po():
from pootle_format.models import Format
return Format.objects.get(name="po")
@pytest.fixture
def ts():
from pootle_format.models import Format
return Format.objects.get(name="ts")
@pytest.fixture
def po2():
from pootle.core.delegate import formats
registry = formats.get()
# register po2
return registry.register(
"special_po_2", "po2", template_extension="pot2")
@pytest.fixture
def xliff():
from pootle_format.models import Format
return Format.objects.get(name="xliff")
| 850 | Python | .py | 27 | 28.185185 | 77 | 0.74042 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,888 | storefs.py | translate_pootle/pytest_pootle/fixtures/models/storefs.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
from collections import OrderedDict
import pytest
TRANSLATION_PATHS = OrderedDict(
[("default", "gnu_style/po/<language_code>.<ext>"),
("subdir1",
"gnu_style_named_folders/po-<filename>/<language_code>.<ext>"),
("subdir2",
"gnu_style_named_files/po/<filename>-<language_code>.<ext>"),
("subdir3",
"non_gnu_style/locales/<language_code>/<dir_path>/<filename>.<ext>")])
@pytest.fixture
def pootle_fs_working_path(settings, tmpdir):
settings.POOTLE_FS_WORKING_PATH = str(tmpdir)
return str(tmpdir)
@pytest.fixture
def fs_src(pootle_fs_working_path):
src_path = os.path.join(pootle_fs_working_path, "__src__")
os.mkdir(src_path)
return src_path
@pytest.fixture
def tp0_store(po_directory, settings, tp0, fs_src):
from pootle_config.utils import ObjectConfig
from .store import _require_store
conf = ObjectConfig(tp0.project)
conf["pootle_fs.fs_type"] = "localfs"
conf["pootle_fs.fs_url"] = fs_src
conf["pootle_fs.translation_mappings"] = OrderedDict(TRANSLATION_PATHS)
return _require_store(
tp0,
settings.POOTLE_TRANSLATION_DIRECTORY, 'project0_fs.po')
@pytest.fixture
def tp0_store_fs(tp0_store):
"""Require the /en/project0/project0.po store."""
from pootle_fs.models import StoreFS
return StoreFS.objects.create(
store=tp0_store,
path="/some/fs/path")
| 1,682 | Python | .py | 45 | 33.155556 | 77 | 0.705737 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,889 | project.py | translate_pootle/pytest_pootle/fixtures/models/project.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import posixpath
import shutil
import pytest
def _require_project(code, name, source_language, settings, **kwargs):
"""Helper to get/create a new project."""
from pootle_project.models import Project
criteria = {
'code': code,
'fullname': name,
'source_language': source_language,
'checkstyle': 'standard'}
criteria.update(kwargs)
new_project = Project.objects.get_or_create(**criteria)[0]
new_project.config["pootle_fs.fs_type"] = "localfs"
new_project.config["pootle_fs.translation_mappings"] = {
"default": "/<language_code>/<dir_path>/<filename>.<ext>"}
new_project.config["pootle_fs.fs_url"] = posixpath.join(
settings.POOTLE_TRANSLATION_DIRECTORY,
"tutorial")
return new_project
@pytest.fixture
def tutorial(english, settings):
"""Require `tutorial` test project."""
import pytest_pootle
from pootle_fs.utils import FSPlugin
shutil.copytree(
os.path.join(
os.path.dirname(pytest_pootle.__file__),
"data", "po", "tutorial"),
os.path.join(
settings.POOTLE_TRANSLATION_DIRECTORY,
"tutorial"))
project = _require_project('tutorial', 'Tutorial', english, settings)
plugin = FSPlugin(project)
plugin.fetch()
plugin.add()
plugin.sync()
return project
@pytest.fixture
def tutorial_disabled(english, settings):
"""Require `tutorial-disabled` test project in a disabled state."""
return _require_project(
'tutorial-disabled',
'Tutorial',
english,
settings,
disabled=True)
@pytest.fixture
def project_foo(english, settings):
"""Require `foo` test project."""
return _require_project('foo', 'Foo Project', english, settings)
@pytest.fixture
def project_bar(english, settings):
"""Require `bar` test project."""
return _require_project('bar', 'Bar Project', english, settings)
@pytest.fixture
def project0():
"""project0 Project"""
from pootle_project.models import Project
return Project.objects.select_related(
"source_language").get(code="project0")
@pytest.fixture
def project1():
"""project0 Project"""
from pootle_project.models import Project
return Project.objects.select_related(
"source_language").get(code="project1")
@pytest.fixture
def project0_directory(po_directory, project0):
"""project0 Project"""
return project0
@pytest.fixture
def project0_nongnu(project0_directory, project0, settings):
project_dir = os.path.join(
settings.POOTLE_TRANSLATION_DIRECTORY, project0.code)
if not os.path.exists(project_dir):
os.makedirs(project_dir)
for tp in project0.translationproject_set.all():
tp.save()
return project0
@pytest.fixture
def project_dir_resources0(project0, subdir0):
"""Returns a ProjectResource object for a Directory"""
from pootle_app.models import Directory
from pootle_project.models import ProjectResource
resources = Directory.objects.live().filter(
name=subdir0.name,
parent__translationproject__project=project0)
return ProjectResource(
resources,
("/projects/%s/%s"
% (project0.code,
subdir0.name)))
@pytest.fixture
def project_store_resources0(project0, subdir0):
"""Returns a ProjectResource object for a Store"""
from pootle_project.models import ProjectResource
from pootle_store.models import Store
store = subdir0.child_stores.live().first()
resources = Store.objects.live().filter(
name=store.name,
parent__name=subdir0.name,
translation_project__project=project0)
return ProjectResource(
resources,
("/projects/%s/%s/%s"
% (project0.code,
subdir0.name,
store.name)))
@pytest.fixture
def project_set():
from pootle_project.models import Project, ProjectSet
return ProjectSet(Project.objects.exclude(disabled=True))
| 4,296 | Python | .py | 121 | 29.85124 | 77 | 0.691638 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,890 | user.py | translate_pootle/pytest_pootle/fixtures/models/user.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import copy
import pytest
from .language import language0
TEST_USERS = {
'nobody': dict(
fullname='Nobody',
password=''),
'system': dict(
fullname='System',
password=''),
'default': dict(
fullname='Default',
password=''),
'admin': dict(
fullname='Admin',
password='admin',
is_superuser=True,
email="admin@poot.le"),
'member': dict(
fullname='Member',
password='member',
alt_src_lang=language0),
'member2': dict(
fullname='Member2',
password='member2')}
@pytest.fixture(
scope="session",
params=["nobody", "admin", "member", "member2"])
def request_users(request):
    from django.contrib.auth import get_user_model
    from django.core.cache import cache
    from django.utils.encoding import iri_to_uri
    test_user = copy.deepcopy(TEST_USERS[request.param])
    # TEST_USERS entries don't carry the user object itself, so fetch it
    # here; the permissions cache for that user is then invalidated.
    test_user["user"] = get_user_model().objects.get(username=request.param)
    key = iri_to_uri('Permissions:%s' % test_user["user"].username)
    cache.delete(key)
    return test_user
@pytest.fixture(scope="session", params=TEST_USERS.keys())
def site_users(request):
return copy.deepcopy(TEST_USERS[request.param])
def _require_user(username, fullname, password=None,
is_superuser=False, email=None, alt_src_lang=None):
"""Helper to get/create a new user."""
from accounts.utils import verify_user
from django.contrib.auth import get_user_model
User = get_user_model()
criteria = {
'username': username,
'full_name': fullname,
'is_active': True,
'is_superuser': is_superuser,
}
user, created = User.objects.get_or_create(**criteria)
if created:
if password is None:
user.set_unusable_password()
else:
user.set_password(password)
if email:
user.email = email
user.save()
if email:
verify_user(user)
if alt_src_lang is not None:
user.alt_src_langs.add(alt_src_lang())
return user
@pytest.fixture
def nobody():
"""Require the default anonymous user."""
from django.contrib.auth import get_user_model
return get_user_model().objects.get_nobody_user()
@pytest.fixture
def default():
"""Require the default authenticated user."""
from django.contrib.auth import get_user_model
return get_user_model().objects.get_default_user()
@pytest.fixture
def system():
"""Require the system user."""
from django.contrib.auth import get_user_model
return get_user_model().objects.get_system_user()
@pytest.fixture
def admin():
"""Require the admin user."""
from django.contrib.auth import get_user_model
return get_user_model().objects.get(username="admin")
@pytest.fixture
def member():
"""Require a member user."""
from django.contrib.auth import get_user_model
return get_user_model().objects.get(username="member")
@pytest.fixture
def trans_member():
"""Require a member user."""
return _require_user('trans_member', 'Transactional member')
@pytest.fixture
def member_with_email():
"""Require a member user."""
user = _require_user('member_with_email', 'Member with email')
user.email = "member_with_email@this.test"
user.save()
return user
@pytest.fixture
def member2():
"""Require a member2 user."""
from django.contrib.auth import get_user_model
return get_user_model().objects.get(username="member2")
@pytest.fixture
def member2_with_email():
"""Require a member2 user."""
user = _require_user('member2_with_email', 'Member2 with email')
user.email = "member2_with_email@this.test"
user.save()
return user
@pytest.fixture
def evil_member():
"""Require a evil_member user."""
return _require_user('evil_member', 'Evil member')
@pytest.fixture
def no_perms_user():
"""Require a user with no permissions."""
return _require_user('no_perms_member', 'User with no permissions')
| 4,196 | Python | .py | 127 | 27.755906 | 77 | 0.669978 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,891 | config.py | translate_pootle/pytest_pootle/fixtures/models/config.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from collections import OrderedDict
import pytest
BAD_CONFIG_FLAGS = OrderedDict(
[("get_and_set",
("-s", "foo", "bar", "-g", "foo")),
("get_and_append",
("-a", "foo", "bar", "-g")),
("get_and_list",
("-l", "-g")),
("get_and_clear",
("-c", "foo", "-g")),
("list_and_set",
("-s", "foo", "bar", "-l")),
("list_and_append",
("-a", "foo", "bar", "-l")),
("list_and_clear",
("-c", "foo", "-l")),
("set_and_append",
("-a", "foo", "bar", "-s", "foo2", "bar2")),
("set_and_clear",
("-c", "foo", "-s", "foo2", "bar2")),
("bad_ct", "foobar"),
("missing_ct", "foo.bar")])
@pytest.fixture(params=BAD_CONFIG_FLAGS.keys())
def bad_config_flags(request):
return BAD_CONFIG_FLAGS[request.param]
@pytest.fixture
def no_config_env():
from pootle_config.models import Config
Config.objects.all().delete()
| 1,189 | Python | .py | 37 | 27.675676 | 77 | 0.567308 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,892 | statistics.py | translate_pootle/pytest_pootle/fixtures/models/statistics.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
@pytest.fixture(scope="session")
def submissions():
"""A dictionary of submission.id, submission for all
submissions created in test env
as this fixture is session-scoped tests should not change its contents
"""
from pootle_statistics.models import Submission
select_related = (
"unit", "quality_check", "submitter", "suggestion")
return {
s.id: s
for s
in Submission.objects.select_related(
*select_related).iterator()}
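# Illustrative sketch only (not part of the original fixtures): because
# ``submissions`` is session-scoped, a test that needs to mutate the mapping
# should work on a per-test copy, e.g. via a function-scoped wrapper.
@pytest.fixture
def submissions_copy(submissions):
    """A per-test shallow copy of the session-scoped ``submissions`` dict."""
    return dict(submissions)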
| 787 | Python | .py | 22 | 31.272727 | 77 | 0.706579 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,893 | language.py | translate_pootle/pytest_pootle/fixtures/models/language.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
"""Language fixtures.
NOTE: when adding new language fixtures, they should require the
``english`` fixture first, otherwise the behavior can be unpredictable
when creating projects and translation projects later on.
"""
import pytest
def _require_language(code, fullname, plurals=2, plural_equation='(n != 1)'):
"""Helper to get/create a new language."""
from pootle_language.models import Language
criteria = {
'code': code,
'fullname': fullname,
'nplurals': plurals,
'pluralequation': plural_equation,
}
language, created = Language.objects.get_or_create(**criteria)
if created:
language.save()
return language
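# Illustrative sketch only (not part of the original fixtures): per the
# module docstring above, a new language fixture should require ``english``
# first so that later project creation behaves predictably. The code and
# name used here are made up.
@pytest.fixture
def klingon(english):
    """Require the Klingon language (example only)."""
    return _require_language('tlh', 'Klingon')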
@pytest.fixture
def english():
"""Require the English language."""
from pootle_language.models import Language
return Language.objects.get(code="en")
@pytest.fixture
def templates():
"""Require the special Templates language."""
from pootle_language.models import Language
return Language.objects.get(code="templates")
@pytest.fixture
def afrikaans():
"""Require the Afrikaans language."""
return _require_language('af', 'Afrikaans')
@pytest.fixture
def italian():
"""Require the Italian language."""
return _require_language('it', 'Italian')
@pytest.fixture
def language0():
"""language0 Language"""
from pootle_language.models import Language
return Language.objects.get(code="language0")
@pytest.fixture
def language1():
"""language1 Language"""
from pootle_language.models import Language
return Language.objects.get(code="language1")
| 1,877 | Python | .py | 54 | 30.907407 | 77 | 0.727929 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,894 | translation_project.py | translate_pootle/pytest_pootle/fixtures/models/translation_project.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle.core.delegate import tp_tool
def pytest_generate_tests(metafunc):
from pootle_project.models import PROJECT_CHECKERS
if 'checkers' in metafunc.funcargnames:
metafunc.parametrize("checkers", PROJECT_CHECKERS.keys())
def _require_tp(language, project):
"""Helper to get/create a new translation project."""
from pootle_translationproject.models import TranslationProject
tp, __ = TranslationProject.objects.get_or_create(
language=language, project=project)
return tp
@pytest.fixture
def afrikaans_tutorial(afrikaans, tutorial):
"""Require Afrikaans Tutorial."""
return _require_tp(afrikaans, tutorial)
@pytest.fixture
def en_tutorial_obsolete(english_tutorial):
"""Require Arabic Tutorial in obsolete state."""
english_tutorial.directory.makeobsolete()
return english_tutorial
@pytest.fixture
def english_tutorial(english, tutorial):
"""Require English Tutorial."""
return _require_tp(english, tutorial)
@pytest.fixture
def italian_tutorial(italian, tutorial):
"""Require Italian Tutorial."""
return _require_tp(italian, tutorial)
@pytest.fixture
def tp_checker_tests(request, english, checkers):
from pytest_pootle.factories import ProjectDBFactory
checker_name = checkers
project = ProjectDBFactory(
checkstyle=checker_name,
source_language=english)
return (checker_name, project)
@pytest.fixture
def templates_project0(request, templates, project0):
"""Require the templates/project0/ translation project."""
tps = project0.translationproject_set.select_related(
"data",
"directory")
template_tp = tps.get(language=templates)
template_tp.language = templates
return template_tp
@pytest.fixture
def tp0(language0, project0):
"""Require English Project0."""
tps = project0.translationproject_set.select_related(
"data",
"directory")
tp0 = tps.get(language=language0)
tp0.language = language0
return tp0
@pytest.fixture
def no_tp_tool_(request):
start_receivers = tp_tool.receivers
tp_tool.receivers = []
def _reset_tp_tool():
tp_tool.receivers = start_receivers
request.addfinalizer(_reset_tp_tool)
| 2,531 | Python | .py | 69 | 32.333333 | 77 | 0.741684 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,895 | permission.py | translate_pootle/pytest_pootle/fixtures/models/permission.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
@pytest.fixture(scope="session")
def pootle_content_type():
"""Require the pootle ContentType."""
from django.contrib.contenttypes.models import ContentType
args = {
'app_label': 'pootle_app',
'model': 'directory',
}
return ContentType.objects.get(**args)
def _require_permission(code, name, content_type):
"""Helper to get/create a new permission."""
from django.contrib.auth.models import Permission
criteria = {
'codename': code,
'name': name,
'content_type': content_type,
}
permission = Permission.objects.get_or_create(**criteria)[0]
return permission
@pytest.fixture(scope="session")
def view(pootle_content_type):
"""Require the `view` permission."""
return _require_permission('view', 'Can access a project',
pootle_content_type)
@pytest.fixture(scope="session")
def hide(pootle_content_type):
"""Require the `hide` permission."""
return _require_permission('hide', 'Cannot access a project',
pootle_content_type)
@pytest.fixture(scope="session")
def administrate(pootle_content_type):
"""Require the `suggest` permission."""
return _require_permission('administrate', 'Can administrate a TP',
pootle_content_type)
@pytest.fixture
def translate():
"""Require the `translate` permission."""
from django.contrib.auth.models import Permission
return Permission.objects.get(codename="translate")
| 1,820 | Python | .py | 47 | 32.829787 | 77 | 0.680524 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,896 | vfolder.py | translate_pootle/pytest_pootle/fixtures/models/vfolder.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
VF_RULE_TESTS = [
"*",
"store.po",
"*/subdir0/*"]
@pytest.fixture(params=VF_RULE_TESTS)
def vf_rules(request):
return request.param
@pytest.fixture
def vfolder0():
from virtualfolder.models import VirtualFolder
return VirtualFolder.objects.first()
| 569 | Python | .py | 19 | 27.263158 | 77 | 0.739852 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,897 | unit.py | translate_pootle/pytest_pootle/fixtures/models/unit.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from translate.storage.factory import getclass
@pytest.fixture
def unit_syncer(store0):
from pootle_store.constants import TRANSLATED
unit = store0.units.filter(state=TRANSLATED).first()
ttk = getclass(store0)
return unit, ttk.UnitClass
@pytest.fixture
def unit_plural(store0):
from pootle_store.constants import TRANSLATED
from ...factories import UnitDBFactory
return UnitDBFactory(
store=store0,
state=TRANSLATED,
source=["%d day ago", "%d days ago"],
target=["%d dag gelede", "%d dae gelede"]
)
@pytest.fixture
def get_edit_unit(default):
from pootle_store.constants import TRANSLATED
from pootle_store.models import Store, Unit
from ...factories import UnitDBFactory
unit = Unit.objects.get_translatable(
default,
language_code='language1'
).filter(state=TRANSLATED).first()
store_path = unit.store.pootle_path.replace('language1', 'language0')
store = Store.objects.filter(pootle_path=store_path).first()
    # create a unit which will be handled as an alternative source
UnitDBFactory(store=store, source_f=unit.source, state=TRANSLATED)
return unit
| 1,470 | Python | .py | 39 | 33.282051 | 77 | 0.734133 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,898 | store.py | translate_pootle/pytest_pootle/fixtures/models/store.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from __future__ import print_function
from collections import OrderedDict
import pytest
from pytest_pootle.factories import (
LanguageDBFactory, StoreDBFactory, TranslationProjectFactory)
from pytest_pootle.utils import create_store, update_store
from translate.storage.factory import getclass
def _update_fuzzy(is_fuzzy, source):
if source == 'Unit 4':
return not is_fuzzy
return is_fuzzy
DEFAULT_STORE_UNITS_1 = [("Unit 1", "Unit 1", False),
("Unit 2", "Unit 2", False)]
DEFAULT_STORE_UNITS_2 = [("Unit 3", "Unit 3", False),
("Unit 4", "Unit 4", True),
("Unit 5", "Unit 5", False)]
DEFAULT_STORE_UNITS_3 = [("Unit 6", "Unit 6", False),
("Unit 7", "Unit 7", True),
("Unit 8", "Unit 8", False)]
UPDATED_STORE_UNITS_1 = [(src, "UPDATED %s" % target, is_fuzzy)
for src, target, is_fuzzy
in DEFAULT_STORE_UNITS_1]
UPDATED_STORE_UNITS_2 = [(src, "UPDATED %s" % target, _update_fuzzy(is_fuzzy, src))
for src, target, is_fuzzy
in DEFAULT_STORE_UNITS_2]
UPDATED_STORE_UNITS_3 = [(src, "UPDATED %s" % target, is_fuzzy)
for src, target, is_fuzzy
in DEFAULT_STORE_UNITS_3]
TEST_UPDATE_PO = "tests/data/po/tutorial/en/tutorial_update.po"
TEST_EVIL_UPDATE_PO = "tests/data/po/tutorial/en/tutorial_update_evil.po"
UPDATE_STORE_TESTS = OrderedDict()
UPDATE_STORE_TESTS['min_empty'] = {"update_store": (0, [])}
UPDATE_STORE_TESTS['min_new_units'] = {
"update_store": (0, DEFAULT_STORE_UNITS_3)
}
UPDATE_STORE_TESTS['old_empty'] = {"update_store": ("MID", [])}
UPDATE_STORE_TESTS['old_subset_1'] = {
"update_store": ("MID", UPDATED_STORE_UNITS_1)
}
UPDATE_STORE_TESTS['old_subset_2'] = {
"update_store": ("MID", UPDATED_STORE_UNITS_2)
}
UPDATE_STORE_TESTS['old_subset_2_pootle_wins'] = {
"update_store": ("MID", UPDATED_STORE_UNITS_2),
"fs_wins": False
}
UPDATE_STORE_TESTS['old_same_updated'] = {
"update_store": ("MID", UPDATED_STORE_UNITS_1 + UPDATED_STORE_UNITS_2)
}
UPDATE_STORE_TESTS['old_same_updated_pootle_wins'] = {
"update_store": ("MID", UPDATED_STORE_UNITS_1 + UPDATED_STORE_UNITS_2),
"fs_wins": False
}
UPDATE_STORE_TESTS['old_unobsolete'] = {
"setup": [DEFAULT_STORE_UNITS_1,
DEFAULT_STORE_UNITS_2,
[]],
"update_store": ("MID", UPDATED_STORE_UNITS_1 + UPDATED_STORE_UNITS_2)
}
UPDATE_STORE_TESTS['old_merge'] = {
"update_store": ("MID", UPDATED_STORE_UNITS_1 + UPDATED_STORE_UNITS_3)
}
UPDATE_STORE_TESTS['max_empty'] = {"update_store": ("MAX", [])}
UPDATE_STORE_TESTS['max_subset'] = {
"update_store": ("MAX", DEFAULT_STORE_UNITS_1)
}
UPDATE_STORE_TESTS['max_same'] = {
"update_store": ("MAX", DEFAULT_STORE_UNITS_1 + DEFAULT_STORE_UNITS_2)
}
UPDATE_STORE_TESTS['max_new_units'] = {
"update_store": ("MAX",
(DEFAULT_STORE_UNITS_1
+ DEFAULT_STORE_UNITS_2
+ DEFAULT_STORE_UNITS_3))
}
UPDATE_STORE_TESTS['max_change_order'] = {
"update_store": ("MAX", DEFAULT_STORE_UNITS_2 + DEFAULT_STORE_UNITS_1)
}
UPDATE_STORE_TESTS['max_unobsolete'] = {
"setup": [DEFAULT_STORE_UNITS_1 + DEFAULT_STORE_UNITS_2,
DEFAULT_STORE_UNITS_1],
"update_store": ("MAX", DEFAULT_STORE_UNITS_1 + DEFAULT_STORE_UNITS_2)
}
UPDATE_STORE_TESTS['max_obsolete'] = {
"setup": [DEFAULT_STORE_UNITS_1,
(DEFAULT_STORE_UNITS_1
+ DEFAULT_STORE_UNITS_2
+ DEFAULT_STORE_UNITS_3)],
"update_store": ("MAX", DEFAULT_STORE_UNITS_1 + DEFAULT_STORE_UNITS_3)
}
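# Note (added for clarity): "MAX" and "MID" are sentinels resolved in
# _setup_store_test below: "MAX" becomes the store's current max unit
# revision, "MID" the average of the units' revisions.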
def _setup_store_test(store, member, member2, test):
from pootle_store.constants import POOTLE_WINS, SOURCE_WINS
setup = test.get("setup", None)
if setup is None:
setup = [(DEFAULT_STORE_UNITS_1),
(DEFAULT_STORE_UNITS_1 + DEFAULT_STORE_UNITS_2)]
for units in setup:
store_revision = store.get_max_unit_revision()
ttkstore = create_store(units=units)
for unit in ttkstore.units[1:]:
unit.addnote(
origin="translator",
text=("Set up unit(%s) with store_revision: %s"
% (unit.source, store_revision)))
store.update(
store=ttkstore,
store_revision=store_revision,
user=member)
store_revision, units_update = test["update_store"]
units_before = [
(unit, unit.change)
for unit in store.unit_set.select_related("change").all().order_by("index")]
fs_wins = test.get("fs_wins", True)
if fs_wins:
resolve_conflict = SOURCE_WINS
else:
resolve_conflict = POOTLE_WINS
if store_revision == "MAX":
store_revision = store.get_max_unit_revision()
elif store_revision == "MID":
revisions = [unit.revision for unit, change in units_before]
store_revision = sum(revisions) / len(revisions)
return (store, units_update, store_revision, resolve_conflict,
units_before, member, member2)
@pytest.fixture(params=UPDATE_STORE_TESTS.keys())
def store_diff_tests(request, tp0, member, member2):
from pootle_store.contextmanagers import update_store_after
from pootle_store.diff import StoreDiff
store = StoreDBFactory(
translation_project=tp0,
parent=tp0.directory)
with update_store_after(store):
test = _setup_store_test(store, member, member2,
UPDATE_STORE_TESTS[request.param])
test_store = create_store(units=test[1])
return [StoreDiff(test[0], test_store, test[2])] + list(test[:3])
@pytest.fixture(params=UPDATE_STORE_TESTS.keys())
def param_update_store_test(request, tp0, member, member2):
from pootle.core.contextmanagers import keep_data
from pootle.core.signals import update_data
store = StoreDBFactory(
translation_project=tp0,
parent=tp0.directory)
with keep_data():
test = _setup_store_test(
store, member, member2,
UPDATE_STORE_TESTS[request.param])
update_data.send(store.__class__, instance=store)
with keep_data():
update_store(
test[0],
units=test[1],
store_revision=test[2],
user=member2,
resolve_conflict=test[3])
update_data.send(store.__class__, instance=store)
return test
def _require_store(tp, po_dir, name):
"""Helper to get/create a new store."""
from pootle_store.models import Store
parent_dir = tp.directory
pootle_path = tp.pootle_path + name
try:
store = Store.objects.get(
pootle_path=pootle_path,
translation_project=tp,
)
except Store.DoesNotExist:
store = Store.objects.create_by_path(
create_tp=False,
create_directory=False,
pootle_path=(
"%s%s"
% (parent_dir.pootle_path,
name)))
return store
def _create_submission_and_suggestion(store, user,
units=None,
suggestion="SUGGESTION"):
from pootle.core.delegate import review
from pootle.core.models import Revision
from pootle_store.models import Suggestion
# Update store as user
if units is None:
units = [("Hello, world", "Hello, world UPDATED", False)]
update_store(
store,
units,
user=user,
store_revision=Revision.get() + 1)
# Add a suggestion
unit = store.units[0]
review.get(Suggestion)().add(unit, suggestion, user)
return unit
def _create_comment_on_unit(unit, user, comment):
unit.translator_comment = comment
unit.save(user=user)
def _mark_unit_fuzzy(unit, user):
unit.markfuzzy()
unit.save(user=user)
def _make_member_updates(store, member):
from pootle_store.contextmanagers import update_store_after
    # Member updates the first unit, adds a suggestion and marks the unit fuzzy
with update_store_after(store):
_create_submission_and_suggestion(store, member)
_create_comment_on_unit(store.units[0], member, "NICE COMMENT")
_mark_unit_fuzzy(store.units[0], member)
@pytest.fixture
def af_tutorial_po(po_directory, settings, afrikaans_tutorial):
"""Require the /af/tutorial/tutorial.po store."""
return _require_store(afrikaans_tutorial,
settings.POOTLE_TRANSLATION_DIRECTORY, 'tutorial.po')
@pytest.fixture
def en_tutorial_po(po_directory, settings, english_tutorial):
"""Require the /en/tutorial/tutorial.po store."""
return _require_store(english_tutorial,
settings.POOTLE_TRANSLATION_DIRECTORY, 'tutorial.po')
@pytest.fixture
def en_tutorial_po_member_updated(po_directory, settings, english_tutorial, member):
"""Require the /en/tutorial/tutorial.po store."""
store = _require_store(english_tutorial,
settings.POOTLE_TRANSLATION_DIRECTORY,
'tutorial.po')
_make_member_updates(store, member)
return store
@pytest.fixture
def it_tutorial_po(po_directory, settings, italian_tutorial):
"""Require the /it/tutorial/tutorial.po store."""
return _require_store(italian_tutorial,
settings.POOTLE_TRANSLATION_DIRECTORY, 'tutorial.po')
@pytest.fixture
def issue_2401_po(po_directory, settings, afrikaans_tutorial):
"""Require the /af/tutorial/issue_2401.po store."""
return _require_store(afrikaans_tutorial,
settings.POOTLE_TRANSLATION_DIRECTORY,
'issue_2401.po')
@pytest.fixture
def store_po(tp0):
"""An empty Store in the /language0/project0 TP"""
from pootle_translationproject.models import TranslationProject
tp = TranslationProject.objects.get(
project__code="project0",
language__code="language0")
store = StoreDBFactory(
parent=tp.directory,
translation_project=tp,
name="test_store.po")
return store
@pytest.fixture(scope="session")
def complex_ttk(test_fs):
with test_fs.open(("data", "po", "complex.po")) as f:
ttk = getclass(f)(f.read())
return ttk
@pytest.fixture
def complex_po():
from pootle_store.models import Store
return Store.objects.get(name="complex.po")
@pytest.fixture
def no_complex_po_():
from pootle_store.models import Store
Store.objects.get(name="complex.po").delete()
@pytest.fixture
def diffable_stores(complex_po, request):
from pootle.core.delegate import format_diffs
from pootle_store.models import Store
from pootle_translationproject.models import TranslationProject
start_receivers = format_diffs.receivers
tp = TranslationProject.objects.get(
language=complex_po.translation_project.language,
project__code="project1")
other_po = Store.objects.create(
name="complex.po",
translation_project=tp,
parent=tp.directory,
pootle_path=complex_po.pootle_path.replace("project0", "project1"))
other_po.update(other_po.deserialize(complex_po.serialize()))
def _reset_format_diffs():
format_diffs.receivers = start_receivers
request.addfinalizer(_reset_format_diffs)
return complex_po, other_po
@pytest.fixture
def dummy_store_structure_syncer():
from pootle_store.syncer import StoreSyncer
from django.utils.functional import cached_property
class DummyUnit(object):
def __init__(self, unit, expected):
self.unit = unit
self.expected = expected
def convert(self, unit_class):
assert unit_class == self.expected["unit_class"]
return self.unit, unit_class
class DummyDiskStore(object):
def __init__(self, expected):
self.expected = expected
self.UnitClass = expected["unit_class"]
@cached_property
def _units(self):
for unit in self.expected["new_units"]:
yield unit
def addunit(self, newunit):
unit, unit_class = newunit
assert unit == self._units.next().unit
assert unit_class == self.UnitClass
class DummyStoreSyncer(StoreSyncer):
def __init__(self, *args, **kwargs):
self.expected = kwargs.pop("expected")
super(DummyStoreSyncer, self).__init__(*args, **kwargs)
@cached_property
def _units(self):
for unit in self.expected["obsolete_units"]:
yield unit
def obsolete_unit(self, unit, conservative):
assert conservative == self.expected["conservative"]
assert unit == self._units.next()
return self.expected["obsolete_delete"]
return DummyStoreSyncer, DummyDiskStore, DummyUnit
@pytest.fixture
def dummy_store_syncer_units():
from pootle_store.syncer import StoreSyncer
class DummyStore(object):
def __init__(self, expected):
self.expected = expected
def findid_bulk(self, uids):
return uids
class DummyStoreSyncer(StoreSyncer):
def __init__(self, *args, **kwargs):
self.expected = kwargs.pop("expected")
super(DummyStoreSyncer, self).__init__(*args, **kwargs)
self.store = DummyStore(self.expected)
@property
def dbid_index(self):
return self.expected["db_ids"]
return DummyStoreSyncer
@pytest.fixture
def dummy_store_syncer():
from pootle_store.syncer import StoreSyncer
class DummyDiskStore(object):
def __init__(self, expected):
self.expected = expected
def getids(self):
return self.expected["disk_ids"]
class DummyStoreSyncer(StoreSyncer):
def __init__(self, *args, **kwargs):
self.expected = kwargs.pop("expected")
super(DummyStoreSyncer, self).__init__(*args, **kwargs)
@property
def dbid_index(self):
return self.expected["db_index"]
def get_units_to_obsolete(self, disk_store, old_ids_, new_ids_):
return self.expected["obsolete_units"]
def get_new_units(self, old_ids, new_ids):
assert old_ids == set(self.expected["disk_ids"])
assert new_ids == set(self.expected["db_index"].keys())
return self.expected["new_units"]
def get_common_units(self, units_, last_revision, conservative):
assert last_revision == self.expected["last_revision"]
assert conservative == self.expected["conservative"]
return self.expected["common_units"]
def update_structure(self, disk_store, obsolete_units,
new_units, conservative):
assert obsolete_units == self.expected["obsolete_units"]
assert new_units == self.expected["new_units"]
assert conservative == self.expected["conservative"]
return self.expected["structure_changed"]
def sync_units(self, disk_store, units):
assert units == self.expected["common_units"]
return self.expected["changes"]
expected = dict(
last_revision=23,
conservative=True,
update_structure=False,
disk_ids=[5, 6, 7],
db_index={"a": 1, "b": 2, "c": 3},
structure_changed=(8, 9, 10),
obsolete_units=["obsolete", "units"],
new_units=["new", "units"],
common_units=["common", "units"],
changes=["some", "changes"])
return DummyStoreSyncer, DummyDiskStore, expected
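# Illustrative sketch only (not part of the original fixtures): a test
# consuming the fixture above might exercise the dummy classes like this:
#
#     syncer_class, disk_store_class, expected = dummy_store_syncer
#     disk_store = disk_store_class(expected)
#     assert disk_store.getids() == expected["disk_ids"]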
@pytest.fixture
def store0(tp0):
stores = tp0.stores.select_related(
"data",
"parent",
"filetype__extension",
"filetype__template_extension")
return stores.get(name="store0.po")
@pytest.fixture
def ordered_po(test_fs, tp0):
"""Create a store with ordered units."""
store = StoreDBFactory(
name="ordered.po",
translation_project=tp0,
parent=tp0.directory)
with test_fs.open("data/po/ordered.po") as src:
store.update(store.deserialize(src.read()))
return store
@pytest.fixture
def numbered_po(test_fs, project0):
"""Create a store with numbered units."""
tp = TranslationProjectFactory(
project=project0,
language=LanguageDBFactory())
store = StoreDBFactory(
name="numbered.po",
translation_project=tp,
parent=tp.directory)
with test_fs.open("data/po/1234.po") as src:
store.update(store.deserialize(src.read()))
return store
@pytest.fixture
def ordered_update_ttk(test_fs, store0):
with test_fs.open("data/po/ordered_updated.po") as src:
ttk = store0.deserialize(src.read())
return ttk
| 17,251 | Python | .py | 422 | 32.770142 | 84 | 0.635966 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |
| 14,899 | permission_set.py | translate_pootle/pytest_pootle/fixtures/models/permission_set.py |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
def _require_permission_set(user, directory, positive_permissions=None,
negative_permissions=None):
"""Helper to get/create a new PermissionSet."""
from pootle_app.models.permissions import PermissionSet
criteria = {
'user': user,
'directory': directory,
}
permission_set = PermissionSet.objects.get_or_create(**criteria)[0]
if positive_permissions is not None:
permission_set.positive_permissions.set(positive_permissions)
if negative_permissions is not None:
permission_set.negative_permissions.set(negative_permissions)
permission_set.save()
return permission_set
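# Illustrative sketch only (not part of the original file; ``member``,
# ``view`` and ``tp0`` are fixtures defined elsewhere in this fixture set):
# a fixture built on the helper above might grant a user `view` on a TP.
import pytest  # example-only import; the original module does not use pytest


@pytest.fixture
def member_view_permission_set(member, view, tp0):
    """PermissionSet granting `member` positive `view` on tp0 (sketch)."""
    return _require_permission_set(
        member, tp0.directory, positive_permissions=[view])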
| 943 | Python | .py | 22 | 37.272727 | 77 | 0.718341 | translate/pootle | 1,486 | 288 | 526 | GPL-3.0 | 9/5/2024, 5:11:58 PM (Europe/Amsterdam) |