_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q26100
|
ESEDBParser.ParseFileObject
|
train
|
def ParseFileObject(self, parser_mediator, file_object):
    """Parses an ESE database file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.
    """
    database = pyesedb.file()

    try:
        database.open_file_object(file_object)
    except IOError as exception:
        parser_mediator.ProduceExtractionWarning(
            'unable to open file with error: {0!s}'.format(exception))
        return

    cache = ESEDBCache()
    try:
        # Only run the plugins whose required tables are all present in
        # this database.
        available_tables = frozenset(self._GetTableNames(database))

        for plugin in self._plugins:
            if parser_mediator.abort:
                break

            if not plugin.required_tables.issubset(available_tables):
                continue

            try:
                plugin.UpdateChainAndProcess(
                    parser_mediator, cache=cache, database=database)

            except Exception as exception:  # pylint: disable=broad-except
                # A single failing plugin should not abort the others.
                parser_mediator.ProduceExtractionWarning((
                    'plugin: {0:s} unable to parse ESE database with error: '
                    '{1!s}').format(plugin.NAME, exception))

    finally:
        # TODO: explicitly clean up cache.
        database.close()
|
python
|
{
"resource": ""
}
|
q26101
|
ChromeExtensionActivityPlugin.ParseActivityLogUncompressedRow
|
train
|
def ParseActivityLogUncompressedRow(
    self, parser_mediator, query, row, **unused_kwargs):
    """Parses an activity log row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)

    event_data = ChromeExtensionActivityEventData()
    # The event data attribute names match the column names, so copy them
    # over in a single pass.
    for attribute_name in (
        'action_type', 'activity_id', 'api_name', 'arg_url', 'args',
        'extension_id', 'other', 'page_title', 'page_url'):
        setattr(event_data, attribute_name,
                self._GetRowValue(query_hash, row, attribute_name))
    event_data.query = query

    timestamp = self._GetRowValue(query_hash, row, 'time')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_UNKNOWN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
|
python
|
{
"resource": ""
}
|
q26102
|
Sqlite3DatabaseFile.Close
|
train
|
def Close(self):
    """Closes the database file.

    Commits outstanding changes before closing, since data is only
    persisted to the database on commit.

    Raises:
        RuntimeError: if the database is not opened.
    """
    if not self._connection:
        raise RuntimeError('Cannot close database not opened.')

    connection = self._connection
    # We need to run commit or not all data is stored in the database.
    connection.commit()
    connection.close()

    # Reset all state so the object can be reused with Open().
    self._connection = None
    self._cursor = None
    self.filename = None
    self.read_only = None
|
python
|
{
"resource": ""
}
|
q26103
|
Sqlite3DatabaseFile.GetValues
|
train
|
def GetValues(self, table_names, column_names, condition):
    """Retrieves values from a table.

    Args:
        table_names (list[str]): table names.
        column_names (list[str]): column names.
        condition (str): query condition such as
            "log_source == 'Application Error'".

    Yields:
        dict[str, object]: value per column name.

    Raises:
        RuntimeError: if the database is not opened.
    """
    if not self._connection:
        raise RuntimeError('Cannot retrieve values database not opened.')

    where_clause = condition
    if where_clause:
        where_clause = ' WHERE {0:s}'.format(condition)

    sql_query = 'SELECT {1:s} FROM {0:s}{2:s}'.format(
        ', '.join(table_names), ', '.join(column_names), where_clause)

    self._cursor.execute(sql_query)

    # Pair each column name with its positional value in the result row.
    for row in self._cursor:
        yield dict(zip(column_names, row))
|
python
|
{
"resource": ""
}
|
q26104
|
Sqlite3DatabaseFile.Open
|
train
|
def Open(self, filename, read_only=False):
    """Opens the database file.

    Args:
        filename (str): filename of the database.
        read_only (Optional[bool]): True if the database should be opened in
            read-only mode. Since sqlite3 does not support a real read-only
            mode we fake it by only permitting SELECT queries.

    Returns:
        bool: True if successful.

    Raises:
        RuntimeError: if the database is already opened.
    """
    if self._connection:
        raise RuntimeError('Cannot open database already opened.')

    self.filename = filename
    self.read_only = read_only

    try:
        self._connection = sqlite3.connect(filename)
    except sqlite3.OperationalError:
        return False

    # Defensive checks: connect() and cursor() normally either succeed or
    # raise, so a falsy result is not expected in practice.
    if not self._connection:
        return False

    self._cursor = self._connection.cursor()
    return bool(self._cursor)
|
python
|
{
"resource": ""
}
|
q26105
|
WinevtResourcesSqlite3DatabaseReader._GetEventLogProviderKey
|
train
|
def _GetEventLogProviderKey(self, log_source):
"""Retrieves the Event Log provider key.
Args:
log_source (str): Event Log source.
Returns:
str: Event Log provider key or None if not available.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_names = ['event_log_providers']
column_names = ['event_log_provider_key']
condition = 'log_source == "{0:s}"'.format(log_source)
values_list = list(self._database_file.GetValues(
table_names, column_names, condition))
number_of_values = len(values_list)
if number_of_values == 0:
return None
if number_of_values == 1:
values = values_list[0]
return values['event_log_provider_key']
raise RuntimeError('More than one value found in database.')
|
python
|
{
"resource": ""
}
|
q26106
|
WinevtResourcesSqlite3DatabaseReader._GetMessage
|
train
|
def _GetMessage(self, message_file_key, lcid, message_identifier):
"""Retrieves a specific message from a specific message table.
Args:
message_file_key (int): message file key.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)
has_table = self._database_file.HasTable(table_name)
if not has_table:
return None
column_names = ['message_string']
condition = 'message_identifier == "0x{0:08x}"'.format(message_identifier)
values = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values)
if number_of_values == 0:
return None
if number_of_values == 1:
return values[0]['message_string']
raise RuntimeError('More than one value found in database.')
|
python
|
{
"resource": ""
}
|
q26107
|
WinevtResourcesSqlite3DatabaseReader._GetMessageFileKeys
|
train
|
def _GetMessageFileKeys(self, event_log_provider_key):
"""Retrieves the message file keys.
Args:
event_log_provider_key (int): Event Log provider key.
Yields:
int: message file key.
"""
table_names = ['message_file_per_event_log_provider']
column_names = ['message_file_key']
condition = 'event_log_provider_key == {0:d}'.format(
event_log_provider_key)
generator = self._database_file.GetValues(
table_names, column_names, condition)
for values in generator:
yield values['message_file_key']
|
python
|
{
"resource": ""
}
|
q26108
|
WinevtResourcesSqlite3DatabaseReader._ReformatMessageString
|
train
|
def _ReformatMessageString(self, message_string):
    """Reformats the message string.

    Converts a Windows Event Log message string into Python format()
    (PEP 3101) style, using the class-level regular expressions
    (_WHITE_SPACE_SPECIFIER_RE, _TEXT_SPECIFIER_RE, _CURLY_BRACKETS and
    _PLACE_HOLDER_SPECIFIER_RE) defined on this class.

    Args:
        message_string (str): message string.

    Returns:
        str: message string in Python format() (PEP 3101) style, or None
            if message_string is empty or None.
    """
    def _PlaceHolderSpecifierReplacer(match_object):
        """Replaces message string place holders into Python format() style."""
        expanded_groups = []
        for group in match_object.groups():
            try:
                # Windows message place holders are 1-based; Python
                # format() positions are 0-based.
                place_holder_number = int(group, 10) - 1
                expanded_group = '{{{0:d}:s}}'.format(place_holder_number)
            except ValueError:
                # Not a numeric place holder; keep the matched text as-is.
                expanded_group = group
            expanded_groups.append(expanded_group)

        return ''.join(expanded_groups)

    if not message_string:
        return None

    # Strip white space specifiers, escape text specifiers and double up
    # curly brackets so they survive a later str.format() call; the exact
    # patterns are defined by the class-level regular expressions.
    message_string = self._WHITE_SPACE_SPECIFIER_RE.sub(r'', message_string)
    message_string = self._TEXT_SPECIFIER_RE.sub(r'\\\1', message_string)
    message_string = self._CURLY_BRACKETS.sub(r'\1\1', message_string)
    return self._PLACE_HOLDER_SPECIFIER_RE.sub(
        _PlaceHolderSpecifierReplacer, message_string)
|
python
|
{
"resource": ""
}
|
q26109
|
WinevtResourcesSqlite3DatabaseReader.GetMessage
|
train
|
def GetMessage(self, log_source, lcid, message_identifier):
    """Retrieves a specific message for a specific Event Log source.

    Args:
        log_source (str): Event Log source.
        lcid (int): language code identifier (LCID).
        message_identifier (int): message identifier.

    Returns:
        str: message string or None if not available.
    """
    event_log_provider_key = self._GetEventLogProviderKey(log_source)
    if not event_log_provider_key:
        return None

    # Note: a previous "if not generator" check was removed here; generator
    # objects are always truthy, so that branch was unreachable.
    generator = self._GetMessageFileKeys(event_log_provider_key)

    # TODO: cache a number of message strings.
    message_string = None
    # Stop at the first message file that contains the message.
    for message_file_key in generator:
        message_string = self._GetMessage(
            message_file_key, lcid, message_identifier)
        if message_string:
            break

    if self._string_format == 'wrc':
        message_string = self._ReformatMessageString(message_string)

    return message_string
|
python
|
{
"resource": ""
}
|
q26110
|
WinevtResourcesSqlite3DatabaseReader.GetMetadataAttribute
|
train
|
def GetMetadataAttribute(self, attribute_name):
    """Retrieves the metadata attribute.

    Args:
        attribute_name (str): name of the metadata attribute.

    Returns:
        str: the metadata attribute or None.

    Raises:
        RuntimeError: if more than one value is found in the database.
    """
    table_name = 'metadata'
    if not self._database_file.HasTable(table_name):
        return None

    condition = 'name == "{0:s}"'.format(attribute_name)
    value_rows = list(self._database_file.GetValues(
        [table_name], ['value'], condition))

    if not value_rows:
        return None
    if len(value_rows) > 1:
        # Metadata attribute names are expected to be unique.
        raise RuntimeError('More than one value found in database.')

    return value_rows[0]['value']
|
python
|
{
"resource": ""
}
|
q26111
|
WinevtResourcesSqlite3DatabaseReader.Open
|
train
|
def Open(self, filename):
    """Opens the database reader object.

    Args:
        filename (str): filename of the database.

    Returns:
        bool: True if successful.

    Raises:
        RuntimeError: if the version or string format of the database
            is not supported.
    """
    if not super(WinevtResourcesSqlite3DatabaseReader, self).Open(filename):
        return False

    # Only one specific database version is supported.
    version = self.GetMetadataAttribute('version')
    if not version or version != '20150315':
        raise RuntimeError('Unsupported version: {0:s}'.format(version))

    # Default to Windows Resource Compiler (wrc) style message strings.
    string_format = self.GetMetadataAttribute('string_format') or 'wrc'
    if string_format not in ('pep3101', 'wrc'):
        raise RuntimeError('Unsupported string format: {0:s}'.format(
            string_format))

    self._string_format = string_format
    return True
|
python
|
{
"resource": ""
}
|
q26112
|
AnalysisReport.CopyToDict
|
train
|
def CopyToDict(self):
    """Copies the attribute container to a dictionary.

    Returns:
        dict[str, object]: attribute values per name, skipping attributes
            that are not set (None).
    """
    return {
        attribute_name: attribute_value
        for attribute_name, attribute_value in self.GetAttributes()
        if attribute_value is not None}
|
python
|
{
"resource": ""
}
|
q26113
|
AnalysisReport.GetString
|
train
|
def GetString(self):
    """Retrieves a string representation of the report.

    Returns:
        str: string representation of the report.
    """
    lines_of_text = [
        'Report generated from: {0:s}'.format(self.plugin_name)]

    # Only include the generation time when it was recorded.
    time_compiled = getattr(self, 'time_compiled', 0)
    if time_compiled:
        time_compiled = timelib.Timestamp.CopyToIsoFormat(time_compiled)
        lines_of_text.append('Generated on: {0:s}'.format(time_compiled))

    filter_string = getattr(self, 'filter_string', '')
    if filter_string:
        lines_of_text.append('Filter String: {0:s}'.format(filter_string))

    lines_of_text.extend(['', 'Report text:', self.text])
    return '\n'.join(lines_of_text)
|
python
|
{
"resource": ""
}
|
q26114
|
BasePlugin.Process
|
train
|
def Process(self, parser_mediator, **kwargs):
    """Evaluates if this is the correct plugin and processes data accordingly.

    The purpose of the process function is to evaluate if this particular
    plugin is the correct one for the particular data structure at hand.
    This function accepts one value to use for evaluation, that could be
    a registry key, list of table names for a database or any other criteria
    that can be used to evaluate if the plugin should be run or not.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        kwargs (dict[str, object]): Depending on the plugin they may require
            different sets of arguments to be able to evaluate whether or not
            this is the correct plugin.

    Raises:
        ValueError: when there are unused keyword arguments.
    """
    # The base class consumes no keyword arguments; anything left over was
    # passed in error by the caller or a subclass.
    if not kwargs:
        return
    raise ValueError('Unused keyword arguments: {0:s}.'.format(
        ', '.join(kwargs.keys())))
|
python
|
{
"resource": ""
}
|
q26115
|
ConfigureLogging
|
train
|
def ConfigureLogging(
    debug_output=False, filename=None, mode='w', quiet_mode=False):
    """Configures the logging root logger.

    Args:
        debug_output (Optional[bool]): True if the logging should include debug
            output.
        filename (Optional[str]): log filename.
        mode (Optional[str]): log file access mode.
        quiet_mode (Optional[bool]): True if the logging should not include
            information output. Note that debug_output takes precedence over
            quiet_mode.
    """
    # Remove all possible log handlers. The log handlers cannot be
    # reconfigured and therefore must be recreated. Iterate over a copy of
    # the list: removing handlers while iterating logging.root.handlers
    # directly skips every other handler.
    for handler in list(logging.root.handlers):
        logging.root.removeHandler(handler)

    logger = logging.getLogger()

    if filename and filename.endswith('.gz'):
        handler = CompressedFileHandler(filename, mode=mode)
    elif filename:
        handler = logging.FileHandler(filename, mode=mode)
    else:
        handler = logging.StreamHandler()

    format_string = (
        '%(asctime)s [%(levelname)s] (%(processName)-10s) PID:%(process)d '
        '<%(module)s> %(message)s')
    formatter = logging.Formatter(format_string)
    handler.setFormatter(formatter)

    # debug_output takes precedence over quiet_mode.
    if debug_output:
        level = logging.DEBUG
    elif quiet_mode:
        level = logging.WARNING
    else:
        level = logging.INFO

    logger.setLevel(level)
    handler.setLevel(level)
    logger.addHandler(handler)
|
python
|
{
"resource": ""
}
|
q26116
|
CompressedFileHandler._open
|
train
|
def _open(self):
    """Opens the compressed log file.

    Returns:
        file: file-like object of the resulting gzip stream.
    """
    # The gzip module supports directly setting encoding as of Python 3.3.
    # pylint: disable=unexpected-keyword-arg
    if py2to3.PY_3:
        return gzip.open(
            self.baseFilename, mode=self.mode, encoding=self.encoding)

    # Python 2: gzip.open() does not accept an encoding argument.
    return gzip.open(self.baseFilename, self.mode)
|
python
|
{
"resource": ""
}
|
q26117
|
Broker.shutdown
|
train
|
def shutdown(self):
    """
    Stop broker instance.

    Closes all connected session, stop listening on network socket and free
    resources. This method is a *coroutine*.

    :raise: BrokerException if the broker state machine does not allow a
        shutdown at this moment.
    """
    try:
        # Drop all per-client state before stopping the state machine.
        self._sessions = dict()
        self._subscriptions = dict()
        self._retained_messages = dict()
        self.transitions.shutdown()
    except (MachineError, ValueError) as exc:
        # Backwards compat: MachineError is raised by transitions < 0.5.0.
        self.logger.debug("Invalid method call at this moment: %s" % exc)
        raise BrokerException("Broker instance can't be stopped: %s" % exc)

    # Fire broker_shutdown event to plugins
    yield from self.plugins_manager.fire_event(EVENT_BROKER_PRE_SHUTDOWN)

    # Stop broadcast loop
    if self._broadcast_task:
        self._broadcast_task.cancel()
    if self._broadcast_queue.qsize() > 0:
        # Pending broadcasts are dropped, not delivered.
        self.logger.warning("%d messages not broadcasted" % self._broadcast_queue.qsize())

    # Close every network listener.
    for listener_name in self._servers:
        server = self._servers[listener_name]
        yield from server.close_instance()
    self.logger.debug("Broker closing")
    self.logger.info("Broker closed")
    yield from self.plugins_manager.fire_event(EVENT_BROKER_POST_SHUTDOWN)
    self.transitions.stopping_success()
|
python
|
{
"resource": ""
}
|
q26118
|
MQTTClient.disconnect
|
train
|
def disconnect(self):
    """
    Disconnect from the connected broker.

    This method sends a `DISCONNECT <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718090>`_ message and closes the network socket.

    This method is a *coroutine*.
    """
    if self.session.transitions.is_connected():
        # NOTE(review): _disconnect_task presumably watches for connection
        # loss; cancelling it keeps this deliberate disconnect from being
        # handled as a failure — confirm against connect() implementation.
        if not self._disconnect_task.done():
            self._disconnect_task.cancel()
        yield from self._handler.mqtt_disconnect()
        self._connected_state.clear()
        yield from self._handler.stop()
        self.session.transitions.disconnect()
    else:
        # Disconnecting twice is not an error, just a no-op with a warning.
        self.logger.warning("Client session is not currently connected, ignoring call")
|
python
|
{
"resource": ""
}
|
q26119
|
MQTTClient.reconnect
|
train
|
def reconnect(self, cleansession=None):
    """
    Reconnect a previously connected broker.

    Reconnection tries to establish a network connection and send a `CONNECT <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718028>`_ message.
    Retries interval and attempts can be controled with the ``reconnect_max_interval`` and ``reconnect_retries`` configuration parameters.

    This method is a *coroutine*.

    :param cleansession: clean session flag used in MQTT CONNECT messages sent for reconnections.
    :return: `CONNACK <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718033>`_ return code
    :raise: :class:`hbmqtt.client.ConnectException` if re-connection fails after max retries.
    """
    if self.session.transitions.is_connected():
        self.logger.warning("Client already connected")
        return CONNECTION_ACCEPTED

    if cleansession:
        self.session.clean_session = cleansession
    self.logger.debug("Reconnecting with session parameters: %s" % self.session)

    reconnect_max_interval = self.config.get('reconnect_max_interval', 10)
    reconnect_retries = self.config.get('reconnect_retries', 5)
    nb_attempt = 1
    yield from asyncio.sleep(1, loop=self._loop)
    while True:
        try:
            self.logger.debug("Reconnect attempt %d ..." % nb_attempt)
            return (yield from self._do_connect())
        except BaseException as e:
            self.logger.warning("Reconnection attempt failed: %r" % e)
            # A negative reconnect_retries value means retry forever.
            if reconnect_retries >= 0 and nb_attempt > reconnect_retries:
                self.logger.error("Maximum number of connection attempts reached. Reconnection aborted")
                raise ConnectException("Too many connection attempts failed")
            # Exponential backoff, capped at reconnect_max_interval seconds.
            exp = 2 ** nb_attempt
            delay = exp if exp < reconnect_max_interval else reconnect_max_interval
            self.logger.debug("Waiting %d second before next attempt" % delay)
            yield from asyncio.sleep(delay, loop=self._loop)
            nb_attempt += 1
|
python
|
{
"resource": ""
}
|
q26120
|
MQTTClient.ping
|
train
|
def ping(self):
    """
    Ping the broker.

    Send a MQTT `PINGREQ <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718081>`_ message for response.

    This method is a *coroutine*. Pinging while not connected is a no-op
    that only logs a warning.
    """
    if self.session.transitions.is_connected():
        yield from self._handler.mqtt_ping()
    else:
        self.logger.warning("MQTT PING request incompatible with current session state '%s'" %
                            self.session.transitions.state)
|
python
|
{
"resource": ""
}
|
q26121
|
MQTTClient.publish
|
train
|
def publish(self, topic, message, qos=None, retain=None, ack_timeout=None):
    """
    Publish a message to the broker.

    Send a MQTT `PUBLISH <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718037>`_ message and wait for acknowledgment depending on Quality Of Service

    This method is a *coroutine*.

    :param topic: topic name to which message data is published
    :param message: payload message (as bytes) to send.
    :param qos: requested publish quality of service : QOS_0, QOS_1 or QOS_2. Defaults to ``default_qos`` config parameter or QOS_0.
    :param retain: retain flag. Defaults to ``default_retain`` config parameter or False.
    :param ack_timeout: optional timeout passed to the handler while waiting
        for acknowledgment.
    """
    def get_retain_and_qos():
        # Resolve effective QoS: an explicit argument wins; otherwise the
        # per-topic configuration overrides the global default.
        # NOTE(review): a falsy qos argument (QOS_0 == 0) is treated like
        # "not given" and falls through to the configured defaults —
        # confirm this is the intended behaviour.
        if qos:
            assert qos in (QOS_0, QOS_1, QOS_2)
            _qos = qos
        else:
            _qos = self.config['default_qos']
            try:
                _qos = self.config['topics'][topic]['qos']
            except KeyError:
                pass
        # Same resolution order for the retain flag.
        if retain:
            _retain = retain
        else:
            _retain = self.config['default_retain']
            try:
                _retain = self.config['topics'][topic]['retain']
            except KeyError:
                pass
        return _qos, _retain

    (app_qos, app_retain) = get_retain_and_qos()
    return (yield from self._handler.mqtt_publish(topic, message, app_qos, app_retain, ack_timeout))
|
python
|
{
"resource": ""
}
|
q26122
|
MQTTClient.unsubscribe
|
train
|
def unsubscribe(self, topics):
    """
    Unsubscribe from some topics.

    Send a MQTT `UNSUBSCRIBE <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718072>`_ message and wait for broker `UNSUBACK <http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718077>`_ message.

    This method is a *coroutine*.

    :param topics: array of topics to unsubscribe from.

    Example of ``topics`` argument expected structure:
    ::

        ['$SYS/broker/uptime', '$SYS/broker/load/#']
    """
    # next_packet_id provides the packet identifier the broker echoes back
    # in its UNSUBACK.
    yield from self._handler.mqtt_unsubscribe(topics, self.session.next_packet_id)
|
python
|
{
"resource": ""
}
|
q26123
|
MQTTClient.deliver_message
|
train
|
def deliver_message(self, timeout=None):
    """
    Deliver next received message.

    Deliver next message received from the broker. If no message is available, this methods waits until next message arrives or ``timeout`` occurs.

    This method is a *coroutine*.

    :param timeout: maximum number of seconds to wait before returning. If timeout is not specified or None, there is no limit to the wait time until next message arrives.
    :return: instance of :class:`hbmqtt.session.ApplicationMessage` containing received message information flow.
    :raises: :class:`asyncio.TimeoutError` if timeout occurs before a message is delivered
    """
    deliver_task = asyncio.ensure_future(self._handler.mqtt_deliver_next_message(), loop=self._loop)
    self.client_tasks.append(deliver_task)
    self.logger.debug("Waiting message delivery")
    done, pending = yield from asyncio.wait([deliver_task], loop=self._loop, return_when=asyncio.FIRST_EXCEPTION, timeout=timeout)
    if deliver_task in done:
        if deliver_task.exception() is not None:
            # deliver_task raised an exception, pass it on to our caller
            raise deliver_task.exception()
        # NOTE(review): pop() removes the *last* entry of client_tasks and
        # assumes it is deliver_task; confirm concurrent deliver_message
        # calls cannot interleave here.
        self.client_tasks.pop()
        return deliver_task.result()
    else:
        #timeout occured before message received
        deliver_task.cancel()
        raise asyncio.TimeoutError
|
python
|
{
"resource": ""
}
|
q26124
|
PluginManager.fire_event
|
train
|
def fire_event(self, event_name, wait=False, *args, **kwargs):
    """
    Fire an event to plugins.

    PluginManager schedules @asyncio.coroutine calls for each plugin on a
    method called "on_" + event_name. For example, on_connect will be
    called on event 'connect'. Method calls are scheduled in the async
    loop; the wait parameter must be set to True to wait until all methods
    are completed.

    :param event_name: name of the event; handlers are looked up as
        "on_" + event_name on each plugin object.
    :param args: positional arguments forwarded to each handler.
    :param kwargs: keyword arguments forwarded to each handler.
    :param wait: indicates if fire_event should wait for plugin calls completion (True), or not
    :return:
    """
    tasks = []
    event_method_name = "on_" + event_name
    for plugin in self._plugins:
        event_method = getattr(plugin.object, event_method_name, None)
        if event_method:
            try:
                task = self._schedule_coro(event_method(*args, **kwargs))
                tasks.append(task)

                # Bind 'task' as a default argument: a plain closure would
                # be late-binding, so every callback would remove the
                # *last* scheduled task instead of its own.
                def clean_fired_events(future, task=task):
                    try:
                        self._fired_events.remove(task)
                    except (KeyError, ValueError):
                        pass

                task.add_done_callback(clean_fired_events)
            except AssertionError:
                self.logger.error("Method '%s' on plugin '%s' is not a coroutine" %
                                  (event_method_name, plugin.name))

    self._fired_events.extend(tasks)
    if wait:
        if tasks:
            yield from asyncio.wait(tasks, loop=self._loop)
|
python
|
{
"resource": ""
}
|
q26125
|
BrokerSysPlugin._clear_stats
|
train
|
def _clear_stats(self):
    """
    Initializes broker statistics data structures.

    Every tracked counter is reset to zero.
    """
    monitored_stats = (
        STAT_BYTES_RECEIVED,
        STAT_BYTES_SENT,
        STAT_MSG_RECEIVED,
        STAT_MSG_SENT,
        STAT_CLIENTS_MAXIMUM,
        STAT_CLIENTS_CONNECTED,
        STAT_CLIENTS_DISCONNECTED,
        STAT_PUBLISH_RECEIVED,
        STAT_PUBLISH_SENT,
    )
    self._stats.update(dict.fromkeys(monitored_stats, 0))
|
python
|
{
"resource": ""
}
|
q26126
|
WebSocketsWriter.drain
|
train
|
def drain(self):
    """
    Let the write buffer of the underlying transport a chance to be flushed.

    This method is a *coroutine*. All bytes accumulated in the internal
    BytesIO buffer are sent as a single websocket message.
    """
    data = self._stream.getvalue()
    if len(data):
        yield from self._protocol.send(data)
    # Start a fresh buffer for subsequent writes.
    self._stream = io.BytesIO(b'')
|
python
|
{
"resource": ""
}
|
q26127
|
search
|
train
|
def search(query, results=10, suggestion=False):
    '''
    Do a Wikipedia search for `query`.

    Keyword arguments:

    * results - the maximum number of results returned
    * suggestion - if True, return results and suggestion (if any) in a tuple
    '''
    search_params = {
        'list': 'search',
        'srprop': '',
        'srlimit': results,
        'limit': results,
        'srsearch': query,
    }
    if suggestion:
        search_params['srinfo'] = 'suggestion'

    raw_results = _wiki_request(search_params)

    if 'error' in raw_results:
        # Distinguish transient capacity errors from everything else.
        info = raw_results['error']['info']
        if info in ('HTTP request timed out.', 'Pool queue is full'):
            raise HTTPTimeoutError(query)
        raise WikipediaException(info)

    titles = [d['title'] for d in raw_results['query']['search']]

    if suggestion:
        search_info = raw_results['query'].get('searchinfo')
        if search_info:
            return titles, search_info['suggestion']
        return titles, None

    return titles
|
python
|
{
"resource": ""
}
|
q26128
|
suggest
|
train
|
def suggest(query):
    '''
    Get a Wikipedia search suggestion for `query`.
    Returns a string or None if no suggestion was found.
    '''
    search_params = {
        'list': 'search',
        'srinfo': 'suggestion',
        'srprop': '',
        'srsearch': query,
    }

    raw_result = _wiki_request(search_params)

    # 'searchinfo' is only present when the API has a suggestion to offer.
    search_info = raw_result['query'].get('searchinfo')
    if search_info:
        return search_info['suggestion']
    return None
|
python
|
{
"resource": ""
}
|
q26129
|
random
|
train
|
def random(pages=1):
    '''
    Get a list of random Wikipedia article titles.

    .. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.

    Keyword arguments:

    * pages - the number of random pages returned (max of 10)
    '''
    # http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm
    request = _wiki_request({
        'list': 'random',
        'rnnamespace': 0,
        'rnlimit': pages,
    })

    titles = [page['title'] for page in request['query']['random']]

    # A single title is returned unwrapped for convenience.
    return titles[0] if len(titles) == 1 else titles
|
python
|
{
"resource": ""
}
|
q26130
|
_wiki_request
|
train
|
def _wiki_request(params):
    '''
    Make a request to the Wikipedia API using the given search parameters.
    Returns a parsed dict of the JSON response.
    '''
    global RATE_LIMIT_LAST_CALL
    global USER_AGENT

    params['format'] = 'json'
    if 'action' not in params:
        params['action'] = 'query'

    headers = {
        'User-Agent': USER_AGENT
    }

    if RATE_LIMIT and RATE_LIMIT_LAST_CALL and \
        RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now():

        # It hasn't been long enough since the last API call, so wait until
        # we're in the clear to make the request. Sleep the full fractional
        # interval: the previous int() truncation slept too little (e.g. a
        # 0.9 second wait slept 0 seconds), defeating the rate limit.
        wait_time = (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT) - datetime.now()
        time.sleep(wait_time.total_seconds())

    r = requests.get(API_URL, params=params, headers=headers)

    if RATE_LIMIT:
        RATE_LIMIT_LAST_CALL = datetime.now()

    return r.json()
|
python
|
{
"resource": ""
}
|
q26131
|
WikipediaPage.html
|
train
|
def html(self):
    '''
    Get full page HTML.

    .. warning:: This can get pretty slow on long pages.
    '''
    # Cached after the first fetch.
    if getattr(self, '_html', False):
        return self._html

    request = _wiki_request({
        'prop': 'revisions',
        'rvprop': 'content',
        'rvlimit': 1,
        'rvparse': '',
        'titles': self.title,
    })
    self._html = request['query']['pages'][self.pageid]['revisions'][0]['*']
    return self._html
|
python
|
{
"resource": ""
}
|
q26132
|
WikipediaPage.content
|
train
|
def content(self):
    '''
    Plain text content of the page, excluding images, tables, and other data.
    '''
    if not getattr(self, '_content', False):
        query_params = {
            'prop': 'extracts|revisions',
            'explaintext': '',
            'rvprop': 'ids',
        }
        # Prefer the title when it is known; otherwise query by page id.
        if getattr(self, 'title', None) is not None:
            query_params['titles'] = self.title
        else:
            query_params['pageids'] = self.pageid

        request = _wiki_request(query_params)
        page_data = request['query']['pages'][self.pageid]
        self._content = page_data['extract']
        self._revision_id = page_data['revisions'][0]['revid']
        self._parent_id = page_data['revisions'][0]['parentid']

    return self._content
|
python
|
{
"resource": ""
}
|
q26133
|
WikipediaPage.images
|
train
|
def images(self):
    '''
    List of URLs of images on the page.
    '''
    if not getattr(self, '_images', False):
        pages = self.__continued_query({
            'generator': 'images',
            'gimlimit': 'max',
            'prop': 'imageinfo',
            'iiprop': 'url',
        })
        # Pages without image info are skipped.
        self._images = [
            page['imageinfo'][0]['url'] for page in pages
            if 'imageinfo' in page]

    return self._images
|
python
|
{
"resource": ""
}
|
q26134
|
WikipediaPage.references
|
train
|
def references(self):
    '''
    List of URLs of external links on a page.
    May include external links within page that aren't technically cited anywhere.
    '''
    if not getattr(self, '_references', False):
        def add_protocol(url):
            # Protocol-relative links ('//example.com') get a default scheme.
            return url if url.startswith('http') else 'http:' + url

        links = self.__continued_query({
            'prop': 'extlinks',
            'ellimit': 'max',
        })
        self._references = [add_protocol(link['*']) for link in links]

    return self._references
|
python
|
{
"resource": ""
}
|
q26135
|
WikipediaPage.links
|
train
|
def links(self):
    '''
    List of titles of Wikipedia page links on a page.

    .. note:: Only includes articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
    '''
    if not getattr(self, '_links', False):
        pages = self.__continued_query({
            'prop': 'links',
            'plnamespace': 0,
            'pllimit': 'max',
        })
        self._links = [link['title'] for link in pages]

    return self._links
|
python
|
{
"resource": ""
}
|
q26136
|
WikipediaPage.categories
|
train
|
def categories(self):
    '''
    List of categories of a page.
    '''
    if not getattr(self, '_categories', False):
        # Strip the 'Category:' namespace prefix from each title.
        self._categories = [
            re.sub(r'^Category:', '', link['title'])
            for link in self.__continued_query({
                'prop': 'categories',
                'cllimit': 'max',
            })]

    return self._categories
|
python
|
{
"resource": ""
}
|
q26137
|
WikipediaPage.sections
|
train
|
def sections(self):
    '''
    List of section titles from the table of contents on the page.
    '''
    if not getattr(self, '_sections', False):
        query_params = {'action': 'parse', 'prop': 'sections'}
        query_params.update(self.__title_query_param)

        result = _wiki_request(query_params)
        self._sections = [
            section['line'] for section in result['parse']['sections']]

    return self._sections
|
python
|
{
"resource": ""
}
|
q26138
|
WikipediaPage.section
|
train
|
def section(self, section_title):
    '''
    Get the plain text content of a section from `self.sections`.
    Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.

    This is a convenience method that wraps self.content.

    .. warning:: Calling `section` on a section that has subheadings will NOT return
        the full text of all of the subsections. It only gets the text between
        `section_title` and the next subheading, which is often empty.
    '''
    heading = u"== {} ==".format(section_title)

    try:
        start = self.content.index(heading) + len(heading)
    except ValueError:
        # The heading does not occur in the page content.
        return None

    try:
        end = self.content.index("==", start)
    except ValueError:
        # No further heading: the section runs to the end of the page.
        end = len(self.content)

    return self.content[start:end].lstrip("=").strip()
|
python
|
{
"resource": ""
}
|
q26139
|
List.update
|
train
|
def update(self, render, force = False):
    """
    Draw GUI that list active session.

    Renders one row per label in self._labels onto an off-screen QImage,
    highlighting the row at self._current with a rounded green border,
    then hands the image to the renderer.

    @param render: target renderer; must provide getImageFormat() and
        drawImage().
    @param force: redraw even when no update is pending.
    """
    # Skip the redraw unless something changed or the caller forces it.
    if not force and not self._needUpdate:
        return
    self._needUpdate = False

    i = 0
    drawArea = QtGui.QImage(self._width, self._height, render.getImageFormat())
    #fill with background Color
    drawArea.fill(self._backgroudColor)
    with QtGui.QPainter(drawArea) as qp:
        for label in self._labels:
            rect = QtCore.QRect(0, i * self._cellHeight, self._width - 2, self._cellHeight)
            if i == self._current:
                # Outline the currently selected row.
                qp.setPen(QtCore.Qt.darkGreen)
                qp.drawRoundedRect(rect, 5.0, 5.0)
            qp.setPen(QtCore.Qt.white)
            qp.setFont(QtGui.QFont('arial', self._fontSize, QtGui.QFont.Bold))
            qp.drawText(rect, QtCore.Qt.AlignCenter, label)
            i += 1
    render.drawImage(drawArea)
|
python
|
{
"resource": ""
}
|
q26140
|
RDPRenderer.drawImage
|
train
|
def drawImage(self, image):
    """
    Send the widget image to the RDP peer one scanline at a time.
    :param image: QImage to transmit via the controller
    """
    # NOTE(review): padding = width % 4 looks suspicious — the usual 4-byte
    # scanline alignment is (4 - width % 4) % 4; confirm against callers.
    padding = image.width() % 4
    for i in range(0, image.height()):
        tmp = image.copy(0, i, image.width() + padding, 1)
        #in RDP image or bottom top encoded
        ptr = tmp.bits()
        ptr.setsize(tmp.byteCount())
        self._controller.sendUpdate(self._dx, i + self._dy, image.width() + self._dx - 1, i + self._dy, tmp.width(), tmp.height(), self._colorDepth, False, ptr.asstring())
|
python
|
{
"resource": ""
}
|
q26141
|
RFB.expectWithHeader
|
train
|
def expectWithHeader(self, expectedHeaderLen, callbackBody):
    """
    Second-level wait: first read a fixed-size header that encodes the body
    length, then hand the body over to callbackBody once it has arrived.
    @param expectedHeaderLen: number of bytes in which the body length is encoded
    @param callbackBody: next state invoked with the body announced by the header
    """
    # remember where the body must go, then arm the header reader
    self._callbackBody = callbackBody
    self.expect(expectedHeaderLen, self.expectedBody)
|
python
|
{
"resource": ""
}
|
q26142
|
RFB.expectedBody
|
train
|
def expectedBody(self, data):
    """
    Decode the length header and wait for that many body bytes before the
    stored body callback fires.
    @param data: Stream whose length equals the header length (1|2|4 bytes);
        the next state is set to the body callback once the announced
        number of bytes has been received
    """
    bodyLen = None
    if data.len == 1:
        bodyLen = UInt8()
    elif data.len == 2:
        bodyLen = UInt16Be()
    elif data.len == 4:
        bodyLen = UInt32Be()
    else:
        # unsupported header width: abort this transition
        log.error("invalid header length")
        return
    data.readType(bodyLen)
    self.expect(bodyLen.value, self._callbackBody)
|
python
|
{
"resource": ""
}
|
q26143
|
RFB.readProtocolVersion
|
train
|
def readProtocolVersion(self, data):
    """
    Read the RFB protocol version announced by the peer.
    Versions other than 3.3, 3.7 and 3.8 are normalized to
    ProtocolVersion.UNKNOWN so later states can treat them uniformly.
    @param data: Stream that may contain a protocol version string (ProtocolVersion)
    """
    data.readType(self._version)
    # idiomatic membership test ('x not in ...' rather than 'not x in ...')
    if self._version.value not in (ProtocolVersion.RFB003003, ProtocolVersion.RFB003007, ProtocolVersion.RFB003008):
        self._version.value = ProtocolVersion.UNKNOWN
|
python
|
{
"resource": ""
}
|
q26144
|
RFB.recvSecurityList
|
train
|
def recvSecurityList(self, data):
    """
    Read the list of security types offered by the server and reply with
    the security level chosen by the client.
    @param data: Stream that contains well formed packet
    """
    securityList = []
    while data.dataLen() > 0:
        securityElement = UInt8()
        data.readType(securityElement)
        securityList.append(securityElement)
    #select high security level
    # NOTE(review): the break fires on the first acceptable type that is
    # greater than the current level, not necessarily the maximum offered —
    # confirm this matches the intended negotiation policy.
    for s in securityList:
        if s.value in [SecurityType.NONE, SecurityType.VNC] and s > self._securityLevel:
            self._securityLevel = s
            break
    #send back security level choosen
    self.send(self._securityLevel)
    if self._securityLevel.value == SecurityType.VNC:
        # VNC authentication: the server sends a 16-byte challenge next
        self.expect(16, self.recvVNCChallenge)
    else:
        self.expect(4, self.recvSecurityResult)
|
python
|
{
"resource": ""
}
|
q26145
|
RFB.recvSecurityResult
|
train
|
def recvSecurityResult(self, data):
    """
    Read the SecurityResult packet with which the server reports whether
    the client's authentication succeeded.
    @param data: Stream that contain well formed packet
    """
    result = UInt32Be()
    data.readType(result)
    if result == UInt32Be(1):
        # non-zero result signals authentication failure
        log.info("Authentification failed")
        if self._version.value == ProtocolVersion.RFB003008:
            # RFB 3.8 appends a length-prefixed reason string on failure
            self.expectWithHeader(4, self.recvSecurityFailed)
    else:
        log.debug("Authentification OK")
        self.sendClientInit()
|
python
|
{
"resource": ""
}
|
q26146
|
RFB.recvServerInit
|
train
|
def recvServerInit(self, data):
    """
    Handle the ServerInit packet: decode it, then wait for the 4-byte
    length header that precedes the server name string.
    @param data: Stream that contains well formed packet
    """
    data.readType(self._serverInit)
    self.expectWithHeader(4, self.recvServerName)
|
python
|
{
"resource": ""
}
|
q26147
|
dt_to_filetime
|
train
|
def dt_to_filetime(dt):
    """Convert a datetime into Microsoft FILETIME format (100 ns ticks since
    1601-01-01 UTC). A time-zone-naive datetime is interpreted as UTC.
    >>> "%.0f" % dt_to_filetime(datetime(2009, 7, 25, 23, 0))
    '128930364000000000'
    >>> "%.0f" % dt_to_filetime(datetime(1970, 1, 1, 0, 0, tzinfo=utc))
    '116444736000000000'
    >>> "%.0f" % dt_to_filetime(datetime(1970, 1, 1, 0, 0))
    '116444736000000000'
    >>> dt_to_filetime(datetime(2009, 7, 25, 23, 0, 0, 100))
    128930364000001000
    """
    if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
        # naive datetime: treat it as UTC
        dt = dt.replace(tzinfo=utc)
    seconds_since_epoch = timegm(dt.timetuple())
    return EPOCH_AS_FILETIME + seconds_since_epoch * HUNDREDS_OF_NANOSECONDS + dt.microsecond * 10
|
python
|
{
"resource": ""
}
|
q26148
|
filetime_to_dt
|
train
|
def filetime_to_dt(ft):
    """Convert a Microsoft FILETIME number to a Python datetime. The new
    datetime object is time zone-naive but is equivalent to tzinfo=utc.
    >>> filetime_to_dt(116444736000000000)
    datetime.datetime(1970, 1, 1, 0, 0)
    >>> filetime_to_dt(128930364000000000)
    datetime.datetime(2009, 7, 25, 23, 0)
    >>> filetime_to_dt(128930364000001000)
    datetime.datetime(2009, 7, 25, 23, 0, 0, 100)
    """
    # local import to avoid assuming timedelta is imported at module level
    from datetime import timedelta
    # Split into whole seconds since the Unix epoch plus leftover 100 ns ticks
    (s, ns100) = divmod(ft - EPOCH_AS_FILETIME, HUNDREDS_OF_NANOSECONDS)
    # datetime.utcfromtimestamp is deprecated since Python 3.12; plain
    # timedelta arithmetic from the naive epoch yields the identical
    # naive-UTC result and also works for pre-1970 values on all platforms.
    # ns100 < 10**7, so ns100 // 10 is always a valid microsecond value.
    return datetime(1970, 1, 1) + timedelta(seconds=s, microseconds=ns100 // 10)
|
python
|
{
"resource": ""
}
|
q26149
|
des.__des_crypt
|
train
|
def __des_crypt(self, block, crypt_type):
    """Crypt the block of data through DES bit-manipulation.
    :param block: 64-element bit list to encrypt or decrypt
    :param crypt_type: des.ENCRYPT or des.DECRYPT; controls subkey order
    :return: 64-element bit list after the 16 Feistel rounds and final permutation
    """
    # Initial permutation, then split into left/right 32-bit halves
    block = self.__permutate(des.__ip, block)
    self.L = block[:32]
    self.R = block[32:]
    # Encryption starts from Kn[1] through to Kn[16]
    if crypt_type == des.ENCRYPT:
        iteration = 0
        iteration_adjustment = 1
    # Decryption starts from Kn[16] down to Kn[1]
    else:
        iteration = 15
        iteration_adjustment = -1
    i = 0
    while i < 16:
        # Make a copy of R[i-1], this will later become L[i]
        tempR = self.R[:]
        # Permutate R[i - 1] to start creating R[i]
        self.R = self.__permutate(des.__expansion_table, self.R)
        # Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
        self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
        B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
        # Optimization: Replaced below commented code with above
        #j = 0
        #B = []
        #while j < len(self.R):
        #    self.R[j] = self.R[j] ^ self.Kn[iteration][j]
        #    j += 1
        #    if j % 6 == 0:
        #        B.append(self.R[j-6:j])
        # Permutate B[1] to B[8] using the S-Boxes
        j = 0
        Bn = [0] * 32
        pos = 0
        while j < 8:
            # Work out the offsets
            m = (B[j][0] << 1) + B[j][5]
            n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
            # Find the permutation value
            v = des.__sbox[j][(m << 4) + n]
            # Turn value into bits, add it to result: Bn
            Bn[pos] = (v & 8) >> 3
            Bn[pos + 1] = (v & 4) >> 2
            Bn[pos + 2] = (v & 2) >> 1
            Bn[pos + 3] = v & 1
            pos += 4
            j += 1
        # Permutate the concatination of B[1] to B[8] (Bn)
        self.R = self.__permutate(des.__p, Bn)
        # Xor with L[i - 1]
        self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
        # Optimization: This now replaces the below commented code
        #j = 0
        #while j < len(self.R):
        #    self.R[j] = self.R[j] ^ self.L[j]
        #    j += 1
        # L[i] becomes R[i - 1]
        self.L = tempR
        i += 1
        iteration += iteration_adjustment
    # Final permutation of R[16]L[16]
    self.final = self.__permutate(des.__fp, self.R + self.L)
    return self.final
|
python
|
{
"resource": ""
}
|
q26150
|
check_arg_compatibility
|
train
|
def check_arg_compatibility(args: argparse.Namespace):
    """
    Check if some arguments are incompatible with each other.
    :param args: Arguments as returned by argparse.
    :raises: via check_condition when an incompatible combination is found.
    """
    if args.lhuc is not None:
        # Actually this check is a bit too strict
        check_condition(args.encoder != C.CONVOLUTION_TYPE or args.decoder != C.CONVOLUTION_TYPE,
                        "LHUC is not supported for convolutional models yet.")
        # fixed grammar of the error message ("options only applies" -> "option only applies")
        check_condition(args.decoder != C.TRANSFORMER_TYPE or C.LHUC_STATE_INIT not in args.lhuc,
                        "The %s option only applies to RNN models" % C.LHUC_STATE_INIT)
    if args.decoder_only:
        check_condition(args.decoder != C.TRANSFORMER_TYPE and args.decoder != C.CONVOLUTION_TYPE,
                        "Decoder pre-training currently supports RNN decoders only.")
|
python
|
{
"resource": ""
}
|
q26151
|
check_resume
|
train
|
def check_resume(args: argparse.Namespace, output_folder: str) -> bool:
    """
    Check if we should resume a broken training run.
    Calls sys.exit(1) on argument mismatch or when the folder already holds
    a trained model that must not be overwritten.
    :param args: Arguments as returned by argparse.
    :param output_folder: Main output folder for the model.
    :return: Flag signaling if we are resuming training and the directory with
        the training status.
    """
    resume_training = False
    training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)
    if os.path.exists(output_folder):
        if args.overwrite_output:
            logger.info("Removing existing output folder %s.", output_folder)
            shutil.rmtree(output_folder)
            os.makedirs(output_folder)
        elif os.path.exists(training_state_dir):
            # training state present: resume only when the saved args match
            old_args = vars(arguments.load_args(os.path.join(output_folder, C.ARGS_STATE_NAME)))
            # symmetric difference of the two argument dicts
            arg_diffs = _dict_difference(vars(args), old_args) | _dict_difference(old_args, vars(args))
            # Remove args that may differ without affecting the training.
            arg_diffs -= set(C.ARGS_MAY_DIFFER)
            # allow different device-ids provided their total count is the same
            if 'device_ids' in arg_diffs and len(old_args['device_ids']) == len(vars(args)['device_ids']):
                arg_diffs.discard('device_ids')
            if not arg_diffs:
                resume_training = True
            else:
                # We do not have the logger yet
                logger.error("Mismatch in arguments for training continuation.")
                logger.error("Differing arguments: %s.", ", ".join(arg_diffs))
                sys.exit(1)
        elif os.path.exists(os.path.join(output_folder, C.PARAMS_BEST_NAME)):
            logger.error("Refusing to overwrite model folder %s as it seems to contain a trained model.", output_folder)
            sys.exit(1)
        else:
            logger.info("The output folder %s already exists, but no training state or parameter file was found. "
                        "Will start training from scratch.", output_folder)
    else:
        os.makedirs(output_folder)
    return resume_training
|
python
|
{
"resource": ""
}
|
q26152
|
use_shared_vocab
|
train
|
def use_shared_vocab(args: argparse.Namespace) -> bool:
    """
    Return True when the given arguments entail a single vocabulary shared
    between source and target.
    :param: args: Arguments as returned by argparse.
    """
    shared = args.shared_vocab
    tying_enabled = args.weight_tying
    tying_type = args.weight_tying_type
    # tying source and target embeddings forces a shared vocabulary
    if tying_enabled and C.WEIGHT_TYING_SRC in tying_type and C.WEIGHT_TYING_TRG in tying_type:
        if not shared:
            logger.info("A shared source/target vocabulary will be used as weight tying source/target weight tying "
                        "is enabled")
        shared = True
    # decoder pre-training also requires a shared vocabulary
    if args.decoder_only:
        if not shared:
            logger.info("A shared source/target vocabulary will be used for pre-training the decoder.")
        shared = True
    return shared
|
python
|
{
"resource": ""
}
|
q26153
|
check_encoder_decoder_args
|
train
|
def check_encoder_decoder_args(args) -> None:
    """
    Check possible encoder-decoder argument conflicts.
    :param args: Arguments as returned by argparse.
    """
    encoder_embed_dropout, decoder_embed_dropout = args.embed_dropout
    encoder_rnn_dropout_inputs, decoder_rnn_dropout_inputs = args.rnn_dropout_inputs
    encoder_rnn_dropout_states, decoder_rnn_dropout_states = args.rnn_dropout_states
    if encoder_embed_dropout > 0 and encoder_rnn_dropout_inputs > 0:
        logger.warning("Setting encoder RNN AND source embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    if decoder_embed_dropout > 0 and decoder_rnn_dropout_inputs > 0:
        # fixed copy-paste bug: this branch concerns the decoder/target side,
        # but the message previously repeated the encoder/source wording
        logger.warning("Setting decoder RNN AND target embedding dropout > 0 leads to "
                       "two dropout layers on top of each other.")
    encoder_rnn_dropout_recurrent, decoder_rnn_dropout_recurrent = args.rnn_dropout_recurrent
    if encoder_rnn_dropout_recurrent > 0 or decoder_rnn_dropout_recurrent > 0:
        check_condition(args.rnn_cell_type == C.LSTM_TYPE,
                        "Recurrent dropout without memory loss only supported for LSTMs right now.")
|
python
|
{
"resource": ""
}
|
q26154
|
create_training_model
|
train
|
def create_training_model(config: model.ModelConfig,
                          context: List[mx.Context],
                          output_dir: str,
                          train_iter: data_io.BaseParallelSampleIter,
                          args: argparse.Namespace) -> training.TrainingModel:
    """
    Create a training model and load the parameters from disk if needed.
    :param config: The configuration for the model.
    :param context: The context(s) to run on.
    :param output_dir: Output folder.
    :param train_iter: The training data iterator.
    :param args: Arguments as returned by argparse.
    :return: The training model.
    """
    # gradient accumulation kicks in when more than one batch is folded into an update
    training_model = training.TrainingModel(config=config,
                                            context=context,
                                            output_dir=output_dir,
                                            provide_data=train_iter.provide_data,
                                            provide_label=train_iter.provide_label,
                                            default_bucket_key=train_iter.default_bucket_key,
                                            bucketing=not args.no_bucketing,
                                            gradient_compression_params=gradient_compression_params(args),
                                            gradient_accumulation=args.update_interval > 1,
                                            fixed_param_names=args.fixed_param_names,
                                            fixed_param_strategy=args.fixed_param_strategy)
    return training_model
|
python
|
{
"resource": ""
}
|
q26155
|
create_optimizer_config
|
train
|
def create_optimizer_config(args: argparse.Namespace, source_vocab_sizes: List[int],
                            extra_initializers: List[Tuple[str, mx.initializer.Initializer]] = None) -> OptimizerConfig:
    """
    Returns an OptimizerConfig assembled from command-line arguments.
    :param args: Arguments as returned by argparse.
    :param source_vocab_sizes: Source vocabulary sizes.
    :param extra_initializers: extra initializer to pass to `get_initializer`.
    :return: The optimizer type and its parameters as well as the kvstore.
    """
    optimizer_params = {'wd': args.weight_decay,
                        "learning_rate": args.initial_learning_rate}
    gradient_clipping_threshold = none_if_negative(args.gradient_clipping_threshold)
    if gradient_clipping_threshold is None:
        logger.info("Gradient clipping threshold set to negative value. Will not perform gradient clipping.")
        gradient_clipping_type = C.GRADIENT_CLIPPING_TYPE_NONE
    else:
        gradient_clipping_type = args.gradient_clipping_type
    # batches per update times batch size gives the effective batch size
    effective_batch_size = args.batch_size * args.update_interval
    # Note: for 'abs' we use the implementation inside of MXNet's optimizer and 'norm_*' we implement ourselves
    # inside the TrainingModel.
    if gradient_clipping_threshold is not None and gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_ABS:
        optimizer_params["clip_gradient"] = gradient_clipping_threshold
    if args.momentum is not None:
        optimizer_params["momentum"] = args.momentum
    if args.loss_normalization_type == C.LOSS_NORM_VALID:
        # When we normalize by the number of non-PAD symbols in a batch we need to disable rescale_grad.
        optimizer_params["rescale_grad"] = 1.0 / args.update_interval
    elif args.loss_normalization_type == C.LOSS_NORM_BATCH:
        # Making MXNet module API's default scaling factor explicit
        optimizer_params["rescale_grad"] = 1.0 / effective_batch_size
    # Manually specified params
    if args.optimizer_params:
        optimizer_params.update(args.optimizer_params)
    weight_init = initializer.get_initializer(default_init_type=args.weight_init,
                                              default_init_scale=args.weight_init_scale,
                                              default_init_xavier_rand_type=args.weight_init_xavier_rand_type,
                                              default_init_xavier_factor_type=args.weight_init_xavier_factor_type,
                                              embed_init_type=args.embed_weight_init,
                                              embed_init_sigma=source_vocab_sizes[0] ** -0.5,
                                              rnn_init_type=args.rnn_h2h_init,
                                              extra_initializers=extra_initializers)
    lr_sched = lr_scheduler.get_lr_scheduler(args.learning_rate_scheduler_type,
                                             args.checkpoint_interval,
                                             none_if_negative(args.learning_rate_half_life),
                                             args.learning_rate_reduce_factor,
                                             args.learning_rate_reduce_num_not_improved,
                                             args.learning_rate_schedule,
                                             args.learning_rate_warmup)
    config = OptimizerConfig(name=args.optimizer,
                             params=optimizer_params,
                             kvstore=args.kvstore,
                             initializer=weight_init,
                             gradient_clipping_type=gradient_clipping_type,
                             gradient_clipping_threshold=gradient_clipping_threshold,
                             update_interval=args.update_interval)
    config.set_lr_scheduler(lr_sched)
    logger.info("Optimizer: %s", config)
    logger.info("Gradient Compression: %s", gradient_compression_params(args))
    if args.update_interval > 1:
        logger.info("Gradient accumulation over %d batches. Effective batch size: %d",
                    args.update_interval, effective_batch_size)
    return config
|
python
|
{
"resource": ""
}
|
q26156
|
Config.freeze
|
train
|
def freeze(self):
    """
    Freeze this Config object and, recursively, every child Config, so no
    parameter can be modified or added afterwards.
    """
    if getattr(self, '_frozen'):
        return
    # bypass any __setattr__ guard that enforces frozen-ness
    object.__setattr__(self, "_frozen", True)
    for name, child in self.__dict__.items():
        if name != "self" and isinstance(child, Config):
            child.freeze()
|
python
|
{
"resource": ""
}
|
q26157
|
Config.__del_frozen
|
train
|
def __del_frozen(self):
    """
    Remove the _frozen attribute from this instance and recursively from
    every child configuration that carries it.
    """
    delattr(self, '_frozen')
    for name, child in self.__dict__.items():
        if isinstance(child, Config) and hasattr(child, '_frozen'):
            child.__del_frozen()
|
python
|
{
"resource": ""
}
|
q26158
|
Config.__add_frozen
|
train
|
def __add_frozen(self):
    """
    Attach a _frozen attribute (initially False) to this instance and
    recursively to every child configuration.
    """
    setattr(self, "_frozen", False)
    for child in self.__dict__.values():
        if isinstance(child, Config):
            child.__add_frozen()
|
python
|
{
"resource": ""
}
|
q26159
|
Config.load
|
train
|
def load(fname: str) -> 'Config':
    """
    Returns a Config object loaded from a YAML file. The loaded object is
    not frozen.
    :param fname: Name of file to load the Config from.
    :return: Configuration.
    """
    with open(fname) as inp:
        # NOTE: Config files rely on custom YAML tags, so the full (unsafe)
        # loader is required — only load files from trusted sources. Passing
        # the Loader explicitly keeps the historical yaml.load behavior and
        # silences the implicit-loader deprecation warning.
        obj = yaml.load(inp, Loader=yaml.Loader)
    obj.__add_frozen()
    return obj
|
python
|
{
"resource": ""
}
|
q26160
|
nearest_k
|
train
|
def nearest_k(similarity_matrix: mx.nd.NDArray,
              query_word_id: int,
              k: int,
              gamma: float = 1.0) -> Iterable[Tuple[int, float]]:
    """
    Returns values and indices of k items with largest similarity.
    :param similarity_matrix: Similarity matrix.
    :param query_word_id: Query word id (row into the similarity matrix).
    :param k: Number of closest items to retrieve.
    :param gamma: Parameter to control distribution steepness (softmax temperature divisor).
    :return: Iterable of (index, value) pairs for the k nearest elements.
    """
    # pylint: disable=unbalanced-tuple-unpacking
    values, indices = mx.nd.topk(mx.nd.softmax(similarity_matrix[query_word_id] / gamma), k=k, ret_typ='both')
    return zip(indices.asnumpy(), values.asnumpy())
|
python
|
{
"resource": ""
}
|
q26161
|
main
|
train
|
def main():
    """
    Command-line tool to inspect model embeddings: parses arguments and
    delegates to embeddings().
    """
    setup_main_logger(file_logging=False)
    params = argparse.ArgumentParser(description='Shows nearest neighbours of input tokens in the embedding space.')
    params.add_argument('--model', '-m', required=True,
                        help='Model folder to load config from.')
    params.add_argument('--checkpoint', '-c', required=False, type=int, default=None,
                        help='Optional specific checkpoint to load parameters from. Best params otherwise.')
    params.add_argument('--side', '-s', required=True, choices=['source', 'target'], help='what embeddings to look at')
    params.add_argument('--norm', '-n', action='store_true', help='normalize embeddings to unit length')
    params.add_argument('-k', type=int, default=5, help='Number of neighbours to print')
    params.add_argument('--gamma', '-g', type=float, default=1.0, help='Softmax distribution steepness.')
    args = params.parse_args()
    embeddings(args)
|
python
|
{
"resource": ""
}
|
q26162
|
_get_word_ngrams
|
train
|
def _get_word_ngrams(n, sentences):
    """Calculates word n-grams over all words of the given sentences.
    :param n: n-gram order; must be positive.
    :param sentences: Non-empty collection of sentences.
    :raises ValueError: If `sentences` is empty or `n` is not positive.
    """
    # explicit validation instead of `assert`, which is stripped under -O
    if not sentences:
        raise ValueError("sentences must not be empty")
    if n <= 0:
        raise ValueError("n must be positive")
    words = _split_into_words(sentences)
    return _get_ngrams(n, words)
|
python
|
{
"resource": ""
}
|
q26163
|
rouge
|
train
|
def rouge(hypotheses, references):
    """Calculates average ROUGE-1, ROUGE-2 and ROUGE-L scores (F1, precision,
    recall) for a list of hypotheses and references.
    :param hypotheses: List of hypothesis strings.
    :param references: List of reference strings, aligned with hypotheses.
    :return: Dict mapping "rouge_{1,2,l}/{f,p,r}_score" to mean values.
    """
    # Filter out hyps that are of 0 length
    # hyps_and_refs = zip(hypotheses, references)
    # hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]
    # hypotheses, references = zip(*hyps_and_refs)
    # Calculate ROUGE-1 F1, precision, recall scores
    rouge_1 = [
        rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)
    ]
    rouge_1_f, rouge_1_p, rouge_1_r = map(np.mean, zip(*rouge_1))
    # Calculate ROUGE-2 F1, precision, recall scores
    rouge_2 = [
        rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)
    ]
    rouge_2_f, rouge_2_p, rouge_2_r = map(np.mean, zip(*rouge_2))
    # Calculate ROUGE-L F1, precision, recall scores
    rouge_l = [
        rouge_l_sentence_level([hyp], [ref])
        for hyp, ref in zip(hypotheses, references)
    ]
    rouge_l_f, rouge_l_p, rouge_l_r = map(np.mean, zip(*rouge_l))
    return {
        "rouge_1/f_score": rouge_1_f,
        "rouge_1/r_score": rouge_1_r,
        "rouge_1/p_score": rouge_1_p,
        "rouge_2/f_score": rouge_2_f,
        "rouge_2/r_score": rouge_2_r,
        "rouge_2/p_score": rouge_2_p,
        "rouge_l/f_score": rouge_l_f,
        "rouge_l/r_score": rouge_l_r,
        "rouge_l/p_score": rouge_l_p,
    }
|
python
|
{
"resource": ""
}
|
q26164
|
rouge_1
|
train
|
def rouge_1(hypotheses, references):
    """
    Average ROUGE-1 F1 score over all hypothesis/reference pairs.
    """
    pair_scores = [rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)]
    f_score, _, _ = map(np.mean, zip(*pair_scores))
    return f_score
|
python
|
{
"resource": ""
}
|
q26165
|
rouge_2
|
train
|
def rouge_2(hypotheses, references):
    """
    Average ROUGE-2 F1 score over all hypothesis/reference pairs.
    """
    pair_scores = [rouge_n([hyp], [ref], 2) for hyp, ref in zip(hypotheses, references)]
    f_score, _, _ = map(np.mean, zip(*pair_scores))
    return f_score
|
python
|
{
"resource": ""
}
|
q26166
|
rouge_l
|
train
|
def rouge_l(hypotheses, references):
    """
    Average ROUGE-L F1 score over all hypothesis/reference pairs.
    """
    pair_scores = [rouge_l_sentence_level([hyp], [ref])
                   for hyp, ref in zip(hypotheses, references)]
    f_score, _, _ = map(np.mean, zip(*pair_scores))
    return f_score
|
python
|
{
"resource": ""
}
|
q26167
|
RawListTextDatasetLoader.load
|
train
|
def load(self,
         source_list: Iterable[List[str]],
         target_sentences: Iterable[List[Any]],
         num_samples_per_bucket: List[int]) -> 'ParallelDataSet':
    """
    Creates a parallel dataset base on source list of strings and target sentences.
    Returns a `sockeye.data_io.ParallelDataSet`.
    :param source_list: Source list of strings (e.g., filenames).
    :param target_sentences: Target sentences used to do bucketing.
    :param num_samples_per_bucket: Number of samples per bucket.
    :return: Returns a parallel dataset `sockeye.data_io.ParallelDataSet`.
    """
    assert len(num_samples_per_bucket) == len(self.buckets)
    data_source = [np.full((num_samples,), self.pad_id, dtype=object)
                   for num_samples in num_samples_per_bucket]
    # data_source is a List[numpy.array[str]] which semantic is bucket, index, str
    # Its loading to memory is deferred to the iterator, since the full data
    # is supposed to not fit in memory.
    data_target = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype)
                   for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)]
    data_label = [np.full((num_samples, target_len), self.pad_id, dtype=self.dtype)
                  for (source_len, target_len), num_samples in zip(self.buckets, num_samples_per_bucket)]
    # next free slot per bucket
    bucket_sample_index = [0 for buck in self.buckets]
    # track amount of padding introduced through bucketing
    num_tokens_target = 0
    num_pad_target = 0
    # Bucket sentences as padded np arrays
    for source, target in zip(source_list, target_sentences):
        target_len = len(target)
        buck_index, buck = get_target_bucket(self.buckets, target_len)
        if buck is None:
            continue  # skip this sentence pair
        num_tokens_target += buck[1]
        num_pad_target += buck[1] - target_len
        sample_index = bucket_sample_index[buck_index]
        data_source[buck_index][sample_index] = source
        data_target[buck_index][sample_index, :target_len] = target
        # NOTE(fhieber): while this is wasteful w.r.t memory, we need to explicitly create the label sequence
        # with the EOS symbol here sentence-wise and not per-batch due to variable sequence length within a batch.
        # Once MXNet allows item assignments given a list of indices (probably MXNet 1.0): e.g a[[0,1,5,2]] = x,
        # we can try again to compute the label sequence on the fly in next().
        data_label[buck_index][sample_index, :target_len] = target[1:] + [self.eos_id]
        bucket_sample_index[buck_index] += 1
    # convert target/label buckets to NDArrays; source stays as numpy object arrays
    for i in range(len(data_source)):
        data_target[i] = mx.nd.array(data_target[i], dtype=self.dtype)
        data_label[i] = mx.nd.array(data_label[i], dtype=self.dtype)
    if num_tokens_target > 0:
        logger.info("Created bucketed parallel data set. Introduced padding: target=%.1f%%)",
                    num_pad_target / num_tokens_target * 100)
    return ParallelDataSet(data_source, data_target, data_label)
|
python
|
{
"resource": ""
}
|
q26168
|
get_bank_sizes
|
train
|
def get_bank_sizes(num_constraints: int,
                   beam_size: int,
                   candidate_counts: List[int]) -> List[int]:
    """
    Evenly distributes the beam across the banks, where each bank is a portion of the beam devoted
    to hypotheses having met the same number of constraints, 0..num_constraints.
    After the assignment, banks with more slots than candidates are adjusted.
    :param num_constraints: The number of constraints.
    :param beam_size: The beam size.
    :param candidate_counts: The empirical counts of number of candidates in each bank.
    :return: A distribution over banks.
    """
    num_banks = num_constraints + 1
    base_size, leftover = divmod(beam_size, num_banks)
    allocation = [base_size] * num_banks
    # any remainder goes to the highest bank
    allocation[-1] += leftover
    # Moving right to left, push surplus slots (more slots than candidates)
    # into earlier banks. Starting from the bank that received the remainder
    # matters when num_constraints >= beam_size.
    for bank in range(num_banks - 1, -1, -1):
        surplus = allocation[bank] - candidate_counts[bank]
        if surplus > 0:
            allocation[bank] -= surplus
            allocation[(bank - 1) % num_banks] += surplus
    return allocation
|
python
|
{
"resource": ""
}
|
q26169
|
AvoidTrie.add_phrase
|
train
|
def add_phrase(self,
               phrase: List[int]) -> None:
    """
    Adds a phrase to this trie, walking (and creating) interior nodes
    iteratively instead of recursing.
    :param phrase: A non-empty list of word IDs to add to this trie node.
    """
    node = self
    # descend through all but the last word, creating nodes as needed
    for word in phrase[:-1]:
        if word not in node.children:
            node.children[word] = AvoidTrie()
        node = node.step(word)
    # the phrase's last word is a final id at the deepest node
    node.final_ids.add(phrase[-1])
|
python
|
{
"resource": ""
}
|
q26170
|
AvoidState.consume
|
train
|
def consume(self, word_id: int) -> 'AvoidState':
    """
    Consumes a word, and updates the state based on it. Returns new objects on a state change.
    The next state for a word can be tricky. Here are the cases:
    (1) If the word is found in our set of outgoing child arcs, we take that transition.
    (2) If the word is not found, and we are not in the root state, we need to reset.
        This means we pretend we were in the root state, and see if we can take a step
    (3) Otherwise, if we are not already in the root state (i.e., we were partially through
        the trie), we need to create a new object whose state is the root state
    (4) Finally, if we couldn't advance and were already in the root state, we can reuse
        this object.
    :param word_id: The word that was just generated.
    """
    # (1) extend the phrase currently being matched
    if word_id in self.state.children:
        return AvoidState(self.root, self.state.step(word_id))
    # (2) word starts a new phrase from the root
    if word_id in self.root.children:
        return AvoidState(self.root, self.root.step(word_id))
    # (3) mid-phrase but no transition possible: reset to the root state
    if self.state != self.root:
        return AvoidState(self.root, self.root)
    # (4) already at the root with no transition: nothing changes
    return self
|
python
|
{
"resource": ""
}
|
q26171
|
AvoidState.avoid
|
train
|
def avoid(self) -> Set[int]:
    """
    Words that must not be generated next: the single-token phrases tracked
    at the root (always banned) plus completions of the phrase currently
    being matched.
    :return: A set of integers representing words that must not be generated next by this hypothesis.
    """
    banned = set(self.root.final())
    banned.update(self.state.final())
    return banned
|
python
|
{
"resource": ""
}
|
q26172
|
AvoidBatch.consume
|
train
|
def consume(self, word_ids: mx.nd.NDArray) -> None:
    """
    Advances every per-sentence avoid-state with the word just generated
    for that sentence.
    :param word_ids: The set of word IDs, one entry per batch item.
    """
    generated = word_ids.asnumpy().tolist()
    for idx, wid in enumerate(generated):
        if self.global_avoid_states:
            self.global_avoid_states[idx] = self.global_avoid_states[idx].consume(wid)
        if self.local_avoid_states:
            self.local_avoid_states[idx] = self.local_avoid_states[idx].consume(wid)
|
python
|
{
"resource": ""
}
|
q26173
|
ConstrainedHypothesis.allowed
|
train
|
def allowed(self) -> Set[int]:
    """
    Returns the set of constrained words that could follow this one.
    For unfinished phrasal constraints, it is the next word in the phrase.
    In other cases, it is the list of all unmet constraints.
    If all constraints are met, an empty set is returned.
    :return: The set of next required word IDs (empty if all constraints are met).
    """
    items = set()  # type: Set[int]
    # Add extensions of a started-but-incomplete sequential constraint
    if self.last_met != -1 and self.is_sequence[self.last_met] == 1:
        word_id = self.constraints[self.last_met + 1]
        # EOS is only allowed when it is the last remaining constraint
        if word_id != self.eos_id or self.num_needed() == 1:
            items.add(word_id)
    # Add all constraints that aren't non-initial sequences
    else:
        for i, word_id in enumerate(self.constraints):
            # skip met constraints and words that continue someone else's phrase
            if not self.met[i] and (i == 0 or not self.is_sequence[i - 1]):
                if word_id != self.eos_id or self.num_needed() == 1:
                    items.add(word_id)
    return items
|
python
|
{
"resource": ""
}
|
q26174
|
SockeyeModel.load_config
|
train
|
def load_config(fname: str) -> ModelConfig:
    """
    Loads a model configuration from disk.
    :param fname: Path to load model configuration from.
    :return: Model configuration.
    """
    loaded = ModelConfig.load(fname)
    logger.info('ModelConfig loaded from "%s"', fname)
    return cast(ModelConfig, loaded)
|
python
|
{
"resource": ""
}
|
q26175
|
SockeyeModel.load_params_from_file
|
train
|
def load_params_from_file(self, fname: str):
    """
    Loads and sets model parameters from file.
    Validates that all (auxiliary) parameter names carry the model prefix.
    :param fname: Path to load parameters from.
    """
    utils.check_condition(os.path.exists(fname), "No model parameter file found under %s. "
                                                 "This is either not a model directory or the first training "
                                                 "checkpoint has not happened yet." % fname)
    self.params, self.aux_params = utils.load_params(fname)
    # all parameter names must share the model prefix to belong to this model
    utils.check_condition(all(name.startswith(self.prefix) for name in self.params.keys()),
                          "Not all parameter names start with model prefix '%s'" % self.prefix)
    utils.check_condition(all(name.startswith(self.prefix) for name in self.aux_params.keys()),
                          "Not all auxiliary parameter names start with model prefix '%s'" % self.prefix)
    logger.info('Loaded params from "%s"', fname)
|
python
|
{
"resource": ""
}
|
q26176
|
SockeyeModel._get_embed_weights
|
train
|
def _get_embed_weights(self, prefix: str) -> Tuple[mx.sym.Symbol, mx.sym.Symbol, mx.sym.Symbol]:
    """
    Returns embedding parameters for source and target.
    When source and target embeddings are shared, they are created here and passed in to each side,
    instead of being created in the Embedding constructors.
    :param prefix: Prefix.
    :return: Tuple of source embedding, target embedding and target output weight symbols.
    """
    w_embed_source = mx.sym.Variable(prefix + C.SOURCE_EMBEDDING_PREFIX + "weight",
                                     shape=(self.config.config_embed_source.vocab_size,
                                            self.config.config_embed_source.num_embed))
    w_embed_target = mx.sym.Variable(prefix + C.TARGET_EMBEDDING_PREFIX + "weight",
                                     shape=(self.config.config_embed_target.vocab_size,
                                            self.config.config_embed_target.num_embed))
    w_out_target = mx.sym.Variable(prefix + "target_output_weight", dtype='float32',
                                   shape=(self.config.vocab_target_size, self.decoder.get_num_hidden()))
    if self.config.weight_tying:
        # source/target tying replaces both embeddings with one shared variable
        if C.WEIGHT_TYING_SRC in self.config.weight_tying_type \
                and C.WEIGHT_TYING_TRG in self.config.weight_tying_type:
            logger.info("Tying the source and target embeddings.")
            w_embed_source = w_embed_target = mx.sym.Variable(prefix + C.SHARED_EMBEDDING_PREFIX + "weight",
                                                              shape=(self.config.config_embed_source.vocab_size,
                                                                     self.config.config_embed_source.num_embed))
        # softmax tying reuses the target embedding as the output projection
        if C.WEIGHT_TYING_SOFTMAX in self.config.weight_tying_type:
            logger.info("Tying the target embeddings and output layer parameters.")
            utils.check_condition(self.config.config_embed_target.num_embed == self.decoder.get_num_hidden(),
                                  "Weight tying requires target embedding size and decoder hidden size " +
                                  "to be equal: %d vs. %d" % (self.config.config_embed_target.num_embed,
                                                              self.decoder.get_num_hidden()))
            w_out_target = w_embed_target
    # remember variable names so parameters can be matched up later
    self._embed_weight_source_name = None
    if w_embed_source is not None:
        self._embed_weight_source_name = w_embed_source.name
    self._embed_weight_target_name = w_embed_target.name
    self._out_weight_target_name = w_out_target.name
    return w_embed_source, w_embed_target, w_out_target
|
python
|
{
"resource": ""
}
|
q26177
|
models_max_input_output_length
|
train
|
def models_max_input_output_length(models: List[InferenceModel],
                                   num_stds: int,
                                   forced_max_input_len: Optional[int] = None,
                                   forced_max_output_len: Optional[int] = None) -> Tuple[int, Callable]:
    """
    Returns a function to compute maximum output length given a fixed number of standard deviations as a
    safety margin, and the current input length.
    Mean and std are taken from the model with the largest values to allow proper ensembling of models
    trained on different data sets.

    :param models: List of models.
    :param num_stds: Number of standard deviations to add as a safety margin. If -1, returned maximum output lengths
                     will always be 2 * input_length.
    :param forced_max_input_len: An optional overwrite of the maximum input length.
    :param forced_max_output_len: An optional overwrite of the maximum output length.
    :return: The maximum input length and a function to get the output length given the input length.
    """
    # Take the most pessimistic (largest) length-ratio statistics across the ensemble.
    largest_mean = max(m.length_ratio_mean for m in models)
    largest_std = max(m.length_ratio_std for m in models)

    # Hard sequence-length limits: the tightest (smallest) limit of any model that has one.
    source_limits = [m.max_supported_seq_len_source for m in models
                     if m.max_supported_seq_len_source is not None]
    target_limits = [m.max_supported_seq_len_target for m in models
                     if m.max_supported_seq_len_target is not None]
    hard_limit_source = min(source_limits) if source_limits else None
    hard_limit_target = min(target_limits) if target_limits else None

    # Shortest maximum source length seen during training across all models.
    seen_max_source = min(m.training_max_seq_len_source for m in models)

    return get_max_input_output_length(hard_limit_source,
                                       hard_limit_target,
                                       seen_max_source,
                                       length_ratio_mean=largest_mean,
                                       length_ratio_std=largest_std,
                                       num_stds=num_stds,
                                       forced_max_input_len=forced_max_input_len,
                                       forced_max_output_len=forced_max_output_len)
|
python
|
{
"resource": ""
}
|
q26178
|
get_max_input_output_length
|
train
|
def get_max_input_output_length(supported_max_seq_len_source: Optional[int],
                                supported_max_seq_len_target: Optional[int],
                                training_max_seq_len_source: Optional[int],
                                length_ratio_mean: float,
                                length_ratio_std: float,
                                num_stds: int,
                                forced_max_input_len: Optional[int] = None,
                                forced_max_output_len: Optional[int] = None) -> Tuple[int, Callable]:
    """
    Returns a function to compute maximum output length given a fixed number of standard deviations as a
    safety margin, and the current input length. It takes into account optional maximum source and target lengths.

    :param supported_max_seq_len_source: The maximum source length supported by the models.
    :param supported_max_seq_len_target: The maximum target length supported by the models.
    :param training_max_seq_len_source: The maximum source length observed during training.
    :param length_ratio_mean: The mean of the length ratio that was calculated on the raw sequences with special
           symbols such as EOS or BOS.
    :param length_ratio_std: The standard deviation of the length ratio.
    :param num_stds: The number of standard deviations the target length may exceed the mean target length (as long as
           the supported maximum length allows for this).
    :param forced_max_input_len: An optional overwrite of the maximum input length.
    :param forced_max_output_len: An optional overwrite of the maximum output length.
    :return: The maximum input length and a function to get the output length given the input length.
    """
    space_for_bos = 1
    space_for_eos = 1

    # Safety factor applied to the input length to bound the output length.
    if num_stds < 0:
        factor = C.TARGET_MAX_LENGTH_FACTOR  # type: float
    else:
        factor = length_ratio_mean + (length_ratio_std * num_stds)

    # Guard-first chain replaces the original nested if/else; branches are mutually
    # exclusive, so behavior is unchanged.
    if forced_max_input_len is not None:
        max_input_len = forced_max_input_len
    elif supported_max_seq_len_source is not None and supported_max_seq_len_target is None:
        # Only the source side is hard-limited.
        max_input_len = supported_max_seq_len_source
    elif supported_max_seq_len_source is None and supported_max_seq_len_target is not None:
        # Only the target side is hard-limited: shrink the input if the projected output
        # (factor * training source length) would exceed it.
        max_output_len = supported_max_seq_len_target - space_for_bos - space_for_eos
        if np.ceil(factor * training_max_seq_len_source) > max_output_len:
            max_input_len = int(np.floor(max_output_len / factor))
        else:
            max_input_len = training_max_seq_len_source
    elif supported_max_seq_len_source is not None or supported_max_seq_len_target is not None:
        # Both sides are hard-limited (the single-sided cases were handled above).
        max_output_len = supported_max_seq_len_target - space_for_bos - space_for_eos
        if np.ceil(factor * supported_max_seq_len_source) > max_output_len:
            max_input_len = int(np.floor(max_output_len / factor))
        else:
            max_input_len = supported_max_seq_len_source
    else:
        # Any source/target length is supported; fall back to the training maximum.
        max_input_len = training_max_seq_len_source

    def get_max_output_length(input_length: int):
        """
        Returns the maximum output length for inference given the input length.
        Explicitly includes space for BOS and EOS sentence symbols in the target sequence, because we assume
        that the mean length ratios computed on the training data do not include these special symbols.
        (see data_io.analyze_sequence_lengths)
        """
        if forced_max_output_len is not None:
            return forced_max_output_len
        return int(np.ceil(factor * input_length)) + space_for_bos + space_for_eos

    return max_input_len, get_max_output_length
|
python
|
{
"resource": ""
}
|
q26179
|
make_input_from_plain_string
|
train
|
def make_input_from_plain_string(sentence_id: SentenceId, string: str) -> TranslatorInput:
    """
    Returns a TranslatorInput object from a plain string.

    :param sentence_id: Sentence id.
    :param string: An input string.
    :return: A TranslatorInput without any source factors.
    """
    tokens = list(data_io.get_tokens(string))
    return TranslatorInput(sentence_id, tokens=tokens, factors=None)
|
python
|
{
"resource": ""
}
|
q26180
|
make_input_from_factored_string
|
train
|
def make_input_from_factored_string(sentence_id: SentenceId,
                                    factored_string: str,
                                    translator: 'Translator',
                                    delimiter: str = C.DEFAULT_FACTOR_DELIMITER) -> TranslatorInput:
    """
    Returns a TranslatorInput object from a string with factor annotations on a token level, separated by delimiter.
    If translator does not require any source factors, the string is parsed as a plain token string.

    :param sentence_id: Sentence id.
    :param factored_string: An input string with additional factors per token, separated by delimiter.
    :param translator: A translator object.
    :param delimiter: A factor delimiter. Default: '|'.
    :return: A TranslatorInput (a bad-input marker if any token cannot be parsed).
    """
    utils.check_condition(bool(delimiter) and not delimiter.isspace(),
                          "Factor delimiter can not be whitespace or empty.")

    model_num_source_factors = translator.num_source_factors

    if model_num_source_factors == 1:
        # No extra factors expected: treat the whole string as plain tokens.
        return make_input_from_plain_string(sentence_id=sentence_id, string=factored_string)

    tokens = []  # type: Tokens
    factors = [[] for _ in range(model_num_source_factors - 1)]  # type: List[Tokens]
    for token_id, token in enumerate(data_io.get_tokens(factored_string)):
        pieces = token.split(delimiter)

        # Each token must split into surface form + exactly one value per factor,
        # and no piece may be empty.
        if not all(pieces) or len(pieces) != model_num_source_factors:
            logger.error("Failed to parse %d factors at position %d ('%s') in '%s'" % (model_num_source_factors,
                                                                                       token_id, token,
                                                                                       factored_string.strip()))
            return _bad_input(sentence_id, reason=factored_string)

        tokens.append(pieces[0])
        for i, factor in enumerate(factors):
            factors[i].append(pieces[i + 1])

    return TranslatorInput(sentence_id=sentence_id, tokens=tokens, factors=factors)
|
python
|
{
"resource": ""
}
|
q26181
|
make_input_from_multiple_strings
|
train
|
def make_input_from_multiple_strings(sentence_id: SentenceId, strings: List[str]) -> TranslatorInput:
    """
    Returns a TranslatorInput object from multiple strings, where the first element corresponds to the surface tokens
    and the remaining elements to additional factors. All strings must parse into token sequences of the same length.

    :param sentence_id: Sentence id.
    :param strings: A list of strings representing a factored input sequence.
    :return: A TranslatorInput (a bad-input marker if the factor sequences disagree in length).
    """
    if not strings:
        return TranslatorInput(sentence_id=sentence_id, tokens=[], factors=None)

    surface, *factor_strings = strings
    tokens = list(data_io.get_tokens(surface))
    factors = [list(data_io.get_tokens(s)) for s in factor_strings]

    # Every factor sequence must align one-to-one with the surface tokens.
    if any(len(factor) != len(tokens) for factor in factors):
        logger.error("Length of string sequences do not match: '%s'", strings)
        return _bad_input(sentence_id, reason=str(strings))

    return TranslatorInput(sentence_id=sentence_id, tokens=tokens, factors=factors)
|
python
|
{
"resource": ""
}
|
q26182
|
empty_translation
|
train
|
def empty_translation(add_nbest: bool = False) -> Translation:
    """
    Return an empty translation.

    :param add_nbest: Include (empty) nbest_translations in the translation object.
    :return: A Translation with no target ids, a 1x1 zero placeholder attention matrix,
             and a score of -inf (worst possible).
    """
    return Translation(target_ids=[],
                       attention_matrix=np.asarray([[0]]),
                       score=-np.inf,
                       nbest_translations=NBestTranslations([], [], []) if add_nbest else None)
|
python
|
{
"resource": ""
}
|
q26183
|
_concat_nbest_translations
|
train
|
def _concat_nbest_translations(translations: List[Translation], stop_ids: Set[int],
                               length_penalty: LengthPenalty,
                               brevity_penalty: Optional[BrevityPenalty] = None) -> Translation:
    """
    Combines nbest translations through concatenation.

    :param translations: A list of translations (sequence starting with BOS symbol,
           attention_matrix), score and length.
    :param stop_ids: The EOS symbols.
    :param length_penalty: LengthPenalty.
    :param brevity_penalty: Optional BrevityPenalty.
    :return: A concatenation of the translations with a score.
    """
    # Expand each input Translation's nbest list into one Translation per hypothesis.
    expanded_translations = (_expand_nbest_translation(translation) for translation in translations)

    concatenated_translations = []  # type: List[Translation]

    # zip(*...) groups the kth-best hypothesis of every input together, so each
    # group is concatenated into the kth-best output translation.
    for translations_to_concat in zip(*expanded_translations):
        concatenated_translations.append(_concat_translations(translations=list(translations_to_concat),
                                                              stop_ids=stop_ids,
                                                              length_penalty=length_penalty,
                                                              brevity_penalty=brevity_penalty))

    # Collapse the per-rank concatenations back into a single Translation with an nbest list.
    return _reduce_nbest_translations(concatenated_translations)
|
python
|
{
"resource": ""
}
|
q26184
|
_reduce_nbest_translations
|
train
|
def _reduce_nbest_translations(nbest_translations_list: List[Translation]) -> Translation:
    """
    Combines Translation objects that are nbest translations of the same sentence.

    :param nbest_translations_list: A list of Translation objects, all of them translations of
           the same source sentence. The first entry is used as the primary translation.
    :return: A single Translation object where nbest lists are collapsed.
    """
    # The first entry provides the top-level fields of the combined Translation.
    best_translation = nbest_translations_list[0]

    # Collect the parallel nbest components (ids, attention, score) from all hypotheses.
    sequences = [translation.target_ids for translation in nbest_translations_list]
    attention_matrices = [translation.attention_matrix for translation in nbest_translations_list]
    scores = [translation.score for translation in nbest_translations_list]

    nbest_translations = NBestTranslations(sequences, attention_matrices, scores)

    return Translation(best_translation.target_ids,
                       best_translation.attention_matrix,
                       best_translation.score,
                       best_translation.beam_histories,
                       nbest_translations,
                       best_translation.estimated_reference_length)
|
python
|
{
"resource": ""
}
|
q26185
|
_expand_nbest_translation
|
train
|
def _expand_nbest_translation(translation: Translation) -> List[Translation]:
    """
    Expand nbest translations in a single Translation object to one Translation
    object per nbest translation.

    :param translation: A Translation object whose nbest_translations field is populated.
    :return: A list of Translation objects, one per nbest entry.
    """
    # Fixed malformed type comment: '# type = ...' is ignored by type checkers;
    # the correct form is '# type: ...'.
    nbest_list = []  # type: List[Translation]
    # Walk the parallel nbest lists (ids, attention, score) in lockstep.
    for target_ids, attention_matrix, score in zip(translation.nbest_translations.target_ids_list,
                                                   translation.nbest_translations.attention_matrices,
                                                   translation.nbest_translations.scores):
        nbest_list.append(Translation(target_ids, attention_matrix, score, translation.beam_histories,
                                      estimated_reference_length=translation.estimated_reference_length))
    return nbest_list
|
python
|
{
"resource": ""
}
|
q26186
|
_concat_translations
|
train
|
def _concat_translations(translations: List[Translation],
                         stop_ids: Set[int],
                         length_penalty: LengthPenalty,
                         brevity_penalty: Optional[BrevityPenalty] = None) -> Translation:
    """
    Combines translations through concatenation.

    :param translations: A list of translations (sequence starting with BOS symbol, attention_matrix), score and length.
    :param stop_ids: The EOS symbols.
    :param length_penalty: Instance of the LengthPenalty class initialized with alpha and beta.
    :param brevity_penalty: Optional Instance of the BrevityPenalty class initialized with a brevity weight.
    :return: A concatenation of the translations with a score.
    """
    # Concatenation of all target ids without BOS and EOS
    target_ids = []
    attention_matrices = []
    beam_histories = []  # type: List[BeamHistory]
    estimated_reference_length = None  # type: Optional[float]

    for idx, translation in enumerate(translations):
        if idx == len(translations) - 1:
            # Keep the final chunk as-is (including any trailing stop symbol).
            target_ids.extend(translation.target_ids)
            attention_matrices.append(translation.attention_matrix)
        else:
            # For intermediate chunks, strip a trailing stop symbol and its attention row.
            if translation.target_ids[-1] in stop_ids:
                target_ids.extend(translation.target_ids[:-1])
                attention_matrices.append(translation.attention_matrix[:-1, :])
            else:
                target_ids.extend(translation.target_ids)
                attention_matrices.append(translation.attention_matrix)
        beam_histories.extend(translation.beam_histories)
        # Accumulate per-chunk estimated reference lengths where predicted.
        if translation.estimated_reference_length is not None:
            if estimated_reference_length is None:
                estimated_reference_length = translation.estimated_reference_length
            else:
                estimated_reference_length += translation.estimated_reference_length

    # Combine attention matrices: place each chunk's matrix on the block diagonal
    # of a zero matrix whose shape is the element-wise sum of all chunk shapes.
    attention_shapes = [attention_matrix.shape for attention_matrix in attention_matrices]
    attention_matrix_combined = np.zeros(np.sum(np.asarray(attention_shapes), axis=0))
    pos_t, pos_s = 0, 0
    for attention_matrix, (len_t, len_s) in zip(attention_matrices, attention_shapes):
        attention_matrix_combined[pos_t:pos_t + len_t, pos_s:pos_s + len_s] = attention_matrix
        pos_t += len_t
        pos_s += len_s

    def _brevity_penalty(hypothesis_length, reference_length):
        # A missing brevity penalty contributes zero cost.
        return 0.0 if brevity_penalty is None else brevity_penalty.get(hypothesis_length, reference_length)

    # Unnormalize + sum and renormalize the score:
    score = sum((translation.score + _brevity_penalty(len(translation.target_ids), translation.estimated_reference_length)) \
                * length_penalty.get(len(translation.target_ids))
                for translation in translations)
    score = score / length_penalty.get(len(target_ids)) - _brevity_penalty(len(target_ids), estimated_reference_length)

    return Translation(target_ids, attention_matrix_combined, score, beam_histories,
                       estimated_reference_length=estimated_reference_length)
|
python
|
{
"resource": ""
}
|
q26187
|
InferenceModel.initialize
|
train
|
def initialize(self, max_batch_size: int, max_input_length: int, get_max_output_length_function: Callable):
    """
    Delayed construction of modules to ensure multiple Inference models can agree on computing a common
    maximum output length.

    :param max_batch_size: Maximum batch size.
    :param max_input_length: Maximum input length.
    :param get_max_output_length_function: Callable to compute maximum output length.
    """
    self.max_batch_size = max_batch_size
    self.max_input_length = max_input_length
    if self.max_input_length > self.training_max_seq_len_source:
        logger.warning("Model was only trained with sentences up to a length of %d, "
                       "but a max_input_len of %d is used.",
                       self.training_max_seq_len_source, self.max_input_length)
    self.get_max_output_length = get_max_output_length_function

    # check the maximum supported length of the encoder & decoder:
    if self.max_supported_seq_len_source is not None:
        utils.check_condition(self.max_input_length <= self.max_supported_seq_len_source,
                              "Encoder only supports a maximum length of %d" % self.max_supported_seq_len_source)
    if self.max_supported_seq_len_target is not None:
        decoder_max_len = self.get_max_output_length(max_input_length)
        utils.check_condition(decoder_max_len <= self.max_supported_seq_len_target,
                              "Decoder only supports a maximum length of %d, but %d was requested. Note that the "
                              "maximum output length depends on the input length and the source/target length "
                              "ratio observed during training." % (self.max_supported_seq_len_target,
                                                                   decoder_max_len))

    # Construct the bucketing modules for encoder and decoder.
    self.encoder_module, self.encoder_default_bucket_key = self._get_encoder_module()
    self.decoder_module, self.decoder_default_bucket_key = self._get_decoder_module()

    # Bind both modules at their maximum data shapes, inference only (no gradients).
    max_encoder_data_shapes = self._get_encoder_data_shapes(self.encoder_default_bucket_key,
                                                            self.max_batch_size)
    # The decoder operates on the expanded beam: batch size * beam size rows.
    max_decoder_data_shapes = self._get_decoder_data_shapes(self.decoder_default_bucket_key,
                                                            self.max_batch_size * self.beam_size)
    self.encoder_module.bind(data_shapes=max_encoder_data_shapes, for_training=False, grad_req="null")
    self.decoder_module.bind(data_shapes=max_decoder_data_shapes, for_training=False, grad_req="null")

    # Load trained parameters and initialize both modules with them.
    self.load_params_from_file(self.params_fname)
    self.encoder_module.init_params(arg_params=self.params, aux_params=self.aux_params, allow_missing=False)
    self.decoder_module.init_params(arg_params=self.params, aux_params=self.aux_params, allow_missing=False)

    if self.cache_output_layer_w_b:
        if self.output_layer.weight_normalization:
            # precompute normalized output layer weight imperatively
            assert self.output_layer.weight_norm is not None
            weight = self.params[self.output_layer.weight_norm.weight.name].as_in_context(self.context)
            scale = self.params[self.output_layer.weight_norm.scale.name].as_in_context(self.context)
            self.output_layer_w = self.output_layer.weight_norm(weight, scale)
        else:
            self.output_layer_w = self.params[self.output_layer.w.name].as_in_context(self.context)
        self.output_layer_b = self.params[self.output_layer.b.name].as_in_context(self.context)
|
python
|
{
"resource": ""
}
|
q26188
|
InferenceModel._get_encoder_module
|
train
|
def _get_encoder_module(self) -> Tuple[mx.mod.BucketingModule, int]:
    """
    Returns a BucketingModule for the encoder. Given a source sequence, it returns
    the initial decoder states of the model.
    The bucket key for this module is the length of the source sequence.

    :return: Tuple of encoder module and default bucket key.
    """

    def sym_gen(source_seq_len: int):
        # Builds the encoder symbol graph for a given source bucket length.
        source = mx.sym.Variable(C.SOURCE_NAME)
        # First split output (factor 0) carries the surface word ids, used to
        # derive the actual (unpadded) sequence lengths.
        source_words = source.split(num_outputs=self.num_source_factors, axis=2, squeeze_axis=True)[0]
        source_length = utils.compute_lengths(source_words)

        # source embedding
        (source_embed,
         source_embed_length,
         source_embed_seq_len) = self.embedding_source.encode(source, source_length, source_seq_len)

        # encoder
        # source_encoded: (source_encoded_length, batch_size, encoder_depth)
        (source_encoded,
         source_encoded_length,
         source_encoded_seq_len) = self.encoder.encode(source_embed,
                                                       source_embed_length,
                                                       source_embed_seq_len)

        # initial decoder states
        decoder_init_states = self.decoder.init_states(source_encoded,
                                                       source_encoded_length,
                                                       source_encoded_seq_len)

        data_names = [C.SOURCE_NAME]
        label_names = []  # type: List[str]

        # predict length ratios
        predicted_length_ratios = []  # type: List[mx.nd.NDArray]
        if self.length_ratio is not None:
            # predicted_length_ratios: List[(n, 1)]
            predicted_length_ratios = [self.length_ratio(source_encoded, source_encoded_length)]
        # The optional length-ratio prediction is appended as the LAST module output;
        # run_encoder relies on this ordering to split it off.
        return mx.sym.Group(decoder_init_states + predicted_length_ratios), data_names, label_names

    default_bucket_key = self.max_input_length
    module = mx.mod.BucketingModule(sym_gen=sym_gen,
                                    default_bucket_key=default_bucket_key,
                                    context=self.context)
    return module, default_bucket_key
|
python
|
{
"resource": ""
}
|
q26189
|
InferenceModel._get_decoder_data_shapes
|
train
|
def _get_decoder_data_shapes(self, bucket_key: Tuple[int, int], batch_beam_size: int) -> List[mx.io.DataDesc]:
    """
    Returns data shapes of the decoder module.

    :param bucket_key: Tuple of (maximum input length, maximum target length).
    :param batch_beam_size: Batch size * beam size.
    :return: List of data descriptions.
    """
    source_max_length, target_max_length = bucket_key
    # One previous-target-word id per beam item, followed by all decoder state
    # arrays, whose shapes depend on the encoded source length.
    return [mx.io.DataDesc(name=C.TARGET_NAME, shape=(batch_beam_size,),
                           layout="NT")] + self.decoder.state_shapes(batch_beam_size,
                                                                     target_max_length,
                                                                     self.encoder.get_encoded_seq_len(
                                                                         source_max_length),
                                                                     self.encoder.get_num_hidden())
|
python
|
{
"resource": ""
}
|
q26190
|
InferenceModel.run_encoder
|
train
|
def run_encoder(self,
                source: mx.nd.NDArray,
                source_max_length: int) -> Tuple['ModelState', mx.nd.NDArray]:
    """
    Runs forward pass of the encoder.

    Encodes source given source length and bucket key. Returns the initial model
    state for decoding, with all decoder init states tiled to beam size, plus the
    estimated length ratios (also tiled to beam size), or None if the model does
    not predict length ratios.

    :param source: Integer-coded input tokens. Shape (batch_size, source length, num_source_factors).
    :param source_max_length: Bucket key.
    :return: Tuple of initial model state and estimated length ratios (or None).
    """
    batch_size = source.shape[0]
    batch = mx.io.DataBatch(data=[source],
                            label=None,
                            bucket_key=source_max_length,
                            provide_data=self._get_encoder_data_shapes(source_max_length, batch_size))

    self.encoder_module.forward(data_batch=batch, is_train=False)
    decoder_init_states = self.encoder_module.get_outputs()

    if self.length_ratio is not None:
        # The length-ratio prediction is appended as the last encoder-module output
        # (see _get_encoder_module); split it off and tile it to beam size.
        estimated_length_ratio = decoder_init_states[-1]
        estimated_length_ratio = mx.nd.repeat(estimated_length_ratio, repeats=self.beam_size, axis=0)
        decoder_init_states = decoder_init_states[:-1]
    else:
        # Removed redundant no-op self-assignment of decoder_init_states here.
        estimated_length_ratio = None

    # replicate encoder/init module results beam size times
    decoder_init_states = [mx.nd.repeat(s, repeats=self.beam_size, axis=0) for s in decoder_init_states]
    return ModelState(decoder_init_states), estimated_length_ratio
|
python
|
{
"resource": ""
}
|
q26191
|
ModelState.sort_state
|
train
|
def sort_state(self, best_hyp_indices: mx.nd.NDArray):
    """
    Sorts states according to k-best order from last step in beam search.

    Each state array is re-gathered along its first axis using the given
    hypothesis indices.
    """
    reordered = []
    for state in self.states:
        reordered.append(mx.nd.take(state, best_hyp_indices))
    self.states = reordered
|
python
|
{
"resource": ""
}
|
q26192
|
Translator._log_linear_interpolation
|
train
|
def _log_linear_interpolation(predictions):
    """
    Returns averaged and re-normalized log probabilities.

    :param predictions: Per-model probability arrays; each must support .log()
                        and the averaged result .log_softmax().
    :return: Negative re-normalized log probabilities (costs).
    """
    # Average the per-model log probabilities (log-linear interpolation).
    log_probs = utils.average_arrays([p.log() for p in predictions])
    # Re-normalize with log_softmax and negate so the result is a cost.
    # pylint: disable=invalid-unary-operand-type
    return -log_probs.log_softmax()
|
python
|
{
"resource": ""
}
|
q26193
|
Translator._make_result
|
train
|
def _make_result(self,
                 trans_input: TranslatorInput,
                 translation: Translation) -> TranslatorOutput:
    """
    Returns a translator result from generated target-side word ids, attention matrices and scores.
    Strips stop ids from translation string.

    :param trans_input: Translator input.
    :param translation: The translation + attention and score.
    :return: TranslatorOutput.
    """
    target_ids = translation.target_ids
    target_tokens = [self.vocab_target_inv[target_id] for target_id in target_ids]
    target_string = C.TOKEN_SEPARATOR.join(data_io.ids2tokens(target_ids, self.vocab_target_inv, self.strip_ids))

    # Attention columns beyond the (unpadded) source length are padding; crop them.
    attention_matrix = translation.attention_matrix
    attention_matrix = attention_matrix[:, :len(trans_input.tokens)]

    if translation.nbest_translations is None:
        return TranslatorOutput(sentence_id=trans_input.sentence_id,
                                translation=target_string,
                                tokens=target_tokens,
                                attention_matrix=attention_matrix,
                                score=translation.score,
                                pass_through_dict=trans_input.pass_through_dict,
                                beam_histories=translation.beam_histories)
    else:
        nbest_target_ids = translation.nbest_translations.target_ids_list
        # Loop variables renamed: the original shadowed the builtin 'id' and the
        # outer 'target_ids' variable inside these comprehensions.
        target_tokens_list = [[self.vocab_target_inv[token_id] for token_id in ids]
                              for ids in nbest_target_ids]
        target_strings = [C.TOKEN_SEPARATOR.join(
            data_io.ids2tokens(ids,
                               self.vocab_target_inv,
                               self.strip_ids)) for ids in nbest_target_ids]

        attention_matrices = [matrix[:, :len(trans_input.tokens)] for matrix in
                              translation.nbest_translations.attention_matrices]
        scores = translation.nbest_translations.scores

        return TranslatorOutput(sentence_id=trans_input.sentence_id,
                                translation=target_string,
                                tokens=target_tokens,
                                attention_matrix=attention_matrix,
                                score=translation.score,
                                pass_through_dict=trans_input.pass_through_dict,
                                beam_histories=translation.beam_histories,
                                nbest_translations=target_strings,
                                nbest_tokens=target_tokens_list,
                                nbest_attention_matrices=attention_matrices,
                                nbest_scores=scores)
|
python
|
{
"resource": ""
}
|
q26194
|
Translator._translate_nd
|
train
|
def _translate_nd(self,
                  source: mx.nd.NDArray,
                  source_length: int,
                  restrict_lexicon: Optional[lexicon.TopKLexicon],
                  raw_constraints: List[Optional[constrained.RawConstraintList]],
                  raw_avoid_list: List[Optional[constrained.RawConstraintList]],
                  max_output_lengths: mx.nd.NDArray) -> List[Translation]:
    """
    Translates source of source_length, given a bucket_key.

    :param source: Source ids. Shape: (batch_size, bucket_key, num_factors).
    :param source_length: Bucket key.
    :param restrict_lexicon: Lexicon to use for vocabulary restriction.
    :param raw_constraints: A list of optional constraint lists.
    :param raw_avoid_list: A list of optional lists of phrases to avoid.
    :param max_output_lengths: Maximum output length per input, passed to beam search.
    :return: Sequence of translations.
    """
    # Run beam search, then extract the final translations from the beam results.
    return self._get_best_from_beam(*self._beam_search(source,
                                                       source_length,
                                                       restrict_lexicon,
                                                       raw_constraints,
                                                       raw_avoid_list,
                                                       max_output_lengths))
|
python
|
{
"resource": ""
}
|
q26195
|
Translator._encode
|
train
|
def _encode(self, sources: mx.nd.NDArray, source_length: int) -> Tuple[List[ModelState], mx.nd.NDArray]:
    """
    Returns a ModelState for each model representing the state of the model after encoding the source.

    :param sources: Source ids. Shape: (batch_size, bucket_key, num_factors).
    :param source_length: Bucket key.
    :return: List of ModelStates and the estimated reference length based on ratios averaged over models.
    """
    model_states = []
    ratios = []
    for model in self.models:
        state, ratio = model.run_encoder(sources, source_length)
        model_states.append(state)
        # Not every model predicts a length ratio; collect only those that do.
        if ratio is not None:
            ratios.append(ratio)

    # num_seq takes batch_size and beam_size into account
    num_seq = model_states[0].states[0].shape[0]
    if self.constant_length_ratio > 0.0:
        # override all ratios with the constant value
        length_ratios = mx.nd.full(val=self.constant_length_ratio, shape=(num_seq, 1), ctx=self.context)
    else:
        if len(ratios) > 0:  # some model predicted a ratio?
            # average the ratios over the models that actually we able to predict them
            length_ratios = mx.nd.mean(mx.nd.stack(*ratios, axis=1), axis=1)
        else:
            # No predictions available: fall back to zero (no length estimate).
            length_ratios = mx.nd.zeros((num_seq, 1), ctx=self.context)

    # Scale ratios by the encoded source length to obtain absolute length estimates.
    encoded_source_length=self.models[0].encoder.get_encoded_seq_len(source_length)
    return model_states, length_ratios * encoded_source_length
|
python
|
{
"resource": ""
}
|
q26196
|
Translator._get_best_word_indices_for_kth_hypotheses
|
train
|
def _get_best_word_indices_for_kth_hypotheses(ks: np.ndarray, all_hyp_indices: np.ndarray) -> np.ndarray:
    """
    Traverses the matrix of best hypotheses indices collected during beam search in reversed order by
    using the kth hypotheses index as a backpointer.
    Returns an array containing the indices into the best_word_indices collected during beam search to extract
    the kth hypotheses.

    :param ks: The kth-best hypotheses to extract. Supports multiple for batch_size > 1. Shape: (batch,).
    :param all_hyp_indices: All best hypotheses indices list collected in beam search. Shape: (batch * beam, steps).
    :return: Array of indices into the best_word_indices collected in beam search
             that extract the kth-best hypothesis. Shape: (batch, steps - 1).
    """
    batch_size = ks.shape[0]
    # Fixed: num_steps was computed twice in the original; compute it once.
    num_steps = all_hyp_indices.shape[1]
    result = np.zeros((batch_size, num_steps - 1), dtype=all_hyp_indices.dtype)
    # first index into the history of the desired hypotheses.
    pointer = all_hyp_indices[ks, -1]
    # For each column/step follow the backpointer, starting from the penultimate column/step.
    for step in range(num_steps - 2, -1, -1):
        result[:, step] = pointer
        pointer = all_hyp_indices[pointer, step]
    return result
|
python
|
{
"resource": ""
}
|
q26197
|
Translator._print_beam
|
train
|
def _print_beam(self,
                sequences: mx.nd.NDArray,
                accumulated_scores: mx.nd.NDArray,
                finished: mx.nd.NDArray,
                inactive: mx.nd.NDArray,
                constraints: List[Optional[constrained.ConstrainedHypothesis]],
                timestep: int) -> None:
    """
    Prints the beam for debugging purposes.

    :param sequences: The beam histories (shape: batch_size * beam_size, max_output_len).
    :param accumulated_scores: The accumulated scores for each item in the beam.
           Shape: (batch_size * beam_size, target_vocab_size).
    :param finished: Indicates which items are finished (shape: batch_size * beam_size).
    :param inactive: Indicates any inactive items (shape: batch_size * beam_size).
    :param constraints: Per-item lexical constraint state (None for unconstrained items).
    :param timestep: The current timestep.
    """
    logger.info('BEAM AT TIMESTEP %d', timestep)
    batch_beam_size = sequences.shape[0]
    for i in range(batch_beam_size):
        # for each hypothesis, print its entire history
        score = accumulated_scores[i].asscalar()
        word_ids = [int(x.asscalar()) for x in sequences[i]]
        # Number of still-unmet constraints for this hypothesis (-1 if unconstrained).
        unmet = constraints[i].num_needed() if constraints[i] is not None else -1
        # Word id 0 is skipped when rendering; inactive rows print as dashes.
        hypothesis = '----------' if inactive[i] else ' '.join(
            [self.vocab_target_inv[x] for x in word_ids if x != 0])
        logger.info('%d %d %d %d %.2f %s', i + 1, finished[i].asscalar(), inactive[i].asscalar(), unmet, score,
                    hypothesis)
|
python
|
{
"resource": ""
}
|
q26198
|
TopK.hybrid_forward
|
train
|
def hybrid_forward(self, F, scores, offset):
    """
    Get the lowest k elements per sentence from a `scores` matrix.

    :param F: MXNet symbol or ndarray API namespace (supplied by the hybrid machinery).
    :param scores: Vocabulary scores for the next beam step. (batch_size * beam_size, target_vocabulary_size)
    :param offset: Array to add to the hypothesis indices for offsetting in batch decoding.
    :return: The row indices, column indices and values of the k smallest items in matrix.
    """
    # Shape: (batch size, beam_size * vocab_size)
    folded_scores = F.reshape(scores, shape=(-1, self.k * self.vocab_size))

    # Scores are costs, so the k best entries are the k smallest (is_ascend=True).
    values, indices = F.topk(folded_scores, axis=1, k=self.k, ret_typ='both', is_ascend=True)

    # Project indices back into original shape (which is different for t==1 and t>1)
    indices = F.reshape(F.cast(indices, 'int32'), shape=(-1,))
    # TODO: we currently exploit a bug in the implementation of unravel_index to not require knowing the first shape
    # value. See https://github.com/apache/incubator-mxnet/issues/13862
    unraveled = F.unravel_index(indices, shape=(C.LARGEST_INT, self.vocab_size))

    # Split unraveled coordinates into hypothesis (row) and word (column) indices.
    best_hyp_indices, best_word_indices = F.split(unraveled, axis=0, num_outputs=2, squeeze_axis=True)
    # Shift hypothesis indices into the global (batched) beam coordinate system.
    best_hyp_indices = best_hyp_indices + offset
    values = F.reshape(values, shape=(-1, 1))
    return best_hyp_indices, best_word_indices, values
|
python
|
{
"resource": ""
}
|
q26199
|
SampleK.hybrid_forward
|
train
|
def hybrid_forward(self, F, scores, target_dists, finished, best_hyp_indices):
    """
    Choose an extension of each hypothesis from its softmax distribution.

    :param F: MXNet symbol or ndarray API namespace (supplied by the hybrid machinery).
    :param scores: Vocabulary scores for the next beam step. (batch_size * beam_size, target_vocabulary_size)
    :param target_dists: The non-cumulative target distributions (ignored).
    :param finished: The list of finished hypotheses.
    :param best_hyp_indices: Best hypothesis indices constant.
    :return: The row indices, column indices, and values of the sampled words.
    """
    # Map the negative logprobs to probabilities so as to have a distribution
    target_dists = F.exp(-target_dists)

    # n == 0 means sample from the full vocabulary. Otherwise, we sample from the top n.
    if self.n != 0:
        # select the top n in each row, via a mask
        masked_items = F.topk(target_dists, k=self.n, ret_typ='mask', axis=1, is_ascend=False)
        # set unmasked items to 0
        masked_items = F.where(masked_items, target_dists, masked_items)
        # renormalize
        target_dists = F.broadcast_div(masked_items, F.sum(masked_items, axis=1, keepdims=True))

    # Sample from the target distributions over words, then get the corresponding values from the cumulative scores
    best_word_indices = F.random.multinomial(target_dists, get_prob=False)
    # Zeroes for finished hypotheses.
    best_word_indices = F.where(finished, F.zeros_like(best_word_indices), best_word_indices)
    values = F.pick(scores, best_word_indices, axis=1, keepdims=True)

    # Crop the constant hypothesis-index array to the current batch*beam size;
    # sampling keeps each hypothesis in its own row (no reordering).
    best_hyp_indices = F.slice_like(best_hyp_indices, best_word_indices, axes=(0,))
    return best_hyp_indices, best_word_indices, values
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.