| _id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 75–19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q7600
|
Slot._set_value
|
train
|
def _set_value(self, slot_record):
"""Sets the value of this slot based on its corresponding _SlotRecord.
Does nothing if the slot has not yet been filled.
Args:
slot_record: The _SlotRecord containing this Slot's value.
"""
if slot_record.status == _SlotRecord.FILLED:
self.filled = True
self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(
slot_record)
self._fill_datetime = slot_record.fill_time
self._value = slot_record.value
|
python
|
{
"resource": ""
}
|
q7601
|
PipelineFuture._inherit_outputs
|
train
|
def _inherit_outputs(self,
pipeline_name,
already_defined,
resolve_outputs=False):
"""Inherits outputs from a calling Pipeline.
Args:
pipeline_name: The Pipeline class name (used for debugging).
already_defined: Maps output name to stringified db.Key (of _SlotRecords)
of any existing output slots to be inherited by this future.
resolve_outputs: When True, this method will dereference all output slots
before returning back to the caller, making those output slots' values
available.
Raises:
UnexpectedPipelineError when resolve_outputs is True and any of the output
slots could not be retrieved from the Datastore.
"""
for name, slot_key in already_defined.iteritems():
if not isinstance(slot_key, db.Key):
slot_key = db.Key(slot_key)
slot = self._output_dict.get(name)
if slot is None:
if self._strict:
raise UnexpectedPipelineError(
'Inherited output named "%s" must be filled but '
'not declared for pipeline class "%s"' % (name, pipeline_name))
else:
self._output_dict[name] = Slot(name=name, slot_key=slot_key)
else:
slot.key = slot_key
slot._exists = True
if resolve_outputs:
slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
all_slots = db.get(slot_key_dict.keys())
for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
if slot_record is None:
raise UnexpectedPipelineError(
'Inherited output named "%s" for pipeline class "%s" is '
'missing its Slot in the datastore: "%s"' %
(slot.name, pipeline_name, slot.key))
slot = slot_key_dict[slot_record.key()]
slot._set_value(slot_record)
|
python
|
{
"resource": ""
}
|
q7602
|
Pipeline.from_id
|
train
|
def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None):
"""Returns an instance corresponding to an existing Pipeline.
The returned object will have the same properties a Pipeline does while
it's running synchronously (e.g., as when it was first allocated), allowing
callers to inspect caller arguments, outputs, fill slots, complete the
pipeline, abort, retry, etc.
Args:
pipeline_id: The ID of this pipeline (a string).
resolve_outputs: When True, dereference the outputs of this Pipeline
so their values can be accessed by the caller.
_pipeline_record: Internal-only. The _PipelineRecord instance to use
to instantiate this instance instead of fetching it from
the datastore.
Returns:
A Pipeline sub-class instance, or None if it could not be found.
"""
pipeline_record = _pipeline_record
# Support pipeline IDs and idempotence_keys that are not unicode.
if not isinstance(pipeline_id, unicode):
try:
pipeline_id = pipeline_id.encode('utf-8')
except UnicodeDecodeError:
pipeline_id = hashlib.sha1(pipeline_id).hexdigest()
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
if pipeline_record is None:
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
return None
try:
pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
except ImportError, e:
logging.warning('Tried to find Pipeline %s#%s, but class could '
'not be found. Using default Pipeline class instead.',
pipeline_record.class_path, pipeline_id)
pipeline_func_class = cls
params = pipeline_record.params
arg_list, kwarg_dict = _dereference_args(
pipeline_record.class_path, params['args'], params['kwargs'])
outputs = PipelineFuture(pipeline_func_class.output_names)
outputs._inherit_outputs(
pipeline_record.class_path,
params['output_slots'],
resolve_outputs=resolve_outputs)
stage = pipeline_func_class(*arg_list, **kwarg_dict)
stage.backoff_seconds = params['backoff_seconds']
stage.backoff_factor = params['backoff_factor']
stage.max_attempts = params['max_attempts']
stage.task_retry = params['task_retry']
stage.target = params.get('target') # May not be defined for old Pipelines
stage._current_attempt = pipeline_record.current_attempt
stage._set_values_internal(
_PipelineContext('', params['queue_name'], params['base_path']),
pipeline_key,
_PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record),
outputs,
pipeline_record.status)
return stage
|
python
|
{
"resource": ""
}
|
q7603
|
Pipeline.start
|
train
|
def start(self,
idempotence_key='',
queue_name='default',
base_path='/_ah/pipeline',
return_task=False,
countdown=None,
eta=None):
"""Starts a new instance of this pipeline.
Args:
idempotence_key: The ID to use for this Pipeline and throughout its
asynchronous workflow to ensure the operations are idempotent. If
empty, a starting key will be automatically assigned.
queue_name: What queue this Pipeline's workflow should execute on.
base_path: The relative URL path to where the Pipeline API is
mounted for access by the taskqueue API or external requests.
return_task: When True, a task to start this pipeline will be returned
instead of submitted, allowing the caller to start off this pipeline
as part of a separate transaction (potentially leaving this newly
allocated pipeline's datastore entities in place if that separate
transaction fails for any reason).
countdown: Time in seconds into the future that this Task should execute.
Defaults to zero.
eta: A datetime.datetime specifying the absolute time at which the task
should be executed. Must not be specified if 'countdown' is specified.
This may be timezone-aware or timezone-naive. If None, defaults to now.
For pull tasks, no worker will be able to lease this task before the
time indicated by eta.
Returns:
A taskqueue.Task instance if return_task was True. This task will *not*
have a name, thus to ensure reliable execution of your pipeline you
should add() this task as part of a separate Datastore transaction.
Raises:
PipelineExistsError if the pipeline with the given idempotence key exists.
PipelineSetupError if the pipeline could not start for any other reason.
"""
if not idempotence_key:
idempotence_key = uuid.uuid4().hex
elif not isinstance(idempotence_key, unicode):
try:
idempotence_key.encode('utf-8')
except UnicodeDecodeError:
idempotence_key = hashlib.sha1(idempotence_key).hexdigest()
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
context = _PipelineContext('', queue_name, base_path)
future = PipelineFuture(self.output_names, force_strict=True)
try:
self._set_values_internal(
context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
return context.start(
self, return_task=return_task, countdown=countdown, eta=eta)
except Error:
# Pass through exceptions that originate in this module.
raise
except Exception, e:
# Re-type any exceptions that were raised in dependent methods.
raise PipelineSetupError('Error starting %s#%s: %s' % (
self, idempotence_key, str(e)))
|
python
|
{
"resource": ""
}
|
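A minimal usage sketch may help tie from_id and start together. Nothing below comes from the records themselves: the LogMessage class, its run body, the idempotence key, and the `import pipeline` module name are assumptions for illustration, and reading `outputs.default.filled` / `.value` assumes the slot accessors of this API.

```python
import logging
import pipeline  # assumed import name for the Pipeline API


class LogMessage(pipeline.Pipeline):
    """Hypothetical synchronous pipeline used only for illustration."""

    def run(self, message):
        logging.info('Hello: %s', message)


# Start a new instance under an explicit idempotence key.
stage = LogMessage('world')
stage.start(queue_name='default', idempotence_key='hello-1')

# Later (for example, in another request) re-load it by ID and inspect it.
reloaded = LogMessage.from_id('hello-1')
if reloaded is not None and reloaded.outputs.default.filled:
    logging.info('Default output: %r', reloaded.outputs.default.value)
```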
q7604
|
Pipeline.retry
|
train
|
def retry(self, retry_message=''):
"""Forces a currently running asynchronous pipeline to retry.
Note this may not be called by synchronous or generator pipelines. Those
must instead raise the 'Retry' exception during execution.
Args:
retry_message: Optional message explaining why the retry happened.
Returns:
True if the Pipeline should be retried, False if it cannot be cancelled
mid-flight for some reason.
"""
if not self.async:
raise UnexpectedPipelineError(
'May only call retry() method for asynchronous pipelines.')
if self.try_cancel():
self._context.transition_retry(self._pipeline_key, retry_message)
return True
else:
return False
|
python
|
{
"resource": ""
}
|
q7605
|
Pipeline.abort
|
train
|
def abort(self, abort_message=''):
"""Mark the entire pipeline up to the root as aborted.
Note this should only be called from *outside* the context of a running
pipeline. Synchronous and generator pipelines should raise the 'Abort'
exception to cause this behavior during execution.
Args:
abort_message: Optional message explaining why the abort happened.
Returns:
True if the abort signal was sent successfully; False if the pipeline
could not be aborted for any reason.
"""
# TODO: Use thread-local variable to enforce that this is not called
# while a pipeline is executing in the current thread.
if (self.async and self._root_pipeline_key == self._pipeline_key and
not self.try_cancel()):
# Handle the special case where the root pipeline is async and thus
# cannot be aborted outright.
return False
else:
return self._context.begin_abort(
self._root_pipeline_key, abort_message=abort_message)
|
python
|
{
"resource": ""
}
|
q7606
|
Pipeline.fill
|
train
|
def fill(self, name_or_slot, value):
"""Fills an output slot required by this Pipeline.
Args:
name_or_slot: The name of the slot (a string) or Slot record to fill.
value: The serializable value to assign to this slot.
Raises:
UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError
if trying to output to a slot that was not declared ahead of time.
"""
if isinstance(name_or_slot, basestring):
slot = getattr(self.outputs, name_or_slot)
elif isinstance(name_or_slot, Slot):
slot = name_or_slot
else:
raise UnexpectedPipelineError(
'Could not fill invalid output name: %r' % name_or_slot)
if not slot._exists:
raise SlotNotDeclaredError(
'Cannot fill output with name "%s" that was just '
'declared within the Pipeline context.' % slot.name)
self._context.fill_slot(self._pipeline_key, slot, value)
|
python
|
{
"resource": ""
}
|
q7607
|
Pipeline.set_status
|
train
|
def set_status(self, message=None, console_url=None, status_links=None):
"""Sets the current status of this pipeline.
This method is purposefully non-transactional. Updates are written to the
datastore immediately and overwrite all existing statuses.
Args:
message: (optional) Overall status message.
console_url: (optional) Relative URL to use for the "console" of this
pipeline that displays current progress. When None, no console will
be displayed.
status_links: (optional) Dictionary of readable link names to relative
URLs that should be associated with this pipeline as it runs. These links
provide convenient access to other dashboards, consoles, etc associated
with the pipeline.
Raises:
PipelineRuntimeError if the status could not be set for any reason.
"""
if _TEST_MODE:
logging.info(
'New status for %s#%s: message=%r, console_url=%r, status_links=%r',
self, self.pipeline_id, message, console_url, status_links)
return
status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id)
root_pipeline_key = db.Key.from_path(
_PipelineRecord.kind(), self.root_pipeline_id)
status_record = _StatusRecord(
key=status_key, root_pipeline=root_pipeline_key)
try:
if message:
status_record.message = message
if console_url:
status_record.console_url = console_url
if status_links:
# Alphabetize the list.
status_record.link_names = sorted(
db.Text(s) for s in status_links.iterkeys())
status_record.link_urls = [
db.Text(status_links[name]) for name in status_record.link_names]
status_record.status_time = datetime.datetime.utcnow()
status_record.put()
except Exception, e:
raise PipelineRuntimeError('Could not set status for %s#%s: %s' %
(self, self.pipeline_id, str(e)))
|
python
|
{
"resource": ""
}
|
q7608
|
Pipeline.complete
|
train
|
def complete(self, default_output=None):
"""Marks this asynchronous Pipeline as complete.
Args:
default_output: What value the 'default' output slot should be assigned.
Raises:
UnexpectedPipelineError if the slot no longer exists or this method was
called for a pipeline that is not async.
"""
# TODO: Enforce that all outputs expected by this async pipeline were
filled before this complete() function was called. May require all
# async functions to declare their outputs upfront.
if not self.async:
raise UnexpectedPipelineError(
'May only call complete() method for asynchronous pipelines.')
self._context.fill_slot(
self._pipeline_key, self.outputs.default, default_output)
|
python
|
{
"resource": ""
}
|
q7609
|
Pipeline.get_callback_url
|
train
|
def get_callback_url(self, **kwargs):
"""Returns a relative URL for invoking this Pipeline's callback method.
Args:
kwargs: Dictionary mapping keyword argument names to single values that
should be passed to the callback when it is invoked.
Raises:
UnexpectedPipelineError if this is invoked on a pipeline that is not async.
"""
# TODO: Support positional parameters.
if not self.async:
raise UnexpectedPipelineError(
'May only call get_callback_url() method for asynchronous pipelines.')
kwargs['pipeline_id'] = self._pipeline_key.name()
params = urllib.urlencode(sorted(kwargs.items()))
return '%s/callback?%s' % (self.base_path, params)
|
python
|
{
"resource": ""
}
|
q7610
|
Pipeline.get_callback_task
|
train
|
def get_callback_task(self, *args, **kwargs):
"""Returns a task for calling back this Pipeline.
Args:
params: Keyword argument containing a dictionary of key/value pairs
that will be passed to the callback when it is executed.
args, kwargs: Passed to the taskqueue.Task constructor. Use these
arguments to set the task name (for idempotence), etc.
Returns:
A taskqueue.Task instance that must be enqueued by the caller.
"""
if not self.async:
raise UnexpectedPipelineError(
'May only call get_callback_task() method for asynchronous pipelines.')
params = kwargs.get('params', {})
kwargs['params'] = params
params['pipeline_id'] = self._pipeline_key.name()
kwargs['url'] = self.base_path + '/callback'
kwargs['method'] = 'POST'
return taskqueue.Task(*args, **kwargs)
|
python
|
{
"resource": ""
}
|
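The three records above (complete, get_callback_url, get_callback_task) fit together in asynchronous pipelines. A sketch follows, hedged heavily: the `async` class flag and `callback()` hook follow the usual shape of this Python 2-era API (where `async` was not yet a keyword), and `notify_external_service` is hypothetical.

```python
class WaitForApproval(pipeline.Pipeline):
    """Hypothetical asynchronous pipeline; completes via an external callback."""

    async = True  # asynchronous pipelines may use callbacks and complete()

    def run(self):
        # Hand an external system a URL it can request when it is done.
        notify_external_service(self.get_callback_url(decision='approved'))

    def callback(self, decision):
        # Fill the default slot and mark the pipeline complete.
        self.complete(default_output=decision)
```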
q7611
|
Pipeline.cleanup
|
train
|
def cleanup(self):
"""Clean up this Pipeline and all Datastore records used for coordination.
Only works when called on a root pipeline. Child pipelines will ignore
calls to this method.
After this method is called, Pipeline.from_id() and related status
methods will return inconsistent or missing results. This method is
fire-and-forget and asynchronous.
"""
if self._root_pipeline_key is None:
raise UnexpectedPipelineError(
'Could not cleanup Pipeline with unknown root pipeline ID.')
if not self.is_root:
return
task = taskqueue.Task(
params=dict(root_pipeline_key=self._root_pipeline_key),
url=self.base_path + '/cleanup',
headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
taskqueue.Queue(self.queue_name).add(task)
|
python
|
{
"resource": ""
}
|
q7612
|
Pipeline.with_params
|
train
|
def with_params(self, **kwargs):
"""Modify various execution parameters of a Pipeline before it runs.
This method has no effect in test mode.
Args:
kwargs: Attributes to modify on this Pipeline instance before it has
been executed.
Returns:
This Pipeline instance, for easy chaining.
"""
if _TEST_MODE:
logging.info(
'Setting runtime parameters for %s#%s: %r',
self, self.pipeline_id, kwargs)
return self
if self.pipeline_id is not None:
raise UnexpectedPipelineError(
'May only call with_params() on a Pipeline that has not yet '
'been scheduled for execution.')
ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')
for name, value in kwargs.iteritems():
if name not in ALLOWED:
raise TypeError('Unexpected keyword: %s=%r' % (name, value))
setattr(self, name, value)
return self
|
python
|
{
"resource": ""
}
|
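Since with_params returns self, retry settings can be chained onto construction before start(). A sketch reusing the hypothetical LogMessage class from the earlier note, with illustrative values:

```python
stage = LogMessage('world').with_params(
    max_attempts=5, backoff_seconds=30, backoff_factor=2)
stage.start(queue_name='default')
```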
q7613
|
Pipeline._set_class_path
|
train
|
def _set_class_path(cls, module_dict=sys.modules):
"""Sets the absolute path to this class as a string.
Used by the Pipeline API to reconstruct the Pipeline sub-class object
at execution time instead of passing around a serialized function.
Args:
module_dict: Used for testing.
"""
# Do not traverse the class hierarchy fetching the class path attribute.
found = cls.__dict__.get('_class_path')
if found is not None:
return
# Do not set the _class_path for the base-class, otherwise all children's
# lookups for _class_path will fall through and return 'Pipeline' above.
# This situation can happen if users call the generic Pipeline.from_id
# to get the result of a Pipeline without knowing its specific class.
if cls is Pipeline:
return
class_path = '%s.%s' % (cls.__module__, cls.__name__)
# When a WSGI handler is invoked as an entry point, any Pipeline class
# defined in the same file as the handler will get __module__ set to
# __main__. Thus we need to find out its real fully qualified path.
if cls.__module__ == '__main__':
for name, module in module_dict.items():
if name == '__main__':
continue
found = getattr(module, cls.__name__, None)
if found is cls:
class_path = '%s.%s' % (name, cls.__name__)
break
cls._class_path = class_path
|
python
|
{
"resource": ""
}
|
q7614
|
Pipeline._set_values_internal
|
train
|
def _set_values_internal(self,
context,
pipeline_key,
root_pipeline_key,
outputs,
result_status):
"""Sets the user-visible values provided as an API by this class.
Args:
context: The _PipelineContext used for this Pipeline.
pipeline_key: The db.Key of this pipeline.
root_pipeline_key: The db.Key of the root pipeline.
outputs: The PipelineFuture for this pipeline.
result_status: The result status of this pipeline.
"""
self._context = context
self._pipeline_key = pipeline_key
self._root_pipeline_key = root_pipeline_key
self._result_status = result_status
self.outputs = outputs
|
python
|
{
"resource": ""
}
|
q7615
|
Pipeline._callback_internal
|
train
|
def _callback_internal(self, kwargs):
"""Used to execute callbacks on asynchronous pipelines."""
logging.debug('Callback %s(*%s, **%s)#%s with params: %r',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name(), kwargs)
return self.callback(**kwargs)
|
python
|
{
"resource": ""
}
|
q7616
|
Pipeline._run_internal
|
train
|
def _run_internal(self,
context,
pipeline_key,
root_pipeline_key,
caller_output):
"""Used by the Pipeline evaluator to execute this Pipeline."""
self._set_values_internal(
context, pipeline_key, root_pipeline_key, caller_output,
_PipelineRecord.RUN)
logging.debug('Running %s(*%s, **%s)#%s',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name())
return self.run(*self.args, **self.kwargs)
|
python
|
{
"resource": ""
}
|
q7617
|
Pipeline._finalized_internal
|
train
|
def _finalized_internal(self,
context,
pipeline_key,
root_pipeline_key,
caller_output,
aborted):
"""Used by the Pipeline evaluator to finalize this Pipeline."""
result_status = _PipelineRecord.RUN
if aborted:
result_status = _PipelineRecord.ABORTED
self._set_values_internal(
context, pipeline_key, root_pipeline_key, caller_output, result_status)
logging.debug('Finalizing %s(*%r, **%r)#%s',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name())
try:
self.finalized()
except NotImplementedError:
pass
|
python
|
{
"resource": ""
}
|
q7618
|
InOrder._add_future
|
train
|
def _add_future(cls, future):
"""Adds a future to the list of in-order futures thus far.
Args:
future: The future to add to the list.
"""
if cls._local._activated:
cls._local._in_order_futures.add(future)
|
python
|
{
"resource": ""
}
|
q7619
|
InOrder._thread_init
|
train
|
def _thread_init(cls):
"""Ensure thread local is initialized."""
if not hasattr(cls._local, '_in_order_futures'):
cls._local._in_order_futures = set()
cls._local._activated = False
|
python
|
{
"resource": ""
}
|
q7620
|
_PipelineContext.from_environ
|
train
|
def from_environ(cls, environ=os.environ):
"""Constructs a _PipelineContext from the task queue environment."""
base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2]
return cls(
environ['HTTP_X_APPENGINE_TASKNAME'],
environ['HTTP_X_APPENGINE_QUEUENAME'],
base_path)
|
python
|
{
"resource": ""
}
|
q7621
|
_PipelineContext.fill_slot
|
train
|
def fill_slot(self, filler_pipeline_key, slot, value):
"""Fills a slot, enqueueing a task to trigger pending barriers.
Args:
filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
that filled this slot.
slot: The Slot instance to fill.
value: The serializable value to assign.
Raises:
UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
be found in the Datastore.
"""
if not isinstance(filler_pipeline_key, db.Key):
filler_pipeline_key = db.Key(filler_pipeline_key)
if _TEST_MODE:
slot._set_value_test(filler_pipeline_key, value)
else:
encoded_value = json.dumps(value,
sort_keys=True,
cls=mr_util.JsonEncoder)
value_text = None
value_blob = None
if len(encoded_value) <= _MAX_JSON_SIZE:
value_text = db.Text(encoded_value)
else:
# The encoded value is too big. Save it as a blob.
value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())
def txn():
slot_record = db.get(slot.key)
if slot_record is None:
raise UnexpectedPipelineError(
'Tried to fill missing slot "%s" '
'by pipeline ID "%s" with value: %r'
% (slot.key, filler_pipeline_key.name(), value))
# NOTE: Always take the override value here. If down-stream pipelines
# need a consistent view of all up-stream outputs (meaning, all of the
# outputs came from the same retry attempt of the upstream pipeline),
# the down-stream pipeline must also wait for the 'default' output
# of these up-stream pipelines.
slot_record.filler = filler_pipeline_key
slot_record.value_text = value_text
slot_record.value_blob = value_blob
slot_record.status = _SlotRecord.FILLED
slot_record.fill_time = self._gettime()
slot_record.put()
task = taskqueue.Task(
url=self.barrier_handler_path,
params=dict(
slot_key=slot.key,
use_barrier_indexes=True),
headers={'X-Ae-Slot-Key': slot.key,
'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
task.add(queue_name=self.queue_name, transactional=True)
db.run_in_transaction_options(
db.create_transaction_options(propagation=db.ALLOWED), txn)
self.session_filled_output_names.add(slot.name)
|
python
|
{
"resource": ""
}
|
q7622
|
_PipelineContext.begin_abort
|
train
|
def begin_abort(self, root_pipeline_key, abort_message):
"""Kicks off the abort process for a root pipeline and all its children.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
abort_message: Message explaining why the abort happened, only saved
into the root pipeline.
Returns:
True if the abort signal was sent successfully; False otherwise.
"""
def txn():
pipeline_record = db.get(root_pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort root pipeline ID "%s" but it does not exist.',
root_pipeline_key.name())
raise db.Rollback()
if pipeline_record.status == _PipelineRecord.ABORTED:
logging.warning(
'Tried to abort root pipeline ID "%s"; already in state: %s',
root_pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
if pipeline_record.abort_requested:
logging.warning(
'Tried to abort root pipeline ID "%s"; abort signal already sent.',
root_pipeline_key.name())
raise db.Rollback()
pipeline_record.abort_requested = True
pipeline_record.abort_message = abort_message
pipeline_record.put()
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
return True
return db.run_in_transaction(txn)
|
python
|
{
"resource": ""
}
|
q7623
|
_PipelineContext.continue_abort
|
train
|
def continue_abort(self,
root_pipeline_key,
cursor=None,
max_to_notify=_MAX_ABORTS_TO_BEGIN):
"""Sends the abort signal to all children for a root pipeline.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
cursor: The query cursor for enumerating _PipelineRecords when inserting
tasks to cause child pipelines to terminate.
max_to_notify: Used for testing.
"""
if not isinstance(root_pipeline_key, db.Key):
root_pipeline_key = db.Key(root_pipeline_key)
# NOTE: The results of this query may include _PipelineRecord instances
# that are not actually "reachable", meaning you cannot get to them by
# starting at the root pipeline and following "fanned_out" onward. This
# is acceptable because even these defunct _PipelineRecords will properly
# set their status to ABORTED when the signal comes, regardless of any
# other status they may have had.
#
# The only gotcha here is if a Pipeline's finalize method somehow modifies
# its inputs (like deleting an input file). In the case there are
# unreachable child pipelines, it will appear as if two finalize methods
# have been called instead of just one. The saving grace here is that
# finalize must be idempotent, so this *should* be harmless.
query = (
_PipelineRecord.all(cursor=cursor)
.filter('root_pipeline =', root_pipeline_key))
results = query.fetch(max_to_notify)
task_list = []
for pipeline_record in results:
if pipeline_record.status not in (
_PipelineRecord.RUN, _PipelineRecord.WAITING):
continue
pipeline_key = pipeline_record.key()
task_list.append(taskqueue.Task(
name='%s-%s-abort' % (self.task_name, pipeline_key.name()),
url=self.abort_handler_path,
params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT),
headers={'X-Ae-Pipeline-Key': pipeline_key}))
# Task continuation with sequence number to prevent fork-bombs.
if len(results) == max_to_notify:
the_match = re.match('(.*)-([0-9]+)', self.task_name)
if the_match:
prefix = the_match.group(1)
end = int(the_match.group(2)) + 1
else:
prefix = self.task_name
end = 0
task_list.append(taskqueue.Task(
name='%s-%d' % (prefix, end),
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key,
cursor=query.cursor())))
if task_list:
try:
taskqueue.Queue(self.queue_name).add(task_list)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
pass
|
python
|
{
"resource": ""
}
|
q7624
|
_PipelineContext.start
|
train
|
def start(self, pipeline, return_task=True, countdown=None, eta=None):
"""Starts a pipeline.
Args:
pipeline: Pipeline instance to run.
return_task: When True, do not submit the task to start the pipeline
but instead return it for someone else to enqueue.
countdown: Time in seconds into the future that this Task should execute.
Defaults to zero.
eta: A datetime.datetime specifying the absolute time at which the task
should be executed. Must not be specified if 'countdown' is specified.
This may be timezone-aware or timezone-naive. If None, defaults to now.
For pull tasks, no worker will be able to lease this task before the
time indicated by eta.
Returns:
The task to start this pipeline if return_task was True.
Raises:
PipelineExistsError if the pipeline with the given ID already exists.
"""
# Adjust all pipeline output keys for this Pipeline to be children of
# the _PipelineRecord, that way we can write them all and submit in a
# single transaction.
for name, slot in pipeline.outputs._output_dict.iteritems():
slot.key = db.Key.from_path(
*slot.key.to_path(), **dict(parent=pipeline._pipeline_key))
_, output_slots, params_text, params_blob = _generate_args(
pipeline, pipeline.outputs, self.queue_name, self.base_path)
@db.transactional(propagation=db.INDEPENDENT)
def txn():
pipeline_record = db.get(pipeline._pipeline_key)
if pipeline_record is not None:
raise PipelineExistsError(
'Pipeline with idempotence key "%s" already exists; params=%s' %
(pipeline._pipeline_key.name(),
_short_repr(pipeline_record.params)))
entities_to_put = []
for name, slot in pipeline.outputs._output_dict.iteritems():
entities_to_put.append(_SlotRecord(
key=slot.key,
root_pipeline=pipeline._pipeline_key))
entities_to_put.append(_PipelineRecord(
key=pipeline._pipeline_key,
root_pipeline=pipeline._pipeline_key,
is_root_pipeline=True,
# Bug in DB means we need to use the storage name here,
# not the local property name.
params=params_text,
params_blob=params_blob,
start_time=self._gettime(),
class_path=pipeline._class_path,
max_attempts=pipeline.max_attempts))
entities_to_put.extend(_PipelineContext._create_barrier_entities(
pipeline._pipeline_key,
pipeline._pipeline_key,
_BarrierRecord.FINALIZE,
output_slots))
db.put(entities_to_put)
task = taskqueue.Task(
url=self.pipeline_handler_path,
params=dict(pipeline_key=pipeline._pipeline_key),
headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key},
target=pipeline.target,
countdown=countdown,
eta=eta)
if return_task:
return task
task.add(queue_name=self.queue_name, transactional=True)
task = txn()
# Immediately mark the output slots as existing so they can be filled
# by asynchronous pipelines or used in test mode.
for output_slot in pipeline.outputs._output_dict.itervalues():
output_slot._exists = True
return task
|
python
|
{
"resource": ""
}
|
q7625
|
_PipelineContext._create_barrier_entities
|
train
|
def _create_barrier_entities(root_pipeline_key,
child_pipeline_key,
purpose,
blocking_slot_keys):
"""Creates all of the entities required for a _BarrierRecord.
Args:
root_pipeline_key: The root pipeline this is part of.
child_pipeline_key: The pipeline this barrier is for.
purpose: _BarrierRecord.START or _BarrierRecord.FINALIZE.
blocking_slot_keys: Set of db.Keys corresponding to _SlotRecords that
this barrier should wait on before firing.
Returns:
List of entities, starting with the _BarrierRecord entity, followed by
_BarrierIndexes used for firing when _SlotRecords are filled in the same
order as the blocking_slot_keys list provided. All of these entities
should be put in the Datastore to ensure the barrier fires properly.
"""
result = []
blocking_slot_keys = list(blocking_slot_keys)
barrier = _BarrierRecord(
parent=child_pipeline_key,
key_name=purpose,
target=child_pipeline_key,
root_pipeline=root_pipeline_key,
blocking_slots=blocking_slot_keys)
result.append(barrier)
for slot_key in blocking_slot_keys:
barrier_index_path = []
barrier_index_path.extend(slot_key.to_path())
barrier_index_path.extend(child_pipeline_key.to_path())
barrier_index_path.extend([_BarrierIndex.kind(), purpose])
barrier_index_key = db.Key.from_path(*barrier_index_path)
barrier_index = _BarrierIndex(
key=barrier_index_key,
root_pipeline=root_pipeline_key)
result.append(barrier_index)
return result
|
python
|
{
"resource": ""
}
|
q7626
|
_PipelineContext.handle_run_exception
|
train
|
def handle_run_exception(self, pipeline_key, pipeline_func, e):
"""Handles an exception raised by a Pipeline's user code.
Args:
pipeline_key: The pipeline that raised the error.
pipeline_func: The Pipeline instance that was running.
e: The exception that was raised.
Returns:
True if the exception should be re-raised up through the calling stack
by the caller of this method.
"""
if isinstance(e, Retry):
retry_message = str(e)
logging.warning('User forced retry for pipeline ID "%s" of %r: %s',
pipeline_key.name(), pipeline_func, retry_message)
self.transition_retry(pipeline_key, retry_message)
elif isinstance(e, Abort):
abort_message = str(e)
logging.warning('User forced abort for pipeline ID "%s" of %r: %s',
pipeline_key.name(), pipeline_func, abort_message)
pipeline_func.abort(abort_message)
else:
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception('Generator %r#%s raised exception. %s',
pipeline_func, pipeline_key.name(), retry_message)
self.transition_retry(pipeline_key, retry_message)
return pipeline_func.task_retry
|
python
|
{
"resource": ""
}
|
q7627
|
_PipelineContext.transition_run
|
train
|
def transition_run(self,
pipeline_key,
blocking_slot_keys=None,
fanned_out_pipelines=None,
pipelines_to_run=None):
"""Marks an asynchronous or generator pipeline as running.
Does nothing if the pipeline is no longer in a runnable state.
Args:
pipeline_key: The db.Key of the _PipelineRecord to update.
blocking_slot_keys: List of db.Key instances that this pipeline's
finalization barrier should wait on in addition to the existing one.
This is used to update the barrier to include all child outputs. When
None, the barrier will not be updated.
fanned_out_pipelines: List of db.Key instances of _PipelineRecords that
were fanned out by this generator pipeline. This is distinct from the
'pipelines_to_run' list because not all of the pipelines listed here
will be immediately ready to execute. When None, then this generator
yielded no children.
pipelines_to_run: List of db.Key instances of _PipelineRecords that should
be kicked off (fan-out) transactionally as part of this transition.
When None, no child pipelines will run. All db.Keys in this list must
also be present in the fanned_out_pipelines list.
Raises:
UnexpectedPipelineError if blocking_slot_keys was not empty and the
_BarrierRecord has gone missing.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning('Pipeline ID "%s" cannot be marked as run. '
'Does not exist.', pipeline_key.name())
raise db.Rollback()
if pipeline_record.status != _PipelineRecord.WAITING:
logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.RUN
if fanned_out_pipelines:
# NOTE: We must model the pipeline relationship in a top-down manner,
# meaning each pipeline must point forward to the pipelines that it
# fanned out to. The reason is race conditions. If evaluate()
# dies early, it may create many unused _PipelineRecord and _SlotRecord
# instances that never progress. The only way we know which of these
# are valid is by traversing the graph from the root, where the
# fanned_out property refers to those pipelines that were run using a
# transactional task.
child_pipeline_list = list(fanned_out_pipelines)
pipeline_record.fanned_out = child_pipeline_list
if pipelines_to_run:
child_indexes = [
child_pipeline_list.index(p) for p in pipelines_to_run]
child_indexes.sort()
task = taskqueue.Task(
url=self.fanout_handler_path,
params=dict(parent_key=str(pipeline_key),
child_indexes=child_indexes))
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
if blocking_slot_keys:
# NOTE: Always update a generator pipeline's finalization barrier to
# include all of the outputs of any pipelines that it runs, to ensure
# that finalized calls will not happen until all child pipelines have
# completed. This must happen transactionally with the enqueue of
# the fan-out kickoff task above to ensure the child output slots and
# the barrier blocking slots are the same.
barrier_key = db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.FINALIZE,
parent=pipeline_key)
finalize_barrier = db.get(barrier_key)
if finalize_barrier is None:
raise UnexpectedPipelineError(
'Pipeline ID "%s" cannot update finalize barrier. '
'Does not exist.' % pipeline_key.name())
else:
finalize_barrier.blocking_slots = list(
blocking_slot_keys.union(set(finalize_barrier.blocking_slots)))
finalize_barrier.put()
db.run_in_transaction(txn)
|
python
|
{
"resource": ""
}
|
q7628
|
_PipelineContext.transition_complete
|
train
|
def transition_complete(self, pipeline_key):
"""Marks the given pipeline as complete.
Does nothing if the pipeline is no longer in a state that can be completed.
Args:
pipeline_key: db.Key of the _PipelineRecord that has completed.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to mark pipeline ID "%s" as complete but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to mark pipeline ID "%s" as complete, found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.DONE
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
|
python
|
{
"resource": ""
}
|
q7629
|
_PipelineContext.transition_retry
|
train
|
def transition_retry(self, pipeline_key, retry_message):
"""Marks the given pipeline as requiring another retry.
Does nothing if all attempts have been exceeded.
Args:
pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
retry_message: User-supplied message indicating the reason for the retry.
"""
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to retry pipeline ID "%s" but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to retry pipeline ID "%s", found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
params = pipeline_record.params
offset_seconds = (
params['backoff_seconds'] *
(params['backoff_factor'] ** pipeline_record.current_attempt))
pipeline_record.next_retry_time = (
self._gettime() + datetime.timedelta(seconds=offset_seconds))
pipeline_record.current_attempt += 1
pipeline_record.retry_message = retry_message
pipeline_record.status = _PipelineRecord.WAITING
if pipeline_record.current_attempt >= pipeline_record.max_attempts:
root_pipeline_key = (
_PipelineRecord.root_pipeline.get_value_for_datastore(
pipeline_record))
logging.warning(
'Giving up on pipeline ID "%s" after %d attempt(s); causing abort '
'all the way to the root pipeline ID "%s"', pipeline_key.name(),
pipeline_record.current_attempt, root_pipeline_key.name())
# NOTE: We do *not* set the status to aborted here to ensure that
# this pipeline will be finalized before it has been marked as aborted.
pipeline_record.abort_message = (
'Aborting after %d attempts' % pipeline_record.current_attempt)
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
else:
task = taskqueue.Task(
url=self.pipeline_handler_path,
eta=pipeline_record.next_retry_time,
params=dict(pipeline_key=pipeline_key,
purpose=_BarrierRecord.START,
attempt=pipeline_record.current_attempt),
headers={'X-Ae-Pipeline-Key': pipeline_key},
target=pipeline_record.params['target'])
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
db.run_in_transaction(txn)
|
python
|
{
"resource": ""
}
|
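The delay computed in transition_retry is exponential in the attempt number: offset_seconds = backoff_seconds * backoff_factor ** current_attempt. A tiny worked example with hypothetical parameter values:

```python
backoff_seconds, backoff_factor = 15, 2
for attempt in range(4):
    delay = backoff_seconds * backoff_factor ** attempt
    print('attempt %d -> retry in %d seconds' % (attempt, delay))
# attempt 0 -> 15, attempt 1 -> 30, attempt 2 -> 60, attempt 3 -> 120 seconds
```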
q7630
|
_CallbackHandler.run_callback
|
train
|
def run_callback(self):
"""Runs the callback for the pipeline specified in the request.
Raises:
_CallbackTaskError if something was wrong with the request parameters.
"""
pipeline_id = self.request.get('pipeline_id')
if not pipeline_id:
raise _CallbackTaskError('"pipeline_id" parameter missing.')
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
raise _CallbackTaskError(
'Pipeline ID "%s" for callback does not exist.' % pipeline_id)
params = pipeline_record.params
real_class_path = params['class_path']
try:
pipeline_func_class = mr_util.for_name(real_class_path)
except ImportError, e:
raise _CallbackTaskError(
'Cannot load class named "%s" for pipeline ID "%s".'
% (real_class_path, pipeline_id))
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
if pipeline_func_class.public_callbacks:
pass
elif pipeline_func_class.admin_callbacks:
if not users.is_current_user_admin():
raise _CallbackTaskError(
'Unauthorized callback for admin-only pipeline ID "%s"'
% pipeline_id)
else:
raise _CallbackTaskError(
'External callback for internal-only pipeline ID "%s"'
% pipeline_id)
kwargs = {}
for key in self.request.arguments():
if key != 'pipeline_id':
kwargs[str(key)] = self.request.get(key)
def perform_callback():
stage = pipeline_func_class.from_id(pipeline_id)
if stage is None:
raise _CallbackTaskError(
'Pipeline ID "%s" deleted during callback' % pipeline_id)
return stage._callback_internal(kwargs)
# callback_xg_transaction is a 3-valued setting (None=no trans,
# False=1-eg-trans, True=xg-trans)
if pipeline_func_class._callback_xg_transaction is not None:
transaction_options = db.create_transaction_options(
xg=pipeline_func_class._callback_xg_transaction)
callback_result = db.run_in_transaction_options(transaction_options,
perform_callback)
else:
callback_result = perform_callback()
if callback_result is not None:
status_code, content_type, content = callback_result
self.response.set_status(status_code)
self.response.headers['Content-Type'] = content_type
self.response.out.write(content)
|
python
|
{
"resource": ""
}
|
q7631
|
_fix_path
|
train
|
def _fix_path():
"""Finds the google_appengine directory and fixes Python imports to use it."""
import os
import sys
all_paths = os.environ.get('PYTHONPATH').split(os.pathsep)
for path_dir in all_paths:
dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py')
if os.path.exists(dev_appserver_path):
logging.debug('Found appengine SDK on path!')
google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path))
sys.path.append(google_appengine)
# The next import will fix up sys.path even further to bring in
# any dependent lib directories that the SDK needs.
dev_appserver = __import__('dev_appserver')
sys.path.extend(dev_appserver.EXTRA_PATHS)
return
|
python
|
{
"resource": ""
}
|
q7632
|
_PipelineRecord.params
|
train
|
def params(self):
"""Returns the dictionary of parameters for this Pipeline."""
if hasattr(self, '_params_decoded'):
return self._params_decoded
if self.params_blob is not None:
value_encoded = self.params_blob.open().read()
else:
value_encoded = self.params_text
value = json.loads(value_encoded, cls=util.JsonDecoder)
if isinstance(value, dict):
kwargs = value.get('kwargs')
if kwargs:
adjusted_kwargs = {}
for arg_key, arg_value in kwargs.iteritems():
# Python only allows non-unicode strings as keyword arguments.
adjusted_kwargs[str(arg_key)] = arg_value
value['kwargs'] = adjusted_kwargs
self._params_decoded = value
return self._params_decoded
|
python
|
{
"resource": ""
}
|
q7633
|
_SlotRecord.value
|
train
|
def value(self):
"""Returns the value of this Slot."""
if hasattr(self, '_value_decoded'):
return self._value_decoded
if self.value_blob is not None:
encoded_value = self.value_blob.open().read()
else:
encoded_value = self.value_text
self._value_decoded = json.loads(encoded_value, cls=util.JsonDecoder)
return self._value_decoded
|
python
|
{
"resource": ""
}
|
q7634
|
_BarrierIndex.to_barrier_key
|
train
|
def to_barrier_key(cls, barrier_index_key):
"""Converts a _BarrierIndex key to a _BarrierRecord key.
Args:
barrier_index_key: db.Key for a _BarrierIndex entity.
Returns:
db.Key for the corresponding _BarrierRecord entity.
"""
barrier_index_path = barrier_index_key.to_path()
# Pick out the items from the _BarrierIndex key path that we need to
# construct the _BarrierRecord key path.
(pipeline_kind, dependent_pipeline_id,
unused_kind, purpose) = barrier_index_path[-4:]
barrier_record_path = (
pipeline_kind, dependent_pipeline_id,
_BarrierRecord.kind(), purpose)
return db.Key.from_path(*barrier_record_path)
|
python
|
{
"resource": ""
}
|
q7635
|
Sheet.partial_page
|
train
|
def partial_page(self, page, used_labels):
"""Allows a page to be marked as already partially used so you can
generate a PDF to print on the remaining labels.
Parameters
----------
page: positive integer
The page number to mark as partially used. The page must not have
already been started, i.e., for page 1 this must be called before
any labels have been started, for page 2 this must be called before
the first page is full and so on.
used_labels: iterable
An iterable of (row, column) pairs marking which labels have been
used already. The rows and columns must be within the bounds of the
sheet.
"""
# Check the page number is valid.
if page <= self.page_count:
raise ValueError("Page {0:d} has already started, cannot mark used labels now.".format(page))
# Add these to any existing labels marked as used.
used = self._used.get(page, set())
for row, column in used_labels:
# Check the index is valid.
if row < 1 or row > self.specs.rows:
raise IndexError("Invalid row number: {0:d}.".format(row))
if column < 1 or column > self.specs.columns:
raise IndexError("Invalid column number: {0:d}.".format(column))
# Add it.
used.add((int(row), int(column)))
# Save the details.
self._used[page] = used
|
python
|
{
"resource": ""
}
|
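A sketch of how partial_page might be used, assuming the pylabels-style Specification and Sheet constructors; the sheet geometry, the draw_name callable, and the used label positions are illustrative only.

```python
import labels  # assumed package name
from reportlab.graphics import shapes


def draw_name(label, width, height, obj):
    # Hypothetical drawing callable: write the object as text on the label.
    label.add(shapes.String(2, 2, str(obj)))


specs = labels.Specification(210, 297, 3, 8, 63.5, 33.9)  # A4 sheet, 3x8 labels, sizes in mm
sheet = labels.Sheet(specs, draw_name, border=True)

# The first three labels of page 1 are already used on the physical sheet.
sheet.partial_page(1, [(1, 1), (1, 2), (1, 3)])
```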
q7636
|
Sheet._new_page
|
train
|
def _new_page(self):
"""Helper function to start a new page. Not intended for external use.
"""
self._current_page = Drawing(*self._pagesize)
if self._bgimage:
self._current_page.add(self._bgimage)
self._pages.append(self._current_page)
self.page_count += 1
self._position = [1, 0]
|
python
|
{
"resource": ""
}
|
q7637
|
Sheet._next_label
|
train
|
def _next_label(self):
"""Helper method to move to the next label. Not intended for external use.
This does not increment the label_count attribute as the next label may
not be usable (it may have been marked as missing through
partial_page). See _next_unused_label for a generally more useful method.
"""
# Special case for the very first label.
if self.page_count == 0:
self._new_page()
# Filled up a page.
elif self._position == self._numlabels:
self._new_page()
# Filled up a row.
elif self._position[1] == self.specs.columns:
self._position[0] += 1
self._position[1] = 0
# Move to the next column.
self._position[1] += 1
|
python
|
{
"resource": ""
}
|
q7638
|
Sheet._next_unused_label
|
train
|
def _next_unused_label(self):
"""Helper method to move to the next unused label. Not intended for external use.
This method will shade in any missing labels if desired, and will
increment the label_count attribute once a suitable label position has
been found.
"""
self._next_label()
# This label may be missing.
if self.page_count in self._used:
# Keep trying while the label is missing.
missing = self._used.get(self.page_count, set())
while tuple(self._position) in missing:
# Throw the missing information away now we have used it. This
# allows the _shade_remaining_missing method to work.
missing.discard(tuple(self._position))
# Shade the missing label if desired.
if self.shade_missing:
self._shade_missing_label()
# Try our luck with the next label.
self._next_label()
missing = self._used.get(self.page_count, set())
# Increment the count now we have found a suitable position.
self.label_count += 1
|
python
|
{
"resource": ""
}
|
q7639
|
Sheet._calculate_edges
|
train
|
def _calculate_edges(self):
"""Calculate edges of the current label. Not intended for external use.
"""
# Calculate the left edge of the label.
left = self.specs.left_margin
left += (self.specs.label_width * (self._position[1] - 1))
if self.specs.column_gap:
left += (self.specs.column_gap * (self._position[1] - 1))
left *= mm
# And the bottom.
bottom = self.specs.sheet_height - self.specs.top_margin
bottom -= (self.specs.label_height * self._position[0])
if self.specs.row_gap:
bottom -= (self.specs.row_gap * (self._position[0] - 1))
bottom *= mm
# Done.
return float(left), float(bottom)
|
python
|
{
"resource": ""
}
|
q7640
|
Sheet._shade_missing_label
|
train
|
def _shade_missing_label(self):
"""Helper method to shade a missing label. Not intended for external use.
"""
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# Fill with a rectangle; the clipping path will take care of the borders.
r = shapes.Rect(0, 0, float(self._lw), float(self._lh))
r.fillColor = self.shade_missing
r.strokeColor = None
label.add(r)
# Add the label to the page.
label.shift(*self._calculate_edges())
self._current_page.add(label)
|
python
|
{
"resource": ""
}
|
q7641
|
Sheet._shade_remaining_missing
|
train
|
def _shade_remaining_missing(self):
"""Helper method to shade any missing labels remaining on the current
page. Not intended for external use.
Note that this will modify the internal _position attribute and should
therefore only be used once all the 'real' labels have been drawn.
"""
# Sanity check.
if not self.shade_missing:
return
# Run through each missing label left in the current page and shade it.
missing = self._used.get(self.page_count, set())
for position in missing:
self._position = position
self._shade_missing_label()
|
python
|
{
"resource": ""
}
|
q7642
|
Sheet._draw_label
|
train
|
def _draw_label(self, obj, count):
"""Helper method to draw on the current label. Not intended for external use.
"""
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# And one for the available area (i.e., after padding).
available = Drawing(float(self._dw), float(self._dh))
available.add(self._clip_drawing)
# Call the drawing function.
self.drawing_callable(available, float(self._dw), float(self._dh), obj)
# Render the contents on the label.
available.shift(float(self._lp), float(self._bp))
label.add(available)
# Draw the border if requested.
if self.border:
label.add(self._border)
# Add however many copies we need to.
for i in range(count):
# Find the next available label.
self._next_unused_label()
# Have we been told to skip this page?
if self.pages_to_draw and self.page_count not in self.pages_to_draw:
continue
# Add the label to the page. ReportLab stores the added drawing by
# reference so we have to copy it N times.
thislabel = copy(label)
thislabel.shift(*self._calculate_edges())
self._current_page.add(thislabel)
|
python
|
{
"resource": ""
}
|
q7643
|
Sheet.add_labels
|
train
|
def add_labels(self, objects, count=1):
"""Add multiple labels to the sheet.
Parameters
----------
objects: iterable
An iterable of the objects to add. Each of these will be passed to
the add_label method. Note that if this is a generator it will be
consumed.
count: positive integer or iterable of positive integers, default 1
The number of copies of each label to add. If a single integer,
that many copies of every label are added. If an iterable, then
each value specifies how many copies of the corresponding label to
add. The iterables are advanced in parallel until one is exhausted;
extra values in the other one are ignored. This means that if there
are fewer count entries than objects, the objects corresponding to
the missing counts will not be added to the sheet.
Note that if this is a generator it will be consumed. Also note
that the drawing function will only be called once for each label
and the results copied for the repeats. If the drawing function
maintains any state internally then using this parameter may break
it.
"""
# If we can convert it to an int, do so and use the itertools.repeat()
# method to create an infinite iterator from it. Otherwise, assume it
# is an iterable or sequence.
try:
count = int(count)
except TypeError:
pass
else:
count = repeat(count)
# If it is not already an iterator (e.g., it is a list or range object),
# create an iterator over it.
if not hasattr(count, 'next') and not hasattr(count, '__next__'):
count = iter(count)
# Go through the objects.
for obj in objects:
# Check we have a count for this one.
try:
thiscount = next(count)
except StopIteration:
break
# Draw it.
self._draw_label(obj, thiscount)
|
python
|
{
"resource": ""
}
|
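Continuing the hypothetical sheet above: add_labels pairs each object with its count and stops when either iterable runs out. A sketch:

```python
names = ['Ada', 'Grace', 'Alan']
sheet.add_labels(names, count=[2, 1, 3])  # 2 copies of 'Ada', 1 of 'Grace', 3 of 'Alan'
sheet.save('names.pdf')
```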
q7644
|
Sheet.save
|
train
|
def save(self, filelike):
"""Save the file as a PDF.
Parameters
----------
filelike: path or file-like object
The filename or file-like object to save the labels under. Any
existing contents will be overwritten.
"""
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Create a canvas.
canvas = Canvas(filelike, pagesize=self._pagesize)
# Render each created page onto the canvas.
for page in self._pages:
renderPDF.draw(page, canvas, 0, 0)
canvas.showPage()
# Done.
canvas.save()
|
python
|
{
"resource": ""
}
|
q7645
|
Sheet.preview
|
train
|
def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh
|
python
|
{
"resource": ""
}
|
q7646
|
Sheet.preview_string
|
train
|
def preview_string(self, page, format='png', dpi=72, background_colour=0xFFFFFF):
"""Render a preview image of a page as a string.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
"""
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
s = renderPM.drawToString(self._pages[page-1], format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh
# Done.
return s
|
python
|
{
"resource": ""
}
|
q7647
|
Specification.bounding_boxes
|
train
|
def bounding_boxes(self, mode='fraction', output='dict'):
"""Get the bounding boxes of the labels on a page.
Parameters
----------
mode: 'fraction', 'actual'
If 'fraction', the bounding boxes are expressed as a fraction of the
height and width of the sheet. If 'actual', they are the actual
position of the labels in millimetres from the top-left of the
sheet.
output: 'dict', 'json'
If 'dict', a dictionary with label identifier tuples (row, column)
as keys and a dictionary with 'left', 'right', 'top', and 'bottom'
entries as the values.
If 'json', a JSON encoded string which represents a dictionary with
keys of the string format 'rowxcolumn' and each value being a
bounding box dictionary with 'left', 'right', 'top', and 'bottom'
entries.
Returns
-------
The bounding boxes in the format set by the output parameter.
"""
boxes = {}
# Check the parameters.
if mode not in ('fraction', 'actual'):
raise ValueError("Unknown mode {0}.".format(mode))
if output not in ('dict', 'json'):
raise ValueError("Unknown output {0}.".format(output))
# Iterate over the rows.
for row in range(1, self.rows + 1):
# Top and bottom of all labels in the row.
top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap))
bottom = top + self.label_height
# Now iterate over all columns in this row.
for column in range(1, self.columns + 1):
# Left and right position of this column.
left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap))
right = left + self.label_width
# Output in the appropriate mode format.
if mode == 'fraction':
box = {
'top': top / self.sheet_height,
'bottom': bottom / self.sheet_height,
'left': left / self.sheet_width,
'right': right / self.sheet_width,
}
elif mode == 'actual':
box = {'top': top, 'bottom': bottom, 'left': left, 'right': right}
# Add to the collection.
if output == 'json':
boxes['{0:d}x{1:d}'.format(row, column)] = box
box['top'] = float(box['top'])
box['bottom'] = float(box['bottom'])
box['left'] = float(box['left'])
box['right'] = float(box['right'])
else:
boxes[(row, column)] = box
# Done.
if output == 'json':
return json.dumps(boxes)
return boxes
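
# Hedged usage sketch: the Specification constructor arguments below (sheet
# width/height, columns, rows, label width/height, all in mm) are assumptions
# based on typical pylabels usage; bounding_boxes() itself is defined above.
spec = Specification(210, 297, 2, 8, 90, 25)
fractions = spec.bounding_boxes(mode='fraction', output='dict')
print(fractions[(1, 1)]['left'], fractions[(1, 1)]['top'])
as_json = spec.bounding_boxes(mode='actual', output='json')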
|
python
|
{
"resource": ""
}
|
q7648
|
templatesCollector
|
train
|
def templatesCollector(text, open, close):
"""leaves related articles and wikitables in place"""
others = []
spans = [i for i in findBalanced(text, open, close)]
spanscopy = copy(spans)
for i in range(len(spans)):
start, end = spans[i]
o = text[start:end]
ol = o.lower()
if 'vaata|' in ol or 'wikitable' in ol:
spanscopy.remove(spans[i])
continue
others.append(o)
text = dropSpans(spanscopy, text)
return text, others
|
python
|
{
"resource": ""
}
|
q7649
|
assert_legal_arguments
|
train
|
def assert_legal_arguments(kwargs):
"""Assert that PrettyPrinter arguments are correct.
Raises
------
ValueError
In case there are unknown arguments or a single layer is mapped to more than one aesthetic.
"""
seen_layers = set()
for k, v in kwargs.items():
if k not in LEGAL_ARGUMENTS:
raise ValueError('Illegal argument <{0}>!'.format(k))
if k in AESTHETICS:
if v in seen_layers:
raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v))
seen_layers.add(v)
if k in VALUES:
if not isinstance(v, six.string_types) and not isinstance(v, list):
raise ValueError('Value <{0}> must be either string or list'.format(k))
if isinstance(v, list):
if len(v) == 0:
raise ValueError('Rules cannot be empty list')
for rule_matcher, rule_value in v:
if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types):
raise ValueError('Rule tuple elements must be strings')
|
python
|
{
"resource": ""
}
|
q7650
|
parse_arguments
|
train
|
def parse_arguments(kwargs):
"""Function that parses PrettyPrinter arguments.
Detects which aesthetics are mapped to which layers
and collects user-provided values.
Parameters
----------
kwargs: dict
The keyword arguments to PrettyPrinter.
Returns
-------
dict, dict
First dictionary is aesthetic to layer mapping.
Second dictionary is aesthetic to user value mapping.
"""
aesthetics = {}
values = {}
for aes in AESTHETICS:
if aes in kwargs:
aesthetics[aes] = kwargs[aes]
val_name = AES_VALUE_MAP[aes]
# map the user-provided CSS value or use the default
values[aes] = kwargs.get(val_name, DEFAULT_VALUE_MAP[aes])
return aesthetics, values
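
# Hedged usage sketch: 'color' is assumed to be one of the AESTHETICS and
# 'color_value' its entry in AES_VALUE_MAP; the layer name is arbitrary.
kwargs = {'color': 'named_entities', 'color_value': 'rgb(0, 0, 255)'}
assert_legal_arguments(kwargs)   # raises ValueError on illegal arguments
aesthetics, values = parse_arguments(kwargs)
# aesthetics == {'color': 'named_entities'}, values == {'color': 'rgb(0, 0, 255)'}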
|
python
|
{
"resource": ""
}
|
q7651
|
PrettyPrinter.render
|
train
|
def render(self, text, add_header=False):
"""Render the HTML.
Parameters
----------
        text
            The text to be rendered as HTML.
        add_header: boolean (default: False)
If True, add HTML5 header and footer.
Returns
-------
str
The rendered HTML.
"""
html = mark_text(text, self.aesthetics, self.rules)
html = html.replace('\n', '<br/>')
if add_header:
html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
#print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER)))
return html
|
python
|
{
"resource": ""
}
|
q7652
|
Trainer.train
|
train
|
def train(self, nerdocs, mode_filename):
"""Train a CRF model using given documents.
Parameters
----------
nerdocs: list of estnltk.estner.ner.Document.
The documents for model training.
mode_filename: str
            The filename where the model will be saved.
"""
trainer = pycrfsuite.Trainer(algorithm=self.algorithm,
params={'c2': self.c2},
verbose=self.verbose)
for doc in nerdocs:
for snt in doc.sentences:
xseq = [t.feature_list() for t in snt]
yseq = [t.label for t in snt]
trainer.append(xseq, yseq)
trainer.train(mode_filename)
|
python
|
{
"resource": ""
}
|
q7653
|
json_2_text
|
train
|
def json_2_text(inp, out, verbose = False):
"""Convert a Wikipedia article to Text object.
Concatenates the sections in wikipedia file and rearranges other information so it
can be interpreted as a Text object.
Links and other elements with start and end positions are annotated
as layers.
Parameters
----------
inp: directory of parsed et.wikipedia articles in json format
out: output directory of .txt files
    verbose: if True, prints every article title and the total count of converted files;
        if False, prints only every 50th count
    Returns
    -------
    None
        The converted articles are written as .txt files into the `out` directory.
"""
for root, dirs, filenames in os.walk(inp):
for f in filenames:
log = codecs.open(os.path.join(root, f), 'r')
j_obj = json.load(log)
j_obj = json_format(j_obj)
#not needed, cause the json_format takes care of the right structuring
#text = Text(j_obj)
textWriter(j_obj, out, verbose)
|
python
|
{
"resource": ""
}
|
q7654
|
concatenate_matches
|
train
|
def concatenate_matches(a, b, text, name):
"""Concatenate matches a and b.
All submatches will be copied to result."""
match = Match(a.start, b.end, text[a.start:b.end], name)
for k, v in a.matches.items():
match.matches[k] = v
for k, v in b.matches.items():
match.matches[k] = v
if a.name is not None:
aa = copy(a)
del aa[MATCHES]
match.matches[a.name] = aa
if b.name is not None:
bb = copy(b)
del bb[MATCHES]
match.matches[b.name] = bb
return match
|
python
|
{
"resource": ""
}
|
q7655
|
Match.dict
|
train
|
def dict(self):
"""Dictionary representing this match and all child symbol matches."""
res = copy(self)
if MATCHES in res:
del res[MATCHES]
if NAME in res:
del res[NAME]
res = {self.name: res}
for k, v in self.matches.items():
res[k] = v
if NAME in res[k]:
del res[k][NAME]
return res
|
python
|
{
"resource": ""
}
|
q7656
|
regex_from_markers
|
train
|
def regex_from_markers(markers):
"""Given a string of characters, construct a regex that matches them.
Parameters
----------
markers: str
        The string containing the marker characters.
Returns
-------
regex
The regular expression matching the given markers.
"""
return re.compile('|'.join([re.escape(c) for c in markers]))
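
# Example: build a regular expression that matches any of the marker
# characters and use it to strip them from a string.
marker_regex = regex_from_markers('<>[]~?')
cleaned = marker_regex.sub('', 'v<ai]kene')   # -> 'vaikene'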
|
python
|
{
"resource": ""
}
|
q7657
|
convert
|
train
|
def convert(word):
"""This method converts given `word` to UTF-8 encoding and `bytes` type for the
SWIG wrapper."""
if six.PY2:
if isinstance(word, unicode):
return word.encode('utf-8')
else:
return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain
else: # ==> Py3
if isinstance(word, bytes):
return word.decode('utf-8') # bytes must be in utf8
return word
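
# Example: both unicode strings and UTF-8 encoded bytes are accepted; the
# return value is the string type the SWIG wrapper expects on the running
# Python version (bytes on Python 2, str on Python 3).
convert(u'käsi')          # unicode input
convert(b'k\xc3\xa4si')   # UTF-8 encoded bytes input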
|
python
|
{
"resource": ""
}
|
q7658
|
postprocess_result
|
train
|
def postprocess_result(morphresult, trim_phonetic, trim_compound):
"""Postprocess vabamorf wrapper output."""
word, analysis = morphresult
return {
'text': deconvert(word),
'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
}
|
python
|
{
"resource": ""
}
|
q7659
|
trim_phonetics
|
train
|
def trim_phonetics(root):
"""Function that trims phonetic markup from the root.
Parameters
----------
root: str
The string to remove the phonetic markup.
Returns
-------
str
The string with phonetic markup removed.
"""
global phonetic_markers
global phonetic_regex
if root in phonetic_markers:
return root
else:
return phonetic_regex.sub('', root)
|
python
|
{
"resource": ""
}
|
q7660
|
get_root
|
train
|
def get_root(root, phonetic, compound):
"""Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, add phonetic information to the root forms.
compound: boolean
if True, add compound word markers to root forms.
"""
global compound_regex
if not phonetic:
root = trim_phonetics(root)
if not compound:
root = trim_compounds(root)
return root
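
# Example: strip both phonetic and compound markers from a root form. The
# sample root below follows Vabamorf's marker conventions ('<' for phonetics,
# '_' between compound parts) and the expected result is an assumption based
# on the default marker sets.
get_root('all_m<aa_raud_t<ee_jaam', phonetic=False, compound=False)
# expected: 'allmaaraudteejaam'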
|
python
|
{
"resource": ""
}
|
q7661
|
Vabamorf.instance
|
train
|
def instance():
"""Return an PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates new instance in case the
process has been forked.
"""
if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():
Vabamorf.pid = os.getpid()
Vabamorf.morf = Vabamorf()
return Vabamorf.morf
|
python
|
{
"resource": ""
}
|
q7662
|
Vabamorf.analyze
|
train
|
def analyze(self, words, **kwargs):
"""Perform morphological analysis and disambiguation of given text.
Parameters
----------
words: list of str or str
            Either a list of pretokenized words or a string. In case of a string, it will be split using
            the default behaviour of the string.split() function.
disambiguate: boolean (default: True)
            Disambiguate the output and remove inconsistent analyses.
guess: boolean (default: True)
Use guessing in case of unknown words
propername: boolean (default: True)
Perform additional analysis of proper names.
compound: boolean (default: True)
Add compound word markers to root forms.
phonetic: boolean (default: False)
Add phonetic information to root forms.
Returns
-------
list of (list of dict)
            List of analyses for each word in the input.
"""
# if input is a string, then tokenize it
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
morfresults = self._morf.analyze(
vm.StringVector(words),
kwargs.get('disambiguate', True),
kwargs.get('guess', True),
True, # phonetic and compound information
kwargs.get('propername', True))
trim_phonetic = kwargs.get('phonetic', False)
trim_compound = kwargs.get('compound', True)
return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in morfresults]
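
# Hedged usage sketch: analyze a short Estonian phrase with the shared
# analyzer instance. The 'lemma' key in the printed output is an assumption
# about what postprocess_analysis() produces; 'text' and 'analysis' come from
# postprocess_result() above.
analyses = Vabamorf.instance().analyze('Tere maailm')
for word in analyses:
    print(word['text'], [a.get('lemma') for a in word['analysis']])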
|
python
|
{
"resource": ""
}
|
q7663
|
Vabamorf.disambiguate
|
train
|
def disambiguate(self, words):
"""Disambiguate previously analyzed words.
Parameters
----------
words: list of dict
A sentence of words.
Returns
-------
list of dict
Sentence of disambiguated words.
"""
words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words])
disambiguated = self._morf.disambiguate(words)
return [postprocess_result(mr, False, True) for mr in disambiguated]
|
python
|
{
"resource": ""
}
|
q7664
|
Vabamorf.spellcheck
|
train
|
def spellcheck(self, words, suggestions=True):
"""Spellcheck given sentence.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
words: list of str or str
            Either a list of pretokenized words or a string. In case of a string, it will be split using
            the default behaviour of the string.split() function.
suggestions: boolean (default: True)
Add spell suggestions to result.
Returns
-------
list of dict
Each dictionary contains following values:
'word': the original word
'spelling': True, if the word was spelled correctly
'suggestions': list of suggested strings in case of incorrect spelling
"""
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
spellresults = self._morf.spellcheck(words, suggestions)
results = []
for spellresult in spellresults:
suggestions = [deconvert(s) for s in spellresult.suggestions]
result = {
'text': deconvert(spellresult.word),
'spelling': spellresult.spelling,
'suggestions': suggestions
}
results.append(result)
return results
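
# Usage sketch: spellcheck a phrase and show suggestions for misspelled words;
# the result keys follow the dictionaries built in the method above.
results = Vabamorf.instance().spellcheck('Tere maaailm', suggestions=True)
for r in results:
    if not r['spelling']:
        print(r['text'], '->', r['suggestions'])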
|
python
|
{
"resource": ""
}
|
q7665
|
ClauseSegmenter.annotate_indices
|
train
|
def annotate_indices(self, sentence):
"""Add clause indexes to already annotated sentence."""
max_index = 0
max_depth = 1
stack_of_indexes = [ max_index ]
for token in sentence:
if CLAUSE_ANNOT not in token:
token[CLAUSE_IDX] = stack_of_indexes[-1]
else:
                # Opening annotations
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KIILU_ALGUS":
                        # Go one level deeper, start a new embedded clause
max_index += 1
stack_of_indexes.append(max_index)
if (len(stack_of_indexes) > max_depth):
max_depth = len(stack_of_indexes)
token[CLAUSE_IDX] = stack_of_indexes[-1]
                # Closing annotations
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KINDEL_PIIR":
                        # Continue on the same level, start the next clause
max_index += 1
stack_of_indexes[-1] = max_index
elif annotation == "KIILU_LOPP":
                        # Step back up one level, close the current embedded clause
stack_of_indexes.pop()
return sentence
|
python
|
{
"resource": ""
}
|
q7666
|
ClauseSegmenter.rename_annotations
|
train
|
def rename_annotations(self, sentence):
"""Function that renames and restructures clause information."""
annotations = []
for token in sentence:
data = {CLAUSE_IDX: token[CLAUSE_IDX]}
if CLAUSE_ANNOT in token:
if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY
elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START
elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END
annotations.append(data)
return annotations
|
python
|
{
"resource": ""
}
|
q7667
|
train_default_model
|
train
|
def train_default_model():
"""Function for training the default NER model.
NB! It overwrites the default model, so do not use it unless
you know what are you doing.
The training data is in file estnltk/corpora/estner.json.bz2 .
The resulting model will be saved to estnltk/estner/models/default.bin
"""
docs = read_json_corpus(DEFAULT_NER_DATASET)
trainer = NerTrainer(default_nersettings)
trainer.train(docs, DEFAULT_NER_MODEL_DIR)
|
python
|
{
"resource": ""
}
|
q7668
|
_get_synset_offsets
|
train
|
def _get_synset_offsets(synset_idxes):
"""Returs pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Lists synset IDs, which need offset.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
"""
offsets = {}
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted(synset_idxes)
with codecs.open(_SOI,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
# Looping on single line entries in case synset_indexes contains duplicates.
offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len(synset_idxes):
break
return [offsets[synset_idx] for synset_idx in synset_idxes]
|
python
|
{
"resource": ""
}
|
q7669
|
_get_synsets
|
train
|
def _get_synsets(synset_offsets):
"""Given synset offsets in the WordNet file, parses synset object for every offset.
Notes
-----
Internal function. Do not call directly.
Stores every parsed synset into global synset dictionary under two keys:
synset's name lemma.pos.sense_no and synset's id (unique integer).
Parameters
----------
synset_offsets : list of ints
Lists pointer offsets from which synset objects will be parsed.
Returns
-------
list of Synsets
Lists synset objects which synset_offsets point to.
"""
global parser
if parser is None:
parser = Parser(_WN_FILE)
synsets = []
for offset in synset_offsets:
raw_synset = parser.parse_synset(offset)
synset = Synset(raw_synset)
SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset
SYNSETS_DICT[synset.id] = synset
synsets.append(synset)
return synsets
|
python
|
{
"resource": ""
}
|
q7670
|
_get_key_from_raw_synset
|
train
|
def _get_key_from_raw_synset(raw_synset):
"""Derives synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset class,
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`.
"""
pos = raw_synset.pos
literal = raw_synset.variants[0].literal
sense = "%02d"%raw_synset.variants[0].sense
return '.'.join([literal,pos,sense])
|
python
|
{
"resource": ""
}
|
q7671
|
synset
|
train
|
def synset(synset_key):
"""Returns synset object with the provided key.
Notes
-----
Uses lazy initialization - synsets will be fetched from a dictionary after the first request.
Parameters
----------
synset_key : string
Unique synset identifier in the form of `lemma.pos.sense_no`.
Returns
-------
Synset
Synset with key `synset_key`.
None, if no match was found.
"""
if synset_key in SYNSETS_DICT:
return SYNSETS_DICT[synset_key]
def _get_synset_idx(synset_key):
"""Returns synset index for the provided key.
Note
----
Internal function. Do not call directly.
"""
with codecs.open(_SENSE_FILE,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
if split_line[0] == synset_key:
return int(split_line[1].strip())
return None
synset_idx = _get_synset_idx(synset_key)
if synset_idx == None:
return None
synset_offset = _get_synset_offsets([synset_idx])
synset = _get_synsets(synset_offset)
return synset[0]
|
python
|
{
"resource": ""
}
|
q7672
|
synsets
|
train
|
def synsets(lemma,pos=None):
"""Returns all synset objects which have lemma as one of the variant literals and fixed pos, if provided.
Notes
-----
Uses lazy initialization - parses only those synsets which are not yet initialized, others are fetched from a dictionary.
Parameters
----------
lemma : str
Lemma of the synset.
pos : str, optional
Part-of-speech specification of the searched synsets, defaults to None.
Returns
-------
list of Synsets
Synsets which contain `lemma` and of which part-of-speech is `pos`, if specified.
Empty list, if no match was found.
"""
def _get_synset_idxes(lemma,pos):
line_prefix_regexp = "%s:%s:(.*)"%(lemma,pos if pos else "\w+")
line_prefix = re.compile(line_prefix_regexp)
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
for line in fin:
result = line_prefix.match(line)
if result:
res_indices = [int(x) for x in result.group(1).split(' ')]
idxes.extend(res_indices)
LEM_POS_2_SS_IDX[lemma][pos].extend(idxes)
return sorted(idxes)
synset_idxes = None
if lemma in LEM_POS_2_SS_IDX:
if pos in LEM_POS_2_SS_IDX[lemma]:
synset_idxes = LEM_POS_2_SS_IDX[lemma][pos]
else:
synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in LEM_POS_2_SS_IDX[lemma][pos]]
if not synset_idxes:
synset_idxes = _get_synset_idxes(lemma,pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
return stored_synsets + synsets
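
# Hedged usage sketch: look up all synsets containing a lemma, optionally
# restricted by part of speech. The pos value 'n' (noun) is an assumption
# about the tags used in the underlying WordNet index files.
dog_synsets = synsets('koer', pos='n')
for ss in dog_synsets:
    print(ss)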
|
python
|
{
"resource": ""
}
|
q7673
|
all_synsets
|
train
|
def all_synsets(pos=None):
"""Return all the synsets which have the provided pos.
Notes
-----
Returns thousands or tens of thousands of synsets - first time will take significant time.
Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time.
Parameters
----------
pos : str
Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`.
If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time.
Returns
-------
list of Synsets
Lists the Synsets which have `pos` as part-of-speech.
Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`].
"""
def _get_unique_synset_idxes(pos):
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
if pos == None:
for line in fin:
split_line = line.strip().split(':')
idxes.extend([int(x) for x in split_line[2].split()])
else:
for line in fin:
split_line = line.strip().split(':')
if split_line[1] == pos:
idxes.extend([int(x) for x in split_line[2].split()])
idxes = list(set(idxes))
idxes.sort()
return idxes
if pos in LOADED_POS:
return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]]
else:
synset_idxes = _get_unique_synset_idxes(pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
for synset in synsets:
for variant in synset.get_variants():
LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id)
LOADED_POS.add(pos)
return stored_synsets + synsets
|
python
|
{
"resource": ""
}
|
q7674
|
lemma
|
train
|
def lemma(lemma_key):
"""Returns the Lemma object with the given key.
Parameters
----------
lemma_key : str
Key of the returned lemma.
Returns
-------
Lemma
Lemma matching the `lemma_key`.
"""
if lemma_key in LEMMAS_DICT:
return LEMMAS_DICT[lemma_key]
split_lemma_key = lemma_key.split('.')
synset_key = '.'.join(split_lemma_key[:3])
lemma_literal = split_lemma_key[3]
lemma_obj = Lemma(synset_key,lemma_literal)
LEMMAS_DICT[lemma_key] = lemma_obj
return lemma_obj
|
python
|
{
"resource": ""
}
|
q7675
|
lemmas
|
train
|
def lemmas(lemma,pos=None):
"""Returns all the Lemma objects of which name is `lemma` and which have `pos` as part
of speech.
Parameters
----------
lemma : str
Literal of the sought Lemma objects.
pos : str, optional
Part of speech of the sought Lemma objects. If None, matches any part of speech.
Defaults to None
Returns
-------
list of Lemmas
Lists all the matched Lemmas.
"""
lemma = lemma.lower()
return [lemma_obj
for synset in synsets(lemma,pos)
for lemma_obj in synset.lemmas()
if lemma_obj.name.lower() == lemma]
|
python
|
{
"resource": ""
}
|
q7676
|
Synset._recursive_hypernyms
|
train
|
def _recursive_hypernyms(self, hypernyms):
"""Finds all the hypernyms of the synset transitively.
Notes
-----
Internal method. Do not call directly.
Parameters
----------
hypernyms : set of Synsets
            A set of hypernyms found so far.
Returns
-------
set of Synsets
            The input set, extended with all the hypernyms reachable from this synset.
"""
hypernyms |= set(self.hypernyms())
for synset in self.hypernyms():
hypernyms |= synset._recursive_hypernyms(hypernyms)
return hypernyms
|
python
|
{
"resource": ""
}
|
q7677
|
Synset._min_depth
|
train
|
def _min_depth(self):
"""Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
"""
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth
|
python
|
{
"resource": ""
}
|
q7678
|
Synset.get_related_synsets
|
train
|
def get_related_synsets(self,relation):
"""Retrieves all the synsets which are related by given relation.
Parameters
----------
relation : str
Name of the relation via which the sought synsets are linked.
Returns
-------
list of Synsets
Synsets which are related via `relation`.
"""
results = []
for relation_candidate in self._raw_synset.internalLinks:
if relation_candidate.name == relation:
linked_synset = synset(_get_key_from_raw_synset(relation_candidate.target_concept))
relation_candidate.target_concept = linked_synset._raw_synset
results.append(linked_synset)
return results
|
python
|
{
"resource": ""
}
|
q7679
|
Synset.closure
|
train
|
def closure(self, relation, depth=float('inf')):
"""Finds all the ancestors of the synset using provided relation.
Parameters
----------
relation : str
            Name of the relation which is recursively used to fetch the ancestors.
        depth : float, optional
            Maximum depth up to which the relation is followed; defaults to infinity.
Returns
-------
list of Synsets
Returns the ancestors of the synset via given relations.
"""
ancestors = []
unvisited_ancestors = [(synset,1) for synset in self.get_related_synsets(relation)]
while len(unvisited_ancestors) > 0:
ancestor_depth = unvisited_ancestors.pop()
if ancestor_depth[1] > depth:
continue
unvisited_ancestors.extend([(synset,ancestor_depth[1]+1) for synset in ancestor_depth[0].get_related_synsets(relation)])
ancestors.append(ancestor_depth[0])
return list(set(ancestors))
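
# Hedged usage sketch: the synset key 'koer.n.01' and the relation name
# 'has_hyperonym' are assumptions about the underlying Estonian WordNet data.
ss = synset('koer.n.01')
if ss is not None:
    ancestors = ss.closure('has_hyperonym', depth=3)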
|
python
|
{
"resource": ""
}
|
q7680
|
Synset.lch_similarity
|
train
|
def lch_similarity(self, synset):
"""Calculates Leacock and Chodorow's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula -log( (dist(synset1,synset2)+1) / (2*maximum taxonomy depth) ).
Parameters
----------
synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
            Leacock and Chodorow's similarity from `synset`.
            None, if the synsets are not connected via hypernymy/hyponymy relations, or (trivially) if their parts of speech do not match.
"""
if self._raw_synset.pos != synset._raw_synset.pos:
return None
depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos]
distance = self._shortest_path_distance(synset)
if distance >= 0:
return -math.log((distance + 1) / (2.0 * depth))
else:
return None
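
# Hedged usage sketch: the synset keys below are assumptions about the data;
# the call returns None when the parts of speech differ or no path exists.
a, b = synset('koer.n.01'), synset('kass.n.01')
if a is not None and b is not None:
    print(a.lch_similarity(b))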
|
python
|
{
"resource": ""
}
|
q7681
|
SyntaxPreprocessing.process_vm_json
|
train
|
def process_vm_json( self, json_dict, **kwargs ):
''' Executes the preprocessing pipeline on vabamorf's JSON, given as a dict;
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_vm_json_to_mrf( json_dict )
return self.process_mrf_lines( mrf_lines, **kwargs )
|
python
|
{
"resource": ""
}
|
q7682
|
SyntaxPreprocessing.process_Text
|
train
|
def process_Text( self, text, **kwargs ):
''' Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_Text_to_mrf( text )
return self.process_mrf_lines( mrf_lines, **kwargs )
|
python
|
{
"resource": ""
}
|
q7683
|
SyntaxPreprocessing.process_mrf_lines
|
train
|
def process_mrf_lines( self, mrf_lines, **kwargs ):
''' Executes the preprocessing pipeline on mrf_lines.
The input should be an analysis of the text in Filosoft's old mrf format;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
'''
converted1 = convert_mrf_to_syntax_mrf( mrf_lines, self.fs_to_synt_rules )
converted2 = convert_pronouns( converted1 )
converted3 = remove_duplicate_analyses( converted2, allow_to_delete_all=self.allow_to_remove_all )
converted4 = add_hashtag_info( converted3 )
converted5 = tag_subcat_info( converted4, self.subcat_rules )
converted6 = remove_duplicate_analyses( converted5, allow_to_delete_all=self.allow_to_remove_all )
converted7 = convert_to_cg3_input( converted6 )
return converted7
|
python
|
{
"resource": ""
}
|
q7684
|
get_sources
|
train
|
def get_sources(src_dir='src', ending='.cpp'):
"""Function to get a list of files ending with `ending` in `src_dir`."""
return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)]
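
# Usage sketch: collect C++ sources for a setup.py Extension; the directory
# name and the extension module name are assumptions for illustration only.
from setuptools import Extension
ext = Extension('vabamorf._vabamorf', sources=get_sources('src', ending='.cpp'))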
|
python
|
{
"resource": ""
}
|
q7685
|
TextCleaner.clean
|
train
|
def clean(self, text):
"""Remove all unwanted characters from text."""
return ''.join([c for c in text if c in self.alphabet])
|
python
|
{
"resource": ""
}
|
q7686
|
TextCleaner.invalid_characters
|
train
|
def invalid_characters(self, text):
"""Give simple list of invalid characters present in text."""
return ''.join(sorted(set([c for c in text if c not in self.alphabet])))
|
python
|
{
"resource": ""
}
|
q7687
|
TextCleaner.find_invalid_chars
|
train
|
def find_invalid_chars(self, text, context_size=20):
"""Find invalid characters in text and store information about
the findings.
Parameters
----------
        text: str
            The text to search for invalid characters.
        context_size: int
How many characters to return as the context.
"""
result = defaultdict(list)
for idx, char in enumerate(text):
if char not in self.alphabet:
start = max(0, idx-context_size)
end = min(len(text), idx+context_size)
result[char].append(text[start:end])
return result
|
python
|
{
"resource": ""
}
|
q7688
|
TextCleaner.compute_report
|
train
|
def compute_report(self, texts, context_size=10):
"""Compute statistics of invalid characters on given texts.
Parameters
----------
texts: list of str
The texts to search for invalid characters.
context_size: int
How many characters to return as the context.
Returns
-------
dict of (char -> list of tuple (index, context))
Returns a dictionary, where keys are invalid characters.
            Values are lists containing tuples with character indices
and context strings.
"""
result = defaultdict(list)
for text in texts:
for char, examples in self.find_invalid_chars(text, context_size).items():
result[char].extend(examples)
return result
|
python
|
{
"resource": ""
}
|
q7689
|
TextCleaner.report
|
train
|
def report(self, texts, n_examples=10, context_size=10, f=sys.stdout):
"""Compute statistics of invalid characters and print them.
Parameters
----------
texts: list of str
The texts to search for invalid characters.
n_examples: int
How many examples to display per invalid character.
context_size: int
How many characters to return as the context.
f: file
The file to print the report (default is sys.stdout)
"""
result = list(self.compute_report(texts, context_size).items())
result.sort(key=lambda x: (len(x[1]), x[0]), reverse=True)
s = 'Analyzed {0} texts.\n'.format(len(texts))
if (len(texts)) == 0:
f.write(s)
return
if len(result) > 0:
s += 'Invalid characters and their counts:\n'
for c, examples in result:
s += '"{0}"\t{1}\n'.format(c, len(examples))
s += '\n'
for c, examples in result:
s += 'For character "{0}", found {1} occurrences.\nExamples:\n'.format(c, len(examples))
examples = sample(examples, min(len(examples), n_examples))
for idx, example in enumerate(examples):
s += 'example {0}: {1}\n'.format(idx+1, example)
s += '\n'
f.write(s)
else:
f.write('All OK\n')
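
# Hedged usage sketch: assumes TextCleaner() can be constructed with its
# default alphabet; characters outside that alphabet are reported to stdout.
cleaner = TextCleaner()
cleaner.report(['puhas tekst', 'tekst \x00 kontrollmärgiga'], n_examples=2, context_size=5)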
|
python
|
{
"resource": ""
}
|
q7690
|
__sort_analyses
|
train
|
def __sort_analyses(sentence):
''' Sorts analysis of all the words in the sentence.
This is required for consistency, because by default, analyses are
listed in arbitrary order; '''
for word in sentence:
if ANALYSIS not in word:
raise Exception( '(!) Error: no analysis found from word: '+str(word) )
else:
word[ANALYSIS] = sorted(word[ANALYSIS], \
key=lambda x : "_".join( [x[ROOT],x[POSTAG],x[FORM],x[CLITIC]] ))
return sentence
|
python
|
{
"resource": ""
}
|
q7691
|
augmentTextWithCONLLstr
|
train
|
def augmentTextWithCONLLstr( conll_str_array, text ):
''' Augments given Text object with the information from Maltparser's output.
More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and
DEPREL to each token in the Text object;
'''
j = 0
for sentence in text.divide( layer=WORDS, by=SENTENCES ):
sentence = __sort_analyses(sentence)
for i in range(len(sentence)):
estnltkToken = sentence[i]
maltparserToken = conll_str_array[j]
if len( maltparserToken ) > 1:
maltParserAnalysis = maltparserToken.split('\t')
if estnltkToken[TEXT] == maltParserAnalysis[1]:
# Fetch information about the syntactic relation:
estnltkToken[SYNTAX_LABEL] = maltParserAnalysis[0]
estnltkToken[SYNTAX_HEAD] = maltParserAnalysis[6]
# Fetch the name of the surface syntactic relation
estnltkToken[DEPREL] = maltParserAnalysis[7]
else:
raise Exception("A misalignment between Text and Maltparser's output: ",\
estnltkToken, maltparserToken )
j += 1
j += 1
|
python
|
{
"resource": ""
}
|
q7692
|
create_rules
|
train
|
def create_rules(aes, value):
"""Create a Rules instance for a single aesthetic value.
    Parameters
    ----------
aes: str
The name of the aesthetic
value: str or list
The value associated with any aesthetic
"""
if isinstance(value, six.string_types):
return Rules(aes)
else:
rules = Rules()
for idx, (pattern, css_value) in enumerate(value):
rules.add_rule(pattern, '{0}_{1}'.format(aes, idx))
return rules
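
# Usage sketch based on the function above: a plain string value yields a
# Rules instance named after the aesthetic, while a list of (pattern, value)
# tuples yields one enumerated CSS class per pattern.
simple = create_rules('color', 'blue')
layered = create_rules('background', [('Tallinn', 'yellow'), ('Tartu', 'green')])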
|
python
|
{
"resource": ""
}
|
q7693
|
Rules.add_rule
|
train
|
def add_rule(self, pattern, css_class):
"""Add a new rule.
Parameters
----------
pattern: str
Pattern that is compiled to a regular expression.
css_class: str
            The CSS class that corresponds to the given pattern.
"""
#print('adding rule <{0}> <{1}>'.format(pattern, css_class))
self.__patterns.append(re.compile(pattern, flags=re.U | re.M))
self.__css_classes.append(css_class)
|
python
|
{
"resource": ""
}
|
q7694
|
json_document_to_estner_document
|
train
|
def json_document_to_estner_document(jsondoc):
"""Convert an estnltk document to an estner document.
Parameters
----------
jsondoc: dict
Estnltk JSON-style document.
Returns
-------
estnltk.estner.ner.Document
A ner document.
"""
sentences = []
for json_sent in jsondoc.split_by_sentences():
snt = Sentence()
zipped = list(zip(
json_sent.word_texts,
json_sent.lemmas,
json_sent.root_tokens,
json_sent.forms,
json_sent.endings,
json_sent.postags))
json_toks = [{TEXT: text, LEMMA: lemma, ROOT_TOKENS: root_tokens, FORM: form, ENDING: ending, POSTAG: postag}
for text, lemma, root_tokens, form, ending, postag in zipped]
# add labels, if they are present
for tok, word in zip(json_toks, json_sent.words):
if LABEL in word:
tok[LABEL] = word[LABEL]
for json_tok in json_toks:
token = json_token_to_estner_token(json_tok)
snt.append(token)
if snt:
for i in range(1, len(snt)):
snt[i - 1].next = snt[i]
snt[i].prew = snt[i - 1]
sentences.append(snt)
return Document(sentences=sentences)
|
python
|
{
"resource": ""
}
|
q7695
|
json_token_to_estner_token
|
train
|
def json_token_to_estner_token(json_token):
"""Convert a JSON-style word token to an estner token.
Parameters
----------
    json_token: dict
        JSON-style token representing a single word.
Returns
-------
estnltk.estner.ner.Token
"""
token = Token()
word = json_token[TEXT]
lemma = word
morph = ''
label = 'O'
ending = json_token[ENDING]
root_toks = json_token[ROOT_TOKENS]
if isinstance(root_toks[0], list):
root_toks = root_toks[0]
lemma = '_'.join(root_toks) + ('+' + ending if ending else '')
if not lemma:
lemma = word
morph = '_%s_' % json_token[POSTAG]
morph += ' ' + json_token[FORM]
if LABEL in json_token:
label = json_token[LABEL]
return Token(word, lemma, morph, label)
|
python
|
{
"resource": ""
}
|
q7696
|
ModelStorageUtil.makedir
|
train
|
def makedir(self):
""" Create model_dir directory """
try:
os.makedirs(self.model_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
|
python
|
{
"resource": ""
}
|
q7697
|
ModelStorageUtil.copy_settings
|
train
|
def copy_settings(self, settings_module):
""" Copy settings module to the model_dir directory """
source = inspect.getsourcefile(settings_module)
dest = os.path.join(self.model_dir, 'settings.py')
shutil.copyfile(source, dest)
|
python
|
{
"resource": ""
}
|
q7698
|
ModelStorageUtil.load_settings
|
train
|
def load_settings(self):
"""Load settings module from the model_dir directory."""
mname = 'loaded_module'
if six.PY2:
import imp
return imp.load_source(mname, self.settings_filename)
else:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(mname, self.settings_filename)
return loader.load_module(mname)
|
python
|
{
"resource": ""
}
|
q7699
|
VISLCG3Pipeline.check_if_vislcg_is_in_path
|
train
|
def check_if_vislcg_is_in_path( self, vislcg_cmd1 ):
        ''' Checks whether the given vislcg_cmd1 is in the system's PATH. Returns True if there is
            a file named vislcg_cmd1 on the path, otherwise returns False;
The idea borrows from: http://stackoverflow.com/a/377028
'''
for path in os.environ["PATH"].split( os.pathsep ):
path1 = path.strip('"')
file1 = os.path.join(path1, vislcg_cmd1)
if os.path.isfile(file1) or os.path.isfile(file1+'.exe'):
return True
return False
|
python
|
{
"resource": ""
}
|