code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
|---|---|---|---|---|---|
if namespaces is not None:
namespaces = ct.Namespaces(**namespaces)
custom = kwargs.get('custom')
if custom is not None:
custom = ct.CustomSelectors(**custom)
if isinstance(pattern, SoupSieve):
if flags:
raise ValueError("Cannot process 'flags' argument on a compiled selector list")
elif namespaces is not None:
raise ValueError("Cannot process 'namespaces' argument on a compiled selector list")
elif custom is not None:
raise ValueError("Cannot process 'custom' argument on a compiled selector list")
return pattern
return cp._cached_css_compile(pattern, namespaces, custom, flags)
|
def compile(pattern, namespaces=None, flags=0, **kwargs): # noqa: A001
|
Compile CSS pattern.
| 5.623659
| 4.642636
| 1.211307
|
return compile(select, namespaces, flags, **kwargs).closest(tag)
|
def closest(select, tag, namespaces=None, flags=0, **kwargs)
|
Match closest ancestor.
| 10.360913
| 8.313814
| 1.246229
|
return compile(select, namespaces, flags, **kwargs).match(tag)
|
def match(select, tag, namespaces=None, flags=0, **kwargs)
|
Match node.
| 6.842981
| 6.514735
| 1.050385
|
return compile(select, namespaces, flags, **kwargs).filter(iterable)
|
def filter(select, iterable, namespaces=None, flags=0, **kwargs): # noqa: A001
|
Filter list of nodes.
| null
| null
| null
|
return [comment for comment in cm.CommentsMatch(tag).get_comments(limit)]
|
def comments(tag, limit=0, flags=0, **kwargs)
|
Get comments only.
| 22.378204
| 19.71785
| 1.134921
|
return compile(select, namespaces, flags, **kwargs).select_one(tag)
|
def select_one(select, tag, namespaces=None, flags=0, **kwargs)
|
Select a single tag.
| 7.294319
| 6.323229
| 1.153575
|
return compile(select, namespaces, flags, **kwargs).select(tag, limit)
|
def select(select, tag, namespaces=None, limit=0, flags=0, **kwargs)
|
Select the specified tags.
| 9.03167
| 8.439386
| 1.070181
|
for el in compile(select, namespaces, flags, **kwargs).iselect(tag, limit):
yield el
|
def iselect(select, tag, namespaces=None, limit=0, flags=0, **kwargs)
|
Iterate the specified tags.
| 11.530352
| 9.668357
| 1.192586
|
request = cls._make_request('GET', cls._get_all_path(), connection, params=params)
return cls._create_object(request, connection=connection)
|
def all(cls, connection=None, **params)
|
Returns the first page of results as a list if no params are passed in.
| 5.21197
| 4.726738
| 1.102657
|
try:
limit = kwargs['limit']
except KeyError:
limit = None
try:
page = kwargs['page']
except KeyError:
page = None
def _all_responses():
page = 1 # one based
params = kwargs.copy()
while True:
params.update(page=page, limit=250)
rsp = cls._make_request('GET', cls._get_all_path(), connection, params=params)
if rsp:
yield rsp
page += 1
else:
yield [] # needed for the case where there are no objects
break
if not (limit or page):
for rsp in _all_responses():
for obj in rsp:
yield cls._create_object(obj, connection=connection)
else:
response = cls._make_request('GET', cls._get_all_path(), connection, params=kwargs)
for obj in cls._create_object(response, connection=connection):
yield obj
|
def iterall(cls, connection=None, **kwargs)
|
Returns an autopaging generator that yields each object returned one by one.
| 3.131404
| 3.017041
| 1.037906
|
if rid:
if resource[-1] != '/':
resource += '/'
resource += str(rid)
response = self._run_method('GET', resource, query=query)
return self._handle_response(resource, response)
|
def get(self, resource="", rid=None, **query)
|
Retrieves the resource with given id 'rid', or all resources of given type.
Keep in mind that the API returns a list for any query that doesn't specify an ID, even when applying
a limit=1 filter.
Also be aware that float values tend to come back as strings ("2.0000" instead of 2.0)
Keyword arguments can be passed for filtering the query, for example:
connection.get('products', limit=3, min_price=10.5)
(see Bigcommerce resource documentation).
| 3.328432
| 4.114304
| 0.80899
|
if resource[-1] != '/':
resource += '/'
resource += str(rid)
return self.put(resource, data=updates)
|
def update(self, resource, rid, updates)
|
Updates the resource with id 'rid' with the given updates dictionary.
| 4.198846
| 3.857449
| 1.088503
|
if rid:
if resource[-1] != '/':
resource += '/'
resource += str(rid)
response = self._run_method('DELETE', resource)
return self._handle_response(resource, response, suppress_empty=True)
|
def delete(self, resource, rid=None): # note that rid can't be 0 - problem?
|
Deletes the resource with given id 'rid', or all resources of given type if rid is not supplied.
| 7.551818
| 5.969725
| 1.265019
|
response = self._run_method('PUT', url, data=data)
log.debug("OUTPUT: %s" % response.content)
return self._handle_response(url, response)
|
def put(self, url, data)
|
Make a PUT request to save data.
data should be a dictionary.
| 4.583653
| 4.509381
| 1.016471
|
response = self._run_method('POST', url, data=data, headers=headers)
return self._handle_response(url, response)
|
def post(self, url, data, headers={})
|
POST request for creating new objects.
data should be a dictionary.
| 3.627533
| 3.3677
| 1.077154
|
self._last_response = res
result = {}
if res.status_code in (200, 201, 202):
try:
result = res.json()
except Exception as e: # json might be invalid, or store might be down
e.message += " (_handle_response failed to decode JSON: " + str(res.content) + ")"
raise # TODO better exception
elif res.status_code == 204 and not suppress_empty:
raise EmptyResponseWarning("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
elif res.status_code >= 500:
raise ServerException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
elif res.status_code == 429:
raise RateLimitingException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
elif res.status_code >= 400:
raise ClientRequestException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
elif res.status_code >= 300:
raise RedirectionException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res)
return result
|
def _handle_response(self, url, res, suppress_empty=True)
|
Returns parsed JSON or raises an exception appropriately.
| 1.993449
| 1.923556
| 1.036335
|
encoded_json, encoded_hmac = signed_payload.split('.')
dc_json = base64.b64decode(encoded_json)
signature = base64.b64decode(encoded_hmac)
expected_sig = hmac.new(client_secret.encode(), base64.b64decode(encoded_json), hashlib.sha256).hexdigest()
authorised = hmac.compare_digest(signature, expected_sig.encode())
return json.loads(dc_json.decode()) if authorised else False
|
def verify_payload(signed_payload, client_secret)
|
Given a signed payload (usually passed as parameter in a GET request to the app's load URL) and a client secret,
authenticates the payload and returns the user's data, or False on fail.
Uses constant-time str comparison to prevent vulnerability to timing attacks.
| 2.601664
| 2.371256
| 1.097167
|
res = self.post(token_url, {'client_id': self.client_id,
'client_secret': client_secret,
'code': code,
'context': context,
'scope': scope,
'grant_type': 'authorization_code',
'redirect_uri': redirect_uri},
headers={'Content-Type': 'application/x-www-form-urlencoded'})
self._session.headers.update(self._oauth_headers(self.client_id, res['access_token']))
return res
|
def fetch_token(self, client_secret, code, context, scope, redirect_uri,
token_url='https://login.bigcommerce.com/oauth2/token')
|
Fetches a token from given token_url, using given parameters, and sets up session headers for
future requests.
redirect_uri should be the same as your callback URL.
code, context, and scope should be passed as parameters to your callback URL on app installation.
Raises HttpException on failure (same as Connection methods).
| 1.963868
| 1.940658
| 1.01196
|
result = Connection._handle_response(self, url, res, suppress_empty)
if 'X-Rate-Limit-Time-Reset-Ms' in res.headers:
self.rate_limit = dict(ms_until_reset=int(res.headers['X-Rate-Limit-Time-Reset-Ms']),
window_size_ms=int(res.headers['X-Rate-Limit-Time-Window-Ms']),
requests_remaining=int(res.headers['X-Rate-Limit-Requests-Left']),
requests_quota=int(res.headers['X-Rate-Limit-Requests-Quota']))
if self.rate_limiting_management:
if self.rate_limiting_management['min_requests_remaining'] >= self.rate_limit['requests_remaining']:
if self.rate_limiting_management['wait']:
sleep(ceil(float(self.rate_limit['ms_until_reset']) / 1000))
if self.rate_limiting_management.get('callback_function'):
callback = self.rate_limiting_management['callback_function']
args_dict = self.rate_limiting_management.get('callback_args')
if args_dict:
callback(args_dict)
else:
callback()
return result
|
def _handle_response(self, url, res, suppress_empty=True)
|
Adds rate limiting information onto the response object.
| 2.477981
| 2.362396
| 1.048927
|
byte_str = text.encode('utf-8')
ob = lib.hoedown_buffer_new(OUNIT)
lib.hoedown_escape_html(ob, byte_str, len(byte_str), int(escape_slash))
try:
return to_string(ob)
finally:
lib.hoedown_buffer_free(ob)
|
def escape_html(text, escape_slash=False)
|
Binding for Hoedown's HTML escaping function.
The implementation is inspired by the OWASP XSS Prevention recommendations:
.. code-block:: none
& --> &amp;
< --> &lt;
> --> &gt;
" --> &quot;
' --> &#x27;
/ --> &#x2F; when escape_slash is set to True
.. versionadded:: 2.1.0
| 4.946358
| 5.857735
| 0.844415
|
extensions = args_to_int(extension_map, extensions)
render_flags = args_to_int(html_flag_map, render_flags)
ib = lib.hoedown_buffer_new(IUNIT)
ob = lib.hoedown_buffer_new(OUNIT)
renderer = lib.hoedown_html_renderer_new(render_flags, 0)
document = lib.hoedown_document_new(renderer, extensions, 16);
lib.hoedown_buffer_puts(ib, text.encode('utf-8'))
lib.hoedown_document_render(document, ob, ib.data, ib.size);
lib.hoedown_buffer_free(ib);
lib.hoedown_document_free(document);
lib.hoedown_html_renderer_free(renderer);
try:
return to_string(ob)
finally:
lib.hoedown_buffer_free(ob);
|
def html(text, extensions=0, render_flags=0)
|
Convert markdown text to HTML.
``extensions`` can be a list or tuple of extensions (e.g.
``('fenced-code', 'footnotes', 'strikethrough')``) or an integer
(e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``).
``render_flags`` can be a list or tuple of flags (e.g.
``('skip-html', 'hard-wrap')``) or an integer
(e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
| 3.212556
| 3.641034
| 0.88232
|
byte_str = text.encode('utf-8')
ob = lib.hoedown_buffer_new(OUNIT)
lib.hoedown_html_smartypants(ob, byte_str, len(byte_str))
try:
return to_string(ob)
finally:
lib.hoedown_buffer_free(ob);
|
def smartypants(text)
|
Transforms sequences of characters into HTML entities.
=================================== ===================== =========
Markdown HTML Result
=================================== ===================== =========
``'s`` (s, t, m, d, re, ll, ve)     &rsquo;s              ’s
``"Quotes"``                        &ldquo;Quotes&rdquo;  “Quotes”
``---``                             &mdash;               —
``--``                              &ndash;               –
``...``                             &hellip;              …
``. . .``                           &hellip;              …
``(c)``                             &copy;                ©
``(r)``                             &reg;                 ®
``(tm)``                            &trade;               ™
``3/4``                             &frac34;              ¾
``1/2``                             &frac12;              ½
``1/4``                             &frac14;              ¼
=================================== ===================== =========
| 6.058212
| 7.940225
| 0.762977
|
if self.check_url(raw_url):
url = self.rewrite_url(('mailto:' if is_email else '') + raw_url)
url = escape_html(url)
return '<a href="%s">%s</a>' % (url, escape_html(raw_url))
else:
return escape_html('<%s>' % raw_url)
|
def autolink(self, raw_url, is_email)
|
Filters links generated by the ``autolink`` extension.
| 2.773404
| 2.819677
| 0.983589
|
if self.check_url(raw_url, is_image_src=True):
url = self.rewrite_url(raw_url, is_image_src=True)
maybe_alt = ' alt="%s"' % escape_html(alt) if alt else ''
maybe_title = ' title="%s"' % escape_html(title) if title else ''
url = escape_html(url)
return '<img src="%s"%s%s />' % (url, maybe_alt, maybe_title)
else:
return escape_html("![%s](%s)" % (alt, raw_url))
|
def image(self, raw_url, title='', alt='')
|
Filters the ``src`` attribute of an image.
Note that filtering the source URL of an ``<img>`` tag is only a very
basic protection, and it's mostly useless in modern browsers (they block
JavaScript in there by default). An example of attack that filtering
does not thwart is phishing based on HTTP Auth, see `this issue
<https://github.com/liberapay/liberapay.com/issues/504>`_ for details.
To mitigate this issue you should only allow images from trusted services,
for example your own image store, or a proxy (see :meth:`rewrite_url`).
| 2.399463
| 2.346298
| 1.022659
|
if self.check_url(raw_url):
url = self.rewrite_url(raw_url)
maybe_title = ' title="%s"' % escape_html(title) if title else ''
url = escape_html(url)
return ('<a href="%s"%s>' % (url, maybe_title)) + content + '</a>'
else:
return escape_html("[%s](%s)" % (content, raw_url))
|
def link(self, content, raw_url, title='')
|
Filters links.
| 3.032062
| 3.006151
| 1.008619
|
return bool(self._allowed_url_re.match(url))
|
def check_url(self, url, is_image_src=False)
|
This method is used to check a URL.
Returns :obj:`True` if the URL is "safe", :obj:`False` otherwise.
The default implementation only allows HTTP and HTTPS links. That means
no ``mailto:``, no ``xmpp:``, no ``ftp:``, etc.
This method exists specifically to allow easy customization of link
filtering through subclassing, so don't hesitate to write your own.
If you're thinking of implementing a blacklist approach, see
"`Which URL schemes are dangerous (XSS exploitable)?
<http://security.stackexchange.com/q/148428/37409>`_".
| 11.882514
| 14.315186
| 0.830064
|
rewrite = self.img_src_rewrite if is_image_src else self.link_rewrite
if rewrite:
return rewrite.format(url=urlquote(url))
return url
|
def rewrite_url(self, url, is_image_src=False)
|
This method is called to rewrite URLs.
It uses either ``self.link_rewrite`` or ``self.img_src_rewrite``
depending on the value of ``is_image_src``. The URL is returned
unchanged if the corresponding attribute is :obj:`None`.
| 4.768953
| 4.055655
| 1.175877
|
if isinstance(argument, int):
if argument == 0:
return 0
deprecation('passing extensions and flags as constants is deprecated')
return argument
elif isinstance(argument, (tuple, list)):
return reduce(op.or_, [mapping[n] for n in set(argument) if n in mapping], 0)
raise TypeError('argument must be a list of strings or an int')
|
def args_to_int(mapping, argument)
|
Convert list of strings to an int using a mapping.
| 5.492928
| 5.039479
| 1.089979
|
global compatibility
ext_handlers = options.get("ext_handlers")
if obj is None:
_pack_nil(obj, fp, options)
elif ext_handlers and obj.__class__ in ext_handlers:
_pack_ext(ext_handlers[obj.__class__](obj), fp, options)
elif isinstance(obj, bool):
_pack_boolean(obj, fp, options)
elif isinstance(obj, int):
_pack_integer(obj, fp, options)
elif isinstance(obj, float):
_pack_float(obj, fp, options)
elif compatibility and isinstance(obj, str):
_pack_oldspec_raw(obj.encode('utf-8'), fp, options)
elif compatibility and isinstance(obj, bytes):
_pack_oldspec_raw(obj, fp, options)
elif isinstance(obj, str):
_pack_string(obj, fp, options)
elif isinstance(obj, bytes):
_pack_binary(obj, fp, options)
elif isinstance(obj, (list, tuple)):
_pack_array(obj, fp, options)
elif isinstance(obj, dict):
_pack_map(obj, fp, options)
elif isinstance(obj, datetime.datetime):
_pack_ext_timestamp(obj, fp, options)
elif isinstance(obj, Ext):
_pack_ext(obj, fp, options)
elif ext_handlers:
# Linear search for superclass
t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
if t:
_pack_ext(ext_handlers[t](obj), fp, options)
else:
raise UnsupportedTypeException(
"unsupported type: %s" % str(type(obj)))
else:
raise UnsupportedTypeException(
"unsupported type: %s" % str(type(obj)))
|
def _pack3(obj, fp, **options)
|
Serialize a Python object into MessagePack bytes.
Args:
obj: a Python object
fp: a .write()-supporting file-like object
Kwargs:
ext_handlers (dict): dictionary of Ext handlers, mapping a custom type
to a callable that packs an instance of the type
into an Ext object
force_float_precision (str): "single" to force packing floats as
IEEE-754 single-precision floats,
"double" to force packing floats as
IEEE-754 double-precision floats.
Returns:
None.
Raises:
UnsupportedType(PackException):
Object type not supported for packing.
Example:
>>> f = open('test.bin', 'wb')
>>> umsgpack.pack({u"compact": True, u"schema": 0}, f)
>>>
| 1.976743
| 2.05599
| 0.961456
|
# Break circular dependency.
# pylint: disable=g-import-not-at-top
import pipeline
if pipeline._TEST_MODE:
return None
# Further protect against test cases that don't set env vars
# properly.
if ("CURRENT_VERSION_ID" not in os.environ or
"CURRENT_MODULE_ID" not in os.environ):
logging.warning("Running Pipeline in non TEST_MODE but important "
"env vars are not set.")
return None
version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
module = os.environ["CURRENT_MODULE_ID"]
return "%s.%s" % (version, module)
|
def _get_task_target()
|
Get the default target for a pipeline task.
Current version id format is: user_defined_version.minor_version_number
Current module id is just the module's name. It could be "default"
Returns:
A complete target name is of format version.module. If module is the
default module, just version. None if target can not be determined.
| 4.755438
| 4.31426
| 1.10226
|
fq_name = str(fq_name)
module_name = __name__
short_name = fq_name
if fq_name.rfind(".") >= 0:
(module_name, short_name) = (fq_name[:fq_name.rfind(".")],
fq_name[fq_name.rfind(".") + 1:])
try:
result = __import__(module_name, None, None, [short_name])
return result.__dict__[short_name]
except KeyError:
# If we're recursively inside a for_name() chain, then we want to raise
# this error as a key error so we can report the actual source of the
# problem. If we're *not* recursively being called, that means the
# module was found and the specific item could not be loaded, and thus
# we want to raise an ImportError directly.
if recursive:
raise
else:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError, e:
# module_name is not actually a module. Try for_name for it to figure
# out what's this.
try:
module = for_name(module_name, recursive=True)
if hasattr(module, short_name):
return getattr(module, short_name)
else:
# The module was found, but the function component is missing.
raise KeyError()
except KeyError:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# This means recursive import attempts failed, thus we will raise the
# first ImportError we encountered, since it's likely the most accurate.
pass
# Raise the original import error that caused all of this, since it is
# likely the real cause of the overall problem.
raise
|
def for_name(fq_name, recursive=False)
|
Find class/function/method specified by its fully qualified name.
A fully qualified name can be specified as:
* <module_name>.<class_name>
* <module_name>.<function_name>
* <module_name>.<class_name>.<method_name> (an unbound method will be
returned in this case).
for_name works by doing __import__ for <module_name>, and looks for
<class_name>/<function_name> in module's __dict__/attrs. If fully qualified
name doesn't contain '.', the current module will be used.
Args:
fq_name: fully qualified name of something to find
Returns:
class object.
Raises:
ImportError: when specified module could not be loaded or the class
was not found in the module.
| 3.989815
| 4.192488
| 0.951658
|
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR))
|
def is_generator_function(obj)
|
Return true if the object is a user-defined generator function.
Generator function objects provide the same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
true if the object is a generator function.
| 3.849411
| 5.096028
| 0.755375
|
global _TYPE_TO_ENCODER
global _TYPE_NAME_TO_DECODER
if object_type not in _TYPE_TO_ENCODER:
_TYPE_TO_ENCODER[object_type] = encoder
_TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
|
def _register_json_primitive(object_type, encoder, decoder)
|
Extend what Pipeline can serialize.
Args:
object_type: type of the object.
encoder: a function that takes in an object and returns
a dict of json primitives.
decoder: inverse function of encoder.
| 2.191168
| 2.34823
| 0.933115
|
k_c = d['key_string']
if isinstance(k_c, (list, tuple)):
return ndb.Key(flat=k_c)
return ndb.Key(urlsafe=d['key_string'])
|
def _JsonDecodeKey(d)
|
Json decode a ndb.Key object.
| 4.506491
| 3.60482
| 1.250129
|
if type(o) in _TYPE_TO_ENCODER:
encoder = _TYPE_TO_ENCODER[type(o)]
json_struct = encoder(o)
json_struct[self.TYPE_ID] = type(o).__name__
return json_struct
return super(JsonEncoder, self).default(o)
|
def default(self, o)
|
Inherit docs.
| 3.082567
| 3.205778
| 0.961566
|
if JsonEncoder.TYPE_ID not in d:
return d
type_name = d.pop(JsonEncoder.TYPE_ID)
if type_name in _TYPE_NAME_TO_DECODER:
decoder = _TYPE_NAME_TO_DECODER[type_name]
return decoder(d)
else:
raise TypeError("Invalid type %s." % type_name)
|
def _dict_to_obj(self, d)
|
Converts a JSON dictionary into a Python object.
| 3.429275
| 3.268134
| 1.049307
|
stringified = pprint.saferepr(obj)
if len(stringified) > 200:
return '%s... (%d bytes)' % (stringified[:200], len(stringified))
return stringified
|
def _short_repr(obj)
|
Helper function returns a truncated repr() of an object.
| 3.417651
| 3.079099
| 1.109952
|
default_bucket = app_identity.get_default_gcs_bucket_name()
if default_bucket is None:
raise Exception(
"No default cloud storage bucket has been set for this application. "
"This app was likely created before v1.9.0, please see: "
"https://cloud.google.com/appengine/docs/php/googlestorage/setup")
path_components = ['/', default_bucket, "appengine_pipeline"]
if pipeline_id:
path_components.append(pipeline_id)
path_components.append(uuid.uuid4().hex)
# Use posixpath to get a / even if we're running on windows somehow
file_name = posixpath.join(*path_components)
with cloudstorage.open(file_name, 'w', content_type='application/json') as f:
for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE):
end_index = start_index + _MAX_JSON_SIZE
f.write(encoded_value[start_index:end_index])
key_str = blobstore.create_gs_key("/gs" + file_name)
logging.debug("Created blob for filename = %s gs_key = %s", file_name, key_str)
return blobstore.BlobKey(key_str)
|
def _write_json_blob(encoded_value, pipeline_id=None)
|
Writes a JSON encoded value to a Cloud Storage File.
This function will store the blob in a GCS file in the default bucket under
the appengine_pipeline directory. Optionally using another directory level
specified by pipeline_id
Args:
encoded_value: The encoded JSON string.
pipeline_id: A pipeline id to segment files in Cloud Storage, if none,
the file will be created under appengine_pipeline
Returns:
The blobstore.BlobKey for the file that was created.
| 3.59756
| 3.18468
| 1.129646
|
lookup_slots = set()
for arg in itertools.chain(args, kwargs.itervalues()):
if arg['type'] == 'slot':
lookup_slots.add(db.Key(arg['slot_key']))
slot_dict = {}
for key, slot_record in zip(lookup_slots, db.get(lookup_slots)):
if slot_record is None or slot_record.status != _SlotRecord.FILLED:
raise SlotNotFilledError(
'Slot "%s" missing its value. From %s(*args=%s, **kwargs=%s)' %
(key, pipeline_name, _short_repr(args), _short_repr(kwargs)))
slot_dict[key] = slot_record.value
arg_list = []
for current_arg in args:
if current_arg['type'] == 'slot':
arg_list.append(slot_dict[db.Key(current_arg['slot_key'])])
elif current_arg['type'] == 'value':
arg_list.append(current_arg['value'])
else:
raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)
kwarg_dict = {}
for key, current_arg in kwargs.iteritems():
if current_arg['type'] == 'slot':
kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])]
elif current_arg['type'] == 'value':
kwarg_dict[key] = current_arg['value']
else:
raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg)
return (arg_list, kwarg_dict)
|
def _dereference_args(pipeline_name, args, kwargs)
|
Dereference a Pipeline's arguments that are slots, validating them.
Each argument value passed in is assumed to be a dictionary with the format:
{'type': 'value', 'value': 'serializable'} # A resolved value.
{'type': 'slot', 'slot_key': 'str() on a db.Key'} # A pending Slot.
Args:
pipeline_name: The name of the pipeline class; used for debugging.
args: Iterable of positional arguments.
kwargs: Dictionary of keyword arguments.
Returns:
Tuple (args, kwargs) where:
Args: A list of positional argument values that are all dereferenced.
Kwargs: A dictionary of keyword argument values that are all dereferenced.
Raises:
SlotNotFilledError if any of the supplied 'slot_key' records are not
present in the Datastore or have not yet been filled.
UnexpectedPipelineError if an unknown parameter type was passed.
| 2.262472
| 1.941512
| 1.165314
|
params = {
'args': [],
'kwargs': {},
'after_all': [],
'output_slots': {},
'class_path': pipeline._class_path,
'queue_name': queue_name,
'base_path': base_path,
'backoff_seconds': pipeline.backoff_seconds,
'backoff_factor': pipeline.backoff_factor,
'max_attempts': pipeline.max_attempts,
'task_retry': pipeline.task_retry,
'target': pipeline.target,
}
dependent_slots = set()
arg_list = params['args']
for current_arg in pipeline.args:
if isinstance(current_arg, PipelineFuture):
current_arg = current_arg.default
if isinstance(current_arg, Slot):
arg_list.append({'type': 'slot', 'slot_key': str(current_arg.key)})
dependent_slots.add(current_arg.key)
else:
arg_list.append({'type': 'value', 'value': current_arg})
kwarg_dict = params['kwargs']
for name, current_arg in pipeline.kwargs.iteritems():
if isinstance(current_arg, PipelineFuture):
current_arg = current_arg.default
if isinstance(current_arg, Slot):
kwarg_dict[name] = {'type': 'slot', 'slot_key': str(current_arg.key)}
dependent_slots.add(current_arg.key)
else:
kwarg_dict[name] = {'type': 'value', 'value': current_arg}
after_all = params['after_all']
for other_future in future._after_all_pipelines:
slot_key = other_future._output_dict['default'].key
after_all.append(str(slot_key))
dependent_slots.add(slot_key)
output_slots = params['output_slots']
output_slot_keys = set()
for name, slot in future._output_dict.iteritems():
output_slot_keys.add(slot.key)
output_slots[name] = str(slot.key)
params_encoded = json.dumps(params, cls=mr_util.JsonEncoder)
params_text = None
params_blob = None
if len(params_encoded) > _MAX_JSON_SIZE:
params_blob = _write_json_blob(params_encoded, pipeline.pipeline_id)
else:
params_text = params_encoded
return dependent_slots, output_slot_keys, params_text, params_blob
|
def _generate_args(pipeline, future, queue_name, base_path)
|
Generate the params used to describe a Pipeline's dependencies.
The arguments passed to this method may be normal values, Slot instances
(for named outputs), or PipelineFuture instances (for referring to the
default output slot).
Args:
pipeline: The Pipeline instance to generate args for.
future: The PipelineFuture for the Pipeline these arguments correspond to.
queue_name: The queue to run the pipeline on.
base_path: Relative URL for pipeline URL handlers.
Returns:
Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where:
dependent_slots: List of db.Key instances of _SlotRecords on which
this pipeline will need to block before execution (passed to
create a _BarrierRecord for running the pipeline).
output_slot_keys: List of db.Key instances of _SlotRecords that will
be filled by this pipeline during its execution (passed to create
a _BarrierRecord for finalizing the pipeline).
params_text: JSON dictionary of pipeline parameters to be serialized and
saved in a corresponding _PipelineRecord. Will be None if the params are
too big and must be saved in a blob instead.
params_blob: JSON dictionary of pipeline parameters to be serialized and
saved in a Blob file, and then attached to a _PipelineRecord. Will be
None if the params data size was small enough to fit in the entity.
| 2.286628
| 2.03418
| 1.124103
|
if when is None:
return None
ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)
ms_since_epoch += when.microsecond / 1000.0
return int(ms_since_epoch)
|
def _get_timestamp_ms(when)
|
Converts a datetime.datetime to integer milliseconds since the epoch.
Requires special handling to preserve microseconds.
Args:
when: A datetime.datetime instance.
Returns:
Integer time since the epoch in milliseconds. If the supplied 'when' is
None, the return value will be None.
| 2.333607
| 2.426107
| 0.961873
|
if slot_dict is None:
slot_dict = {}
slot_record = slot_dict.get(slot_key)
if slot_record is None:
raise PipelineStatusError(
'Could not find data for output slot key "%s".' % slot_key)
output = {}
if slot_record.status == _SlotRecord.FILLED:
output['status'] = 'filled'
output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time)
output['value'] = slot_record.value
filler_pipeline_key = (
_SlotRecord.filler.get_value_for_datastore(slot_record))
else:
output['status'] = 'waiting'
if filler_pipeline_key:
output['fillerPipelineId'] = filler_pipeline_key.name()
return output
|
def _get_internal_slot(slot_key=None,
filler_pipeline_key=None,
slot_dict=None)
|
Gets information about a _SlotRecord for display in UI.
Args:
slot_key: The db.Key of the slot to fetch.
filler_pipeline_key: In the case the slot has not yet been filled, assume
that the given db.Key (for a _PipelineRecord) will be the filler of
the slot in the future.
slot_dict: The slot JSON dictionary.
Returns:
Dictionary with the keys:
status: Slot status: 'filled' or 'waiting'
fillTimeMs: Time in milliseconds since the epoch of when it was filled.
value: The current value of the slot, which is a slot's JSON dictionary.
fillerPipelineId: The pipeline ID of what stage has or should fill
this slot.
Raises:
PipelineStatusError if any input is bad.
| 3.270113
| 2.398784
| 1.363238
|
class_path_set = set()
for cls in _PipelineMeta._all_classes:
if cls.class_path is not None:
class_path_set.add(cls.class_path)
return sorted(class_path_set)
|
def get_pipeline_names()
|
Returns the class paths of all Pipelines defined in alphabetical order.
| 4.307398
| 3.345995
| 1.28733
|
query = _PipelineRecord.all(cursor=cursor)
if class_path:
query.filter('class_path =', class_path)
query.filter('is_root_pipeline =', True)
query.order('-start_time')
root_list = query.fetch(count)
fetch_list = []
for pipeline_record in root_list:
fetch_list.append(db.Key(pipeline_record.params['output_slots']['default']))
fetch_list.append(db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.FINALIZE,
parent=pipeline_record.key()))
fetch_list.append(db.Key.from_path(
_StatusRecord.kind(), pipeline_record.key().name()))
pipeline_dict = dict((stage.key(), stage) for stage in root_list)
slot_dict = {}
barrier_dict = {}
status_dict = {}
for entity in db.get(fetch_list):
if isinstance(entity, _BarrierRecord):
barrier_dict[entity.key()] = entity
elif isinstance(entity, _SlotRecord):
slot_dict[entity.key()] = entity
elif isinstance(entity, _StatusRecord):
status_dict[entity.key()] = entity
results = []
for pipeline_record in root_list:
try:
output = _get_internal_status(
pipeline_record.key(),
pipeline_dict=pipeline_dict,
slot_dict=slot_dict,
barrier_dict=barrier_dict,
status_dict=status_dict)
output['pipelineId'] = pipeline_record.key().name()
results.append(output)
except PipelineStatusError, e:
output = {'status': e.message}
output['classPath'] = ''
output['pipelineId'] = pipeline_record.key().name()
results.append(output)
result_dict = {}
cursor = query.cursor()
query.with_cursor(cursor)
if query.get(keys_only=True):
result_dict.update(cursor=cursor)
result_dict.update(pipelines=results)
return result_dict
|
def get_root_list(class_path=None, cursor=None, count=50)
|
Gets a list of root Pipelines.
Args:
class_path: Optional. If supplied, only return root Pipelines with the
given class_path. By default all root pipelines are returned.
cursor: Optional. When supplied, the cursor returned from the last call to
get_root_list which indicates where to pick up.
count: How many pipelines to return.
Returns:
Dictionary with the keys:
pipelines: The list of Pipeline records in the same format as
returned by get_status_tree, but with only the roots listed.
cursor: Cursor to pass back to this function to resume the query. Will
only be present if there is another page of results.
Raises:
PipelineStatusError if any input is bad.
| 2.601475
| 2.572713
| 1.01118
|
return [
(prefix + '/output', _BarrierHandler),
(prefix + '/run', _PipelineHandler),
(prefix + '/finalized', _PipelineHandler),
(prefix + '/cleanup', _CleanupHandler),
(prefix + '/abort', _PipelineHandler),
(prefix + '/fanout', _FanoutHandler),
(prefix + '/fanout_abort', _FanoutAbortHandler),
(prefix + '/callback', _CallbackHandler),
(prefix + '/rpc/tree', status_ui._TreeStatusHandler),
(prefix + '/rpc/class_paths', status_ui._ClassPathListHandler),
(prefix + '/rpc/list', status_ui._RootListHandler),
(prefix + '(/.+)', status_ui._StatusUiHandler),
]
|
def create_handlers_map(prefix='.*')
|
Create new handlers map.
Args:
prefix: url prefix to use.
Returns:
list of (regexp, handler) pairs for WSGIApplication constructor.
| 4.225364
| 4.509743
| 0.936941
|
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._value
|
def value(self)
|
Returns the current value of this slot.
Returns:
The value of the slot (a serializable Python type).
Raises:
SlotNotFilledError if the value hasn't been filled yet.
| 6.987284
| 4.494816
| 1.554521
|
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._filler_pipeline_key.name()
|
def filler(self)
|
Returns the pipeline ID that filled this slot's value.
Returns:
A string that is the pipeline ID.
Raises:
SlotNotFilledError if the value hasn't been filled yet.
| 9.302297
| 5.38614
| 1.72708
|
if not self.filled:
raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.'
% (self.name, self.key))
return self._fill_datetime
|
def fill_datetime(self)
|
Returns when the slot was filled.
Returns:
A datetime.datetime.
Raises:
SlotNotFilledError if the value hasn't been filled yet.
| 6.905684
| 4.652738
| 1.48422
|
if slot_record.status == _SlotRecord.FILLED:
self.filled = True
self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(
slot_record)
self._fill_datetime = slot_record.fill_time
self._value = slot_record.value
|
def _set_value(self, slot_record)
|
Sets the value of this slot based on its corresponding _SlotRecord.
Does nothing if the slot has not yet been filled.
Args:
slot_record: The _SlotRecord containing this Slot's value.
| 6.807661
| 6.026471
| 1.129626
|
for name, slot_key in already_defined.iteritems():
if not isinstance(slot_key, db.Key):
slot_key = db.Key(slot_key)
slot = self._output_dict.get(name)
if slot is None:
if self._strict:
raise UnexpectedPipelineError(
'Inherited output named "%s" must be filled but '
'not declared for pipeline class "%s"' % (name, pipeline_name))
else:
self._output_dict[name] = Slot(name=name, slot_key=slot_key)
else:
slot.key = slot_key
slot._exists = True
if resolve_outputs:
slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues())
all_slots = db.get(slot_key_dict.keys())
for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
if slot_record is None:
raise UnexpectedPipelineError(
'Inherited output named "%s" for pipeline class "%s" is '
'missing its Slot in the datastore: "%s"' %
(slot.name, pipeline_name, slot.key))
slot = slot_key_dict[slot_record.key()]
slot._set_value(slot_record)
|
def _inherit_outputs(self,
pipeline_name,
already_defined,
resolve_outputs=False)
|
Inherits outputs from a calling Pipeline.
Args:
pipeline_name: The Pipeline class name (used for debugging).
already_defined: Maps output name to stringified db.Key (of _SlotRecords)
of any existing output slots to be inherited by this future.
resolve_outputs: When True, this method will dereference all output slots
before returning back to the caller, making those output slots' values
available.
Raises:
UnexpectedPipelineError when resolve_outputs is True and any of the output
slots could not be retrieved from the Datastore.
| 2.993275
| 2.621855
| 1.141663
|
pipeline_record = _pipeline_record
# Support pipeline IDs and idempotence_keys that are not unicode.
if not isinstance(pipeline_id, unicode):
try:
pipeline_id = pipeline_id.encode('utf-8')
except UnicodeDecodeError:
pipeline_id = hashlib.sha1(pipeline_id).hexdigest()
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
if pipeline_record is None:
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
return None
try:
pipeline_func_class = mr_util.for_name(pipeline_record.class_path)
except ImportError, e:
logging.warning('Tried to find Pipeline %s#%s, but class could '
'not be found. Using default Pipeline class instead.',
pipeline_record.class_path, pipeline_id)
pipeline_func_class = cls
params = pipeline_record.params
arg_list, kwarg_dict = _dereference_args(
pipeline_record.class_path, params['args'], params['kwargs'])
outputs = PipelineFuture(pipeline_func_class.output_names)
outputs._inherit_outputs(
pipeline_record.class_path,
params['output_slots'],
resolve_outputs=resolve_outputs)
stage = pipeline_func_class(*arg_list, **kwarg_dict)
stage.backoff_seconds = params['backoff_seconds']
stage.backoff_factor = params['backoff_factor']
stage.max_attempts = params['max_attempts']
stage.task_retry = params['task_retry']
stage.target = params.get('target') # May not be defined for old Pipelines
stage._current_attempt = pipeline_record.current_attempt
stage._set_values_internal(
_PipelineContext('', params['queue_name'], params['base_path']),
pipeline_key,
_PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record),
outputs,
pipeline_record.status)
return stage
|
def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None)
|
Returns an instance corresponding to an existing Pipeline.
The returned object will have the same properties a Pipeline does while
it's running synchronously (e.g., like when it's first allocated), allowing
callers to inspect caller arguments, outputs, fill slots, complete the
pipeline, abort, retry, etc.
Args:
pipeline_id: The ID of this pipeline (a string).
resolve_outputs: When True, dereference the outputs of this Pipeline
so their values can be accessed by the caller.
_pipeline_record: Internal-only. The _PipelineRecord instance to use
to instantiate this instance instead of fetching it from
the datastore.
Returns:
A Pipeline sub-class instance, or None if it could not be found.
| 3.821875
| 3.774747
| 1.012485
|
if not idempotence_key:
idempotence_key = uuid.uuid4().hex
elif not isinstance(idempotence_key, unicode):
try:
idempotence_key.encode('utf-8')
except UnicodeDecodeError:
idempotence_key = hashlib.sha1(idempotence_key).hexdigest()
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key)
context = _PipelineContext('', queue_name, base_path)
future = PipelineFuture(self.output_names, force_strict=True)
try:
self._set_values_internal(
context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING)
return context.start(
self, return_task=return_task, countdown=countdown, eta=eta)
except Error:
# Pass through exceptions that originate in this module.
raise
except Exception, e:
# Re-type any exceptions that were raised in dependent methods.
raise PipelineSetupError('Error starting %s#%s: %s' % (
self, idempotence_key, str(e)))
|
def start(self,
idempotence_key='',
queue_name='default',
base_path='/_ah/pipeline',
return_task=False,
countdown=None,
eta=None)
|
Starts a new instance of this pipeline.
Args:
idempotence_key: The ID to use for this Pipeline and throughout its
asynchronous workflow to ensure the operations are idempotent. If
empty a starting key will be automatically assigned.
queue_name: What queue this Pipeline's workflow should execute on.
base_path: The relative URL path to where the Pipeline API is
mounted for access by the taskqueue API or external requests.
return_task: When True, a task to start this pipeline will be returned
instead of submitted, allowing the caller to start off this pipeline
as part of a separate transaction (potentially leaving this newly
allocated pipeline's datastore entities in place if that separate
transaction fails for any reason).
countdown: Time in seconds into the future that this Task should execute.
Defaults to zero.
eta: A datetime.datetime specifying the absolute time at which the task
should be executed. Must not be specified if 'countdown' is specified.
This may be timezone-aware or timezone-naive. If None, defaults to now.
For pull tasks, no worker will be able to lease this task before the
time indicated by eta.
Returns:
A taskqueue.Task instance if return_task was True. This task will *not*
have a name, thus to ensure reliable execution of your pipeline you
should add() this task as part of a separate Datastore transaction.
Raises:
PipelineExistsError if the pipeline with the given idempotence key exists.
PipelineSetupError if the pipeline could not start for any other reason.
| 3.881828
| 3.786742
| 1.02511
|
if not self.async:
raise UnexpectedPipelineError(
'May only call retry() method for asynchronous pipelines.')
if self.try_cancel():
self._context.transition_retry(self._pipeline_key, retry_message)
return True
else:
return False
|
def retry(self, retry_message='')
|
Forces a currently running asynchronous pipeline to retry.
Note this may not be called by synchronous or generator pipelines. Those
must instead raise the 'Retry' exception during execution.
Args:
retry_message: Optional message explaining why the retry happened.
Returns:
True if the Pipeline should be retried, False if it cannot be cancelled
mid-flight for some reason.
| 10.515561
| 7.471988
| 1.407331
|
# TODO: Use thread-local variable to enforce that this is not called
# while a pipeline is executing in the current thread.
if (self.async and self._root_pipeline_key == self._pipeline_key and
not self.try_cancel()):
# Handle the special case where the root pipeline is async and thus
# cannot be aborted outright.
return False
else:
return self._context.begin_abort(
self._root_pipeline_key, abort_message=abort_message)
|
def abort(self, abort_message='')
|
Mark the entire pipeline up to the root as aborted.
Note this should only be called from *outside* the context of a running
pipeline. Synchronous and generator pipelines should raise the 'Abort'
exception to cause this behavior during execution.
Args:
abort_message: Optional message explaining why the abort happened.
Returns:
True if the abort signal was sent successfully; False if the pipeline
could not be aborted for any reason.
| 8.032084
| 6.793746
| 1.182276
|
if isinstance(name_or_slot, basestring):
slot = getattr(self.outputs, name_or_slot)
elif isinstance(name_or_slot, Slot):
slot = name_or_slot
else:
raise UnexpectedPipelineError(
'Could not fill invalid output name: %r' % name_or_slot)
if not slot._exists:
raise SlotNotDeclaredError(
'Cannot fill output with name "%s" that was just '
'declared within the Pipeline context.' % slot.name)
self._context.fill_slot(self._pipeline_key, slot, value)
|
def fill(self, name_or_slot, value)
|
Fills an output slot required by this Pipeline.
Args:
name_or_slot: The name of the slot (a string) or Slot record to fill.
value: The serializable value to assign to this slot.
Raises:
UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError
if trying to output to a slot that was not declared ahead of time.
| 4.184992
| 3.192031
| 1.311075
|
if _TEST_MODE:
logging.info(
'New status for %s#%s: message=%r, console_url=%r, status_links=%r',
self, self.pipeline_id, message, console_url, status_links)
return
status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id)
root_pipeline_key = db.Key.from_path(
_PipelineRecord.kind(), self.root_pipeline_id)
status_record = _StatusRecord(
key=status_key, root_pipeline=root_pipeline_key)
try:
if message:
status_record.message = message
if console_url:
status_record.console_url = console_url
if status_links:
# Alphabetize the list.
status_record.link_names = sorted(
db.Text(s) for s in status_links.iterkeys())
status_record.link_urls = [
db.Text(status_links[name]) for name in status_record.link_names]
status_record.status_time = datetime.datetime.utcnow()
status_record.put()
except Exception, e:
raise PipelineRuntimeError('Could not set status for %s#%s: %s' %
(self, self.pipeline_id, str(e)))
|
def set_status(self, message=None, console_url=None, status_links=None)
|
Sets the current status of this pipeline.
This method is purposefully non-transactional. Updates are written to the
datastore immediately and overwrite all existing statuses.
Args:
message: (optional) Overall status message.
console_url: (optional) Relative URL to use for the "console" of this
pipeline that displays current progress. When None, no console will
be displayed.
status_links: (optional) Dictionary of readable link names to relative
URLs that should be associated with this pipeline as it runs. These links
provide convenient access to other dashboards, consoles, etc associated
with the pipeline.
Raises:
PipelineRuntimeError if the status could not be set for any reason.
| 2.531543
| 2.416037
| 1.047808
|
# TODO: Enforce that all outputs expected by this async pipeline were
# filled before this complete() function was called. May required all
# async functions to declare their outputs upfront.
if not self.async:
raise UnexpectedPipelineError(
'May only call complete() method for asynchronous pipelines.')
self._context.fill_slot(
self._pipeline_key, self.outputs.default, default_output)
|
def complete(self, default_output=None)
|
Marks this asynchronous Pipeline as complete.
Args:
default_output: What value the 'default' output slot should be assigned.
Raises:
UnexpectedPipelineError if the slot no longer exists or this method was
called for a pipeline that is not async.
| 16.434467
| 10.390821
| 1.581633
|
# TODO: Support positional parameters.
if not self.async:
raise UnexpectedPipelineError(
'May only call get_callback_url() method for asynchronous pipelines.')
kwargs['pipeline_id'] = self._pipeline_key.name()
params = urllib.urlencode(sorted(kwargs.items()))
return '%s/callback?%s' % (self.base_path, params)
|
def get_callback_url(self, **kwargs)
|
Returns a relative URL for invoking this Pipeline's callback method.
Args:
kwargs: Dictionary mapping keyword argument names to single values that
should be passed to the callback when it is invoked.
Raises:
UnexpectedPipelineError if this is invoked on pipeline that is not async.
| 6.090443
| 4.438367
| 1.372226
|
if not self.async:
raise UnexpectedPipelineError(
'May only call get_callback_task() method for asynchronous pipelines.')
params = kwargs.get('params', {})
kwargs['params'] = params
params['pipeline_id'] = self._pipeline_key.name()
kwargs['url'] = self.base_path + '/callback'
kwargs['method'] = 'POST'
return taskqueue.Task(*args, **kwargs)
|
def get_callback_task(self, *args, **kwargs)
|
Returns a task for calling back this Pipeline.
Args:
params: Keyword argument containing a dictionary of key/value pairs
that will be passed to the callback when it is executed.
args, kwargs: Passed to the taskqueue.Task constructor. Use these
arguments to set the task name (for idempotence), etc.
Returns:
A taskqueue.Task instance that must be enqueued by the caller.
| 5.104507
| 4.177999
| 1.221759
|
status = 'successful'
if self.was_aborted:
status = 'aborted'
app_id = os.environ['APPLICATION_ID']
shard_index = app_id.find('~')
if shard_index != -1:
app_id = app_id[shard_index+1:]
param_dict = {
'status': status,
'app_id': app_id,
'class_path': self._class_path,
'pipeline_id': self.root_pipeline_id,
'base_path': '%s.appspot.com%s' % (app_id, self.base_path),
}
subject = (
'Pipeline %(status)s: App "%(app_id)s", %(class_path)s'
'#%(pipeline_id)s' % param_dict)
body = % param_dict
html = % param_dict
if sender is None:
sender = '%s@%s.appspotmail.com' % (app_id, app_id)
try:
self._send_mail(sender, subject, body, html=html)
except (mail.InvalidSenderError, mail.InvalidEmailError):
logging.warning('Could not send result email for '
'root pipeline ID "%s" from sender "%s"',
self.root_pipeline_id, sender)
|
def send_result_email(self, sender=None)
|
Sends an email to admins indicating this Pipeline has completed.
For developer convenience. Automatically called from finalized for root
Pipelines that do not override the default action.
Args:
sender: (optional) Override the sender's email address.
| 3.310222
| 3.265927
| 1.013563
|
if self._root_pipeline_key is None:
raise UnexpectedPipelineError(
'Could not cleanup Pipeline with unknown root pipeline ID.')
if not self.is_root:
return
task = taskqueue.Task(
params=dict(root_pipeline_key=self._root_pipeline_key),
url=self.base_path + '/cleanup',
headers={'X-Ae-Pipeline-Key': self._root_pipeline_key})
taskqueue.Queue(self.queue_name).add(task)
|
def cleanup(self)
|
Clean up this Pipeline and all Datastore records used for coordination.
Only works when called on a root pipeline. Child pipelines will ignore
calls to this method.
After this method is called, Pipeline.from_id() and related status
methods will return inconsistent or missing results. This method is
fire-and-forget and asynchronous.
| 5.561945
| 4.706159
| 1.181844
|
if _TEST_MODE:
logging.info(
'Setting runtime parameters for %s#%s: %r',
self, self.pipeline_id, kwargs)
return self
if self.pipeline_id is not None:
raise UnexpectedPipelineError(
'May only call with_params() on a Pipeline that has not yet '
'been scheduled for execution.')
ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target')
for name, value in kwargs.iteritems():
if name not in ALLOWED:
raise TypeError('Unexpected keyword: %s=%r' % (name, value))
setattr(self, name, value)
return self
|
def with_params(self, **kwargs)
|
Modify various execution parameters of a Pipeline before it runs.
This method has no effect in test mode.
Args:
kwargs: Attributes to modify on this Pipeline instance before it has
been executed.
Returns:
This Pipeline instance, for easy chaining.
| 4.502721
| 4.319901
| 1.04232
|
# Do not traverse the class hierarchy fetching the class path attribute.
found = cls.__dict__.get('_class_path')
if found is not None:
return
# Do not set the _class_path for the base-class, otherwise all children's
# lookups for _class_path will fall through and return 'Pipeline' above.
# This situation can happen if users call the generic Pipeline.from_id
# to get the result of a Pipeline without knowing its specific class.
if cls is Pipeline:
return
class_path = '%s.%s' % (cls.__module__, cls.__name__)
# When a WSGI handler is invoked as an entry point, any Pipeline class
# defined in the same file as the handler will get __module__ set to
# __main__. Thus we need to find out its real fully qualified path.
if cls.__module__ == '__main__':
for name, module in module_dict.items():
if name == '__main__':
continue
found = getattr(module, cls.__name__, None)
if found is cls:
class_path = '%s.%s' % (name, cls.__name__)
break
cls._class_path = class_path
|
def _set_class_path(cls, module_dict=sys.modules)
|
Sets the absolute path to this class as a string.
Used by the Pipeline API to reconstruct the Pipeline sub-class object
at execution time instead of passing around a serialized function.
Args:
module_dict: Used for testing.
| 5.67066
| 5.556991
| 1.020455
|
self._context = context
self._pipeline_key = pipeline_key
self._root_pipeline_key = root_pipeline_key
self._result_status = result_status
self.outputs = outputs
|
def _set_values_internal(self,
context,
pipeline_key,
root_pipeline_key,
outputs,
result_status)
|
Sets the user-visible values provided as an API by this class.
Args:
context: The _PipelineContext used for this Pipeline.
pipeline_key: The db.Key of this pipeline.
root_pipeline_key: The db.Key of the root pipeline.
outputs: The PipelineFuture for this pipeline.
result_status: The result status of this pipeline.
| 2.254473
| 2.119153
| 1.063856
|
logging.debug('Callback %s(*%s, **%s)#%s with params: %r',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name(), kwargs)
return self.callback(**kwargs)
|
def _callback_internal(self, kwargs)
|
Used to execute callbacks on asynchronous pipelines.
| 7.201515
| 6.624517
| 1.0871
|
self._set_values_internal(
context, pipeline_key, root_pipeline_key, caller_output,
_PipelineRecord.RUN)
logging.debug('Running %s(*%s, **%s)#%s',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name())
return self.run(*self.args, **self.kwargs)
|
def _run_internal(self,
context,
pipeline_key,
root_pipeline_key,
caller_output)
|
Used by the Pipeline evaluator to execute this Pipeline.
| 5.137721
| 4.865238
| 1.056006
|
result_status = _PipelineRecord.RUN
if aborted:
result_status = _PipelineRecord.ABORTED
self._set_values_internal(
context, pipeline_key, root_pipeline_key, caller_output, result_status)
logging.debug('Finalizing %s(*%r, **%r)#%s',
self._class_path, _short_repr(self.args),
_short_repr(self.kwargs), self._pipeline_key.name())
try:
self.finalized()
except NotImplementedError:
pass
|
def _finalized_internal(self,
context,
pipeline_key,
root_pipeline_key,
caller_output,
aborted)
|
Used by the Pipeline evaluator to finalize this Pipeline.
| 5.260597
| 4.932958
| 1.066418
|
if cls._local._activated:
cls._local._in_order_futures.add(future)
|
def _add_future(cls, future)
|
Adds a future to the list of in-order futures thus far.
Args:
future: The future to add to the list.
| 15.162353
| 11.467258
| 1.32223
|
if not hasattr(cls._local, '_in_order_futures'):
cls._local._in_order_futures = set()
cls._local._activated = False
|
def _thread_init(cls)
|
Ensure thread local is initialized.
| 8.647036
| 6.582638
| 1.313612
|
base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2]
return cls(
environ['HTTP_X_APPENGINE_TASKNAME'],
environ['HTTP_X_APPENGINE_QUEUENAME'],
base_path)
|
def from_environ(cls, environ=os.environ)
|
Constructs a _PipelineContext from the task queue environment.
| 4.197647
| 3.537248
| 1.186699
|
if not isinstance(filler_pipeline_key, db.Key):
filler_pipeline_key = db.Key(filler_pipeline_key)
if _TEST_MODE:
slot._set_value_test(filler_pipeline_key, value)
else:
encoded_value = json.dumps(value,
sort_keys=True,
cls=mr_util.JsonEncoder)
value_text = None
value_blob = None
if len(encoded_value) <= _MAX_JSON_SIZE:
value_text = db.Text(encoded_value)
else:
# The encoded value is too big. Save it as a blob.
value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())
def txn():
slot_record = db.get(slot.key)
if slot_record is None:
raise UnexpectedPipelineError(
'Tried to fill missing slot "%s" '
'by pipeline ID "%s" with value: %r'
% (slot.key, filler_pipeline_key.name(), value))
# NOTE: Always take the override value here. If down-stream pipelines
# need a consistent view of all up-stream outputs (meaning, all of the
# outputs came from the same retry attempt of the upstream pipeline),
# the down-stream pipeline must also wait for the 'default' output
# of these up-stream pipelines.
slot_record.filler = filler_pipeline_key
slot_record.value_text = value_text
slot_record.value_blob = value_blob
slot_record.status = _SlotRecord.FILLED
slot_record.fill_time = self._gettime()
slot_record.put()
task = taskqueue.Task(
url=self.barrier_handler_path,
params=dict(
slot_key=slot.key,
use_barrier_indexes=True),
headers={'X-Ae-Slot-Key': slot.key,
'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
task.add(queue_name=self.queue_name, transactional=True)
db.run_in_transaction_options(
db.create_transaction_options(propagation=db.ALLOWED), txn)
self.session_filled_output_names.add(slot.name)
|
def fill_slot(self, filler_pipeline_key, slot, value)
|
Fills a slot, enqueueing a task to trigger pending barriers.
Args:
filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
that filled this slot.
slot: The Slot instance to fill.
value: The serializable value to assign.
Raises:
UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
be found in the Datastore.
| 4.202594
| 3.970735
| 1.058392
|
def txn():
pipeline_record = db.get(root_pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to abort root pipeline ID "%s" but it does not exist.',
root_pipeline_key.name())
raise db.Rollback()
if pipeline_record.status == _PipelineRecord.ABORTED:
logging.warning(
'Tried to abort root pipeline ID "%s"; already in state: %s',
root_pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
if pipeline_record.abort_requested:
logging.warning(
'Tried to abort root pipeline ID "%s"; abort signal already sent.',
root_pipeline_key.name())
raise db.Rollback()
pipeline_record.abort_requested = True
pipeline_record.abort_message = abort_message
pipeline_record.put()
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
return True
return db.run_in_transaction(txn)
|
def begin_abort(self, root_pipeline_key, abort_message)
|
Kicks off the abort process for a root pipeline and all its children.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
abort_message: Message explaining why the abort happened, only saved
into the root pipeline.
Returns:
True if the abort signal was sent successfully; False otherwise.
| 2.480379
| 2.267797
| 1.09374
|
if not isinstance(root_pipeline_key, db.Key):
root_pipeline_key = db.Key(root_pipeline_key)
# NOTE: The results of this query may include _PipelineRecord instances
# that are not actually "reachable", meaning you cannot get to them by
# starting at the root pipeline and following "fanned_out" onward. This
# is acceptable because even these defunct _PipelineRecords will properly
# set their status to ABORTED when the signal comes, regardless of any
# other status they may have had.
#
# The only gotcha here is if a Pipeline's finalize method somehow modifies
# its inputs (like deleting an input file). In cases where there are
# unreachable child pipelines, it will appear as if two finalize methods
# have been called instead of just one. The saving grace here is that
# finalize must be idempotent, so this *should* be harmless.
query = (
_PipelineRecord.all(cursor=cursor)
.filter('root_pipeline =', root_pipeline_key))
results = query.fetch(max_to_notify)
task_list = []
for pipeline_record in results:
if pipeline_record.status not in (
_PipelineRecord.RUN, _PipelineRecord.WAITING):
continue
pipeline_key = pipeline_record.key()
task_list.append(taskqueue.Task(
name='%s-%s-abort' % (self.task_name, pipeline_key.name()),
url=self.abort_handler_path,
params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT),
headers={'X-Ae-Pipeline-Key': pipeline_key}))
# Task continuation with sequence number to prevent fork-bombs.
if len(results) == max_to_notify:
the_match = re.match('(.*)-([0-9]+)', self.task_name)
if the_match:
prefix = the_match.group(1)
end = int(the_match.group(2)) + 1
else:
prefix = self.task_name
end = 0
task_list.append(taskqueue.Task(
name='%s-%d' % (prefix, end),
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key,
cursor=query.cursor())))
if task_list:
try:
taskqueue.Queue(self.queue_name).add(task_list)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError):
pass
|
def continue_abort(self,
root_pipeline_key,
cursor=None,
max_to_notify=_MAX_ABORTS_TO_BEGIN)
|
Sends the abort signal to all children for a root pipeline.
Args:
root_pipeline_key: db.Key of the root pipeline to abort.
cursor: The query cursor for enumerating _PipelineRecords when inserting
tasks to cause child pipelines to terminate.
max_to_notify: Used for testing.
| 4.569677
| 4.36583
| 1.046691
|
# Adjust all pipeline output keys for this Pipeline to be children of
# the _PipelineRecord, that way we can write them all and submit in a
# single transaction.
for name, slot in pipeline.outputs._output_dict.iteritems():
slot.key = db.Key.from_path(
*slot.key.to_path(), **dict(parent=pipeline._pipeline_key))
_, output_slots, params_text, params_blob = _generate_args(
pipeline, pipeline.outputs, self.queue_name, self.base_path)
@db.transactional(propagation=db.INDEPENDENT)
def txn():
pipeline_record = db.get(pipeline._pipeline_key)
if pipeline_record is not None:
raise PipelineExistsError(
'Pipeline with idempotence key "%s" already exists; params=%s' %
(pipeline._pipeline_key.name(),
_short_repr(pipeline_record.params)))
entities_to_put = []
for name, slot in pipeline.outputs._output_dict.iteritems():
entities_to_put.append(_SlotRecord(
key=slot.key,
root_pipeline=pipeline._pipeline_key))
entities_to_put.append(_PipelineRecord(
key=pipeline._pipeline_key,
root_pipeline=pipeline._pipeline_key,
is_root_pipeline=True,
# Bug in DB means we need to use the storage name here,
# not the local property name.
params=params_text,
params_blob=params_blob,
start_time=self._gettime(),
class_path=pipeline._class_path,
max_attempts=pipeline.max_attempts))
entities_to_put.extend(_PipelineContext._create_barrier_entities(
pipeline._pipeline_key,
pipeline._pipeline_key,
_BarrierRecord.FINALIZE,
output_slots))
db.put(entities_to_put)
task = taskqueue.Task(
url=self.pipeline_handler_path,
params=dict(pipeline_key=pipeline._pipeline_key),
headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key},
target=pipeline.target,
countdown=countdown,
eta=eta)
if return_task:
return task
task.add(queue_name=self.queue_name, transactional=True)
task = txn()
# Immediately mark the output slots as existing so they can be filled
# by asynchronous pipelines or used in test mode.
for output_slot in pipeline.outputs._output_dict.itervalues():
output_slot._exists = True
return task
|
def start(self, pipeline, return_task=True, countdown=None, eta=None)
|
Starts a pipeline.
Args:
pipeline: Pipeline instance to run.
return_task: When True, do not submit the task to start the pipeline
but instead return it for someone else to enqueue.
countdown: Time in seconds into the future that this Task should execute.
Defaults to zero.
eta: A datetime.datetime specifying the absolute time at which the task
should be executed. Must not be specified if 'countdown' is specified.
This may be timezone-aware or timezone-naive. If None, defaults to now.
For pull tasks, no worker will be able to lease this task before the
time indicated by eta.
Returns:
The task to start this pipeline if return_task was True.
Raises:
PipelineExistsError if the pipeline with the given ID already exists.
| 4.585701
| 4.634648
| 0.989439
|
result = []
blocking_slot_keys = list(blocking_slot_keys)
barrier = _BarrierRecord(
parent=child_pipeline_key,
key_name=purpose,
target=child_pipeline_key,
root_pipeline=root_pipeline_key,
blocking_slots=blocking_slot_keys)
result.append(barrier)
for slot_key in blocking_slot_keys:
barrier_index_path = []
barrier_index_path.extend(slot_key.to_path())
barrier_index_path.extend(child_pipeline_key.to_path())
barrier_index_path.extend([_BarrierIndex.kind(), purpose])
barrier_index_key = db.Key.from_path(*barrier_index_path)
barrier_index = _BarrierIndex(
key=barrier_index_key,
root_pipeline=root_pipeline_key)
result.append(barrier_index)
return result
|
def _create_barrier_entities(root_pipeline_key,
child_pipeline_key,
purpose,
blocking_slot_keys)
|
Creates all of the entities required for a _BarrierRecord.
Args:
root_pipeline_key: The root pipeline this is part of.
child_pipeline_key: The pipeline this barrier is for.
purpose: _BarrierRecord.START or _BarrierRecord.FINALIZE.
blocking_slot_keys: Set of db.Keys corresponding to _SlotRecords that
this barrier should wait on before firing.
Returns:
List of entities, starting with the _BarrierRecord entity, followed by
_BarrierIndexes used for firing when _SlotRecords are filled in the same
order as the blocking_slot_keys list provided. All of these entities
should be put in the Datastore to ensure the barrier fires properly.
| 2.572696
| 2.179284
| 1.180524
|
if isinstance(e, Retry):
retry_message = str(e)
logging.warning('User forced retry for pipeline ID "%s" of %r: %s',
pipeline_key.name(), pipeline_func, retry_message)
self.transition_retry(pipeline_key, retry_message)
elif isinstance(e, Abort):
abort_message = str(e)
logging.warning('User forced abort for pipeline ID "%s" of %r: %s',
pipeline_key.name(), pipeline_func, abort_message)
pipeline_func.abort(abort_message)
else:
retry_message = '%s: %s' % (e.__class__.__name__, str(e))
logging.exception('Generator %r#%s raised exception. %s',
pipeline_func, pipeline_key.name(), retry_message)
self.transition_retry(pipeline_key, retry_message)
return pipeline_func.task_retry
|
def handle_run_exception(self, pipeline_key, pipeline_func, e)
|
Handles an exception raised by a Pipeline's user code.
Args:
pipeline_key: The pipeline that raised the error.
pipeline_func: The class path name of the Pipeline that was running.
e: The exception that was raised.
Returns:
True if the exception should be re-raised up through the calling stack
by the caller of this method.
| 2.898162
| 2.963789
| 0.977857
|
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning('Pipeline ID "%s" cannot be marked as run. '
'Does not exist.', pipeline_key.name())
raise db.Rollback()
if pipeline_record.status != _PipelineRecord.WAITING:
logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.RUN
if fanned_out_pipelines:
# NOTE: We must model the pipeline relationship in a top-down manner,
# meaning each pipeline must point forward to the pipelines that it
# fanned out to. The reason is race conditions. If evaluate()
# dies early, it may create many unused _PipelineRecord and _SlotRecord
# instances that never progress. The only way we know which of these
# are valid is by traversing the graph from the root, where the
# fanned_out property refers to those pipelines that were run using a
# transactional task.
child_pipeline_list = list(fanned_out_pipelines)
pipeline_record.fanned_out = child_pipeline_list
if pipelines_to_run:
child_indexes = [
child_pipeline_list.index(p) for p in pipelines_to_run]
child_indexes.sort()
task = taskqueue.Task(
url=self.fanout_handler_path,
params=dict(parent_key=str(pipeline_key),
child_indexes=child_indexes))
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
if blocking_slot_keys:
# NOTE: Always update a generator pipeline's finalization barrier to
# include all of the outputs of any pipelines that it runs, to ensure
# that finalized calls will not happen until all child pipelines have
# completed. This must happen transactionally with the enqueue of
# the fan-out kickoff task above to ensure the child output slots and
# the barrier blocking slots are the same.
barrier_key = db.Key.from_path(
_BarrierRecord.kind(), _BarrierRecord.FINALIZE,
parent=pipeline_key)
finalize_barrier = db.get(barrier_key)
if finalize_barrier is None:
raise UnexpectedPipelineError(
'Pipeline ID "%s" cannot update finalize barrier. '
'Does not exist.' % pipeline_key.name())
else:
finalize_barrier.blocking_slots = list(
blocking_slot_keys.union(set(finalize_barrier.blocking_slots)))
finalize_barrier.put()
db.run_in_transaction(txn)
|
def transition_run(self,
pipeline_key,
blocking_slot_keys=None,
fanned_out_pipelines=None,
pipelines_to_run=None)
|
Marks an asynchronous or generator pipeline as running.
Does nothing if the pipeline is no longer in a runnable state.
Args:
pipeline_key: The db.Key of the _PipelineRecord to update.
blocking_slot_keys: List of db.Key instances that this pipeline's
finalization barrier should wait on in addition to the existing one.
This is used to update the barrier to include all child outputs. When
None, the barrier will not be updated.
fanned_out_pipelines: List of db.Key instances of _PipelineRecords that
were fanned out by this generator pipeline. This is distinct from the
'pipelines_to_run' list because not all of the pipelines listed here
will be immediately ready to execute. When None, then this generator
yielded no children.
pipelines_to_run: List of db.Key instances of _PipelineRecords that should
be kicked off (fan-out) transactionally as part of this transition.
When None, no child pipelines will run. All db.Keys in this list must
also be present in the fanned_out_pipelines list.
Raises:
UnexpectedPipelineError if blocking_slot_keys was not empty and the
_BarrierRecord has gone missing.
| 4.844608
| 4.264215
| 1.136108
|
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to mark pipeline ID "%s" as complete but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to mark pipeline ID "%s" as complete, found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
pipeline_record.status = _PipelineRecord.DONE
pipeline_record.finalized_time = self._gettime()
pipeline_record.put()
db.run_in_transaction(txn)
|
def transition_complete(self, pipeline_key)
|
Marks the given pipeline as complete.
Does nothing if the pipeline is no longer in a state that can be completed.
Args:
pipeline_key: db.Key of the _PipelineRecord that has completed.
| 3.227152
| 2.629639
| 1.227222
|
def txn():
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
logging.warning(
'Tried to retry pipeline ID "%s" but it does not exist.',
pipeline_key.name())
raise db.Rollback()
if pipeline_record.status not in (
_PipelineRecord.WAITING, _PipelineRecord.RUN):
logging.warning(
'Tried to retry pipeline ID "%s", found bad state: %s',
pipeline_key.name(), pipeline_record.status)
raise db.Rollback()
params = pipeline_record.params
offset_seconds = (
params['backoff_seconds'] *
(params['backoff_factor'] ** pipeline_record.current_attempt))
pipeline_record.next_retry_time = (
self._gettime() + datetime.timedelta(seconds=offset_seconds))
pipeline_record.current_attempt += 1
pipeline_record.retry_message = retry_message
pipeline_record.status = _PipelineRecord.WAITING
if pipeline_record.current_attempt >= pipeline_record.max_attempts:
root_pipeline_key = (
_PipelineRecord.root_pipeline.get_value_for_datastore(
pipeline_record))
logging.warning(
'Giving up on pipeline ID "%s" after %d attempt(s); causing abort '
'all the way to the root pipeline ID "%s"', pipeline_key.name(),
pipeline_record.current_attempt, root_pipeline_key.name())
# NOTE: We do *not* set the status to aborted here to ensure that
# this pipeline will be finalized before it has been marked as aborted.
pipeline_record.abort_message = (
'Aborting after %d attempts' % pipeline_record.current_attempt)
task = taskqueue.Task(
url=self.fanout_abort_handler_path,
params=dict(root_pipeline_key=root_pipeline_key))
task.add(queue_name=self.queue_name, transactional=True)
else:
task = taskqueue.Task(
url=self.pipeline_handler_path,
eta=pipeline_record.next_retry_time,
params=dict(pipeline_key=pipeline_key,
purpose=_BarrierRecord.START,
attempt=pipeline_record.current_attempt),
headers={'X-Ae-Pipeline-Key': pipeline_key},
target=pipeline_record.params['target'])
task.add(queue_name=self.queue_name, transactional=True)
pipeline_record.put()
db.run_in_transaction(txn)
|
def transition_retry(self, pipeline_key, retry_message)
|
Marks the given pipeline as requiring another retry.
Does nothing if all attempts have been exceeded.
Args:
pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
retry_message: User-supplied message indicating the reason for the retry.
| 2.991238
| 2.795133
| 1.07016
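The retry delay above grows exponentially with the attempt number. The following standalone sketch reproduces that calculation with made-up backoff parameters to show the resulting schedule.

# Hypothetical retry parameters; the formula matches the one in transition_retry:
#   offset_seconds = backoff_seconds * backoff_factor ** current_attempt
backoff_seconds = 15
backoff_factor = 2
max_attempts = 5
for current_attempt in range(max_attempts):
    offset_seconds = backoff_seconds * (backoff_factor ** current_attempt)
    print('attempt %d retries after %d seconds' % (current_attempt + 1, offset_seconds))
# -> 15, 30, 60, 120, 240 seconds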
|
pipeline_id = self.request.get('pipeline_id')
if not pipeline_id:
raise _CallbackTaskError('"pipeline_id" parameter missing.')
pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id)
pipeline_record = db.get(pipeline_key)
if pipeline_record is None:
raise _CallbackTaskError(
'Pipeline ID "%s" for callback does not exist.' % pipeline_id)
params = pipeline_record.params
real_class_path = params['class_path']
try:
pipeline_func_class = mr_util.for_name(real_class_path)
except ImportError, e:
raise _CallbackTaskError(
'Cannot load class named "%s" for pipeline ID "%s".'
% (real_class_path, pipeline_id))
if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ:
if pipeline_func_class.public_callbacks:
pass
elif pipeline_func_class.admin_callbacks:
if not users.is_current_user_admin():
raise _CallbackTaskError(
'Unauthorized callback for admin-only pipeline ID "%s"'
% pipeline_id)
else:
raise _CallbackTaskError(
'External callback for internal-only pipeline ID "%s"'
% pipeline_id)
kwargs = {}
for key in self.request.arguments():
if key != 'pipeline_id':
kwargs[str(key)] = self.request.get(key)
def perform_callback():
stage = pipeline_func_class.from_id(pipeline_id)
if stage is None:
raise _CallbackTaskError(
'Pipeline ID "%s" deleted during callback' % pipeline_id)
return stage._callback_internal(kwargs)
# callback_xg_transaction is a 3-valued setting (None=no trans,
# False=1-eg-trans, True=xg-trans)
if pipeline_func_class._callback_xg_transaction is not None:
transaction_options = db.create_transaction_options(
xg=pipeline_func_class._callback_xg_transaction)
callback_result = db.run_in_transaction_options(transaction_options,
perform_callback)
else:
callback_result = perform_callback()
if callback_result is not None:
status_code, content_type, content = callback_result
self.response.set_status(status_code)
self.response.headers['Content-Type'] = content_type
self.response.out.write(content)
|
def run_callback(self)
|
Runs the callback for the pipeline specified in the request.
Raises:
_CallbackTaskError if something was wrong with the request parameters.
| 2.795971
| 2.653804
| 1.053571
|
import os
import sys
all_paths = os.environ.get('PYTHONPATH').split(os.pathsep)
for path_dir in all_paths:
dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py')
if os.path.exists(dev_appserver_path):
logging.debug('Found appengine SDK on path!')
google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path))
sys.path.append(google_appengine)
# The next import will fix up sys.path even further to bring in
# any dependent lib directories that the SDK needs.
dev_appserver = __import__('dev_appserver')
sys.path.extend(dev_appserver.EXTRA_PATHS)
return
|
def _fix_path()
|
Finds the google_appengine directory and fixes Python imports to use it.
| 3.79623
| 3.405303
| 1.114799
|
if hasattr(self, '_params_decoded'):
return self._params_decoded
if self.params_blob is not None:
value_encoded = self.params_blob.open().read()
else:
value_encoded = self.params_text
value = json.loads(value_encoded, cls=util.JsonDecoder)
if isinstance(value, dict):
kwargs = value.get('kwargs')
if kwargs:
adjusted_kwargs = {}
for arg_key, arg_value in kwargs.iteritems():
# Python only allows non-unicode strings as keyword arguments.
adjusted_kwargs[str(arg_key)] = arg_value
value['kwargs'] = adjusted_kwargs
self._params_decoded = value
return self._params_decoded
|
def params(self)
|
Returns the dictionary of parameters for this Pipeline.
| 3.312776
| 3.291795
| 1.006374
|
if hasattr(self, '_value_decoded'):
return self._value_decoded
if self.value_blob is not None:
encoded_value = self.value_blob.open().read()
else:
encoded_value = self.value_text
self._value_decoded = json.loads(encoded_value, cls=util.JsonDecoder)
return self._value_decoded
|
def value(self)
|
Returns the value of this Slot.
| 3.344494
| 3.351375
| 0.997947
|
barrier_index_path = barrier_index_key.to_path()
# Pick out the items from the _BarrierIndex key path that we need to
# construct the _BarrierRecord key path.
(pipeline_kind, dependent_pipeline_id,
unused_kind, purpose) = barrier_index_path[-4:]
barrier_record_path = (
pipeline_kind, dependent_pipeline_id,
_BarrierRecord.kind(), purpose)
return db.Key.from_path(*barrier_record_path)
|
def to_barrier_key(cls, barrier_index_key)
|
Converts a _BarrierIndex key to a _BarrierRecord key.
Args:
barrier_index_key: db.Key for a _BarrierIndex entity.
Returns:
db.Key for the corresponding _BarrierRecord entity.
| 5.902054
| 5.802835
| 1.017098
|
# Check the page number is valid.
if page <= self.page_count:
raise ValueError("Page {0:d} has already started, cannot mark used labels now.".format(page))
# Add these to any existing labels marked as used.
used = self._used.get(page, set())
for row, column in used_labels:
# Check the index is valid.
if row < 1 or row > self.specs.rows:
raise IndexError("Invalid row number: {0:d}.".format(row))
if column < 1 or column > self.specs.columns:
raise IndexError("Invalid column number: {0:d}.".format(column))
# Add it.
used.add((int(row), int(column)))
# Save the details.
self._used[page] = used
|
def partial_page(self, page, used_labels)
|
Allows a page to be marked as already partially used so you can
generate a PDF to print on the remaining labels.
Parameters
----------
page: positive integer
The page number to mark as partially used. The page must not have
already been started, i.e., for page 1 this must be called before
any labels have been started, for page 2 this must be called before
the first page is full and so on.
used_labels: iterable
An iterable of (row, column) pairs marking which labels have been
used already. The rows and columns must be within the bounds of the
sheet.
| 3.64602
| 3.171363
| 1.14967
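A hedged usage sketch for the partial_page call documented above. It assumes the pylabels package (imported as labels) with its Specification and Sheet classes and ReportLab installed; the sheet dimensions and drawing function are illustrative only.

import labels
from reportlab.graphics import shapes

# Hypothetical A4 sheet: 2 columns x 8 rows of 90mm x 25mm labels.
specs = labels.Specification(210, 297, 2, 8, 90, 25)

def draw_label(label, width, height, obj):
    # Minimal drawing callable: just write the object as text.
    label.add(shapes.String(2, 2, str(obj), fontName='Helvetica', fontSize=12))

sheet = labels.Sheet(specs, draw_label)
# Labels (1,1) and (1,2) on page 1 were already used on a previous print run,
# so mark them before adding anything to the sheet.
sheet.partial_page(1, [(1, 1), (1, 2)])
sheet.add_labels(['Alpha', 'Beta', 'Gamma'])
sheet.save('labels.pdf')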
|
self._current_page = Drawing(*self._pagesize)
if self._bgimage:
self._current_page.add(self._bgimage)
self._pages.append(self._current_page)
self.page_count += 1
self._position = [1, 0]
|
def _new_page(self)
|
Helper function to start a new page. Not intended for external use.
| 4.119328
| 3.796548
| 1.085019
|
# Special case for the very first label.
if self.page_count == 0:
self._new_page()
# Filled up a page.
elif self._position == self._numlabels:
self._new_page()
# Filled up a row.
elif self._position[1] == self.specs.columns:
self._position[0] += 1
self._position[1] = 0
# Move to the next column.
self._position[1] += 1
|
def _next_label(self)
|
Helper method to move to the next label. Not intended for external use.
This does not increment the label_count attribute as the next label may
not be usable (it may have been marked as missing through
partial_page). See _next_unused_label for a generally more useful method.
| 4.251585
| 3.944029
| 1.07798
|
self._next_label()
# This label may be missing.
if self.page_count in self._used:
# Keep trying while the label is missing.
missing = self._used.get(self.page_count, set())
while tuple(self._position) in missing:
# Throw the missing information away now we have used it. This
# allows the _shade_remaining_missing method to work.
missing.discard(tuple(self._position))
# Shade the missing label if desired.
if self.shade_missing:
self._shade_missing_label()
# Try our luck with the next label.
self._next_label()
missing = self._used.get(self.page_count, set())
# Increment the count now we have found a suitable position.
self.label_count += 1
|
def _next_unused_label(self)
|
Helper method to move to the next unused label. Not intended for external use.
This method will shade in any missing labels if desired, and will
increment the label_count attribute once a suitable label position has
been found.
| 6.028337
| 4.985713
| 1.209122
|
# Calculate the left edge of the label.
left = self.specs.left_margin
left += (self.specs.label_width * (self._position[1] - 1))
if self.specs.column_gap:
left += (self.specs.column_gap * (self._position[1] - 1))
left *= mm
# And the bottom.
bottom = self.specs.sheet_height - self.specs.top_margin
bottom -= (self.specs.label_height * self._position[0])
if self.specs.row_gap:
bottom -= (self.specs.row_gap * (self._position[0] - 1))
bottom *= mm
# Done.
return float(left), float(bottom)
|
def _calculate_edges(self)
|
Calculate edges of the current label. Not intended for external use.
| 2.970569
| 2.69391
| 1.102698
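A standalone arithmetic check of the edge calculation above, using made-up sheet dimensions; it mirrors the same formula without the class plumbing.

# Hypothetical sheet geometry in millimetres, mirroring the attributes used above.
sheet_height, top_margin, left_margin = 297.0, 21.5, 11.0
label_width, label_height = 90.0, 25.0
column_gap, row_gap = 8.0, 2.0
row, column = 2, 1   # current label position, 1-based
mm = 72.0 / 25.4     # ReportLab's millimetre unit in points

left = left_margin + label_width * (column - 1) + column_gap * (column - 1)
bottom = (sheet_height - top_margin
          - label_height * row
          - row_gap * (row - 1))
print(left * mm, bottom * mm)  # left/bottom edge of label (2, 1) in points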
|
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# Fill with a rectangle; the clipping path will take care of the borders.
r = shapes.Rect(0, 0, float(self._lw), float(self._lh))
r.fillColor = self.shade_missing
r.strokeColor = None
label.add(r)
# Add the label to the page.
label.shift(*self._calculate_edges())
self._current_page.add(label)
|
def _shade_missing_label(self)
|
Helper method to shade a missing label. Not intended for external use.
| 5.771303
| 5.469357
| 1.055207
|
# Sanity check.
if not self.shade_missing:
return
# Run through each missing label left in the current page and shade it.
missing = self._used.get(self.page_count, set())
for position in missing:
self._position = position
self._shade_missing_label()
|
def _shade_remaining_missing(self)
|
Helper method to shade any missing labels remaining on the current
page. Not intended for external use.
Note that this will modify the internal _position attribute and should
therefore only be used once all the 'real' labels have been drawn.
| 8.91956
| 6.495528
| 1.373185
|
# Start a drawing for the whole label.
label = Drawing(float(self._lw), float(self._lh))
label.add(self._clip_label)
# And one for the available area (i.e., after padding).
available = Drawing(float(self._dw), float(self._dh))
available.add(self._clip_drawing)
# Call the drawing function.
self.drawing_callable(available, float(self._dw), float(self._dh), obj)
# Render the contents on the label.
available.shift(float(self._lp), float(self._bp))
label.add(available)
# Draw the border if requested.
if self.border:
label.add(self._border)
# Add however many copies we need to.
for i in range(count):
# Find the next available label.
self._next_unused_label()
# Have we been told to skip this page?
if self.pages_to_draw and self.page_count not in self.pages_to_draw:
continue
# Add the label to the page. ReportLab stores the added drawing by
# reference so we have to copy it N times.
thislabel = copy(label)
thislabel.shift(*self._calculate_edges())
self._current_page.add(thislabel)
|
def _draw_label(self, obj, count)
|
Helper method to draw on the current label. Not intended for external use.
| 5.590129
| 5.461792
| 1.023497
|
# If we can convert it to an int, do so and use the itertools.repeat()
# method to create an infinite iterator from it. Otherwise, assume it
# is an iterable or sequence.
try:
count = int(count)
except TypeError:
pass
else:
count = repeat(count)
# If it is not already an iterator (e.g., it is a list or range object),
# create an iterator over it.
if not hasattr(count, 'next') and not hasattr(count, '__next__'):
count = iter(count)
# Go through the objects.
for obj in objects:
# Check we have a count for this one.
try:
thiscount = next(count)
except StopIteration:
break
# Draw it.
self._draw_label(obj, thiscount)
|
def add_labels(self, objects, count=1)
|
Add multiple labels to the sheet.
Parameters
----------
objects: iterable
An iterable of the objects to add. Each of these will be passed to
the add_label method. Note that if this is a generator it will be
consumed.
count: positive integer or iterable of positive integers, default 1
The number of copies of each label to add. If a single integer,
that many copies of every label are added. If an iterable, then
each value specifies how many copies of the corresponding label to
add. The iterables are advanced in parallel until one is exhausted;
extra values in the other one are ignored. This means that if there
are fewer count entries than objects, the objects corresponding to
the missing counts will not be added to the sheet.
Note that if this is a generator it will be consumed. Also note
that the drawing function will only be called once for each label
and the results copied for the repeats. If the drawing function
maintains any state internally then using this parameter may break
it.
| 4.234927
| 4.135586
| 1.024021
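A small sketch of the per-object count behaviour described above, again assuming the pylabels (labels) API and an illustrative drawing function.

import labels
from reportlab.graphics import shapes

specs = labels.Specification(210, 297, 2, 8, 90, 25)  # hypothetical A4 layout

def draw_label(label, width, height, obj):
    label.add(shapes.String(2, 2, str(obj), fontSize=12))

sheet = labels.Sheet(specs, draw_label)
# Three objects with three different copy counts; the iterables advance in
# parallel, so 'A' gets 1 copy, 'B' gets 2 and 'C' gets 5.
sheet.add_labels(['A', 'B', 'C'], count=[1, 2, 5])
sheet.save('counts.pdf')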
|
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Create a canvas.
canvas = Canvas(filelike, pagesize=self._pagesize)
# Render each created page onto the canvas.
for page in self._pages:
renderPDF.draw(page, canvas, 0, 0)
canvas.showPage()
# Done.
canvas.save()
|
def save(self, filelike)
|
Save the file as a PDF.
Parameters
----------
filelike: path or file-like object
The filename or file-like object to save the labels under. Any
existing contents will be overwritten.
| 6.395486
| 5.670358
| 1.12788
|
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
renderPM.drawToFile(self._pages[page-1], filelike, format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh
|
def preview(self, page, filelike, format='png', dpi=72, background_colour=0xFFFFFF)
|
Render a preview image of a page.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
filelike: path or file-like object
Can be a filename as a string, a Python file object, or something
which behaves like a Python file object. For example, if you were
using the Django web framework, an HttpResponse object could be
passed to render the preview to the browser (as long as you remember
to set the mimetype of the response). If you pass a filename, the
existing contents will be overwritten.
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
| 3.961828
| 3.943706
| 1.004595
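A minimal sketch of rendering a preview image of page 1, under the same pylabels assumption; the pages_to_draw constructor argument limits drawing to the page being previewed, as the notes above suggest.

import labels
from reportlab.graphics import shapes

specs = labels.Specification(210, 297, 2, 8, 90, 25)  # hypothetical layout

def draw_label(label, width, height, obj):
    label.add(shapes.String(2, 2, str(obj), fontSize=12))

# Only draw page 1, since that is all we want to preview.
sheet = labels.Sheet(specs, draw_label, pages_to_draw=[1])
sheet.add_labels(range(1, 17))
sheet.preview(1, 'page1.png', format='png', dpi=96)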
|
# Check the page number.
if page < 1 or page > self.page_count:
raise ValueError("Invalid page number; should be between 1 and {0:d}.".format(self.page_count))
# Shade any remaining missing labels if desired.
self._shade_remaining_missing()
# Rendering to an image (as opposed to a PDF) requires any background
# to have an integer width and height if it is a ReportLab Image
# object. Drawing objects are exempt from this.
oldw, oldh = None, None
if isinstance(self._bgimage, Image):
oldw, oldh = self._bgimage.width, self._bgimage.height
self._bgimage.width = int(oldw) + 1
self._bgimage.height = int(oldh) + 1
# Let ReportLab do the heavy lifting.
s = renderPM.drawToString(self._pages[page-1], format, dpi, background_colour)
# Restore the size of the background image if we changed it.
if oldw:
self._bgimage.width = oldw
self._bgimage.height = oldh
# Done.
return s
|
def preview_string(self, page, format='png', dpi=72, background_colour=0xFFFFFF)
|
Render a preview image of a page as a string.
Parameters
----------
page: positive integer
Which page to render. Must be in the range [1, page_count]
format: string
The image format to use for the preview. ReportLab uses the Python
Imaging Library (PIL) internally, so any PIL format should be
supported.
dpi: positive real
The dots-per-inch to use when rendering.
background_colour: Hex colour specification
What color background to use.
Notes
-----
If you are creating this sheet for a preview only, you can pass the
pages_to_draw parameter to the constructor to avoid the drawing function
being called for all the labels on pages you'll never look at. If you
preview a page you did not tell the sheet to draw, you will get a blank
image.
Raises
------
ValueError:
If the page number is not valid.
| 3.978232
| 3.957016
| 1.005362
|
boxes = {}
# Check the parameters.
if mode not in ('fraction', 'actual'):
raise ValueError("Unknown mode {0}.".format(mode))
if output not in ('dict', 'json'):
raise ValueError("Unknown output {0}.".format(output))
# Iterate over the rows.
for row in range(1, self.rows + 1):
# Top and bottom of all labels in the row.
top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap))
bottom = top + self.label_height
# Now iterate over all columns in this row.
for column in range(1, self.columns + 1):
# Left and right position of this column.
left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap))
right = left + self.label_width
# Output in the appropriate mode format.
if mode == 'fraction':
box = {
'top': top / self.sheet_height,
'bottom': bottom / self.sheet_height,
'left': left / self.sheet_width,
'right': right / self.sheet_width,
}
elif mode == 'actual':
box = {'top': top, 'bottom': bottom, 'left': left, 'right': right}
# Add to the collection.
if output == 'json':
boxes['{0:d}x{1:d}'.format(row, column)] = box
box['top'] = float(box['top'])
box['bottom'] = float(box['bottom'])
box['left'] = float(box['left'])
box['right'] = float(box['right'])
else:
boxes[(row, column)] = box
# Done.
if output == 'json':
return json.dumps(boxes)
return boxes
|
def bounding_boxes(self, mode='fraction', output='dict')
|
Get the bounding boxes of the labels on a page.
Parameters
----------
mode: 'fraction', 'actual'
If 'fraction', the bounding boxes are expressed as a fraction of the
height and width of the sheet. If 'actual', they are the actual
position of the labels in millimetres from the top-left of the
sheet.
output: 'dict', 'json'
If 'dict', a dictionary with label identifier tuples (row, column)
as keys and a dictionary with 'left', 'right', 'top', and 'bottom'
entries as the values.
If 'json', a JSON encoded string which represents a dictionary with
keys of the string format 'rowxcolumn' and each value being a
bounding box dictionary with 'left', 'right', 'top', and 'bottom'
entries.
Returns
-------
The bounding boxes in the format set by the output parameter.
| 2.05872
| 1.957114
| 1.051916
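Bounding boxes only need the specification, not a full sheet. Below is a sketch of both output modes, assuming the same pylabels Specification class.

import labels

specs = labels.Specification(210, 297, 2, 8, 90, 25)  # hypothetical layout

# Fractional coordinates keyed by (row, column) tuples.
boxes = specs.bounding_boxes()
print(boxes[(1, 1)]['top'], boxes[(1, 1)]['left'])

# Millimetre coordinates as a JSON string keyed by 'rowxcolumn'.
print(specs.bounding_boxes(mode='actual', output='json'))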
|
others = []
spans = [i for i in findBalanced(text, open, close)]
spanscopy = copy(spans)
for i in range(len(spans)):
start, end = spans[i]
o = text[start:end]
ol = o.lower()
if 'vaata|' in ol or 'wikitable' in ol:
spanscopy.remove(spans[i])
continue
others.append(o)
text = dropSpans(spanscopy, text)
return text, others
|
def templatesCollector(text, open, close)
|
Leaves related articles and wikitables in place.
| 5.370447
| 5.229371
| 1.026978
|
seen_layers = set()
for k, v in kwargs.items():
if k not in LEGAL_ARGUMENTS:
raise ValueError('Illegal argument <{0}>!'.format(k))
if k in AESTHETICS:
if v in seen_layers:
raise ValueError('Layer <{0}> mapped for more than a single aesthetic!'.format(v))
seen_layers.add(v)
if k in VALUES:
if not isinstance(v, six.string_types) and not isinstance(v, list):
raise ValueError('Value <{0}> must be either string or list'.format(k))
if isinstance(v, list):
if len(v) == 0:
raise ValueError('Rules cannot be empty list')
for rule_matcher, rule_value in v:
if not isinstance(rule_matcher, six.string_types) or not isinstance(rule_value, six.string_types):
raise ValueError('Rule tuple elements must be strings')
|
def assert_legal_arguments(kwargs)
|
Assert that PrettyPrinter arguments are correct.
Raises
------
ValueError
In case there are unknown arguments or a single layer is mapped to more than one aesthetic.
| 2.895252
| 2.551737
| 1.13462
|
aesthetics = {}
values = {}
for aes in AESTHETICS:
if aes in kwargs:
aesthetics[aes] = kwargs[aes]
val_name = AES_VALUE_MAP[aes]
# map the user-provided CSS value or use the default
values[aes] = kwargs.get(val_name, DEFAULT_VALUE_MAP[aes])
return aesthetics, values
|
def parse_arguments(kwargs)
|
Function that parses PrettyPrinter arguments.
Detects which aesthetics are mapped to which layers
and collects user-provided values.
Parameters
----------
kwargs: dict
The keyword arguments to PrettyPrinter.
Returns
-------
dict, dict
First dictionary is aesthetic to layer mapping.
Second dictionary is aesthetic to user value mapping.
| 4.540017
| 3.871659
| 1.172628
|