sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def update_bbox(self):
"""
Recalculates the bbox region attribute for the entire file.
Useful after adding and/or removing features.
No need to use this method just for saving, because saving
automatically updates the bbox.
"""
xmins, ymins, xmaxs, ymaxs = zip(*(feat.geometry.bbox for feat in self if feat.geometry.type != "Null"))
bbox = [min(xmins), min(ymins), max(xmaxs), max(ymaxs)]
self._data["bbox"] = bbox | Recalculates the bbox region attribute for the entire file.
Useful after adding and/or removing features.
No need to use this method just for saving, because saving
automatically updates the bbox. | entailment |
def add_unique_id(self):
"""
Adds a unique id property to each feature.
Raises:
- An Exception if any of the features already
have an "id" field.
"""
uid = 0
for feature in self._data["features"]:
if feature["properties"].get("id"):
raise Exception("one of the features already had an id field")
feature["properties"]["id"] = uid
uid += 1 | Adds a unique id property to each feature.
Raises:
- An Exception if any of the features already
have an "id" field. | entailment |
def add_all_bboxes(self):
"""
Calculates and adds a bbox attribute to the geojson entry of all feature geometries, updating any existing ones.
"""
for feature in self:
if feature.geometry.type != "Null":
feature.geometry._data["bbox"] = Feature(feature).geometry.bbox | Calculates and adds a bbox attribute to the geojson entry of all feature geometries, updating any existing ones. | entailment |
def save(self, savepath, **kwargs):
"""
Saves the geojson instance to file. To save with a different text encoding use the 'encoding' argument.
Parameters:
- **savepath**: Filepath to save the file.
"""
self.update_bbox()
tempfile = open(savepath,"w")
json.dump(self._data, tempfile, **kwargs)
tempfile.close() | Saves the geojson instance to file. To save with a different text encoding use the 'encoding' argument.
Parameters:
- **savepath**: Filepath to save the file. | entailment |
def _loadfilepath(self, filepath, **kwargs):
"""This loads a geojson file into a geojson python
dictionary using the json module.
Note: to load with a different text encoding use the encoding argument.
"""
with open(filepath, "r") as f:
data = json.load(f, **kwargs)
return data | This loads a geojson file into a geojson python
dictionary using the json module.
Note: to load with a different text encoding use the encoding argument. | entailment |
def _prepdata(self):
"""Adds potentially missing items to the geojson dictionary"""
# if missing, compute and add bbox
if not self._data.get("bbox"):
self.update_bbox()
# if missing, set crs to default crs (WGS84), see http://geojson.org/geojson-spec.html
if not self._data.get("crs"):
self._data["crs"] = {"type":"name",
"properties":{"name":"urn:ogc:def:crs:OGC:2:84"}} | Adds potentially missing items to the geojson dictionary | entailment |
def hash_parameters(keys, minimize=True, to_int=None):
"""
Calculates the parameters for a perfect hash. The result is returned
as a HashInfo tuple which has the following fields:
t
The "table parameter". This is the minimum side length of the
table used to create the hash. In practice, t**2 is the maximum
size of the output hash.
slots
The original inputs mapped to a vector. This is the hash
function.
r
The displacement vector. This is the displacement of the given
row in the result vector. To find a given value, use
``x + r[y]``.
offset
The amount by which to offset all values (once converted to ints)
to_int
A function that converts the input to an int (if given).
Keyword parameters:
``minimize``
Whether or not offset all integer keys internally by the minimum
value. This typically results in smaller output.
``to_int``
A callable that converts the input keys to ints. If not
specified, all keys should be given as ints.
>>> hash_parameters([1, 5, 7], minimize=False)
HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None)
>>> hash_parameters([1, 5, 7])
HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None)
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> phash = hash_parameters(l)
>>> phash.slots
(18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15)
For some values, the displacement vector will be rather empty:
>>> hash_parameters('Andrea', to_int=ord).r
(1, None, None, None, 0, -3, 4, None)
"""
# If to_int is not assigned, simply use the identity function.
if to_int is None:
to_int = __identity
key_to_original = {to_int(original): original for original in keys}
# Create a set of all items to be hashed.
items = list(key_to_original.keys())
if minimize:
offset = 0 - min(items)
items = frozenset(x + offset for x in items)
else:
offset = 0
# 1. Start with a square array (not stored) that is t units on each side.
# Choose a t such that t * t >= max(S)
t = choose_best_t(items)
assert t * t > max(items) and t * t >= len(items)
# 2. Place each key K in the square at location (x,y), where
# x = K mod t, y = K / t.
row_queue = place_items_in_square(items, t)
# 3. Arrange rows so that they'll fit into one row and generate a
# displacement vector.
final_row, displacement_vector = arrange_rows(row_queue, t)
# Translate the internal keys to their original items.
slots = tuple(key_to_original[item - offset] if item is not None else None
for item in final_row)
# Return the parameters
return HashInfo(
t=t,
slots=slots,
r=displacement_vector,
offset=offset,
to_int=to_int if to_int is not __identity else None
) | Calculates the parameters for a perfect hash. The result is returned
as a HashInfo tuple which has the following fields:
t
The "table parameter". This is the minimum side length of the
table used to create the hash. In practice, t**2 is the maximum
size of the output hash.
slots
The original inputs mapped to a vector. This is the hash
function.
r
The displacement vector. This is the displacement of the given
row in the result vector. To find a given value, use
``x + r[y]``.
offset
The amount by which to offset all values (once converted to ints)
to_int
A function that converts the input to an int (if given).
Keyword parameters:
``minimize``
Whether or not offset all integer keys internally by the minimum
value. This typically results in smaller output.
``to_int``
A callable that converts the input keys to ints. If not
specified, all keys should be given as ints.
>>> hash_parameters([1, 5, 7], minimize=False)
HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None)
>>> hash_parameters([1, 5, 7])
HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None)
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> phash = hash_parameters(l)
>>> phash.slots
(18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15)
For some values, the displacement vector will be rather empty:
>>> hash_parameters('Andrea', to_int=ord).r
(1, None, None, None, 0, -3, 4, None) | entailment |
def place_items_in_square(items, t):
"""
Returns a list of rows that are stored as a priority queue to be
used with heapq functions.
>>> place_items_in_square([1,5,7], 4)
[(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
>>> place_items_in_square([1,5,7], 3)
[(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
"""
# A minheap (because that's all that heapq supports :/)
# of the length of each row. Why this is important is because
# we'll be popping the largest rows when figuring out row displacements.
# Each item is a tuple of (t - |row|, y, [(xpos_1, item_1), ...]).
# Until the call to heapq.heapify(), the rows are ordered in
# increasing row number (y).
rows = [(t, y, []) for y in range(t)]
for item in items:
# Calculate the cell the item should fall in.
x = item % t
y = item // t
# Push the item to its corresponding row...
inverse_length, _, row_contents = rows[y]
heapq.heappush(row_contents, (x, item))
# Ensure the heap key is kept intact.
rows[y] = inverse_length - 1, y, row_contents
assert all(inv_len == t - len(rows) for inv_len, _, rows in rows)
heapq.heapify(rows)
# Return only rows that are populated.
return [row for row in rows if row[2]] | Returns a list of rows that are stored as a priority queue to be
used with heapq functions.
>>> place_items_in_square([1,5,7], 4)
[(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
>>> place_items_in_square([1,5,7], 3)
[(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])] | entailment |
def arrange_rows(row_queue, t):
"""
Takes a priority queue as generated by place_items_in_square().
Arranges the items from its conceptual square to one list. Returns
both the resultant vector, plus the displacement vector, to be used
in the final output hash function.
>>> rows = [(2, 1, [(0, 1), (1, 5)]), (3, 3, [(1, 7)])]
>>> result, displacements = arrange_rows(rows, 4)
>>> result
(1, 5, 7)
>>> displacements
(None, 0, None, 1)
>>> rows = [(1, 1, [(0, 1), (2, 7)]), (2, 2, [(1, 5)])]
>>> result, displacements = arrange_rows(rows, 3)
>>> result
(1, 5, 7)
>>> displacements
(None, 0, 0)
"""
# Create a set of all of the unoccupied columns.
max_columns = t ** 2
cols = ((x, True) for x in range(max_columns))
unoccupied_columns = collections.OrderedDict(cols)
# Create the resultant and displacement vectors.
result = [None] * max_columns
displacements = [None] * t
while row_queue:
# Get the next row to place.
_inverse_length, y, row = heapq.heappop(row_queue)
offset = find_first_fit(unoccupied_columns, row, max_columns)
# Calculate the offset of the first item.
first_item_x = row[0][0]
displacements[y] = offset
for x, item in row:
actual_x = x + offset
result[actual_x] = item
del unoccupied_columns[actual_x]
return tuple(trim_nones_from_right(result)), tuple(displacements) | Takes a priority queue as generated by place_items_in_square().
Arranges the items from its conceptual square to one list. Returns
both the resultant vector, plus the displacement vector, to be used
in the final output hash function.
>>> rows = [(2, 1, [(0, 1), (1, 5)]), (3, 3, [(1, 7)])]
>>> result, displacements = arrange_rows(rows, 4)
>>> result
(1, 5, 7)
>>> displacements
(None, 0, None, 1)
>>> rows = [(1, 1, [(0, 1), (2, 7)]), (2, 2, [(1, 5)])]
>>> result, displacements = arrange_rows(rows, 3)
>>> result
(1, 5, 7)
>>> displacements
(None, 0, 0) | entailment |
def find_first_fit(unoccupied_columns, row, row_length):
"""
Finds the first index that the row's items can fit.
"""
for free_col in unoccupied_columns:
# The offset is that such that the first item goes in the free column.
first_item_x = row[0][0]
offset = free_col - first_item_x
if check_columns_fit(unoccupied_columns, row, offset, row_length):
return offset
raise ValueError("Row cannot bossily fit in %r: %r"
% (list(unoccupied_columns.keys()), row)) | Finds the first index that the row's items can fit. | entailment |
def check_columns_fit(unoccupied_columns, row, offset, row_length):
"""
Checks if all the occupied columns in the row fit in the indices
given by free columns.
>>> check_columns_fit({0,1,2,3}, [(0, True), (2, True)], 0, 4)
True
>>> check_columns_fit({0,2,3}, [(2, True), (3, True)], 0, 4)
True
>>> check_columns_fit({}, [(2, True), (3, True)], 0, 4)
False
>>> check_columns_fit({0}, [(2, True)], 2, 4)
True
>>> check_columns_fit({0}, [(3, True)], 2, 4)
False
"""
for index, item in row:
adjusted_index = (index + offset) % row_length
# Check if the index is in the appropriate place.
if adjusted_index not in unoccupied_columns:
return False
return True | Checks if all the occupied columns in the row fit in the indices
given by free columns.
>>> check_columns_fit({0,1,2,3}, [(0, True), (2, True)], 0, 4)
True
>>> check_columns_fit({0,2,3}, [(2, True), (3, True)], 0, 4)
True
>>> check_columns_fit({}, [(2, True), (3, True)], 0, 4)
False
>>> check_columns_fit({0}, [(2, True)], 2, 4)
True
>>> check_columns_fit({0}, [(3, True)], 2, 4)
False | entailment |
def print_square(row_queue, t):
"""
Prints a row queue as its conceptual square array.
"""
occupied_rows = {y: row for _, y, row in row_queue}
empty_row = ', '.join('...' for _ in range(t))
for y in range(t):
print('|', end=' ')
if y not in occupied_rows:
print(empty_row, end=' ')
else:
row = dict(occupied_rows[y])
all_cols = ('%3d' % row[x] if x in row else '...'
for x in range(t))
print(', '.join(all_cols), end=' ')
print("|") | Prints a row queue as its conceptual square array. | entailment |
def trim_nones_from_right(xs):
"""
Returns the list without all the Nones at the right end.
>>> trim_nones_from_right([1, 2, None, 4, None, 5, None, None])
[1, 2, None, 4, None, 5]
"""
# Find the first element that does not contain none.
for i, item in enumerate(reversed(xs)):
if item is not None:
break
return xs[:-i] | Returns the list without all the Nones at the right end.
>>> trim_nones_from_right([1, 2, None, 4, None, 5, None, None])
[1, 2, None, 4, None, 5] | entailment |
def make_hash(keys, **kwargs):
"""
Creates a perfect hash function from the given keys. For a
description of the keyword arguments see :py:func:`hash_parameters`.
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> hf = make_hash(l)
>>> hf(19)
1
>>> hash_parameters(l).slots[1]
19
"""
params = hash_parameters(keys, **kwargs)
t = params.t
r = params.r
offset = params.offset
to_int = params.to_int if params.to_int else __identity
def perfect_hash(x):
val = to_int(x) + offset
x = val % t
y = val // t
return x + r[y]
# Undocumented properties, but used in make_dict()...
perfect_hash.length = len(params.slots)
perfect_hash.slots = params.slots
return perfect_hash | Creates a perfect hash function from the given keys. For a
description of the keyword arguments see :py:func:`hash_parameters`.
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> hf = make_hash(l)
>>> hf(19)
1
>>> hash_parameters(l).slots[1]
19 | entailment |
def make_dict(name, keys, **kwargs):
"""
Creates a dictionary-like mapping class that uses perfect hashing.
``name`` is the proper class name of the returned class. See
``hash_parameters()`` for documentation on all arguments after
``name``.
>>> MyDict = make_dict('MyDict', '+-<>[],.', to_int=ord)
>>> d = MyDict([('+', 1), ('-', 2)])
>>> d[','] = 3
>>> d
MyDict([('+', 1), (',', 3), ('-', 2)])
>>> del d['+']
>>> del d['.']
Traceback (most recent call last):
...
KeyError: '.'
>>> len(d)
2
"""
hash_func = make_hash(keys, **kwargs)
slots = hash_func.slots
# Create a docstring that at least describes where the class came from...
doc = """
Dictionary-like object that uses perfect hashing. This class was
generated by `%s.%s(%r, ...)`.
""" % (__name__, make_dict.__name__, name)
return create_dict_subclass(name, hash_func, slots, doc) | Creates a dictionary-like mapping class that uses perfect hashing.
``name`` is the proper class name of the returned class. See
``hash_parameters()`` for documentation on all arguments after
``name``.
>>> MyDict = make_dict('MyDict', '+-<>[],.', to_int=ord)
>>> d = MyDict([('+', 1), ('-', 2)])
>>> d[','] = 3
>>> d
MyDict([('+', 1), (',', 3), ('-', 2)])
>>> del d['+']
>>> del d['.']
Traceback (most recent call last):
...
KeyError: '.'
>>> len(d)
2 | entailment |
def _get_timestamp(dirname_full, remove):
"""
Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one.
"""
record_filename = os.path.join(dirname_full, RECORD_FILENAME)
if not os.path.exists(record_filename):
return None
mtime = os.stat(record_filename).st_mtime
mtime_str = datetime.fromtimestamp(mtime)
print('Found timestamp {}:{}'.format(dirname_full, mtime_str))
if Settings.record_timestamp and remove:
OLD_TIMESTAMPS.add(record_filename)
return mtime | Get the timestamp from the timestamp file.
Optionally mark it for removal if we're going to write another one. | entailment |
def _get_timestamp_cached(dirname_full, remove):
"""
Get the timestamp from the cache or fill the cache
Much quicker than reading the same files over and over
"""
if dirname_full not in TIMESTAMP_CACHE:
mtime = _get_timestamp(dirname_full, remove)
TIMESTAMP_CACHE[dirname_full] = mtime
return TIMESTAMP_CACHE[dirname_full] | Get the timestamp from the cache or fill the cache
Much quicker than reading the same files over and over | entailment |
def _max_timestamps(dirname_full, remove, compare_tstamp):
"""Compare a timestamp file to one passed in. Get the max."""
tstamp = _get_timestamp_cached(dirname_full, remove)
return max_none((tstamp, compare_tstamp)) | Compare a timestamp file to one passed in. Get the max. | entailment |
def _get_parent_timestamp(dirname, mtime):
"""
Get the timestamps up the directory tree. All the way to root.
Because they affect every subdirectory.
"""
parent_pathname = os.path.dirname(dirname)
# max between the parent timestamp the one passed in
mtime = _max_timestamps(parent_pathname, False, mtime)
if dirname != os.path.dirname(parent_pathname):
# this is only called if we're not at the root
mtime = _get_parent_timestamp(parent_pathname, mtime)
return mtime | Get the timestamps up the directory tree. All the way to root.
Because they affect every subdirectory. | entailment |
def get_walk_after(filename, optimize_after=None):
"""
Figure out the which mtime to check against.
If we have to look up the path return that.
"""
if Settings.optimize_after is not None:
return Settings.optimize_after
dirname = os.path.dirname(filename)
if optimize_after is None:
optimize_after = _get_parent_timestamp(dirname, optimize_after)
return _max_timestamps(dirname, True, optimize_after) | Figure out the which mtime to check against.
If we have to look up the path return that. | entailment |
def record_timestamp(pathname_full):
"""Record the timestamp of running in a dotfile."""
if Settings.test or Settings.list_only or not Settings.record_timestamp:
return
if not Settings.follow_symlinks and os.path.islink(pathname_full):
if Settings.verbose:
print('Not setting timestamp because not following symlinks')
return
if not os.path.isdir(pathname_full):
if Settings.verbose:
print('Not setting timestamp for a non-directory')
return
record_filename_full = os.path.join(pathname_full, RECORD_FILENAME)
try:
with open(record_filename_full, 'w'):
os.utime(record_filename_full, None)
if Settings.verbose:
print("Set timestamp: {}".format(record_filename_full))
for fname in OLD_TIMESTAMPS:
if fname.startswith(pathname_full) and \
fname != record_filename_full:
# only remove timestamps below the curent path
# but don't remove the timestamp we just set!
os.remove(fname)
if Settings.verbose:
print('Removed old timestamp: {}'.format(fname))
except IOError:
print("Could not set timestamp in {}".format(pathname_full)) | Record the timestamp of running in a dotfile. | entailment |
def walk_comic_archive(filename_full, image_format, optimize_after):
"""
Optimize a comic archive.
This is done mostly inline to use the master processes process pool
for workers. And to avoid calling back up into walk from a dedicated
module or format processor. It does mean that we block on uncompress
and on waiting for the contents subprocesses to compress.
"""
# uncompress archive
tmp_dir, report_stats = comic.comic_archive_uncompress(filename_full,
image_format)
if tmp_dir is None and report_stats:
return Settings.pool.apply_async(_comic_archive_skip,
args=report_stats)
# optimize contents of archive
archive_mtime = os.stat(filename_full).st_mtime
result_set = walk_dir(tmp_dir, optimize_after, True, archive_mtime)
# wait for archive contents to optimize before recompressing
nag_about_gifs = False
for result in result_set:
res = result.get()
nag_about_gifs = nag_about_gifs or res.nag_about_gifs
# recompress archive
args = (filename_full, image_format, Settings, nag_about_gifs)
return Settings.pool.apply_async(comic.comic_archive_compress,
args=(args,)) | Optimize a comic archive.
This is done mostly inline to use the master processes process pool
for workers. And to avoid calling back up into walk from a dedicated
module or format processor. It does mean that we block on uncompress
and on waiting for the contents subprocesses to compress. | entailment |
def _is_skippable(filename_full):
"""Handle things that are not optimizable files."""
# File types
if not Settings.follow_symlinks and os.path.islink(filename_full):
return True
if os.path.basename(filename_full) == timestamp.RECORD_FILENAME:
return True
if not os.path.exists(filename_full):
if Settings.verbose:
print(filename_full, 'was not found.')
return True
return False | Handle things that are not optimizable files. | entailment |
def walk_file(filename, walk_after, recurse=None, archive_mtime=None):
"""Optimize an individual file."""
filename = os.path.normpath(filename)
result_set = set()
if _is_skippable(filename):
return result_set
walk_after = timestamp.get_walk_after(filename, walk_after)
# File is a directory
if os.path.isdir(filename):
return walk_dir(filename, walk_after, recurse, archive_mtime)
if _is_older_than_timestamp(filename, walk_after, archive_mtime):
return result_set
# Check image format
try:
image_format = detect_format.detect_file(filename)
except Exception:
res = Settings.pool.apply_async(stats.ReportStats,
(filename,),
{'error': "Detect Format"})
result_set.add(res)
image_format = False
if not image_format:
return result_set
if Settings.list_only:
# list only
print("{}: {}".format(filename, image_format))
return result_set
if detect_format.is_format_selected(image_format, comic.FORMATS,
comic.PROGRAMS):
# comic archive
result = walk_comic_archive(filename, image_format, walk_after)
else:
# regular image
args = [filename, image_format, Settings]
result = Settings.pool.apply_async(optimize.optimize_image,
args=(args,))
result_set.add(result)
return result_set | Optimize an individual file. | entailment |
def walk_dir(dir_path, walk_after, recurse=None, archive_mtime=None):
"""Recursively optimize a directory."""
if recurse is None:
recurse = Settings.recurse
result_set = set()
if not recurse:
return result_set
for root, _, filenames in os.walk(dir_path):
for filename in filenames:
filename_full = os.path.join(root, filename)
try:
results = walk_file(filename_full, walk_after, recurse,
archive_mtime)
result_set = result_set.union(results)
except Exception:
print("Error with file: {}".format(filename_full))
raise
return result_set | Recursively optimize a directory. | entailment |
def _walk_all_files():
"""
Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files.
"""
# Init records
record_dirs = set()
result_set = set()
for filename in Settings.paths:
# Record dirs to put timestamps in later
filename_full = os.path.abspath(filename)
if Settings.recurse and os.path.isdir(filename_full):
record_dirs.add(filename_full)
walk_after = timestamp.get_walk_after(filename_full)
results = walk_file(filename_full, walk_after, Settings.recurse)
result_set = result_set.union(results)
bytes_in = 0
bytes_out = 0
nag_about_gifs = False
errors = []
for result in result_set:
res = result.get()
if res.error:
errors += [(res.final_filename, res.error)]
continue
bytes_in += res.bytes_in
bytes_out += res.bytes_out
nag_about_gifs = nag_about_gifs or res.nag_about_gifs
return record_dirs, bytes_in, bytes_out, nag_about_gifs, errors | Optimize the files from the arugments list in two batches.
One for absolute paths which are probably outside the current
working directory tree and one for relative files. | entailment |
def run():
"""Use preconfigured settings to optimize files."""
# Setup Multiprocessing
# manager = multiprocessing.Manager()
Settings.pool = multiprocessing.Pool(Settings.jobs)
# Optimize Files
record_dirs, bytes_in, bytes_out, nag_about_gifs, errors = \
_walk_all_files()
# Shut down multiprocessing
Settings.pool.close()
Settings.pool.join()
# Write timestamps
for filename in record_dirs:
timestamp.record_timestamp(filename)
# Finish by reporting totals
stats.report_totals(bytes_in, bytes_out, nag_about_gifs, errors) | Use preconfigured settings to optimize files. | entailment |
def replace_ext(filename, new_ext):
"""Replace the file extention."""
filename_base = os.path.splitext(filename)[0]
new_filename = '{}.{}'.format(filename_base, new_ext)
return new_filename | Replace the file extention. | entailment |
def _cleanup_after_optimize_aux(filename, new_filename, old_format,
new_format):
"""
Replace old file with better one or discard new wasteful file.
"""
bytes_in = 0
bytes_out = 0
final_filename = filename
try:
bytes_in = os.stat(filename).st_size
bytes_out = os.stat(new_filename).st_size
if (bytes_out > 0) and ((bytes_out < bytes_in) or Settings.bigger):
if old_format != new_format:
final_filename = replace_ext(filename,
new_format.lower())
rem_filename = filename + REMOVE_EXT
if not Settings.test:
os.rename(filename, rem_filename)
os.rename(new_filename, final_filename)
os.remove(rem_filename)
else:
os.remove(new_filename)
else:
os.remove(new_filename)
bytes_out = bytes_in
except OSError as ex:
print(ex)
return final_filename, bytes_in, bytes_out | Replace old file with better one or discard new wasteful file. | entailment |
def cleanup_after_optimize(filename, new_filename, old_format, new_format):
"""
Replace old file with better one or discard new wasteful file.
And report results using the stats module.
"""
final_filename, bytes_in, bytes_out = _cleanup_after_optimize_aux(
filename, new_filename, old_format, new_format)
return stats.ReportStats(final_filename, bytes_count=(bytes_in, bytes_out)) | Replace old file with better one or discard new wasteful file.
And report results using the stats module. | entailment |
def parse_reqs(filename):
"""Parse setup requirements from a requirements.txt file."""
install_reqs = parse_requirements(filename, session=False)
return [str(ir.req) for ir in install_reqs] | Parse setup requirements from a requirements.txt file. | entailment |
def get_req_list():
"""Get the requirements by weather we're building develop or not."""
req_list = parse_reqs(REQUIREMENTS['prod'])
if len(sys.argv) > 2 and sys.argv[2] == ('develop'):
req_list += parse_reqs(REQUIREMENTS['dev'])
return req_list | Get the requirements by weather we're building develop or not. | entailment |
def get_comic_format(filename):
"""Return the comic format if it is a comic archive."""
image_format = None
filename_ext = os.path.splitext(filename)[-1].lower()
if filename_ext in _COMIC_EXTS:
if zipfile.is_zipfile(filename):
image_format = _CBZ_FORMAT
elif rarfile.is_rarfile(filename):
image_format = _CBR_FORMAT
return image_format | Return the comic format if it is a comic archive. | entailment |
def _get_archive_tmp_dir(filename):
"""Get the name of the working dir to use for this filename."""
head, tail = os.path.split(filename)
return os.path.join(head, _ARCHIVE_TMP_DIR_TEMPLATE.format(tail)) | Get the name of the working dir to use for this filename. | entailment |
def comic_archive_uncompress(filename, image_format):
"""
Uncompress comic archives.
Return the name of the working directory we uncompressed into.
"""
if not Settings.comics:
report = ['Skipping archive file: {}'.format(filename)]
return None, ReportStats(filename, report=report)
if Settings.verbose:
truncated_filename = stats.truncate_cwd(filename)
print("Extracting {}...".format(truncated_filename), end='')
# create the tmpdir
tmp_dir = _get_archive_tmp_dir(filename)
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
# extract archvie into the tmpdir
if image_format == _CBZ_FORMAT:
with zipfile.ZipFile(filename, 'r') as zfile:
zfile.extractall(tmp_dir)
elif image_format == _CBR_FORMAT:
with rarfile.RarFile(filename, 'r') as rfile:
rfile.extractall(tmp_dir)
else:
report = '{} {} is not a good format'.format(filename, image_format)
return None, ReportStats(filename, report=report)
if Settings.verbose:
print('done')
return tmp_dir, None | Uncompress comic archives.
Return the name of the working directory we uncompressed into. | entailment |
def _comic_archive_write_zipfile(new_filename, tmp_dir):
"""Zip up the files in the tempdir into the new filename."""
if Settings.verbose:
print('Rezipping archive', end='')
with zipfile.ZipFile(new_filename, 'w',
compression=zipfile.ZIP_DEFLATED) as new_zf:
root_len = len(os.path.abspath(tmp_dir))
for r_d_f in os.walk(tmp_dir):
root = r_d_f[0]
filenames = r_d_f[2]
archive_root = os.path.abspath(root)[root_len:]
for fname in filenames:
fullpath = os.path.join(root, fname)
archive_name = os.path.join(archive_root, fname)
if Settings.verbose:
print('.', end='')
new_zf.write(fullpath, archive_name, zipfile.ZIP_DEFLATED) | Zip up the files in the tempdir into the new filename. | entailment |
def comic_archive_compress(args):
"""
Called back by every optimization inside a comic archive.
When they're all done it creates the new archive and cleans up.
"""
try:
filename, old_format, settings, nag_about_gifs = args
Settings.update(settings)
tmp_dir = _get_archive_tmp_dir(filename)
# archive into new filename
new_filename = files.replace_ext(filename, _NEW_ARCHIVE_SUFFIX)
_comic_archive_write_zipfile(new_filename, tmp_dir)
# Cleanup tmpdir
if os.path.isdir(tmp_dir):
if Settings.verbose:
print('.', end='')
shutil.rmtree(tmp_dir)
if Settings.verbose:
print('done.')
report_stats = files.cleanup_after_optimize(
filename, new_filename, old_format, _CBZ_FORMAT)
report_stats.nag_about_gifs = nag_about_gifs
stats.report_saved(report_stats)
return report_stats
except Exception as exc:
print(exc)
traceback.print_exc(exc)
raise exc | Called back by every optimization inside a comic archive.
When they're all done it creates the new archive and cleans up. | entailment |
def mozjpeg(ext_args):
"""Create argument list for mozjpeg."""
args = copy.copy(_MOZJPEG_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
extern.run_ext(args)
return _JPEG_FORMAT | Create argument list for mozjpeg. | entailment |
def jpegtran(ext_args):
"""Create argument list for jpegtran."""
args = copy.copy(_JPEGTRAN_ARGS)
if Settings.destroy_metadata:
args += ["-copy", "none"]
else:
args += ["-copy", "all"]
if Settings.jpegtran_prog:
args += ["-progressive"]
args += ['-outfile']
args += [ext_args.new_filename, ext_args.old_filename]
extern.run_ext(args)
return _JPEG_FORMAT | Create argument list for jpegtran. | entailment |
def jpegrescan(ext_args):
"""Run the EXTERNAL program jpegrescan."""
args = copy.copy(_JPEGRESCAN_ARGS)
if Settings.jpegrescan_multithread:
args += ['-t']
if Settings.destroy_metadata:
args += ['-s']
args += [ext_args.old_filename, ext_args.new_filename]
extern.run_ext(args)
return _JPEG_FORMAT | Run the EXTERNAL program jpegrescan. | entailment |
def get_arguments(args):
"""Parse the command line."""
usage = "%(prog)s [arguments] [image files]"
programs_str = ', '.join([prog.__name__ for prog in PROGRAMS])
description = "Uses "+programs_str+" if they are on the path."
parser = argparse.ArgumentParser(usage=usage, description=description)
parser.add_argument("-r", "--recurse", action="store_true",
dest="recurse", default=0,
help="Recurse down through directories ignoring the"
"image file arguments on the command line")
parser.add_argument("-v", "--verbose", action="count",
dest="verbose", default=0,
help="Display more output. -v (default) and -vv "
"(noisy)")
parser.add_argument("-Q", "--quiet", action="store_const",
dest="verbose", const=-1,
help="Display little to no output")
parser.add_argument("-a", "--enable_advpng", action="store_true",
dest="advpng", default=0,
help="Optimize with advpng (disabled by default)")
parser.add_argument("-c", "--comics", action="store_true",
dest="comics", default=0,
help="Also optimize comic book archives (cbz & cbr)")
parser.add_argument("-f", "--formats", action="store", dest="formats",
default=DEFAULT_FORMATS,
help="Only optimize images of the specifed '{}' "
"delimited formats from: {}".format(
FORMAT_DELIMETER,
', '.join(sorted(ALL_FORMATS))))
parser.add_argument("-O", "--disable_optipng", action="store_false",
dest="optipng", default=1,
help="Do not optimize with optipng")
parser.add_argument("-P", "--disable_pngout", action="store_false",
dest="pngout", default=1,
help="Do not optimize with pngout")
parser.add_argument("-J", "--disable_jpegrescan", action="store_false",
dest="jpegrescan", default=1,
help="Do not optimize with jpegrescan")
parser.add_argument("-E", "--disable_progressive", action="store_false",
dest="jpegtran_prog", default=1,
help="Don't try to reduce size by making "
"progressive JPEGs with jpegtran")
parser.add_argument("-Z", "--disable_mozjpeg", action="store_false",
dest="mozjpeg", default=1,
help="Do not optimize with mozjpeg")
parser.add_argument("-T", "--disable_jpegtran", action="store_false",
dest="jpegtran", default=1,
help="Do not optimize with jpegtran")
parser.add_argument("-G", "--disable_gifsicle", action="store_false",
dest="gifsicle", default=1,
help="disable optimizing animated GIFs")
parser.add_argument("-Y", "--disable_convert_type", action="store_const",
dest="to_png_formats",
const=png.FORMATS,
default=png.CONVERTABLE_FORMATS,
help="Do not convert other lossless formats like "
" {} to PNG when optimizing. By default, {}"
" does convert these formats to PNG".format(
', '.join(png.LOSSLESS_FORMATS),
PROGRAM_NAME))
parser.add_argument("-S", "--disable_follow_symlinks",
action="store_false",
dest="follow_symlinks", default=1,
help="disable following symlinks for files and "
"directories")
parser.add_argument("-b", "--bigger", action="store_true",
dest="bigger", default=0,
help="Save optimized files that are larger than "
"the originals")
parser.add_argument("-t", "--record_timestamp", action="store_true",
dest="record_timestamp", default=0,
help="Store the time of the optimization of full "
"directories in directory local dotfiles.")
parser.add_argument("-D", "--optimize_after", action="store",
dest="optimize_after", default=None,
help="only optimize files after the specified "
"timestamp. Supercedes -t")
parser.add_argument("-N", "--noop", action="store_true",
dest="test", default=0,
help="Do not replace files with optimized versions")
parser.add_argument("-l", "--list", action="store_true",
dest="list_only", default=0,
help="Only list files that would be optimized")
parser.add_argument("-V", "--version", action="version",
version=__version__,
help="display the version number")
parser.add_argument("-M", "--destroy_metadata", action="store_true",
dest="destroy_metadata", default=0,
help="*Destroy* metadata like EXIF and JFIF")
parser.add_argument("paths", metavar="path", type=str, nargs="+",
help="File or directory paths to optimize")
parser.add_argument("-j", "--jobs", type=int, action="store",
dest="jobs", default=multiprocessing.cpu_count(),
help="Number of parallel jobs to run simultaneously.")
return parser.parse_args(args) | Parse the command line. | entailment |
def process_arguments(arguments):
"""Recompute special cases for input arguments."""
Settings.update(arguments)
Settings.config_program_reqs(PROGRAMS)
Settings.verbose = arguments.verbose + 1
Settings.paths = set(arguments.paths)
if arguments.formats == DEFAULT_FORMATS:
Settings.formats = arguments.to_png_formats | \
jpeg.FORMATS | gif.FORMATS
else:
Settings.formats = set(
arguments.formats.upper().split(FORMAT_DELIMETER))
if arguments.comics:
Settings.formats = Settings.formats | comic.FORMATS
if arguments.optimize_after is not None:
try:
after_dt = dateutil.parser.parse(arguments.optimize_after)
arguments.optimize_after = time.mktime(after_dt.timetuple())
except Exception as ex:
print(ex)
print('Could not parse date to optimize after.')
exit(1)
if arguments.jobs < 1:
Settings.jobs = 1
# Make a rough guess about weather or not to invoke multithreding
# jpegrescan '-t' uses three threads
# one off multithread switch bcaseu this is the only one right now
files_in_paths = 0
non_file_in_paths = False
for filename in arguments.paths:
if os.path.isfile(filename):
files_in_paths += 1
else:
non_file_in_paths = True
Settings.jpegrescan_multithread = not non_file_in_paths and \
Settings.jobs - (files_in_paths*3) > -1
return arguments | Recompute special cases for input arguments. | entailment |
def run(args):
"""Process command line arguments and walk inputs."""
raw_arguments = get_arguments(args[1:])
process_arguments(raw_arguments)
walk.run()
return True | Process command line arguments and walk inputs. | entailment |
def does_external_program_run(prog, verbose):
"""Test to see if the external programs can be run."""
try:
with open('/dev/null') as null:
subprocess.call([prog, '-h'], stdout=null, stderr=null)
result = True
except OSError:
if verbose > 1:
print("couldn't run {}".format(prog))
result = False
return result | Test to see if the external programs can be run. | entailment |
def run_ext(args):
"""Run EXTERNAL program."""
try:
subprocess.check_call(args)
except subprocess.CalledProcessError as exc:
print(exc)
print(exc.cmd)
print(exc.returncode)
print(exc.output)
raise | Run EXTERNAL program. | entailment |
def _humanize_bytes(num_bytes, precision=1):
"""
Return a humanized string representation of a number of num_bytes.
from:
http://code.activestate.com/recipes/
577081-humanized-representation-of-a-number-of-num_bytes/
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB'
"""
if num_bytes == 0:
return 'no bytes'
if num_bytes == 1:
return '1 byte'
factored_bytes = 0
factor_suffix = 'bytes'
for factor, suffix in ABBREVS:
if num_bytes >= factor:
factored_bytes = num_bytes / factor
factor_suffix = suffix
break
if factored_bytes == 1:
precision = 0
return '{:.{prec}f} {}'.format(factored_bytes, factor_suffix,
prec=precision) | Return a humanized string representation of a number of num_bytes.
from:
http://code.activestate.com/recipes/
577081-humanized-representation-of-a-number-of-num_bytes/
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB' | entailment |
def new_percent_saved(report_stats):
"""Spit out how much space the optimization saved."""
size_in = report_stats.bytes_in
if size_in > 0:
size_out = report_stats.bytes_out
ratio = size_out / size_in
kb_saved = _humanize_bytes(size_in - size_out)
else:
ratio = 0
kb_saved = 0
percent_saved = (1 - ratio) * 100
result = '{:.{prec}f}% ({})'.format(percent_saved, kb_saved, prec=2)
return result | Spit out how much space the optimization saved. | entailment |
def truncate_cwd(full_filename):
"""Remove the cwd from the full filename."""
if full_filename.startswith(os.getcwd()):
truncated_filename = full_filename.split(os.getcwd(), 1)[1]
truncated_filename = truncated_filename.split(os.sep, 1)[1]
else:
truncated_filename = full_filename
return truncated_filename | Remove the cwd from the full filename. | entailment |
def report_saved(report_stats):
"""Record the percent saved & print it."""
if Settings.verbose:
report = ''
truncated_filename = truncate_cwd(report_stats.final_filename)
report += '{}: '.format(truncated_filename)
total = new_percent_saved(report_stats)
if total:
report += total
else:
report += '0%'
if Settings.test:
report += ' could be saved.'
if Settings.verbose > 1:
tools_report = ', '.join(report_stats.report_list)
if tools_report:
report += '\n\t' + tools_report
print(report) | Record the percent saved & print it. | entailment |
def report_totals(bytes_in, bytes_out, nag_about_gifs, errors):
"""Report the total number and percent of bytes saved."""
if bytes_in:
bytes_saved = bytes_in - bytes_out
percent_bytes_saved = bytes_saved / bytes_in * 100
msg = ''
if Settings.test:
if percent_bytes_saved > 0:
msg += "Could save"
elif percent_bytes_saved == 0:
msg += "Could even out for"
else:
msg += "Could lose"
else:
if percent_bytes_saved > 0:
msg += "Saved"
elif percent_bytes_saved == 0:
msg += "Evened out"
else:
msg = "Lost"
msg += " a total of {} or {:.{prec}f}%".format(
_humanize_bytes(bytes_saved), percent_bytes_saved, prec=2)
if Settings.verbose:
print(msg)
if Settings.test:
print("Test run did not change any files.")
else:
if Settings.verbose:
print("Didn't optimize any files.")
if nag_about_gifs and Settings.verbose:
print("Most animated GIFS would be better off converted to"
" HTML5 video")
if not errors:
return
print("Errors with the following files:")
for error in errors:
print("{}: {}".format(error[0], error[1])) | Report the total number and percent of bytes saved. | entailment |
def skip(type_name, filename):
"""Provide reporting statistics for a skipped file."""
report = ['Skipping {} file: {}'.format(type_name, filename)]
report_stats = ReportStats(filename, report=report)
return report_stats | Provide reporting statistics for a skipped file. | entailment |
def gifsicle(ext_args):
"""Run the EXTERNAL program gifsicle."""
args = _GIFSICLE_ARGS + [ext_args.new_filename]
extern.run_ext(args)
return _GIF_FORMAT | Run the EXTERNAL program gifsicle. | entailment |
def _optimize_image_external(filename, func, image_format, new_ext):
"""Optimize the file with the external function."""
new_filename = filename + TMP_SUFFIX + new_ext
new_filename = os.path.normpath(new_filename)
shutil.copy2(filename, new_filename)
ext_args = ExtArgs(filename, new_filename)
new_image_format = func(ext_args)
report_stats = files.cleanup_after_optimize(filename, new_filename,
image_format,
new_image_format)
percent = stats.new_percent_saved(report_stats)
if percent != 0:
report = '{}: {}'.format(func.__name__, percent)
else:
report = ''
report_stats.report_list.append(report)
return report_stats | Optimize the file with the external function. | entailment |
def _optimize_with_progs(format_module, filename, image_format):
"""
Use the correct optimizing functions in sequence.
And report back statistics.
"""
filesize_in = os.stat(filename).st_size
report_stats = None
for func in format_module.PROGRAMS:
if not getattr(Settings, func.__name__):
continue
report_stats = _optimize_image_external(
filename, func, image_format, format_module.OUT_EXT)
filename = report_stats.final_filename
if format_module.BEST_ONLY:
break
if report_stats is not None:
report_stats.bytes_in = filesize_in
else:
report_stats = stats.skip(image_format, filename)
return report_stats | Use the correct optimizing functions in sequence.
And report back statistics. | entailment |
def _get_format_module(image_format):
"""Get the format module to use for optimizing the image."""
format_module = None
nag_about_gifs = False
if detect_format.is_format_selected(image_format,
Settings.to_png_formats,
png.PROGRAMS):
format_module = png
elif detect_format.is_format_selected(image_format, jpeg.FORMATS,
jpeg.PROGRAMS):
format_module = jpeg
elif detect_format.is_format_selected(image_format, gif.FORMATS,
gif.PROGRAMS):
# this captures still GIFs too if not caught above
format_module = gif
nag_about_gifs = True
return format_module, nag_about_gifs | Get the format module to use for optimizing the image. | entailment |
def optimize_image(arg):
"""Optimize a given image from a filename."""
try:
filename, image_format, settings = arg
Settings.update(settings)
format_module, nag_about_gifs = _get_format_module(image_format)
if format_module is None:
if Settings.verbose > 1:
print(filename, image_format) # image.mode)
print("\tFile format not selected.")
return None
report_stats = _optimize_with_progs(format_module, filename,
image_format)
report_stats.nag_about_gifs = nag_about_gifs
stats.report_saved(report_stats)
return report_stats
except Exception as exc:
print(exc)
traceback.print_exc()
return stats.ReportStats(filename, error="Optimizing Image") | Optimize a given image from a filename. | entailment |
def _is_program_selected(progs):
"""Determine if the program is enabled in the settings."""
mode = False
for prog in progs:
if getattr(Settings, prog.__name__):
mode = True
break
return mode | Determine if the program is enabled in the settings. | entailment |
def is_format_selected(image_format, formats, progs):
"""Determine if the image format is selected by command line arguments."""
intersection = formats & Settings.formats
mode = _is_program_selected(progs)
result = (image_format in intersection) and mode
return result | Determine if the image format is selected by command line arguments. | entailment |
def _is_image_sequenced(image):
"""Determine if the image is a sequenced image."""
try:
image.seek(1)
image.seek(0)
result = True
except EOFError:
result = False
return result | Determine if the image is a sequenced image. | entailment |
def get_image_format(filename):
"""Get the image format."""
image = None
bad_image = 1
image_format = NONE_FORMAT
sequenced = False
try:
bad_image = Image.open(filename).verify()
image = Image.open(filename)
image_format = image.format
sequenced = _is_image_sequenced(image)
except (OSError, IOError, AttributeError):
pass
if sequenced:
image_format = gif.SEQUENCED_TEMPLATE.format(image_format)
elif image is None or bad_image or image_format == NONE_FORMAT:
image_format = ERROR_FORMAT
comic_format = comic.get_comic_format(filename)
if comic_format:
image_format = comic_format
if (Settings.verbose > 1) and image_format == ERROR_FORMAT and \
(not Settings.list_only):
print(filename, "doesn't look like an image or comic archive.")
return image_format | Get the image format. | entailment |
def detect_file(filename):
"""Decide what to do with the file."""
image_format = get_image_format(filename)
if image_format in Settings.formats:
return image_format
if image_format in (NONE_FORMAT, ERROR_FORMAT):
return None
if Settings.verbose > 1 and not Settings.list_only:
print(filename, image_format, 'is not a enabled image or '
'comic archive type.')
return None | Decide what to do with the file. | entailment |
def update(cls, settings):
"""Update settings with a dict."""
for key, val in settings.__dict__.items():
if key.startswith('_'):
continue
setattr(cls, key, val) | Update settings with a dict. | entailment |
def _set_program_defaults(cls, programs):
"""Run the external program tester on the required binaries."""
for program in programs:
val = getattr(cls, program.__name__) \
and extern.does_external_program_run(program.__name__,
Settings.verbose)
setattr(cls, program.__name__, val) | Run the external program tester on the required binaries. | entailment |
def config_program_reqs(cls, programs):
"""Run the program tester and determine if we can do anything."""
cls._set_program_defaults(programs)
do_png = cls.optipng or cls.pngout or cls.advpng
do_jpeg = cls.mozjpeg or cls.jpegrescan or cls.jpegtran
do_comics = cls.comics
if not do_png and not do_jpeg and not do_comics:
print("All optimizers are not available or disabled.")
exit(1) | Run the program tester and determine if we can do anything. | entailment |
def optipng(ext_args):
"""Run the external program optipng on the file."""
args = _OPTIPNG_ARGS + [ext_args.new_filename]
extern.run_ext(args)
return _PNG_FORMAT | Run the external program optipng on the file. | entailment |
def advpng(ext_args):
"""Run the external program advpng on the file."""
args = _ADVPNG_ARGS + [ext_args.new_filename]
extern.run_ext(args)
return _PNG_FORMAT | Run the external program advpng on the file. | entailment |
def pngout(ext_args):
"""Run the external program pngout on the file."""
args = _PNGOUT_ARGS + [ext_args.old_filename, ext_args.new_filename]
extern.run_ext(args)
return _PNG_FORMAT | Run the external program pngout on the file. | entailment |
def _from_func(cls, f, *args, _attr_scts=None, **kwargs):
"""Creates a function chain starting with the specified SCT (f), and its arguments."""
func_chain = cls(attr_scts=_attr_scts)
func_chain._stack.append([f, args, kwargs])
return func_chain | Creates a function chain starting with the specified SCT (f), and its arguments. | entailment |
def _nested_transactional(fn):
# type: (Callable) -> Callable
"""In a transactional method create a nested transaction."""
@wraps(fn)
def wrapped(self, *args, **kwargs):
# type: (SessionFactory) -> Any
try:
rv = fn(self, *args, **kwargs)
except _TransactionalPolicyViolationError as e:
getattr(self, _TX_HOLDER_ATTRIBUTE).rollback()
rv = e.result
return rv
return wrapped | In a transactional method create a nested transaction. | entailment |
def check_file(
state,
fname,
missing_msg="Did you create a file named `{}`?",
is_dir_msg="Want to check a file named `{}`, but found a directory.",
parse=True,
use_fs=True,
use_solution=False,
):
"""Test whether file exists, and make its contents the student code.
Note: this SCT fails if the file is a directory.
"""
if use_fs:
p = Path(fname)
if not p.exists():
state.report(Feedback(missing_msg.format(fname))) # test file exists
if p.is_dir():
state.report(Feedback(is_dir_msg.format(fname))) # test its not a dir
code = p.read_text()
else:
code = _get_fname(state, "student_code", fname)
if code is None:
state.report(Feedback(missing_msg.format(fname))) # test file exists
sol_kwargs = {"solution_code": None, "solution_ast": None}
if use_solution:
sol_code = _get_fname(state, "solution_code", fname)
if sol_code is None:
raise Exception("Solution code does not have file named: %s" % fname)
sol_kwargs["solution_code"] = sol_code
sol_kwargs["solution_ast"] = (
state.parse(sol_code, test=False) if parse else None
)
return state.to_child(
student_code=code,
student_ast=state.parse(code) if parse else None,
fname=fname,
**sol_kwargs
) | Test whether file exists, and make its contents the student code.
Note: this SCT fails if the file is a directory. | entailment |
def has_dir(state, fname, incorrect_msg="Did you create a directory named `{}`?"):
"""Test whether a directory exists."""
if not Path(fname).is_dir():
state.report(Feedback(incorrect_msg.format(fname)))
return state | Test whether a directory exists. | entailment |
def exec_task(task_path, data):
"""Execute task.
:param task_path: task path
:type task_path: str|Callable
:param data: task's data
:type data: Any
:return:
"""
if not data:
data = {'data': None, 'path': task_path}
elif not isinstance(data, (str, bytes)):
data = {'data': json.dumps(data, cls=RequestJSONEncoder),
'path': task_path}
else:
# Open the data from file, if necessary.
if data is not None and data.startswith("file://"):
with open(data[len("file://"):]) as f:
data = f.read()
data = {'data': data, 'path': task_path}
# Prepare the task.
job = Job(data)
(task, task_callable) = create_task(task_path)
with delegating_job_context(job, task, task_callable) as jc:
return jc.task_callable(jc.task_data) | Execute task.
:param task_path: task path
:type task_path: str|Callable
:param data: task's data
:type data: Any
:return: | entailment |
def dump(node, config):
"""
Convert a node tree to a simple nested dict
All steps in this conversion are configurable using DumpConfig
dump dictionary node: {"type": str, "data": dict}
"""
if config.is_node(node):
fields = OrderedDict()
for name in config.fields_iter(node):
attr = config.field_val(node, name)
if attr is not None:
fields[name] = dump(attr, config)
return {"type": config.node_type(node), "data": fields}
elif config.is_list(node):
return [dump(x, config) for x in config.list_iter(node)]
else:
return config.leaf_val(node) | Convert a node tree to a simple nested dict
All steps in this conversion are configurable using DumpConfig
dump dictionary node: {"type": str, "data": dict} | entailment |
def apply_pagination(self, q):
"""
Filters the query so that a given page is returned. The record count
must be set in advance.
:param q: Query to be paged.
:return: Paged query.
"""
# type: (Query)->Query
assert self.record_count >= 0, "Record count must be set."
return q.limit(self.page_size).offset(self.offset) | Filters the query so that a given page is returned. The record count
must be set in advance.
:param q: Query to be paged.
:return: Paged query. | entailment |
def paginate(self, q):
"""
Filters the query so that a given page is returned. The record count
is computed automatically from query.
:param q: Query to be paged.
:return: Paged query.
"""
self.record_count = q.count()
return self.apply_pagination(q).all() | Filters the query so that a given page is returned. The record count
is computed automatically from query.
:param q: Query to be paged.
:return: Paged query. | entailment |
def cluster(seqs, threshold=0.975, out_file=None, temp_dir=None, make_db=True,
quiet=False, threads=0, return_just_seq_ids=False, max_memory=800, debug=False):
'''
Perform sequence clustering with CD-HIT.
Args:
seqs (list): An iterable of sequences, in any format that `abutils.utils.sequence.Sequence()`
can handle
threshold (float): Clustering identity threshold. Default is `0.975`.
out_file (str): Path to the clustering output file. Default is to use
`tempfile.NamedTemporaryFile` to generate an output file name.
temp_dir (str): Path to the temporary directory. If not provided, `'/tmp'` is used.
make_db (bool): Whether to build a SQlite database of sequence information. Required
if you want to calculate consensus/centroid sequences for the resulting
clusters or if you need to access the clustered sequences (not just sequence IDs)
Default is `True`.
quiet (bool): If `True`, surpresses printing of output/progress info. Default is `False`.
threads (int): Number of threads (CPU cores) to be used for clustering. Default is `0`,
which results in all available cores being used.
return_just_seq_ids (bool): If `True`, will return a 2D list of sequence IDs
(a list containing a list of sequence IDs for each cluster) rather than returning a
list of `Cluster` objects.
max_memory (int): Max memory (in MB) for CD-HIT. Will be passed directly to CD-HIT through
the `-M` runtime option. Default is `800`.
debug (bool): If `True`, print standard output and standard error from CD-HIT. Default is `False`.
Returns:
list: A list of `Cluster` objects (or a 2D list of sequence IDs, if `return_just_seq_ids` is `True`).
'''
if make_db:
ofile, cfile, seq_db, db_path = cdhit(seqs, out_file=out_file, temp_dir=temp_dir,
threshold=threshold, make_db=True, quiet=quiet,
threads=threads, max_memory=max_memory, debug=debug)
return parse_clusters(ofile, cfile, seq_db=seq_db, db_path=db_path, return_just_seq_ids=return_just_seq_ids)
else:
seqs = [Sequence(s) for s in seqs]
seq_dict = {s.id: s for s in seqs}
ofile, cfile, = cdhit(seqs, out_file=out_file, temp_dir=temp_dir, threads=threads,
threshold=threshold, make_db=False, quiet=quiet,
max_memory=max_memory, debug=debug)
return parse_clusters(ofile, cfile, seq_dict=seq_dict, return_just_seq_ids=return_just_seq_ids) | Perform sequence clustering with CD-HIT.
Args:
seqs (list): An iterable of sequences, in any format that `abutils.utils.sequence.Sequence()`
can handle
threshold (float): Clustering identity threshold. Default is `0.975`.
out_file (str): Path to the clustering output file. Default is to use
`tempfile.NamedTemporaryFile` to generate an output file name.
temp_dir (str): Path to the temporary directory. If not provided, `'/tmp'` is used.
make_db (bool): Whether to build a SQlite database of sequence information. Required
if you want to calculate consensus/centroid sequences for the resulting
clusters or if you need to access the clustered sequences (not just sequence IDs)
Default is `True`.
quiet (bool): If `True`, surpresses printing of output/progress info. Default is `False`.
threads (int): Number of threads (CPU cores) to be used for clustering. Default is `0`,
which results in all available cores being used.
return_just_seq_ids (bool): If `True`, will return a 2D list of sequence IDs
(a list containing a list of sequence IDs for each cluster) rather than returning a
list of `Cluster` objects.
max_memory (int): Max memory (in MB) for CD-HIT. Will be passed directly to CD-HIT through
the `-M` runtime option. Default is `800`.
debug (bool): If `True`, print standard output and standard error from CD-HIT. Default is `False`.
Returns:
list: A list of `Cluster` objects (or a 2D list of sequence IDs, if `return_just_seq_ids` is `True`). | entailment |
def cdhit(seqs, out_file=None, temp_dir=None, threshold=0.975, make_db=True, quiet=False, threads=0, max_memory=800, retries=5, debug=False):
'''
Run CD-HIT.
Args:
seqs (list): An iterable of sequences, in any format that `abutils.utils.sequence.Sequence()`
can handle
threshold (float): Clustering identity threshold. Default is `0.975`.
out_file (str): Path to the clustering output file. Default is to use
`tempfile.NamedTemporaryFile` to generate an output file name.
temp_dir (str): Path to the temporary directory. If not provided, `'/tmp'` is used.
make_db (bool): Whether to build a SQlite database of sequence information. Required
if you want to calculate consensus/centroid sequences for the resulting
clusters or if you need to access the clustered sequences (not just sequence IDs)
Default is `True`.
quiet (bool): If `True`, surpresses printing of output/progress info. Default is `False`.
threads (int): Number of threads (CPU cores) to be used for clustering. Default is `0`,
which results in all available cores being used.
max_memory (int): Max memory (in MB) for CD-HIT. Will be passed directly to CD-HIT through
the `-M` runtime option. Default is `800`.
debug (bool): If `True`, print standard output and standard error from CD-HIT. Default is `False`.
Returns:
If `make_db` is `True`, returns the CD-HIT output file path, the CD-HIT cluster file path,
a `sqlite3` database connection object, and the database path. If `make_db` is `False`, only the
CD-HIT output file path and CD-HIT cluster file path are returned.
'''
start_time = time.time()
seqs = [Sequence(s) for s in seqs]
if not quiet:
print('CD-HIT: clustering {} seqeunces'.format(len(seqs)))
if out_file is None:
out_file = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)
out_file.close()
ofile = out_file.name
else:
ofile = os.path.expanduser(out_file)
cfile = ofile + '.clstr'
with open(ofile, 'w') as f: f.write('')
with open(cfile, 'w') as f: f.write('')
ifile = _make_cdhit_input(seqs, temp_dir)
cdhit_cmd = 'cdhit -i {} -o {} -c {} -n 5 -d 0 -T {} -M {}'.format(ifile,
ofile,
threshold,
threads,
max_memory)
while not all([os.path.getsize(cfile), os.path.getsize(cfile)]):
cluster = sp.Popen(cdhit_cmd,
shell=True,
stdout=sp.PIPE,
stderr=sp.PIPE)
stdout, stderr = cluster.communicate()
if not retries:
break
retries -= 1
end_time = time.time()
if debug:
print(stdout)
print(stderr)
else:
os.unlink(ifile)
if not quiet:
print('CD-HIT: clustering took {:.2f} seconds'.format(end_time - start_time))
if make_db:
if not quiet:
print('CD-HIT: building a SQLite3 database')
seq_db, db_path = _build_seq_db(seqs, direc=temp_dir)
return ofile, cfile, seq_db, db_path
return ofile, cfile | Run CD-HIT.
Args:
seqs (list): An iterable of sequences, in any format that `abutils.utils.sequence.Sequence()`
can handle
threshold (float): Clustering identity threshold. Default is `0.975`.
out_file (str): Path to the clustering output file. Default is to use
`tempfile.NamedTemporaryFile` to generate an output file name.
temp_dir (str): Path to the temporary directory. If not provided, `'/tmp'` is used.
make_db (bool): Whether to build a SQlite database of sequence information. Required
if you want to calculate consensus/centroid sequences for the resulting
clusters or if you need to access the clustered sequences (not just sequence IDs)
Default is `True`.
quiet (bool): If `True`, surpresses printing of output/progress info. Default is `False`.
threads (int): Number of threads (CPU cores) to be used for clustering. Default is `0`,
which results in all available cores being used.
max_memory (int): Max memory (in MB) for CD-HIT. Will be passed directly to CD-HIT through
the `-M` runtime option. Default is `800`.
debug (bool): If `True`, print standard output and standard error from CD-HIT. Default is `False`.
Returns:
If `make_db` is `True`, returns the CD-HIT output file path, the CD-HIT cluster file path,
a `sqlite3` database connection object, and the database path. If `make_db` is `False`, only the
CD-HIT output file path and CD-HIT cluster file path are returned. | entailment |
def parse_clusters(out_file, clust_file, seq_db=None, db_path=None, seq_dict=None, return_just_seq_ids=False):
'''
Parses CD-HIT output.
Args:
out_file (str): Path to the CD-HIT output file. Required.
clust_file (str): Path to the CD-HIT cluster file. Required.
seq_db (sqlite.Connection): SQLite3 `Connection` object. Default is `None`. If not provided and
`return_just_seq_ids` is False, the returned `Cluster` objects will not contain any sequence
information beyond the sequence ID.
db_path (str): Path to a SQLite3 database file. Default is `None`. Must be provided if
`seq_db` is also provided.
seq_dict (dict): A `dict` mapping sequence IDs to `abutils.core.sequence.Sequence` objects. Default
is `None`. Typically used when a relatively small number of sequences are being clustered and
creating a `sqlite3` database would be overkill.
temp_dir (str): Path to the temporary directory. If not provided, `'/tmp'` is used.
make_db (bool): Whether to build a SQlite database of sequence information. Required
if you want to calculate consensus/centroid sequences for the resulting
clusters or if you need to access the clustered sequences (not just sequence IDs)
Default is `True`.
quiet (bool): If `True`, surpresses printing of output/progress info. Default is `False`.
threads (int): Number of threads (CPU cores) to be used for clustering. Default is `0`,
which results in all available cores being used.
return_just_seq_ids (bool): If `True`, will return a 2D list of sequence IDs
(a list containing a list of sequence IDs for each cluster) rather than returning a
list of `Cluster` objects.
max_memory (int): Max memory (in MB) for CD-HIT. Will be passed directly to CD-HIT through
the `-M` runtime option. Default is `800`.
debug (bool): If `True`, print standard output and standard error from CD-HIT. Default is `False`.
Returns:
A CDHITResult object, or a 2D list of sequence IDs, if `return_just_seq_ids` is `True`.
'''
raw_clusters = [c.split('\n') for c in open(clust_file, 'r').read().split('\n>')]
if return_just_seq_ids:
ids = []
for rc in raw_clusters:
_ids = []
for c in rc[1:]:
if c:
_ids.append(c.split()[2][1:-3])
ids.append(_ids)
os.unlink(out_file)
os.unlink(clust_file)
return ids
os.unlink(out_file)
os.unlink(clust_file)
clusters = [Cluster(rc, seq_db, db_path, seq_dict) for rc in raw_clusters]
return CDHITResult(clusters, seq_db=seq_db, db_path=db_path, seq_dict=seq_dict) | Parses CD-HIT output.
Args:
out_file (str): Path to the CD-HIT output file. Required.
clust_file (str): Path to the CD-HIT cluster file. Required.
seq_db (sqlite.Connection): SQLite3 `Connection` object. Default is `None`. If not provided and
`return_just_seq_ids` is False, the returned `Cluster` objects will not contain any sequence
information beyond the sequence ID.
db_path (str): Path to a SQLite3 database file. Default is `None`. Must be provided if
`seq_db` is also provided.
seq_dict (dict): A `dict` mapping sequence IDs to `abutils.core.sequence.Sequence` objects. Default
is `None`. Typically used when a relatively small number of sequences are being clustered and
creating a `sqlite3` database would be overkill.
temp_dir (str): Path to the temporary directory. If not provided, `'/tmp'` is used.
make_db (bool): Whether to build a SQlite database of sequence information. Required
if you want to calculate consensus/centroid sequences for the resulting
clusters or if you need to access the clustered sequences (not just sequence IDs)
Default is `True`.
quiet (bool): If `True`, surpresses printing of output/progress info. Default is `False`.
threads (int): Number of threads (CPU cores) to be used for clustering. Default is `0`,
which results in all available cores being used.
return_just_seq_ids (bool): If `True`, will return a 2D list of sequence IDs
(a list containing a list of sequence IDs for each cluster) rather than returning a
list of `Cluster` objects.
max_memory (int): Max memory (in MB) for CD-HIT. Will be passed directly to CD-HIT through
the `-M` runtime option. Default is `800`.
debug (bool): If `True`, print standard output and standard error from CD-HIT. Default is `False`.
Returns:
A CDHITResult object, or a 2D list of sequence IDs, if `return_just_seq_ids` is `True`. | entailment |
def log_output(f):
"""
Logs the output value.
"""
@wraps(f)
def wrapper_fn(*args, **kwargs):
res = f(*args, **kwargs)
logging.debug("Logging result %s.", res)
return res
return wrapper_fn | Logs the output value. | entailment |
def save_to_file(destination_filename, append=False):
"""
Save the output value to file.
"""
def decorator_fn(f):
@wraps(f)
def wrapper_fn(*args, **kwargs):
res = f(*args, **kwargs)
makedirs(os.path.dirname(destination_filename))
mode = "a" if append else "w"
with open(destination_filename, mode) as text_file:
text_file.write(res)
return res
return wrapper_fn
return decorator_fn | Save the output value to file. | entailment |
def json_input(f):
"""
Expects task input data in json format and parse this data.
"""
@wraps(f)
def json_input_decorator(*args, **kwargs):
# If the data is already transformed, we do not transform it any
# further.
task_data = _get_data_from_args(args)
if task_data is None:
logging.error("Task data is empty during JSON decoding.")
if task_data.payload:
try:
is_transformed = request.get_json()
# We transform the data only in the case of plain POST requests.
if not is_transformed:
task_data.transform_payload(json.loads)
except (ValueError, RuntimeError):
logging.error(
"Exception while processing JSON input decorator.")
task_data.transform_payload(json.loads)
else:
task_data.transform_payload(lambda _: {})
return f(*args, **kwargs)
return json_input_decorator | Expects task input data in json format and parse this data. | entailment |
def json_output(f):
"""
Format response to json and in case of web-request set response content type
to 'application/json'.
"""
@wraps(f)
def json_output_decorator(*args, **kwargs):
@inject(config=Config)
def get_config(config):
return config
config = get_config()
rv = f(*args, **kwargs)
indent = None
if config.get('DEBUG', False):
logging.getLogger(__name__).debug("Formatting JSON nicely.")
indent = 2
rv = json.dumps(rv, cls=AppModelJSONEncoder, indent=indent)
_set_json_response_content_type()
return rv
return json_output_decorator | Format response to json and in case of web-request set response content type
to 'application/json'. | entailment |
def jsonp_wrap(callback_key='callback'):
"""
Format response to jsonp and add a callback to JSON data - a jsonp request
"""
def decorator_fn(f):
@wraps(f)
def jsonp_output_decorator(*args, **kwargs):
task_data = _get_data_from_args(args)
data = task_data.get_data()
if callback_key not in data:
raise KeyError(
'Missing required parameter "{0}" for task.'.format(
callback_key))
callback = data[callback_key]
jsonp = f(*args, **kwargs)
if isinstance(JobContext.get_current_context(), WebJobContext):
JobContext.get_current_context().add_responder(
MimeSetterWebTaskResponder('application/javascript'))
jsonp = "{callback}({data})".format(callback=callback, data=jsonp)
return jsonp
return jsonp_output_decorator
return decorator_fn | Format response to jsonp and add a callback to JSON data - a jsonp request | entailment |
def jsend_output(fail_exception_classes=None):
"""
Format task result to json output in jsend specification format. See:
https://github.com/omniti-labs. Task return value must be dict or None.
@param fail_exception_classes: exceptions which will produce 'fail' response
status.
"""
fail_exception_classes = fail_exception_classes if fail_exception_classes \
else ()
def decorator_fn(f):
@wraps(f)
@json_output
def jsend_output_decorator(*args, **kwargs):
try:
rv = f(*args, **kwargs)
except fail_exception_classes as e:
return {'status': 'fail', 'data': {'message': str(e)}}
except Exception as e:
logging.error(str(e) + "\n" + traceback.format_exc())
return {'status': 'error', 'message': 'Server error.'}
if not isinstance(rv, dict) and rv is not None:
msg = 'jsend_output decorator error: task must return dict ' \
'or None.\nTask return value: {0}.'
logging.error(msg.format(rv))
return {'status': 'error', 'message': 'Server error.'}
return {'status': 'success', 'data': rv}
return jsend_output_decorator
return decorator_fn | Format task result to json output in jsend specification format. See:
https://github.com/omniti-labs. Task return value must be dict or None.
@param fail_exception_classes: exceptions which will produce 'fail' response
status. | entailment |
def web_error_and_result(f):
"""
Same as error_and_result decorator, except:
If no exception was raised during task execution, ONLY IN CASE OF WEB
REQUEST formats task result into json dictionary {'data': task return value}
"""
@wraps(f)
def web_error_and_result_decorator(*args, **kwargs):
return error_and_result_decorator_inner_fn(f, True, *args, **kwargs)
return web_error_and_result_decorator | Same as error_and_result decorator, except:
If no exception was raised during task execution, ONLY IN CASE OF WEB
REQUEST formats task result into json dictionary {'data': task return value} | entailment |
def error_and_result(f):
"""
Format task result into json dictionary `{'data': task return value}` if no
exception was raised during the task execution. If there was raised an
exception during task execution, formats task result into dictionary
`{'error': exception message with traceback}`.
"""
@wraps(f)
def error_and_result_decorator(*args, **kwargs):
return error_and_result_decorator_inner_fn(f, False, *args, **kwargs)
return error_and_result_decorator | Format task result into json dictionary `{'data': task return value}` if no
exception was raised during the task execution. If there was raised an
exception during task execution, formats task result into dictionary
`{'error': exception message with traceback}`. | entailment |
def required_data(*data):
"""
Task decorator which checks if the given variables (indices) are stored
inside the task data.
"""
def decorator_fn(f):
@wraps(f)
def required_data_decorator(*args, **kwargs):
task_data = _get_data_from_args(args).get_data()
for i in data:
if i not in task_data:
raise KeyError(
'Missing required parameter "{0}" for task.'.format(i))
return f(*args, **kwargs)
return required_data_decorator
return decorator_fn | Task decorator which checks if the given variables (indices) are stored
inside the task data. | entailment |
def append_get_parameters(accept_only_web=True):
# type: (bool) -> Callable
"""
Task decorator which appends the GET data to the task data.
:param accept_only_web: Parameter which limits using this task only
with web requests.
"""
def wrapper(f):
@wraps(f)
def append_get_parameters_wrapper_fn(*args, **kwargs):
jc = JobContext.get_current_context()
if isinstance(jc, WebJobContext):
# Update the data with GET parameters
web_request = jc.get_web_request()
task_data = _get_data_from_args(args)
data = task_data.get_data()
data.update(web_request.args.to_dict(flat=True))
elif accept_only_web:
# Raise exception on non web usage if necessary
raise Exception("append_get_parameters decorator may be used "
"with GET requests only.")
return f(*args, **kwargs)
return append_get_parameters_wrapper_fn
return wrapper | Task decorator which appends the GET data to the task data.
:param accept_only_web: Parameter which limits using this task only
with web requests. | entailment |
def web_task(f):
"""
Checks if the task is called through the web interface.
Task return value should be in format {'data': ...}.
"""
@wraps(f)
def web_task_decorator(*args, **kwargs):
jc = JobContext.get_current_context()
if not isinstance(jc, WebJobContext):
raise Exception(
"The WebTask is not called through the web interface.")
data = f(*args, **kwargs)
jc.add_responder(WebTaskResponder(data))
return data['data'] if 'data' in data else ""
return web_task_decorator | Checks if the task is called through the web interface.
Task return value should be in format {'data': ...}. | entailment |
def secured_task(f):
"""
Secured task decorator.
"""
@wraps(f)
def secured_task_decorator(*args, **kwargs):
task_data = _get_data_from_args(args)
assert isinstance(task_data, TaskData)
if not verify_security_data(task_data.get_data()['security']):
raise SecurityException(
task_data.get_data()['security']['hashed_token'])
task_data.transform_payload(lambda x: x['data'])
return f(*args, **kwargs)
return secured_task_decorator | Secured task decorator. | entailment |
def xml_output(f):
"""
Set content-type for response to WEB-REQUEST to 'text/xml'
"""
@wraps(f)
def xml_output_inner_fn(*args, **kwargs):
ret_val = f(*args, **kwargs)
if isinstance(JobContext.get_current_context(), WebJobContext):
JobContext.get_current_context().add_responder(
MimeSetterWebTaskResponder('text/xml'))
return ret_val
return xml_output_inner_fn | Set content-type for response to WEB-REQUEST to 'text/xml' | entailment |
def file_upload(f):
"""
Return list of `werkzeug.datastructures.FileStorage` objects - files to be
uploaded
"""
@wraps(f)
def file_upload_decorator(*args, **kwargs):
# If the data is already transformed, we do not transform it any
# further.
task_data = _get_data_from_args(args)
if task_data is None:
logging.error("Task data is empty during FilesUploadDecorator.")
task_data.transform_payload(lambda _: request.files.getlist('file'))
return f(*args, **kwargs)
return file_upload_decorator | Return list of `werkzeug.datastructures.FileStorage` objects - files to be
uploaded | entailment |
def forbid_web_access(f):
"""
Forbids running task using http request.
:param f: Callable
:return Callable
"""
@wraps(f)
def wrapper_fn(*args, **kwargs):
if isinstance(JobContext.get_current_context(), WebJobContext):
raise ForbiddenError('Access forbidden from web.')
return f(*args, **kwargs)
return wrapper_fn | Forbids running task using http request.
:param f: Callable
:return Callable | entailment |
def filter_from_url_arg(model_cls, query, arg, query_operator=and_,
arg_types=None):
"""
Parse filter URL argument ``arg`` and apply to ``query``
Example: 'column1<=value,column2==value' -> query.filter(Model.column1 <= value, Model.column2 == value)
"""
fields = arg.split(',')
mapper = class_mapper(model_cls)
if not arg_types:
arg_types = {}
exprs = []
joins = set()
for expr in fields:
if expr == "":
continue
e_mapper = mapper
e_model_cls = model_cls
operator = None
method = None
for op in operator_order:
if op in expr:
operator = op
method = operator_to_method[op]
break
if operator is None:
raise Exception('No operator in expression "{0}".'.format(expr))
(column_names, value) = expr.split(operator)
column_names = column_names.split('__')
value = value.strip()
for column_name in column_names:
if column_name in arg_types:
typed_value = arg_types[column_name](value)
else:
typed_value = value
if column_name in e_mapper.relationships:
joins.add(column_name)
e_model_cls = e_mapper.attrs[column_name].mapper.class_
e_mapper = class_mapper(e_model_cls)
if hasattr(e_model_cls, column_name):
column = getattr(e_model_cls, column_name)
exprs.append(getattr(column, method)(typed_value))
else:
raise Exception('Invalid property {0} in class {1}.'.format(column_name, e_model_cls))
return query.join(*joins).filter(query_operator(*exprs)) | Parse filter URL argument ``arg`` and apply to ``query``
Example: 'column1<=value,column2==value' -> query.filter(Model.column1 <= value, Model.column2 == value) | entailment |
def model_tree(name, model_cls, visited=None):
"""Create a simple tree of model's properties and its related models.
It traverse trough relations, but ignore any loops.
:param name: name of the model
:type name: str
:param model_cls: model class
:param visited: set of visited models
:type visited: list or None
:return: a dictionary where values are lists of string or other \
dictionaries
"""
if not visited:
visited = set()
visited.add(model_cls)
mapper = class_mapper(model_cls)
columns = [column.key for column in mapper.column_attrs]
related = [model_tree(rel.key, rel.mapper.entity, visited)
for rel in mapper.relationships if rel.mapper.entity not in visited]
return {name: columns + related} | Create a simple tree of model's properties and its related models.
It traverse trough relations, but ignore any loops.
:param name: name of the model
:type name: str
:param model_cls: model class
:param visited: set of visited models
:type visited: list or None
:return: a dictionary where values are lists of string or other \
dictionaries | entailment |
def flat_model(tree):
"""Flatten the tree into a list of properties adding parents as prefixes."""
names = []
for columns in viewvalues(tree):
for col in columns:
if isinstance(col, dict):
col_name = list(col)[0]
names += [col_name + '__' + c for c in flat_model(col)]
else:
names.append(col)
return names | Flatten the tree into a list of properties adding parents as prefixes. | entailment |
def execute_job(job, app=Injected, task_router=Injected):
# type: (Job, Zsl, TaskRouter) -> dict
"""Execute a job.
:param job: job to execute
:type job: Job
:param app: service application instance, injected
:type app: ServiceApplication
:param task_router: task router instance, injected
:type task_router: TaskRouter
:return: task result
:rtype: dict
"""
app.logger.info("Job fetched, preparing the task '{0}'.".format(job.path))
task, task_callable = task_router.route(job.path)
jc = JobContext(job, task, task_callable)
app.logger.info("Executing task.")
result = jc.task_callable(jc.task_data)
app.logger.info("Task {0} executed successfully.".format(job.path))
return {'task_name': job.path, 'data': result} | Execute a job.
:param job: job to execute
:type job: Job
:param app: service application instance, injected
:type app: ServiceApplication
:param task_router: task router instance, injected
:type task_router: TaskRouter
:return: task result
:rtype: dict | entailment |
def handle_exception(self, e, task_path):
# type: (Exception, str) -> dict
"""Handle exception raised during task execution.
:param e: exception
:type e: Exception
:param task_path: task path
:type task_path: str
:return: exception as task result
:rtype: dict
"""
self._app.logger.error(str(e) + "\n" + traceback.format_exc())
return {'task_name': task_path, 'data': None, 'error': str(e)} | Handle exception raised during task execution.
:param e: exception
:type e: Exception
:param task_path: task path
:type task_path: str
:return: exception as task result
:rtype: dict | entailment |
def execute_job(self, job):
# type: (Job) -> dict
"""Execute job given by the task queue.
:param job: job
:type job: Job
:return: task result
:rtype: dict
"""
try:
return execute_job(job)
except KillWorkerException:
self._app.logger.info("Stopping Gearman worker on demand flag set.")
self.stop_worker()
except Exception as e:
return self.handle_exception(e, job.path) | Execute job given by the task queue.
:param job: job
:type job: Job
:return: task result
:rtype: dict | entailment |
def legacy_signature(**kwargs_mapping):
"""
This decorator makes it possible to call a function using old argument names
when they are passed as keyword arguments.
@legacy_signature(old_arg1='arg1', old_arg2='arg2')
def func(arg1, arg2=1):
return arg1 + arg2
func(old_arg1=1) == 2
func(old_arg1=1, old_arg2=2) == 3
"""
def signature_decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
redirected_kwargs = {
kwargs_mapping[k] if k in kwargs_mapping else k: v
for k, v in kwargs.items()
}
return f(*args, **redirected_kwargs)
return wrapper
return signature_decorator | This decorator makes it possible to call a function using old argument names
when they are passed as keyword arguments.
@legacy_signature(old_arg1='arg1', old_arg2='arg2')
def func(arg1, arg2=1):
return arg1 + arg2
func(old_arg1=1) == 2
func(old_arg1=1, old_arg2=2) == 3 | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.