    The padding that was used for the ``eyes`` source in :py:func:`bounding_box_from_annotation` has a proper default.

    **Returns:**

    ``eyes`` : {'reye' : (rey, rex), 'leye' : (ley, lex)}
        A dictionary containing the average left and right eye annotation.
    """
    if padding is None:
        padding = default_paddings['eyes']
    top, left, right = padding['top'], padding['left'], padding['right']
    inter_eye_distance = (bounding_box.size[1]) / (right - left)
    return {
        'reye': (bounding_box.top_f - top * inter_eye_distance, bounding_box.left_f - left / 2. * inter_eye_distance),
        'leye': (bounding_box.top_f - top * inter_eye_distance, bounding_box.right_f - right / 2. * inter_eye_distance)
    }
1054,"def parallel_part(data, parallel):
""""""parallel_part(data, parallel) -> part
Splits off samples from the the given data list and the given number of parallel jobs based on the ``SGE_TASK_ID`` environment variable.
**Parameters:**
``data`` : [object]
A list of data that should be split up into ``parallel`` parts
``parallel`` : int or ``None``
The total number of parts, in which the data should be split into
**Returns:**
``part`` : [object]
The desired partition of the ``data``
""""""
if parallel is None or ""SGE_TASK_ID"" not in os.environ:
return data
data_per_job = int(math.ceil(float(len(data)) / float(parallel)))
task_id = int(os.environ['SGE_TASK_ID'])
first = (task_id-1) * data_per_job
last = min(len(data), task_id * data_per_job)
return data[first:last]"
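# A minimal usage sketch, assuming the module-level ``import os`` and ``import math``
# that ``parallel_part`` relies on.  ``SGE_TASK_ID`` is the variable that SGE-style
# grid engines export for array jobs; setting it by hand here only simulates such a
# job, and the data and job count are made up for illustration.
import math
import os

data = list(range(10))

os.environ['SGE_TASK_ID'] = '2'            # pretend to be the second of three array jobs
print(parallel_part(data, parallel=3))     # -> [4, 5, 6, 7]

del os.environ['SGE_TASK_ID']              # outside the grid, the full list is returned
print(parallel_part(data, parallel=3))     # -> [0, 1, ..., 9]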
1055,"def quasi_random_indices(number_of_total_items, number_of_desired_items = None):
""""""quasi_random_indices(number_of_total_items, [number_of_desired_items]) -> index
Yields an iterator to a quasi-random list of indices that will contain exactly the number of desired indices (or the number of total items in the list, if this is smaller).
This function can be used to retrieve a consistent and reproducible list of indices of the data, in case the ``number_of_total_items`` is lower that the given ``number_of_desired_items``.
**Parameters:**
``number_of_total_items`` : int
The total number of elements in the collection, which should be sub-sampled
``number_of_desired_items`` : int or ``None``
The number of items that should be used; if ``None`` or greater than ``number_of_total_items``, all indices are yielded
**Yields:**
``index`` : int
An iterator to indices, which will span ``number_of_total_items`` evenly.
""""""
# check if we need to compute a sublist at all
if number_of_desired_items is None or number_of_desired_items >= number_of_total_items or number_of_desired_items < 0:
for i in range(number_of_total_items):
yield i
else:
increase = float(number_of_total_items)/float(number_of_desired_items)
# generate a regular quasi-random index list
for i in range(number_of_desired_items):
yield int((i +.5)*increase)"
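# A minimal usage sketch; ``quasi_random_indices`` is a generator, so it is usually
# wrapped in ``list`` or consumed in a ``for`` loop.  The numbers below are
# illustrative only.
print(list(quasi_random_indices(10, 3)))   # -> [1, 5, 8]   (3 indices spread over 0..9)
print(list(quasi_random_indices(3, 10)))   # -> [0, 1, 2]   (fewer items than desired: all of them)

# typical use: pick a reproducible subset of files, e.g. for training
# (``all_files`` is a hypothetical list of file names)
# training_files = [all_files[i] for i in quasi_random_indices(len(all_files), 100)]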
1056,"def exception_class(self, exception):
""""""Return a name representing the class of an exception.""""""
cls = type(exception)
if cls.__module__ == 'exceptions': # Built-in exception.
return cls.__name__
return ""%s.%s"" % (cls.__module__, cls.__name__)"
1057,"def request_info(self, request):
""""""
Return a dictionary of information for a given request.
This will be run once for every request.
""""""
# We have to re-resolve the request path here, because the information
# is not stored on the request.
view, args, kwargs = resolve(request.path)
for i, arg in enumerate(args):
kwargs[i] = arg
parameters = {}
parameters.update(kwargs)
parameters.update(request.POST.items())
environ = request.META
return {
""session"": dict(request.session),
'cookies': dict(request.COOKIES),