code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def weighted_rand(weights, round_result=False):
    """Generate a non-uniform random value from a list of weight tuples.

    The weights are treated as coordinates of a piece-wise linear
    probability curve. Points are sampled uniformly inside the curve's
    bounding box until one lands under the curve (rejection sampling).

    Args:
        weights (list): weight tuples of the form ``(outcome, strength)``.
            Weights with strength ``0`` or less can never be rolled.
        round_result (bool): whether to round the resulting value
            to the nearest integer.

    Returns:
        float: A weighted random number
        int: A weighted random number rounded to the nearest ``int``

    Example:
        >>> weighted_rand([(-3, 4), (0, 10), (5, 1)])  # doctest: +SKIP
        -0.650612268193731
        >>> weighted_rand([(-3, 4), (0, 10), (5, 1)])  # doctest: +SKIP
        -2
    """
    # A single weight has only one possible outcome.
    if len(weights) == 1:
        return weights[0][0]
    # Sort by outcome so the curve is well-defined left-to-right.
    weights = sorted(weights, key=lambda pair: pair[0])
    x_lo = weights[0][0]
    x_hi = weights[-1][0]
    y_hi = max(strength for _, strength in weights)
    # Rejection-sample inside the bounding box, with a safety cap.
    for _ in range(500000):
        sample = (random.uniform(x_lo, x_hi), random.uniform(0, y_hi))
        if _point_under_curve(weights, sample):
            # Accepted: the point lies under the probability curve.
            return int(round(sample[0])) if round_result else sample[0]
    warnings.warn(
        'Point not being found in weighted_rand() after 500000 '
        'attempts, defaulting to a random weight point. '
        'If this happens often, it is probably a bug.')
    return random.choice(weights)[0]
def _put_attributes_using_post(self, domain_or_name, item_name, attributes,
replace=True, expected_value=None):
"""
Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET
The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit
for attribute values. Using POST prevents that.
https://github.com/BD2KGenomics/toil/issues/502
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name,
'ItemName': item_name}
self._build_name_value_list(params, attributes, replace)
if expected_value:
self._build_expected_value(params, expected_value)
# The addition of the verb keyword argument is the only difference to put_attributes (Hannes)
return self.get_status('PutAttributes', params, verb='POST') | Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET
The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit
for attribute values. Using POST prevents that.
https://github.com/BD2KGenomics/toil/issues/502 |
def _get_roles_query(self, session, identifier):
    """Build a query for the roles granted to the user with ``identifier``.

    :type identifier: string
    """
    membership = role_membership_table
    query = session.query(Role)
    # Role -> membership -> User, filtered on the user's identifier.
    query = query.join(membership, Role.pk_id == membership.c.role_id)
    query = query.join(User, membership.c.user_id == User.pk_id)
    return query.filter(User.identifier == identifier)
def show(dataset_uri):
    """Show the descriptive metadata in the readme."""
    # Prefer a proto dataset; fall back to a frozen dataset when the
    # URI does not point at a proto dataset.
    try:
        dataset = dtoolcore.ProtoDataSet.from_uri(
            uri=dataset_uri, config_path=CONFIG_PATH)
    except dtoolcore.DtoolCoreTypeError:
        dataset = dtoolcore.DataSet.from_uri(
            uri=dataset_uri, config_path=CONFIG_PATH)
    click.secho(dataset.get_readme_content())
def Result(self, res):
    """Report the outcome of the test.

    Non-string values are displayed as "yes" (truthy) or "no" (falsy).
    Nothing is displayed once self.did_show_result has been set.
    """
    if isinstance(res, str):
        text = res
    else:
        text = "yes" if res else "no"
    if self.did_show_result == 0:
        # First time reporting: show it and latch the flag.
        self.Display(text + "\n")
        self.did_show_result = 1
def create_anonymous_client(cls):
    """Factory: return client with anonymous credentials.

    .. note::
        Such a client has only limited access to "public" buckets:
        listing their contents and downloading their blobs.

    :rtype: :class:`google.cloud.storage.client.Client`
    :returns: Instance w/ anonymous credentials and no project.
    """
    # The constructor insists on a project, so pass a placeholder and
    # clear it right afterwards.
    anonymous = cls(project="<none>", credentials=AnonymousCredentials())
    anonymous.project = None
    return anonymous
def get_all_memberships(
        self, limit_to=100, max_calls=None, parameters=None,
        since_when=None, start_record=0, verbose=False):
    """Retrieve all memberships updated in the last ``since_when`` days.

    Queries of size ``limit_to`` are issued until a non-full result set
    comes back or ``max_calls`` is reached (used in tests); the results
    are returned as one concatenated list.
    """
    if not self.client.session_id:
        self.client.request_session()
    # Gather WHERE clauses as (field, operator, value) triples.
    conditions = []
    for field, value in (parameters or {}).items():
        conditions.append((field, "=", value))
    if since_when:
        cutoff = datetime.date.today() - datetime.timedelta(days=since_when)
        conditions.append(
            ('LastModifiedDate', ">", "'%s 00:00:00'" % cutoff))
    query = "SELECT Objects() FROM Membership"
    if conditions:
        query += " WHERE "
        query += " AND ".join("%s %s %s" % triple for triple in conditions)
    query += " ORDER BY LocalID"
    # get_long_query is overkill when just looking at one org, but it
    # still only executes once; it maps rows via ms_object_to_model.
    membership_list = self.get_long_query(
        query, limit_to=limit_to, max_calls=max_calls,
        start_record=start_record, verbose=verbose)
    return membership_list or []
def includeme(config):
    """Include `crabpy_pyramid` in this `Pyramid` application.

    :param pyramid.config.Configurator config: A Pyramid configurator.
    """
    settings = _parse_settings(config.registry.settings)
    proxy_settings = _get_proxy_settings(settings)
    # Conditional HTTP caching tween, unless explicitly disabled.
    if not settings.get('etag_tween_disabled', False):
        config.add_tween('crabpy_pyramid.conditional_http_tween_factory')
    # Make sure the dogpile cache directory exists.
    cache_root = settings.get('cache.file.root', '/tmp/dogpile_data')
    if not os.path.exists(cache_root):
        os.makedirs(cache_root)
    # CAPAKEY gateway is always wired up.
    capakey_settings = dict(
        _filter_settings(settings, 'capakey.'), **proxy_settings)
    if 'include' in capakey_settings:
        log.info("The 'capakey.include' setting is deprecated. Capakey will "
                 "always be included.")
    log.info('Adding CAPAKEY Gateway.')
    config.add_renderer('capakey_listjson', capakey_json_list_renderer)
    config.add_renderer('capakey_itemjson', capakey_json_item_renderer)
    _build_capakey(config.registry, capakey_settings)
    config.add_request_method(get_capakey, 'capakey_gateway')
    config.add_directive('get_capakey', get_capakey)
    config.include('crabpy_pyramid.routes.capakey')
    config.scan('crabpy_pyramid.views.capakey')
    # CRAB gateway is opt-in via the 'crab.include' setting.
    crab_settings = dict(_filter_settings(settings, 'crab.'), **proxy_settings)
    if crab_settings['include']:
        log.info('Adding CRAB Gateway.')
        del crab_settings['include']
        config.add_renderer('crab_listjson', crab_json_list_renderer)
        config.add_renderer('crab_itemjson', crab_json_item_renderer)
        _build_crab(config.registry, crab_settings)
        config.add_directive('get_crab', get_crab)
        config.add_request_method(get_crab, 'crab_gateway')
        config.include('crabpy_pyramid.routes.crab')
        config.scan('crabpy_pyramid.views.crab')
def on_key_press(self, event):
    """Pan and zoom with the keyboard."""
    key = event.key
    # Ignore chords with modifier keys.
    if event.modifiers:
        return
    # Arrow keys pan the view (when keyboard pan is enabled).
    if self.enable_keyboard_pan and key in self._arrows:
        self._pan_keyboard(key)
    # Plus/minus keys zoom.
    if key in self._pm:
        self._zoom_keyboard(key)
    # 'R' resets the view.
    if key == 'R':
        self.reset()
def send_message(self, payload):
    """Post ``payload`` to the message sender service."""
    logger = self.l
    logger.info("Creating outbound message request")
    outbound = ms_client.create_outbound(payload)
    logger.info("Created outbound message request")
    return outbound
def fetch(self, category=CATEGORY_MESSAGE, offset=DEFAULT_OFFSET, chats=None):
    """Fetch messages readable by the bot from the Telegram server.

    Messages with an offset equal to or greater than ``offset`` are
    retrieved. When ``chats`` is given, only messages sent to any of
    those chats, groups or channels are returned; an empty list yields
    no messages.

    :param category: the category of items to fetch
    :param offset: obtain messages from this offset
    :param chats: list of chat names used to filter messages

    :returns: a generator of messages

    :raises ValueError: when `chats` is an empty list
    """
    # Falsy offsets (None, 0) fall back to the default.
    offset = offset or DEFAULT_OFFSET
    return super().fetch(category, offset=offset, chats=chats)
def url_unparse(components):
    """The reverse operation to :meth:`url_parse`.

    Accepts arbitrary as well as :class:`URL` tuples and returns a URL
    as a string.

    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    scheme, netloc, path, query, fragment = \
        normalize_string_tuple(components)
    s = make_literal_wrapper(scheme)
    url = s('')
    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differenciate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s('/'))):
        # Ensure the path is rooted before appending it to the netloc.
        if path and path[:1] != s('/'):
            path = s('/') + path
        url = s('//') + (netloc or s('')) + path
    elif path:
        url = url + path
    if scheme:
        url = scheme + s(':') + url
    if query:
        url += s('?') + query
    if fragment:
        url += s('#') + fragment
    return url
def token_list_len(tokenlist):
    """Return the number of characters in this token list.

    :param tokenlist: List of (token, text) or (token, text, mouse_handler)
        tuples.
    """
    # Zero-width escape tokens contribute no visible characters.
    zero_width = Token.ZeroWidthEscape
    total = 0
    for item in tokenlist:
        if item[0] != zero_width:
            total += len(item[1])
    return total
def plot_somas(somas):
    '''Plot a set of somas as spheres on one 3D figure, each in its own color'''
    _, ax = common.get_figure(new_fig=True, subplot=111,
                              params={'projection': '3d', 'aspect': 'equal'})
    for soma in somas:
        common.plot_sphere(ax, soma.center, soma.radius,
                           color=random_color(), alpha=1)
    plt.show()
def to_properties(cls, config, compact=False, indent=2, key_stack=[]):
    """Convert HOCON input into a .properties output.

    :return: .properties string representation
    :type return: basestring
    """
    # NOTE: the mutable default ``key_stack`` is never mutated here
    # (new lists are built via concatenation), so it is safe.
    def escape_value(value):
        # Escape characters that are significant in .properties files.
        return (value.replace('=', '\\=')
                     .replace('!', '\\!')
                     .replace('#', '\\#')
                     .replace('\n', '\\\n'))

    keys = [key.strip('"') for key in key_stack]
    lines = []
    if isinstance(config, ConfigTree):
        for key, value in config.items():
            if value is not None:
                lines.append(cls.to_properties(
                    value, compact, indent, keys + [key]))
    elif isinstance(config, list):
        for position, value in enumerate(config):
            if value is not None:
                lines.append(cls.to_properties(
                    value, compact, indent, keys + [str(position)]))
    elif isinstance(config, basestring):
        lines.append('.'.join(keys) + ' = ' + escape_value(config))
    elif config is True:
        lines.append('.'.join(keys) + ' = true')
    elif config is False:
        lines.append('.'.join(keys) + ' = false')
    elif config is None or isinstance(config, NoneValue):
        # Null values are omitted from the output entirely.
        pass
    else:
        lines.append('.'.join(keys) + ' = ' + str(config))
    return '\n'.join(line for line in lines if len(line) > 0)
def scan_processes_and_threads(self):
    """
    Populates the snapshot with running processes and threads.

    Typically you don't need to call this method directly; if unsure use
    L{scan} instead.

    @note: This method uses the Toolhelp API.

    @see: L{scan_modules}

    @raise WindowsError: An error occurred while updating the snapshot.
        The snapshot was not modified.
    """
    # The main module filename may be spoofed by malware,
    # since this information resides in usermode space.
    # See: http://www.ragestorm.net/blogs/?p=163
    our_pid = win32.GetCurrentProcessId()
    # Assume every previously-known process is dead until the snapshot
    # proves otherwise.
    dead_pids = set( compat.iterkeys(self.__processDict) )
    found_tids = set()
    # Ignore our own process if it's in the snapshot for some reason
    if our_pid in dead_pids:
        dead_pids.remove(our_pid)
    # Take a snapshot of all processes and threads
    dwFlags = win32.TH32CS_SNAPPROCESS | win32.TH32CS_SNAPTHREAD
    with win32.CreateToolhelp32Snapshot(dwFlags) as hSnapshot:
        # Add all the processes (excluding our own)
        pe = win32.Process32First(hSnapshot)
        while pe is not None:
            dwProcessId = pe.th32ProcessID
            if dwProcessId != our_pid:
                if dwProcessId in dead_pids:
                    dead_pids.remove(dwProcessId)
                if dwProcessId not in self.__processDict:
                    aProcess = Process(dwProcessId, fileName=pe.szExeFile)
                    self._add_process(aProcess)
                elif pe.szExeFile:
                    # Backfill the filename on processes we already knew
                    # about but had no name for.
                    aProcess = self.get_process(dwProcessId)
                    if not aProcess.fileName:
                        aProcess.fileName = pe.szExeFile
            pe = win32.Process32Next(hSnapshot)
        # Add all the threads
        te = win32.Thread32First(hSnapshot)
        while te is not None:
            dwProcessId = te.th32OwnerProcessID
            if dwProcessId != our_pid:
                if dwProcessId in dead_pids:
                    dead_pids.remove(dwProcessId)
                if dwProcessId in self.__processDict:
                    aProcess = self.get_process(dwProcessId)
                else:
                    # Thread owner missing from the process pass; add it.
                    aProcess = Process(dwProcessId)
                    self._add_process(aProcess)
                dwThreadId = te.th32ThreadID
                found_tids.add(dwThreadId)
                if not aProcess._has_thread_id(dwThreadId):
                    aThread = Thread(dwThreadId, process = aProcess)
                    aProcess._add_thread(aThread)
            te = win32.Thread32Next(hSnapshot)
    # Remove dead processes
    for pid in dead_pids:
        self._del_process(pid)
    # Remove dead threads
    for aProcess in compat.itervalues(self.__processDict):
        dead_tids = set( aProcess._get_thread_ids() )
        dead_tids.difference_update(found_tids)
        for tid in dead_tids:
            aProcess._del_thread(tid)
def options(self, context, module_options):
    '''
    ACTION Enable/Disable RDP (choices: enable, disable)
    '''
    # ACTION is mandatory and must be one of the two known values.
    if 'ACTION' not in module_options:
        context.log.error('ACTION option not specified!')
        exit(1)
    action = module_options['ACTION'].lower()
    if action not in ('enable', 'disable'):
        context.log.error('Invalid value for ACTION option!')
        exit(1)
    self.action = action
def flair_template_sync(self, editable, limit,  # pylint: disable=R0912
                        static, sort, use_css, use_text):
    """Synchronize templates with flair that already exists on the site.

    :param editable: Indicates that all the options should be editable.
    :param limit: The minimum number of users that must share the flair
        before it is added as a template.
    :param static: A list of flair templates that will always be added.
    :param sort: The order to sort the flair templates ('alpha' or 'size').
    :param use_css: Include css in the templates.
    :param use_text: Include text in the templates.
    :raises Exception: if neither ``use_text`` nor ``use_css`` is set,
        ``sort`` is invalid, or a static entry is malformed.
    """
    # Parameter verification
    if not use_text and not use_css:
        raise Exception('At least one of use_text or use_css must be True')
    sorts = ('alpha', 'size')
    if sort not in sorts:
        raise Exception('Sort must be one of: {}'.format(', '.join(sorts)))

    # Build current flair list along with static values
    counter = {}
    if static:
        for key in static:
            if use_css and use_text:
                # Static entries must be "text,css" pairs in this mode.
                parts = tuple(x.strip() for x in key.split(','))
                if len(parts) != 2:
                    raise Exception('--static argument {!r} must have two '
                                    'parts (comma separated) when using '
                                    'both text and css.'.format(parts))
                key = parts
            # Seed static entries at the limit so they always qualify.
            counter[key] = limit
    if self.verbose:
        sys.stdout.write('Retrieving current flair\n')
        sys.stdout.flush()
    for flair in self.current_flair():
        if self.verbose:
            sys.stdout.write('.')
            sys.stdout.flush()
        if use_text and use_css:
            key = (flair['flair_text'], flair['flair_css_class'])
        elif use_text:
            key = flair['flair_text']
        else:
            key = flair['flair_css_class']
        if key in counter:
            counter[key] += 1
        else:
            counter[key] = 1
    if self.verbose:
        print()

    # Sort flair list items according to the specified sort
    if sort == 'alpha':
        items = sorted(counter.items())
    else:
        items = sorted(counter.items(), key=lambda x: x[1], reverse=True)

    # Clear current templates and store flair according to the sort
    if self.verbose:
        print('Clearing current flair templates')
    self.sub.flair.templates.clear()
    for key, count in items:
        # Skip empty flair and flair shared by fewer than `limit` users.
        # (A stray debug print('a') used to pollute stdout here.)
        if not key or count < limit:
            continue
        if use_text and use_css:
            text, css = key
        elif use_text:
            text, css = key, ''
        else:
            text, css = '', key
        if self.verbose:
            print('Adding template: text: {!r} css: {!r}'
                  .format(text, css))
        self.sub.flair.templates.add(text, css, editable)
def set_iscsi_initiator_info(self, initiator_iqn):
    """Set iSCSI initiator information in iLO.

    :param initiator_iqn: Initiator iqn for iLO.
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedError, if the system is
        in the bios boot mode.
    """
    # iSCSI settings can only be patched while in UEFI boot mode.
    # (Keep the strict `is True` check of the original code.)
    if self._is_boot_mode_uefi() is not True:
        msg = 'iSCSI initiator cannot be set in the BIOS boot mode'
        raise exception.IloCommandNotSupportedError(msg)
    iscsi_uri = self._check_iscsi_rest_patch_allowed()
    payload = {'iSCSIInitiatorName': initiator_iqn}
    status, headers, response = self._rest_patch(iscsi_uri, None, payload)
    if status >= 300:
        # Surface the detailed iLO error on failure.
        raise exception.IloError(self._get_extended_error(response))
def dump_response_data(response_schema,
                       response_data,
                       status_code=200,
                       headers=None,
                       response_format=None):
    """Dump response data as JSON using the given schema.

    JSON encoding is forced even when the client did not specify the
    `Accept` header properly. This is friendlier to client and test
    software, even at the cost of not distinguishing HTTP 400 and 406
    errors.
    """
    if response_schema:
        # Serialize through the schema; the dump result exposes the
        # serialized payload on its .data attribute.
        response_data = response_schema.dump(response_data).data
    return make_response(response_data, response_schema,
                         response_format, status_code, headers)
def create_dir(self, directory_path, perm_bits=PERM_DEF):
    """Create `directory_path`, and all the parent directories.

    Helper method to set up your test faster.

    Args:
        directory_path: The full directory path to create.
        perm_bits: The permission bits as set by `chmod`.

    Returns:
        The newly created FakeDirectory object.

    Raises:
        OSError: if the directory already exists.
    """
    # Normalize the path before walking its components.
    directory_path = self.make_string_path(directory_path)
    directory_path = self.absnormpath(directory_path)
    self._auto_mount_drive_if_needed(directory_path)
    if self.exists(directory_path, check_link=True):
        self.raise_os_error(errno.EEXIST, directory_path)
    path_components = self._path_components(directory_path)
    current_dir = self.root
    # Track newly created directories so permissions can be applied last.
    new_dirs = []
    for component in path_components:
        directory = self._directory_content(current_dir, component)[1]
        if not directory:
            # Component is missing: create it and descend into it.
            new_dir = FakeDirectory(component, filesystem=self)
            new_dirs.append(new_dir)
            current_dir.add_entry(new_dir)
            current_dir = new_dir
        else:
            if S_ISLNK(directory.st_mode):
                # Follow symlinks to their target before descending.
                directory = self.resolve(directory.contents)
            current_dir = directory
            if directory.st_mode & S_IFDIR != S_IFDIR:
                self.raise_os_error(errno.ENOTDIR, current_dir.path)
    # set the permission after creating the directories
    # to allow directory creation inside a read-only directory
    for new_dir in new_dirs:
        new_dir.st_mode = S_IFDIR | perm_bits
    # Assign a fresh inode number to the final directory.
    self._last_ino += 1
    current_dir.st_ino = self._last_ino
    return current_dir
def retention_schedule(name, retain, strptime_format=None, timezone=None):
    '''
    Apply retention scheduling to backup storage directory.

    .. versionadded:: 2016.11.0

    :param name:
        The filesystem path to the directory containing backups to be managed.

    :param retain:
        Delete the backups, except for the ones we want to keep.
        The N below should be an integer but may also be the special value of ``all``,
        which keeps all files matching the criteria.
        All of the retain options default to None,
        which means to not keep files based on this criteria.

        :most_recent N:
            Keep the most recent N files.

        :first_of_hour N:
            For the last N hours from now, keep the first file after the hour.

        :first_of_day N:
            For the last N days from now, keep the first file after midnight.
            See also ``timezone``.

        :first_of_week N:
            For the last N weeks from now, keep the first file after Sunday midnight.

        :first_of_month N:
            For the last N months from now, keep the first file after the start of the month.

        :first_of_year N:
            For the last N years from now, keep the first file after the start of the year.

    :param strptime_format:
        A python strptime format string used to first match the filenames of backups
        and then parse the filename to determine the datetime of the file.
        https://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
        Defaults to None, which considers all files in the directory to be backups eligible for deletion
        and uses ``os.path.getmtime()`` to determine the datetime.

    :param timezone:
        The timezone to use when determining midnight.
        This is only used when datetime is pulled from ``os.path.getmtime()``.
        Defaults to ``None`` which uses the timezone from the locale.

    Usage example:

    .. code-block:: yaml

        /var/backups/example_directory:
          file.retention_schedule:
            - retain:
                most_recent: 5
                first_of_hour: 4
                first_of_day: 7
                first_of_week: 6    # NotImplemented yet.
                first_of_month: 6
                first_of_year: all
            - strptime_format: example_name_%Y%m%dT%H%M%S.tar.bz2
            - timezone: None
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {'retained': [], 'deleted': [], 'ignored': []},
           'result': True,
           'comment': ''}
    if not name:
        return _error(ret, 'Must provide name to file.retention_schedule')
    if not os.path.isdir(name):
        return _error(ret, 'Name provided to file.retention must be a directory')
    # get list of files in directory
    all_files = __salt__['file.readdir'](name)
    # if strptime_format is set, filter through the list to find names which parse and get their datetimes.
    beginning_of_unix_time = datetime(1970, 1, 1)
    def get_file_time_from_strptime(f):
        # Returns (datetime, epoch_seconds) or (None, None) if the name
        # does not match the configured pattern.
        try:
            ts = datetime.strptime(f, strptime_format)
            ts_epoch = salt.utils.dateutils.total_seconds(ts - beginning_of_unix_time)
            return (ts, ts_epoch)
        except ValueError:
            # Files which don't match the pattern are not relevant files.
            return (None, None)
    def get_file_time_from_mtime(f):
        # Returns (datetime, mtime) from the file's on-disk mtime, or
        # (None, None) for '.'/'..' and vanished files.
        if f == '.' or f == '..':
            return (None, None)
        lstat = __salt__['file.lstat'](os.path.join(name, f))
        if lstat:
            mtime = lstat['st_mtime']
            return (datetime.fromtimestamp(mtime, timezone), mtime)
        else:  # maybe it was deleted since we did the readdir?
            return (None, None)
    get_file_time = get_file_time_from_strptime if strptime_format else get_file_time_from_mtime
    # data structures are nested dicts:
    # files_by_ymd = year.month.day.hour.unixtime: filename
    # files_by_y_week_dow = year.week_of_year.day_of_week.unixtime: filename
    # http://the.randomengineer.com/2015/04/28/python-recursive-defaultdict/
    # TODO: move to an ordered dict model and reduce the number of sorts in the rest of the code?
    def dict_maker():
        return defaultdict(dict_maker)
    files_by_ymd = dict_maker()
    files_by_y_week_dow = dict_maker()
    relevant_files = set()
    ignored_files = set()
    for f in all_files:
        ts, ts_epoch = get_file_time(f)
        if ts:
            files_by_ymd[ts.year][ts.month][ts.day][ts.hour][ts_epoch] = f
            week_of_year = ts.isocalendar()[1]
            files_by_y_week_dow[ts.year][week_of_year][ts.weekday()][ts_epoch] = f
            relevant_files.add(f)
        else:
            ignored_files.add(f)
    # This is tightly coupled with the file_with_times data-structure above.
    RETAIN_TO_DEPTH = {
        'first_of_year': 1,
        'first_of_month': 2,
        'first_of_day': 3,
        'first_of_hour': 4,
        'most_recent': 5,
    }
    def get_first(fwt):
        # Descend the nested dict along the smallest keys to reach the
        # earliest file at this level.
        if isinstance(fwt, dict):
            first_sub_key = sorted(fwt.keys())[0]
            return get_first(fwt[first_sub_key])
        else:
            return set([fwt, ])
    def get_first_n_at_depth(fwt, depth, n):
        # Collect the "first file" of up to n most-recent buckets found
        # `depth` levels down in the nested dict.
        if depth <= 0:
            return get_first(fwt)
        else:
            result_set = set()
            for k in sorted(fwt.keys(), reverse=True):
                needed = n - len(result_set)
                if needed < 1:
                    break
                result_set |= get_first_n_at_depth(fwt[k], depth - 1, needed)
            return result_set
    # for each retain criteria, add filenames which match the criteria to the retain set.
    retained_files = set()
    for retention_rule, keep_count in retain.items():
        # This is kind of a hack, since 'all' should really mean all,
        # but I think it's a large enough number that even modern filesystems would
        # choke if they had this many files in a single directory.
        keep_count = sys.maxsize if 'all' == keep_count else int(keep_count)
        if 'first_of_week' == retention_rule:
            first_of_week_depth = 2  # year + week_of_year = 2
            # I'm adding 1 to keep_count below because it fixed an off-by one
            # issue in the tests. I don't understand why, and that bothers me.
            retained_files |= get_first_n_at_depth(files_by_y_week_dow,
                                                   first_of_week_depth,
                                                   keep_count + 1)
        else:
            retained_files |= get_first_n_at_depth(files_by_ymd,
                                                   RETAIN_TO_DEPTH[retention_rule],
                                                   keep_count)
    deletable_files = list(relevant_files - retained_files)
    deletable_files.sort(reverse=True)
    changes = {
        'retained': sorted(list(retained_files), reverse=True),
        'deleted': deletable_files,
        'ignored': sorted(list(ignored_files), reverse=True),
    }
    ret['changes'] = changes
    # TODO: track and report how much space was / would be reclaimed
    if __opts__['test']:
        # Test mode: report what would happen without deleting anything.
        ret['comment'] = '{0} backups would have been removed from {1}.\n'.format(len(deletable_files), name)
        if deletable_files:
            ret['result'] = None
    else:
        for f in deletable_files:
            __salt__['file.remove'](os.path.join(name, f))
        ret['comment'] = '{0} backups were removed from {1}.\n'.format(len(deletable_files), name)
        ret['changes'] = changes
    return ret
def info(name, root=None):
    '''
    Return information for the specified user

    name
        User to get the information for

    root
        Directory to chroot into

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.info root
    '''
    # With an alternate root, use the chroot-aware helper; otherwise read
    # the system shadow database directly via spwd.
    if root is not None:
        getspnam = functools.partial(_getspnam, root=root)
    else:
        getspnam = functools.partial(spwd.getspnam)
    try:
        data = getspnam(name)
        # Some platforms expose sp_nam/sp_pwd instead of sp_namp/sp_pwdp,
        # hence the hasattr() fallbacks.
        ret = {
            'name': data.sp_namp if hasattr(data, 'sp_namp') else data.sp_nam,
            'passwd': data.sp_pwdp if hasattr(data, 'sp_pwdp') else data.sp_pwd,
            'lstchg': data.sp_lstchg,
            'min': data.sp_min,
            'max': data.sp_max,
            'warn': data.sp_warn,
            'inact': data.sp_inact,
            'expire': data.sp_expire}
    except KeyError:
        # Unknown user: return the same keys with empty values instead of
        # propagating the lookup error.
        return {
            'name': '',
            'passwd': '',
            'lstchg': '',
            'min': '',
            'max': '',
            'warn': '',
            'inact': '',
            'expire': ''}
    return ret | Return information for the specified user
name
User to get the information for
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' shadow.info root |
def rotate(self, angle, axis=(1, 0, 0), axis_point=(0, 0, 0), rad=False):
    """Rotate ``Actor`` around an arbitrary `axis` passing through `axis_point`.

    :param float angle: rotation angle, in degrees unless ``rad`` is True.
    :param axis: direction of the rotation axis.
    :param axis_point: a point the rotation axis passes through.
    :param bool rad: interpret ``angle`` as radians.
    :return: self, to allow call chaining.
    """
    # Work in radians internally; np.deg2rad replaces division by the
    # truncated constant 57.29578 for full precision, and ``angle`` is no
    # longer mutated in place.
    anglerad = angle if rad else np.deg2rad(angle)
    axis = utils.versor(axis)
    # Rotation matrix from the axis-angle quaternion (a, b, c, d)
    # (Euler-Rodrigues formula).
    a = np.cos(anglerad / 2)
    b, c, d = -axis * np.sin(anglerad / 2)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    R = np.array(
        [
            [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
            [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
            [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc],
        ]
    )
    # New position: rotate the offset from axis_point, then shift back.
    rv = np.dot(R, self.GetPosition() - np.array(axis_point)) + axis_point
    # This vtk method expects degrees and only rotates about the actor's
    # own origin, so the position is corrected afterwards.
    self.RotateWXYZ(np.rad2deg(anglerad), axis[0], axis[1], axis[2])
    self.SetPosition(rv)
    if self.trail:
        self.updateTrail()
    return self
def refresh(self):
    '''Refetch instance data from the API.

    Re-populates the guide attributes in place from the
    ``/guides/<id>`` endpoint; commented-out fields are not currently
    mapped.
    '''
    response = requests.get('%s/guides/%s' % (API_BASE_URL, self.id))
    attributes = response.json()
    self.category = Category(attributes['category'])
    self.url = attributes['url']
    self.title = attributes['title']
    # An image is optional in the payload.
    if attributes['image']:
        self.image = Image(attributes['image']['id'])
    else:
        self.image = None
    self.locale = attributes['locale']
    # Wiki text arrives as raw + rendered pairs.
    self.introduction = WikiText(attributes['introduction_raw'],
                                 attributes['introduction_rendered'])
    self.conclusion = WikiText(attributes['conclusion_raw'],
                               attributes['conclusion_rendered'])
    #self.tools = attributes['tools']
    #self.parts = attributes['parts']
    self.subject = attributes['subject']
    # Timestamps are POSIX seconds, converted to naive UTC datetimes.
    self.modifiedDate = datetime.utcfromtimestamp(attributes['modified_date'])
    self.createdDate = datetime.utcfromtimestamp(attributes['created_date'])
    self.publishedDate = datetime.utcfromtimestamp(attributes['published_date'])
    #self.documents = attributes['documents']
    author = attributes['author']
    #self.author = User(author['userid'], name=author['text'])
    #self.timeRequired = attributes['timeRequired']
    self.steps = [Step(step['guideid'], step['stepid'], data=step) for step in attributes['steps']]
    self.type = attributes['type']
    self.public = attributes['public']
    self.revision = attributes['revisionid']
    self.difficulty = attributes['difficulty']
    self.prerequisites = [Guide(guide['guideid']) for guide in attributes['prerequisites']]
    # attributes['prereq_modified_date']
    #self.summary = attributes['summary']
    self.flags = [Flag.from_id(flag['flagid']) for flag in attributes['flags']] | Refetch instance data from the API. |
def write_report(self, output_file):
    """Writes a report with the test results for the current assembly

    Writes one ``<contig_id>, <values>`` line per contig in
    ``self.report``.

    Parameters
    ----------
    output_file : str
        Name of the output assembly file.
    """
    logger.debug("Writing the assembly report into: {}".format(
        output_file))
    with open(output_file, "w") as fh:
        for contig_id, vals in self.report.items():
            # Use a real newline between records; the previous "\\n"
            # wrote a literal backslash-n into the report.
            fh.write("{}, {}\n".format(contig_id, vals))
def _append(self, menu):
    '''append this menu item to a menu

    :param menu: parent wx menu that receives this item's submenu.
    '''
    # NOTE(review): ``wx`` itself is not referenced below; presumably the
    # import guarantees the wx binding is initialized -- TODO confirm.
    from wx_loader import wx
    # -1 lets wx choose an id; the submenu comes from self.wx_menu().
    menu.AppendMenu(-1, self.name, self.wx_menu()) | append this menu item to a menu |
def _handle_end_relation(self):
    """
    Handle closing relation element
    """
    # Build a Relation from the attributes gathered while parsing this
    # element and append it to the running result list.
    self._result.append(Relation(result=self._result, **self._curr))
    # Reset the accumulator for the next element.
    self._curr = {} | Handle closing relation element |
def Incr(self, x, term=1):
    """Increments the freq/prob associated with the value x.

    Args:
        x: number value
        term: how much to increment by
    """
    # Missing values start from zero before the increment is applied.
    current = self.d.get(x, 0)
    self.d[x] = current + term
def salsa20_8(B):
    '''Salsa 20/8 stream cypher; Used by BlockMix. See http://en.wikipedia.org/wiki/Salsa20

    Applies the 8-round Salsa20 core in place to ``B``, a list of 16
    unsigned 32-bit integers.
    '''
    # Each tuple is one quarter-round step:
    #   x[dest] ^= ROTL32(x[a1] + x[a2], b)
    # The first 16 steps form the column round, the last 16 the row round;
    # together they make one double round.
    rounds = (
        (4, 0, 12, 7), (8, 4, 0, 9), (12, 8, 4, 13), (0, 12, 8, 18),
        (9, 5, 1, 7), (13, 9, 5, 9), (1, 13, 9, 13), (5, 1, 13, 18),
        (14, 10, 6, 7), (2, 14, 10, 9), (6, 2, 14, 13), (10, 6, 2, 18),
        (3, 15, 11, 7), (7, 3, 15, 9), (11, 7, 3, 13), (15, 11, 7, 18),
        (1, 0, 3, 7), (2, 1, 0, 9), (3, 2, 1, 13), (0, 3, 2, 18),
        (6, 5, 4, 7), (7, 6, 5, 9), (4, 7, 6, 13), (5, 4, 7, 18),
        (11, 10, 9, 7), (8, 11, 10, 9), (9, 8, 11, 13), (10, 9, 8, 18),
        (12, 15, 14, 7), (13, 12, 15, 9), (14, 13, 12, 13), (15, 14, 13, 18),
    )

    # Work on a copy; B itself is only updated at the end.
    x = B[:]

    # 4 double rounds == 8 rounds (Salsa20/8).
    for _ in range(4):
        for destination, a1, a2, b in rounds:
            a = (x[a1] + x[a2]) & 0xffffffff
            x[destination] ^= ((a << b) | (a >> (32 - b))) & 0xffffffff

    # Add the mixed state back onto the original values (mod 2**32).
    # range() also fixes the Python-2-only xrange() the original used,
    # which raises NameError on Python 3.
    for i in range(16):
        B[i] = (B[i] + x[i]) & 0xffffffff
def check_network_role(self, public_key):
    """ Check the public key of a node on the network to see if they are
        permitted to participate. The roles being checked are the
        following, from first to last:
            "network"
            "default"

        The first role that is set will be the one used to enforce if the
        node is allowed.

        Args:
            public_key (string): The public key belonging to a node on the
                network

        Returns:
            bool: True when the node is permitted (or when no policy
                applies), False otherwise.
    """
    state_root = self._current_root_func()
    # Before a chain head exists there is no permission state to consult,
    # so everything is allowed.
    if state_root == INIT_ROOT_KEY:
        LOGGER.debug("Chain head is not set yet. Permit all.")
        return True
    # Make sure the cached view reflects the current state root.
    self._cache.update_view(state_root)
    role = self._cache.get_role("network", state_root)
    if role is None:
        # No explicit "network" role: fall back to the default policy.
        policy_name = "default"
    else:
        policy_name = role.policy_name
    policy = self._cache.get_policy(policy_name, state_root)
    if policy is not None:
        if not self._allowed(public_key, policy):
            LOGGER.debug("Node is not permitted: %s.", public_key)
            return False
    # Either no policy is configured or the key passed the policy check.
    return True | Check the public key of a node on the network to see if they are
permitted to participate. The roles being checked are the
following, from first to last:
"network"
"default"
The first role that is set will be the one used to enforce if the
node is allowed.
Args:
public_key (string): The public key belonging to a node on the
network |
def proxy_schema(self):
    """
    Get the Proxy-Schema option of a request.

    :return: the Proxy-Schema values or None if not specified by the request
    :rtype : String
    """
    wanted = defines.OptionRegistry.PROXY_SCHEME.number
    # Return the value of the first matching option, if any.
    for opt in self.options:
        if opt.number == wanted:
            return opt.value
    return None
def cfg_to_args(config):
    """Compatibility helper to use setup.cfg in setup.py.

    :param config: parsed configuration object for ``setup.cfg``.
    :return: keyword-argument dict suitable for ``setup()``.
    """
    kwargs = {}
    # Maps (setup.cfg section, option name) -> setup() keyword argument.
    opts_to_args = {
        'metadata': [
            ('name', 'name'),
            ('author', 'author'),
            ('author-email', 'author_email'),
            ('maintainer', 'maintainer'),
            ('maintainer-email', 'maintainer_email'),
            ('home-page', 'url'),
            ('summary', 'description'),
            ('description', 'long_description'),
            ('download-url', 'download_url'),
            ('classifier', 'classifiers'),
            ('platform', 'platforms'),
            ('license', 'license'),
            ('keywords', 'keywords'),
        ],
        'files': [
            ('packages_root', 'package_dir'),
            ('packages', 'packages'),
            ('modules', 'py_modules'),
            ('scripts', 'scripts'),
            ('package_data', 'package_data'),
            ('data_files', 'data_files'),
        ],
    }
    opts_to_args['metadata'].append(('requires-dist', 'install_requires'))
    # On Python 2 the sources need translating with 3to2 at build time.
    if IS_PY2K and not which('3to2'):
        kwargs['setup_requires'] = ['3to2']
    kwargs['zip_safe'] = False
    # Copy every configured option into the corresponding setup() kwarg.
    for section in opts_to_args:
        for option, argname in opts_to_args[section]:
            value = get_cfg_value(config, section, option)
            if value:
                kwargs[argname] = value
    # Post-process values whose setup() representation differs from the
    # raw configuration string.
    if 'long_description' not in kwargs:
        kwargs['long_description'] = read_description_file(config)
    if 'package_dir' in kwargs:
        kwargs['package_dir'] = {'': kwargs['package_dir']}
    if 'keywords' in kwargs:
        kwargs['keywords'] = split_elements(kwargs['keywords'])
    if 'package_data' in kwargs:
        kwargs['package_data'] = get_package_data(kwargs['package_data'])
    if 'data_files' in kwargs:
        kwargs['data_files'] = get_data_files(kwargs['data_files'])
    kwargs['version'] = get_version()
    if not IS_PY2K:
        kwargs['test_suite'] = 'test'
    return kwargs | Compatibility helper to use setup.cfg in setup.py. |
def select_color(cpcolor, evalcolor, idx=0):
    """ Selects item color for plotting.

    :param cpcolor: color for control points grid item
    :type cpcolor: str, list, tuple
    :param evalcolor: color for evaluated points grid item
    :type evalcolor: str, list, tuple
    :param idx: index of the current geometry object
    :type idx: int
    :return: a list of color values
    :rtype: list
    """
    def pick(user_color, fallback):
        # A plain string applies to every object; a list/tuple supplies
        # one color per geometry index; anything else keeps the
        # randomly generated fallback.
        if isinstance(user_color, str):
            return user_color
        if isinstance(user_color, (list, tuple)):
            return user_color[idx]
        return fallback

    # Random colors by default, overridden as requested.
    color = utilities.color_generator()
    color[0] = pick(cpcolor, color[0])
    color[1] = pick(evalcolor, color[1])
    return color
def search_alert_entities(self, **kwargs):  # noqa: E501
    """Search over a customer's non-deleted alerts  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_alert_entities(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SortableSearchRequest body:
    :return: ResponseContainerPagedAlertWithStats
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant: with _return_http_data_only set, the sync call already
    # yields just the response data, and the async call yields the
    # request thread.
    return self.search_alert_entities_with_http_info(**kwargs)  # noqa: E501
def get_aliases(self):
    """
    RETURN LIST OF {"alias":a, "index":i} PAIRS
    ALL INDEXES INCLUDED, EVEN IF NO ALIAS {"alias":Null}
    """
    # Generator yielding one record per (index, alias) combination.
    for index, desc in self.get_metadata().indices.items():
        if not desc["aliases"]:
            # An index without aliases still gets a record.
            yield wrap({"index": index})
        elif desc['aliases'][0] == index:
            # An alias equal to the index name indicates corrupt metadata.
            Log.error("should not happen")
        else:
            for a in desc["aliases"]:
                yield wrap({"index": index, "alias": a}) | RETURN LIST OF {"alias":a, "index":i} PAIRS
ALL INDEXES INCLUDED, EVEN IF NO ALIAS {"alias":Null} |
def set(self, value):
    """This parameter method attempts to set a specific value for this parameter. The value will be validated first, and if it can not be set. An error message will be set in the error property of this parameter"""
    # Guard clause: reject invalid values up front.
    if not self.validate(value):
        return False
    self.hasvalue = True
    # Floats are rounded to the nearest integer; everything else is
    # coerced with int().
    self.value = round(value) if isinstance(value, float) else int(value)
    return True
def accuracy_thresh(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor:
    "Compute accuracy when `y_pred` and `y_true` are the same size."
    # Optionally squash logits to probabilities, then compare the
    # thresholded predictions element-wise against the targets.
    probs = y_pred.sigmoid() if sigmoid else y_pred
    hits = (probs > thresh) == y_true.byte()
    return hits.float().mean()
def aggregate_groups(self, ct_agg, nr_groups, skip_key,
                     carray_factor, groupby_cols, agg_ops,
                     dtype_dict, bool_arr=None):
    '''Perform aggregation and place the result in the given ctable.

    Args:
        ct_agg (ctable): the table to hold the aggregation
        nr_groups (int): the number of groups (number of rows in output table)
        skip_key (int): index of the output row to remove from results (used for filtering)
        carray_factor: the carray for each row in the table a reference to the the unique group index
        groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
        agg_ops (list): list of tuples of the form:
            (input_col, output_col, agg_op)
                input_col (string): name of the column to act on
                output_col (string): name of the output column
                agg_op (string): aggregation operation to perform
        dtype_dict (dict): maps output column names to their dtypes
        bool_arr: a boolean array containing the filter
    '''
    # this creates the groupby columns
    for col in groupby_cols:
        result_array = ctable_ext.groupby_value(self[col], carray_factor,
                                                nr_groups, skip_key)
        if bool_arr is not None:
            # Drop the row reserved for filtered-out entries.
            result_array = np.delete(result_array, skip_key)
        ct_agg.addcol(result_array, name=col)
        del result_array
    # this creates the aggregation columns
    for input_col_name, output_col_name, agg_op in agg_ops:
        input_col = self[input_col_name]
        output_col_dtype = dtype_dict[output_col_name]
        # Reusable chunk buffer plus a zeroed per-group output buffer.
        input_buffer = np.empty(input_col.chunklen, dtype=input_col.dtype)
        output_buffer = np.zeros(nr_groups, dtype=output_col_dtype)
        if agg_op == 'sum':
            ctable_ext.aggregate_sum(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'mean':
            ctable_ext.aggregate_mean(input_col, carray_factor, nr_groups,
                                      skip_key, input_buffer, output_buffer)
        elif agg_op == 'std':
            ctable_ext.aggregate_std(input_col, carray_factor, nr_groups,
                                     skip_key, input_buffer, output_buffer)
        elif agg_op == 'count':
            ctable_ext.aggregate_count(input_col, carray_factor, nr_groups,
                                       skip_key, input_buffer, output_buffer)
        elif agg_op == 'count_distinct':
            ctable_ext.aggregate_count_distinct(input_col, carray_factor, nr_groups,
                                                skip_key, input_buffer, output_buffer)
        elif agg_op == 'sorted_count_distinct':
            ctable_ext.aggregate_sorted_count_distinct(input_col, carray_factor, nr_groups,
                                                       skip_key, input_buffer, output_buffer)
        else:
            raise KeyError('Unknown aggregation operation ' + str(agg_op))
        if bool_arr is not None:
            output_buffer = np.delete(output_buffer, skip_key)
        ct_agg.addcol(output_buffer, name=output_col_name)
        del output_buffer
    # Remove the temporary factorization column created by the caller.
    ct_agg.delcol('tmp_col_bquery__') | Perform aggregation and place the result in the given ctable.
Args:
ct_agg (ctable): the table to hold the aggregation
nr_groups (int): the number of groups (number of rows in output table)
skip_key (int): index of the output row to remove from results (used for filtering)
carray_factor: the carray for each row in the table a reference to the the unique group index
groupby_cols: the list of 'dimension' columns that are used to perform the groupby over
output_agg_ops (list): list of tuples of the form: (input_col, agg_op)
input_col (string): name of the column to act on
agg_op (int): aggregation operation to perform
bool_arr: a boolean array containing the filter |
def sys_pipes_forever(encoding=_default_encoding):
    """Redirect all C output to sys.stdout/err

    This is not a context manager; it turns on C-forwarding permanently.
    """
    global _mighty_wurlitzer
    # Only install once; later calls are no-ops.
    if _mighty_wurlitzer is None:
        _mighty_wurlitzer = sys_pipes(encoding)
        # Enter the context manager by hand and never exit it.
        _mighty_wurlitzer.__enter__() | Redirect all C output to sys.stdout/err
This is not a context manager; it turns on C-forwarding permanently. |
def consume(self):
    """
    Chops the tail off the stream starting at 0 and ending at C{tell()}.
    The stream pointer is set to 0 at the end of this function.

    @since: 0.4
    """
    try:
        data = self.read()
    except IOError:
        # Nothing readable past the current position; keep an empty tail.
        data = ''

    # Drop the already-consumed head of the stream...
    self.truncate()

    # ...and write back whatever was still unread.  (The local was
    # renamed from ``bytes`` to ``data`` to stop shadowing the builtin.)
    if len(data) > 0:
        self.write(data)

    self.seek(0)
def update_dict_key_value(
        in_dict,
        keys,
        value,
        delimiter=DEFAULT_TARGET_DELIM,
        ordered_dict=False):
    '''
    Ensures that in_dict contains the series of recursive keys defined in keys.
    Also updates the dict, that is at the end of `in_dict` traversed with `keys`,
    with `value`.

    :param dict in_dict: The dictionary to work with
    :param str keys: The delimited string with one or more keys.
    :param any value: The value to update the nested dict-key with.
    :param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
    :param bool ordered_dict: Create OrderedDicts if keys are missing.
        Default: create regular dicts.
    :return dict: Though it updates in_dict in-place.
    '''
    # Walk down to the parent of the final key, creating intermediate
    # dicts as needed.
    dict_pointer, last_key = _dict_rpartition(in_dict,
                                              keys,
                                              delimiter=delimiter,
                                              ordered_dict=ordered_dict)
    if last_key not in dict_pointer or dict_pointer[last_key] is None:
        dict_pointer[last_key] = OrderedDict() if ordered_dict else {}
    try:
        dict_pointer[last_key].update(value)
    except AttributeError:
        # Existing leaf is not a dict-like object with .update().
        raise SaltInvocationError('The last key contains a {}, which cannot update.'
                                  ''.format(type(dict_pointer[last_key])))
    except (ValueError, TypeError):
        # ``value`` is not something dict.update() accepts.
        raise SaltInvocationError('Cannot update {} with a {}.'
                                  ''.format(type(dict_pointer[last_key]), type(value)))
    return in_dict | Ensures that in_dict contains the series of recursive keys defined in keys.
Also updates the dict, that is at the end of `in_dict` traversed with `keys`,
with `value`.
:param dict in_dict: The dictionary to work with
:param str keys: The delimited string with one or more keys.
:param any value: The value to update the nested dict-key with.
:param str delimiter: The delimiter to use in `keys`. Defaults to ':'.
:param bool ordered_dict: Create OrderedDicts if keys are missing.
Default: create regular dicts.
:return dict: Though it updates in_dict in-place. |
def __update_service_status(self, statuscode):
    """Set the internal status of the service object, and notify frontend."""
    # Only act (and notify) when the status actually changes.
    if self.__service_status != statuscode:
        self.__service_status = statuscode
        self.__send_service_status_to_frontend() | Set the internal status of the service object, and notify frontend. |
def copy(self: BoardT, *, stack: Union[bool, int] = True) -> BoardT:
    """
    Creates a copy of the board.

    Defaults to copying the entire move stack. Alternatively, *stack* can
    be ``False``, or an integer to copy a limited number of moves.
    """
    board = super().copy()
    # Carry over state fields not handled by the base-class copy.
    board.chess960 = self.chess960
    board.ep_square = self.ep_square
    board.castling_rights = self.castling_rights
    board.turn = self.turn
    board.fullmove_number = self.fullmove_number
    board.halfmove_clock = self.halfmove_clock
    if stack:
        # True means the whole stack; an int keeps only the last *stack*
        # moves (moves are shallow-copied, the internal _stack is sliced).
        stack = len(self.move_stack) if stack is True else stack
        board.move_stack = [copy.copy(move) for move in self.move_stack[-stack:]]
        board._stack = self._stack[-stack:]
    return board | Creates a copy of the board.
Defaults to copying the entire move stack. Alternatively, *stack* can
be ``False``, or an integer to copy a limited number of moves. |
def get_class(classname, all=False):
    """Retrieve a class from the registry.

    :raises: marshmallow.exceptions.RegistryError if the class cannot be found
        or if there are multiple entries for the given class name.
    """
    try:
        classes = _registry[classname]
    except KeyError:
        raise RegistryError('Class with name {!r} was not found. You may need '
                            'to import the class.'.format(classname))
    if len(classes) <= 1:
        return classes[0]
    # Ambiguous name: either hand back every candidate or complain.
    if all:
        return classes
    raise RegistryError('Multiple classes with name {!r} '
                        'were found. Please use the full, '
                        'module-qualified path.'.format(classname))
def get_path(self, api_info):
    """Get the path portion of the URL to the method (for RESTful methods).

    Request path can be specified in the method, and it could have a base
    path prepended to it.

    Args:
      api_info: API information for this API, possibly including a base path.
        This is the api_info property on the class that's been annotated for
        this API.

    Returns:
      This method's request path (not including the http://.../{base_path}
      prefix).

    Raises:
      ApiConfigurationError: If the path isn't properly formatted.
    """
    path = self.__path or ''
    if path and path[0] == '/':
        # Absolute path, ignoring any prefixes. Just strip off the leading /.
        path = path[1:]
    else:
        # Relative path.
        if api_info.path:
            path = '%s%s%s' % (api_info.path, '/' if path else '', path)
    # Verify that the path seems valid.
    parts = path.split('/')
    for n, part in enumerate(parts):
        # NOTE(review): the last segment is validated against a different
        # (presumably more permissive) pattern -- confirm against the
        # definitions of _VALID_PART_RE / _VALID_LAST_PART_RE.
        r = _VALID_PART_RE if n < len(parts) - 1 else _VALID_LAST_PART_RE
        # Only templated segments ({...}) are pattern-checked here.
        if part and '{' in part and '}' in part:
            if not r.match(part):
                raise api_exceptions.ApiConfigurationError(
                    'Invalid path segment: %s (part of %s)' % (part, path))
    return path | Get the path portion of the URL to the method (for RESTful methods).
Request path can be specified in the method, and it could have a base
path prepended to it.
Args:
api_info: API information for this API, possibly including a base path.
This is the api_info property on the class that's been annotated for
this API.
Returns:
This method's request path (not including the http://.../{base_path}
prefix).
Raises:
ApiConfigurationError: If the path isn't properly formatted. |
def sort_by(self, sb):
    """Sort results"""
    # NOTE(review): ``sb`` is currently ignored -- results are always
    # sorted by pubdate, newest first.  Confirm whether ``sb`` was meant
    # to select the sort key.
    # NOTE(review): if _dataset is a plain list, list.sort() returns None
    # and this assignment clobbers the dataset -- verify that sort()
    # here returns the sorted collection.
    self._dataset = self._dataset.sort(key=lambda x: x.pubdate, reverse=True)
    return self | Sort results |
def read(filename, loader=None, implicit_tuple=True, allow_errors=False):
    """Load but don't evaluate a GCL expression from a file."""
    with open(filename, 'r') as f:
        source = f.read()
    # Parsing is delegated to reads(); the filename is forwarded so error
    # messages can point at the file.
    return reads(source,
                 filename=filename,
                 loader=loader,
                 implicit_tuple=implicit_tuple,
                 allow_errors=allow_errors)
async def get_auth(request):
    """Returns the user_id associated with a particular request.

    Args:
        request: aiohttp Request object.

    Returns:
        The user_id associated with the request, or None if no user is
        associated with the request.

    Raises:
        RuntimeError: Middleware is not installed
    """
    # Fast path: a previous call already resolved and cached the user.
    auth_val = request.get(AUTH_KEY)
    if auth_val:
        return auth_val
    auth_policy = request.get(POLICY_KEY)
    if auth_policy is None:
        raise RuntimeError('auth_middleware not installed')
    # Resolve via the configured policy and cache it on the request.
    request[AUTH_KEY] = await auth_policy.get(request)
    return request[AUTH_KEY] | Returns the user_id associated with a particular request.
Args:
request: aiohttp Request object.
Returns:
The user_id associated with the request, or None if no user is
associated with the request.
Raises:
RuntimeError: Middleware is not installed |
def raw_request(self, method, resource, access_token=None, **kwargs):
    """
    Makes a HTTP request and returns the raw
    :class:`~requests.Response` object.

    :param method: HTTP verb, e.g. ``'GET'``.
    :param resource: API resource, resolved against the base URL.
    :param access_token: optional token used for the Authorization header.
    :param kwargs: extra arguments forwarded to the session request.
    """
    # Extract caller-supplied headers first so the Authorization header
    # set below cannot be silently overridden via kwargs.
    headers = self._pop_headers(kwargs)
    headers['Authorization'] = self._get_authorization_header(access_token)
    url = self._get_resource_url(resource)
    return self.session.request(method, url, allow_redirects=True, headers=headers, **kwargs) | Makes a HTTP request and returns the raw
:class:`~requests.Response` object. |
def groupby_task_class(self):
    """
    Returns a dictionary mapping the task class to the list of tasks in the flow
    """
    # Insertion-ordered mapping: classes appear in first-encounter order.
    class2tasks = OrderedDict()
    for task in self.iflat_tasks():
        class2tasks.setdefault(task.__class__, []).append(task)
    return class2tasks
def get_relative_airmass(zenith, model='kastenyoung1989'):
    '''
    Gives the relative (not pressure-corrected) airmass.

    Gives the airmass at sea-level when given a sun zenith angle (in
    degrees). The ``model`` variable allows selection of different
    airmass models (described below). If ``model`` is not included or is
    not valid, the default model is 'kastenyoung1989'.

    Parameters
    ----------
    zenith : numeric
        Zenith angle of the sun in degrees. Note that some models use
        the apparent (refraction corrected) zenith angle, and some
        models use the true (not refraction-corrected) zenith angle. See
        model descriptions to determine which type of zenith angle is
        required. Apparent zenith angles must be calculated at sea level.
    model : string, default 'kastenyoung1989'
        Available models include the following:

        * 'simple' - secant(apparent zenith angle) -
          Note that this gives -inf at zenith=90
        * 'kasten1966' - See reference [1] - requires apparent sun zenith
        * 'youngirvine1967' - See reference [2] - requires true sun zenith
        * 'kastenyoung1989' - See reference [3] - requires apparent sun zenith
        * 'gueymard1993' - See reference [4] - requires apparent sun zenith
        * 'young1994' - See reference [5] - requires true sun zenith
        * 'pickering2002' - See reference [6] - requires apparent sun zenith

    Returns
    -------
    airmass_relative : numeric
        Relative airmass at sea level. Will return NaN values for any
        zenith angle greater than 90 degrees.

    Raises
    ------
    ValueError
        If ``model`` is not one of the supported model names.

    References
    ----------
    [1] Fritz Kasten. "A New Table and Approximation Formula for the
    Relative Optical Air Mass". Technical Report 136, Hanover, N.H.:
    U.S. Army Material Command, CRREL.

    [2] A. T. Young and W. M. Irvine, "Multicolor Photoelectric
    Photometry of the Brighter Planets," The Astronomical Journal, vol.
    72, pp. 945-950, 1967.

    [3] Fritz Kasten and Andrew Young. "Revised optical air mass tables
    and approximation formula". Applied Optics 28:4735-4738

    [4] C. Gueymard, "Critical analysis and performance assessment of
    clear sky solar irradiance models using theoretical and measured
    data," Solar Energy, vol. 51, pp. 121-138, 1993.

    [5] A. T. Young, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33,
    pp. 1108-1110, Feb 1994.

    [6] Keith A. Pickering. "The Ancient Star Catalog". DIO 12:1, 20,

    [7] Matthew J. Reno, Clifford W. Hansen and Joshua S. Stein, "Global
    Horizontal Irradiance Clear Sky Models: Implementation and Analysis"
    Sandia Report, (2012).
    '''
    # need to filter first because python 2.7 does not support raising a
    # negative number to a negative power.
    z = np.where(zenith > 90, np.nan, zenith)
    zenith_rad = np.radians(z)

    model = model.lower()

    if 'kastenyoung1989' == model:
        am = (1.0 / (np.cos(zenith_rad) +
              0.50572*(((6.07995 + (90 - z)) ** - 1.6364))))
    elif 'kasten1966' == model:
        am = 1.0 / (np.cos(zenith_rad) + 0.15*((93.885 - z) ** - 1.253))
    elif 'simple' == model:
        am = 1.0 / np.cos(zenith_rad)
    elif 'pickering2002' == model:
        am = (1.0 / (np.sin(np.radians(90 - z +
              244.0 / (165 + 47.0 * (90 - z) ** 1.1)))))
    elif 'youngirvine1967' == model:
        sec_zen = 1.0 / np.cos(zenith_rad)
        am = sec_zen * (1 - 0.0012 * (sec_zen * sec_zen - 1))
    elif 'young1994' == model:
        am = ((1.002432*((np.cos(zenith_rad)) ** 2) +
              0.148386*(np.cos(zenith_rad)) + 0.0096467) /
              (np.cos(zenith_rad) ** 3 +
              0.149864*(np.cos(zenith_rad) ** 2) +
              0.0102963*(np.cos(zenith_rad)) + 0.000303978))
    elif 'gueymard1993' == model:
        am = (1.0 / (np.cos(zenith_rad) +
              0.00176759*(z)*((94.37515 - z) ** - 1.21563)))
    else:
        # Format the message properly; the original passed (fmt, model)
        # as two ValueError args, leaving '%s' unformatted.
        raise ValueError(
            '%s is not a valid model for relativeairmass' % model)

    # Preserve the pandas index when the input was a Series.
    if isinstance(zenith, pd.Series):
        am = pd.Series(am, index=zenith.index)

    return am
def complete_path(curr_dir, last_dir):
    """Return the completion path matching the last entered component.

    A ``~`` component would not match its expanded form, so in that case
    the candidate is joined onto ``~`` instead of being compared.

    :param curr_dir: str, candidate directory name
    :param last_dir: str, last path component the user typed
    :return: str completion, or None when the candidate does not match
    """
    if not last_dir:
        return curr_dir
    if curr_dir.startswith(last_dir):
        return curr_dir
    if last_dir == '~':
        return os.path.join(last_dir, curr_dir)
If the last entered component is ~, expanded path would not
match, so return all of the available paths.
:param curr_dir: str
:param last_dir: str
:return: str |
def lock_retention_policy(self, client=None):
    """Lock the bucket's retention policy.

    :param client: (Optional) the client to use; falls back to the
        bucket's bound client when not passed.

    :raises ValueError:
        if the bucket has no metageneration (i.e., new or never reloaded);
        if the bucket has no retention policy assigned;
        if the bucket's retention policy is already locked.
    """
    if "metageneration" not in self._properties:
        # BUG FIX: this branch previously raised the "no retention policy
        # assigned" message, hiding the real problem (never reloaded).
        raise ValueError("Bucket has no metageneration: try 'reload'?")
    policy = self._properties.get("retentionPolicy")
    if policy is None:
        raise ValueError("Bucket has no retention policy assigned: try 'reload'?")
    if policy.get("isLocked"):
        raise ValueError("Bucket's retention policy is already locked.")
    client = self._require_client(client)
    # Locking is conditional on the metageneration we last observed.
    query_params = {"ifMetagenerationMatch": self.metageneration}
    if self.user_project is not None:
        query_params["userProject"] = self.user_project
    path = "/b/{}/lockRetentionPolicy".format(self.name)
    api_response = client._connection.api_request(
        method="POST", path=path, query_params=query_params, _target_object=self
    )
    self._set_properties(api_response)
:raises ValueError:
if the bucket has no metageneration (i.e., new or never reloaded);
if the bucket has no retention policy assigned;
if the bucket's retention policy is already locked. |
def flush_on_close(self, stream):
    """Disallow further writes to *stream* and flush its write buffer.

    Returns the future from a final newline write; it resolves once the
    tornado iostream buffer has been flushed.
    """
    # Must run on the ioloop's own thread.
    assert get_thread_ident() == self.ioloop_thread_id
    # Flag the stream so no new data gets queued after this point.
    stream.KATCPServer_closing = True
    # Writing a newline yields a future that fires when the buffer drains.
    return stream.write('\n')
Returns a future that resolves when the stream is flushed. |
def write_to_file(src, dst):
    """Copy every block yielded by `src` into `dst`.

    Args:
        src (iterable): iterable that yields blocks of data to write
        dst (file-like object): destination supporting ``.write(block)``

    Returns:
        int: total number of bytes written to `dst`
    """
    written = 0
    for chunk in src:
        dst.write(chunk)
        written += len(chunk)
    return written
Args:
src (iterable): iterable that yields blocks of data to write
dst (file-like object): file-like object that must support
.write(block)
Returns:
number of bytes written to `dst` |
def _set_default_versions(self, config):
    """Cache version info for tools that are expensive to query.

    Starting up GATK takes a lot of resources, so each version is looked
    up once at the start of analysis and stored on the instance.
    """
    versions = []
    for tool in ["gatk", "gatk4", "picard", "mutect"]:
        # Prefer a pre-computed version from the resources config.
        version = tz.get_in(["resources", tool, "version"], config)
        if not version:
            try:
                version = programs.get_version(tool, config=config)
            except KeyError:
                version = None
        versions.append(version)
    (self._gatk_version, self._gatk4_version,
     self._picard_version, self._mutect_version) = versions
Starting up GATK takes a lot of resources so we do it once at start of analysis. |
def p_class_constant_declaration(p):
    # NOTE: the docstring below is the PLY grammar rule and is parsed at
    # runtime -- its text must stay exactly as written.
    '''class_constant_declaration : class_constant_declaration COMMA STRING EQUALS static_scalar
                                  | CONST STRING EQUALS static_scalar'''
    # len(p) == 6 means the recursive alternative: append one more constant
    # (name at p[3], value at p[5]) to the already-built list in p[1].
    if len(p) == 6:
        p[0] = p[1] + [ast.ClassConstant(p[3], p[5], lineno=p.lineno(2))]
    else:
        # CONST alternative: start a fresh one-element list.
        p[0] = [ast.ClassConstant(p[2], p[4], lineno=p.lineno(1))]
| CONST STRING EQUALS static_scalar |
def decamel(string):
    """Split CamelCased words.

    CamelCase -> Camel Case, dromedaryCase -> dromedary Case.
    """
    # FIX: the docstring opened with four quote characters (""""), which left
    # a stray leading quote in the rendered documentation.
    # \B keeps a leading capital untouched: only uppercase letters inside a
    # word start a new chunk.
    regex = re.compile(r'(\B[A-Z][a-z]*)')
    return regex.sub(r' \1', string)
CamelCase -> Camel Case, dromedaryCase -> dromedary Case. |
def _proxy(self):
    """
    Lazily build and memoize the instance context for this instance.
    All instance actions are proxied to the context.

    :returns: SyncListPermissionContext for this SyncListPermissionInstance
    :rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext
    """
    if self._context is None:
        solution = self._solution
        self._context = SyncListPermissionContext(
            self._version,
            service_sid=solution['service_sid'],
            list_sid=solution['list_sid'],
            identity=solution['identity'],
        )
    return self._context
performing various actions. All instance actions are proxied to the context
:returns: SyncListPermissionContext for this SyncListPermissionInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_permission.SyncListPermissionContext |
def _post(url, headers, body, retries=3, timeout=3.0):
    """POST ``body`` to ``url``, retrying on timeouts before giving up.

    :param url: The URL to POST to
    :type url: str
    :param headers: The HTTP headers
    :type headers: dict
    :param body: The body of the HTTP post
    :type body: str
    :param retries: The number of times to retry before giving up
    :type retries: int
    :param timeout: The time to wait for the post to complete, before timing
        out
    :type timeout: float
    :raises requests.exceptions.Timeout: when every attempt timed out
    """
    retry = 0
    out = None
    while out is None:
        try:
            out = requests.post(url, headers=headers, data=body,
                                timeout=timeout)
        # Due to a bug in requests, the post command will sometimes fail to
        # properly wrap a socket.timeout exception in requests own exception.
        # See https://github.com/kennethreitz/requests/issues/2045
        # Until this is fixed, we need to catch both types of exceptions
        except (requests.exceptions.Timeout, socket.timeout) as exception:
            retry += 1
            if retry == retries:
                # BUG FIX: ``exception.message`` only exists on Python 2;
                # str() works everywhere and keeps the description.
                # pylint: disable=maybe-no-member
                raise requests.exceptions.Timeout(str(exception))
    return out
:param headers: The HTTP headers
:type headers: dict
:param body: The body of the HTTP post
:type body: str
:param retries: The number of times to retry before giving up
:type retries: int
:param timeout: The time to wait for the post to complete, before timing
out
:type timeout: float |
def download_data(url, signature, data_home=None, replace=False, extract=True):
    """
    Downloads the zipped data set specified at the given URL, saving it to
    the data directory specified by ``get_data_home``. This function verifies
    the download with the given signature and extracts the archive.

    Parameters
    ----------
    url : str
        The URL of the dataset on the Internet to GET

    signature : str
        The SHA 256 hash of the dataset archive being downloaded to verify
        that the dataset has been correctly downloaded

    data_home : str, optional
        The path on disk where data is stored. If not passed in, it is looked
        up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.

    replace : bool, default: False
        If the data archive already exists, replace the dataset. If this is
        False and the dataset exists, an exception is raised.

    extract : bool, default: True
        Extract the archive file after downloading it
    """
    data_home = get_data_home(data_home)

    # Get the name of the file from the URL
    basename = os.path.basename(url)
    name, _ = os.path.splitext(basename)

    # Get the archive and data directory paths
    archive = os.path.join(data_home, basename)
    datadir = os.path.join(data_home, name)

    # If the archive exists cleanup or raise override exception
    if os.path.exists(archive):
        if not replace:
            # BUG FIX: the message previously said "set replace=False", which
            # is exactly what triggers this error; it must say replace=True.
            raise DatasetsError((
                "dataset already exists at {}, set replace=True to overwrite"
            ).format(archive))
        cleanup_dataset(name, data_home=data_home)

    # Create the output directory if it does not exist
    if not os.path.exists(datadir):
        os.mkdir(datadir)

    # Fetch the response in a streaming fashion and write it to disk.
    response = urlopen(url)
    with open(archive, 'wb') as f:
        while True:
            chunk = response.read(CHUNK)
            if not chunk:
                break
            f.write(chunk)

    # Compare the signature of the archive to the expected one
    if sha256sum(archive) != signature:
        raise ValueError(
            "Download signature does not match hardcoded signature!"
        )

    # If extract, extract the zipfile.
    if extract:
        # Close the archive handle deterministically instead of leaking it.
        with zipfile.ZipFile(archive) as zf:
            zf.extractall(path=data_home)
the data directory specified by ``get_data_home``. This function verifies
the download with the given signature and extracts the archive.
Parameters
----------
url : str
The URL of the dataset on the Internet to GET
signature : str
The SHA 256 hash of the dataset archive being downloaded to verify
that the dataset has been correctly downloaded
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
replace : bool, default: False
If the data archive already exists, replace the dataset. If this is
False and the dataset exists, an exception is raised.
extract : bool, default: True
Extract the archive file after downloading it |
def matches_from_list(item, options, fuzzy=90, fname_match=True, fuzzy_fragment=None, guess=False):
    '''Returns the members of ``options`` that best matches ``item``. Will prioritize
    exact matches, then filename-style matching, then fuzzy matching. Returns a tuple of item,
    index, match type, and fuzziness (if applicable)

    :item: string to match
    :options: list of examples to test against
    :fuzzy: integer (out of 100) describing how close to match string
    :fname_match: use filename globbing to match files?
    :fuzzy_fragment: if not ``None``, will accept substring matches of
                     at least ``fuzzy_fragment`` fuzziness
    :guess: if ``True``, shortcut for setting ``fuzzy`` and ``fuzzy_fragment``
            to very lenient options
    '''
    matches = []
    if guess:
        fuzzy = min(fuzzy, 80)
        # BUG FIX: min(None, 70) raised TypeError on Python 3 (and silently
        # stayed None on Python 2, disabling fragment matching); guessing
        # must enable a lenient fragment threshold.
        fuzzy_fragment = 70 if fuzzy_fragment is None else min(fuzzy_fragment, 70)

    def option_not_in(candidate, match_list):
        # True when ``candidate`` has not already been recorded as a match.
        return all(x[0] != candidate for x in match_list)

    # Exact (case-insensitive) matches; if any exist, skip other strategies.
    # NOTE: ``xrange`` replaced with ``range`` for Python 3 compatibility.
    if item in options:
        matches += [(options[i], i, 'exact', None)
                    for i in range(len(options))
                    if options[i].lower() == item.lower()]
        return matches
    # Filename-style (glob) matches.
    if fname_match:
        matches += [(x, options.index(x), 'fname', None)
                    for x in fnmatch.filter(options, item)
                    if option_not_in(x, matches)]
    # Fuzzy whole-string matches.
    if fuzzy:
        sub_matches = []
        for i in range(len(options)):
            r = fuzz.ratio(item.lower(), options[i].lower())
            if r >= fuzzy and option_not_in(options[i], matches):
                sub_matches.append((r, i))
        matches += [(options[x[1]], x[1], 'fuzzy', x[0]) for x in sorted(sub_matches)]
    # Fuzzy substring (fragment) matches.
    if fuzzy_fragment:
        sub_matches = []
        for i in range(len(options)):
            r = fuzz.partial_ratio(item.lower(), options[i].lower())
            if r >= fuzzy_fragment and option_not_in(options[i], matches):
                sub_matches.append((r, i))
        matches += [(options[x[1]], x[1], 'fuzzy_fragment', x[0]) for x in sorted(sub_matches)]
    return matches
exact matches, then filename-style matching, then fuzzy matching. Returns a tuple of item,
index, match type, and fuzziness (if applicable)
:item: string to match
:options: list of examples to test against
:fuzzy: integer (out of 100) describing how close to match string
:fname_match: use filename globbing to match files?
:fuzzy_fragment: if not ``None``, will accept substring matches of
at least ``fuzzy_fragment`` fuzziness
:guess: if ``True``, shortcut for setting ``fuzzy`` and ``min_fragment``
to very lenient options |
def dump(dct, jfile, overwrite=False, dirlevel=0, sort_keys=True,
         indent=2, default_name='root.json', **kwargs):
    """ output dict to json

    Thin wrapper that forwards every argument unchanged to ``to_json``.

    Parameters
    ----------
    dct : dict
    jfile : str or file_like
        if file_like, must have write method
    overwrite : bool
        whether to overwrite existing files
    dirlevel : int
        if jfile is path to folder,
        defines how many key levels to set as sub-folders
    sort_keys : bool
        if true then the output of dictionaries will be sorted by key
    indent : int
        if non-negative integer, then JSON array elements and object members
        will be pretty-printed on new lines with that indent level spacing.
    default_name : str
        presumably the file name used when ``jfile`` is a folder path --
        confirm against ``to_json``
    kwargs : dict
        keywords for json.dump
    """
    to_json(dct, jfile, overwrite=overwrite, dirlevel=dirlevel,
            sort_keys=sort_keys, indent=indent,
            default_name=default_name, **kwargs)
Parameters
----------
dct : dict
jfile : str or file_like
if file_like, must have write method
overwrite : bool
whether to overwrite existing files
dirlevel : int
if jfile is path to folder,
defines how many key levels to set as sub-folders
sort_keys : bool
if true then the output of dictionaries will be sorted by key
indent : int
if non-negative integer, then JSON array elements and object members
will be pretty-printed on new lines with that indent level spacing.
kwargs : dict
keywords for json.dump |
def value(self, observations):
    """Run only the value head on the given observations and return it."""
    # input block -> value backbone -> value head, composed directly.
    return self.value_head(self.value_backbone(self.input_block(observations)))
def modify(*units):
    """Build debug commands that set desired properties on tagged units.

    NOTE: all units must be owned by the same player or the command fails.
    """
    commands = []
    for unit in units:
        # One command per attribute that carries a value; indexes follow the
        # debug_pb2.UnitValue enum declaration.
        for attribute, enum_idx in (("energy", 1), ("life", 2), ("shields", 3)):
            desired = getattr(unit, attribute)
            if not desired:
                continue  # zero/unset values need no command
            commands.append(DebugCommand(unit_value=DebugSetUnitValue(
                value=desired,
                unit_value=enum_idx,
                unit_tag=unit.tag)))
    return commands
NOTE: all units must be owned by the same player or the command fails. |
def centralManager_didDisconnectPeripheral_error_(self, manager, peripheral, error):
    """Called when a device is disconnected."""
    logger.debug('centralManager_didDisconnectPeripheral called')
    device = device_list().get(peripheral)
    if device is None:
        # Peripheral not tracked; nothing to clean up.
        return
    # Fire the disconnected event, then drop the device from the list.
    device._set_disconnected()
    device_list().remove(peripheral)
def update_description(self, description):
    """
    Update the description of an IOC.

    This creates the description node if it is not present, inserting it
    immediately after the short_description node when one exists.

    :param description: Value to set the description to
    :return: True
    """
    desc_node = self.metadata.find('description')
    if desc_node is None:
        # FIX: log text previously said "short description", copy-pasted
        # from the short_description updater.
        log.debug('Could not find description node for [{}].'.format(str(self.iocid)))
        log.debug('Creating & inserting the description node')
        desc_node = ioc_et.make_description_node(description)
        insert_index = 0
        # FIX: Element.getchildren() was removed in Python 3.9; iterating
        # the element directly is the supported equivalent.
        for child in list(self.metadata):
            if child.tag == 'short_description':
                # Place the description right after the short description.
                insert_index = self.metadata.index(child) + 1
                break
        self.metadata.insert(insert_index, desc_node)
    else:
        desc_node.text = description
    return True
This creates the description node if it is not present.
:param description: Value to set the description too
:return: True |
def uncertain_conditional(Xnew_mu, Xnew_var, feat, kern, q_mu, q_sqrt, *,
                          mean_function=None, full_output_cov=False, full_cov=False, white=False):
    """
    Calculates the conditional for uncertain inputs Xnew, p(Xnew) = N(Xnew_mu, Xnew_var).
    See ``conditional`` documentation for further reference.

    :param Xnew_mu: mean of the inputs, size N x Din
    :param Xnew_var: covariance matrix of the inputs, size N x Din x Din
    :param feat: gpflow.InducingFeature object, only InducingPoints is supported
    :param kern: gpflow kernel object.
    :param q_mu: mean inducing points, size M x Dout
    :param q_sqrt: cholesky of the covariance matrix of the inducing points, size Dout x M x M
    :param mean_function: optional gpflow mean function; when given, its
        expectations under p(Xnew) are folded into both fmean and fvar.
    :param full_output_cov: boolean wheter to compute covariance between output dimension.
        Influences the shape of return value ``fvar``. Default is False
    :param full_cov: must be False; covariance between input points is not implemented.
    :param white: boolean whether to use whitened representation. Default is False.
    :return fmean, fvar: mean and covariance of the conditional, size ``fmean`` is N x Dout,
        size ``fvar`` depends on ``full_output_cov``: if True ``f_var`` is N x Dout x Dout,
        if False then ``f_var`` is N x Dout
    """
    # TODO(VD): Tensorflow 1.7 doesn't support broadcasting in``tf.matmul`` and
    # ``tf.matrix_triangular_solve``. This is reported in issue 216.
    # As a temporary workaround, we are using ``tf.einsum`` for the matrix
    # multiplications and tiling in the triangular solves.
    # The code that should be used once the bug is resolved is added in comments.
    if not isinstance(feat, InducingPoints):
        raise NotImplementedError
    if full_cov:
        # TODO(VD): ``full_cov`` True would return a ``fvar`` of shape N x N x D x D,
        # encoding the covariance between input datapoints as well.
        # This is not implemented as this feature is only used for plotting purposes.
        raise NotImplementedError
    pXnew = Gaussian(Xnew_mu, Xnew_var)
    num_data = tf.shape(Xnew_mu)[0]  # number of new inputs (N)
    num_ind = tf.shape(q_mu)[0]  # number of inducing points (M)
    num_func = tf.shape(q_mu)[1]  # output dimension (D)
    # Keep only the lower triangle of q_sqrt (a Cholesky factor).
    q_sqrt_r = tf.matrix_band_part(q_sqrt, -1, 0)  # D x M x M
    # Kernel expectations under p(Xnew): psi-statistics.
    eKuf = tf.transpose(expectation(pXnew, (kern, feat)))  # M x N (psi1)
    Kuu = features.Kuu(feat, kern, jitter=settings.jitter)  # M x M
    Luu = tf.cholesky(Kuu)  # M x M
    if not white:
        # Whiten q(u) so the remaining algebra is identical in both cases.
        q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True)
        Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1])  # remove line once issue 216 is fixed
        q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True)
    Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True)  # M x N
    fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)
    eKff = expectation(pXnew, kern)  # N (psi0)
    eKuffu = expectation(pXnew, (kern, feat), (kern, feat))  # N x M x M (psi2)
    Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1])  # remove this line, once issue 216 is fixed
    Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True)
    Li_eKuffu_Lit = tf.matrix_triangular_solve(Luu_tiled, tf.matrix_transpose(Li_eKuffu), lower=True)  # N x M x M
    cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True)  # D x M x M
    if mean_function is None or isinstance(mean_function, mean_functions.Zero):
        e_related_to_mean = tf.zeros((num_data, num_func, num_func), dtype=settings.float_type)
    else:
        # Update mean: \mu(x) + m(x)
        fmean = fmean + expectation(pXnew, mean_function)
        # Calculate: m(x) m(x)^T + m(x) \mu(x)^T + \mu(x) m(x)^T,
        # where m(x) is the mean_function and \mu(x) is fmean
        e_mean_mean = expectation(pXnew, mean_function, mean_function)  # N x D x D
        Lit_q_mu = tf.matrix_triangular_solve(Luu, q_mu, adjoint=True)
        e_mean_Kuf = expectation(pXnew, mean_function, (kern, feat))  # N x D x M
        # einsum isn't able to infer the rank of e_mean_Kuf, hence we explicitly set the rank of the tensor:
        e_mean_Kuf = tf.reshape(e_mean_Kuf, [num_data, num_func, num_ind])
        e_fmean_mean = tf.einsum("nqm,mz->nqz", e_mean_Kuf, Lit_q_mu)  # N x D x D
        e_related_to_mean = e_fmean_mean + tf.matrix_transpose(e_fmean_mean) + e_mean_mean
    if full_output_cov:
        fvar = (
            tf.matrix_diag(tf.tile((eKff - tf.trace(Li_eKuffu_Lit))[:, None], [1, num_func])) +
            tf.matrix_diag(tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov)) +
            # tf.matrix_diag(tf.trace(tf.matmul(Li_eKuffu_Lit, cov))) +
            tf.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) -
            # tf.matmul(q_mu, tf.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) -
            fmean[:, :, None] * fmean[:, None, :] +
            e_related_to_mean
        )
    else:
        fvar = (
            (eKff - tf.trace(Li_eKuffu_Lit))[:, None] +
            tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) +
            tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
            fmean ** 2 +
            tf.matrix_diag_part(e_related_to_mean)
        )
    return fmean, fvar
See ``conditional`` documentation for further reference.
:param Xnew_mu: mean of the inputs, size N x Din
:param Xnew_var: covariance matrix of the inputs, size N x Din x Din
:param feat: gpflow.InducingFeature object, only InducingPoints is supported
:param kern: gpflow kernel object.
:param q_mu: mean inducing points, size M x Dout
:param q_sqrt: cholesky of the covariance matrix of the inducing points, size Dout x M x M
:param full_output_cov: boolean wheter to compute covariance between output dimension.
Influences the shape of return value ``fvar``. Default is False
:param white: boolean whether to use whitened representation. Default is False.
:return fmean, fvar: mean and covariance of the conditional, size ``fmean`` is N x Dout,
size ``fvar`` depends on ``full_output_cov``: if True ``f_var`` is N x Dout x Dout,
if False then ``f_var`` is N x Dout |
def google(rest):
    "Look up a phrase on google"
    API_URL = 'https://www.googleapis.com/customsearch/v1?'
    try:
        key = pmxbot.config['Google API key']
    except KeyError:
        return "Configure 'Google API key' in config"
    # Use a custom search that searches everything normally
    # http://stackoverflow.com/a/11206266/70170
    custom_search = '004862762669074674786:hddvfu0gyg0'
    query = urllib.parse.urlencode(dict(
        key=key,
        cx=custom_search,
        q=rest.strip(),
    ))
    resp = requests.get(API_URL + query)
    resp.raise_for_status()
    # Take the first search hit and format "<url> - <title>".
    top_hit = next(iter(resp.json()['items']))
    link = urllib.parse.unquote(top_hit['link'])
    return ' - '.join((link, top_hit['title']))
def parse_data_to_internal(self, data=None):
    """
    Parse raw data and save it to the internal pickle/hickle location.

    :param data: pre-parsed data to store; when None, the raw file at
        ``self.location_dat`` is parsed first.
    :raises ValueError: if ``self.filetype`` is neither 'pickle' nor 'hickle'.
    """
    if data is None:
        # ``close=True`` asks the parser to close the handle; the with
        # block guarantees it even if parsing raises.
        with open(self.location_dat, "rb") as raw:
            data = parse.getdata(raw, argnum=self.argnum, close=True)
    # BUG FIX: the original compared strings with ``is``, which only worked
    # by CPython small-string interning; ``==`` is the correct comparison.
    if self.filetype == "pickle":
        with open(self.location_internal, "wb") as out:
            pickle.dump(data, out)
    elif self.filetype == "hickle":
        import hickle
        with open(self.location_internal, "wb") as out:
            hickle.dump(data, out)
    else:
        raise ValueError(
            "Invalid filetype {} (must be {} or {})".format(
                self.filetype, "pickle", "hickle"
            )
        )
def find_line(scihdu, refhdu):
    """Obtain bin factors and corner location to extract
    and bin the appropriate subset of a reference image to
    match a science image.

    If the science image has zero offset and is the same size and
    binning as the reference image, ``same_size`` will be set to
    `True`. Otherwise, the values of ``rx``, ``ry``, ``x0``, and
    ``y0`` will be assigned.

    Normally the science image will be binned the same or more
    than the reference image. In that case, ``rx`` and ``ry``
    will be the bin size of the science image divided by the
    bin size of the reference image.

    If the binning of the reference image is greater than the
    binning of the science image, the ratios (``rx`` and ``ry``)
    of the bin sizes will be the reference image size divided by
    the science image bin size. This is not necessarily an error.

    .. note:: Translated from ``calacs/lib/findbin.c``.
        Floor division is used so the divisibility checks behave as in
        the original C code under both Python 2 and Python 3.

    Parameters
    ----------
    scihdu, refhdu : obj
        Extension HDU's of the science and reference image,
        respectively.

    Returns
    -------
    same_size : bool
        `True` if zero offset and same size and binning.

    rx, ry : int
        Ratio of bin sizes.

    x0, y0 : int
        Location of start of subimage in reference image.

    Raises
    ------
    ValueError
        Science and reference data size mismatch.
    """
    # NOTE(review): assumes the bin factors from get_corner() are integers
    # (as in the C original) -- confirm against get_corner.
    sci_bin, sci_corner = get_corner(scihdu.header)
    ref_bin, ref_corner = get_corner(refhdu.header)

    # We can use the reference image directly, without binning
    # and without extracting a subset.
    if (sci_corner[0] == ref_corner[0] and sci_corner[1] == ref_corner[1] and
            sci_bin[0] == ref_bin[0] and sci_bin[1] == ref_bin[1] and
            scihdu.data.shape[1] == refhdu.data.shape[1]):
        same_size = True
        rx = 1
        ry = 1
        x0 = 0
        y0 = 0

    # Reference image is binned more than the science image.
    elif ref_bin[0] > sci_bin[0] or ref_bin[1] > sci_bin[1]:
        same_size = False
        # BUG FIX: use floor division; Python 3 true division produced
        # floats and broke the exact offset arithmetic.
        rx = ref_bin[0] // sci_bin[0]
        ry = ref_bin[1] // sci_bin[1]
        x0 = (sci_corner[0] - ref_corner[0]) // ref_bin[0]
        y0 = (sci_corner[1] - ref_corner[1]) // ref_bin[1]

    # For subarray input images, whether they are binned or not.
    else:
        same_size = False

        # Ratio of bin sizes.
        # BUG FIX: floor division so the mismatch check below can actually
        # fire; with true division it could never be unequal.
        ratiox = sci_bin[0] // ref_bin[0]
        ratioy = sci_bin[1] // ref_bin[1]
        if (ratiox * ref_bin[0] != sci_bin[0] or
                ratioy * ref_bin[1] != sci_bin[1]):
            raise ValueError('Science and reference data size mismatch')

        # cshift is the offset in units of unbinned pixels.
        # Divide by ref_bin to convert to units of pixels in the ref image.
        cshift = (sci_corner[0] - ref_corner[0], sci_corner[1] - ref_corner[1])
        xzero = cshift[0] // ref_bin[0]
        yzero = cshift[1] // ref_bin[1]
        if (xzero * ref_bin[0] != cshift[0] or
                yzero * ref_bin[1] != cshift[1]):
            warnings.warn('Subimage offset not divisible by bin size',
                          AstropyUserWarning)
        rx = ratiox
        ry = ratioy
        x0 = xzero
        y0 = yzero

    # Ensure integer index
    x0 = int(x0)
    y0 = int(y0)

    return same_size, rx, ry, x0, y0
and bin the appropriate subset of a reference image to
match a science image.
If the science image has zero offset and is the same size and
binning as the reference image, ``same_size`` will be set to
`True`. Otherwise, the values of ``rx``, ``ry``, ``x0``, and
``y0`` will be assigned.
Normally the science image will be binned the same or more
than the reference image. In that case, ``rx`` and ``ry``
will be the bin size of the science image divided by the
bin size of the reference image.
If the binning of the reference image is greater than the
binning of the science image, the ratios (``rx`` and ``ry``)
of the bin sizes will be the reference image size divided by
the science image bin size. This is not necessarily an error.
.. note:: Translated from ``calacs/lib/findbin.c``.
Parameters
----------
scihdu, refhdu : obj
Extension HDU's of the science and reference image,
respectively.
Returns
-------
same_size : bool
`True` if zero offset and same size and binning.
rx, ry : int
Ratio of bin sizes.
x0, y0 : int
Location of start of subimage in reference image.
Raises
------
ValueError
Science and reference data size mismatch. |
def count(self, val=True):
    """Get the number of bits in the array with the specified value.

    Args:
        val: A boolean value to check against the array's value.

    Returns:
        An integer of the number of bits in the array equal to val.
    """
    total = 0
    # Sum the per-component counts across all backing components.
    for component in self._iter_components():
        total += component.count(val)
    return total
Args:
val: A boolean value to check against the array's value.
Returns:
An integer of the number of bits in the array equal to val. |
def get_min_inc_exc(self, inc_set=None, exc_set=None):
    """Return the smaller of the user's include/exclude Evidence-code sets."""
    if inc_set is None and exc_set is None:
        return {}
    included = self.get_evcodes(inc_set, exc_set)
    excluded = set(self.code2nt).difference(included)
    # Report whichever representation is more compact (ties go to include).
    if len(included) <= len(excluded):
        return {'inc': included}
    return {'exc': excluded}
def percent_encode(text, encode_set=DEFAULT_ENCODE_SET, encoding='utf-8'):
    '''Percent encode text.

    Unlike Python's ``quote``, this function accepts a blacklist instead of
    a whitelist of safe characters.
    '''
    encoded_bytes = text.encode(encoding)
    # Cache the bound __getitem__ of the per-encode-set map; it is never
    # None, so a plain .get() suffices as the cache probe.
    mapping = _percent_encoder_map_cache.get(encode_set)
    if mapping is None:
        mapping = _percent_encoder_map_cache[encode_set] = PercentEncoderMap(
            encode_set).__getitem__
    return ''.join(map(mapping, encoded_bytes))
Unlike Python's ``quote``, this function accepts a blacklist instead of
a whitelist of safe characters. |
def periodic_callback(self):
    """Periodic cleanup tasks to maintain this adapter, should be called every second
    """
    if self.stopped:
        return
    # Resume scanning once nothing is connected or in the middle of connecting.
    idle = (not self.scanning and not self._connections
            and self.connecting_count == 0)
    if idle:
        self._logger.info("Restarting scan for devices")
        self.start_scan(self._active_scan)
        self._logger.info("Finished restarting scan for devices")
async def contains_albums(self, *albums: Sequence[Union[str, Album]]) -> List[bool]:
    """Check if one or more albums is already saved in the current Spotify user’s ‘Your Music’ library.

    Parameters
    ----------
    albums : Union[Album, str]
        A sequence of artist objects or spotify IDs
    """
    # Plain strings are already IDs; anything else exposes an ``id`` attribute.
    album_ids = [album if isinstance(album, str) else album.id for album in albums]
    return await self.user.http.is_saved_album(album_ids)
Parameters
----------
albums : Union[Album, str]
A sequence of artist objects or spotify IDs |
def main(dimension, iterations):
    """Run gbest guaranteed-convergence PSO on the sphere function.

    Returns the best solution found after ``iterations`` iterations over a
    ``dimension``-dimensional [-5.12, 5.12] search domain.
    """
    solution, metrics = optimize(
        objective_function=minimize(functions.sphere),
        domain=Domain(-5.12, 5.12, dimension),
        stopping_condition=max_iterations(iterations),
        parameters={'seed': 3758117674,
                    'rho': 1.0, 'e_s': 15, 'e_f': 5},
        velocity_update=gc_velocity_update,
        parameter_update=update_rho,
        measurements=[fitness_measurement])
    return solution
def Engauge_2d_parser(lines, flat=False):
    '''Not exposed function to read a 2D file generated by engauge-digitizer;
    for curve fitting.

    Curves are separated by blank lines; each curve begins with a header
    line whose second comma-separated field is the curve's z value.
    '''
    z_values = []
    x_lists = []
    y_lists = []
    xs = []
    ys = []
    expect_header = True
    for raw in lines:
        stripped = raw.strip()
        if not stripped:
            # Blank line: the next non-blank line is a curve header.
            expect_header = True
        elif expect_header:
            z_values.append(float(raw.split(',')[1]))
            # Flush the previous curve's points, if any.
            if xs and ys:
                x_lists.append(xs)
                y_lists.append(ys)
                xs = []
                ys = []
            expect_header = False
        else:
            x, y = [float(token) for token in stripped.split(',')]
            xs.append(x)
            ys.append(y)
    x_lists.append(xs)
    y_lists.append(ys)
    if not flat:
        return z_values, x_lists, y_lists
    # Flatten into three parallel lists, repeating z per point.
    all_zs, all_xs, all_ys = [], [], []
    for z, xlist, ylist in zip(z_values, x_lists, y_lists):
        for x, y in zip(xlist, ylist):
            all_zs.append(z)
            all_xs.append(x)
            all_ys.append(y)
    return all_zs, all_xs, all_ys
for curve fitting. |
def index(self, config):
    """
    Traverse the reports config definition and instantiate reportlets.
    This method also places figures in their final location.

    :param config: iterable of sub-report configuration dicts; each may
        carry ``name``, ``title``, an ``ordering`` string of comma-separated
        BIDS entities, and a list of ``reportlets`` configs.
    """
    for subrep_cfg in config:
        # First determine whether we need to split by some ordering
        # (ie. sessions / tasks / runs), which are separated by commas.
        orderings = [s for s in subrep_cfg.get('ordering', '').strip().split(',') if s]
        queries = []
        for key in orderings:
            # Ask the BIDS layout for all values of this entity
            # (e.g. get_sessions(), get_tasks()).
            values = getattr(self.layout, 'get_%s%s' % (key, PLURAL_SUFFIX[key]))()
            if values:
                queries.append((key, values))
        if not queries:  # E.g. this is an anatomical reportlet
            reportlets = [Reportlet(self.layout, self.out_dir, config=cfg)
                          for cfg in subrep_cfg['reportlets']]
        else:
            # Do not use dictionary for queries, as we need to preserve ordering
            # of ordering columns.
            reportlets = []
            entities, values = zip(*queries)
            combinations = list(product(*values))  # e.g.: [('rest', 1), ('rest', 2)]
            for c in combinations:
                # Set a common title for this particular combination c
                title = 'Reports for: %s.' % ', '.join(
                    ['%s <span class="bids-entity">%s</span>' % (entities[i], c[i])
                     for i in range(len(c))])
                for cfg in subrep_cfg['reportlets']:
                    # Narrow each reportlet's BIDS query to this combination.
                    cfg['bids'].update({entities[i]: c[i] for i in range(len(c))})
                    rlet = Reportlet(self.layout, self.out_dir, config=cfg)
                    if not rlet.is_empty():
                        # Only the first non-empty reportlet of the
                        # combination carries the shared title.
                        rlet.title = title
                        title = None
                        reportlets.append(rlet)
        # Filter out empty reportlets
        reportlets = [r for r in reportlets if not r.is_empty()]
        if reportlets:
            sub_report = SubReport(
                subrep_cfg['name'],
                isnested=len(queries) > 0,
                reportlets=reportlets,
                title=subrep_cfg.get('title'))
            self.sections.append(sub_report)

    # Populate errors sections
    error_dir = self.out_dir / self.packagename / 'sub-{}'.format(self.subject_id) / \
        'log' / self.run_uuid
    if error_dir.is_dir():
        from ..utils.misc import read_crashfile
        self.errors = [read_crashfile(str(f)) for f in error_dir.glob('crash*.*')]
This method also places figures in their final location. |
def dict2DingoObjDict(data):
    """
    Turn a dictionary of the form used by DINGO
    (i.e., a key is mapped to either another dictionary,
    a list or a value) into a DingoObjDict.
    """
    # Round-trip through the intermediate tuple representation so nested
    # containers are rebuilt using the DingoObjDict constructor.
    return tuple2dict(dict2tuple(data), constructor=DingoObjDict)
def _next_unscanned_addr(self, alignment=None):
    """
    Find the next address that we haven't processed
    :param alignment: Assures the address returns must be aligned by this number
    :return: An address to process next, or None if all addresses have been processed
    """
    # TODO: Take care of those functions that are already generated
    if self._next_addr is None:
        # First call: start scanning from the lowest mapped address.
        self._next_addr = self._get_min_addr()
        curr_addr = self._next_addr
    else:
        # Resume one byte past the last address handed out.
        curr_addr = self._next_addr + 1
    if not self._inside_regions(curr_addr):
        # Skip over the gap to the beginning of the next memory region.
        curr_addr = self._next_address_in_regions(curr_addr)
    if curr_addr is None:
        l.debug("All addresses within memory regions have been scanned.")
        return None
    if self._seg_list.has_blocks:
        # Skip bytes already covered by known blocks.
        curr_addr = self._seg_list.next_free_pos(curr_addr)
    if alignment is not None:
        if curr_addr % alignment > 0:
            # Round up to the next aligned address.
            curr_addr = curr_addr - (curr_addr % alignment) + alignment
    # Make sure curr_addr exists in binary
    # NOTE(review): assumes self._regions iterates (start, end) pairs in
    # ascending start order -- confirm the container is sorted.
    accepted = False
    for start, end in self._regions.items():
        if start <= curr_addr < end:
            # accept
            accepted = True
            break
        if curr_addr < start:
            # accept, but we are skipping the gap
            accepted = True
            curr_addr = start
            break
    if not accepted:
        # No memory available!
        return None
    self._next_addr = curr_addr
    if self._inside_regions(curr_addr):
        l.debug("Returning a new recon address: %#x", curr_addr)
        return curr_addr
    l.debug("%#x is beyond the ending point. Returning None.", curr_addr)
    return None
def aes_encrypt(base64_encryption_key, data):
    """Encrypt data with AES-CBC and sign it with HMAC-SHA256
    Arguments:
        base64_encryption_key (str): a base64-encoded string containing an AES encryption key
            and HMAC signing key as generated by generate_encryption_key()
        data (str): a byte string containing the data to be encrypted
    Returns:
        str: the encrypted data as a byte string with the HMAC signature appended to the end
    """
    # Normalize text input to UTF-8 bytes before padding/encrypting.
    if isinstance(data, text_type):
        data = data.encode("UTF-8")
    aes_key_bytes, hmac_key_bytes = _extract_keys(base64_encryption_key)
    padded = _pad(data)
    # A fresh random IV is prepended so decryption can recover it.
    iv_bytes = os.urandom(AES_BLOCK_SIZE)
    cipher = AES.new(aes_key_bytes, mode=AES.MODE_CBC, IV=iv_bytes)
    ciphertext = iv_bytes + cipher.encrypt(padded)
    # Encrypt-then-MAC: sign IV + ciphertext and append the signature.
    signature = hmac.new(hmac_key_bytes, ciphertext, hashlib.sha256).digest()
    return as_base64(ciphertext + signature)
def count(self, val):
    """Return the number of occurrences of *val* in the list."""
    # pylint: disable=arguments-differ
    maxes = self._maxes
    if not maxes:
        # Empty sorted list: nothing to count.
        return 0
    # Leftmost sublist that could contain *val*.
    lo_pos = bisect_left(maxes, val)
    if lo_pos == len(maxes):
        # val is greater than every stored element.
        return 0
    lists = self._lists
    lo_idx = bisect_left(lists[lo_pos], val)
    # Rightmost sublist that could contain *val*.
    hi_pos = bisect_right(maxes, val)
    if hi_pos == len(maxes):
        # Occurrences run to the very end of the list.
        return self._len - self._loc(lo_pos, lo_idx)
    hi_idx = bisect_right(lists[hi_pos], val)
    if lo_pos == hi_pos:
        # Both bounds fall in the same sublist.
        return hi_idx - lo_idx
    # Convert (sublist, index) pairs to absolute positions and subtract.
    return self._loc(hi_pos, hi_idx) - self._loc(lo_pos, lo_idx)
def adapt_array(arr):
    """
    http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
    """
    # Serialize the array in .npy format and wrap the raw bytes so that
    # sqlite3 stores them as a BLOB.
    buffer = io.BytesIO()
    np.save(buffer, arr)
    return sqlite3.Binary(buffer.getvalue())
def serialize_number(x, fmt=SER_BINARY, outlen=None):
    """ Serializes `x' to a string of length `outlen' in format `fmt' """
    if fmt == SER_BINARY:
        # Base-256 big-endian encoding, one raw byte per digit.
        buf = b''
        while x:
            x, digit = divmod(x, 256)
            buf = six.int2byte(int(digit)) + buf
        pad_char = b'\0'
    else:
        assert fmt == SER_COMPACT
        # Encode using the compact alphabet, most-significant digit first.
        buf = b''
        while x:
            x, digit = divmod(x, len(COMPACT_DIGITS))
            buf = COMPACT_DIGITS[digit:digit + 1] + buf
        pad_char = COMPACT_DIGITS[0:1]
    if outlen is not None:
        # Left-pad with the zero digit up to the requested width.
        assert len(buf) <= outlen
        buf = buf.rjust(outlen, pad_char)
    return buf
def setupDock(self):
    """Setup empty Dock at startup. """
    # Build a fixed (non-movable, non-closable) dock hosting the class
    # tree and anchor it to the left edge of the main window.
    self.dock = QtWidgets.QDockWidget("Classes", self)
    dock = self.dock
    dock.setWidget(self.tree)
    dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
    self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
def get_damage(self, amount: int, target) -> int:
    """
    Override to modify the damage dealt to a target from the given amount.
    """
    if not target.immune:
        return amount
    # Immune targets absorb everything; record the blocked hit.
    self.log("%r is immune to %s for %i damage", target, self, amount)
    return 0
def _check_file_syntax(filename, temp_dir, override_lang=None, enforce=True):
    """
    Checks that the code in FILENAME parses, attempting to autodetect
    the language if necessary.

    :param filename: path of the file to check
    :param temp_dir: directory where the temporary .pyc byproduct is written
    :param override_lang: force a checker ('python2.7' or 'bash') instead of
        autodetecting from the file extension
    :param enforce: when True, raise on syntax errors; otherwise only print them

    Raises IOError if the file cannot be read.
    Raises DXSyntaxError if there is a problem and "enforce" is True.
    """
    def check_python(filename):
        # Generate a semi-recognizable name to write the pyc to. Of
        # course it's possible that different files being scanned could
        # have the same basename, so this path won't be unique, but the
        # checks don't run concurrently so this shouldn't cause any
        # problems.
        pyc_path = os.path.join(temp_dir, os.path.basename(filename) + ".pyc")
        try:
            if USING_PYTHON2:
                filename = filename.encode(sys.getfilesystemencoding())
            py_compile.compile(filename, cfile=pyc_path, doraise=True)
        finally:
            # Always remove the byproduct, even if compilation failed.
            try:
                os.unlink(pyc_path)
            except OSError:
                pass

    def check_bash(filename):
        if platform.system() == 'Windows':
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning(
                'Skipping bash syntax check due to unavailability of bash on Windows.')
        else:
            # bash -n parses without executing.
            subprocess.check_output(["/bin/bash", "-n", filename], stderr=subprocess.STDOUT)

    # Pick a checker: explicit override wins, then the file extension.
    if override_lang == 'python2.7':
        checker_fn = check_python
    elif override_lang == 'bash':
        checker_fn = check_bash
    elif filename.endswith('.py'):
        checker_fn = check_python
    elif filename.endswith('.sh'):
        checker_fn = check_bash
    else:
        # Ignore other kinds of files.
        return
    # Do a test read of the file to catch errors like the file not
    # existing or not being readable. Close the handle immediately so we
    # do not leak a file descriptor (the original left it to the GC).
    with open(filename):
        pass
    try:
        checker_fn(filename)
    except subprocess.CalledProcessError as e:
        print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
        # NOTE(review): under Python 3, e.output is bytes and
        # .strip("\n") would raise TypeError -- confirm this path is
        # only reached on Python 2 or with text-mode output.
        for line in e.output.strip("\n").split("\n"):
            print(" " + line.rstrip("\n"), file=sys.stderr)
        if enforce:
            raise DXSyntaxError(filename + " has a syntax error")
    except py_compile.PyCompileError as e:
        print(filename + " has a syntax error! Interpreter output:", file=sys.stderr)
        print(" " + e.msg.strip(), file=sys.stderr)
        if enforce:
            raise DXSyntaxError(e.msg.strip())
def quiver(
    x,
    y,
    z,
    u,
    v,
    w,
    size=default_size * 10,
    size_selected=default_size_selected * 10,
    color=default_color,
    color_selected=default_color_selected,
    marker="arrow",
    **kwargs
):
    """Create a quiver plot, which is like a scatter plot but with arrows pointing in the direction given by u, v and w.
    :param x: {x}
    :param y: {y}
    :param z: {z}
    :param u: {u_dir}
    :param v: {v_dir}
    :param w: {w_dir}
    :param size: {size}
    :param size_selected: like size, but for selected glyphs
    :param color: {color}
    :param color_selected: like color, but for selected glyphs
    :param marker: (currently only 'arrow' would make sense)
    :param kwargs: extra arguments passed on to the Scatter constructor
    :return: :any:`Scatter`
    """
    figure = gcf()
    _grow_limits(x, y, z)
    # The Scatter widget stores directions as vx/vy/vz internally;
    # reject those names so callers use u, v, w instead.
    if any(key in kwargs for key in ('vx', 'vy', 'vz')):
        raise KeyError('Please use u, v, w instead of vx, vy, vz')
    arrow_scatter = ipv.Scatter(
        x=x,
        y=y,
        z=z,
        vx=u,
        vy=v,
        vz=w,
        color=color,
        size=size,
        color_selected=color_selected,
        size_selected=size_selected,
        geo=marker,
        **kwargs
    )
    # Rebind rather than append in place -- presumably so the figure
    # widget observes the list change; verify against the widget library.
    figure.scatters = figure.scatters + [arrow_scatter]
    return arrow_scatter
def make_list(obj, cast=True):
    """
    Converts an object *obj* to a list and returns it. Objects of types *tuple* and *set* are
    converted if *cast* is *True*. Otherwise, and for all other types, *obj* is put in a new list.
    """
    # Lists are copied and generators are drained unconditionally.
    if isinstance(obj, (list, types.GeneratorType)):
        return list(obj)
    # Tuples and sets are converted only when casting is requested.
    if cast and isinstance(obj, (tuple, set)):
        return list(obj)
    # Anything else becomes a one-element list.
    return [obj]
def interpolate_to_grid(x, y, z, interp_type='linear', hres=50000,
                        minimum_neighbors=3, gamma=0.25, kappa_star=5.052,
                        search_radius=None, rbf_func='linear', rbf_smooth=0,
                        boundary_coords=None):
    r"""Interpolate given (x,y), observation (z) pairs to a grid based on given parameters.

    Parameters
    ----------
    x: array_like
        x coordinate
    y: array_like
        y coordinate
    z: array_like
        observation value
    interp_type: str
        What type of interpolation to use. Available options include:
        1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`.
        2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`.
        Default "linear".
    hres: float
        The horizontal resolution of the generated grid, given in the same units as the
        x and y parameters. Default 50000.
    minimum_neighbors: int
        Minimum number of neighbors needed to perform barnes or cressman interpolation for a
        point. Default is 3.
    gamma: float
        Adjustable smoothing parameter for the barnes interpolation. Default 0.25.
    kappa_star: float
        Response parameter for barnes interpolation, specified nondimensionally
        in terms of the Nyquist. Default 5.052
    search_radius: float
        A search radius to use for the barnes and cressman interpolation schemes.
        If search_radius is not specified, it will default to the average spacing of
        observations.
    rbf_func: str
        Specifies which function to use for Rbf interpolation.
        Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic',
        'quintic', and 'thin_plate'. Defualt 'linear'. See `scipy.interpolate.Rbf` for more
        information.
    rbf_smooth: float
        Smoothing value applied to rbf interpolation. Higher values result in more smoothing.
    boundary_coords: dictionary
        Optional dictionary containing coordinates of the study area boundary. Dictionary
        should be in format: {'west': west, 'south': south, 'east': east, 'north': north}

    Returns
    -------
    grid_x: (N, 2) ndarray
        Meshgrid for the resulting interpolation in the x dimension
    grid_y: (N, 2) ndarray
        Meshgrid for the resulting interpolation in the y dimension ndarray
    img: (M, N) ndarray
        2-dimensional array representing the interpolated values for each grid.

    Notes
    -----
    This function acts as a wrapper for `interpolate_points` to allow it to generate a regular
    grid.

    See Also
    --------
    interpolate_to_points
    """
    # Build the target grid, deriving the bounding box from the data
    # when the caller did not supply one.
    if boundary_coords is None:
        boundary_coords = get_boundary_coords(x, y)
    grid_x, grid_y = generate_grid(hres, boundary_coords)
    # Flatten observations and grid cells into (N, 2) point arrays and
    # delegate the actual interpolation to `interpolate_to_points`.
    obs_points = np.array(list(zip(x, y)))
    grid_points = generate_grid_coords(grid_x, grid_y)
    interpolated = interpolate_to_points(
        obs_points, z, grid_points, interp_type=interp_type,
        minimum_neighbors=minimum_neighbors, gamma=gamma,
        kappa_star=kappa_star, search_radius=search_radius,
        rbf_func=rbf_func, rbf_smooth=rbf_smooth)
    return grid_x, grid_y, interpolated.reshape(grid_x.shape)
def _print_pgfplot_libs_message(data):
"""Prints message to screen indicating the use of PGFPlots and its
libraries."""
pgfplotslibs = ",".join(list(data["pgfplots libs"]))
tikzlibs = ",".join(list(data["tikz libs"]))
print(70 * "=")
print("Please add the following lines to your LaTeX preamble:\n")
print("\\usepackage[utf8]{inputenc}")
print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX")
print("\\usepackage{pgfplots}")
if tikzlibs:
print("\\usetikzlibrary{" + tikzlibs + "}")
if pgfplotslibs:
print("\\usepgfplotslibrary{" + pgfplotslibs + "}")
print(70 * "=")
return | Prints message to screen indicating the use of PGFPlots and its
libraries. |
def _get_instance_repo(self, namespace):
"""
Returns the instance repository for the specified CIM namespace
within the mock repository. This is the original instance variable,
so any modifications will change the mock repository.
Validates that the namespace exists in the mock repository.
If the instance repository does not contain the namespace yet, it is
added.
Parameters:
namespace(:term:`string`): Namespace name. Must not be `None`.
Returns:
list of CIMInstance: Instance repository.
Raises:
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
"""
self._validate_namespace(namespace)
if namespace not in self.instances:
self.instances[namespace] = []
return self.instances[namespace] | Returns the instance repository for the specified CIM namespace
within the mock repository. This is the original instance variable,
so any modifications will change the mock repository.
Validates that the namespace exists in the mock repository.
If the instance repository does not contain the namespace yet, it is
added.
Parameters:
namespace(:term:`string`): Namespace name. Must not be `None`.
Returns:
list of CIMInstance: Instance repository.
Raises:
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist. |
def _check_init(self, node):
    """check that the __init__ method call super or ancestors'__init__
    method
    """
    # Nothing to do if neither relevant message is enabled.
    if not self.linter.is_message_enabled(
        "super-init-not-called"
    ) and not self.linter.is_message_enabled("non-parent-init-called"):
        return
    klass_node = node.parent.frame()
    # Ancestors whose __init__ is expected to be called from this one.
    to_call = _ancestors_to_call(klass_node)
    not_called_yet = dict(to_call)
    # Walk every call expression inside the __init__ body.
    for stmt in node.nodes_of_class(astroid.Call):
        expr = stmt.func
        if not isinstance(expr, astroid.Attribute) or expr.attrname != "__init__":
            continue
        # skip the test if using super
        if (
            isinstance(expr.expr, astroid.Call)
            and isinstance(expr.expr.func, astroid.Name)
            and expr.expr.func.name == "super"
        ):
            return
        try:
            for klass in expr.expr.infer():
                if klass is astroid.Uninferable:
                    continue
                # The infered klass can be super(), which was
                # assigned to a variable and the `__init__`
                # was called later.
                #
                # base = super()
                # base.__init__(...)
                if (
                    isinstance(klass, astroid.Instance)
                    and isinstance(klass._proxied, astroid.ClassDef)
                    and is_builtin_object(klass._proxied)
                    and klass._proxied.name == "super"
                ):
                    return
                if isinstance(klass, objects.Super):
                    return
                try:
                    # Mark this ancestor's __init__ as called.
                    del not_called_yet[klass]
                except KeyError:
                    # __init__ of a class that is not an expected
                    # ancestor: flag it unless it was expected at all.
                    if klass not in to_call:
                        self.add_message(
                            "non-parent-init-called", node=expr, args=klass.name
                        )
        except astroid.InferenceError:
            continue
    # Any ancestor left uncalled (other than object) gets a warning.
    for klass, method in not_called_yet.items():
        cls = node_frame_class(method)
        if klass.name == "object" or (cls and cls.name == "object"):
            continue
        self.add_message("super-init-not-called", args=klass.name, node=node)
def create_vehicle_icon(self, name, colour, follow=False, vehicle_type=None):
    '''add a vehicle to the map'''
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    vtype = vehicle_type if vehicle_type is not None else self.vehicle_type_name
    # Nothing to do when the same icon type is already shown for this name.
    if name in self.have_vehicle and self.have_vehicle[name] == vtype:
        return
    self.have_vehicle[name] = vtype
    icon_img = self.mpstate.map.icon(colour + vtype + '.png')
    marker = mp_slipmap.SlipIcon(
        name, (0, 0), icon_img, layer=3, rotation=0, follow=follow,
        trail=mp_slipmap.SlipTrail())
    self.mpstate.map.add_object(marker)
def clear(self):
    """Clear variable nodes for next computation."""
    for node_name in self.nodes():
        node = self.nodes[node_name]
        kind = node["type"]
        if kind == "variable":
            # Drop the stored value so stale results are not reused.
            node["value"] = None
        elif kind == "function":
            # Allow the function node to run again on the next pass.
            node["func_visited"] = False
def size(self) -> Optional[int]:
    """Size of the payload."""
    if not self._parts:
        return 0
    boundary_len = len(self._boundary)
    total = 0
    for part, encoding, te_encoding in self._parts:
        # Encoded parts, or parts of unknown length, make the total
        # unpredictable.
        if encoding or te_encoding or part.size is None:
            return None
        # b'--' + boundary + b'\r\n' + headers + body + b'\r\n'
        total += 2 + boundary_len + 2 + part.size + len(part._binary_headers) + 2
    # Closing delimiter: b'--' + boundary + b'--\r\n'
    return total + 2 + boundary_len + 4
def _absf(ins):
    ''' Absolute value of top of the stack (48 bits)
    '''
    # Load the operand, clear the sign bit, then push the result back
    # onto the float stack.
    code = _float_oper(ins.quad[2])
    code.append('res 7, e')  # Just resets the sign bit!
    code = code + _fpush()
    return code
def fit(self, scores, y_true):
    """Train calibration
    Parameters
    ----------
    scores : (n_samples, ) array-like
        Uncalibrated scores.
    y_true : (n_samples, ) array-like
        True labels (dtype=bool).
    """
    # to force equal priors, randomly select (and average over)
    # up to fifty balanced (i.e. #true == #false) calibration sets.
    if self.equal_priors:
        counter = Counter(y_true)
        positive, negative = counter[True], counter[False]
        # Work out which label dominates so we can subsample it.
        if positive > negative:
            majority, minority = True, False
            n_majority, n_minority = positive, negative
        else:
            majority, minority = False, True
            n_majority, n_minority = negative, positive
        # Cap the number of balanced subsets at 50.
        n_splits = min(50, n_majority // n_minority + 1)
        minority_index = np.where(y_true == minority)[0]
        majority_index = np.where(y_true == majority)[0]
        cv = []
        for _ in range(n_splits):
            # Each "split" tests on all minority samples plus an equally
            # sized random draw (without replacement) of the majority;
            # the train side is empty since the estimator is prefit-like.
            test_index = np.hstack([
                np.random.choice(majority_index,
                                 size=n_minority,
                                 replace=False),
                minority_index])
            cv.append(([], test_index))
        cv = _CVIterableWrapper(cv)
    # to estimate priors from the data itself, use the whole set
    else:
        cv = 'prefit'
    # NOTE(review): _Passthrough presumably forwards scores unchanged so
    # only the calibration mapping is fitted -- confirm its definition.
    self.calibration_ = CalibratedClassifierCV(
        base_estimator=_Passthrough(), method=self.method, cv=cv)
    self.calibration_.fit(scores.reshape(-1, 1), y_true)
    return self
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.