code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def _write_models_step(self, model, field=None):
    """Create or update records for *model* from the parsed hash data.

    Resolves the model reference, coerces the raw hash values to typed
    data, then dispatches to a model-specific writer when one is
    registered, falling back to the generic ``write_models`` helper.
    """
    model = get_model(model)
    data = guess_types(self.hashes)
    if model in _WRITE_MODEL:
        writer = _WRITE_MODEL[model]
    else:
        writer = partial(write_models, model)
    writer(data, field)
def shutdown(self):
    """Gracefully shutdown the consumer
    Consumer will complete any outstanding processing, commit its current
    offsets (if so configured) and stop.
    Returns deferred which callbacks with a tuple of:
    (last processed offset, last committed offset) if it was able to
    successfully commit, or errbacks with the commit failure, if any,
    or fail(RestopError) if consumer is not running.
    """
    def _handle_shutdown_commit_success(result):
        """Handle the result of the commit attempted by shutdown"""
        # Clear self._shutdown_d before firing callbacks so a re-entrant
        # shutdown() call sees no pending shutdown in flight.
        self._shutdown_d, d = None, self._shutdown_d
        self.stop()
        self._shuttingdown = False # Shutdown complete
        d.callback((self._last_processed_offset,
                    self._last_committed_offset))
    def _handle_shutdown_commit_failure(failure):
        """Handle failure of commit() attempted by shutdown"""
        # A commit was already in flight: retry once it finishes rather
        # than failing the shutdown outright.
        if failure.check(OperationInProgress):
            failure.value.deferred.addCallback(_commit_and_stop)
            return
        self._shutdown_d, d = None, self._shutdown_d
        self.stop()
        self._shuttingdown = False # Shutdown complete
        d.errback(failure)
    def _commit_and_stop(result):
        """Commit the current offsets (if needed) and stop the consumer"""
        if not self.consumer_group: # No consumer group, no committing
            return _handle_shutdown_commit_success(None)
        # Need to commit prior to stopping
        self.commit().addCallbacks(_handle_shutdown_commit_success,
                                   _handle_shutdown_commit_failure)
    # If we're not running, return a failure
    if self._start_d is None:
        return fail(Failure(
            RestopError("Shutdown called on non-running consumer")))
    # If we're called multiple times, return a failure
    if self._shutdown_d:
        return fail(Failure(
            RestopError("Shutdown called more than once.")))
    # Set our _shuttingdown flag, so our _process_message routine will stop
    # feeding new messages to the processor, and fetches won't be retried
    self._shuttingdown = True
    # Keep track of state for debugging
    self._state = '[shutting down]'
    # Create a deferred to track the shutdown
    self._shutdown_d = d = Deferred()
    # Are we waiting for the processor to complete? If so, when it's done,
    # commit our offsets and stop.
    if self._processor_d:
        self._processor_d.addCallback(_commit_and_stop)
    else:
        # No need to wait for the processor, we can commit and stop now
        _commit_and_stop(None)
    # return the deferred
    return d | Gracefully shutdown the consumer
Consumer will complete any outstanding processing, commit its current
offsets (if so configured) and stop.
Returns deferred which callbacks with a tuple of:
(last processed offset, last committed offset) if it was able to
successfully commit, or errbacks with the commit failure, if any,
or fail(RestopError) if consumer is not running. |
def _run_dnb_normalization(self, dnb_data, sza_data):
    """Scale the DNB data using a histogram equalization method.
    Args:
        dnb_data (ndarray): Day/Night Band data array
        sza_data (ndarray): Solar Zenith Angle data array
    Returns:
        ndarray: equalized copy of the DNB data (NaN where input invalid)
    Raises:
        RuntimeError: if no day, mixed or night pixels could be equalized
    """
    # convert dask arrays to DataArray objects
    dnb_data = xr.DataArray(dnb_data, dims=('y', 'x'))
    sza_data = xr.DataArray(sza_data, dims=('y', 'x'))
    # a pixel is usable only when both inputs are non-NaN
    good_mask = ~(dnb_data.isnull() | sza_data.isnull())
    output_dataset = dnb_data.where(good_mask)
    # we only need the numpy array
    output_dataset = output_dataset.values.copy()
    dnb_data = dnb_data.values
    sza_data = sza_data.values
    day_mask, mixed_mask, night_mask = make_day_night_masks(
        sza_data,
        good_mask.values,
        self.high_angle_cutoff,
        self.low_angle_cutoff,
        stepsDegrees=self.mixed_degree_step)
    did_equalize = False
    if day_mask.any():
        LOG.debug("Histogram equalizing DNB day data...")
        histogram_equalization(dnb_data, day_mask, out=output_dataset)
        did_equalize = True
    # mixed_mask appears to be a sequence of masks, one per twilight
    # step -- TODO confirm against make_day_night_masks
    if mixed_mask:
        for mask in mixed_mask:
            if mask.any():
                LOG.debug("Histogram equalizing DNB mixed data...")
                histogram_equalization(dnb_data, mask, out=output_dataset)
                did_equalize = True
    if night_mask.any():
        LOG.debug("Histogram equalizing DNB night data...")
        histogram_equalization(dnb_data, night_mask, out=output_dataset)
        did_equalize = True
    if not did_equalize:
        raise RuntimeError("No valid data found to histogram equalize")
    return output_dataset | Scale the DNB data using a histogram equalization method.
Args:
dnb_data (ndarray): Day/Night Band data array
sza_data (ndarray): Solar Zenith Angle data array |
def update_file(self, file_id, upload_id):
    """
    Send PUT request to /files/{file_id} to update the file contents to upload_id.
    :param file_id: str uuid of file
    :param upload_id: str uuid of the upload where all the file chunks where uploaded
    :return: requests.Response containing the successful result
    """
    # Only the upload id is sent, form-encoded; the server resolves the
    # uploaded chunks from it.
    put_data = {
        "upload[id]": upload_id,
    }
    return self._put("/files/" + file_id, put_data, content_type=ContentType.form) | Send PUT request to /files/{file_id} to update the file contents to upload_id and sets a label.
:param file_id: str uuid of file
:param upload_id: str uuid of the upload where all the file chunks where uploaded
:param label: str short display label for the file
:return: requests.Response containing the successful result |
def makemessages(application, locale):
    """Regenerate the gettext message files for *application*.

    Runs Django's ``makemessages`` command from inside the application
    directory, defaulting to the ``en`` locale when none is given.
    """
    from django.core.management import call_command
    locale = locale or 'en'
    with work_in(application):
        call_command('makemessages', locale=(locale,))
def _split_on_reappear(cls, df, p, id_offset):
"""Assign a new identity to an objects that appears after disappearing previously.
Works on `df` in-place.
:param df: data frame
:param p: presence
:param id_offset: offset added to new ids
:return:
"""
next_id = id_offset + 1
added_ids = []
nt = p.sum(0)
start = np.argmax(p, 0)
end = np.argmax(np.cumsum(p, 0), 0)
diff = end - start + 1
is_contiguous = np.equal(nt, diff)
for id, contiguous in enumerate(is_contiguous):
if not contiguous:
to_change = df[df.id == id]
index = to_change.index
diff = index[1:] - index[:-1]
where = np.where(np.greater(diff, 1))[0]
for w in where:
to_change.loc[w + 1:, 'id'] = next_id
added_ids.append(next_id)
next_id += 1
df[df.id == id] = to_change
return added_ids | Assign a new identity to an objects that appears after disappearing previously.
Works on `df` in-place.
:param df: data frame
:param p: presence
:param id_offset: offset added to new ids
:return: |
def forwarded(self) -> Tuple[Mapping[str, str], ...]:
    """A tuple containing all parsed Forwarded header(s).
    Makes an effort to parse Forwarded headers as specified by RFC 7239:
    - It adds one (immutable) dictionary per Forwarded 'field-value', ie
    per proxy. The element corresponds to the data in the Forwarded
    field-value added by the first proxy encountered by the client. Each
    subsequent item corresponds to those added by later proxies.
    - It checks that every value has valid syntax in general as specified
    in section 4: either a 'token' or a 'quoted-string'.
    - It un-escapes found escape sequences.
    - It does NOT validate 'by' and 'for' contents as specified in section
    6.
    - It does NOT validate 'host' contents (Host ABNF).
    - It does NOT validate 'proto' contents for valid URI scheme names.
    Returns a tuple containing one or more immutable dicts
    """
    elems = []
    for field_value in self._message.headers.getall(hdrs.FORWARDED, ()):
        length = len(field_value)
        pos = 0
        need_separator = False
        elem = {}  # type: Dict[str, str]
        elems.append(types.MappingProxyType(elem))
        # Scan left to right; str.find() returns -1 on failure, which
        # falls out of the 0 <= pos < length guard and ends the loop.
        while 0 <= pos < length:
            match = _FORWARDED_PAIR_RE.match(field_value, pos)
            if match is not None:  # got a valid forwarded-pair
                if need_separator:
                    # bad syntax here, skip to next comma
                    pos = field_value.find(',', pos)
                else:
                    name, value, port = match.groups()
                    if value[0] == '"':
                        # quoted string: remove quotes and unescape
                        value = _QUOTED_PAIR_REPLACE_RE.sub(r'\1',
                                                            value[1:-1])
                    if port:
                        value += port
                    elem[name.lower()] = value
                    pos += len(match.group(0))
                    need_separator = True
            elif field_value[pos] == ',':  # next forwarded-element
                need_separator = False
                elem = {}
                elems.append(types.MappingProxyType(elem))
                pos += 1
            elif field_value[pos] == ';':  # next forwarded-pair
                need_separator = False
                pos += 1
            elif field_value[pos] in ' \t':
                # Allow whitespace even between forwarded-pairs, though
                # RFC 7239 doesn't. This simplifies code and is in line
                # with Postel's law.
                pos += 1
            else:
                # bad syntax here, skip to next comma
                pos = field_value.find(',', pos)
    return tuple(elems) | A tuple containing all parsed Forwarded header(s).
Makes an effort to parse Forwarded headers as specified by RFC 7239:
- It adds one (immutable) dictionary per Forwarded 'field-value', ie
per proxy. The element corresponds to the data in the Forwarded
field-value added by the first proxy encountered by the client. Each
subsequent item corresponds to those added by later proxies.
- It checks that every value has valid syntax in general as specified
in section 4: either a 'token' or a 'quoted-string'.
- It un-escapes found escape sequences.
- It does NOT validate 'by' and 'for' contents as specified in section
6.
- It does NOT validate 'host' contents (Host ABNF).
- It does NOT validate 'proto' contents for valid URI scheme names.
Returns a tuple containing one or more immutable dicts |
def IsAllocated(self):
    """Determines if the file entry is allocated.

    Lazily retrieves the stat object on first use.

    Returns:
      bool: True if the file entry is allocated.
    """
    if self._stat_object is None:
        self._stat_object = self._GetStat()
    # Coerce to bool so the documented return type holds even when no
    # stat object is available (the old code could return None).
    return bool(self._stat_object and self._stat_object.is_allocated)
Returns:
bool: True if the file entry is allocated. |
def import_generated_autoboto(self):
    """Import the autoboto package generated in the build directory.

    The build directory (not ``target_dir``) is added to ``sys.path`` on
    first use so that the generated package becomes importable::

        autoboto = botogen.import_generated_autoboto()
    """
    build_dir = str(self.config.build_dir)
    if build_dir not in sys.path:
        sys.path.append(build_dir)
    return importlib.import_module(self.config.target_package)
For example:
autoboto = botogen.import_generated_autoboto() |
def function_type(self):
    """returns function type. See :class:`type_t` hierarchy"""
    # Build a free-function type from this declaration's return type and
    # the declared type of each of its arguments.
    return cpptypes.free_function_type_t(
        return_type=self.return_type,
        arguments_types=[
            arg.decl_type for arg in self.arguments]) | returns function type. See :class:`type_t` hierarchy |
def team_info():
    """Returns a list of team information dictionaries.

    Each dictionary is a plain copy of the XML attributes of one
    ``<team>`` element in the league document.
    """
    teams = __get_league_object().find('teams').findall('team')
    return [dict(team.attrib) for team in teams]
def hazard_notes(self):
    """Get the hazard specific notes defined in definitions.

    Looks up the hazard definition for this analysis' hazard keyword and
    collects the generic notes plus any notes that match the layer mode
    (classified / continuous) and hazard category (single / multiple
    event).

    .. versionadded:: 3.5

    :returns: A list like e.g. safe.definitions.hazard_land_cover[
        'notes']
    :rtype: list, None
    """
    hazard = definition(self.hazard.keywords.get('hazard'))
    keywords = self.hazard.keywords
    # (keyword-field condition, notes key) pairs, in the order the
    # resulting notes should appear; None means "always include".
    selectors = [
        (None, 'notes'),
        (('layer_mode', 'classified'), 'classified_notes'),
        (('layer_mode', 'continuous'), 'continuous_notes'),
        (('hazard_category', 'single_event'), 'single_event_notes'),
        (('hazard_category', 'multiple_event'), 'multi_event_notes'),
    ]
    notes = []
    for condition, key in selectors:
        if condition is not None:
            field, expected = condition
            if keywords[field] != expected:
                continue
        if key in hazard:
            notes += hazard[key]
    return notes
This method will do a lookup in definitions and return the
hazard definition specific notes dictionary.
This is a helper function to make it
easy to get hazard specific notes from the definitions metadata.
.. versionadded:: 3.5
:returns: A list like e.g. safe.definitions.hazard_land_cover[
'notes']
:rtype: list, None |
def update_key_bundle(key_bundle, diff):
    """
    Apply a diff specification to a KeyBundle.

    The keys listed under ``diff['add']`` are appended to the bundle;
    the keys listed under ``diff['del']`` have ``inactive_since`` set to
    the current time (they are marked inactive, not removed).

    :param key_bundle: The original KeyBundle (modified in place)
    :param diff: The difference specification, a dict that may contain
        the keys ``'add'`` and/or ``'del'``
    :return: The updated key_bundle
    """
    additions = diff.get('add')
    if additions:
        key_bundle.extend(additions)
    deletions = diff.get('del')
    if deletions:
        now = time.time()
        for key in deletions:
            key.inactive_since = now
    # BUG FIX: the docstring always promised the updated bundle but the
    # old code returned None.
    return key_bundle
The keys that are to be added are added.
The keys that should be deleted are marked as inactive.
:param key_bundle: The original KeyBundle
:param diff: The difference specification
:return: An updated key_bundle |
def common_params(task_instance, task_cls):
    """
    Grab all the values in task_instance that are found in task_cls.
    """
    if not isinstance(task_cls, task.Register):
        raise TypeError("task_cls must be an uninstantiated Task")
    instance_params = dict(task_instance.get_params())
    cls_params = dict(task_cls.get_params())
    shared_names = set(instance_params) & set(cls_params)
    shared_params = [(name, cls_params[name]) for name in shared_names]
    shared_kwargs = {name: task_instance.param_kwargs[name]
                     for name in shared_names}
    return dict(task_instance.get_param_values(shared_params, [],
                                               shared_kwargs))
def number_of_contacts(records, direction=None, more=0):
    """
    The number of contacts the user interacted with.

    Parameters
    ----------
    direction : str, optional
        Filters the records by their direction: ``None`` for all records,
        ``'in'`` for incoming, and ``'out'`` for outgoing.
    more : int, default is 0
        Counts only contacts with more than this number of interactions.
    """
    if direction is None:
        relevant = records
    else:
        relevant = (r for r in records if r.direction == direction)
    interactions = Counter(r.correspondent_id for r in relevant)
    return sum(1 for count in interactions.values() if count > more)
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
more : int, default is 0
Counts only contacts with more than this number of interactions. |
def _add_throughput(self, y, x, width, op, title, available, used):
    """ Write a single throughput measure to a row

    Draws "[<progress bar>]" at (y, x) spanning ``width`` columns; the
    bar displays ``used``/``available`` for operation ``op``.
    """
    percent = float(used) / available
    self.win.addstr(y, x, "[")
    # Because we have disabled scrolling, writing the lower right corner
    # character in a terminal can throw an error (this is inside the curses
    # implementation). If that happens (and it will only ever happen here),
    # we should just catch it and continue.
    try:
        self.win.addstr(y, x + width - 1, "]")
    except curses.error:
        pass
    x += 1
    right = "%.1f/%d:%s" % (used, available, op)
    pieces = self._progress_bar(width - 2, percent, title, right)
    # Each piece carries its own curses color pair; advance x as we draw.
    for color, text in pieces:
        self.win.addstr(y, x, text, curses.color_pair(color))
        x += len(text) | Write a single throughput measure to a row |
def fetch_dictionary(name, url=None, format=None, index=0, rename=None,
                     save=True, force_retrieve=False):
    ''' Retrieve a dictionary of text norms from the web or local storage.
    Args:
        name (str): The name of the dictionary. If no url is passed, this must
            match either one of the keys in the predefined dictionary file (see
            dictionaries.json), or the name assigned to a previous dictionary
            retrieved from a specific URL.
        url (str): The URL of dictionary file to retrieve. Optional if name
            matches an existing dictionary.
        format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data
            appropriately. Note that most forms of compression will be detected
            and handled automatically, so the format string refers only to the
            format of the decompressed file. When format is None, the format
            will be inferred from the filename.
        index (str, int): The name or numeric index of the column to used as
            the dictionary index. Passed directly to pd.ix.
        rename (dict): An optional dictionary passed to pd.rename(); can be
            used to rename columns in the loaded dictionary. Note that the
            locally-saved dictionary will retain the renamed columns.
        save (bool): Whether or not to save the dictionary locally the first
            time it is retrieved.
        force_retrieve (bool): If True, remote dictionary will always be
            downloaded, even if a local copy exists (and the local copy will
            be overwritten).
    Returns: A pandas DataFrame indexed by strings (typically words).
    '''
    file_path = os.path.join(_get_dictionary_path(), name + '.csv')
    # Fast path: reuse the locally cached copy unless forced to refetch.
    if not force_retrieve and os.path.exists(file_path):
        df = pd.read_csv(file_path)
        index = datasets[name].get('index', df.columns[index])
        return df.set_index(index)
    if name in datasets:
        url = datasets[name]['url']
        format = datasets[name].get('format', format)
        index = datasets[name].get('index', index)
        # BUG FIX: look up 'rename' inside this dataset's entry; the old
        # code queried the top-level ``datasets`` mapping, so preset
        # renames were silently ignored.
        rename = datasets[name].get('rename', rename)
    if url is None:
        raise ValueError("Dataset '%s' not found in local storage or presets, "
                         "and no download URL provided." % name)
    data = _download_dictionary(url, format=format, rename=rename)
    if isinstance(index, int):
        index = data.columns[index]
    data = data.set_index(index)
    if save:
        # ``file_path`` was already computed above; no need to rebuild it.
        data.to_csv(file_path, encoding='utf-8')
    return data
Args:
name (str): The name of the dictionary. If no url is passed, this must
match either one of the keys in the predefined dictionary file (see
dictionaries.json), or the name assigned to a previous dictionary
retrieved from a specific URL.
url (str): The URL of dictionary file to retrieve. Optional if name
matches an existing dictionary.
format (str): One of 'csv', 'tsv', 'xls', or None. Used to read data
appropriately. Note that most forms of compression will be detected
and handled automatically, so the format string refers only to the
format of the decompressed file. When format is None, the format
will be inferred from the filename.
index (str, int): The name or numeric index of the column to used as
the dictionary index. Passed directly to pd.ix.
rename (dict): An optional dictionary passed to pd.rename(); can be
used to rename columns in the loaded dictionary. Note that the
locally-saved dictionary will retain the renamed columns.
save (bool): Whether or not to save the dictionary locally the first
time it is retrieved.
force_retrieve (bool): If True, remote dictionary will always be
downloaded, even if a local copy exists (and the local copy will
be overwritten).
Returns: A pandas DataFrame indexed by strings (typically words). |
def off(self):
    """Turn off the alsa_sink sink.
    This disconnects the sink from the relevant session events.
    """
    spotifyconnect._session_instance.player.off(
        spotifyconnect.PlayerEvent.MUSIC_DELIVERY, self._on_music_delivery)
    # After detaching, no music-delivery listener should remain; this
    # sink is expected to be the only one registered.
    assert spotifyconnect._session_instance.player.num_listeners(
        spotifyconnect.PlayerEvent.MUSIC_DELIVERY) == 0
    self._close() | Turn off the alsa_sink sink.
This disconnects the sink from the relevant session events. |
def bulk_upsert(self, docs, namespace, timestamp):
    """Upsert each document in a set of documents.

    This default implementation simply delegates to ``upsert`` once per
    document; subclasses may override it to batch the writes.
    """
    for document in docs:
        self.upsert(document, namespace, timestamp)
This method may be overridden to upsert many documents at once. |
def smart_query(query, filters=None, sort_attrs=None, schema=None):
    """
    Does magic Django-ish joins like post___user___name__startswith='Bob'
    (see https://goo.gl/jAgCyM)
    Does filtering, sorting and eager loading at the same time.
    And if, say, filters and sorting need the same join, it will be done
    only once. That's why all stuff is combined in single method
    :param query: sqlalchemy.orm.query.Query
    :param filters: dict
    :param sort_attrs: List[basestring]
    :param schema: dict
    """
    if not filters:
        filters = {}
    if not sort_attrs:
        sort_attrs = []
    if not schema:
        schema = {}
    # noinspection PyProtectedMember
    root_cls = query._joinpoint_zero().class_  # for example, User or Post
    # Collect every relation path referenced by filters or sorting (sort
    # attrs may carry a leading DESC prefix, stripped here) and build one
    # alias per distinct path so shared joins are made only once.
    attrs = list(filters.keys()) + \
        list(map(lambda s: s.lstrip(DESC_PREFIX), sort_attrs))
    aliases = OrderedDict({})
    _parse_path_and_make_aliases(root_cls, '', attrs, aliases)
    loaded_paths = []
    for path, al in aliases.items():
        relationship_path = path.replace(RELATION_SPLITTER, '.')
        query = query.outerjoin(al[0], al[1]) \
            .options(contains_eager(relationship_path, alias=al[0]))
        loaded_paths.append(relationship_path)
    for attr, value in filters.items():
        if RELATION_SPLITTER in attr:
            # Filter on a related entity: resolve its alias.
            parts = attr.rsplit(RELATION_SPLITTER, 1)
            entity, attr_name = aliases[parts[0]][0], parts[1]
        else:
            entity, attr_name = root_cls, attr
        try:
            query = query.filter(*entity.filter_expr(**{attr_name: value}))
        except KeyError as e:
            raise KeyError("Incorrect filter path `{}`: {}"
                           .format(attr, e))
    for attr in sort_attrs:
        if RELATION_SPLITTER in attr:
            prefix = ''
            if attr.startswith(DESC_PREFIX):
                prefix = DESC_PREFIX
                attr = attr.lstrip(DESC_PREFIX)
            parts = attr.rsplit(RELATION_SPLITTER, 1)
            entity, attr_name = aliases[parts[0]][0], prefix + parts[1]
        else:
            entity, attr_name = root_cls, attr
        try:
            query = query.order_by(*entity.order_expr(attr_name))
        except KeyError as e:
            raise KeyError("Incorrect order path `{}`: {}".format(attr, e))
    if schema:
        # Eager-load only the parts of the schema the joins above have
        # not already pulled in.
        flat_schema = _flatten_schema(schema)
        not_loaded_part = {path: v for path, v in flat_schema.items()
                           if path not in loaded_paths}
        query = query.options(*_eager_expr_from_flat_schema(
            not_loaded_part))
    return query | Does magic Django-ish joins like post___user___name__startswith='Bob'
(see https://goo.gl/jAgCyM)
Does filtering, sorting and eager loading at the same time.
And if, say, filters and sorting need the same joinm it will be done
only one. That's why all stuff is combined in single method
:param query: sqlalchemy.orm.query.Query
:param filters: dict
:param sort_attrs: List[basestring]
:param schema: dict |
def sum_out(self, var, bn):
    """Make a factor eliminating var by summing over its values."""
    # Renamed from ``vars`` to avoid shadowing the builtin of that name.
    remaining = [X for X in self.vars if X != var]
    # For every assignment of the remaining variables, sum this factor's
    # probability over all values of the eliminated variable.
    cpt = dict((event_values(e, remaining),
                sum(self.p(extend(e, var, val))
                    for val in bn.variable_values(var)))
               for e in all_events(remaining, bn, {}))
    return Factor(remaining, cpt)
def pull_requested_reviewers(self, pr_number):
    """Get pull requested reviewers"""
    path = urijoin("pulls", str(pr_number), "requested_reviewers")
    return self.fetch_items(path, {})
def propose(self):
    """
    Proposals for positive definite matrix using random walk deviations on the Cholesky
    factor of the current value.
    """
    # Locally store size of matrix
    dims = self.stochastic.value.shape
    # Add normal deviate to value and symmetrize
    dev = rnormal(
        0,
        self.adaptive_scale_factor *
        self.proposal_sd,
        size=dims)
    # NOTE(review): symmetrize presumably mirrors the deviate in place so
    # the proposal stays symmetric -- confirm against its definition.
    symmetrize(dev)
    # Replace
    self.stochastic.value = dev + self.stochastic.value | Proposals for positive definite matrix using random walk deviations on the Cholesky
factor of the current value. |
async def unset_lock(self, resource, lock_identifier):
    """
    Tries to unset the lock on all the redis instances.

    :param resource: The resource string name to lock
    :param lock_identifier: The id of the lock. A unique string
    :return float: The elapsed time that took to lock the instances in iseconds
    :raises: LockError if the lock has not matching identifier in more then
        (N/2 - 1) instances
    """
    start = time.time()
    results = await asyncio.gather(
        *[instance.unset_lock(resource, lock_identifier)
          for instance in self.instances],
        return_exceptions=True)
    # An instance's unset_lock resolves to None on success; exceptions
    # are captured by return_exceptions and counted as failures.
    successful_removes = sum(result is None for result in results)
    elapsed_time = time.time() - start
    majority = int(len(self.instances) / 2) + 1
    self.log.debug('Lock "%s" is unset on %d/%d instances in %s seconds',
                   resource, successful_removes, len(self.instances),
                   elapsed_time)
    if successful_removes < majority:
        raise LockError('Can not release the lock')
    return elapsed_time
:param resource: The resource string name to lock
:param lock_identifier: The id of the lock. A unique string
:return float: The elapsed time that took to lock the instances in iseconds
:raises: LockError if the lock has not matching identifier in more then
(N/2 - 1) instances |
def fibs(n, m):
    """
    Yields Fibonacci numbers starting from ``n`` and ending at ``m``,
    i.e. fib(n), fib(n+1), ..., fib(m) with fib(1) == fib(2) == 1.
    """
    # BUG FIX: the old version started its loop at 3 with a = b = 1, so
    # fib(1) and fib(2) were never yielded when n < 3. Starting the
    # recurrence from (0, 1) produces every term; behavior for n >= 3 is
    # unchanged.
    a, b = 0, 1
    for x in range(1, m + 1):
        a, b = b, a + b
        if x >= n:
            yield a
async def get_tracks(self, *, limit=20, offset=0) -> List[Track]:
    """Get a list of the songs saved in the current Spotify user’s ‘Your Music’ library.
    Parameters
    ----------
    limit : Optional[int]
        The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
    offset : Optional[int]
        The index of the first item to return. Default: 0
    Returns
    -------
    List[Track]
        The saved tracks, wrapped in this library's Track model.
    """
    data = await self.user.http.saved_tracks(limit=limit, offset=offset)
    return [Track(self.__client, item['track']) for item in data['items']] | Get a list of the songs saved in the current Spotify user’s ‘Your Music’ library.
Parameters
----------
limit : Optional[int]
The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.
offset : Optional[int]
The index of the first item to return. Default: 0 |
def add_interface_router(router, subnet, profile=None):
    '''
    Adds an internal network interface to the specified router
    CLI Example:
    .. code-block:: bash
        salt '*' neutron.add_interface_router router-name subnet-name
    :param router: ID or name of the router
    :param subnet: ID or name of the subnet
    :param profile: Profile to build on (Optional)
    :return: Added interface information
    '''
    # Authenticate first; _auth builds the Neutron client from the profile.
    conn = _auth(profile)
    return conn.add_interface_router(router, subnet) | Adds an internal network interface to the specified router
CLI Example:
.. code-block:: bash
salt '*' neutron.add_interface_router router-name subnet-name
:param router: ID or name of the router
:param subnet: ID or name of the subnet
:param profile: Profile to build on (Optional)
:return: Added interface information |
def atlasdb_cache_zonefile_info( con=None, path=None ):
    """
    Load up and cache our zonefile inventory from the database
    :param con: optional existing database connection
    :param path: optional database path
    :return: the freshly built inventory (also stored in ZONEFILE_INV)
    """
    global ZONEFILE_INV, NUM_ZONEFILES, ZONEFILE_INV_LOCK
    inv = None
    # Hold the lock so concurrent readers never observe a half-updated
    # (inventory, length) pair.
    with ZONEFILE_INV_LOCK:
        inv_len = atlasdb_zonefile_inv_length( con=con, path=path )
        inv = atlas_make_zonefile_inventory( 0, inv_len, con=con, path=path )
        ZONEFILE_INV = inv
        NUM_ZONEFILES = inv_len
    return inv | Load up and cache our zonefile inventory from the database |
def update(self, ttl=values.unset):
    """
    Update the SyncStreamInstance
    :param unicode ttl: Stream TTL.
    :returns: Updated SyncStreamInstance
    :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance
    """
    # Delegate to the context proxy, which performs the actual API call.
    return self._proxy.update(ttl=ttl, ) | Update the SyncStreamInstance
:param unicode ttl: Stream TTL.
:returns: Updated SyncStreamInstance
:rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamInstance |
def concrete_emulate(self, insn):
    """
    Start executing in Unicorn from this point until we hit a syscall or reach break_unicorn_at
    :param capstone.CsInsn insn: The instruction object to emulate
    :raises InstructionEmulationError: when Unicorn rejects the instruction
    """
    # Lazily create the emulator and forward the configured stop address.
    if not self.emu:
        self.emu = ConcreteUnicornEmulator(self)
        self.emu._stop_at = self._break_unicorn_at
    try:
        self.emu.emulate(insn)
    except unicorn.UcError as e:
        if e.errno == unicorn.UC_ERR_INSN_INVALID:
            # Log the raw bytes to make the unsupported opcode debuggable.
            text_bytes = ' '.join('%02x' % x for x in insn.bytes)
            logger.error("Unimplemented instruction: 0x%016x:\t%s\t%s\t%s",
                         insn.address, text_bytes, insn.mnemonic, insn.op_str)
        # Every Unicorn failure is re-raised as the project's error type.
        raise InstructionEmulationError(str(e)) | Start executing in Unicorn from this point until we hit a syscall or reach break_unicorn_at
:param capstone.CsInsn insn: The instruction object to emulate |
def pub(self, topic, message):
    '''Publish the provided message to the provided topic'''
    # Borrow a connection from the pool just long enough to publish,
    # then wait for the broker's acknowledgement.
    with self.random_connection() as client:
        client.pub(topic, message)
        return self.wait_response() | Publish the provided message to the provided topic |
def get_bin_lookup_session(self, proxy):
    """Gets the bin lookup session.
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.resource.BinLookupSession) - a
            ``BinLookupSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_bin_lookup()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_bin_lookup()`` is ``true``.*
    """
    # Guard: the capability must be advertised by this manager.
    if not self.supports_bin_lookup():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.BinLookupSession(proxy=proxy, runtime=self._runtime) | Gets the bin lookup session.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.BinLookupSession) - a
``BinLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_bin_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bin_lookup()`` is ``true``.* |
def _captcha_form(self):
    """
    Configure this field as a captcha based on the caller's lockout state.

    Shows the ReCaptcha widget (and makes the field required) only when
    the current IP has an active, captcha-enabled, non-expired lockout;
    otherwise the field is hidden and optional.
    :return: None
    """
    try:
        last_attempt = FailedAccessAttempt.objects.get(
            ip_address=self._ip,
            is_locked=True,
            captcha_enabled=True,
            is_expired=False
        )
    except FailedAccessAttempt.DoesNotExist:
        last_attempt = None
    # Default: no lockout, so hide the captcha and make it optional.
    self.required = False
    self.widget = HiddenInput()
    if last_attempt:
        self._last_attempt = last_attempt
        # NOTE(review): is_locked is always True here because the query
        # above filters on it -- this inner check looks redundant; confirm.
        if last_attempt.is_locked:
            self.required = True
            self.widget = ReCaptcha(
                public_key=self.public_key, use_ssl=self.use_ssl, attrs=self.attrs
            ) | captcha form
:return: |
def adsb_vehicle_encode(self, ICAO_address, lat, lon, altitude_type, altitude, heading, hor_velocity, ver_velocity, callsign, emitter_type, tslc, flags, squawk):
    '''
    The location and information of an ADSB vehicle
    ICAO_address              : ICAO address (uint32_t)
    lat                       : Latitude, expressed as degrees * 1E7 (int32_t)
    lon                       : Longitude, expressed as degrees * 1E7 (int32_t)
    altitude_type             : Type from ADSB_ALTITUDE_TYPE enum (uint8_t)
    altitude                  : Altitude(ASL) in millimeters (int32_t)
    heading                   : Course over ground in centidegrees (uint16_t)
    hor_velocity              : The horizontal velocity in centimeters/second (uint16_t)
    ver_velocity              : The vertical velocity in centimeters/second, positive is up (int16_t)
    callsign                  : The callsign, 8+null (char)
    emitter_type              : Type from ADSB_EMITTER_TYPE enum (uint8_t)
    tslc                      : Time since last communication in seconds (uint8_t)
    flags                     : Flags to indicate various statuses including valid data fields (uint16_t)
    squawk                    : Squawk code (uint16_t)
    '''
    # Pure constructor wrapper: packing and sending happen elsewhere.
    return MAVLink_adsb_vehicle_message(ICAO_address, lat, lon, altitude_type, altitude, heading, hor_velocity, ver_velocity, callsign, emitter_type, tslc, flags, squawk) | The location and information of an ADSB vehicle
ICAO_address : ICAO address (uint32_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
altitude_type : Type from ADSB_ALTITUDE_TYPE enum (uint8_t)
altitude : Altitude(ASL) in millimeters (int32_t)
heading : Course over ground in centidegrees (uint16_t)
hor_velocity : The horizontal velocity in centimeters/second (uint16_t)
ver_velocity : The vertical velocity in centimeters/second, positive is up (int16_t)
callsign : The callsign, 8+null (char)
emitter_type : Type from ADSB_EMITTER_TYPE enum (uint8_t)
tslc : Time since last communication in seconds (uint8_t)
flags : Flags to indicate various statuses including valid data fields (uint16_t)
squawk : Squawk code (uint16_t) |
def deprecated(message, exception=PendingDeprecationWarning):
    """Throw a warning when a function/method will be soon deprecated

    Supports passing a ``message`` and an ``exception`` class
    (uses ``PendingDeprecationWarning`` by default). This is useful if you
    want to alternatively pass a ``DeprecationWarning`` exception for already
    deprecated functions/methods.

    Example::

        >>> import warnings
        >>> from functools import wraps
        >>> message = "this function will be deprecated in the near future"
        >>> @deprecated(message)
        ... def foo(n):
        ...     return n+n
        >>> with warnings.catch_warnings(record=True) as w:
        ...     warnings.simplefilter("always")
        ...     foo(4)
        ...     assert len(w) == 1
        ...     assert issubclass(w[-1].category, PendingDeprecationWarning)
        ...     assert message == str(w[-1].message)
        ...     assert foo.__name__ == 'foo'
        8
    """
    def make_wrapper(func):
        @wraps(func)
        def warn_then_call(*args, **kwargs):
            # stacklevel=2 attributes the warning to the caller rather
            # than to this wrapper.
            warnings.warn(message, exception, stacklevel=2)
            return func(*args, **kwargs)
        return warn_then_call
    return make_wrapper
Supports passing a ``message`` and an ``exception`` class
(uses ``PendingDeprecationWarning`` by default). This is useful if you
want to alternatively pass a ``DeprecationWarning`` exception for already
deprecated functions/methods.
Example::
>>> import warnings
>>> from functools import wraps
>>> message = "this function will be deprecated in the near future"
>>> @deprecated(message)
... def foo(n):
... return n+n
>>> with warnings.catch_warnings(record=True) as w:
... warnings.simplefilter("always")
... foo(4)
... assert len(w) == 1
... assert issubclass(w[-1].category, PendingDeprecationWarning)
... assert message == str(w[-1].message)
... assert foo.__name__ == 'foo'
8 |
def _get_namedrange(book, rangename, sheetname=None):
    """Get range from a workbook.

    A workbook can contain multiple definitions for a single name,
    as a name can be defined for the entire book or for
    a particular sheet.

    If sheet is None, the book-wide def is searched,
    otherwise sheet-local def is looked up.

    Args:
        book: An openpyxl workbook object.
        rangename (str): Range expression, such as "A1", "$G4:$K10",
            named range "NamedRange1".
        sheetname (str, optional): None for book-wide name def,
            sheet name for sheet-local named range.

    Returns:
        Range object specified by the name; a list of range objects when
        the name resolves to multiple destinations; None when no matching
        definition is found.
    """
    def cond(namedef):
        # True when namedef is a RANGE definition whose name matches
        # rangename case-insensitively, in the requested scope.
        if namedef.type.upper() == "RANGE":
            if namedef.name.upper() == rangename.upper():
                if sheetname is None:
                    # Book-wide definitions carry no localSheetId.
                    if not namedef.localSheetId:
                        return True
                else:  # sheet local name
                    sheet_id = [sht.upper() for sht in book.sheetnames].index(
                        sheetname.upper()
                    )
                    if namedef.localSheetId == sheet_id:
                        return True
        return False

    def get_destinations(name_def):
        """Workaround for the bug in DefinedName.destinations"""
        # Re-tokenize the defined-name formula ourselves and yield a
        # (sheet_name, cell_range) pair for every RANGE operand found.
        from openpyxl.formula import Tokenizer
        from openpyxl.utils.cell import SHEETRANGE_RE

        if name_def.type == "RANGE":
            tok = Tokenizer("=" + name_def.value)
            for part in tok.items:
                if part.subtype == "RANGE":
                    m = SHEETRANGE_RE.match(part.value)
                    if m.group("quoted"):
                        sheet_name = m.group("quoted")
                    else:
                        sheet_name = m.group("notquoted")
                    yield sheet_name, m.group("cells")

    # First defined name satisfying cond(), or None when there is none.
    namedef = next(
        (item for item in book.defined_names.definedName if cond(item)), None
    )
    if namedef is None:
        return None
    dests = get_destinations(namedef)
    xlranges = []
    sheetnames_upper = [name.upper() for name in book.sheetnames]
    for sht, addr in dests:
        if sheetname:
            # For sheet-local lookups, force the requested sheet name.
            sht = sheetname
        index = sheetnames_upper.index(sht.upper())
        xlranges.append(book.worksheets[index][addr])
    if len(xlranges) == 1:
        # Single destination: return the range itself rather than a list.
        return xlranges[0]
    else:
        return xlranges
A workbook can contain multiple definitions for a single name,
as a name can be defined for the entire book or for
a particular sheet.
If sheet is None, the book-wide def is searched,
otherwise sheet-local def is looked up.
Args:
book: An openpyxl workbook object.
rangename (str): Range expression, such as "A1", "$G4:$K10",
named range "NamedRange1".
sheetname (str, optional): None for book-wide name def,
sheet name for sheet-local named range.
Returns:
Range object specified by the name. |
def getTotalExpectedOccurrencesTicks_2_5(ticks):
    """
    Extract a set of tick locations and labels. The input ticks are assumed to
    mean "How many *other* occurrences are there of the sensed feature?" but we
    want to show how many *total* occurrences there are, so 1 is added to each
    label. Tick 2 is labelled, then every multiple of 5 (5, 10, 15, 20, ...);
    all other ticks receive an empty label.

    @param ticks
    A list of (label, location) pairs, typically calculated by one of the above
    generate*List functions.
    """
    locs = []
    labels = []
    for label, loc in ticks:
        locs.append(loc)
        total = label + 1
        labels.append(str(total) if total == 2 or total % 5 == 0 else "")
    return locs, labels
mean "How many *other* occurrences are there of the sensed feature?" but we
want to show how many *total* occurrences there are. So we add 1.
We label tick 2, and then 5, 10, 15, 20, ...
@param ticks
A list of ticks, typically calculated by one of the above generate*List functions. |
def createSummary(self, log):
    """
    Match log lines against warningPattern.

    Warnings are collected into another log for this step, and the
    build-wide 'warnings-count' is updated."""
    count = self.warnCount
    # Expose the matching lines as a dedicated "warnings" log, but only
    # when at least one warning was actually seen.
    if count:
        self.addCompleteLog("warnings (%d)" % count,
                            "\n".join(self.loggedWarnings) + "\n")
    # Accumulate into the per-step statistic and the build-wide property.
    self.setStatistic('warnings', self.getStatistic('warnings', 0) + count)
    self.setProperty(
        "warnings-count",
        self.getProperty("warnings-count", 0) + count,
        "WarningCountingShellCommand")
Warnings are collected into another log for this step, and the
build-wide 'warnings-count' is updated. |
def get_space(self, current_result_list, current_query, param_space, runs,
              result_parsing_function):
    """
    Convert a parameter space specification to a nested array structure
    representing the space. In other words, if the parameter space is::

        param_space = {
            'a': [1, 2],
            'b': [3, 4]
        }

    the function will return a structure like the following::

        [
            [
                {'a': 1, 'b': 3},
                {'a': 1, 'b': 4}
            ],
            [
                {'a': 2, 'b': 3},
                {'a': 2, 'b': 4}
            ]
        ]

    where the first dimension represents a, and the second dimension
    represents b. This nested-array structure can then be easily converted
    to a numpy array via np.array().

    Args:
        current_result_list (list): results still under consideration at
            this level of the recursion.
        current_query (dict): the query to apply to the structure.
        param_space (dict): representation of the parameter space.
        runs (int): the number of runs to query for each parameter
            combination.
        result_parsing_function (function): user-defined function to call
            on results, typically used to parse data and outputting
            metrics. Defaults to CampaignManager.files_in_dictionary when
            None.
    """
    if result_parsing_function is None:
        result_parsing_function = CampaignManager.files_in_dictionary
    # Note that this function operates recursively.
    # Base case: no free parameters left; parse up to `runs` results that
    # satisfy the fully-specified query.
    if not param_space:
        results = [r for r in current_result_list if
                   self.satisfies_query(r, current_query)]
        parsed = []
        for r in results[:runs]:
            parsed.append(result_parsing_function(r))
        return parsed
    space = []
    # Recursive case: take the first remaining parameter and expand one
    # list entry per value it can take.
    [key, value] = list(param_space.items())[0]
    # Iterate over dictionary values
    for v in value:
        next_query = deepcopy(current_query)
        temp_query = deepcopy(current_query)
        # For each list, recur 'fixing' that dimension.
        next_query[key] = v
        next_param_space = deepcopy(param_space)
        del(next_param_space[key])
        # Pre-filter the result list with the fixed value so deeper levels
        # scan fewer entries.
        temp_query[key] = v
        temp_result_list = [r for r in current_result_list if
                            self.satisfies_query(r, temp_query)]
        space.append(self.get_space(temp_result_list, next_query,
                                    next_param_space, runs,
                                    result_parsing_function))
    return space
representing the space. In other words, if the parameter space is::
param_space = {
'a': [1, 2],
'b': [3, 4]
}
the function will return a structure like the following::
[
[
{'a': 1, 'b': 3},
{'a': 1, 'b': 4}
],
[
{'a': 2, 'b': 3},
{'a': 2, 'b': 4}
]
]
where the first dimension represents a, and the second dimension
represents b. This nested-array structure can then be easily converted
to a numpy array via np.array().
Args:
current_query (dict): the query to apply to the structure.
param_space (dict): representation of the parameter space.
result_parsing_function (function): user-defined function to call
on results, typically used to parse data and outputting
metrics.
runs (int): the number of runs to query for each parameter
combination. |
def to_vobjects(self, filename, uids=None):
    """Return iCal objects and etags of all Remind entries in uids

    filename -- the remind file
    uids -- the UIDs of the Remind lines (all entries of the file if None)
    """
    self._update()
    reminders = self._reminders[filename]
    if not uids:
        uids = reminders
    items = []
    for uid in uids:
        entry = reminders[uid]
        cal = iCalendar()
        self._gen_vevent(entry, cal.add('vevent'))
        # Etag is the MD5 of the raw remind line, wrapped in quotes.
        digest = md5()
        digest.update(entry['line'].encode("utf-8"))
        items.append((uid, cal, '"%s"' % digest.hexdigest()))
    return items
filename -- the remind file
uids -- the UIDs of the Remind lines (all if None) |
def _dx(self):
"""Return integer width of this shape's path in local units."""
min_x = max_x = self._start_x
for drawing_operation in self:
if hasattr(drawing_operation, 'x'):
min_x = min(min_x, drawing_operation.x)
max_x = max(max_x, drawing_operation.x)
return max_x - min_x | Return integer width of this shape's path in local units. |
def search(self, query=None, args=None):
    '''query a Singularity registry for a list of images.
    If query is None, collections are listed.

    EXAMPLE QUERIES:
    '''
    # You can optionally better parse the image uri (query), e.g.
    # names = parse_image_name(remove_uri(query)), but it isn't necessary.
    if query is None:
        # Default to listing (searching) all things.
        return self._search_all()
    # A provided query narrows the search to matching containers.
    return self._container_query(query)
If query is None, collections are listed.
EXAMPLE QUERIES: |
def _try_passwordless_paramiko(server, keyfile):
    """Try passwordless login with paramiko.

    Returns True when key-based authentication to ``server`` succeeds and
    False when the server rejects the attempt.  Raises ImportError when
    paramiko is not installed.
    """
    if paramiko is None:
        # BUGFIX: message previously read "unavaliable".
        msg = "Paramiko unavailable, "
        if sys.platform == 'win32':
            msg += "Paramiko is required for ssh tunneled connections on Windows."
        else:
            msg += "use OpenSSH."
        raise ImportError(msg)
    username, server, port = _split_server(server)
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    try:
        client.connect(server, port, username=username, key_filename=keyfile,
                       look_for_keys=True)
    except paramiko.AuthenticationException:
        return False
    else:
        return True
    finally:
        # BUGFIX: always release the client; previously it was only closed
        # on success, leaking the transport on authentication failure.
        # close() is a no-op when the connection was never established.
        client.close()
def apply_transformation(self, structure, return_ranked_list=False):
    """
    Apply the transformation.

    Args:
        structure: input structure
        return_ranked_list (bool): Whether or not multiple structures are
            returned. If return_ranked_list is a number, that number of
            structures is returned.

    Returns:
        Depending on returned_ranked list, either a transformed structure
        or a list of dictionaries, where each dictionary is of the form
        {"structure" = .... , "other_arguments"}
        the key "transformation" is reserved for the transformation that
        was actually applied to the structure.
        This transformation is parsed by the alchemy classes for generating
        a more specific transformation history. Any other information will
        be stored in the transformation_parameters dictionary in the
        transmuted structure class.

    Raises:
        ValueError: if a fraction does not correspond to an integer number
            of sites, or if self.algo is not a recognised algorithm.
    """
    # Work out how many sites to remove per index group, and count the
    # total number of removal combinations (used for logging only).
    num_remove_dict = {}
    total_combis = 0
    for indices, frac in zip(self.indices, self.fractions):
        num_to_remove = len(indices) * frac
        # The requested fraction must map onto a whole number of sites.
        if abs(num_to_remove - int(round(num_to_remove))) > 1e-3:
            raise ValueError("Fraction to remove must be consistent with "
                             "integer amounts in structure.")
        else:
            num_to_remove = int(round(num_to_remove))
        num_remove_dict[tuple(indices)] = num_to_remove
        n = len(indices)
        # Binomial coefficient n choose num_to_remove, via factorials.
        total_combis += int(round(math.factorial(n) /
                                  math.factorial(num_to_remove) /
                                  math.factorial(n - num_to_remove)))
    self.logger.debug("Total combinations = {}".format(total_combis))
    # return_ranked_list may be a bool or an int; a non-numeric value
    # falls back to returning a single structure.
    try:
        num_to_return = int(return_ranked_list)
    except ValueError:
        num_to_return = 1
    num_to_return = max(1, num_to_return)
    self.logger.debug("Will return {} best structures."
                      .format(num_to_return))
    # Dispatch to the ordering algorithm selected at construction time.
    if self.algo == PartialRemoveSitesTransformation.ALGO_FAST:
        all_structures = self.fast_ordering(structure, num_remove_dict,
                                            num_to_return)
    elif self.algo == PartialRemoveSitesTransformation.ALGO_COMPLETE:
        all_structures = self.complete_ordering(structure, num_remove_dict)
    elif self.algo == PartialRemoveSitesTransformation.ALGO_BEST_FIRST:
        all_structures = self.best_first_ordering(structure,
                                                  num_remove_dict)
    elif self.algo == PartialRemoveSitesTransformation.ALGO_ENUMERATE:
        all_structures = self.enumerate_ordering(structure)
    else:
        raise ValueError("Invalid algo.")
    opt_s = all_structures[0]["structure"]
    return opt_s if not return_ranked_list \
        else all_structures[0:num_to_return]
Args:
structure: input structure
return_ranked_list (bool): Whether or not multiple structures are
returned. If return_ranked_list is a number, that number of
structures is returned.
Returns:
Depending on returned_ranked list, either a transformed structure
or a list of dictionaries, where each dictionary is of the form
{"structure" = .... , "other_arguments"}
the key "transformation" is reserved for the transformation that
was actually applied to the structure.
This transformation is parsed by the alchemy classes for generating
a more specific transformation history. Any other information will
be stored in the transformation_parameters dictionary in the
transmuted structure class. |
def rm(self, typ, id):
    """
    Remove the object of type ``typ`` identified by ``id``.

    Issues a DELETE request and returns the parsed response.
    """
    response = self._request(typ, id=id, method='DELETE')
    return self._load(response)
def deploy(self, site=None):
    """
    Writes entire crontab to the host.

    Renders the selected crontab templates for every matching site,
    uploads the combined file and installs it with ``crontab -u``.
    Nothing is installed when no crontab lines are selected.
    """
    r = self.local_renderer
    self.deploy_logrotate()
    cron_crontabs = []
    # if self.verbose:
    # print('hostname: "%s"' % (hostname,), file=sys.stderr)
    for _site, site_data in self.iter_sites(site=site):
        # Create the stdout/stderr log files and hand them to the cron user.
        r.env.cron_stdout_log = r.format(r.env.stdout_log_template)
        r.env.cron_stderr_log = r.format(r.env.stderr_log_template)
        r.sudo('touch {cron_stdout_log}')
        r.sudo('touch {cron_stderr_log}')
        # NOTE(review): 'sudo chown' inside r.sudo() looks redundant if
        # r.sudo() already escalates privileges -- confirm before changing.
        r.sudo('sudo chown {user}:{user} {cron_stdout_log}')
        r.sudo('sudo chown {user}:{user} {cron_stderr_log}')
        if self.verbose:
            print('site:', site, file=sys.stderr)
            print('env.crontabs_selected:', self.env.crontabs_selected, file=sys.stderr)
        # Render every line of every selected crontab template.
        for selected_crontab in self.env.crontabs_selected:
            lines = self.env.crontabs_available.get(selected_crontab, [])
            if self.verbose:
                print('lines:', lines, file=sys.stderr)
            for line in lines:
                cron_crontabs.append(r.format(line))
    # Nothing selected anywhere: leave the host untouched.
    if not cron_crontabs:
        return
    cron_crontabs = self.env.crontab_headers + cron_crontabs
    cron_crontabs.append('\n')
    r.env.crontabs_rendered = '\n'.join(cron_crontabs)
    fn = self.write_to_file(content=r.env.crontabs_rendered)
    print('fn:', fn)
    r.env.put_remote_path = r.put(local_path=fn)
    # r.put() may return a tuple/list of remote paths; keep the first one.
    if isinstance(r.env.put_remote_path, (tuple, list)):
        r.env.put_remote_path = r.env.put_remote_path[0]
    r.sudo('crontab -u {cron_user} {put_remote_path}')
def evaluate(self, reference_scene_list, estimated_scene_list=None, estimated_scene_probabilities=None):
    """Evaluate file pair (reference and estimated)

    Parameters
    ----------
    reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Reference scene list.

    estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
        Estimated scene list.
        Default value None

    estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
        Estimated scene probabilities. Currently not used.
        Default value None

    Returns
    -------
    self

    Raises
    ------
    ValueError
        If no estimated data is given, or when an estimated item has no
        matching reference item.
    """
    if estimated_scene_list is None and estimated_scene_probabilities is None:
        raise ValueError("Nothing to evaluate, give at least estimated_scene_list or estimated_scene_probabilities")

    # Make sure reference_scene_list is dcase_util.containers.MetaDataContainer
    # BUGFIX: the original tested isinstance(estimated_scene_list, ...)
    # here, so a plain-list reference was not wrapped when the estimate
    # was already a container.
    if not isinstance(reference_scene_list, dcase_util.containers.MetaDataContainer):
        reference_scene_list = dcase_util.containers.MetaDataContainer(reference_scene_list)

    # Make sure estimated_scene_list is dcase_util.containers.MetaDataContainer
    if not isinstance(estimated_scene_list, dcase_util.containers.MetaDataContainer):
        estimated_scene_list = dcase_util.containers.MetaDataContainer(estimated_scene_list)

    # Make sure estimated_scene_probabilities is dcase_util.containers.ProbabilityContainer
    if estimated_scene_probabilities is not None:
        if not isinstance(estimated_scene_probabilities, dcase_util.containers.ProbabilityContainer):
            estimated_scene_probabilities = dcase_util.containers.ProbabilityContainer(estimated_scene_probabilities)

    # Translate "file" field to "filename"
    for item in reference_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']

    for item in estimated_scene_list:
        if 'filename' not in item and 'file' in item:
            item['filename'] = item['file']

    # Pair each estimated item with its reference item by filename.
    y_true = []
    y_pred = []
    for estimated_item in estimated_scene_list:
        reference_item_matched = {}
        for reference_item in reference_scene_list:
            if estimated_item['filename'] == reference_item['filename']:
                reference_item_matched = reference_item
                break

        if not reference_item_matched:
            # BUGFIX: report 'filename' -- 'file' may be absent after the
            # field translation above and would raise a spurious KeyError.
            raise ValueError(
                "Cannot find reference_item for estimated item [{item}]".format(item=estimated_item['filename'])
            )

        y_true.append(reference_item_matched['scene_label'])
        y_pred.append(estimated_item['scene_label'])

    y_true = numpy.array(y_true)
    y_pred = numpy.array(y_pred)

    # Accumulate per-scene and overall Ncorr / Nref / Nsys counts.
    Ncorr_overall = 0
    for scene_id, scene_label in enumerate(self.scene_label_list):
        true_id = numpy.where(y_true == scene_label)[0]
        pred_id = numpy.where(y_pred == scene_label)[0]

        # Items labelled scene_label in both reference and estimate
        # (set intersection instead of the former O(n*m) membership loop).
        Ncorr = len(set(true_id) & set(pred_id))
        Ncorr_overall += Ncorr

        self.scene_wise[scene_label]['Ncorr'] += Ncorr
        self.scene_wise[scene_label]['Nref'] += true_id.shape[0]
        self.scene_wise[scene_label]['Nsys'] += pred_id.shape[0]

    self.overall['Ncorr'] += Ncorr_overall
    self.overall['Nref'] += y_true.shape[0]
    self.overall['Nsys'] += y_pred.shape[0]

    return self
Parameters
----------
reference_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Reference scene list.
Default value None
estimated_scene_list : list of dict or dcase_util.containers.MetaDataContainer
Estimated scene list.
Default value None
estimated_scene_probabilities : dcase_util.containers.ProbabilityContainer
Estimated scene probabilities. Currently not used.
Default value None
Returns
-------
self |
def resolve(self, current_file, rel_path):
    """Search the fake filesystem for rel_path relative to current_file."""
    candidate = path.join(path.dirname(current_file), rel_path)
    if candidate not in self.file_dict:
        raise RuntimeError('No such fake file: %r' % candidate)
    # Both tuple slots carry the resolved path.
    return candidate, candidate
def get_datacenter(content, obj):
    '''
    Get the datacenter to whom an object belongs
    '''
    # Walk the datacenters under the root folder and return the first one
    # whose children of obj's type contain obj. Falls through to None
    # when no datacenter matches.
    for datacenter in content.rootFolder.childEntity:
        children = get_all(content, datacenter, type(obj))
        if children is not None and obj in children:
            return datacenter
def _transform_stats(prof):
    """Processes collected stats for UI."""
    records = []
    total_time = prof.total_tt
    for (filename, lineno, funcname), params in prof.stats.items():
        cum_calls, num_calls, time_per_call, cum_time, _ = params
        # Guard against division by zero when no time was recorded.
        if total_time == 0:
            percentage = 0
        else:
            percentage = round(100 * (cum_time / total_time), 4)
        cum_time = round(cum_time, 4)
        color_hash = base_profiler.hash_name('%s @ %s' % (funcname, filename))
        records.append(
            (filename, lineno, funcname, cum_time, percentage, num_calls,
             cum_calls, time_per_call, filename, color_hash))
    # Sort by the percentage column, biggest consumers first.
    return sorted(records, key=operator.itemgetter(4), reverse=True)
def udp_send(udpsock, frame, address, addrlen):
    """
    Send zframe to UDP socket; returns -1 if sending failed due to the
    network interface having disappeared (happens easily with WiFi).

    *** This is for CZMQ internal use only and may change arbitrarily ***
    """
    # Thin pass-through to the native CZMQ binding.
    return lib.zsys_udp_send(udpsock, frame, address, addrlen)
interface having disappeared (happens easily with WiFi)
*** This is for CZMQ internal use only and may change arbitrarily *** |
def window_size(self, value):
    """Set private ``_window_size`` and reset ``_block_matcher``.

    ``value`` must be an odd integer greater than 4 and smaller than
    ``self.parameter_maxima["window_size"]``; anything else raises
    ``InvalidWindowSizeError``.
    """
    max_size = self.parameter_maxima["window_size"]
    if value > 4 and value < max_size and value % 2:
        self._window_size = value
    else:
        # BUGFIX: the old message claimed the valid range was
        # "between 0 and max+1", which contradicted the check above.
        raise InvalidWindowSizeError("Window size must be an odd number "
                                     "greater than 4 and less than "
                                     "{}.".format(max_size))
    self._replace_bm()
def TAPQuery(query):
    """The __main__ part of the script.

    Submit the ADQL ``query`` to the CADC synchronous TAP service and
    return the open HTTP response (a VOTable stream).  Retries forever
    while the service answers 503 (busy), sleeping 10 s between
    attempts; exits the process on any other HTTP error.

    NOTE: Python 2 only (print statement, urllib2, except-comma syntax).
    """
    tapURL = "http://cadc-ccda.hia-iha.nrc-cnrc.gc.ca/tap/sync"
    ## Some default parameters for that TAP service queries.
    tapParams={'REQUEST': 'doQuery',
               'LANG': 'ADQL',
               'FORMAT': 'votable',
               'QUERY': query}
    cnt=0
    while True:
        try:
            print "running query"
            r=urllib2.urlopen(tapURL,urllib.urlencode(tapParams))
            return r
        except urllib2.HTTPError, e:
            cnt+=1
            # 503 means the service is busy: retry; anything else is fatal.
            if e.code!=503:
                sys.stderr.write("# TAP Query got Code: %s Attempt: %d (exiting)\n" % (str(e.code),cnt))
                sys.exit(-1)
            sys.stderr.write("# TAP Query got Code: %s Attempt: %d (sleeping for 10)\n" % (str(e.code),cnt))
            time.sleep(10)
def zoom_leftup(self, event=None):
    """leftup event handler for zoom mode in images

    Finishes a rubber-band zoom: converts the drag rectangle from device
    to data coordinates, pushes the new limits on conf.zoom_lims and
    applies them via set_viewlimits().
    """
    if self.zoom_ini is None:
        return
    ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
    try:
        dx = abs(ini_x - event.x)
        dy = abs(ini_y - event.y)
    except:
        # NOTE(review): bare except -- presumably guards event being None;
        # confirm and narrow to AttributeError if possible.
        dx, dy = 0, 0
    t0 = time.time()
    self.rbbox = None
    self.zoom_ini = None
    # Ignore tiny drags (<= 3 px) and events fired within 0.1 s of the
    # previous mouse-up.
    if (dx > 3) and (dy > 3) and (t0-self.mouse_uptime)>0.1:
        self.mouse_uptime = t0
        zlims, tlims = {}, {}
        ax = self.axes
        xmin, xmax = ax.get_xlim()
        ymin, ymax = ax.get_ylim()
        zlims[ax] = [xmin, xmax, ymin, ymax]
        # Seed the zoom-limit stack with the current view on first zoom.
        if len(self.conf.zoom_lims) == 0:
            self.conf.zoom_lims.append(zlims)
        ax_inv = ax.transData.inverted
        # Convert both drag endpoints from device to data coordinates,
        # falling back to last-known positions when the transform fails.
        try:
            x1, y1 = ax_inv().transform((event.x, event.y))
        except:
            x1, y1 = self.x_lastmove, self.y_lastmove
        try:
            x0, y0 = ax_inv().transform((ini_x, ini_y))
        except:
            x0, y0 = ini_xd, ini_yd
        # Integer limits, ordered min..max on each axis.
        tlims[ax] = [int(round(min(x0, x1))), int(round(max(x0, x1))),
                     int(round(min(y0, y1))), int(round(max(y0, y1)))]
        self.conf.zoom_lims.append(tlims)
        # now apply limits:
        self.set_viewlimits()
        if callable(self.zoom_callback):
            self.zoom_callback(wid=self.GetId(), limits=tlims[ax])
def _string_to_byte_list(self, data):
"""
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest)
"""
bytes_length = 16
m = self.digest()
m.update(str.encode(data))
hex_digest = m.hexdigest()
return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
for num in range(bytes_length)) | Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest) |
def read(self,filename,datatype=None,slaext=False,**kwargs):
    '''
    reader method.

    :parameter filename: name of the file to load.
    :keyword datatype: choose between DT/NRT/PISTACH/CTOH or other formats to call the corresponding reader. If datatype is :

       * DT or NRT or PISTACH : calls :func:`altimetry.data.alti_data.read_sla` or :func:`altimetry.data.alti_data.read_slaext`
       * CTOH : calls :func:`altimetry.data.alti_data.read_CTOH`
       * else : calls :func:`altimetry.data.alti_data.read_nc`, based on :class:`altimetry.tools.nctools.nc` object.

    :keyword slaext: force using :func:`altimetry.data.alti_data.read_slaext`

    .. note:: This method is call from :meth:`altimetry.data.hydro_data.__init__` and returns a data structure to be handled by :meth:`altimetry.data.hydro_data.update_dataset`
    .. note:: Python 2 only (uses ``dict.has_key``).
    '''
    fname,extension = os.path.splitext(filename)
    # Guess the filename token delimiter: product names use '.' or '_'.
    if os.path.basename(filename).count('.') > os.path.basename(filename).count('_'): delim='.'
    else : delim = '_'
    #Get data type from the first filename token when not given explicitly.
    if datatype is None :
        if os.path.basename(filename).split(delim)[0] == 'ctoh' : datatype='CTOH'
        if os.path.basename(filename).split(delim)[0] == 'PISTACH' : datatype='PISTACH'
        if os.path.basename(filename).split(delim)[0] == 'nrt' : datatype='NRT'
        if os.path.basename(filename).split(delim)[0] == 'dt' : datatype='DT'
#            else :
#                datatype='RAW' #Setup default as raw NetCDF file
    self.datatype=datatype
    # Dispatch to the reader matching the detected data type, and record
    # the file id against the time dimension when present.
    if (datatype == 'DT') | (datatype == 'NRT') | (datatype == 'PISTACH') :
        if slaext : outStr=self.read_slaext(filename,datatype=datatype,**kwargs)
        else : outStr=self.read_sla(filename,datatype=datatype,**kwargs)
        if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['time'])
    elif (datatype == 'CTOH') :
        outStr=self.read_CTOH(filename,**kwargs)
        if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions']['time'])
    else: #Setup default as raw NetCDF file
        outStr=self.read_nc(filename,**kwargs)
        if outStr.has_key('_dimensions'): self.update_fid_list(os.path.basename(filename),outStr['_dimensions'][outStr['_dimensions'].keys()[1]])
    return outStr
:parameter filename: name of the file to load.
:keyword datatype: choose between DT/NRT/PISTACH/CTOH or other formats to call the corresponding reader. If datatype is :
* DT or NRT or PISTACH : calls :func:`altimetry.data.alti_data.read_sla` or :func:`altimetry.data.alti_data.read_slaext`
* CTOH : calls :func:`altimetry.data.alti_data.read_CTOH`
* else : calls :func:`altimetry.data.alti_data.read_nc`, based on :class:`altimetry.tools.nctools.nc` object.
:keyword slaext: force using :func:`altimetry.data.alti_data.read_slaext`
.. note:: This method is call from :meth:`altimetry.data.hydro_data.__init__` and returns a data structure to be handled by :meth:`altimetry.data.hydro_data.update_dataset` |
def append(self, station):
    """ Append station to database.

    Returns the index of the appended station.
    """
    packed = station._pack(self)
    with self:
        _libtcd.add_tide_record(packed, self._header)
    # Records are zero-indexed, so the new record sits at count - 1.
    return self._header.number_of_records - 1
Returns the index of the appended station. |
def _rewrite_and_copy(src_file, dst_file, project_name):
"""Replace vars and copy."""
# Create temp file
fh, abs_path = mkstemp()
with io.open(abs_path, 'w', encoding='utf-8') as new_file:
with io.open(src_file, 'r', encoding='utf-8') as old_file:
for line in old_file:
new_line = line.replace('#{project}', project_name). \
replace('#{project|title}', project_name.title())
new_file.write(new_line)
# Copy to new file
shutil.copy(abs_path, dst_file)
os.close(fh) | Replace vars and copy. |
def get_vaults_by_ids(self, *args, **kwargs):
    """Pass through to provider VaultLookupSession.get_vaults_by_ids"""
    # Implemented from kitosid template for -
    # osid.resource.BinLookupSession.get_bins_by_ids
    session = self._get_provider_session('vault_lookup_session')
    wrapped = [
        Vault(self._provider_manager, catalog, self._runtime, self._proxy)
        for catalog in session.get_vaults_by_ids(*args, **kwargs)
    ]
    return VaultList(wrapped)
def calculate(self, **state):
    """
    Calculate dynamic viscosity at the specified temperature and
    composition:

    :param T: [K] temperature
    :param x: [mole fraction] composition dictionary , e.g. \
        {'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25}

    :returns: [Pa.s] dynamic viscosity

    The **state parameter contains the keyword argument(s) specified above\
    that are used to describe the state of the material.
    """
    T = state['T']
    x = state['x']
    # create the slag constituent categories
    compounds_sio2 = ['SiO2', 'PO2.5', 'TiO2', 'ZrO2']
    compounds_cao = ['CaO', 'MgO', 'FeO1.5', 'FeO', 'MnO', 'BO1.5']
    compounds_al2o3 = ['Al2O3']
    compounds_caf2 = ['CaF2']
    compounds_na2o = ['Na2O', 'K2O']
    compounds_all = (compounds_sio2 + compounds_cao + compounds_al2o3 +
                     compounds_caf2 + compounds_na2o)
    # convert compounds with two cations to single cation equivalents
    # (one mole of the double oxide yields two moles of the half oxide)
    if 'P2O5' in x:
        x['PO2.5'] = 2.0 * x['P2O5']
    if 'Fe2O3' in x:
        x['FeO1.5'] = 2.0 * x['Fe2O3']
    if 'B2O3' in x:
        x['BO1.5'] = 2.0 * x['B2O3']
    # normalise mole fractions, use only compounds in compounds_all
    x_total = sum([x.get(c, 0.0) for c in compounds_all])
    x = {c: x.get(c, 0.0)/x_total for c in compounds_all}
    # calculate the cateogry mole fractions
    x1 = sum([x.get(c, 0.0) for c in compounds_sio2])
    x2 = sum([x.get(c, 0.0) for c in compounds_cao])
    x3 = sum([x.get(c, 0.0) for c in compounds_al2o3])
    x4 = sum([x.get(c, 0.0) for c in compounds_caf2])
    x5 = sum([x.get(c, 0.0) for c in compounds_na2o])
    # Empirical model coefficients applied to the category fractions.
    # TODO: Why is x1 not used? This looks suspicious.
    A = exp(-17.51 + 1.73*x2 + 5.82*x4 + 7.02*x5 - 33.76*x3)
    B = 31140.0 - 23896.0*x2 - 46356.0*x4 - 39159.0*x5 + 68833.0*x3
    result = A*T*exp(B/T)  # [P]
    # Convert poise to Pa.s (1 Pa.s = 10 P).
    return result / 10.0
composition:
:param T: [K] temperature
:param x: [mole fraction] composition dictionary , e.g. \
{'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25}
:returns: [Pa.s] dynamic viscosity
The **state parameter contains the keyword argument(s) specified above\
that are used to describe the state of the material. |
def _add_record(self, record_set_class, name, values, ttl=60, weight=None,
                region=None,set_identifier=None, alias_hosted_zone_id=None,
                alias_dns_name=None):
    """
    Convenience method for creating ResourceRecordSets. Most of the calls
    are basically the same, this saves on repetition.

    :rtype: tuple
    :returns: A tuple in the form of ``(rrset, change_info)``, where
        ``rrset`` is the newly created ResourceRecordSet sub-class
        instance.
    """
    self._halt_if_already_deleted()
    record_kwargs = {
        'connection': self.connection,
        'zone_id': self.id,
        'name': name,
        'ttl': ttl,
        'records': values,
        'weight': weight,
        'region': region,
        'set_identifier': set_identifier,
    }
    # Alias records additionally carry the target zone id and DNS name.
    if alias_hosted_zone_id or alias_dns_name:
        record_kwargs['alias_hosted_zone_id'] = alias_hosted_zone_id
        record_kwargs['alias_dns_name'] = alias_dns_name
    rrset = record_set_class(**record_kwargs)
    # Submit a one-entry CREATE change set for the new record.
    change_set = ChangeSet(connection=self.connection, hosted_zone_id=self.id)
    change_set.add_change('CREATE', rrset)
    change_info = self.connection._change_resource_record_sets(change_set)
    return rrset, change_info
are basically the same, this saves on repetition.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created ResourceRecordSet sub-class
instance. |
def get_data(self):
    """
    Return the document data, lazily loading it on first access.
    """
    try:
        raw = self.__dict__['data']
    except KeyError:
        # Not fetched yet -- load the document, then read again.
        self._lazy_load()
        raw = self.__dict__['data']
    return DocumentDataDict(raw)
def hello(environ, start_response):
    '''The WSGI_ application handler which returns an iterable
    over the "Hello World!" message.'''
    # Only GET is supported by this handler.
    if environ['REQUEST_METHOD'] != 'GET':
        raise MethodNotAllowed
    body = b'Hello World!\n'
    start_response('200 OK', [
        ('Content-type', 'text/plain'),
        ('Content-Length', str(len(body)))
    ])
    return iter([body])
over the "Hello World!" message. |
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to
    :math:`\mathbf{y}`.
    """
    # Apply the l1,l2 proximal operator to the current AX + U iterate
    # (weights scaled by rho), then run the generic ConvBPDN ystep.
    l1_weight = (self.lmbda/self.rho)*self.wl1
    l2_weight = self.mu/self.rho
    self.Y = sp.prox_l1l2(self.AX + self.U, l1_weight, l2_weight,
                          axis=self.cri.axisC)
    cbpdn.GenericConvBPDN.ystep(self)
:math:`\mathbf{y}`. |
def verify(self):
    """
    :desc: Verifies the integrity of the database, specifically checks
           the values for unusedIDs and currentMax.  A valid database
           has no ID both in use and in the unused pool, and its
           currentMax equals the largest ID in use.
    :returns: A boolean indicating whether the database is valid or
              not (a human-readable verdict is also printed)
    :rval: bool
    """
    # Collect every ID currently stored across the data collections.
    collections = self.get_data_collections()
    allIDs = []
    for coll in collections:
        IDs = self.noteDB[coll].find({"ID": {"$exists": True}},
                                     {"ID": 1, "_id": 0})
        for ID in IDs:
            allIDs.append(int(ID["ID"]))
    query = {"currentMax": {"$exists": True}}
    maxID = int(self.noteDB['IDs'].find_one(query)["currentMax"])
    query = {"unusedIDs": {"$exists": True}}
    unusedIDs = self.noteDB['IDs'].find_one(query)["unusedIDs"]
    # Use a set for O(1) membership tests instead of the original
    # O(n*m) list scans.
    unusedIDs = set(int(ii) for ii in unusedIDs)
    unusedIDsMatch = not any(ID in unusedIDs for ID in allIDs)
    # BUG FIX: the original used `maxID is not max(allIDs)`, which
    # compares object identity rather than value and is unreliable for
    # ints.  Compare with == instead, and guard against an empty
    # database where max() would raise.
    maxIDMatch = bool(allIDs) and maxID == max(allIDs)
    if maxIDMatch and unusedIDsMatch:
        print("Database is valid")
    elif not maxIDMatch and not unusedIDsMatch:
        print("Database not valid, max ID and unused IDs are incorrect")
    elif not maxIDMatch:
        print("Database not valid, max ID is incorrect")
    else:
        print("Database not valid, unusedIDs is incorrect")
    # BUG FIX: return the documented boolean instead of None.
    return maxIDMatch and unusedIDsMatch
the values for unusedIDs and currentMax
:returns: A boolean indicating whether the database is valid or
not
:rval: bool |
def install(environment, opts):
    """Install or reinstall Python packages within this environment
    Usage:
    datacats install [-q] [--address=IP] [ENVIRONMENT [PACKAGE ...]]
    datacats install -c [q] [--address=IP] [ENVIRONMENT]
    Options:
    --address=IP The address to bind to when reloading after install
    -c --clean Reinstall packages into a clean virtualenv
    -q --quiet Do not show output from installing packages and requirements.
    ENVIRONMENT may be an environment name or a path to an environment directory.
    Default: '.'
    """
    # An initialized data directory is required before installing.
    environment.require_data()
    install_all(environment, opts['--clean'], verbose=not opts['--quiet'],
                packages=opts['PACKAGE'])
    # Reload every site that currently has a running web container so the
    # freshly installed packages take effect.
    for site in environment.sites:
        # Re-load the environment bound to this specific site's config.
        environment = Environment.load(environment.name, site)
        if 'web' in environment.containers_running():
            # FIXME: reload without changing debug setting?
            manage.reload_(environment, {
                '--address': opts['--address'],
                '--background': False,
                '--no-watch': False,
                '--production': False,
                'PORT': None,
                '--syslog': False,
                '--site-url': None,
                '--interactive': False
            }) | Install or reinstall Python packages within this environment
Usage:
datacats install [-q] [--address=IP] [ENVIRONMENT [PACKAGE ...]]
datacats install -c [q] [--address=IP] [ENVIRONMENT]
Options:
--address=IP The address to bind to when reloading after install
-c --clean Reinstall packages into a clean virtualenv
-q --quiet Do not show output from installing packages and requirements.
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.' |
def column_types(self):
    """
    Returns the column types.
    Returns
    -------
    out : list[type]
        Column types of the SFrame.
    """
    # NOTE(review): implicitly returns None when __type__ is neither
    # VERTEX_GFRAME nor EDGE_GFRAME -- confirm that cannot occur.
    if self.__type__ == VERTEX_GFRAME:
        return self.__graph__.__proxy__.get_vertex_field_types()
    elif self.__type__ == EDGE_GFRAME:
        return self.__graph__.__proxy__.get_edge_field_types() | Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame. |
def siblingsId(self):
    """ Shortcut for getting the previous and next passage identifier
    :rtype: tuple(CtsReference, CtsReference)
    :returns: Tuple of (previous passage reference, following passage reference)
    """
    # False is the "not yet computed" sentinel (None may be a valid
    # result for a passage without a sibling), so fetch only while a
    # sentinel is still present; afterwards the pair is cached.
    if self._next_id is False or self._prev_id is False:
        self._prev_id, self._next_id = self.getPrevNextUrn(reference=self.urn.reference)
    return self._prev_id, self._next_id | Shortcut for getting the previous and next passage identifier
:rtype: tuple(CtsReference, CtsReference)
:returns: Tuple of (previous, following) passage references |
def do_POST(self, ):
    """Handle POST requests
    When the user is redirected, this handler will respond with a website
    which will send a post request with the url fragment as parameters.
    This will get the parameters and store the original redirection
    url and fragments in :data:`LoginServer.tokenurl`.
    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug('POST')
    # Acknowledge the request before handing the token to the server.
    self._set_headers()
    # convert the parameters back to the original fragment
    # because we need to send the original uri to set_token
    # url fragments will not show up in self.path though.
    # thats why we make the hassle to send it as a post request.
    # Note: oauth does not allow for http connections
    # but twitch does, so we fake it
    ruri = constants.REDIRECT_URI.replace('http://', 'https://')
    self.server.set_token(ruri + self.path.replace('?', '#')) | Handle POST requests
When the user is redirected, this handler will respond with a website
which will send a post request with the url fragment as parameters.
This will get the parameters and store the original redirection
url and fragments in :data:`LoginServer.tokenurl`.
:returns: None
:rtype: None
:raises: None |
def request_upload_token(self, file):
    """
    Request an upload token.
    :param file: A file handler pointing to the file to upload.
    :returns: True if the file uploaded successfully, False otherwise, \
        and the JSON response from the API.
    :rtype: tuple
    """
    # The API needs the file's basename and byte size to issue a token.
    self.kwargs['name'] = os.path.basename(file.name)
    self.kwargs['size'] = os.fstat(file.fileno()).st_size
    response = self._requester.request(
        'POST',
        self.url,
        _kwargs=combine_kwargs(**self.kwargs)
    )
    # Hand the token response off to upload(), which performs the
    # actual transfer and builds the (success, json) tuple.
    return self.upload(response, file) | Request an upload token.
:param file: A file handler pointing to the file to upload.
:returns: True if the file uploaded successfully, False otherwise, \
and the JSON response from the API.
:rtype: tuple |
def getParentTiles(self, zoom, col, row, zoomParent):
    """
    Return the parent tile(s) for an irregular (not following quadindex)
    and regular tiling scheme
    Parameters:
    zoom -- the zoom level of the child tile
    row -- the row of the child tile
    col -- the col of the child tile
    zoomParent -- the target zoom of the parent tile
    Returns a list of [zoom, col, row] addresses covering the child
    tile's extent at the parent zoom level.
    """
    assert zoomParent <= zoom
    # Same zoom level: the tile is its own "parent".
    if zoomParent == zoom:
        return [[zoom, col, row]]
    # Find every parent-zoom tile whose extent contains the child tile.
    extent = self.tileBounds(zoom, col, row)
    minRow, minCol, maxRow, maxCol = self.getExtentAddress(
        zoomParent, extent=extent, contained=True)
    addresses = []
    for c in range(minCol, maxCol + 1):
        for r in range(minRow, maxRow + 1):
            addresses.append([zoomParent, c, r])
    return addresses | Return the parent tile(s) for an irregular (not following quadindex)
and regular tiling scheme
Parameters:
zoom -- the zoom level of the child tile
row -- the row of the child tile
col -- the col of the child tile
zoomParent -- the target zoom of the parent tile |
def process_auth(self):
    """ Reads and processes SSPI stream.
    Stream info: http://msdn.microsoft.com/en-us/library/dd302844.aspx
    """
    r = self._reader
    w = self._writer
    # The length of the SSPI blob precedes the blob itself.
    pdu_size = r.get_smallint()
    if not self.authentication:
        raise tds_base.Error('Got unexpected token')
    # Feed the whole blob to the authentication provider; it returns
    # the next packet to send, or a falsy value when the handshake is done.
    packet = self.authentication.handle_next(readall(r, pdu_size))
    if packet:
        w.write(packet)
        w.flush() | Reads and processes SSPI stream.
Stream info: http://msdn.microsoft.com/en-us/library/dd302844.aspx |
def randstring(l):
    """
    Return a bytestring of ``l`` random bytes (l >= 0), each drawn
    uniformly from 0-255 via the ``random`` module.
    """
    pieces = [struct.pack('B', random.randint(0, 255)) for _ in range(l)]
    return b"".join(pieces)
def caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name='Graph',
                                conversion_out_dir_path=None, use_padding_same=False):
    """Create a TensorFlow Session from a Caffe model."""
    try:
        # noinspection PyUnresolvedReferences
        from caffeflow import convert
    except ImportError:
        raise Exception("caffeflow package needs to be installed to freeze Caffe models. Check out the README file.")
    # Convert into the caller-supplied directory, or a scratch directory
    # that is cleaned up when the context exits.
    with (dummy_context_mgr(conversion_out_dir_path) or util.TemporaryDirectory()) as dir_path:
        params_values_output_path = os.path.join(dir_path, 'params_values.npy')
        network_output_path = os.path.join(dir_path, 'network.py')
        convert.convert(caffe_def_path, caffemodel_path, params_values_output_path, network_output_path, False,
                        use_padding_same=use_padding_same)
        # NOTE(review): the `imp` module is deprecated (removed in Python
        # 3.12); importlib.util is the modern replacement -- confirm the
        # supported Python versions before migrating.
        network_module = imp.load_source('module.name', network_output_path)
        network_class = getattr(network_module, graph_name)
        network = network_class(inputs)
        sess = tf.Session()
        # Restore the converted weights into the live session.
        network.load(params_values_output_path, sess)
        return sess | Create a TensorFlow Session from a Caffe model. |
def absl_to_standard(level):
    """Converts an integer level from the absl value to the standard value.
    Args:
      level: int, an absl.logging level.
    Raises:
      TypeError: Raised when level is not an integer.
    Returns:
      The corresponding integer level for use in standard logging.
    """
    if not isinstance(level, int):
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    # Anything more severe than FATAL is clamped to FATAL.
    clamped = max(level, ABSL_FATAL)
    if clamped <= ABSL_DEBUG:
        return ABSL_TO_STANDARD[clamped]
    # Levels above DEBUG map onto standard vlog levels.
    return STANDARD_DEBUG - clamped + 1
Args:
level: int, an absl.logging level.
Raises:
TypeError: Raised when level is not an integer.
Returns:
The corresponding integer level for use in standard logging. |
def notify(title, message, retcode=None):
    """
    Post a message to the macOS Notification Center via PyObjC.
    adapted from https://gist.github.com/baliw/4020619
    """
    # retcode is accepted for interface compatibility with other ntfy
    # backends and is intentionally unused here.
    try:
        import Foundation
        import objc
    except ImportError:
        import sys
        import logging
        logger = logging.getLogger(__name__)
        # PyObjC cannot be loaded from inside a virtualenv on macOS;
        # give a clear error instead of a raw ImportError in that case.
        if sys.platform.startswith('darwin') and hasattr(sys, 'real_prefix'):
            logger.error(
                "Using ntfy with the MacOS Notification Center doesn't "
                "work within a virtualenv")
            sys.exit(1)
        else:
            raise
    # Look the classes up at runtime so no compile-time ObjC bridge is needed.
    NSUserNotification = objc.lookUpClass('NSUserNotification')
    NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
    notification = NSUserNotification.alloc().init()
    notification.setTitle_(title)
    if message is not None:
        notification.setInformativeText_(message)
    # Deliver immediately (delivery date = now).
    notification.setDeliveryDate_(Foundation.NSDate.date())
    NSUserNotificationCenter.defaultUserNotificationCenter()\
        .scheduleNotification_(notification) | adapted from https://gist.github.com/baliw/4020619 |
def addLadder(settings):
    """define a new Ladder setting and save to disk file"""
    new_ladder = Ladder(settings)
    new_ladder.save()
    # Register under its name so subsequent lookups find it.
    known = getKnownLadders()
    known[new_ladder.name] = new_ladder
    return new_ladder
def serialize(self, serializable: Optional[Union[SerializableType, List[SerializableType]]]) \
        -> PrimitiveJsonType:
    """
    Serializes the given serializable object or collection of serializable objects.
    :param serializable: the object or objects to serialize
    :return: a serialization of the given object (None in, None out;
        a list in yields a list of serializations)
    """
    if serializable is None:
        # Implements #17
        return None
    elif isinstance(serializable, List):
        # Serialize each list element independently (recursive call).
        return [self.serialize(item) for item in serializable]
    else:
        serialized = self._create_serialized_container()
        for mapping in self._property_mappings:
            # Only mappings with both an object-side getter and a
            # serialized-side setter participate in serialization.
            if mapping.object_property_getter is not None and mapping.serialized_property_setter is not None:
                value = mapping.object_property_getter(serializable)
                # Optional properties whose value is None are omitted.
                if not (mapping.optional and value is None):
                    # Normalize the mapping's custom collection type to a
                    # plain list before encoding.
                    if isinstance(value, type(mapping.collection_factory([]))):
                        value = list(mapping.collection_iter(value))
                    encoded_value = self._serialize_property_value(value, mapping.serializer_cls)
                    mapping.serialized_property_setter(serialized, encoded_value)
        return serialized | Serializes the given serializable object or collection of serializable objects.
:param serializable: the object or objects to serialize
:return: a serialization of the given object |
def get_var_type(col):
    """
    Return var_type (for KDEMultivariate) of the column
    Parameters
    ----------
    col : pandas.Series
        A dataframe column.
    Returns
    -------
    out : str
        One of ['c', 'o', 'u'].
    See Also
    --------
    The origin of the character codes is
    :class:`statsmodels.nonparametric.kernel_density.KDEMultivariate`.
    """
    if pdtypes.is_numeric_dtype(col):
        # continuous
        return 'c'
    elif isinstance(col.dtype, pdtypes.CategoricalDtype):
        # ordered or unordered
        # (isinstance on CategoricalDtype replaces is_categorical_dtype,
        # which is deprecated since pandas 2.0)
        return 'o' if col.cat.ordered else 'u'
    else:
        # unordered if unsure, e.g string columns that
        # are not categorical
        return 'u'
Parameters
----------
col : pandas.Series
A dataframe column.
Returns
-------
out : str
One of ['c', 'o', 'u'].
See Also
--------
The origin of the character codes is
:class:`statsmodels.nonparametric.kernel_density.KDEMultivariate`. |
def join(strings: Optional[Sequence[str]], separator: str = "") -> str:
    """Join the truthy items of a given sequence.
    Return an empty string if the sequence is None or empty; otherwise
    concatenate the non-empty items separated by *separator* (falsy
    items are skipped rather than joined).
    """
    if not strings:
        return ""
    return separator.join(filter(None, strings))
Return an empty string if it is None or empty, otherwise join all items together
separated by separator if provided. |
def create_repo_from_pip_url(pip_url, **kwargs):
    r"""Return a object representation of a VCS repository via pip-style url.
    :returns: instance of a repository object
    :rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
        :class:`libvcs.hg.MercurialRepo`.
    Usage Example::
        >>> from libvcs.shortcuts import create_repo_from_pip_url
        >>> r = create_repo_from_pip_url(
        ...     pip_url='git+https://www.github.com/you/myrepo',
        ...     repo_dir='/tmp/myrepo')
        >>> r.update_repo()
    """
    # Dispatch on the pip-style VCS prefix of the url.
    dispatch = (
        ('git+', GitRepo),
        ('hg+', MercurialRepo),
        ('svn+', SubversionRepo),
    )
    for prefix, repo_cls in dispatch:
        if pip_url.startswith(prefix):
            return repo_cls.from_pip_url(pip_url, **kwargs)
    raise InvalidPipURL(pip_url)
:returns: instance of a repository object
:rtype: :class:`libvcs.svn.SubversionRepo`, :class:`libvcs.git.GitRepo` or
:class:`libvcs.hg.MercurialRepo`.
Usage Example::
>>> from libvcs.shortcuts import create_repo_from_pip_url
>>> r = create_repo_from_pip_url(
... pip_url='git+https://www.github.com/you/myrepo',
... repo_dir='/tmp/myrepo')
>>> r.update_repo()
|myrepo| (git) Repo directory for myrepo (git) does not exist @ \
/tmp/myrepo
|myrepo| (git) Cloning.
|myrepo| (git) git clone https://www.github.com/tony/myrepo \
/tmp/myrepo
Cloning into '/tmp/myrepo'...
Checking connectivity... done.
|myrepo| (git) git fetch
|myrepo| (git) git pull
Already up-to-date. |
def where_entry_last(query, ref):
    """ Generate a where clause where this is the last entry
    ref -- the entry of reference
    """
    # Keep entries at or before `ref`, ordering by (local_date, id);
    # ties on local_date fall back to the id for a stable ordering.
    # NOTE: Pony ORM decompiles this generator expression to build SQL,
    # so its exact shape matters; do not refactor it casually.
    return orm.select(
        e for e in query
        if e.local_date < ref.local_date or
        (e.local_date == ref.local_date and
         e.id <= ref.id
         )
    ) | Generate a where clause where this is the last entry
ref -- the entry of reference |
def load_image(self, data, quiet=None):
    """
    Load an image that was previously saved using
    :py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
    save``). Similar to ``docker load``.
    Args:
        data (binary): Image data to be loaded.
        quiet (boolean): Suppress progress details in response.
    Returns:
        (generator): Progress output as JSON objects. Only available for
                     API version >= 1.23
    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    params = {}
    if quiet is not None:
        # The quiet flag only exists on remote API 1.23+; reject early.
        if utils.version_lt(self._version, '1.23'):
            raise errors.InvalidVersion(
                'quiet is not supported in API version < 1.23'
            )
        params['quiet'] = quiet
    res = self._post(
        self._url("/images/load"), data=data, params=params, stream=True
    )
    if utils.version_gte(self._version, '1.23'):
        # Newer daemons stream JSON progress objects; decode them.
        return self._stream_helper(res, decode=True)
    # Older daemons return no useful body; just surface HTTP errors.
    self._raise_for_status(res) | Load an image that was previously saved using
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
save``). Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
quiet (boolean): Suppress progress details in response.
Returns:
(generator): Progress output as JSON objects. Only available for
API version >= 1.23
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. |
def findAddressCandidates(self,
                          addressDict=None,
                          singleLine=None,
                          maxLocations=10,
                          outFields="*",
                          outSR=4326,
                          searchExtent=None,
                          location=None,
                          distance=2000,
                          magicKey=None,
                          category=None):
    """
    The findAddressCandidates operation is performed on a geocode
    service resource; the result is a list of address candidates with
    their address, location, and match score.

    Inputs:
        addressDict - dict of the address fields accepted by the
            geocode service (listed in the service's addressFields
            property, e.g. Street, City, State, Zone).
        singleLine - the whole address formatted as a single string;
            should not be combined with addressDict.  One of
            addressDict / singleLine is required.
        maxLocations - maximum number of candidates to return, up to
            the service's configured maximum.
        outFields - comma-delimited list of candidate fields to
            include in the result set ("*" for all).
        outSR - WKID of the spatial reference for the returned
            candidates.
        searchExtent - bounding box (<xmin>, <ymin>, <xmax>, <ymax>)
            restricting the search.
        location - origin point (a Point instance or [x, y] list) used
            together with `distance` to boost nearby candidates.
        distance - radius in meters around `location` (default 2000).
        magicKey - key returned by a prior `suggest` call; passing it
            with the matching singleLine speeds up the lookup.
        category - only supported by geocode services published using
            StreetMap Premium locators.
    :returns: the JSON response of the service's findAddressCandidates
        endpoint.
    :raises Exception: if neither addressDict nor singleLine is given.
    """
    url = self._url + "/findAddressCandidates"
    params = {
        "f": "json",
        "distance": distance
    }
    if addressDict is None and singleLine is None:
        raise Exception("A singleline address or an address dictionary must be passed into this function")
    if magicKey is not None:
        params['magicKey'] = magicKey
    if category is not None:
        params['category'] = category
    if addressDict is not None:
        # BUG FIX: dict.update() returns None; the original rebound
        # `params` to that None and crashed on the next assignment.
        # Update the dict in place instead.
        params.update(addressDict)
    if singleLine is not None:
        params['SingleLine'] = singleLine
    if maxLocations is not None:
        params['maxLocations'] = maxLocations
    if outFields is not None:
        params['outFields'] = outFields
    if outSR is not None:
        params['outSR'] = {"wkid": outSR}
    if searchExtent is not None:
        params['searchExtent'] = searchExtent
    if isinstance(location, Point):
        params['location'] = location.asDictionary
    elif isinstance(location, list):
        params['location'] = "%s,%s" % (location[0], location[1])
    return self._get(url=url,
                     param_dict=params,
                     securityHandler=self._securityHandler,
                     proxy_url=self._proxy_url,
                     proxy_port=self._proxy_port)
service resource. The result of this operation is a resource
representing the list of address candidates. This resource provides
information about candidates, including the address, location, and
match score. Locators published using ArcGIS Server 10 or later
support the single line address field for the findAddressCandidates
operation.
You can provide arguments to the findAddressCandidates operation as
query parameters defined in the following parameters table:
Inputs:
addressDict - The various address fields accepted by the
corresponding geocode service. These fields are listed in the
addressFields property of the JSON representation associated
geocode service resource.
Example: Suppose that addressFields of a geocode service
resource includes fields with the following names: Street,
City, State, and Zone. If you want to perform the
findAddressCandidates operation by providing values for the
Street and Zone fields, you'd set the query parameters as
Street=380+New+York+St&Zone=92373
singleLine - Specifies the location to be geocoded. The input
address components are formatted as a single string. The
singleLine parameter and <addressField> parameters should not
be passed in the same request.
maxLocations - The maximum number of locations to be returned
by a search, up to the maximum number allowed by the geocode
service. If not specified, the maximum number of candidates
for which the service is configured will be returned.
outFields - The list of fields to be included in the returned
result set. This list is a comma-delimited list of field
names. If you specify the shape field in the list of return
fields, it is ignored. For non-intersection addresses, you can
specify the candidate fields from the geocode service
resource. For intersection addresses, you can specify the
intersection candidate fields from the geocode service
resource.
outSR - The well-known ID (WKID) of the spatial reference or a
spatial reference JSON object for the returned address
candidates. For a list of valid WKID values, see Projected
coordinate systems and Geographic coordinate systems.
searchExtent - The spatial extent (bounding box) to be used in
geocoding. The response will return only the candidates that
are within this spatial extent. Unless the spatialReference is
included in the searchExtent, the coordinates are assumed to
be in the spatial reference of the locator.
Simple syntax: <xmin>, <ymin>, <xmax>, <ymax>
location - Defines an origin point location that is used with
the distance parameter to sort geocoding candidates based on
their proximity to the location. The distance parameter
specifies the radial distance from the location in meters. The
priority of candidates within this radius is boosted relative
to those outside the radius. This is useful in mobile
applications where a user searches for places in the vicinity
of their current GPS location; the location and distance
parameters can be used in this scenario. The location
parameter can be specified without specifying a distance. If
distance is not specified, it defaults to 2000 meters. The
location can be represented with a simple comma-separated
syntax (x,y), or as a JSON point object. If the spatial
reference of the location coordinates is different than that
of the geocode service, then it must be defined in the JSON
object. If the comma-separated syntax is used, or if the
spatial reference is not included in the JSON object, then the
spatial reference of the location is assumed to be the same as
that of the geocode service. This parameter was added at 10.3
and is only supported by geocode services published with
ArcGIS 10.3 for Server and later versions.
distance - Specifies the radius of an area around a point
location that is used to boost the rank of geocoding
candidates so that candidates closest to the location are
returned first. The distance value is in meters. If the
distance parameter is specified, the location parameter must
be specified as well. Unlike the searchExtent parameter, the
location and distance parameters allow searches to extend
beyond the specified search radius. They are not used to
filter results, but rather to rank resulting candidates based
on their distance from a location. You must pass a
searchExtent value in addition to location and distance if you
want to confine the search results to a specific area.
magicKey - The findAddressCandidates operation retrieves
results more quickly when you pass in valid singleLine and
magicKey values than when you don't pass in magicKey. However,
to get this advantage, you need to make a prior request to the
suggest operation, which provides a magicKey. This may or may
not be relevant to your workflow.
The suggest operation is often called on to improve the user
experience of search boxes by analyzing partial text and
providing complete names of places, addresses, points of
interest, and so on. For instance, typing Mbu into a search
box offers Mbuji-Mayi, Democratic Republic of the Congo as a
suggestion, so the user doesn't need to type the complete
name.
Looking at the suggestion process from another perspective, as
the user types, the suggest operation performs a text search,
which is a redundant part of the overall search that the
findAddressCandidates operation can also perform. The user
chooses a place name or type-narrowing the results to a
specific record. The results from suggest include text and
magicKey values that contain the information the user chose;
passing these values from suggest into findAddressCandidates
results in faster and more accurate find operations.
In summary, using the magicKey parameter in
findAddressCandidates is a two-step process:
1. Make a request to suggest. The response includes text and
magicKey properties.
2. Make a request to findAddressCandidates and pass in the
text and magicKey values returned from suggest as the
singleLine and magicKey input parameters respectively.
category - The category parameter is only supported by geocode
services published using StreetMap Premium locators. |
def _find_usage_api_keys(self):
    """
    Find usage on API Keys.
    Update `self.limits`.
    """
    logger.debug('Finding usage for API Keys')
    key_count = 0
    # Page through all API keys; each response page carries an 'items' list.
    paginator = self.conn.get_paginator('get_api_keys')
    for resp in paginator.paginate():
        key_count += len(resp['items'])
    # Record the total against the account-wide API key limit.
    self.limits['API keys per account']._add_current_usage(
        key_count, aws_type='AWS::ApiGateway::ApiKey'
    ) | Find usage on API Keys.
Update `self.limits`. |
def load_entities():
    """Load entities from JSON file.

    Reads TOPDIR/entities.json, validates that entity names are
    unique, and returns a tuple of:
      - a dict mapping entity name -> Entity
      - a dict mapping a dimensions-derived key -> list of entities
        whose dimensions permute to that key.
    Raises Exception when two entities share a name.
    """
    path = os.path.join(TOPDIR, 'entities.json')
    # Use a context manager so the file handle is closed deterministically
    # (the original `json.load(open(path))` leaked the handle).
    with open(path) as fileobj:
        raw = json.load(fileobj)
    names = [i['name'] for i in raw]
    # Explicit check instead of `assert`, which is stripped under -O.
    if len(set(names)) != len(raw):
        raise Exception('Entities with same name: %s' % [i for i in names if
                                                         names.count(i) > 1])
    entities = dict((k['name'], c.Entity(name=k['name'],
                                         dimensions=k['dimensions'],
                                         uri=k['URI'])) for k in raw)
    # Index entities by every permutation of their dimensions.
    dimensions_ent = defaultdict(list)
    for name, ent in entities.items():
        if not ent.dimensions:
            continue
        perms = get_dimension_permutations(entities, ent.dimensions)
        for perm in perms:
            key = get_key_from_dimensions(perm)
            dimensions_ent[key].append(ent)
    return entities, dimensions_ent
def extract_slitlet2d(self, image_2k2k):
    """Extract slitlet 2d image from image with original EMIR dimensions.
    Parameters
    ----------
    image_2k2k : numpy array
        Original image (dimensions EMIR_NAXIS1 * EMIR_NAXIS2)
    Returns
    -------
    slitlet2d : numpy array
        Image corresponding to the slitlet region defined by its
        bounding box.
    """
    # protections: the input must have the full EMIR detector shape
    naxis2, naxis1 = image_2k2k.shape
    if naxis1 != EMIR_NAXIS1:
        raise ValueError('Unexpected naxis1')
    if naxis2 != EMIR_NAXIS2:
        raise ValueError('Unexpected naxis2')
    # extract slitlet region (bounding-box limits are 1-based, inclusive)
    slitlet2d = image_2k2k[(self.bb_ns1_orig - 1):self.bb_ns2_orig,
                           (self.bb_nc1_orig - 1):self.bb_nc2_orig]
    # transform to float
    # BUG FIX: np.float was removed in NumPy 1.24; use np.float64 (the
    # alias previously resolved to builtin float, i.e. float64).
    slitlet2d = slitlet2d.astype(np.float64)
    # display slitlet2d with boundaries and middle spectrum trail
    if abs(self.debugplot) in [21, 22]:
        self.ximshow_unrectified(slitlet2d)
    # return slitlet image
    return slitlet2d
Parameters
----------
image_2k2k : numpy array
Original image (dimensions EMIR_NAXIS1 * EMIR_NAXIS2)
Returns
-------
slitlet2d : numpy array
Image corresponding to the slitlet region defined by its
bounding box. |
def Send(self, request_path, payload=None,
         content_type="application/octet-stream",
         timeout=None,
         **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # NOTE(review): this is Python 2 code (urllib2, `except X, e` syntax).
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
        self._Authenticate()

    # The timeout is applied process-wide via the socket module; save the
    # previous default so it can be restored in the `finally` below.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        tries = 0
        while True:
            tries += 1
            args = dict(kwargs)
            url = "https://%s%s" % (self.host, request_path)
            if args:
                url += "?" + urllib.urlencode(args)
            req = self._CreateRequest(url=url, data=payload)
            req.add_header("Content-Type", content_type)
            try:
                f = self.opener.open(req)
                response = f.read()
                f.close()
                return response
            except urllib2.HTTPError, e:
                # 401 (unauthorized) and 302 (redirect to a login page)
                # indicate stale credentials: re-authenticate and retry,
                # giving up after 3 attempts. Any other HTTP error is fatal.
                if tries > 3:
                    raise
                elif e.code == 401 or e.code == 302:
                    self._Authenticate()
                else:
                    raise
    finally:
        # Always restore the process-wide socket timeout.
        socket.setdefaulttimeout(old_timeout)
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string. |
def p_sizeof(p):
    """ bexpr : SIZEOF LP type RP
              | SIZEOF LP ID RP
              | SIZEOF LP ARRAY_ID RP
    """
    # NOTE: the docstring above is the PLY grammar rule for this production;
    # the parser generator reads it, so it must not be edited casually.
    # If the token names a builtin type (e.g. SIZEOF(integer)), emit that
    # type's size as a numeric literal directly.
    if TYPE.to_type(p[3].lower()) is not None:
        p[0] = make_number(TYPE.size(TYPE.to_type(p[3].lower())),
                           lineno=p.lineno(3))
    else:
        # Otherwise treat it as an identifier: look it up in the symbol
        # table (implicitly declaring a variable if it does not exist yet)
        # and use the size of that entry's type.
        entry = SYMBOL_TABLE.get_id_or_make_var(p[3], p.lineno(1))
        p[0] = make_number(TYPE.size(entry.type_), lineno=p.lineno(3))
| SIZEOF LP ID RP
| SIZEOF LP ARRAY_ID RP |
def parsed_function_to_ast(parsed: "Parsed", parsed_key):
    """Create AST for top-level functions.

    Args:
        parsed: Mapping of keys/spans to parsed components.
        parsed_key: Key of the function entry in ``parsed`` to convert.

    Returns:
        dict: AST subtree for the function. ``Function`` arguments are
        converted recursively (looked up in ``parsed`` by their span);
        ``NSArg`` and ``StrArg`` arguments are copied over. Arguments of
        any other type are silently skipped.
    """
    # Forward-reference annotation ("Parsed") so defining this function does
    # not require the Parsed type to be resolvable at definition time.
    sub = parsed[parsed_key]
    subtree = {
        "type": "Function",
        "span": sub["span"],
        "function": {
            "name": sub["name"],
            "name_span": sub["name_span"],
            "parens_span": sub.get("parens_span", []),
        },
    }
    args = []
    for arg in sub.get("args", []):
        if arg["type"] == "Function":
            # Nested function call: recurse using its span as the key.
            args.append(parsed_function_to_ast(parsed, arg["span"]))
        elif arg["type"] == "NSArg":
            args.append(
                {
                    "arg": arg["arg"],
                    "type": arg["type"],
                    "span": arg["span"],
                    "nsarg": {
                        "ns": arg["ns"],
                        "ns_val": arg["ns_val"],
                        "ns_span": arg["ns_span"],
                        "ns_val_span": arg["ns_val_span"],
                    },
                }
            )
        elif arg["type"] == "StrArg":
            args.append({"arg": arg["arg"], "type": arg["type"], "span": arg["span"]})
    # Deep-copy so later mutation of the AST cannot alias back into `parsed`.
    subtree["args"] = copy.deepcopy(args)
    return subtree
def clear(self):
    """
    Clears the text for this edit and resizes the toolbar information.
    """
    super(XTextEdit, self).clear()
    # Notify listeners that the content is now empty, in both the
    # plain-text and HTML representations.
    self.textEntered.emit('')
    self.htmlEntered.emit('')
    # Shrink the widget back down if it is configured to track its content.
    if self.autoResizeToContents():
        self.resizeToContents()
def plan(self, sql, timeout=10):
    """Return Drill's execution plan for the given statement.

    :param sql: string
    :param timeout: int
    :return: pydrill.client.ResultQuery
    """
    # Prefix the statement with EXPLAIN and run it like any other query.
    return self.query('explain plan for ' + sql, timeout)
:param timeout: int
:return: pydrill.client.ResultQuery |
def query(self, model_or_index, key, filter=None, projection="all", consistent=False, forward=True):
    """Create a reusable :class:`~bloop.search.QueryIterator`.

    :param model_or_index: A model or index to query. For example, ``User`` or ``User.by_email``.
    :param key:
        Key condition. This must include an equality against the hash key, and optionally one
        of a restricted set of conditions on the range key.
    :param filter: Filter condition. Only matching objects will be included in the results.
    :param projection:
        "all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is
        "count", you must advance the iterator to retrieve the count.
    :param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
    :param bool forward: Query in ascending or descending order. Default is True (ascending).
    :return: A reusable query iterator with helper methods.
    :rtype: :class:`~bloop.search.QueryIterator`

    __ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
    """
    # An Index carries a reference back to its model; otherwise the
    # argument *is* the model and no index is involved.
    if isinstance(model_or_index, Index):
        index = model_or_index
        model = model_or_index.model
    else:
        index = None
        model = model_or_index
    validate_not_abstract(model)
    search = Search(
        mode="query", engine=self, model=model, index=index, key=key,
        filter=filter, projection=projection, consistent=consistent,
        forward=forward)
    return iter(search.prepare())
:param model_or_index: A model or index to query. For example, ``User`` or ``User.by_email``.
:param key:
Key condition. This must include an equality against the hash key, and optionally one
of a restricted set of conditions on the range key.
:param filter: Filter condition. Only matching objects will be included in the results.
:param projection:
"all", "count", a list of column names, or a list of :class:`~bloop.models.Column`. When projection is
"count", you must advance the iterator to retrieve the count.
:param bool consistent: Use `strongly consistent reads`__ if True. Default is False.
:param bool forward: Query in ascending or descending order. Default is True (ascending).
:return: A reusable query iterator with helper methods.
:rtype: :class:`~bloop.search.QueryIterator`
__ http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html |
def process_request():
    """
    Retrieve a CameraStatus, Event or FileRecord from the request, based on the supplied type and ID. If the type is
    'cached_request' then the ID must be specified in 'cached_request_id' - if this ID is not for an entity in the
    cache this method will return None and clear the cache (this should only happen under conditions where we've
    failed to correctly handle caching, such as a server restart or under extreme load, but will result in the
    server having to re-request a previous value from the exporting party).
    :return:
        An ImportRequest holding 'entity' - the entity for this request, or None on an unexpected cache miss -
        and 'entity_id', the UUID of the requested entity.
    """
    # Parse the request body once and stash it on the Flask request globals.
    payload = safe_load(request.get_data())
    g.request_dict = payload
    entity_type = payload['type']
    entity_id = payload[entity_type]['id']
    ImportRequest.logger.debug("Received request, type={0}, id={1}".format(entity_type, entity_id))
    entity = ImportRequest._get_entity(entity_id)
    ImportRequest.logger.debug("Entity with id={0} was {1}".format(entity_id, entity))
    return ImportRequest(entity=entity, entity_id=entity_id)
'cached_request' then the ID must be specified in 'cached_request_id' - if this ID is not for an entity in the
cache this method will return None and clear the cache (this should only happen under conditions where we've
failed to correctly handle caching, such as a server restart or under extreme load, but will result in the
server having to re-request a previous value from the exporting party).
:return:
A dict containing 'entity' - the entity for this request or None if there was an issue causing an unexpected
cache miss, and 'entity-id' which will be the UUID of the entity requested.
The entity corresponding to this request, or None if we had an issue and there was an unexpected cache miss. |
def operations(self):
    """
    All of the operations that are done by this functions.
    """
    ops = []
    for block in self.blocks:
        ops.extend(block.vex.operations)
    return ops
def raw_search(cls, user, token, query, page=0):
    """Do a raw search for github issues.

    :arg user: Username to use in accessing github.

    :arg token: Token to use in accessing github.

    :arg query: String query to use in searching github.

    :arg page=0: Number of pages to automatically paginate.

    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    :returns: The pair (result, header) representing the result
              from github along with the header.

    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    PURPOSE: Search for issues on github. If page > 0 then we
             will pull out up to page more pages via automatic
             pagination. The best way to check if you got the
             full results is to check if results['total_count']
             matches len(results['items']).
    """
    page = int(page)
    # Only authenticate when a username is provided; anonymous requests
    # get stricter rate limits but still work.
    kwargs = {} if not user else {'auth': (user, token)}
    my_url = cls.search_url
    # Accumulate 'items' across pages; all other response keys are merged
    # in via data.update() and so reflect the *last* page fetched.
    data = {'items': []}
    while my_url:
        # Respect GitHub's rate limits before each request.
        cls.sleep_if_necessary(
            user, token, msg='\nquery="%s"' % str(query))
        my_req = requests.get(my_url, params={'q': query}, **kwargs)
        if my_req.status_code != 200:
            raise GitHubAngry(
                'Bad status code %s finding query %s because %s' % (
                    my_req.status_code, query, my_req.reason))
        my_json = my_req.json()
        assert isinstance(my_json['items'], list)
        data['items'].extend(my_json.pop('items'))
        data.update(my_json)
        # Stop unless pagination is allowed (page > 0) and GitHub's Link
        # header advertises a 'next' page.
        my_url = None
        if page and my_req.links.get('next', False):
            my_url = my_req.links['next']['url']
            if my_url:
                page = page - 1
                logging.debug(
                    'Paginating %s in raw_search (%i more pages allowed)',
                    my_req.links, page)
    # Headers come from the final request made.
    return data, my_req.headers
:arg user: Username to use in accessing github.
:arg token: Token to use in accessing github.
:arg query: String query to use in searching github.
:arg page=0: Number of pages to automatically paginate.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
:returns: The pair (result, header) representing the result
from github along with the header.
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
PURPOSE: Search for issues on github. If page > 0 then we
will pull out up to page more pages via automatic
pagination. The best way to check if you got the
full results is to check if results['total_count']
matches len(results['items']). |
def _parse_tags(tag_file):
    """Parse a tag file, according to RFC 2822. This
    includes line folding, permitting extra-long
    field values.

    See http://www.faqs.org/rfcs/rfc2822.html for
    more information.

    :param tag_file: An open file object whose lines are ``name: value``
        pairs; continuation (folded) lines begin with whitespace.
    :yields: ``(tag_name, tag_value)`` tuples with the value stripped.
    :raises BagValidationError: If a non-blank, non-continuation line
        lacks a ``:`` separator.
    """
    tag_name = None
    tag_value = None
    # Line folding is handled by yielding values only after we encounter
    # the start of a new tag, or if we pass the EOF.
    for num, line in enumerate(tag_file):
        # If the first line carries a byte-order mark, drop it.
        # Use a prefix slice rather than lstrip(): lstrip(BOM) treats the
        # BOM as a *set of characters* and could strip legitimate leading
        # characters that happen to be in that set.
        if num == 0 and line.startswith(BOM):
            line = line[len(BOM):]
        # Skip over any empty or blank lines.
        if len(line) == 0 or line.isspace():
            continue
        elif line[0].isspace() and tag_value is not None:  # folded line
            tag_value += line
        else:
            # Starting a new tag; yield the last one.
            if tag_name:
                yield (tag_name, tag_value.strip())
            if ':' not in line:
                raise BagValidationError("invalid line '%s' in %s" % (line.strip(),
                                         os.path.basename(tag_file.name)))
            parts = line.strip().split(':', 1)
            tag_name = parts[0].strip()
            tag_value = parts[1]
    # Passed the EOF. All done after this.
    if tag_name:
        yield (tag_name, tag_value.strip())
includes line folding, permitting extra-long
field values.
See http://www.faqs.org/rfcs/rfc2822.html for
more information. |
def set_queue_metadata(self, queue_name, metadata=None, timeout=None):
    '''
    Sets user-defined metadata on the specified queue. Metadata is
    associated with the queue as name-value pairs.

    :param str queue_name:
        The name of an existing queue.
    :param dict metadata:
        A dict containing name-value pairs to associate with the
        queue as metadata.
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    _validate_not_none('queue_name', queue_name)
    # Build a PUT against the queue's metadata sub-resource.
    req = HTTPRequest()
    req.method = 'PUT'
    req.host_locations = self._get_host_locations()
    req.path = _get_path(queue_name)
    # 'comp=metadata' selects the metadata component of the queue resource.
    req.query = {
        'comp': 'metadata',
        'timeout': _int_to_str(timeout),
    }
    _add_metadata_headers(metadata, req)
    self._perform_request(req)
associated with the queue as name-value pairs.
:param str queue_name:
The name of an existing queue.
:param dict metadata:
A dict containing name-value pairs to associate with the
queue as metadata.
:param int timeout:
The server timeout, expressed in seconds. |
def _get_mod_conditions(self, mod_term):
    """Return a list of ModConditions given a mod term dict."""
    site = mod_term.get('site')
    # With no site information, fall back to a single unspecified site.
    if site is None:
        mods = [Site(None, None)]
    else:
        mods = self._parse_site_text(site)
    mcs = []
    for mod_res, mod_pos in mods:
        mod_type_str = mod_term['type'].lower()
        mod_state = agent_mod_map.get(mod_type_str)
        if mod_state is None:
            # Unknown modification type: warn and skip this site.
            logger.warning('Unhandled entity modification type: %s'
                           % mod_type_str)
            continue
        mcs.append(ModCondition(mod_state[0], residue=mod_res,
                                position=mod_pos,
                                is_modified=mod_state[1]))
    return mcs
def _set_password_attributes(self, v, load=False):
    """
    Setter method for password_attributes, mapped from YANG variable /password_attributes (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_password_attributes is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_password_attributes() directly.
    """
    # NOTE: auto-generated (pyangbind-style) code; keep in sync with the
    # YANG model rather than editing by hand.
    if hasattr(v, "_utype"):
        # Unwrap a union/typedef wrapper into its underlying value first.
        v = v._utype(v)
    try:
        # Coerce the value into the generated container class; raises
        # TypeError/ValueError if it is not compatible.
        t = YANGDynClass(v,base=password_attributes.password_attributes, is_container='container', presence=False, yang_name="password-attributes", rest_name="password-attributes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure system wide user password attributes', u'sort-priority': u'8', u'callpoint': u'password_attributes_cp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """password_attributes must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=password_attributes.password_attributes, is_container='container', presence=False, yang_name="password-attributes", rest_name="password-attributes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure system wide user password attributes', u'sort-priority': u'8', u'callpoint': u'password_attributes_cp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
        })
    self.__password_attributes = t
    # Give subclasses a post-assignment hook if they define one.
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_password_attributes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_password_attributes() directly. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.