code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def _set_link_oam(self, v, load=False):
    """
    Setter method for link_oam, mapped from YANG variable /protocol/link_oam (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_link_oam is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_link_oam() directly.
    """
    # Values that carry their own YANG type converter are normalized first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        wrapped = YANGDynClass(v,base=link_oam.link_oam, is_container='container', presence=True, yang_name="link-oam", rest_name="link-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link OAM Protocol configuration mode', u'callpoint': u'setDot3ahEnable', u'sort-priority': u'68', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'config-link-oam'}}, namespace='urn:brocade.com:mgmt:brocade-dot3ah', defining_module='brocade-dot3ah', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({
            'error-string': """link_oam must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=link_oam.link_oam, is_container='container', presence=True, yang_name="link-oam", rest_name="link-oam", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Link OAM Protocol configuration mode', u'callpoint': u'setDot3ahEnable', u'sort-priority': u'68', u'cli-full-command': None, u'cli-add-mode': None, u'cli-full-no': None, u'cli-mode-name': u'config-link-oam'}}, namespace='urn:brocade.com:mgmt:brocade-dot3ah', defining_module='brocade-dot3ah', yang_type='container', is_config=True)""",
        })
    self.__link_oam = wrapped
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def counts(args):
    """
    %prog counts vcffile
    Collect allele counts from RO and AO fields.
    """
    p = OptionParser(counts.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    (vcffile,) = args
    reader = vcf.Reader(open(vcffile))
    for record in reader:
        variant = CPRA(record)
        # Skip records that do not form a valid CPRA variant.
        if not variant.is_valid:
            continue
        for sample in record.samples:
            # One line per sample: variant, reference count, alternate count.
            print("\t".join(str(x) for x in (variant, sample["RO"], sample["AO"])))
def parseHolidays(holidaysStr, holidayMap=None):
    """
    Takes a string like NZ[WTL,Nelson],AU[*],Northern Ireland and builds a HolidaySum from it
    """
    if holidayMap is None:
        holidayMap = _HOLIDAY_MAP
    combined = holidays.HolidayBase()
    combined.country = None
    # Each match is a (country, subdivisions) pair produced by HolsRe.
    for country, subdivisions in HolsRe.findall(holidaysStr.strip()):
        cls = holidayMap.get(country)
        if cls is None:
            # Unknown country codes are silently ignored.
            continue
        if subdivisions:
            combined += _parseSubdivisions(subdivisions, cls)
        else:
            combined += cls()
    return combined
def decode_param(data):
    """Decode any parameter to a byte sequence.

    :param data: byte sequence representing an LLRP parameter.
    :returns dict, bytes: where dict is {'Type': <decoded type>, 'Data':
        <decoded data>} and bytes is the remaining bytes trailing the bytes we
        could decode.
    """
    logger.debug('decode_param data: %r', data)
    header_len = struct.calcsize('!HH')
    partype, parlen = struct.unpack('!HH', data[:header_len])
    # parlen counts the header itself, so the payload runs up to offset parlen.
    pardata = data[header_len:parlen]
    logger.debug('decode_param pardata: %r', pardata)
    ret = {
        'Type': partype,
    }
    if partype == 1023:
        # Custom parameter: a vendor ID and subtype precede the payload.
        vsfmt = '!II'
        vslen = struct.calcsize(vsfmt)
        vendor, subtype = struct.unpack(vsfmt, pardata[:vslen])
        ret['Vendor'] = vendor
        ret['Subtype'] = subtype
        ret['Data'] = pardata[vslen:]
    else:
        # BUG FIX: a stray trailing comma previously made 'Data' a 1-tuple
        # (pardata,) instead of the raw bytes promised by the docstring and
        # produced by the vendor branch above.
        ret['Data'] = pardata
    return ret, data[parlen:]
def varintSize(value):
    """Compute the size of a varint value."""
    # Each varint byte holds 7 payload bits; grow the bound 7 bits at a
    # time until it covers the value, capping at the 10-byte maximum.
    size = 1
    bound = 0x7f
    while value > bound and size < 10:
        size += 1
        bound = (bound << 7) | 0x7f
    return size
def dbmax50years(self, value=None):
    """ Corresponds to IDD Field `dbmax50years`
    50-year return period values for maximum extreme dry-bulb temperature

    Args:
        value (float): value for IDD Field `dbmax50years`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None means "missing value" and is stored unchecked.
        self._dbmax50years = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dbmax50years`'.format(value))
    self._dbmax50years = converted
async def stop(self):
    """
    Stop recording.
    """
    # Nothing to do when no output container is open.
    if self.__container:
        for track, context in self.__tracks.items():
            # Cancel the per-track consumer task, if still running.
            if context.task is not None:
                context.task.cancel()
                context.task = None
            # Flush the encoder and write the remaining packets.
            for packet in context.stream.encode(None):
                self.__container.mux(packet)
        self.__tracks = {}
        # Finalize and release the container.
        self.__container.close()
        self.__container = None
def serve(self, host='127.0.0.1', port=5000):
    """Serve predictions as an API endpoint."""
    from meinheld import server, middleware
    # Previous plain-Flask runner: self.app.run(host=host, port=port)
    app = middleware.WebSocketMiddleware(self.app)
    server.listen((host, port))
    # NOTE(review): this blocks running the meinheld event loop.
    server.run(app)
def p_bexpr_func(p):
    """ bexpr : ID bexpr
    """
    # NOTE: the docstring above is a PLY grammar rule and must not be edited.
    args = make_arg_list(make_argument(p[2], p.lineno(2)))
    p[0] = make_call(p[1], p.lineno(1), args)
    if p[0] is None:
        return
    result = p[0]
    if result.token in ('STRSLICE', 'VAR', 'STRING'):
        # The call resolved to a slice/variable access rather than a function.
        entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1))
        entry.accessed = True
        return
    # TODO: Check that arrays really needs kind=function to be set
    # Both array accesses and functions are tagged as functions
    # functions also has the class_ attribute set to 'function'
    result.entry.set_kind(KIND.function, p.lineno(1))
    result.entry.accessed = True
def list_user_logins_users(self, user_id):
    """
    List user logins.
    Given a user ID, return that user's logins for the given account.
    """
    # REQUIRED - PATH - user_id ("""ID""")
    path = {"user_id": user_id}
    data = {}
    params = {}
    self.logger.debug("GET /api/v1/users/{user_id}/logins with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/users/{user_id}/logins".format(**path), data=data, params=params, all_pages=True)
def single(self, predicate=None):
    '''The only element (which satisfies a condition).

    Returns the single element of the sequence, or — when ``predicate``
    is supplied — the single element for which the predicate is True.
    Exceptions are raised when no such element, or more than one such
    element, exists.

    Note: This method uses immediate execution.

    Args:
        predicate: An optional unary predicate function taking the
            element and returning True for matching elements. If omitted
            or None, the only element of the source sequence is returned.

    Returns:
        The only element of the sequence (or the only matching element).

    Raises:
        ValueError: If the Queryable is closed.
        ValueError: If the sequence (or the set of matching elements)
            is empty or contains more than one element.
        TypeError: If the predicate is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call single() on a closed Queryable.")
    if predicate is None:
        return self._single()
    return self._single_predicate(predicate)
def _embedding_classical_mds(matrix, dimensions=3, additive_correct=False):
    """
    Private method to calculate CMDS embedding
    :param dimensions: (int)
    :return: coordinate matrix (np.array)
    """
    corrected = _additive_correct(matrix) if additive_correct else matrix
    dbc = double_centre(corrected)
    decomp = eigen(dbc)
    # Scale the leading eigenvectors by sqrt of |eigenvalues|.
    scale = np.diag(np.sqrt(np.abs(decomp.vals[:dimensions])))
    leading_vecs = decomp.vecs[:, :dimensions]
    return leading_vecs.dot(scale)
def create_or_update_tags(self, tags):
    """
    Creates new tags or updates existing tags for an Auto Scaling group.

    :type tags: List of :class:`boto.ec2.autoscale.tag.Tag`
    :param tags: The new or updated tags.
    """
    params = {}
    # Tag parameters are 1-indexed in the request.
    for index, tag in enumerate(tags, start=1):
        tag.build_params(params, index)
    return self.get_status('CreateOrUpdateTags', params, verb='POST')
def GetLinkedFileEntry(self):
    """Retrieves the linked file entry, e.g. for a symbolic link.

    Returns:
      APFSFileEntry: linked file entry or None if not available.
    """
    link = self._GetLink()
    if not link:
        return None
    # TODO: is there a way to determine the identifier here?
    link_identifier = None
    parent_path_spec = getattr(self.path_spec, 'parent', None)
    path_spec = apfs_path_spec.APFSPathSpec(
        location=link, parent=parent_path_spec)
    # The link is the root when it points at the root location or identifier.
    is_root = bool(
        link == self._file_system.LOCATION_ROOT or
        link_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
    return APFSFileEntry(
        self._resolver_context, self._file_system, path_spec, is_root=is_root)
def group(self):
    """returns the community.Group class for the current group"""
    # Swap the "/content/" segment of the URL for "/community/".
    idx = self._url.lower().find("/content/")
    tail = self._url[idx + len('/content/'):]
    gURL = self._url[:idx] + "/community/" + tail
    return CommunityGroup(url=gURL,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
def _handle_force_timeout(self) -> None:
    """Called by IOLoop periodically to ask libcurl to process any
    events it may have forgotten about.
    """
    # Keep pumping while libcurl reports E_CALL_MULTI_PERFORM.
    while True:
        try:
            status, _num_handles = self._multi.socket_all()
        except pycurl.error as exc:
            status = exc.args[0]
        if status != pycurl.E_CALL_MULTI_PERFORM:
            break
    self._finish_pending_requests()
def add_new(self, command):
    """Add a new entry to the queue."""
    # Initialize the bookkeeping fields on the command entry in place.
    entry = command
    entry['status'] = 'queued'
    for field in ('returncode', 'stdout', 'stderr', 'start', 'end'):
        entry[field] = ''
    self.queue[self.next_key] = entry
    self.next_key += 1
    # Persist the queue after every mutation.
    self.write()
def calc_requiredrelease_v2(self):
    """Calculate the water release (immediately downstream) required for
    reducing drought events.

    Required control parameter:
      |NearDischargeMinimumThreshold|
    Required derived parameter:
      |dam_derived.TOY|
    Calculated flux sequence:
      |RequiredRelease|
    Basic equation:
      :math:`RequiredRelease = NearDischargeMinimumThreshold`

    The required release is simply the seasonal (time-of-year dependent)
    minimum-discharge threshold for the current simulation step.
    """
    control = self.parameters.control.fastaccess
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    # Look up the threshold for the current time of year.
    season = derived.toy[self.idx_sim]
    fluxes.requiredrelease = control.neardischargeminimumthreshold[season]
def register_options(cls, register):
    """Register an option to make capturing snapshots optional.

    This class is intended to be extended by Jvm resolvers (coursier and ivy),
    and the option name should reflect that.
    """
    super(JvmResolverBase, cls).register_options(register)
    # TODO This flag should be defaulted to True when we are doing hermetic execution,
    # and should probably go away as we move forward into that direction.
    register('--capture-snapshots', type=bool, default=False,
             # BUG FIX: the two concatenated help fragments previously ran
             # together as "jars.Note" — a space was missing at the join.
             help='Enable capturing snapshots to add directory digests to dependency jars. '
                  'Note that this is necessary when hermetic execution is enabled.')
def remove_link_type_vlan(enode, name, shell=None):
    """
    Delete a virtual link.

    Deletes a vlan device with the name {name}.
    Will raise an exception if the port is not already present.

    :param enode: Engine node to communicate with.
    :type enode: topology.platforms.base.BaseNode
    :param str name: specifies the name of the new
     virtual device.
    :param str shell: Shell name to execute commands. If ``None``, use the
     Engine Node default shell.
    """
    assert name
    if name not in enode.ports:
        raise ValueError('Port {name} doesn\'t exists'.format(name=name))
    command = 'ip link del link dev {name}'.format(name=name)
    output = enode(command, shell=shell)
    # Any output from the command indicates a failure.
    assert not output, 'Cannot remove virtual link {name}'.format(name=name)
    del enode.ports[name]
def aggregate_cap_val(self, conn, **kwargs):
    '''
    Returns the normalised feedin profile and installed capacity for
    a given region.

    Parameters
    ----------
    region : Region instance
    region.geom : shapely.geometry object
        Geo-spatial data with information for location/region-shape. The
        geometry can be a polygon/multi-polygon or a point.

    Returns
    -------
    feedin_df : pandas dataframe
        Dataframe containing the normalised feedin profile of the given
        region. Index of the dataframe is the hour of the year; columns
        are 'pv_pwr' and 'wind_pwr'.
    cap : pandas series
        Series containing the installed capacity (in W) of PV and wind
        turbines of the given region.
    '''
    region = kwargs['region']
    pv_df, wind_df, cap = self.get_timeseries(
        conn, geometry=region.geom, **kwargs)
    if kwargs.get('store', False):
        self.store_full_df(pv_df, wind_df, **kwargs)
    # Collapse per-plant columns into one pv column and one wind column,
    # normalised by the total installed capacity of each technology.
    cap = cap.sum()
    normalised = pd.concat([pv_df.sum(axis=1) / cap['pv_pwr'],
                            wind_df.sum(axis=1) / cap['wind_pwr']], axis=1)
    feedin_df = normalised.rename(columns={0: 'pv_pwr', 1: 'wind_pwr'})
    return feedin_df, cap
def add_child(self, node, as_last=True):
    """
    Add the given child to the current list of children.

    The new child is appended as the last child if ``as_last``
    is ``True``, or as the first child if ``as_last`` is ``False``.
    This call updates the ``__parent`` and ``__level`` fields of ``node``.

    :param node: the child node to be added
    :type node: :class:`~aeneas.tree.Tree`
    :param bool as_last: if ``True``, append the node as the last child;
        if ``False``, append the node as the first child
    :raises: TypeError if ``node`` is not an instance of :class:`~aeneas.tree.Tree`
    """
    if not isinstance(node, Tree):
        self.log_exc(u"node is not an instance of Tree", None, True, TypeError)
    if as_last:
        self.__children.append(node)
    else:
        # Rebind (rather than insert) to mirror the original semantics.
        self.__children = [node] + self.__children
    node.__parent = self
    # Every node in the attached subtree is pushed down by our depth + 1.
    depth_shift = self.level + 1
    for descendant in node.subtree:
        descendant.__level += depth_shift
def create_data_and_metadata_io_handler(self, io_handler_delegate):
    """Create an I/O handler that reads and writes a single data_and_metadata.

    :param io_handler_delegate: A delegate object :py:class:`DataAndMetadataIOHandlerInterface`

    .. versionadded:: 1.0
    Scriptable: No
    """

    class DelegateIOHandler(ImportExportManager.ImportExportHandler):
        # Adapts the delegate object to the ImportExportHandler interface.

        def __init__(self):
            super().__init__(io_handler_delegate.io_handler_id,
                             io_handler_delegate.io_handler_name,
                             io_handler_delegate.io_handler_extensions)

        def read_data_elements(self, ui, extension, file_path):
            # Delegate the read, then wrap the result as a data-element list.
            xdata = io_handler_delegate.read_data_and_metadata(extension, file_path)
            return [ImportExportManager.create_data_element_from_extended_data(xdata)]

        def can_write(self, data_and_metadata, extension):
            return io_handler_delegate.can_write_data_and_metadata(data_and_metadata, extension)

        def write_display_item(self, ui, display_item: DisplayItemModule.DisplayItem, file_path: str, extension: str) -> None:
            data_item = display_item.data_item
            if data_item:
                self.write_data_item(ui, data_item, file_path, extension)

        def write_data_item(self, ui, data_item, file_path, extension):
            xdata = data_item.xdata
            if xdata.data is not None:
                # Prefer the delegate's write_data_item hook when provided.
                if hasattr(io_handler_delegate, "write_data_item"):
                    io_handler_delegate.write_data_item(DataItem(data_item), file_path, extension)
                else:
                    assert hasattr(io_handler_delegate, "write_data_and_metadata")
                    io_handler_delegate.write_data_and_metadata(xdata, file_path, extension)

    class IOHandlerReference:
        # Keeps the handler registered until close() (or GC) unregisters it.

        def __init__(self):
            self.__io_handler_delegate = io_handler_delegate
            self.__io_handler = DelegateIOHandler()
            ImportExportManager.ImportExportManager().register_io_handler(self.__io_handler)

        def __del__(self):
            self.close()

        def close(self):
            if self.__io_handler_delegate:
                # Give the delegate a chance to clean up, if it supports it.
                close_fn = getattr(self.__io_handler_delegate, "close", None)
                if close_fn:
                    close_fn()
                ImportExportManager.ImportExportManager().unregister_io_handler(self.__io_handler)
                self.__io_handler_delegate = None

    return IOHandlerReference()
def process_refs_for_node(cls, manifest, current_project, node):
    """Given a manifest and a node in that manifest, process its refs"""
    target_model = None
    target_model_name = None
    target_model_package = None
    for ref in node.refs:
        # NOTE(review): a 1-element ref leaves target_model_package at its
        # value from the previous iteration; this quirk of the original is
        # preserved as-is — confirm whether it is intentional.
        if len(ref) == 1:
            target_model_name = ref[0]
        elif len(ref) == 2:
            target_model_package, target_model_name = ref
        target_model = cls.resolve_ref(
            manifest,
            target_model_name,
            target_model_package,
            current_project,
            node.get('package_name'))
        if target_model is None or target_model is cls.DISABLED:
            # This may raise. Even if it doesn't, we don't want to add
            # this node to the graph b/c there is no destination node
            node.config['enabled'] = False
            dbt.utils.invalid_ref_fail_unless_test(
                node, target_model_name, target_model_package,
                disabled=(target_model is cls.DISABLED)
            )
            continue
        node.depends_on['nodes'].append(target_model.get('unique_id'))
    manifest.nodes[node['unique_id']] = node
def neg_loglik(self, beta):
    """ Creates the negative log-likelihood of the model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    The negative logliklihood of the model
    """
    mu, Y = self._model(beta)
    # Explicit `is False` check preserved from the original semantics.
    if self.use_ols_covariance is False:
        cm = self.custom_covariance(beta)
    else:
        cm = self.ols_covariance()
    diff = Y.T - mu.T
    obs, dim = mu.T.shape
    # Multivariate normal log-likelihood constant + log-determinant terms.
    ll1 = -(obs * dim / 2.0) * np.log(2.0 * np.pi) \
        - (obs / 2.0) * np.linalg.slogdet(cm)[1]
    inverse = np.linalg.pinv(cm)
    return var_likelihood(ll1, obs, diff, inverse)
def mkFilter(parts):
    """Convert a filter-convertable thing into a filter
    @param parts: a filter, an endpoint, a callable, or a list of any of these.
    """
    if parts is None:
        parts = [BasicServiceEndpoint]
    # A single non-iterable item is wrapped into a one-element list.
    try:
        listed = list(parts)
    except TypeError:
        return mkCompoundFilter([parts])
    return mkCompoundFilter(listed)
def forward(self, observations):
    """ Model forward pass """
    features = self.input_block(observations)
    advantage_features, value_features = self.backbone(features)
    # Dueling head combines advantage and value streams into a log histogram.
    return self.q_head(advantage_features, value_features)
def unsign(self, signed_value):
    """Unsigns the given string."""
    signed_value = want_bytes(signed_value)
    sep = want_bytes(self.sep)
    if sep not in signed_value:
        raise BadSignature('No %r found in value' % self.sep)
    # Split on the LAST separator: the payload may itself contain it.
    value, sig = signed_value.rsplit(sep, 1)
    if not self.verify_signature(value, sig):
        raise BadSignature('Signature %r does not match' % sig,
                           payload=value)
    return value
def chain(request):
    """shows how the XmlQuerySetChain can be used instead of @toxml decorator"""
    qsc = XmlQuerySetChain(foobar_models.Bar.objects.all(),
                           foobar_models.Baz.objects.all())
    return HttpResponse(tree.xml(qsc), mimetype='text/xml')
def _is_known_signed_by_dtype(dt):
    """Helper returning True if dtype is known to be signed."""
    # The original mapped each dtype to True; membership is equivalent.
    signed_dtypes = (
        tf.float16, tf.float32, tf.float64,
        tf.int8, tf.int16, tf.int32, tf.int64,
    )
    return dt.base_dtype in signed_dtypes
def start_output(self):
    """
    Start log output.
    """
    # Fall back to all known fields when no explicit part list is set.
    parts = Fields.keys() if self.logparts is None else self.logparts
    # Widest localized part name (plus one) determines the indent column.
    self.max_indent = max(len(self.part(x)) for x in parts) + 1
    for key in parts:
        pad = self.max_indent - len(self.part(key))
        self.logspaces[key] = u" " * pad
    self.stats.reset()
    self.starttime = time.time()
def reverse_cyk_transforms(root):
    # type: (Nonterminal) -> Nonterminal
    """
    Reverse transformation made to grammar before CYK.
    Performs following steps:
    - transform from chomsky normal form
    - restore unit rules
    - restore epsilon rules
    :param root: Root node of the parsed tree.
    :return: Restored parsed tree.
    """
    # Apply the inverse transformations in order, as a pipeline.
    for restore in (InverseContextFree.transform_from_chomsky_normal_form,
                    InverseContextFree.unit_rules_restore,
                    InverseContextFree.epsilon_rules_restore):
        root = restore(root)
    return root
def filter_users_by_email(email):
    """Return list of users by email address

    Typically one, at most just a few in length. First we look through
    EmailAddress table, than customisable User model table. Add results
    together avoiding SQL joins and deduplicate.
    """
    from .models import EmailAddress
    User = get_user_model()
    mails = EmailAddress.objects.filter(email__iexact=email)
    users = [entry.user for entry in mails.prefetch_related('user')]
    if app_settings.USER_MODEL_EMAIL_FIELD:
        # Also match against the email field on the custom user model.
        lookup = {app_settings.USER_MODEL_EMAIL_FIELD + '__iexact': email}
        users.extend(User.objects.filter(**lookup))
    # Deduplicate while returning a plain list.
    return list(set(users))
def make_example_scripts_docs(spth, npth, rpth):
    """
    Generate rst docs from example scripts. Arguments `spth`, `npth`,
    and `rpth` are the top-level scripts directory, the top-level
    notebooks directory, and the top-level output directory within the
    docs respectively.
    """
    # Ensure that the output directory exists.
    mkdir(rpth)
    # --- Pass 1: index (.rst) files, at the root and one level deep -----
    for fp in glob(os.path.join(spth, '*.rst')) + \
            glob(os.path.join(spth, '*', '*.rst')):
        basename = os.path.basename(fp)
        dirname = os.path.dirname(fp)
        # '' for the root index file, subdirectory name otherwise.
        subdir = '' if dirname == spth else os.path.split(dirname)[-1]
        out_dir = os.path.join(rpth, subdir)
        mkdir(out_dir)
        out_file = os.path.join(out_dir, basename)
        # Convert only when the output is missing or older than the input.
        if update_required(fp, out_file):
            print('Converting %s ' % os.path.join(subdir, basename),
                  end='\r')
            rst_to_docs_rst(fp, out_file)
    # --- Pass 2: example scripts with matching notebooks ----------------
    for fp in sorted(glob(os.path.join(spth, '*', '*.py'))):
        subdir = os.path.split(os.path.dirname(fp))[1]
        stem = os.path.splitext(os.path.basename(fp))[0]
        notebook = os.path.join(npth, subdir, stem + '.ipynb')
        rst_file = os.path.join(rpth, subdir, stem + '.rst')
        if os.path.exists(fp) and os.path.exists(notebook):
            # Convert only when the rst is missing or older than the notebook.
            if update_required(notebook, rst_file):
                rel_notebook = os.path.join(subdir, stem + '.ipynb')
                print('Processing %s ' % rel_notebook, end='\r')
                script_and_notebook_to_rst(fp, notebook, rst_file)
        else:
            print('WARNING: script %s or notebook %s not found' %
                  (fp, notebook))
def copy(self, name=None):
    """
    Creates a copy of this attribute.

    :param name: the new name, uses the old one if None
    :type name: str
    :return: the copy of the attribute
    :rtype: Attribute
    """
    if name is None:
        # The parameterless Java copy() keeps the original name.
        jcopy = javabridge.call(self.jobject, "copy", "()Ljava/lang/Object;")
    else:
        jcopy = javabridge.call(
            self.jobject, "copy", "(Ljava/lang/String;)Lweka/core/Attribute;", name)
    return Attribute(jcopy)
def generate_gallery_rst(app):
"""Generate the Main examples gallery reStructuredText
Start the sphinx-gallery configuration and recursively scan the examples
directories in order to populate the examples gallery
"""
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
gallery_conf.update(app.config.sphinx_gallery_conf)
gallery_conf.update(plot_gallery=plot_gallery)
gallery_conf.update(abort_on_example_error=app.builder.config.abort_on_example_error)
# this assures I can call the config in other places
app.config.sphinx_gallery_conf = gallery_conf
app.config.html_static_path.append(glr_path_static())
clean_gallery_out(app.builder.outdir)
examples_dirs = gallery_conf['examples_dirs']
gallery_dirs = gallery_conf['gallery_dirs']
if not isinstance(examples_dirs, list):
examples_dirs = [examples_dirs]
if not isinstance(gallery_dirs, list):
gallery_dirs = [gallery_dirs]
mod_examples_dir = os.path.relpath(gallery_conf['mod_example_dir'],
app.builder.srcdir)
seen_backrefs = set()
for examples_dir, gallery_dir in zip(examples_dirs, gallery_dirs):
examples_dir = os.path.relpath(examples_dir,
app.builder.srcdir)
gallery_dir = os.path.relpath(gallery_dir,
app.builder.srcdir)
for workdir in [examples_dir, gallery_dir, mod_examples_dir]:
if not os.path.exists(workdir):
os.makedirs(workdir)
# we create an index.rst with all examples
fhindex = open(os.path.join(gallery_dir, 'index.rst'), 'w')
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
fhindex.write(generate_dir_rst(examples_dir, gallery_dir, gallery_conf,
seen_backrefs))
for directory in sorted(os.listdir(examples_dir)):
if os.path.isdir(os.path.join(examples_dir, directory)):
src_dir = os.path.join(examples_dir, directory)
target_dir = os.path.join(gallery_dir, directory)
fhindex.write(generate_dir_rst(src_dir, target_dir,
gallery_conf,
seen_backrefs))
fhindex.flush() | Generate the Main examples gallery reStructuredText
Start the sphinx-gallery configuration and recursively scan the examples
directories in order to populate the examples gallery |
def _normalize_tags_type(self, tags, device_name=None, metric_name=None):
"""
Normalize tags contents and type:
- append `device_name` as `device:` tag
- normalize tags type
- doesn't mutate the passed list, returns a new list
"""
normalized_tags = []
if device_name:
self._log_deprecation("device_name")
device_tag = self._to_bytes("device:{}".format(device_name))
if device_tag is None:
self.log.warning(
'Error encoding device name `{}` to utf-8 for metric `{}`, ignoring tag'.format(
repr(device_name), repr(metric_name)
)
)
else:
normalized_tags.append(device_tag)
if tags is not None:
for tag in tags:
encoded_tag = self._to_bytes(tag)
if encoded_tag is None:
self.log.warning(
'Error encoding tag `{}` to utf-8 for metric `{}`, ignoring tag'.format(
repr(tag), repr(metric_name)
)
)
continue
normalized_tags.append(encoded_tag)
return normalized_tags | Normalize tags contents and type:
- append `device_name` as `device:` tag
- normalize tags type
- doesn't mutate the passed list, returns a new list |
def qsub(command, queue=None, cwd=True, name=None, deps=[], stdout='',
         stderr='', env=[], array=None, context='grid', hostname=None,
         memfree=None, hvmem=None, gpumem=None, pe_opt=None, io_big=False):
    """Submits a shell job to a given grid queue
    Keyword parameters:
    command
    The command to be submitted to the grid
    queue
    A valid queue name or None, to use the default queue
    cwd
    If the job should change to the current working directory before starting
    name
    An optional name to set for the job. If not given, defaults to the script
    name being launched.
    deps
    Job ids to which this job will be dependent on
    stdout
    The standard output directory. If not given, defaults to what qsub has as a
    default.
    stderr
    The standard error directory (if not given, defaults to the stdout
    directory).
    env
    This is a list of extra variables that will be set on the environment
    running the command of your choice.
    array
    If set should be either:
    1. a string in the form m[-n[:s]] which indicates the starting range 'm',
    the closing range 'n' and the step 's'.
    2. an integer value indicating the total number of jobs to be submitted.
    This is equivalent to set the parameter to a string "1-k:1" where "k" is
    the passed integer value
    3. a tuple that contains either 1, 2 or 3 elements indicating the start,
    end and step arguments ("m", "n", "s").
    The minimum value for "m" is 1. Giving "0" is an error.
    If submitted with this option, the job to be created will be an SGE
    parametric job. In this mode SGE does not allow individual control of each
    job. The environment variable SGE_TASK_ID will be set on the executing
    process automatically by SGE and indicates the unique identifier in the
    range for which the current job instance is for.
    context
    The setshell context in which we should try a 'qsub'. Normally you don't
    need to change the default. This variable can also be set to a context
    dictionary in which case we just setup using that context instead of
    probing for a new one, what can be fast.
    memfree
    If set, it asks the queue for a node with a minimum amount of memory
    Used only if mem is not set
    (cf. qsub -l mem_free=<...>)
    hvmem
    If set, it asks the queue for a node with a minimum amount of memory
    Used only if mem is not set
    (cf. qsub -l h_vmem=<...>)
    gpumem
    Applicable only for GPU-based queues. If set, it asks for the GPU queue
    with a minimum amount of memory. The amount should not be more than 24.
    (cf. qsub -l gpumem=<...>)
    hostname
    If set, it asks the queue to use only a subset of the available nodes
    Symbols: | for OR, & for AND, ! for NOT, etc.
    (cf. qsub -l hostname=<...>)
    pe_opt
    If set, add a -pe option when launching a job (for instance pe_exclusive* 1-)
    io_big
    If set to true, the io_big flag will be set.
    Use this flag if your process will need a lot of Input/Output operations.
    Returns the job id assigned to this job (integer)
    """
    # Build the qsub command line incrementally from the requested options.
    scmd = ['qsub']
    import six
    # NOTE(review): a named queue is passed as a '-l' resource rather than
    # via '-q'; presumably the SGE site maps queue names to resources --
    # confirm against the grid configuration.
    if isinstance(queue, six.string_types) and queue not in ('all.q', 'default'):
        scmd += ['-l', queue]
    if memfree: scmd += ['-l', 'mem_free=%s' % memfree]
    if hvmem: scmd += ['-l', 'h_vmem=%s' % hvmem]
    if gpumem: scmd += ['-l', 'gpumem=%s' % gpumem]
    if io_big: scmd += ['-l', 'io_big']
    if hostname: scmd += ['-l', 'hostname=%s' % hostname]
    if pe_opt: scmd += ['-pe'] + pe_opt.split()
    if cwd: scmd += ['-cwd']
    if name: scmd += ['-N', name]
    if deps: scmd += ['-hold_jid', ','.join(['%d' % k for k in deps])]
    if stdout:
        if not cwd:
            # pivot, temporarily, to home directory
            curdir = os.path.realpath(os.curdir)
            os.chdir(os.environ['HOME'])
        if not os.path.exists(stdout): makedirs_safe(stdout)
        if not cwd:
            # go back
            os.chdir(os.path.realpath(curdir))
        scmd += ['-o', stdout]
    if stderr:
        if not os.path.exists(stderr): makedirs_safe(stderr)
        scmd += ['-e', stderr]
    elif stdout: #just re-use the stdout settings
        scmd += ['-e', stdout]
    scmd += ['-terse'] # simplified job identifiers returned by the command line
    for k in env: scmd += ['-v', k]
    if array is not None:
        scmd.append('-t')
        if isinstance(array, six.string_types):
            try:
                # A numeric string such as "5" means "run 5 jobs" (1-5:1).
                i = int(array)
                scmd.append('1-%d:1' % i)
            except ValueError:
                #must be complete...
                scmd.append('%s' % array)
        if isinstance(array, six.integer_types):
            scmd.append('1-%d:1' % array)
        if isinstance(array, (tuple, list)):
            if len(array) < 1 or len(array) > 3:
                raise RuntimeError("Array tuple should have length between 1 and 3")
            elif len(array) == 1:
                scmd.append('%s' % array[0])
            elif len(array) == 2:
                scmd.append('%s-%s' % (array[0], array[1]))
            elif len(array) == 3:
                scmd.append('%s-%s:%s' % (array[0], array[1], array[2]))
    if not isinstance(command, (list, tuple)): command = [command]
    scmd += command
    logger.debug("Qsub command '%s'", ' '.join(scmd))
    from .setshell import sexec
    # With -terse qsub prints the job id (for arrays "id.task-range");
    # keep the integer part before the first dot.
    jobid = str_(sexec(context, scmd))
    return int(jobid.split('.',1)[0]) | Submits a shell job to a given grid queue
Keyword parameters:
command
The command to be submitted to the grid
queue
A valid queue name or None, to use the default queue
cwd
If the job should change to the current working directory before starting
name
An optional name to set for the job. If not given, defaults to the script
name being launched.
deps
Job ids to which this job will be dependent on
stdout
The standard output directory. If not given, defaults to what qsub has as a
default.
stderr
The standard error directory (if not given, defaults to the stdout
directory).
env
This is a list of extra variables that will be set on the environment
running the command of your choice.
array
If set should be either:
1. a string in the form m[-n[:s]] which indicates the starting range 'm',
the closing range 'n' and the step 's'.
2. an integer value indicating the total number of jobs to be submitted.
This is equivalent to set the parameter to a string "1-k:1" where "k" is
the passed integer value
3. a tuple that contains either 1, 2 or 3 elements indicating the start,
end and step arguments ("m", "n", "s").
The minimum value for "m" is 1. Giving "0" is an error.
If submitted with this option, the job to be created will be an SGE
parametric job. In this mode SGE does not allow individual control of each
job. The environment variable SGE_TASK_ID will be set on the executing
process automatically by SGE and indicates the unique identifier in the
range for which the current job instance is for.
context
The setshell context in which we should try a 'qsub'. Normally you don't
need to change the default. This variable can also be set to a context
dictionary in which case we just setup using that context instead of
probing for a new one, what can be fast.
memfree
If set, it asks the queue for a node with a minimum amount of memory
Used only if mem is not set
(cf. qsub -l mem_free=<...>)
hvmem
If set, it asks the queue for a node with a minimum amount of memory
Used only if mem is not set
(cf. qsub -l h_vmem=<...>)
gpumem
Applicable only for GPU-based queues. If set, it asks for the GPU queue
with a minimum amount of memory. The amount should not be more than 24.
(cf. qsub -l gpumem=<...>)
hostname
If set, it asks the queue to use only a subset of the available nodes
Symbols: | for OR, & for AND, ! for NOT, etc.
(cf. qsub -l hostname=<...>)
pe_opt
If set, add a -pe option when launching a job (for instance pe_exclusive* 1-)
io_big
If set to true, the io_big flag will be set.
Use this flag if your process will need a lot of Input/Output operations.
Returns the job id assigned to this job (integer) |
def instance_cache(cls, func):
""" Save the cache to `self`
This decorator take it for granted that the decorated function
is a method. The first argument of the function is `self`.
:param func: function to decorate
:return: the decorator
"""
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
if not args:
raise ValueError('`self` is not available.')
else:
the_self = args[0]
func_key = cls.get_key(func)
val_cache = cls.get_self_cache(the_self, func_key)
lock = cls.get_self_cache_lock(the_self, func_key)
return cls._get_value_from_cache(
func, val_cache, lock, *args, **kwargs)
return func_wrapper | Save the cache to `self`
This decorator take it for granted that the decorated function
is a method. The first argument of the function is `self`.
:param func: function to decorate
:return: the decorator |
def __draw_leaf_density(self):
"""!
@brief Display densities by filling blocks by appropriate colors.
"""
leafs = self.__directory.get_leafs()
density_scale = leafs[-1].get_density()
if density_scale == 0.0: density_scale = 1.0
for block in leafs:
alpha = 0.8 * block.get_density() / density_scale
self.__draw_block(block, alpha) | !
@brief Display densities by filling blocks by appropriate colors. |
def smooth(y, radius, mode='two_sided', valid_only=False):
'''
Smooth signal y, where radius is determines the size of the window
mode='twosided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available
'''
assert mode in ('two_sided', 'causal')
if len(y) < 2*radius+1:
return np.ones_like(y) * y.mean()
elif mode == 'two_sided':
convkernel = np.ones(2 * radius+1)
out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')
if valid_only:
out[:radius] = out[-radius:] = np.nan
elif mode == 'causal':
convkernel = np.ones(radius)
out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')
out = out[:-radius+1]
if valid_only:
out[:radius] = np.nan
return out | Smooth signal y, where radius is determines the size of the window
mode='two_sided':
average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]
mode='causal':
average over the window [max(index - radius, 0), index]
valid_only: put nan in entries where the full-sized window is not available |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'expansions') and self.expansions is not None:
_dict['expansions'] = [x._to_dict() for x in self.expansions]
return _dict | Return a json dictionary representing this model. |
def url_as_file(url, ext=None):
"""
Context manager that GETs a given `url` and provides it as a local file.
The file is in a closed state upon entering the context,
and removed when leaving it, if still there.
To give the file name a specific extension, use `ext`;
the extension can optionally include a separating dot,
otherwise it will be added.
Parameters:
url (str): URL to retrieve.
ext (str, optional): Extension for the generated filename.
Yields:
str: The path to a temporary file with the content of the URL.
Raises:
requests.RequestException: Base exception of ``requests``, see its
docs for more detailed ones.
Example:
>>> import io, re, json
>>> with url_as_file('https://api.github.com/meta', ext='json') as meta:
... meta, json.load(io.open(meta, encoding='ascii'))['hooks']
(u'/tmp/www-api.github.com-Ba5OhD.json', [u'192.30.252.0/22'])
"""
if ext:
ext = '.' + ext.strip('.') # normalize extension
url_hint = 'www-{}-'.format(urlparse(url).hostname or 'any')
if url.startswith('file://'):
url = os.path.abspath(url[len('file://'):])
if os.path.isabs(url):
with open(url, 'rb') as handle:
content = handle.read()
else:
content = requests.get(url).content
with tempfile.NamedTemporaryFile(suffix=ext or '', prefix=url_hint, delete=False) as handle:
handle.write(content)
try:
yield handle.name
finally:
if os.path.exists(handle.name):
os.remove(handle.name) | Context manager that GETs a given `url` and provides it as a local file.
The file is in a closed state upon entering the context,
and removed when leaving it, if still there.
To give the file name a specific extension, use `ext`;
the extension can optionally include a separating dot,
otherwise it will be added.
Parameters:
url (str): URL to retrieve.
ext (str, optional): Extension for the generated filename.
Yields:
str: The path to a temporary file with the content of the URL.
Raises:
requests.RequestException: Base exception of ``requests``, see its
docs for more detailed ones.
Example:
>>> import io, re, json
>>> with url_as_file('https://api.github.com/meta', ext='json') as meta:
... meta, json.load(io.open(meta, encoding='ascii'))['hooks']
(u'/tmp/www-api.github.com-Ba5OhD.json', [u'192.30.252.0/22']) |
def add_style(self, name, fg=None, bg=None, options=None):
"""
Adds a new style
"""
style = Style(name)
if fg is not None:
style.fg(fg)
if bg is not None:
style.bg(bg)
if options is not None:
if "bold" in options:
style.bold()
if "underline" in options:
style.underlined()
self._io.output.formatter.add_style(style)
self._io.error_output.formatter.add_style(style) | Adds a new style |
def get_series(self, key):
"""Get a series object from TempoDB given its key.
:param string key: a string name for the series
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.Series` data payload"""
url = make_series_url(key)
resp = self.session.get(url)
return resp | Get a series object from TempoDB given its key.
:param string key: a string name for the series
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.Series` data payload |
def filter_nremoved(self, filt=True, quiet=False):
    """
    Report how many data are removed by the active filters.

    :param filt: filter specification forwarded to each sample's
        ``filt_nremoved`` (True applies the currently active filters).
    :param quiet: if False, print a per-sample summary table.
    :return: dict mapping sample name to the result of ``filt_nremoved``
        -- appears to be a (total, filtered, percent-removed) triple,
        judging by the unpacking below; TODO confirm.
    """
    rminfo = {}
    # The 'All_Samples' subset lists the names of every sample in self.data.
    for n in self.subsets['All_Samples']:
        s = self.data[n]
        rminfo[n] = s.filt_nremoved(filt)
    if not quiet:
        # Column width is driven by the longest sample name.
        maxL = max([len(s) for s in rminfo.keys()])
        # Header row: sample name, total count, filtered count, % removed.
        print('{string:{number}s}'.format(string='Sample ', number=maxL + 3) +
        '{total:4s}'.format(total='tot') +
        '{removed:4s}'.format(removed='flt') +
        '{percent:4s}'.format(percent='%rm'))
        for k, (ntot, nfilt, pcrm) in rminfo.items():
            print('{string:{number}s}'.format(string=k, number=maxL + 3) +
            '{total:4.0f}'.format(total=ntot) +
            '{removed:4.0f}'.format(removed=nfilt) +
            '{percent:4.0f}'.format(percent=pcrm))
    return rminfo | Report how many data are removed by the active filters.
def get_vertices(self, indexed=None):
"""Get the vertices
Parameters
----------
indexed : str | None
If Note, return an array (N,3) of the positions of vertices in
the mesh. By default, each unique vertex appears only once.
If indexed is 'faces', then the array will instead contain three
vertices per face in the mesh (and a single vertex may appear more
than once in the array).
Returns
-------
vertices : ndarray
The vertices.
"""
if indexed is None:
if (self._vertices is None and
self._vertices_indexed_by_faces is not None):
self._compute_unindexed_vertices()
return self._vertices
elif indexed == 'faces':
if (self._vertices_indexed_by_faces is None and
self._vertices is not None):
self._vertices_indexed_by_faces = \
self._vertices[self.get_faces()]
return self._vertices_indexed_by_faces
else:
raise Exception("Invalid indexing mode. Accepts: None, 'faces'") | Get the vertices
Parameters
----------
indexed : str | None
If Note, return an array (N,3) of the positions of vertices in
the mesh. By default, each unique vertex appears only once.
If indexed is 'faces', then the array will instead contain three
vertices per face in the mesh (and a single vertex may appear more
than once in the array).
Returns
-------
vertices : ndarray
The vertices. |
def parse_ACCT(chunk, encryption_key):
    """
    Parses an account chunk, decrypts and creates an Account object.
    May return None when the chunk does not represent an account.
    All secure notes are ACCTs but not all of them store account
    information.
    """
    # TODO: Make a test case that covers secure note account
    io = BytesIO(chunk.payload)
    # Fields are stored sequentially in the payload; read_item/skip_item
    # advance the stream, so the order of the calls below must not change.
    id = read_item(io)
    name = decode_aes256_plain_auto(read_item(io), encryption_key)
    group = decode_aes256_plain_auto(read_item(io), encryption_key)
    url = decode_hex(read_item(io))
    notes = decode_aes256_plain_auto(read_item(io), encryption_key)
    skip_item(io, 2)
    username = decode_aes256_plain_auto(read_item(io), encryption_key)
    password = decode_aes256_plain_auto(read_item(io), encryption_key)
    skip_item(io, 2)
    secure_note = read_item(io)
    # Parse secure note
    # A secure note of an allowed type may override url/username/password
    # with values parsed from the note body.
    if secure_note == b'1':
        parsed = parse_secure_note_server(notes)
        if parsed.get('type') in ALLOWED_SECURE_NOTE_TYPES:
            url = parsed.get('url', url)
            username = parsed.get('username', username)
            password = parsed.get('password', password)
    return Account(id, name, username, password, url, group, notes) | Parses an account chunk, decrypts and creates an Account object.
May return nil when the chunk does not represent an account.
All secure notes are ACCTs but not all of them store account
information. |
def p_localparamdecl_integer(self, p):
    'localparamdecl : LOCALPARAM INTEGER param_substitution_list SEMICOLON'
    # NOTE: the docstring above is the PLY grammar rule for this production
    # and must not be reworded.
    # Wrap each (name, value) pair from the substitution list in a
    # Localparam node, then group them all into a single Decl.
    paramlist = [Localparam(rname, rvalue, lineno=p.lineno(3))
                 for rname, rvalue in p[3]]
    p[0] = Decl(tuple(paramlist), lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1)) | localparamdecl : LOCALPARAM INTEGER param_substitution_list SEMICOLON
def _lexsorted_specs(self, order):
"""
A lexsort is specified using normal key string prefixed by '+'
(for ascending) or '-' for (for descending).
Note that in Python 2, if a key is missing, None is returned
(smallest Python value). In Python 3, an Exception will be
raised regarding comparison of heterogenous types.
"""
specs = self.specs[:]
if not all(el[0] in ['+', '-'] for el in order):
raise Exception("Please specify the keys for sorting, use"
"'+' prefix for ascending,"
"'-' for descending.)")
sort_cycles = [(el[1:], True if el[0]=='+' else False)
for el in reversed(order)
if el[1:] in self.varying_keys]
for (key, ascending) in sort_cycles:
specs = sorted(specs, key=lambda s: s.get(key, None),
reverse=(not ascending))
return specs | A lexsort is specified using normal key string prefixed by '+'
(for ascending) or '-' for (for descending).
Note that in Python 2, if a key is missing, None is returned
(smallest Python value). In Python 3, an Exception will be
raised regarding comparison of heterogeneous types. |
def human_or_01(X, y, model_generator, method_name):
    """ OR (false/true)
    This tests how well a feature attribution method agrees with human intuition
    for an OR operation combined with linear effects. This metric deals
    specifically with the question of credit allocation for the following function
    when all three inputs are true:
    if fever: +2 points
    if cough: +2 points
    if fever or cough: +6 points
    transform = "identity"
    sort_order = 1
    """
    # NOTE: the trailing ``transform``/``sort_order`` lines in the docstring
    # look like machine-readable metadata -- keep their wording stable.
    # Delegate to the shared OR-metric helper; the two booleans select the
    # (false, true) input configuration named in the title.
    return _human_or(X, model_generator, method_name, False, True) | OR (false/true)
This tests how well a feature attribution method agrees with human intuition
for an OR operation combined with linear effects. This metric deals
specifically with the question of credit allocation for the following function
when all three inputs are true:
if fever: +2 points
if cough: +2 points
if fever or cough: +6 points
transform = "identity"
sort_order = 1 |
def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_memb_sum, cl_attr_freq,
                       membship, num_dissim, cat_dissim, gamma, random_state):
    """Single iteration of the k-prototypes algorithm.

    Reassigns each point to its nearest cluster under the combined
    numerical + gamma * categorical dissimilarity, updating the running
    cluster statistics in place, and reseeds any cluster that becomes
    empty with a random point drawn from the largest cluster.

    Returns ``(centroids, moves)`` where ``moves`` is the number of points
    that changed cluster this iteration.
    """
    moves = 0
    for ipoint in range(Xnum.shape[0]):
        # Nearest cluster under the combined numeric + categorical cost.
        clust = np.argmin(
            num_dissim(centroids[0], Xnum[ipoint]) +
            gamma * cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)
        )
        if membship[clust, ipoint]:
            # Point is already in its right place.
            continue
        # Move point, and update old/new cluster frequencies and centroids.
        moves += 1
        old_clust = np.argwhere(membship[:, ipoint])[0][0]
        # Note that membship gets updated by kmodes.move_point_cat.
        # move_point_num only updates things specific to the k-means part.
        cl_attr_sum, cl_memb_sum = move_point_num(
            Xnum[ipoint], clust, old_clust, cl_attr_sum, cl_memb_sum
        )
        cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(
            Xcat[ipoint], ipoint, clust, old_clust,
            cl_attr_freq, membship, centroids[1]
        )
        # Update old and new centroids for numerical attributes using
        # the means and sums of all values
        for iattr in range(len(Xnum[ipoint])):
            for curc in (clust, old_clust):
                if cl_memb_sum[curc]:
                    centroids[0][curc, iattr] = cl_attr_sum[curc, iattr] / cl_memb_sum[curc]
                else:
                    # Empty cluster: zero the centroid until it is reseeded.
                    centroids[0][curc, iattr] = 0.
        # In case of an empty cluster, reinitialize with a random point
        # from largest cluster.
        if not cl_memb_sum[old_clust]:
            from_clust = membship.sum(axis=1).argmax()
            choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch]
            rindx = random_state.choice(choices)
            cl_attr_sum, cl_memb_sum = move_point_num(
                Xnum[rindx], old_clust, from_clust, cl_attr_sum, cl_memb_sum
            )
            cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(
                Xcat[rindx], rindx, old_clust, from_clust,
                cl_attr_freq, membship, centroids[1]
            )
    return centroids, moves | Single iteration of the k-prototypes algorithm
def load_files(filenames,multiproc=False,**kwargs):
""" Load a set of FITS files with kwargs. """
filenames = np.atleast_1d(filenames)
logger.debug("Loading %s files..."%len(filenames))
kwargs = [dict(filename=f,**kwargs) for f in filenames]
if multiproc:
from multiprocessing import Pool
processes = multiproc if multiproc > 0 else None
p = Pool(processes,maxtasksperchild=1)
out = p.map(load_file,kwargs)
else:
out = [load_file(kw) for kw in kwargs]
dtype = out[0].dtype
for i,d in enumerate(out):
if d.dtype != dtype:
# ADW: Not really safe...
logger.warn("Casting input data to same type.")
out[i] = d.astype(dtype,copy=False)
logger.debug('Concatenating arrays...')
return np.concatenate(out) | Load a set of FITS files with kwargs. |
def _image_data(self, normalize=False,
                min_depth=MIN_DEPTH,
                max_depth=MAX_DEPTH,
                twobyte=False):
    """Returns the data in image format, with scaling and conversion to uint8 types.
    Parameters
    ----------
    normalize : bool
        whether or not to normalize by the min and max depth of the image
    min_depth : float
        minimum depth value for the normalization
    max_depth : float
        maximum depth value for the normalization
    twobyte: bool
        whether or not to use 16-bit encoding (returns uint16 instead of uint8)
    Returns
    -------
    :obj:`numpy.ndarray` of uint8
        A 3D matrix representing the image. The first dimension is rows, the
        second is columns, and the third is a set of 3 RGB values, each of
        which is simply the depth entry scaled to between 0 and BINARY_IM_MAX_VAL.
    """
    # Full-scale output value: BINARY_IM_MAX_VAL for 8-bit, 65535 for 16-bit.
    max_val = BINARY_IM_MAX_VAL
    if twobyte:
        max_val = np.iinfo(np.uint16).max
    if normalize:
        # Rescale by the actual data range rather than the given bounds.
        min_depth = np.min(self._data)
        max_depth = np.max(self._data)
        depth_data = (self._data - min_depth) / (max_depth - min_depth)
        depth_data = float(max_val) * depth_data.squeeze()
    else:
        # Zero pixels mark missing depth; remember them so they stay 0 after
        # rescaling.  NOTE(review): np.where is unpacked into three index
        # arrays, so self._data appears to be 3-D here (H, W, channel) --
        # confirm.
        zero_px = np.where(self._data == 0)
        zero_px = np.c_[zero_px[0], zero_px[1], zero_px[2]]
        depth_data = ((self._data - min_depth) * \
                      (float(max_val) / (max_depth - min_depth))).squeeze()
        depth_data[zero_px[:,0], zero_px[:,1]] = 0
    # Replicate the scaled depth into all three RGB channels.
    im_data = np.zeros([self.height, self.width, 3])
    im_data[:, :, 0] = depth_data
    im_data[:, :, 1] = depth_data
    im_data[:, :, 2] = depth_data
    if twobyte:
        return im_data.astype(np.uint16)
    return im_data.astype(np.uint8) | Returns the data in image format, with scaling and conversion to uint8 types.
Parameters
----------
normalize : bool
whether or not to normalize by the min and max depth of the image
min_depth : float
minimum depth value for the normalization
max_depth : float
maximum depth value for the normalization
twobyte: bool
whether or not to use 16-bit encoding
Returns
-------
:obj:`numpy.ndarray` of uint8
A 3D matrix representing the image. The first dimension is rows, the
second is columns, and the third is a set of 3 RGB values, each of
which is simply the depth entry scaled to between 0 and BINARY_IM_MAX_VAL. |
def validate(node, source):
"""Call this function to validate an AST."""
# TODO: leaving strict checking off to support insert_grad_of
lf = LanguageFence(source, strict=False)
lf.visit(node)
return node | Call this function to validate an AST. |
def create_new_version(
self,
name,
subject,
text='',
template_id=None,
html=None,
locale=None,
timeout=None
):
""" API call to create a new version of a template """
if(html):
payload = {
'name': name,
'subject': subject,
'html': html,
'text': text
}
else:
payload = {
'name': name,
'subject': subject,
'text': text
}
if locale:
url = self.TEMPLATES_SPECIFIC_LOCALE_VERSIONS_ENDPOINT % (
template_id,
locale
)
else:
url = self.TEMPLATES_NEW_VERSION_ENDPOINT % template_id
return self._api_request(
url,
self.HTTP_POST,
payload=payload,
timeout=timeout
) | API call to create a new version of a template |
def get_options_from_file(self, file_path):
"""
Return the options parsed from a JSON file.
"""
# read options JSON file
with open(file_path) as options_file:
options_dict = json.load(options_file)
options = []
for opt_name in options_dict:
options.append(opt_name)
options.append(options_dict[opt_name])
return self.parse_args(options) | Return the options parsed from a JSON file. |
def main(*kw):
    """Command line entry point; arguments must match those defined in
    in :meth:`create_parser()`; returns 0 for success, else 1.
    Example::
    command.main("-i", "**/*.py", "--no-default-excludes")
    Runs formic printing out all .py files in the current working directory
    and its children to ``sys.stdout``.
    If *kw* is None, :func:`main()` will use ``sys.argv``."""
    parser = create_parser()
    # argparse falls back to sys.argv when handed None.
    args = parser.parse_args(kw if kw else None)
    # Informational flags short-circuit the actual file search.
    if args.help:
        parser.print_help()
    elif args.usage:
        # Long-form usage text printed verbatim -- do not reflow.
        print("""Ant Globs
=========
Apache Ant fileset is documented at the Apache Ant project:
* http://ant.apache.org/manual/dirtasks.html#patterns
Examples
--------
Ant Globs are like simple file globs (they use ? and * in the same way), but
include powerful ways for selecting directories. The examples below use the
Ant glob naming, so a leading slash represents the top of the search, *not* the
root of the file system.
*.py
Selects every matching file anywhere in the whole tree
Matches /foo.py and /bar/foo.py
but not /foo.pyc or /bar/foo.pyc/
/*.py
Selects every matching file in the root of the directory (but no
deeper).
Matches /foo.py but not /bar/foo.py
/myapp/**
Matches all files under /myapp and below.
/myapp/**/__init__.py
Matches all __init__.py files /myapp and below.
dir1/__init__.py
Selects every __init__.py in directory dir1. dir1
directory can be anywhere in the directory tree
Matches /dir1/file.py, /dir3/dir1/file.py and
/dir3/dir2/dir1/file.py but not /dir1/another/__init__.py.
**/dir1/__init__.py
Same as above.
/**/dir1/__init__.py
Same as above.
/myapp/**/dir1/__init__.py
Selects every __init__.py in dir1 in the directory tree
/myapp under the root.
Matches /myapp/dir1/__init__.py and /myapp/dir2/dir1/__init__.py
but not /myapp/file.txt and /dir1/file.txt
Default excludes
----------------
Ant FileSet (and Formic) has built-in patterns to screen out a lot of
development 'noise', such as hidden VCS files and directories. The full list is
at:
* https://formic.readthedocs.io/en/latest/api.html#formic.formic.get_initial_default_excludes
Default excludes can be simply switched off on both the command line and the
API, for example::
$ formic -i "*.py" -e "__init__.py" "**/*test*/" "test_*" --no-default-excludes
""")
    elif args.version:
        print("formic", get_version())
    elif args.license:
        print(resource_string(__name__, "LICENSE.txt"))
    else:
        try:
            # A missing include list means "match everything".
            fileset = FileSet(
                directory=args.directory,
                include=args.include if args.include else ["*"],
                exclude=args.exclude,
                default_excludes=args.default_excludes,
                symlinks=not args.no_symlinks,
                casesensitive=not args.insensitive)
        except FormicError as exception:
            # Invalid arguments: show usage and report failure.
            parser.print_usage()
            print(exception.message)
            return 1
        prefix = fileset.get_directory()
        # Print each match either relative (leading ".") or absolute.
        for directory, file_name in fileset.files():
            if args.relative:
                sys.stdout.write(".")
            else:
                sys.stdout.write(prefix)
            if directory:
                sys.stdout.write(os.path.sep)
                sys.stdout.write(directory)
            sys.stdout.write(os.path.sep)
            sys.stdout.write(file_name)
            sys.stdout.write("\n")
    return 0 | Command line entry point; arguments must match those defined in
in :meth:`create_parser()`; returns 0 for success, else 1.
Example::
command.main("-i", "**/*.py", "--no-default-excludes")
Runs formic printing out all .py files in the current working directory
and its children to ``sys.stdout``.
If *kw* is None, :func:`main()` will use ``sys.argv``. |
def canonicalize_half_turns(
half_turns: Union[sympy.Basic, float]
) -> Union[sympy.Basic, float]:
"""Wraps the input into the range (-1, +1]."""
if isinstance(half_turns, sympy.Basic):
return half_turns
half_turns %= 2
if half_turns > 1:
half_turns -= 2
return half_turns | Wraps the input into the range (-1, +1]. |
def _compute_acq(self,x):
"""
Integrated GP-Lower Confidence Bound
"""
means, stds = self.model.predict(x)
f_acqu = 0
for m,s in zip(means, stds):
f_acqu += -m + self.exploration_weight * s
return f_acqu/(len(means)) | Integrated GP-Lower Confidence Bound |
def plot_sources(topo, mixmaps, unmixmaps, global_scale=None, fig=None):
"""Plot all scalp projections of mixing- and unmixing-maps.
.. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`.
Parameters
----------
topo : :class:`~eegtopo.topoplot.Topoplot`
This object draws the topo plot
mixmaps : array, shape = [w_pixels, h_pixels]
Scalp-projected mixing matrix
unmixmaps : array, shape = [w_pixels, h_pixels]
Scalp-projected unmixing matrix
global_scale : float, optional
Set common color scale as given percentile of all map values to use as the maximum.
`None` scales each plot individually (default).
fig : Figure object, optional
Figure to plot into. If set to `None`, a new figure is created.
Returns
-------
fig : Figure object
The figure into which was plotted.
"""
urange, mrange = None, None
m = len(mixmaps)
if global_scale:
tmp = np.asarray(unmixmaps)
tmp = tmp[np.logical_not(np.isnan(tmp))]
umax = np.percentile(np.abs(tmp), global_scale)
umin = -umax
urange = [umin, umax]
tmp = np.asarray(mixmaps)
tmp = tmp[np.logical_not(np.isnan(tmp))]
mmax = np.percentile(np.abs(tmp), global_scale)
mmin = -mmax
mrange = [mmin, mmax]
y = np.floor(np.sqrt(m * 3 / 4))
x = np.ceil(m / y)
if fig is None:
fig = new_figure()
axes = []
for i in range(m):
axes.append(fig.add_subplot(2 * y, x, i + 1))
plot_topo(axes[-1], topo, unmixmaps[i], crange=urange)
axes[-1].set_title(str(i))
axes.append(fig.add_subplot(2 * y, x, m + i + 1))
plot_topo(axes[-1], topo, mixmaps[i], crange=mrange)
axes[-1].set_title(str(i))
for a in axes:
a.set_yticks([])
a.set_xticks([])
a.set_frame_on(False)
axes[0].set_ylabel('Unmixing weights')
axes[1].set_ylabel('Scalp projections')
return fig | Plot all scalp projections of mixing- and unmixing-maps.
.. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_map`.
Parameters
----------
topo : :class:`~eegtopo.topoplot.Topoplot`
This object draws the topo plot
mixmaps : array, shape = [w_pixels, h_pixels]
Scalp-projected mixing matrix
unmixmaps : array, shape = [w_pixels, h_pixels]
Scalp-projected unmixing matrix
global_scale : float, optional
Set common color scale as given percentile of all map values to use as the maximum.
`None` scales each plot individually (default).
fig : Figure object, optional
Figure to plot into. If set to `None`, a new figure is created.
Returns
-------
fig : Figure object
The figure into which was plotted. |
def gb(args):
"""
%prog gb gffile fastafile
Convert GFF3 to Genbank format. Recipe taken from:
<http://www.biostars.org/p/2492/>
"""
from Bio.Alphabet import generic_dna
try:
from BCBio import GFF
except ImportError:
print("You need to install dep first: $ easy_install bcbio-gff", file=sys.stderr)
p = OptionParser(gb.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gff_file, fasta_file = args
pf = op.splitext(gff_file)[0]
out_file = pf + ".gb"
fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta", generic_dna))
gff_iter = GFF.parse(gff_file, fasta_input)
SeqIO.write(gff_iter, out_file, "genbank") | %prog gb gffile fastafile
Convert GFF3 to Genbank format. Recipe taken from:
<http://www.biostars.org/p/2492/> |
def to_string(self):
"""
Return the current DEVICE_CONFIG as a string (always 4 bytes).
"""
fmt = '<BBH'
first = struct.pack(
fmt,
self._mode,
self._cr_timeout,
self._auto_eject_time
)
#crc = 0xffff - yubico_util.crc16(first)
#second = first + struct.pack('<H', crc)
return first | Return the current DEVICE_CONFIG as a string (always 4 bytes). |
def pip_uninstall(package, **options):
"""Uninstall a python package"""
command = ["uninstall", "-q", "-y"]
available_options = ('proxy', 'log', )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Uninstalling {} package with options: {}".format(package,
command))
pip_execute(command) | Uninstall a python package |
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas) | Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system |
def inq_convolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
num_bits=4, inq_iterations=(), selection_algorithm='random',
seed=-1, w_init=None, i_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None,
with_bias=True):
"""Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
if i_init is None:
i_init = ConstantInitializer()
if b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (outmaps, inp.shape[base_axis]) + tuple(kernel),
w_init, True, not fix_parameters)
i = get_parameter_or_create(
"I", (outmaps, inp.shape[base_axis]) + tuple(kernel),
i_init, False)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
return F.inq_convolution(inp, w, i, b, base_axis, pad, stride, dilation, group, num_bits, inq_iterations, selection_algorithm, seed) | Incremental Network Quantization Convolution Layer
During training, the weights are sequentially quantized to power-of-two
values, which allows the training of a multiplierless network.
Using `inq_iterations`, one can specify after how many forward passes
half of the learnable weights are fixed and quantized to powers-of-two.
After reaching the last value in `inq_iterations`, all weights are fixed.
For more details, please refer to the reference.
Reference:
Zhou A, Yao A, Guo Y, Xu L, Chen Y. Incremental network quantization:
Towards lossless CNNs with low-precision weights.
<https://arxiv.org/abs/1702.03044>
Args:
inp (~nnabla.Variable): Input N-D array with shape (:math:`M_0 \\times \ldots \\times M_{B-1} \\times D_B \\times \ldots \\times D_N`). Dimensions before and after base_axis are flattened as if it was a matrix.
n_outmaps (int or :obj:`tuple` of :obj:`int`): Number of output neurons per data.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
num_bits (int): Number of bits per weight. Value has to be larger than 1 as one bit is already used to code the value "0"
inq_iterations (tuple of int): Tuple of iteration numbers at which we fix half of the weights.
selection_algorithm (str): Chooses algorithm that is used to decide which weights are fixed. ("largest_abs" ... fix weights with largest absolute value, "random" ... fix weights randomly)
seed (int): Random seed for INQ algorithm
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
i_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the indicators (0 ... learnable, 1 ... fixed). By default, it is initialized with zeros.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for the bias. By default, it is initialized with zeros if `with_bias` is `True`.
fix_parameters (bool): When set to `True`, the weight and bias will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable` |
def blog_authors(*args):
"""
Put a list of authors (users) for blog posts into the template context.
"""
blog_posts = BlogPost.objects.published()
authors = User.objects.filter(blogposts__in=blog_posts)
return list(authors.annotate(post_count=Count("blogposts"))) | Put a list of authors (users) for blog posts into the template context. |
def remove(ctx, client, sources):
"""Remove files and check repository for potential problems."""
from renku.api._git import _expand_directories
def fmt_path(path):
"""Format path as relative to the client path."""
return str(Path(path).absolute().relative_to(client.path))
files = {
fmt_path(source): fmt_path(file_or_dir)
for file_or_dir in sources
for source in _expand_directories((file_or_dir, ))
}
# 1. Update dataset metadata files.
with progressbar(
client.datasets.values(),
item_show_func=lambda item: str(item.short_id) if item else '',
label='Updating dataset metadata',
width=0,
) as bar:
for dataset in bar:
remove = []
for key, file_ in dataset.files.items():
filepath = fmt_path(file_.full_path)
if filepath in files:
remove.append(key)
if remove:
for key in remove:
dataset.unlink_file(key)
dataset.to_yaml()
# 2. Manage .gitattributes for external storage.
tracked = tuple(
path for path, attr in client.find_attr(*files).items()
if attr.get('filter') == 'lfs'
)
client.untrack_paths_from_storage(*tracked)
existing = client.find_attr(*tracked)
if existing:
click.echo(WARNING + 'There are custom .gitattributes.\n')
if click.confirm(
'Do you want to edit ".gitattributes" now?', default=False
):
click.edit(filename=str(client.path / '.gitattributes'))
# Finally remove the files.
final_sources = list(set(files.values()))
if final_sources:
run(['git', 'rm', '-rf'] + final_sources, check=True) | Remove files and check repository for potential problems. |
def imshow(self, *args, show_crosshair=True, show_mask=True,
show_qscale=True, axes=None, invalid_color='black',
mask_opacity=0.8, show_colorbar=True, **kwargs):
"""Plot the matrix (imshow)
Keyword arguments [and their default values]:
show_crosshair [True]: if a cross-hair marking the beam position is
to be plotted.
show_mask [True]: if the mask is to be plotted.
show_qscale [True]: if the horizontal and vertical axes are to be
scaled into q
axes [None]: the axes into which the image should be plotted. If
None, defaults to the currently active axes (returned by plt.gca())
invalid_color ['black']: the color for invalid (NaN or infinite) pixels
mask_opacity [0.8]: the opacity of the overlaid mask (1 is fully
opaque, 0 is fully transparent)
show_colorbar [True]: if a colorbar is to be added. Can be a boolean
value (True or False) or an instance of matplotlib.axes.Axes, into
which the color bar should be drawn.
All other keywords are forwarded to plt.imshow() or
matplotlib.Axes.imshow()
Returns: the image instance returned by imshow()
"""
if 'aspect' not in kwargs:
kwargs['aspect'] = 'equal'
if 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
if 'origin' not in kwargs:
kwargs['origin'] = 'upper'
if show_qscale:
ymin, xmin = self.pixel_to_q(0, 0)
ymax, xmax = self.pixel_to_q(*self.shape)
if kwargs['origin'].upper() == 'UPPER':
kwargs['extent'] = [xmin, xmax, -ymax, -ymin]
else:
kwargs['extent'] = [xmin, xmax, ymin, ymax]
bcx = 0
bcy = 0
else:
bcx = self.header.beamcenterx
bcy = self.header.beamcentery
xmin = 0
xmax = self.shape[1]
ymin = 0
ymax = self.shape[0]
if kwargs['origin'].upper() == 'UPPER':
kwargs['extent'] = [0, self.shape[1], self.shape[0], 0]
else:
kwargs['extent'] = [0, self.shape[1], 0, self.shape[0]]
if axes is None:
axes = plt.gca()
ret = axes.imshow(self.intensity, **kwargs)
if show_mask:
# workaround: because of the colour-scaling we do here, full one and
# full zero masks look the SAME, i.e. all the image is shaded.
# Thus if we have a fully unmasked matrix, skip this section.
# This also conserves memory.
if (self.mask == 0).sum(): # there are some masked pixels
# we construct another representation of the mask, where the masked pixels are 1.0, and the
# unmasked ones will be np.nan. They will thus be not rendered.
mf = np.ones(self.mask.shape, np.float)
mf[self.mask != 0] = np.nan
kwargs['cmap'] = matplotlib.cm.gray_r
kwargs['alpha'] = mask_opacity
kwargs['norm'] = matplotlib.colors.Normalize()
axes.imshow(mf, **kwargs)
if show_crosshair:
ax = axes.axis() # save zoom state
axes.plot([xmin, xmax], [bcy] * 2, 'w-')
axes.plot([bcx] * 2, [ymin, ymax], 'w-')
axes.axis(ax) # restore zoom state
axes.set_facecolor(invalid_color)
if show_colorbar:
if isinstance(show_colorbar, matplotlib.axes.Axes):
axes.figure.colorbar(
ret, cax=show_colorbar)
else:
# try to find a suitable colorbar axes: check if the plot target axes already
# contains some images, then check if their colorbars exist as
# axes.
cax = [i.colorbar[1]
for i in axes.images if i.colorbar is not None]
cax = [c for c in cax if c in c.figure.axes]
if cax:
cax = cax[0]
else:
cax = None
axes.figure.colorbar(ret, cax=cax, ax=axes)
axes.figure.canvas.draw()
return ret | Plot the matrix (imshow)
Keyword arguments [and their default values]:
show_crosshair [True]: if a cross-hair marking the beam position is
to be plotted.
show_mask [True]: if the mask is to be plotted.
show_qscale [True]: if the horizontal and vertical axes are to be
scaled into q
axes [None]: the axes into which the image should be plotted. If
None, defaults to the currently active axes (returned by plt.gca())
invalid_color ['black']: the color for invalid (NaN or infinite) pixels
mask_opacity [0.8]: the opacity of the overlaid mask (1 is fully
opaque, 0 is fully transparent)
show_colorbar [True]: if a colorbar is to be added. Can be a boolean
value (True or False) or an instance of matplotlib.axes.Axes, into
which the color bar should be drawn.
All other keywords are forwarded to plt.imshow() or
matplotlib.Axes.imshow()
Returns: the image instance returned by imshow() |
def set_global_defaults(**kwargs):
"""Set global defaults for the options passed to the icon painter."""
valid_options = [
'active', 'selected', 'disabled', 'on', 'off',
'on_active', 'on_selected', 'on_disabled',
'off_active', 'off_selected', 'off_disabled',
'color', 'color_on', 'color_off',
'color_active', 'color_selected', 'color_disabled',
'color_on_selected', 'color_on_active', 'color_on_disabled',
'color_off_selected', 'color_off_active', 'color_off_disabled',
'animation', 'offset', 'scale_factor',
]
for kw in kwargs:
if kw in valid_options:
_default_options[kw] = kwargs[kw]
else:
error = "Invalid option '{0}'".format(kw)
raise KeyError(error) | Set global defaults for the options passed to the icon painter. |
def _parse_udevadm_info(udev_info):
'''
Parse the info returned by udevadm command.
'''
devices = []
dev = {}
for line in (line.strip() for line in udev_info.splitlines()):
if line:
line = line.split(':', 1)
if len(line) != 2:
continue
query, data = line
if query == 'E':
if query not in dev:
dev[query] = {}
key, val = data.strip().split('=', 1)
try:
val = int(val)
except ValueError:
try:
val = float(val)
except ValueError:
pass # Quiet, this is not a number.
dev[query][key] = val
else:
if query not in dev:
dev[query] = []
dev[query].append(data.strip())
else:
if dev:
devices.append(_normalize_info(dev))
dev = {}
if dev:
_normalize_info(dev)
devices.append(_normalize_info(dev))
return devices | Parse the info returned by udevadm command. |
def info_hash(self):
"""Hash of torrent file info section. Also known as torrent hash."""
info = self._struct.get('info')
if not info:
return None
return sha1(Bencode.encode(info)).hexdigest() | Hash of torrent file info section. Also known as torrent hash. |
def parse_cluster(self, global_params, region, cluster):
"""
Parse a single Redshift cluster
:param global_params: Parameters shared for all regions
:param region: Name of the AWS region
:param cluster: Cluster
"""
vpc_id = cluster.pop('VpcId') if 'VpcId' in cluster else ec2_classic
manage_dictionary(self.vpcs, vpc_id, VPCConfig(self.vpc_resource_types))
name = cluster.pop('ClusterIdentifier')
cluster['name'] = name
self.vpcs[vpc_id].clusters[name] = cluster | Parse a single Redshift cluster
:param global_params: Parameters shared for all regions
:param region: Name of the AWS region
:param cluster: Cluster |
def clean_csvs(dialogpath=None):
""" Translate non-ASCII characters to spaces or equivalent ASCII characters """
dialogdir = os.dirname(dialogpath) if os.path.isfile(dialogpath) else dialogpath
filenames = [dialogpath.split(os.path.sep)[-1]] if os.path.isfile(dialogpath) else os.listdir(dialogpath)
for filename in filenames:
filepath = os.path.join(dialogdir, filename)
df = clean_df(filepath)
df.to_csv(filepath, header=None)
return filenames | Translate non-ASCII characters to spaces or equivalent ASCII characters |
def group_files_by_size(fileslist, multi): # pragma: no cover
''' Cluster files into the specified number of groups, where each groups total size is as close as possible to each other.
Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
- Create a cluster X
- A = Pop first item in F
- Put A in X[0] (X[0] is thus the first group in cluster X)
For g in 1..len(G)-1 :
- B = Pop first item in F
- Put B in X[g]
- group_size := size(B)
If group_size != size(A):
While group_size < size(A):
- Find next item C in F which size(C) <= size(A) - group_size
- Put C in X[g]
- group_size := group_size + size(C)
'''
flord = OrderedDict(sorted(fileslist.items(), key=lambda x: x[1], reverse=True))
if multi <= 1:
fgrouped = {}
i = 0
for x in flord.keys():
i += 1
fgrouped[i] = [[x]]
return fgrouped
fgrouped = {}
i = 0
while flord:
i += 1
fgrouped[i] = []
big_key, big_value = flord.popitem(0)
fgrouped[i].append([big_key])
for j in xrange(multi-1):
cluster = []
if not flord: break
child_key, child_value = flord.popitem(0)
cluster.append(child_key)
if child_value == big_value:
fgrouped[i].append(cluster)
continue
else:
diff = big_value - child_value
for key, value in flord.iteritems():
if value <= diff:
cluster.append(key)
del flord[key]
if value == diff:
break
else:
child_value += value
diff = big_value - child_value
fgrouped[i].append(cluster)
return fgrouped | Cluster files into the specified number of groups, where each groups total size is as close as possible to each other.
Pseudo-code (O(n^g) time complexity):
Input: number of groups G per cluster, list of files F with respective sizes
- Order F by descending size
- Until F is empty:
- Create a cluster X
- A = Pop first item in F
- Put A in X[0] (X[0] is thus the first group in cluster X)
For g in 1..len(G)-1 :
- B = Pop first item in F
- Put B in X[g]
- group_size := size(B)
If group_size != size(A):
While group_size < size(A):
- Find next item C in F which size(C) <= size(A) - group_size
- Put C in X[g]
- group_size := group_size + size(C) |
def construct_from(cls, group_info):
"""Constructs ``FileGroup`` instance from group information."""
group = cls(group_info['id'])
group._info_cache = group_info
return group | Constructs ``FileGroup`` instance from group information. |
def setup_Jupyter():
"""
Set up a Jupyter notebook with a few defaults.
"""
sns.set(context = "paper", font = "monospace")
warnings.filterwarnings("ignore")
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
plt.rcParams["figure.figsize"] = (17, 10) | Set up a Jupyter notebook with a few defaults. |
def pprint_out(dct: Dict):
"""
Utility methods to pretty-print a dictionary that is typically outputted by parsyfiles (an ordered dict)
:param dct:
:return:
"""
for name, val in dct.items():
print(name + ':')
pprint(val, indent=4) | Utility methods to pretty-print a dictionary that is typically outputted by parsyfiles (an ordered dict)
:param dct:
:return: |
def save(self, *args, **kwargs):
"""
Custom save method does the following:
* automatically inherit node coordinates and elevation
* save shortcuts if HSTORE is enabled
"""
custom_checks = kwargs.pop('custom_checks', True)
super(Device, self).save(*args, **kwargs)
if custom_checks is False:
return
changed = False
if not self.location:
self.location = self.node.point
changed = True
if not self.elev and self.node.elev:
self.elev = self.node.elev
changed = True
original_user = self.shortcuts.get('user')
if self.node.user:
self.shortcuts['user'] = self.node.user
if original_user != self.shortcuts.get('user'):
changed = True
if 'nodeshot.core.layers' in settings.INSTALLED_APPS:
original_layer = self.shortcuts.get('layer')
self.shortcuts['layer'] = self.node.layer
if original_layer != self.shortcuts.get('layer'):
changed = True
if changed:
self.save(custom_checks=False) | Custom save method does the following:
* automatically inherit node coordinates and elevation
* save shortcuts if HSTORE is enabled |
def raw_corpus_rouge2(hypotheses: Iterable[str], references: Iterable[str]) -> float:
"""
Simple wrapper around ROUGE-2 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-2 score as float between 0 and 1.
"""
return rouge.rouge_2(hypotheses, references) | Simple wrapper around ROUGE-2 implementation.
:param hypotheses: Hypotheses stream.
:param references: Reference stream.
:return: ROUGE-2 score as float between 0 and 1. |
def delta(self, mapping, prefix):
"""
return a delta containing values that have changed.
"""
previous = self.getrange(prefix, strip=True)
if not previous:
pk = set()
else:
pk = set(previous.keys())
ck = set(mapping.keys())
delta = DeltaSet()
# added
for k in ck.difference(pk):
delta[k] = Delta(None, mapping[k])
# removed
for k in pk.difference(ck):
delta[k] = Delta(previous[k], None)
# changed
for k in pk.intersection(ck):
c = mapping[k]
p = previous[k]
if c != p:
delta[k] = Delta(p, c)
return delta | return a delta containing values that have changed. |
def visit_subscript(self, node):
""" Look for indexing exceptions. """
try:
for inferred in node.value.infer():
if not isinstance(inferred, astroid.Instance):
continue
if utils.inherit_from_std_ex(inferred):
self.add_message("indexing-exception", node=node)
except astroid.InferenceError:
return | Look for indexing exceptions. |
def store_initial_arguments(request, initial_arguments=None):
'Store initial arguments, if any, and return a cache identifier'
if initial_arguments is None:
return None
# Generate a cache id
cache_id = "dpd-initial-args-%s" % str(uuid.uuid4()).replace('-', '')
# Store args in json form in cache
if initial_argument_location():
cache.set(cache_id, initial_arguments, cache_timeout_initial_arguments())
else:
request.session[cache_id] = initial_arguments
return cache_id | Store initial arguments, if any, and return a cache identifier |
def fcoe_get_interface_input_fcoe_intf_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
input = ET.SubElement(fcoe_get_interface, "input")
fcoe_intf_name = ET.SubElement(input, "fcoe-intf-name")
fcoe_intf_name.text = kwargs.pop('fcoe_intf_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def circ_permutation(items):
"""Calculate the circular permutation for a given list of items."""
permutations = []
for i in range(len(items)):
permutations.append(items[i:] + items[:i])
return permutations | Calculate the circular permutation for a given list of items. |
def list_all_orders(cls, **kwargs):
"""List Orders
Return a list of Orders
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_orders(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Order]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_orders_with_http_info(**kwargs)
else:
(data) = cls._list_all_orders_with_http_info(**kwargs)
return data | List Orders
Return a list of Orders
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_orders(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[Order]
If the method is called asynchronously,
returns the request thread. |
def data():
"""Data providing function:
Make sure to have every relevant import statement included here and return data as
used in model function below. This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
"""
from keras.datasets import mnist
from keras.utils import np_utils
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
nb_classes = 10
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
return x_train, y_train, x_test, y_test | Data providing function:
Make sure to have every relevant import statement included here and return data as
used in model function below. This function is separated from model() so that hyperopt
won't reload data for each evaluation run. |
def __expand_subfeatures_aux (property_, dont_validate = False):
""" Helper for expand_subfeatures.
Given a feature and value, or just a value corresponding to an
implicit feature, returns a property set consisting of all component
subfeatures and their values. For example:
expand_subfeatures <toolset>gcc-2.95.2-linux-x86
-> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
equivalent to:
expand_subfeatures gcc-2.95.2-linux-x86
feature: The name of the feature, or empty if value corresponds to an implicit property
value: The value of the feature.
dont_validate: If True, no validation of value string will be done.
"""
from . import property # no __debug__ since Property is used elsewhere
assert isinstance(property_, property.Property)
assert isinstance(dont_validate, int) # matches bools
f = property_.feature
v = property_.value
if not dont_validate:
validate_value_string(f, v)
components = v.split ("-")
v = components[0]
result = [property.Property(f, components[0])]
subvalues = components[1:]
while len(subvalues) > 0:
subvalue = subvalues [0] # pop the head off of subvalues
subvalues = subvalues [1:]
subfeature = __find_implied_subfeature (f, subvalue, v)
# If no subfeature was found, reconstitute the value string and use that
if not subfeature:
return [property.Property(f, '-'.join(components))]
result.append(property.Property(subfeature, subvalue))
return result | Helper for expand_subfeatures.
Given a feature and value, or just a value corresponding to an
implicit feature, returns a property set consisting of all component
subfeatures and their values. For example:
expand_subfeatures <toolset>gcc-2.95.2-linux-x86
-> <toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
equivalent to:
expand_subfeatures gcc-2.95.2-linux-x86
feature: The name of the feature, or empty if value corresponds to an implicit property
value: The value of the feature.
dont_validate: If True, no validation of value string will be done. |
def menu(items, heading):
'''Takes list of dictionaries and prints a menu.
items parameter should be in the form of a list, containing
dictionaries with the keys: {"key", "text", "function"}.
Typing the key for a menuitem, followed by return, will run
"function".
'''
heading = "\n"*5 + heading # A little vertical padding
while True:
keydict = {}
clear_screen()
print(heading)
for item in items:
menustring = " " + item["key"] + " " + item["text"]
keydict[item["key"]] = item["function"]
print(menustring)
key = input("\nType key and Return (q to quit): ").strip()
if key.lower() == "q":
return
else:
try:
ret = keydict[key]()
if ret: # If child returns non-false, exit menu.
return 1
except KeyError: # Handle garbage input.
continue | Takes list of dictionaries and prints a menu.
items parameter should be in the form of a list, containing
dictionaries with the keys: {"key", "text", "function"}.
Typing the key for a menuitem, followed by return, will run
"function". |
def _get_read_query(self, table_columns, limit=None):
"""Create the read (COPY TO) query"""
query_columns = [column.name for column in table_columns]
query_columns.remove('the_geom_webmercator')
query = 'SELECT {columns} FROM "{schema}"."{table_name}"'.format(
table_name=self.table_name,
schema=self.schema,
columns=', '.join(query_columns))
if limit is not None:
if isinstance(limit, int) and (limit >= 0):
query += ' LIMIT {limit}'.format(limit=limit)
else:
raise ValueError("`limit` parameter must an integer >= 0")
return query | Create the read (COPY TO) query |
def syzygyJD(jd):
""" Finds the latest new or full moon and
returns the julian date of that event.
"""
sun = swe.sweObjectLon(const.SUN, jd)
moon = swe.sweObjectLon(const.MOON, jd)
dist = angle.distance(sun, moon)
# Offset represents the Syzygy type.
# Zero is conjunction and 180 is opposition.
offset = 180 if (dist >= 180) else 0
while abs(dist) > MAX_ERROR:
jd = jd - dist / 13.1833 # Moon mean daily motion
sun = swe.sweObjectLon(const.SUN, jd)
moon = swe.sweObjectLon(const.MOON, jd)
dist = angle.closestdistance(sun - offset, moon)
return jd | Finds the latest new or full moon and
returns the julian date of that event. |
def screenshot(self, save_path, element=None, delay=0):
    """
    Save a screenshot of the current page and return its file path.

    Works no matter which driver is in use: for non-selenium drivers a
    temporary PhantomJS driver is created just for the capture.
    * ^ Soon requests support will be added

    :param save_path: where to write the image; a ``.png`` extension is
        appended if missing. ``None`` logs an error and returns ``None``.
    :param element: optional web element; when given, the saved image is
        cropped to that element's bounding box.
    :param delay: extra seconds to wait before capturing, to let the
        page finish loading.
    :return: the path the image was saved to, or ``None``.
    """
    if save_path is None:
        logger.error("save_path cannot be None")
        return None
    save_location = cutil.norm_path(save_path)
    # Ensure the .png extension *before* capturing, so the file written to
    # disk, the crop step and the returned path all refer to the same file.
    # (Previously the extension was appended after the capture, so the
    # returned/cropped path could point at a file that was never written.)
    if not save_location.endswith('.png'):
        save_location += '.png'
    cutil.create_path(save_location)
    logger.info("Taking screenshot: {filename}".format(filename=save_location))
    if not self.driver_type.startswith('selenium'):
        logger.debug("Create tmp phantomjs web driver for screenshot")
        # Create a tmp phantom driver to take the screenshot for us
        from web_wrapper import DriverSeleniumPhantomJS
        headers = self.get_headers()  # Get headers to pass to the driver
        proxy = self.get_proxy()  # Get the current proxy being used if any
        # TODO: ^ Do the same thing for cookies
        screenshot_web = DriverSeleniumPhantomJS(headers=headers, proxy=proxy)
        screenshot_web.get_site(self.url, page_format='raw')
        screenshot_driver = screenshot_web.driver
    else:
        screenshot_driver = self.driver
    # If a background color does need to be set:
    # self.driver.execute_script('document.body.style.background = "{}"'.format('white'))
    # Give the page some extra time to load before taking the screenshot
    time.sleep(delay)
    if self.driver_type == 'selenium_chrome':
        # Need to do this for chrome to get a fullpage screenshot
        self.chrome_fullpage_screenshot(save_location, delay)
    else:
        screenshot_driver.get_screenshot_as_file(save_location)
    # If an element was passed, crop the screenshot down to that element
    if element is not None:
        logger.debug("Crop screenshot")
        el_location = element.location
        el_size = element.size
        # Removed the `except Exception as e: raise e.with_traceback(...)`
        # wrapper: it re-raised the same exception with the same traceback,
        # which a plain propagation already does.
        cutil.crop_image(save_location,
                         output_file=save_location,
                         width=int(el_size['width']),
                         height=int(el_size['height']),
                         x=int(el_location['x']),
                         y=int(el_location['y']),
                         )
    if not self.driver_type.startswith('selenium'):
        # Quit the tmp driver created to take the screenshot
        screenshot_web.quit()
    return save_location
* ^ Soon requests support will be added
Save screenshot to local dir with uuid as filename
then move the file to `filename` (path must be part of the file name)
Return the filepath of the image |
def drop_table(self, table):
    """
    Drop a table from the MyDB context.

    ## Arguments

    * `table` (str): The name of the table to drop.

    Raises a generic ``Exception`` when the submitted job does not
    finish with status code 5 (success).
    """
    drop_job = self.submit("DROP TABLE %s" % table, context="MYDB")
    job_status = self.monitor(drop_job)
    succeeded = (job_status[0] == 5)
    if not succeeded:
        raise Exception("Couldn't drop table %s" % table)
## Arguments
* `table` (str): The name of the table to drop. |
def change_settings(self, bio=None, public_images=None,
                    messaging_enabled=None, album_privacy=None,
                    accepted_gallery_terms=None):
    """
    Update the settings for the user.

    :param bio: A basic description filled out by the user, is displayed in
        the gallery profile page.
    :param public_images: Set the default privacy setting of the users
        images. If True images are public, if False private.
    :param messaging_enabled: Set to True to enable messaging.
    :param album_privacy: The default privacy level of albums created by
        the user. Can be public, hidden or secret.
    :param accepted_gallery_terms: The user agreement to Imgur Gallery
        terms. Necessary before the user can submit to the gallery.
    :return: the response object from the POST to the account settings
        endpoint.
    """
    # NOTE: album_privacy should maybe be renamed to default_privacy
    # NOTE: public_images is a boolean, despite the documentation saying it
    # is a string.
    url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
    # `params=locals()` forwards every keyword argument as a request
    # parameter in one go.
    # NOTE(review): at this point locals() also contains `self` and `url`
    # — presumably _send_request drops keys that are not settings; verify
    # before adding any new local variable above this call, since it would
    # silently become a request parameter.
    resp = self._imgur._send_request(url, needs_auth=True, params=locals(),
                                     method='POST')
    return resp
:param bio: A basic description filled out by the user, is displayed in
the gallery profile page.
:param public_images: Set the default privacy setting of the users
images. If True images are public, if False private.
:param messaging_enabled: Set to True to enable messaging.
:param album_privacy: The default privacy level of albums created by
the user. Can be public, hidden or secret.
:param accepted_gallery_terms: The user agreement to Imgur Gallery
terms. Necessary before the user can submit to the gallery. |
def get_contents(self):
    """Return the contents of this alias: the concatenation of the
    content signatures of all of its source nodes."""
    return ''.join(node.get_csig() for node in self.children())
of the content signatures of all its sources. |
def handle_read(self):
    """
    Handle the 'channel readable' state. E.g. read from a socket.
    """
    # Python 2 code (`except E, err:` syntax).
    with self.lock:
        logger.debug("handle_read()")
        # Nothing to read if the stream already hit EOF or the socket
        # has been closed/dropped.
        if self._eof or self._socket is None:
            return
        if self._state == "tls-handshake":
            # Drive the TLS handshake forward; keep going as long as it
            # reports that it is waiting for more input from the peer.
            while True:
                logger.debug("tls handshake read...")
                self._continue_tls_handshake()
                logger.debug(" state: {0}".format(self._tls_state))
                if self._tls_state != "want_read":
                    break
        elif self._tls_state == "connected":
            # TLS established: read decrypted data from the SSL socket.
            while self._socket and not self._eof:
                logger.debug("tls socket read...")
                try:
                    data = self._socket.read(4096)
                except ssl.SSLError, err:
                    # WANT_READ / WANT_WRITE mean "no data yet" on a
                    # non-blocking SSL socket — stop and wait for the
                    # next readiness event.
                    if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                        break
                    elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                        break
                    else:
                        raise
                except socket.error, err:
                    if err.args[0] == errno.EINTR:
                        # Interrupted by a signal — retry the read.
                        continue
                    elif err.args[0] in BLOCKING_ERRORS:
                        # Would block — wait for the next event.
                        break
                    elif err.args[0] == errno.ECONNRESET:
                        logger.warning("Connection reset by peer")
                        # `data = None` is fed to the reader below —
                        # presumably it signals EOF/disconnect; verify
                        # against _feed_reader's contract.
                        data = None
                    else:
                        raise
                self._feed_reader(data)
        else:
            # Plain (non-TLS) socket: read raw bytes.
            while self._socket and not self._eof:
                logger.debug("raw socket read...")
                try:
                    data = self._socket.recv(4096)
                except socket.error, err:
                    if err.args[0] == errno.EINTR:
                        # Interrupted by a signal — retry the read.
                        continue
                    elif err.args[0] in BLOCKING_ERRORS:
                        # Would block — wait for the next event.
                        break
                    elif err.args[0] == errno.ECONNRESET:
                        logger.warning("Connection reset by peer")
                        data = None
                    else:
                        raise
                self._feed_reader(data)
def gmdaArray(arry, dtype, mask=None, numGhosts=1):
    """
    Ghosted, masked, distributed array constructor.
    @param arry numpy-like array of initial values
    @param dtype numpy data type of the resulting array
    @param mask optional mask to attach to the array
    @param numGhosts the number of ghosts (>= 0)
    @return a GhostedMaskedDistArray initialized from arry
    """
    source = numpy.array(arry, dtype)
    ghosted = GhostedMaskedDistArray(source.shape, source.dtype)
    ghosted.mask = mask
    ghosted.setNumberOfGhosts(numGhosts)
    ghosted[:] = source
    return ghosted
@param arry numpy-like array
@param numGhosts the number of ghosts (>= 0) |
def _get_features(self, eopatch=None):
    """A generator of parsed features.

    :param eopatch: A given EOPatch
    :type eopatch: EOPatch or None
    :return: One by one feature
    :rtype: tuple(FeatureType, str) or tuple(FeatureType, str, str)
    """
    for ftype, fdict in self.feature_collection.items():
        # Substitute the default feature type when none was specified.
        if ftype is None and self.default_feature_type is not None:
            ftype = self.default_feature_type

        if ftype is None:
            # Feature type still unknown: resolve it from the EOPatch
            # when one is available, otherwise yield with Ellipsis.
            for fname, new_fname in fdict.items():
                if eopatch is None:
                    yield self._return_feature(..., fname, new_fname)
                    continue
                found_type = self._find_feature_type(fname, eopatch)
                if not found_type:
                    raise ValueError("Feature with name '{}' does not exist among features of allowed feature"
                                     " types in given EOPatch. Allowed feature types are "
                                     "{}".format(fname, self.allowed_feature_types))
                yield self._return_feature(found_type, fname, new_fname)
        elif fdict is ...:
            # Ellipsis stands for "every feature of this type".
            if not ftype.has_dict() or eopatch is None:
                yield self._return_feature(ftype, ...)
            else:
                for fname in eopatch[ftype]:
                    yield self._return_feature(ftype, fname)
        else:
            # Explicit mapping of feature names (optionally renamed).
            for fname, new_fname in fdict.items():
                if eopatch is not None and fname not in eopatch[ftype]:
                    raise ValueError('Feature {} of type {} was not found in EOPatch'.format(fname,
                                                                                             ftype))
                yield self._return_feature(ftype, fname, new_fname)
:param eopatch: A given EOPatch
:type eopatch: EOPatch or None
:return: One by one feature
:rtype: tuple(FeatureType, str) or tuple(FeatureType, str, str) |
def format_in_original_format(numobj, region_calling_from):
    """Format a number using the original format that the number was parsed from.

    The original format is embedded in the country_code_source field of the
    PhoneNumber object passed in. If such information is missing, the number
    will be formatted into the NATIONAL format by default.

    When we don't have a formatting pattern for the number, the method
    returns the raw input when it is available.

    Note this method guarantees no digit will be inserted, removed or modified
    as a result of formatting.

    Arguments:
    numobj -- The phone number that needs to be formatted in its original
              number format
    region_calling_from -- The region whose IDD needs to be prefixed if the
              original number has one.

    Returns the formatted phone number in its original number format.
    """
    raw = numobj.raw_input
    if raw is not None and not _has_formatting_pattern_for_number(numobj):
        # Without a formatting pattern we might format the number as a
        # group without national prefix, so fall back to the raw input.
        return raw
    if numobj.country_code_source is CountryCodeSource.UNSPECIFIED:
        return format_number(numobj, PhoneNumberFormat.NATIONAL)
    formatted_number = _format_original_allow_mods(numobj, region_calling_from)
    # If no digit was inserted/removed/modified by the formatting, return
    # the formatted number; otherwise return exactly what the user typed.
    if formatted_number is not None and raw:
        if (normalize_diallable_chars_only(formatted_number) !=
                normalize_diallable_chars_only(raw)):
            formatted_number = raw
    return formatted_number
The original format is embedded in the country_code_source field of the
PhoneNumber object passed in. If such information is missing, the number
will be formatted into the NATIONAL format by default.
When we don't have a formatting pattern for the number, the method
returns the raw input when it is available.
Note this method guarantees no digit will be inserted, removed or modified
as a result of formatting.
Arguments:
number -- The phone number that needs to be formatted in its original
number format
region_calling_from -- The region whose IDD needs to be prefixed if the
original number has one.
Returns the formatted phone number in its original number format. |
def parse_list(cls, data):
    """Parse a list of JSON objects into a result set of model instances.

    Falsy entries (and a None ``data``) are skipped entirely.
    """
    result_set = ResultSet()
    for item in (data or []):
        if item:
            result_set.append(cls.parse(item))
    return result_set
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.