| code | docstring |
|---|---|
def validate_key(key: str):
    """
    Validate the given key.

    :param key: the key to validate
    :raises DoubleSlashKeyError: if the key contains a double slash
    :raises NonNormalisedKeyError: if the key changes under path normalisation
    """
    # A double slash is never valid, regardless of normalisation.
    if "//" in key:
        raise DoubleSlashKeyError(key)
    # Reject keys that are not already in normalised path form
    # (e.g. containing "." / ".." segments or a trailing slash).
    if normpath(key) != key:
        raise NonNormalisedKeyError(key)
def _init_loaders(self) -> None:
    """
    Instantiate each configured translation loader, subscribe to its
    update notifications and trigger an initial load.
    """
    for loader_conf in settings.I18N_TRANSLATION_LOADERS:
        loader_cls = import_class(loader_conf['loader'])
        loader = loader_cls()
        # Re-run self.update whenever this loader reports new translations.
        loader.on_update(self.update)
        run(loader.load(**loader_conf['params']))
def _parse_remote_model(self, context):
    """
    Parse the remote resource model and add its full name.

    :type context: models.QualiDriverModels.ResourceRemoteCommandContext
    :return: a GenericDeployedAppResourceModel describing the deployed app
    :raises Exception: if the context carries no remote endpoints
    """
    if not context.remote_endpoints:
        # BUG FIX: the original passed the encoded context as a second
        # Exception argument, so the '{0}' placeholder was never filled in.
        raise Exception('no remote resources found in context: {0}'.format(
            jsonpickle.encode(context, unpicklable=False)))
    resource = context.remote_endpoints[0]
    dictionary = jsonpickle.decode(resource.app_context.deployed_app_json)
    holder = DeployDataHolder(dictionary)
    app_resource_detail = GenericDeployedAppResourceModel()
    app_resource_detail.vm_uuid = holder.vmdetails.uid
    app_resource_detail.cloud_provider = context.resource.fullname
    app_resource_detail.fullname = resource.fullname
    # vmCustomParams is optional in the deployed-app payload.
    if hasattr(holder.vmdetails, 'vmCustomParams'):
        app_resource_detail.vm_custom_params = holder.vmdetails.vmCustomParams
    return app_resource_detail
def add_permission_view_menu(self, permission_name, view_menu_name):
    """
    Adds a permission on a view or menu to the backend

    :param permission_name:
        name of the permission to add: 'can_add','can_edit' etc...
    :param view_menu_name:
        name of the view menu to add
    """
    # Nothing to do unless both names were supplied.
    if not permission_name or not view_menu_name:
        return None
    existing = self.find_permission_view_menu(permission_name, view_menu_name)
    if existing:
        return existing
    # Create (or fetch) the underlying records, then link them.
    view_menu = self.add_view_menu(view_menu_name)
    permission = self.add_permission(permission_name)
    pv = self.permissionview_model()
    pv.view_menu = view_menu
    pv.permission = permission
    try:
        pv.save()
        log.info(c.LOGMSG_INF_SEC_ADD_PERMVIEW.format(str(pv)))
        return pv
    except Exception as e:
        # On failure, log and fall through returning None.
        log.error(c.LOGMSG_ERR_SEC_ADD_PERMVIEW.format(str(e)))
def firmware_manifest_destroy(self, manifest_id, **kwargs):  # noqa: E501
    """Delete a manifest  # noqa: E501

    Delete a firmware manifest.  This method makes a synchronous HTTP
    request by default. To make an asynchronous HTTP request, please pass
    asynchronous=True

    >>> thread = api.firmware_manifest_destroy(manifest_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str manifest_id: The firmware manifest ID (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The same underlying call handles both modes: with asynchronous=True
    # in kwargs it returns the request thread instead of blocking.
    return self.firmware_manifest_destroy_with_http_info(manifest_id, **kwargs)  # noqa: E501
def get_copy(dict_, key, default=None):
    """
    Look up *key* in *dict_* and return a deep copy of the stored value;
    return *default* unchanged when the key is absent.

    BUG FIX: the original only deep-copied truthy values, so falsy but
    mutable values (e.g. an empty list or dict) were returned by reference
    and could be mutated through the caller.
    """
    if key in dict_:
        return deepcopy(dict_[key])
    return default
def _get_tables(self, base_dir):
"""Load the contents of meta_file and the corresponding data.
If fields containing Personally Identifiable Information are detected in the metadata
they are anonymized before asign them into `table_dict`.
Args:
base_dir(str): Root folder of the dataset files.
Returns:
dict: Mapping str -> tuple(pandas.DataFrame, dict)
"""
table_dict = {}
for table in self.metadata['tables']:
if table['use']:
relative_path = os.path.join(base_dir, self.metadata['path'], table['path'])
data_table = pd.read_csv(relative_path)
pii_fields = self._get_pii_fields(table)
data_table = self._anonymize_table(data_table, pii_fields)
table_dict[table['name']] = (data_table, table)
return table_dict | Load the contents of meta_file and the corresponding data.
If fields containing Personally Identifiable Information are detected in the metadata
they are anonymized before asign them into `table_dict`.
Args:
base_dir(str): Root folder of the dataset files.
Returns:
dict: Mapping str -> tuple(pandas.DataFrame, dict) |
def non_increasing(values):
    """Return True when no element is greater than its predecessor."""
    for earlier, later in zip(values, values[1:]):
        if earlier < later:
            return False
    return True
def _edit(self, filename, line=None):
    """ Opens a Python script for editing.

    Parameters:
    -----------
    filename : str
        A path to a local system file.
    line : int, optional
        A line of interest in the file.
    """
    if self.custom_edit:
        # An external handler is connected; let it open the editor.
        self.custom_edit_requested.emit(filename, line)
    elif not self.editor:
        self._append_plain_text('No default editor available.\n'
            'Specify a GUI text editor in the `IPythonWidget.editor` '
            'configurable to enable the %edit magic')
    else:
        try:
            # Quote the path so editors tolerate spaces in filenames.
            filename = '"%s"' % filename
            if line and self.editor_line:
                command = self.editor_line.format(filename=filename,
                                                  line=line)
            else:
                try:
                    # Probe whether the template contains a {filename} slot:
                    # if it does, format() with no args raises KeyError.
                    command = self.editor.format()
                except KeyError:
                    command = self.editor.format(filename=filename)
                else:
                    # No placeholder: append the filename as an argument.
                    command += ' ' + filename
        except KeyError:
            # Template used a placeholder we do not supply.
            self._append_plain_text('Invalid editor command.\n')
        else:
            try:
                Popen(command, shell=True)
            except OSError:
                msg = 'Opening editor with command "%s" failed.\n'
                self._append_plain_text(msg % command)
async def ssh_exec(server, cmd, timeout=10, **ssh_kwargs):
    """Execute a command on a given server using asynchronous SSH-connection.

    Connecting is wrapped in :func:`asyncio.wait_for`, so if the server is
    not reachable within *timeout* seconds an :exc:`asyncio.TimeoutError`
    is raised.

    :param str server: Address of the server
    :param str cmd: Command to be executed
    :param int timeout: Timeout to connect to server.
    :param ssh_kwargs:
        Any additional SSH-connection arguments, as specified by
        :func:`asyncssh.connect`. See `asyncssh documentation
        <http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
        details.
    :returns: the completed command result (the connection is closed
        before returning)
    """
    connection = await asyncio.wait_for(
        asyncssh.connect(server, **ssh_kwargs), timeout=timeout)
    result = await connection.run(cmd)
    connection.close()
    return result
def RemoveScanNode(self, path_spec):
    """Removes a scan node of a certain path specification.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        SourceScanNode: parent scan node or None if not available.

    Raises:
        RuntimeError: if the scan node has sub nodes.
    """
    node = self._scan_nodes.get(path_spec, None)
    if not node:
        return None
    # Refuse to remove an interior node; callers must prune leaves first.
    if node.sub_nodes:
        raise RuntimeError('Scan node has sub nodes.')
    parent = node.parent_node
    if parent:
        parent.sub_nodes.remove(node)
    # Removing the root leaves the scanner without a root path spec.
    if path_spec == self._root_path_spec:
        self._root_path_spec = None
    del self._scan_nodes[path_spec]
    if path_spec.IsFileSystem():
        del self._file_system_scan_nodes[path_spec]
    return parent
def _config_net_topology(self, conf):
    """
    Initialize and populate all the network related elements, like
    reserving ips and populating network specs of the given configuration
    spec

    Args:
        conf (dict): Configuration spec to initialize

    Returns:
        dict: the updated configuration spec
    """
    conf = self._init_net_specs(conf)
    mgmts = self._select_mgmt_networks(conf)
    self._validate_netconfig(conf)
    # Subnets are reserved first so they can be released if any of the
    # steps below fail.
    allocated_subnets, conf = self._allocate_subnets(conf)
    try:
        self._add_mgmt_to_domains(conf, mgmts)
        self._register_preallocated_ips(conf)
        self._allocate_ips_to_nics(conf)
        self._set_mtu_to_nics(conf)
        self._add_dns_records(conf, mgmts)
    except:
        # Intentional bare except: release the reserved subnets for *any*
        # failure (including KeyboardInterrupt) and re-raise unchanged.
        self._subnet_store.release(allocated_subnets)
        raise
    return conf
def get_sum_w2(self, ix, iy=0, iz=0):
    """
    Obtain the true number of entries in the bin weighted by w^2
    """
    if self.GetSumw2N() == 0:
        raise RuntimeError(
            "Attempting to access Sumw2 in histogram "
            "where weights were not stored")
    # Flatten the (ix, iy, iz) bin coordinates, overflow bins included.
    nx = self.nbins(axis=0, overflow=True)
    ny = self.nbins(axis=1, overflow=True)
    flat_index = nx * ny * iz + nx * iy + ix
    if flat_index < 0 or flat_index >= self.GetSumw2N():
        raise IndexError("bin index out of range")
    return self.GetSumw2().At(flat_index)
def paginate_announcements_list(request, context, items):
    """
    ***TODO*** Migrate to django Paginator (see lostitems)
    """
    display_num = 10
    # Read the requested offset; absent or non-numeric input means page 0.
    start_num = 0
    if "start" in request.GET:
        try:
            start_num = int(request.GET.get("start"))
        except ValueError:
            start_num = 0
    end_num = start_num + display_num
    prev_page = start_num - display_num
    more_items = (len(items) - start_num) > display_num
    try:
        page = items[start_num:end_num]
    except (ValueError, AssertionError):
        # Fall back to the first page if the slice is rejected.
        page = items[:display_num]
    else:
        items = page
    context.update({
        "items": items,
        "start_num": start_num,
        "end_num": end_num,
        "prev_page": prev_page,
        "more_items": more_items,
    })
    return context, items
def _make_renderer(self, at_paths, at_encoding, **kwargs):
    """
    :param at_paths: Template search paths
    :param at_encoding: Template encoding
    :param kwargs: Keyword arguments passed to the template engine to
        render templates with specific features enabled.
    """
    # Encoding options: explicit kwargs win, then previously stored
    # options, then the lower-cased template encoding.
    for enc_opt in ("file_encoding", "string_encoding"):
        fallback = self._roptions.get(enc_opt, at_encoding.lower())
        self._roptions[enc_opt] = kwargs.get(enc_opt, fallback)
    # Search paths: at_paths take precedence over kwargs, which take
    # precedence over previously stored dirs.
    key = "search_dirs"
    search_paths = kwargs.get(key, []) + self._roptions.get(key, [])
    if at_paths is not None:
        search_paths = at_paths + search_paths
    self._roptions[key] = search_paths
    return pystache.renderer.Renderer(**self._roptions)
def get_object_info(self):
    """
    Returns object info in following form <module.class object at address>
    """
    # str(cls) looks like "<class 'module.Cls'>"; strip the wrapper down
    # to "<module.Cls" and append the instance address.
    info = str(self.__class__)
    for junk in (">", "class ", "'"):
        info = info.replace(junk, "")
    return info + " object at 0x%x>" % id(self)
def get_name_deadlines(self, name_rec, namespace_rec, block_number):
    """
    Get the expiry and renewal deadlines for a (registered) name.

    NOTE: expire block here is NOT the block at which the owner loses the
    name, but the block at which lookups fail.  The name owner has until
    renewal_deadline to renew the name.

    Return {'expire_block': ..., 'renewal_deadline': ...} on success
    Return None if the namespace isn't ready yet
    """
    if namespace_rec['op'] != NAMESPACE_READY:
        # The name cannot be in a grace period while the namespace is not
        # ready.
        return None
    namespace_id = namespace_rec['namespace_id']
    lifetime_multiplier = get_epoch_namespace_lifetime_multiplier(block_number, namespace_id)
    grace_period = get_epoch_namespace_lifetime_grace_period(block_number, namespace_id)
    # Lifetime counts from the later of namespace readiness and the name's
    # last renewal.
    start_block = max(namespace_rec['ready_block'], name_rec['last_renewed'])
    expire_block = start_block + namespace_rec['lifetime'] * lifetime_multiplier
    return {
        'expire_block': expire_block,
        'renewal_deadline': expire_block + grace_period,
    }
def minion_publish(self, load):
    '''
    Publish a command initiated from a minion, this method executes minion
    restrictions so that the minion publication will only work if it is
    enabled in the config.

    The configuration on the master allows minions to be matched to
    salt functions, so the minions can only publish allowed salt functions

    The config will look like this:

    peer:
      .*:
        - .*

    This configuration will enable all minions to execute all commands.

    peer:
      foo.example.com:
        - test.*

    This configuration will only allow the minion foo.example.com to
    execute commands from the test module
    '''
    # Enforce the peer configuration restrictions before doing anything.
    if not self.__verify_minion_publish(load):
        return {}
    # Set up the publication payload
    pub_load = {
        'fun': load['fun'],
        'arg': salt.utils.args.parse_input(
            load['arg'],
            no_parse=load.get('no_parse', [])),
        'tgt_type': load.get('tgt_type', 'glob'),
        'tgt': load['tgt'],
        'ret': load['ret'],
        'id': load['id'],
    }
    # 'tmo' appears to be a legacy alias for 'timeout'; when both are
    # present, 'timeout' (handled second) wins.
    if 'tmo' in load:
        try:
            pub_load['timeout'] = int(load['tmo'])
        except ValueError:
            msg = 'Failed to parse timeout value: {0}'.format(
                load['tmo'])
            log.warning(msg)
            return {}
    if 'timeout' in load:
        try:
            pub_load['timeout'] = int(load['timeout'])
        except ValueError:
            msg = 'Failed to parse timeout value: {0}'.format(
                load['timeout'])
            log.warning(msg)
            return {}
    if 'tgt_type' in load:
        # Nodegroup targets are expanded to their compound expression;
        # an unknown nodegroup aborts the publication.
        if load['tgt_type'].startswith('node'):
            if load['tgt'] in self.opts['nodegroups']:
                pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
                pub_load['tgt_type'] = 'compound'
            else:
                return {}
        else:
            pub_load['tgt_type'] = load['tgt_type']
    pub_load['raw'] = True
    ret = {}
    # Collect returns as they stream in; 'full' form keeps the whole
    # per-minion payload, otherwise only the 'return' value is kept.
    for minion in self.local.cmd_iter(**pub_load):
        if load.get('form', '') == 'full':
            data = minion
            if 'jid' in minion:
                ret['__jid__'] = minion['jid']
            data['ret'] = data.pop('return')
            ret[minion['id']] = data
        else:
            ret[minion['id']] = minion['return']
            if 'jid' in minion:
                ret['__jid__'] = minion['jid']
    # Merge in any cached returns for the same job that were not seen live.
    # NOTE(review): if no minion reported a jid, ret['__jid__'] raises
    # KeyError here — confirm cmd_iter always yields a jid.
    for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])):
        if key not in ret:
            ret[key] = val
    if load.get('form', '') != 'full':
        ret.pop('__jid__')
    return ret
def compute_tensor(self, x):
    """
    Compute the class-factored language-model cost for a batch.

    :param x: (batch, time, vec)
    :return: scalar cost: within-class output cost plus class cost
    """
    # Target class: integer-divide the flat target ids by the per-class
    # output size to recover each target's class id.
    class_matrix = self.target_tensor // self.output_size
    class_vector = class_matrix.reshape((-1,))
    # Target index within the class partition.
    target_matrix = self.target_tensor % self.output_size
    target_vector = target_matrix.reshape((-1,))
    # Input matrix: flatten (batch, time) into a single row axis.
    # NOTE(review): input_matrix is never used below — confirm whether it
    # can be removed.
    input_matrix = x.reshape((-1, self.input_dim))
    # Output matrix
    output_tensor3d = self.output_layer.compute_tensor(x)
    output_matrix = output_tensor3d.reshape((-1, self.class_size, self.output_size))
    # Select, for each row, the output slice belonging to that row's
    # target class (arange_cache supplies the row indices).
    arange_vec = self.arange_cache[:output_matrix.shape[0]]
    sub_output_matrix = output_matrix[arange_vec, class_vector]
    # Softmax over the within-class outputs.
    softmax_output_matrix = self.softmax_layer.compute_tensor(sub_output_matrix)
    # Class prediction
    class_output_matrix = self.class_layer.compute_tensor(x)
    # Costs: word-within-class cost plus class-prediction cost.
    output_cost = LMCost(softmax_output_matrix, target_vector).get()
    class_cost = LMCost(class_output_matrix, class_matrix).get()
    final_cost = output_cost + class_cost
    return final_cost
def purge_db(self):
    """
    Remove every checkpoint database record belonging to the current user.
    """
    # begin() yields a transaction that commits on success and rolls back
    # if the purge raises.
    with self.engine.begin() as connection:
        purge_remote_checkpoints(connection, self.user_id)
def delete(self):
    """Delete template config for specified template name.

    .. __: https://api.go.cd/current/#delete-a-template

    Returns:
        Response: :class:`gocd.api.response.Response` object
    """
    return self._request(
        self.name,
        ok_status=None,
        data=None,
        headers=self._default_headers(),
        method="DELETE",
    )
def _recompute_transform(self):
    """NOTE: This cannot be called until after this has been added
    to an Axes, otherwise unit conversion will fail. This
    makes it very important to call the accessor method and
    not directly access the transformation member variable.
    """
    # Convert the center through the Axes' unit machinery.
    center = (self.convert_xunits(self.center[0]),
              self.convert_yunits(self.center[1]))
    # Unit conversion of width/height is deliberately disabled here.
    width = self.width  # self.convert_xunits(self.width)
    height = self.height  # self.convert_yunits(self.height)
    trans = artist.Artist.get_transform(self)
    # Build the patch transform: scale a unit shape by the half-extents
    # (times self.scale), rotate, then translate to the transformed center.
    self._patch_transform = transforms.Affine2D() \
        .scale(width * 0.5 * self.scale, height * 0.5 * self.scale) \
        .rotate_deg(self.angle) \
        .translate(*trans.transform(center))
def hla_choices(orig_hla, min_parts=2):
    """Provide a range of options for HLA type, with decreasing resolution.
    """
    yield orig_hla
    # If the type ends in a non-digit suffix (e.g. an expression marker
    # like "N"), also offer it with the suffix stripped.
    try:
        int(orig_hla[-1])
    except ValueError:
        yield orig_hla[:-1]
    # Progressively coarser resolutions, down to min_parts fields.
    parts = orig_hla.split(":")
    for keep in range(len(parts), min_parts - 1, -1):
        yield ":".join(parts[:keep])
def process_belrdf(rdf_str, print_output=True):
    """Return a BelRdfProcessor for a BEL/RDF string.

    Parameters
    ----------
    rdf_str : str
        A BEL/RDF string to be processed. This will usually come from
        reading a .rdf file.
    print_output : bool
        If True, print statement coverage and the extracted statements.

    Returns
    -------
    bp : BelRdfProcessor
        A BelRdfProcessor object which contains INDRA Statements in
        bp.statements, or None if the RDF string could not be parsed.

    Notes
    -----
    This function calls all the specific get_type_of_mechanism()
    functions of the newly constructed BelRdfProcessor to extract
    INDRA Statements.
    """
    graph = rdflib.Graph()
    try:
        graph.parse(data=rdf_str, format='nt')
    except ParseError as e:
        logger.error('Could not parse rdf: %s' % e)
        return None
    # Build INDRA statements from RDF by running every extraction pass.
    bp = BelRdfProcessor(graph)
    extractors = (
        bp.get_complexes,
        bp.get_activating_subs,
        bp.get_modifications,
        bp.get_activating_mods,
        bp.get_transcription,
        bp.get_activation,
        bp.get_conversions,
    )
    for extract in extractors:
        extract()
    # Print some output about the process
    if print_output:
        bp.print_statement_coverage()
        bp.print_statements()
    return bp
def morlet(freq, s_freq, ratio=5, sigma_f=None, dur_in_sd=4, dur_in_s=None,
           normalization='peak', zero_mean=False):
    """Create a Morlet wavelet.

    Parameters
    ----------
    freq : float
        central frequency of the wavelet
    s_freq : int
        sampling frequency
    ratio : float
        ratio for a wavelet family ( = freq / sigma_f)
    sigma_f : float
        standard deviation of the wavelet in frequency domain
    dur_in_sd : float
        duration of the wavelet, given as number of the standard deviation in
        the time domain, in one side.
    dur_in_s : float
        total duration of the wavelet, two-sided (i.e. from start to finish)
    normalization : str
        'area' means that energy is normalized to 1, 'peak' means that the peak
        is set at 1, 'max' is a normalization used by nitime which does not
        change max value of output when you change sigma_f.
    zero_mean : bool
        make sure that the wavelet has zero mean (only relevant if ratio < 5)

    Returns
    -------
    ndarray
        vector containing the complex Morlet wavelets

    Notes
    -----
    'ratio' and 'sigma_f' are mutually exclusive. If you use 'sigma_f', the
    standard deviation stays the same for all the frequency. It's more common
    to specify a constant ratio for the wavelet family, so that the frequency
    resolution changes with the frequency of interest.

    'dur_in_sd' and 'dur_in_s' are mutually exclusive. 'dur_in_s' specifies the
    total duration (from start to finish) of the window. 'dur_in_sd' calculates
    the total duration as the length in standard deviations in the time domain:
    dur_in_s = dur_in_sd * 2 * sigma_t, with sigma_t = 1 / (2 * pi * sigma_f)
    """
    # ratio and sigma_f determine each other; whichever was given wins.
    if sigma_f is None:
        sigma_f = freq / ratio
    else:
        ratio = freq / sigma_f
    # Standard deviation of the Gaussian envelope in the time domain.
    sigma_t = 1 / (2 * pi * sigma_f)
    if ratio < 5 and not zero_mean:
        lg.info('The wavelet won\'t have zero mean, set zero_mean=True to '
                'correct it')
    if dur_in_s is None:
        dur_in_s = sigma_t * dur_in_sd * 2
    # Symmetric time axis sampled at s_freq.
    t = arange(-dur_in_s / 2, dur_in_s / 2, 1 / s_freq)
    # Complex carrier at the central frequency.
    w = exp(1j * 2 * pi * freq * t)
    if zero_mean:
        # Subtract the constant correction term before applying the
        # Gaussian envelope, so the wavelet integrates to (near) zero.
        w -= exp(-1 / 2 * ratio ** 2)
    # Apply the Gaussian envelope.
    w *= exp(-t ** 2 / (2 * sigma_t ** 2))
    if normalization == 'area':
        w /= sqrt(sqrt(pi) * sigma_t * s_freq)
    elif normalization == 'max':
        w /= 2 * sigma_t * sqrt(2 * pi) / s_freq
    elif normalization == 'peak':
        # Peak already at 1 by construction; nothing to do.
        pass
    lg.info('At freq {0: 9.3f}Hz, sigma_f={1: 9.3f}Hz, sigma_t={2: 9.3f}s, '
            'total duration={3: 9.3f}s'.format(freq, sigma_f, sigma_t,
                                               dur_in_s))
    lg.debug('    Real peak={0: 9.3f}, Mean={1: 12.6f}, '
             'Energy={2: 9.3f}'.format(max(real(w)), mean(w), norm(w) ** 2))
    return w
def post(cls, payload):
    """
    A wrapper over Model.post() that handles the case where a Library has a
    PairedBarcode and the user may have supplied the PairedBarcode in the
    form of index1-index2, i.e. GATTTCCA-GGCGTCGA.  This isn't the
    PairedBarcode's record name or a record ID, thus Model.post() won't be
    able to figure out the PairedBarcode's ID to substitute in the payload
    (via a call to cls.replace_name_with_id()).  Thus, this wrapper will
    attempt to replace a PairedBarcode sequence in the payload with a
    PairedBarcode ID, then pass the payload off to Model.post().

    :raises Exception: if a barcode sequence pair was supplied without the
        sequencing_library_prep_kit_id attribute in the payload.
    """
    slpk_attr_name = "sequencing_library_prep_kit_id"
    paired_bc_id_attr_name = "paired_barcode_id"
    seq_reg = re.compile("^[ACGTN]+$")
    if paired_bc_id_attr_name in payload:
        try:
            index1, index2 = payload[paired_bc_id_attr_name].upper().split("-")
        except ValueError:
            # Not in GATTTCCA-GGCGTCGA format so let it be.
            return Model.post(cls=cls, payload=payload)
        if not seq_reg.match(index1) or not seq_reg.match(index2):
            # Not in GATTTCCA-GGCGTCGA format so let it be.
            return Model.post(cls=cls, payload=payload)
        if slpk_attr_name not in payload:
            # BUG FIX: the original referenced an undefined name `slpk`
            # here, which raised NameError instead of this message.
            raise Exception(
                "You need to include the " + slpk_attr_name + " attribute name.")
        slpk_id = SequencingLibraryPrepKit.replace_name_with_id(payload[slpk_attr_name])
        payload[slpk_attr_name] = slpk_id
        index1_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 1, "sequence": index1}, require=True)["id"]
        index2_id = Barcode.find_by(payload={slpk_attr_name: slpk_id, "index_number": 2, "sequence": index2}, require=True)["id"]
        # Ensure that a PairedBarcode for this index combo exists, creating
        # it on demand:
        pbc_payload = {"index1_id": index1_id, "index2_id": index2_id, slpk_attr_name: slpk_id}
        pbc_exists = PairedBarcode.find_by(payload=pbc_payload)
        if not pbc_exists:
            pbc_exists = PairedBarcode.post(payload=pbc_payload)
        payload[paired_bc_id_attr_name] = pbc_exists["id"]
    return super().post(payload=payload)
def maketrans(fromstr, tostr):
    """maketrans(frm, to) -> string

    Return a translation table (a string of 256 bytes long)
    suitable for use in string.translate. The strings frm and to
    must be of the same length.
    """
    # NOTE: Python 2 code (`raise ValueError, msg` syntax; string-based
    # translation tables).
    if len(fromstr) != len(tostr):
        raise ValueError, "maketrans arguments must have same length"
    global _idmapL
    # _idmapL lazily caches the identity map (_idmap) as a list of 256
    # single-character strings.
    if not _idmapL:
        _idmapL = list(_idmap)
    L = _idmapL[:]
    # Overwrite each source character's slot (by ordinal) with the
    # corresponding target character.
    fromstr = map(ord, fromstr)
    for i in range(len(fromstr)):
        L[fromstr[i]] = tostr[i]
    return ''.join(L)
def build_image(self, conf, pushing=False):
    """Build this image.

    Returns whatever cached-image value the builder reports.
    """
    # BUG FIX: `stream` was referenced in the except/finally clauses even
    # when BuildProgressStream() itself raised, producing a NameError that
    # masked the real error.  Initialise it (and `cached`) up front.
    stream = None
    cached = None
    with conf.make_context() as context:
        try:
            stream = BuildProgressStream(conf.harpoon.silent_build)
            with self.remove_replaced_images(conf) as info:
                cached = NormalBuilder().build(conf, context, stream)
                info['cached'] = cached
        except (KeyboardInterrupt, Exception) as error:
            exc_info = sys.exc_info()
            # Give the user a chance to inspect the failed container.
            if stream and stream.current_container:
                Runner().stage_build_intervention(conf, stream.current_container)
            if isinstance(error, KeyboardInterrupt):
                raise UserQuit()
            else:
                six.reraise(*exc_info)
        finally:
            # Clean up intermediate layers regardless of build outcome.
            if stream and stream.intermediate_images and conf.cleanup_intermediate_images:
                for image in stream.intermediate_images:
                    log.info("Deleting intermediate image\timage=%s", image)
                    try:
                        conf.harpoon.docker_api.remove_image(image)
                    except Exception as error:
                        log.error("Failed to remove intermediate image\timage=%s\terror=%s", image, error)
    return cached
def get_dependencies(ireq, sources=None, parent=None):
    # type: (Union[InstallRequirement, InstallationCandidate], Optional[List[Dict[S, Union[S, bool]]]], Optional[AbstractDependency]) -> Set[S, ...]
    """Get all dependencies for a given install requirement.

    :param ireq: A single InstallRequirement
    :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
    :param sources: Pipfile-formatted sources, defaults to None
    :type sources: list[dict], optional
    :param parent: The parent of this list of dependencies, defaults to None
    :type parent: :class:`~pip._internal.req.req_install.InstallRequirement`
    :return: A set of dependency lines for generating new InstallRequirements.
    :rtype: set(str)
    """
    if not isinstance(ireq, pip_shims.shims.InstallRequirement):
        # Normalise an InstallationCandidate (or similar) into an
        # InstallRequirement, pinned to its version when one is known.
        name = getattr(
            ireq, "project_name",
            getattr(ireq, "project", ireq.name),
        )
        version = getattr(ireq, "version", None)
        if version:
            requirement_line = "{0}=={1}".format(name, version)
        else:
            requirement_line = "{0}".format(name)
        ireq = pip_shims.shims.InstallRequirement.from_line(requirement_line)
    pip_options = get_pip_options(sources=sources)
    # Try the cheap resolution sources first, hitting the index only as a
    # last resort.
    getters = (
        get_dependencies_from_cache,
        get_dependencies_from_wheel_cache,
        get_dependencies_from_json,
        functools.partial(get_dependencies_from_index, pip_options=pip_options),
    )
    for getter in getters:
        deps = getter(ireq)
        if deps is not None:
            return deps
    raise RuntimeError('failed to get dependencies for {}'.format(ireq))
def words_for_language(language_code):
    """
    Return the math words for a language code.

    The language_code should be an ISO 639-2 language code.
    https://www.loc.gov/standards/iso639-2/php/code_list.php
    """
    word_groups = word_groups_for_language(language_code)
    # Flatten every group's word mapping into a single list of words.
    return [word for group_name in word_groups for word in word_groups[group_name]]
def _load_single_patient_cufflinks(self, patient, filter_ok):
"""
Load Cufflinks gene quantification given a patient
Parameters
----------
patient : Patient
filter_ok : bool, optional
If true, filter Cufflinks data to row with FPKM_status == "OK"
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Cufflinks data
columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi
"""
data = pd.read_csv(patient.tumor_sample.cufflinks_path, sep="\t")
data["patient_id"] = patient.id
if filter_ok:
# Filter to OK FPKM counts
data = data[data["FPKM_status"] == "OK"]
return data | Load Cufflinks gene quantification given a patient
Parameters
----------
patient : Patient
filter_ok : bool, optional
If true, filter Cufflinks data to row with FPKM_status == "OK"
Returns
-------
data: Pandas dataframe
Pandas dataframe of sample's Cufflinks data
columns include patient_id, gene_id, gene_short_name, FPKM, FPKM_conf_lo, FPKM_conf_hi |
def get_unread_message_count_between(parser, token):
    """
    Template tag returning the unread message count between two users.

    Syntax::
        {% get_unread_message_count_between [user] and [user] as [var_name] %}
    Example usage::
        {% get_unread_message_count_between funky and wunki as message_count %}
    """
    contents = token.contents
    try:
        tag_name, arg = contents.split(None, 1)
    except ValueError:
        raise template.TemplateSyntaxError("%s tag requires arguments" % contents.split()[0])
    match = re.search(r'(.*?) and (.*?) as (\w+)', arg)
    if match is None:
        raise template.TemplateSyntaxError("%s tag had invalid arguments" % tag_name)
    sender, recipient, var_name = match.groups()
    return MessageCount(sender, var_name, recipient)
Syntax::
{% get_unread_message_count_between [user] and [user] as [var_name] %}
Example usage::
{% get_unread_message_count_between funky and wunki as message_count %} |
def setup(self):
    """
    Copy the default stylesheets and javascript files if necessary and
    append them to the options.
    """
    from javatools import cheetah

    opts = self.options
    datadir = getattr(opts, "html_copy_data", None)
    # Skip if a parent report already copied the media, or if no
    # destination directory was configured at all.
    if getattr(opts, "html_data_copied", False) or not datadir:
        return

    # location of the installed default media
    datasrc = join(cheetah.__path__[0], "data")

    # record the .js and .css content we copy
    javascripts = []
    stylesheets = []
    for _orig, copied in copydir(datasrc, datadir):
        if copied.endswith(".js"):
            javascripts.append(copied)
        elif copied.endswith(".css"):
            stylesheets.append(copied)

    javascripts.extend(getattr(opts, "html_javascripts", tuple()))
    stylesheets.extend(getattr(opts, "html_stylesheets", tuple()))
    opts.html_javascripts = javascripts
    opts.html_stylesheets = stylesheets

    # flag prevents nested reports from copying the media again
    opts.html_data_copied = True
appends them to the options |
def Copy(self, field_number=None):
    """Return a copy of this descriptor, optionally with a new field number."""
    kwargs = dict(self._kwargs)
    if field_number is not None:
        kwargs["field_number"] = field_number
    return ProtoRDFValue(
        rdf_type=self.original_proto_type_name,
        default=getattr(self, "default", None),
        **kwargs)
def _path_to_baton_json(self, path: str) -> Dict:
    """
    Convert a path, via the iRODS entity type this mapper handles, into
    its baton JSON representation.

    :param path: the path to convert
    :return: the JSON representation of the path
    """
    return self._entity_to_baton_json(self._create_entity_with_path(path))
:param path: the path to convert
:return: the JSON representation of the path |
def header(self):
    '''
    Format this element's metadata as it would appear in a PLY
    header.
    '''
    lines = ['element %s %d' % (self.name, self.count)]
    # All comments are emitted between the 'element' line and the first
    # property definition, so their original positions are not preserved.
    lines.extend('comment ' + c for c in self.comments)
    lines.extend(str(p) for p in self.properties)
    return '\n'.join(lines)
header. |
def _copytoscratch(self, maps):
    """Copies the data in maps to the scratch space.
    If the maps contain arrays that are not the same shape as the scratch
    space, a new scratch space will be created.

    :param maps: mapping from input-parameter name to array-like values;
        must contain an entry for every name in ``self.inputs``
    """
    try:
        # fast path: in-place copy into the existing scratch arrays
        for p in self.inputs:
            self._scratch[p][:] = maps[p]
    except ValueError:
        # we'll get a ValueError if the scratch space isn't the same size
        # as the maps; in that case, re-create the scratch space with the
        # appropriate size and try again
        invals = maps[list(self.inputs)[0]]
        if isinstance(invals, numpy.ndarray):
            shape = invals.shape
        else:
            # non-array input: treat its length as the 1-D scratch size
            shape = len(invals)
        self._createscratch(shape)
        for p in self.inputs:
            self._scratch[p][:] = maps[p]
If the maps contain arrays that are not the same shape as the scratch
space, a new scratch space will be created. |
def _get_grain(name, proxy=None):
    '''
    Retrieves the grain value from the cached dictionary.

    Returns None implicitly when the cache lookup failed or produced
    no output.
    '''
    cached = _retrieve_grains_cache(proxy=proxy)
    out = cached.get('out', {})
    if cached.get('result', False) and out:
        return out.get(name)
def add_alignment_errors(self,ae):
    """If you already have the alignment errors, add them for profile construction.

    :param ae: alignment-errors object to append to this profile
    """
    # invalidate cached context-error profiles; they must be recomputed
    # now that the underlying error set has changed
    self._target_context_errors = None
    self._query_context_errors = None
    self._alignment_errors.append(ae)
    self._general_errors.add_alignment_errors(ae)
def height(self):
    """Get the height of a bounding box encapsulating the line."""
    if len(self.coords) <= 1:
        return 0
    # peak-to-peak range of the y coordinates is the bbox height
    return np.ptp(self.yy)
def _determine_tool(files):
"""Yields tuples in the form of (linker file, tool the file links for"""
for file in files:
linker_ext = file.split('.')[-1]
if "sct" in linker_ext or "lin" in linker_ext:
yield (str(file),"uvision")
elif "ld" in linker_ext:
yield (str(file),"make_gcc_arm")
elif "icf" in linker_ext:
yield (str(file),"iar_arm") | Yields tuples in the form of (linker file, tool the file links for |
def follow_path(file_path, buffering=-1, encoding=None, errors='strict'):
    """
    Similar to follow, but also looks up if inode of file is changed
    e.g. if it was re-created.
    Returned generator yields strings encoded by using encoding.
    If encoding is not specified, it defaults to locale.getpreferredencoding()

    :param file_path: path of the file to tail; may not exist yet
    :param buffering: buffering argument forwarded to io.open
    :param encoding: text encoding used to decode yielded lines
    :param errors: decode error policy forwarded to bytes.decode
    >>> import io
    >>> import os
    >>> f = io.open('test_follow_path.txt', 'w+')
    >>> generator = follow_path('test_follow_path.txt')
    >>> _ = f.write('Line 1\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 1
    >>> _ = f.write('Line 2\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 2
    >>> _ = f.truncate(0)
    >>> _ = f.seek(0)
    >>> _ = f.write('Line 3\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 3
    >>> f.close()
    >>> os.remove('test_follow_path.txt')
    >>> f = io.open('test_follow_path.txt', 'w+')
    >>> _ = f.write('Line 4\\n')
    >>> f.flush()
    >>> print(next(generator))
    Line 4
    >>> print(next(generator))
    None
    >>> f.close()
    >>> os.remove('test_follow_path.txt')
    """
    if encoding is None:
        encoding = locale.getpreferredencoding()
    # Generator object that transparently re-opens the file whenever the
    # path starts pointing at a different inode (delete + re-create).
    class FollowPathGenerator(object):
        def __init__(self):
            if os.path.isfile(file_path):
                self.following_file = io.open(file_path, 'rb', buffering)
                self.follow_generator = Tailer(self.following_file, end=True).follow()
                self.follow_from_end_on_open = False
            else:
                # file does not exist yet; start following once it appears
                self.following_file = None
                self.follow_generator = None
                self.follow_from_end_on_open = True
        def next(self):
            while True:
                if self.follow_generator:
                    line = next(self.follow_generator)
                else:
                    line = None
                if line is None:
                    # no data: check whether the path now refers to a
                    # different (or missing) inode than the open handle
                    if self.follow_generator:
                        try:
                            is_file_changed = not os.path.isfile(file_path) or os.stat(file_path).st_ino != os.fstat(self.following_file.fileno()).st_ino
                        except OSError:
                            # File could be deleted between isfile and stat invocations, which will make the latter to fail.
                            is_file_changed = True
                        if is_file_changed:
                            # File was deleted or re-created.
                            self.following_file.close()
                            self.following_file = None
                            self.follow_generator = None
                    if not self.follow_generator and os.path.isfile(file_path):
                        # New file is available. Open it.
                        try:
                            self.following_file = io.open(file_path, 'rb', buffering)
                            self.follow_generator = Tailer(self.following_file, end=self.follow_from_end_on_open).follow()
                            self.follow_from_end_on_open = False  # something could be written before we noticed change of file
                        except (IOError, OSError) as e:
                            LOG.info("Unable to tail file: %s", e)
                            if self.following_file:
                                self.following_file.close()
                            self.following_file= None
                            self.follow_generator = None
                            line = None
                        else:
                            line = next(self.follow_generator)
                return line.decode(encoding, errors) if line is not None else line
        def __iter__(self):
            return self
        def __next__(self):
            return self.next()
    return FollowPathGenerator()
e.g. if it was re-created.
Returned generator yields strings encoded by using encoding.
If encoding is not specified, it defaults to locale.getpreferredencoding()
>>> import io
>>> import os
>>> f = io.open('test_follow_path.txt', 'w+')
>>> generator = follow_path('test_follow_path.txt')
>>> _ = f.write('Line 1\\n')
>>> f.flush()
>>> print(next(generator))
Line 1
>>> _ = f.write('Line 2\\n')
>>> f.flush()
>>> print(next(generator))
Line 2
>>> _ = f.truncate(0)
>>> _ = f.seek(0)
>>> _ = f.write('Line 3\\n')
>>> f.flush()
>>> print(next(generator))
Line 3
>>> f.close()
>>> os.remove('test_follow_path.txt')
>>> f = io.open('test_follow_path.txt', 'w+')
>>> _ = f.write('Line 4\\n')
>>> f.flush()
>>> print(next(generator))
Line 4
>>> print(next(generator))
None
>>> f.close()
>>> os.remove('test_follow_path.txt') |
def get_class_from_settings_from_apps(settings_key):
    """Try to get a class from a settings path by looking in installed apps.

    :param settings_key: name of the Django setting holding a dotted
        ``'app_label.model_name'`` style path
    :return: the class named by the setting
    :raises NotImplementedError: if the setting is missing or empty
    :raises ImproperlyConfigured: if the path is malformed or the named
        app is not installed
    """
    cls_path = getattr(settings, settings_key, None)
    if not cls_path:
        raise NotImplementedError()
    try:
        app_label = cls_path.split('.')[-2]
        model_name = cls_path.split('.')[-1]
    except (IndexError, ValueError):
        # BUG FIX: a dotless path makes [-2] raise IndexError, which the
        # original "except ValueError" never caught, so the helpful
        # ImproperlyConfigured message was unreachable.
        raise ImproperlyConfigured("{0} must be of the form "
                                   "'app_label.model_name'".format(
                                       settings_key))
    app = apps.get_app_config(app_label).models_module
    if not app:
        raise ImproperlyConfigured("{0} setting refers to an app that has not "
                                   "been installed".format(settings_key))
    return getattr(app, model_name)
def init(ctx):
    """Initialize the project for use with EasyCI. This installs the necessary
    git hooks (pre-commit + pre-push) and adds a config file if one does not
    already exist.
    """
    git = ctx.obj['vcs']

    # Install hooks, clearing out the obsolete commit-msg hook first.
    click.echo("Installing hooks...", nl=False)
    for obsolete in ['commit-msg']:
        hook_path = os.path.join(git.path, '.git/hooks', obsolete)
        if os.path.exists(hook_path):
            os.remove(hook_path)
    for hook_name in ['pre-commit', 'pre-push']:
        git.install_hook(hook_name, hooks_manager.get_hook(hook_name))
    click.echo("Done.")

    # Drop in a trivial config file unless one already exists.
    config_path = os.path.join(git.path, 'eci.yaml')
    if not os.path.exists(config_path):
        click.echo("Placing a trivial config file in your project...", nl=False)
        with open(config_path, 'w') as f:
            f.write(yaml.safe_dump(
                {'tests': ['echo please modify to run your tests', 'true']}))
        click.echo("Done.")

    # initialize lock
    locking.init(git)

    # record which EasyCI version performed the setup
    click.echo("Updating installed version...", nl=False)
    set_installed_version(git, easyci.__version__)
    click.echo("Done.")
git hooks (pre-commit + pre-push) and add a config file if one does not
already exists. |
def present_params(paramlist, spacing = 0, maxchars=90, linecont=", &"):
"""Creates the (paramlist) for a method call formatted nicely for calls
with lots of parameters."""
#The +2 is spacing is for the tab indent at the start of the line.
#The +3 is for indent and the extra parenthesis at the start of the call.
line = []
length = 0
result = []
for param in paramlist:
extra = len(list(param))
if length + extra + 2 + spacing > maxchars:
result.append(", ".join(line) + linecont)
line = [ param ]
length = extra + 2
else:
line.append(param)
length += extra + 2
#Add on the remaining bits of the line
result.append(", ".join(line))
return "\n{}".format(" ".join([ "" for i in range(spacing + 3)])).join(result) | Creates the (paramlist) for a method call formatted nicely for calls
with lots of parameters. |
def _create_filter(self, condition):
    """ Create a filter object from a textual condition.

    :param condition: text like ``name=value``, ``name>value``, or a
        bare value matched against ``self.default_field``
    :return: a single filter object, or a :class:`CompoundFilterAny`
        when the value part lists several comma-separated alternatives
    :raises FilterError: for unknown fields, non-filterable fields, or
        malformed conditions
    """
    # "Normal" comparison operators?
    comparison = re.match(r"^(%s)(<[>=]?|>=?|!=|~)(.*)$" % self.ident_re, condition)
    if comparison:
        name, comparison, values = comparison.groups()
        if values and values[0] in "+-":
            raise FilterError("Comparison operator cannot be followed by '%s' in '%s'" % (values[0], condition))
        # rewrite the value into the canonical form for this operator
        values = self.COMPARISON_OPS[comparison] % values
    else:
        # Split name from value(s)
        try:
            name, values = condition.split('=', 1)
        except ValueError:
            if self.default_field:
                name, values = self.default_field, condition
            else:
                raise FilterError("Field name missing in '%s' (expected '=')" % condition)
    # Try to find field definition
    field = self.lookup(name)
    if not field:
        raise FilterError("Unknown field %r in %r" % (name, condition))
    if field.get("matcher") is None:
        raise FilterError("Field %r cannot be used as a filter" % (name,))
    # Make filters from values (split on commas outside of /…/)
    filters = []
    split_values = re.findall(r'(!?/[^/]*/|[^,]+)(?:,|$)', values) if values else ['']
    if not split_values:
        raise FilterError("Internal Error: Cannot split %r into match values" % (values,))
    for value in split_values:
        wrapper = None
        # a leading '!' negates this individual match value
        if value.startswith('!'):
            wrapper = NegateFilter
            value = value[1:]
        field_matcher = field["matcher"](name, value)
        filters.append(wrapper(field_matcher) if wrapper else field_matcher)
    # Return filters
    return CompoundFilterAny(filters) if len(filters) > 1 else filters[0]
def serialize(self, private=True):
    """Serialize this key.
    :param private: Whether or not the serialized key should contain
        private information. Set to False for a public-only representation
        that cannot spend funds but can create children. You want
        private=False if you are, for example, running an e-commerce
        website and want to accept bitcoin payments. See the README
        for more information.
    :type private: bool, defaults to True
    :raises ValueError: when a private serialization is requested but
        this key holds no private key material
    See the spec in `deserialize` for more details.
    """
    if private and not self.private_key:
        raise ValueError("Cannot serialize a public key as private")
    # version prefix selects the extended-private vs extended-public form
    if private:
        network_version = long_to_hex(
            self.network.EXT_SECRET_KEY, 8)
    else:
        network_version = long_to_hex(
            self.network.EXT_PUBLIC_KEY, 8)
    depth = long_to_hex(self.depth, 2)
    parent_fingerprint = self.parent_fingerprint[2:]  # strip leading 0x
    child_number = long_to_hex(self.child_number, 8)
    chain_code = self.chain_code
    # common prefix shared by both serializations (hex-string pieces)
    ret = (network_version + depth + parent_fingerprint + child_number +
           chain_code)
    # Private and public serializations are slightly different
    if private:
        # private key data is padded with a '00' byte prefix
        ret += b'00' + self.private_key.get_key()
    else:
        ret += self.get_public_key_hex(compressed=True)
    return ensure_bytes(ret.lower())
:param private: Whether or not the serialized key should contain
private information. Set to False for a public-only representation
that cannot spend funds but can create children. You want
private=False if you are, for example, running an e-commerce
website and want to accept bitcoin payments. See the README
for more information.
:type private: bool, defaults to True
See the spec in `deserialize` for more details. |
def check_grid_mapping(self, ds):
    """
    Check CF section 5.6 grid mapping variables.

    When the coordinate variables for a horizontal grid are not longitude
    and latitude, the mapping between the given coordinate variables and
    the true latitude and longitude coordinates may be described via the
    grid_mapping attribute, which names a grid mapping variable. That
    variable carries a grid_mapping_name attribute plus the map-parameter
    attributes listed in Appendix F, Grid Mappings.

    This check verifies that:
    - every grid_mapping attribute is a non-empty string naming variables
      that exist in the dataset,
    - each grid mapping variable has a grid_mapping_name from Appendix F,
    - all required attributes for that mapping are present,
    - exactly one of any mutually exclusive attributes is present, and
    - exactly one variable exists for each required standard_name.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)

    # Check the grid_mapping attribute to be a non-empty string and that
    # every variable it references exists
    for variable in ds.get_variables_by_attributes(grid_mapping=lambda x: x is not None):
        grid_mapping = getattr(variable, 'grid_mapping', None)
        defines_grid_mapping = TestCtx(BaseCheck.HIGH,
                                       self.section_titles["5.6"])
        # BUG FIX: the original concatenated an unformatted prefix with a
        # formatted suffix ("..." + "...".format(...)), so the variable
        # name never made it into the message; format the whole string.
        defines_grid_mapping.assert_true(
            (isinstance(grid_mapping, basestring) and grid_mapping),
            "{}'s grid_mapping attribute must be a "
            "space-separated non-empty string".format(variable.name))
        if isinstance(grid_mapping, basestring):
            for grid_var_name in grid_mapping.split():
                # report the *referenced* variable, not the referring one
                defines_grid_mapping.assert_true(
                    grid_var_name in ds.variables,
                    "grid mapping variable {} must exist in this dataset".format(grid_var_name))
        ret_val.append(defines_grid_mapping.to_result())

    # Check the grid mapping variables themselves
    for grid_var_name in grid_mapping_variables:
        valid_grid_mapping = TestCtx(BaseCheck.HIGH, self.section_titles["5.6"])
        grid_var = ds.variables[grid_var_name]
        grid_mapping_name = getattr(grid_var, 'grid_mapping_name', None)

        # Grid mapping name must be in appendix F
        valid_grid_mapping.assert_true(
            grid_mapping_name in grid_mapping_dict,
            "{} is not a valid grid_mapping_name.".format(grid_mapping_name) +
            " See Appendix F for valid grid mappings")

        # We can't do any of the other grid mapping checks if it's not a
        # valid grid mapping name
        if grid_mapping_name not in grid_mapping_dict:
            ret_val.append(valid_grid_mapping.to_result())
            continue

        # The grid_mapping_dict entry for a mapping holds:
        #   [0] required attributes
        #   [1] optional attributes (can't check)
        #   [2] required standard_names defined
        #   [3] (optional) attributes of which exactly one must be defined
        grid_mapping = grid_mapping_dict[grid_mapping_name]
        required_attrs = grid_mapping[0]
        # Make sure all the required attributes are defined
        for req in required_attrs:
            valid_grid_mapping.assert_true(
                hasattr(grid_var, req),
                "{} is a required attribute for grid mapping {}".format(req, grid_mapping_name))

        # Make sure that exactly one of the exclusive attributes exists.
        # BUG FIX: the original tested len(grid_mapping_dict) (the whole
        # Appendix F table) and indexed grid_mapping_dict[3]; it must use
        # this mapping's own entry.
        if len(grid_mapping) == 4:
            at_least_attr = grid_mapping[3]
            number_found = 0
            for attr in at_least_attr:
                if hasattr(grid_var, attr):
                    number_found += 1
            valid_grid_mapping.assert_true(
                number_found == 1,
                "grid mapping {} ".format(grid_mapping_name) +
                "must define exactly one of these attributes: " +
                "{}".format(' or '.join(at_least_attr)))

        # Make sure that exactly one variable is defined for each of the
        # required standard_names
        expected_std_names = grid_mapping[2]
        for expected_std_name in expected_std_names:
            found_vars = ds.get_variables_by_attributes(standard_name=expected_std_name)
            valid_grid_mapping.assert_true(
                len(found_vars) == 1,
                "grid mapping {} requires exactly ".format(grid_mapping_name) +
                "one variable with standard_name " +
                "{} to be defined".format(expected_std_name))

        ret_val.append(valid_grid_mapping.to_result())
    return ret_val
return ret_val | 5.6 When the coordinate variables for a horizontal grid are not
longitude and latitude, it is required that the true latitude and
longitude coordinates be supplied via the coordinates attribute. If in
addition it is desired to describe the mapping between the given
coordinate variables and the true latitude and longitude coordinates,
the attribute grid_mapping may be used to supply this description.
This attribute is attached to data variables so that variables with
different mappings may be present in a single file. The attribute takes
a string value which is the name of another variable in the file that
provides the description of the mapping via a collection of attached
attributes. This variable is called a grid mapping variable and is of
arbitrary type since it contains no data. Its purpose is to act as a
container for the attributes that define the mapping.
The one attribute that all grid mapping variables must have is
grid_mapping_name which takes a string value that contains the mapping's
name. The other attributes that define a specific mapping depend on the
value of grid_mapping_name. The valid values of grid_mapping_name along
with the attributes that provide specific map parameter values are
described in Appendix F, Grid Mappings.
When the coordinate variables for a horizontal grid are longitude and
latitude, a grid mapping variable with grid_mapping_name of
latitude_longitude may be used to specify the ellipsoid and prime
meridian.
In order to make use of a grid mapping to directly calculate latitude
and longitude values it is necessary to associate the coordinate
variables with the independent variables of the mapping. This is done by
assigning a standard_name to the coordinate variable. The appropriate
values of the standard_name depend on the grid mapping and are given in
Appendix F, Grid Mappings.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results |
def parse(self, xmp):
    """Run parser and return a dictionary of all the parsed metadata.

    :param xmp: the XMP packet as an XML string/bytes
    :return: dict mapping namespace -> {tag: value}
    """
    tree = etree.fromstring(xmp)
    rdf_tree = tree.find(RDF_NS + 'RDF')
    meta = defaultdict(dict)
    for desc in rdf_tree.findall(RDF_NS + 'Description'):
        # Iterate the element directly: Element.getchildren() was
        # deprecated and removed in Python 3.9.
        for el in desc:
            ns, tag = self._parse_tag(el)
            value = self._parse_value(el)
            meta[ns][tag] = value
    return dict(meta)
def check_permission(permission, obj):
    """
    Returns if the current user has rights for the permission passed in against
    the obj passed in

    :param permission: name of the permission
    :param obj: the object to check the permission against for the current user
    :return: 1 if the user has rights for this permission for the passed in obj
    """
    mtool = api.get_tool('portal_membership')
    # resolve brains/UIDs to the real object; avoid shadowing the
    # builtin name ``object`` as the original did
    target = api.get_object(obj)
    return mtool.checkPermission(permission, target)
return mtool.checkPermission(permission, object) | Returns if the current user has rights for the permission passed in against
the obj passed in
:param permission: name of the permission
:param obj: the object to check the permission against for the current user
:return: 1 if the user has rights for this permission for the passed in obj |
def visitLexerTerminal(self, ctx: jsgParser.LexerTerminalContext):
    """ terminal: LEXER_ID | STRING

    Append the terminal to the rule pattern being built: LEXER_ID
    tokens become '({NAME})' placeholders and are recorded for later
    substitution; STRING literals are added with their first and last
    characters (presumably the quotes) stripped.
    """
    if ctx.LEXER_ID():
        # Substitute LEXER_ID with its string equivalent - "{LEXER_ID}".format(LEXER_ID=LEXER_ID.pattern)
        idtoken = as_token(ctx)
        self._rulePattern += '({' + idtoken + '})'
        self._ruleTokens.add(idtoken)
    else:
        self.add_string(ctx.getText()[1:-1], False)
def lag_plot(data, lag=1, kind="scatter", **kwds):
    """Lag plot for time series.

    Parameters
    ----------
    data: pandas.Series
        the time series to plot
    lag: integer
        The lag of the scatter plot, default=1
    kind: string
        The kind of plot to use (e.g. 'scatter', 'line')
    **kwds:
        Additional keywords passed to data.vgplot.scatter

    Returns
    -------
    chart: alt.Chart object
    """
    if lag != int(lag) or int(lag) <= 0:
        raise ValueError("lag must be a positive integer")
    lag = int(lag)

    values = data.values
    x_label = "y(t)"
    y_label = "y(t + {0})".format(lag)
    # pair each observation with the one `lag` steps later
    lags = pd.DataFrame({x_label: values[:-lag].T.ravel(),
                         y_label: values[lag:].T.ravel()})

    if isinstance(data, pd.DataFrame):
        # one series per column: tag rows with their source column
        lags["variable"] = np.repeat(data.columns, lags.shape[0] / data.shape[1])
        kwds["c"] = "variable"

    return lags.vgplot(kind=kind, x=x_label, y=y_label, **kwds)
Parameters
----------
data: pandas.Series
the time series to plot
lag: integer
The lag of the scatter plot, default=1
kind: string
The kind of plot to use (e.g. 'scatter', 'line')
**kwds:
Additional keywords passed to data.vgplot.scatter
Returns
-------
chart: alt.Chart object |
def _fetch_pageviews(self, storage, year, week, ip_users=False):
    """
    Fetch PageViews from Elasticsearch for one ISO week and write them
    to a weekly storage file.

    :param storage: unused; the method reads ``self.storage`` instead
        (NOTE(review): looks like a leftover parameter — confirm callers)
    :param year: year of the week to fetch
    :param week: ISO week number to fetch
    :param ip_users: if True, fetch anonymous hits (``id_user == 0``,
        identified by IP); otherwise only logged-in users
    """
    prefix = 'Pageviews'
    if ip_users:
        query_add = "AND !(bot:True) AND (id_user:0)"
        prefix += '_IP'
    else:
        query_add = "AND !(bot:True) AND !(id_user:0)"
    store = self.storage.get(prefix, year, week)
    # skip weeks already exported, unless overwriting is enabled
    if not self.config['overwrite_files'] and store.does_file_exist():
        logger.debug("File already exist, skip: {}-{}".format(year, week))
        return
    store.open('overwrite')
    time_from, time_to = get_week_dates(year, week, as_timestamp=True)
    es_type = "events.pageviews"
    # Elasticsearch timestamps are in milliseconds, hence the * 1000
    es_query = self.ES_QUERY % {'timestamp_start': time_from * 1000,
                                'timestamp_end': time_to * 1000,
                                'event_name': es_type,
                                'query_add': query_add}
    logger.info("{}: {} - {}".format(es_type, time_from, time_to))
    for hit in self._fetch_elasticsearch(es_query):
        item = {}
        try:
            item['user'] = hit['_source'].get('id_user')
            # sanity-check the hit against the user filter we queried for
            if ip_users:
                assert 0 == item['user']
            else:
                assert 0 != item['user']
            assert es_type == hit['_type']
            item['timestamp'] = float(hit['_source']['@timestamp']) / 1000
            if ip_users:
                item['ip'] = str(hit['_source'].get('client_host'))
            user_agent = str(hit['_source'].get('user_agent'))
            if user_agent is None or user_agent == 'None':
                continue
            elif _is_bot(user_agent):
                # drop crawler traffic not already excluded by the query
                continue
            item['user_agent'] = user_agent
            item['recid'] = int(hit['_source'].get('id_bibrec'))
        except UnicodeEncodeError as e:
            # TODO: Error logging.
            # print(e)
            continue
        # Save entry
        store.add_hit(item)
    store.close()
    # Delete File if no hits were added.
    if store.number_of_hits == 0:
        store.delete()
:param time_from: Staring at timestamp.
:param time_to: To timestamp |
def _get_num_locations(d):
    """
    Find out how many locations are being parsed. Compare lengths of each
    coordinate list and return the max.

    :param dict d: Geo metadata
    :return int: Max number of locations (0 when nothing was found)
    """
    lengths = []
    for key in EXCEL_GEO:
        try:
            if key != "siteName":
                lengths.append(len(d[key]))
        except Exception:
            # missing key or un-sized value: count it as one location,
            # matching the original best-effort behaviour
            lengths.append(1)
    # max() with a default avoids the try/except ValueError dance on an
    # empty list
    return max(lengths, default=0)
coordinate list and return the max
:param dict d: Geo metadata
:return int: Max number of locations |
def _get_derived_feature_types(self, limit):
    """
    Make a pass through the feature table in order to properly type
    the FBal (allele) features, which are derived either from other
    sequence features (which can be things like RNAi products)
    or transgenic-transposons. We'll save the allele type into a hashmap
    (``self.feature_types``).

    :param limit: unused in this method (accepted for interface
        consistency with the other parsing passes)
    :return: None
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    raw = '/'.join((self.rawdir, 'feature_relationship'))
    LOG.info("determining some feature types based on relationships")
    with open(raw, 'r') as f:
        f.readline()  # read the header row; skip
        filereader = csv.reader(f, delimiter='\t', quotechar='\"')
        for line in filereader:
            (feature_relationship_id, subject_id, object_id, name, rank,
             value) = line
            if name == 'derived_tp_assoc_alleles':
                # derived_tp_assoc_alleles
                self.feature_types[subject_id] = \
                    self.globaltt['transgenic_insertion']
                sid = self.idhash['allele'].get(subject_id)
                model.addType(sid, self.feature_types[subject_id])
            elif name == 'derived_sf_assoc_alleles':
                # only take the derived_sf_assoc_alleles
                # my subject is a reagent_targeted_gene
                # my object is the dsRNA
                self.feature_types[subject_id] = \
                    self.globaltt['reagent_targeted_gene']
                sid = self.idhash['allele'].get(subject_id)
                model.addType(sid, self.feature_types[subject_id])
            else:
                continue
    return
return | Make a pass through the feature table in order to properly type
the FBal (allele) features, which are derived either from other
sequence features (which can be things like RNAi products)
or transgenic-transposons. We'll save the allele type into a hasmap.
:param limit:
:return: |
async def save_changes(self, turn_context: TurnContext, force: bool = False) -> None:
    """
    If it has changed, writes to storage the state object that is cached in the current context object for this turn.

    :param turn_context: The context object for this turn.
    :param force: Optional. True to save state to storage whether or not there are changes.
    :raises TypeError: If turn_context is None.
    """
    if turn_context is None:
        raise TypeError('BotState.save_changes(): turn_context cannot be None.')

    cached_state = turn_context.turn_state.get(self._context_service_key)
    # identity/truthiness tests replace the original's "== None" and
    # "== True" comparisons
    if force or (cached_state is not None and cached_state.is_changed):
        storage_key = self.get_storage_key(turn_context)
        changes: Dict[str, object] = {storage_key: cached_state.state}
        await self._storage.write(changes)
        # refresh the hash so later is_changed checks compare against the
        # state that was just persisted
        cached_state.hash = cached_state.compute_hash(cached_state.state)
:param turn_context: The context object for this turn.
:param force: Optional. True to save state to storage whether or not there are changes. |
def rotate(self, angle, direction='z', axis=None):
    """
    Returns a new Place which is the same but rotated about a
    given axis.

    If the axis given is ``None``, the rotation will be computed
    about the Place's centroid.

    :param angle: Rotation angle (in radians)
    :type angle: float
    :param direction: Axis direction ('x', 'y' or 'z')
    :type direction: str
    :param axis: Point in z=0 to perform as rotation axis
    :type axis: tuple (len=2 or 3) or None
    :returns: ``pyny.Place``
    """
    # Wrap this Place in a one-element Space, rotate, and unwrap.
    rotated_space = Space(self).rotate(angle, direction, axis)
    return rotated_space[0]
given axis.
If the axis given is ``None``, the rotation will be computed
about the Place's centroid.
:param angle: Rotation angle (in radians)
:type angle: float
:param direction: Axis direction ('x', 'y' or 'z')
:type direction: str
:param axis: Point in z=0 to perform as rotation axis
:type axis: tuple (len=2 or 3) or None
:returns: ``pyny.Place`` |
def to_native_types(self, slicer=None, na_rep='nan', quoting=None,
                    **kwargs):
    """ convert to our native types format, slicing if desired

    :param slicer: optional column slice applied to the values
    :param na_rep: string used to represent missing values
    :param quoting: csv quoting level; when falsy (and the block is not
        object dtype) values are coerced to str
    """
    values = self.get_values()
    if slicer is not None:
        values = values[:, slicer]
    mask = isna(values)
    if not self.is_object and not quoting:
        values = values.astype(str)
    else:
        # presumably kept as object dtype so downstream quoting rules
        # can distinguish strings from other values — confirm
        values = np.array(values, dtype='object')
    values[mask] = na_rep
    return values
def get_parser():
    """Return the parser object for this script."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("-n", dest="n", default=10, type=int,
                        help="Show TOP-N results")
    parser.add_argument("--port", dest="port", default=5000, type=int,
                        help="where should the webserver run")
    parser.add_argument("--use_segmenter", dest="use_segmenter",
                        action='store_true', default=False,
                        help=("try to segment the input for multiple symbol "
                              "recognition"))
    return parser
def _vec_alpha(self, donor_catchments):
    """
    Return vector alpha which is the weights for donor model errors.

    Methodology source: Kjeldsen, Jones & Morris 2014, eq 10

    :param donor_catchments: Catchments to use as donors
    :type donor_catchments: list of :class:`Catchment`
    :return: Vector of donor weights
    :rtype: :class:`numpy.ndarray`
    """
    # alpha = omega^-1 . b
    omega_inv = linalg.inv(self._matrix_omega(donor_catchments))
    b_vec = self._vec_b(donor_catchments)
    return np.dot(omega_inv, b_vec)
Methodology source: Kjeldsen, Jones & Morris 2014, eq 10
:param donor_catchments: Catchments to use as donors
:type donor_catchments: list of :class:`Catchment`
:return: Vector of donor weights
:rtype: :class:`numpy.ndarray` |
def loop(self, intro=None):
    """Run the interactive command loop until a handler signals stop.

    Modeled on :meth:`cmd.Cmd.cmdloop`: fires the ``preloop`` hook,
    optionally prints a banner, then repeatedly reads one line (via
    ``self.inputter`` when ``use_rawinput`` is set, otherwise from
    ``self.stdin``), dispatches it through :meth:`onecmd` and the
    ``precmd``/``postcmd`` hooks, and fires ``postloop`` on exit.

    :param intro: optional banner text; overrides ``self.intro`` when given
    """
    self.fire("preloop")
    if intro is not None:
        self.intro = intro
    if self.intro is not None:
        self.stdout.write(self.intro + "\n")
        self.stdout.flush()
    stop = None
    while not stop:
        if self.use_rawinput:
            try:
                line = self.inputter(self.prompt)
            except EOFError:
                # end-of-input sentinel (e.g. Ctrl-D at the prompt)
                line = self._eof
        else:
            self.stdout.write(self.prompt)
            self.stdout.flush()
            line = self.stdin.readline()
            if not len(line):
                # empty read means the underlying stream is exhausted
                line = self._eof
            else:
                line = line.rstrip("\r\n")
        # hooks may rewrite the line and override the stop decision
        line = self.fire("precmd", line)
        stop, results = self.onecmd(line)
        stop, results = self.fire("postcmd", stop, results, line)
    self.fire("postloop")
See Python's cmd.Cmd.cmdloop for some (somewhat horrifying)
example loops - that's what we're working similarly to. |
def writeAMF3(self, data):
    """Write *data* as an L{AMF3<pyamf.amf3>} element, preceded by the AMF3 type marker."""
    self.writeType(TYPE_AMF3)
    encoder = self.context.getAMF3Encoder(self)
    encoder.writeElement(data)
def get_cachedir_csig(self):
    """Fetch this Node's content signature for computing another Node's cachesig.

    Wraps the normal get_csig() to handle CacheDir with the -n option:
    under -n a file is not actually pulled from the cache, so the local
    file may not exist even though the cached copy does. In that case the
    signature is computed from the cachedir copy's contents instead.
    The result is memoized on ``self.cachedir_csig``.
    """
    if not hasattr(self, 'cachedir_csig'):
        cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
        if not self.exists() and cachefile and os.path.exists(cachefile):
            # local file missing (-n), but the cached copy exists: sign that
            self.cachedir_csig = SCons.Util.MD5filesignature(
                cachefile, SCons.Node.FS.File.md5_chunksize * 1024)
        else:
            self.cachedir_csig = self.get_csig()
    return self.cachedir_csig
another Node's cachesig.
This is a wrapper around the normal get_csig() method that handles
the somewhat obscure case of using CacheDir with the -n option.
Any files that don't exist would normally be "built" by fetching
them from the cache, but the normal get_csig() method will try
to open up the local file, which doesn't exist because the -n
option meant we didn't actually pull the file from cachedir.
But since the file *does* actually exist in the cachedir, we
can use its contents for the csig. |
def _datatype_size(datatype, numElms):  # @NoSelf
    '''
    Gets the per-element size in bytes of a CDF data type.
    Parameters:
        datatype : int or str
            CDF variable data type (numeric code or a 'CDF_*' name,
            case-insensitive)
        numElms : int
            number of elements (only relevant for character types)
    Returns:
        numBytes : int
            The number of bytes for the data, or -1 when unknown
    '''
    numeric_sizes = {1: 1, 2: 2, 4: 4, 8: 8,
                     11: 1, 12: 2, 14: 4,
                     21: 4, 22: 8,
                     31: 8, 32: 16, 33: 8,
                     41: 1, 44: 4, 45: 8}
    name_sizes = {'CDF_INT1': 1, 'CDF_UINT1': 1, 'CDF_BYTE': 1,
                  'CDF_INT2': 2, 'CDF_UINT2': 2,
                  'CDF_INT4': 4, 'CDF_UINT4': 4,
                  'CDF_INT8': 8, 'CDF_TIME_TT2000': 8,
                  'CDF_REAL4': 4, 'CDF_FLOAT': 4,
                  'CDF_REAL8': 8, 'CDF_DOUBLE': 8, 'CDF_EPOCH': 8,
                  'CDF_EPOCH16': 16}
    try:
        if isinstance(datatype, int):
            # codes 51/52 are character types: size scales with numElms
            if datatype in (51, 52):
                return numElms
            return numeric_sizes[datatype]
        name = datatype.upper()
        if name in ('CDF_CHAR', 'CDF_UCHAR'):
            return numElms
        return name_sizes.get(name, -1)
    except Exception:
        # unknown code or a value that is neither int nor str
        return -1
Parameters:
datatype : int
CDF variable data type
numElms : int
number of elements
Returns:
numBytes : int
The number of bytes for the data |
def filter_resp(self, action_resp, filter_params):
    """Filter an action's response to make printed results more specific.

    :param action_resp: named tuple (CommandsResponse)
        containing response from action.
    :param filter_params: params used after '|' specific for given filter
    :return: filtered response.
    """
    # Pass failures through untouched; only successful output is filtered.
    if action_resp.status != STATUS_OK:
        return action_resp
    try:
        filtered_value = TextFilter.filter(action_resp.value, filter_params)
    except FilterError as err:
        return CommandsResponse(STATUS_ERROR, str(err))
    return CommandsResponse(STATUS_OK, filtered_value)
specific
:param action_resp: named tuple (CommandsResponse)
containing response from action.
:param filter_params: params used after '|' specific for given filter
:return: filtered response. |
def get_info(self, params=None):
    """
    Gets mailbox info.
    @param params: params to retrieve; defaults to an empty dict
    @return: AccountInfo
    """
    # Avoid the shared-mutable-default anti-pattern: build a fresh dict
    # per call instead of reusing one {} across all invocations.
    if params is None:
        params = {}
    return self.invoke(zconstant.NS_ZIMBRA_ACC_URL,
                       sconstant.GetInfoRequest,
                       params)
@param params: params to retrieve
@return: AccountInfo |
def restore_scoped_package_version_from_recycle_bin(self, package_version_details, feed_id, package_scope, unscoped_package_name, package_version):
    """RestoreScopedPackageVersionFromRecycleBin.
    [Preview API] Restore a package version with an npm scope from the recycle bin to its feed.
    :param :class:`<NpmRecycleBinPackageVersionDetails> <azure.devops.v5_0.npm.models.NpmRecycleBinPackageVersionDetails>` package_version_details:
    :param str feed_id: Name or ID of the feed.
    :param str package_scope: Scope of the package (the 'scope' part of @scope/name).
    :param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
    :param str package_version: Version of the package.
    """
    # Serialize every supplied (non-None) route parameter.
    route_spec = (('feedId', 'feed_id', feed_id),
                  ('packageScope', 'package_scope', package_scope),
                  ('unscopedPackageName', 'unscoped_package_name', unscoped_package_name),
                  ('packageVersion', 'package_version', package_version))
    route_values = {route_key: self._serialize.url(param_name, value, 'str')
                    for route_key, param_name, value in route_spec
                    if value is not None}
    content = self._serialize.body(package_version_details, 'NpmRecycleBinPackageVersionDetails')
    self._send(http_method='PATCH',
               location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
               version='5.0-preview.1',
               route_values=route_values,
               content=content)
[Preview API] Restore a package version with an npm scope from the recycle bin to its feed.
:param :class:`<NpmRecycleBinPackageVersionDetails> <azure.devops.v5_0.npm.models.NpmRecycleBinPackageVersionDetails>` package_version_details:
:param str feed_id: Name or ID of the feed.
:param str package_scope: Scope of the package (the 'scope' part of @scope/name).
:param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
:param str package_version: Version of the package. |
def export(self, name, columns, points):
    """Write the points to the ZeroMQ server."""
    logger.debug("Export {} stats to ZeroMQ".format(name))
    # Create DB input
    stats = dict(zip(columns, points))
    # Do not publish empty stats
    if not stats:
        return False
    # Glances envelopes the stats in a publish message with two frames:
    # - First frame containing the following prefix (STRING)
    # - Second frame with the Glances plugin name (STRING)
    # - Third frame with the Glances plugin stats (JSON)
    frames = [b(self.prefix),
              b(name),
              asbytes(json.dumps(stats))]
    # Write data to the ZeroMQ bus
    # Result can be view: tcp://host:port
    try:
        self.client.send_multipart(frames)
    except Exception as err:
        logger.error("Cannot export {} stats to ZeroMQ ({})".format(name, err))
    return True
def get_logger(cls, *name, **kwargs):
    """Construct a new :class:`KvLoggerAdapter` which encapsulates
    the :class:`logging.Logger` specified by ``name``.
    :param name:
        Any amount of symbols. Will be concatenated and normalized
        to form the logger name. Can also be empty.
    :param extra:
        Additional context relevant information.
    :return:
        A new :class:`KvLoggerAdapter` instance ready to use.
    :rtype:
        :class:`KvLoggerAdapter`
    """
    extra = kwargs.get('extra')
    wrapped_logger = getLogger(_normalize_name(name))
    return cls(wrapped_logger, extra)
the :class:`logging.Logger` specified by ``name``.
:param name:
Any amount of symbols. Will be concatenated and normalized
to form the logger name. Can also be empty.
:param extra:
Additional context relevant information.
:return:
A new :class:`KvLoggerAdapter` instance ready to use.
:rtype:
:class:`KvLoggerAdapter` |
def _indent(indent=0, quote='', indent_char=' '):
    """Indent util function: compute a new indent string and record it.

    Positive ``indent`` pads the quote prefix out to ``indent`` columns;
    non-positive ``indent`` prepends backspace characters instead.
    Non-empty results are appended to the module-level INDENT_STRINGS.
    """
    width = indent - len(quote)
    if indent > 0:
        indent_string = str(quote) + indent_char * width
    else:
        # negative width: emit backspaces before the quote
        indent_string = '\x08' * (-1 * width) + str(quote)
    if indent_string:
        INDENT_STRINGS.append(indent_string)
def blend_alpha(image_fg, image_bg, alpha, eps=1e-2):
    """
    Blend two images using an alpha blending.
    In an alpha blending, the two images are naively mixed. Let ``A`` be the foreground image
    and ``B`` the background image and ``a`` is the alpha value. Each pixel intensity is then
    computed as ``a * A_ij + (1-a) * B_ij``.
    dtype support::
        * ``uint8``: yes; fully tested
        * ``uint16``: yes; fully tested
        * ``uint32``: yes; fully tested
        * ``uint64``: yes; fully tested (1)
        * ``int8``: yes; fully tested
        * ``int16``: yes; fully tested
        * ``int32``: yes; fully tested
        * ``int64``: yes; fully tested (1)
        * ``float16``: yes; fully tested
        * ``float32``: yes; fully tested
        * ``float64``: yes; fully tested (1)
        * ``float128``: no (2)
        * ``bool``: yes; fully tested (3)
        - (1) Tests show that these dtypes work, but a conversion to float128 happens, which only
              has 96 bits of size instead of true 128 bits and hence not twice as much resolution.
              It is possible that these dtypes result in inaccuracies, though the tests did not
              indicate that.
        - (2) Not available due to the input dtype having to be increased to an equivalent float
              dtype with two times the input resolution.
        - (3) Mapped internally to ``float32``.
    Parameters
    ----------
    image_fg : (H,W,[C]) ndarray
        Foreground image. Shape and dtype kind must match the one of the
        background image.
    image_bg : (H,W,[C]) ndarray
        Background image. Shape and dtype kind must match the one of the
        foreground image.
    alpha : number or iterable of number or ndarray
        The blending factor, between 0.0 and 1.0. Can be interpreted as the opacity of the
        foreground image. Values around 1.0 result in only the foreground image being visible.
        Values around 0.0 result in only the background image being visible.
        Multiple alphas may be provided. In these cases, there must be exactly one alpha per
        channel in the foreground/background image. Alternatively, for ``(H,W,C)`` images,
        either one ``(H,W)`` array or an ``(H,W,C)`` array of alphas may be provided,
        denoting the elementwise alpha value.
    eps : number, optional
        Controls when an alpha is to be interpreted as exactly 1.0 or exactly 0.0, resulting
        in only the foreground/background being visible and skipping the actual computation.
    Returns
    -------
    image_blend : (H,W,C) ndarray
        Blend of foreground and background image.
    """
    assert image_fg.shape == image_bg.shape
    assert image_fg.dtype.kind == image_bg.dtype.kind
    # TODO switch to gate_dtypes()
    assert image_fg.dtype.name not in ["float128"]
    assert image_bg.dtype.name not in ["float128"]
    # TODO add test for this
    input_was_2d = (len(image_fg.shape) == 2)
    if input_was_2d:
        image_fg = np.atleast_3d(image_fg)
        image_bg = np.atleast_3d(image_bg)
    input_was_bool = False
    if image_fg.dtype.kind == "b":
        input_was_bool = True
        # use float32 instead of float16 here because it seems to be faster
        image_fg = image_fg.astype(np.float32)
        image_bg = image_bg.astype(np.float32)
    alpha = np.array(alpha, dtype=np.float64)
    if alpha.size == 1:
        pass
    else:
        if alpha.ndim == 2:
            assert alpha.shape == image_fg.shape[0:2]
            alpha = alpha.reshape((alpha.shape[0], alpha.shape[1], 1))
        elif alpha.ndim == 3:
            assert alpha.shape == image_fg.shape or alpha.shape == image_fg.shape[0:2] + (1,)
        else:
            alpha = alpha.reshape((1, 1, -1))
        if alpha.shape[2] != image_fg.shape[2]:
            alpha = np.tile(alpha, (1, 1, image_fg.shape[2]))
    if not input_was_bool:
        if np.all(alpha >= 1.0 - eps):
            return np.copy(image_fg)
        elif np.all(alpha <= eps):
            return np.copy(image_bg)
    # for efficiency reaons, only test one value of alpha here, even if alpha is much larger
    assert 0 <= alpha.item(0) <= 1.0
    dt_images = iadt.get_minimal_dtype([image_fg, image_bg])
    # doing this only for non-float images led to inaccuracies for large floats values
    isize = dt_images.itemsize * 2
    isize = max(isize, 4)  # at least 4 bytes (=float32), tends to be faster than float16
    dt_blend = np.dtype("f%d" % (isize,))
    if alpha.dtype != dt_blend:
        alpha = alpha.astype(dt_blend)
    if image_fg.dtype != dt_blend:
        image_fg = image_fg.astype(dt_blend)
    if image_bg.dtype != dt_blend:
        image_bg = image_bg.astype(dt_blend)
    # the following is equivalent to
    #     image_blend = alpha * image_fg + (1 - alpha) * image_bg
    # but supposedly faster
    image_blend = image_bg + alpha * (image_fg - image_bg)
    if input_was_bool:
        image_blend = image_blend > 0.5
    else:
        # skip clip, because alpha is expected to be in range [0.0, 1.0] and both images must have same dtype
        # dont skip round, because otherwise it is very unlikely to hit the image's max possible value
        image_blend = iadt.restore_dtypes_(image_blend, dt_images, clip=False, round=True)
    if input_was_2d:
        return image_blend[:, :, 0]
    return image_blend
In an alpha blending, the two images are naively mixed. Let ``A`` be the foreground image
and ``B`` the background image and ``a`` is the alpha value. Each pixel intensity is then
computed as ``a * A_ij + (1-a) * B_ij``.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: yes; fully tested
* ``uint32``: yes; fully tested
* ``uint64``: yes; fully tested (1)
* ``int8``: yes; fully tested
* ``int16``: yes; fully tested
* ``int32``: yes; fully tested
* ``int64``: yes; fully tested (1)
* ``float16``: yes; fully tested
* ``float32``: yes; fully tested
* ``float64``: yes; fully tested (1)
* ``float128``: no (2)
* ``bool``: yes; fully tested (2)
- (1) Tests show that these dtypes work, but a conversion to float128 happens, which only
has 96 bits of size instead of true 128 bits and hence not twice as much resolution.
It is possible that these dtypes result in inaccuracies, though the tests did not
indicate that.
- (2) Not available due to the input dtype having to be increased to an equivalent float
dtype with two times the input resolution.
- (3) Mapped internally to ``float16``.
Parameters
----------
image_fg : (H,W,[C]) ndarray
Foreground image. Shape and dtype kind must match the one of the
background image.
image_bg : (H,W,[C]) ndarray
Background image. Shape and dtype kind must match the one of the
foreground image.
alpha : number or iterable of number or ndarray
The blending factor, between 0.0 and 1.0. Can be interpreted as the opacity of the
foreground image. Values around 1.0 result in only the foreground image being visible.
Values around 0.0 result in only the background image being visible.
Multiple alphas may be provided. In these cases, there must be exactly one alpha per
channel in the foreground/background image. Alternatively, for ``(H,W,C)`` images,
either one ``(H,W)`` array or an ``(H,W,C)`` array of alphas may be provided,
denoting the elementwise alpha value.
eps : number, optional
Controls when an alpha is to be interpreted as exactly 1.0 or exactly 0.0, resulting
in only the foreground/background being visible and skipping the actual computation.
Returns
-------
image_blend : (H,W,C) ndarray
Blend of foreground and background image. |
def product_id_change(self):
    '''Onchange handler for ``product_id`` on a folio/order line.

    When the context flags a folio, copies name, list price and UoM from
    the selected product and recomputes the tax-included unit price.
    Otherwise recomputes the price through the pricelist context, or
    returns an empty ``product_uom`` domain when no product is set.
    '''
    context = dict(self._context)
    if not context:
        context = {}
    if context.get('folio', False):
        if self.product_id and self.folio_id.partner_id:
            self.name = self.product_id.name
            self.price_unit = self.product_id.list_price
            self.product_uom = self.product_id.uom_id
            tax_obj = self.env['account.tax']
            pr = self.product_id
            # overwrite the list price with its tax-included equivalent
            self.price_unit = tax_obj._fix_tax_included_price(pr.price,
                                                              pr.taxes_id,
                                                              self.tax_id)
    else:
        if not self.product_id:
            return {'domain': {'product_uom': []}}
        val = {}
        # evaluate the product in the partner/pricelist context so that
        # `price` reflects the applicable pricelist rules
        pr = self.product_id.with_context(
            lang=self.folio_id.partner_id.lang,
            partner=self.folio_id.partner_id.id,
            quantity=val.get('product_uom_qty') or self.product_uom_qty,
            date=self.folio_id.date_order,
            pricelist=self.folio_id.pricelist_id.id,
            uom=self.product_uom.id
        )
        # NOTE(review): this uses self.order_id's pricelist while the
        # context above used self.folio_id's — looks inconsistent; confirm
        # whether order_id and folio_id share the same pricelist here.
        p = pr.with_context(pricelist=self.order_id.pricelist_id.id).price
        if self.folio_id.pricelist_id and self.folio_id.partner_id:
            obj = self.env['account.tax']
            val['price_unit'] = obj._fix_tax_included_price(p,
                                                            pr.taxes_id,
                                                            self.tax_id)
- |
def apply_to_image(self, image, reference=None, interpolation='linear'):
    """
    Apply transform to an image
    Arguments
    ---------
    image : ANTsImage
        image to which the transform will be applied
    reference : ANTsImage
        target space for transforming image; defaults to the image itself
    interpolation : string
        type of interpolation to use
    Returns
    -------
    ANTsImage : the transformed image
    """
    if reference is None:
        reference = image.clone()
    # the reference must share the image's pixeltype for the C routine
    reference = reference.clone(image.pixeltype)
    transform_image = utils.get_lib_fn(
        'transformImage%s%s' % (self._libsuffix, image._libsuffix))
    result_ptr = transform_image(self.pointer, image.pointer,
                                 reference.pointer, interpolation)
    return iio.ANTsImage(pixeltype=image.pixeltype,
                         dimension=image.dimension,
                         components=image.components,
                         pointer=result_ptr)
Arguments
---------
image : ANTsImage
image to which the transform will be applied
reference : ANTsImage
target space for transforming image
interpolation : string
type of interpolation to use
Returns
-------
list : transformed vector |
def fetch(self, url, open_graph=None, twitter_card=None, touch_icon=None,
          favicon=None, all_images=None, parser=None, handle_file_content=None,
          canonical=None):
    """Retrieves content from the specified url, parses it, and returns
    a beautifully crafted dictionary of important information about that
    web page.
    Priority tree is as follows:
        1. OEmbed
        2. Open Graph
        3. Twitter Card
        4. Other meta content (i.e. description, keywords)
    :param url: URL to send a GET request to
    :param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
    :type open_graph: bool
    :param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
    :type twitter_card: bool
    :param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
    :type touch_icon: bool
    :param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
    :type favicon: bool
    :param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
    :type canonical: bool
    :param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
    :type all_images: bool
    :param parser: (optional) String reference for the parser that BeautifulSoup will use
    :type parser: string
    :param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
    :type handle_file_content: bool
    """
    # Set params, method params have priority over class params
    open_graph = merge_settings(open_graph, self.open_graph)
    twitter_card = merge_settings(twitter_card, self.twitter_card)
    touch_icon = merge_settings(touch_icon, self.touch_icon)
    favicon = merge_settings(favicon, self.favicon)
    canonical = merge_settings(canonical, self.canonical)
    all_images = merge_settings(all_images, self.all_images)
    parser = merge_settings(parser, self.parser)
    handle_file_content = merge_settings(handle_file_content, self.handle_file_content)
    data = {
        'images': [],
        'videos': [],
    }
    has_file_content = False
    content_type = None
    # Probe the Content-Type first so non-HTML files can be short-circuited.
    if handle_file_content:
        headers, status_code = self._retrieve_headers(url)
        content_type = headers.get('Content-Type')
        has_file_content = content_type and not 'text/html' in content_type
    if has_file_content and content_type:
        has_image_content = content_type in IMAGE_MIMETYPES
        if has_image_content:
            parsed_url = urlparse(url)
            data['title'] = basename(parsed_url.path.lstrip('/'))  # TODO: if the url doesn't have an extension, maybe we should match it up to the mimetype and append an ext?
            data['url'] = url
            data['images'].append({
                'type': 'body_image',
                'src': url,
            })
    else:
        # OEmbed first (highest priority); failures fall through to HTML parsing.
        try:
            oembed_data, status_code = self._retrieve_oembed_data(url)
            parse_oembed_data(oembed_data, data)
        except LassieError:
            oembed_data = None
        html, status_code = self._retrieve_content(url)
        if not html and not oembed_data:
            raise LassieError('There was no content to parse.')
        # Ensure a root <html> element exists so soup.html lookups below work.
        if '<html' not in html:
            html = re.sub(r'(?:<!DOCTYPE(?:\s\w)?>(?:<head>)?)', '<!DOCTYPE html><html>', html)
        soup = BeautifulSoup(clean_text(html), parser)
        self._filter_amp_data(soup, data, url, all_images)
        # Meta extraction in priority order: Open Graph, Twitter Card, generic.
        if open_graph:
            self._filter_meta_data('open_graph', soup, data, url)
        if twitter_card:
            self._filter_meta_data('twitter_card', soup, data)
        self._filter_meta_data('generic', soup, data)
        if touch_icon:
            self._filter_link_tag_data('touch_icon', soup, data, url)
        if favicon:
            self._filter_link_tag_data('favicon', soup, data, url)
        if canonical:
            self._filter_link_tag_data('canonical', soup, data, url)
        if all_images:
            # Maybe filter out 1x1, no "good" way to do this if image doesn't supply
            # width/height.
            self._find_all_images(soup, data, url)
        # TODO: Find a good place for setting url, title and locale
        if soup.html.get('lang'):
            lang = soup.html.get('lang')
        else:
            lang = soup.html.get('xml:lang')
        if lang and ('locale' not in data):
            locale = normalize_locale(lang)
            if locale:
                data['locale'] = locale
        # Prefer the full requested URL over a shorter prefix found in the page.
        data_url = data.get('url')
        if not data_url or (data_url in url and len(data_url) < len(url)):
            data['url'] = url
        if ('title' not in data or not data.get('title')) and hasattr(soup.title, 'string'):
            data['title'] = soup.title.string
    data['status_code'] = status_code
    return data
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool |
def _register_function(name: str, func, universe: bool, in_place: bool):
    """Register a transformation function under the given name.

    :param name: Name to register the function under
    :param func: A function
    :param universe: Whether the function is also recorded in the universe map
    :param in_place: Whether the function is also recorded in the in-place map
    :return: The same function
    :raises PipelineNameError: if ``name`` is already registered
    """
    if name in mapped:
        existing = mapped[name]
        raise PipelineNameError('{name} is already registered with {func_mod}.{func_name}'.format(
            name=name,
            func_mod=existing.__module__,
            func_name=existing.__name__
        ))
    mapped[name] = func
    if universe:
        universe_map[name] = func
    if in_place:
        in_place_map[name] = func
    # Sort the function into exactly one of the two argument buckets.
    bucket = has_arguments_map if _has_arguments(func, universe) else no_arguments_map
    bucket[name] = func
    return func
:param name: Name to register the function under
:param func: A function
:param universe:
:param in_place:
:return: The same function, with additional properties added |
def get_skeletons(self, component_info=None, data=None, component_position=None):
    """Return the skeletons contained in the packet, one segment list each."""
    skeletons = []
    for _ in range(component_info.skeleton_count):
        component_position, info = QRTPacket._get_exact(
            RTSegmentCount, data, component_position
        )
        segment_list = []
        for _ in range(info.segment_count):
            # each segment is id + position + rotation, read sequentially
            component_position, segment = QRTPacket._get_exact(
                RTSegmentId, data, component_position
            )
            component_position, position = QRTPacket._get_exact(
                RTSegmentPosition, data, component_position
            )
            component_position, rotation = QRTPacket._get_exact(
                RTSegmentRotation, data, component_position
            )
            segment_list.append((segment.id, position, rotation))
        skeletons.append(segment_list)
    return skeletons
def train(self, data, epochs, autostop=False):
    """!
    @brief Trains self-organized feature map (SOM).
    @param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
    @param[in] epochs (uint): Number of epochs for training.
    @param[in] autostop (bool): Automatic termination of learning process when adaptation is not occurring.
    @return (uint) Number of learning iterations actually performed.
    """
    self._data = data
    # delegate to the C-core implementation when available
    if self.__ccore_som_pointer is not None:
        return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop)
    self._sqrt_distances = self.__initialize_distances(self._size, self._location)
    # reset per-neuron win counts and captured-object lists
    for i in range(self._size):
        self._award[i] = 0
        self._capture_objects[i].clear()
    # weights
    self._create_initial_weights(self._params.init_type)
    previous_weights = None
    for epoch in range(1, epochs + 1):
        # Depression term of coupling: radius and learning rate decay per epoch
        self._local_radius = (self._params.init_radius * math.exp(-(epoch / epochs))) ** 2
        self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / epochs))
        # Clear statistics
        if autostop:
            for i in range(self._size):
                self._award[i] = 0
                self._capture_objects[i].clear()
        for i in range(len(self._data)):
            # Step 1: Competition:
            index = self._competition(self._data[i])
            # Step 2: Adaptation:
            self._adaptation(index, self._data[i])
            # Update statistics
            if (autostop == True) or (epoch == epochs):
                self._award[index] += 1
                self._capture_objects[index].append(i)
        # Check requirement of stopping: terminate early when weights barely moved
        if autostop:
            if previous_weights is not None:
                maximal_adaptation = self._get_maximal_adaptation(previous_weights)
                if maximal_adaptation < self._params.adaptation_threshold:
                    return epoch
            previous_weights = [item[:] for item in self._weights]
    return epochs
@brief Trains self-organized feature map (SOM).
@param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
@param[in] epochs (uint): Number of epochs for training.
@param[in] autostop (bool): Automatic termination of learining process when adaptation is not occurred.
@return (uint) Number of learining iterations. |
def main():
    """The simplest usage of watershed delineation based on TauDEM."""
    dem_path = '../tests/data/Jamaica_dem.tif'
    process_count = 2
    working_dir = '../tests/data/tmp_results/wtsd_delineation'
    TauDEMWorkflow.watershed_delineation(process_count, dem_path,
                                         workingdir=working_dir)
def to_detach(b:Tensors, cpu:bool=True):
    "Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`."
    if is_listy(b):
        return [to_detach(item, cpu) for item in b]
    if not isinstance(b, Tensor):
        return b
    detached = b.detach()
    if cpu:
        return detached.cpu()
    return detached
def get_fragment(self, offset):
    """
    Return a repr of the source text around *offset*; appends '...'
    when the fragment is truncated.
    """
    window = 10
    fragment = repr(self.source[offset:offset + window])
    if offset + window < len(self.source):
        fragment += '...'
    return fragment
def avg_bp_from_range(self, bp):
    """ Helper function - FastQC often gives base pair ranges (eg. 10-15)
    which are not helpful when plotting. This returns the average from such
    ranges as an int, which is helpful. If not a range, just returns the int """
    try:
        if '-' in bp:
            low, high = bp.split("-", 1)
            # midpoint of the range
            bp = ((float(high) - float(low)) / 2) + float(low)
    except TypeError:
        # non-string input (e.g. already an int): fall through to int()
        pass
    return int(bp)
which are not helpful when plotting. This returns the average from such
ranges as an int, which is helpful. If not a range, just returns the int |
def get_column(name, model=None):
    """
    get table column according to name, the name can be like `model.column`
    """
    if '.' in name:
        model_name, name = name.split('.')
        model = get_model(model_name)
    if not model:
        return None
    return model.c.get(name)
def first_arg_to_level_name(arg):
    """Decide what level the argument specifies and return it. The argument
    must contain (case-insensitive) one of the values in LEVELS or be an integer
    constant. Otherwise None will be returned."""
    try:
        return int(arg)
    except ValueError:
        pass
    upper_arg = arg.upper()
    for level in LEVELS:
        if level in upper_arg:
            return level
    return None
must contain (case-insensitive) one of the values in LEVELS or be an integer
constant. Otherwise None will be returned. |
def get_jira_key_from_scenario(scenario):
    """Extract Jira Test Case key from scenario tags.
    Two tag formats are allowed:
        @jira('PROJECT-32')
        @jira=PROJECT-32
    :param scenario: behave scenario
    :returns: Jira test case key, or None when no tag matches
    """
    pattern = re.compile('jira[=\(\']*([A-Z]+\-[0-9]+)[\'\)]*$')
    for tag in scenario.tags:
        found = pattern.search(tag)
        if found:
            return found.group(1)
    return None
Two tag formats are allowed:
@jira('PROJECT-32')
@jira=PROJECT-32
:param scenario: behave scenario
:returns: Jira test case key |
def Search(self, artifact, os_name=None, cpe=None, label=None):
    """Whether the condition contains the specified values.
    Args:
        artifact: A string identifier for the artifact.
        os_name: An OS string.
        cpe: A CPE string.
        label: A label string.
    Returns:
        True if the values match the non-empty query attributes.
        Empty query attributes are ignored in the comparison.
    """
    pairs = [(artifact, self.artifact),
             (os_name, self.os_name),
             (cpe, self.cpe),
             (label, self.label)]
    # an empty/None query value always counts as a match
    return all(query == actual or not query for query, actual in pairs)
Args:
artifact: A string identifier for the artifact.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
True if the values match the non-empty query attributes.
Empty query attributes are ignored in the comparison. |
def filter_entries(entries, filters, exclude):
    """
    Filters a list of host entries according to the given filters.
    :param entries: A list of host entries.
    :type entries: [:py:class:`HostEntry`]
    :param filters: Regexes that must match a `HostEntry`.
    :type filters: [``str``]
    :param exclude: Regexes that must NOT match a `HostEntry`.
    :type exclude: [``str``]
    :return: The filtered list of host entries.
    :rtype: [:py:class:`HostEntry`]
    """
    def _keep(entry):
        # all inclusion filters must match, and no exclusion filter may
        if not all(entry.matches(f) for f in filters):
            return False
        return not any(entry.matches(e) for e in exclude)
    return [entry for entry in entries if _keep(entry)]
return filtered | Filters a list of host entries according to the given filters.
:param entries: A list of host entries.
:type entries: [:py:class:`HostEntry`]
:param filters: Regexes that must match a `HostEntry`.
:type filters: [``str``]
:param exclude: Regexes that must NOT match a `HostEntry`.
:type exclude: [``str``]
:return: The filtered list of host entries.
:rtype: [:py:class:`HostEntry`] |
def to_string(self, endpoints):
    # type: (List[EndpointDescription]) -> str
    """
    Converts the given endpoint description beans into a string

    :param endpoints: A list of EndpointDescription beans
    :return: A string containing an XML document
    """
    # Build the XML tree from the endpoint beans
    tree = ElementTree.ElementTree(self._make_xml(endpoints))

    # Register the EDEF namespace as the default one so that elements are
    # not prefixed in the serialized output
    ElementTree.register_namespace("", EDEF_NAMESPACE)

    # Serialize into an in-memory buffer using the configured encoding and
    # XML-declaration settings
    buffer = StringIO()
    tree.write(
        buffer,
        encoding=self._encoding,
        xml_declaration=self._xml_declaration,
    )
    return buffer.getvalue().strip()
:param endpoints: A list of EndpointDescription beans
:return: A string containing an XML document |
def manage_initial_host_status_brok(self, b):
    """Prepare the known hosts cache from an initial host status brok."""
    host_name = b.data['host_name']
    logger.debug("got initial host status: %s", host_name)

    # Cache the (sanitized) realm name, falling back from 'realm_name'
    # to 'realm' and finally to 'All'.
    realm = b.data.get('realm_name', b.data.get('realm', 'All'))
    self.hosts_cache[host_name] = {'realm_name': sanitize_name(realm)}

    if 'customs' in b.data:
        customs = b.data['customs']
        entry = self.hosts_cache[host_name]
        # NOTE(review): sanitize_name() receives None when a custom macro
        # is absent - presumably it tolerates None; confirm.
        entry['_GRAPHITE_PRE'] = sanitize_name(customs.get('_GRAPHITE_PRE', None))
        entry['_GRAPHITE_GROUP'] = sanitize_name(customs.get('_GRAPHITE_GROUP', None))

    logger.debug("initial host status received: %s", host_name)
def enc(x, codec='ascii'):
    """Encodes a string for SGML/XML/HTML.

    Replaces the four reserved markup characters by their character
    entities, then encodes with *codec*; characters the codec cannot
    represent are emitted as numeric character references.

    :param x: the text to escape
    :param codec: target encoding (default ``'ascii'``)
    :return: the escaped, encoded bytes
    """
    # Bug fix: the original replaces were identity no-ops (the entity
    # names were lost to HTML-entity decoding). '&' must be escaped
    # first so the '&' introduced by the other entities is not mangled.
    x = (x.replace('&', '&amp;')
          .replace('>', '&gt;')
          .replace('<', '&lt;')
          .replace('"', '&quot;'))
    return x.encode(codec, 'xmlcharrefreplace')
def DeleteSnapshot(self,names=None):
    """Removes an existing Hypervisor level snapshot.

    Supply one or more snapshot names to delete them concurrently.
    If no snapshot names are supplied will delete all existing snapshots.

    >>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').DeleteSnapshot().WaitUntilComplete()
    0
    """
    # Default to deleting every known snapshot on this server
    if names is None: names = self.GetSnapshots()

    requests_lst = []
    for name in names:
        # Link set of the snapshot with this name; raises IndexError when
        # no snapshot matches (presumably callers pass valid names - confirm).
        name_links = [obj['links'] for obj in self.data['details']['snapshots'] if obj['name']==name][0]
        # Issue a DELETE against the snapshot's 'delete' link and queue the
        # resulting asynchronous operation
        requests_lst.append(
            clc.v2.Requests(
                clc.v2.API.Call('DELETE',
                    [obj['href'] for obj in name_links if obj['rel']=='delete'][0],
                    session=self.session),
                alias=self.alias,
                session=self.session))

    # NOTE(review): sum() starts from 0, so this relies on Requests
    # implementing __radd__/__add__ to merge queued requests - confirm.
    return(sum(requests_lst)) | Removes an existing Hypervisor level snapshot.
Supply one or more snapshot names to delete them concurrently.
If no snapshot names are supplied will delete all existing snapshots.
>>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').DeleteSnapshot().WaitUntilComplete()
0 |
def deploy(provider=None):
    """
    Deploys your project
    """
    # NOTE(review): the `provider` argument is currently unused; the
    # provider is always read from the deploy YAML file - confirm intent.
    if not os.path.exists(DEPLOY_YAML):
        return
    site = yaml.safe_load(_read_file(DEPLOY_YAML))
    provider_class = PROVIDERS[site['provider']]
    provider_class.deploy()
def mark_offer_as_win(self, offer_id):
    """
    Mark offer as win

    :param offer_id: the offer id
    :return Response
    """
    # Delegate to the generic PUT helper with the 'win' command
    return self._create_put_request(resource=OFFERS, billomat_id=offer_id, command=WIN)
:param offer_id: the offer id
:return Response |
def _read_uaa_cache(self):
"""
Read cache of UAA client/user details.
"""
self._cache_path = os.path.expanduser('~/.predix/uaa.json')
if not os.path.exists(self._cache_path):
return self._initialize_uaa_cache()
with open(self._cache_path, 'r') as data:
return json.load(data) | Read cache of UAA client/user details. |
def validate_metadata(self, xml):
    """
    Validates an XML SP Metadata.

    :param xml: Metadata's XML that will be validate
    :type xml: string

    :returns: The list of found errors
    :rtype: list
    """
    assert isinstance(xml, compat.text_types)

    if len(xml) == 0:
        raise Exception('Empty string supplied as input')

    errors = []
    # Schema-validate first; validate_xml returns the parsed root element
    # on success or an error-message string on failure (hence the
    # isinstance check below)
    root = OneLogin_Saml2_XML.validate_xml(xml, 'saml-schema-metadata-2.0.xsd', self.__debug)
    if isinstance(root, str):
        errors.append(root)
    else:
        # The document root must be a single md:EntityDescriptor
        if root.tag != '{%s}EntityDescriptor' % OneLogin_Saml2_Constants.NS_MD:
            errors.append('noEntityDescriptor_xml')
        else:
            # Exactly one SPSSODescriptor is allowed (SP metadata only)
            if (len(root.findall('.//md:SPSSODescriptor', namespaces=OneLogin_Saml2_Constants.NSMAP))) != 1:
                errors.append('onlySPSSODescriptor_allowed_xml')
            else:
                # Reject metadata that is already expired according to its
                # validUntil / cacheDuration attributes
                valid_until, cache_duration = root.get('validUntil'), root.get('cacheDuration')

                if valid_until:
                    valid_until = OneLogin_Saml2_Utils.parse_SAML_to_time(valid_until)
                expire_time = OneLogin_Saml2_Utils.get_expire_time(cache_duration, valid_until)
                if expire_time is not None and int(time()) > int(expire_time):
                    errors.append('expired_xml')

                # TODO: Validate Sign

    return errors | Validates an XML SP Metadata.
:param xml: Metadata's XML that will be validate
:type xml: string
:returns: The list of found errors
:rtype: list |
def _new_conn(self):
    """
    Return a fresh :class:`httplib.HTTPSConnection`.
    """
    self.num_connections += 1
    log.info("Starting new HTTPS connection (%d): %s"
             % (self.num_connections, self.host))

    # Without the ssl module there is no usable connection class.
    if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
        # Platform-specific: Python without ssl
        raise SSLError("Can't connect to HTTPS URL because the SSL "
                       "module is not available.")

    # When a proxy is configured, the TCP connection targets the proxy
    # rather than the destination host.
    if self.proxy is None:
        actual_host, actual_port = self.host, self.port
    else:
        actual_host, actual_port = self.proxy.host, self.proxy.port

    conn = self.ConnectionCls(host=actual_host, port=actual_port,
                              timeout=self.timeout.connect_timeout,
                              strict=self.strict, **self.conn_kw)
    return self._prepare_conn(conn)
def search(d, recursive=True, store_meta=True):
    '''
    Search for DICOM files within a given directory and receive back a
    dictionary of {StudyInstanceUID: {SeriesNumber: {InstanceNumber: [files]}}}

    Example usage::

        >>> import yaxil.dicom
        >>> yaxil.dicom.search("~/dicoms").keys()
        ['1.2.340.500067.8.9.10.11012.13000001401516017181900000200']

    :param d: Directory name
    :type d: str
    :param recursive: Search recursively
    :type recursive: bool
    :param store_meta: Read and store metadata for each file for fast lookups
    :type store_meta: bool
    :returns: Dictionary of {StudyInstanceUID: {SeriesNumber: {InstanceNumber: [files]}}}
    :rtype: dict
    '''
    # say this fast three times
    scans = col.defaultdict(lambda: col.defaultdict(lambda: col.defaultdict(list)))
    for dirpath, dirnames, filenames in os.walk(os.path.expanduser(d)):
        for f in filenames:
            fullfile = os.path.join(dirpath, f)
            try:
                # Bug fix: was bound to `d`, shadowing the directory
                # parameter. stop_before_pixels skips bulky pixel data.
                ds = pydicom.read_file(fullfile, stop_before_pixels=True)
            except pydicom.filereader.InvalidDicomError:
                # silently skip files that are not DICOM
                continue
            meta = {k: getattr(ds, k, None) for k in ds.dir()} if store_meta else None
            scans[ds.StudyInstanceUID][ds.SeriesNumber][ds.InstanceNumber].append(DicomFile(meta=meta, file=fullfile))
        if not recursive:
            # prune subdirectories in place so os.walk stops descending
            del dirnames[:]
    return scans
dictionary of {StudyInstanceUID: {SeriesNumber: [files]}}
Example usage::
>>> import yaxil.dicom
>>> yaxil.dicom.search("~/dicoms").keys()
['1.2.340.500067.8.9.10.11012.13000001401516017181900000200']
:param d: Directory name
:type d: str
:param recursive: Search recursively
:type recursive: bool
:param store_meta: Read and store metadata for each file for fast lookups
:type store_meta: bool
:returns: Dictionary of {StudyInstanceUID: {SeriesNumber: [files]}}
:rtype: dict |
def graph_repo(repo_url, output_loc, format='graphml'):
    """ generates a graph for a git repository

    Builds a directed graph of repository -> branch -> commit edges plus
    author -> commit edges, then writes it to ``output_loc``.

    :param repo_url: git URL to clone, or an absolute path to a local repo
    :param output_loc: directory in which the graph file is written
    :param format: output format, 'graphml' or 'gexf'
    """
    log = logging.getLogger("graphgit")
    # repo type: an absolute path denotes a pre-existing local repository
    local_repo = os.path.isabs(repo_url)
    # repo name: strip '.git' from remote URLs, last path component locally
    repo_name = repo_url[repo_url.rfind('/')+1:repo_url.rfind('.git')] \
        if not local_repo else repo_url[repo_url.rfind(os.sep)+1:]
    log.info ("Processing git repository: %s" % repo_name)
    # repo location: remote repos are cloned under REPO_DOWNLOAD_LOCATION
    repo_loc = os.path.join(constants.REPO_DOWNLOAD_LOCATION, repo_name) \
        if not local_repo else repo_url
    # initialize repo
    repo = None
    gitt = git.Git()
    try:
        # check if repo is already cloned
        # if local repo exists, assign
        repo = git.Repo(repo_loc, odbt=git.GitCmdObjectDB)
        log.info( "Repository already cloned... Going ahead and using it..." )
        # TODO: check if repo is dirty and if so, update
    except git.exc.NoSuchPathError:
        # local repo doesn't exist. clone
        try:
            if local_repo:
                # a local path that doesn't exist cannot be cloned; bail out
                raise Exception
            log.info( "Cloning repository... this might take some time, please wait !" )
            gitt.clone(repo_url, repo_loc)
            log.info( "Git clone completed..." )
            repo = git.Repo(repo_loc, odbt=git.GitCmdObjectDB)
        except:
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; `except Exception:` would be safer.
            log.error( "Could not obtain repository: %s !" % repo_url )
            sys.exit(1)
    if repo is None:
        log.error( "Could not obtain repository: %s !" % repo_url )
        sys.exit(1)
    # create a graph for the repo
    G = nx.DiGraph()
    # root node
    G.add_node(repo_name, type=constants.NODE_TYPE_VALS['REPOSITORY'])
    # branches & commits
    for branch in repo.branches:
        log.debug ("Processing branch %s" % branch)
        G.add_node(branch, type=constants.NODE_TYPE_VALS['BRANCH'])
        G.add_edge(repo_name, branch,
            label=constants.EDGE_LABEL_VALS['REPOSITORY_BRANCH'])
        for commit in repo.iter_commits(branch):
            try:
                author = safe_str(commit.author)
                ts = commit.committed_date
                sha = safe_str(commit)
                log.debug ("%s> %s --[commit]--> %s" % (branch, author, sha))
                G.add_node(author, type=constants.NODE_TYPE_VALS['PERSON'])
                G.add_node(sha, ts=ts,
                    type=constants.NODE_TYPE_VALS['COMMIT'])
                G.add_edge(author, sha,
                    label=constants.EDGE_LABEL_VALS['PERSON_COMMIT'])
                G.add_edge(branch, sha,
                    label=constants.EDGE_LABEL_VALS['BRANCH_COMMIT'])
            except LookupError:
                # commit metadata could not be decoded; skip this commit
                log.warning('Could not process %s !' % commit)
                continue
    log.info( "Graph built ! saving..." )
    # save graph in the requested serialization format
    output_file_name = '%s.%s' % (repo_name, format)
    output_file_loc = os.path.join(output_loc, output_file_name)
    if format == 'graphml':
        nx.write_graphml(G, output_file_loc, encoding='utf-8')
    elif format == 'gexf':
        nx.write_gexf(G, output_file_loc, encoding='utf-8')
    else:
        log.error( "Invalid output format: %s !" % format )
        sys.exit(1)
    log.info( "Saved to %s !" % output_file_loc ) | generates a graph for a git repository
def hostgroup_exists(name=None, groupid=None, node=None, nodeids=None, **kwargs):
    '''
    Checks if at least one host group that matches the given filter criteria exists.

    .. versionadded:: 2016.3.0

    :param name: names of the host groups
    :param groupid: host group IDs
    :param node: name of the node the host groups must belong to (zabbix API < 2.4)
    :param nodeids: IDs of the nodes the host groups must belong to (zabbix API < 2.4)
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    :return: True if at least one host group exists, False if not or on failure.

    CLI Example:
    .. code-block:: bash

        salt '*' zabbix.hostgroup_exists MyNewGroup
    '''
    conn_args = _login(**kwargs)
    zabbix_version = apiinfo_version(**kwargs)
    ret = {}
    try:
        if conn_args:
            # hostgroup.exists was deprecated; on Zabbix > 2.5 emulate it
            # by querying hostgroup.get and reporting whether anything matched
            if _LooseVersion(zabbix_version) > _LooseVersion("2.5"):
                # normalize falsy filter values to None for hostgroup_get
                if not groupid:
                    groupid = None
                if not name:
                    name = None
                ret = hostgroup_get(name, groupid, **kwargs)
                return bool(ret)
            # Zabbix 2.4 and earlier: call hostgroup.exists directly
            else:
                params = {}
                method = 'hostgroup.exists'
                if groupid:
                    params['groupid'] = groupid
                if name:
                    params['name'] = name
                # node/nodeids parameters were deprecated in 2.4
                if _LooseVersion(zabbix_version) < _LooseVersion("2.4"):
                    if node:
                        params['node'] = node
                    if nodeids:
                        params['nodeids'] = nodeids
                if not groupid and not name and not node and not nodeids:
                    return {'result': False, 'comment': 'Please submit groupid, name, node or nodeids parameter to'
                            'check if at least one host group that matches the given filter'
                            ' criteria exists.'}
                ret = _query(method, params, conn_args['url'], conn_args['auth'])
                return ret['result']
        else:
            # _login failed: route to the failure return via the handler below
            raise KeyError
    except KeyError:
        return ret | Checks if at least one host group that matches the given filter criteria exists.
.. versionadded:: 2016.3.0
:param name: names of the host groups
:param groupid: host group IDs
:param node: name of the node the host groups must belong to (zabbix API < 2.4)
:param nodeids: IDs of the nodes the host groups must belong to (zabbix API < 2.4)
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: True if at least one host group exists, False if not or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.hostgroup_exists MyNewGroup |
def _range_check(self, value, min_value, max_value):
'''
Utility method to check that the given value is between min_value and max_value.
'''
if value < min_value or value > max_value:
raise ValueError('%s out of range - %s is not between %s and %s' % (self.__class__.__name__, value, min_value, max_value)) | Utility method to check that the given value is between min_value and max_value. |
def _has_nested(self, relations, operator='>=', count=1, boolean='and', extra=None):
    """
    Add nested relationship count conditions to the query.

    :param relations: nested relations
    :type relations: str
    :param operator: The operator
    :type operator: str
    :param count: The count
    :type count: int
    :param boolean: The boolean value
    :type boolean: str
    :param extra: The extra query
    :type extra: Builder or callable
    :rtype: Builder
    """
    relations = relations.split('.')

    # The closure consumes one relation segment per nesting level by
    # mutating the shared `relations` list: intermediate segments recurse
    # via where_has, the last segment applies the count condition.
    def closure(q):
        if len(relations) > 1:
            q.where_has(relations.pop(0), closure)
        else:
            q.has(relations.pop(0), operator, count, boolean, extra)

    # NOTE(review): the outer call pops the first segment and where_has is
    # expected to invoke the closure for the remainder - the pop(0)
    # ordering is load-bearing; do not reorder these calls.
    return self.where_has(relations.pop(0), closure) | Add nested relationship count conditions to the query.
:param relations: nested relations
:type relations: str
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:param boolean: The boolean value
:type boolean: str
:param extra: The extra query
:type extra: Builder or callable
:rtype: Builder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.