| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def drape(raster, feature):
    """Convert a 2D feature to a 3D feature by sampling a raster.

    Parameters:
        raster (rasterio): raster to provide the z coordinate
        feature (dict): fiona feature record to convert

    Returns:
        result (Point or LineString): shapely Point or LineString of xyz
            coordinate triples, or None when the geometry type is
            unsupported.
    """
    coords = feature['geometry']['coordinates']
    geom_type = feature['geometry']['type']
    # Bug fix: default to None so an unsupported geometry type no longer
    # raises UnboundLocalError at the return statement (previously `result`
    # was only bound in the Point/LineString branches).
    result = None
    if geom_type == 'Point':
        xyz = sample(raster, [coords])
        result = Point(xyz[0])
    elif geom_type == 'LineString':
        xyz = sample(raster, coords)
        points = [Point(x, y, z) for x, y, z in xyz]
        result = LineString(points)
    else:
        logging.error('drape not implemented for {}'.format(geom_type))
    return result
def get_access_id(self):
    """
    Gets the application access id. The value can be stored in parameters
    "access_id" or "client_id".

    :return: the application access id, or None if neither parameter is set.
    """
    access_id = self.get_as_nullable_string("access_id")
    if access_id is None:
        # Fall back to the alternative parameter name.
        access_id = self.get_as_nullable_string("client_id")
    return access_id
def is_safe(self):
    """
    Check if the option is safe.

    :rtype : bool
    :return: True, if option is safe
    """
    registry = defines.OptionRegistry
    # These CoAP options are "unsafe to forward" by definition.
    unsafe_numbers = (
        registry.URI_HOST.number,
        registry.URI_PORT.number,
        registry.URI_PATH.number,
        registry.MAX_AGE.number,
        registry.URI_QUERY.number,
        registry.PROXY_URI.number,
        registry.PROXY_SCHEME.number,
    )
    return self._number not in unsafe_numbers
def _build_number_type(var, property_path=None):
    """Builds schema definitions for number type values.

    :param var: The number type value
    :param List[str] property_path: The property path of the current type,
        defaults to None, optional
    :return: The built schema definition
    :rtype: Dict[str, Any]
    """
    property_path = property_path or []
    schema = {"type": "number"}
    # Plain builtins carry no modifier metadata.
    if is_builtin_type(var):
        return schema
    if is_config_var(var):
        # Map min/max validators onto JSON-schema minimum/maximum keywords.
        modifiers = _build_attribute_modifiers(
            var, {"min": "minimum", "max": "maximum"}
        )
        schema.update(modifiers)
    return schema
def Tm_depression_eutectic(Tm, Hm, x=None, M=None, MW=None):
    r'''Returns the freezing point depression caused by a solute in a solvent.
    Can use either the mole fraction of the solute or its molality and the
    molecular weight of the solvent. Assumes ideal system behavior.

    .. math::
        \Delta T_m = \frac{R T_m^2 x}{\Delta H_m}

        \Delta T_m = \frac{R T_m^2 (MW) M}{1000 \Delta H_m}

    Parameters
    ----------
    Tm : float
        Melting temperature of the solute [K]
    Hm : float
        Heat of melting at the melting temperature of the solute [J/mol]
    x : float, optional
        Mole fraction of the solute [-]
    M : float, optional
        Molality [mol/kg]
    MW: float, optional
        Molecular weight of the solvent [g/mol]

    Returns
    -------
    dTm : float
        Freezing point depression [K]

    Notes
    -----
    MW is the molecular weight of the solvent. M is the molality of the solute.

    Examples
    --------
    From [1]_, matching example.

    >>> Tm_depression_eutectic(353.35, 19110, .02)
    1.0864594900639515

    References
    ----------
    .. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
       Weinheim, Germany: Wiley-VCH, 2012.
    '''
    # Bug fix: test `is not None` rather than truthiness, so a zero mole
    # fraction (or zero molality) is handled by the correct branch instead
    # of falling through to the error path.
    if x is not None:
        dTm = R*Tm**2*x/Hm
    elif M is not None and MW is not None:
        MW = MW/1000.  # g/mol to kg/mol
        dTm = R*Tm**2*MW*M/Hm
    else:
        # ValueError (a subclass of Exception) better reflects bad arguments.
        raise ValueError('Either molality or mole fraction of the solute '
                         'must be specified; MW of the solvent is required '
                         'also if molality is provided')
    return dTm
def copy_file_content(self, file_id, source_file):
    '''Copy file content from source file to target file.

    Args:
        file_id (str): The UUID of the file whose content is written.
        source_file (str): The UUID of the file whose content is copied.

    Returns:
        None

    Raises:
        StorageArgumentException: Invalid arguments
        StorageForbiddenException: Server response code 403
        StorageNotFoundException: Server response code 404
        StorageException: other 400-600 error codes
    '''
    # Validate both UUID arguments before issuing any request.
    for name, value in (('file_id', file_id), ('source_file', source_file)):
        if not is_valid_uuid(value):
            raise StorageArgumentException(
                'Invalid UUID for {0}: {1}'.format(name, value))
    request = self._authenticated_request \
        .to_endpoint('file/{}/content/'.format(file_id)) \
        .with_headers({'X-Copy-From': source_file})
    request.put()
def __prepare_resource(data):
"""Prepare the resourcepart of the JID.
:Parameters:
- `data`: Resourcepart of the JID
:raise JIDError: if the resource name is too long.
:raise pyxmpp.xmppstringprep.StringprepError: if the
resourcepart fails Resourceprep preparation."""
if not data:
return None
data = unicode(data)
try:
resource = RESOURCEPREP.prepare(data)
except StringprepError, err:
raise JIDError(u"Local part invalid: {0}".format(err))
if len(resource.encode("utf-8")) > 1023:
raise JIDError("Resource name too long")
return resource | Prepare the resourcepart of the JID.
:Parameters:
- `data`: Resourcepart of the JID
:raise JIDError: if the resource name is too long.
:raise pyxmpp.xmppstringprep.StringprepError: if the
resourcepart fails Resourceprep preparation. |
def plot_signal_sum_colorplot(ax, params, fname='LFPsum.h5', unit='mV', N=1, ylabels = True,
                              T=[800, 1000], ylim=[-1500, 0], fancy=False, colorbar=True,
                              cmap='spectral_r', absmax=None, transient=200, rasterized=True):
    '''
    Plot the summed signal contributions stored in HDF5 file ``fname`` as a
    pcolormesh image on ``ax``, mean-subtracted and normalized by ``N``.

    args:
    ::

        ax : matplotlib.axes.AxesSubplot object
        T : list, [tstart, tstop], which timeinterval
        ylim : list, set range of yaxis to scale with other plots
        fancy : bool,
        N : integer, set to number of LFP generators in order to get the normalized signal

    NOTE(review): mutable default arguments (T, ylim) are shared across
    calls -- safe only while callers never mutate them.
    '''
    f = h5py.File(fname)
    data = f['data'].value
    # Time axis in ms, derived from the sample rate stored in the file.
    tvec = np.arange(data.shape[1]) * 1000. / f['srate'].value
    # for mean subtraction: per-channel mean over the post-transient window
    datameanaxis1 = f['data'].value[:, tvec >= transient].mean(axis=1)
    # slice out the requested time interval
    slica = (tvec <= T[1]) & (tvec >= T[0])
    data = data[:,slica]
    # subtract mean (computed over the full post-transient window, not the slice)
    #dataT = data.T - data.mean(axis=1)
    dataT = data.T - datameanaxis1
    data = dataT.T
    # normalize by number of LFP generators
    data = data/N
    zvec = params.electrodeParams['z']
    if fancy:
        colors = phlp.get_colors(data.shape[0])
    else:
        colors = ['k']*data.shape[0]
    # Symmetric color scale around zero unless an explicit absmax is given.
    if absmax == None:
        absmax=abs(np.array([data.max(), data.min()])).max()
    # Append one extra z edge so pcolormesh receives cell boundaries.
    im = ax.pcolormesh(tvec[slica], np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] + 50, data,
                       rasterized=rasterized, vmax=absmax, vmin=-absmax, cmap=cmap)
    ax.set_yticks(params.electrodeParams['z'])
    if ylabels:
        yticklabels = ['ch. %i' %(i+1) for i in np.arange(len(params.electrodeParams['z']))]
        ax.set_yticklabels(yticklabels)
    else:
        ax.set_yticklabels([])
    if colorbar:
        # colorbar in its own axes to the right of the main plot
        divider=make_axes_locatable(ax)
        cax=divider.append_axes("right", size="5%", pad=0.1)
        cbar=plt.colorbar(im, cax=cax)
        cbar.set_label(unit,labelpad=0.1)
    plt.axis('tight')
    ax.set_ylim(ylim)
    f.close()
    return im
def start(self, timeout=None):
    """Start the process going.

    Args:
        timeout (float): Maximum amount of time to wait for each spawned
            process. None means forever
    """
    assert self.state == STOPPED, "Process already started"
    self.state = STARTING
    controllers = self._controllers.values()
    # Publish only when the controllers report they started successfully.
    if self._start_controllers(controllers, timeout):
        self._publish_controllers(timeout)
    self.state = STARTED
def match_description(self, description, string_match_type=DEFAULT_STRING_MATCH_TYPE, match=True):
    """Adds a description name to match.

    Multiple description matches can be added to perform a boolean
    ``OR`` among them.

    arg:    description (string): description to match
    arg:    string_match_type (osid.type.Type): the string match
            type
    arg:    match (boolean): ``true`` for a positive match,
            ``false`` for a negative match
    raise:  InvalidArgument - ``description`` is not of
            ``string_match_type``
    raise:  NullArgument - ``description`` or ``string_match_type``
            is ``null``
    raise:  Unsupported -
            ``supports_string_match_type(string_match_type)`` is
            ``false``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Delegates to the shared display-text matcher used by all text queries.
    self._match_display_text('description', description, string_match_type, match)
def update_total(self, n=1):
    """Atomically add ``n`` to the progress bar's total and redraw."""
    with self._lock:
        self._pbar.total = self._pbar.total + n
        self.refresh()
def tds7_process_result(self):
    """Reads and processes a COLMETADATA stream.

    This stream contains a list of returned columns.
    Stream format link: http://msdn.microsoft.com/en-us/library/dd357363.aspx

    :return: the populated results object, or ``None`` for a DUMMY token
        (column count of -1) emitted by a cursor fetch.
    """
    self.log_response_message('got COLMETADATA')
    r = self._reader
    # read number of columns and allocate the columns structure
    num_cols = r.get_smallint()
    # This can be a DUMMY results token from a cursor fetch
    if num_cols == -1:
        return
    # Reset per-resultset state before parsing the new metadata.
    self.param_info = None
    self.has_status = False
    self.ret_status = None
    self.skipped_to_status = False
    self.rows_affected = tds_base.TDS_NO_COUNT
    self.more_rows = True
    self.row = [None] * num_cols
    self.res_info = info = _Results()
    #
    # loop through the columns populating COLINFO struct from
    # server response
    #
    header_tuple = []
    for col in range(num_cols):
        curcol = tds_base.Column()
        info.columns.append(curcol)
        # Parse the wire type information into the column's serializer.
        self.get_type_info(curcol)
        # Column name is a UCS-2 string prefixed by its byte-length.
        curcol.column_name = r.read_ucs2(r.get_byte())
        precision = curcol.serializer.precision
        scale = curcol.serializer.scale
        size = curcol.serializer.size
        # DB-API-style 7-tuple: (name, type_code, display_size,
        # internal_size, precision, scale, null_ok); display_size is
        # not tracked here, hence None.
        header_tuple.append(
            (curcol.column_name,
             curcol.serializer.get_typeid(),
             None,
             size,
             precision,
             scale,
             curcol.flags & tds_base.Column.fNullable))
    info.description = tuple(header_tuple)
    return info
def construct(parent=None, defaults=None, **kwargs):
    """
    Random variable constructor.

    Args:
        cdf:
            Cumulative distribution function. Optional if ``parent`` is used.
        bnd:
            Boundary interval. Optional if ``parent`` is used.
        parent (Dist):
            Distribution used as basis for new distribution. Any other argument
            that is omitted will instead take its function from ``parent``.
        doc (str):
            Documentation for the distribution.
        str (str, :py:data:typing.Callable):
            Pretty print of the variable.
        pdf:
            Probability density function.
        ppf:
            Point percentile function.
        mom:
            Raw moment generator.
        ttr:
            Three terms recursion coefficient generator.
        init:
            Custom initialiser method.
        defaults (dict):
            Default values to provide to initialiser.

    Returns:
        (Dist):
            New custom distribution.
    """
    # Reject any keyword that is not a recognised distribution hook.
    for key in kwargs:
        assert key in LEGAL_ATTRS, "{} is not legal input".format(key)
    if parent is not None:
        # Inherit every hook the caller did not override from the parent.
        for key, value in LEGAL_ATTRS.items():
            if key not in kwargs and hasattr(parent, value):
                kwargs[key] = getattr(parent, value)
    assert "cdf" in kwargs, "cdf function must be defined"
    assert "bnd" in kwargs, "bnd function must be defined"
    if "str" in kwargs and isinstance(kwargs["str"], str):
        # A plain string is wrapped in a constant callable so it can be
        # bound as a method like the other hooks.
        string = kwargs.pop("str")
        kwargs["str"] = lambda *args, **kwargs: string
    defaults = defaults if defaults else {}
    for key in defaults:
        assert key in LEGAL_ATTRS, "invalid default value {}".format(key)
    def custom_distribution(**kws):
        # Factory: build a Dist and bind each hook as a bound method.
        prm = defaults.copy()
        prm.update(kws)
        dist = Dist(**prm)
        for key, function in kwargs.items():
            attr_name = LEGAL_ATTRS[key]
            setattr(dist, attr_name, types.MethodType(function, dist))
        return dist
    if "doc" in kwargs:
        custom_distribution.__doc__ = kwargs["doc"]
    return custom_distribution
def _set_ldp_params(self, v, load=False):
    """
    Setter method for ldp_params, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/ldp_params (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ldp_params is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ldp_params() directly.

    NOTE(review): auto-generated (pyangbind-style) binding code -- prefer
    editing the YANG model over editing this method.
    """
    # Unwrap a union-typed value to its concrete underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=ldp_params.ldp_params, is_container='container', presence=False, yang_name="ldp-params", rest_name="ldp-params", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure LDP parameters', u'cli-full-command': None, u'cli-full-no': None, u'cli-add-mode': None, u'cli-mode-name': u'config-router-mpls-interface-$(interface-name)-ldp-params'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ldp_params must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=ldp_params.ldp_params, is_container='container', presence=False, yang_name="ldp-params", rest_name="ldp-params", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure LDP parameters', u'cli-full-command': None, u'cli-full-no': None, u'cli-add-mode': None, u'cli-mode-name': u'config-router-mpls-interface-$(interface-name)-ldp-params'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
        })
    self.__ldp_params = t
    # Trigger the parent's change-notification hook when available.
    if hasattr(self, '_set'):
        self._set()
def __store_processing_state(self):
    """
    Snapshot the progress widget (step count, current value, message) and
    the processing flag into ``self.__processing_state``.
    """
    progress_bar = self.Application_Progress_Status_processing.Processing_progressBar
    label = self.Application_Progress_Status_processing.Processing_label
    self.__processing_state = (progress_bar.maximum(),
                               progress_bar.value(),
                               label.text(),
                               self.__is_processing)
def send_produce_request(self, payloads=(), acks=1, timeout=1000,
                         fail_on_error=True, callback=None):
    """
    Encode and send some ProduceRequests

    ProduceRequests will be grouped by (topic, partition) and then
    sent to a specific broker. Output is a list of responses in the
    same order as the list of payloads specified

    Arguments:
        payloads (list of ProduceRequest): produce requests to send to kafka
            ProduceRequest payloads must not contain duplicates for any
            topic-partition.
        acks (int, optional): how many acks the servers should receive from replica
            brokers before responding to the request. If it is 0, the server
            will not send any response. If it is 1, the server will wait
            until the data is written to the local log before sending a
            response. If it is -1, the server will wait until the message
            is committed by all in-sync replicas before sending a response.
            For any value > 1, the server will wait for this number of acks to
            occur (but the server will never wait for more acknowledgements than
            there are in-sync replicas). defaults to 1.
        timeout (int, optional): maximum time in milliseconds the server can
            await the receipt of the number of acks, defaults to 1000.
        fail_on_error (bool, optional): raise exceptions on connection and
            server response errors, defaults to True.
        callback (function, optional): instead of returning the ProduceResponse,
            first pass it through this function, defaults to None.

    Returns:
        list of ProduceResponses, or callback results if supplied, in the
        order of input payloads
    """
    encoder = functools.partial(
        KafkaProtocol.encode_produce_request,
        acks=acks,
        timeout=timeout)
    # With acks=0 the broker sends no response, so there is nothing to decode.
    decoder = KafkaProtocol.decode_produce_response if acks != 0 else None
    resps = self._send_broker_aware_request(payloads, encoder, decoder)
    results = []
    for resp in resps:
        if resp is None:
            continue
        # When fail_on_error is set, let the error check raise (or skip
        # the response if it reports an error without raising).
        if fail_on_error and self._raise_on_response_error(resp):
            continue
        results.append(callback(resp) if callback else resp)
    return results
def get_matches(self, src, src_idx):
    """Get elements equal to the idx'th in src from the other list.

    e.g. get_matches(self, 'l1', 0) will return all elements from self.l2
    matching with self.l1[0]
    """
    if src == 'l1':
        others = self.l2
        # matches holds (l1_index, l2_index) pairs
        is_match = lambda t_idx: (src_idx, t_idx) in self.matches
    elif src == 'l2':
        others = self.l1
        is_match = lambda t_idx: (t_idx, src_idx) in self.matches
    else:
        raise ValueError('Must have one of "l1" or "l2" as src')
    return [(idx, item) for idx, item in enumerate(others) if is_match(idx)]
def SynchronizedClassMethod(*locks_attr_names, **kwargs):
    # pylint: disable=C1801
    """
    A synchronizer decorator for class methods. An AttributeError can be raised
    at runtime if the given lock attribute doesn't exist or if it is None.

    If a parameter ``sorted`` is found in ``kwargs`` and its value is True,
    then the list of locks names will be sorted before locking.

    :param locks_attr_names: A list of the lock(s) attribute(s) name(s) to be
                             used for synchronization
    :return: The decorator method, surrounded with the lock
    """
    # Drop empty/None names up front
    names = [name for name in locks_attr_names if name]
    if not names:
        raise ValueError("The lock names list can't be empty")
    # Sorting defaults to True: acquiring in a stable order reduces the
    # risk of dead lock.
    if kwargs.get("sorted", True):
        names = sorted(names)
    def wrapped(method):
        """
        The wrapping method

        :param method: The wrapped method
        :return: The wrapped method
        :raise AttributeError: The given attribute name doesn't exist
        """
        @functools.wraps(method)
        def synchronized(self, *args, **kw):
            """
            Calls the wrapped method with every named lock held
            """
            # getattr raises AttributeError for a missing attribute
            locks = [getattr(self, attr_name) for attr_name in names]
            held = collections.deque()
            try:
                for attr_name, lock in zip(names, locks):
                    if lock is None:
                        raise AttributeError(
                            "Lock '{0}' can't be None in class {1}".format(
                                attr_name, type(self).__name__
                            )
                        )
                    lock.acquire()
                    # Most-recently acquired first, so release order is
                    # the reverse of acquisition order.
                    held.appendleft(lock)
                return method(self, *args, **kw)
            finally:
                # Release whatever was actually acquired, in all cases
                while held:
                    held.popleft().release()
        return synchronized
    return wrapped
def normalizeFilePath(value):
    """
    Normalizes file path.

    * **value** must be a :ref:`type-string`.
    * Returned value is an unencoded ``unicode`` string
    """
    if isinstance(value, basestring):
        return unicode(value)
    # Non-string inputs are rejected outright.
    raise TypeError("File paths must be strings, not %s."
                    % type(value).__name__)
def pipe_strtransform(context=None, _INPUT=None, conf=None, **kwargs):
    """A string module that applies the configured transformation (e.g.
    'swapcase') to each input string. Loopable.

    NOTE(review): the upstream docstring described token splitting, which
    looks copy-pasted from the tokenizer module -- confirm against callers.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : iterable of items or strings
    conf : {'transformation': {value': <'swapcase'>}}

    Returns
    -------
    _OUTPUT : generator of transformed strings
    """
    # Split the input into per-item work units, honouring module options.
    splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
    parsed = utils.dispatch(splits, *get_dispatch_funcs())
    # Lazily apply the transformation to each parsed item.
    _OUTPUT = starmap(parse_result, parsed)
    return _OUTPUT
def divine_format(text):
    """Guess the format of the notebook, based on its content #148"""
    # Valid JSON notebook => ipynb
    try:
        nbformat.reads(text, as_version=4)
        return 'ipynb'
    except nbformat.reader.NotJSONError:
        pass
    lines = text.splitlines()
    # A jupytext header records the original extension in its metadata.
    for comment in ['', '#'] + _COMMENT_CHARS:
        metadata, _, _, _ = header_to_metadata_and_cell(lines, comment)
        ext = metadata.get('jupytext', {}).get('text_representation', {}).get('extension')
        if ext:
            return ext[1:] + ':' + guess_format(text, ext)[0]
    # No metadata, but ``` on at least one line => markdown
    if any(line == '```' for line in lines):
        return 'md'
    return 'py:' + guess_format(text, '.py')[0]
def construct_channel(self, *args, **kwargs):
    """
    Create ChannelNode and build topic tree.

    Builds a single "Cities!" topic populated from the Wikipedia list of
    largest cities, then returns the channel.
    """
    channel = self.get_channel(*args, **kwargs)  # creates ChannelNode from data in self.channel_info
    city_topic = TopicNode(source_id="List_of_largest_cities", title="Cities!")
    channel.add_child(city_topic)
    # Each subpage linked from the list page becomes a child of the topic.
    add_subpages_from_wikipedia_list(city_topic, "https://en.wikipedia.org/wiki/List_of_largest_cities")
    return channel
def main():
    """
    What will be executed when running as a stand alone program.

    :return: process exit code -- 0 on success, 1 on a YHSM error.
    """
    args = parse_args()
    try:
        s = pyhsm.base.YHSM(device=args.device, debug=args.debug)
        get_entropy(s, args.iterations, args.ratio)
        return 0
    except pyhsm.exception.YHSM_Error as e:
        # NOTE(review): the message is written without a trailing newline --
        # confirm whether that is intentional.
        sys.stderr.write("ERROR: %s" % (e.reason))
        return 1
def generalize_sql(sql):
    """
    Removes most variables from an SQL query and replaces them with X or N for numbers.

    Based on Mediawiki's DatabaseBase::generalizeSQL

    :type sql str|None
    :rtype: str
    """
    if sql is None:
        return None
    # collapse runs of whitespace first
    sql = re.sub(r'\s{2,}', ' ', sql)
    # strip MW-style comments,
    # e.g. /* CategoryDataService::getMostVisited N.N.N.N */
    sql = remove_comments_from_sql(sql)
    # normalize LIKE statements
    sql = normalize_likes(sql)
    # drop escape sequences, collapse quoted literals to X, squash
    # whitespace, then turn every number into N -- order matters.
    for pattern, replacement in (
            (r"\\\\", ''),
            (r"\\'", ''),
            (r'\\"', ''),
            (r"'[^\']*'", 'X'),
            (r'"[^\"]*"', 'X'),
            (r'\s+', ' '),
            (r'-?[0-9]+', 'N'),
    ):
        sql = re.sub(pattern, replacement, sql)
    # WHERE foo IN ('880987','882618',...) / VALUES (...) lists => (XYZ)
    sql = re.sub(r' (IN|VALUES)\s*\([^,]+,[^)]+\)', ' \\1 (XYZ)', sql, flags=re.IGNORECASE)
    return sql.strip()
def show_rules():
    """
    Show the list of available rules and quit.

    Prints a Markdown-style heading plus the reason lines for every rule,
    then exits the process.

    :return: never returns; calls ``sys.exit(0)``
    """
    from rules.loader import import_rules
    from rules.rule_list import all_rules
    rules = import_rules(all_rules)
    print("")
    # NOTE(review): iteritems() is Python 2 only.
    for name, rule in rules.iteritems():
        heading = "{} (`{}`)".format(rule.description(), name)
        print("#### {} ####".format(heading))
        for line in rule.reason():
            print(line)
        print("")
    sys.exit(0)
def assign(self, role):
    '''Assign :class:`Role` ``role`` to this :class:`Subject`. If this
    :class:`Subject` is the :attr:`Role.owner`, this method does nothing.

    :return: the result of ``self.roles.add(role)`` when the role is
        assigned, otherwise ``None``.'''
    # Owners implicitly hold their own roles; only add foreign roles.
    if role.owner_id != self.id:
        return self.roles.add(role)
def submit(self, code: str, results: str ="html", prompt: dict = None) -> dict:
'''
This method is used to submit any SAS code. It returns the Log and Listing as a python dictionary.
code - the SAS statements you want to execute
results - format of results, HTML is default, TEXT is the alternative
prompt - dict of names:flags to prompt for; create macro variables (used in submitted code), then keep or delete
The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete
the macros, or show what you type and keep the macros (they will still be available later)
for example (what you type for pw will not be displayed, user and dsname will):
results = sas.submit(
"""
libname tera teradata server=teracop1 user=&user pw=&pw;
proc print data=tera.&dsname (obs=10); run;
""" ,
prompt = {'user': False, 'pw': True, 'dsname': False}
)
Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
i.e,: results = sas.submit("data a; x=1; run; proc print;run')
print(results['LOG'])
HTML(results['LST'])
'''
prompt = prompt if prompt is not None else {}
#odsopen = b"ods listing close;ods html5 (id=saspy_internal) file=STDOUT options(bitmap_mode='inline') device=svg; ods graphics on / outputfmt=png;\n"
odsopen = b"ods listing close;ods "+self.sascfg.output.encode()+ \
b" (id=saspy_internal) file="+self._tomods1+b" options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style.encode()+ \
b"; ods graphics on / outputfmt=png;\n"
odsclose = b"ods "+self.sascfg.output.encode()+b" (id=saspy_internal) close;ods listing;\n"
ods = True;
mj = b";*\';*\";*/;"
lstf = b''
logf = b''
bail = False
eof = 5
bc = False
done = False
logn = self._logcnt()
logcodei = "%put E3969440A681A24088859985" + logn + ";"
logcodeo = b"\nE3969440A681A24088859985" + logn.encode()
pcodei = ''
pcodeiv = ''
pcodeo = ''
pgm = b''
if self.pid == None:
self._sb.SASpid = None
print("No SAS process attached. SAS process has terminated unexpectedly.")
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='')
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='')
except:
pass
else:
if self.pid == None:
self._sb.SASpid = None
return "No SAS process attached. SAS process has terminated unexpectedly."
#rc = os.waitid(os.P_PID, self.pid, os.WEXITED | os.WNOHANG)
rc = os.waitpid(self.pid, os.WNOHANG)
#if rc != None:
if rc[1]:
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. Pid State= '+str(rc), LST='')
# to cover the possibility of an _asubmit w/ lst output not read; no known cases now; used to be __flushlst__()
# removing this and adding comment in _asubmit to use _getlst[txt] so this will never be necessary; delete later
#while(len(self.stdout.read1(4096)) > 0):
# continue
if results.upper() != "HTML":
ods = False
if len(prompt):
pcodei += 'options nosource nonotes;\n'
pcodeo += 'options nosource nonotes;\n'
for key in prompt:
gotit = False
while not gotit:
var = self.sascfg._prompt('Please enter value for macro variable '+key+' ', pw=prompt[key])
if var is None:
raise KeyboardInterrupt
if len(var) > 0:
gotit = True
else:
print("Sorry, didn't get a value for that variable.")
if prompt[key]:
pcodei += '%let '+key+'='+var+';\n'
pcodeo += '%symdel '+key+';\n'
else:
pcodeiv += '%let '+key+'='+var+';\n'
pcodei += 'options source notes;\n'
pcodeo += 'options source notes;\n'
if ods:
pgm += odsopen
pgm += mj+b'\n'+pcodei.encode()+pcodeiv.encode()
pgm += code.encode()+b'\n'+pcodeo.encode()+b'\n'+mj
if ods:
pgm += odsclose
pgm += b'\n'+logcodei.encode()+b'\n'
self.stdin[0].send(pgm+b'tom says EOL='+logcodeo+b'\n')
while not done:
try:
while True:
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
self._sb.SASpid = None
log = logf.partition(logcodeo)[0]+b'\nSAS process has terminated unexpectedly. RC from wait was: '+str(rc).encode()
return dict(LOG=log.decode(errors='replace'), LST='')
except:
pass
else:
#rc = os.waitid(os.P_PID, self.pid, os.WEXITED | os.WNOHANG)
rc = os.waitpid(self.pid, os.WNOHANG)
#if rc is not None:
if rc[1]:
self.pid = None
self._sb.SASpid = None
log = logf.partition(logcodeo)[0]+b'\nSAS process has terminated unexpectedly. Pid State= '+str(rc).encode()
return dict(LOG=log.decode(errors='replace'), LST='')
if bail:
if lstf.count(logcodeo) >= 1:
x = lstf.rsplit(logcodeo)
lstf = x[0]
if len(x[1]) > 7 and b"_tomods" in x[1]:
self._tomods1 = x[1]
#print("Tomods is now "+ self._tomods1.decode())
break
try:
lst = self.stdout[0].recv(4096)
except (BlockingIOError):
lst = b''
if len(lst) > 0:
#print("LIST = \n"+lst)
lstf += lst
else:
sleep(0.1)
try:
log = self.stderr[0].recv(4096)
except (BlockingIOError):
log = b''
if len(log) > 0:
#print("LOG = \n"+log)
logf += log
if logf.count(logcodeo) >= 1:
bail = True
if not bail and bc:
self.stdin[0].send(odsclose+logcodei.encode()+b'tom says EOL='+logcodeo+b'\n')
bc = False
done = True
except (ConnectionResetError):
rc = 0
if os.name == 'nt':
try:
rc = self.pid.wait()
except:
pass
else:
rc = os.waitpid(self.pid, 0)
self.pid = None
self._sb.SASpid = None
log =logf.partition(logcodeo)[0]+b'\nConnection Reset: SAS process has terminated unexpectedly. Pid State= '+str(rc).encode()
return dict(LOG=log.decode(errors='replace'), LST='')
except (KeyboardInterrupt, SystemExit):
print('Exception caught!')
ll = self._breakprompt(logcodeo)
if ll.get('ABORT', False):
return ll
logf += ll['LOG']
lstf += ll['LST']
bc = ll['BC']
if not bc:
print('Exception handled :)\n')
else:
print('Exception ignored, continuing to process...\n')
self.stdin[0].send(odsclose+logcodei.encode()+b'tom says EOL='+logcodeo+b'\n')
try:
lstf = lstf.decode()
except UnicodeDecodeError:
try:
lstf = lstf.decode(self.sascfg.encoding)
except UnicodeDecodeError:
lstf = lstf.decode(errors='replace')
logf = logf.decode(errors='replace')
trip = lstf.rpartition("/*]]>*/")
if len(trip[1]) > 0 and len(trip[2]) < 100:
lstf = ''
self._log += logf
final = logf.partition(logcodei)
z = final[0].rpartition(chr(10))
prev = '%08d' % (self._log_cnt - 1)
zz = z[0].rpartition("\nE3969440A681A24088859985" + prev +'\n')
logd = zz[2].replace(mj.decode(), '')
lstd = lstf.replace(chr(12), chr(10)).replace('<body class="c body">',
'<body class="l body">').replace("font-size: x-small;",
"font-size: normal;")
return dict(LOG=logd, LST=lstd) | This method is used to submit any SAS code. It returns the Log and Listing as a python dictionary.
code - the SAS statements you want to execute
results - format of results, HTML is default, TEXT is the alternative
prompt - dict of names:flags to prompt for; create macro variables (used in submitted code), then keep or delete
The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete
the macros, or show what you type and keep the macros (they will still be available later)
for example (what you type for pw will not be displayed, user and dsname will):
results = sas.submit(
"""
libname tera teradata server=teracop1 user=&user pw=&pw;
proc print data=tera.&dsname (obs=10); run;
""" ,
prompt = {'user': False, 'pw': True, 'dsname': False}
)
Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
i.e,: results = sas.submit("data a; x=1; run; proc print;run')
print(results['LOG'])
HTML(results['LST']) |
def balanced_accuracy(y_true, y_pred):
    """Default scoring function: balanced accuracy.

    Balanced accuracy computes each class' accuracy on a per-class basis
    using a one-vs-rest encoding, then computes an unweighted average of
    the class accuracies.

    Parameters
    ----------
    y_true: numpy.ndarray {n_samples}
        True class labels
    y_pred: numpy.ndarray {n_samples}
        Predicted class labels by the estimator

    Returns
    -------
    fitness: float
        Returns a float value indicating the individual's balanced accuracy
        0.5 is as good as chance, and 1.0 is perfect predictive accuracy
    """
    # Union of classes seen in either vector (one-vs-rest over all of them).
    all_classes = list(set(np.append(y_true, y_pred)))
    all_class_accuracies = []
    for this_class in all_classes:
        this_class_sensitivity = 0.
        this_class_specificity = 0.
        if sum(y_true == this_class) != 0:
            this_class_sensitivity = \
                float(sum((y_pred == this_class) & (y_true == this_class))) /\
                float(sum((y_true == this_class)))
            # Guard against ZeroDivisionError when every true label belongs
            # to this class (the one-vs-rest "rest" set is empty); the
            # specificity then stays at its 0.0 default.
            n_rest = float(sum((y_true != this_class)))
            if n_rest != 0:
                this_class_specificity = \
                    float(sum((y_pred != this_class) & (y_true != this_class))) / n_rest
        this_class_accuracy = (this_class_sensitivity + this_class_specificity) / 2.
        all_class_accuracies.append(this_class_accuracy)
    return np.mean(all_class_accuracies)
Balanced accuracy computes each class' accuracy on a per-class basis using a
one-vs-rest encoding, then computes an unweighted average of the class accuracies.
Parameters
----------
y_true: numpy.ndarray {n_samples}
True class labels
y_pred: numpy.ndarray {n_samples}
Predicted class labels by the estimator
Returns
-------
fitness: float
Returns a float value indicating the individual's balanced accuracy
0.5 is as good as chance, and 1.0 is perfect predictive accuracy |
def tweet(self, status, images):
    """Publish *status* (tagged ``#soyprice``) with optional *images*.

    The actual Twitter API call is currently disabled; the method only
    prints the message that would be posted, together with its length.
    """
    template = "%s #soyprice"
    # Disabled posting logic, kept for reference:
    # if not images:
    #     self.twitter.update_status(status=template % status)
    # else:
    #     medias = [self.upload_media(i) for i in images]
    #     self.twitter.post('/statuses/update_with_media',
    #                       params={'status': template % status,
    #                               'media': medias[0]})
    message = template % status
    # Python 3 print function (the original used the Python 2 print statement).
    print(message, len(message))
self.twitter.update_status(status=template % status)
else:
medias = map(lambda i: self.upload_media(i), images)
self.twitter.post('/statuses/update_with_media',
params={'status': template % status,
'media': medias[0]}) |
def _get_element_by_names(source, names):
    """
    Dig into a set of nested dicts/lists following a pre-split path and
    return the element found there.

    Args:
        source (dict): set of nested objects in which the data will be searched
        names (list): list of attribute names / list indices (as strings)

    Returns:
        The element at the given path, ``source`` itself when the path is
        exhausted (or its head is empty), or ``None`` when the path cannot
        be followed.
    """
    if source is None:
        return None
    if not names:
        # Path exhausted: current object is the answer.
        return source
    head, *rest = names
    if isinstance(source, dict) and head in source:
        return _get_element_by_names(source[head], rest)
    if isinstance(source, list) and head.isdigit():
        return _get_element_by_names(source[int(head)], rest)
    if not head:
        # An empty path segment leaves the current object untouched.
        return source
    # Path segment cannot be resolved against this object.
    return None
the specified element.
Args:
source (dict): set of nested objects in which the data will be searched
path (list): list of attribute names |
def tryCComment(self, block):
    """C comment checking. If the previous line begins with a "/*" or a "* ", then
    return its leading white spaces + ' *' + the white spaces after the *
    return: filler string or null, if not in a C comment
    """
    indentation = None

    # Find the closest preceding non-empty line; nothing to align to otherwise.
    prevNonEmptyBlock = self._prevNonEmptyBlock(block)
    if not prevNonEmptyBlock.isValid():
        return None

    prevNonEmptyBlockText = prevNonEmptyBlock.text()

    if prevNonEmptyBlockText.endswith('*/'):
        # Previous line closes a C comment: align with the line that opened it.
        try:
            foundBlock, notUsedColumn = self.findTextBackward(prevNonEmptyBlock, prevNonEmptyBlock.length(), '/*')
        except ValueError:
            foundBlock = None

        if foundBlock is not None:
            dbg("tryCComment: success (1) in line %d" % foundBlock.blockNumber())
            return self._lineIndent(foundBlock.text())

    if prevNonEmptyBlock != block.previous():
        # inbetween was an empty line, so do not copy the "*" character
        return None

    blockTextStripped = block.text().strip()
    prevBlockTextStripped = prevNonEmptyBlockText.strip()

    if prevBlockTextStripped.startswith('/*') and not '*/' in prevBlockTextStripped:
        # Previous line opens a (still unclosed) C comment: continue it.
        indentation = self._blockIndent(prevNonEmptyBlock)
        if CFG_AUTO_INSERT_STAR:
            # only add '*', if there is none yet.
            indentation += ' '
            if not blockTextStripped.endswith('*'):
                indentation += '*'
            secondCharIsSpace = len(blockTextStripped) > 1 and blockTextStripped[1].isspace()
            if not secondCharIsSpace and \
                    not blockTextStripped.endswith("*/"):
                indentation += ' '
        dbg("tryCComment: success (2) in line %d" % block.blockNumber())
        return indentation
    elif prevBlockTextStripped.startswith('*') and \
            (len(prevBlockTextStripped) == 1 or prevBlockTextStripped[1].isspace()):
        # Previous line is a comment-continuation line ("* ..."):
        # in theory, we could search for opening /*, and use its indentation
        # and then one alignment character. Let's not do this for now, though.
        indentation = self._lineIndent(prevNonEmptyBlockText)
        # only add '*', if there is none yet.
        if CFG_AUTO_INSERT_STAR and not blockTextStripped.startswith('*'):
            indentation += '*'
            if len(blockTextStripped) < 2 or not blockTextStripped[1].isspace():
                indentation += ' '
        dbg("tryCComment: success (2) in line %d" % block.blockNumber())
        return indentation

    # Not inside a C comment.
    return None
return its leading white spaces + ' *' + the white spaces after the *
return: filler string or null, if not in a C comment |
def parse(self, group_by_stmt):
    """
    Extract the data resolution of a query in seconds,
    e.g. "group by time(99s)" => 99.

    :param group_by_stmt: A raw InfluxDB group by statement
    """
    if not group_by_stmt:
        # No grouping given: fall back to the coarsest supported resolution.
        return Resolution.MAX_RESOLUTION

    match = self.GROUP_BY_TIME_PATTERN.match(group_by_stmt)
    if match is None:
        return None

    seconds = self.convert_to_seconds(int(match.group(1)), match.group(2))
    # We can't have a higher resolution than the max resolution.
    return max(seconds, Resolution.MAX_RESOLUTION)
E.g. "group by time(99s)" => 99
:param group_by_stmt: A raw InfluxDB group by statement |
def element(self, inp=None, order=None, **kwargs):
    """Create an element from ``inp`` or from scratch.

    Parameters
    ----------
    inp : optional
        Input used to initialize the new element:

        - ``None``: an empty element is created with no guarantee of
          its state (memory allocation only), using ``order`` as
          storage order if provided, otherwise `default_order`.
        - array-like: an element wrapping a `tensor` is created, with
          a copy avoided whenever possible (matching `shape`, `dtype`,
          `impl` and, if ``order`` is given, contiguousness required).
        - callable: a new element is created by sampling the function
          using the `sampling` operator.
    order : {None, 'C', 'F'}, optional
        Storage order of the returned element. For ``'C'`` and ``'F'``,
        contiguous memory in the respective ordering is enforced.
        The default ``None`` enforces no contiguousness.
    vectorized : bool, optional
        If ``True`` (default), assume that a provided callable ``inp``
        supports vectorized evaluation; otherwise wrap it in a vectorizer.
    kwargs :
        Additional arguments passed on to `sampling` when called on
        ``inp``, in the form ``sampling(inp, **kwargs)``. This can be
        used e.g. for functions with parameters.

    Returns
    -------
    element : `DiscreteLpElement`
        The discretized element, calculated as ``sampling(inp)`` or
        ``tspace.element(inp)``, tried in this order.

    See Also
    --------
    sampling : create a discrete element from a non-discretized one
    """
    if inp is None:
        # Allocation only; element contents are undefined.
        return self.element_type(self, self.tspace.element(order=order))

    if inp in self and order is None:
        # Already an element of this space with no ordering constraint.
        return inp

    if inp in self.tspace and order is None:
        # Raw tensor of the underlying tensor space: just wrap it.
        return self.element_type(self, inp)

    if callable(inp):
        vectorized = kwargs.pop('vectorized', True)
        # Lift the function into the function space, then discretize it.
        func_elem = self.fspace.element(inp, vectorized=vectorized)
        values = self.sampling(func_elem, **kwargs)
        return self.element_type(self, self.tspace.element(values, order=order))

    # Sequence-type input.
    return self.element_type(self, self.tspace.element(inp, order=order))
Parameters
----------
inp : optional
Input used to initialize the new element. The following options
are available:
- ``None``: an empty element is created with no guarantee of
its state (memory allocation only). The new element will
use ``order`` as storage order if provided, otherwise
`default_order`.
- array-like: an element wrapping a `tensor` is created,
where a copy is avoided whenever possible. This usually
requires correct `shape`, `dtype` and `impl` if applicable,
and if ``order`` is provided, also contiguousness in that
ordering. See the ``element`` method of `tspace` for more
information.
If any of these conditions is not met, a copy is made.
- callable: a new element is created by sampling the function
using the `sampling` operator.
order : {None, 'C', 'F'}, optional
Storage order of the returned element. For ``'C'`` and ``'F'``,
contiguous memory in the respective ordering is enforced.
The default ``None`` enforces no contiguousness.
vectorized : bool, optional
If ``True``, assume that a provided callable ``inp`` supports
vectorized evaluation. Otherwise, wrap it in a vectorizer.
Default: ``True``.
kwargs :
Additional arguments passed on to `sampling` when called
on ``inp``, in the form ``sampling(inp, **kwargs)``.
This can be used e.g. for functions with parameters.
Returns
-------
element : `DiscreteLpElement`
The discretized element, calculated as ``sampling(inp)`` or
``tspace.element(inp)``, tried in this order.
Examples
--------
Elements can be created from array-like objects that represent
an already discretized function:
>>> space = odl.uniform_discr(-1, 1, 4)
>>> space.element([1, 2, 3, 4])
uniform_discr(-1.0, 1.0, 4).element([ 1., 2., 3., 4.])
>>> vector = odl.rn(4).element([0, 1, 2, 3])
>>> space.element(vector)
uniform_discr(-1.0, 1.0, 4).element([ 0., 1., 2., 3.])
On the other hand, non-discretized objects like Python functions
can be discretized "on the fly":
>>> space.element(lambda x: x * 2)
uniform_discr(-1.0, 1.0, 4).element([-1.5, -0.5, 0.5, 1.5])
This works also with parameterized functions, however only
through keyword arguments (not positional arguments with
defaults):
>>> def f(x, c=0.0):
... return np.maximum(x, c)
...
>>> space = odl.uniform_discr(-1, 1, 4)
>>> space.element(f, c=0.5)
uniform_discr(-1.0, 1.0, 4).element([ 0.5 , 0.5 , 0.5 , 0.75])
See Also
--------
sampling : create a discrete element from a non-discretized one |
def find_nearby_pores(self, pores, r, flatten=False, include_input=False):
    r"""
    Find all pores within a given radial distance of the input pore(s)
    regardless of whether or not they are toplogically connected.

    Parameters
    ----------
    pores : array_like
        The list of pores for whom nearby neighbors are to be found
    r : scalar
        The maximum radius within which the search should be performed
    include_input : bool
        Controls whether the input pores should be included in the returned
        list. The default is ``False``.
    flatten : bool
        If true returns a single list of all pores that match the criteria,
        otherwise returns an array containing a sub-array for each input
        pore, where each sub-array contains the pores that are nearby to
        each given input pore. The default is False.

    Returns
    -------
    A list of pores which are within the given spatial distance. If a
    list of N pores is supplied, then a an N-long list of such lists is
    returned. The returned lists each contain the pore for which the
    neighbors were sought.

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[3, 3, 3])
    >>> Ps = pn.find_nearby_pores(pores=[0, 1], r=1)
    >>> print(Ps)
    [array([3, 9]), array([ 2,  4, 10])]
    >>> Ps = pn.find_nearby_pores(pores=[0, 1], r=0.5)
    >>> print(Ps)
    [array([], dtype=int64), array([], dtype=int64)]
    >>> Ps = pn.find_nearby_pores(pores=[0, 1], r=1, flatten=True)
    >>> print(Ps)
    [ 2  3  4  9 10]
    """
    pores = self._parse_indices(pores)
    # Handle an empty array if given
    if sp.size(pores) == 0:
        return sp.array([], dtype=sp.int64)
    if r <= 0:
        raise Exception('Provided distances should be greater than 0')
    # Create kdTree objects: one over all pore coordinates, one over
    # just the input pores.
    # NOTE(review): the `sp.*` numpy aliases used here (size, array,
    # concatenate, unique, in1d, ...) were removed from modern SciPy --
    # confirm the pinned SciPy version still provides them.
    kd = sptl.cKDTree(self['pore.coords'])
    kd_pores = sptl.cKDTree(self['pore.coords'][pores])
    # Perform search: per input pore, indices (into the full network)
    # of all pores within radius r.
    Ps_within_r = kd_pores.query_ball_tree(kd, r=r)
    # Remove self from each list (each input pore is within r of itself)
    for i in range(len(Ps_within_r)):
        Ps_within_r[i].remove(pores[i])
    # Convert to flattened list by default
    temp = sp.concatenate((Ps_within_r))
    Pn = sp.unique(temp).astype(sp.int64)
    # Remove inputs if necessary
    if include_input is False:
        Pn = Pn[~sp.in1d(Pn, pores)]
    # Convert list of lists to a list of nd-arrays, keeping only the
    # neighbors that survived the de-dup / include_input filtering above.
    if flatten is False:
        if len(Pn) == 0:  # Deal with no nearby neighbors
            Pn = [sp.array([], dtype=sp.int64) for i in pores]
        else:
            # Boolean mask over pore indices marking retained neighbors
            mask = sp.zeros(shape=sp.amax((Pn.max(), pores.max()))+1,
                            dtype=bool)
            mask[Pn] = True
            temp = []
            for item in Ps_within_r:
                temp.append(sp.array(item, dtype=sp.int64)[mask[item]])
            Pn = temp
    return Pn
return Pn | r"""
Find all pores within a given radial distance of the input pore(s)
regardless of whether or not they are toplogically connected.
Parameters
----------
pores : array_like
The list of pores for whom nearby neighbors are to be found
r : scalar
The maximum radius within which the search should be performed
include_input : bool
Controls whether the input pores should be included in the returned
list. The default is ``False``.
flatten : bool
If true returns a single list of all pores that match the criteria,
otherwise returns an array containing a sub-array for each input
pore, where each sub-array contains the pores that are nearby to
each given input pore. The default is False.
Returns
-------
A list of pores which are within the given spatial distance. If a
list of N pores is supplied, then a an N-long list of such lists is
returned. The returned lists each contain the pore for which the
neighbors were sought.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[3, 3, 3])
>>> Ps = pn.find_nearby_pores(pores=[0, 1], r=1)
>>> print(Ps)
[array([3, 9]), array([ 2, 4, 10])]
>>> Ps = pn.find_nearby_pores(pores=[0, 1], r=0.5)
>>> print(Ps)
[array([], dtype=int64), array([], dtype=int64)]
>>> Ps = pn.find_nearby_pores(pores=[0, 1], r=1, flatten=True)
>>> print(Ps)
[ 2 3 4 9 10] |
def parse_roadmap_gwas(fn):
    """
    Read Roadmap GWAS file and filter for unique, significant (p < 1e-5)
    SNPs.

    Parameters
    ----------
    fn : str
        Path to (subset of) GRASP database.

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe with de-duplicated, significant SNPs. The index is of
        the form chrom:pos where pos is the one-based position of the SNP. The
        columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
        not actually an RSID. chrom, start, end make a zero-based bed file with
        the SNP coordinates.
    """
    # pd.read_table was removed from pandas; read_csv(sep='\t') is equivalent.
    df = pd.read_csv(fn, sep='\t', low_memory=False,
                     names=['chrom', 'start', 'end', 'rsid', 'pvalue'])
    df = df[df.pvalue < 1e-5]
    # DataFrame.sort was removed in pandas 0.20; sort_values is the
    # replacement. Sorting by pvalue last means drop_duplicates keeps the
    # most significant SNP per (chrom, start).
    df = df.sort_values(by=['chrom', 'start', 'pvalue'])
    df = df.drop_duplicates(subset=['chrom', 'start'])
    df = df[df['chrom'] != 'chrY']
    df.index = df['chrom'].astype(str) + ':' + df['end'].astype(str)
    return df
SNPs.
Parameters
----------
fn : str
Path to (subset of) GRASP database.
Returns
-------
df : pandas.DataFrame
Pandas dataframe with de-duplicated, significant SNPs. The index is of
the form chrom:pos where pos is the one-based position of the SNP. The
columns are chrom, start, end, rsid, and pvalue. rsid may be empty or
not actually an RSID. chrom, start, end make a zero-based bed file with
the SNP coordinates. |
def is_up_url(url, allow_redirects=False, timeout=5):
    r""" Check URL to see if it is a valid web page, return the redirected location if it is

    Returns:
      None if ConnectionError
      False if url is invalid (any HTTP error code)
      cleaned up URL (following redirects and possibly adding HTTP schema "http://")

    >>> is_up_url("duckduckgo.com")  # a more private, less manipulative search engine
    'https://duckduckgo.com/'
    >>> urlisup = is_up_url("totalgood.org")
    >>> not urlisup or str(urlisup).startswith('http')
    True
    >>> urlisup = is_up_url("wikipedia.org")
    >>> str(urlisup).startswith('http')
    True
    >>> 'wikipedia.org' in str(urlisup)
    True
    >>> bool(is_up_url('8158989668202919656'))
    False
    >>> is_up_url('invalidurlwithoutadomain')
    False
    """
    # `basestring` does not exist on Python 3; a URL must be a str containing
    # at least one dot to be worth probing.
    if not isinstance(url, str) or '.' not in url:
        return False
    normalized_url = prepend_http(url)
    session = requests.Session()
    session.mount(url, HTTPAdapter(max_retries=2))
    try:
        resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout)
    except ConnectionError:
        return None
    except Exception:
        # Narrowed from a bare `except:`; any other failure (timeout, DNS
        # error, invalid URL) is also treated as "couldn't reach it".
        return None
    if resp.status_code in (301, 302, 307) or resp.headers.get('location', None):
        return resp.headers.get('location', None)  # return redirected URL
    elif 100 <= resp.status_code < 400:
        return normalized_url  # return the original URL that was requested/visited
    else:
        return False
Returns:
None if ConnectionError
False if url is invalid (any HTTP error code)
cleaned up URL (following redirects and possibly adding HTTP schema "http://")
>>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine
'https://duckduckgo.com/'
>>> urlisup = is_up_url("totalgood.org")
>>> not urlisup or str(urlisup).startswith('http')
True
>>> urlisup = is_up_url("wikipedia.org")
>>> str(urlisup).startswith('http')
True
>>> 'wikipedia.org' in str(urlisup)
True
>>> bool(is_up_url('8158989668202919656'))
False
>>> is_up_url('invalidurlwithoutadomain')
False |
def identifier_director(**kwargs):
    """Direct how to handle the identifier element.

    Keyword Args:
        ark: ARK identifier string.
        domain_name: Domain used to build a permalink URL.
        scheme: URL scheme for permalinks (defaults to 'http').
        qualifier: Identifier qualifier ('ark', 'permalink', or other).
        content: Fallback identifier content.

    Returns:
        DCIdentifier built from the resolved content.
    """
    ark = kwargs.get('ark', None)
    domain_name = kwargs.get('domain_name', None)
    # Set default scheme if it is None or is not supplied.
    scheme = kwargs.get('scheme') or 'http'
    qualifier = kwargs.get('qualifier', None)
    content = kwargs.get('content', '')
    # See if the ark and domain name were given.
    if ark and qualifier == 'ark':
        content = 'ark: %s' % ark
    if domain_name and ark and qualifier == 'permalink':
        # Create the permalink URL.
        if not domain_name.endswith('/'):
            domain_name += '/'
        permalink_url = '%s://%s%s' % (scheme, domain_name, ark)
        # Make sure it has a trailing slash.
        if not permalink_url.endswith('/'):
            permalink_url += '/'
        content = permalink_url
    else:
        if qualifier:
            # str.lower() replaces string.lower(), which was removed in
            # Python 3.
            content = '%s: %s' % (qualifier.lower(), content)
    return DCIdentifier(content=content)
def _flush_aggregated_objects(self):
    """ method inserts aggregated objects into MongoDB
    :return number_of_aggregated_objects """
    if not self.aggregated_objects:
        # nothing to do
        return 0

    flushed_count = len(self.aggregated_objects)
    self.logger.info('Aggregated {0} documents. Performing flush.'.format(flushed_count))
    for key, document in self.aggregated_objects.items():
        mongo_pk = self._mongo_sink_key(*key)
        self.ds.update(self.sink, mongo_pk, document)
    self.logger.info('Flush successful.')

    # Drop the flushed buffer and start from a clean dictionary.
    del self.aggregated_objects
    self.aggregated_objects = dict()
    gc.collect()
    return flushed_count
:return number_of_aggregated_objects |
def create_add_on(self, add_on):
    """Make the given `AddOn` available to subscribers on this plan."""
    add_ons_url = urljoin(self._url, '/add_ons')
    return add_on.post(add_ons_url)
def IsFile(path):
    '''
    :param unicode path:
        Path to a file (local or ftp)

    :raises NotImplementedProtocol:
        If checking for a non-local, non-ftp file

    :rtype: bool
    :returns:
        True if the file exists

    .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
    '''
    from six.moves.urllib.parse import urlparse

    parsed_url = urlparse(path)
    if not _UrlIsLocal(parsed_url):
        # Both 'ftp' and any other remote scheme are unsupported here.
        from ._exceptions import NotImplementedProtocol
        raise NotImplementedProtocol(parsed_url.scheme)

    # Local path: resolve symlinks before the existence check.
    if IsLink(path):
        return IsFile(ReadLink(path))
    return os.path.isfile(path)
Path to a file (local or ftp)
:raises NotImplementedProtocol:
If checking for a non-local, non-ftp file
:rtype: bool
:returns:
True if the file exists
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information |
def get_file_contents(self, project, provider_name, service_endpoint_id=None, repository=None, commit_or_branch=None, path=None, **kwargs):
    """GetFileContents.
    [Preview API] Gets the contents of a file in the given source code repository.
    :param str project: Project ID or project name
    :param str provider_name: The name of the source provider.
    :param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
    :param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
    :param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
    :param str path: The path to the file to retrieve, relative to the root of the repository.
    :rtype: object
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if provider_name is not None:
        route_values['providerName'] = self._serialize.url('provider_name', provider_name, 'str')

    # Serialize only the optional query parameters that were supplied.
    query_parameters = {}
    optional_params = (
        ('serviceEndpointId', 'service_endpoint_id', service_endpoint_id),
        ('repository', 'repository', repository),
        ('commitOrBranch', 'commit_or_branch', commit_or_branch),
        ('path', 'path', path),
    )
    for wire_name, py_name, value in optional_params:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(py_name, value, 'str')

    response = self._send(http_method='GET',
                          location_id='29d12225-b1d9-425f-b668-6c594a981313',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          accept_media_type='text/plain')
    callback = kwargs.get("callback")
    return self._client.stream_download(response, callback=callback)
[Preview API] Gets the contents of a file in the given source code repository.
:param str project: Project ID or project name
:param str provider_name: The name of the source provider.
:param str service_endpoint_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TFVC or TFGit.
:param str repository: If specified, the vendor-specific identifier or the name of the repository to get branches. Can only be omitted for providers that do not support multiple repositories.
:param str commit_or_branch: The identifier of the commit or branch from which a file's contents are retrieved.
:param str path: The path to the file to retrieve, relative to the root of the repository.
:rtype: object |
def _process_results():
    """Process the results from an Async job.

    Picks the 'success' or 'error' callback depending on whether the job's
    payload is an AsyncException; when an error occurred and no error
    callback exists, re-raises the captured exception.
    """
    # `async` is a reserved keyword since Python 3.7; use a different name.
    job = get_current_async()
    callbacks = job.get_callbacks()
    if not isinstance(job.result.payload, AsyncException):
        callback = callbacks.get('success')
    else:
        callback = callbacks.get('error')
        if not callback:
            # Python 3 form of the old three-argument raise: re-raise the
            # captured exception with its original traceback.
            raise job.result.payload.exception.with_traceback(
                job.result.payload.traceback[2])
    return _execute_callback(job, callback)
def timesince(self, now=None):
    """
    Shortcut for the ``django.utils.timesince.timesince`` function of the
    current timestamp, with non-breaking spaces normalized to plain spaces.
    """
    text = djtimesince(self.timestamp, now)
    # djtimesince separates units with U+00A0; swap it for a regular space.
    return text.encode('utf8').replace(b'\xc2\xa0', b' ').decode('utf8')
current timestamp. |
def matching_fpaths(dpath_list, include_patterns, exclude_dirs=(),
                    greater_exclude_dirs=(), exclude_patterns=(),
                    recursive=True):
    r"""
    walks dpath lists returning all directories that match the requested
    pattern.

    Args:
        dpath_list (list or str): root directories to walk
        include_patterns (list): fnmatch patterns a filename must match
        exclude_dirs (sequence): directory basenames to skip
        greater_exclude_dirs (sequence): directory names excluded anywhere
            in the path relative to the walked root
        exclude_patterns (sequence): fnmatch patterns that reject a filename
        recursive (bool): if False, only the top level of each root is walked

    Yields:
        str: matching file paths

    References:
        # TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs
        http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> import utool as ut
        >>> dpath_list = [dirname(dirname(ut.__file__))]
        >>> include_patterns = get_standard_include_patterns()
        >>> exclude_dirs = ['_page']
        >>> greater_exclude_dirs = get_standard_exclude_dnames()
        >>> recursive = True
        >>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs,
        >>>                             greater_exclude_dirs, recursive)
        >>> result = list(fpath_gen)
        >>> print('\n'.join(result))
    """
    # Defaults changed from mutable lists ([]) to immutable tuples to avoid
    # the shared-mutable-default-argument pitfall; behavior is unchanged.
    if isinstance(dpath_list, six.string_types):
        dpath_list = [dpath_list]
    for dpath in dpath_list:
        for root, dname_list, fname_list in os.walk(dpath):
            # Skip roots whose relative path contains a greater-excluded dir
            subdirs = pathsplit_full(relpath(root, dpath))
            # HACK:
            if any(dir_ in greater_exclude_dirs for dir_ in subdirs):
                continue
            # Skip roots whose basename is excluded
            if basename(root) in exclude_dirs:
                continue
            _match = fnmatch.fnmatch
            for name in fname_list:
                # yield filepaths that are included ...
                if any(_match(name, pat) for pat in include_patterns):
                    # ... and not excluded
                    if not any(_match(name, pat) for pat in exclude_patterns):
                        yield join(root, name)
            if not recursive:
                break
walks dpath lists returning all directories that match the requested
pattern.
Args:
dpath_list (list):
include_patterns (str):
exclude_dirs (None):
recursive (bool):
References:
# TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs
http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath_list = [dirname(dirname(ut.__file__))]
>>> include_patterns = get_standard_include_patterns()
>>> exclude_dirs = ['_page']
>>> greater_exclude_dirs = get_standard_exclude_dnames()
>>> recursive = True
>>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs,
>>> greater_exclude_dirs, recursive)
>>> result = list(fpath_gen)
>>> print('\n'.join(result)) |
def resplit(prev, pattern, *args, **kw):
    """Split every string from the previous pipe by a regular expression.

    Use the 'maxsplit' keyword argument to limit the number of splits
    (0, the default, means unlimited); any remaining positional/keyword
    arguments are forwarded to ``re.compile``.

    :param prev: The previous iterator of pipe.
    :type prev: Pipe
    :param pattern: The pattern which used to split string.
    :type pattern: str|unicode
    """
    maxsplit = kw.pop('maxsplit', 0)
    compiled = re.compile(pattern, *args, **kw)
    for item in prev:
        yield compiled.split(item, maxsplit=maxsplit)
def send_request(self, request):
    '''Send a Request.  Return a (message, event) pair.

    The message is an unframed message to send over the network.
    Wait on the event for the response; which will be in the
    "result" attribute.

    Raises: ProtocolError if the request violates the protocol
    in some way..
    '''
    # Draw the next request id and build both halves of the pair from it.
    req_id = next(self._id_counter)
    message = self._protocol.request_message(request, req_id)
    event = self._event(request, req_id)
    return message, event
def vars(self):
    """Alternative naming, you can use `node.vars.name` instead of `node.v_name`"""
    # Lazily build the vars accessor on first use, then reuse the cached one.
    cached = self._vars
    if cached is None:
        cached = NNTreeNodeVars(self)
        self._vars = cached
    return cached
def _dt_to_epoch_ns(dt_series):
    """Convert a timeseries into an Int64Index of nanoseconds since the epoch.

    Parameters
    ----------
    dt_series : pd.Series
        The timeseries to convert.

    Returns
    -------
    idx : pd.Int64Index
        The index converted to nanoseconds since the epoch.
    """
    idx = pd.to_datetime(dt_series.values)
    # Naive timestamps are assumed to already be UTC; aware ones are converted.
    if idx.tzinfo is None:
        idx = idx.tz_localize('UTC')
    else:
        idx = idx.tz_convert('UTC')
    return idx.view(np.int64)
def to_one(dest_class, type=RelationType.DIRECT, resource_classes=None,
           reverse=None, reverse_type=RelationType.DIRECT,
           writable=False):
    """Create a one to one relation to a given target :class:`Resource`.
Args:
dest_class(Resource): The *target* class for the relationship
Keyword Args:
type(RelationType): The relationship approach to use.
reverse(to_may or to_one): An *optional* reverse relationship.
reverse_type(RelationType): The reverse relationship approach.
resource_classes(Resource): The kinds of Resources to expect
in the relationship
Returns:
A builder function which, given a source class creates a
one-to-one relationship with the target
A one to one relationship means that you can get the associated
target object from the object on which the ``to_one`` was declared.
.. code-block:: python
@to_one(Organization)
def User(Resource):
pass
Declares that a User is associated with *one* Organization. The
decorator automatically adds a method to fetch the associated
organization:
.. code-block:: python
org = user.organization()
"""
    # Class decorator that attaches the generated accessor method(s) to cls.
    def method_builder(cls):
        dest_resource_type = dest_class._resource_type()
        # e.g. resource type "billing-plan" -> method name "billing_plan"
        dest_method_name = dest_resource_type.replace('-', '_')
        doc_variables = {
            'from_class': cls.__name__,
            'to_class': dest_class.__name__,
            'to_name': dest_method_name
        }
        fetch_method_doc = """Fetch the {2} associated with this :class:`{0}`.
Returns:
{1}: The :class:`{1}` of this :class:`{0}`
""".format(cls.__name__, dest_class.__name__, dest_method_name)
        # Resolve the relationship from documents already fetched via an
        # ``include`` request, avoiding a second HTTP round trip.
        def _fetch_relationship_included(self):
            session = self._session
            include = self._include
            if include is None or dest_class not in include:
                # You requested an included relationship that was
                # not originally included
                error = "{} was not included".format(dest_class.__name__)
                raise AttributeError(error)
            included = self._included.get(dest_resource_type)
            if len(included) == 0:
                return None
            mk_one = dest_class._mk_one(session,
                                        resource_classes=resource_classes)
            return mk_one({
                'data': included[0]
            })
        # DIRECT strategy: dedicated GET on /<resource>/<id>/<relation>.
        def fetch_relationship_direct(self, use_included=False):
            if use_included:
                return _fetch_relationship_included(self)
            session = self._session
            id = None if self.is_singleton() else self.id
            url = session._build_url(self._resource_path(), id,
                                     dest_resource_type)
            process = dest_class._mk_one(session,
                                         resource_classes=resource_classes)
            return session.get(url, CB.json(200, process))
        # INCLUDE strategy: GET the owner with ?include= and extract the
        # related document from the compound response.
        def fetch_relationship_include(self, use_included=False):
            if use_included:
                return _fetch_relationship_included(self)
            session = self._session
            id = None if self.is_singleton() else self.id
            url = session._build_url(self._resource_path(), id)
            params = build_request_include([dest_class], None)
            def _process(json):
                included = json.get('included')
                if len(included) == 0:
                    return None
                mk_one = dest_class._mk_one(session,
                                            resource_classes=resource_classes)
                return mk_one({
                    'data': included[0]
                })
            return session.get(url, CB.json(200, _process),
                               params=params)
        if type == RelationType.DIRECT:
            fetch_relationship = fetch_relationship_direct
        elif type == RelationType.INCLUDE:
            fetch_relationship = fetch_relationship_include
        else:  # pragma: no cover
            raise ValueError("Invalid RelationType: {}".format(type))
        fetch_relationship.__doc__ = fetch_method_doc
        def update_method(self, resource):
            """Set the {to_name} for this :class:`{from_class}`.
Args:
resource: The :class:`{to_class}` to set
Returns:
True if successful
"""
            session, url, json = _build_relatonship(self, dest_resource_type,
                                                    resource)
            return session.patch(url, CB.boolean(200), json=json)
        methods = [(dest_method_name, fetch_relationship)]
        if writable:
            # Only expose the setter when the relationship is writable.
            methods.extend([
                ('update_{}'.format(dest_method_name), update_method)
            ])
        for name, method in methods:
            method.__doc__ = method.__doc__.format(**doc_variables)
            setattr(cls, name, method)
        if reverse is not None:
            # Install the optional reverse relationship on the target class.
            reverse(cls, type=reverse_type)(dest_class)
        return cls
    return method_builder | Create a one to one relation to a given target :class:`Resource`.
Args:
dest_class(Resource): The *target* class for the relationship
Keyword Args:
type(RelationType): The relationship approach to use.
reverse(to_may or to_one): An *optional* reverse relationship.
reverse_type(RelationType): The reverse relationship approach.
resource_classes(Resource): The kinds of Resources to expect
in the relationship
Returns:
A builder function which, given a source class creates a
one-to-one relationship with the target
A one to one relationship means that you can get the associated
target object from the object on which the ``to_one`` was declared.
.. code-block:: python
@to_one(Organization)
def User(Resource):
pass
Declares that a User is associated with *one* Organization. The
decorator automatically adds a method to fetch the associated
organization:
.. code-block:: python
org = user.organization()
def _comparator_approximate_star(filter_value, tested_value):
    """
    Tests if the filter value, which contains a joker, is nearly equal to the
    tested value.

    If the tested value is a string or an array of string, it compares their
    lower case forms
    """
    lowered = filter_value.lower()
    if is_string(tested_value):
        # Case-insensitive comparison of the two strings.
        return _comparator_star(lowered, tested_value.lower())
    if hasattr(tested_value, "__iter__"):
        # First try against the lower-cased string items of the collection.
        lowered_items = [item.lower() for item in tested_value if is_string(item)]
        if _comparator_star(lowered, lowered_items):
            # Value found in the strings
            return True
    # Fall back to comparing the raw values.
    return _comparator_star(filter_value, tested_value) or _comparator_star(
        lowered, tested_value
    )
def compile_patterns_in_dictionary(dictionary):
    """
    Replace all strings in dictionary with compiled
    version of themselves and return dictionary.
    """
    for key in dictionary:
        entry = dictionary[key]
        if isinstance(entry, str):
            # Replace the pattern string with its compiled form in place.
            dictionary[key] = re.compile(entry)
        elif isinstance(entry, dict):
            # Recurse into nested dictionaries.
            compile_patterns_in_dictionary(entry)
    return dictionary
def setMaxSpeedLat(self, typeID, speed):
    """setMaxSpeedLat(string, double) -> None
    Sets the maximum lateral speed of this type.
    """
    # Send a double-valued "set vehicle-type variable" command over the
    # TraCI connection for the given type id.
    self._connection._sendDoubleCmd(
        tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_MAXSPEED_LAT, typeID, speed) | setMaxSpeedLat(string, double) -> None
Sets the maximum lateral speed of this type. |
def set_advanced_configs(vm_name, datacenter, advanced_configs,
                         service_instance=None):
    '''
    Appends extra config parameters to a virtual machine advanced config list
    vm_name
        Virtual machine name
    datacenter
        Datacenter name where the virtual machine is available
    advanced_configs
        Dictionary with advanced parameter key value pairs
    service_instance
        vCenter service instance for connection and configuration
    '''
    current_config = get_vm_config(vm_name,
                                   datacenter=datacenter,
                                   objects=True,
                                   service_instance=service_instance)
    # Diff the requested advanced configs against the VM's current config.
    diffs = compare_vm_configs({'name': vm_name,
                                'advanced_configs': advanced_configs},
                               current_config)
    datacenter_ref = salt.utils.vmware.get_datacenter(service_instance,
                                                      datacenter)
    # Look up the VM managed-object reference by name within the datacenter.
    vm_ref = salt.utils.vmware.get_mor_by_property(service_instance,
                                                   vim.VirtualMachine,
                                                   vm_name,
                                                   property_name='name',
                                                   container_ref=datacenter_ref)
    config_spec = vim.vm.ConfigSpec()
    changes = diffs['advanced_configs'].diffs
    # Populate the spec with only the new/changed extraConfig values.
    _apply_advanced_config(config_spec, diffs['advanced_configs'].new_values,
                           vm_ref.config.extraConfig)
    if changes:
        # Only reconfigure the VM if something actually changed.
        salt.utils.vmware.update_vm(vm_ref, config_spec)
    return {'advanced_config_changes': changes} | Appends extra config parameters to a virtual machine advanced config list
vm_name
Virtual machine name
datacenter
Datacenter name where the virtual machine is available
advanced_configs
Dictionary with advanced parameter key value pairs
service_instance
vCenter service instance for connection and configuration
def shuffled_batches(self, batch_size):
    """ Generate randomized batches of data """
    if batch_size >= self.size:
        # One batch covers the whole buffer; yield it unshuffled as-is.
        yield self
    else:
        # Number of batches needed to cover all transitions.
        batch_splits = math_util.divide_ceiling(self.size, batch_size)
        indices = list(range(self.size))
        np.random.shuffle(indices)
        for sub_indices in np.array_split(indices, batch_splits):
            yield Transitions(
                size=len(sub_indices),
                environment_information=None,
                # Dont use it in batches for a moment, can be uncommented later if needed
                # environment_information=[info[sub_indices.tolist()] for info in self.environment_information]
                transition_tensors={k: v[sub_indices] for k, v in self.transition_tensors.items()}
                # extra_data does not go into batches
            ) | Generate randomized batches of data
def load_image(file) -> DataAndMetadata.DataAndMetadata:
    """
    Loads the image from the file-like object or string file.

    If file is a string, the file is opened and then read.

    Returns a numpy ndarray of our best guess for the most important image
    in the file.
    """
    # BUG FIX: the original tested ``isinstance(file, str) or
    # isinstance(file, str)`` — a duplicated check (a Python-2 ``unicode``
    # leftover); a single test is sufficient.
    if isinstance(file, str):
        with open(file, "rb") as f:
            return load_image(f)
    dmtag = parse_dm3.parse_dm_header(file)
    dmtag = fix_strings(dmtag)
    # display_keys(dmtag)
    img_index = -1
    image_tags = dmtag['ImageList'][img_index]
    data = imagedatadict_to_ndarray(image_tags['ImageData'])
    # Gather per-dimension calibrations as (offset, scale, units) triples;
    # DM stores dimensions in reverse order relative to numpy axes.
    calibrations = []
    calibration_tags = image_tags['ImageData'].get('Calibrations', dict())
    for dimension in calibration_tags.get('Dimension', list()):
        origin, scale, units = dimension.get('Origin', 0.0), dimension.get('Scale', 1.0), dimension.get('Units', str())
        calibrations.append((-origin * scale, scale, units))
    calibrations = tuple(reversed(calibrations))
    # Derive the data descriptor (sequence/collection/datum split) from the
    # array shape, dtype and the DM metadata format hints.
    if len(data.shape) == 3 and data.dtype != numpy.uint8:
        if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
            if data.shape[1] == 1:
                # Degenerate spectrum image: drop the singleton axis.
                data = numpy.squeeze(data, 1)
                data = numpy.moveaxis(data, 0, 1)
                data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 1)
                calibrations = (calibrations[2], calibrations[0])
            else:
                data = numpy.moveaxis(data, 0, 2)
                data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 1)
                calibrations = tuple(calibrations[1:]) + (calibrations[0],)
        else:
            data_descriptor = DataAndMetadata.DataDescriptor(False, 1, 2)
    elif len(data.shape) == 4 and data.dtype != numpy.uint8:
        # data = numpy.moveaxis(data, 0, 2)
        data_descriptor = DataAndMetadata.DataDescriptor(False, 2, 2)
    elif data.dtype == numpy.uint8:
        # uint8 means RGB(A); the last axis holds the colour components.
        data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape[:-1]))
    else:
        data_descriptor = DataAndMetadata.DataDescriptor(False, 0, len(data.shape))
    brightness = calibration_tags.get('Brightness', dict())
    origin, scale, units = brightness.get('Origin', 0.0), brightness.get('Scale', 1.0), brightness.get('Units', str())
    intensity = -origin * scale, scale, units
    timestamp = None
    timezone = None
    timezone_offset = None
    title = image_tags.get('Name')
    properties = dict()
    if 'ImageTags' in image_tags:
        voltage = image_tags['ImageTags'].get('ImageScanned', dict()).get('EHT', dict())
        if voltage:
            properties.setdefault("hardware_source", dict())["autostem"] = { "high_tension_v": float(voltage) }
        dm_metadata_signal = image_tags['ImageTags'].get('Meta Data', dict()).get('Signal')
        if dm_metadata_signal and dm_metadata_signal.lower() == "eels":
            properties.setdefault("hardware_source", dict())["signal_type"] = dm_metadata_signal
        if image_tags['ImageTags'].get('Meta Data', dict()).get("Format", str()).lower() in ("spectrum", "spectrum image"):
            # Spectra keep exactly one datum dimension; the rest collect.
            data_descriptor.collection_dimension_count += data_descriptor.datum_dimension_count - 1
            data_descriptor.datum_dimension_count = 1
        if image_tags['ImageTags'].get('Meta Data', dict()).get("IsSequence", False) and data_descriptor.collection_dimension_count > 0:
            data_descriptor.is_sequence = True
            data_descriptor.collection_dimension_count -= 1
        timestamp_str = image_tags['ImageTags'].get("Timestamp")
        if timestamp_str:
            timestamp = get_datetime_from_timestamp_str(timestamp_str)
        timezone = image_tags['ImageTags'].get("Timezone")
        timezone_offset = image_tags['ImageTags'].get("TimezoneOffset")
        # to avoid having duplicate copies in Swift, get rid of these tags
        image_tags['ImageTags'].pop("Timestamp", None)
        image_tags['ImageTags'].pop("Timezone", None)
        image_tags['ImageTags'].pop("TimezoneOffset", None)
        # put the image tags into properties
        properties.update(image_tags['ImageTags'])
    dimensional_calibrations = [Calibration.Calibration(c[0], c[1], c[2]) for c in calibrations]
    # Pad with identity calibrations so every dimension has one.
    while len(dimensional_calibrations) < data_descriptor.expected_dimension_count:
        dimensional_calibrations.append(Calibration.Calibration())
    intensity_calibration = Calibration.Calibration(intensity[0], intensity[1], intensity[2])
    return DataAndMetadata.new_data_and_metadata(data,
                                                 data_descriptor=data_descriptor,
                                                 dimensional_calibrations=dimensional_calibrations,
                                                 intensity_calibration=intensity_calibration,
                                                 metadata=properties,
                                                 timestamp=timestamp,
                                                 timezone=timezone,
                                                 timezone_offset=timezone_offset)
def get_user_group(self, user=None, group=None):
    """Get the user and group information.

    Parameters
    ----------
    user : str
        User name or user id (default is the `os.getuid()`).
    group : str
        Group name or group id (default is the group of `user`).

    Returns
    -------
    user : pwd.struct_passwd
        User object.
    group : grp.struct_group
        Group object.
    """
    uid_or_name = user or os.getuid()
    # Resolve the user: try a numeric uid first, then fall back to a name.
    try:
        try:
            resolved_user = pwd.getpwuid(int(uid_or_name))
        except ValueError:
            resolved_user = pwd.getpwnam(uid_or_name)
    except KeyError as ex:  # pragma: no cover
        self.logger.fatal("could not resolve user: %s", ex)
        raise
    # Resolve the group the same way, defaulting to the user's primary group.
    gid_or_name = group or resolved_user.pw_gid
    try:
        try:
            resolved_group = grp.getgrgid(int(gid_or_name))
        except ValueError:
            resolved_group = grp.getgrnam(gid_or_name)
    except KeyError as ex:  # pragma: no cover
        self.logger.fatal("could not resolve group:%s", ex)
        raise
    return resolved_user, resolved_group
def get_single(self, key, lang=None):
    """ Returns a single triple related to this node.

    :param key: Predicate of the triple
    :param lang: Language of the triple if applicable
    :rtype: Literal or BNode or URIRef
    """
    if not isinstance(key, URIRef):
        key = URIRef(key)
    if lang is None:
        # No language preference: return the first matching object, if any.
        for obj in self.graph.objects(self.asNode(), key):
            return obj
        return None
    # Prefer an object in the requested language; otherwise fall back to
    # the last object seen.
    fallback = None
    for obj in self.graph.objects(self.asNode(), key):
        fallback = obj
        if obj.language == lang:
            return obj
    return fallback
def auto_find_instance_path(self) -> Path:
    """Locates the instace_path if it was not provided
    """
    prefix, package_path = find_package(self.import_name)
    # Installed packages get a per-app directory under <prefix>/var;
    # uninstalled ones use an "instance" folder next to the package.
    if prefix is not None:
        return prefix / "var" / f"{self.name}-instance"
    return package_path / "instance"
def _find_volumes(self, volume_system, vstype='detect'):
    """Finds all volumes based on the pytsk3 library."""
    try:
        # noinspection PyUnresolvedReferences
        import pytsk3
    except ImportError:
        logger.error("pytsk3 not installed, could not detect volumes")
        raise ModuleNotFoundError("pytsk3")
    baseimage = None
    try:
        # ewf raw image is now available on base mountpoint
        # either as ewf1 file or as .dd file
        raw_path = volume_system.parent.get_raw_path()
        # noinspection PyBroadException
        try:
            baseimage = pytsk3.Img_Info(raw_path)
        except Exception:
            logger.error("Failed retrieving image info (possible empty image).", exc_info=True)
            return []
        try:
            # Parse the volume table at the parent's offset (in blocks).
            volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_' + vstype.upper()),
                                         volume_system.parent.offset // volume_system.disk.block_size)
            volume_system.volume_source = 'multi'
            return volumes
        except Exception as e:
            # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
            if "(GPT or DOS at 0)" in str(e) and vstype != 'gpt':
                # Retry once, forcing a GPT volume system.
                volume_system.vstype = 'gpt'
                # noinspection PyBroadException
                try:
                    logger.warning("Error in retrieving volume info: TSK couldn't decide between GPT and DOS, "
                                   "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                    volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_GPT'))
                    volume_system.volume_source = 'multi'
                    return volumes
                except Exception as e:
                    logger.exception("Failed retrieving image info (possible empty image).")
                    raise SubsystemError(e)
            else:
                logger.exception("Failed retrieving image info (possible empty image).")
                raise SubsystemError(e)
    finally:
        # Always release the libtsk image handle.
        if baseimage:
            baseimage.close()
            del baseimage | Finds all volumes based on the pytsk3 library.
def sort_top_level_items(self, key):
    """Sorting tree wrt top level items"""
    self.save_expanded_state()
    # Detach every top-level item (always popping index 0), sort, re-insert.
    count = self.topLevelItemCount()
    detached = [self.takeTopLevelItem(0) for _ in range(count)]
    for position, item in enumerate(sorted(detached, key=key)):
        self.insertTopLevelItem(position, item)
    self.restore_expanded_state()
def _validate_argument(self, arg):
    """Validate a type or matcher argument to the constructor."""
    if arg is None:
        return arg
    # A bare type is wrapped in an InstanceOf matcher.
    if isinstance(arg, type):
        return InstanceOf(arg)
    # Anything else must already be a matcher.
    if isinstance(arg, BaseMatcher):
        return arg
    raise TypeError(
        "argument of %s can be a type or a matcher (got %r)" % (
            self.__class__.__name__, type(arg)))
def src_to_dst(self, src_uri):
    """Return the dst filepath from the src URI.

    Returns None on failure, destination path on success.
    """
    # BUG FIX: the base URI is escaped so regex metacharacters in it
    # (e.g. '.', '+') are matched literally instead of as patterns.
    m = re.match(re.escape(self.src_uri) + "/(.*)$", src_uri)
    if m is None:
        return None
    rel_path = m.group(1)
    return self.dst_path + '/' + rel_path
async def asgi_send(self, message: dict) -> None:
    """Called by the ASGI instance to send a message."""
    # Dispatch on the ASGI message type, validated against the current
    # websocket state.
    if message["type"] == "websocket.accept" and self.state == ASGIWebsocketState.HANDSHAKE:
        # Accept the handshake and move to the connected state.
        headers = build_and_validate_headers(message.get("headers", []))
        raise_if_subprotocol_present(headers)
        headers.extend(self.response_headers())
        await self.asend(
            AcceptConnection(
                extensions=[PerMessageDeflate()],
                extra_headers=headers,
                subprotocol=message.get("subprotocol"),
            )
        )
        self.state = ASGIWebsocketState.CONNECTED
        self.config.access_logger.access(
            self.scope, {"status": 101, "headers": []}, time() - self.start_time
        )
    elif (
        message["type"] == "websocket.http.response.start"
        and self.state == ASGIWebsocketState.HANDSHAKE
    ):
        # The app rejects the handshake with a plain HTTP response.
        self.response = message
        self.config.access_logger.access(self.scope, self.response, time() - self.start_time)
    elif message["type"] == "websocket.http.response.body" and self.state in {
        ASGIWebsocketState.HANDSHAKE,
        ASGIWebsocketState.RESPONSE,
    }:
        await self._asgi_send_rejection(message)
    elif message["type"] == "websocket.send" and self.state == ASGIWebsocketState.CONNECTED:
        data: Union[bytes, str]
        if message.get("bytes") is not None:
            await self.asend(BytesMessage(data=bytes(message["bytes"])))
        elif not isinstance(message["text"], str):
            raise TypeError(f"{message['text']} should be a str")
        else:
            await self.asend(TextMessage(data=message["text"]))
    elif message["type"] == "websocket.close" and self.state == ASGIWebsocketState.HANDSHAKE:
        # Closing before accepting maps to an HTTP 403 response.
        await self.send_http_error(403)
        self.state = ASGIWebsocketState.HTTPCLOSED
    elif message["type"] == "websocket.close":
        await self.asend(CloseConnection(code=int(message["code"])))
        self.state = ASGIWebsocketState.CLOSED
    else:
        # Anything else is invalid in the current state.
        raise UnexpectedMessage(self.state, message["type"]) | Called by the ASGI instance to send a message.
def _real_re_compile(self, *args, **kwargs):
    """Thunk over to the original re.compile"""
    try:
        return re.compile(*args, **kwargs)
    except re.error as exc:
        # raise ValueError instead of re.error as this gives a
        # cleaner message to the user.
        raise ValueError('"' + args[0] + '" ' + str(exc))
def roll_dice(spec):
    """Perform the dice rolls and replace all roll expressions with lists of
    the dice faces that landed up.
    """
    if spec[0] == 'c':
        # Constant node: nothing to roll.
        return spec
    if spec[0] == 'r':
        r = spec[1:]
        if len(r) == 2:
            return ('r', perform_roll(r[0], r[1]))
        # Optional keep-highest ('k') or drop-lowest ('d') modifier.
        k = r[3] if r[2] == 'k' else -1
        d = r[3] if r[2] == 'd' else -1
        return ('r', perform_roll(r[0], r[1], k, d))
    if spec[0] == "x":
        # 'x' pairs a constant repeat count with a roll; any other operand
        # combination degrades to plain multiplication.
        c = None
        roll = None
        for operand in (spec[1], spec[2]):
            if operand[0] == "c":
                c = operand
            elif operand[0] == "r":
                roll = operand
        # IDIOM FIX: compare against None with ``is``, not ``==``.
        if c is None or roll is None:
            return ('*', roll_dice(spec[1]), roll_dice(spec[2]))
        if c[1] > 50:
            raise SillyDiceError("I don't have that many dice!")
        return ("x", [roll_dice(roll) for _ in range(c[1])])
    if spec[0] in ops:
        # Binary operator node: roll both operands recursively.
        return (spec[0], roll_dice(spec[1]), roll_dice(spec[2]))
    raise ValueError("Invalid dice specification")
def SavePrivateKey(self, private_key):
    """Store the new private key on disk."""
    # Keep the in-memory copy in sync with what gets written to the config.
    self.private_key = private_key
    config.CONFIG.Set("Client.private_key",
                      self.private_key.SerializeToString())
    # Persist the updated configuration to disk.
    config.CONFIG.Write() | Store the new private key on disk.
def rotateAboutVectorMatrix(vec, theta_deg):
    """Construct the matrix that rotates vector a about
    vector vec by an angle of theta_deg degrees

    Taken from
    http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle

    Input:
    theta_deg (float) Angle through which vectors should be
    rotated in degrees

    Returns:
    A matrix

    To rotate a vector, premultiply by this matrix.
    To rotate the coord sys underneath the vector, post multiply
    """
    ct = np.cos(np.radians(theta_deg))
    st = np.sin(np.radians(theta_deg))
    # BUG FIX: the original normalised with ``vec /= norm``, which mutates
    # the caller's float array in place. Normalise into a fresh array.
    vec = np.asarray(vec, dtype=float) / np.linalg.norm(vec)
    assert np.all(np.isfinite(vec))
    # Rodrigues' formula: R = cos(t)*I + sin(t)*[u]_x + (1-cos(t))*u u^T
    term1 = ct * np.eye(3)
    ucross = np.array([
        [0.0, -vec[2], vec[1]],
        [vec[2], 0.0, -vec[0]],
        [-vec[1], vec[0], 0.0],
    ])
    term2 = st * ucross
    # Outer product replaces the original hand-rolled symmetric-fill loop.
    term3 = (1 - ct) * np.outer(vec, vec)
    return term1 + term2 + term3
def get_parser():
    """Return ArgumentParser for pypyr cli."""
    arg_parser = argparse.ArgumentParser(
        description='pypyr pipeline runner',
        allow_abbrev=True)
    # Positional: pipeline name (required) and optional context string.
    arg_parser.add_argument(
        'pipeline_name',
        help='Name of pipeline to run. It should exist in the ./pipelines directory.')
    arg_parser.add_argument(
        dest='pipeline_context',
        nargs='?',
        help="String for context values. Parsed by the pipeline's context_parser function.")
    # Options controlling working directory and logging.
    arg_parser.add_argument(
        '--dir',
        dest='working_dir',
        default=os.getcwd(),
        help='Working directory. Use if your pipelines directory is elsewhere. Defaults to cwd.')
    arg_parser.add_argument(
        '--log',
        '--loglevel',
        dest='log_level',
        type=int,
        default=20,
        help=('Integer log level. Defaults to 20 (INFO). '
              '10=DEBUG\n20=INFO\n30=WARNING\n40=ERROR\n50=CRITICAL'
              '.\n Log Level < 10 gives full traceback on errors.'))
    arg_parser.add_argument(
        '--logpath',
        dest='log_path',
        help='Log-file path. Append log output to this path')
    arg_parser.add_argument(
        '--version',
        action='version',
        help='Echo version number.',
        version=f'{pypyr.version.get_version()}')
    return arg_parser
def get_worker_report(self, with_memory=False):
    """ Returns a dict containing all the data we can about the current status of the worker and
    its jobs. """
    # Snapshot every running greenlet: a truncated stack plus its job info.
    greenlets = []
    for greenlet in list(self.gevent_pool):
        g = {}
        short_stack = []
        stack = traceback.format_stack(greenlet.gr_frame)
        for s in stack[1:]:
            if "/gevent/hub.py" in s:
                # Everything below the gevent hub is scheduler noise.
                break
            short_stack.append(s)
        g["stack"] = short_stack
        job = get_current_job(id(greenlet))
        if job:
            job.save()
            if job.data:
                g["path"] = job.data["path"]
            g["datestarted"] = job.datestarted
            g["id"] = str(job.id)
        g["time"] = getattr(greenlet, "_trace_time", 0)
        g["switches"] = getattr(greenlet, "_trace_switches", None)
        # pylint: disable=protected-access
        if job._current_io is not None:
            g["io"] = job._current_io
        greenlets.append(g)
    # When faking network latency, all sockets are affected, including OS ones, but
    # we still want reliable reports so this is disabled.
    if (not with_memory) or (self.config["add_network_latency"] != "0" and self.config["add_network_latency"]):
        cpu = {
            "user": 0,
            "system": 0,
            "percent": 0
        }
        mem = {"rss": 0, "swap": 0, "total": 0}
    else:
        cpu_times = self.process.cpu_times()
        cpu = {
            "user": cpu_times.user,
            "system": cpu_times.system,
            "percent": self.process.cpu_percent(0)
        }
        mem = self.get_memory()
    # Avoid sharing passwords or sensitive config!
    whitelisted_config = [
        "max_jobs",
        # BUG FIX: the missing comma after "max_memory" made Python
        # concatenate it with "greenlets" into "max_memorygreenlets",
        # silently dropping BOTH keys from the reported config.
        "max_memory",
        "greenlets",
        "processes",
        "queues",
        "dequeue_strategy",
        "scheduler",
        "name",
        "local_ip",
        "external_ip",
        "agent_id",
        "worker_group"
    ]
    io = None
    if self._traced_io:
        io = {}
        for k, v in iteritems(self._traced_io):
            if k == "total":
                io[k] = v
            else:
                io[k] = sorted(list(v.items()), reverse=True, key=lambda x: x[1])
    used_pool_slots = len(self.gevent_pool)
    used_avg = self.pool_usage_average.next(used_pool_slots)
    return {
        "status": self.status,
        "config": {k: v for k, v in iteritems(self.config) if k in whitelisted_config},
        "done_jobs": self.done_jobs,
        "usage_avg": used_avg / self.pool_size,
        "datestarted": self.datestarted,
        "datereported": datetime.datetime.utcnow(),
        "name": self.name,
        "io": io,
        "_id": str(self.id),
        "process": {
            "pid": self.process.pid,
            "cpu": cpu,
            "mem": mem
            # https://code.google.com/p/psutil/wiki/Documentation
            # get_open_files
            # get_connections
            # get_num_ctx_switches
            # get_num_fds
            # get_io_counters
            # get_nice
        },
        "jobs": greenlets
    }
def get_files_zip(run_id: int, filetype: _FileType):
    """Send all artifacts or sources of a run as ZIP."""
    data = current_app.config["data"]
    dao_runs = data.get_run_dao()
    dao_files = data.get_files_dao()
    run = dao_runs.get(run_id)
    # Pick the file list matching the requested file type.
    if filetype == _FileType.ARTIFACT:
        target_files = run['artifacts']
    elif filetype == _FileType.SOURCE:
        target_files = run['experiment']['sources']
    else:
        raise Exception("Unknown file type: %s" % filetype)
    # Assemble the ZIP archive entirely in memory.
    memory_file = io.BytesIO()
    with zipfile.ZipFile(memory_file, 'w') as zf:
        for f in target_files:
            # source and artifact files use a different data structure
            file_id = f['file_id'] if 'file_id' in f else f[1]
            file, filename, upload_date = dao_files.get(file_id)
            data = zipfile.ZipInfo(filename, date_time=upload_date.timetuple())
            data.compress_type = zipfile.ZIP_DEFLATED
            zf.writestr(data, file.read())
    # Rewind so send_file streams from the beginning of the buffer.
    memory_file.seek(0)
    fn_suffix = _filetype_suffices[filetype]
    return send_file(memory_file, attachment_filename='run{}_{}.zip'.format(run_id, fn_suffix), as_attachment=True) | Send all artifacts or sources of a run as ZIP.
def point(self, x, y):
    """Creates a POINT shape."""
    shapeType = POINT
    pointShape = Shape(shapeType)
    # A point shape carries exactly one coordinate pair.
    pointShape.points.append([x, y])
    # Register the new shape with this writer.
    self.shape(pointShape) | Creates a POINT shape.
def _options(self):
    """
    Returns a raw options object

    :rtype: dict
    """
    if self._options_cache is None:
        # Fetch the OPTIONS payload once and memoise it for later calls.
        target_url = self.client.get_url(self._URL_KEY, 'OPTIONS', 'options')
        response = self.client.request('OPTIONS', target_url)
        self._options_cache = response.json()
    return self._options_cache
def search(self, pattern="*", raw=True, search_raw=True,
           output=False):
    """Search the database using unix glob-style matching (wildcards
    * and ?).

    Parameters
    ----------
    pattern : str
        The wildcarded pattern to match when searching
    search_raw : bool
        If True, search the raw input, otherwise, the parsed input
    raw, output : bool
        See :meth:`get_range`

    Returns
    -------
    Tuples as :meth:`get_range`
    """
    # Pick the column to search; output rows live in the history table.
    column = "source_raw" if search_raw else "source"
    if output:
        column = "history." + column
    self.writeout_cache()
    clause = "WHERE %s GLOB ?" % column
    return self._run_sql(clause, (pattern,), raw=raw, output=output)
def merge_text_nodes_on(self, node):
    """Merges all consecutive non-translatable text nodes into one"""
    if not isinstance(node, ContainerNode) or not node.children:
        return
    merged = []
    pending = []

    def flush():
        # Collapse the accumulated run into a single escaped-text node.
        if pending:
            merged.append(EscapedText(''.join(pending)))
            del pending[:]

    for child in node.children:
        if isinstance(child, Text) and not child.translatable:
            pending.append(child.escaped())
        else:
            flush()
            merged.append(child)
    flush()
    node.children = merged
    # Recurse into the (possibly rewritten) children.
    for child in node.children:
        self.merge_text_nodes_on(child)
def start_tcp_client(self, ip=None, port=None, name=None, timeout=None, protocol=None, family='ipv4'):
    """Starts a new TCP client.

    The client can optionally be bound to `ip` and `port`, and given a
    `name`, a default `timeout` and a `protocol`. `family` is either
    ipv4 (default) or ipv6. Use the `Connect` keyword to connect the
    client to a host.

    Examples:
    | Start TCP client |
    | Start TCP client | name=Client1 | protocol=GTPV2 |
    | Start TCP client | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 |
    | Start TCP client | timeout=5 |
    | Start TCP client | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 |
    """
    client_args = (ip, port, name, timeout, protocol, family)
    self._start_client(TCPClient, *client_args)
Client can be optionally given `ip` and `port` to bind to, as well as
`name`, default `timeout` and a `protocol`. `family` can be either
ipv4 (default) or ipv6.
You should use `Connect` keyword to connect client to a host.
Examples:
| Start TCP client |
| Start TCP client | name=Client1 | protocol=GTPV2 |
| Start TCP client | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 |
| Start TCP client | timeout=5 |
| Start TCP client | 0:0:0:0:0:0:0:1 | 53 | family=ipv6 | |
def register(self, source_point_cloud, target_point_cloud,
             source_normal_cloud, target_normal_cloud, matcher,
             num_iterations=1, compute_total_cost=True, match_centroids=False,
             vis=False):
    """
    Iteratively register objects to one another using a modified version of point to plane ICP.
    The cost func is PointToPlane_COST + gamma * PointToPoint_COST.
    Uses a `stochastic Gauss-Newton step` where on each iteration a smaller number of points is sampled.

    Parameters
    ----------
    source_point_cloud : :obj:`autolab_core.PointCloud`
        source object points
    target_point_cloud : :obj:`autolab_core.PointCloud`
        target object points
    source_normal_cloud : :obj:`autolab_core.NormalCloud`
        source object outward-pointing normals
    target_normal_cloud : :obj:`autolab_core.NormalCloud`
        target object outward-pointing normals
    matcher : :obj:`PointToPlaneFeatureMatcher`
        object to match the point sets
    num_iterations : int
        the number of iterations to run
    compute_total_cost : bool
        whether or not to compute the total cost upon termination.
    match_centroids : bool
        whether or not to match the centroids of the point clouds

    Returns
    -------
    :obj:`RegistrationResult`
        results containing source to target transformation and cost
    """
    # check valid data
    if not isinstance(source_point_cloud, PointCloud) or not isinstance(target_point_cloud, PointCloud):
        raise ValueError('Source and target point clouds must be PointCloud objects')
    if not isinstance(source_normal_cloud, NormalCloud) or not isinstance(target_normal_cloud, NormalCloud):
        raise ValueError('Source and target normal clouds must be NormalCloud objects')
    if not isinstance(matcher, PointToPlaneFeatureMatcher):
        raise ValueError('Feature matcher must be a PointToPlaneFeatureMatcher object')
    if source_point_cloud.num_points != source_normal_cloud.num_points or target_point_cloud.num_points != target_normal_cloud.num_points:
        raise ValueError('Input point clouds must have the same number of points as corresponding normal cloud')

    # extract source and target point and normal data arrays (Nx3)
    orig_source_points = source_point_cloud.data.T
    orig_target_points = target_point_cloud.data.T
    orig_source_normals = source_normal_cloud.data.T
    orig_target_normals = target_normal_cloud.data.T

    # drop points whose normal has zero length (degenerate normals)
    normal_norms = np.linalg.norm(orig_target_normals, axis=1)
    valid_inds = np.nonzero(normal_norms)
    orig_target_points = orig_target_points[valid_inds[0], :]
    orig_target_normals = orig_target_normals[valid_inds[0], :]
    normal_norms = np.linalg.norm(orig_source_normals, axis=1)
    valid_inds = np.nonzero(normal_norms)
    orig_source_points = orig_source_points[valid_inds[0], :]
    orig_source_normals = orig_source_normals[valid_inds[0], :]

    # alloc buffers for solutions
    source_mean_point = np.mean(orig_source_points, axis=0)
    target_mean_point = np.mean(orig_target_points, axis=0)
    R_sol = np.eye(3)
    t_sol = np.zeros([3, 1])  # init with diff between means
    if match_centroids:
        t_sol[:, 0] = target_mean_point - source_mean_point

    # iterate through
    for i in range(num_iterations):
        logging.info('Point to plane ICP iteration %d' % (i))

        # subsample points
        source_subsample_inds = np.random.choice(orig_source_points.shape[0], size=self.sample_size_)
        source_points = orig_source_points[source_subsample_inds, :]
        source_normals = orig_source_normals[source_subsample_inds, :]
        target_subsample_inds = np.random.choice(orig_target_points.shape[0], size=self.sample_size_)
        target_points = orig_target_points[target_subsample_inds, :]
        target_normals = orig_target_normals[target_subsample_inds, :]

        # transform source points by the current solution
        source_points = (R_sol.dot(source_points.T) + np.tile(t_sol, [1, source_points.shape[0]])).T
        source_normals = (R_sol.dot(source_normals.T)).T

        # closest points
        corrs = matcher.match(source_points, target_points, source_normals, target_normals)

        # solve optimal rotation + translation
        valid_corrs = np.where(corrs.index_map != -1)[0]
        source_corr_points = corrs.source_points[valid_corrs, :]
        target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
        target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]
        num_corrs = valid_corrs.shape[0]
        if num_corrs == 0:
            logging.warning('No correspondences found')
            break

        # create A and b matrices for Gauss-Newton step on joint cost function
        A = np.zeros([6, 6])
        b = np.zeros([6, 1])
        Ap = np.zeros([6, 6])
        bp = np.zeros([6, 1])
        G = np.zeros([3, 6])
        G[:, 3:] = np.eye(3)
        # BUGFIX: use j here -- the original reused i, clobbering the outer
        # ICP iteration counter that the log message above reports.
        for j in range(num_corrs):
            s = source_corr_points[j:j+1, :].T
            t = target_corr_points[j:j+1, :].T
            n = target_corr_normals[j:j+1, :].T
            G[:, :3] = skew(s).T
            A += G.T.dot(n).dot(n.T).dot(G)
            b += G.T.dot(n).dot(n.T).dot(t - s)
            Ap += G.T.dot(G)
            bp += G.T.dot(t - s)
        # damped (Levenberg-style) Gauss-Newton step on the joint cost
        v = np.linalg.solve(A + self.gamma_*Ap + self.mu_*np.eye(6),
                            b + self.gamma_*bp)

        # create pose values from the solution: project the linearized
        # rotation update back onto SO(3) via SVD
        R = np.eye(3)
        R = R + skew(v[:3])
        U, S, V = np.linalg.svd(R)
        R = U.dot(V)
        t = v[3:]

        # incrementally update the final transform
        R_sol = R.dot(R_sol)
        t_sol = R.dot(t_sol) + t

    T_source_target = RigidTransform(R_sol, t_sol, from_frame=source_point_cloud.frame, to_frame=target_point_cloud.frame)
    total_cost = 0
    source_points = (R_sol.dot(orig_source_points.T) + np.tile(t_sol, [1, orig_source_points.shape[0]])).T
    source_normals = (R_sol.dot(orig_source_normals.T)).T
    if compute_total_cost:
        # rematch all points to get the final cost
        corrs = matcher.match(source_points, orig_target_points, source_normals, orig_target_normals)
        valid_corrs = np.where(corrs.index_map != -1)[0]
        num_corrs = valid_corrs.shape[0]
        if num_corrs == 0:
            return RegistrationResult(T_source_target, np.inf)

        # get the corresponding points
        source_corr_points = corrs.source_points[valid_corrs, :]
        target_corr_points = corrs.target_points[corrs.index_map[valid_corrs], :]
        target_corr_normals = corrs.target_normals[corrs.index_map[valid_corrs], :]

        # determine total cost. Row-wise dot products computed directly;
        # the original np.diag(D.dot(N.T)) built a full num_corrs x
        # num_corrs matrix only to read its diagonal.
        source_target_alignment = np.sum(
            (source_corr_points - target_corr_points) * target_corr_normals, axis=1)
        point_plane_cost = (1.0 / num_corrs) * np.sum(source_target_alignment * source_target_alignment)
        point_dist_cost = (1.0 / num_corrs) * np.sum(np.linalg.norm(source_corr_points - target_corr_points, axis=1)**2)
        total_cost = point_plane_cost + self.gamma_ * point_dist_cost
    return RegistrationResult(T_source_target, total_cost)
The cost func is PointToPlane_COST + gamma * PointToPoint_COST.
Uses a `stochastic Gauss-Newton step` where on each iteration a smaller number of points is sampled.
Parameters
----------
source_point_cloud : :obj:`autolab_core.PointCloud`
source object points
target_point_cloud : :obj`autolab_core.PointCloud`
target object points
source_normal_cloud : :obj:`autolab_core.NormalCloud`
source object outward-pointing normals
target_normal_cloud : :obj:`autolab_core.NormalCloud`
target object outward-pointing normals
matcher : :obj:`PointToPlaneFeatureMatcher`
object to match the point sets
num_iterations : int
the number of iterations to run
compute_total_cost : bool
whether or not to compute the total cost upon termination.
match_centroids : bool
whether or not to match the centroids of the point clouds
Returns
-------
:obj`RegistrationResult`
results containing source to target transformation and cost |
def sys_status_send(self, onboard_control_sensors_present, onboard_control_sensors_enabled, onboard_control_sensors_health, load, voltage_battery, current_battery, battery_remaining, drop_rate_comm, errors_comm, errors_count1, errors_count2, errors_count3, errors_count4, force_mavlink1=False):
    '''
    Encode and send a SYS_STATUS message describing the general system
    state: sensor presence/enable/health bitmasks (MAV_SYS_STATUS_SENSOR),
    mainloop load, battery voltage/current/remaining, communication drop
    rate and error counters, plus four autopilot-specific error counts.

    All arguments are forwarded verbatim to ``sys_status_encode``; set
    ``force_mavlink1=True`` to force a MAVLink 1 encoding of the packet.
    '''
    msg = self.sys_status_encode(
        onboard_control_sensors_present, onboard_control_sensors_enabled,
        onboard_control_sensors_health, load, voltage_battery,
        current_battery, battery_remaining, drop_rate_comm, errors_comm,
        errors_count1, errors_count2, errors_count3, errors_count4)
    return self.send(msg, force_mavlink1=force_mavlink1)
standard, the system state is mainly defined by three
orthogonal states/modes: The system mode, which is
either LOCKED (motors shut down and locked), MANUAL
(system under RC control), GUIDED (system with
autonomous position control, position setpoint
controlled manually) or AUTO (system guided by
path/waypoint planner). The NAV_MODE defined the
current flight state: LIFTOFF (often an open-loop
maneuver), LANDING, WAYPOINTS or VECTOR. This
represents the internal navigation state machine. The
system status shows wether the system is currently
active or not and if an emergency occured. During the
CRITICAL and EMERGENCY states the MAV is still
considered to be active, but should start emergency
procedures autonomously. After a failure occured it
should first move from active to critical to allow
manual intervention and then move to emergency after a
certain timeout.
onboard_control_sensors_present : Bitmask showing which onboard controllers and sensors are present. Value of 0: not present. Value of 1: present. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
onboard_control_sensors_enabled : Bitmask showing which onboard controllers and sensors are enabled: Value of 0: not enabled. Value of 1: enabled. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
onboard_control_sensors_health : Bitmask showing which onboard controllers and sensors are operational or have an error: Value of 0: not enabled. Value of 1: enabled. Indices defined by ENUM MAV_SYS_STATUS_SENSOR (uint32_t)
load : Maximum usage in percent of the mainloop time, (0%: 0, 100%: 1000) should be always below 1000 (uint16_t)
voltage_battery : Battery voltage, in millivolts (1 = 1 millivolt) (uint16_t)
current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
battery_remaining : Remaining battery energy: (0%: 0, 100%: 100), -1: autopilot estimate the remaining battery (int8_t)
drop_rate_comm : Communication drops in percent, (0%: 0, 100%: 10'000), (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_comm : Communication errors (UART, I2C, SPI, CAN), dropped packets on all links (packets that were corrupted on reception on the MAV) (uint16_t)
errors_count1 : Autopilot-specific errors (uint16_t)
errors_count2 : Autopilot-specific errors (uint16_t)
errors_count3 : Autopilot-specific errors (uint16_t)
errors_count4 : Autopilot-specific errors (uint16_t) |
def OnTableChanged(self, event):
    """Table changed event handler: select the new table, scroll it into
    view, and let the event propagate."""
    try:
        table = event.table
    except AttributeError:
        # Event carries no table payload; nothing to select.
        pass
    else:
        self.Select(table)
        self.EnsureVisible(table)
    event.Skip()
def _write_model(self, specification, specification_set):
    """ Write autogenerate specification file

        Renders the model.py.tpl template for the given specification into
        the output directory and records the generated filename in
        ``self.model_filenames``.
    """
    # Output file is "<class prefix><entity name>.py", all lower case.
    filename = "%s%s.py" % (self._class_prefix.lower(), specification.entity_name.lower())
    # Preserve any hand-written override code previously added for this entity.
    override_content = self._extract_override_content(specification.entity_name)
    constants = self._extract_constants(specification)
    # The API root entity derives from NURESTRootObject; all others from NURESTObject.
    superclass_name = "NURESTRootObject" if specification.rest_name == self.api_root else "NURESTObject"
    self.write(destination=self.output_directory, filename=filename, template_name="model.py.tpl",
               specification=specification,
               specification_set=specification_set,
               version=self.api_version,
               class_prefix=self._class_prefix,
               product_accronym=self._product_accronym,
               override_content=override_content,
               superclass_name=superclass_name,
               constants=constants,
               header=self.header_content)
    # Remember which entity each generated file belongs to.
    self.model_filenames[filename] = specification.entity_name
def _get_key_location(self, key) -> (int, int):
    """
    Map a key to its storage location.

    :param key: numeric key (anything ``int()`` accepts)
    :return: tuple of (chunk number, 1-based offset within that chunk);
             key 0 maps to (1, 0)
    """
    key = int(key)
    if key == 0:
        return 1, 0
    remainder = key % self.chunkSize
    if remainder:
        # Key falls inside a chunk: chunk starts at key - remainder.
        chunk_no = key - remainder + ChunkedFileStore.firstChunkIndex
        offset = remainder
    else:
        # Key is exactly the last entry of its chunk.
        chunk_no = key - self.chunkSize + ChunkedFileStore.firstChunkIndex
        offset = self.chunkSize
    return chunk_no, offset
:param key:
:return: |
def upload(self, filepath, service_path, remove=False):
    '''
    "Upload" a file to a service

    This copies a file from the local filesystem into the ``DataService``'s
    filesystem. If ``remove==True``, the file is moved rather than copied.
    If ``filepath`` and ``service_path`` paths are the same, ``upload``
    deletes the file if ``remove==True`` and returns.

    Parameters
    ----------
    filepath : str
        Relative or absolute path to the file to be uploaded on the user's
        filesystem
    service_path: str
        Path to the destination for the file on the ``DataService``'s
        filesystem
    remove : bool
        If true, the file is moved rather than copied
    '''
    # Wrap the source directory in a pyfilesystem OSFS so fs.utils can
    # copy/move between the two filesystems.
    local = OSFS(os.path.dirname(filepath))
    # Skip if source and dest are the same (same real path on disk);
    # with remove=True this degenerates to a plain delete.
    if self.fs.hassyspath(service_path) and (
            self.fs.getsyspath(service_path) == local.getsyspath(
                os.path.basename(filepath))):
        if remove:
            os.remove(filepath)
        return
    # Ensure the destination directory exists on the service filesystem.
    if not self.fs.isdir(fs.path.dirname(service_path)):
        self.fs.makedir(
            fs.path.dirname(service_path),
            recursive=True,
            allow_recreate=True)
    # Move or copy across filesystems via fs.utils helpers.
    if remove:
        fs.utils.movefile(
            local,
            os.path.basename(filepath),
            self.fs,
            service_path)
    else:
        fs.utils.copyfile(
            local,
            os.path.basename(filepath),
            self.fs,
            service_path)
This copies a file from the local filesystem into the ``DataService``'s
filesystem. If ``remove==True``, the file is moved rather than copied.
If ``filepath`` and ``service_path`` paths are the same, ``upload``
deletes the file if ``remove==True`` and returns.
Parameters
----------
filepath : str
Relative or absolute path to the file to be uploaded on the user's
filesystem
service_path: str
Path to the destination for the file on the ``DataService``'s
filesystem
remove : bool
If true, the file is moved rather than copied |
def squeeze(self, trits, offset=0, length=HASH_LENGTH):
    # type: (MutableSequence[int], Optional[int], Optional[int]) -> None
    """
    Squeeze trits from the sponge.

    :param trits:
        Sequence that the squeezed trits will be copied to.
        Note: this object will be modified!
    :param offset:
        Starting offset in ``trits``.
    :param length:
        Number of trits to squeeze, default to ``HASH_LENGTH``.
        Must be a multiple of ``HASH_LENGTH``.
    """
    # Squeeze is kind of like the opposite of absorb; it copies
    # trits from internal state to the ``trits`` parameter, one hash
    # at a time, and transforming internal state in between hashes.
    #
    # However, only the first hash of the state is "public", so we
    # can simplify the implementation somewhat.

    # Ensure length can be mod by HASH_LENGTH
    if length % HASH_LENGTH != 0:
        raise with_context(
            # BUGFIX: closing double-backtick was missing in the original
            # message ('``squeeze`.').
            exc=ValueError('Invalid length passed to ``squeeze``.'),
            context={
                'trits': trits,
                'offset': offset,
                'length': length,
            })

    # Ensure that ``trits`` can hold at least one hash worth of
    # trits.
    trits.extend([0] * max(0, length - len(trits)))

    # Check trits with offset can handle hash length
    if len(trits) - offset < HASH_LENGTH:
        raise with_context(
            exc=ValueError('Invalid offset passed to ``squeeze``.'),
            context={
                'trits': trits,
                'offset': offset,
                'length': length
            },
        )

    while length >= HASH_LENGTH:
        # Copy exactly one hash.
        trits[offset:offset + HASH_LENGTH] = self._state[0:HASH_LENGTH]

        # One hash worth of trits copied; now transform.
        self._transform()

        offset += HASH_LENGTH
        length -= HASH_LENGTH
:param trits:
Sequence that the squeezed trits will be copied to.
Note: this object will be modified!
:param offset:
Starting offset in ``trits``.
:param length:
Number of trits to squeeze, default to ``HASH_LENGTH`` |
def prog(self, s=None):
    """Print the progress indicator (``self.prog_msg`` unless a truthy
    *s* is supplied) without a trailing newline."""
    message = s or self.prog_msg
    self.printer(message, end='')
def clean_pdb(pdb_file, out_suffix='_clean', outdir=None, force_rerun=False,
              remove_atom_alt=True, keep_atom_alt_id='A', remove_atom_hydrogen=True, add_atom_occ=True,
              remove_res_hetero=True, keep_chemicals=None, keep_res_only=None,
              add_chain_id_if_empty='X', keep_chains=None):
    """Clean a PDB file.

    Args:
        pdb_file (str): Path to input PDB file
        out_suffix (str): Suffix to append to original filename
        outdir (str): Path to output directory
        force_rerun (bool): If structure should be re-cleaned if a clean file exists already
        remove_atom_alt (bool): Remove alternate positions
        keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
        remove_atom_hydrogen (bool): Remove hydrogen atoms
        add_atom_occ (bool): Add atom occupancy fields if not present
        remove_res_hetero (bool): Remove all HETATMs
        keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
        keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
        add_chain_id_if_empty (str): Add a chain ID if not present
        keep_chains (str, list): Keep only these chains

    Returns:
        str: Path to cleaned PDB file
    """
    # Derive the output path: <pdb_file name><out_suffix>.pdb inside outdir.
    outfile = ssbio.utils.outfile_maker(inname=pdb_file,
                                        append_to_name=out_suffix,
                                        outdir=outdir,
                                        outext='.pdb')
    # Only re-clean when forced or when no cleaned file exists yet.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        my_pdb = StructureIO(pdb_file)
        # CleanPDB acts as a Bio.PDB Select-style filter applied on write.
        my_cleaner = CleanPDB(remove_atom_alt=remove_atom_alt,
                              remove_atom_hydrogen=remove_atom_hydrogen,
                              keep_atom_alt_id=keep_atom_alt_id,
                              add_atom_occ=add_atom_occ,
                              remove_res_hetero=remove_res_hetero,
                              keep_res_only=keep_res_only,
                              add_chain_id_if_empty=add_chain_id_if_empty,
                              keep_chains=keep_chains,
                              keep_chemicals=keep_chemicals)
        my_clean_pdb = my_pdb.write_pdb(out_suffix=out_suffix,
                                        out_dir=outdir,
                                        custom_selection=my_cleaner,
                                        force_rerun=force_rerun)
        return my_clean_pdb
    else:
        # Cached result: the previously cleaned file.
        return outfile
Args:
pdb_file (str): Path to input PDB file
out_suffix (str): Suffix to append to original filename
outdir (str): Path to output directory
force_rerun (bool): If structure should be re-cleaned if a clean file exists already
remove_atom_alt (bool): Remove alternate positions
keep_atom_alt_id (str): If removing alternate positions, which alternate ID to keep
remove_atom_hydrogen (bool): Remove hydrogen atoms
add_atom_occ (bool): Add atom occupancy fields if not present
remove_res_hetero (bool): Remove all HETATMs
keep_chemicals (str, list): If removing HETATMs, keep specified chemical names
keep_res_only (str, list): Keep ONLY specified resnames, deletes everything else!
add_chain_id_if_empty (str): Add a chain ID if not present
keep_chains (str, list): Keep only these chains
Returns:
str: Path to cleaned PDB file |
def _get_min_max_value(min, max, value=None, step=None):
"""Return min, max, value given input values with possible None."""
# Either min and max need to be given, or value needs to be given
if value is None:
if min is None or max is None:
raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))
diff = max - min
value = min + (diff / 2)
# Ensure that value has the same type as diff
if not isinstance(value, type(diff)):
value = min + (diff // 2)
else: # value is not None
if not isinstance(value, Real):
raise TypeError('expected a real number, got: %r' % value)
# Infer min/max from value
if value == 0:
# This gives (0, 1) of the correct type
vrange = (value, value + 1)
elif value > 0:
vrange = (-value, 3*value)
else:
vrange = (3*value, -value)
if min is None:
min = vrange[0]
if max is None:
max = vrange[1]
if step is not None:
# ensure value is on a step
tick = int((value - min) / step)
value = min + tick * step
if not min <= value <= max:
raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))
return min, max, value | Return min, max, value given input values with possible None. |
def n_p(self):
    """
    The plasma density in SI units.
    """
    energy_joules = _sltr.GeV2joule(self.E)
    momentum_term = (self.beta * _spc.elementary_charge) ** 2
    return 2 * energy_joules * _spc.epsilon_0 / momentum_term
def metadata_sorter(x, y):
    """Comparator that sorts metadata keys by priority.

    Keys listed in METADATA_SORTER_FIRST come first (in that list's
    order), then regular keys alphabetically, then underscore-prefixed
    keys last (compared alphabetically with the underscore stripped).

    Returns -1, 0 or 1 as usual for a cmp-style comparator.
    """
    def _cmp(a, b):
        # Python 3-compatible replacement for the builtin cmp(), which
        # was removed in Python 3 (the original code called it directly).
        return (a > b) - (a < b)

    if x == y:
        return 0
    if x in METADATA_SORTER_FIRST and y in METADATA_SORTER_FIRST:
        # Both prioritized: preserve the list's declared order.
        return -1 if METADATA_SORTER_FIRST.index(x) < METADATA_SORTER_FIRST.index(y) else 1
    elif x in METADATA_SORTER_FIRST:
        return -1
    elif y in METADATA_SORTER_FIRST:
        return 1
    else:
        if x.startswith('_') and y.startswith('_'):
            return _cmp(x[1:], y[1:])
        elif x.startswith('_'):
            # Underscore-prefixed keys sort after plain keys.
            return 1
        elif y.startswith('_'):
            return -1
        else:
            return _cmp(x, y)
def _init_base_objects(self, ssl_version: OpenSslVersionEnum, underlying_socket: Optional[socket.socket]) -> None:
    """Initialise handshake state, the SSL_CTX and the transport socket.
    """
    self._is_handshake_completed = False
    self._ssl_version = ssl_version
    # The SSL_CTX is created for the requested protocol version.
    self._ssl_ctx = self._NASSL_MODULE.SSL_CTX(ssl_version.value)
    # Transmission of raw bytes is delegated to a plain Python socket.
    self._sock = underlying_socket
def correspond(text):
    """Send *text* to the child process on stdin (kept open) and return
    the drained output."""
    stdin = subproc.stdin
    stdin.write(text)
    stdin.flush()
    return drain()
def rate_limit(limit: int, key=None):
    """
    Decorator that tags a handler with its throttling configuration.

    :param limit: rate limit to record on the function
    :param key: optional throttling key (only set when truthy)
    :return: decorator that annotates and returns the wrapped function
    """
    def decorator(func):
        func.throttling_rate_limit = limit
        if key:
            func.throttling_key = key
        return func
    return decorator
:param limit:
:param key:
:return: |
def name_parts(self):
    """Works with PartialNameMixin.clear_dict to set NONE and ANY
    values."""
    default = PartialMixin.ANY
    # Same (key, ANY, True) triple for both partition name parts and
    # generated names, in that order.
    return [(k, default, True)
            for source in (PartitionName._name_parts, Name._generated_names)
            for k, _, _ in source]
values. |
def compute_integrated_acquisition(acquisition,x):
    '''
    Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).

    :param acquisition: acquisition function with GpyOpt model type GP_MCMC.
    :param x: location where the acquisition is evaluated.
    '''
    # Average the acquisition value over all HMC hyper-parameter samples.
    acqu_x = 0
    for i in range(acquisition.model.num_hmc_samples):
        # NOTE(review): this writes the sampled hyper-parameters into the
        # model's kernel in place; the last sample remains set on exit --
        # presumably callers do not rely on the kernel state afterwards.
        acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]
        acqu_x += acquisition.acquisition_function(x)
    acqu_x = acqu_x/acquisition.model.num_hmc_samples
    return acqu_x
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated. |
def shutdown(self):
    '''Shutdown operations on exit; this runs from a finally clause
    after the tkinter mainloop ends (call root.quit to get here).
    Note you will still need to call sys.exit() afterwards.
    '''
    logging.info('Shutdown procedures being run!')
    # Run every registered cleanup callback.
    for cleanup in self.shutdown_cleanup.values():
        cleanup()
    session_time = round((time.time() - self.start_time) / 60, 0)
    logging.info('session time: {0} minutes'.format(session_time))
    logging.info('End..')
a finaly statement after the tkinter mainloop ends
call root.quit to get here,
Note you will still need to call sys.exit() |
def dist_dir(self):
    '''The dist dir at which to place the finished distribution.'''
    dist = self.distribution
    if dist is None:
        # No distribution configured: report and abort the build.
        warning('Tried to access {}.dist_dir, but {}.distribution '
                'is None'.format(self, self))
        exit(1)
    return dist.dist_dir
def cds_identifier_validator(record, result):
    """Ensure that the two records share at least one CDS identifier.

    This is needed because the search is done only for
    ``external_system_identifiers.value``, which might cause false
    positives when the matched record has an identifier with the same
    ``value`` but a ``schema`` different from CDS.

    Args:
        record (dict): the given record we are trying to match with similar ones in INSPIRE.
        result (dict): possible match returned by the ES query that needs to be validated.

    Returns:
        bool: validation decision.
    """
    def _cds_values(identifiers):
        # Keep only the values whose schema is exactly CDS.
        return {entry["value"] for entry in identifiers if entry["schema"] == 'CDS'}

    record_ids = _cds_values(get_value(record, 'external_system_identifiers', []))
    result_ids = _cds_values(get_value(result, '_source.external_system_identifiers', []))
    return bool(record_ids & result_ids)
This is needed because the search is done only for
``external_system_identifiers.value``, which might cause false positives in
case the matched record has an identifier with the same ``value`` but
``schema`` different from CDS.
Args:
record (dict): the given record we are trying to match with similar ones in INSPIRE.
result (dict): possible match returned by the ES query that needs to be validated.
Returns:
bool: validation decision. |
def decr(self, stat, count=1, rate=1):
    """Decrement a stat by `count` (delegates to ``incr`` with the count
    negated)."""
    negated = -count
    self.incr(stat, negated, rate)
def getSupportedServices(self, only_uids=True):
    """Return a list with the services supported by this reference sample,
    those for which there is a valid results range assigned in reference
    results

    :param only_uids: returns a list of uids or a list of objects
    :return: list of uids or AnalysisService objects
    """
    # List comprehensions instead of map/filter: the original lambda
    # shadowed the builtin ``range``, and on Python 3 map/filter return
    # one-shot iterators rather than the lists callers expect.
    uids = [result['uid'] for result in self.getReferenceResults()]
    uids = [uid for uid in uids if api.is_uid(uid)]
    if only_uids:
        return uids
    brains = api.search({'UID': uids}, 'uid_catalog')
    return [api.get_object(brain) for brain in brains]
those for which there is a valid results range assigned in reference
results
:param only_uids: returns a list of uids or a list of objects
:return: list of uids or AnalysisService objects |
def create_api_key(self, api_id, stage_name):
    """
    Create new API key and link it with an api_id and a stage_name
    """
    key_name = '{}_{}'.format(stage_name, api_id)
    stage_key = {
        'restApiId': '{}'.format(api_id),
        'stageName': '{}'.format(stage_name),
    }
    response = self.apigateway_client.create_api_key(
        name=key_name,
        description='Api Key for {}'.format(api_id),
        enabled=True,
        stageKeys=[stage_key]
    )
    print('Created a new x-api-key: {}'.format(response['id']))
def accept_all(self):
    '''
    Accept all keys in pre

    Moves every pending key file into the accepted directory, firing a
    key event for each one, and returns the refreshed key listing.
    '''
    keys = self.list_keys()
    for key in keys[self.PEND]:
        try:
            # Move the key file from the pending dir to the accepted dir.
            shutil.move(
                    os.path.join(
                        self.opts['pki_dir'],
                        self.PEND,
                        key),
                    os.path.join(
                        self.opts['pki_dir'],
                        self.ACC,
                        key)
                    )
            # Notify listeners on the event bus that the key was accepted.
            eload = {'result': True,
                     'act': 'accept',
                     'id': key}
            self.event.fire_event(eload,
                                  salt.utils.event.tagify(prefix='key'))
        except (IOError, OSError):
            # Best-effort: skip keys whose files cannot be moved.
            pass
    return self.list_keys()
def get_default(__func: Callable, __arg: str) -> str:
    """Fetch the default value declared for a function argument.

    Args:
        __func: Function to inspect
        __arg: Argument to extract default value for
    """
    params = signature(__func).parameters
    return params[__arg].default
Args:
__func: Function to inspect
__arg: Argument to extract default value for |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.