| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def trigger_event(name,
                  event,
                  value1=None,
                  value2=None,
                  value3=None
                  ):
    '''
    Trigger an event in IFTTT

    .. code-block:: yaml

        ifttt-event:
          ifttt.trigger_event:
            - event: TestEvent
            - value1: 'A value that we want to send.'
            - value2: 'A second value that we want to send.'
            - value3: 'A third value that we want to send.'

    The following parameters are required:

    name
        The unique name for this event.

    event
        The name of the event to trigger in IFTTT.

    The following parameters are optional:

    value1
        One of the values that we can send to IFTTT.

    value2
        One of the values that we can send to IFTTT.

    value3
        One of the values that we can send to IFTTT.
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}

    if __opts__['test']:
        ret['comment'] = 'The following trigger would be sent to IFTTT: {0}'.format(event)
        ret['result'] = None
        return ret

    # Fire the trigger via the ifttt execution module; it reports success
    # as a truthy value.
    result = __salt__['ifttt.trigger_event'](
        event=event,
        value1=value1,
        value2=value2,
        value3=value3
    )

    # `ret` is always a non-empty dict here, so the previous
    # `if ret and ret['result']` check was redundant; test the execution
    # module's result directly instead.
    if result:
        ret['result'] = True
        ret['comment'] = 'Triggered Event: {0}'.format(name)
    else:
        ret['comment'] = 'Failed to trigger event: {0}'.format(name)
    return ret
def gini(data):
    """
    Calculate the `Gini coefficient
    <https://en.wikipedia.org/wiki/Gini_coefficient>`_ of a 2D array.

    The coefficient is computed with the prescription of `Lotz et al. 2004
    <http://adsabs.harvard.edu/abs/2004AJ....128..163L>`_:

    .. math::
        G = \\frac{1}{\\left | \\bar{x} \\right | n (n - 1)}
        \\sum^{n}_{i} (2i - n - 1) \\left | x_i \\right |

    where :math:`\\bar{x}` is the mean over all pixel values :math:`x_i`.

    A value of 0 means the light is spread evenly over all pixels; a value
    of 1 means it is concentrated in a single pixel. Any preprocessing to
    isolate the galaxy region is left to the caller.

    Parameters
    ----------
    data : array-like
        The 2D data array or object that can be converted to an array.

    Returns
    -------
    gini : `float`
        The Gini coefficient of the input 2D array.
    """
    # Flatten and sort ascending: the weight (2i - n - 1) assumes rank order.
    values = np.sort(np.ravel(data))
    n = values.size
    ranks = np.arange(1, n + 1)
    weighted_sum = np.sum((2 * ranks - n - 1) * np.abs(values))
    return weighted_sum / (np.abs(np.mean(values)) * n * (n - 1))
def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False):
    """Get a list containing a single device environment, for a device of the given type on the given platform.

    This will only fetch devices that support double (possibly only double with a pragma
    defined, but still, it should support double).

    Args:
        cl_device_type (cl.device_type.* or string): The type of the device we want,
            can be a opencl device type or a string matching 'GPU', 'CPU' or 'ALL'.
        platform (opencl platform): The opencl platform to select the devices from
        fallback_to_any_device_type (boolean): If True, try to fallback to any possible device in the system.

    Returns:
        list of CLEnvironment: List with one element, the CL runtime environment requested.

    Raises:
        ValueError: if no suitable device could be found and no fallback was requested.
    """
    if isinstance(cl_device_type, str):
        cl_device_type = device_type_from_string(cl_device_type)

    if platform is None:
        platforms = cl.get_platforms()
    else:
        platforms = [platform]

    # Return the first double-capable device we can build an environment for.
    # (Renamed the loop variable so it no longer shadows the `platform`
    # parameter.)
    for candidate_platform in platforms:
        for dev in candidate_platform.get_devices(device_type=cl_device_type):
            if device_supports_double(dev):
                try:
                    return [CLEnvironment(candidate_platform, dev)]
                except cl.RuntimeError:
                    # Environment creation failed; try the next device.
                    pass

    # Nothing matched. The original code kept a `device` variable that was
    # never assigned, which made its final `raise` unreachable dead code.
    if fallback_to_any_device_type:
        return cl.get_platforms()[0].get_devices()
    raise ValueError('No devices of the specified type ({}) found.'.format(
        cl.device_type.to_string(cl_device_type)))
def get_device(self, id=None):
    """Return details of either the first or the specified device.

    :param int id: Identifier of desired device. If not given, first device
        found will be returned
    :returns tuple: Device ID, Device Address, Firmware Version
    """
    if id is None:
        if not self.devices:
            raise ValueError('No default device for %s' % self.hardware_id)
        # Pick the lowest-numbered device as the default.
        id, (device, version) = sorted(self.devices.items())[0]
    else:
        try:
            device, version = self.devices[id]
        except KeyError:
            error = 'Unable to find device with ID %s' % id
            log.error(error)
            raise ValueError(error)
    log.info("Using COM Port: %s, Device ID: %s, Device Ver: %s",
             device, id, version)
    return id, device, version
def tpictr(sample, lenout=_default_len_out, lenerr=_default_len_out):
    """
    Given a sample time string, create a time format picture
    suitable for use by the routine timout.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tpictr_c.html

    :param sample: A sample time string.
    :type sample: str
    :param lenout: The length for the output picture string.
    :type lenout: int
    :param lenerr: The length for the output error string.
    :type lenerr: int
    :return:
        A format picture that describes sample,
        Flag indicating whether sample parsed successfully,
        Diagnostic returned if sample cannot be parsed
    :rtype: tuple
    """
    # Marshal arguments into ctypes form for the CSPICE call.
    sample_p = stypes.stringToCharP(sample)
    pictur_p = stypes.stringToCharP(lenout)
    errmsg_p = stypes.stringToCharP(lenerr)
    lenout_c = ctypes.c_int(lenout)
    lenerr_c = ctypes.c_int(lenerr)
    ok_flag = ctypes.c_int()
    libspice.tpictr_c(sample_p, lenout_c, lenerr_c, pictur_p,
                      ctypes.byref(ok_flag), errmsg_p)
    return (stypes.toPythonString(pictur_p),
            ok_flag.value,
            stypes.toPythonString(errmsg_p))
def search(self, **kwargs):
    '''Query this object (and its descendants).
    Parameters
    ----------
    kwargs
        Each `(key, value)` pair encodes a search field in `key`
        and a target value in `value`.
        `key` must be a string, and should correspond to a property in
        the JAMS object hierarchy, e.g., 'Annotation.namespace` or `email`
        `value` must be either an object (tested for equality), a
        string describing a search pattern (regular expression), or a
        lambda function which evaluates to `True` if the candidate
        object matches the search criteria and `False` otherwise.
    Returns
    -------
    match : bool
        `True` if any of the search keys match the specified value,
        `False` otherwise, or if the search keys do not exist
        within the object.
    Examples
    --------
    >>> J = jams.JObject(foo=5, needle='quick brown fox')
    >>> J.search(needle='.*brown.*')
    True
    >>> J.search(needle='.*orange.*')
    False
    >>> J.search(badger='.*brown.*')
    False
    >>> J.search(foo=5)
    True
    >>> J.search(foo=10)
    False
    >>> J.search(foo=lambda x: x < 10)
    True
    >>> J.search(foo=lambda x: x > 10)
    False
    '''
    match = False
    r_query = {}
    myself = self.__class__.__name__
    # Pop this object name off the query
    # NOTE(review): query_pop presumably strips a leading
    # "<ClassName>." prefix so e.g. 'Annotation.namespace' only applies
    # to Annotation objects — confirm against query_pop's definition.
    for k, value in six.iteritems(kwargs):
        k_pop = query_pop(k, myself)
        if k_pop:
            r_query[k_pop] = value
    # No keys survived the prefix filtering: nothing can match here.
    if not r_query:
        return False
    # Test each surviving key against this object's own attributes.
    for key in r_query:
        if hasattr(self, key):
            match |= match_query(getattr(self, key), r_query[key])
    # No direct hit: recurse into any child JObject attributes.
    if not match:
        for attr in dir(self):
            obj = getattr(self, attr)
            if isinstance(obj, JObject):
                match |= obj.search(**r_query)
    return match
Parameters
----------
kwargs
Each `(key, value)` pair encodes a search field in `key`
and a target value in `value`.
`key` must be a string, and should correspond to a property in
the JAMS object hierarchy, e.g., 'Annotation.namespace` or `email`
`value` must be either an object (tested for equality), a
string describing a search pattern (regular expression), or a
lambda function which evaluates to `True` if the candidate
object matches the search criteria and `False` otherwise.
Returns
-------
match : bool
`True` if any of the search keys match the specified value,
`False` otherwise, or if the search keys do not exist
within the object.
Examples
--------
>>> J = jams.JObject(foo=5, needle='quick brown fox')
>>> J.search(needle='.*brown.*')
True
>>> J.search(needle='.*orange.*')
False
>>> J.search(badger='.*brown.*')
False
>>> J.search(foo=5)
True
>>> J.search(foo=10)
False
>>> J.search(foo=lambda x: x < 10)
True
>>> J.search(foo=lambda x: x > 10)
False |
def _load_root_directory(self):
    """
    Load the root directory, which has a cid of 0, and cache it.
    """
    # cid 0 always denotes the root of the remote directory tree.
    self._root_directory = Directory(api=self, **self._req_directory(0))
def forbild(space, resolution=False, ear=True, value_type='density',
            scale='auto'):
    """Standard FORBILD phantom in 2 dimensions.
    The FORBILD phantom is intended for testing CT algorithms and is intended
    to be similar to a human head.
    The phantom is defined using the following materials:
    ========================= ===== ================
    Material Index Density (g/cm^3)
    ========================= ===== ================
    Air 0 0.0000
    Cerebrospinal fluid (CSF) 1 1.0450
    Small less dense sphere 2 1.0475
    Brain 3 1.0500
    Small more dense sphere 4 1.0525
    Blood 5 1.0550
    Eyes 6 1.0600
    Bone 7 1.8000
    ========================= ===== ================
    Parameters
    ----------
    space : `DiscreteLp`
        The space in which the phantom should be corrected. Needs to be two-
        dimensional.
    resolution : bool, optional
        If ``True``, insert a small resolution test pattern to the left.
    ear : bool, optional
        If ``True``, insert an ear-like structure to the right.
    value_type : {'density', 'materials'}, optional
        The format the phantom should be given in.
        'density' returns floats in the range [0, 1.8] (g/cm^3)
        'materials' returns indices in the range [0, 7].
    scale : {'auto', 'cm', 'meters', 'mm'}, optional
        Controls how ``space`` should be rescaled to fit the definition of
        the forbild phantom, which is defined on the square
        [-12.8, 12.8] x [-12.8, 12.8] cm.
        * ``'auto'`` means that space is rescaled to fit exactly. The space is
          also centered at [0, 0].
        * ``'cm'`` means the dimensions of the space should be used as-is.
        * ``'m'`` means all dimensions of the space are multiplied by 100.
        * ``'mm'`` means all dimensions of the space are divided by 10.
    Returns
    -------
    forbild : ``space``-element
        FORBILD phantom discretized by ``space``.
    See Also
    --------
    shepp_logan : A simpler phantom for similar purposes, also working in 3d.
    References
    ----------
    .. _FORBILD phantom: www.imp.uni-erlangen.de/phantoms/head/head.html
    .. _algorithm: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3426508/
    """
    def transposeravel(arr):
        """Implement MATLAB's ``transpose(arr(:))``."""
        return arr.T.ravel()
    if not isinstance(space, DiscreteLp):
        raise TypeError('`space` must be a `DiscreteLp`')
    if space.ndim != 2:
        raise TypeError('`space` must be two-dimensional')
    # Normalize string options but keep the originals for error messages.
    scale, scale_in = str(scale).lower(), scale
    value_type, value_type_in = str(value_type).lower(), value_type
    # Create analytic description of phantom:
    # phantomE holds one ellipse per row (center,半axes, angle, value,
    # clip count) and phantomC the clipping half-planes — see usage below.
    phantomE, phantomC = _analytical_forbild_phantom(resolution, ear)
    # Rescale points to the default grid.
    # The forbild phantom is defined on [-12.8, 12.8] x [-12.8, 12.8]
    xcoord, ycoord = space.points().T
    if scale == 'auto':
        # Map the space's extent onto [0, 1], then onto the phantom square.
        xcoord = ((xcoord - space.min_pt[0]) /
                  (space.max_pt[0] - space.min_pt[0]))
        xcoord = 25.8 * xcoord - 12.8
        ycoord = ((ycoord - space.min_pt[1]) /
                  (space.max_pt[1] - space.min_pt[1]))
        ycoord = 25.8 * ycoord - 12.8
    elif scale == 'cm':
        pass  # dimensions already correct.
    elif scale == 'm':
        xcoord *= 100.0
        ycoord *= 100.0
    elif scale == 'mm':
        xcoord /= 10.0
        ycoord /= 10.0
    else:
        raise ValueError('unknown `scale` {}'.format(scale_in))
    # Compute the phantom values in each voxel by accumulating the value
    # of every (clipped) ellipse that contains the voxel.
    image = np.zeros(space.size)
    nclipinfo = 0
    for k in range(phantomE.shape[0]):
        # Handle elliptic bounds: points with ||D.Q.(x - x0)||^2 <= 1 are
        # inside the (rotated, scaled) ellipse.
        Vx0 = np.array([transposeravel(xcoord) - phantomE[k, 0],
                        transposeravel(ycoord) - phantomE[k, 1]])
        D = np.array([[1 / phantomE[k, 2], 0],
                      [0, 1 / phantomE[k, 3]]])
        phi = np.deg2rad(phantomE[k, 4])
        Q = np.array([[np.cos(phi), np.sin(phi)],
                      [-np.sin(phi), np.cos(phi)]])
        f = phantomE[k, 5]
        nclip = int(phantomE[k, 6])
        equation1 = np.sum(D.dot(Q).dot(Vx0) ** 2, axis=0)
        i = (equation1 <= 1.0)
        # Handle clipping surfaces: intersect the ellipse with each
        # half-plane; `nclipinfo` runs over phantomC columns globally.
        for _ in range(nclip):  # note: nclip can be 0
            d = phantomC[0, nclipinfo]
            psi = np.deg2rad(phantomC[1, nclipinfo])
            equation2 = np.array([np.cos(psi), np.sin(psi)]).dot(Vx0)
            i &= (equation2 < d)
            nclipinfo += 1
        image[i] += f
    if value_type == 'materials':
        # Threshold accumulated densities back into material indices.
        materials = np.zeros(space.size, dtype=space.dtype)
        # csf
        materials[(image > 1.043) & (image <= 1.047)] = 1
        # less_dense_sphere
        materials[(image > 1.047) & (image <= 1.048)] = 2
        # brain
        materials[(image > 1.048) & (image <= 1.052)] = 3
        # denser_sphere
        materials[(image > 1.052) & (image <= 1.053)] = 4
        # blood
        materials[(image > 1.053) & (image <= 1.058)] = 5
        # eye
        materials[(image > 1.058) & (image <= 1.062)] = 6
        # Bone
        materials[image > 1.75] = 7
        return space.element(materials.reshape(space.shape))
    elif value_type == 'density':
        return space.element(image.reshape(space.shape))
    else:
        raise ValueError('unknown `value_type` {}'.format(value_type_in))
The FORBILD phantom is intended for testing CT algorithms and is intended
to be similar to a human head.
The phantom is defined using the following materials:
========================= ===== ================
Material Index Density (g/cm^3)
========================= ===== ================
Air 0 0.0000
Cerebrospinal fluid (CSF) 1 1.0450
Small less dense sphere 2 1.0475
Brain 3 1.0500
Small more dense sphere 4 1.0525
Blood 5 1.0550
Eyes 6 1.0600
Bone 7 1.8000
========================= ===== ================
Parameters
----------
space : `DiscreteLp`
The space in which the phantom should be corrected. Needs to be two-
dimensional.
resolution : bool, optional
If ``True``, insert a small resolution test pattern to the left.
ear : bool, optional
If ``True``, insert an ear-like structure to the right.
value_type : {'density', 'materials'}, optional
The format the phantom should be given in.
'density' returns floats in the range [0, 1.8] (g/cm^3)
'materials' returns indices in the range [0, 7].
scale : {'auto', 'cm', 'meters', 'mm'}, optional
Controls how ``space`` should be rescaled to fit the definition of
the forbild phantom, which is defined on the square
[-12.8, 12.8] x [-12.8, 12.8] cm.
* ``'auto'`` means that space is rescaled to fit exactly. The space is
also centered at [0, 0].
* ``'cm'`` means the dimensions of the space should be used as-is.
* ``'m'`` means all dimensions of the space are multiplied by 100.
* ``'mm'`` means all dimensions of the space are divided by 10.
Returns
-------
forbild : ``space``-element
FORBILD phantom discretized by ``space``.
See Also
--------
shepp_logan : A simpler phantom for similar purposes, also working in 3d.
References
----------
.. _FORBILD phantom: www.imp.uni-erlangen.de/phantoms/head/head.html
.. _algorithm: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3426508/ |
def sector(self, start_ray, end_ray, start_distance=None, end_distance=None, units='b'):
    """Slices a sector from the selected dataset.

    Slice contains the start and end rays. If start and end rays are equal
    one ray is returned. If the start_ray is greater than the end_ray
    slicing continues over the 359-0 border.

    Parameters
    ----------
    start_ray : int
        Starting ray of of the slice first ray is 0
    end_ray : int
        End ray of the slice, last ray is 359

    Keywords
    --------
    start_distance : int
        Starting distance of the slice, if not defined sector starts
        form zero
    end_distance : int
        Ending distance of the slice, if not defined sector continues to
        the end last ray of the dataset
    units : str
        Units used in distance slicing. Option 'b' means that bin number
        is used as index. Option 'm' means that meters are used and the
        slicing index is calculated using bin width.

    Returns
    -------
    sector : ndarray
        Numpy array containing the sector values

    Raises
    ------
    ValueError
        If no dataset is selected, the ray bounds are invalid, or
        ``units`` is not 'b' or 'm'.
    MissingMetadataError
        If ``units='m'`` but the 'rscale' attribute is unavailable.

    Examples
    --------
    Get one ray from the selected dataset

    >>> pvol = odimPVOL('pvol.h5')
    >>> pvol.select_dataset('A', 'DBZH')
    >>> ray = pvol.sector(10, 10)

    Get sector from selected dataset, rays from 100 to 200
    at distances from 5 km to 10 km.

    >>> pvol = odimPVOL('pvol.h5')
    >>> pvol.select_dataset('A', 'DBZH')
    >>> sector = pvol.sector(100, 200, 5000, 10000)
    """
    if self.dataset is None:
        raise ValueError('Dataset is not selected')

    # Validate parameter values
    ray_max, distance_max = self.dataset.shape
    if start_ray > ray_max:
        raise ValueError('Value of start_ray is bigger than the number of rays')
    if start_ray < 0:
        raise ValueError('start_ray must be non negative')
    # BUG FIX: an unknown `units` value previously fell through all
    # branches and crashed later with UnboundLocalError.
    if units not in ('b', 'm'):
        raise ValueError("units must be 'b' or 'm'")

    if units == 'm' and (start_distance is not None or end_distance is not None):
        # Bin width in meters, needed to convert distances to bin indices.
        # BUG FIX: previously rscale was only looked up when
        # start_distance was given, so passing only end_distance with
        # units='m' raised NameError.
        try:
            rscale = next(self.attr_gen('rscale')).value
        except Exception:
            raise MissingMetadataError

    if start_distance is None:
        start_distance_index = 0
    elif units == 'b':
        start_distance_index = start_distance
    else:
        start_distance_index = int(start_distance / rscale)

    if end_distance is None:
        end_distance_index = distance_max
    elif units == 'b':
        end_distance_index = end_distance
    else:
        end_distance_index = int(end_distance / rscale)

    if end_ray is None:
        # Single-ray slice (1-D result).
        sector = self.dataset[start_ray, start_distance_index:end_distance_index]
    elif start_ray <= end_ray:
        sector = self.dataset[start_ray:end_ray + 1,
                              start_distance_index:end_distance_index]
    else:
        # Wrap around over the 359 -> 0 ray boundary.
        sector1 = self.dataset[start_ray:, start_distance_index:end_distance_index]
        sector2 = self.dataset[:end_ray + 1, start_distance_index:end_distance_index]
        sector = np.concatenate((sector1, sector2), axis=0)
    return sector
def load_model(self, name=None):
    '''
    Loads a saved version of the model.

    :param name: Base name of the ``<name>.npz`` file to load; defaults
        to ``self.name``.
    :returns: `True` if a saved model was found and loaded, `False`
        otherwise (or if ``self.clobber`` is set).
    '''
    # When clobbering, never reuse a previously saved model.
    if self.clobber:
        return False
    if name is None:
        name = self.name
    file = os.path.join(self.dir, '%s.npz' % name)
    if os.path.exists(file):
        if not self.is_parent:
            log.info("Loading '%s.npz'..." % name)
        try:
            data = np.load(file)
            for key in data.keys():
                try:
                    # `[()]` unwraps 0-d object arrays back into plain
                    # Python objects before assignment.
                    setattr(self, key, data[key][()])
                except NotImplementedError:
                    pass
            # HACK: Backwards compatibility. Previous version stored
            # the CDPP in the `cdpp6`
            # and `cdpp6_arr` attributes. Let's move them over.
            if hasattr(self, 'cdpp6'):
                self.cdpp = self.cdpp6
                del self.cdpp6
            if hasattr(self, 'cdpp6_arr'):
                self.cdpp_arr = np.array(self.cdpp6_arr)
                del self.cdpp6_arr
            if hasattr(self, 'gppp'):
                self.cdppg = self.gppp
                del self.gppp
            # HACK: At one point we were saving the figure instances,
            # so loading the .npz
            # opened a plotting window. I don't think this is the case
            # any more, so this
            # next line should be removed in the future...
            pl.close()
            return True
        except:
            # NOTE(review): deliberate best-effort — any load failure is
            # logged and the corrupt file renamed so the next run starts
            # fresh.
            log.warn("Error loading '%s.npz'." % name)
            exctype, value, tb = sys.exc_info()
            for line in traceback.format_exception_only(exctype, value):
                ln = line.replace('\n', '')
                log.warn(ln)
            os.rename(file, file + '.bad')
    # A parent target must have a loadable model; children may rebuild.
    if self.is_parent:
        raise Exception(
            'Unable to load `%s` model for target %d.'
            % (self.name, self.ID))
    return False
def open(filename, mode="rb",
         format=None, check=-1, preset=None, filters=None,
         encoding=None, errors=None, newline=None):
    """Open an LZMA-compressed file in binary or text mode.

    filename can be either an actual file name (given as a str or bytes
    object), in which case the named file is opened, or it can be an
    existing file object to read from or write to.

    The mode argument can be "r", "rb" (default), "w", "wb", "a", or "ab"
    for binary mode, or "rt", "wt" or "at" for text mode.

    The format, check, preset and filters arguments specify the
    compression settings, as for LZMACompressor, LZMADecompressor and
    LZMAFile.

    For binary mode, this function is equivalent to the LZMAFile
    constructor: LZMAFile(filename, mode, ...). In this case, the
    encoding, errors and newline arguments must not be provided.

    For text mode, a LZMAFile object is created, and wrapped in an
    io.TextIOWrapper instance with the specified encoding, error handling
    behavior, and line ending(s).
    """
    is_text = "t" in mode
    if is_text:
        if "b" in mode:
            raise ValueError("Invalid mode: %r" % (mode,))
    else:
        # The text-layer options are meaningless in binary mode.
        if encoding is not None:
            raise ValueError("Argument 'encoding' not supported in binary mode")
        if errors is not None:
            raise ValueError("Argument 'errors' not supported in binary mode")
        if newline is not None:
            raise ValueError("Argument 'newline' not supported in binary mode")

    # The underlying LZMAFile is always binary; strip the "t" flag.
    binary_file = LZMAFile(filename, mode.replace("t", ""),
                           format=format, check=check,
                           preset=preset, filters=filters)
    if is_text:
        return io.TextIOWrapper(binary_file, encoding, errors, newline)
    return binary_file
def generate_tree_path(fileDigest, depth):
    """Generate a relative path from the given fileDigest.

    The relative path has a number of directory levels according to
    *depth*: level ``p`` (1-based) consumes ``2**p`` characters of the
    digest and the remainder becomes the final path component.

    Args:
        fileDigest -- digest for which the relative path will be generate
        depth -- number of levels to use in relative path generation

    Returns:
        relative path for the given digest

    Raises:
        Exception: if depth is negative, the digest contains a path
            separator, or the digest is too short for the given depth.
    """
    if depth < 0:
        raise Exception("depth level can not be negative")
    if os.path.split(fileDigest)[1] != fileDigest:
        raise Exception("fileDigest cannot contain path separator")
    # Minimum length for the given depth: 2^1 + 2^2 + ... + 2^depth + 1,
    # which equals 2^(depth+1) - 1.
    required_length = (2 ** (depth + 1)) - 1
    if len(fileDigest) < required_length:
        raise Exception("fileDigest too short for the given depth")

    parts = []
    offset = 0
    for level in range(1, depth + 1):
        width = 2 ** level
        parts.append(fileDigest[offset:offset + width])
        offset += width
    parts.append(fileDigest[offset:])
    return os.path.join(*parts)
def _get_memory_banks_listed_in_dir(path):
    """Get all memory banks the kernel lists in a given directory.

    Such a directory can be /sys/devices/system/node/ (contains all memory
    banks) or /sys/devices/system/cpu/cpu*/ (contains all memory banks on
    the same NUMA node as that core).
    """
    # Such directories contain entries named "node<id>" for each memory
    # bank; strip the prefix and parse the numeric id.
    banks = []
    for entry in os.listdir(path):
        if entry.startswith('node'):
            banks.append(int(entry[len('node'):]))
    return banks
def read(self, source_path):
    """
    Parse content and metadata of Org files

    Keyword Arguments:
    source_path -- Path to the Org file to parse
    """
    with pelican_open(source_path) as text:
        lines = list(text.splitlines())

    header, content = self._separate_header_and_content(lines)

    # Run every raw metadata value through pelican's processing hook.
    raw_metadata = self._parse_metadatas(header)
    processed_metadata = {}
    for key, value in raw_metadata.items():
        processed_metadata[key] = self.process_metadata(key, value)

    content_html = convert_html("\n".join(content),
                                highlight=self.code_highlight)
    return content_html, processed_metadata
def save_token(token, domain='analytics.luminoso.com', token_file=None):
    """
    Take a long-lived API token and store it to a local file. Long-lived
    tokens can be retrieved through the UI. Optional arguments are the
    domain for which the token is valid and the file in which to store the
    token.
    """
    token_file = token_file or get_token_filename()
    if os.path.exists(token_file):
        # Merge with tokens already saved for other domains.
        # BUG FIX: the file handle was previously opened without being
        # closed (json.load(open(...))); use a context manager.
        with open(token_file) as f:
            saved_tokens = json.load(f)
    else:
        saved_tokens = {}
    saved_tokens[domain] = token

    directory = os.path.dirname(token_file)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    with open(token_file, 'w') as f:
        json.dump(saved_tokens, f)
def convert_timedelta(duration):
    """
    Summary:
        Convert duration into component time units
    Args:
        :duration (datetime.timedelta): time duration to convert
    Returns:
        days, hours, minutes, seconds | TYPE: tuple (integers)
    """
    # timedelta normalizes .seconds to [0, 86400); split it into h/m/s.
    hours, remainder = divmod(duration.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return duration.days, hours, minutes, seconds
def create_dataset(parent, path, overwrite=False, **kwargs):
    """Create a new dataset inside the parent HDF5 object.

    Parameters
    ----------
    parent : `h5py.Group`, `h5py.File`
        the object in which to create a new dataset
    path : `str`
        the path at which to create the new dataset
    overwrite : `bool`
        if `True`, delete any existing dataset at the desired path,
        default: `False`
    **kwargs
        other arguments are passed directly to
        :meth:`h5py.Group.create_dataset`

    Returns
    -------
    dataset : `h5py.Dataset`
        the newly created dataset
    """
    # Drop any existing dataset first when the caller asked to overwrite.
    if overwrite and path in parent:
        del parent[path]
    try:
        return parent.create_dataset(path, **kwargs)
    except RuntimeError as exc:
        # Make h5py's "name exists" error actionable before re-raising.
        if str(exc) == 'Unable to create link (Name already exists)':
            exc.args = ('{0}: {1!r}, pass overwrite=True '
                        'to ignore existing datasets'.format(str(exc), path),)
        raise
def chess960_pos(self) -> Optional[int]:
    """
    Gets the Chess960 starting position index between 0 and 959
    or ``None``.
    """
    # A Chess960 start position must have both armies untouched on their
    # first two ranks, with pawns on ranks 2 and 7 and nothing promoted.
    if self.occupied_co[WHITE] != BB_RANK_1 | BB_RANK_2:
        return None
    if self.occupied_co[BLACK] != BB_RANK_7 | BB_RANK_8:
        return None
    if self.pawns != BB_RANK_2 | BB_RANK_7:
        return None
    if self.promoted:
        return None
    # Piece counts.
    brnqk = [self.bishops, self.rooks, self.knights, self.queens, self.kings]
    if [popcount(pieces) for pieces in brnqk] != [4, 4, 4, 2, 2]:
        return None
    # Symmetry: black's back rank must mirror white's back rank.
    if any((BB_RANK_1 & pieces) << 56 != BB_RANK_8 & pieces for pieces in brnqk):
        return None
    # Algorithm from ChessX, src/database/bitboard.cpp, r2254.
    # One bishop must stand on b1/d1/f1/h1 (mask 2+8+32+128), the other
    # on a1/c1/e1/g1 (mask 1+4+16+64); their files encode the low part
    # of the position index.
    x = self.bishops & (2 + 8 + 32 + 128)
    if not x:
        return None
    bs1 = (lsb(x) - 1) // 2
    cc_pos = bs1
    x = self.bishops & (1 + 4 + 16 + 64)
    if not x:
        return None
    bs2 = lsb(x) * 2
    cc_pos += bs2
    # Scan the first rank left to right, tracking the queen's position
    # among non-bishop pieces (q/qf) and the knights' positions among
    # the remaining pieces (n0/n1), while validating the R-K-R order (rf).
    q = 0
    qf = False
    n0 = 0
    n1 = 0
    n0f = False
    n1f = False
    rf = 0
    n0s = [0, 4, 7, 9]
    for square in range(A1, H1 + 1):
        bb = BB_SQUARES[square]
        if bb & self.queens:
            qf = True
        elif bb & self.rooks or bb & self.kings:
            if bb & self.kings:
                # The king must sit between the two rooks.
                if rf != 1:
                    return None
            else:
                rf += 1
            if not qf:
                q += 1
            if not n0f:
                n0 += 1
            elif not n1f:
                n1 += 1
        elif bb & self.knights:
            if not qf:
                q += 1
            if not n0f:
                n0f = True
            elif not n1f:
                n1f = True
    # Combine queen slot (x16) and knight configuration (x96) into the
    # final 0..959 index.
    if n0 < 4 and n1f and qf:
        cc_pos += q * 16
        krn = n0s[n0] + n1
        cc_pos += krn * 96
        return cc_pos
    else:
        return None
or ``None``. |
def set_properties(self, properties):
    """
    Updates the service properties

    :param properties: The new properties
    :raise TypeError: The argument is not a dictionary
    """
    if not isinstance(properties, dict):
        raise TypeError("Waiting for dictionary")

    # Strip keys that must never be overridden by a caller.
    for forbidden_key in (OBJECTCLASS, SERVICE_ID):
        properties.pop(forbidden_key, None)

    # Drop entries whose value did not change.
    unchanged = [key for key, value in properties.items()
                 if self.__properties.get(key) == value]
    for key in unchanged:
        del properties[key]

    if not properties:
        # Nothing left to update
        return

    # Normalize the service ranking, if it is part of the update.
    try:
        properties[SERVICE_RANKING] = int(properties[SERVICE_RANKING])
    except KeyError:
        # Ranking not updated: nothing to normalize
        pass
    except (ValueError, TypeError):
        # Invalid ranking value: drop it from the update
        del properties[SERVICE_RANKING]

    # pylint: disable=W0212
    with self.__reference._props_lock:
        # Apply the update while keeping a snapshot of the old values.
        previous = self.__properties.copy()
        self.__properties.update(properties)

        if self.__reference.needs_sort_update():
            # The sort key and the registry must be updated
            self.__update_callback(self.__reference)

        # Notify the framework about the modification.
        event = ServiceEvent(
            ServiceEvent.MODIFIED, self.__reference, previous
        )
        self.__framework._dispatcher.fire_service_event(event)
def get_bin_query_session(self, proxy):
    """Gets the bin query session.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.resource.BinQuerySession) - a ``BinQuerySession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_bin_query()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_bin_query()`` is ``true``.*
    """
    # Guard: the session is only available when the service supports it.
    if self.supports_bin_query():
        # pylint: disable=no-member
        return sessions.BinQuerySession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
def get_file_hexdigest(filename, blocksize=1024*1024*10):
    '''Get a hex digest of a file.

    The file is read in binary mode in chunks of ``blocksize`` bytes, so
    arbitrarily large files can be hashed without loading them entirely
    into memory.

    :param filename: path of the file to hash
    :param blocksize: number of bytes read per iteration (default: 10 MiB)
    :returns: hexadecimal MD5 digest of the file contents
    '''
    # 'hashlib' is always available on supported Pythons; the legacy
    # 'md5'-module fallback (which called hashlib.new() without an
    # algorithm and would fail) is gone.  MD5 is used here for change
    # detection, not for security.
    m = hashlib.md5()
    # Binary mode is required: hashing operates on bytes, and text mode
    # would corrupt the digest through newline/encoding translation.
    with open(filename, 'rb') as fd:
        while True:
            data = fd.read(blocksize)
            if not data:
                break
            m.update(data)
    return m.hexdigest()
def initreadtxt(self, idftxt):
    """
    Use the current IDD and read an IDF from text data. If the IDD has not
    yet been initialised then this is done first.

    Parameters
    ----------
    idftxt : str
        Text representing an IDF file.
    """
    # Use identity comparison for None (PEP 8) and only build the IDD
    # buffer when it is actually needed.
    if self.getiddname() is None:
        self.setiddname(StringIO(iddcurrent.iddtxt))
    self.idfname = StringIO(idftxt)
    self.read()
def dot(vec1, vec2):
    """Calculate the dot product between two Vectors"""
    # Both operands must be Vector2 instances.
    if not (isinstance(vec1, Vector2) and isinstance(vec2, Vector2)):
        raise TypeError("vec1 and vec2 must be Vector2's")
    return vec1.X * vec2.X + vec1.Y * vec2.Y
def to_ped(self):
    """
    Return a generator with the info in ped format.

    Yields:
        An iterator with the family info in ped format
    """
    # The six mandatory PED columns, in order.
    ped_header = [
        '#FamilyID',
        'IndividualID',
        'PaternalID',
        'MaternalID',
        'Sex',
        'Phenotype',
    ]
    # Optional columns, appended only when at least one individual
    # actually carries the corresponding extra info.
    extra_headers = [
        'InheritanceModel',
        'Proband',
        'Consultand',
        'Alive'
    ]
    for individual_id in self.individuals:
        individual = self.individuals[individual_id]
        # Extend the header with any recognised extra field (once each).
        for info in individual.extra_info:
            if info in extra_headers:
                if info not in ped_header:
                    ped_header.append(info)

    self.logger.debug("Ped headers found: {0}".format(
        ', '.join(ped_header)
    ))

    # First yielded line is the header row.
    yield '\t'.join(ped_header)

    for family_id in self.families:
        for individual_id in self.families[family_id].individuals:
            individual = self.families[family_id].individuals[individual_id].to_json()
            ped_info = []
            ped_info.append(individual['family_id'])
            ped_info.append(individual['id'])
            ped_info.append(individual['father'])
            ped_info.append(individual['mother'])
            ped_info.append(individual['sex'])
            ped_info.append(individual['phenotype'])

            # Fill optional columns; '.' marks a missing value.
            if len(ped_header) > 6:
                for header in ped_header[6:]:
                    ped_info.append(individual['extra_info'].get(header, '.'))

            yield '\t'.join(ped_info)
def merge_into_group(self, group):
    """
    Redefines :meth:`~candv.base.Constant.merge_into_group` and adds
    ``verbose_name`` and ``help_text`` attributes to the target group.
    """
    super(VerboseMixin, self).merge_into_group(group)
    # Propagate the human-readable metadata onto the group.
    for attr in ('verbose_name', 'help_text'):
        setattr(group, attr, getattr(self, attr))
def main(sample_id, trace_file, workdir):
    """
    Parses a nextflow trace file, searches for processes with a specific tag
    and sends a JSON report with the relevant information

    The expected fields for the trace file are::

        0. task_id
        1. process
        2. tag
        3. status
        4. exit code
        5. start timestamp
        6. container
        7. cpus
        8. duration
        9. realtime
        10. queue
        11. cpu percentage
        12. memory percentage
        13. real memory size of the process
        14. virtual memory size of the process

    Parameters
    ----------
    sample_id : str
        Sample identifier; used to name the per-sample stats JSON file.
    trace_file : str
        Path to the nextflow trace file
    workdir : str
        Directory containing the trace file and receiving the stats JSON.
    """
    # Determine the path of the stored JSON for the sample_id
    stats_suffix = ".stats.json"
    stats_path = join(workdir, sample_id + stats_suffix)
    trace_path = join(workdir, trace_file)

    logger.info("Starting pipeline status routine")
    logger.debug("Checking for previous pipeline status data")
    # Start from any previously stored stats so re-runs accumulate.
    stats_array = get_previous_stats(stats_path)
    logger.info("Stats JSON object set to : {}".format(stats_array))

    # Search for this substring in the tags field. Only lines with this
    # tag will be processed for the reports
    tag = " getStats"
    logger.debug("Tag variable set to: {}".format(tag))

    logger.info("Starting parsing of trace file: {}".format(trace_path))
    with open(trace_path) as fh:
        # First line of the trace is the column header.
        header = next(fh).strip().split()
        logger.debug("Header set to: {}".format(header))
        for line in fh:
            fields = line.strip().split("\t")
            # Check if tag substring is in the tag field of the nextflow trace
            if tag in fields[2] and fields[3] == "COMPLETED":
                logger.debug(
                    "Parsing trace line with COMPLETED status: {}".format(
                        line))
                current_json = get_json_info(fields, header)
                # Keyed by task_id, so reprocessed tasks overwrite
                # their previous entry.
                stats_array[fields[0]] = current_json
            else:
                logger.debug(
                    "Ignoring trace line without COMPLETED status"
                    " or stats specific tag: {}".format(
                        line))

    # Write both the per-sample stats file and the generic report file.
    with open(join(stats_path), "w") as fh, open(".report.json", "w") as rfh:
        fh.write(json.dumps(stats_array, separators=(",", ":")))
        rfh.write(json.dumps(stats_array, separators=(",", ":")))
def parse_image_response(self, response):
    """
    Parse a single object from the RETS feed

    :param response: The response from the RETS server
    :return: Object
    """
    if 'xml' in response.headers.get('Content-Type'):
        # An XML payload from the object endpoint signals an error reply;
        # let the reply-code analyzer raise the appropriate exception.
        parsed = xmltodict.parse(response.text)
        self.analyze_reply_code(xml_response_dict=parsed)
    return self._response_object_from_header(
        obj_head_dict=response.headers,
        content=response.content)
def value(self):
    """Return sub type and sub value as binary data.

    Returns:
        :class:`~pyof.foundation.basic_types.BinaryData`:
            BinaryData calculated.
    """
    # Pack the one-byte sub type followed by the packed sub value.
    packed = UBInt8(self.sub_type).pack()
    packed += self.sub_value.pack()
    return BinaryData(packed)
def create_annotation(xml_file, from_fasst):
    """Create annotations by importing from FASST sleep scoring file.

    Parameters
    ----------
    xml_file : path to xml file
        annotation file that will be created
    from_fasst : path to FASST file
        .mat file containing the scores

    Returns
    -------
    instance of Annotations

    Raises
    ------
    UnrecognizedFormat
        if ``from_fasst`` cannot be loaded as a FASST .mat file

    TODO
    ----
    Merge create_annotation and create_empty_annotations
    """
    xml_file = Path(xml_file)
    try:
        # Only the 'D' variable (the FASST data structure) is needed.
        mat = loadmat(str(from_fasst), variable_names='D', struct_as_record=False,
                      squeeze_me=True)
    except ValueError:
        raise UnrecognizedFormat(str(from_fasst) + ' does not look like a FASST .mat file')

    D = mat['D']
    info = D.other.info
    score = D.other.CRC.score

    # info.hour[2] holds seconds with a fractional part; split it so the
    # start datetime carries microsecond precision.
    microsecond, second = modf(info.hour[2])
    start_time = datetime(*info.date, int(info.hour[0]), int(info.hour[1]),
                          int(second), int(microsecond * 1e6))
    first_sec = score[3, 0][0]
    # Number of epochs times the epoch length of the first rater.
    last_sec = score[0, 0].shape[0] * score[2, 0]

    # Build the annotations XML skeleton.
    root = Element('annotations')
    root.set('version', VERSION)
    info = SubElement(root, 'dataset')
    x = SubElement(info, 'filename')
    x.text = D.other.info.fname
    x = SubElement(info, 'path')  # not to be relied on
    x.text = D.other.info.fname
    x = SubElement(info, 'start_time')
    x.text = start_time.isoformat()
    x = SubElement(info, 'first_second')
    x.text = str(int(first_sec))
    x = SubElement(info, 'last_second')
    x.text = str(int(last_sec))

    xml = parseString(tostring(root))
    with xml_file.open('w') as f:
        f.write(xml.toxml())

    annot = Annotations(xml_file)

    # Import each rater's scores; NaN epochs are unscored and skipped.
    n_raters = score.shape[1]
    for i_rater in range(n_raters):
        rater_name = score[1, i_rater]
        epoch_length = int(score[2, i_rater])
        annot.add_rater(rater_name, epoch_length=epoch_length)

        for epoch_start, epoch in enumerate(score[0, i_rater]):
            if isnan(epoch):
                continue
            # Defer saving until all epochs are set (save=False).
            annot.set_stage_for_epoch(epoch_start * epoch_length,
                                      FASST_STAGE_KEY[int(epoch)], save=False)

    annot.save()
    return annot
def _split_markdown(text_url, tld_pos):
"""
Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
"""
# Markdown url can looks like:
# [http://example.com/](http://example.com/status/210)
left_bracket_pos = text_url.find('[')
# subtract 3 because URL is never shorter than 3 characters
if left_bracket_pos > tld_pos-3:
return text_url
right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url | Split markdown URL. There is an issue wen Markdown URL is found.
Parsing of the URL does not stop on right place so wrongly found URL
has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str |
def get(self, field):
    """
    Returns the value of a user field.

    :param str field:
        The name of the user field.
    :returns: str -- the value
    """
    # Top-level fields live directly on the record; any other field is
    # looked up inside the nested 'app_data' mapping.
    if field not in ('username', 'uuid', 'app_data'):
        return self.data.get('app_data', {})[field]
    return self.data[field]
def assert_is_instance(obj, cls, msg_fmt="{msg}"):
    """Fail if an object is not an instance of a class or tuple of classes.

    >>> assert_is_instance(5, int)
    >>> assert_is_instance('foo', (str, bytes))
    >>> assert_is_instance(5, str)
    Traceback (most recent call last):
        ...
    AssertionError: 5 is an instance of <class 'int'>, expected <class 'str'>

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * obj - object to test
    * types - tuple of types tested against
    """
    if isinstance(obj, cls):
        return
    # Normalize to a tuple so msg_fmt always receives one.
    types = cls if isinstance(cls, tuple) else (cls,)
    msg = "{!r} is an instance of {!r}, expected {!r}".format(
        obj, obj.__class__, cls
    )
    fail(msg_fmt.format(msg=msg, obj=obj, types=types))
def _check_constant_params(
    a, has_const=False, use_const=True, rtol=1e-05, atol=1e-08
):
    """Helper func to interaction between has_const and use_const params.

    has_const   use_const   outcome
    ---------   ---------   -------
    True        True        Confirm that a has constant; return a
    False       False       Confirm that a doesn't have constant; return a
    False       True        Confirm that a doesn't have constant; add constant
    True        False       ValueError

    Returns the number of non-constant regressors ``k`` together with the
    (possibly augmented) array ``a``.
    """
    if all((has_const, use_const)):
        if not _confirm_constant(a):
            raise ValueError(
                "Data does not contain a constant; specify" " has_const=False"
            )
        # One column is the constant, so it does not count as a regressor.
        k = a.shape[-1] - 1
    elif not any((has_const, use_const)):
        if _confirm_constant(a):
            raise ValueError(
                "Data already contains a constant; specify" " has_const=True"
            )
        k = a.shape[-1]
    elif not has_const and use_const:
        # Also run a quick check to confirm that `a` is *not* ~N(0,1).
        # In this case, constant should be zero. (exclude it entirely)
        c1 = np.allclose(a.mean(axis=0), b=0.0, rtol=rtol, atol=atol)
        c2 = np.allclose(a.std(axis=0), b=1.0, rtol=rtol, atol=atol)
        if c1 and c2:
            # TODO: maybe we want to just warn here?
            raise ValueError(
                "Data appears to be ~N(0,1). Specify" " use_constant=False."
            )
        # `has_constant` does checking on its own and raises VE if True
        try:
            a = add_constant(a, has_constant="raise")
        except ValueError as e:
            raise ValueError(
                "X data already contains a constant; please specify"
                " has_const=True"
            ) from e
        k = a.shape[-1] - 1
    else:
        raise ValueError("`use_const` == False implies has_const is False.")
    return k, a
def service_set_tag(path, service_name, tag):
    '''
    Change the tag of a docker-compose service

    This does not pull or up the service
    This wil re-write your yaml file. Comments will be lost. Indentation is set to 2 spaces

    path
        Path where the docker-compose file is stored on the server
    service_name
        Name of the service to remove
    tag
        Name of the tag (often used as version) that the service image should have

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.service_create /path/where/docker-compose/stored service_name tag
    '''
    compose_result, err = __load_docker_compose(path)
    if err:
        return err

    services = compose_result['compose_content']['services']
    if service_name not in services:
        return __standardize_result(
            False,
            'Service {0} did not exists'.format(service_name),
            None, None)

    service = services[service_name]
    if 'image' not in service:
        return __standardize_result(
            False,
            'Service {0} did not contain the variable "image"'.format(service_name),
            None, None)

    # Keep the repository part of the image reference, swap only the tag.
    base_image = service['image'].split(':')[0]
    service['image'] = '{0}:{1}'.format(base_image, tag)
    return __dump_compose_file(
        path, compose_result,
        'Service {0} is set to tag "{1}"'.format(service_name, tag),
        already_existed=True)
def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None):
    '''Convert a file size to human-readable form.

    Keyword arguments:
    size -- file size in bytes
    a_kilobyte_is_1024_bytes -- if True, use multiples of 1024;
                                if False (the default), use multiples of 1000
    precision -- number of decimal places in the formatted result
    target -- optional suffix at which to stop scaling (its exact spelling
              depends on SUFFIXES); if never matched, the last suffix is used

    Returns: string

    Credit: <http://diveintopython3.org/your-first-python-program.html>

    >>> print(human_size(1000000000000, True))
    931.3GiB
    >>> print(human_size(1000000000000))
    1.0Tb
    >>> print(human_size(300))
    300.0
    '''
    if size < 0:
        raise ValueError('number must be non-negative')

    multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
    for suffix in SUFFIXES[multiple]:
        if target:
            # Keep scaling until the requested suffix is reached.
            if suffix == target:
                break
            size /= float(multiple)
        else:
            if size >= multiple:
                size /= float(multiple)
            else:
                break

    return '{0:.{1}f}{2}'.format(size, precision, suffix)
def export(self, filename, offset=0, length=None):
    """Exports byte array to specified destination

    Args:
        filename (str): destination to output file
        offset (int): byte offset (default: 0)
        length (int): number of bytes to export, counted from ``offset``;
            ``None`` (default) exports everything from ``offset`` to the
            end of the data
    """
    self.__validate_offset(filename=filename, offset=offset, length=length)
    with open(filename, 'w') as f:
        if length is None:
            length = len(self.data) - offset
        # 'length' is a byte count, so the slice must end at
        # offset + length; the previous code used it as an absolute end
        # index, truncating the output whenever offset > 0.
        f.write(self.data[offset:offset + length])
def permitted_query(self, query, group, operations):
    '''Change the ``query`` so that only instances for which
    ``group`` has roles with permission on ``operations`` are returned.'''
    session = query.session
    models = session.router
    user = group.user
    if user.is_superuser:    # super-users have all permissions
        return query
    roles = group.roles.query()
    # NOTE(review): duplicate assignment — the line above is redundant.
    roles = group.roles.query()  # query on all roles for group
    # The throgh model for Role/Permission relationship
    # NOTE(review): 'throgh' is a typo for 'through' (local name only).
    throgh_model = models.role.permissions.model
    # NOTE(review): the result of this filter is discarded; this call has
    # no effect as written — verify intent.
    models[throgh_model].filter(role=roles,
                                permission__model_type=query.model,
                                permission__operations=operations)
    # query on all relevant permissions
    # NOTE(review): 'router' is not defined in this scope (probably meant
    # 'models'); 'permissions' is also never used afterwards.
    permissions = router.permission.filter(model_type=query.model,
                                           level=operations)
    owner_query = query.filter(user=user)
    # all roles for the query model with appropriate permission level
    # NOTE(review): 'level' is undefined here — this line will raise
    # NameError at runtime.
    roles = models.role.filter(model_type=query.model, level__ge=level)
    # Now we need groups which have these roles
    groups = Role.groups.throughquery(
        session).filter(role=roles).get_field('group')
    # I need to know if user is in any of these groups
    if user.groups.filter(id=groups).count():
        # it is, lets get the model with permissions less
        # or equal permission level
        permitted = models.instancerole.filter(
            role=roles).get_field('object_id')
        # NOTE(review): 'model' is undefined (probably 'query.model') —
        # confirm before relying on this branch.
        return owner_query.union(model.objects.filter(id=permitted))
    else:
        return owner_query
def get_devices(self):
    """
    Helper that returns the devices handled by this server.

    :return:
        Returns a tuple of two elements:
          - dict<tango class name : list of device names>
          - dict<device names : tango class name>
    :rtype: tuple<dict, dict>
    """
    # Use the cached util object when available, otherwise open a fresh
    # database connection.
    if self.__util is None:
        import tango
        db = tango.Database()
    else:
        db = self.__util.get_database()

    dev_list = db.get_device_class_list(self.server_instance)
    class_map = {}
    dev_map = {}
    # The raw list alternates device name / class name entries.
    for dev_name, class_name in zip(dev_list[::2], dev_list[1::2]):
        dev_name = dev_name.lower()
        class_map.setdefault(class_name, []).append(dev_name)
        dev_map[dev_name] = class_name
    return class_map, dev_map
def Miqueu(T, Tc, Vc, omega):
    r'''Calculates air-water surface tension using the methods of [1]_.

    .. math::
        \sigma = k T_c \left( \frac{N_a}{V_c}\right)^{2/3}
        (4.35 + 4.14 \omega)t^{1.26}(1+0.19t^{0.5} - 0.487t)

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Vc : float
        Critical volume of fluid [m^3/mol]
    omega : float
        Acentric factor for fluid, [-]

    Returns
    -------
    sigma : float
        Liquid surface tension, N/m

    Notes
    -----
    Uses Avogadro's constant and the Boltsman constant.
    Internal units of volume are mL/mol and mN/m. However, either a typo
    is in the article or author's work, or my value of k is off by 10; this is
    corrected nonetheless.
    Created with 31 normal fluids, none polar or hydrogen bonded. Has an
    AARD of 3.5%.

    NOTE(review): the implementation below uses a linear coefficient of
    0.25 while the equation shown above uses 0.487 — confirm against the
    original paper; the doctest value was produced with 0.25.

    Examples
    --------
    Bromotrifluoromethane, 2.45 mN/m

    >>> Miqueu(300., 340.1, 0.000199, 0.1687)
    0.003474099603581931

    References
    ----------
    .. [1] Miqueu, C, D Broseta, J Satherley, B Mendiboure, J Lachaise, and
       A Graciaa. "An Extended Scaled Equation for the Temperature Dependence
       of the Surface Tension of Pure Compounds Inferred from an Analysis of
       Experimental Data." Fluid Phase Equilibria 172, no. 2 (July 5, 2000):
       169-82. doi:10.1016/S0378-3812(00)00384-8.
    '''
    Vc = Vc*1E6  # m^3/mol -> mL/mol (the correlation's internal units)
    t = 1.-T/Tc  # reduced distance from the critical temperature
    # Final *10000 converts the internally computed value to N/m (see Notes).
    sigma = k*Tc*(N_A/Vc)**(2/3.)*(4.35 + 4.14*omega)*t**1.26*(1+0.19*t**0.5 - 0.25*t)*10000
    return sigma
def upload_files(self, container, src_dst_map, content_type=None):
    """Upload multiple files."""
    content_type = content_type or "application/octet.stream"
    url = self.make_url(container, None, None)
    multi_files = []
    try:
        # Build the multipart payload; destination names default to the
        # source file's basename.
        for src_path in src_dst_map:
            dst_name = src_dst_map[src_path] or os.path.basename(src_path)
            handle = open(src_path, 'rb')
            multi_files.append(('files', (dst_name, handle, content_type)))
        rsp = requests.post(url, headers=self._base_headers,
                            files=multi_files, timeout=self._timeout)
    except requests.exceptions.ConnectionError as e:
        RestHttp._raise_conn_error(e)
    finally:
        # Close every file we managed to open, even on failure.
        for _tag, (_dst, handle, _ctype) in multi_files:
            handle.close()
    return self._handle_response(rsp)
def delete(self):
    """
    Deletes resources of this widget that require manual cleanup.

    Currently removes all actions, event handlers and the background.

    The background itself should automatically remove all vertex lists to avoid visual artifacts.

    Note that this method is currently experimental, as it seems to have a memory leak.
    """
    # TODO: fix memory leak upon widget deletion
    # Break the reference cycle between the widget and its background.
    del self.bg.widget
    del self.bg
    #self.clickable=False
    del self._pos
    del self._size
    self.actions = {}
    # Walk every registered event-handler list and drop handlers that
    # point at this widget, unwrapping weak references where necessary.
    for e_type,e_handlers in self.peng.eventHandlers.items():
        # NOTE(review): 'True or ...' makes this condition always true and
        # 'eh' is undefined — the per-event-type filter is dead code.
        if True or e_type in eh:
            to_del = []
            for e_handler in e_handlers:
                # Weird workaround due to implementation details of WeakMethod
                if isinstance(e_handler,weakref.ref):
                    if super(weakref.WeakMethod,e_handler).__call__() is self:
                        to_del.append(e_handler)
                elif e_handler is self:
                    to_del.append(e_handler)
            for d in to_del:
                try:
                    #print("Deleting handler %s of type %s"%(d,e_type))
                    del e_handlers[e_handlers.index(d)]
                except Exception:
                    #print("Could not delete handler %s, memory leak may occur"%d)
                    import traceback;traceback.print_exc()
def _parse_resolution(self, tokens):
    """
    Parse resolution from the GROUP BY statement.
    E.g. GROUP BY time(10s) would mean a 10 second resolution
    :param tokens:
    :return:
    """
    # Extract the GROUP BY clause first, then let the resolution parser
    # interpret the time(...) expression inside it.
    group_by_clause = self.parse_keyword(Keyword.GROUP_BY, tokens)
    return self.resolution_parser.parse(group_by_clause)
def replace_s(self, c_bra, c_ket, s):
    '''
    Replace the characters between ``c_bra`` and ``c_ket`` in
    ``self.current`` with the characters of ``s``.

    @type c_bra int
    @type c_ket int
    @type s: string
    '''
    # Positive when the replacement grows the buffer, negative when it
    # shrinks it.
    adjustment = len(s) - (c_ket - c_bra)
    self.current = ''.join((self.current[:c_bra], s, self.current[c_ket:]))
    self.limit += adjustment
    # Keep the cursor consistent with the edited region.
    if self.cursor >= c_ket:
        self.cursor += adjustment
    elif self.cursor > c_bra:
        self.cursor = c_bra
    return adjustment
def custom_role(self):
    """
    | Comment: A custom role if the user is an agent on the Enterprise plan
    """
    # Resolve the role lazily via the API; None when there is no API
    # client or no custom role id.
    if not (self.api and self.custom_role_id):
        return None
    return self.api._get_custom_role(self.custom_role_id)
def isRegionValid(self):
    """ Returns false if the whole region is not even partially inside any screen, otherwise true """
    for screen in PlatformManager.getScreenDetails():
        s_x, s_y, s_w, s_h = screen["rect"]
        # Axis-aligned overlap test between this region and the screen.
        overlaps_x = self.x + self.w >= s_x and s_x + s_w >= self.x
        overlaps_y = self.y + self.h >= s_y and s_y + s_h >= self.y
        if overlaps_x and overlaps_y:
            return True
    return False
def is_layer_compatible(self, layer, layer_purpose=None, keywords=None):
    """Validate if a given layer is compatible for selected IF
       as a given layer_purpose

    :param layer: The layer to be validated
    :type layer: QgsVectorLayer | QgsRasterLayer

    :param layer_purpose: The layer_purpose the layer is validated for
    :type layer_purpose: None, string

    :param keywords: The layer keywords
    :type keywords: None, dict

    :returns: True if layer is appropriate for the selected role
    :rtype: boolean
    """
    # If not explicitly stated, find the desired purpose
    # from the parent step
    if not layer_purpose:
        layer_purpose = self.get_parent_mode_constraints()[0]['key']

    # If not explicitly stated, read the layer's keywords
    if not keywords:
        try:
            keywords = self.keyword_io.read_keywords(layer)
            if 'layer_purpose' not in keywords:
                keywords = None
        except (HashNotFoundError,
                OperationalError,
                NoKeywordsFoundError,
                KeywordNotFoundError,
                InvalidParameterError,
                UnsupportedProviderError):
            # Unreadable keywords are treated like missing keywords.
            keywords = None

    # Get allowed subcategory and layer_geometry from IF constraints
    h, e, hc, ec = self.selected_impact_function_constraints()
    if layer_purpose == 'hazard':
        subcategory = h['key']
        layer_geometry = hc['key']
    elif layer_purpose == 'exposure':
        subcategory = e['key']
        layer_geometry = ec['key']
    else:
        # For aggregation layers, use a simplified test and return
        if (keywords and 'layer_purpose' in keywords
                and keywords['layer_purpose'] == layer_purpose):
            return True
        if not keywords and is_polygon_layer(layer):
            return True
        return False

    # Compare layer properties with explicitly set constraints
    # Reject if layer geometry doesn't match
    if layer_geometry != self.get_layer_geometry_key(layer):
        return False

    # If no keywords, there's nothing more we can check.
    # The same if the keywords version doesn't match
    if not keywords or 'keyword_version' not in keywords:
        return True

    keyword_version = str(keywords['keyword_version'])
    if not is_keyword_version_supported(keyword_version):
        return True

    # Compare layer keywords with explicitly set constraints
    # Reject if layer purpose missing or doesn't match
    if ('layer_purpose' not in keywords
            or keywords['layer_purpose'] != layer_purpose):
        return False

    # Reject if layer subcategory doesn't match
    if (layer_purpose in keywords
            and keywords[layer_purpose] != subcategory):
        return False

    return True
def mergeCatalogs(catalog_list):
    """
    Merge a list of Catalogs.

    Parameters:
    -----------
    catalog_list : List of Catalog objects.

    Returns:
    --------
    catalog : Combined Catalog object

    Raises:
    -------
    ValueError : if the list is empty or the catalogs' data columns differ.
    """
    if not catalog_list:
        # An empty list previously crashed with IndexError below.
        raise ValueError("No catalogs to merge.")
    # All catalogs must share an identical column layout.
    names = catalog_list[0].data.dtype.names
    for c in catalog_list:
        if c.data.dtype.names != names:
            # Raise a specific exception type instead of bare Exception.
            raise ValueError("Catalog data columns not the same.")
    data = np.concatenate([c.data for c in catalog_list])
    # The merged catalog keeps the configuration of the first catalog.
    config = catalog_list[0].config
    return Catalog(config, data=data)
def binary_xloss(logits, labels, ignore=None):
    """
    Binary Cross entropy loss
      logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
      ignore: void class id
    """
    # Flatten both tensors and drop pixels belonging to the void class,
    # then evaluate the numerically stable BCE loss.
    flat_logits, flat_labels = flatten_binary_scores(logits, labels, ignore)
    return StableBCELoss()(flat_logits, Variable(flat_labels.float()))
def get_content_models(self):
    """ Return all subclasses that are admin registered. """
    registered = []
    for model in self.concrete_model.get_content_models():
        try:
            admin_url(model, "add")
        except NoReverseMatch:
            # No admin "add" view registered for this model; skip it.
            continue
        model.meta_verbose_name = model._meta.verbose_name
        model.add_url = admin_url(model, "add")
        registered.append(model)
    return registered
def to_time(self, phase, component=None, t0='t0_supconj', **kwargs):
    """
    Convert phase(s) to time(s) for a given ephemeris.

    :parameter phase: phase to convert to times (same system as t0s);
        float, list, or array
    :parameter str component: component whose ephemeris to use; defaults
        to the top-most level of the current hierarchy
    :parameter t0: qualifier of the parameter to be used for t0
    :parameter **kwargs: overrides for the retrieved ephemeris values
        (period, t0, dpdt).  Careful: values are used as-is, no unit
        conversion is applied.
    :return: time (float) or times (array)
    :raises ValueError: if the removed ``shift`` argument is passed.
    """
    if kwargs.get('shift', False):
        raise ValueError("support for phshift was removed as of 2.1. Please pass t0 instead.")
    ephem = self.get_ephemeris(component=component, t0=t0, **kwargs)
    if isinstance(phase, list):
        phase = np.array(phase)
    ref_t0 = ephem.get('t0', 0.0)
    period = ephem.get('period', 1.0)
    dpdt = ephem.get('dpdt', 0.0)
    # if changing this, also see parameters.constraint.time_ephem
    if dpdt == 0:
        # Constant period: simple linear ephemeris.
        return ref_t0 + phase * period
    # Changing period: integrate the varying period over phase.
    return ref_t0 + (np.exp(dpdt * phase) - period) / dpdt
:parameter phase: phase to convert to times (should be in
same system as t0s)
:type phase: float, list, or array
` :parameter str component: component for which to get the ephemeris.
If not given, component will default to the top-most level of the
current hierarchy
:parameter t0: qualifier of the parameter to be used for t0
:type t0: str
:parameter **kwargs: any value passed through kwargs will override the
ephemeris retrieved by component (ie period, t0, dpdt).
Note: be careful about units - input values will not be converted.
:return: time (float) or times (array) |
def integrate_orbit(self, **time_spec):
    """
    Integrate the initial conditions in the combined external potential
    plus N-body forces.
    This integration uses the `~gala.integrate.DOPRI853Integrator`.
    Parameters
    ----------
    **time_spec
        Specification of how long to integrate. See documentation
        for `~gala.integrate.parse_time_specification`.
    Returns
    -------
    orbit : `~gala.dynamics.Orbit`
        The orbits of the particles.
    """
    # Prepare the initial conditions: positions/velocities expressed in
    # this object's unit system, stacked into one C-contiguous array of
    # per-particle phase-space rows (contiguity required by the C code).
    pos = self.w0.xyz.decompose(self.units).value
    vel = self.w0.v_xyz.decompose(self.units).value
    w0 = np.ascontiguousarray(np.vstack((pos, vel)).T)
    # Prepare the time-stepping array
    t = parse_time_specification(self.units, **time_spec)
    ws = _direct_nbody_dop853(w0, t, self._ext_ham,
                              self.particle_potentials)
    # Split integrator output back into position / velocity blocks;
    # rollaxis moves the coordinate axis to the front as Orbit expects.
    pos = np.rollaxis(np.array(ws[..., :3]), axis=2)
    vel = np.rollaxis(np.array(ws[..., 3:]), axis=2)
    # Re-attach units before handing back an Orbit object.
    orbits = Orbit(
        pos=pos * self.units['length'],
        vel=vel * self.units['length'] / self.units['time'],
        t=t * self.units['time'])
    return orbits
plus N-body forces.
This integration uses the `~gala.integrate.DOPRI853Integrator`.
Parameters
----------
**time_spec
Specification of how long to integrate. See documentation
for `~gala.integrate.parse_time_specification`.
Returns
-------
orbit : `~gala.dynamics.Orbit`
The orbits of the particles. |
def get_connection_logging(self, loadbalancer):
    """
    Return True if connection logging is enabled for the given load
    balancer, False otherwise (missing data defaults to False).
    """
    uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
    resp, body = self.api.method_get(uri)
    # Nested .get() chain tolerates an absent or empty response body.
    return body.get("connectionLogging", {}).get("enabled", False)
def get_basic_profile(self, user_id, scope='profile/public'):
    """
    Retrieve the Mxit user's basic profile.

    No user authentication required; an application token obtained with
    the given *scope* is used instead.

    :param user_id: id of the user whose profile to fetch
    :param scope: OAuth scope used to request the app token
    :return: decoded profile (parsed from the JSON response)
    :raises MxitAPIException: if the response body is not valid JSON
    """
    profile = _get(
        token=self.oauth.get_app_token(scope),
        uri='/user/profile/' + urllib.quote(user_id)
    )
    try:
        return json.loads(profile)
    except ValueError:
        # The previous bare `except:` swallowed *every* exception
        # (including KeyboardInterrupt/SystemExit); json.loads signals
        # malformed JSON via ValueError, so catch only that.
        raise MxitAPIException('Error parsing profile data')
No user authentication required |
def update_git_devstr(version, path=None):
    """
    Updates the git revision string if and only if the path is being imported
    directly from a git working copy. This ensures that the revision number in
    the version string is accurate.
    """
    try:
        # Quick way to determine if we're in git or not - returns '' if not
        in_git = get_git_devstr(sha=True, show_warning=False, path=path)
    except OSError:
        return version
    if not in_git:
        # Probably not in git so just pass silently
        return version
    if 'dev' not in version:
        # Already the true/release version; nothing to update.
        return version
    # Replace the stale .dev suffix with the current git revision count.
    version_base = version.split('.dev', 1)[0]
    rev_count = get_git_devstr(sha=False, show_warning=False, path=path)
    return version_base + '.dev' + rev_count
directly from a git working copy. This ensures that the revision number in
the version string is accurate. |
def get_redshift(self, dist):
    """Returns the redshift for the given distance.

    Uses the fast "nearby" interpolant first, falls back to the
    "faraway" interpolant for larger distances, and finally to a direct
    (slow) astropy computation for anything beyond both ranges.
    """
    dist, input_is_array = ensurearray(dist)
    try:
        zs = self.nearby_d2z(dist)
    except TypeError:
        # interpolant hasn't been setup yet
        self.setup_interpolant()
        zs = self.nearby_d2z(dist)
    # if any points had red shifts beyond the nearby, will have nans;
    # replace using the faraway interpolation
    replacemask = numpy.isnan(zs)
    if replacemask.any():
        zs[replacemask] = self.faraway_d2z(dist[replacemask])
        replacemask = numpy.isnan(zs)
    # if we still have nans, means that some distances are beyond our
    # furthest default; fall back to using astropy
    if replacemask.any():
        # well... check that the distance is positive and finite first.
        # BUGFIX: the original `not (dist > 0.).all() and numpy.isfinite(dist).all()`
        # bound `not` to the first clause only, so non-finite distances
        # slipped past validation; the intended check is "raise unless
        # all distances are positive AND finite".
        if not ((dist > 0.).all() and numpy.isfinite(dist).all()):
            raise ValueError("distance must be finite and > 0")
        zs[replacemask] = _redshift(dist[replacemask],
                                    cosmology=self.cosmology)
    return formatreturn(zs, input_is_array)
def get_games_by_season(self, season):
    """
    Game schedule for a specified season.
    """
    try:
        season_number = int(season)
    except ValueError:
        # Non-numeric season strings are rejected up-front.
        raise FantasyDataError('Error: Invalid method parameters')
    return self._method_call("Games/{season}", "stats", season=season_number)
def execute_deploy_clone_from_vm(self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager):
    """
    Deploy a VM by cloning it from another VM.

    :param si: service instance handle
    :param logger: logger instance
    :param vcenter_data_model: vCenter resource model
    :param str reservation_id: reservation the deployment belongs to
    :type deployment_params: DeployFromTemplateDetails
    :param cancellation_context: cancellation handle for the operation
    :param folder_manager: manager used to prepare the target folder
    :return: result from the deployer
    """
    # Make sure the deployed-apps folder exists before cloning into it.
    self._prepare_deployed_apps_folder(deployment_params, si, logger, folder_manager, vcenter_data_model)
    return self.deployer.deploy_clone_from_vm(
        si, logger, deployment_params, vcenter_data_model,
        reservation_id, cancellation_context)
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromTemplateDetails
:param vcenter_data_model:
:return: |
def type_names_mn(prefix, sizerangem, sizerangen):
    """
    Helper for type name generation, like: fixed0x8 .. fixed0x256

    Parameters
    ----------
    prefix : str
        Name prefix (e.g. 'fixed').
    sizerangem, sizerangen : iterable of int
        Candidate m / n bit sizes.

    Returns
    -------
    tuple of str
        Names '<prefix><m>x<n>' for every pair with m + n <= 256,
        in (m outer, n inner) order.
    """
    # Materialize once: the inputs may be one-shot iterators (ranges are
    # fine, but generators would otherwise be exhausted by the first m).
    m_sizes = list(sizerangem)
    n_sizes = list(sizerangen)
    # sizes (in bits) are valid if (%8 == 0) and (m+n <= 256);
    # the %8 condition is covered by passing proper size ranges.
    return tuple('{0}{1}x{2}'.format(prefix, m, n)
                 for m in m_sizes for n in n_sizes if m + n <= 256)
def get_shard_num_by_key_id(self, key_id):
    """
    Return the Redis shard number (zero-indexed) for the given key id.

    Keyword arguments:
    key_id -- the key id (e.g. '12345' or 'anythingcangohere')

    This method is critical in how the Redis cluster sharding works. We
    emulate twemproxy's md5 distribution algorithm: the first four bytes
    of the md5 digest, assembled least-significant-byte first, modulo
    the shard count.
    """
    # TODO: support other hash functions?
    digest = hashlib.md5(str(key_id).encode('ascii')).digest()
    # Equivalent to twemproxy's nc_md5.c byte assembly:
    # b0 | b1<<8 | b2<<16 | b3<<24 == little-endian uint32.
    val = int.from_bytes(digest[:4], 'little')
    return val % self.num_shards()
given a key id.
Keyword arguments:
key_id -- the key id (e.g. '12345' or 'anythingcangohere')
This method is critical in how the Redis cluster sharding works. We
emulate twemproxy's md5 distribution algorithm. |
def volume_infos(pool=None, volume=None, **kwargs):
    '''
    Provide details on a storage volume. If no volume name is provided, the infos
    all the volumes contained in the pool are provided. If no pool is provided,
    the infos of the volumes of all pools are output.
    :param pool: libvirt storage pool name (default: ``None``)
    :param volume: name of the volume to get infos from (default: ``None``)
    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults
    .. versionadded:: Neon
    CLI Example:
    .. code-block:: bash
        salt "*" virt.volume_infos <pool> <volume>
    '''
    result = {}
    conn = __get_conn(**kwargs)
    try:
        # Paths of all volumes, used to detect backing-store relationships.
        backing_stores = _get_all_volumes_paths(conn)
        domains = _get_domain(conn)
        domains_list = domains if isinstance(domains, list) else [domains]
        # For each domain, the set of file-backed disk sources it uses
        # (parsed out of the domain's XML description).
        disks = {domain.name():
                 {node.get('file') for node
                  in ElementTree.fromstring(domain.XMLDesc(0)).findall('.//disk/source/[@file]')}
                 for domain in domains_list}

        def _volume_extract_infos(vol):
            '''
            Format the volume info dictionary
            :param vol: the libvirt storage volume object.
            '''
            # Index infos[0] maps onto libvirt's storage volume type enum.
            types = ['file', 'block', 'dir', 'network', 'netdir', 'ploop']
            infos = vol.info()
            # If we have a path, check its use: a VM uses this volume if it
            # is a direct disk source or the backing store of one.
            used_by = []
            if vol.path():
                as_backing_store = {path for (path, all_paths) in backing_stores.items() if vol.path() in all_paths}
                used_by = [vm_name for (vm_name, vm_disks) in disks.items()
                           if vm_disks & as_backing_store or vol.path() in vm_disks]
            return {
                'type': types[infos[0]] if infos[0] < len(types) else 'unknown',
                'key': vol.key(),
                'path': vol.path(),
                'capacity': infos[1],
                'allocation': infos[2],
                'used_by': used_by,
            }

        # Filter pools and volumes by the optional arguments, then drop
        # pools that end up with no matching volumes.
        pools = [obj for obj in conn.listAllStoragePools() if pool is None or obj.name() == pool]
        vols = {pool_obj.name(): {vol.name(): _volume_extract_infos(vol)
                                  for vol in pool_obj.listAllVolumes()
                                  if (volume is None or vol.name() == volume) and _is_valid_volume(vol)}
                for pool_obj in pools}
        return {pool_name: volumes for (pool_name, volumes) in vols.items() if volumes}
    except libvirt.libvirtError as err:
        # Best effort: on libvirt errors, log and fall through to return {}.
        log.debug('Silenced libvirt error: %s', str(err))
    finally:
        conn.close()
    return result
all the volumes contained in the pool are provided. If no pool is provided,
the infos of the volumes of all pools are output.
:param pool: libvirt storage pool name (default: ``None``)
:param volume: name of the volume to get infos from (default: ``None``)
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt "*" virt.volume_infos <pool> <volume> |
def _set_area_range(self, v, load=False):
    """
    Setter method for area_range, mapped from YANG variable /rbridge_id/ipv6/router/ospf/area/area_range (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_area_range is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_area_range() directly.
    YANG Description: If the ABR that connects the NSSA to other areas needs to summarize the routes in the NSSA before translating them into Type-5 LSAs and flooding them into the other areas, configure an address range. The ABR creates an aggregate value based on the address range. The aggregate value becomes the address that the ABR advertises instead of advertising the individual addresses represented by the aggregate.
    """
    # NOTE: auto-generated pyangbind setter; the YANGDynClass construction
    # below must match the generated model exactly.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("range_address",area_range.area_range, yang_name="area-range", rest_name="range", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='range-address', extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address range (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'range', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'Ospfv3AreaRange'}}), is_container='list', yang_name="area-range", rest_name="range", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address range (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'range', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'Ospfv3AreaRange'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({
            'error-string': """area_range must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("range_address",area_range.area_range, yang_name="area-range", rest_name="range", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='range-address', extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address range (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'range', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'Ospfv3AreaRange'}}), is_container='list', yang_name="area-range", rest_name="range", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'To define or undefine a type-3 address range (ABR only)', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'range', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'callpoint': u'Ospfv3AreaRange'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='list', is_config=True)""",
        })
    self.__area_range = t
    # Notify the model of the change when a change hook is installed.
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_area_range is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_area_range() directly.
YANG Description: If the ABR that connects the NSSA to other areas needs to summarize the routes in the NSSA before translating them into Type-5 LSAs and flooding them into the other areas, configure an address range. The ABR creates an aggregate value based on the address range. The aggregate value becomes the address that the ABR advertises instead of advertising the individual addresses represented by the aggregate. |
def make_coins(self, collection, text, subreference="", lang=None):
    """ Creates a CoINS Title string from information
    :param collection: Collection to create coins from
    :param text: Text/Passage object
    :param subreference: Subreference
    :param lang: Locale information
    :return: Coins HTML title value
    """
    if lang is None:
        lang = self.__default_lang__
    # Build the Z39.88-2004 (OpenURL ContextObject) key/value string.
    # All interpolated values are URL-quoted except the language code.
    return "url_ver=Z39.88-2004"\
           "&ctx_ver=Z39.88-2004"\
           "&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook"\
           "&rft_id={cid}"\
           "&rft.genre=bookitem"\
           "&rft.btitle={title}"\
           "&rft.edition={edition}"\
           "&rft.au={author}"\
           "&rft.atitle={pages}"\
           "&rft.language={language}"\
           "&rft.pages={pages}".format(
        title=quote(str(text.get_title(lang))), author=quote(str(text.get_creator(lang))),
        cid=url_for(".r_collection", objectId=collection.id, _external=True),
        language=collection.lang, pages=quote(subreference), edition=quote(str(text.get_description(lang)))
    )
:param collection: Collection to create coins from
:param text: Text/Passage object
:param subreference: Subreference
:param lang: Locale information
:return: Coins HTML title value |
def get_tags(blog_id, username, password):
    """
    wp.getTags(blog_id, username, password)
    => tag structure[]
    """
    authenticate(username, password)
    site = Site.objects.get_current()
    # Only tags actually used by published entries, with usage counts.
    used_tags = Tag.objects.usage_for_queryset(Entry.published.all(), counts=True)
    return [tag_structure(tag, site) for tag in used_tags]
=> tag structure[] |
def post_dns_record(**kwargs):
    '''
    Creates a DNS record for the given name if the domain is managed with DO.

    Required kwargs: ``dns_domain``, ``name``, ``record_type``,
    ``record_data``.  A nested ``kwargs`` dict (as passed by
    ``salt-cloud -f``) is flattened into the top-level arguments first.

    :return: the API response for the created record, or ``False`` when
        the domain is not managed with DigitalOcean.
    :raises SaltInvocationError: when a mandatory argument is missing or
        empty.
    '''
    if 'kwargs' in kwargs:  # flatten kwargs if called via salt-cloud -f
        f_kwargs = kwargs.pop('kwargs')
        kwargs.update(f_kwargs)
    mandatory_kwargs = ('dns_domain', 'name', 'record_type', 'record_data')
    for arg in mandatory_kwargs:
        # Use .get() so a *missing* key raises SaltInvocationError with a
        # helpful message instead of an opaque KeyError (the original
        # indexed kwargs[arg] directly, bypassing its own error path).
        if not kwargs.get(arg):
            error = '{0}="{1}" ## all mandatory args must be provided: {2}'.format(
                arg, kwargs.get(arg), mandatory_kwargs)
            raise SaltInvocationError(error)
    domain = query(method='domains', droplet_id=kwargs['dns_domain'])
    if domain:
        result = query(
            method='domains',
            droplet_id=kwargs['dns_domain'],
            command='records',
            args={'type': kwargs['record_type'], 'name': kwargs['name'], 'data': kwargs['record_data']},
            http_method='post'
        )
        return result
    return False
def _win32_symlink2(path, link, allow_fallback=True, verbose=0):
    """
    Create a real symbolic link when possible.

    On most versions of Windows, creating a real symlink requires special
    privileges, so when symlinking is unavailable we fall back to a
    junction.  Junctions always reference absolute paths (symlinks may be
    relative) and do not register as links via `islink`, while real
    symlinks should.
    """
    if not _win32_can_symlink():
        # NOTE(review): `allow_fallback` is accepted but never consulted;
        # the junction fallback is unconditional — confirm this is intended.
        return _win32_junction(path, link, verbose)
    return _win32_symlink(path, link, verbose)
windows you need special privledges to create a real symlink. Therefore, we
try to create a symlink, but if that fails we fallback to using a junction.
AFAIK, the main difference between symlinks and junctions are that symlinks
can reference relative or absolute paths, where as junctions always
reference absolute paths. Not 100% on this though. Windows is weird.
Note that junctions will not register as links via `islink`, but I
believe real symlinks will. |
def push(self):
    """
    Push the changes back to the remote(s) after fetching.

    Skips the push entirely when only local target branches remain;
    sets ``self.pushed`` on success and re-raises a relabelled
    GitError on failure.
    """
    print('pushing...')
    push_kwargs = {}
    push_args = []
    if self.settings['push.tags']:
        # NOTE(review): this sets a 'push' flag for the push.tags
        # setting rather than e.g. 'tags' — confirm the git wrapper
        # expects this keyword.
        push_kwargs['push'] = True
    if self.settings['push.all']:
        push_kwargs['all'] = True
    else:
        # '.' denotes the local repository; drop it from the targets.
        if '.' in self.remotes:
            self.remotes.remove('.')
        if not self.remotes:
            # Only local target branches,
            # `git push` will fail
            return
        push_args.append(self.remotes)
    try:
        self.git.push(*push_args, **push_kwargs)
        self.pushed = True
    except GitError as error:
        # Relabel the error so callers see which git command failed.
        error.message = "`git push` failed"
        raise error
def _spec_to_globs(address_mapper, specs):
    """Given a Specs object, return a PathGlobs object for the build files that it matches."""
    # Union of every spec's glob patterns, deduplicated via a set.
    patterns = {pattern
                for spec in specs
                for pattern in spec.make_glob_patterns(address_mapper)}
    return PathGlobs(include=patterns, exclude=address_mapper.build_ignore_patterns)
def _permute_aux_specs(self):
    """Generate all permutations of the non-core specifications.

    Builds the suite-name -> calc-name mapping, strips the core spec
    names, renames the spec keys accordingly, and delegates the
    permutation to ``_permuted_dicts_of_specs``.
    """
    # Convert to attr names that Calc is expecting.
    calc_aux_mapping = self._NAMES_SUITE_TO_CALC.copy()
    # Special case: manually add 'library' to mapping
    calc_aux_mapping[_OBJ_LIB_STR] = None
    # Plain loop instead of the original list comprehension: a
    # comprehension used purely for its pop() side effect is an
    # anti-idiom and allocates a throwaway list.
    for core in self._CORE_SPEC_NAMES:
        calc_aux_mapping.pop(core)
    specs = self._get_aux_specs()
    # Re-key the specs from suite names to the Calc attribute names.
    for suite_name, calc_name in calc_aux_mapping.items():
        specs[calc_name] = specs.pop(suite_name)
    return _permuted_dicts_of_specs(specs)
def send(self,
         data: Object,
         retries: int = Session.MAX_RETRIES,
         timeout: float = Session.WAIT_TIMEOUT):
    """Use this method to send Raw Function queries.
    This method makes possible to manually call every single Telegram API method in a low-level manner.
    Available functions are listed in the :obj:`functions <pyrogram.api.functions>` package and may accept compound
    data types from :obj:`types <pyrogram.api.types>` as well as bare types such as ``int``, ``str``, etc...
    Args:
        data (``Object``):
            The API Schema function filled with proper arguments.
        retries (``int``):
            Number of retries.
        timeout (``float``):
            Timeout in seconds.
    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``ConnectionError`` if the client has not been started.
    """
    if not self.is_started:
        raise ConnectionError("Client has not been started")
    # Optionally wrap the query so the server suppresses updates and/or
    # scopes the call to an active takeout session.
    if self.no_updates:
        data = functions.InvokeWithoutUpdates(query=data)
    if self.takeout_id:
        data = functions.InvokeWithTakeout(takeout_id=self.takeout_id, query=data)
    r = self.session.send(data, retries, timeout)
    # Cache any users/chats carried in the response so peers can be
    # resolved later without extra requests.
    self.fetch_peers(getattr(r, "users", []))
    self.fetch_peers(getattr(r, "chats", []))
    return r
This method makes possible to manually call every single Telegram API method in a low-level manner.
Available functions are listed in the :obj:`functions <pyrogram.api.functions>` package and may accept compound
data types from :obj:`types <pyrogram.api.types>` as well as bare types such as ``int``, ``str``, etc...
Args:
data (``Object``):
The API Schema function filled with proper arguments.
retries (``int``):
Number of retries.
timeout (``float``):
Timeout in seconds.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. |
def delete_item(self, item):
    ''' removes an item from the db, along with every relation that
    references it (in both directions) '''
    # Remove outgoing relations (item -> dst).
    for relation, dst in self.relations_of(item, True):
        self.delete_relation(item, relation, dst)
    # Remove incoming relations (src -> item).
    for src, relation in self.relations_to(item, True):
        self.delete_relation(src, relation, item)
    # Finally drop the node itself, keyed by its hash.
    h = self._item_hash(item)
    if item in self:
        self.nodes[h].clear()
        del self.nodes[h]
def attributes_section(thing, doc, header_level):
    """
    Generate an attributes section for classes.

    Prefers type annotations, if they are present.

    Parameters
    ----------
    thing : class
        Class to document
    doc : dict
        Numpydoc output
    header_level : int
        Number of `#`s to use for header (not referenced in this body)

    Returns
    -------
    list of str
        Markdown formatted attribute list
    """
    if not inspect.isclass(thing):
        # Attributes only make sense for classes.
        return []
    props, class_doc = _split_props(thing, doc["Attributes"])
    section = type_list(inspect.signature(thing), class_doc, "\n### Attributes\n\n")
    if not section and props:
        # No annotated attributes found: emit the header plus one
        # markdown link per property.
        section.append("\n### Attributes\n\n")
        section.extend(f"- [`{prop}`](#{prop})\n\n" for prop in props)
    return section
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list |
def _is_small_vcf(vcf_file):
    """Check whether a VCF is small enough to analyze quickly.

    Returns True when the file contains at most 250 non-header records.
    """
    small_thresh = 250
    variant_count = 0
    with utils.open_gzipsafe(vcf_file) as in_handle:
        for line in in_handle:
            if line.startswith("#"):
                continue
            variant_count += 1
            if variant_count > small_thresh:
                # Bail out early; no need to scan the rest of a big file.
                return False
    return True
def sign_message(data_to_sign, digest_alg, sign_key,
                 use_signed_attributes=True):
    """Function signs the data and returns the generated ASN.1
    :param data_to_sign: A byte string of the data to be signed.
    :param digest_alg:
        The digest algorithm to be used for generating the signature.
    :param sign_key: The key to be used for generating the signature.
    :param use_signed_attributes: Optional attribute to indicate weather the
        CMS signature attributes should be included in the signature or not.
    :return: A CMS ASN.1 byte string of the signed data.
    """
    if use_signed_attributes:
        # Digest the payload up-front; the messageDigest signed attribute
        # must carry this value.
        digest_func = hashlib.new(digest_alg)
        digest_func.update(data_to_sign)
        message_digest = digest_func.digest()

        # Ad-hoc ASN.1 shells used to encode the SMIMECapabilities
        # attribute (fields are positional placeholders).
        class SmimeCapability(core.Sequence):
            _fields = [
                ('0', core.Any, {'optional': True}),
                ('1', core.Any, {'optional': True}),
                ('2', core.Any, {'optional': True}),
                ('3', core.Any, {'optional': True}),
                ('4', core.Any, {'optional': True})
            ]

        class SmimeCapabilities(core.Sequence):
            _fields = [
                ('0', SmimeCapability),
                ('1', SmimeCapability, {'optional': True}),
                ('2', SmimeCapability, {'optional': True}),
                ('3', SmimeCapability, {'optional': True}),
                ('4', SmimeCapability, {'optional': True}),
            ]

        # Advertised encryption capabilities, identified by OID
        # (1.2.840.113549.3.* family) — presumably 3DES plus two
        # 128-bit-parameter algorithms; verify against RFC 8551 if changed.
        smime_cap = OrderedDict([
            ('0', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.7'))])),
            ('1', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.2')),
                ('1', core.Integer(128))])),
            ('2', OrderedDict([
                ('0', core.ObjectIdentifier('1.2.840.113549.3.4')),
                ('1', core.Integer(128))])),
        ])
        # Standard CMS signed attributes: content-type, signing-time,
        # message-digest and smime-capabilities.
        signed_attributes = cms.CMSAttributes([
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('content_type'),
                'values': cms.SetOfContentType([
                    cms.ContentType('data')
                ])
            }),
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('signing_time'),
                'values': cms.SetOfTime([
                    cms.Time({
                        'utc_time': core.UTCTime(datetime.now())
                    })
                ])
            }),
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('message_digest'),
                'values': cms.SetOfOctetString([
                    core.OctetString(message_digest)
                ])
            }),
            cms.CMSAttribute({
                'type': cms.CMSAttributeType('1.2.840.113549.1.9.15'),
                'values': cms.SetOfAny([
                    core.Any(SmimeCapabilities(smime_cap))
                ])
            }),
        ])
        # When signed attributes are present, the signature covers the
        # DER encoding of the attributes, not the raw data.
        signature = asymmetric.rsa_pkcs1v15_sign(
            sign_key[0], signed_attributes.dump(), digest_alg)
    else:
        signed_attributes = None
        # No signed attributes: sign the payload directly.
        signature = asymmetric.rsa_pkcs1v15_sign(
            sign_key[0], data_to_sign, digest_alg)
    # Assemble the CMS SignedData structure; sign_key is a
    # (private_key, certificate) pair.
    return cms.ContentInfo({
        'content_type': cms.ContentType('signed_data'),
        'content': cms.SignedData({
            'version': cms.CMSVersion('v1'),
            'digest_algorithms': cms.DigestAlgorithms([
                algos.DigestAlgorithm({
                    'algorithm': algos.DigestAlgorithmId(digest_alg)
                })
            ]),
            'encap_content_info': cms.ContentInfo({
                'content_type': cms.ContentType('data')
            }),
            'certificates': cms.CertificateSet([
                cms.CertificateChoices({
                    'certificate': sign_key[1].asn1
                })
            ]),
            'signer_infos': cms.SignerInfos([
                cms.SignerInfo({
                    'version': cms.CMSVersion('v1'),
                    'sid': cms.SignerIdentifier({
                        'issuer_and_serial_number': cms.IssuerAndSerialNumber({
                            'issuer': sign_key[1].asn1[
                                'tbs_certificate']['issuer'],
                            'serial_number': sign_key[1].asn1[
                                'tbs_certificate']['serial_number']
                        })
                    }),
                    'digest_algorithm': algos.DigestAlgorithm({
                        'algorithm': algos.DigestAlgorithmId(digest_alg)
                    }),
                    'signed_attrs': signed_attributes,
                    'signature_algorithm': algos.SignedDigestAlgorithm({
                        'algorithm':
                            algos.SignedDigestAlgorithmId('rsassa_pkcs1v15')
                    }),
                    'signature': core.OctetString(signature)
                })
            ])
        })
    }).dump()
:param data_to_sign: A byte string of the data to be signed.
:param digest_alg:
The digest algorithm to be used for generating the signature.
:param sign_key: The key to be used for generating the signature.
:param use_signed_attributes: Optional attribute to indicate weather the
CMS signature attributes should be included in the signature or not.
:return: A CMS ASN.1 byte string of the signed data. |
def _parse_categories(element):
    """
    Return the category tree built from all <category> elements,
    with parent/child relations resolved.
    """
    reference = {}
    for item in element.findall("./{%s}category" % WP_NAMESPACE):
        nicename = item.find("./{%s}category_nicename" % WP_NAMESPACE).text
        # Index each category by its nicename so parents can be resolved.
        reference[nicename] = {
            "term_id": item.find("./{%s}term_id" % WP_NAMESPACE).text,
            "nicename": nicename,
            "name": item.find("./{%s}cat_name" % WP_NAMESPACE).text,
            "parent": item.find("./{%s}category_parent" % WP_NAMESPACE).text,
        }
    return _build_category_tree(None, reference=reference)
def df_random(num_numeric=3, num_categorical=3, num_rows=100):
    """Generate a dataframe with random data. This is a general method
    to easily generate a random dataframe; for more control of the
    random 'distributions' use the column methods (df_numeric_column,
    df_categorical_column), or numpy methods directly.

    Args:
        num_numeric (int): The number of numeric columns (default = 3)
        num_categorical (int): The number of categorical columns (default = 3)
        num_rows (int): The number of rows to generate (default = 100)
    """
    frame = pd.DataFrame()
    # Column names are consecutive lowercase letters: numeric first,
    # then categorical.
    letters = string.ascii_lowercase
    for col in letters[:num_numeric]:
        frame[col] = df_numeric_column(num_rows=num_rows)
    for col in letters[num_numeric:num_numeric + num_categorical]:
        frame[col] = df_categorical_column(['foo', 'bar', 'baz'], num_rows=num_rows)
    return frame
to easily generate a random dataframe, for more control of the
random 'distributions' use the column methods (df_numeric_column, df_categorical_column)
For other distributions you can use numpy methods directly (see example at bottom of this file)
Args:
num_numeric (int): The number of numeric columns (default = 3)
num_categorical (int): The number of categorical columns (default = 3)
num_rows (int): The number of rows to generate (default = 100) |
def rpc_get_account_record(self, address, token_type, **con_info):
    """
    Get the current state of an account
    :param address: account address (b58 or c32; c32 gets converted)
    :param token_type: type of token the account holds
    :return: {'account': state} wrapped in a success response, or an
        {'error': ..., 'http_status': ...} dict on failure.
    """
    if not address(address):
        return {'error': 'Invalid address', 'http_status': 400}
    if not check_token_type(token_type):
        return {'error': 'Invalid token type', 'http_status': 400}
    # must be b58
    if is_c32_address(address):
        address = c32ToB58(address)
    db = get_db_state(self.working_dir)
    account = db.get_account(address, token_type)
    db.close()
    if account is None:
        return {'error': 'No such account', 'http_status': 404}
    state = self.export_account_state(account)
    return self.success_response({'account': state})
def __initialize_ui(self):
    """
    Initializes the Widget ui.
    """
    self.Lines_Columns_label.setAlignment(Qt.AlignRight)
    # Start the status label at line 1, column 1.
    self.Lines_Columns_label.setText(self.__Lines_Columns_label_default_text.format(1, 1))
    self.Languages_comboBox.setModel(self.__container.languages_model)
    # Signals / Slots.
    self.Languages_comboBox.currentIndexChanged.connect(self.__Languages_comboBox__currentIndexChanged)
def get_salt_interface(vm_, opts):
    '''
    Return the salt_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    salt_host = salt.config.get_cloud_config_value(
        'salt_interface', vm_, opts, default=False,
        search_global=False
    )
    if salt_host is not False:
        return salt_host
    # Not configured explicitly: fall back to the ssh_interface setting.
    return salt.config.get_cloud_config_value(
        'ssh_interface', vm_, opts, default='public_ips',
        search_global=False
    )
or 'private_ips'. |
def store_object(self, obj_name, data, content_type=None, etag=None,
                 content_encoding=None, ttl=None, return_none=False,
                 headers=None, extra_info=None):
    """
    Create a new object named *obj_name* in this container from *data*.

    A StorageObject reference to the upload is returned unless
    *return_none* is True.

    ``extra_info`` is accepted only for backwards compatibility; it is
    ignored entirely (swiftclient is no longer used).
    """
    # Delegate to ``create``; note that ``extra_info`` is deliberately
    # not forwarded.
    return self.create(
        obj_name=obj_name,
        data=data,
        content_type=content_type,
        etag=etag,
        content_encoding=content_encoding,
        ttl=ttl,
        return_none=return_none,
        headers=headers,
    )
data. A StorageObject reference to the uploaded file will be returned,
unless 'return_none' is set to True.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more. |
async def load(self, request, resource=None, **kwargs):
    """Deserialize the request payload into a resource via the schema."""
    schema = self.get_schema(request, resource=resource, **kwargs)
    payload = await self.parse(request)
    # Partial loading is enabled when an existing resource is being updated.
    loaded, errors = schema.load(
        payload, partial=resource is not None, many=isinstance(payload, list))
    if errors:
        raise RESTBadRequest(reason='Bad request', json={'errors': errors})
    return loaded
def weekdays(self):
    """A set of integers for the weekdays the schedule recurs on,
    with Monday = 0 and Sunday = 6."""
    if not self.root.xpath('days'):
        # No <days> element at all: the schedule recurs every day.
        return set(range(7))
    # The XML stores 1-based day numbers; shift them to 0-based.
    return {int(day) - 1 for day in self.root.xpath('days/day/text()')}
with Monday = 0 and Sunday = 6. |
def check_lines(first, second):
    """Checks if two curves are lines and tries to intersect them.

    .. note::
        This is a helper for :func:`._all_intersections`.

    If they are not lines / not linearized, immediately returns :data:`False`
    with no "return value".

    If they are lines, attempts to intersect them (even if they are parallel
    and share a coincident segment).

    Args:
        first (Union[SubdividedCurve, Linearization]): First curve being
            intersected.
        second (Union[SubdividedCurve, Linearization]): Second curve being
            intersected.

    Returns:
        Tuple[bool, Optional[Tuple[numpy.ndarray, bool]]]: A pair of

        * Flag indicating if both candidates in the pair are lines.
        * Optional "result" populated only if both candidates are lines.
          When this result is populated, it will be a pair of

          * array of parameters of intersection
          * flag indicating if the two candidates share a coincident segment
    """
    # NOTE: In the below we replace ``isinstance(a, B)`` with
    # ``a.__class__ is B``, which is a 3-3.5x speedup.
    # Only exact (zero-error) linearizations are treated as true lines.
    if not (
        first.__class__ is Linearization
        and second.__class__ is Linearization
        and first.error == 0.0
        and second.error == 0.0
    ):
        return False, None
    s, t, success = segment_intersection(
        first.start_node, first.end_node, second.start_node, second.end_node
    )
    if success:
        # Keep the intersection only when both parameters fall inside the
        # segments' [0, 1] ranges; otherwise report "no intersections".
        if _helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(
            t, 0.0, 1.0
        ):
            intersections = np.asfortranarray([[s], [t]])
            result = intersections, False
        else:
            result = np.empty((2, 0), order="F"), False
    else:
        # ``segment_intersection`` could not produce a unique solution
        # (e.g. parallel lines); check for a coincident overlap instead.
        disjoint, params = parallel_lines_parameters(
            first.start_node,
            first.end_node,
            second.start_node,
            second.end_node,
        )
        if disjoint:
            result = np.empty((2, 0), order="F"), False
        else:
            # The second element flags the shared coincident segment.
            result = params, True
    return True, result
.. note::
This is a helper for :func:`._all_intersections`.
If they are not lines / not linearized, immediately returns :data:`False`
with no "return value".
If they are lines, attempts to intersect them (even if they are parallel
and share a coincident segment).
Args:
first (Union[SubdividedCurve, Linearization]): First curve being
intersected.
second (Union[SubdividedCurve, Linearization]): Second curve being
intersected.
Returns:
Tuple[bool, Optional[Tuple[numpy.ndarray, bool]]]: A pair of
* Flag indicating if both candidates in the pair are lines.
* Optional "result" populated only if both candidates are lines.
When this result is populated, it will be a pair of
* array of parameters of intersection
* flag indicating if the two candidates share a coincident segment |
def get_vprof_version(filename):
    """Extract the ``__version__`` string declared in *filename*."""
    with open(filename) as src_file:
        contents = src_file.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match is None:
        raise RuntimeError('Unable to find version info.')
    return match.group(1)
def cast(
    source: Union[DataType, str], target: Union[DataType, str], **kwargs
) -> DataType:
    """Attempts to implicitly cast from source dtype to target dtype"""
    source_dtype = dtype(source)
    target_dtype = dtype(target)
    if castable(source_dtype, target_dtype, **kwargs):
        return target_dtype
    raise com.IbisTypeError(
        'Datatype {} cannot be implicitly '
        'casted to {}'.format(source_dtype, target_dtype)
    )
def t_bin_NUMBER(t):
    # NOTE: PLY lexer rule -- the docstring below is FUNCTIONAL: PLY uses
    # it as the token's regular expression, so it must stay exactly as-is.
    r'[01]+'  # A binary integer
    # Convert the matched binary digit string to an int.
    t.value = int(t.value, 2)
    # Return the lexer to the default state after consuming the number.
    t.lexer.begin('INITIAL')
    return t
def close(self, cancelled=False):
    """
    Dismiss this temporary pop-up and remove it from its scene.

    :param cancelled: True when the pop-up was dismissed without
        confirmation (e.g. by pressing Esc).
    """
    # Notify the owner first, then detach this effect from the scene.
    self._on_close(cancelled)
    self._scene.remove_effect(self)
:param cancelled: Whether the pop-up was cancelled (e.g. by pressing Esc). |
def estimate_cpd(self, node):
    """
    Method to estimate the CPD for a given variable.

    Parameters
    ----------
    node: int, string (any hashable python object)
        The name of the variable for which the CPD is to be estimated.

    Returns
    -------
    CPD: TabularCPD

    Examples
    --------
    >>> import pandas as pd
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.estimators import MaximumLikelihoodEstimator
    >>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
    >>> model = BayesianModel([('A', 'C'), ('B', 'C')])
    >>> cpd_A = MaximumLikelihoodEstimator(model, data).estimate_cpd('A')
    >>> print(cpd_A)
    ╒══════╤══════════╕
    │ A(0) │ 0.666667 │
    ├──────┼──────────┤
    │ A(1) │ 0.333333 │
    ╘══════╧══════════╛
    >>> cpd_C = MaximumLikelihoodEstimator(model, data).estimate_cpd('C')
    >>> print(cpd_C)
    ╒══════╤══════╤══════╤══════╤══════╕
    │ A    │ A(0) │ A(0) │ A(1) │ A(1) │
    ├──────┼──────┼──────┼──────┼──────┤
    │ B    │ B(0) │ B(1) │ B(0) │ B(1) │
    ├──────┼──────┼──────┼──────┼──────┤
    │ C(0) │ 0.0  │ 0.0  │ 1.0  │ 0.5  │
    ├──────┼──────┼──────┼──────┼──────┤
    │ C(1) │ 1.0  │ 1.0  │ 0.0  │ 0.5  │
    ╘══════╧══════╧══════╧══════╧══════╛
    """
    state_counts = self.state_counts(node)

    # if a column contains only `0`s (no states observed for some configuration
    # of parents' states) fill that column uniformly instead.
    # NOTE: ``.ix`` was deprecated in pandas 0.20 and removed in 1.0;
    # ``.loc`` with a boolean column mask is the supported equivalent.
    state_counts.loc[:, (state_counts == 0).all()] = 1

    parents = sorted(self.model.get_parents(node))
    parents_cardinalities = [len(self.state_names[parent]) for parent in parents]
    node_cardinality = len(self.state_names[node])

    cpd = TabularCPD(node, node_cardinality, np.array(state_counts),
                     evidence=parents,
                     evidence_card=parents_cardinalities,
                     state_names=self.state_names)
    cpd.normalize()
    return cpd
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import MaximumLikelihoodEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> cpd_A = MaximumLikelihoodEstimator(model, data).estimate_cpd('A')
>>> print(cpd_A)
ββββββββ€βββββββββββ
β A(0) β 0.666667 β
ββββββββΌβββββββββββ€
β A(1) β 0.333333 β
ββββββββ§βββββββββββ
>>> cpd_C = MaximumLikelihoodEstimator(model, data).estimate_cpd('C')
>>> print(cpd_C)
ββββββββ€βββββββ€βββββββ€βββββββ€βββββββ
β A β A(0) β A(0) β A(1) β A(1) β
ββββββββΌβββββββΌβββββββΌβββββββΌβββββββ€
β B β B(0) β B(1) β B(0) β B(1) β
ββββββββΌβββββββΌβββββββΌβββββββΌβββββββ€
β C(0) β 0.0 β 0.0 β 1.0 β 0.5 β
ββββββββΌβββββββΌβββββββΌβββββββΌβββββββ€
β C(1) β 1.0 β 1.0 β 0.0 β 0.5 β
ββββββββ§βββββββ§βββββββ§βββββββ§βββββββ |
def ilsr_rankings(
        n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
    """Compute the ML estimate of model parameters using I-LSR.

    This computes the maximum-likelihood (ML) estimate of model parameters
    given ranking data (see :ref:`data-rankings`), using the iterative Luce
    Spectral Ranking algorithm [MG15]_.

    The transition rates of the LSR Markov chain are initialized with
    ``alpha``. When ``alpha > 0``, this corresponds to a form of
    regularization (see :ref:`regularization` for details).

    Parameters
    ----------
    n_items : int
        Number of distinct items.
    data : list of lists
        Ranking data.
    alpha : float, optional
        Regularization parameter.
    initial_params : array_like, optional
        Parameters used to initialize the iterative procedure.
    max_iter : int, optional
        Maximum number of iterations allowed.
    tol : float, optional
        Maximum L1-norm of the difference between successive iterates to
        declare convergence.

    Returns
    -------
    params : numpy.ndarray
        The ML estimate of model parameters.
    """
    def spectral_step(*args, **kwargs):
        # Equivalent to functools.partial(lsr_rankings, n_items=..., ...):
        # presets the problem data while forwarding everything else.
        return lsr_rankings(*args, n_items=n_items, data=data, alpha=alpha,
                            **kwargs)

    return _ilsr(spectral_step, initial_params, max_iter, tol)
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters. |
def multi_replace(str_, search_list, repl_list):
    r"""
    Performs multiple replace functions foreach item in search_list and
    repl_list.

    Args:
        str_ (str): string to search
        search_list (list): list of search strings
        repl_list (list or str): one or multiple replace strings

    Returns:
        str: str_

    Raises:
        ValueError: if ``search_list`` and ``repl_list`` differ in length.

    CommandLine:
        python -m utool.util_str --exec-multi_replace

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_str import *  # NOQA
        >>> str_ = 'foo. bar: baz; spam-eggs --- eggs+spam'
        >>> search_list = ['.', ':', '---']
        >>> repl_list = '@'
        >>> str_ = multi_replace(str_, search_list, repl_list)
        >>> result = ('str_ = %s' % (str(str_),))
        >>> print(result)
        str_ = foo@ bar@ baz; spam-eggs @ eggs+spam
    """
    # NOTE: was ``isinstance(repl_list, six.string_types)``; the plain
    # Python-3 ``str`` check removes the six dependency.
    if isinstance(repl_list, str):
        # A single replacement string applies to every search string.
        repl_list_ = [repl_list] * len(search_list)
    else:
        repl_list_ = repl_list
    # Raise explicitly instead of ``assert`` so the check survives ``-O``.
    if len(search_list) != len(repl_list_):
        raise ValueError('bad lens')
    newstr = str_
    for search, repl in zip(search_list, repl_list_):
        newstr = newstr.replace(search, repl)
    return newstr
Performs multiple replace functions foreach item in search_list and
repl_list.
Args:
str_ (str): string to search
search_list (list): list of search strings
repl_list (list or str): one or multiple replace strings
Returns:
str: str_
CommandLine:
python -m utool.util_str --exec-multi_replace
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> str_ = 'foo. bar: baz; spam-eggs --- eggs+spam'
>>> search_list = ['.', ':', '---']
>>> repl_list = '@'
>>> str_ = multi_replace(str_, search_list, repl_list)
>>> result = ('str_ = %s' % (str(str_),))
>>> print(result)
str_ = foo@ bar@ baz; spam-eggs @ eggs+spam |
def setRecord(self, record):
    """
    Sets the record that is linked with this widget.

    :param record | <orb.Table>
    """
    super(XBasicCardWidget, self).setRecord(record)

    # Without a browser (or its factory) there is nothing to render.
    browser = self.browserWidget()
    if (not browser):
        return

    factory = browser.factory()
    if (not factory):
        return

    # Let the factory produce the thumbnail icon and caption for the record.
    self._thumbnailButton.setIcon(factory.thumbnail(record))
    self._titleLabel.setText(factory.thumbnailText(record))
:param record | <orb.Table> |
async def shutdown(self):
    """
    This method attempts an orderly shutdown.

    Cleanup failures while stopping/closing the loop are deliberately
    ignored (best effort); the process then exits via ``sys.exit(0)``.

    :returns: No return value
    """
    if self.log_output:
        logging.info('Shutting down ...')
    else:
        print('Shutting down ...')

    await self.send_reset()

    # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
    # restrict the best-effort cleanup to ordinary errors.
    try:
        self.loop.stop()
    except Exception:
        pass
    try:
        self.loop.close()
    except Exception:
        pass
    sys.exit(0)
If any exceptions are thrown, just ignore them.
:returns: No return value |
def get_pane_index(self, pane):
    """Return the index of *pane* within this container's panes.

    Raises ValueError when the pane is not present.
    """
    assert isinstance(pane, Pane)
    return self.panes.index(pane)
def _set_topology_group(self, v, load=False):
    """
    Setter method for topology_group, mapped from YANG variable /topology_group (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_topology_group is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_topology_group() directly.
    """
    # NOTE: auto-generated (pyangbind-style) setter -- the YANGDynClass
    # arguments mirror the YANG model; do not hand-edit them casually.
    if hasattr(v, "_utype"):
        # Unwrap a typed value back to its base representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("topology_group_id",topology_group.topology_group, yang_name="topology-group", rest_name="topology-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='topology-group-id', extensions={u'tailf-common': {u'info': u'Configure topology vlan group for L2 protocols', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'145', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'TopologyGroupCallpoint', u'cli-mode-name': u'config-topo-group-$(topology-group-id)'}}), is_container='list', yang_name="topology-group", rest_name="topology-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure topology vlan group for L2 protocols', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'145', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'TopologyGroupCallpoint', u'cli-mode-name': u'config-topo-group-$(topology-group-id)'}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({
            'error-string': """topology_group must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("topology_group_id",topology_group.topology_group, yang_name="topology-group", rest_name="topology-group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='topology-group-id', extensions={u'tailf-common': {u'info': u'Configure topology vlan group for L2 protocols', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'145', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'TopologyGroupCallpoint', u'cli-mode-name': u'config-topo-group-$(topology-group-id)'}}), is_container='list', yang_name="topology-group", rest_name="topology-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure topology vlan group for L2 protocols', u'cli-no-key-completion': None, u'cli-full-no': None, u'sort-priority': u'145', u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-no-match-completion': None, u'cli-full-command': None, u'callpoint': u'TopologyGroupCallpoint', u'cli-mode-name': u'config-topo-group-$(topology-group-id)'}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='list', is_config=True)""",
        })
    self.__topology_group = t
    if hasattr(self, '_set'):
        self._set()
If this variable is read-only (config: false) in the
source YANG file, then _set_topology_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_topology_group() directly. |
def report(self, output_file=sys.stdout):
    """Report analysis outcome in human readable form.

    Prints a bottleneck table (CPU peak plus per-memory-level entries from
    ``self.results['mem bottlenecks']``), then classifies the kernel as CPU
    bound or cache/memory bound.

    :param output_file: writable text stream (default: ``sys.stdout``)
    """
    max_perf = self.results['max_perf']

    # Optional verbose dumps of the raw analysis results.
    if self._args and self._args.verbose >= 3:
        print('{}'.format(pformat(self.results)), file=output_file)

    if self._args and self._args.verbose >= 1:
        print('{}'.format(pformat(self.results['verbose infos'])), file=output_file)

    print('Bottlenecks:', file=output_file)
    print('  level | a. intensity |  performance   |   peak bandwidth  | peak bandwidth kernel',
          file=output_file)
    print('--------+--------------+-----------------+-------------------+----------------------',
          file=output_file)
    # First row: the CPU's peak performance in the unit selected via CLI.
    print('    CPU |              | {!s:>15} |                   |'.format(
        max_perf[self._args.unit]),
        file=output_file)
    # One row per memory-level bottleneck candidate.
    for b in self.results['mem bottlenecks']:
        print('{level:>7} | {arithmetic intensity:>5.2} FLOP/B | {0!s:>15} |'
              ' {bandwidth!s:>17} | {bw kernel:<8}'.format(
                  b['performance'][self._args.unit], **b),
              file=output_file)
    print('', file=output_file)

    if self.results['min performance']['FLOP/s'] > max_perf['FLOP/s']:
        # CPU bound
        print('CPU bound. {!s} due to CPU max. FLOP/s'.format(max_perf), file=output_file)
    else:
        # Cache or mem bound
        print('Cache or mem bound.', file=output_file)

        # The limiting memory level was recorded under 'bottleneck level'.
        bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
        print('{!s} due to {} transfer bottleneck (with bw from {} benchmark)'.format(
            bottleneck['performance'][self._args.unit],
            bottleneck['level'],
            bottleneck['bw kernel']),
            file=output_file)
        print('Arithmetic Intensity: {:.2f} FLOP/B'.format(bottleneck['arithmetic intensity']),
              file=output_file)
def read_asynchronously(library, session, count):
    """Reads data from device or interface asynchronously.

    Corresponds to viReadAsync function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param count: Number of bytes to be read.
    :return: result, jobid, return value of the library call.
    :rtype: ctypes buffer, jobid, :class:`pyvisa.constants.StatusCode`
    """
    # Allocate the destination buffer and an out-parameter for the job id.
    read_buffer = create_string_buffer(count)
    job_id = ViJobId()
    status = library.viReadAsync(session, read_buffer, count, byref(job_id))
    return read_buffer, job_id, status
Corresponds to viReadAsync function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param count: Number of bytes to be read.
:return: result, jobid, return value of the library call.
:rtype: ctypes buffer, jobid, :class:`pyvisa.constants.StatusCode` |
def table(columns, names, page_size=None, format_strings=None):
    """ Return an html table of this data

    Parameters
    ----------
    columns : list of numpy arrays
    names : list of strings
        The list of columns names
    page_size : {int, None}, optional
        The number of items to show on each page of the table
    format_strings : {lists of strings, None}, optional
        The ICU format string for this column, None for no formatting. All
        columns must have a format string if provided.

    Returns
    -------
    html_table : str
        A str containing the html code to display a table of this data
    """
    # Paging is enabled only when an explicit page size was requested.
    page = 'disable' if page_size is None else 'enable'
    div_id = uuid.uuid4()

    # Byte-string (kind 'S') columns are rendered as strings, all others
    # as numbers.
    column_descriptions = [
        ('string' if column.dtype.kind == 'S' else 'number', name)
        for column, name in zip(columns, names)
    ]

    # Transpose column-major input into a list of rows.
    data = [list(row) for row in zip(*columns)]

    return google_table_template.render(
        div_id=div_id,
        page_enable=page,
        column_descriptions=column_descriptions,
        page_size=page_size,
        data=data,
        format_strings=format_strings,
    )
Parameters
----------
columns : list of numpy arrays
names : list of strings
The list of columns names
page_size : {int, None}, optional
The number of items to show on each page of the table
format_strings : {lists of strings, None}, optional
The ICU format string for this column, None for no formatting. All
columns must have a format string if provided.
Returns
-------
html_table : str
A str containing the html code to display a table of this data |
def resize_file_to(self, in_path, out_path, keep_filename=False):
    """ Given a filename, resize and save the image per the specification into out_path

    :param in_path: path to image file to save. Must be supported by PIL
    :param out_path: path to the directory root for the outputted thumbnails to be stored
    :param keep_filename: if True, reuse the source basename instead of the
        generated thumbnail name
    :return: None
    """
    if keep_filename:
        filename = path.join(out_path, path.basename(in_path))
    else:
        filename = path.join(out_path, self.get_thumbnail_name(in_path))
    out_dir = path.dirname(filename)
    # exist_ok avoids the race between an existence check and creation when
    # several thumbnails are generated concurrently.
    os.makedirs(out_dir, exist_ok=True)
    if not path.exists(filename):
        try:
            image = Image.open(in_path)
            thumbnail = self.resize(image)
            thumbnail.save(filename)
            logger.info("Generated Thumbnail {0}".format(path.basename(filename)))
        except IOError:
            # Best effort: unreadable/unsupported images are skipped, not fatal.
            logger.info("Generating Thumbnail for {0} skipped".format(path.basename(filename)))
:param in_path: path to image file to save. Must be supported by PIL
:param out_path: path to the directory root for the outputted thumbnails to be stored
:return: None |
def trusted_permission(f):
    """Restrict a view function to requests from the D1 infrastructure."""
    @functools.wraps(f)
    def wrapped_view(request, *args, **kwargs):
        # ``trusted`` vets the request before the view runs.
        trusted(request)
        return f(request, *args, **kwargs)
    return wrapped_view
def _py_expand_short(subsequence, sequence, max_l_dist):
"""Straightforward implementation of partial match expansion."""
# The following diagram shows the score calculation step.
#
# Each new score is the minimum of:
# * a OR a + 1 (substitution, if needed)
# * b + 1 (deletion, i.e. skipping a sequence character)
# * c + 1 (insertion, i.e. skipping a sub-sequence character)
#
# a -- +1 -> c
#
# | \ |
# | \ |
# +1 +1? +1
# | \ |
# v β v
#
# b -- +1 -> scores[subseq_index]
subseq_len = len(subsequence)
if subseq_len == 0:
return (0, 0)
# Initialize the scores array with values for just skipping sub-sequence
# chars.
scores = list(range(1, subseq_len + 1))
min_score = subseq_len
min_score_idx = -1
for seq_index, char in enumerate(sequence):
# calculate scores, one for each character in the sub-sequence
a = seq_index
c = a + 1
for subseq_index in range(subseq_len):
b = scores[subseq_index]
c = scores[subseq_index] = min(
a + (char != subsequence[subseq_index]),
b + 1,
c + 1,
)
a = b
# keep the minimum score found for matches of the entire sub-sequence
if c <= min_score:
min_score = c
min_score_idx = seq_index
# bail early when it is impossible to find a better expansion
elif min(scores) >= min_score:
break
return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None) | Straightforward implementation of partial match expansion. |
def get_data_frame_transform_stats(self, transform_id=None, params=None):
    """
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/get-data-frame-transform-stats.html>`_

    :arg transform_id: The id of the transform for which to get stats.
        '_all' or '*' implies all transforms
    """
    endpoint = _make_path("_data_frame", "transforms", transform_id, "_stats")
    return self.transport.perform_request("GET", endpoint, params=params)
:arg transform_id: The id of the transform for which to get stats.
'_all' or '*' implies all transforms |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.