code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def queryTs(ts, expression):
    """
    Find the indices of the time series entries that match the given expression.

    | Example:
    | D = lipd.loadLipd()
    | ts = lipd.extractTs(D)
    | matches = queryTs(ts, "archiveType == marine sediment")
    | matches = queryTs(ts, "geo_meanElev <= 2000")

    :param str expression: Expression (a single query string, or a list of them)
    :param list ts: Time series
    :return list _idx: Indices of entries that match the criteria
    """
    _idx = []
    # Work on the given time series; each query filters it down further.
    # (BUG FIX: the original referenced undefined names `expressions` and
    # `new_ts`, so this function always raised NameError.)
    new_ts = ts
    # User provided a single query string
    if isinstance(expression, str):
        # Turn the given string expression into a machine-usable comparative expression.
        expr_lst = translate_expression(expression)
        # Only proceed if the translation resulted in a usable expression.
        if expr_lst:
            new_ts, _idx = get_matches(expr_lst, new_ts)
    # User provided a list of multiple queries
    elif isinstance(expression, list):
        # Loop for each query; each pass filters the already-filtered series.
        for expr in expression:
            expr_lst = translate_expression(expr)
            if expr_lst:
                new_ts, _idx = get_matches(expr_lst, new_ts)
    return _idx
def now(utc=False, tz=None):
    """
    Get a current DateTime object. By default is local.

    .. code:: python

        reusables.now()
        # DateTime(2016, 12, 8, 22, 5, 2, 517000)
        reusables.now().format("It's {24-hour}:{min}")
        # "It's 22:05"

    :param utc: bool, default False, UTC time not local
    :param tz: TimeZone as specified by the datetime module
    :return: reusables.DateTime
    """
    # UTC takes precedence; tz is only consulted for local time.
    if utc:
        return datetime.datetime.utcnow()
    return datetime.datetime.now(tz=tz)
def includes(self):
    """Return all of the include directories for this chip as a list."""
    raw_entries = self.combined_properties('includes')
    joined = []
    for entry in raw_entries:
        # Entries are either ready-made path strings or sequences of path parts.
        if isinstance(entry, str):
            joined.append(entry)
        else:
            joined.append(os.path.join(*entry))
    # All include paths are relative to the base directory.
    result = [os.path.normpath(os.path.join('.', inc)) for inc in joined]
    result.append(os.path.normpath(os.path.abspath(self.build_dirs()['build'])))
    return result
def backwards(self, orm):
    "Write your backwards methods here."
    # Reverse this data migration: remove all cohort-sample links first,
    # then every cohort except the default one.
    orm['samples.CohortSample'].objects.all().delete()
    # DEFAULT_COHORT_NAME is a module-level constant defined elsewhere in this file.
    orm['samples.Cohort'].objects.exclude(name=DEFAULT_COHORT_NAME).delete()
def files(self, creds, options, dry_run):
    # type: (SourcePath, StorageCredentials,
    #        blobxfer.models.options.Download, bool) -> StorageEntity
    """Generator of Azure remote files or blobs
    :param SourcePath self: this
    :param StorageCredentials creds: storage creds
    :param blobxfer.models.options.Download options: download options
    :param bool dry_run: dry run
    :rtype: StorageEntity
    :return: Azure storage entity object
    """
    # File mode enumerates Azure File shares; every other mode lists blobs.
    if options.mode == blobxfer.models.azure.StorageModes.File:
        for file in self._populate_from_list_files(
                creds, options, dry_run):
            yield file
    else:
        for blob in self._populate_from_list_blobs(
                creds, options, dry_run):
            yield blob
def serve(destination, port, config):
    """Run a simple web server.

    :param destination: directory to serve; if missing, it is looked up in
        the config file instead
    :param port: TCP port to listen on
    :param config: path to a settings file used as a fallback source for
        the destination directory
    """
    if os.path.exists(destination):
        pass
    elif os.path.exists(config):
        # Destination missing: fall back to the one recorded in the config.
        settings = read_settings(config)
        destination = settings.get('destination')
        if not os.path.exists(destination):
            sys.stderr.write("The '{}' directory doesn't exist, maybe try "
                             "building first?\n".format(destination))
            sys.exit(1)
    else:
        sys.stderr.write("The {destination} directory doesn't exist "
                         "and the config file ({config}) could not be read.\n"
                         .format(destination=destination, config=config))
        sys.exit(2)
    print('DESTINATION : {}'.format(destination))
    os.chdir(destination)
    Handler = server.SimpleHTTPRequestHandler
    # bind_and_activate=False so allow_reuse_address can be set before binding.
    httpd = socketserver.TCPServer(("", port), Handler, False)
    print(" * Running on http://127.0.0.1:{}/".format(port))
    try:
        httpd.allow_reuse_address = True
        httpd.server_bind()
        httpd.server_activate()
        httpd.serve_forever()
    except KeyboardInterrupt:
        print('\nAll done!')
def copyfileobj(src, dst, length=None, exception=OSError):
    """Copy length bytes from fileobj src to fileobj dst.
    If length is None, copy the entire content.
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: let the standard library do the chunking.
        shutil.copyfileobj(src, dst)
        return
    full_blocks, tail = divmod(length, BUFSIZE)
    for _ in range(full_blocks):
        chunk = src.read(BUFSIZE)
        if len(chunk) < BUFSIZE:
            raise exception("unexpected end of data")
        dst.write(chunk)
    if tail:
        chunk = src.read(tail)
        if len(chunk) < tail:
            raise exception("unexpected end of data")
        dst.write(chunk)
    return
def set_data(self, frames):
    """
    Prepare the input of model

    Reorders a list of H x W x C frames into a single NCDHW-style
    C x T x H x W array stored on ``self.data``; the frame count is
    stored on ``self.length``.
    """
    data_frames = []
    for frame in frames:
        # frame H x W x C
        frame = frame.swapaxes(0, 1)  # swap width and height to form format W x H x C
        if len(frame.shape) < 3:
            # Grayscale input: add a trailing channel axis to reach W x H x 1.
            frame = np.array([frame]).swapaxes(0, 2).swapaxes(0, 1)  # Add grayscale channel
        data_frames.append(frame)
    frames_n = len(data_frames)
    data_frames = np.array(data_frames)  # T x W x H x C
    data_frames = np.rollaxis(data_frames, 3)  # C x T x W x H
    data_frames = data_frames.swapaxes(2, 3)  # C x T x H x W = NCDHW
    self.data = data_frames
    self.length = frames_n
def _get_upload_session_status(res):
"""Parse the image upload response to obtain status.
Args:
res: http_utils.FetchResponse instance, the upload response
Returns:
dict, sessionStatus of the response
Raises:
hangups.NetworkError: If the upload request failed.
"""
response = json.loads(res.body.decode())
if 'sessionStatus' not in response:
try:
info = (
response['errorMessage']['additionalInfo']
['uploader_service.GoogleRupioAdditionalInfo']
['completionInfo']['customerSpecificInfo']
)
reason = '{} : {}'.format(info['status'], info['message'])
except KeyError:
reason = 'unknown reason'
raise exceptions.NetworkError('image upload failed: {}'.format(
reason
))
return response['sessionStatus'] | Parse the image upload response to obtain status.
Args:
res: http_utils.FetchResponse instance, the upload response
Returns:
dict, sessionStatus of the response
Raises:
hangups.NetworkError: If the upload request failed. |
def _fire_bundle_event(self, kind):
    # type: (int) -> None
    """
    Fires a bundle event of the given kind

    :param kind: Kind of event
    """
    # Delegate to the framework dispatcher, wrapping this bundle in the event.
    self.__framework._dispatcher.fire_bundle_event(BundleEvent(kind, self))
def get_object_cat1(con, token, cat, kwargs):
    """
    Constructs the "GET" URL. The functions is used by the get_object method
    First Category of "GET" URL construction. Again calling it first category because more
    complex functions maybe added later.

    :param con: connection object passed through to the wiring layer
    :param token: access token appended to the request
    :param cat: category key used to look up the parameter name in settings
    :param kwargs: must contain 'id' plus the category's parameter as a list
    :return: the wiring response, or the string "Parameter Error" when the
        expected parameter is missing from kwargs
    """
    req_str = "/"+kwargs['id']+"?" #/id?
    req_str += "access_token="+token #/id?@acces_token=......
    del kwargs['id']
    key = settings.get_object_cat1_param[cat] #get the param name for the category(single, multiple)
    req_str += "&"+key+"=" #/id?@acces_token=......key=
    if key in kwargs.keys():
        # Join the list values into a comma-separated query argument.
        length = len( kwargs[key] )
        for i in range(length):
            if i == 0:
                req_str += kwargs[key][i]
            else:
                req_str += ","+kwargs[key][i]
    else:
        return "Parameter Error"
    res = wiring.send_request("GET", con, req_str, '')
    return res
def organize_objects(self):
    """Organize objects and namespaces"""
    def _render_children(obj):
        # Resolve child-name strings into actual objects and group them
        # into the parent's item_map by plural type name.
        for child in obj.children_strings:
            child_object = self.objects.get(child)
            if child_object:
                obj.item_map[child_object.plural].append(child_object)
                obj.children.append(child_object)
        for key in obj.item_map:
            obj.item_map[key].sort()
    def _recurse_ns(obj):
        # Ensure the object's top-level namespace exists and adopts it.
        if not obj:
            return
        namespace = obj.top_namespace
        if namespace is not None:
            ns_obj = self.top_namespaces.get(namespace)
            if ns_obj is None or not isinstance(ns_obj, DotNetNamespace):
                # Create a synthetic namespace object on demand.
                for ns_obj in self.create_class(
                    {"uid": namespace, "type": "namespace"}
                ):
                    self.top_namespaces[ns_obj.id] = ns_obj
            if obj not in ns_obj.children and namespace != obj.id:
                ns_obj.children.append(obj)
    for obj in self.objects.values():
        _render_children(obj)
        _recurse_ns(obj)
    # Clean out dead namespaces
    for key, ns in self.top_namespaces.copy().items():
        if not ns.children:
            del self.top_namespaces[key]
    # NOTE(review): unlike above, this iterates self.namespaces without
    # copying while deleting — confirm mutation-during-iteration is safe here.
    for key, ns in self.namespaces.items():
        if not ns.children:
            del self.namespaces[key]
def bounding_box(self):
    """
    An axis aligned bounding box for the current mesh.

    Returns
    ----------
    aabb : trimesh.primitives.Box
        Box object with transform and extents defined
        representing the axis aligned bounding box of the mesh
    """
    from . import primitives
    # Identity rotation, translated to the center of the axis aligned bounds.
    box_transform = np.eye(4)
    box_transform[:3, 3] = self.bounds.mean(axis=0)
    return primitives.Box(transform=box_transform,
                          extents=self.extents,
                          mutable=False)
def priority_enqueue(self,
                     function,
                     name=None,
                     force_start=False,
                     times=1,
                     data=None):
    """
    Like :class:`enqueue()`, but adds the given function at the top of the
    queue.

    If force_start is True, the function is immediately started even when
    the maximum number of concurrent threads is already reached.

    :type function: callable
    :param function: The function that is executed.
    :type name: str
    :param name: Stored in Job.name.
    :type force_start: bool
    :param force_start: Whether to start execution immediately.
    :type times: int
    :param times: The maximum number of attempts.
    :type data: object
    :param data: Optional data to store in Job.data.
    :rtype: int
    :return: The id of the new job.
    """
    self._check_if_ready()
    # Delegate straight to the main loop's queue implementation.
    return self.main_loop.priority_enqueue(
        function, name, force_start, times, data)
def organisations(self):
    '''The organisations of this composition.'''
    # Lightweight record for one organisation's identity and membership.
    class Org:
        def __init__(self, sdo_id, org_id, members, obj):
            self.sdo_id = sdo_id
            self.org_id = org_id
            self.members = members
            self.obj = obj
    with self._mutex:
        # Lazily populate the cache on first access.
        if not self._orgs:
            for org in self._obj.get_owned_organizations():
                owner = org.get_owner()
                if owner:
                    sdo_id = owner._narrow(SDOPackage.SDO).get_sdo_id()
                else:
                    # No owner: record an empty SDO id.
                    sdo_id = ''
                org_id = org.get_organization_id()
                members = [m.get_sdo_id() for m in org.get_members()]
                self._orgs.append(Org(sdo_id, org_id, members, org))
    return self._orgs
def create_filters(model, filter_info, resource):
    """Apply filters from filters information to base query

    :param DeclarativeMeta model: the model of the node
    :param dict filter_info: current node filter information
    :param Resource resource: the resource
    """
    # Resolve each filter description into a query filter clause.
    return [Node(model, filter_, resource, resource.schema).resolve()
            for filter_ in filter_info]
def Plus(self, other):
    """
    Returns a new point which is the pointwise sum of self and other.
    """
    summed = (self.x + other.x, self.y + other.y, self.z + other.z)
    return Point(*summed)
def simulated_binary_crossover(random, mom, dad, args):
    """Return the offspring of simulated binary crossover on the candidates.

    This function performs simulated binary crossover (SBX), following the
    implementation in NSGA-II
    `(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed
      (default 1.0)
    - *sbx_distribution_index* -- the non-negative distribution index
      (default 10)

    A small value of the `sbx_distribution_index` optional argument allows
    solutions far away from parents to be created as child solutions,
    while a large value restricts only near-parent solutions to be created as
    child solutions.
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        di = args.setdefault('sbx_distribution_index', 10)
        bounder = args['_ec'].bounder
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
            try:
                # Order the pair so m <= d before computing the spread factor.
                if m > d:
                    m, d = d, m
                beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
                alpha = 2.0 - 1.0 / beta**(di + 1.0)
                u = random.random()
                if u <= (1.0 / alpha):
                    beta_q = (u * alpha)**(1.0 / float(di + 1.0))
                else:
                    beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
                # Children straddle the parents, clipped to the bounds.
                bro_val = 0.5 * ((m + d) - beta_q * (d - m))
                bro_val = max(min(bro_val, ub), lb)
                sis_val = 0.5 * ((m + d) + beta_q * (d - m))
                sis_val = max(min(sis_val, ub), lb)
                # Randomly swap which child receives which value.
                if random.random() > 0.5:
                    bro_val, sis_val = sis_val, bro_val
                bro[i] = bro_val
                sis[i] = sis_val
            except ZeroDivisionError:
                # Raised when d == m (identical parent genes).
                # The offspring already have legitimate values for every element,
                # so no need to take any special action here.
                pass
        return [bro, sis]
    else:
        return [mom, dad]
def extract_error_message(cls, e):
    """Extract error message for queries"""
    # DB-API style exceptions carry (code, message) in args; prefer the
    # message element when available, otherwise the plain string form.
    try:
        args = e.args
        if isinstance(args, tuple) and len(args) > 1:
            return args[1]
    except Exception:
        pass
    return str(e)
def decode_fetch_response(cls, response):
    """
    Decode FetchResponse struct to FetchResponsePayloads

    Arguments:
        response: FetchResponse

    Returns one payload per (topic, partition) pair, with the raw message
    set expanded via ``cls.decode_message_set``.
    """
    return [
        kafka.structs.FetchResponsePayload(
            topic, partition, error, highwater_offset, [
                offset_and_msg
                for offset_and_msg in cls.decode_message_set(messages)])
        for topic, partitions in response.topics
        for partition, error, highwater_offset, messages in partitions
    ]
def head_coaches_by_game(self, year):
    """Returns head coach data by game.

    :year: An int representing the season in question.
    :returns: An array with an entry per game of the season that the team
        played (including playoffs). Each entry is the head coach's ID for that
        game in the season.
    """
    coach_str = self._year_info_pq(year, 'Coach').text()
    # Entries look like "coachID (wins-losses-ties)".
    regex = r'(\S+?) \((\d+)-(\d+)-(\d+)\)'
    coach_and_tenure = []
    # BUG FIX: the original loop referenced undefined `coachStr` and called
    # m.groups() after the final failed search (m is None). Parse entries
    # until no match remains, advancing past each consumed entry.
    while True:
        m = re.search(regex, coach_str)
        if not m:
            break
        coach_id, wins, losses, ties = m.groups()
        coach_str = coach_str[m.end(4) + 1:]
        tenure = int(wins) + int(losses) + int(ties)
        coach_and_tenure.append((coach_id, tenure))
    # Expand each coach into one entry per game coached; the page lists the
    # most recent coach first, so reverse to get chronological game order.
    coach_ids = [
        cid for cid, games in coach_and_tenure for _ in range(games)
    ]
    return np.array(coach_ids[::-1])
def _collect_memory_descriptors(program: Program) -> Dict[str, ParameterSpec]:
    """Collect Declare instructions that are important for building the patch table.

    This is secretly stored on BinaryExecutableResponse. We're careful to make sure
    these objects are json serializable.

    :param program: the program whose Declare instructions are scanned
    :return: A dictionary of variable names to specs about the declared region.
    """
    # Only Declare instructions describe memory regions; everything else is skipped.
    return {
        instr.name: ParameterSpec(type=instr.memory_type, length=instr.memory_size)
        for instr in program if isinstance(instr, Declare)
    }
def get_authorizations_for_agent_and_function(self, agent_id, function_id):
    """Gets a list of ``Authorizations`` associated with a given agent.

    Authorizations related to the given resource, including those
    related through an ``Agent,`` are returned. In plenary mode, the
    returned list contains all known authorizations or an error
    results. Otherwise, the returned list may contain only those
    authorizations that are accessible through this session.

    arg:    agent_id (osid.id.Id): an agent ``Id``
    arg:    function_id (osid.id.Id): a function ``Id``
    return: (osid.authorization.AuthorizationList) - the returned
            ``Authorization list``
    raise:  NullArgument - ``agent_id`` or ``function_id`` is
            ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    collection = JSONClientValidated('authorization',
                                     collection='Authorization',
                                     runtime=self._runtime)
    # Match on both ids, merged with the session's view filter, in stable order.
    result = collection.find(
        dict({'agentId': str(agent_id),
              'functionId': str(function_id)},
             **self._view_filter())).sort('_sort_id', ASCENDING)
    return objects.AuthorizationList(result, runtime=self._runtime)
def set_moving_image(self, image):
    """
    Set Moving ANTsImage for metric

    :raises ValueError: if image is not an ANTsImage or its dimension
        does not match the metric's dimension
    """
    # Validate type and dimensionality before handing the pointer down.
    if not isinstance(image, iio.ANTsImage):
        raise ValueError('image must be ANTsImage type')
    if image.dimension != self.dimension:
        raise ValueError('image dim (%i) does not match metric dim (%i)' % (image.dimension, self.dimension))
    # NOTE(review): second argument False presumably distinguishes moving
    # from fixed image in the underlying metric — confirm against the C++ API.
    self._metric.setMovingImage(image.pointer, False)
    self.moving_image = image
def get_z_variable(nc):
    '''
    Returns the name of the variable that defines the Z axis or height/depth

    :param netCDF4.Dataset nc: netCDF dataset
    :return: variable name, or None if no Z variable exists
    '''
    z_variables = get_z_variables(nc)
    if not z_variables:
        return None
    # Priority is standard_name, units
    for var in z_variables:
        ncvar = nc.variables[var]
        if getattr(ncvar, 'standard_name', None) in ('depth', 'height', 'altitude'):
            return var
    # Fall back to units: pressure-like (bar) or length-like (m) units
    # indicate a vertical coordinate.
    for var in z_variables:
        ncvar = nc.variables[var]
        units = getattr(ncvar, 'units', None)
        # NOTE: `basestring` implies Python 2 (or a compatibility shim elsewhere).
        if isinstance(units, basestring):
            if units_convertible(units, 'bar'):
                return var
            if units_convertible(units, 'm'):
                return var
    # Nothing matched by name or units: arbitrarily pick the first candidate.
    return z_variables[0]
def call(self, cmd, **kwargs):
    """A simple subprocess wrapper.

    :param cmd: command as a string (split on whitespace) or a list of args
    :param kwargs: extra keyword arguments forwarded to subprocess.Popen
    :return: (stdout, stderr, returncode) tuple
    :raises Exception: if the command exits with a non-zero return code
    """
    if isinstance(cmd, basestring):
        cmd = cmd.split()
    self.log.info('Running %s', cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, **kwargs)
    out, err = p.communicate()
    if out:
        self.log.info(out)
    if err:
        # stderr output is only an error if the process actually failed.
        if p.returncode == 0:
            self.log.info(err)
        else:
            self.log.error(err)
    if p.returncode != 0:
        self.log.error('returncode = %d' % p.returncode)
        # NOTE(review): bare Exception with no message — consider a more
        # specific exception carrying cmd and returncode.
        raise Exception
    return out, err, p.returncode
def mark_all_as_read(self, recipient=None):
    """Mark as read any unread messages in the current queryset.

    Optionally, filter these by recipient first.
    """
    # Start from the unread subset; the True flag keeps the original
    # unread() behaviour of recording when messages were marked read.
    queryset = self.unread(True)
    if recipient:
        queryset = queryset.filter(recipient=recipient)
    return queryset.update(unread=False)
def map(self, width, height):
    """
    Creates and returns a new randomly generated map

    :param width: map width in tiles
    :param height: map height in tiles
    :return: the populated tile map template
    """
    template = ti.load(os.path.join(script_dir, 'assets', 'template.tmx'))['map0']
    #template.set_view(0, 0, template.px_width, template.px_height)
    template.set_view(0, 0, width*template.tw, height*template.th)
    # TODO: Save the generated map.
    #epoch = int(time.time())
    #filename = 'map_' + str(epoch) + '.tmx'
    # Draw borders: copy the tile at (0, 0) along the right and bottom edges.
    # NOTE: `xrange` implies Python 2.
    border_x = template.cells[width]
    for y in xrange(0,height+1):
        border_x[y].tile = template.cells[0][0].tile
    for x in xrange(0,width):
        template.cells[x][height].tile = template.cells[0][0].tile
    # Start within borders
    #self.recursive_division(template.cells, 3, (template.px_width/template.tw)-1, (template.px_height/template.th)-1, 0, 0)
    self.recursive_division(template.cells, 3, width, height, 0, 0)
    return template
def get_brandings(self):
    """
    Get all account brandings

    @return List of brandings
    """
    # Build an authenticated connection and issue a GET against the brandings endpoint.
    connection = Connection(self.token)
    connection.set_url(self.production, self.BRANDINGS_URL)
    return connection.get_request()
def get_closest(self, sma):
    """
    Return the `~photutils.isophote.Isophote` instance that has the
    closest semimajor axis length to the input semimajor axis.

    Parameters
    ----------
    sma : float
        The semimajor axis length.

    Returns
    -------
    isophote : `~photutils.isophote.Isophote` instance
        The isophote with the closest semimajor axis value.
    """
    # Find the position whose stored sma differs least from the request.
    closest_index = np.argmin(np.abs(self.sma - sma))
    return self._list[closest_index]
def contains(self, k):
    """Return True if key `k` exists"""
    # Refresh the in-memory store if the backing data changed.
    if self._changed():
        self._read()
    current_keys = self.store.keys()
    return k in current_keys
def push(self, x):
    """
    Push an I{object} onto the stack.

    @param x: An object to push.
    @type x: L{Frame}
    @return: The pushed frame.
    @rtype: L{Frame}
    """
    # Wrap non-Frame objects so the stack only ever holds Frame instances.
    frame = x if isinstance(x, Frame) else Frame(x)
    self.stack.append(frame)
    log.debug('push: (%s)\n%s', Repr(frame), Repr(self.stack))
    return frame
def get_types(self):
    """ Returns the unordered list of data types

    :return: list of data types
    """
    # The three fixed leading columns are always (str, int, int).
    type_list = [str, int, int]
    if self.strandPos is not None:
        type_list.append(str)
    if self.otherPos:
        # Each extra position tuple carries its type in slot 2.
        type_list.extend(entry[2] for entry in self.otherPos)
    return type_list
def sensor(self, sensor_type):
    """Update and return sensor value.

    :param sensor_type: which sensor to read on this device
    :return: the value reported by the session for this device's sensor
    """
    _LOGGER.debug("Reading %s sensor.", sensor_type)
    return self._session.read_sensor(self.device_id, sensor_type)
def root_rhx_gis(self) -> Optional[str]:
    """rhx_gis string returned in the / query."""
    if self.is_logged_in:
        # At the moment, rhx_gis seems to be required for anonymous requests only. By returning None when logged
        # in, we can save the root_rhx_gis lookup query.
        return None
    # Lazily fetch and cache the value from the root query.
    if not self._root_rhx_gis:
        self._root_rhx_gis = self.get_json('', {})['rhx_gis']
    return self._root_rhx_gis
def blkid(device=None, token=None):
    '''
    Return block device attributes: UUID, LABEL, etc. This function only works
    on systems where blkid is available.

    device
        Device name from the system

    token
        Any valid token used for the search

    CLI Example:

    .. code-block:: bash

        salt '*' disk.blkid
        salt '*' disk.blkid /dev/sda
        salt '*' disk.blkid token='UUID=6a38ee5-7235-44e7-8b22-816a403bad5d'
        salt '*' disk.blkid token='TYPE=ext4'
    '''
    cmd = ['blkid']
    # `device` takes precedence over `token` if both are given.
    if device:
        cmd.append(device)
    elif token:
        cmd.extend(['-t', token])
    ret = {}
    blkid_result = __salt__['cmd.run_all'](cmd, python_shell=False)
    # Any failure returns an empty mapping rather than raising.
    if blkid_result['retcode'] > 0:
        return ret
    for line in blkid_result['stdout'].splitlines():
        if not line:
            continue
        comps = line.split()
        # First component is "/dev/xxx:"; strip the trailing colon.
        device = comps[0][:-1]
        info = {}
        # NOTE(review): this split pattern looks intended to break the
        # KEY="value" attribute string apart on quotes — verify against
        # actual blkid output formats.
        device_attributes = re.split(('\"*\"'), line.partition(' ')[2])
        # Consume the split pieces pairwise as (key, value).
        for key, value in zip(*[iter(device_attributes)]*2):
            key = key.strip('=').strip(' ')
            info[key] = value.strip('"')
        ret[device] = info
    return ret
def _parse_outgoing_mail(sender, to, msgstring):
    """
    Parse an outgoing mail and put it into the OUTBOX.

    Arguments:
    - `sender`: str (unused; kept for the expected callback signature)
    - `to`: str (unused; kept for the expected callback signature)
    - `msgstring`: str

    Return: None
    Exceptions: None
    """
    # Append the parsed message to the module-level OUTBOX list.
    global OUTBOX
    OUTBOX.append(email.message_from_string(msgstring))
    return
def validate_path(xj_path):
    """Validates XJ path.

    :param str xj_path: XJ Path
    :raise: XJPathError if validation fails.
    """
    if not isinstance(xj_path, str):
        raise XJPathError('XJPath must be a string')
    for segment in split(xj_path, '.'):
        # Wildcards and non-directive segments need no further checking.
        if segment == '*' or not segment.startswith('@'):
            continue
        if segment in ('@first', '@last'):
            continue
        # Any other @-directive must be an integer array index.
        try:
            int(segment[1:])
        except ValueError:
            raise XJPathError('Array index must be either integer or '
                              '@first or @last')
def set_sp_template_updated(self, vlan_id, sp_template, device_id):
    """Sets the entry's updated_on_ucs flag to True.

    :return: the merged entry on success, False if no matching entry exists
    """
    entry = self.get_sp_template_vlan_entry(vlan_id,
                                            sp_template,
                                            device_id)
    if entry:
        entry.updated_on_ucs = True
        # Persist the flag change into the current session.
        self.session.merge(entry)
        return entry
    else:
        return False
def on_for_degrees(self, steering, speed, degrees, brake=True, block=True):
    """
    Rotate the motors according to the provided ``steering``.

    The distance each motor will travel follows the rules of :meth:`MoveTank.on_for_degrees`.
    """
    # Convert the steering value into per-motor speeds, then defer to MoveTank.
    (left_speed, right_speed) = self.get_speed_steering(steering, speed)
    MoveTank.on_for_degrees(self, SpeedNativeUnits(left_speed), SpeedNativeUnits(right_speed), degrees, brake, block)
def extract_domain(host):
    """
    Domain name extractor. Turns host names into domain names, ported
    from pwdhash javascript code"""
    # Strip any scheme prefix, then keep only the host portion before a path.
    bare_host = re.sub('https?://', '', host)
    bare_host = re.match('([^/]+)', bare_host).groups()[0]
    labels = bare_host.split('.')
    domain = '.'.join(labels[-2:])
    # Two-part public suffixes (e.g. co.uk) need one more label for the domain.
    if domain in _domains:
        domain = '.'.join(labels[-3:])
    return domain
def insert(self, table, value, ignore=False, commit=True):
    """
    Insert a dict into db.

    :type table: string
    :type value: dict
    :type ignore: bool
    :param ignore: use INSERT IGNORE to skip duplicate-key errors
    :type commit: bool
    :return: int. The row id of the insert.
    """
    # Render the values into a placeholder string plus bound arguments.
    value_q, _args = self._value_parser(value, columnname=False)
    _sql = ''.join(['INSERT', ' IGNORE' if ignore else '', ' INTO ', self._backtick(table),
                    ' (', self._backtick_columns(value), ') VALUES (', value_q, ');'])
    # Debug mode returns the interpolated SQL instead of executing it.
    if self.debug:
        return self.cur.mogrify(_sql, _args)
    self.cur.execute(_sql, _args)
    if commit:
        self.conn.commit()
    return self.cur.lastrowid
def statistical_distances(samples1, samples2, earth_mover_dist=True,
                          energy_dist=True):
    """Compute measures of the statistical distance between samples.

    Parameters
    ----------
    samples1: 1d array
    samples2: 1d array
    earth_mover_dist: bool, optional
        Whether or not to compute the Earth mover's distance between the
        samples.
    energy_dist: bool, optional
        Whether or not to compute the energy distance between the samples.

    Returns
    -------
    1d array
        [KS p-value, KS statistic, (earth mover's distance), (energy distance)]
    """
    # Kolmogorov-Smirnov results are always included, p-value first.
    ks_result = scipy.stats.ks_2samp(samples1, samples2)
    distances = [ks_result.pvalue, ks_result.statistic]
    if earth_mover_dist:
        distances.append(
            scipy.stats.wasserstein_distance(samples1, samples2))
    if energy_dist:
        distances.append(scipy.stats.energy_distance(samples1, samples2))
    return np.asarray(distances)
def get_answers(self, assessment_section_id, item_id):
    """Gets the acceptable answers to the associated item.

    arg: assessment_section_id (osid.id.Id): ``Id`` of the
        ``AssessmentSection``
    arg: item_id (osid.id.Id): ``Id`` of the ``Item``
    return: (osid.assessment.AnswerList) - the answers
    raise: IllegalState - ``is_answer_available()`` is ``false``
    raise: NotFound - ``assessment_section_id or item_id is not
        found, or item_id not part of assessment_section_id``
    raise: NullArgument - ``assessment_section_id or item_id is
        null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Guard clause: answers must be available before they can be fetched.
    if not self.is_answer_available(assessment_section_id, item_id):
        raise errors.IllegalState()
    section = self.get_assessment_section(assessment_section_id)
    return section.get_answers(question_id=item_id)
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
arg: item_id (osid.id.Id): ``Id`` of the ``Item``
return: (osid.assessment.AnswerList) - the answers
raise: IllegalState - ``is_answer_available()`` is ``false``
raise: NotFound - ``assessment_section_id or item_id is not
found, or item_id not part of assessment_section_id``
raise: NullArgument - ``assessment_section_id or item_id is
null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def load(cls, filename, project=None, delim=' | '):
    r"""
    Read in pore and throat data from a saved VTK file.

    Parameters
    ----------
    filename : string (optional)
        The name of the file containing the data to import. The formatting
        of this file is outlined below.
    project : OpenPNM Project object
        A GenericNetwork is created and added to the specified Project.
        If no Project is supplied then one will be created and returned.
    """
    filename = cls._parse_filename(filename, ext='vtp')
    piece = ET.parse(filename).find('PolyData').find('Piece')
    # Throat connectivity lives under 'Lines', pore coordinates under
    # 'Points'.
    conns = VTK._element_to_array(piece.find('Lines').find('DataArray'), 2)
    coords = VTK._element_to_array(piece.find('Points').find('DataArray'), 3)
    # Pore (point) and throat (cell) property arrays go into one dict.
    net = {}
    for section in ('PointData', 'CellData'):
        for item in piece.find(section).iter('DataArray'):
            net[item.get('Name')] = VTK._element_to_array(item)
    if project is None:
        project = ws.new_project()
    project = Dict.from_dict(dct=net, project=project, delim=delim)
    # Clean up data values, e.g. convert arrays of 1's and 0's to boolean.
    project = cls._convert_data(project)
    # Attach conns and coords to the network
    network = project.network
    network.update({'throat.conns': conns})
    network.update({'pore.coords': coords})
    return project
Read in pore and throat data from a saved VTK file.
Parameters
----------
filename : string (optional)
The name of the file containing the data to import. The formatting
of this file is outlined below.
project : OpenPNM Project object
A GenericNetwork is created and added to the specified Project.
If no Project is supplied then one will be created and returned. |
def set_params(self, data):
    """ resPQ#05162463 nonce:int128 server_nonce:int128 pq:string server_public_key_fingerprints:Vector long = ResPQ """
    stream = BytesIO(data)
    # Constructor id must match the resPQ TL schema.
    constructor = struct.unpack('<I', stream.read(4))[0]
    assert constructor == resPQ.constructor
    self.nonce = stream.read(16)
    self.server_nonce = stream.read(16)
    self.pq = deserialize_string(stream)
    # Vector header, then a count-prefixed list of int64 fingerprints.
    vector_id = struct.unpack('<I', stream.read(4))[0]
    assert vector_id == 0x1cb5c415  # long vector
    count = struct.unpack('<l', stream.read(4))[0]
    for _ in range(count):
        fingerprint = struct.unpack('<q', stream.read(8))[0]
        self.server_public_key_fingerprints.append(fingerprint)
def raw_pressure_encode(self, time_usec, press_abs, press_diff1, press_diff2, temperature):
    '''
    Build a RAW_PRESSURE message for the typical setup of one absolute
    pressure and one differential pressure sensor. The sensor values
    should be the raw, UNSCALED ADC values.

    time_usec         : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
    press_abs         : Absolute pressure (raw) (int16_t)
    press_diff1       : Differential pressure 1 (raw, 0 if nonexistant) (int16_t)
    press_diff2       : Differential pressure 2 (raw, 0 if nonexistant) (int16_t)
    temperature       : Raw Temperature measurement (raw) (int16_t)
    '''
    fields = (time_usec, press_abs, press_diff1, press_diff2, temperature)
    return MAVLink_raw_pressure_message(*fields)
pressure and one differential pressure sensor. The
sensor values should be the raw, UNSCALED ADC values.
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
press_abs : Absolute pressure (raw) (int16_t)
press_diff1 : Differential pressure 1 (raw, 0 if nonexistant) (int16_t)
press_diff2 : Differential pressure 2 (raw, 0 if nonexistant) (int16_t)
temperature : Raw Temperature measurement (raw) (int16_t) |
def __convert_json_to_projects_map(self, json):
    """ Convert JSON format to the projects map format
    map[ds][repository] = project
    If a repository is in several projects assign to leaf
    Check that all JSON data is in the database

    :param json: data with the projects to repositories mapping
    :returns: the repositories to projects mapping per data source
    """
    ds_repo_to_prj = {}
    for project in json:
        for ds in json[project]:
            if ds == "meta":
                continue  # not a real data source
            # Fixed: the original nested a second, redundant
            # `if ds not in ds_repo_to_prj:` inside the same check.
            ds_repo_to_prj.setdefault(ds, {})
            for repo in json[project][ds]:
                if repo in ds_repo_to_prj[ds]:
                    if project == ds_repo_to_prj[ds][repo]:
                        logger.debug("Duplicated repo: %s %s %s", ds, repo, project)
                    elif len(project.split(".")) > len(ds_repo_to_prj[ds][repo].split(".")):
                        # More dotted segments means a leaf (sub)project;
                        # prefer the leaf over its ancestor.
                        logger.debug("Changed repo project because we found a leaf: %s leaf vs %s (%s, %s)",
                                     project, ds_repo_to_prj[ds][repo], repo, ds)
                        ds_repo_to_prj[ds][repo] = project
                else:
                    ds_repo_to_prj[ds][repo] = project
    return ds_repo_to_prj
map[ds][repository] = project
If a repository is in several projects assign to leaf
Check that all JSON data is in the database
:param json: data with the projects to repositories mapping
:returns: the repositories to projects mapping per data source |
def initialize_state(self):
    """ Call this to initialize the state of the UI after everything has been connected. """
    if self.__hardware_source:
        # Subscribe to the hardware source's events; the listener objects
        # are kept on self so the subscriptions stay alive.
        self.__profile_changed_event_listener = self.__hardware_source.profile_changed_event.listen(self.__update_profile_index)
        self.__frame_parameters_changed_event_listener = self.__hardware_source.frame_parameters_changed_event.listen(self.__update_frame_parameters)
        self.__data_item_states_changed_event_listener = self.__hardware_source.data_item_states_changed_event.listen(self.__data_item_states_changed)
        self.__acquisition_state_changed_event_listener = self.__hardware_source.acquisition_state_changed_event.listen(self.__acquisition_state_changed)
        self.__log_messages_event_listener = self.__hardware_source.log_messages_event.listen(self.__log_messages)
    # Push the current values to whichever UI callbacks are registered.
    if self.on_display_name_changed:
        self.on_display_name_changed(self.display_name)
    if self.on_binning_values_changed:
        self.on_binning_values_changed(self.__hardware_source.binning_values)
    if self.on_monitor_button_state_changed:
        # Monitor button is enabled only when the hardware source
        # advertises the "has_monitor" feature.
        has_monitor = self.__hardware_source and self.__hardware_source.features.get("has_monitor", False)
        self.on_monitor_button_state_changed(has_monitor, has_monitor)
    self.__update_buttons()
    if self.on_profiles_changed:
        profile_items = self.__hardware_source.modes
        self.on_profiles_changed(profile_items)
        self.__update_profile_index(self.__hardware_source.selected_profile_index)
    if self.on_data_item_states_changed:
        # Start the UI with an empty list of data item states.
        self.on_data_item_states_changed(list()) | Call this to initialize the state of the UI after everything has been connected.
def parse_v3_signing_block(self):
    """
    Parse the V3 signing block and extract all features
    """
    self._v3_signing_data = []
    # calling is_signed_v3 should also load the signature, if any
    if not self.is_signed_v3():
        return
    block_bytes = self._v2_blocks[self._APK_SIG_KEY_V3_SIGNATURE]
    block = io.BytesIO(block_bytes)
    view = block.getvalue()
    # V3 signature Block data format:
    #
    # * signer:
    #   * signed data:
    #     * digests:
    #       * signature algorithm ID (uint32)
    #       * digest (length-prefixed)
    #     * certificates
    #     * minSDK
    #     * maxSDK
    #     * additional attributes
    #   * minSDK
    #   * maxSDK
    #   * signatures
    #   * publickey
    size_sequence = self.read_uint32_le(block)
    if size_sequence + 4 != len(block_bytes):
        raise BrokenAPKError("size of sequence and blocksize does not match")
    # One iteration per signer entry in the block.
    while block.tell() < len(block_bytes):
        off_signer = block.tell()
        size_signer = self.read_uint32_le(block)
        # read the whole signed data up front, since we might need to parse
        # content within the signed data, and mess up the offset otherwise
        len_signed_data = self.read_uint32_le(block)
        signed_data_bytes = block.read(len_signed_data)
        signed_data = io.BytesIO(signed_data_bytes)
        # Digests
        len_digests = self.read_uint32_le(signed_data)
        raw_digests = signed_data.read(len_digests)
        digests = self.parse_signatures_or_digests(raw_digests)
        # Certs: a sequence of length-prefixed certificate blobs.
        certs = []
        len_certs = self.read_uint32_le(signed_data)
        start_certs = signed_data.tell()
        while signed_data.tell() < start_certs + len_certs:
            len_cert = self.read_uint32_le(signed_data)
            cert = signed_data.read(len_cert)
            certs.append(cert)
        # versions
        signed_data_min_sdk = self.read_uint32_le(signed_data)
        signed_data_max_sdk = self.read_uint32_le(signed_data)
        # Additional attributes (kept as an opaque byte string)
        len_attr = self.read_uint32_le(signed_data)
        attr = signed_data.read(len_attr)
        signed_data_object = APKV3SignedData()
        signed_data_object._bytes = signed_data_bytes
        signed_data_object.digests = digests
        signed_data_object.certificates = certs
        signed_data_object.additional_attributes = attr
        signed_data_object.minSDK = signed_data_min_sdk
        signed_data_object.maxSDK = signed_data_max_sdk
        # versions (should be the same as signed data's versions)
        signer_min_sdk = self.read_uint32_le(block)
        signer_max_sdk = self.read_uint32_le(block)
        # Signatures
        len_sigs = self.read_uint32_le(block)
        raw_sigs = block.read(len_sigs)
        sigs = self.parse_signatures_or_digests(raw_sigs)
        # PublicKey
        len_publickey = self.read_uint32_le(block)
        publickey = block.read(len_publickey)
        # Assemble the signer object; _bytes keeps the raw slice for
        # later verification.
        signer = APKV3Signer()
        signer._bytes = view[off_signer:off_signer+size_signer]
        signer.signed_data = signed_data_object
        signer.signatures = sigs
        signer.public_key = publickey
        signer.minSDK = signer_min_sdk
        signer.maxSDK = signer_max_sdk
        self._v3_signing_data.append(signer) | Parse the V2 signing block and extract all features
def start(self):
    """
    Begin running the timer, recording the start time. Calling this
    while the timer is already running is a no-op.

    :sa stop, reset
    """
    if not self._timer.isActive():
        self._starttime = datetime.datetime.now()
        self._timer.start()
this method will do nothing.
:sa stop, reset |
def _get_voltage_angle_var(self, refs, buses):
    """ Returns the voltage angle variable set.
    """
    # Initial point: bus angles converted from degrees to radians.
    Va = array([bus.v_angle * (pi / 180.0) for bus in buses])
    nb = len(buses)
    # Unbounded everywhere, except reference buses which are pinned
    # to their initial angle.
    Vau = Inf * ones(nb)
    Val = -Vau
    Vau[refs] = Va[refs]
    Val[refs] = Va[refs]
    return Variable("Va", nb, Va, Val, Vau)
def add_snmp(data, interfaces):
    """
    Format data for adding SNMP to an engine.

    :param list data: list of interfaces as provided by kw
    :param list interfaces: interfaces to enable SNMP by id
    :return: list of {'address': ..., 'nicid': ...} entries, one per node
        on each enabled NDI
    """
    snmp_interface = []
    if interfaces:  # Not providing interfaces will enable SNMP on all NDIs
        # Bug fix: `map()` returns a one-shot iterator in Python 3, so the
        # repeated `in` membership tests below silently exhausted it after
        # the first interface. Materialize the ids as a set instead.
        interfaces = set(map(str, interfaces))
    for interface in data:
        interface_id = str(interface.get('interface_id'))
        for if_def in interface.get('interfaces', []):
            if 'vlan_id' in if_def:
                # VLAN sub-interfaces are addressed as "<id>.<vlan>"
                _interface_id = '{}.{}'.format(
                    interface_id, if_def['vlan_id'])
            else:
                _interface_id = interface_id
            if _interface_id in interfaces and 'type' not in interface:
                for node in if_def.get('nodes', []):
                    snmp_interface.append(
                        {'address': node.get('address'),
                         'nicid': _interface_id})
    return snmp_interface
:param list data: list of interfaces as provided by kw
:param list interfaces: interfaces to enable SNMP by id |
def to_netcdf(ds, *args, **kwargs):
    """
    Store the given dataset as a netCDF file

    This functions works essentially the same as the usual
    :meth:`xarray.Dataset.to_netcdf` method but can also encode absolute time
    units

    Parameters
    ----------
    ds: xarray.Dataset
        The dataset to store
    %(xarray.Dataset.to_netcdf.parameters)s
    """
    replacements = {}
    for name, var in six.iteritems(ds.variables):
        units = var.attrs.get('units', var.encoding.get('units', None))
        # Datetime variables with absolute-time units need a custom encoder.
        if units == 'day as %Y%m%d.%f' and np.issubdtype(
                var.dtype, np.datetime64):
            encoded = xr.Variable(
                var.dims, AbsoluteTimeEncoder(var), attrs=var.attrs.copy(),
                encoding=var.encoding)
            encoded.attrs['units'] = units
            replacements[name] = encoded
    if replacements:
        # Work on a copy so the caller's dataset is untouched.
        ds = ds.copy()
        ds.update(replacements)
    return xarray_api.to_netcdf(ds, *args, **kwargs)
This functions works essentially the same as the usual
:meth:`xarray.Dataset.to_netcdf` method but can also encode absolute time
units
Parameters
----------
ds: xarray.Dataset
The dataset to store
%(xarray.Dataset.to_netcdf.parameters)s |
def bdh(self, tickers, flds, start_date, end_date, elms=None,
        ovrds=None, longdata=False):
    """
    Fetch historical data for tickers and fields. By default returns a
    pandas DataFrame indexed by "date" with a MultiIndex of columns with
    levels "ticker" and "field". If long data is requested, returns a
    DataFrame with columns ["date", "ticker", "field", "value"].

    Parameters
    ----------
    tickers: {list, string}
        String or list of strings corresponding to tickers
    flds: {list, string}
        String or list of strings corresponding to FLDS
    start_date: string
        String in format YYYYmmdd
    end_date: string
        String in format YYYYmmdd
    elms: list of tuples
        List of tuples where each tuple corresponds to the other elements
        to be set, e.g. [("periodicityAdjustment", "ACTUAL")].
        Refer to the HistoricalDataRequest section in the
        'Services & schemas reference guide' for more info on these values
    ovrds: list of tuples
        List of tuples where each tuple corresponds to the override
        field and value
    longdata: boolean
        Whether data should be returned in long data format or pivoted
    """
    elms = list(elms) if elms else []
    ovrds = ovrds if ovrds else []
    raw = self._bdh_list(tickers, flds, start_date, end_date, elms, ovrds)
    frame = pd.DataFrame(raw, columns=['date', 'ticker', 'field', 'value'])
    frame.loc[:, 'date'] = pd.to_datetime(frame.loc[:, 'date'])
    if longdata:
        return frame
    # Pivot: index by date, columns by (ticker, field).
    pivot_cols = ['ticker', 'field']
    frame = frame.set_index(['date'] + pivot_cols).unstack(pivot_cols)
    frame.columns = frame.columns.droplevel(0)
    return frame
MultiIndex with levels "ticker" and "field" and indexed by "date".
If long data is requested return DataFrame with columns
["date", "ticker", "field", "value"].
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, e.g. [("periodicityAdjustment", "ACTUAL")].
Refer to the HistoricalDataRequest section in the
'Services & schemas reference guide' for more info on these values
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
longdata: boolean
Whether data should be returned in long data format or pivoted |
def _get_secrets_to_compare(old_baseline, new_baseline):
    """
    :rtype: list(tuple)
    :param: tuple is in the following format:
        filename: str; filename where identified secret is found
        secret: dict; PotentialSecret json representation
        is_secret_removed: bool; has the secret been removed from the
            new baseline?
    """
    def _check_string(a, b):
        # cmp-style three-way comparison for strings: -1 / 0 / 1.
        if a == b:
            return 0
        if a < b:
            return -1
        return 1
    def _check_secret(a, b):
        # cmp-style comparison for secret dicts: order by line number,
        # then by hashed secret value.
        if a == b:
            return 0
        if a['line_number'] < b['line_number']:
            return -1
        elif a['line_number'] > b['line_number']:
            return 1
        return _check_string(a['hashed_secret'], b['hashed_secret'])
    secrets_to_compare = []
    # Merge-join the two baselines' sorted filename lists.
    for old_filename, new_filename in _comparison_generator(
            sorted(old_baseline['results'].keys()),
            sorted(new_baseline['results'].keys()),
            compare_fn=_check_string,
    ):
        if not new_filename:
            # File only present in the old baseline: every secret in it
            # counts as removed.
            secrets_to_compare += list(
                map(
                    lambda x: (old_filename, x, True,),
                    old_baseline['results'][old_filename],
                ),
            )
            continue
        elif not old_filename:
            # File only present in the new baseline: every secret in it
            # counts as added.
            secrets_to_compare += list(
                map(
                    lambda x: (new_filename, x, False,),
                    new_baseline['results'][new_filename],
                ),
            )
            continue
        # File present in both baselines: merge-join its secrets.
        for old_secret, new_secret in _comparison_generator(
                old_baseline['results'][old_filename],
                new_baseline['results'][new_filename],
                compare_fn=_check_secret,
        ):
            if old_secret == new_secret:
                # If they are the same, no point flagging it.
                continue
            if old_secret:
                secrets_to_compare.append(
                    (old_filename, old_secret, True,),
                )
            else:
                secrets_to_compare.append(
                    (new_filename, new_secret, False,),
                )
    return secrets_to_compare | :rtype: list(tuple)
:param: tuple is in the following format:
filename: str; filename where identified secret is found
secret: dict; PotentialSecret json representation
is_secret_removed: bool; has the secret been removed from the
new baseline? |
def gather_metadata(fn_glob, parser):
    """Given a glob and a parser object, create a metadata dataframe.

    Parameters
    ----------
    fn_glob : str
        Glob string to find trajectory files.
    parser : descendant of _Parser
        Object that handles conversion of filenames to metadata rows.
    """
    # One metadata row per file matched by the glob.
    rows = (parser.parse_fn(fn) for fn in glob.iglob(fn_glob))
    frame = pd.DataFrame(rows)
    return frame.set_index(parser.index).sort_index()
Parameters
----------
fn_glob : str
Glob string to find trajectory files.
parser : descendant of _Parser
Object that handles conversion of filenames to metadata rows. |
def clear_surroundings(self):
    """
    Blank out the eight grid cells adjacent to the agent's current
    position (purely cosmetic, to make the agent easier to spot on
    screen).
    """
    for row, col in self.grd.eight_neighbors(self.current_y, self.current_x):
        self.grd.set_tile(row, col, ' ')
(just to make it find to see on the screen) |
def _is_zero(x):
    """ Returns True if x is numerically 0 or an array with 0's. """
    if x is None:
        return True
    elif isinstance(x, numbers.Number):
        # Scalar: compare against 0.0 directly.
        return x == 0.0
    elif isinstance(x, np.ndarray):
        # Array: every element must be zero.
        return np.all(x == 0)
    else:
        return False
def params(self):
    """ Return a *copy* (we hope) of the parameters.

    DANGER: Altering properties directly doesn't call model._cache
    """
    params = odict()
    # Only the parameter dicts are needed; the original iterated
    # .items() and discarded the keys.
    for model in self.models.values():
        params.update(model.params)
    return params
DANGER: Altering properties directly doesn't call model._cache |
def format_row(self, row):
    """
    The render method expects rows as lists; here we switch our row format
    from dict to list, respecting the order of the headers.

    :param row: dict mapping column names to values
    :returns: list of (optionally formatted) values ordered like headers
    """
    res = []
    for column in getattr(self, 'headers', []):
        column_name = column['name']
        value = row.get(column_name, '')
        # Single getattr with a default replaces the original
        # hasattr + getattr double lookup.
        formatter = getattr(self, "format_%s" % column_name, None)
        if formatter is not None:
            value = formatter(value)
        res.append(value)
    return res
from dict to list respecting the order of the headers |
def _load_next(self):
    """Load the next days data (or file) without incrementing the date.

    Repeated calls will not advance date/file and will produce the same
    data. Uses info stored in object to either increment the date, or
    the file. Looks for self._load_by_date flag.
    """
    if not self._load_by_date:
        # File-based iteration: next file id.
        return self._load_data(fid=self._fid + 1)
    # Date-based iteration: next calendar day.
    return self._load_data(date=self.date + pds.DateOffset(days=1))
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag. |
def pbkdf1(hash_algorithm, password, salt, iterations, key_length):
    """
    An implementation of PBKDF1 - should only be used for interop with legacy
    systems, not new architectures

    :param hash_algorithm:
        The string name of the hash algorithm to use: "md2", "md5", "sha1"

    :param password:
        A byte string of the password to use an input to the KDF

    :param salt:
        A cryptographic random byte string

    :param iterations:
        The numbers of iterations to use when deriving the key

    :param key_length:
        The length of the desired key in bytes

    :return:
        The derived key as a byte string
    """
    # --- argument validation ---
    if not isinstance(password, byte_cls):
        raise TypeError(pretty_message(
            '''
            password must be a byte string, not %s
            ''',
            (type_name(password))
        ))
    if not isinstance(salt, byte_cls):
        raise TypeError(pretty_message(
            '''
            salt must be a byte string, not %s
            ''',
            (type_name(salt))
        ))
    if not isinstance(iterations, int_types):
        raise TypeError(pretty_message(
            '''
            iterations must be an integer, not %s
            ''',
            (type_name(iterations))
        ))
    if iterations < 1:
        raise ValueError(pretty_message(
            '''
            iterations must be greater than 0 - is %s
            ''',
            repr(iterations)
        ))
    if not isinstance(key_length, int_types):
        raise TypeError(pretty_message(
            '''
            key_length must be an integer, not %s
            ''',
            (type_name(key_length))
        ))
    if key_length < 1:
        raise ValueError(pretty_message(
            '''
            key_length must be greater than 0 - is %s
            ''',
            repr(key_length)
        ))
    if hash_algorithm not in set(['md2', 'md5', 'sha1']):
        raise ValueError(pretty_message(
            '''
            hash_algorithm must be one of "md2", "md5", "sha1", not %s
            ''',
            repr(hash_algorithm)
        ))
    # The derived key can never be longer than the hash digest: 16 bytes
    # for md2/md5, 20 bytes for sha1.
    if key_length > 16 and hash_algorithm in set(['md2', 'md5']):
        raise ValueError(pretty_message(
            '''
            key_length can not be longer than 16 for %s - is %s
            ''',
            (hash_algorithm, repr(key_length))
        ))
    if key_length > 20 and hash_algorithm == 'sha1':
        raise ValueError(pretty_message(
            '''
            key_length can not be longer than 20 for sha1 - is %s
            ''',
            repr(key_length)
        ))
    # --- key derivation ---
    # PBKDF1: T_1 = Hash(password + salt), T_i = Hash(T_{i-1}); the key is
    # the leading key_length bytes of T_iterations.
    # NOTE(review): stock hashlib builds have no "md2"; getattr would raise
    # AttributeError for it - confirm md2 support is provided elsewhere.
    algo = getattr(hashlib, hash_algorithm)
    output = algo(password + salt).digest()
    for _ in range(2, iterations + 1):
        output = algo(output).digest()
    return output[:key_length] | An implementation of PBKDF1 - should only be used for interop with legacy
systems, not new architectures
:param hash_algorithm:
The string name of the hash algorithm to use: "md2", "md5", "sha1"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:return:
The derived key as a byte string |
def insert(self, path, simfile):
    """
    Insert a file into the filesystem. Returns whether the operation was successful.
    """
    if self.state is not None:
        simfile.set_state(self.state)
    mountpoint, chunks = self.get_mountpoint(path)
    if mountpoint is not None:
        # The path belongs to a mounted filesystem; delegate to it.
        return mountpoint.insert(chunks, simfile)
    self._files[self._join_chunks(chunks)] = simfile
    return True
def ntp_authentication_key_encryption_type_sha1_type_sha1(self, **kwargs):
    """Auto Generated Code

    Builds the brocade-ntp config XML for an SHA1-encrypted NTP
    authentication key and hands it to the callback.

    Required kwargs: 'keyid', 'sha1'. Optional kwarg: 'callback'
    (defaults to self._callback).
    """
    config = ET.Element("config")
    ntp = ET.SubElement(config, "ntp",
                        xmlns="urn:brocade.com:mgmt:brocade-ntp")
    auth_key = ET.SubElement(ntp, "authentication-key")
    ET.SubElement(auth_key, "keyid").text = kwargs.pop('keyid')
    enc_type = ET.SubElement(auth_key, "encryption-type")
    sha1_type = ET.SubElement(enc_type, "sha1-type")
    ET.SubElement(sha1_type, "sha1").text = kwargs.pop('sha1')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def setContentLen(self, content, len):
    """Replace the content of a node. NOTE: @content is supposed
    to be a piece of XML CDATA, so it allows entity references,
    but XML special chars need to be escaped first by using
    xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). """
    # Thin wrapper over the C-level libxml2 binding; self._o is the
    # underlying node pointer.
    # NOTE(review): `len` shadows the builtin and is forwarded as the
    # content length - confirm byte vs. character semantics in libxml2.
    libxml2mod.xmlNodeSetContentLen(self._o, content, len) | Replace the content of a node. NOTE: @content is supposed
to be a piece of XML CDATA, so it allows entity references,
but XML special chars need to be escaped first by using
xmlEncodeEntitiesReentrant() resp. xmlEncodeSpecialChars(). |
def GetCBVs(campaign, model='nPLD', clobber=False, **kwargs):
    '''
    Computes the CBVs for a given campaign.

    :param int campaign: The campaign number
    :param str model: The name of the :py:obj:`everest` model. Default `nPLD`
    :param bool clobber: Overwrite existing files? Default `False`
    '''
    # Initialize logging?
    if len(logging.getLogger().handlers) == 0:
        InitLog(file_name=None, screen_level=logging.DEBUG)
    log.info('Computing CBVs for campaign %d...' % (campaign))
    # Output path
    path = os.path.join(EVEREST_DAT, 'k2', 'cbv', 'c%02d' % campaign)
    if not os.path.exists(path):
        os.makedirs(path)
    # Get the design matrix (recompute if clobbering or not cached on disk)
    xfile = os.path.join(path, 'X.npz')
    if clobber or not os.path.exists(xfile):
        log.info('Obtaining light curves...')
        time = None
        for module in range(2, 25):
            # Get the light curves for this module, caching them per-module
            lcfile = os.path.join(path, '%d.npz' % module)
            if clobber or not os.path.exists(lcfile):
                try:
                    time, breakpoints, fluxes, errors, kpars = GetStars(
                        campaign, module, model=model, **kwargs)
                except AssertionError:
                    # No usable stars for this module; skip it
                    continue
                np.savez(lcfile, time=time, breakpoints=breakpoints,
                         fluxes=fluxes, errors=errors, kpars=kpars)
            # Load the light curves; stack modules row-wise
            lcs = np.load(lcfile)
            if time is None:
                time = lcs['time']
                breakpoints = lcs['breakpoints']
                fluxes = lcs['fluxes']
                errors = lcs['errors']
                kpars = lcs['kpars']
            else:
                fluxes = np.vstack([fluxes, lcs['fluxes']])
                errors = np.vstack([errors, lcs['errors']])
                kpars = np.vstack([kpars, lcs['kpars']])
        # Compute the design matrix: first column is ones, the remaining
        # 'ncbv' (default 5) columns come from SysRem
        log.info('Running SysRem...')
        X = np.ones((len(time), 1 + kwargs.get('ncbv', 5)))
        # Loop over the segments
        # NOTE(review): new_fluxes is assigned but never used below -
        # candidate for removal.
        new_fluxes = np.zeros_like(fluxes)
        for b in range(len(breakpoints)):
            # Get the current segment's indices
            inds = GetChunk(time, breakpoints, b)
            # Update the error arrays with the white GP component
            for j in range(len(errors)):
                errors[j] = np.sqrt(errors[j] ** 2 + kpars[j][0] ** 2)
            # Get de-trended fluxes
            X[inds, 1:] = SysRem(time[inds], fluxes[:, inds],
                                 errors[:, inds], **kwargs).T
        # Save
        np.savez(xfile, X=X, time=time, breakpoints=breakpoints)
    else:
        # Load from disk
        data = np.load(xfile)
        X = data['X'][()]
        time = data['time'][()]
        breakpoints = data['breakpoints'][()]
    # Plot the first (up to) six CBV columns, one panel per CBV
    plotfile = os.path.join(path, 'X.pdf')
    if clobber or not os.path.exists(plotfile):
        fig, ax = pl.subplots(2, 3, figsize=(12, 8))
        fig.subplots_adjust(left=0.05, right=0.95)
        ax = ax.flatten()
        for axis in ax:
            axis.set_xticks([])
            axis.set_yticks([])
        for b in range(len(breakpoints)):
            inds = GetChunk(time, breakpoints, b)
            for n in range(min(6, X.shape[1])):
                ax[n].plot(time[inds], X[inds, n])
                ax[n].set_title(n, fontsize=14)
        fig.savefig(plotfile, bbox_inches='tight')
    return X | Computes the CBVs for a given campaign.
:param int campaign: The campaign number
:param str model: The name of the :py:obj:`everest` model. Default `nPLD`
:param bool clobber: Overwrite existing files? Default `False` |
def search(cls, element, pattern):
    """
    Helper method that returns a list of elements that match the
    given path pattern of form {type}.{group}.{label}.

    The input may be a Layout, an Overlay type or a single
    Element.
    """
    if isinstance(element, Layout):
        return [el for cell in element for el in cls.search(cell, pattern)]
    if isinstance(element, (NdOverlay, Overlay)):
        return [el for el in element if el.matches(pattern)]
    if isinstance(element, Element):
        return [element] if element.matches(pattern) else []
    # Fixed: the original fell through and implicitly returned None for
    # unmatched types, which would break iteration in the Layout branch
    # above; return an empty list to keep the return type consistent.
    return []
given path pattern of form {type}.{group}.{label}.
The input may be a Layout, an Overlay type or a single
Element. |
def to_python(self, value):
    """
    Convert the input JSON value into python structures, raises
    django.core.exceptions.ValidationError if the data can't be converted.
    """
    if isinstance(value, dict):
        # Already deserialized.
        return value
    if self.blank and not value:
        return None
    if not isinstance(value, string_types):
        # Anything else is passed through unchanged.
        return value
    try:
        return json.loads(value)
    except Exception as e:
        raise ValidationError(str(e))
django.core.exceptions.ValidationError if the data can't be converted. |
def visit_wavedrom(self, node):
    """
    Visit the wavedrom node
    """
    output_format = determine_format(self.builder.supported_image_types)
    if output_format is None:
        raise SphinxError(__("Cannot determine a suitable output format"))
    # Render the diagram to a uniquely named file in the builder's
    # image directory.
    basename = "wavedrom-{}".format(uuid4())
    output_dir = path.join(self.builder.outdir, self.builder.imagedir)
    image_filename = render_wavedrom(self, node, output_dir, basename,
                                     output_format)
    # Unpack the image node again: the file now exists at the build
    # destination, so point the image node at it, attach it as a child
    # and raise SkipDeparture so the builder visits children with the
    # standard image visitor.
    image_node = node['image_node']
    image_node['uri'] = os.path.join(self.builder.imgpath, image_filename)
    node.append(image_node)
    raise nodes.SkipDeparture
def _gotitem(self,
             key: Union[str, List[str]],
             ndim: int,
             subset: Optional[Union[Series, ABCDataFrame]] = None,
             ) -> Union[Series, ABCDataFrame]:
    """
    Sub-classes to define. Return a sliced object.

    Parameters
    ----------
    key : string / list of selections
    ndim : 1,2
        requested ndim of result
    subset : object, default None
        subset to act on
    """
    # An explicitly supplied 1-dim subset (a Series) is returned as-is;
    # note the original only short-circuits when subset is not None.
    if subset is not None and subset.ndim == 1:
        return subset
    target = self if subset is None else subset
    # TODO: _shallow_copy(subset)?
    return target[key]
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on |
def reduce_by_device(parallelism, data, reduce_fn):
    """Reduces data per device.

    This can be useful, for example, if we want to all-reduce n tensors on k<n
    devices (like during eval when we have only one device). We call
    reduce_by_device() to first sum the tensors per device, then call our usual
    all-reduce operation to create one sum per device, followed by
    expand_by_device, to create the appropriate number of pointers to these
    results. See all_reduce_ring() below for an example of how this is used.

    Args:
      parallelism: a expert_utils.Parallelism object
      data: a list of Tensors with length parallelism.n
      reduce_fn: a function taking a list of Tensors. e.g. tf.add_n

    Returns:
      device_parallelism: a Parallelism object with each device listed only once.
      reduced_data: A list of Tensors, one per device.
    """
    # Group the tensors by device, remembering first-seen device order.
    unique_devices = []
    device_to_data = {}
    for dev, datum in zip(parallelism.devices, data):
        if dev in device_to_data:
            device_to_data[dev].append(datum)
        else:
            unique_devices.append(dev)
            device_to_data[dev] = [datum]
    device_parallelism = Parallelism(unique_devices)
    grouped_data = [device_to_data[dev] for dev in unique_devices]
    # Apply the reduction once per unique device.
    return device_parallelism, device_parallelism(reduce_fn, grouped_data)
This can be useful, for example, if we want to all-reduce n tensors on k<n
devices (like during eval when we have only one device). We call
reduce_by_device() to first sum the tensors per device, then call our usual
all-reduce operation to create one sum per device, followed by
expand_by_device, to create the appropriate number of pointers to these
results. See all_reduce_ring() below for an example of how this is used.
Args:
parallelism: a expert_utils.Parallelism object
data: a list of Tensors with length parallelism.n
reduce_fn: a function taking a list of Tensors. e.g. tf.add_n
Returns:
device_parallelism: a Parallelism object with each device listed only once.
reduced_data: A list of Tensors, one per device. |
def connect(self):
    """
    This method connects to RabbitMQ, returning the connection handle.
    When the connection is established, the on_connection_open method
    will be invoked by pika.

    :rtype: pika.SelectConnection
    """
    # Lazy %-args: the URL is only interpolated when INFO logging is enabled
    # (the previous code formatted the string eagerly with `%`).
    self._logger.info('Connecting to %s', self._url)
    return adapters.TornadoConnection(pika.URLParameters(self._url),
                                      self.on_connection_open,
                                      custom_ioloop=self._ioloop_instance)
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection |
def cached_property(method):
    """
    Decorator turning a no-argument method into a lazily computed,
    per-instance cached property.

    :param method: a method without arguments except self
    :returns: a cached property whose value is computed once and then
        stored in the instance ``__dict__``
    """
    attr = method.__name__

    def getter(self):
        # Compute on first access only; later reads hit the instance dict.
        if attr not in self.__dict__:
            self.__dict__[attr] = method(self)
        return self.__dict__[attr]

    getter.__name__ = method.__name__
    getter.__doc__ = method.__doc__
    return property(getter)
:returns: a cached property |
def get_deployment_by_slot(self, service_name, deployment_slot):
    '''
    Returns configuration information, status, and system properties for
    a deployment.

    service_name:
        Name of the hosted service.
    deployment_slot:
        The environment to which the hosted service is deployed. Valid
        values are: staging, production
    '''
    # Both arguments are mandatory.
    for arg_name, arg_value in (('service_name', service_name),
                                ('deployment_slot', deployment_slot)):
        _validate_not_none(arg_name, arg_value)
    path = self._get_deployment_path_using_slot(service_name, deployment_slot)
    return self._perform_get(path, Deployment)
a deployment.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production |
def get_value(self, field, quick):
    # type: (Field, bool) -> Any
    """ Ask user the question represented by this instance.

    Args:
        field (Field):
            The field we're asking the user to provide the value for.
        quick (bool):
            Enable quick mode. In quick mode the form reduces the number
            of questions asked by using defaults wherever possible. This
            greatly reduces the interactions required of the user, but
            obviously limits the user's choices, so it should probably be
            enabled only by a specific user action (like passing a
            ``--quick`` flag etc.).

    Returns:
        The user response converted to a python type using the
        :py:attr:`cliform.core.Field.type` converter.
    """
    # A callable default is resolved against the form instance.
    default = field.default(self) if callable(field.default) else field.default

    # Quick mode: accept the default without prompting, if one exists.
    if quick and default is not None:
        return default

    shell.cprint('<90>{}', field.help)

    # Re-prompt until the answer converts cleanly.
    while True:
        try:
            raw = click.prompt(field.pretty_prompt, default=default)
            return field.type(raw)
        except ValueError:
            shell.cprint("<31>Unsupported value")
Args:
field (Field):
The field we're asking the user to provide the value for.
quick (bool):
Enable quick mode. In quick mode, the form will reduce the
number of question asked by using defaults wherever possible.
This can greatly reduce the number of interactions required on
the user part, but will obviously limit the user choices. This
should probably be enabled only by a specific user action
(like passing a ``--quick`` flag etc.).
Returns:
The user response converted to a python type using the
:py:attr:`cliform.core.Field.type` converter. |
def get_fetch_request(self, method, fetch_url, *args, **kwargs):
    """Perform the actual HTTP request.

    Handy to override if you want to modify the request right before it is
    passed to requests, or to do something extra-special customized.

    :param method: string, the http method (eg, GET, POST)
    :param fetch_url: string, the full url with query params
    :param *args: any other positional arguments, passed through to requests
    :param **kwargs: any keyword arguments, passed through to requests
    :returns: a requests.Response compatible object instance
    """
    response = requests.request(method, fetch_url, *args, **kwargs)
    return response
to requests, or you want to do something extra special customized
:param method: string, the http method (eg, GET, POST)
:param fetch_url: string, the full url with query params
:param *args: any other positional arguments
:param **kwargs: any keyword arguments to pass to requests
:returns: a requests.Response compatible object instance |
def component_title(component):
    """
    Label, title and caption

    The title is the label text plus the title text, and may contain
    inline markup (italic tags, etc.). An 'abstract' component with no
    label or title falls back to the literal 'Abstract'.
    """
    label_text = component.get('label') or u''
    title_text = component.get('title') or u''

    title = unicode_value(label_text)
    # Separate label and title with a single space only when both exist.
    if label_text != '' and title_text != '':
        title += ' '
    title += unicode_value(title_text)

    if title == '' and component.get('type') == 'abstract':
        title = 'Abstract'
    return title
Title is the label text plus the title text
Title may contain italic tag, etc. |
def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive,
                      quit):
    """ handles the timeout logic """
    # When a timeout event exists, poll it in 0.1s slices until either the
    # event fires (run the timeout callback) or we are told to quit.
    if timeout_event:
        while not quit.is_set():
            if event_wait(timeout_event, 0.1):
                timeout_fn()
                break

    # handle_exit_code is a function ONLY if our command was NOT waited on as
    # part of its spawning — in other words, it's probably a background
    # command.
    #
    # This reports the exit code exception in our thread. It's purely for the
    # user's awareness and cannot be caught or used in any way, so it's ok to
    # suppress this during the tests.
    if handle_exit_code and not RUNNING_TESTS:  # pragma: no cover
        alive = True
        exit_code = None
        while alive:
            quit.wait(1)
            alive, exit_code = is_alive()
        handle_exit_code(exit_code)
def check_write_permissions(file):
    """
    Check if we can write to the given file.

    Since we might detach the process to run in the background, we might
    otherwise never find out that writing failed and get an ugly exit
    message on startup, for example:
        ERROR: Child exited immediately with non-zero exit code 127
    So we catch this error upfront and print a nicer error message with a
    hint on how to fix it, then exit with status 1.
    """
    try:
        # Open in append mode (does not truncate existing content) and close
        # immediately — the old code leaked the file handle.
        with open(file, 'a'):
            pass
    except IOError:
        print("Can't open file {}. "
              "Please grant write permissions or change the path in your config".format(file))
        sys.exit(1)
Otherwise since we might detach the process to run in the background
we might never find out that writing failed and get an ugly
exit message on startup. For example:
ERROR: Child exited immediately with non-zero exit code 127
So we catch this error upfront and print a nicer error message
with a hint on how to fix it. |
def workspace_backup_list(ctx):
    """
    List backups
    """
    workspace = Workspace(ctx.resolver, directory=ctx.directory,
                          mets_basename=ctx.mets_basename,
                          automatic_backup=ctx.automatic_backup)
    manager = WorkspaceBackupManager(workspace)
    for backup in manager.list():
        print(backup)
def connect(self, pattern, presenter, **kwargs):
    """ Shortcut for self.route_map().connect() method.

    A presenter class may be passed instead of its name — in that case the
    class is saved in the presenter collection and its declared name is
    used for route matching.

    :param pattern: same as pattern in :meth:`.WWebRouteMap.connect` method
    :param presenter: presenter name or presenter class
    :param kwargs: same as kwargs in :meth:`.WWebRouteMap.connect` method
    :return: None
    """
    if isinstance(presenter, type) and issubclass(presenter, WWebPresenter):
        # Register the class, then route by its declared name.
        self.presenter_collection().add(presenter)
        presenter = presenter.__presenter_name__()
    self.__route_map.connect(pattern, presenter, **kwargs)
its name - in that case such class will be saved in presenter collection and it will be available in
route matching.
:param pattern: same as pattern in :meth:`.WWebRouteMap.connect` method
:param presenter: presenter name or presenter class
:param kwargs: same as kwargs in :meth:`.WWebRouteMap.connect` method
:return: None |
def getquery(query):
    'Performs a query and gets the results; returns an empty list on any failure.'
    data = list()
    try:
        cursor = connection.cursor()
        try:
            cursor.execute(query)
            data = cursor.fetchall()
        finally:
            # Close the cursor even when execute/fetch fails — the old code
            # skipped close() on any error.
            cursor.close()
    except Exception:
        # Best-effort by design: any DB error yields an empty result.
        # (Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.)
        data = list()
    return data
def record(self):
    # type: () -> bytes
    '''
    A method to generate the string representing this UDF Partition Volume
    Descriptor.

    Parameters:
     None.
    Returns:
     A string representing this UDF Partition Volume Descriptor.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor not initialized')

    # Pack the full descriptor with a 16-byte zero placeholder where the
    # descriptor tag belongs, then slice the placeholder off ([16:]); the
    # real tag is generated from the packed body and prepended below.
    rec = struct.pack(self.FMT, b'\x00' * 16,
                      self.vol_desc_seqnum, self.part_flags,
                      self.part_num, self.part_contents.record(),
                      self.part_contents_use.record(), self.access_type,
                      self.part_start_location, self.part_length,
                      self.impl_ident.record(), self.implementation_use,
                      b'\x00' * 156)[16:]
    # desc_tag.record(rec) produces the descriptor tag computed over the body.
    return self.desc_tag.record(rec) + rec
Descriptor.
Parameters:
None.
Returns:
A string representing this UDF Partition Volume Descriptor. |
def getElementsByType(self, type):
    """
    retrieves all Elements that are of type type
    @type type: class
    @param type: type of the element
    """
    # Filter the full hierarchy down to instances of the requested class.
    return [element
            for element in self.getAllElementsOfHirarchy()
            if isinstance(element, type)]
@type type: class
@param type: type of the element |
def up(self):
    """
    Move this object up one position (swap with the nearest object above).
    """
    above = (self.get_ordering_queryset()
             .filter(order__lt=self.order)
             .order_by('-order'))
    self.swap(above)
self.swap(self.get_ordering_queryset().filter(order__lt=self.order).order_by('-order')) | Move this object up one position. |
def read_cyc(this, fn, conv=1.0):
    """ Read the lattice information from a cyc.dat file (i.e., tblmd input file)

    Parameters
    ----------
    this : object
        Target (Atoms-like) object; receives the lattice via set_cell/set_pbc.
    fn : str
        Path to the cyc.dat file.
    conv : float, optional
        Unit-conversion factor applied to every cell component.
    """
    f = paropen(fn, "r")
    try:
        # Skip the four header lines.
        for _ in range(4):
            f.readline()
        cell = np.zeros((3, 3))
        # The next three lines hold the three cell vectors, one per line;
        # each line fills a *column* of the cell matrix (as before).
        for col in range(3):
            # A list is required here: the old code indexed the result of
            # map(), which is an un-indexable iterator on Python 3.
            s = [float(x) for x in f.readline().split()]
            cell[0, col] = s[0] * conv
            cell[1, col] = s[1] * conv
            cell[2, col] = s[2] * conv
        this.set_cell(cell)
        this.set_pbc(True)
    finally:
        # Close the file even if parsing fails (the old code leaked it).
        f.close()
def send_update(url_id, dataset):
    """
    Send request to Seeder's API with data changed by user.

    Args:
        url_id (str): ID used as identification in Seeder.
        dataset (dict): WA-KAT dataset sent from frontend.

    Returns:
        None. Failures are reported to stderr and swallowed (best effort).
    """
    data = _convert_to_seeder_format(dataset)
    if not data:
        return
    try:
        _send_request(url_id, json=data, req_type=requests.patch)
    except Exception as e:
        # `e.message` was removed in Python 3; str(e) works on both 2 and 3.
        sys.stderr.write("Seeder PATCH error: ")  # TODO: better error reporting!
        sys.stderr.write(str(e))
        return None
Args:
url_id (str): ID used as identification in Seeder.
dataset (dict): WA-KAT dataset sent from frontend. |
def decode_from_sha(sha):
    """Strip trailing zero-byte ("00") padding from a hex-encoded sha and
    decode it back to raw bytes."""
    raw = sha
    if isinstance(raw, str):
        raw = raw.encode('utf-8')
    trimmed = re.sub(rb'(00)*$', b'', raw)
    return codecs.decode(trimmed, "hex_codec")
def highlightNextMatch(self):
    """
    Select and highlight the next match in the set of matches.
    """
    # If this method was called on an empty input field (ie.
    # if the user hit <ctrl>+s again) then pick the default
    # selection.
    if self.qteText.toPlainText() == '':
        self.qteText.setText(self.defaultChoice)
        return

    # If the matchIdx variable is out of bounds (eg. the last possible
    # match is already selected) then wrap it around.
    # NOTE(review): both wrap branches return without re-highlighting, so
    # the wrapped-to match only gets highlighted on the *next* invocation
    # — confirm this is the intended behaviour.
    if self.selMatchIdx < 0:
        self.selMatchIdx = 0
        return
    if self.selMatchIdx >= len(self.matchList):
        self.selMatchIdx = 0
        return

    # Shorthand.
    SCI = self.qteWidget

    # Undo the highlighting of the previously selected match.
    # (When selMatchIdx == 0 this addresses matchList[-1], the last match,
    # via Python's negative indexing.)
    start, stop = self.matchList[self.selMatchIdx - 1]
    line, col = SCI.lineIndexFromPosition(start)
    SCI.SendScintilla(SCI.SCI_STARTSTYLING, start, 0xFF)
    SCI.SendScintilla(SCI.SCI_SETSTYLING, stop - start, 30)

    # Highlight the next match. Presumably style 31 marks the active match
    # and 30 an inactive one — verify against the widget's style setup.
    start, stop = self.matchList[self.selMatchIdx]
    SCI.SendScintilla(SCI.SCI_STARTSTYLING, start, 0xFF)
    SCI.SendScintilla(SCI.SCI_SETSTYLING, stop - start, 31)

    # Place the cursor at the start of the currently selected match.
    line, col = SCI.lineIndexFromPosition(start)
    SCI.setCursorPosition(line, col)
    self.selMatchIdx += 1
def verify(self, signed):
    '''
    Recover the message (digest) from the signature using the public key

    :param str signed: The signature created with the private key
    :rtype: str
    :return: The message (digest) recovered from the signature
    :raises ValueError: if the decryption/verification failed
    '''
    # Allocate a buffer large enough for the signature. Freed by ctypes.
    buf = create_string_buffer(libcrypto.RSA_size(self._rsa))
    signed = salt.utils.stringutils.to_bytes(signed)
    # RSA_public_decrypt returns the length of the recovered message, or a
    # negative value on failure (bad signature / wrong padding).
    size = libcrypto.RSA_public_decrypt(len(signed), signed, buf, self._rsa, RSA_X931_PADDING)
    if size < 0:
        # NOTE(review): the original docstring promised an empty string on
        # failure, but the code raises ValueError — docstring updated to
        # match the code; confirm which contract callers rely on.
        raise ValueError('Unable to decrypt message')
    return buf[0:size]
:param str signed: The signature created with the private key
:rtype: str
:return: The message (digest) recovered from the signature, or an empty
string if the decryption failed |
def _WriteFileChunk(self, chunk):
    """Yields binary chunks, respecting archive file headers and footers.

    Args:
      chunk: the StreamedFileChunk to be written
    """
    if chunk.chunk_index == 0:
        # First chunk of a file: emit the archive entry header.
        # Make sure size of the original file is passed. It's required
        # when output_writer is StreamingTarWriter.
        st = os.stat_result((0o644, 0, 0, 0, 0, 0, chunk.total_size, 0, 0, 0))
        target_path = _ClientPathToString(chunk.client_path, prefix=self.prefix)
        yield self.archive_generator.WriteFileHeader(target_path, st=st)
    yield self.archive_generator.WriteFileChunk(chunk.data)
    if chunk.chunk_index == chunk.total_chunks - 1:
        # Last chunk: close out the archive entry and record the file as done.
        yield self.archive_generator.WriteFileFooter()
        self.archived_files.add(chunk.client_path)
Args:
chunk: the StreamedFileChunk to be written |
def __write_srgb(self, outfile):
    """
    Write colour reference information: gamma, iccp etc.

    This method should be called only from ``write_idat`` method
    or chunk order will be ruined.
    """
    # sRGB and iCCP are mutually exclusive per the PNG specification.
    # (The original message concatenated to "could notbe present"; a space
    # has been added.)
    if self.rendering_intent is not None and self.icc_profile is not None:
        raise FormatError("sRGB(via rendering_intent) and iCCP could not "
                          "be present simultaneously")
    # http://www.w3.org/TR/PNG/#11sRGB
    if self.rendering_intent is not None:
        write_chunk(outfile, 'sRGB',
                    struct.pack("B", int(self.rendering_intent)))
    # http://www.w3.org/TR/PNG/#11cHRM
    if (self.white_point is not None and self.rgb_points is None) or\
       (self.white_point is None and self.rgb_points is not None):
        # logging.warn is deprecated; use logging.warning.
        logging.warning("White and RGB points should be both specified to"
                        " write cHRM chunk")
        self.white_point = None
        self.rgb_points = None
    if (self.white_point is not None and self.rgb_points is not None):
        data = (self.white_point[0], self.white_point[1],
                self.rgb_points[0][0], self.rgb_points[0][1],
                self.rgb_points[1][0], self.rgb_points[1][1],
                self.rgb_points[2][0], self.rgb_points[2][1],
                )
        # cHRM stores each value as round(value * 100000) in a 32-bit int.
        write_chunk(outfile, 'cHRM',
                    struct.pack("!8L",
                                *[int(round(it * 1e5)) for it in data]))
    # http://www.w3.org/TR/PNG/#11gAMA
    if self.gamma is not None:
        write_chunk(outfile, 'gAMA',
                    struct.pack("!L", int(round(self.gamma * 1e5))))
    # http://www.w3.org/TR/PNG/#11iCCP
    if self.icc_profile is not None:
        if self.compression is None or self.compression == -1:
            comp_level = 6  # zlib.Z_DEFAULT_COMPRESSION
        else:
            comp_level = self.compression
        # iCCP layout: profile name, null separator, compression method
        # byte (0), then the zlib-compressed profile.
        write_chunk(outfile, 'iCCP',
                    self.icc_profile[0] + zerobyte +
                    zerobyte +
                    zlib.compress(self.icc_profile[1], comp_level))
This method should be called only from ``write_idat`` method
or chunk order will be ruined. |
def read(*paths):
    """Join *paths* into one file path and return its UTF-8 text content."""
    path = os.path.join(*paths)
    with codecs.open(path, mode='r', encoding='utf-8') as fh:
        contents = fh.read()
    return contents
def get_cognitive_process_id(self):
    """Gets the grade ``Id`` associated with the cognitive process.

    return: (osid.id.Id) - the grade ``Id``
    raise: IllegalState - ``has_cognitive_process()`` is ``false``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Guard clause: a falsy stored value means no cognitive process is set.
    raw_id = self._my_map['cognitiveProcessId']
    if not raw_id:
        raise errors.IllegalState('this Objective has no cognitive_process')
    return Id(raw_id)
return: (osid.id.Id) - the grade ``Id``
raise: IllegalState - ``has_cognitive_process()`` is ``false``
*compliance: mandatory -- This method must be implemented.* |
def get_instance(
    self,
    name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Gets the details of a specific Redis instance.

    Example:
        >>> from google.cloud import redis_v1beta1
        >>>
        >>> client = redis_v1beta1.CloudRedisClient()
        >>>
        >>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
        >>>
        >>> response = client.get_instance(name)

    Args:
        name (str): Required. Redis instance resource name using the form:
            ``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
            where ``location_id`` refers to a GCP region
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.redis_v1beta1.types.Instance` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method with retry/timeout logic once, lazily,
    # and memoize the wrapped callable.
    api_call = self._inner_api_calls.get("get_instance")
    if api_call is None:
        api_call = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_instance,
            default_retry=self._method_configs["GetInstance"].retry,
            default_timeout=self._method_configs["GetInstance"].timeout,
            client_info=self._client_info,
        )
        self._inner_api_calls["get_instance"] = api_call

    request = cloud_redis_pb2.GetInstanceRequest(name=name)
    return api_call(request, retry=retry, timeout=timeout, metadata=metadata)
Example:
>>> from google.cloud import redis_v1beta1
>>>
>>> client = redis_v1beta1.CloudRedisClient()
>>>
>>> name = client.instance_path('[PROJECT]', '[LOCATION]', '[INSTANCE]')
>>>
>>> response = client.get_instance(name)
Args:
name (str): Required. Redis instance resource name using the form:
``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
where ``location_id`` refers to a GCP region
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.redis_v1beta1.types.Instance` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
def __sweeten(self, dumper: 'Dumper', class_: Type, node: Node) -> None:
    """Applies the user's yatiml_sweeten() function(s), if any.

    Sweetening is done for the base classes first, then for the \
    derived classes, down the hierarchy to the class we're \
    constructing.

    Args:
        dumper: The dumper that is dumping this object.
        class_: The type of the object to be dumped.
        node: The node representing the object being dumped.
    """
    for base_class in class_.__bases__:
        if base_class in dumper.yaml_representers:
            # NOTE(review): this logs self.class_ (the class originally
            # being dumped) on every level of the recursion, even though
            # we are about to recurse into base_class — confirm whether
            # base_class.__name__ was intended here.
            logger.debug('Sweetening for class {}'.format(
                self.class_.__name__))
            self.__sweeten(dumper, base_class, node)
    if hasattr(class_, 'yatiml_sweeten'):
        class_.yatiml_sweeten(node)
Sweetening is done for the base classes first, then for the \
derived classes, down the hierarchy to the class we're \
constructing.
Args:
dumper: The dumper that is dumping this object.
class_: The type of the object to be dumped.
represented_object: The object to be dumped. |
def copyFuncVersionedLib(dest, source, env):
    """Install a versioned library into a destination by copying,
    (including copying permission/mode bits) and then creating
    required symlinks.

    Returns 0 on success; raises SCons.Errors.UserError if `source`
    is a directory.
    """
    if os.path.isdir(source):
        raise SCons.Errors.UserError("cannot install directory `%s' as a version library" % str(source))
    else:
        # Remove any pre-existing file/link at the destination. A missing
        # destination is fine, so only OSError is swallowed — the previous
        # bare `except:` also hid genuine programming errors.
        try:
            os.remove(dest)
        except OSError:
            pass
        shutil.copy2(source, dest)
        st = os.stat(source)
        # Preserve the source's mode bits, but ensure the installed copy
        # is owner-writable.
        os.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
        installShlibLinks(dest, source, env)

    return 0
def wait_for_connection(self, timeout=10):
    """
    Busy loop until connection is established.

    Aborts after `timeout` seconds. Returns a boolean: whether the
    connection could be established in time.
    """
    started = datetime.datetime.now()
    while not self.connected:
        elapsed = (datetime.datetime.now() - started).total_seconds()
        if elapsed > timeout:
            return False
        time.sleep(0.5)
    return True
def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
    """ Plots forecasts with the estimated model

    Parameters
    ----------
    h : int (default : 5)
        How many steps ahead would you like to forecast?
    past_values : int (default : 20)
        How many past observations to show on the forecast graph?
    intervals : boolean
        Would you like to show prediction intervals for the forecast?

    Returns
    ----------
    - Plot of the forecast
    """
    figsize = kwargs.get('figsize',(10,7))

    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        import matplotlib.pyplot as plt
        import seaborn as sns  # NOTE(review): appears unused except for its plot-styling side effect — confirm
        # Retrieve data, dates and (transformed) latent variables
        mu, Y = self._model(self.latent_variables.get_z_values())
        date_index = self.shift_dates(h)
        t_z = self.transform_z()
        # Get mean prediction and simulations (for errors)
        mean_values = self._mean_prediction(mu, Y, h, t_z)
        if intervals is True:
            sim_values = self._sim_prediction(mu, Y, h, t_z, 15000)
        else:
            # Even without intervals, two simulations are produced so the
            # summary helper below has something to work with.
            sim_values = self._sim_prediction(mu, Y, h, t_z, 2)
        error_bars, forecasted_values, plot_values, plot_index = self._summarize_simulations(mean_values, sim_values, date_index, h, past_values)
        plt.figure(figsize=figsize)
        if intervals is True:
            # Nested shaded bands: decreasing alpha for the wider intervals.
            alpha =[0.15*i/float(100) for i in range(50,12,-2)]
            for count, pre in enumerate(error_bars):
                plt.fill_between(date_index[-h-1:], forecasted_values-pre, forecasted_values+pre,alpha=alpha[count])
        plt.plot(plot_index,plot_values)
        plt.title("Forecast for " + self.data_name)
        plt.xlabel("Time")
        plt.ylabel(self.data_name)
        plt.show()
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : boolean
Would you like to show prediction intervals for the forecast?
Returns
----------
- Plot of the forecast |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.