code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def create(appname, **kwargs):
    """Create a `Link` of a particular class, using the kwargs as options"""
    try:
        link_class = LinkFactory._class_dict[appname]
    except KeyError:
        # Re-raise with a message naming the unknown application.
        raise KeyError(
            "Could not create object associated to app %s" % appname)
    return link_class.create(**kwargs)
def stop(self) -> None:
    """Stop listening for new connections.

    Requests currently in progress may still continue after the
    server is stopped.
    """
    if self._stopped:
        return  # idempotent: a second call is a no-op
    self._stopped = True
    for fd, sock in self._sockets.items():
        assert sock.fileno() == fd
        # Unregister the socket from the IOLoop, then close it.
        remove_handler = self._handlers.pop(fd)
        remove_handler()
        sock.close()
Requests currently in progress may still continue after the
server is stopped. |
def print_difftext(text, other=None):
    """
    Print colorized diff text to stdout.

    Args:
        text (str): diff text, or the original text when ``other`` is given.
        other (str): optional second text; when supplied the diff between
            ``text`` and ``other`` is computed first.

    CommandLine:
        #python -m utool.util_print --test-print_difftext
        #autopep8 ingest_data.py --diff | python -m utool.util_print --test-print_difftext
    """
    if other is not None:
        # hack: build the diff text on the fly
        text = util_str.difftext(text, other)
    colored = util_str.color_diff_text(text)
    try:
        print(colored)
    except UnicodeEncodeError as ex:  # NOQA
        # The terminal cannot encode this text; strip to ASCII and retry.
        import unicodedata
        colored = unicodedata.normalize('NFKD', colored).encode('ascii', 'ignore')
        print(colored)
text (str):
CommandLine:
#python -m utool.util_print --test-print_difftext
#autopep8 ingest_data.py --diff | python -m utool.util_print --test-print_difftext |
def class_box(self, cn: ClassDefinitionName) -> str:
    """ Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and
    (b) it appears in the gen_classes list

    @param cn: class to render
    @return: yUML fragment for the class box
    """
    slot_defs: List[str] = []
    first_render = cn not in self.box_generated
    in_focus = not self.focus_classes or cn in self.focus_classes
    if first_render and in_focus:
        cls = self.schema.classes[cn]
        for slotname in self.filtered_cls_slots(cn, all_slots=True):
            slot = self.schema.slots[slotname]
            # Only slots whose range is a builtin or a declared type are listed.
            if not slot.range or slot.range in builtin_names or slot.range in self.schema.types:
                modifier = self.prop_modifier(cls, slot)
                slot_defs.append(underscore(self.aliased_slot_name(slot)) +
                                 modifier + ':' +
                                 underscore(slot.range) + self.cardinality(slot))
        self.box_generated.add(cn)
    self.referenced.add(cn)
    interior = '|' + ';'.join(slot_defs) if slot_defs else ''
    return '[' + camelcase(cn) + interior + ']'
(b) it appears in the gen_classes list
@param cn:
@param inherited:
@return: |
def make_instance(cls, data):
    """Validate the data and create a model instance from the data.

    Args:
        data (dict): The unserialized data to insert into the new model
            instance through it's constructor.

    Returns:
        peewee.Model|sqlalchemy.Model: The model instance with it's data
            inserted into it.

    Raises:
        AttributeError: This is raised if ``Meta.model`` isn't set on the
            schema's definition.
    """
    schema = cls()
    meta = schema.Meta
    if not hasattr(meta, 'model'):
        raise AttributeError(
            "In order to make an instance, a model for "
            "the schema must be defined in the Meta "
            "class.")
    loaded = schema.load(data)
    return cls.Meta.model(**loaded.data)
Args:
data (dict): The unserialized data to insert into the new model
instance through it's constructor.
Returns:
peewee.Model|sqlalchemy.Model: The model instance with it's data
inserted into it.
Raises:
AttributeError: This is raised if ``Meta.model`` isn't set on the
schema's definition. |
def diamond(x, y, radius, filled=False, thickness=1):
    """
    Returns a generator that produces (x, y) tuples in a diamond shape.

    It is easier to predict the size of the diamond that this function
    produces than a 4-sided `polygon()` rotated 45 degrees.  `x` and `y`
    are the coordinates of the topleft corner of the bounding square; the
    width and height of the diamond will be `2 * radius + 1`.  If `filled`
    is `True`, the interior points are also returned.

    >>> list(diamond(0, 0, 3))
    [(4, 0), (3, 1), (5, 1), (2, 2), (6, 2), (1, 3), (7, 3), (2, 4), (6, 4), (3, 5), (5, 5), (4, 6)]
    """
    if thickness != 1:
        raise NotImplementedError('The pybresenham module is under development and the filled, thickness, and endcap parameters are not implemented. You can contribute at https://github.com/asweigart/pybresenham')

    gap = radius   # blank cells to the left of the first point on a row
    span = 1       # cells between the two edge points of a row
    for row in range(radius * 2 + 1):
        # Left edge point of this row.
        yield (gap + 1 + x, row + y)
        if 0 < row < radius * 2:
            # Middle rows have two edge points (and maybe an interior).
            if filled:
                for ix in range(gap + 2 + x, gap + span + 2 + x):
                    yield (ix, row + y)
            # Right edge point of this row.
            yield (gap + span + 2 + x, row + y)
        # Widen until the middle row, then narrow again.
        if row < radius:
            gap -= 1
            if row != 0:
                span += 2
        else:
            gap += 1
            span -= 2
It is easier to predict the size of the diamond that this function
produces, as opposed to creatinga 4-sided polygon with `polygon()`
and rotating it 45 degrees.
The `left` and `top` arguments are the x and y coordinates for the topleft corner of the square.
The width and height of the diamond will be `2 * radius + 1`.
If `filled` is `True`, the interior points are also returned.
In this example diamond shape, the D characters represent the
drawn diamond, the . characters represent the "outside spaces",
and the ' characters represent the "inside spaces".
(The radius of this example diamond is 3.)
...D
..D'D
.D'''D
D'''''D
.D'''D
..D'D
...D
>>> list(diamond(0, 0, 3))
[(4, 0), (3, 1), (5, 1), (2, 2), (6, 2), (1, 3), (7, 3), (2, 4), (6, 4), (3, 5), (5, 5), (4, 6)]
>>> drawPoints(diamond(0, 0, 3))
,,,O,,,
,,O,O,,
,O,,,O,
O,,,,,O
,O,,,O,
,,O,O,,
,,,O,,,
>>> drawPoints(diamond(0, 0, 3, filled=True))
,,,O,,,
,,OOO,,
,OOOOO,
OOOOOOO
,OOOOO,
,,OOO,,
,,,O,,, |
def _get_sm_scale_in(self, scale_sm=91.1876):
    """Get an estimate of the SM parameters at the input scale by running
    them from the EW scale using constant values for the Wilson coefficients
    (corresponding to their leading log approximated values at the EW
    scale).

    Note that this is not guaranteed to work and will fail if some of the
    Wilson coefficients (the ones affecting the extraction of SM parameters)
    are large."""
    # Work on a scratch copy so this instance stays untouched.
    scratch = SMEFT()
    scratch.set_initial(self.C_in, self.scale_in, self.scale_high)
    # Step 1: run the SM up, using the WCs at scale_input as (constant) estimate.
    scratch.C_in.update(self._run_sm_scale_in(self.C_in, scale_sm=scale_sm))
    # Step 2: run the WCs down in leading-log approximation.
    C_out = scratch.rgevolve_leadinglog(scale_sm)
    # Step 3: run the SM up again, this time with the WCs at scale_sm as estimate.
    return self._run_sm_scale_in(C_out, scale_sm=scale_sm)
them from the EW scale using constant values for the Wilson coefficients
(corresponding to their leading log approximated values at the EW
scale).
Note that this is not guaranteed to work and will fail if some of the
Wilson coefficients (the ones affecting the extraction of SM parameters)
are large. |
def describe_volumes(self, xml_bytes):
    """Parse the XML returned by the C{DescribeVolumes} function.

    @param xml_bytes: XML bytes with a C{DescribeVolumesResponse} root
        element.
    @return: A list of L{Volume} instances.

    TODO: attachementSetItemResponseType#deleteOnTermination
    """
    # Timestamps are truncated to whole seconds before parsing.
    time_format = "%Y-%m-%dT%H:%M:%S"
    root = XML(xml_bytes)
    volumes = []
    for volume_data in root.find("volumeSet"):
        create_time = datetime.strptime(
            volume_data.findtext("createTime")[:19], time_format)
        volume = model.Volume(
            volume_data.findtext("volumeId"),
            int(volume_data.findtext("size")),
            volume_data.findtext("status"),
            create_time,
            volume_data.findtext("availabilityZone"),
            volume_data.findtext("snapshotId"))
        volumes.append(volume)
        for attachment_data in volume_data.find("attachmentSet"):
            attach_time = datetime.strptime(
                attachment_data.findtext("attachTime")[:19], time_format)
            attachment = model.Attachment(
                attachment_data.findtext("instanceId"),
                attachment_data.findtext("device"),
                attachment_data.findtext("status"),
                attach_time)
            volume.attachments.append(attachment)
    return volumes
@param xml_bytes: XML bytes with a C{DescribeVolumesResponse} root
element.
@return: A list of L{Volume} instances.
TODO: attachementSetItemResponseType#deleteOnTermination |
def set_stencil_mask(self, mask=8, face='front_and_back'):
    """Control the front or back writing of individual bits in the stencil

    Parameters
    ----------
    mask : int
        Mask that is ANDed with ref and stored stencil value.
    face : str
        Can be 'front', 'back', or 'front_and_back'.
    """
    # Queue the GL call through the GLIR command stream.
    bits = int(mask)
    self.glir.command('FUNC', 'glStencilMaskSeparate', face, bits)
Parameters
----------
mask : int
Mask that is ANDed with ref and stored stencil value.
face : str
Can be 'front', 'back', or 'front_and_back'. |
def purge_metadata_by_name(self, name):
    """Purge a processes metadata directory.

    :raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
    """
    meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
    logger.debug('purging metadata directory: {}'.format(meta_dir))
    try:
        rm_rf(meta_dir)
    except OSError as e:
        # Wrap the low-level failure in the manager's own error type.
        msg = 'failed to purge metadata directory {}: {!r}'.format(meta_dir, e)
        raise ProcessMetadataManager.MetadataError(msg)
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal. |
def html_encode(text):
    """
    Encode characters with a special meaning as HTML.

    :param text: The plain text (a string).
    :returns: The text converted to HTML (a string).
    """
    # BUG FIX: the replacements had degraded into identity no-ops
    # (e.g. replace('&', '&')) — restore the actual HTML entities.
    # '&' must be escaped first so the entities produced below
    # are not themselves re-escaped.
    text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')
    text = text.replace('"', '&quot;')
    return text
:param text: The plain text (a string).
:returns: The text converted to HTML (a string). |
def create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs):
    '''
    create_process(self, service, agent=None, title=None, mode=None, service_version=None, **kwargs)

    Registers a new process or processes

    :Parameters:
    * *service* (`string`) -- Service which process will be started
    * *agent* (`string`) -- The service identifier (e.g shell_command)
    * *title* (`string`) -- Title for the process
    * *mode* (`string`) -- production/development
    * *service_version* (`string`) -- Version of the service to execute

    :Keywords args:
    Json value map containing the process input properties

    :return: process id
    '''
    # Fall back to values carried in this client's input properties.
    agent = agent or self.input.get('opereto_agent')
    mode = mode or self.input.get('opereto_execution_mode') or 'production'
    service_version = service_version or self.input.get('opereto_service_version')

    request_data = {
        'service_id': service,
        'agents': agent,
        'mode': mode,
        's_version': service_version,
    }
    if title:
        request_data['name'] = title
    parent_pid = self.input.get('pid')
    if parent_pid:
        request_data['pflow_id'] = parent_pid
    request_data.update(**kwargs)

    ret_data = self._call_rest_api('post', '/processes', data=request_data, error='Failed to create a new process')
    if not isinstance(ret_data, list):
        raise OperetoClientError(str(ret_data))

    pid = ret_data[0]
    message = 'New process created for service [%s] [pid = %s] ' % (service, pid)
    message += ' [agent = %s]' % agent if agent else ' [agent = any ]'
    self.logger.info(message)
    return str(pid)
Registers a new process or processes
:Parameters:
* *service* (`string`) -- Service which process will be started
* *agent* (`string`) -- The service identifier (e.g shell_command)
* *title* (`string`) -- Title for the process
* *mode* (`string`) -- production/development
* *service_version* (`string`) -- Version of the service to execute
:Keywords args:
Json value map containing the process input properties
:return: process id
:Example:
.. code-block:: python
process_properties = {"my_input_param" : "1"}
pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service', agent=opereto_client.input['opereto_agent'], **process_properties) |
def configure(self, graph, spanning_tree):
    """
    Configure the filter.

    @type graph: graph
    @param graph: Graph.

    @type spanning_tree: dictionary
    @param spanning_tree: Spanning tree.
    """
    # Store both structures for later traversal by the filter.
    self.graph, self.spanning_tree = graph, spanning_tree
@type graph: graph
@param graph: Graph.
@type spanning_tree: dictionary
@param spanning_tree: Spanning tree. |
def canonicalize_tautomer(self):
    """
    :returns: A callable :class:`~molvs.tautomer.TautomerCanonicalizer` instance.
    """
    # Build the canonicalizer from this object's configured transforms,
    # scoring rules and tautomer limit.
    return TautomerCanonicalizer(
        transforms=self.tautomer_transforms,
        scores=self.tautomer_scores,
        max_tautomers=self.max_tautomers,
    )
def removeGuideline(self, guideline):
    """
    Remove ``guideline`` from the glyph.

        >>> glyph.removeGuideline(guideline)

    ``guideline`` may be a :ref:`BaseGuideline` or an
    :ref:`type-int` representing an guideline index.
    """
    # Resolve the argument to an index first.
    if isinstance(guideline, int):
        index = guideline
    else:
        index = self._getGuidelineIndex(guideline)
    index = normalizers.normalizeIndex(index)
    if index >= self._len__guidelines():
        raise ValueError("No guideline located at index %d." % index)
    self._removeGuideline(index)
>>> glyph.removeGuideline(guideline)
``guideline`` may be a :ref:`BaseGuideline` or an
:ref:`type-int` representing an guideline index. |
def stop(self):
    """
    Stop the sensor server (soft stop - signal packet loop to stop)

    Warning: Is non blocking (server might still do something after this!)

    :rtype: None
    """
    self.debug("()")
    super(SensorServer, self).stop()
    # Refuse new clients: tear down the multicast socket when present.
    mcast = self._multicast_socket
    if mcast is not None:
        self._shutdown_multicast_socket()
    # Tell the packet loop to wind down.
    self._is_stopped.set()
Warning: Is non blocking (server might still do something after this!)
:rtype: None |
def normalize(in_file, data, passonly=False, normalize_indels=True, split_biallelic=True,
              rerun_effects=True, remove_oldeffects=False, nonrefonly=False, work_dir=None):
    """Normalizes variants and reruns SnpEFF for resulting VCF
    """
    base, ext = utils.splitext_plus(in_file)
    if remove_oldeffects:
        out_file = "%s-noeff-nomultiallelic%s" % (base, ext)
    else:
        out_file = "%s-nomultiallelic%s" % (base, ext)
    if work_dir:
        out_file = os.path.join(work_dir, os.path.basename(out_file))
    if not utils.file_exists(out_file):
        if vcfutils.vcf_has_variants(in_file):
            ready_ma_file = _normalize(in_file, data, passonly=passonly,
                                       normalize_indels=normalize_indels,
                                       split_biallelic=split_biallelic,
                                       remove_oldeffects=remove_oldeffects,
                                       nonrefonly=nonrefonly,
                                       work_dir=work_dir)
            if rerun_effects:
                # Re-annotate; fall back to the unannotated file when
                # annotation produces no output.
                ann_ma_file, _ = effects.add_to_vcf(ready_ma_file, data)
                if ann_ma_file:
                    ready_ma_file = ann_ma_file
            utils.symlink_plus(ready_ma_file, out_file)
        else:
            # No variants: just link the input through unchanged.
            utils.symlink_plus(in_file, out_file)
    return vcfutils.bgzip_and_index(out_file, data["config"])
def is_changed(self, start, end):
    """Tell whether any of start till end lines have changed

    The end points are inclusive and indices start from 1.
    """
    left, right = self._get_changed(start, end)
    # A non-empty changed region means something changed.
    return left < right
The end points are inclusive and indices start from 1. |
def get_pdffilepath(pdffilename):
    """
    Returns the path for the pdf file

    args:
        pdffilename: string

    returns path for the plots folder / pdffilename.pdf
    """
    plots_cfg = PURPOSE.get("plots")
    return FILEPATHSTR.format(
        root_dir=ROOT_DIR,
        os_sep=os.sep,
        os_extsep=os.extsep,
        name=pdffilename,
        folder=plots_cfg.get("folder", "plots"),
        ext=plots_cfg.get("extension", "pdf"),
    )
args:
pdffilename: string
returns path for the plots folder / pdffilename.pdf |
def connect_delete_namespaced_pod_proxy(self, name, namespace, **kwargs):  # noqa: E501
    """connect_delete_namespaced_pod_proxy  # noqa: E501

    connect DELETE requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.connect_delete_namespaced_pod_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this wrapper always want just the payload, not the
    # (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    call = self.connect_delete_namespaced_pod_proxy_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous: hand the request thread straight back.
        return call(name, namespace, **kwargs)  # noqa: E501
    # Synchronous: unwrap and return the response data.
    (data) = call(name, namespace, **kwargs)  # noqa: E501
    return data
connect DELETE requests to proxy of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_delete_namespaced_pod_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread. |
def p_formula_atom(self, p):
    """formula : ATOM
               | TRUE
               | FALSE"""
    # NOTE: the docstring above is the PLY grammar production for this rule,
    # not documentation -- PLY parses it to build the parser tables.
    # Map the matched token to the corresponding propositional-logic node.
    if p[1]==Symbols.TRUE.value:
        p[0] = PLTrue()
    elif p[1]==Symbols.FALSE.value:
        p[0] = PLFalse()
    else:
        # Any other token is a plain propositional atom.
        p[0] = PLAtomic(Symbol(p[1]))
| TRUE
| FALSE |
def trigger_script(self):
    """Actually process a script."""
    if self.remote_bridge.status not in (BRIDGE_STATUS.RECEIVED,):
        return [1]  # FIXME: State change
    # This is asynchronous in real life so just cache the error.
    try:
        parsed = UpdateScript.FromBinary(self._device.script)
        self.remote_bridge.parsed_script = parsed
        # FIXME: Actually run the script
        self.remote_bridge.status = BRIDGE_STATUS.IDLE
    except Exception as exc:
        self._logger.exception("Error parsing script streamed to device")
        self.remote_bridge.script_error = exc
        self.remote_bridge.error = 1  # FIXME: Error code
    return [0]
def set_basic_params(
        self, workers=None, zerg_server=None, fallback_node=None, concurrent_events=None,
        cheap_mode=None, stats_server=None, quiet=None, buffer_size=None,
        keepalive=None, resubscribe_addresses=None):
        """
        :param int workers: Number of worker processes to spawn.
        :param str|unicode zerg_server: Attach the router to a zerg server.
        :param str|unicode fallback_node: Fallback to the specified node in case of error.
        :param int concurrent_events: Set the maximum number of concurrent events router can manage.
            Default: system dependent.
        :param bool cheap_mode: Enables cheap mode. When the router is in cheap mode,
            it will not respond to requests until a node is available.
            This means that when there are no nodes subscribed, only your local app (if any) will respond.
            When all of the nodes go down, the router will return in cheap mode.
        :param str|unicode stats_server: Router stats server address to run at.
        :param bool quiet: Do not report failed connections to instances.
        :param int buffer_size: Set internal buffer size in bytes. Default: page size.
        :param int keepalive: Allows holding the connection open even if the request has a body.
            * http://uwsgi.readthedocs.io/en/latest/HTTP.html#http-keep-alive
            .. note:: See http11 socket type for an alternative.
        :param str|unicode|list[str|unicode] resubscribe_addresses: Forward subscriptions
            to the specified subscription server.
        """
        # NOTE(review): filter_locals(locals(), ...) forwards every parameter
        # except the two dropped below, so no new local variables may be
        # introduced before this call without also filtering them out.
        super(RouterHttp, self).set_basic_params(**filter_locals(locals(), drop=[
            'keepalive',
            'resubscribe_addresses',
        ]))
        # These two options use router-specific aliases and are set separately.
        self._set_aliased('keepalive', keepalive)
        self._set_aliased('resubscribe', resubscribe_addresses, multi=True)
        return self
:param str|unicode zerg_server: Attach the router to a zerg server.
:param str|unicode fallback_node: Fallback to the specified node in case of error.
:param int concurrent_events: Set the maximum number of concurrent events router can manage.
Default: system dependent.
:param bool cheap_mode: Enables cheap mode. When the router is in cheap mode,
it will not respond to requests until a node is available.
This means that when there are no nodes subscribed, only your local app (if any) will respond.
When all of the nodes go down, the router will return in cheap mode.
:param str|unicode stats_server: Router stats server address to run at.
:param bool quiet: Do not report failed connections to instances.
:param int buffer_size: Set internal buffer size in bytes. Default: page size.
:param int keepalive: Allows holding the connection open even if the request has a body.
* http://uwsgi.readthedocs.io/en/latest/HTTP.html#http-keep-alive
.. note:: See http11 socket type for an alternative.
:param str|unicode|list[str|unicode] resubscribe_addresses: Forward subscriptions
to the specified subscription server. |
def objectcount(data, key):
    """return the count of objects of key"""
    # Object tables are keyed by the upper-cased name.
    return len(data.dt[key.upper()])
def add_cable_dist(self, lv_cable_dist):
    """Adds a LV cable_dist to _cable_dists and grid graph if not already existing

    Parameters
    ----------
    lv_cable_dist :
        Description #TODO
    """
    is_new = lv_cable_dist not in self._cable_distributors
    if is_new and isinstance(lv_cable_dist, LVCableDistributorDing0):
        self._cable_distributors.append(lv_cable_dist)
        self.graph_add_node(lv_cable_dist)
Parameters
----------
lv_cable_dist :
Description #TODO |
def do_search(self, string):
    """Search Ndrive for filenames containing the given string."""
    matches = self.n.doSearch(string, full_path=self.current_path)
    if matches:
        for match in matches:
            self.stdout.write("%s\n" % match['path'])
def say(self, message, **options):
    """
    When the current session is a voice channel this key will either play a message or an audio file from a URL.
    In the case of an text channel it will send the text back to the user via i nstant messaging or SMS.

    Argument: message is a string
    Argument: **options is a set of optional keyword arguments.
    See https://www.tropo.com/docs/webapi/say
    """
    # A voice set explicitly in the options wins; otherwise fall back to
    # the voice configured on this Tropo object, when one is present.
    if hasattr(self, 'voice') and 'voice' not in options:
        options['voice'] = self.voice
    self._steps.append(Say(message, **options).obj)
In the case of an text channel it will send the text back to the user via i nstant messaging or SMS.
Argument: message is a string
Argument: **options is a set of optional keyword arguments.
See https://www.tropo.com/docs/webapi/say |
def get_cell_shift(flow_model):
    """Get flow direction induced cell shift dict.

    Args:
        flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
    """
    model_key = flow_model.lower()
    assert model_key in FlowModelConst.d8_deltas
    return FlowModelConst.d8_deltas.get(model_key)
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported. |
def special_handling(self, text):
    """special_handling = "?" , identifier , "?" ;"""
    self._attempting(text)
    # Match "? identifier ?" and retag the resulting token.
    parser = concatenation(
        ["?", self.identifier, "?"],
        ignore_whitespace=True,
    )
    return parser(text).retyped(TokenType.special_handling)
def getWord(self, pattern, returnDiff=0):
    """
    Returns the word associated with pattern.
    Example: net.getWord([0, 0, 0, 1]) => "tom"
    This method now returns the closest pattern based on distance.

    When ``returnDiff`` is truthy, a ``(word, squared_distance)`` tuple is
    returned instead of just the word.
    """
    minDist = 10000
    closest = None
    for w in self.patterns:
        # Skip scalar-valued patterns; only vector patterns are searched.
        # (The original check was `type(...) in [int, float, int]` -- the
        # duplicated `int` was a Python 2 `long` leftover; isinstance
        # covers the same cases cleanly.)
        if isinstance(self.patterns[w], (int, float)):
            continue
        if len(self.patterns[w]) == len(pattern):
            # Squared Euclidean distance between stored pattern and query.
            # sum() also handles zero-length patterns, where the original
            # reduce() call would raise.
            dist = sum((a - b) ** 2 for (a, b) in zip(self.patterns[w], pattern))
            if dist == 0.0:
                # Exact match -- no need to keep scanning.
                return (w, dist) if returnDiff else w
            if dist < minDist:
                minDist = dist
                closest = w
    return (closest, minDist) if returnDiff else closest
Example: net.getWord([0, 0, 0, 1]) => "tom"
This method now returns the closest pattern based on distance. |
def qwarp_epi(dset, align_subbrick=5, suffix='_qwal', prefix=None):
    '''aligns an EPI time-series using 3dQwarp

    Very expensive and not efficient at all, but it can produce pretty impressive alignment for EPI time-series with significant
    distortions due to motion'''
    info = nl.dset_info(dset)
    if info == None:
        nl.notify('Error reading dataset "%s"' % (dset), level=nl.level.error)
        return False
    if prefix == None:
        prefix = nl.suffix(dset, suffix)
    # Per-subbrick temporary file name.
    dset_sub = lambda x: '_tmp_qwarp_epi-%s_%d.nii.gz' % (nl.prefix(dset), x)
    try:
        # Extract the reference subbrick, then warp every other subbrick to it.
        align_dset = nl.suffix(dset_sub(align_subbrick), '_warp')
        nl.calc('%s[%d]' % (dset, align_subbrick), expr='a', prefix=align_dset, datum='float')
        for i in xrange(info.reps):
            if i == align_subbrick:
                continue
            nl.calc('%s[%d]' % (dset, i), expr='a', prefix=dset_sub(i), datum='float')
            nl.run([
                '3dQwarp', '-nowarp',
                '-workhard', '-superhard', '-minpatch', '9', '-blur', '0',
                '-pear', '-nopenalty',
                '-base', align_dset,
                '-source', dset_sub(i),
                '-prefix', nl.suffix(dset_sub(i), '_warp')
            ], quiet=True)
        # Concatenate the warped subbricks back into a single time-series.
        cmd = ['3dTcat', '-prefix', prefix]
        if info.TR:
            cmd += ['-tr', info.TR]
        if info.slice_timing:
            cmd += ['-tpattern', info.slice_timing]
        cmd += [nl.suffix(dset_sub(i), '_warp') for i in xrange(info.reps)]
        nl.run(cmd, quiet=True)
    except Exception as e:
        raise e
    finally:
        # Best-effort removal of the per-subbrick temporary files.
        # NOTE(review): the 'warp' entry here differs from the '_warp'
        # suffix used above -- presumably nl.suffix normalizes it; verify.
        for i in xrange(info.reps):
            for cleanup_suffix in ['', 'warp']:
                try:
                    os.remove(nl.suffix(dset_sub(i), cleanup_suffix))
                except:
                    pass
Very expensive and not efficient at all, but it can produce pretty impressive alignment for EPI time-series with significant
distortions due to motion |
def neighbor(self, **kwargs):
    """Add BGP neighbor.

    Args:
        ip_addr (str): IP Address of BGP neighbor.
        remote_as (str): Remote ASN of BGP neighbor.
        vrf (str): The VRF for this BGP process.
        rbridge_id (str): The rbridge ID of the device on which BGP will be
            configured in a VCS fabric.
        delete (bool): Deletes the neighbor if `delete` is ``True``.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `ip_addr` is not specified.
        ValueError: if `remote_as` is missing when configuring (not deleting).
    """
    ip_addr = kwargs.pop('ip_addr')
    remote_as = kwargs.pop('remote_as', None)
    vrf = kwargs.pop('vrf', 'default')
    rbridge_id = kwargs.pop('rbridge_id', '1')
    delete = kwargs.pop('delete', False)
    callback = kwargs.pop('callback', self._callback)
    ip_addr = ip_interface(unicode(ip_addr))

    if not delete and remote_as is None:
        raise ValueError('When configuring a neighbor, you must specify '
                         'its remote-as.')

    def _v6_activate_config():
        # The IPv6 address-family "activate" config node, used for both
        # activation (create) and deactivation (delete).
        activate_args = dict(vrf_name=vrf, rbridge_id=rbridge_id,
                             af_ipv6_neighbor_address=str(ip_addr.ip))
        activate_neighbor = getattr(self._rbridge,
                                    'rbridge_id_router_bgp_router_bgp_'
                                    'cmds_holder_address_family_ipv6_'
                                    'ipv6_unicast_af_ipv6_neighbor_'
                                    'address_holder_af_ipv6_'
                                    'neighbor_address_activate')
        return activate_neighbor(**activate_args)

    neighbor_args = dict(router_bgp_neighbor_address=str(ip_addr.ip),
                         remote_as=remote_as,
                         vrf_name=vrf,
                         rbridge_id=rbridge_id)
    if ip_addr.version == 4:
        neighbor_node = getattr(self._rbridge,
                                'rbridge_id_router_bgp_router_bgp_cmds_holder_'
                                'router_bgp_attributes_neighbor_ips_'
                                'neighbor_addr_remote_as')
        ip_addr_path = './/*neighbor-addr'
    else:
        neighbor_args['router_bgp_neighbor_ipv6_address'] = str(ip_addr.ip)
        neighbor_node = getattr(self._rbridge,
                                'rbridge_id_router_bgp_router_bgp_cmds_holder_'
                                'router_bgp_attributes_neighbor_ipv6s_neighbor_'
                                'ipv6_addr_remote_as')
        ip_addr_path = './/*neighbor-ipv6-addr'
    config = neighbor_node(**neighbor_args)

    if delete:
        # Mark the neighbor element for deletion and strip the remote-as.
        target = config.find(ip_addr_path)
        target.set('operation', 'delete')
        target.remove(target.find('remote-as'))
        if ip_addr.version == 6:
            # IPv6 neighbors must also be deactivated in the address family.
            deactivate = _v6_activate_config()
            deactivate.find('.//*af-ipv6-neighbor-'
                            'address').set('operation', 'delete')
            callback(deactivate)
    else:
        if ip_addr.version == 6:
            # Push the neighbor config first, then activate it.
            callback(config)
            config = _v6_activate_config()

    if kwargs.pop('get', False):
        return callback(config, handler='get_config')
    return callback(config)
Args:
ip_addr (str): IP Address of BGP neighbor.
remote_as (str): Remote ASN of BGP neighbor.
vrf (str): The VRF for this BGP process.
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
delete (bool): Deletes the neighbor if `delete` is ``True``.
get (bool): Get config instead of editing config. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `remote_as` or `ip_addr` is not specified.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... remote_as='65535', rbridge_id='225')
... output = dev.bgp.neighbor(ip_addr='10.10.10.10', get=True,
... remote_as='65535', rbridge_id='225')
... output = dev.bgp.neighbor(remote_as='65535',
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
... output = dev.bgp.neighbor(remote_as='65535', get=True,
... rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
... output = dev.bgp.neighbor(ip_addr='10.10.10.10',
... delete=True, rbridge_id='225')
... output = dev.bgp.neighbor(delete=True, rbridge_id='225',
... ip_addr='2001:4818:f000:1ab:cafe:beef:1000:1')
... dev.bgp.neighbor() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError
KeyError |
def handler(self, environ, start_response):
    """Proxy a single WSGI request to the HTTP server on ``self.port``.

    Streams the upstream response back in 4096-byte chunks. Connection
    failures produce a ``501 Gateway Error`` page instead of raising.

    :param environ: WSGI environ dict for the incoming request
    :param start_response: WSGI start_response callable
    :yields: byte chunks of the proxied response body
    """
    logger = logging.getLogger(__name__ + '.WSGIProxyApplication.handler')
    url = urlparse(reconstruct_url(environ, self.port))
    # Create connection object
    try:
        connection = self.connection_class(url.netloc)
        # Build path
        path = url.geturl().replace('%s://%s' % (url.scheme, url.netloc),
                                    '')
    except Exception:
        start_response('501 Gateway Error', [('Content-Type', 'text/html')])
        logger.exception('Could not Connect')
        # BUG FIX: WSGI response bodies must be byte strings (PEP 3333).
        # This branch previously yielded a str, unlike the other error
        # paths below which encode to UTF-8.
        yield '<H1>Could not connect</H1>'.encode("utf-8")
        return
    # Read in request body if it exists
    body = length = None
    try:
        length = int(environ['CONTENT_LENGTH'])
    except (KeyError, ValueError):
        # This is a situation where client HTTP POST is missing content-length.
        # This is also a situation where (WebOb?) may screw up encoding and
        # insert an extraneous = in the body.
        # https://github.com/ipython/ipython/issues/8416
        if environ["REQUEST_METHOD"] == "POST":
            if environ.get("CONTENT_TYPE") == 'application/x-www-form-urlencoded; charset=UTF-8':
                body = environ['wsgi.input'].read()
                try:
                    body = unquote_plus(body.decode("utf-8"))
                    # Fix extra = at end of JSON payload
                    if body.startswith("{") and body.endswith("}="):
                        body = body[0:len(body) - 1]
                except Exception as e:
                    logger.exception(e)
                    logger.error("Could not decode body: %s", body)
                length = len(body)
    else:
        body = environ['wsgi.input'].read(length)
    # Build headers
    logger.debug('environ = %r', environ)
    headers = dict(
        (key, value)
        for key, value in (
            # This is a hacky way of getting the header names right
            (key[5:].lower().replace('_', '-'), value)
            for key, value in environ.items()
            # Keys that start with HTTP_ are all headers
            if key.startswith('HTTP_')
        )
        if not is_hop_by_hop(key)
    )
    # Handle headers that aren't HTTP_ in environ
    try:
        headers['content-type'] = environ['CONTENT_TYPE']
    except KeyError:
        pass
    # Add our host if one isn't defined
    if 'host' not in headers:
        headers['host'] = environ['SERVER_NAME']
    # Make the remote request
    try:
        logger.debug('%s %s %r',
                     environ['REQUEST_METHOD'], path, headers)
        connection.request(environ['REQUEST_METHOD'], path,
                           body=body, headers=headers)
    except Exception as e:
        # We need extra exception handling in the case the server fails
        # in mid connection, it's an edge case but I've seen it
        if isinstance(e, ConnectionRefusedError):
            # The notebook was shutdown by the user
            pass
        else:
            # This might be a genuine error
            logger.exception(e)
        start_response('501 Gateway Error', [('Content-Type', 'text/html')])
        yield '<H1>Could not proxy IPython Notebook running localhost:{}</H1>'.format(self.port).encode("utf-8")
        return
    try:
        response = connection.getresponse()
    except ConnectionResetError:
        # Notebook shutdown
        start_response('501 Gateway Error', [('Content-Type', 'text/html')])
        yield '<H1>Could not proxy IPython Notebook running localhost:{}</H1>'.format(self.port).encode("utf-8")
        return
    # Strip hop-by-hop headers before relaying the upstream response
    hopped_headers = response.getheaders()
    headers = [(key, value)
               for key, value in hopped_headers
               if not is_hop_by_hop(key)]
    start_response('{0.status} {0.reason}'.format(response), headers)
    while True:
        chunk = response.read(4096)
        if chunk:
            yield chunk
        else:
            break
def getDefaultConfigObj(taskname, configObj, input_dict=None, loadOnly=True):
    """ Return default configObj instance for task updated
    with user-specified values from input_dict.

    Parameters
    ----------
    taskname : string
        Name of task to load into TEAL

    configObj : string
        The valid values for 'configObj' would be::

            None - loads last saved user .cfg file
            'defaults' - loads task default .cfg file
            name of .cfg file (string)- loads user-specified .cfg file

    input_dict : dict, optional
        Set of parameters and values specified by user to be different from
        what gets loaded in from the .cfg file for the task

    loadOnly : bool
        Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
        user to edit the values further and then run the task if desired.
    """
    # BUG FIX: the default for input_dict used to be the mutable literal
    # ``{}``, which is shared across calls. ``None`` is the safe sentinel,
    # and the merge guard below already treats None as "no overrides".
    if configObj is None:
        # Start by grabbing the default values without using the GUI
        # This insures that all subsequent use of the configObj includes
        # all parameters and their last saved values
        configObj = teal.load(taskname)
    elif isinstance(configObj, str):
        if configObj.lower().strip() == 'defaults':
            # Load task default .cfg file with all default values
            configObj = teal.load(taskname, defaults=True)
            # define default filename for configObj
            configObj.filename = taskname.lower() + '.cfg'
        else:
            # Load user-specified .cfg file with its special default values
            # we need to call 'fileutil.osfn()' to insure all environment
            # variables specified by the user in the configObj filename are
            # expanded to the full path
            configObj = teal.load(fileutil.osfn(configObj))

    # merge in the user values for this run
    # this, though, does not save the results for use later
    if input_dict not in [None, {}]:
        # check to see whether any input parameters are unexpected;
        # any unexpected parameters provided on input should be reported and
        # the code should stop
        validateUserPars(configObj, input_dict)
        # If everything looks good, merge user inputs with configObj and continue
        cfgpars.mergeConfigObj(configObj, input_dict)

    if not loadOnly:
        # We want to run the GUI AFTER merging in any parameters
        # specified by the user on the command-line and provided in
        # input_dict
        configObj = teal.teal(configObj, loadOnly=False)
    return configObj
with user-specified values from input_dict.
Parameters
----------
taskname : string
Name of task to load into TEAL
configObj : string
The valid values for 'configObj' would be::
None - loads last saved user .cfg file
'defaults' - loads task default .cfg file
name of .cfg file (string)- loads user-specified .cfg file
input_dict : dict
Set of parameters and values specified by user to be different from
what gets loaded in from the .cfg file for the task
loadOnly : bool
Setting 'loadOnly' to False causes the TEAL GUI to start allowing the
user to edit the values further and then run the task if desired. |
def get_config(self):
    """Return a pickle-serializable dict capturing this hash's configuration.

    The dict holds the hash name, input dimensionality, bin width,
    projection count and the projection normals, exactly as stored on
    the instance.
    """
    config = dict(
        hash_name=self.hash_name,
        dim=self.dim,
        bin_width=self.bin_width,
        projection_count=self.projection_count,
        normals=self.normals,
    )
    return config
def _advapi32_create_handles(cipher, key, iv):
    """
    Creates an HCRYPTPROV and HCRYPTKEY for symmetric encryption/decryption. The
    HCRYPTPROV must be released by close_context_handle() and the
    HCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.

    :param cipher:
        A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
        "rc2", "rc4"

    :param key:
        A byte string of the symmetric key

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :return:
        A tuple of (HCRYPTPROV, HCRYPTKEY)
    """
    context_handle = None
    if cipher == 'aes':
        # AES algorithm id is selected by key length (16/24/32 bytes)
        algorithm_id = {
            16: Advapi32Const.CALG_AES_128,
            24: Advapi32Const.CALG_AES_192,
            32: Advapi32Const.CALG_AES_256,
        }[len(key)]
    else:
        # All other ciphers map directly from name to CALG_* constant
        algorithm_id = {
            'des': Advapi32Const.CALG_DES,
            'tripledes_2key': Advapi32Const.CALG_3DES_112,
            'tripledes_3key': Advapi32Const.CALG_3DES,
            'rc2': Advapi32Const.CALG_RC2,
            'rc4': Advapi32Const.CALG_RC4,
        }[cipher]
    provider = Advapi32Const.MS_ENH_RSA_AES_PROV
    context_handle = open_context_handle(provider, verify_only=False)
    # Build a PLAINTEXTKEYBLOB (BLOBHEADER + key size + raw key bytes) so
    # the raw key material can be imported via CryptImportKey.
    blob_header_pointer = struct(advapi32, 'BLOBHEADER')
    blob_header = unwrap(blob_header_pointer)
    blob_header.bType = Advapi32Const.PLAINTEXTKEYBLOB
    blob_header.bVersion = Advapi32Const.CUR_BLOB_VERSION
    blob_header.reserved = 0
    blob_header.aiKeyAlg = algorithm_id
    blob_struct_pointer = struct(advapi32, 'PLAINTEXTKEYBLOB')
    blob_struct = unwrap(blob_struct_pointer)
    blob_struct.hdr = blob_header
    blob_struct.dwKeySize = len(key)
    blob = struct_bytes(blob_struct_pointer) + key
    flags = 0
    # For 40-bit (5-byte) RC2/RC4 keys, suppress CryptoAPI's default
    # salting behavior (CRYPT_NO_SALT)
    if cipher in set(['rc2', 'rc4']) and len(key) == 5:
        flags = Advapi32Const.CRYPT_NO_SALT
    key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
    res = advapi32.CryptImportKey(
        context_handle,
        blob,
        len(blob),
        null(),
        flags,
        key_handle_pointer
    )
    handle_error(res)
    key_handle = unwrap(key_handle_pointer)
    if cipher == 'rc2':
        # RC2 requires the effective key length (in bits) to be set explicitly
        buf = new(advapi32, 'DWORD *', len(key) * 8)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_EFFECTIVE_KEYLEN,
            buf,
            0
        )
        handle_error(res)
    if cipher != 'rc4':
        # Block ciphers: configure IV, CBC chaining mode and PKCS#5 padding
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_IV,
            iv,
            0
        )
        handle_error(res)
        buf = new(advapi32, 'DWORD *', Advapi32Const.CRYPT_MODE_CBC)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_MODE,
            buf,
            0
        )
        handle_error(res)
        buf = new(advapi32, 'DWORD *', Advapi32Const.PKCS5_PADDING)
        res = advapi32.CryptSetKeyParam(
            key_handle,
            Advapi32Const.KP_PADDING,
            buf,
            0
        )
        handle_error(res)
    return (context_handle, key_handle)
HCRYPTPROV must be released by close_context_handle() and the
HCRYPTKEY must be released by advapi32.CryptDestroyKey() when done.
:param cipher:
A unicode string of "aes", "des", "tripledes_2key", "tripledes_3key",
"rc2", "rc4"
:param key:
A byte string of the symmetric key
:param iv:
The initialization vector - a byte string - unused for RC4
:return:
A tuple of (HCRYPTPROV, HCRYPTKEY) |
def set_cookie(response, name, value, expiry_seconds=None, secure=False):
    """
    Set cookie wrapper that allows number of seconds to be given as the
    expiry time, and ensures values are correctly encoded.
    """
    if expiry_seconds is None:
        expiry_seconds = 90 * 24 * 60 * 60  # Default to 90 days.
    expiry_time = datetime.utcnow() + timedelta(seconds=expiry_seconds)
    expires = expiry_time.strftime("%a, %d-%b-%Y %H:%M:%S GMT")
    try:
        response.set_cookie(name, value, expires=expires, secure=secure)
    except (KeyError, TypeError):
        # Django doesn't seem to support unicode cookie keys correctly on
        # Python 2. Work around by encoding it. See
        # https://code.djangoproject.com/ticket/19802
        response.set_cookie(name.encode('utf-8'), value, expires=expires,
                            secure=secure)
expiry time, and ensures values are correctly encoded. |
def _execute_callback(async_task, callback):
    """Execute the given callback or start the Async callback, or if no
    callback is given return the async task's result payload.

    :param async_task: the completed Async whose result payload is the
        fallback return value (historically this parameter was named
        ``async``, which became a reserved keyword in Python 3.7)
    :param callback: ``None``, a furious ``Async`` instance, or a plain
        callable
    """
    # BUG FIX: ``from furious.async import Async`` is a SyntaxError on
    # Python >= 3.7 because ``async`` is a keyword; import the module via
    # importlib instead.
    import importlib
    Async = importlib.import_module('furious.async').Async

    if not callback:
        return async_task.result.payload

    if isinstance(callback, Async):
        return callback.start()

    return callback()
callback is given return the async.result. |
def record(self, i=0):
    """Return the dbf record at the supplied index *i*."""
    dbf_file = self.__getFileObj(self.dbf)
    # Lazily parse the dbf header the first time a record is requested.
    if not self.numRecords:
        self.__dbfHeader()
    index = self.__restrictIndex(i)
    record_size = self.__recordFmt()[1]
    # Rewind, then seek directly to the requested record's offset.
    dbf_file.seek(0)
    dbf_file.seek(self.__dbfHeaderLength() + (index * record_size))
    return self.__record()
def _createIndexRti(self, index, nodeName):
    """ Auxiliary method that wraps *index* in a new PandasIndexRti,
        inheriting this item's file name and icon color.
    """
    rti = PandasIndexRti(
        index=index,
        nodeName=nodeName,
        fileName=self.fileName,
        iconColor=self._iconColor,
    )
    return rti
def transform(self, pyobject):
    """Transform a `PyObject` into its textual (tuple) form.

    Dispatches to a ``<typename>_to_textual`` method on *self*; falls back
    to ``('unknown',)`` when no such handler exists.
    """
    if pyobject is None:
        return ('none',)
    handler_name = type(pyobject).__name__ + '_to_textual'
    try:
        return getattr(self, handler_name)(pyobject)
    except AttributeError:
        return ('unknown',)
def hrd_new(self, input_label="", skip=0):
    """
    Plot an HR diagram, optionally skipping the first N lines and
    appending a label string.

    Parameters
    ----------
    input_label : string, optional
        Diagram label. The default is "".
    skip : integer, optional
        Skip the first n lines. The default is 0.
    """
    previous_xlim = pyl.gca().get_xlim()
    base_label = "M=" + str(self.header_attr['initial_mass']) + \
        ", Z=" + str(self.header_attr['initial_z'])
    if input_label == "":
        plot_label = base_label
    else:
        plot_label = base_label + "; " + str(input_label)
    pyl.plot(self.data[skip:, self.cols['log_Teff'] - 1],
             self.data[skip:, self.cols['log_L'] - 1],
             label=plot_label)
    pyl.legend(loc=0)
    new_xlim = pyl.gca().get_xlim()
    pyl.xlabel('log Teff')
    pyl.ylabel('log L')
    # HR diagrams run with Teff decreasing to the right; merge the old and
    # new x-limits unless either set is uninitialized (contains a zero).
    if any(array(previous_xlim) == 0):
        pyl.gca().set_xlim(max(new_xlim), min(new_xlim))
    elif any(array(new_xlim) == 0):
        pyl.gca().set_xlim(max(previous_xlim), min(previous_xlim))
    else:
        combined = previous_xlim + new_xlim
        pyl.gca().set_xlim([max(combined), min(combined)])
add a label string
Parameters
----------
input_label : string, optional
Diagram label. The default is "".
skip : integer, optional
Skip the first n lines. The default is 0. |
def _has_exclusive_option(cls, options):
    """Return `True` iff one or more exclusive options were selected.

    Checks every attribute named in ``cls.BASE_ERROR_SELECTION_OPTIONS``
    on *options* and reports whether any of them was set (is not None).
    """
    # A generator expression (instead of the original list comprehension
    # wrapped in any()) avoids materializing a list and lets ``any``
    # short-circuit on the first selected option.
    return any(getattr(options, opt) is not None
               for opt in cls.BASE_ERROR_SELECTION_OPTIONS)
def setBatchSize(self, val):
    """
    Sets the value of :py:attr:`batchSize` and mirrors the change on the
    JVM side via the BigDL bridge. Returns self for chaining.
    """
    self._paramMap[self.batchSize] = val
    # The JVM-side setter is named after the concrete subclass.
    jvm_setter = "setBatchSize" + self.__class__.__name__
    callBigDlFunc(self.bigdl_type, jvm_setter, self.value, val)
    return self
def Join(self, Id):
    """Joins with another call to form a conference.

    :Parameters:
      Id : int
        Call Id of the other call to join to the conference.

    :return: Conference object.
    :rtype: `Conference`
    """
    command = 'SET CALL %s JOIN_CONFERENCE %s' % (self.Id, Id)
    expected_reply = 'CALL %s CONF_ID' % self.Id
    reply = self._Owner._DoCommand(command, expected_reply)
    # The conference id is the last token of the reply.
    return Conference(self._Owner, reply.split()[-1])
:Parameters:
Id : int
Call Id of the other call to join to the conference.
:return: Conference object.
:rtype: `Conference` |
def external2internal(xe, bounds):
    """Convert a series of external variables to internal variables.

    :param xe: sequence of external (constrained) variable values
    :param bounds: sequence of (min, max) pairs, one per variable; either
        side may be None for an unbounded direction
    :return: numpy array of the corresponding internal (unconstrained)
        values
    """
    xi = np.empty_like(xe)

    for i, (v, bound) in enumerate(zip(xe, bounds)):

        a = bound[0]  # minimum
        b = bound[1]  # maximum

        # BUG FIX: use identity tests (``is None``) instead of ``== None``;
        # equality with None is unidiomatic and misbehaves for values whose
        # __eq__ is overridden (e.g. numpy arrays).
        if a is None and b is None:  # No constraints
            xi[i] = v

        elif b is None:  # only min
            xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)

        elif a is None:  # only max
            xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)

        else:  # both min and max
            xi[i] = np.arcsin((2. * (v - a) / (b - a)) - 1.)

    return xi
def get_year_and_month(self, net, qs, **kwargs):
    """
    Get the year and month. First tries from kwargs, then from
    querystrings. If none, or if cal_ignore qs is specified,
    sets year and month to this year and this month.
    """
    now = c.get_now()
    year = now.year
    month = now.month + net
    month_orig = None
    if 'cal_ignore=true' not in qs:
        # BUG FIX: the original test was ``if 'year' and 'month' in
        # self.kwargs`` -- the literal 'year' is always truthy, so only
        # 'month' was actually checked and a 'month'-only kwargs dict
        # raised KeyError below. Both keys must be tested.
        if 'year' in self.kwargs and 'month' in self.kwargs:  # try kwargs
            year, month_orig = map(
                int, (self.kwargs['year'], self.kwargs['month'])
            )
            month = month_orig + net
        else:
            try:  # try querystring
                year = int(self.request.GET['cal_year'])
                month_orig = int(self.request.GET['cal_month'])
                month = month_orig + net
            except Exception:
                pass
    # return the year and month, and any errors that may have occurred do
    # to an invalid month/year being given.
    return c.clean_year_month(year, month, month_orig)
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month. |
def gmean(data, channels=None):
    """
    Calculate the geometric mean of the events in an FCSData object.

    Parameters
    ----------
    data : FCSData or numpy array
        NxD flow cytometry data where N is the number of events and D is
        the number of parameters (aka channels).
    channels : int or str or list of int or list of str, optional
        Channels on which to calculate the statistic. If None, use all
        channels.

    Returns
    -------
    float or numpy array
        The geometric mean of the events in the specified channels of
        `data`.
    """
    # Restrict to the requested channels (columns), then reduce over events.
    selected = data if channels is None else data[:, channels]
    return scipy.stats.gmean(selected, axis=0)
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The geometric mean of the events in the specified channels of
`data`. |
def main():
    """
    Commandline interface to initialize Sockeye embedding weights with pretrained word representations.
    """
    setup_main_logger(console=True, file_logging=False)
    usage = ('Quick usage: python3 -m sockeye.init_embedding '
             '-w embed-in-src.npy embed-in-tgt.npy '
             '-i vocab-in-src.json vocab-in-tgt.json '
             '-o vocab-out-src.json vocab-out-tgt.json '
             '-n source_embed_weight target_embed_weight '
             '-f params.init')
    parser = argparse.ArgumentParser(description=usage)
    arguments.add_init_embedding_args(parser)
    init_embeddings(parser.parse_args())
def musixmatch(song):
    """
    Returns the lyrics found in musixmatch for the specified mp3 file or an
    empty string if not found.
    """
    # NOTE(review): this removes the literal substring "'-¡¿" from URLESCAPE
    # (re.sub patterns are not character classes). If the intent was to drop
    # each of those characters individually, a "[...]" class was needed --
    # confirm against the behavior of the other scraper functions.
    escape = re.sub("'-¡¿", '', URLESCAPE)
    # Translation table consumed by the project-level normalize() helper;
    # presumably maps matching characters to their replacement -- verify
    # against normalize()'s implementation.
    translate = {
        escape: '',
        ' ': '-'
    }
    # Build URL slugs: title-case, drop quoted fragments, turn apostrophes
    # into dashes, then collapse runs of dashes.
    artist = song.artist.title()
    artist = re.sub(r"( '|' )", '', artist)
    artist = re.sub(r"'", '-', artist)
    title = song.title
    title = re.sub(r"( '|' )", '', title)
    title = re.sub(r"'", '-', title)
    artist = normalize(artist, translate)
    artist = re.sub(r'\-{2,}', '-', artist)
    title = normalize(title, translate)
    title = re.sub(r'\-{2,}', '-', title)
    url = 'https://www.musixmatch.com/lyrics/{}/{}'.format(artist, title)
    soup = get_url(url)
    text = ''
    # Paragraphs with this class hold the lyrics; join them with blank lines.
    contents = soup.find_all('p', class_='mxm-lyrics__content')
    for p in contents:
        text += p.get_text().strip()
        if p != contents[-1]:
            text += '\n\n'
    return text.strip()
empty string if not found. |
def _get_bootstrap_url(directory):
    '''
    Get the most appropriate download URL for the bootstrap script.

    directory
        directory to execute in
    '''
    buildout_version = _get_buildout_ver(directory)
    # Fall back to the default version's URL for unknown buildout versions.
    fallback = _URL_VERSIONS[DEFAULT_VER]
    return _URL_VERSIONS.get(buildout_version, fallback)
directory
directory to execute in |
def linear_interpolation_extrapolation(df, target_height):
    r"""
    Linearly inter- or extrapolates between the values of a data frame.

    Used to approximate a parameter (e.g. wind speed), available at two or
    more heights, at `target_height` (e.g. hub height). Invoked when the
    `wind_speed_model`, `density_model` or `temperature_model` parameter of
    a :class:`~.modelchain.ModelChain` instance is
    'interpolation_extrapolation'.

    Parameters
    ----------
    df : pandas.DataFrame
        Time series of the parameter to inter-/extrapolate. Each column is
        one measurement height; if more than two heights are given, the two
        closest to `target_height` are used.
    target_height : float
        Height for which the parameter is approximated (e.g. hub height).

    Returns
    -------
    pandas.Series
        Result of the inter-/extrapolation (e.g. wind speed at hub height).

    Notes
    -----
    The following equation is used:

    .. math:: f(x) = \frac{(f(x_2) - f(x_1))}{(x_2 - x_1)} \cdot
        (x - x_1) + f(x_1)
    """
    # Order the available heights by distance to the target; the two
    # nearest define the interpolation line.
    nearest = sorted(df.columns, key=lambda height: abs(height - target_height))
    h0, h1 = nearest[0], nearest[1]
    slope = (df[h1] - df[h0]) / (h1 - h0)
    return slope * (target_height - h0) + df[h0]
Linear inter- or extrapolates between the values of a data frame.
This function can be used for the inter-/extrapolation of a parameter
(e.g wind speed) available at two or more different heights, to approximate
the value at hub height. The function is carried out when the parameter
`wind_speed_model`, `density_model` or `temperature_model` of an
instance of the :class:`~.modelchain.ModelChain` class is
'interpolation_extrapolation'.
Parameters
----------
df : pandas.DataFrame
DataFrame with time series for parameter that is to be interpolated or
extrapolated. The columns of the DataFrame are the different heights
for which the parameter is available. If more than two heights are
given, the two closest heights are used. See example below on how the
DataFrame should look like and how the function can be used.
target_height : float
Height for which the parameter is approximated (e.g. hub height).
Returns
-------
pandas.Series
Result of the inter-/extrapolation (e.g. wind speed at hub height).
Notes
-----
For the inter- and extrapolation the following equation is used:
.. math:: f(x) = \frac{(f(x_2) - f(x_1))}{(x_2 - x_1)} \cdot
(x - x_1) + f(x_1)
Examples
---------
>>> import numpy as np
>>> import pandas as pd
>>> wind_speed_10m = np.array([[3], [4]])
>>> wind_speed_80m = np.array([[6], [6]])
>>> weather_df = pd.DataFrame(np.hstack((wind_speed_10m,
... wind_speed_80m)),
... index=pd.date_range('1/1/2012',
... periods=2,
... freq='H'),
... columns=[np.array(['wind_speed',
... 'wind_speed']),
... np.array([10, 80])])
>>> value = linear_interpolation_extrapolation(
... weather_df['wind_speed'], 100)[0] |
def replace_all_post_order(expression: Expression, rules: Iterable[ReplacementRule]) \
        -> Union[Expression, Sequence[Expression]]:
    """Replace all occurrences of the patterns according to the replacement rules.

    A replacement rule consists of a *pattern*, matched against any
    subexpression of *expression*, and a *replacement* callback invoked with
    the variables of the match substitution. The callback's return value —
    a single expression or a sequence of expressions — replaces the matched
    subexpression in place within the surrounding operation.

    Note that the pattern can therefore not be a single sequence
    variable/wildcard, because only single expressions will be matched.

    Args:
        expression:
            The expression to which the replacement rules are applied.
        rules:
            A collection of replacement rules that are applied to the expression.

    Returns:
        The resulting expression after applying the replacement rules. This
        can also be a sequence of expressions, if the root expression is
        replaced with a sequence of expressions by a rule.
    """
    # The helper returns a tuple whose first element is the rewritten
    # expression (or sequence of expressions).
    outcome = _replace_all_post_order(expression, rules)
    return outcome[0]
A replacement rule consists of a *pattern*, that is matched against any subexpression
of the expression. If a match is found, the *replacement* callback of the rule is called with
the variables from the match substitution. Whatever the callback returns is used as a replacement for the
matched subexpression. This can either be a single expression or a sequence of expressions, which is then
integrated into the surrounding operation in place of the subexpression.
Note that the pattern can therefore not be a single sequence variable/wildcard, because only single expressions
will be matched.
Args:
expression:
The expression to which the replacement rules are applied.
rules:
A collection of replacement rules that are applied to the expression.
max_count:
If given, at most *max_count* applications of the rules are performed. Otherwise, the rules
are applied until there is no more match. If the set of replacement rules is not confluent,
the replacement might not terminate without a *max_count* set.
Returns:
The resulting expression after the application of the replacement rules. This can also be a sequence of
expressions, if the root expression is replaced with a sequence of expressions by a rule. |
def illumination(x, gamma=1., contrast=1., saturation=1., is_random=False):
    """Perform illumination augmentation for a single image, randomly or non-randomly.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    gamma : float
        Change brightness (the same with ``tl.prepro.brightness``)
            - if is_random=False, one float number, small than one means brighter, greater than one means darker.
            - if is_random=True, tuple of two float numbers, (min, max).
    contrast : float
        Change contrast.
            - if is_random=False, one float number, small than one means blur.
            - if is_random=True, tuple of two float numbers, (min, max).
    saturation : float
        Change saturation.
            - if is_random=False, one float number, small than one means unsaturation.
            - if is_random=True, tuple of two float numbers, (min, max).
    is_random : boolean
        If True, randomly change illumination. Default is False.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    ---------
    Random

    >>> x = tl.prepro.illumination(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True)

    Non-random

    >>> x = tl.prepro.illumination(x, 0.5, 0.6, 0.8, is_random=False)

    """
    if is_random:
        # In random mode each argument must be a (min, max) pair.
        if not (len(gamma) == len(contrast) == len(saturation) == 2):
            raise AssertionError("if is_random = True, the arguments are (min, max)")

        ## random change brightness # small --> brighter
        illum_settings = np.random.randint(0, 3)  # 0-brighter, 1-darker, 2 keep normal

        if illum_settings == 0:  # brighter
            gamma = np.random.uniform(gamma[0], 1.0)  # (.5, 1.0)
        elif illum_settings == 1:  # darker
            gamma = np.random.uniform(1.0, gamma[1])  # (1.0, 5.0)
        else:
            gamma = 1
        im_ = brightness(x, gamma=gamma, gain=1, is_random=False)

        # tl.logging.info("using contrast and saturation")
        # PIL enhancers: a factor of 1.0 leaves the image unchanged.
        image = PIL.Image.fromarray(im_)  # array -> PIL
        contrast_adjust = PIL.ImageEnhance.Contrast(image)
        image = contrast_adjust.enhance(np.random.uniform(contrast[0], contrast[1]))  # 0.3, 0.9
        saturation_adjust = PIL.ImageEnhance.Color(image)
        image = saturation_adjust.enhance(np.random.uniform(saturation[0], saturation[1]))  # 0.7, 1.0
        im_ = np.array(image)  # PIL -> array
    else:
        # Deterministic mode: apply the given factors directly.
        im_ = brightness(x, gamma=gamma, gain=1, is_random=False)
        image = PIL.Image.fromarray(im_)  # array -> PIL
        contrast_adjust = PIL.ImageEnhance.Contrast(image)
        image = contrast_adjust.enhance(contrast)
        saturation_adjust = PIL.ImageEnhance.Color(image)
        image = saturation_adjust.enhance(saturation)
        im_ = np.array(image)  # PIL -> array
    return np.asarray(im_)
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
gamma : float
Change brightness (the same with ``tl.prepro.brightness``)
- if is_random=False, one float number, small than one means brighter, greater than one means darker.
- if is_random=True, tuple of two float numbers, (min, max).
contrast : float
Change contrast.
- if is_random=False, one float number, small than one means blur.
- if is_random=True, tuple of two float numbers, (min, max).
saturation : float
Change saturation.
- if is_random=False, one float number, small than one means unsaturation.
- if is_random=True, tuple of two float numbers, (min, max).
is_random : boolean
If True, randomly change illumination. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random
>>> x = tl.prepro.illumination(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True)
Non-random
>>> x = tl.prepro.illumination(x, 0.5, 0.6, 0.8, is_random=False) |
def adjust_brightness_contrast(image, brightness=0., contrast=0.):
    """
    Adjust the brightness and/or contrast of an image.

    :param image: OpenCV BGR image
    :param contrast: Float, contrast adjustment with 0 meaning no change
    :param brightness: Float, brightness adjustment with 0 meaning no change
    """
    # addWeighted computes alpha*image + beta*image + gamma; with beta = 0
    # alpha scales pixel values (contrast) and gamma shifts them
    # (brightness). See the OpenCV docs:
    # https://docs.opencv.org/3.4.2/d2/de8/group__core__array.html#gafafb2513349db3bcff51f54ee5592a19
    alpha = 1 + float(contrast) / 100.
    gamma = float(brightness)
    return cv2.addWeighted(image, alpha, image, 0, gamma)
:param image: OpenCV BGR image
:param contrast: Float, contrast adjustment with 0 meaning no change
:param brightness: Float, brightness adjustment with 0 meaning no change |
def update_series(self, series):
    """Update a series with new attributes. This does not change
    any of the data written to this series. The recommended workflow for
    series updates is to pull a Series object down using the
    :meth:`get_series` method, change its attributes, then pass it into
    this method.

    :param series: the series to update
    :type series: `tempodb.protocol.Series` object
    :rtype: :class:`tempodb.response.Response` object with the updated
            :class:`tempodb.protocol.objects.Series` as the data payload"""
    url = make_series_url(series.key)
    payload = series.to_json()
    return self.session.put(url, payload)
any of the data written to this series. The recommended workflow for
series updates is to pull a Series object down using the
:meth:`get_series` method, change its attributes, then pass it into
this method.
:param series: the series to update
:type series: `tempodb.protocol.Series` object
:rtype: :class:`tempodb.response.Response` object with the updated
:class:`tempodb.protocol.objects.Series` as the data payload |
def filter_empty_parameters(func):
    """Decorator that filters out empty parameters before calling *func*.

    Skips the wrapped call entirely when, after filtering, there are no
    positional values and the remaining keywords are at most
    ``source``/``material``.

    :param func: function that you want wrapping
    :type func: function
    """
    @wraps(func)
    def func_wrapper(self, *args, **kwargs):
        # Keep only keyword arguments that carry a non-empty value.
        meaningful = {key: value for key, value in kwargs.items()
                      if value not in EMPTIES}
        no_positional = all(arg in EMPTIES for arg in args)
        trivial_kwargs = ({'source', 'material'}.issuperset(meaningful)
                          or not meaningful)
        if trivial_kwargs and no_positional:
            return
        return func(self, *args, **meaningful)
    return func_wrapper
:param func: function that you want wrapping
:type func: function |
def compress(x, y):
    """
    Given an x,y coordinate, encode it in "compressed format".

    The result is always 33 bytes: a parity prefix byte (02 for even y,
    03 for odd y) followed by the 32-byte big-endian x coordinate.
    """
    prefix = "02" if y % 2 == 0 else "03"
    hex_repr = "%s%0.64x" % (prefix, x)
    # On Python 3 unhexlify wants ASCII bytes; Python 2 str passes through.
    if not is_py2:
        hex_repr = bytes(hex_repr, 'ascii')
    return unhexlify(hex_repr)
Returned is always 33 bytes. |
def translate_path(self, path):
    """Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored. (XXX They should
    probably be diagnosed.)
    """
    # Strip query string and fragment before resolving the path.
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    path = posixpath.normpath(urllib_parse.unquote(path))
    result = os.getcwd()
    for component in filter(None, path.split('/')):
        # Discard drive letters and directory parts of each component,
        # and skip '.'/'..' entirely.
        drive, component = os.path.splitdrive(component)
        head, component = os.path.split(component)
        if component in (os.curdir, os.pardir):
            continue
        result = os.path.join(result, component)
    return result
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.) |
def IterAssociatorInstancePaths(self, InstanceName, AssocClass=None,
                                ResultClass=None,
                                Role=None, ResultRole=None,
                                FilterQueryLanguage=None, FilterQuery=None,
                                OperationTimeout=None, ContinueOnError=None,
                                MaxObjectCount=DEFAULT_ITER_MAXOBJECTCOUNT,
                                **extra):
    # pylint: disable=invalid-name
    """
    Retrieve the instance paths of the instances associated to a source
    instance, using the Python :term:`py:generator` idiom to return the
    result.

    *New in pywbem 0.10 as experimental and finalized in 0.12.*

    This method uses the corresponding pull operations if supported by the
    WBEM server or otherwise the corresponding traditional operation.
    This method is an alternative to using the pull operations directly,
    that frees the user of having to know whether the WBEM server supports
    pull operations.

    This method is a generator function that retrieves instance paths from
    the WBEM server and returns them one by one (using :keyword:`yield`)
    when the caller iterates through the returned generator object. The
    number of instance paths that are retrieved from the WBEM server in one
    request (and thus need to be materialized in this method) is up to the
    `MaxObjectCount` parameter if the corresponding pull operations are
    used, or the complete result set all at once if the corresponding
    traditional operation is used.

    By default, this method attempts to perform the corresponding pull
    operations
    (:meth:`~pywbem.WBEMConnection.OpenAssociatorInstancePaths` and
    :meth:`~pywbem.WBEMConnection.PullInstancePaths`).
    If these pull operations are not supported by the WBEM server, this
    method falls back to using the corresponding traditional operation
    (:meth:`~pywbem.WBEMConnection.AssociatorNames`).
    Whether the WBEM server supports these pull operations is remembered
    in the :class:`~pywbem.WBEMConnection` object (by operation type), and
    avoids unnecessary attempts to try these pull operations on that
    connection in the future.

    The `use_pull_operations` init parameter of
    :class:`~pywbem.WBEMConnection` can be used to control the preference
    for always using pull operations, always using traditional operations,
    or using pull operations if supported by the WBEM server (the default).

    This method provides all of the controls of the corresponding pull
    operations except for the ability to set different response sizes on
    each request; the response size (defined by the `MaxObjectCount`
    parameter) is the same for all pull operations in the enumeration
    session.

    In addition, some functionality is only available if the corresponding
    pull operations are used by this method:

    * Filtering is not supported for the corresponding traditional
      operation so that setting the `FilterQuery` or `FilterQueryLanguage`
      parameters will be rejected if the corresponding traditional
      operation is used by this method.
      Note that this limitation is not a disadvantage compared to using the
      corresponding pull operations directly, because in both cases, the
      WBEM server must support the pull operations and their filtering
      capability in order for the filtering to work.

    * Setting the `ContinueOnError` parameter to `True` will be rejected if
      the corresponding traditional operation is used by this method.

    The enumeration session that is opened with the WBEM server when using
    pull operations is closed automatically when the returned generator
    object is exhausted, or when the generator object is closed using its
    :meth:`~py:generator.close` method (which may also be called before the
    generator is exhausted).

    Parameters:

      InstanceName (:class:`~pywbem.CIMInstanceName`):
        The instance path of the source instance.
        If this object does not specify a namespace, the default namespace
        of the connection is used.
        Its `host` attribute will be ignored.

      AssocClass (:term:`string` or :class:`~pywbem.CIMClassName`):
        Class name of an association class (case independent),
        to filter the result to include only traversals of that association
        class (or subclasses).
        `None` means that no such filtering is performed.

      ResultClass (:term:`string` or :class:`~pywbem.CIMClassName`):
        Class name of an associated class (case independent),
        to filter the result to include only traversals to that associated
        class (or subclasses).
        `None` means that no such filtering is performed.

      Role (:term:`string`):
        Role name (= property name) of the source end (case independent),
        to filter the result to include only traversals from that source
        role.
        `None` means that no such filtering is performed.

      ResultRole (:term:`string`):
        Role name (= property name) of the far end (case independent),
        to filter the result to include only traversals to that far
        role.
        `None` means that no such filtering is performed.

      FilterQueryLanguage (:term:`string`):
        The name of the filter query language used for the `FilterQuery`
        parameter. The DMTF-defined Filter Query Language (see
        :term:`DSP0212`) is specified as "DMTF:FQL".
        If this parameter is not `None` and the traditional operation is
        used by this method, :exc:`~py:exceptions.ValueError` will be
        raised.
        Not all WBEM servers support filtering for this operation because
        it returns instance paths and the act of the server filtering
        requires that it generate instances just for that purpose and then
        discard them.

      FilterQuery (:term:`string`):
        The filter query in the query language defined by the
        `FilterQueryLanguage` parameter.
        If this parameter is not `None` and the traditional operation is
        used by this method, :exc:`~py:exceptions.ValueError` will be
        raised.

      OperationTimeout (:class:`~pywbem.Uint32`):
        Minimum time in seconds the WBEM Server shall maintain an open
        enumeration session after a previous Open or Pull request is
        sent to the client. Once this timeout time has expired, the
        WBEM server may close the enumeration session.

        * If not `None`, this parameter is sent to the WBEM server as the
          proposed timeout for the enumeration session. A value of 0
          indicates that the server is expected to never time out. The
          server may reject the proposed value, causing a
          :class:`~pywbem.CIMError` to be raised with status code
          :attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
        * If `None`, this parameter is not passed to the WBEM server, and
          causes the server-implemented default timeout to be used.

      ContinueOnError (:class:`py:bool`):
        Indicates to the WBEM server to continue sending responses
        after an error response has been sent.

        * If `True`, the server is to continue sending responses after
          sending an error response. Not all servers support continuation
          on error; a server that does not support it must send an error
          response if `True` was specified, causing
          :class:`~pywbem.CIMError` to be raised with status code
          :attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
          If the corresponding traditional operation is used by this
          method, :exc:`~py:exceptions.ValueError` will be raised.
        * If `False`, the server is requested to close the enumeration
          after sending an error response.
        * If `None`, this parameter is not passed to the WBEM server, and
          causes the server-implemented default behaviour to be used.
          :term:`DSP0200` defines that the server-implemented default is
          `False`.

      MaxObjectCount (:class:`~pywbem.Uint32`)
        Maximum number of instances the WBEM server may return for each of
        the open and pull requests issued during the iterations over the
        returned generator object.

        * If positive, the WBEM server is to return no more than the
          specified number of instances.
        * Zero is not allowed; it would mean that zero instances
          are to be returned for open and all pull requests issued to the
          server.
        * The default is defined as a system config variable.
        * `None` is not allowed.

      **extra :
        Additional keyword arguments are passed as additional operation
        parameters to the WBEM server.
        Note that :term:`DSP0200` does not define any additional parameters
        for this operation.

    Returns:

      :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`:
        A generator object that iterates the resulting CIM instance paths.
        These instance paths have their host and namespace components set.

    Raises:

        Exceptions described in :class:`~pywbem.WBEMConnection`.

    Example::

        paths_generator = conn.IterAssociatorInstancePaths('CIM_Blah')
        for path in paths_generator:
            print('path {0}'.format(path))
    """
    _validateIterCommonParams(MaxObjectCount, OperationTimeout)
    # Common variable for pull result tuple used by pulls and finally:
    pull_result = None
    try:    # try / finally block to allow iter.close()
        # Try pull operations unless a previous attempt on this connection
        # determined that the server does not support them.
        if (self._use_assoc_path_pull_operations is None or
                self._use_assoc_path_pull_operations):
            try:    # Open operation try block
                pull_result = self.OpenAssociatorInstancePaths(
                    InstanceName,
                    AssocClass=AssocClass,
                    ResultClass=ResultClass,
                    Role=Role,
                    ResultRole=ResultRole,
                    FilterQueryLanguage=FilterQueryLanguage,
                    FilterQuery=FilterQuery,
                    OperationTimeout=OperationTimeout,
                    ContinueOnError=ContinueOnError,
                    MaxObjectCount=MaxObjectCount, **extra)
                # Open operation succeeded; set use_pull flag
                self._use_assoc_path_pull_operations = True
                for inst in pull_result.paths:
                    yield inst
                # Keep pulling until the server signals end-of-sequence.
                while not pull_result.eos:
                    pull_result = self.PullInstancePaths(
                        pull_result.context, MaxObjectCount=MaxObjectCount)
                    for inst in pull_result.paths:
                        yield inst
                pull_result = None   # clear the pull_result
                return
            # If NOT_SUPPORTED and first request, set flag and try
            # alternative request operation.
            # If use_pull_operations is True, always raise the exception
            except CIMError as ce:
                if (self._use_assoc_path_pull_operations is None and
                        ce.status_code == CIM_ERR_NOT_SUPPORTED):
                    self._use_assoc_path_pull_operations = False
                else:
                    raise
        # Alternate request if Pull not implemented. This does not allow
        # the FilterQuery or ContinueOnError
        assert self._use_assoc_path_pull_operations is False
        if FilterQuery is not None or FilterQueryLanguage is not None:
            raise ValueError('AssociatorNames does not support'
                             ' FilterQuery.')
        if ContinueOnError is not None:
            raise ValueError('AssociatorNames does not support '
                             'ContinueOnError.')
        enum_rslt = self.AssociatorNames(
            InstanceName,
            AssocClass=AssocClass,
            ResultClass=ResultClass,
            Role=Role,
            ResultRole=ResultRole, **extra)
        for inst in enum_rslt:
            yield inst
    # Cleanup if caller closes the iterator before exhausting it
    finally:
        # Cleanup only required if the pull context is open and not complete
        if pull_result is not None and not pull_result.eos:
            self.CloseEnumeration(pull_result.context)
pull_result = None | Retrieve the instance paths of the instances associated to a source
instance, using the Python :term:`py:generator` idiom to return the
result.
*New in pywbem 0.10 as experimental and finalized in 0.12.*
This method uses the corresponding pull operations if supported by the
WBEM server or otherwise the corresponding traditional operation.
This method is an alternative to using the pull operations directly,
that frees the user of having to know whether the WBEM server supports
pull operations.
This method is a generator function that retrieves instance paths from
the WBEM server and returns them one by one (using :keyword:`yield`)
when the caller iterates through the returned generator object. The
number of instance paths that are retrieved from the WBEM server in one
request (and thus need to be materialized in this method) is up to the
`MaxObjectCount` parameter if the corresponding pull operations are
used, or the complete result set all at once if the corresponding
traditional operation is used.
By default, this method attempts to perform the corresponding pull
operations
(:meth:`~pywbem.WBEMConnection.OpenAssociatorInstancePaths` and
:meth:`~pywbem.WBEMConnection.PullInstancePaths`).
If these pull operations are not supported by the WBEM server, this
method falls back to using the corresponding traditional operation
(:meth:`~pywbem.WBEMConnection.AssociatorNames`).
Whether the WBEM server supports these pull operations is remembered
in the :class:`~pywbem.WBEMConnection` object (by operation type), and
avoids unnecessary attempts to try these pull operations on that
connection in the future.
The `use_pull_operations` init parameter of
:class:`~pywbem.WBEMConnection` can be used to control the preference
for always using pull operations, always using traditional operations,
or using pull operations if supported by the WBEM server (the default).
This method provides all of the controls of the corresponding pull
operations except for the ability to set different response sizes on
each request; the response size (defined by the `MaxObjectCount`
parameter) is the same for all pull operations in the enumeration
session.
In addition, some functionality is only available if the corresponding
pull operations are used by this method:
* Filtering is not supported for the corresponding traditional
operation so that setting the `FilterQuery` or `FilterQueryLanguage`
parameters will be rejected if the corresponding traditional
operation is used by this method.
Note that this limitation is not a disadvantage compared to using the
corresponding pull operations directly, because in both cases, the
WBEM server must support the pull operations and their filtering
capability in order for the filtering to work.
* Setting the `ContinueOnError` parameter to `True` will be rejected if
the corresponding traditional operation is used by this method.
The enumeration session that is opened with the WBEM server when using
pull operations is closed automatically when the returned generator
object is exhausted, or when the generator object is closed using its
:meth:`~py:generator.close` method (which may also be called before the
generator is exhausted).
Parameters:
InstanceName (:class:`~pywbem.CIMInstanceName`):
The instance path of the source instance.
If this object does not specify a namespace, the default namespace
of the connection is used.
Its `host` attribute will be ignored.
AssocClass (:term:`string` or :class:`~pywbem.CIMClassName`):
Class name of an association class (case independent),
to filter the result to include only traversals of that association
class (or subclasses).
`None` means that no such filtering is performed.
ResultClass (:term:`string` or :class:`~pywbem.CIMClassName`):
Class name of an associated class (case independent),
to filter the result to include only traversals to that associated
class (or subclasses).
`None` means that no such filtering is performed.
Role (:term:`string`):
Role name (= property name) of the source end (case independent),
to filter the result to include only traversals from that source
role.
`None` means that no such filtering is performed.
ResultRole (:term:`string`):
Role name (= property name) of the far end (case independent),
to filter the result to include only traversals to that far
role.
`None` means that no such filtering is performed.
FilterQueryLanguage (:term:`string`):
The name of the filter query language used for the `FilterQuery`
parameter. The DMTF-defined Filter Query Language (see
:term:`DSP0212`) is specified as "DMTF:FQL".
If this parameter is not `None` and the traditional operation is
used by this method, :exc:`~py:exceptions.ValueError` will be
raised.
Not all WBEM servers support filtering for this operation because
it returns instance paths and the act of the server filtering
requires that it generate instances just for that purpose and then
discard them.
FilterQuery (:term:`string`):
The filter query in the query language defined by the
`FilterQueryLanguage` parameter.
If this parameter is not `None` and the traditional operation is
used by this method, :exc:`~py:exceptions.ValueError` will be
raised.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
If the corresponding traditional operation is used by this
method, :exc:`~py:exceptions.ValueError` will be raised.
* If `False`, the server is requested to close the enumeration
after sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`)
Maximum number of instances the WBEM server may return for each of
the open and pull requests issued during the iterations over the
returned generator object.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* Zero is not allowed; it would mean that zero instances
are to be returned for open and all pull requests issued to the
server.
* The default is defined as a system config variable.
* `None` is not allowed.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
:term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`:
A generator object that iterates the resulting CIM instance paths.
These instance paths have their host and namespace components set.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
paths_generator = conn.IterAssociatorInstancePaths('CIM_Blah')
for path in paths_generator:
print('path {0}'.format(path)) |
def add_chan(self, chan, color=None, values=None, limits_c=None,
             colormap=CHAN_COLORMAP, alpha=None, colorbar=False):
    """Add channels to visualization

    Parameters
    ----------
    chan : instance of Channels
        channels to plot
    color : tuple
        3-, 4-element tuple, representing RGB and alpha, between 0 and 1
    values : ndarray
        array with values for each channel
    limits_c : tuple of 2 floats, optional
        min and max values to normalize the color
    colormap : str
        one of the colormaps in vispy
    alpha : float
        transparency (0 = transparent, 1 = opaque)
    colorbar : bool
        add a colorbar at the back of the surface
    """
    if limits_c is None and self._chan_limits is not None:
        # Fall back to the limits computed by a previous call.
        limits_c = self._chan_limits
    colors, limits = _prepare_colors(
        color=color, values=values, limits_c=limits_c,
        colormap=colormap, alpha=alpha, chan=chan)
    self._chan_limits = limits
    markers = Markers()
    markers.set_data(pos=chan.return_xyz(), size=CHAN_SIZE,
                     face_color=colors)
    self._add_mesh(markers)
    if colorbar:
        self._view.add(_colorbar_for_surf(colormap, limits))
def titlefy(subject):
    """Titlecase the provided subject while respecting common abbreviations.

    Returns ``None`` if *subject* is ``None`` and an empty string if it is
    empty.

    `subject`
        A cable's subject.
    """
    def _clean(word):
        def _fix(m):
            # A comma inside the match was an OCR artefact for an apostrophe.
            middle = u"'" if m.group(2) == ',' else m.group(2)
            return u'%s%s%s' % (m.group(1), middle, m.group(3).lower())
        return _APOS_PATTERN.sub(_fix, word)

    def _title(word):
        if _is_number(word):
            return word.lower()
        if _TITLEFY_BIG_PATTERN.match(word):
            return _clean(word.upper())
        return _clean(_SPECIAL_WORDS.get(word, word.title()))

    if not subject:
        return None if subject is None else u''
    words = subject.strip().split()
    res = [_title(words[0])]
    for word in words[1:]:
        if _TITLEFY_SMALL_PATTERN.match(word) and res[-1][-1] not in ':-':
            if word == u'A' and res[-1] == u'and' and res[-2] == 'Q':
                # Special-case "Q and A".
                res.append(word.upper())
            else:
                res.append(word.lower())
        else:
            res.append(_title(word))
    return u' '.join(res)
def _process_output_source_directive(schema, current_schema_type, ast,
                                     location, context, local_unique_directives):
    """Process the output_source directive, modifying the context as appropriate.

    Args:
        schema: GraphQL schema object, obtained from the graphql library
        current_schema_type: GraphQLType, the schema type at the current location
        ast: GraphQL AST node, obtained from the graphql library
        location: Location object representing the current location in the query
        context: dict, various per-compilation data (e.g. declared tags, whether the
                 current block is optional, etc.). May be mutated in-place!
        local_unique_directives: dict, directive name string -> directive object,
                                 containing unique directives present on the current
                                 AST node *only*

    Returns:
        an OutputSource block, if one should be emitted, or None otherwise
    """
    # 'schema', 'current_schema_type' and 'ast' exist only for signature
    # uniformity with the other directive processors.
    directive = local_unique_directives.get('output_source', None)
    if not directive:
        return None
    if has_encountered_output_source(context):
        raise GraphQLCompilationError(u'Cannot have more than one output source!')
    if is_in_optional_scope(context):
        raise GraphQLCompilationError(u'Cannot have the output source in an optional block!')
    set_output_source_data(context, location)
    return blocks.OutputSource()
def integer(description, **kwargs) -> typing.Type:
    """Create a :class:`~doctor.types.Integer` type.

    :param description: A description of the type.
    :param kwargs: Can include any attribute defined in
        :class:`~doctor.types.Integer`
    """
    # Build the class attributes without mutating the caller-visible kwargs.
    attrs = dict(kwargs, description=description)
    return type('Integer', (Integer,), attrs)
def toFilename(url):
    '''
    Derive a safe, lowercase filename from a URL's path component.
    '''
    path = urlparse(url).path
    if not path:
        # No path component: fall back to a timestamp-based name.
        path = "file_{}".format(int(time.time()))
    # Replace anything that is not word/whitespace/dot/dash, then collapse
    # runs of dashes and whitespace; cap the length at 200 characters.
    cleaned = re.sub(r'[^\w\s\.\-]', '-', path).strip().lower()
    return re.sub(r'[-\s]+', '-', cleaned).strip("-")[-200:]
def map_constructor(self, loader, node, deep=False):
    """Construct a dict from a mapping node, rejecting duplicate keys."""
    result = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        value = loader.construct_object(value_node, deep=deep)
        if key in result:
            # A repeated key would silently shadow an earlier entry.
            raise ValueError(f"Duplicate key: \"{key}\"")
        result[key] = value
    return result
def do_continue(self, arg):
    """
    continue - continue execution
    g - continue execution
    go - continue execution
    """
    # This command accepts neither a prefix nor arguments.
    if self.cmdprefix:
        raise CmdError("prefix not allowed")
    if arg:
        raise CmdError("too many arguments")
    # Returning True tells the command loop to resume execution,
    # but only when there is at least one debuggee process.
    count = self.debug.get_debugee_count()
    if count > 0:
        return True
def _check_holiday_structure(self, times):
""" To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception
"""
if not isinstance(times, list):
raise TypeError("an list is required")
for time in times:
if not isinstance(time, tuple):
raise TypeError("a tuple is required")
if len(time) > 5:
raise TypeError("Target time takes at most 5 arguments"
" ('%d' given)" % len(time))
if len(time) < 5:
raise TypeError("Required argument '%s' (pos '%d')"
" not found" % (TIME_LABEL[len(time)], len(time)))
self._check_time_format(TIME_LABEL, time) | To check the structure of the HolidayClass
:param list times: years or months or days or number week
:rtype: None or Exception
:return: in the case of exception returns the exception |
def _submit_request(self):
    """Submit a request to the ACS Zeropoint Calculator.

    On success the response is stored in ``self._response`` and
    ``self._failed`` is cleared; on failure an explanatory message is
    logged and ``self._failed`` is set.
    """
    try:
        self._response = urlopen(self._url)
    except URLError as e:
        LOG.error(
            '{}\n{}\nThe query failed! Please check your inputs. '
            'If the error persists, submit a ticket to the '
            'ACS Help Desk at hsthelp.stsci.edu with the error message '
            'displayed above.'.format(str(e), self._msg_div))
        self._failed = True
    else:
        self._failed = False
def merge(self, elements):
    '''Merge all scraping results into a list sorted by frequency of occurrence.'''
    from collections import Counter
    from lltk.utils import list2tuple, tuple2list
    # Lists are unhashable, so convert to tuples before counting and
    # convert the most-common ordering back to lists afterwards.
    counted = Counter(list2tuple(list(elements))).most_common()
    return tuple2list([element for element, _count in counted])
def nice_display(item):
    """Render M2M managers as a comma-separated list; pass other values through."""
    if not hasattr(item, 'all'):
        return item
    # RelatedManager: join the string form of every related object.
    return ', '.join(text_type(obj) for obj in item.all())
def set_resolved_url(self, item=None, subtitles=None):
    '''Resolve a playable URL or list item for XBMC. Used in conjunction
    with a playable list item with a path that calls back into your addon.

    :param item: A playable list item or url. Pass None to alert XBMC of a
        failure to resolve the item.

        .. warning:: When using set_resolved_url you should ensure the
            initial playable item (which calls back into your addon)
            doesn't have a trailing slash in the URL. Otherwise it won't
            work reliably with XBMC's PlayMedia().
    :param subtitles: A URL to a remote subtitles file or a local filename
        for a subtitles file to be played along with the item.
    '''
    if self._end_of_directory:
        raise Exception('Current XBMC handle has been removed. Either '
                        'set_resolved_url(), end_of_directory(), or '
                        'finish() has already been called.')
    self._end_of_directory = True
    # A None item signals XBMC that resolving the URL failed.
    succeeded = item is not None
    if item is None:
        item = {}
    elif isinstance(item, basestring):
        # A bare URL becomes a minimal item dict.
        item = {'path': item}
    item = self._listitemify(item)
    item.set_played(True)
    xbmcplugin.setResolvedUrl(self.handle, succeeded,
                              item.as_xbmc_listitem())
    # _add_subtitles must run after setResolvedUrl.
    if subtitles:
        self._add_subtitles(subtitles)
    return [item]
def add_row(self, key: str, default: str=None,
            unit_label: str=None, enable: bool=None):
    """
    Add a single row and re-draw as necessary
    :param key: the name and dict accessor
    :param default: the default value
    :param unit_label: the label that should be \
    applied at the right of the entry
    :param enable: the 'enabled' state (defaults to True)
    :return:
    """
    # Append the new row's widgets and metadata to the parallel lists.
    self.keys.append(ttk.Label(self, text=key))
    self.defaults.append(default)
    self.unit_labels.append(
        ttk.Label(self, text=unit_label if unit_label else '')
    )
    self.enables.append(enable)
    self.values.append(ttk.Entry(self))
    # Row 0 is reserved for the title, when one was supplied.
    row_offset = 1 if self.title is not None else 0
    for i in range(len(self.keys)):
        # Re-grid every row from scratch so the new row slots in cleanly.
        self.keys[i].grid_forget()
        self.keys[i].grid(row=row_offset, column=0, sticky='e')
        self.values[i].grid(row=row_offset, column=1)
        if self.unit_labels[i]:
            self.unit_labels[i].grid(row=row_offset, column=3, sticky='w')
        if self.defaults[i]:
            # Re-populate the entry with its default value (entry must be
            # writable for delete/insert to take effect).
            self.values[i].config(state=tk.NORMAL)
            self.values[i].delete(0, tk.END)
            self.values[i].insert(0, self.defaults[i])
        if self.enables[i] in [True, None]:
            self.values[i].config(state=tk.NORMAL)
        elif self.enables[i] is False:
            self.values[i].config(state=tk.DISABLED)
        row_offset += 1
        # strip <Return> and <Tab> bindings, add callbacks to all entries
        self.values[i].unbind('<Return>')
        self.values[i].unbind('<Tab>')
        if self.callback is not None:
            def callback(event):
                # Shared handler: forwards to self.callback, the event
                # argument is intentionally ignored.
                self.callback()
            self.values[i].bind('<Return>', callback)
self.values[i].bind('<Tab>', callback) | Add a single row and re-draw as necessary
:param key: the name and dict accessor
:param default: the default value
:param unit_label: the label that should be \
applied at the right of the entry
:param enable: the 'enabled' state (defaults to True)
:return: |
def info(zone, show_all=False):
    '''
    Display the configuration from memory

    zone : string
        name of zone
    show_all : boolean
        also include calculated values like capped-cpu, cpu-shares, ...

    CLI Example:

    .. code-block:: bash

        salt '*' zonecfg.info tallgeese
    '''
    ret = {}
    # dump zone
    res = __salt__['cmd.run_all']('zonecfg -z {zone} info'.format(
        zone=zone,
    ))
    if res['retcode'] == 0:
        # parse output
        resname = None
        resdata = {}
        for line in res['stdout'].split("\n"):
            # skip some bad data
            if ':' not in line:
                continue
            # skip calculated values (if requested)
            if line.startswith('['):
                if not show_all:
                    continue
                line = line.rstrip()[1:-1]
            # extract key
            key = line.strip().split(':')[0]
            if '[' in key:
                key = key[1:]
            # parse calculated resource (if requested)
            if key in _zonecfg_info_resources_calculated:
                if resname:
                    ret[resname].append(resdata)
                if show_all:
                    resname = key
                    resdata = {}
                    if key not in ret:
                        ret[key] = []
                else:
                    resname = None
                    resdata = {}
            # parse resources
            elif key in _zonecfg_info_resources:
                if resname:
                    ret[resname].append(resdata)
                resname = key
                resdata = {}
                if key not in ret:
                    ret[key] = []
            # store resource property
            elif line.startswith("\t"):
                # skip calculated values (if requested)
                if line.strip().startswith('['):
                    if not show_all:
                        continue
                    line = line.strip()[1:-1]
                if key == 'property':  # handle special 'property' keys
                    if 'property' not in resdata:
                        resdata[key] = {}
                    kv = _parse_value(line.strip()[line.strip().index(':')+1:])
                    if 'name' in kv and 'value' in kv:
                        resdata[key][kv['name']] = kv['value']
                    else:
                        log.warning('zonecfg.info - not sure how to deal with: %s', kv)
                else:
                    resdata[key] = _parse_value(line.strip()[line.strip().index(':')+1:])
            # store property
            else:
                if resname:
                    ret[resname].append(resdata)
                resname = None
                resdata = {}
                if key == 'property':  # handle special 'property' keys
                    if 'property' not in ret:
                        ret[key] = {}
                    kv = _parse_value(line.strip()[line.strip().index(':')+1:])
                    if 'name' in kv and 'value' in kv:
                        # BUG FIX: this wrote into ``res`` (the raw cmd.run_all
                        # result, which has no 'property' key, so it raised
                        # KeyError) instead of the return dict ``ret``.
                        ret[key][kv['name']] = kv['value']
                    else:
                        log.warning('zonecfg.info - not sure how to deal with: %s', kv)
                else:
                    ret[key] = _parse_value(line.strip()[line.strip().index(':')+1:])
        # store hanging resource
        if resname:
            ret[resname].append(resdata)
    return ret
def print(self, indent=0):
    """Print a colorized one-line summary of this option, indented by
    *indent* spaces, followed by its wrapped description when one exists."""
    fields = dict(
        indent=' ' * indent,
        dim=Style.DIM,
        magenta=Fore.MAGENTA,
        none=Style.RESET_ALL,
        name=self.name,
        cls=self.cls,
        default=self.default,
    )
    summary = (
        '{indent}{magenta}{name}{none} ({dim}{cls}{none}, '
        'default {dim}{default}{none})'
    ).format(**fields)
    if self.description:
        # Description goes on its own line(s), indented two columns deeper.
        summary += ':\n' + pretty_description(self.description,
                                              indent=indent + 2)
    print(summary)
def check_lon(self, dataset):
    '''
    Validate the longitude variable of ``dataset`` against the profile:

    float lon(timeSeries) ;  // data type may be int or double instead of float
        lon:long_name = "" ;               // RECOMMENDED
        lon:standard_name = "longitude" ;  // REQUIRED - fixed, do not change
        lon:units = "degrees_east" ;       // REQUIRED - CF recommends degrees_east, at least UDUNITS
        lon:axis = "X" ;                   // REQUIRED - do not change
        lon:valid_min / lon:valid_max ;    // RECOMMENDED
        lon:_FillValue ;                   // REQUIRED if data can contain missing values
        lon:ancillary_variables = "" ;     // RECOMMENDED
        lon:comment = "" ;                 // RECOMMENDED

    Returns a list of Result objects (one HIGH-priority required-attribute
    result, one MEDIUM-priority recommended-attribute result), or a single
    failing Result when no longitude variable exists at all.
    '''
    results = []
    lon = util.get_lon_variable(dataset)
    # Without a longitude variable nothing else can be checked.
    if not lon:
        return Result(BaseCheck.HIGH, False, 'longitude',
                      ['a variable for longitude doesn\'t exist'])
    lon_var = dataset.variables[lon]

    required_ctx = TestCtx(
        BaseCheck.HIGH, 'Required attributes for variable {}'.format(lon))
    required_ctx.assert_true(
        getattr(lon_var, 'standard_name', '') == 'longitude',
        'standard_name attribute must be longitude')
    units = getattr(lon_var, 'units', '')
    required_ctx.assert_true(
        units and units_convertible(units, 'degrees_east'),
        'units are valid UDUNITS for longitude')
    required_ctx.assert_true(
        getattr(lon_var, 'axis', '') == 'X',
        '{} axis attribute must be X'.format(lon))
    results.append(required_ctx.to_result())

    recommended_ctx = TestCtx(
        BaseCheck.MEDIUM, 'Recommended attributes for variable {}'.format(lon))
    recommended_ctx.assert_true(
        getattr(lon_var, 'long_name', '') != '',
        'long_name attribute should exist and not be empty')
    self._check_min_max_range(lon_var, recommended_ctx)
    # An absent comment attribute is acceptable; an empty one is not.
    if hasattr(lon_var, 'comment'):
        recommended_ctx.assert_true(
            getattr(lon_var, 'comment', '') != '',
            'comment attribute should not be empty if specified')
    recommended_ctx.assert_true(
        units == 'degrees_east',
        '{} should have units degrees_east'.format(lon))
    results.append(recommended_ctx.to_result())
    return results
lon:long_name = "" ; //...................................... RECOMMENDED
lon:standard_name = "longitude" ; //......................... REQUIRED - This is fixed, do not change.
lon:units = "degrees_east" ; //.............................. REQUIRED - CF recommends degrees_east, but at least use UDUNITS.
lon:axis = "X" ; //.......................................... REQUIRED - Do not change.
lon:valid_min = 0.0f ; //.................................... RECOMMENDED - Replace this with correct value.
lon:valid_max = 0.0f ; //.................................... RECOMMENDED - Replace this with correct value.
lon:_FillValue = 0.0f;//..................................... REQUIRED if there could be missing values in the data.
lon:ancillary_variables = "" ; //............................ RECOMMENDED - List other variables providing information about this variable.
lon:comment = "" ; //........................................ RECOMMENDED - Add useful, additional information here. |
def _find_only_column_of_type(sframe, target_type, type_name, col_name):
    """
    Find the single column in `sframe` whose type matches `target_type`
    (a type or a list of acceptable types).

    If there are zero or more than one such columns, a ToolkitError is
    raised. `type_name` and `col_name` are strings used only to build the
    error messages.

    :return: the name of the unique matching column.
    """
    # Normalize to a list so a single type and a list of types are handled
    # uniformly below.
    if not isinstance(target_type, list):
        target_type = [target_type]
    matched_column = None
    for name, ctype in zip(sframe.column_names(), sframe.column_types()):
        if ctype in target_type:
            if matched_column is not None:
                # A second match makes the choice ambiguous.
                raise ToolkitError('No "{col_name}" column specified and more than one {type_name} column in "dataset". Can not infer correct {col_name} column.'.format(col_name=col_name, type_name=type_name))
            matched_column = name
    if matched_column is None:
        raise ToolkitError('No %s column in "dataset".' % type_name)
    return matched_column
If there are zero or more than one such columns, an exception will be
raised. The name and type of the target column should be provided as
strings for the purpose of error feedback. |
async def do_authentication(sender):
    """
    Executes the authentication process with the Telegram servers.

    Performs the three-step handshake visible below (PQ request, server DH
    parameter exchange, client DH parameter submission) to derive a shared
    authorization key via Diffie-Hellman.

    :param sender: a connected `MTProtoPlainSender`.
    :return: returns a (authorization key, time offset) tuple.
    :raises SecurityError: if any server reply fails a nonce or hash check.
    """
    # Step 1 sending: PQ Request, endianness doesn't matter since it's random
    nonce = int.from_bytes(os.urandom(16), 'big', signed=True)
    res_pq = await sender.send(ReqPqMultiRequest(nonce))
    assert isinstance(res_pq, ResPQ), 'Step 1 answer was %s' % res_pq

    # The server must echo our nonce back, otherwise the reply may be forged.
    if res_pq.nonce != nonce:
        raise SecurityError('Step 1 invalid nonce from server')

    pq = get_int(res_pq.pq)

    # Step 2 sending: DH Exchange
    # Factoring the server-provided number is the client-side proof of work.
    p, q = Factorization.factorize(pq)
    p, q = rsa.get_byte_array(p), rsa.get_byte_array(q)
    new_nonce = int.from_bytes(os.urandom(32), 'little', signed=True)

    pq_inner_data = bytes(PQInnerData(
        pq=rsa.get_byte_array(pq), p=p, q=q,
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        new_nonce=new_nonce
    ))

    # sha_digest + data + random_bytes
    # Try each advertised fingerprint until rsa.encrypt recognises a key.
    cipher_text, target_fingerprint = None, None
    for fingerprint in res_pq.server_public_key_fingerprints:
        cipher_text = rsa.encrypt(fingerprint, pq_inner_data)
        if cipher_text is not None:
            target_fingerprint = fingerprint
            break

    if cipher_text is None:
        raise SecurityError(
            'Step 2 could not find a valid key for fingerprints: {}'
            .format(', '.join(
                [str(f) for f in res_pq.server_public_key_fingerprints])
            )
        )

    server_dh_params = await sender.send(ReqDHParamsRequest(
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        p=p, q=q,
        public_key_fingerprint=target_fingerprint,
        encrypted_data=cipher_text
    ))
    assert isinstance(
        server_dh_params, (ServerDHParamsOk, ServerDHParamsFail)),\
        'Step 2.1 answer was %s' % server_dh_params

    if server_dh_params.nonce != res_pq.nonce:
        raise SecurityError('Step 2 invalid nonce from server')

    if server_dh_params.server_nonce != res_pq.server_nonce:
        raise SecurityError('Step 2 invalid server nonce from server')

    if isinstance(server_dh_params, ServerDHParamsFail):
        # Even a failure reply must prove the server saw new_nonce by
        # returning a hash of it; verify before accepting the failure.
        nnh = int.from_bytes(
            sha1(new_nonce.to_bytes(32, 'little', signed=True)).digest()[4:20],
            'little', signed=True
        )
        if server_dh_params.new_nonce_hash != nnh:
            raise SecurityError('Step 2 invalid DH fail nonce from server')

    assert isinstance(server_dh_params, ServerDHParamsOk),\
        'Step 2.2 answer was %s' % server_dh_params

    # Step 3 sending: Complete DH Exchange
    # Temporary AES key/IV derived from both nonces protects the DH payloads.
    key, iv = helpers.generate_key_data_from_nonce(
        res_pq.server_nonce, new_nonce
    )

    if len(server_dh_params.encrypted_answer) % 16 != 0:
        # See PR#453
        raise SecurityError('Step 3 AES block size mismatch')

    plain_text_answer = AES.decrypt_ige(
        server_dh_params.encrypted_answer, key, iv
    )

    with BinaryReader(plain_text_answer) as reader:
        reader.read(20)  # hash sum
        server_dh_inner = reader.tgread_object()
        assert isinstance(server_dh_inner, ServerDHInnerData),\
            'Step 3 answer was %s' % server_dh_inner

    if server_dh_inner.nonce != res_pq.nonce:
        raise SecurityError('Step 3 Invalid nonce in encrypted answer')

    if server_dh_inner.server_nonce != res_pq.server_nonce:
        raise SecurityError('Step 3 Invalid server nonce in encrypted answer')

    dh_prime = get_int(server_dh_inner.dh_prime, signed=False)
    g_a = get_int(server_dh_inner.g_a, signed=False)
    # Clock skew between server and client, applied to later message IDs.
    time_offset = server_dh_inner.server_time - int(time.time())

    # Classic DH: b is our secret exponent; gab becomes the shared auth key.
    b = get_int(os.urandom(256), signed=False)
    gb = pow(server_dh_inner.g, b, dh_prime)
    gab = pow(g_a, b, dh_prime)

    # Prepare client DH Inner Data
    client_dh_inner = bytes(ClientDHInnerData(
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        retry_id=0,  # TODO Actual retry ID
        g_b=rsa.get_byte_array(gb)
    ))

    client_dh_inner_hashed = sha1(client_dh_inner).digest() + client_dh_inner

    # Encryption
    client_dh_encrypted = AES.encrypt_ige(client_dh_inner_hashed, key, iv)

    # Prepare Set client DH params
    dh_gen = await sender.send(SetClientDHParamsRequest(
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        encrypted_data=client_dh_encrypted,
    ))

    nonce_types = (DhGenOk, DhGenRetry, DhGenFail)
    assert isinstance(dh_gen, nonce_types), 'Step 3.1 answer was %s' % dh_gen
    name = dh_gen.__class__.__name__
    if dh_gen.nonce != res_pq.nonce:
        raise SecurityError('Step 3 invalid {} nonce from server'.format(name))

    if dh_gen.server_nonce != res_pq.server_nonce:
        raise SecurityError(
            'Step 3 invalid {} server nonce from server'.format(name))

    auth_key = AuthKey(rsa.get_byte_array(gab))
    # The reply class (Ok/Retry/Fail) selects which new_nonce_hashN field
    # the server populated; recompute it locally and compare.
    nonce_number = 1 + nonce_types.index(type(dh_gen))
    new_nonce_hash = auth_key.calc_new_nonce_hash(new_nonce, nonce_number)

    dh_hash = getattr(dh_gen, 'new_nonce_hash{}'.format(nonce_number))
    if dh_hash != new_nonce_hash:
        raise SecurityError('Step 3 invalid new nonce hash')

    if not isinstance(dh_gen, DhGenOk):
        raise AssertionError('Step 3.2 answer was %s' % dh_gen)
return auth_key, time_offset | Executes the authentication process with the Telegram servers.
:param sender: a connected `MTProtoPlainSender`.
:return: returns a (authorization key, time offset) tuple. |
def order_target(self) -> Optional[Union[int, Point2]]:
    """ Returns the target tag (if it is a Unit) or Point2 (if it is a Position)
    from the first order, returns None if the unit is idle """
    if not self.orders:
        # Idle unit: no orders, hence no target.
        return None
    first_target = self.orders[0].target
    if isinstance(first_target, int):
        return first_target
    return Point2.from_proto(first_target)
from the first order, returns None if the unit is idle |
def perform_command(self):
    """
    Perform command and return the appropriate exit code.

    Requires two positional arguments (source URL and output path);
    with ``--list`` only the available audio streams are printed.

    :rtype: int
    """
    if len(self.actual_arguments) < 2:
        return self.print_help()
    source_url = self.actual_arguments[0]
    output_file_path = self.actual_arguments[1]

    list_only = self.has_option("--list")
    # --largest-audio wins; otherwise --smallest-audio flips the default.
    if self.has_option("--largest-audio"):
        largest_audio = True
    else:
        largest_audio = not self.has_option("--smallest-audio")
    download_format = self.has_option_with_value("--format")

    try:
        if list_only:
            self.print_info(u"Downloading stream info from '%s' ..." % source_url)
            downloader = Downloader(logger=self.logger)
            streams = downloader.audio_from_youtube(
                source_url,
                download=False
            )
            self.print_info(u"Downloading stream info from '%s' ... done" % source_url)
            rows = [u"%s\t%s\t%s\t%s" % ("Format", "Extension", "Bitrate", "Size")]
            for stream in streams:
                rows.append(u"%s\t%s\t%s\t%s" % (
                    stream["format"], stream["ext"], stream["abr"],
                    gf.human_readable_number(stream["filesize"])))
            self.print_generic(u"Available audio streams:")
            self.print_generic(u"\n".join(rows))
        else:
            self.print_info(u"Downloading audio stream from '%s' ..." % source_url)
            downloader = Downloader(logger=self.logger)
            destination = downloader.audio_from_youtube(
                source_url,
                download=True,
                output_file_path=output_file_path,
                download_format=download_format,
                largest_audio=largest_audio,
            )
            self.print_info(u"Downloading audio stream from '%s' ... done" % source_url)
            self.print_success(u"Downloaded file '%s'" % destination)
        return self.NO_ERROR_EXIT_CODE
    except ImportError:
        self.print_no_dependency_error()
    except Exception as exc:
        self.print_error(u"An unexpected error occurred while downloading audio from YouTube:")
        self.print_error(u"%s" % exc)
    return self.ERROR_EXIT_CODE
:rtype: int |
def instance_attr_ancestors(self, name, context=None):
    """Iterate over the parents that define the given name as an attribute.

    :param name: The name to find definitions for.
    :type name: str

    :returns: The parents that define the given name as
        an instance attribute.
    :rtype: iterable(NodeNG)
    """
    # Walk the ancestor chain, keeping only those that record *name*
    # among their instance attributes.
    for ancestor in self.ancestors(context=context):
        if name in ancestor.instance_attrs:
            yield ancestor
:param name: The name to find definitions for.
:type name: str
:returns: The parents that define the given name as
an instance attribute.
:rtype: iterable(NodeNG) |
def host_domains(self, ip=None, limit=None, **kwargs):
    """Pass in an IP address."""
    # Build the reverse-IP endpoint for this address and delegate the call.
    endpoint = '/v1/{0}/host-domains'.format(ip)
    return self._results('reverse-ip', endpoint, limit=limit, **kwargs)
def merge(objects, compat='no_conflicts', join='outer'):
    """Merge any number of xarray objects into a single Dataset as variables.

    Parameters
    ----------
    objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]]
        Merge together all variables from these objects. If any of them are
        DataArray objects, they must have a name.
    compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
        String indicating how to compare variables of the same name for
        potential conflicts:

        - 'broadcast_equals': all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - 'equals': all values and dimensions must be the same.
        - 'identical': all values, dimensions and attributes must be the
          same.
        - 'no_conflicts': only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
    join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
        How to combine objects with different indexes.

    Returns
    -------
    Dataset
        Dataset with combined variables from each object.

    Examples
    --------
    >>> arrays = [xr.DataArray(n, name='var%d' % n) for n in range(5)]
    >>> xr.merge(arrays)
    <xarray.Dataset>
    Dimensions:  ()
    Coordinates:
        *empty*
    Data variables:
        var0     int64 0
        var1     int64 1
        var2     int64 2
        var3     int64 3
        var4     int64 4

    Raises
    ------
    xarray.MergeError
        If any variables with the same name have conflicting values.

    See also
    --------
    concat
    """  # noqa
    from .dataarray import DataArray
    from .dataset import Dataset

    # Promote every bare DataArray to a single-variable Dataset up front so
    # merge_core only ever sees dict-like inputs.
    dict_like_objects = []
    for obj in objects:
        if isinstance(obj, DataArray):
            dict_like_objects.append(obj.to_dataset())
        else:
            dict_like_objects.append(obj)

    variables, coord_names, dims = merge_core(dict_like_objects, compat, join)
    # TODO: don't always recompute indexes
    return Dataset._construct_direct(
        variables, coord_names, dims, indexes=None)
Parameters
----------
objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]]
Merge together all variables from these objects. If any of them are
DataArray objects, they must have a name.
compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
How to combine objects with different indexes.
Returns
-------
Dataset
Dataset with combined variables from each object.
Examples
--------
>>> arrays = [xr.DataArray(n, name='var%d' % n) for n in range(5)]
>>> xr.merge(arrays)
<xarray.Dataset>
Dimensions: ()
Coordinates:
*empty*
Data variables:
var0 int64 0
var1 int64 1
var2 int64 2
var3 int64 3
var4 int64 4
Raises
------
xarray.MergeError
If any variables with the same name have conflicting values.
See also
--------
concat |
def _print_message(self, flag_message=None, color=None, padding=None,
                   reverse=False):
    """Print the accumulated messages to the terminal, optionally preceded
    (or followed, with ``reverse=True``) by a padded, colorized flag, then
    reset the message buffer."""
    if not flag_message:
        print(self.format_messages(self.message))
    else:
        # Colorize only when pretty output is enabled.
        rendered_flag = stdout_encode(flag(
            flag_message,
            color=color if self.pretty else None,
            show=False))
        if reverse:
            print(self.format_messages(self.message),
                  padd(rendered_flag, padding))
        else:
            print(padd(rendered_flag, padding),
                  self.format_messages(self.message))
    self.message = []
def add(self, pointer, value):
    """Add element to sequence, member to mapping.

    :param pointer: the path to add in it
    :param value: the new value
    :return: resolved document
    :rtype: Target

    The pointer must reference one of:

    - The root of the target document - whereupon the specified value
      becomes the entire content of the target document.
    - A member to add to an existing mapping - whereupon the supplied
      value is added to that mapping at the indicated location. If the
      member already exists, it is replaced by the specified value.
    - An element to add to an existing sequence - whereupon the supplied
      value is added to the sequence at the indicated location.
      Any elements at or above the specified index are shifted one
      position to the right.
      The specified index must not be greater than the number of elements
      in the sequence.
      If the "-" character is used to index the end of the sequence, this
      has the effect of appending the value to the sequence.
    """
    # Work on a deep copy: the stored document is never mutated in place.
    doc = deepcopy(self.document)
    parent, obj = None, doc
    try:
        # Walk every token of the pointer. token.extract() raises
        # OutOfBounds/OutOfRange/LastElement when the referenced slot does
        # not exist yet - the normal case for an "add" operation.
        for token in Pointer(pointer):
            parent, obj = obj, token.extract(obj, bypass_ref=True)
        else:
            # The whole pointer resolved, so the target already exists;
            # re-signal that through the parent's container type so the
            # handler below performs the replacement/insertion.
            if isinstance(parent, MutableSequence):
                raise OutOfRange(parent)
            if isinstance(parent, Mapping):
                raise OutOfBounds(parent)
            raise Error('already setted')
    except (OutOfBounds, OutOfRange, LastElement) as error:
        # Only the final token may reference a missing slot; a miss on an
        # earlier token means the parent path itself does not exist.
        if not token.last:
            raise NonexistentTarget(obj)
        value = deepcopy(value)
        if isinstance(error, OutOfBounds):
            # Mapping parent: set (or replace) the member.
            error.obj[str(token)] = value
        elif isinstance(error, OutOfRange):
            # Sequence parent: insert, shifting later elements right.
            error.obj.insert(int(token), value)
        elif isinstance(error, LastElement):
            # "-" token: append at the end of the sequence.
            error.obj.append(value)
return Target(doc) | Add element to sequence, member to mapping.
:param pointer: the path to add in it
:param value: the new value
:return: resolved document
:rtype: Target
The pointer must reference one of:
- The root of the target document - whereupon the specified value
becomes the entire content of the target document.
- A member to add to an existing mapping - whereupon the supplied
value is added to that mapping at the indicated location. If the
member already exists, it is replaced by the specified value.
- An element to add to an existing sequence - whereupon the supplied
value is added to the sequence at the indicated location.
Any elements at or above the specified index are shifted one
position to the right.
The specified index must no be greater than the number of elements
in the sequence.
If the "-" character is used to index the end of the sequence, this
has the effect of appending the value to the sequence. |
def is_catchup_needed_during_view_change(self) -> bool:
    """
    Check if received a quorum of view change done messages and if yes
    check if caught up till the
    Check if all requests ordered till last prepared certificate
    Check if last catchup resulted in no txns

    Returns False (no further catchup needed) when the node is already
    caught up for the current view, or when the last catchup brought no
    new txns and either everything up to the last prepared certificate is
    ordered or the maximum number of catchup rounds has been reached.
    """
    if self.caught_up_for_current_view():
        logger.info('{} is caught up for the current view {}'.format(self, self.viewNo))
        return False

    logger.info('{} is not caught up for the current view {}'.format(self, self.viewNo))

    # NOTE(review): further checks apply only when the last catchup round
    # produced zero transactions - TODO confirm this nesting against the
    # upstream source; the indentation here is ambiguous in this dump.
    if self.num_txns_caught_up_in_last_catchup() == 0:
        if self.has_ordered_till_last_prepared_certificate():
            logger.info('{} ordered till last prepared certificate'.format(self))
            return False

        if self.is_catch_up_limit(self.config.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE):
            # No more 3PC messages will be processed since maximum catchup
            # rounds have been done
            self.master_replica.last_prepared_before_view_change = None
            return False
return True | Check if received a quorum of view change done messages and if yes
check if caught up till the
Check if all requests ordered till last prepared certificate
Check if last catchup resulted in no txns |
def _setting(self, key, default):
    """Return the setting, checking config, then the appropriate
    environment variable, falling back to the default, caching the
    results.

    :param str key: The key to get
    :param any default: The default value if not set
    :return: str
    """
    if key not in self._settings:
        # Resolution order: explicit config > STATSD_<KEY> env var > default.
        env_fallback = os.environ.get('STATSD_{}'.format(key).upper(), default)
        self._settings[key] = self._settings_in.get(key, env_fallback)
    return self._settings[key]
environment variable, falling back to the default, caching the
results.
:param str key: The key to get
:param any default: The default value if not set
:return: str |
def coroutine(func):
    """Wraps a PEP-342 enhanced generator in a way that avoids boilerplate of the "priming" call to ``next``.

    Args:
        func (Callable): The function constructing a generator to decorate.

    Returns:
        Callable: The decorated generator.

    Raises:
        TypeError: If the generator yields a value on the priming call.
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)  # copies __name__/__doc__/__module__ etc. (was done by hand)
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        # Prime the generator; a well-formed coroutine's first yield is bare.
        if next(gen) is not None:  # fixed: identity check instead of `!= None`
            raise TypeError('Unexpected value from start of coroutine')
        return gen
    return wrapper
Args:
func (Callable): The function constructing a generator to decorate.
Returns:
Callable: The decorated generator. |
def __openlib(self):
    '''
    Actual (lazy) dlopen() only when an attribute is accessed.

    Tries, in order: every candidate path from __get_libres(), then
    self._libpath resolved as a package resource, then self._libpath next
    to the already-imported module's file (REPL-friendly fallback). On a
    successful load the handle is stored in self.lib and _libloaded is set
    so later accesses skip the dlopen entirely.
    '''
    if self.__getattribute__('_libloaded'):
        return
    # First choice: candidate resource paths advertised by the package.
    for candidate in self.__get_libres():
        try:
            libres = resource_filename(self._module_name, candidate)
            self.lib = self.ffi.dlopen(libres)
            # Fix: mark the library loaded on success so subsequent
            # attribute accesses do not re-run dlopen every time.
            self._libloaded = True
            return
        except Exception:
            continue
    # Second choice: self._libpath as a package resource - will work
    # only if self._module_name is set.
    try:
        libres = resource_filename(self._module_name, self._libpath)
        self.lib = self.ffi.dlopen(libres)
        self._libloaded = True
        return
    except Exception:
        pass
    # Last resort: if self._module_name is in sys.modules, try
    # self._libpath in the same dir as that module's __file__.
    # This allows get_lib_ffi_shared to work in a REPL.
    try:
        # Set _libloaded first to indicate all options have been tried.
        self._libloaded = True
        libdir = ''
        if self._module_name is not None:
            mod = sys.modules.get(self._module_name, None)
            if mod is not None:
                libdir = os.path.dirname(mod.__file__) or os.getcwd()
        libres = os.path.join(libdir, self._libpath)
        self.lib = self.ffi.dlopen(libres)
    except Exception:
        return None
def received_message(self, address, data):
    """Process a message received from the KNX bus."""
    # Cache the latest value for this group address, then fan the message
    # out to the global notify hook and any per-address listeners.
    self.value_cache.set(address, data)
    if self.notify:
        self.notify(address, data)
    try:
        callbacks = self.address_listeners[address]
    except KeyError:
        callbacks = []
    for callback in callbacks:
        callback(address, data)
def get_event_loop():
    """Return a EventLoop instance.

    A new instance is created for each new HTTP request. We determine
    that we're in a new request by inspecting os.environ, which is reset
    at the start of each request. Also, each thread gets its own loop.
    """
    loop = _state.event_loop
    # os.environ was reset => a new request started; drop the stale loop.
    if loop is not None and not os.getenv(_EVENT_LOOP_KEY):
        loop.clear()
        _state.event_loop = None
        loop = None
    if loop is None:
        loop = EventLoop()
        _state.event_loop = loop
        os.environ[_EVENT_LOOP_KEY] = '1'  # mark this request as initialized
    return loop
A new instance is created for each new HTTP request. We determine
that we're in a new request by inspecting os.environ, which is reset
at the start of each request. Also, each thread gets its own loop. |
def noEmptyNests(node):
    '''Recursively remove empty "children" lists from every dict nested in
    *node* (which may be a dict, a list, or a leaf value).

    The tree is modified in place; *node* itself is also returned for
    convenience.
    '''
    if isinstance(node, list):
        for item in node:
            noEmptyNests(item)
    elif isinstance(node, dict):
        # Recurse first so a child whose own "children" empties out is
        # cleaned before this level is inspected.
        for value in node.values():
            noEmptyNests(value)
        # Fix: use .get() so dicts without a "children" key no longer
        # raise KeyError; only an explicitly-empty list is removed.
        if node.get("children") == []:
            node.pop("children")
    return node
def _remove(self, xer, primary):
    """
    Private method for removing a descriptor from the event loop.

    It does the inverse job of _add, and also add a check in case of the fd
    has gone away.
    """
    if xer not in primary:
        # Descriptor already gone; nothing to unregister.
        return
    primary.pop(xer).shutdown()
It does the inverse job of _add, and also add a check in case of the fd
has gone away. |
def _create_user_posts_table(self):
    """
    Create (or reflect, when it already exists) the association table
    linking users to blog posts, storing it on self._user_posts_table.

    :return:
    """
    with self._engine.begin() as conn:
        user_posts_table_name = self._table_name("user_posts")
        if conn.dialect.has_table(conn, user_posts_table_name):
            # Table exists: pick up the already-reflected definition.
            self._user_posts_table = \
                self._metadata.tables[user_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               user_posts_table_name)
        else:
            post_id_key = self._table_name("post") + ".id"
            self._user_posts_table = sqla.Table(
                user_posts_table_name, self._metadata,
                sqla.Column("user_id", sqla.String(128), index=True),
                sqla.Column("post_id", sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                # One row per (user, post) pair.
                sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               user_posts_table_name)
posts.
:return: |
def clearRedisPools():
    '''
    clearRedisPools - Disconnect all managed connection pools,
    and clear the connection_pool attribute on all stored managed
    connection params.

    A "managed" connection pool is one where REDIS_CONNECTION_PARAMS does
    not define the "connection_pool" attribute. If you define your own
    pools, IndexedRedis will use them and leave them alone.

    This method will be called automatically after calling
    setDefaultRedisConnectionParams. Otherwise, you shouldn't have to call
    it.. Maybe as some sort of disaster-recovery call..
    '''
    global RedisPools
    global _redisManagedConnectionParams

    for pool in RedisPools.values():
        try:
            pool.disconnect()
        except Exception:
            # Best-effort cleanup: one pool failing to disconnect must not
            # stop the rest from being torn down. (Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            pass

    for paramsList in _redisManagedConnectionParams.values():
        for params in paramsList:
            # Drop the cached pool so the next connection rebuilds it.
            params.pop('connection_pool', None)

    RedisPools.clear()
    _redisManagedConnectionParams.clear()
and clear the connectiobn_pool attribute on all stored managed connection pools.
A "managed" connection pool is one where REDIS_CONNECTION_PARAMS does not define the "connection_pool" attribute.
If you define your own pools, IndexedRedis will use them and leave them alone.
This method will be called automatically after calling setDefaultRedisConnectionParams.
Otherwise, you shouldn't have to call it.. Maybe as some sort of disaster-recovery call.. |
def write(self, fname, append=True):
    """
    Write detection to csv formatted file.

    Will append if append==True and file exists; the header line is only
    written when a new file is created (or an existing one overwritten),
    so appended detections no longer duplicate the header mid-file.

    :type fname: str
    :param fname: Full path to file to open and write to.
    :type append: bool
    :param append: Set to true to append to an existing file, if True \
        and file doesn't exist, will create new file. If False
        will overwrite old files.
    """
    mode = 'w'
    if append and os.path.isfile(fname):
        mode = 'a'
    header = '; '.join(['Template name', 'Detection time (UTC)',
                        'Number of channels', 'Channel list',
                        'Detection value', 'Threshold',
                        'Threshold type', 'Input threshold',
                        'Detection type'])
    print_str = "{0}; {1}; {2}; {3}; {4}; {5}; {6}; {7}; {8}\n".format(
        self.template_name, self.detect_time, self.no_chans,
        self.chans, self.detect_val, self.threshold,
        self.threshold_type, self.threshold_input, self.typeofdet)
    with open(fname, mode) as _f:
        if mode == 'w':
            # Fix: only a fresh file gets the header; previously the header
            # was re-written on every append, corrupting the CSV.
            _f.write(header + '\n')
        _f.write(print_str)
Will append if append==True and file exists
:type fname: str
:param fname: Full path to file to open and write to.
:type append: bool
:param append: Set to true to append to an existing file, if True \
and file doesn't exist, will create new file and warn. If False
will overwrite old files. |
def train(cls, data, isotonic=True):
    """
    Train an isotonic regression model on the given data.

    :param data:
      RDD of (label, feature, weight) tuples.
    :param isotonic:
      Whether this is isotonic (which is default) or antitonic.
      (default: True)
    """
    vectorized = data.map(_convert_to_vector)
    boundaries, predictions = callMLlibFunc(
        "trainIsotonicRegressionModel", vectorized, bool(isotonic))
    # Materialize boundaries/predictions as arrays for the Python-side model.
    return IsotonicRegressionModel(
        boundaries.toArray(), predictions.toArray(), isotonic)
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True) |
def current_frame(self, n):
    """Seek the underlying sound to frame ``n`` and record the position.

    :param integer n: Frame to set to ``current_frame``
    """
    # Seek first so a failing seek never leaves the cached position stale.
    self.sound.seek(n)
    self._current_frame = n
:param integer n: Frame to set to ``current_frame`` |
def pprint(object, stream=None, indent=1, width=80, depth=None):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
    # Delegate straight to a throwaway PrettyPrinter configured with the
    # caller's formatting options.
    PrettyPrinter(stream=stream, indent=indent, width=width,
                  depth=depth).pprint(object)
def network_create(auth=None, **kwargs):
    '''
    Create a network

    name
        Name of the network being created

    shared : False
        If ``True``, set the network as shared

    admin_state_up : True
        If ``True``, Set the network administrative state to "up"

    external : False
        Control whether or not this network is externally accessible

    provider
        An optional Python dictionary of network provider options

    project_id
        The project ID on which this network will be created

    CLI Example:

    .. code-block:: bash

        salt '*' neutronng.network_create name=network2 \
          shared=True admin_state_up=True external=True

        salt '*' neutronng.network_create name=network3 \
          provider='{"network_type": "vlan",\
          "segmentation_id": "4010",\
          "physical_network": "provider"}' \
          project_id=1dcac318a83b4610b7a7f7ba01465548
    '''
    cloud = get_operator_cloud(auth)
    # Strip auth-plugin keys but keep ``name`` so it reaches the SDK call.
    create_kwargs = _clean_kwargs(keep_name=True, **kwargs)
    return cloud.create_network(**create_kwargs)
name
Name of the network being created
shared : False
If ``True``, set the network as shared
admin_state_up : True
If ``True``, Set the network administrative state to "up"
external : False
Control whether or not this network is externally accessible
provider
An optional Python dictionary of network provider options
project_id
The project ID on which this network will be created
CLI Example:
.. code-block:: bash
salt '*' neutronng.network_create name=network2 \
shared=True admin_state_up=True external=True
salt '*' neutronng.network_create name=network3 \
provider='{"network_type": "vlan",\
"segmentation_id": "4010",\
"physical_network": "provider"}' \
project_id=1dcac318a83b4610b7a7f7ba01465548 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.