code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def get_umi_consensus(data):
    """Retrieve the UMI specification used for consensus based preparation.

    The UMI is configured either as ``fastq_name`` (embedded in the read
    name) or as a path to a separate fastq file.
    """
    valid_embedded = ["fastq_name"]
    umi = tz.get_in(["config", "algorithm", "umi_type"], data)
    # Consensus UMI calling is not applicable to single cell RNA-seq.
    if tz.get_in(["analysis"], data, "").lower() == "scrna-seq":
        return False
    if not umi:
        return None
    if umi in valid_embedded or os.path.exists(umi):
        assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \
            "Using consensus UMI inputs requires marking duplicates"
        return umi
def node_is_on_list(self, node):
    """Return True if *node* is currently linked into some list.

    A node is considered detached when it links to itself, or when it
    has no next/prev attributes at all.
    """
    successor = self.node_next(node)
    if successor == node or successor is None:
        # A detached node's prev pointer must agree with its next pointer.
        assert(self.node_prev(node) is successor)
        return False
    return True
def precip(self, start, end, **kwargs):
    r"""Return precipitation observations for a location over a time span.

    Users must specify at least one geographic search parameter ('stid',
    'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone',
    'gacc', or 'subgacc') as a keyword argument to obtain observation
    data. Other parameters may also be included. See the metadata()
    function for station IDs.

    Arguments:
    ----------
    start: string, mandatory
        Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END
        PARAMETER. Default time is UTC. e.g., start='201306011800'
    end: string, mandatory
        End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START
        PARAMETER. Default time is UTC. e.g., end='201306011800'

    Commonly used optional keyword arguments include 'obtimezone',
    'showemptystations', 'stid', 'county', 'state', 'country', 'radius',
    'bbox', 'cwa', 'nwsfirezone', 'gacc', 'subgacc', 'vars', 'status',
    'units', 'groupby' and 'timeformat'; see the MesoWest/Synoptic API
    documentation for their exact formats.

    Returns:
    --------
    Dictionary of precipitation observations.

    Raises:
    -------
    None.
    """
    # Validate that a geographic selector was supplied, then fold the
    # mandatory time window and the auth token into the request args.
    self._check_geo_param(kwargs)
    kwargs.update(start=start, end=end, token=self.token)
    return self._get_response('stations/precipitation', kwargs)
def console_init_root(
    w: int,
    h: int,
    title: Optional[str] = None,
    fullscreen: bool = False,
    renderer: Optional[int] = None,
    order: str = "C",
) -> tcod.console.Console:
    """Set up the primary display and return the root console.

    `w` and `h` are the columns and rows of the new window (in tiles.)

    `title` is an optional string to display on the windows title bar.

    `fullscreen` determines if the window will start in fullscreen.
    Fullscreen mode is unreliable unless the renderer is set to
    `tcod.RENDERER_SDL2` or `tcod.RENDERER_OPENGL2`.

    `renderer` is the rendering back-end that libtcod will use.
    `tcod.RENDERER_SDL2` is the recommended choice; the SDL, OPENGL and
    GLSL renderers are deprecated.  `tcod.RENDERER_OPENGL2` is an
    SDL2/OPENGL2 renderer that is usually faster than regular SDL2 but
    requires OpenGL 2.0 Core.

    `order` will affect how the array attributes of the returned root
    console are indexed.  `order='C'` is the default, but `order='F'` is
    recommended.

    .. versionchanged:: 4.3
        Added `order` parameter.
        `title` parameter is now optional.

    .. versionchanged:: 8.0
        The default `renderer` is now automatic instead of always being
        `RENDERER_SDL`.
    """
    if title is None:
        # Use the scripts filename as the title.
        title = os.path.basename(sys.argv[0])
    deprecated_renderers = (
        tcod.constants.RENDERER_SDL,
        tcod.constants.RENDERER_OPENGL,
        tcod.constants.RENDERER_GLSL,
    )
    if renderer is None:
        warnings.warn(
            "A renderer should be given, see the online documentation.",
            DeprecationWarning,
            stacklevel=2,
        )
        renderer = tcod.constants.RENDERER_SDL
    elif renderer in deprecated_renderers:
        warnings.warn(
            "The SDL, OPENGL, and GLSL renderers are deprecated.",
            DeprecationWarning,
            stacklevel=2,
        )
    lib.TCOD_console_init_root(w, h, _bytes(title), fullscreen, renderer)
    root = tcod.console.Console._get_root(order)
    root.clear()
    return root
def DeleteCronJob(self, cronjob_id):
    """Deletes a cronjob along with all its runs.

    Args:
      cronjob_id: ID of the cron job to delete.

    Raises:
      db.UnknownCronJobError: if no cron job with that ID exists.
    """
    if cronjob_id not in self.cronjobs:
        raise db.UnknownCronJobError("Cron job %s not known." % cronjob_id)
    del self.cronjobs[cronjob_id]
    # A lease may or may not be held for this job; discard it if present.
    # pop with a default replaces the old try/del/except KeyError dance.
    self.cronjob_leases.pop(cronjob_id, None)
    # Remove every recorded run belonging to this job.
    for job_run in self.ReadCronJobRuns(cronjob_id):
        del self.cronjob_runs[(cronjob_id, job_run.run_id)]
def _sectors(self, ignore_chunk=None):
    """
    Return a list of all sectors; each sector is the list of chunks occupying it.

    Sectors 0 and 1 are the region header (locations and timestamps) and
    are marked with True instead of a chunk list.
    """
    sector_count = self._bytes_to_sector(self.size)
    sectors = [[] for _ in range(sector_count)]
    sectors[0] = True  # locations
    sectors[1] = True  # timestamps
    for meta in self.metadata.values():
        if not meta.is_created() or ignore_chunk == meta:
            continue
        if meta.blocklength and meta.blockstart:
            blockend = meta.blockstart + max(meta.blocklength, meta.requiredblocks())
            # Ensure 2 <= b < sector_count, as well as meta.blockstart <= b < blockend
            for b in range(max(meta.blockstart, 2), min(blockend, sector_count)):
                sectors[b].append(meta)
    return sectors
def related_records2marc(self, key, value):
    """Populate the ``78708`` MARC field.

    Also populates the ``78002`` and ``78502`` MARC fields through side
    effects on *self*.
    """
    relation = value.get('relation')
    freetext = value.get('relation_freetext')
    if freetext:
        return {
            'i': freetext,
            'w': get_recid_from_ref(value.get('record')),
        }
    if relation == 'successor':
        self.setdefault('78502', []).append({
            'i': 'superseded by',
            'w': get_recid_from_ref(value.get('record')),
        })
    elif relation == 'predecessor':
        self.setdefault('78002', []).append({
            'i': 'supersedes',
            'w': get_recid_from_ref(value.get('record')),
        })
    else:
        raise NotImplementedError(u"Unhandled relation in related_records: {}".format(relation))
def print_msg(contentlist):
    # type: (Union[AnyStr, List[AnyStr], Tuple[AnyStr]]) -> AnyStr
    """Concatenate a message list into a single newline-delimited string.

    A list or tuple is joined with line feeds; a plain string is returned
    as-is, with a trailing newline appended when it is missing (strings of
    length <= 1 are left untouched).
    """
    # Idiomatic single isinstance call with a tuple of types.
    if isinstance(contentlist, (list, tuple)):
        return '\n'.join(contentlist)
    # A bare string: ensure multi-character messages end with a newline.
    if len(contentlist) > 1 and contentlist[-1] != '\n':
        contentlist += '\n'
    return contentlist
def count(self, axis='major'):
    """
    Return number of observations over requested axis.

    Parameters
    ----------
    axis : {'items', 'major', 'minor'} or {0, 1, 2}

    Returns
    -------
    count : DataFrame
    """
    axis_num = self._get_axis_number(axis)
    # An observation counts when it is neither NaN nor +/-inf.
    finite_mask = np.isfinite(self.values)
    counts = finite_mask.sum(axis=axis_num, dtype='int64')
    return self._wrap_result(counts, axis)
def _get_stream_id(self, text):
"""Try to find a stream_id"""
m = self._image_re.search(text)
if m:
return m.group("stream_id") | Try to find a stream_id |
async def input(dev: Device, input, output):
    """Get and change outputs."""
    inputs = await dev.get_inputs()
    if input:
        click.echo("Activating %s" % input)
        try:
            input = next((x for x in inputs if x.title == input))
        except StopIteration:
            click.echo("Unable to find input %s" % input)
            return
        zone = None
        if output:
            zone = await dev.get_zone(output)
            # The requested zone must be wired to the chosen input.
            if zone.uri not in input.outputs:
                click.echo("Input %s not valid for zone %s" % (input.title, output))
                return
        await input.activate(zone)
        return
    # No target given: list every input with the active one in bold.
    click.echo("Inputs:")
    for source in inputs:
        is_active = bool(source.active)
        click.echo(" * " + click.style(str(source), bold=is_active))
        for out in source.outputs:
            click.echo("  - %s" % out)
def groupby_with_null(data, *args, **kwargs):
    """
    Groupby on columns with NaN/None/Null values

    Pandas currently does not have proper support for
    groupby on columns with null values. The nulls
    are discarded and so not grouped on.

    Yields (group, dataframe) pairs with the null placeholder values
    restored to None and the original dtypes reinstated.  The input
    *data* frame is temporarily modified but restored before returning.
    """
    by = kwargs.get('by', args[0])
    altered_columns = {}

    if not isinstance(by, (list, tuple)):
        by = [by]

    # Convert NaNs & Nones in the grouping columns
    # to some unique string value. And, for those
    # columns record which rows have been converted
    # Note: this may affect the dtype of the column,
    # so we record the dtype too. Both these changes
    # are undone.
    for col in by:
        bool_idx = pd.isnull(data[col])
        idx = bool_idx.index[bool_idx]
        if idx.size:
            altered_columns[col] = (idx, data[col].dtype)
            data.loc[idx, col] = '-*-null-*-'

    # Groupby on the columns, making sure to revert back
    # to NaN/None and the correct dtype.
    for group, df in data.groupby(*args, **kwargs):
        for col, (orig_idx, orig_dtype) in altered_columns.items():
            # Indices in the grouped df that need correction
            sub_idx = orig_idx.intersection(df[col].index)
            # NaN/None
            if sub_idx.size:
                df.loc[sub_idx, col] = None
            # dtype
            if df[col].dtype != orig_dtype:
                df[col] = df[col].astype(orig_dtype)
        yield group, df

    # Undo the NaN / None conversion and any dtype
    # changes on the original dataframe
    for col, (orig_idx, orig_dtype) in altered_columns.items():
        data.loc[orig_idx, col] = None
        if data[col].dtype != orig_dtype:
            data[col] = data[col].astype(orig_dtype)
def from_xml(cls, xml_val):
    """
    Return the enumeration member corresponding to the XML value
    *xml_val*.
    """
    member_map = cls._xml_to_member
    if xml_val in member_map:
        return member_map[xml_val]
    raise InvalidXmlError(
        "attribute value '%s' not valid for this type" % xml_val
    )
def schema_to_command(
    p, name: str, callback: callable, add_message: bool
) -> click.Command:
    """
    Generates a ``notify`` :class:`click.Command` for :class:`~notifiers.core.Provider`

    :param p: Relevant Provider
    :param name: Command name
    :return: A ``notify`` :class:`click.Command`
    """
    # Build the CLI options from the provider's JSON schema; the provider
    # docstring doubles as the command's help text.
    options = params_factory(p.schema["properties"], add_message=add_message)
    return click.Command(name=name, callback=callback, params=options, help=p.__doc__)
def _rm_udf_link(self, rec):
    # type: (udfmod.UDFFileEntry) -> int
    '''
    An internal method to remove a UDF File Entry link.

    Parameters:
     rec - The UDF File Entry to remove; must be a file or symlink.
    Returns:
     The number of bytes to remove from the ISO.
    '''
    if not rec.is_file() and not rec.is_symlink():
        raise pycdlibexception.PyCdlibInvalidInput('Cannot remove a directory with rm_hard_link (try rm_directory instead)')

    # To remove something from UDF, we have to:
    # 1. Remove it from the list of linked_records on the Inode.
    # 2. If the number of links to the Inode is now 0, remove the Inode.
    # 3. If the number of links to the UDF File Entry this uses is 0,
    #    remove the UDF File Entry.
    # 4. Remove the UDF File Identifier from the parent.

    logical_block_size = self.pvd.logical_block_size()

    num_bytes_to_remove = 0

    if rec.inode is not None:
        # Step 1.
        # Identity comparison (id()) is deliberate: linked_records may
        # contain distinct objects that compare equal.
        found_index = None
        for index, link in enumerate(rec.inode.linked_records):
            if id(link) == id(rec):
                found_index = index
                break
        else:
            # This should never happen
            raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
        del rec.inode.linked_records[found_index]
        rec.inode.num_udf -= 1

        # Step 2.
        if not rec.inode.linked_records:
            found_index = None
            for index, ino in enumerate(self.inodes):
                if id(ino) == id(rec.inode):
                    found_index = index
                    break
            else:
                # This should never happen
                raise pycdlibexception.PyCdlibInternalError('Could not find inode corresponding to record')
            del self.inodes[found_index]
            # The file data itself is reclaimed only when the last link goes.
            num_bytes_to_remove += rec.get_data_length()

        # Step 3.
        if rec.inode.num_udf == 0:
            num_bytes_to_remove += logical_block_size
    else:
        # If rec.inode is None, then we are just removing the UDF File
        # Entry.
        num_bytes_to_remove += logical_block_size

    # Step 4.
    if rec.parent is None:
        raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no parent')
    if rec.file_ident is None:
        raise pycdlibexception.PyCdlibInternalError('Cannot remove a UDF record with no file identifier')
    return num_bytes_to_remove + self._rm_udf_file_ident(rec.parent, rec.file_ident.fi)
def analisar(retorno):
    """Build a :class:`RespostaExtrairLogs` from the given return value.

    :param unicode retorno: Raw return of the ``ExtrairLogs`` function.
    """
    resposta = analisar_retorno(
            forcar_unicode(retorno),
            funcao='ExtrairLogs',
            classe_resposta=RespostaExtrairLogs,
            campos=RespostaSAT.CAMPOS + (
                    ('arquivoLog', unicode),
                ),
            # If the log extraction failed, the standard field layout is
            # expected in the response instead.
            campos_alternativos=[
                    RespostaSAT.CAMPOS,
                ])
    # EEEEE == '15000' is the only success code for ExtrairLogs.
    if resposta.EEEEE not in ('15000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
def _setup_simplejson(self, responder):
    """
    We support serving simplejson for Python 2.4 targets on Ansible 2.3, at
    least so the package's own CI Docker scripts can run without external
    help, however newer versions of simplejson no longer support Python
    2.4. Therefore override any installed/loaded version with a
    2.4-compatible version we ship in the compat/ directory.
    """
    responder.whitelist_prefix('simplejson')
    # issue #536: must be at end of sys.path, in case existing newer
    # version is already loaded.
    compat_path = os.path.join(os.path.dirname(__file__), 'compat')
    sys.path.append(compat_path)

    for fullname, is_pkg, suffix in (
        (u'simplejson', True, '__init__.py'),
        (u'simplejson.decoder', False, 'decoder.py'),
        (u'simplejson.encoder', False, 'encoder.py'),
        (u'simplejson.scanner', False, 'scanner.py'),
    ):
        path = os.path.join(compat_path, 'simplejson', suffix)
        # Context manager replaces the old try/finally and guarantees the
        # handle is closed even if the read fails.
        with open(path, 'rb') as fp:
            source = fp.read()
        responder.add_source_override(
            fullname=fullname,
            path=path,
            source=source,
            is_pkg=is_pkg,
        )
def splitroot(self, path, sep=None):
    """Split path into drive, root and rest."""
    # Fall back to the filesystem's configured separator when none given.
    separator = self.filesystem.path_separator if sep is None else sep
    if self.filesystem.is_windows_fs:
        return self._splitroot_with_drive(path, separator)
    return self._splitroot_posix(path, separator)
def stop_tensorboard(args):
    '''stop tensorboard'''
    experiment_id = check_experiment_id(args)
    experiment_dict = Experiments().get_all_experiments()
    config_file_name = experiment_dict[experiment_id]['fileName']
    nni_config = Config(config_file_name)
    pid_list = nni_config.get_config('tensorboardPidList')
    if not pid_list:
        print_error('No tensorboard configuration!')
        return
    # Force-kill every recorded tensorboard process, then clear the list.
    for pid in pid_list:
        try:
            call(['kill', '-9', str(pid)])
        except Exception as exception:
            print_error(exception)
    nni_config.set_config('tensorboardPidList', [])
    print_normal('Stop tensorboard success!')
def show_G_distribution(data):
    '''Show the distribution of the G function.'''
    Xs, _ = fitting.preprocess_data(data)
    # Sample the sphere on a 50x50 (theta, phi) grid.
    Theta, Phi = np.meshgrid(np.linspace(0, np.pi, 50), np.linspace(0, 2 * np.pi, 50))
    G = []
    for theta_row, phi_row in zip(Theta, Phi):
        row = []
        for theta, phi in zip(theta_row, phi_row):
            w = fitting.direction(theta, phi)
            row.append(fitting.G(w, Xs))
        G.append(row)
    plt.imshow(G, extent=[0, np.pi, 0, 2 * np.pi], origin='lower')
    plt.show()
def smkdirs(dpath, mode=0o777):
    """Safely make a full directory path if it doesn't exist.

    Parameters
    ----------
    dpath : str
        Path of directory/directories to create
    mode : int [default=0777]
        Permissions for the new directories

    See also
    --------
    os.makedirs
    """
    # exist_ok closes the TOCTOU race present in the previous
    # check-then-create version (the directory could be created by
    # another process between the exists() test and makedirs()).
    os.makedirs(dpath, mode=mode, exist_ok=True)
async def renew_lease_async(self, lease):
    """
    Renew a lease currently held by this host.
    If the lease has been stolen, or expired, or released, it is not possible to renew it.
    You will have to call getLease() and then acquireLease() again.

    :param lease: The stored lease to be renewed.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    :return: `True` if the lease was renewed successfully, `False` if not.
    :rtype: bool
    """
    try:
        # The blob storage client is synchronous, so run the renewal in
        # the executor to avoid blocking the event loop.
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.renew_blob_lease,
                self.lease_container_name,
                lease.partition_id,
                lease_id=lease.token,
                timeout=self.lease_duration))
    except Exception as err:  # pylint: disable=broad-except
        # A lease-ID mismatch means another host took the lease; that is
        # expected during rebalancing, so log at info rather than error.
        if "LeaseIdMismatchWithLeaseOperation" in str(err):
            _logger.info("LeaseLost on partition %r", lease.partition_id)
        else:
            _logger.error("Failed to renew lease on partition %r with token %r %r",
                          lease.partition_id, lease.token, err)
        return False
    return True
def cli(ctx,
        amount,
        index,
        stage,
        stepresult,
        formattype,
        select,
        where,
        order,
        outputfile,
        showkeys,
        showvalues,
        showalways,
        position):
    """Export from memory to format supported by tablib.

    Loads the stored step result for the given stage, applies the
    select/where/order options, and writes the result to *outputfile*
    in the requested format (yaml, json, csv, or tab to stdout).
    """
    # Validate the context and the stage/stepresult arguments up front.
    if not ctx.bubble:
        msg = 'There is no bubble present, will not export'
        ctx.say_yellow(msg)
        raise click.Abort()
    path = ctx.home + '/'
    if stage not in STAGES:
        ctx.say_yellow('There is no known stage:' + stage)
        raise click.Abort()
    if stepresult not in exportables:
        ctx.say_yellow('stepresult not one of: ' + ', '.join(exportables))
        raise click.Abort()

    # Load the stored data lazily and slice out the requested window.
    data_gen = bubble_lod_load(ctx, stepresult, stage)
    ctx.gbc.say('data_gen:', stuff=data_gen, verbosity=20)
    part = get_gen_slice(ctx.gbc, data_gen, amount, index)
    ctx.gbc.say('selected part:', stuff=part, verbosity=20)
    aliases = get_pairs(ctx.gbc, select, missing_colon=True)
    if position or len(aliases) == 0:
        ctx.gbc.say('adding position to selection of columns:',
                    stuff=aliases, verbosity=20)
        aliases.insert(0, {'key': buts('index'), 'val': 'BUBBLE_IDX'})
        ctx.gbc.say('added position to selection of columns:',
                    stuff=aliases, verbosity=20)
    wheres = get_pairs(ctx.gbc, where)
    # TODO: use aliases as lookup for wheres
    data = tablib.Dataset()
    data.headers = [sel['val'] for sel in aliases]
    ctx.gbc.say('select wheres:' + str(wheres), verbosity=20)
    ctx.gbc.say('select aliases:' + str(aliases), verbosity=20)
    ctx.gbc.say('select data.headers:' + str(data.headers), verbosity=20)
    not_shown = True
    # Flatten each item, filter it against the where-pairs, and project
    # the selected columns into a tablib row.
    try:
        for ditem in part:
            row = []
            ctx.gbc.say('curr dict', stuff=ditem, verbosity=101)
            flitem = flat(ctx, ditem)
            ctx.gbc.say('curr flat dict', stuff=flitem, verbosity=101)
            row_ok = True
            for wp in wheres:
                # TODO: negative selects: k:None, k:False,k:Zero,k:Null,k:0,k:-1,k:'',k:"",
                # TODO: negative selects:
                # k:BUBBLE_NO_KEY,k:BUBBLE_NO_VAL,k:BUBBLE_NO_KEY_OR_NO_VAL
                wcheck_key=True
                if wp['key'] not in flitem:
                    row_ok = False
                    wcheck_key=False
                if wcheck_key and wp['val'] not in str(flitem[wp['key']]):
                    row_ok = False
            if not row_ok:
                continue
            for sel in aliases:
                if sel['key'] in flitem:
                    row.append(flitem[sel['key']])
                else:
                    # temporary to check, not use case for buts()
                    bnp = '____BTS_NO_PATH_'
                    tempv = get_flat_path(ctx, flitem, sel['key'] + '.*', bnp)
                    if tempv != bnp:
                        row.append(tempv)
                    else:
                        row.append('None')
                        # TODO maybe 'NONE', or just '' or something like:
                        # magic.export_format_none
            data.append(row)

            # todo: count keys, and show all keys in selection: i,a
            if not_shown and showkeys:
                if not showalways:
                    not_shown = False

                ks = list(flitem.keys())
                ks.sort()
                ctx.say(
                    'available dict path keys from first selected dict:', verbosity=0)
                for k in ks:
                    ctx.say('keypath: ' + k, verbosity=0)
                    if showvalues:
                        ctx.say('value: ' + str(flitem[k]) + '\n', verbosity=0)
    except Exception as excpt:
        ctx.say_red('Cannot export data', stuff=excpt)
        raise click.Abort()

    if not outputfile:
        outputfile = path + 'export/export_' + \
            stepresult + '_' + stage + '.' + formattype

    # todo: order key must be present in selection
    # add to selection before
    # and remove from result before output to format.
    if order:
        # An order spec may end in ':+' (ascending) or ':-' (descending).
        olast2 = order[-2:]
        ctx.gbc.say('order:' + order + ' last2:' + olast2, verbosity=100)
        if olast2 not in [':+', ':-']:
            data = data.sort(order, False)
        else:
            if olast2 == ':+':
                data = data.sort(order[:-2], False)
            if olast2 == ':-':
                data = data.sort(order[:-2], True)

    # Write `spreadsheet` to disk
    formatted = None
    if formattype == 'yaml':
        formatted = data.yaml
    if formattype == 'json':
        formatted = data.json
    if formattype == 'csv':
        formatted = data.csv
    # TODO:
    # if formattype == 'ldif':
    #     formatted = data.ldif
    if formattype == 'tab':
        # standard, output, whatever tablib makes of it, ascii table
        print(data)

    if formatted:
        enc_formatted = formatted.encode('utf-8')
        of_path = opath.Path(outputfile)
        of_dir = of_path.dirname()
        if not of_dir.exists():
            of_dir.makedirs_p()
        with open(outputfile, 'wb') as f:
            f.write(enc_formatted)
        ctx.say_green('exported: ' + outputfile)
def setColor(self, personID, color):
    """setColor(string, (integer, integer, integer, integer))

    sets color for person with the given ID.
    i.e. (255,0,0,0) for the color red.
    The fourth integer (alpha) is only used when drawing persons with raster images
    """
    # Payload length: 1 byte for the TYPE_COLOR marker plus one byte per
    # RGBA component (hence 1 + 1 + 1 + 1 + 1).
    self._connection._beginMessage(
        tc.CMD_SET_PERSON_VARIABLE, tc.VAR_COLOR, personID, 1 + 1 + 1 + 1 + 1)
    # "!BBBBB": network byte order, five unsigned bytes (type + R,G,B,A).
    self._connection._string += struct.pack("!BBBBB", tc.TYPE_COLOR, int(
        color[0]), int(color[1]), int(color[2]), int(color[3]))
    self._connection._sendExact()
def check_type(self, type):
    """Check to see if the type is either in TYPES or fits a type name.

    Accepts either a raw chart type already present in TYPES, or one of
    the friendly alias names below, and returns the proper type code.

    Raises:
        AssertionError: if *type* is neither a known type nor an alias.
    """
    if type in TYPES:
        return type
    # Friendly names mapped to their chart type codes.  The old
    # dict(zip(TYPES, TYPES)) identity mapping was dead code: any value
    # already in TYPES returned above.  'scater' is kept for backward
    # compatibility; the correctly spelled 'scatter' is also accepted.
    aliases = {
        'line': 'lc',
        'bar': 'bvs',
        'pie': 'p',
        'venn': 'v',
        'scater': 's',
        'scatter': 's',
        'radar': 'r',
        'meter': 'gom',
    }
    assert type in aliases, 'Invalid chart type: %s'%type
    return aliases[type]
def accept_format(*, version: str = "v3", media: Optional[str] = None,
                  json: bool = True) -> str:
    """Construct the specification of the format that a request should return.

    The version argument defaults to v3 of the GitHub API and is applicable
    to all requests. The media argument along with 'json' specifies what
    format the request should return, e.g. requesting the rendered HTML of
    a comment. Do note that not all of GitHub's API supports alternative
    formats.

    The default arguments of this function will always return the latest
    stable version of the GitHub API in the default format that this
    library is designed to support.
    """
    # https://developer.github.com/v3/media/
    # https://developer.github.com/v3/#current-version
    media_part = f".{media}" if media is not None else ""
    json_part = "+json" if json else ""
    return f"application/vnd.github.{version}{media_part}{json_part}"
def note_hz_to_midi(annotation):
    '''Convert a pitch_hz annotation to pitch_midi'''
    annotation.namespace = 'note_midi'
    observations = annotation.pop_data()
    for observation in observations:
        # MIDI note number: 69 is A4 (440 Hz), 12 semitones per octave.
        midi_value = 12 * (np.log2(observation.value) - np.log2(440.0)) + 69
        annotation.append(time=observation.time,
                          duration=observation.duration,
                          confidence=observation.confidence,
                          value=midi_value)
    return annotation
def transaction_abort(self, transaction_id, **kwargs):
    """Abort a transaction and roll back all operations.

    :param transaction_id: ID of transaction to be aborted.
    :param **kwargs: Further parameters for the transport layer.
    """
    # Guard clause: only transactions we are tracking may be aborted.
    if transaction_id not in self.__transactions:
        raise workflows.Error("Attempting to abort unknown transaction")
    self.log.debug("Aborting transaction %s", transaction_id)
    # Forget the transaction locally, then tell the transport layer.
    self.__transactions.remove(transaction_id)
    self._transaction_abort(transaction_id, **kwargs)
:param transaction_id: ID of transaction to be aborted.
:param **kwargs: Further parameters for the transport layer. |
def clone(self, opts):
    '''
    Create a new instance of this type with the specified options.

    Args:
        opts (dict): The type specific options for the new instance.
    '''
    # Merge the overrides on top of a copy of the current options so the
    # original instance is left untouched.
    merged = self.opts.copy()
    merged.update(opts)
    return self.__class__(self.modl, self.name, self.info, merged)
Args:
opts (dict): The type specific options for the new instance. |
def _parse_docline(self, line, container):
"""Parses a single line of code following a docblock to see if
it as a valid code element that can be decorated. If so, return
the name of the code element."""
match = self.RE_DECOR.match(line)
if match is not None:
return "{}.{}".format(container.name, match.group("name"))
else:
return container.name | Parses a single line of code following a docblock to see if
it as a valid code element that can be decorated. If so, return
the name of the code element. |
def cache_git_tag():
    """
    Try to read the current version from git and, if read successfully, cache it into the version cache file. If
    the git folder doesn't exist or if git isn't installed, this is a no-op. I.E. it won't blank out a
    pre-existing version cache file upon failure.
    :return: Project version string
    """
    try:
        version = __get_git_tag()
    except Exception:
        # Git unavailable (no repo / git not installed): fall back to the
        # default without touching any pre-existing cache file.
        return __default_version__
    # Caching is best-effort: a failed write must not discard the version
    # we already read successfully from git (the original returned the
    # default here, contradicting the contract above).
    try:
        with __open_cache_file('w') as vf:
            vf.write(version)
    except Exception:
        pass
    return version
the git folder doesn't exist or if git isn't installed, this is a no-op. I.E. it won't blank out a
pre-existing version cache file upon failure.
:return: Project version string |
def urls(self):
    """
    A dictionary of the urls to be mocked with this service and the handlers
    that should be called in their place
    """
    # Expand every path template against every base URL; later bases win on
    # collision, matching the original nested-loop insertion order.
    return {
        path_template.format(base): handler
        for base in self._url_module.url_bases
        for path_template, handler in self._url_module.url_paths.items()
    }
that should be called in their place |
def streamweigths_get(self, session):
    '''taobao.wangwang.eservice.streamweigths.get -- stream-weight query API.

    Fetch the traffic-distribution (stream) weight settings configured in
    the shop of the currently signed-in user.'''
    response = self.execute(
        TOPRequest('taobao.wangwang.eservice.streamweigths.get'), session)
    self.create(response)
    return self.staff_stream_weights
获取当前登录用户自己的店铺内的分流权重设置 |
def comp_srcmdl_xml(self, **kwargs):
    """ return the name of a source model file
    """
    # Start from the base naming dictionary, overlay the caller's kwargs,
    # then fill in derived fields that have computed defaults.
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    merged['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    merged['component'] = kwargs.get('component', self.component(**kwargs))
    self._replace_none(merged)
    localpath = NameFactory.comp_srcmdl_xml_format.format(**merged)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
def delete_container(container_name, profile, **libcloud_kwargs):
    '''
    Delete an object container in the cloud

    :param container_name: Container name
    :type container_name: ``str``

    :param profile: The profile key
    :type profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's delete_container method
    :type libcloud_kwargs: ``dict``

    :return: True if an object container has been successfully deleted, False
        otherwise.
    :rtype: ``bool``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_storage.delete_container MyFolder profile1
    '''
    driver = _get_driver(profile=profile)
    extra_args = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    target = driver.get_container(container_name)
    return driver.delete_container(target, **extra_args)
:param container_name: Container name
:type container_name: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's delete_container method
:type libcloud_kwargs: ``dict``
:return: True if an object container has been successfully deleted, False
otherwise.
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_storage.delete_container MyFolder profile1 |
def from_pycode(cls, co):
    """Create a Code object from a python code object.
    Parameters
    ----------
    co : CodeType
        The python code object.
    Returns
    -------
    code : Code
        The codetransformer Code object.
    """
    # Make it sparse to instrs[n] is the instruction at bytecode[n]
    # (multi-byte instructions leave None padding at their argument offsets).
    sparse_instrs = tuple(
        _sparse_args(
            Instruction.from_opcode(
                b.opcode,
                Instruction._no_arg if b.arg is None else _RawArg(b.arg),
            ) for b in Bytecode(co)
        ),
    )
    # Resolve each raw integer argument into its real object (jump target,
    # constant, name, ...), depending on the instruction kind.
    for idx, instr in enumerate(sparse_instrs):
        if instr is None:
            # The sparse value
            continue
        if instr.absjmp:
            # Absolute jump: the raw arg is a bytecode offset.
            instr.arg = sparse_instrs[instr.arg]
        elif instr.reljmp:
            # Relative jump: offset from the end of this instruction.
            # NOTE(review): `argsize` is a free name here -- presumably a
            # module-level constant for the bytecode argument width; confirm
            # in the enclosing module.
            instr.arg = sparse_instrs[instr.arg + idx + argsize + 1]
        elif isinstance(instr, LOAD_CONST):
            instr.arg = co.co_consts[instr.arg]
        elif instr.uses_name:
            instr.arg = co.co_names[instr.arg]
        elif instr.uses_varname:
            instr.arg = co.co_varnames[instr.arg]
        elif instr.uses_free:
            instr.arg = _freevar_argname(
                instr.arg,
                co.co_freevars,
                co.co_cellvars,
            )
        elif instr.have_arg and isinstance(instr.arg, _RawArg):
            instr.arg = int(instr.arg)
    flags = Flag.unpack(co.co_flags)
    has_vargs = flags['CO_VARARGS']
    has_kwargs = flags['CO_VARKEYWORDS']
    # Here we convert the varnames format into our argnames format.
    paramnames = co.co_varnames[
        :(co.co_argcount +
          co.co_kwonlyargcount +
          has_vargs +
          has_kwargs)
    ]
    # We start with the positional arguments.
    new_paramnames = list(paramnames[:co.co_argcount])
    # Add *args next.
    if has_vargs:
        new_paramnames.append('*' + paramnames[-1 - has_kwargs])
    # Add keyword-only arguments next (co_kwonlyargcount slice).
    new_paramnames.extend(paramnames[
        co.co_argcount:co.co_argcount + co.co_kwonlyargcount
    ])
    # Add **kwargs last.
    if has_kwargs:
        new_paramnames.append('**' + paramnames[-1])
    return cls(
        filter(bool, sparse_instrs),
        argnames=new_paramnames,
        cellvars=co.co_cellvars,
        freevars=co.co_freevars,
        name=co.co_name,
        filename=co.co_filename,
        firstlineno=co.co_firstlineno,
        lnotab={
            lno: sparse_instrs[off] for off, lno in findlinestarts(co)
        },
        flags=flags,
    ) | Create a Code object from a python code object.
Parameters
----------
co : CodeType
The python code object.
Returns
-------
code : Code
The codetransformer Code object. |
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
"""Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
"""
if keyfunc is not cls._default:
return "{}.{}[{}, {}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
keyfunc_name,
)
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
) | Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class. |
def simulate(self):
    """ Section 7 - uwg main section
    self.N # Total hours in simulation
    self.ph # per hour
    self.dayType # 3=Sun, 2=Sat, 1=Weekday
    self.ceil_time_step # simulation timestep (dt) fitted to weather file timestep
    # Output of object instance vector
    self.WeatherData # Nx1 vector of forc instance
    self.UCMData # Nx1 vector of UCM instance
    self.UBLData # Nx1 vector of UBL instance
    self.RSMData # Nx1 vector of RSM instance
    self.USMData # Nx1 vector of USM instance
    """
    self.N = int(self.simTime.days * 24)  # total number of hours in simulation
    n = 0  # weather time step counter
    self.ph = self.simTime.dt/3600.  # dt (simulation time step) in hours
    # Data dump variables
    # NOTE(review): `time` is assigned but never read below -- looks dead.
    time = range(self.N)
    self.WeatherData = [None for x in range(self.N)]
    self.UCMData = [None for x in range(self.N)]
    self.UBLData = [None for x in range(self.N)]
    self.RSMData = [None for x in range(self.N)]
    self.USMData = [None for x in range(self.N)]
    print('\nSimulating new temperature and humidity values for {} days from {}/{}.\n'.format(
        int(self.nDay), int(self.Month), int(self.Day)))
    self.logger.info("Start simulation")
    # Main time loop: one iteration per simulation time-step (e.g. 5 min).
    for it in range(1, self.simTime.nt, 1):  # for every simulation time-step (i.e 5 min) defined by uwg
        # Update water temperature (estimated)
        if self.nSoil < 3:  # correction to original matlab code
            # for BUBBLE/CAPITOUL/Singapore only
            self.forc.deepTemp = sum(self.forcIP.temp)/float(len(self.forcIP.temp))
            self.forc.waterTemp = sum(
                self.forcIP.temp)/float(len(self.forcIP.temp)) - 10.  # for BUBBLE/CAPITOUL/Singapore only
        else:
            # soil temperature by depth, by month
            self.forc.deepTemp = self.Tsoil[self.soilindex1][self.simTime.month-1]
            self.forc.waterTemp = self.Tsoil[2][self.simTime.month-1]
        # There's probably a better way to update the weather...
        self.simTime.UpdateDate()
        self.logger.info("\n{0} m={1}, d={2}, h={3}, s={4}".format(
            __name__, self.simTime.month, self.simTime.day, self.simTime.secDay/3600., self.simTime.secDay))
        # simulation time increment raised to weather time step
        self.ceil_time_step = int(math.ceil(it * self.ph))-1
        # minus one to be consistent with forcIP list index
        # Updating forcing instance
        # horizontal Infrared Radiation Intensity (W m-2)
        self.forc.infra = self.forcIP.infra[self.ceil_time_step]
        # wind speed (m s-1); clamped below by the model's minimum wind speed
        self.forc.wind = max(self.forcIP.wind[self.ceil_time_step], self.geoParam.windMin)
        self.forc.uDir = self.forcIP.uDir[self.ceil_time_step]  # wind direction
        # specific humidty (kg kg-1)
        self.forc.hum = self.forcIP.hum[self.ceil_time_step]
        self.forc.pres = self.forcIP.pres[self.ceil_time_step]  # Pressure (Pa)
        self.forc.temp = self.forcIP.temp[self.ceil_time_step]  # air temperature (C)
        self.forc.rHum = self.forcIP.rHum[self.ceil_time_step]  # Relative humidity (%)
        self.forc.prec = self.forcIP.prec[self.ceil_time_step]  # Precipitation (mm h-1)
        # horizontal solar diffuse radiation (W m-2)
        self.forc.dif = self.forcIP.dif[self.ceil_time_step]
        # normal solar direct radiation (W m-2)
        self.forc.dir = self.forcIP.dir[self.ceil_time_step]
        # Canyon humidity (absolute) same as rural
        self.UCM.canHum = copy.copy(self.forc.hum)
        # Update solar flux
        self.solar = SolarCalcs(self.UCM, self.BEM, self.simTime,
                                self.RSM, self.forc, self.geoParam, self.rural)
        self.rural, self.UCM, self.BEM = self.solar.solarcalcs()
        # Update building & traffic schedule
        # Assign day type (1 = weekday, 2 = sat, 3 = sun/other)
        # NOTE(review): day type is derived from julian % 7 -- presumably the
        # julian day numbering is aligned so 0 maps to Sunday; confirm.
        if self.is_near_zero(self.simTime.julian % 7):
            self.dayType = 3  # Sunday
        elif self.is_near_zero(self.simTime.julian % 7 - 6.):
            self.dayType = 2  # Saturday
        else:
            self.dayType = 1  # Weekday
        # Update anthropogenic heat load for each hour (building & UCM)
        self.UCM.sensAnthrop = self.sensAnth * (self.SchTraffic[self.dayType-1][self.simTime.hourDay])
        # Update the energy components for building types defined in initialize.uwg
        for i in range(len(self.BEM)):
            # Set temperature
            self.BEM[i].building.coolSetpointDay = self.Sch[i].Cool[self.dayType -
                                                                    1][self.simTime.hourDay] + 273.15  # add from temperature schedule for cooling
            self.BEM[i].building.coolSetpointNight = self.BEM[i].building.coolSetpointDay
            self.BEM[i].building.heatSetpointDay = self.Sch[i].Heat[self.dayType -
                                                                    1][self.simTime.hourDay] + 273.15  # add from temperature schedule for heating
            self.BEM[i].building.heatSetpointNight = self.BEM[i].building.heatSetpointDay
            # Internal Heat Load Schedule (W/m^2 of floor area for Q)
            self.BEM[i].Elec = self.Sch[i].Qelec * self.Sch[i].Elec[self.dayType -
                                                                    1][self.simTime.hourDay]  # Qelec x elec fraction for day
            self.BEM[i].Light = self.Sch[i].Qlight * self.Sch[i].Light[self.dayType -
                                                                       1][self.simTime.hourDay]  # Qlight x light fraction for day
            self.BEM[i].Nocc = self.Sch[i].Nocc * self.Sch[i].Occ[self.dayType -
                                                                  1][self.simTime.hourDay]  # Number of occupants x occ fraction for day
            # Sensible Q occupant * fraction occupant sensible Q * number of occupants
            self.BEM[i].Qocc = self.sensOcc * (1 - self.LatFOcc) * self.BEM[i].Nocc
            # SWH and ventilation schedule
            self.BEM[i].SWH = self.Sch[i].Vswh * self.Sch[i].SWH[self.dayType -
                                                                 1][self.simTime.hourDay]  # litres per hour x SWH fraction for day
            # m^3/s/m^2 of floor
            self.BEM[i].building.vent = self.Sch[i].Vent
            self.BEM[i].Gas = self.Sch[i].Qgas * self.Sch[i].Gas[self.dayType -
                                                                 1][self.simTime.hourDay]  # Gas Equip Schedule, per m^2 of floor
            # This is quite messy, should update
            # Update internal heat and corresponding fractional loads
            intHeat = self.BEM[i].Light + self.BEM[i].Elec + self.BEM[i].Qocc
            # W/m2 from light, electricity, occupants
            self.BEM[i].building.intHeatDay = intHeat
            self.BEM[i].building.intHeatNight = intHeat
            # fraction of radiant heat from light and equipment of whole internal heat
            self.BEM[i].building.intHeatFRad = (
                self.RadFLight * self.BEM[i].Light + self.RadFEquip * self.BEM[i].Elec) / intHeat
            # fraction of latent heat (from occupants) of whole internal heat
            self.BEM[i].building.intHeatFLat = self.LatFOcc * \
                self.sensOcc * self.BEM[i].Nocc/intHeat
            # Update envelope temperature layers
            self.BEM[i].T_wallex = self.BEM[i].wall.layerTemp[0]
            self.BEM[i].T_wallin = self.BEM[i].wall.layerTemp[-1]
            self.BEM[i].T_roofex = self.BEM[i].roof.layerTemp[0]
            self.BEM[i].T_roofin = self.BEM[i].roof.layerTemp[-1]
        # Update rural heat fluxes & update vertical diffusion model (VDM)
        self.rural.infra = self.forc.infra - self.rural.emissivity * self.SIGMA * \
            self.rural.layerTemp[0]**4.  # Infrared radiation from rural road
        self.rural.SurfFlux(self.forc, self.geoParam, self.simTime,
                            self.forc.hum, self.forc.temp, self.forc.wind, 2., 0.)
        self.RSM.VDM(self.forc, self.rural, self.geoParam, self.simTime)
        # Calculate urban heat fluxes, update UCM & UBL
        self.UCM, self.UBL, self.BEM = urbflux(
            self.UCM, self.UBL, self.BEM, self.forc, self.geoParam, self.simTime, self.RSM)
        self.UCM.UCModel(self.BEM, self.UBL.ublTemp, self.forc, self.geoParam)
        self.UBL.UBLModel(self.UCM, self.RSM, self.rural,
                          self.forc, self.geoParam, self.simTime)
        """
        # Experimental code to run diffusion model in the urban area
        # N.B Commented out in python uwg because computed wind speed in
        # urban VDM: y = =0.84*ln((2-x/20)/0.51) results in negative log
        # for building heights >= 40m.
        Uroad = copy.copy(self.UCM.road)
        Uroad.sens = copy.copy(self.UCM.sensHeat)
        Uforc = copy.copy(self.forc)
        Uforc.wind = copy.copy(self.UCM.canWind)
        Uforc.temp = copy.copy(self.UCM.canTemp)
        self.USM.VDM(Uforc,Uroad,self.geoParam,self.simTime)
        """
        self.logger.info("dbT = {}".format(self.UCM.canTemp-273.15))
        if n > 0:
            logging.info("dpT = {}".format(self.UCM.Tdp))
            logging.info("RH = {}".format(self.UCM.canRHum))
        # Snapshot model state once per output interval (timePrint).
        if self.is_near_zero(self.simTime.secDay % self.simTime.timePrint) and n < self.N:
            self.logger.info("{0} ----sim time step = {1}----\n\n".format(__name__, n))
            self.WeatherData[n] = copy.copy(self.forc)
            _Tdb, _w, self.UCM.canRHum, _h, self.UCM.Tdp, _v = psychrometrics(
                self.UCM.canTemp, self.UCM.canHum, self.forc.pres)
            self.UBLData[n] = copy.copy(self.UBL)
            self.UCMData[n] = copy.copy(self.UCM)
            self.RSMData[n] = copy.copy(self.RSM)
            self.logger.info("dbT = {}".format(self.UCMData[n].canTemp-273.15))
            self.logger.info("dpT = {}".format(self.UCMData[n].Tdp))
            self.logger.info("RH = {}".format(self.UCMData[n].canRHum))
            n += 1 | Section 7 - uwg main section
self.N # Total hours in simulation
self.ph # per hour
self.dayType # 3=Sun, 2=Sat, 1=Weekday
self.ceil_time_step # simulation timestep (dt) fitted to weather file timestep
# Output of object instance vector
self.WeatherData # Nx1 vector of forc instance
self.UCMData # Nx1 vector of UCM instance
self.UBLData # Nx1 vector of UBL instance
self.RSMData # Nx1 vector of RSM instance
self.USMData # Nx1 vector of USM instance |
def query(self):
    ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
        values are sometimes called "URL arguments" or "GET parameters", but
        not to be confused with "URL wildcards" as they are provided by the
        :class:`Router`. '''
    pairs = parse_qsl(self.query_string, keep_blank_values=True)
    # Store the parsed dict in the WSGI environ so later accesses reuse it.
    params = self.environ['bottle.get'] = FormsDict()
    # Cap the number of accepted parameters to guard against abuse.
    for name, val in pairs[:self.MAX_PARAMS]:
        params[name] = val
    return params
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. |
def parrep(self, parfile=None,enforce_bounds=True):
    """replicates the pest parrep util. replaces the parval1 field in the
    parameter data section dataframe
    Parameters
    ----------
    parfile : str
        parameter file to use. If None, try to use
        a parameter file that corresponds to the case name.
        Default is None
    enforce_bounds : bool
        flag to enforce parameter bounds after parameter values are updated.
        This is useful because PEST and PEST++ round the parameter values in the
        par file, which may cause slight bound violations
    """
    if parfile is None:
        parfile = self.filename.replace(".pst", ".par")
    par_df = pst_utils.read_parfile(parfile)
    # Re-index both frames on parameter name so the assignments below align
    # rows by parnme (pandas index alignment) rather than by position.
    self.parameter_data.index = self.parameter_data.parnme
    par_df.index = par_df.parnme
    self.parameter_data.parval1 = par_df.parval1
    self.parameter_data.scale = par_df.scale
    self.parameter_data.offset = par_df.offset
    if enforce_bounds:
        par = self.parameter_data
        # Clip any values that the (rounded) par file pushed past a bound.
        idx = par.loc[par.parval1 > par.parubnd,"parnme"]
        par.loc[idx,"parval1"] = par.loc[idx,"parubnd"]
        idx = par.loc[par.parval1 < par.parlbnd,"parnme"]
        par.loc[idx, "parval1"] = par.loc[idx, "parlbnd"] | replicates the pest parrep util. replaces the parval1 field in the
parameter data section dataframe
Parameters
----------
parfile : str
parameter file to use. If None, try to use
a parameter file that corresponds to the case name.
Default is None
enforce_bounds : bool
flag to enforce parameter bounds after parameter values are updated.
This is useful because PEST and PEST++ round the parameter values in the
par file, which may cause slight bound violations |
def _srm(self, data):
    """Expectation-Maximization algorithm for fitting the probabilistic SRM.
    Parameters
    ----------
    data : list of 2D arrays, element i has shape=[voxels_i, samples]
        Each element in the list contains the fMRI data of one subject.
    Returns
    -------
    sigma_s : array, shape=[features, features]
        The covariance :math:`\\Sigma_s` of the shared response Normal
        distribution.
    w : list of array, element i has shape=[voxels_i, features]
        The orthogonal transforms (mappings) :math:`W_i` for each subject.
    mu : list of array, element i has shape=[voxels_i]
        The voxel means :math:`\\mu_i` over the samples for each subject.
    rho2 : array, shape=[subjects]
        The estimated noise variance :math:`\\rho_i^2` for each subject
    s : array, shape=[features, samples]
        The shared response.
    """
    samples = data[0].shape[1]
    subjects = len(data)
    # Seed the global RNG so the fit is reproducible for a given rand_seed.
    np.random.seed(self.rand_seed)
    # Initialization step: initialize the outputs with initial values,
    # voxels with the number of voxels in each subject, and trace_xtx with
    # the ||X_i||_F^2 of each subject.
    w, voxels = _init_w_transforms(data, self.features)
    x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
    shared_response = np.zeros((self.features, samples))
    sigma_s = np.identity(self.features)
    # Main loop of the algorithm (runs n_iter EM iterations).
    for iteration in range(self.n_iter):
        logger.info('Iteration %d' % (iteration + 1))
        # E-step:
        # Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W
        rho0 = (1 / rho2).sum()
        # Invert Sigma_s using Cholesky factorization
        (chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
            sigma_s, check_finite=False)
        inv_sigma_s = scipy.linalg.cho_solve(
            (chol_sigma_s, lower_sigma_s), np.identity(self.features),
            check_finite=False)
        # Invert (Sigma_s + rho_0 * I) using Cholesky factorization
        sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
        (chol_sigma_s_rhos, lower_sigma_s_rhos) = scipy.linalg.cho_factor(
            sigma_s_rhos, check_finite=False)
        inv_sigma_s_rhos = scipy.linalg.cho_solve(
            (chol_sigma_s_rhos, lower_sigma_s_rhos),
            np.identity(self.features), check_finite=False)
        # Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
        # of X_i^T * rho_i^-2 * X_i
        wt_invpsi_x = np.zeros((self.features, samples))
        trace_xt_invsigma2_x = 0.0
        for subject in range(subjects):
            wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject]
            trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
        log_det_psi = np.sum(np.log(rho2) * voxels)
        # Update the shared response
        shared_response = sigma_s.dot(
            np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
                wt_invpsi_x)
        # M-step
        # Update Sigma_s and compute its trace
        sigma_s = (inv_sigma_s_rhos
                   + shared_response.dot(shared_response.T) / samples)
        trace_sigma_s = samples * np.trace(sigma_s)
        # Update each subject's mapping transform W_i and error variance
        # rho_i^2
        for subject in range(subjects):
            a_subject = x[subject].dot(shared_response.T)
            # Small diagonal perturbation -- presumably to avoid a
            # degenerate SVD when a_subject is rank deficient; confirm.
            perturbation = np.zeros(a_subject.shape)
            np.fill_diagonal(perturbation, 0.001)
            u_subject, s_subject, v_subject = np.linalg.svd(
                a_subject + perturbation, full_matrices=False)
            # Closest orthogonal matrix to a_subject (Procrustes solution).
            w[subject] = u_subject.dot(v_subject)
            rho2[subject] = trace_xtx[subject]
            rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum()
            rho2[subject] += trace_sigma_s
            rho2[subject] /= samples * voxels[subject]
        if logger.isEnabledFor(logging.INFO):
            # Calculate and log the current log-likelihood for checking
            # convergence
            loglike = self._likelihood(
                chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
                trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
                samples)
            logger.info('Objective function %f' % loglike)
    return sigma_s, w, mu, rho2, shared_response
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response. |
def compute(self, write_to_tar=True):
    """Perform all desired calculations on the data and save externally."""
    raw = self._get_all_data(self.start_date, self.end_date)
    logging.info('Computing timeseries for {0} -- '
                 '{1}.'.format(self.start_date, self.end_date))
    # Full-resolution timeseries, then collapse to yearly, then apply each
    # configured time reduction (mean, std, ...).
    full, full_dt = self._compute_full_ts(raw)
    yearly = self._full_to_yearly_ts(full, full_dt)
    reduced = self._apply_all_time_reductions(yearly)
    logging.info("Writing desired gridded outputs to disk.")
    for dtype_time, arr in reduced.items():
        arr = _add_metadata_as_attrs(arr, self.var.units,
                                     self.var.description,
                                     self.dtype_out_vert)
        self.save(arr, dtype_time, dtype_out_vert=self.dtype_out_vert,
                  save_files=True, write_to_tar=write_to_tar)
    return self
def export(self, last_checkpoint, output_dir):
    """Builds a prediction graph and exports the model.
    Args:
        last_checkpoint: Path to the latest checkpoint file from training.
        output_dir: Path to the folder to be used to output the model.
    """
    logging.info('Exporting prediction graph to %s', output_dir)
    with tf.Session(graph=tf.Graph()) as sess:
        # Build and save prediction meta graph and trained variable values.
        inputs, outputs = self.build_prediction_graph()
        signature_def_map = {
            'serving_default': signature_def_utils.predict_signature_def(inputs, outputs)
        }
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Load trained weights over the freshly initialized variables.
        self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
                                     last_checkpoint)
        # Serving-time init: local variables and lookup tables.
        init_op_serving = control_flow_ops.group(
            variables.local_variables_initializer(),
            tf.tables_initializer())
        builder = saved_model_builder.SavedModelBuilder(output_dir)
        builder.add_meta_graph_and_variables(
            sess, [tag_constants.SERVING],
            signature_def_map=signature_def_map,
            legacy_init_op=init_op_serving)
        # as_text=False: write the SavedModel proto in binary form.
        builder.save(False) | Builds a prediction graph and xports the model.
Args:
last_checkpoint: Path to the latest checkpoint file from training.
output_dir: Path to the folder to be used to output the model. |
def value_from_ast_untyped(
    value_node: ValueNode, variables: Dict[str, Any] = None
) -> Any:
    """Produce a Python value given a GraphQL Value AST.

    Unlike `value_from_ast()`, no type is provided. The resulting Python value
    mirrors the provided GraphQL value AST:

    | GraphQL Value        | JSON Value | Python Value |
    | -------------------- | ---------- | ------------ |
    | Input Object         | Object     | dict         |
    | List                 | Array      | list         |
    | Boolean              | Boolean    | bool         |
    | String / Enum        | String     | str          |
    | Int / Float          | Number     | int / float  |
    | Null                 | null       | None         |
    """
    handler = _value_from_kind_functions.get(value_node.kind)
    if handler is None:
        # Not reachable. All possible value nodes have been considered.
        raise TypeError(  # pragma: no cover
            f"Unexpected value node: '{inspect(value_node)}'."
        )
    return handler(value_node, variables)
Unlike `value_from_ast()`, no type is provided. The resulting Python value will
reflect the provided GraphQL value AST.
| GraphQL Value | JSON Value | Python Value |
| -------------------- | ---------- | ------------ |
| Input Object | Object | dict |
| List | Array | list |
| Boolean | Boolean | bool |
| String / Enum | String | str |
| Int / Float | Number | int / float |
| Null | null | None | |
def _pop(self):
'''
Actual pop
'''
if not self.canPop():
raise IndexError('pop from an empty or blocked queue')
priority = self.prioritySet[-1]
ret = self.queues[priority]._pop()
self.outputStat = self.outputStat + 1
self.totalSize = self.totalSize - 1
if self.isWaited and self.canAppend():
self.isWaited = False
ret[1].append(QueueCanWriteEvent(self))
if self.isWaitEmpty and not self:
self.isWaitEmpty = False
ret[2].append(QueueIsEmptyEvent(self))
return ret | Actual pop |
def browse_in_qt5_ui(self):
    """Browse and edit the SubjectInfo in a simple Qt5 based UI."""
    # Switch the renderer into interactive "browse" mode before showing.
    self._render_type = "browse"
    self._tree.show(tree_style=self._get_tree_style()) | Browse and edit the SubjectInfo in a simple Qt5 based UI. |
def sys_call(cmd):
    """Execute cmd and capture stdout and stderr

    :param cmd: command to be executed
    :return: (stdout, stderr) -- each a list of output lines (bytes, with
        line terminators preserved), matching the previous readlines()
        behaviour
    """
    import io  # stdlib; local import keeps this snippet self-contained
    # NOTE: shell=True runs `cmd` through the shell -- never pass untrusted
    # input to this function.
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, close_fds=True)
    # communicate() drains both pipes concurrently and waits for the child.
    # The original read stdout to EOF before touching stderr, which can
    # deadlock when the stderr pipe buffer fills, and never reaped the
    # child process.
    out, err = p.communicate()
    # BytesIO(...).readlines() reproduces pipe.readlines() exactly
    # (split on b'\n', terminators kept).
    return io.BytesIO(out).readlines(), io.BytesIO(err).readlines()
:param cmd: command to be executed
:return: (stdout, stderr) |
def read_column(self, column, where=None, start=None, stop=None):
    """return a single column from the table, generally only indexables
    are interesting
    """
    # validate the version
    self.validate_version()
    # infer the data kind
    # NOTE: returns False (rather than raising) when the axes cannot be
    # inferred -- callers rely on this legacy contract.
    if not self.infer_axes():
        return False
    if where is not None:
        raise TypeError("read_column does not currently accept a where "
                        "clause")
    # find the axes
    for a in self.axes:
        if column == a.name:
            if not a.is_data_indexable:
                raise ValueError(
                    "column [{column}] can not be extracted individually; "
                    "it is not data indexable".format(column=column))
            # column must be an indexable or a data column
            c = getattr(self.table.cols, column)
            a.set_info(self.info)
            # Convert the raw HDF5 column slice and re-attach any timezone
            # before wrapping it in a Series named after the column.
            return Series(_set_tz(a.convert(c[start:stop],
                                            nan_rep=self.nan_rep,
                                            encoding=self.encoding,
                                            errors=self.errors
                                            ).take_data(),
                                  a.tz, True), name=column)
    raise KeyError(
        "column [{column}] not found in the table".format(column=column)) | return a single column from the table, generally only indexables
are interesting |
def remove_by_rank(self, low, high=None):
    """
    Remove elements from the ZSet by their rank (relative position).

    :param low: Lower bound.
    :param high: Upper bound; defaults to ``low``, i.e. a single rank.
    """
    upper = low if high is None else high
    return self.database.zremrangebyrank(self.key, low, upper)
:param low: Lower bound.
:param high: Upper bound. |
def get_priority_rules(db) -> Iterable[PriorityRule]:
    """Get file priority rules."""
    # Generator function on purpose: nothing runs until iteration starts.
    cursor = db.cursor()
    cursor.execute('SELECT id, regexp, priority FROM file_priority')
    yield from (PriorityRule(*record) for record in cursor)
def execute(self, cacheable=False):
    """Returns the XML DOM response of the POST Request from the server"""
    # Serve from cache only when caching is enabled AND the caller allows it.
    if self.network.is_caching_enabled() and cacheable:
        raw = self._get_cached_response()
    else:
        raw = self._download_response()
    cleaned = _string(raw).replace("opensearch:", "")
    return minidom.parseString(cleaned)
def _server_begin_response_callback(self, response: Response):
    '''Pre-response callback handler.'''
    self._item_session.response = response
    # Capture cookies before any rule decides what to do with the response.
    jar = self._cookie_jar
    if jar:
        jar.extract_cookies(response, self._item_session.request)
    action = self._result_rule.handle_pre_response(self._item_session)
    self._file_writer_session.process_response(response)
    return action == Actions.NORMAL
def qwe(rtol, atol, maxint, inp, intervals, lambd=None, off=None,
        factAng=None):
    r"""Quadrature-With-Extrapolation.
    This is the kernel of the QWE method, used for the Hankel (``hqwe``) and
    the Fourier (``fqwe``) Transforms. See ``hqwe`` for an extensive
    description.
    This function is based on ``qwe.m`` from the source code distributed with
    [Key12]_.

    Parameters
    ----------
    rtol, atol : float
        Relative and absolute tolerances used in the convergence check.
    maxint : int
        Maximum number of partial-integration intervals.
    inp : callable or ndarray
        Kernel function (Hankel without spline) or pre-computed interval
        values (Fourier, or Hankel with spline).
    intervals : ndarray
        Integration-interval boundaries; consecutive differences give the
        quadrature weights.
    lambd, off, factAng : ndarray, optional
        Forwarded to ``inp`` per interval when it is a callable; unused
        otherwise.

    Returns
    -------
    EM : ndarray
        Extrapolated result per element (overflow sentinels zeroed out).
    kcount : int
        Number of kernel evaluations (only relevant for Hankel).
    conv : bool
        True if every element converged before ``maxint`` intervals.
    """
    def getweights(i, inpint):
        r"""Return weights for this interval."""
        return (np.atleast_2d(inpint)[:, i+1] - np.atleast_2d(inpint)[:, i])/2
    # 1. Calculate the first interval for all offsets
    if hasattr(inp, '__call__'): # Hankel and not spline
        EM0 = inp(0, lambd, off, factAng)
    else: # Fourier or Hankel with spline
        EM0 = inp[:, 0]
    EM0 *= getweights(0, intervals)
    # 2. Pre-allocate arrays and initialize
    EM = np.zeros(EM0.size, dtype=EM0.dtype) # EM array
    om = np.ones(EM0.size, dtype=bool) # Convergence array
    S = np.zeros((EM0.size, maxint), dtype=EM0.dtype) # Working arr. 4 recurs.
    relErr = np.zeros((EM0.size, maxint)) # Relative error
    extrap = np.zeros((EM0.size, maxint), dtype=EM0.dtype) # extrap. result
    kcount = 1 # Initialize kernel count (only important for Hankel)
    # 3. The extrapolation transformation loop
    for i in range(1, maxint):
        # 3.a Calculate the field for this interval
        # Only the not-yet-converged elements (mask `om`) are evaluated.
        if hasattr(inp, '__call__'): # Hankel and not spline
            EMi = inp(i, lambd[om, :], off[om], factAng[om])
            kcount += 1 # Update count
        else: # Fourier or Hankel with spline
            EMi = inp[om, i]
        EMi *= getweights(i, intervals[om, :])
        # 3.b Compute Shanks transformation
        # Using the epsilon algorithm: structured after [Weni89]_, p26.
        S[:, i][om] = S[:, i-1][om] + EMi # working array for transformation
        # Recursive loop
        aux2 = np.zeros(om.sum(), dtype=EM0.dtype)
        for k in range(i, 0, -1):
            aux1, aux2 = aux2, S[om, k-1]
            ddff = S[om, k] - aux2
            # Near-zero differences would overflow 1/ddff: park the sentinel
            # np.finfo(np.double).max there; it is zeroed out in step 4.
            S[om, k-1] = np.where(np.abs(ddff) < np.finfo(np.double).tiny,
                                  np.finfo(np.double).max, aux1 + 1/ddff)
        # The extrapolated result plus the first interval term
        extrap[om, i-1] = S[om, np.mod(i, 2)] + EM0[om]
        # 3.c Analyze for convergence
        if i > 1:
            # Calculate relative and absolute error
            rErr = (extrap[om, i-1] - extrap[om, i-2])/extrap[om, i-1]
            relErr[om, i-1] = np.abs(rErr)
            abserr = atol/np.abs(extrap[om, i-1])
            # Update booleans
            om[om] *= relErr[om, i-1] >= rtol + abserr
            # Store in EM
            EM[om] = extrap[om, i-1]
            if (~om).all():
                break
    # 4. Cleaning up
    # Warning if maxint is potentially too small
    conv = i+1 != maxint
    # Catch the ones that did not converge
    EM[om] = extrap[om, i-1]
    # Set np.finfo(np.double).max to 0
    EM.real[EM.real == np.finfo(np.double).max] = 0
    return EM, kcount, conv | r"""Quadrature-With-Extrapolation.
This is the kernel of the QWE method, used for the Hankel (``hqwe``) and
the Fourier (``fqwe``) Transforms. See ``hqwe`` for an extensive
description.
This function is based on ``qwe.m`` from the source code distributed with
[Key12]_. |
def get(self, subscription_id=None, stream=None, historics_id=None,
        page=None, per_page=None, order_by=None, order_dir=None,
        include_finished=None):
    """ Show details of the Subscriptions belonging to this user.
    Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushget
    :param subscription_id: optional id of an existing Push Subscription
    :type subscription_id: str
    :param stream: optional hash of a live stream (sent as the 'hash' param)
    :type stream: str
    :param historics_id: optional playback id of a Historics query
    :type historics_id: str
    :param page: optional page number for pagination
    :type page: int
    :param per_page: optional number of items per page, default 20
    :type per_page: int
    :param order_by: field to order by, default request_time
    :type order_by: str
    :param order_dir: direction to order by, asc or desc, default desc
    :type order_dir: str
    :param include_finished: boolean indicating if finished Subscriptions for Historics should be included
    :type include_finished: bool
    :returns: dict with extra response data
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
    """
    params = {}
    if subscription_id:
        params['id'] = subscription_id
    if stream:
        params['hash'] = stream
    if historics_id:
        params['historics_id'] = historics_id
    if page:
        params['page'] = page
    if per_page:
        params['per_page'] = per_page
    if order_by:
        params['order_by'] = order_by
    if order_dir:
        params['order_dir'] = order_dir
    # Check against None, not truthiness: the previous guard made it
    # impossible to send include_finished=0 (False was silently dropped,
    # which defeated the `1 if ... else 0` conversion below).
    if include_finished is not None:
        params['include_finished'] = 1 if include_finished else 0
    return self.request.get('get', params=params)
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushget
:param subscription_id: optional id of an existing Push Subscription
:type subscription_id: str
:param hash: optional hash of a live stream
:type hash: str
:param playback_id: optional playback id of a Historics query
:type playback_id: str
:param page: optional page number for pagination
:type page: int
:param per_page: optional number of items per page, default 20
:type per_page: int
:param order_by: field to order by, default request_time
:type order_by: str
:param order_dir: direction to order by, asc or desc, default desc
:type order_dir: str
:param include_finished: boolean indicating if finished Subscriptions for Historics should be included
:type include_finished: bool
:returns: dict with extra response data
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` |
def regex(expression, flags=re.IGNORECASE):
    """Compile *expression* into a regular-expression pattern object.

    A convenience shortcut around ``re.compile`` so callers do not need
    an extra ``import re`` statement.

    Arguments:
        expression (str): regular-expression source text.
        flags (int): optional regex flags. Defaults to ``re.IGNORECASE``.

    Returns:
        Pattern: the compiled regular expression.

    Raises:
        re.error: if ``expression`` is not a valid regular expression.

    Example::
        (pook
        .get('api.com/foo')
        .header('Content-Type', pook.regex('[a-z]{1,4}')))
    """
    return re.compile(expression, flags=flags)
regular expression compilation without an extra import statement.
Arguments:
expression (str): regular expression value.
flags (int): optional regular expression flags.
Defaults to ``re.IGNORECASE``
Returns:
expression (str): string based regular expression.
Raises:
Exception: in case of regular expression compilation error
Example::
(pook
.get('api.com/foo')
.header('Content-Type', pook.regex('[a-z]{1,4}'))) |
def use_args(
    self,
    argmap: ArgMap,
    req: typing.Optional[Request] = None,
    locations: typing.Iterable = None,
    as_kwargs: bool = False,
    validate: Validate = None,
    error_status_code: typing.Optional[int] = None,
    error_headers: typing.Union[typing.Mapping[str, str], None] = None,
) -> typing.Callable[..., typing.Callable]:
    """Decorator that injects parsed arguments into a view function or method.
    Receives the same arguments as `webargs.core.Parser.use_args`.

    Parsed arguments are passed as an extra positional argument after the
    view's own positional arguments, unless ``as_kwargs`` is True, in which
    case they are merged into the keyword arguments.
    """
    locations = locations or self.locations
    request_obj = req
    # Optimization: If argmap is passed as a dictionary, we only need
    # to generate a Schema once
    if isinstance(argmap, Mapping):
        argmap = core.dict2schema(argmap, self.schema_class)()
    def decorator(func: typing.Callable) -> typing.Callable:
        req_ = request_obj
        if inspect.iscoroutinefunction(func):
            # Native coroutine view: await both the parse and the view.
            @functools.wraps(func)
            async def wrapper(*args, **kwargs):
                req_obj = req_
                if not req_obj:
                    req_obj = self.get_request_from_view_args(func, args, kwargs)
                # NOTE: At this point, argmap may be a Schema, callable, or dict
                parsed_args = await self.parse(
                    argmap,
                    req=req_obj,
                    locations=locations,
                    validate=validate,
                    error_status_code=error_status_code,
                    error_headers=error_headers,
                )
                if as_kwargs:
                    kwargs.update(parsed_args or {})
                    return await func(*args, **kwargs)
                else:
                    # Add parsed_args after other positional arguments
                    new_args = args + (parsed_args,)
                    return await func(*new_args, **kwargs)
        else:
            # NOTE(review): this branch uses `yield from` + return, i.e. the
            # pre-async/await generator-coroutine style — presumably kept
            # for legacy @asyncio.coroutine views; confirm before changing.
            @functools.wraps(func)  # type: ignore
            def wrapper(*args, **kwargs):
                req_obj = req_
                if not req_obj:
                    req_obj = self.get_request_from_view_args(func, args, kwargs)
                # NOTE: At this point, argmap may be a Schema, callable, or dict
                parsed_args = yield from self.parse(  # type: ignore
                    argmap,
                    req=req_obj,
                    locations=locations,
                    validate=validate,
                    error_status_code=error_status_code,
                    error_headers=error_headers,
                )
                if as_kwargs:
                    kwargs.update(parsed_args)
                    return func(*args, **kwargs)  # noqa: B901
                else:
                    # Add parsed_args after other positional arguments
                    new_args = args + (parsed_args,)
                    return func(*new_args, **kwargs)
        return wrapper
    return decorator | Decorator that injects parsed arguments into a view function or method.
Receives the same arguments as `webargs.core.Parser.use_args`. |
def is_now(s, dt=None):
    '''
    A very simple cron-like parser to determine, if (cron-like) string is valid for this date and time.
    @input:
        s = cron-like string (minute, hour, day of month, month, day of week)
        dt = datetime to use as reference time, defaults to now
    @output: boolean of result
    '''
    if dt is None:
        dt = datetime.now()
    minute, hour, dom, month, dow = s.split(' ')
    # Guard-clause style: bail out on the first non-matching field, which
    # preserves the original short-circuit evaluation order.
    if not _parse_arg(minute, dt.minute):
        return False
    if not _parse_arg(hour, dt.hour):
        return False
    if not _parse_arg(dom, dt.day):
        return False
    if not _parse_arg(month, dt.month):
        return False
    iso_dow = dt.isoweekday()
    # Cron uses 0 for Sunday; isoweekday() uses 7.
    return _parse_arg(dow, 0 if iso_dow == 7 else iso_dow, True)
@input:
s = cron-like string (minute, hour, day of month, month, day of week)
dt = datetime to use as reference time, defaults to now
@output: boolean of result |
def determine_chan_detect_threshold(kal_out):
    """Return channel detect threshold from kal output.

    :param kal_out: captured stdout/stderr text from a `kal` run.
    :return: the threshold as a string (last occurrence wins), or None
        when no "channel detect threshold:" line is present.
    """
    # The original wrapped this scan in a `while` loop that could never
    # run a second iteration; a single pass over the lines is equivalent.
    channel_detect_threshold = None
    for line in kal_out.splitlines():
        if "channel detect threshold: " in line:
            channel_detect_threshold = str(line.split()[-1])
    if channel_detect_threshold is None:
        # Fixed copy-pasted message that wrongly said "sample rate".
        print("Unable to parse channel detect threshold")
    return channel_detect_threshold
def get_all(limit=''):
    '''
    Return all installed services. Use the ``limit`` param to restrict results
    to services of that type.
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_all
        salt '*' service.get_all limit=upstart
        salt '*' service.get_all limit=sysvinit
    '''
    kind = limit.lower()
    if kind == 'upstart':
        services = _upstart_services()
    elif kind == 'sysvinit':
        services = _sysv_services()
    else:
        # No (or unknown) limit: merge both service types.
        services = _sysv_services() + _upstart_services()
    return sorted(services)
to services of that type.
CLI Example:
.. code-block:: bash
salt '*' service.get_all
salt '*' service.get_all limit=upstart
salt '*' service.get_all limit=sysvinit |
def query(self):
    """Runs an fstat for this file and repopulates the data"""
    # '-m 1' limits fstat to a single result for this depot path.
    self._p4dict = self._connection.run(['fstat', '-m', '1', self._p4dict['depotFile']])[0]
    self._head = HeadRevision(self._p4dict)
    # NOTE(review): self.depotFile is presumably a property derived from the
    # refreshed _p4dict — confirm; it is not visible in this chunk.
    self._filename = self.depotFile | Runs an fstat for this file and repopulates the data
def paste_mashes(sketches, pasted_mash, force=False):
    """
    Combine mash sketch files into a single sketch via ``mash paste``.
    Input:
        sketches <list[str]> -- paths to sketch files
        pasted_mash <str> -- path to output mash file
        force <boolean> -- force overwrite of an existing output file
    """
    if os.path.isfile(pasted_mash):
        if not force:
            # Output already exists and overwriting was not requested.
            return
        # Remove in-process instead of shelling out to `rm`.
        os.remove(pasted_mash)
    # `mash paste` appends the .msh suffix itself, so strip it here.
    out_prefix = pasted_mash.rsplit('.msh')[0]
    mash_cmd = ['/opt/bin/bio/mash', 'paste', out_prefix] + list(sketches)
    subprocess.Popen(mash_cmd).wait()
    return
Input:
sketches <list[str]> -- paths to sketch files
pasted_mash <str> -- path to output mash file
force <boolean> -- force overwrite of all mash file |
async def destroy_models(self, *models, destroy_storage=False):
    """Destroy one or more models.

    :param str *models: Names or UUIDs of models to destroy
    :param bool destroy_storage: Whether or not to destroy storage when
        destroying the models. Defaults to false.
    """
    # Resolve any model names to UUIDs; unknown names pass through as-is.
    uuids = await self.model_uuids()
    resolved = [uuids.get(name, name) for name in models]
    model_facade = client.ModelManagerFacade.from_connection(
        self.connection())
    plural = '' if len(resolved) == 1 else 's'
    log.debug('Destroying model%s %s', plural, ', '.join(resolved))
    if model_facade.version >= 5:
        # Newer facades take explicit params including the storage flag.
        params = [
            client.DestroyModelParams(model_tag=tag.model(name),
                                      destroy_storage=destroy_storage)
            for name in resolved]
    else:
        params = [client.Entity(tag.model(name)) for name in resolved]
    await model_facade.DestroyModels(params)
:param str *models: Names or UUIDs of models to destroy
:param bool destroy_storage: Whether or not to destroy storage when
destroying the models. Defaults to false. |
def askopenfilename(**kwargs):
    """Return file name(s) chosen via Tkinter's file-open dialog.

    Keyword arguments are forwarded to the underlying
    ``filedialog.askopenfilename`` call.
    """
    try:
        # Python 2 module names.
        from Tkinter import Tk
        import tkFileDialog as filedialog
    except ImportError:
        # Python 3 module names.
        from tkinter import Tk, filedialog
    root = Tk()
    root.withdraw()  # hide the empty root window behind the dialog
    root.update()
    chosen = filedialog.askopenfilename(**kwargs)
    root.destroy()
    return chosen
def make_nylas_blueprint(
    client_id=None,
    client_secret=None,
    scope="email",
    redirect_url=None,
    redirect_to=None,
    login_url=None,
    authorized_url=None,
    session_class=None,
    storage=None,
):
    """
    Make a blueprint for authenticating with Nylas using OAuth 2. This requires
    an API ID and API secret from Nylas. You should either pass them to
    this constructor, or make sure that your Flask application config defines
    them, using the variables :envvar:`NYLAS_OAUTH_CLIENT_ID` and
    :envvar:`NYLAS_OAUTH_CLIENT_SECRET`.
    Args:
        client_id (str): The client ID for your developer account on Nylas.
        client_secret (str): The client secret for your developer account
            on Nylas.
        scope (str, optional): comma-separated list of scopes for the OAuth
            token. Defaults to "email".
        redirect_url (str): the URL to redirect to after the authentication
            dance is complete
        redirect_to (str): if ``redirect_url`` is not defined, the name of the
            view to redirect to after the authentication dance is complete.
            The actual URL will be determined by :func:`flask.url_for`
        login_url (str, optional): the URL path for the ``login`` view.
            Defaults to ``/nylas``
        authorized_url (str, optional): the URL path for the ``authorized`` view.
            Defaults to ``/nylas/authorized``.
        session_class (class, optional): The class to use for creating a
            Requests session. Defaults to
            :class:`~flask_dance.consumer.requests.OAuth2Session`.
        storage: A token storage class, or an instance of a token storage
            class, to use for this blueprint. Defaults to
            :class:`~flask_dance.consumer.storage.session.SessionStorage`.
    :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
    :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
    """
    nylas_bp = OAuth2ConsumerBlueprint(
        "nylas",
        __name__,
        client_id=client_id,
        client_secret=client_secret,
        scope=scope,
        base_url="https://api.nylas.com/",
        authorization_url="https://api.nylas.com/oauth/authorize",
        token_url="https://api.nylas.com/oauth/token",
        redirect_url=redirect_url,
        redirect_to=redirect_to,
        login_url=login_url,
        authorized_url=authorized_url,
        session_class=session_class,
        storage=storage,
    )
    # Fall back to the Flask app config for credentials not passed explicitly.
    nylas_bp.from_config["client_id"] = "NYLAS_OAUTH_CLIENT_ID"
    nylas_bp.from_config["client_secret"] = "NYLAS_OAUTH_CLIENT_SECRET"
    @nylas_bp.before_app_request
    def set_applocal_session():
        # NOTE(review): `stack` is presumably Flask's app-context stack
        # (imported at module top); stack.top is the active context. This
        # exposes the OAuth session as `ctx.nylas_oauth` for each request.
        ctx = stack.top
        ctx.nylas_oauth = nylas_bp.session
    return nylas_bp | Make a blueprint for authenticating with Nylas using OAuth 2. This requires
an API ID and API secret from Nylas. You should either pass them to
this constructor, or make sure that your Flask application config defines
them, using the variables :envvar:`NYLAS_OAUTH_CLIENT_ID` and
:envvar:`NYLAS_OAUTH_CLIENT_SECRET`.
Args:
client_id (str): The client ID for your developer account on Nylas.
client_secret (str): The client secret for your developer account
on Nylas.
scope (str, optional): comma-separated list of scopes for the OAuth
token. Defaults to "email".
redirect_url (str): the URL to redirect to after the authentication
dance is complete
redirect_to (str): if ``redirect_url`` is not defined, the name of the
view to redirect to after the authentication dance is complete.
The actual URL will be determined by :func:`flask.url_for`
login_url (str, optional): the URL path for the ``login`` view.
Defaults to ``/nylas``
authorized_url (str, optional): the URL path for the ``authorized`` view.
Defaults to ``/nylas/authorized``.
session_class (class, optional): The class to use for creating a
Requests session. Defaults to
:class:`~flask_dance.consumer.requests.OAuth2Session`.
storage: A token storage class, or an instance of a token storage
class, to use for this blueprint. Defaults to
:class:`~flask_dance.consumer.storage.session.SessionStorage`.
:rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
:returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app. |
def _prune_hit(hit, model):
    """
    Decide whether a search document ('hit') should be pruned from an index.

    Uses the SearchDocumentManagerMixin.in_search_queryset method to check
    whether the document still belongs in the index.

    Args:
        hit: dict representing a document as returned from the scan_index
            function; must contain '_id' and '_index'.
        model: the Django model (class, not instance) the document was
            derived from.

    Returns:
        None when the document is still in the search queryset; otherwise a
        transient ``model`` instance carrying only the document id. The
        underlying object may not exist any more — the id alone is enough
        to build a 'delete' action.
    """
    doc_id, doc_index = hit["_id"], hit["_index"]
    if model.objects.in_search_queryset(doc_id, index=doc_index):
        logger.debug(
            "%s with id=%s exists in the '%s' index queryset.", model, doc_id, doc_index
        )
        return None
    logger.debug(
        "%s with id=%s does not exist in the '%s' index queryset and will be pruned.",
        model,
        doc_id,
        doc_index,
    )
    # A pk-only instance is sufficient for a bulk 'delete' action.
    return model(pk=doc_id)
This method uses the SearchDocumentManagerMixin.in_search_queryset method
to determine whether a 'hit' (search document) should be pruned from an index,
and if so it returns the hit as a Django object(id=hit_id).
Args:
hit: dict object the represents a document as returned from the scan_index
function. (Contains object id and index.)
model: the Django model (not object) from which the document was derived.
Used to get the correct model manager and bulk action.
Returns:
an object of type model, with id=hit_id. NB this is not the object
itself, which by definition may not exist in the underlying database,
but a temporary object with the document id - which is enough to create
a 'delete' action. |
def _compute_error(self):
"""Compute unexplained error."""
sum_x = sum(self.x_transforms)
err = sum((self.y_transform - sum_x) ** 2) / len(sum_x)
return err | Compute unexplained error. |
def transform(self, X):
    '''
    Run *X* through the fitted pipeline and map results back to feature dicts.

    :param X: features.
    '''
    # When feature selection was applied, the cloned vectorizer knows the
    # reduced feature space and must be used for the inverse mapping.
    if self.feature_selection:
        inverse = self.clone_dict_vectorizer_
    else:
        inverse = self.dict_vectorizer_
    vectorized = self.dict_vectorizer_.transform(X)
    transformed = self.transformer.transform(vectorized)
    return inverse.inverse_transform(transformed)
def setup(self, target=None, strict=False, minify=False, line_numbers=False, keep_lines=False, no_tco=False):
    """Initializes parsing parameters.

    Normalizes and validates the target Python version, then stores all
    compiler flags on the instance. Raises CoconutException for an
    unsupported target.
    """
    target = "" if target is None else str(target).replace(".", "")
    # Map aliases (e.g. version nicknames) onto canonical target names.
    target = pseudo_targets.get(target, target)
    if target not in targets:
        supported = ', '.join(ascii(t) for t in specific_targets)
        raise CoconutException(
            "unsupported target Python version " + ascii(target),
            extra="supported targets are " + supported + ", or leave blank for universal",
        )
    logger.log_vars("Compiler args:", locals())
    self.target = target
    self.strict = strict
    self.minify = minify
    self.line_numbers = line_numbers
    self.keep_lines = keep_lines
    self.no_tco = no_tco
def use_sequestered_assessment_part_view(self):
    """Pass through to provider AssessmentPartLookupSession.use_sequestered_assessment_part_view"""
    # Does this need to be re-implemented to match the other non-sub-package view setters?
    self._containable_views['assessment_part'] = SEQUESTERED
    self._get_sub_package_provider_session('assessment_authoring',
                                           'assessment_part_lookup_session')
    # Propagate the view to every cached provider session that supports it.
    for sessions_by_name in self._provider_sessions.values():
        for provider_session in sessions_by_name.values():
            try:
                provider_session.use_sequestered_assessment_part_view()
            except AttributeError:
                # Sessions without this view setter are simply skipped.
                pass
def extend(self, item):
    """
    Append every entry of *item* to this object's backing list.

    Raises AssertionError when called on a `dict`-based object; does
    nothing for any other base type.
    """
    base = self.meta_type
    if base == 'dict':
        raise AssertionError('Cannot extend to object of `dict` base type!')
    if base == 'list':
        self._list.extend(item)
def to_proper_radians(theta):
    """
    Normalize *theta* (radians) to the equivalent angle within [-pi, pi].

    The previous implementation used ``theta % pi``, which shifts the
    angle by an odd multiple of pi for many inputs (e.g. 1.5*pi became
    0.5*pi — a different direction entirely). ``(theta + pi) % (2*pi) - pi``
    only ever subtracts whole turns, so the angle is preserved.
    """
    if theta > pi or theta < -pi:
        theta = (theta + pi) % (2 * pi) - pi
    return theta
def GetSavename(default=None, **kwargs):
    """Prompt the user for a filename to save as.
    This will raise a Zenity Save As Dialog. It will return the name to save
    a file as or None if the user hit cancel.
    default - The default name that should appear in the save as dialog.
    kwargs - Optional command line parameters for Zenity such as height,
    width, etc."""
    args = ['--save']
    if default:
        args.append('--filename=%s' % default)
    # Translate extra keyword options into --key=value zenity flags.
    args.extend('--%s=%s' % pair for pair in kwargs_helper(kwargs))
    proc = run_zenity('--file-selection', *args)
    if proc.wait() == 0:
        return proc.stdout.read().strip().split('|')
This will raise a Zenity Save As Dialog. It will return the name to save
a file as or None if the user hit cancel.
default - The default name that should appear in the save as dialog.
kwargs - Optional command line parameters for Zenity such as height,
width, etc. |
def move_saved_issue_data(self, issue, ns, other_ns):
    """Moves an issue_data entry from namespace *ns* to *other_ns*.

    *issue* may be an issue number (int or string) or an object with a
    ``number`` attribute.
    """
    if isinstance(issue, int):
        number = str(issue)
    elif isinstance(issue, basestring):
        number = issue
    else:
        number = issue.number
    src_key = self._issue_data_key(ns)
    dst_key = self._issue_data_key(other_ns)
    src = self.data.get(src_key, {})
    dst = self.data.get(dst_key, {})
    moved = src.pop(number, None)
    if moved:
        dst[number] = moved
        self.data[dst_key] = dst
    # Persist the source map even when nothing moved (the pop may be a no-op).
    self.data[src_key] = src
def close(self):
    """Closes associated resources of this request object. This
    closes all file handles explicitly. You can also use the request
    object in a with statement which will automatically close it.
    .. versionadded:: 0.9
    """
    # Access the attribute directly to avoid triggering lazy parsing.
    files = self.__dict__.get("files")
    if files:
        for _name, handle in iter_multi_items(files):
            handle.close()
closes all file handles explicitly. You can also use the request
object in a with statement which will automatically close it.
.. versionadded:: 0.9 |
def generate_name_variations(name):
    """Generate name variations for a given name.
    Args:
        name (six.text_type): The name whose variations are to be generated.
    Returns:
        list: All the name variations for the given name.
    Notes:
        Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map
        both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
    """
    def _update_name_variations_with_product(set_a, set_b):
        # Mutates the enclosing `name_variations` set with every
        # (a, separator, b) combination, transliterated and lower-cased.
        name_variations.update([
            unidecode((names_variation[0] +
                       separator +
                       names_variation[1]).strip(''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()
            for names_variation
            in product(set_a, set_b)
            for separator
            in _LASTNAME_NON_LASTNAME_SEPARATORS
        ])
    parsed_name = ParsedName.loads(name)
    # Handle rare-case of single-name
    if len(parsed_name) == 1:
        return [parsed_name.dumps().lower()]
    name_variations = set()
    # We need to filter out empty entries, since HumanName for this name `Perelstein,, Maxim` returns a first_list with
    # an empty string element.
    non_lastnames = [
        non_lastname
        for non_lastname
        in parsed_name.first_list + parsed_name.suffix_list
        if non_lastname
    ]
    # This is needed because due to erroneous data (e.g. having many authors in a single authors field) ends up
    # requiring a lot of memory (due to combinatorial expansion of all non lastnames).
    # The policy is to use the input as a name variation, since this data will have to be curated.
    if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:
        LOGGER.error('Skipping name variations generation - too many names in: "%s"', name)
        return [name]
    non_lastnames_variations = \
        _generate_non_lastnames_variations(non_lastnames)
    lastnames_variations = _generate_lastnames_variations(parsed_name.last_list)
    # Create variations where lastnames comes first and is separated from non lastnames either by space or comma.
    _update_name_variations_with_product(lastnames_variations, non_lastnames_variations)
    # Second part of transformations - having the lastnames in the end.
    _update_name_variations_with_product(non_lastnames_variations, lastnames_variations)
    return list(name_variations) | Generate name variations for a given name.
Args:
name (six.text_type): The name whose variations are to be generated.
Returns:
list: All the name variations for the given name.
Notes:
Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map
both full names of authors in HEP records and user's input to the same space and thus make exact queries work. |
def add_snippet_client(self, name, package):
    """Adds a snippet client to the management.
    Args:
        name: string, the attribute name to which to attach the snippet
            client. E.g. `name='maps'` attaches the snippet client to
            `ad.maps`.
        package: string, the package name of the snippet apk to connect to.
    Raises:
        Error, if a duplicated name or package is passed in.
    """
    # Should not load snippet with the same name more than once.
    if name in self._snippet_clients:
        # NOTE(review): `self._snippet_clients[name]` is itself a
        # SnippetClient (see the loop below, which reads `client.package`
        # directly), so `.client.package` here looks like it should be
        # just `.package` — confirm before relying on this error message.
        raise Error(
            self,
            'Name "%s" is already registered with package "%s", it cannot '
            'be used again.' %
            (name, self._snippet_clients[name].client.package))
    # Should not load the same snippet package more than once.
    for snippet_name, client in self._snippet_clients.items():
        if package == client.package:
            raise Error(
                self,
                'Snippet package "%s" has already been loaded under name'
                ' "%s".' % (package, snippet_name))
    client = snippet_client.SnippetClient(package=package, ad=self._device)
    client.start_app_and_connect()
    self._snippet_clients[name] = client | Adds a snippet client to the management.
Args:
name: string, the attribute name to which to attach the snippet
client. E.g. `name='maps'` attaches the snippet client to
`ad.maps`.
package: string, the package name of the snippet apk to connect to.
Raises:
Error, if a duplicated name or package is passed in. |
def parse_proposal_data(self, proposal_data, dossier_pk):
    """Get or Create a proposal model from raw data.

    :param proposal_data: dict of raw proposal/vote data; must contain
        'title', and is skipped entirely when it lacks 'issue_type'.
    :param dossier_pk: primary key of the dossier the proposal belongs to.
    :return: the saved Proposal, or None when the data has no issue_type
        or when a vote_pre_import receiver vetoes the import.
    """
    proposal_display = '{} ({})'.format(proposal_data['title'].encode(
        'utf-8'), proposal_data.get('report', '').encode('utf-8'))
    if 'issue_type' not in proposal_data.keys():
        logger.debug('This proposal data without issue_type: %s',
                     proposal_data['epref'])
        return
    changed = False
    try:
        proposal = Proposal.objects.get(title=proposal_data['title'])
    except Proposal.DoesNotExist:
        proposal = Proposal(title=proposal_data['title'])
        changed = True
    data_map = dict(
        title=proposal_data['title'],
        datetime=_parse_date(proposal_data['ts']),
        dossier_id=dossier_pk,
        reference=proposal_data.get('report'),
        kind=proposal_data.get('issue_type')
    )
    # Totals sometimes arrive as digit strings; normalize them to ints.
    for position in ('For', 'Abstain', 'Against'):
        position_data = proposal_data.get(position, {})
        position_total = position_data.get('total', 0)
        if isinstance(position_total, str) and position_total.isdigit():
            position_total = int(position_total)
        data_map['total_%s' % position.lower()] = position_total
    # Only hit the database when at least one field actually changed.
    for key, value in data_map.items():
        if value != getattr(proposal, key, None):
            setattr(proposal, key, value)
            changed = True
    if changed:
        proposal.save()
    # Any receiver returning False vetoes the whole vote import.
    responses = vote_pre_import.send(sender=self, vote_data=proposal_data)
    for receiver, response in responses:
        if response is False:
            logger.debug(
                'Skipping dossier %s', proposal_data.get(
                    'epref', proposal_data['title']))
            return
    positions = ['For', 'Abstain', 'Against']
    logger.info(
        'Looking for votes in proposal {}'.format(proposal_display))
    for position in positions:
        for group_vote_data in proposal_data.get(
                position,
                {}).get(
                'groups',
                {}):
            for vote_data in group_vote_data['votes']:
                if not isinstance(vote_data, dict):
                    logger.error('Skipping vote data %s for proposal %s',
                                 vote_data, proposal_data['_id'])
                    continue
                representative_pk = self.get_representative(vote_data)
                if representative_pk is None:
                    logger.error('Could not find mep for %s', vote_data)
                    continue
                representative_name = vote_data.get('orig', '')
                changed = False
                try:
                    vote = Vote.objects.get(
                        representative_id=representative_pk,
                        proposal_id=proposal.pk)
                except Vote.DoesNotExist:
                    vote = Vote(proposal_id=proposal.pk,
                                representative_id=representative_pk)
                    changed = True
                if vote.position != position.lower():
                    changed = True
                    vote.position = position.lower()
                if vote.representative_name != representative_name:
                    changed = True
                    vote.representative_name = representative_name
                if changed:
                    vote.save()
                    logger.debug('Save vote %s for MEP %s on %s #%s to %s',
                                 vote.pk, representative_pk, proposal_data['title'],
                                 proposal.pk, position)
    return proposal | Get or Create a proposal model from raw data
def sync_sources(self):
    """
    Syncs data sources between Elements, which draw data
    from the same object.

    Plots that share an underlying data object (and opted in via
    ``shared_datasource``) are rewired to a single merged
    ColumnDataSource, then their hooks and callbacks are re-initialized.
    """
    # Key each plot by the identity of its current frame's data object.
    get_sources = lambda x: (id(x.current_frame.data), x)
    filter_fn = lambda x: (x.shared_datasource and x.current_frame is not None and
                           not isinstance(x.current_frame.data, np.ndarray)
                           and 'source' in x.handles)
    data_sources = self.traverse(get_sources, [filter_fn])
    grouped_sources = groupby(sorted(data_sources, key=lambda x: x[0]), lambda x: x[0])
    shared_sources = []
    source_cols = {}
    plots = []
    for _, group in grouped_sources:
        group = list(group)
        if len(group) > 1:
            # Merge the column data of every plot in the group into one source.
            source_data = {}
            for _, plot in group:
                source_data.update(plot.handles['source'].data)
            new_source = ColumnDataSource(source_data)
            for _, plot in group:
                renderer = plot.handles.get('glyph_renderer')
                for callback in plot.callbacks:
                    callback.reset()
                if renderer is None:
                    continue
                elif 'data_source' in renderer.properties():
                    renderer.update(data_source=new_source)
                else:
                    renderer.update(source=new_source)
                if hasattr(renderer, 'view'):
                    renderer.view.update(source=new_source)
                plot.handles['source'] = plot.handles['cds'] = new_source
                plots.append(plot)
            shared_sources.append(new_source)
            source_cols[id(new_source)] = [c for c in new_source.data]
    # Re-run hooks and re-initialize callbacks against the new sources.
    for plot in plots:
        if plot.hooks and plot.finalize_hooks:
            self.param.warning(
                "Supply either hooks or finalize_hooks not both; "
                "using hooks and ignoring finalize_hooks.")
        hooks = plot.hooks or plot.finalize_hooks
        for hook in hooks:
            hook(plot, plot.current_frame)
        for callback in plot.callbacks:
            callback.initialize(plot_id=self.id)
    self.handles['shared_sources'] = shared_sources
    self.handles['source_cols'] = source_cols | Syncs data sources between Elements, which draw data
from the same object. |
def get_soup_response(self):
    """Get the response as a cached BeautifulSoup container.

    Returns:
        obj: The BeautifulSoup container, or None when no response is set.
    """
    if self.response is None:
        return None
    if self.__response_soup is None:
        # Parse once and reuse the tree; previously the body was parsed a
        # second time just to populate the cache.
        soup = BeautifulSoup(self.response.text, "lxml")
        if self.decomposed:
            # Decomposed trees must not be cached — return a fresh parse
            # and leave the cache empty, matching the original behavior.
            return soup
        self.__response_soup = soup
    return self.__response_soup
Returns:
obj: The BeautifulSoup container. |
def center_eigenvalue_diff(mat):
    """Compute the eigvals of mat and then find the center eigval difference.

    Parameters
    ----------
    mat : array_like
        Square matrix whose eigenvalue spectrum is examined.

    Returns
    -------
    float
        Absolute difference between the two eigenvalues straddling the
        center of the sorted spectrum.
    """
    n = len(mat)
    evals = np.sort(la.eigvals(mat))
    # Use integer floor division: under Python 3, n / 2 yields a float and
    # float indexing raises TypeError.
    diff = np.abs(evals[n // 2] - evals[n // 2 - 1])
    return diff
def get_disk_cache(self, key=None):
    """Return result in disk cache for key 'key' or None if not found.

    Args:
        key: Cache key to look up; defaults to ``self.model.hash``.

    Returns:
        The cached value for ``key``, or None when absent. The value is
        also stored on ``self._results`` as a side effect.
    """
    key = self.model.hash if key is None else key
    if not getattr(self, 'disk_cache_location', False):
        self.init_disk_cache()
    # Use the Shelf context manager so the underlying dbm file is closed
    # even if the lookup raises (the original leaked the handle then).
    with shelve.open(self.disk_cache_location) as disk_cache:
        self._results = disk_cache.get(key)
    return self._results
def timethis(func):
    """Decorator that prints the wall-clock runtime of each call."""
    module_name, name = func.__module__, func.__name__

    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = _time_perf_counter()
        result = func(*args, **kwargs)
        elapsed = _time_perf_counter() - started
        print('timethis : <{}.{}> : {}'.format(module_name, name, elapsed))
        return result

    return timed
def dsa_sign(private_key, data, hash_algorithm):
    """
    Generates a DSA signature

    :param private_key:
        The PrivateKey used to produce the signature; must be a DSA key
    :param data:
        A byte string containing the data to be signed
    :param hash_algorithm:
        A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library
    :return:
        A byte string of the signature
    """
    # Reject any non-DSA key up front; other key types have their own
    # signing wrappers.
    if private_key.algorithm != 'dsa':
        raise ValueError('The key specified is not a DSA private key')
    return _sign(private_key, data, hash_algorithm)
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the signature |
def copy_from_dict(self, attributes):
    """Copies the attribute container from a dictionary.

    Args:
        attributes (dict[str, object]): attribute values per name.
    """
    # First-character test rather than startswith: cheaper on this hot path,
    # and names beginning with '_' are treated as private and skipped.
    public_items = (
        (name, value) for name, value in attributes.items()
        if name[0] != '_')
    for name, value in public_items:
        setattr(self, name, value)
Args:
attributes (dict[str, object]): attribute values per name. |
def ModifyInstance(self, ModifiedInstance, IncludeQualifiers=None,
                   PropertyList=None, **extra):
    # pylint: disable=invalid-name,line-too-long
    """
    Modify the property values of an instance.

    This method performs the ModifyInstance operation
    (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
    methods performing such operations.

    The `PropertyList` parameter determines the set of properties that are
    designated to be modified (see its description for details).

    The properties provided in the `ModifiedInstance` parameter specify
    the new property values for the properties that are designated to be
    modified.

    Pywbem sends the property values provided in the `ModifiedInstance`
    parameter to the WBEM server as provided; it does not add any default
    values for properties not provided but designated to be modified, nor
    does it reduce the properties by those not designated to be modified.

    The properties that are actually modified by the WBEM server as a result
    of this operation depend on a number of things:

    * The WBEM server will reject modification requests for key properties
      and for properties that are not exposed by the creation class of the
      target instance.

    * The WBEM server may consider some properties as read-only, as a
      result of requirements at the CIM modeling level (schema or
      management profiles), or as a result of an implementation decision.
      Note that the WRITE qualifier on a property is not a safe indicator
      as to whether the property can actually be modified. It is an
      expression at the level of the CIM schema that may or may not be
      considered in DMTF management profiles or in implementations.
      Specifically, a qualifier value of True on a property does not
      guarantee modifiability of the property, and a value of False does
      not prevent modifiability.

    * The WBEM server may detect invalid new values or conflicts resulting
      from the new property values and may reject modification of a property
      for such reasons.

    If the WBEM server rejects modification of a property for any reason,
    it will cause this operation to fail and will not modify any property
    on the target instance. If this operation succeeds, all properties
    designated to be modified have their new values (see the description
    of the `ModifiedInstance` parameter for details on how the new values
    are determined).

    Note that properties (including properties not designated to be
    modified) may change their values as an indirect result of this
    operation. For example, a property that was not designated to be
    modified may be derived from another property that was modified, and
    may show a changed value due to that.

    If the operation succeeds, this method returns.
    Otherwise, this method raises an exception.

    Parameters:

      ModifiedInstance (:class:`~pywbem.CIMInstance`):
        A representation of the modified instance, also indicating its
        instance path.

        The `path` attribute of this object identifies the instance to be
        modified. Its `keybindings` attribute is required. If its
        `namespace` attribute is `None`, the default namespace of the
        connection will be used. Its `host` attribute will be ignored.

        The `classname` attribute of the instance path and the `classname`
        attribute of the instance must specify the same class name.

        The properties defined in this object specify the new property
        values (including `None` for NULL). If a property is designated to
        be modified but is not specified in this object, the WBEM server
        will use the default value of the property declaration if specified
        (including `None`), and otherwise may update the property to any
        value (including `None`).

        Typically, this object has been retrieved by other operations,
        such as :meth:`~pywbem.WBEMConnection.GetInstance`.

      IncludeQualifiers (:class:`py:bool`):
        Indicates that qualifiers are to be modified as specified in the
        `ModifiedInstance` parameter, as follows:

        * If `False`, qualifiers not modified.
        * If `True`, qualifiers are modified if the WBEM server implements
          support for this parameter.
        * If `None`, this parameter is not passed to the WBEM server, and
          causes the server-implemented default to be used. :term:`DSP0200`
          defines that the server-implemented default is `True`.

        This parameter has been deprecated in :term:`DSP0200`. Clients
        cannot rely on qualifiers to be modified.

      PropertyList (:term:`string` or :term:`py:iterable` of :term:`string`):
        This parameter defines which properties are designated to be
        modified.

        This parameter is an iterable specifying the names of the
        properties, or a string that specifies a single property name. In
        all cases, the property names are matched case insensitively.
        The specified properties are designated to be modified. Properties
        not specified are not designated to be modified.

        An empty iterable indicates that no properties are designated to be
        modified.

        If `None`, DSP0200 states that the properties with values different
        from the current values in the instance are designated to be
        modified, but for all practical purposes this is equivalent to
        stating that all properties exposed by the instance are designated
        to be modified.

      **extra :
        Additional keyword arguments are passed as additional operation
        parameters to the WBEM server.
        Note that :term:`DSP0200` does not define any additional parameters
        for this operation.

    Raises:

        Exceptions described in :class:`~pywbem.WBEMConnection`.
    """  # noqa: E501
    # 'exc' is captured for the statistics / recorder bookkeeping in the
    # finally clause; it stays None on the success path.
    exc = None
    method_name = 'ModifyInstance'
    if self._operation_recorders:
        # Stage the call arguments with any active operation recorders
        # before issuing the request.
        self.operation_recorder_reset()
        self.operation_recorder_stage_pywbem_args(
            method=method_name,
            ModifiedInstance=ModifiedInstance,
            IncludeQualifiers=IncludeQualifiers,
            PropertyList=PropertyList,
            **extra)
    try:
        stats = self.statistics.start_timer('ModifyInstance')
        # Must pass a named CIMInstance here (i.e path attribute set)
        if ModifiedInstance.path is None:
            raise ValueError(
                'ModifiedInstance parameter must have path attribute set')
        if ModifiedInstance.path.classname is None:
            raise ValueError(
                'ModifiedInstance parameter must have classname set in '
                ' path')
        if ModifiedInstance.classname is None:
            raise ValueError(
                'ModifiedInstance parameter must have classname set in '
                'instance')
        namespace = self._iparam_namespace_from_objectname(
            ModifiedInstance.path, 'ModifiedInstance.path')
        PropertyList = _iparam_propertylist(PropertyList)
        # Strip off host and namespace to avoid producing an INSTANCEPATH or
        # LOCALINSTANCEPATH element instead of the desired INSTANCENAME
        # element. A copy is modified so the caller's instance is untouched.
        instance = ModifiedInstance.copy()
        instance.path.namespace = None
        instance.path.host = None
        self._imethodcall(
            method_name,
            namespace,
            ModifiedInstance=instance,
            IncludeQualifiers=IncludeQualifiers,
            PropertyList=PropertyList,
            has_return_value=False,
            **extra)
        return
    except (CIMXMLParseError, XMLParseError) as exce:
        # Attach the raw request/reply so callers can diagnose XML-level
        # parse failures; then re-raise unchanged.
        exce.request_data = self.last_raw_request
        exce.response_data = self.last_raw_reply
        exc = exce
        raise
    except Exception as exce:
        # Remember the exception for the finally-clause bookkeeping.
        exc = exce
        raise
    finally:
        # Always stop the timer and record the outcome (exc is None on
        # success), even when an exception is propagating.
        self._last_operation_time = stats.stop_timer(
            self.last_request_len, self.last_reply_len,
            self.last_server_response_time, exc)
        if self._operation_recorders:
            self.operation_recorder_stage_result(None, exc)
This method performs the ModifyInstance operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
The `PropertyList` parameter determines the set of properties that are
designated to be modified (see its description for details).
The properties provided in the `ModifiedInstance` parameter specify
the new property values for the properties that are designated to be
modified.
Pywbem sends the property values provided in the `ModifiedInstance`
parameter to the WBEM server as provided; it does not add any default
values for properties not provided but designated to be modified, nor
does it reduce the properties by those not designated to be modified.
The properties that are actually modified by the WBEM server as a result
of this operation depend on a number of things:
* The WBEM server will reject modification requests for key properties
and for properties that are not exposed by the creation class of the
target instance.
* The WBEM server may consider some properties as read-only, as a
result of requirements at the CIM modeling level (schema or
management profiles), or as a result of an implementation decision.
Note that the WRITE qualifier on a property is not a safe indicator
as to whether the property can actually be modified. It is an
expression at the level of the CIM schema that may or may not be
considered in DMTF management profiles or in implementations.
Specifically, a qualifier value of True on a property does not
guarantee modifiability of the property, and a value of False does
not prevent modifiability.
* The WBEM server may detect invalid new values or conflicts resulting
from the new property values and may reject modification of a property
for such reasons.
If the WBEM server rejects modification of a property for any reason,
it will cause this operation to fail and will not modify any property
on the target instance. If this operation succeeds, all properties
designated to be modified have their new values (see the description
of the `ModifiedInstance` parameter for details on how the new values
are determined).
Note that properties (including properties not designated to be
modified) may change their values as an indirect result of this
operation. For example, a property that was not designated to be
modified may be derived from another property that was modified, and
may show a changed value due to that.
If the operation succeeds, this method returns.
Otherwise, this method raises an exception.
Parameters:
ModifiedInstance (:class:`~pywbem.CIMInstance`):
A representation of the modified instance, also indicating its
instance path.
The `path` attribute of this object identifies the instance to be
modified. Its `keybindings` attribute is required. If its
`namespace` attribute is `None`, the default namespace of the
connection will be used. Its `host` attribute will be ignored.
The `classname` attribute of the instance path and the `classname`
attribute of the instance must specify the same class name.
The properties defined in this object specify the new property
values (including `None` for NULL). If a property is designated to
be modified but is not specified in this object, the WBEM server
will use the default value of the property declaration if specified
(including `None`), and otherwise may update the property to any
value (including `None`).
Typically, this object has been retrieved by other operations,
such as :meth:`~pywbem.WBEMConnection.GetInstance`.
IncludeQualifiers (:class:`py:bool`):
Indicates that qualifiers are to be modified as specified in the
`ModifiedInstance` parameter, as follows:
* If `False`, qualifiers not modified.
* If `True`, qualifiers are modified if the WBEM server implements
support for this parameter.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default to be used. :term:`DSP0200`
defines that the server-implemented default is `True`.
This parameter has been deprecated in :term:`DSP0200`. Clients
cannot rely on qualifiers to be modified.
PropertyList (:term:`string` or :term:`py:iterable` of :term:`string`):
This parameter defines which properties are designated to be
modified.
This parameter is an iterable specifying the names of the
properties, or a string that specifies a single property name. In
all cases, the property names are matched case insensitively.
The specified properties are designated to be modified. Properties
not specified are not designated to be modified.
An empty iterable indicates that no properties are designated to be
modified.
If `None`, DSP0200 states that the properties with values different
from the current values in the instance are designated to be
modified, but for all practical purposes this is equivalent to
stating that all properties exposed by the instance are designated
to be modified.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`. |
def close(self, discard=False):
    '''Return the underlying :attr:`connection` to the owning
    :attr:`pool` and detach both references from this wrapper.
    '''
    if self.pool is not None:
        self.pool._put(self.connection, discard)
        self.pool = None
    conn = self.connection
    self.connection = None
    return conn
:attr:`connection` back to the :attr:`pool`. |
def setup_environment(config: Dict[str, Any], environment_type: Environment) -> None:
    """Apply environment-type specific settings to the given config."""
    # Mainnet safety: production deployments must use private matrix rooms.
    if environment_type == Environment.PRODUCTION:
        config['transport']['matrix']['private_rooms'] = True
    config['environment_type'] = environment_type
    print(f'Raiden is running in {environment_type.value.lower()} mode')
def do_drag_data_received(self, drag_context, x, y, data, info, time):
    '''Handle directories/files dragged in from other applications, for upload.

    The dropped files are uploaded directly into the current directory
    (self.path). The drag-and-drop event is fully handled here, so it will
    not trigger the drop action of self.app.window.
    '''
    # No active profile means no upload target — ignore the drop.
    if not self.app.profile:
        return
    if info == TargetInfo.URI_LIST:
        uris = data.get_uris()
        source_paths = util.uris_to_paths(uris)
        if source_paths:
            self.app.upload_page.upload_files(source_paths, self.path)
这里, 会直接把文件上传到当前目录(self.path).
拖放事件已经被处理, 所以不会触发self.app.window的拖放动作. |
def get_count_sql(self):
    """
    Build a SELECT query which returns the count of items for an unlimited SELECT

    :return:
        A SQL SELECT query which returns the count of items for an unlimited
        query based on this SQLBuilder
    """
    query = 'SELECT COUNT(*) FROM ' + self.tables
    if self.where_clauses:
        query = f"{query} WHERE {' AND '.join(self.where_clauses)}"
    return query
:return:
A SQL SELECT query which returns the count of items for an unlimited query based on this SQLBuilder |
def log_histogram(self, name, value, step=None):
    """Log a histogram for given name on given step.

    Args:
        name (str): name of the variable (it will be converted to a valid
            tensorflow summary name).
        value (tuple or list): either a list of numbers to be summarized
            as a histogram, or a tuple of bin_edges and bincounts that
            directly define a histogram.
        step (int): non-negative integer used for visualization
    """
    # Strings are sequences too, so reject them explicitly up front.
    if isinstance(value, six.string_types):
        raise TypeError('"value" should be a number, got {}'
                        .format(type(value)))
    self._check_step(step)
    summary_name = self._ensure_tf_name(name)
    histogram = self._histogram_summary(summary_name, value, step=step)
    self._log_summary(summary_name, histogram, value, step=step)
Args:
name (str): name of the variable (it will be converted to a valid
tensorflow summary name).
value (tuple or list): either list of numbers
to be summarized as a histogram, or a tuple of bin_edges and
bincounts that directly define a histogram.
step (int): non-negative integer used for visualization |
def login(self, user=None, password=None, restrict_login=None):
    """Attempt to log in using the given username and password.

    Subsequent method calls will use this username and password. Returns
    False if login fails, otherwise returns some kind of login info -
    typically either a numeric userid, or a dict of user info.

    If user is not set, the value of Bugzilla.user will be used. If *that*
    is not set, ValueError will be raised. If login fails, BugzillaError
    will be raised.

    The login session can be restricted to the current user IP address
    with the restrict_login argument. (Bugzilla 4.4+)

    This method will be called implicitly at the end of connect() if user
    and password are both set. So under most circumstances you won't need
    to call this yourself.
    """
    if self.api_key:
        raise ValueError("cannot login when using an API key")

    # Explicit arguments override (and update) the stored credentials.
    self.user = user or self.user
    self.password = password or self.password
    if not self.user:
        raise ValueError("missing username")
    if not self.password:
        raise ValueError("missing password")

    if restrict_login:
        log.info("logging in with restrict_login=True")

    try:
        ret = self._login(self.user, self.password, restrict_login)
    except Fault as e:
        raise BugzillaError("Login failed: %s" % str(e.faultString))
    # Drop the plaintext password once the session is established.
    self.password = ''
    log.info("login successful for user=%s", self.user)
    return ret
method calls will use this username and password. Returns False if
login fails, otherwise returns some kind of login info - typically
either a numeric userid, or a dict of user info.
If user is not set, the value of Bugzilla.user will be used. If *that*
is not set, ValueError will be raised. If login fails, BugzillaError
will be raised.
The login session can be restricted to current user IP address
with restrict_login argument. (Bugzilla 4.4+)
This method will be called implicitly at the end of connect() if user
and password are both set. So under most circumstances you won't need
to call this yourself. |
def plot_sens_center(self, frequency=2):
    """
    plot sensitivity center distribution for all configurations in
    config.dat. The centers of mass are colored by the data given in
    volt_file.

    Parameters
    ----------
    frequency : int, optional
        Column of the voltage file used for coloring when the file holds
        multi-frequency (2D) data. Default: 2.
    """
    try:
        colors = np.loadtxt(self.volt_file, skiprows=1)
    except IOError:
        # BUG FIX: the original referenced the undefined name 'volt_file'
        # here, raising a NameError instead of printing this message.
        print('IOError opening {0}'.format(self.volt_file))
        exit()
    # check for 1-dimensionality
    if len(colors.shape) > 1:
        print('Artificial or Multi frequency data')
        # Select the requested frequency column and drop NaN entries.
        colors = colors[:, frequency].flatten()
        colors = colors[~np.isnan(colors)]
    elem.load_elem_file(self.elem_file)
    elem.load_elec_file(self.elec_file)
    nr_elements = len(elem.element_type_list[0])
    # All-NaN element data: the grid is drawn only as a backdrop.
    elem.element_data = np.zeros((nr_elements, 1)) * np.nan
    elem.plt_opt.title = ' '
    elem.plt_opt.reverse = True
    elem.plt_opt.cbmin = -1
    elem.plt_opt.cbmax = 1
    elem.plt_opt.cblabel = self.cblabel
    elem.plt_opt.xlabel = 'x (m)'
    elem.plt_opt.ylabel = 'z (m)'
    fig = plt.figure(figsize=(5, 7))
    ax = fig.add_subplot(111)
    ax, pm, cb = elem.plot_element_data_to_ax(0, ax, scale='linear',
                                              no_cb=True)
    # Scatter the sensitivity centers of mass, colored by the voltage data.
    ax.scatter(self.sens_centers[:, 0], self.sens_centers[:, 1], c=colors,
               s=100, edgecolors='none')
    cb_pos = mpl_get_cb_bound_next_to_plot(ax)
    ax1 = fig.add_axes(cb_pos, frame_on=True)
    cmap = mpl.cm.jet_r
    norm = mpl.colors.Normalize(vmin=np.nanmin(colors),
                                vmax=np.nanmax(colors))
    mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm,
                              orientation='vertical')
    fig.savefig(self.output_file, bbox_inches='tight', dpi=300)
config.dat. The centers of mass are colored by the data given in
volt_file. |
def list_contains(list_of_strings, substring, return_true_false_array=False):
    """Get strings in list which contain the given substring.

    Parameters
    ----------
    list_of_strings : list of str or numpy array of str
        Strings to search through.
    substring : str
        Substring to look for.
    return_true_false_array : bool, optional
        If True, return the boolean match mask instead of the matches.

    Returns
    -------
    The matching strings (numpy boolean-mask indexing is used for array
    input, preserving the array return type), or the boolean mask when
    ``return_true_false_array`` is True.
    """
    key_tf = [substring in keyi for keyi in list_of_strings]
    if return_true_false_array:
        return key_tf
    try:
        # numpy arrays support boolean-mask indexing directly.
        return list_of_strings[key_tf]
    except TypeError:
        # BUG FIX: plain Python lists reject boolean-mask indexing; the
        # original raised TypeError here despite documenting list support.
        return [s for s, hit in zip(list_of_strings, key_tf) if hit]
def dist_mlipns(src, tar, threshold=0.25, max_mismatches=2):
    """Return the MLIPNS distance between two strings.

    This is a wrapper for :py:meth:`MLIPNS.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    threshold : float
        A number [0, 1] indicating the maximum similarity score, below which
        the strings are considered 'similar' (0.25 by default)
    max_mismatches : int
        A number indicating the allowable number of mismatches to remove before
        declaring two strings not similar (2 by default)

    Returns
    -------
    float
        MLIPNS distance

    Examples
    --------
    >>> dist_mlipns('cat', 'hat')
    0.0
    >>> dist_mlipns('Niall', 'Neil')
    1.0
    >>> dist_mlipns('aluminum', 'Catalan')
    1.0
    >>> dist_mlipns('ATCG', 'TAGC')
    1.0
    """
    # Pure delegation: a fresh MLIPNS instance computes the distance.
    return MLIPNS().dist(src, tar, threshold, max_mismatches)
This is a wrapper for :py:meth:`MLIPNS.dist`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below which
the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove before
declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS distance
Examples
--------
>>> dist_mlipns('cat', 'hat')
0.0
>>> dist_mlipns('Niall', 'Neil')
1.0
>>> dist_mlipns('aluminum', 'Catalan')
1.0
>>> dist_mlipns('ATCG', 'TAGC')
1.0 |
def ISBNValidator(raw_isbn):
    """Check string is a valid ISBN number.

    Raises ValidationError on any failure; returns True when valid.
    """
    # BUG FIX: type-check the raw input before calling .replace() on it.
    # The original checked isinstance only after the replace calls, so a
    # non-string raised AttributeError instead of the intended
    # ValidationError.
    if not isinstance(raw_isbn, string_types):
        raise ValidationError(_(u'Invalid ISBN: Not a string'))
    # Hyphens and spaces are presentation-only; strip them before checking.
    isbn_to_check = raw_isbn.replace('-', '').replace(' ', '')
    if len(isbn_to_check) not in (10, 13):
        raise ValidationError(_(u'Invalid ISBN: Wrong length'))
    if not isbn.is_valid(isbn_to_check):
        raise ValidationError(_(u'Invalid ISBN: Failed checksum'))
    # The ISBN-10 check digit 'X' must be upper case.
    if isbn_to_check != isbn_to_check.upper():
        raise ValidationError(_(u'Invalid ISBN: Only upper case allowed'))
    return True
def _generate_AES_CBC_cipher(cek, iv):
    '''
    Build an AES-256 CBC encryption cipher from the given key material.

    :param bytes[] cek: The content encryption key for the cipher.
    :param bytes[] iv: The initialization vector for the cipher.
    :return: A cipher for encrypting in AES256 CBC.
    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
    '''
    return Cipher(AES(cek), CBC(iv), default_backend())
:param bytes[] cek: The content encryption key for the cipher.
:param bytes[] iv: The initialization vector for the cipher.
:return: A cipher for encrypting in AES256 CBC.
:rtype: ~cryptography.hazmat.primitives.ciphers.Cipher |
def uri(host='localhost', port=5432, dbname='postgres', user='postgres',
        password=None):
    """Return a PostgreSQL connection URI for the specified values.

    :param str host: Host to connect to
    :param int port: Port to connect on
    :param str dbname: The database name
    :param str user: User to connect as
    :param str password: The password to use, None for no password
    :return str: The PostgreSQL connection URI
    """
    # A falsy port (None/0) means "host only" in the authority part.
    endpoint = '%s:%s' % (host, port) if port else host
    if password:
        return 'postgresql://%s:%s@%s/%s' % (user, password, endpoint, dbname)
    return 'postgresql://%s@%s/%s' % (user, endpoint, dbname)
:param str host: Host to connect to
:param int port: Port to connect on
:param str dbname: The database name
:param str user: User to connect as
:param str password: The password to use, None for no password
:return str: The PostgreSQL connection URI |
def __cache_point(self, index):
    """!
    @brief Store index points.

    @param[in] index (uint): Index point that should be stored.
    """
    # Caching may be switched off entirely; then the call is a no-op.
    if not self.__cache_points:
        return
    if self.__points is None:
        self.__points = [index]
    else:
        self.__points.append(index)
@brief Store index points.
@param[in] index (uint): Index point that should be stored. |
def create(self, table_id, schema):
    """Create a table in Google BigQuery given a table and schema

    Parameters
    ----------
    table : str
        Name of table to be written
    schema : str
        Use the generate_bq_schema to generate your table schema from a
        dataframe.
    """
    from google.cloud.bigquery import SchemaField
    from google.cloud.bigquery import Table

    if self.exists(table_id):
        raise TableCreationError(
            "Table {0} already exists".format(table_id))

    # Make sure the target dataset exists before creating the table in it.
    dataset = _Dataset(self.project_id, credentials=self.credentials)
    if not dataset.exists(self.dataset_id):
        _Dataset(
            self.project_id,
            credentials=self.credentials,
            location=self.location,
        ).create(self.dataset_id)

    table_ref = self.client.dataset(self.dataset_id).table(table_id)
    table = Table(table_ref)

    # Manually create the schema objects, adding NULLABLE mode
    # as a workaround for
    # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
    for field in schema["fields"]:
        field.setdefault("mode", "NULLABLE")
    table.schema = [
        SchemaField.from_api_repr(field) for field in schema["fields"]]

    try:
        self.client.create_table(table)
    except self.http_error as ex:
        self.process_http_error(ex)
Parameters
----------
table : str
Name of table to be written
schema : str
Use the generate_bq_schema to generate your table schema from a
dataframe. |
def ignore(mapping):
    """ Use ignore to prevent a mapping from being mapped to a namedtuple. """
    if isinstance(mapping, Mapping):
        return AsDict(mapping)
    if isinstance(mapping, list):
        # Recurse so nested mappings inside lists are shielded as well.
        return list(map(ignore, mapping))
    return mapping
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.