code (string, length 75–104k) | docstring (string, length 1–46.9k) |
|---|---|
def toString(self, obj):
    """
    Convert the given L{Identifier} to a string.
    """
    # Encode each component to UTF-8 bytes before boxing.
    box = Box(shareID=obj.shareID.encode('utf-8'),
              localpart=obj.localpart.encode('utf-8'),
              domain=obj.domain.encode('utf-8'))
    return box.serialize()
def _gen_dimension_table(self):
    """
    2D array describing each registered dimension
    together with headers - for use in __str__

    Returns
    -------
    (list of list, list of str)
        Table rows (one per dimension, sorted case-insensitively by
        name) and the column headers.
    """
    headers = ['Dimension Name', 'Description',
               'Global Size', 'Extents']

    table = []
    # `.values()` works on both Python 2 and 3; the Python 2-only
    # `.itervalues()` used previously raised AttributeError on Python 3.
    for dimval in sorted(self.dimensions(copy=False).values(),
                         key=lambda dval: dval.name.upper()):
        table.append([dimval.name,
                      dimval.description,
                      dimval.global_size,
                      (dimval.lower_extent, dimval.upper_extent)])

    return table, headers
def automodsumm_to_autosummary_lines(fn, app):
    """
    Generates lines from a file with an "automodsumm" entry suitable for
    feeding into "autosummary".

    Searches the provided file for `automodsumm` directives and returns
    a list of lines specifying the `autosummary` commands for the modules
    requested. This does *not* return the whole file contents - just an
    autosummary section in place of any :automodsumm: entries. Note that
    any options given for `automodsumm` are also included in the
    generated `autosummary` section.

    Parameters
    ----------
    fn : str
        The name of the file to search for `automodsumm` entries.
    app : sphinx.application.Application
        The sphinx Application object

    Returns
    -------
    lines : list of str
        Lines for all `automodsumm` entries with the entries replaced by
        `autosummary` and the module's members added.
    """
    fullfn = os.path.join(app.builder.env.srcdir, fn)

    with open(fullfn) as fr:
        if 'astropy_helpers.sphinx.ext.automodapi' in app._extensions:
            from astropy_helpers.sphinx.ext.automodapi import automodapi_replace
            # Must do the automodapi on the source to get the automodsumm
            # that might be in there
            docname = os.path.splitext(fn)[0]
            filestr = automodapi_replace(fr.read(), app, True, docname, False)
        else:
            filestr = fr.read()

    # The regex splits the document into groups of 5 per directive match.
    # 0th entry is the stuff before the first automodsumm line.
    spl = _automodsummrex.split(filestr)
    indent1s = spl[1::5]
    mods = spl[2::5]
    opssecs = spl[3::5]
    indent2s = spl[4::5]
    remainders = spl[5::5]

    # only grab automodsumm sections and convert them to autosummary with the
    # entries for all the public objects
    newlines = []

    # loop over all automodsumms in this document
    for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods,
                                                      opssecs, remainders)):
        allindent = i1 + ('' if i2 is None else i2)

        # filter out functions-only and classes-only options if present
        oplines = ops.split('\n')
        toskip = []
        allowedpkgnms = []
        funcsonly = clssonly = False
        # BUGFIX: iterate with a dedicated index (`opi`) instead of reusing
        # `i`, which previously shadowed the outer directive index and made
        # the warning line-number computation below use the wrong value.
        for opi, ln in reversed(list(enumerate(oplines))):
            if ':functions-only:' in ln:
                funcsonly = True
                del oplines[opi]
            if ':classes-only:' in ln:
                clssonly = True
                del oplines[opi]
            if ':skip:' in ln:
                toskip.extend(_str_list_converter(ln.replace(':skip:', '')))
                del oplines[opi]
            if ':allowed-package-names:' in ln:
                allowedpkgnms.extend(_str_list_converter(ln.replace(':allowed-package-names:', '')))
                del oplines[opi]
        if funcsonly and clssonly:
            msg = ('Defined both functions-only and classes-only options. '
                   'Skipping this directive.')
            # Line number of this directive: count newlines in everything
            # preceding the i-th directive's match groups.
            lnnum = sum([spl[j].count('\n') for j in range(i * 5 + 1)])
            app.warn('[automodsumm]' + msg, (fn, lnnum))
            continue

        # Use the currentmodule directive so we can just put the local names
        # in the autosummary table. Note that this doesn't always seem to
        # actually "take" in Sphinx's eyes, so in `Automodsumm.run`, we have to
        # force it internally, as well.
        newlines.extend([i1 + '.. currentmodule:: ' + modnm,
                         '',
                         '.. autosummary::'])
        newlines.extend(oplines)

        # Restrict to local objects unless allowed package names were given.
        ols = True if len(allowedpkgnms) == 0 else allowedpkgnms
        for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=ols)):
            if nm in toskip:
                continue
            if funcsonly and not inspect.isroutine(obj):
                continue
            if clssonly and not inspect.isclass(obj):
                continue
            newlines.append(allindent + nm)
        # add one newline at the end of the autosummary block
        newlines.append('')

    return newlines
def remote_space_available(self, search_pattern=r"(\d+) \w+ free"):
    """Return space available on remote device."""
    # Ask the device for a directory listing of the target file system.
    output = self.ssh_ctl_chan.send_command_expect(
        "dir {}".format(self.file_system))
    match = re.search(search_pattern, output)
    space = int(match.group(1))
    # Units reported in kbytes are scaled to bytes (decimal kilobytes).
    if "kbytes" in match.group(0) or "Kbytes" in match.group(0):
        space *= 1000
    return space
def cv_squared(x):
    """The squared coefficient of variation of a sample.
    Useful as a loss to encourage a positive distribution to be more uniform.
    Epsilons added for numerical stability.
    Returns 0 for an empty Tensor.

    Args:
        x: a `Tensor`.

    Returns:
        a `Scalar`.
    """
    epsilon = 1e-10
    # Epsilon in the denominator keeps the empty-tensor case finite (0).
    count = tf.to_float(tf.size(x)) + epsilon
    sample_mean = tf.reduce_sum(x) / count
    sample_variance = tf.reduce_sum(
        tf.squared_difference(x, sample_mean)) / count
    return sample_variance / (tf.square(sample_mean) + epsilon)
def auth_criteria(self):
    """
    This attribute provides the mapping of services to their auth requirement

    Returns:
        (dict) : the mapping from services to their auth requirements.
    """
    mapping = {}
    # Inspect every attribute exposed by the service.
    for name in dir(self):
        # Skip this property itself so we don't recurse forever.
        if name == 'auth_criteria':
            continue
        value = getattr(self, name)
        # Only callables explicitly marked with `_service_auth` qualify.
        if isinstance(value, Callable) and hasattr(value, '_service_auth'):
            mapping[value._service_auth] = value
    return mapping
def download_files_maybe_extract(urls, directory, check_files=None):
    """ Download the files at ``urls`` to ``directory``. Extract to ``directory`` if tar or zip.

    Args:
        urls (str): Url of files.
        directory (str): Directory to download to.
        check_files (list of str): Check if these files exist, ensuring the download succeeded.
            If these files exist before the download, the download is skipped.

    Raises:
        ValueError: Error if one of the ``check_files`` are not found following the download.
    """
    # Avoid a shared mutable default argument; ``None`` means "nothing to check".
    if check_files is None:
        check_files = []
    check_files = [os.path.join(directory, f) for f in check_files]

    # If all check files are already present, the download was done before.
    if _check_download(*check_files):
        return

    for url in urls:
        download_file_maybe_extract(url=url, directory=directory)

    if not _check_download(*check_files):
        raise ValueError('[DOWNLOAD FAILED] `*check_files` not found')
def read_data(self, blocksize=4096):
    """Generates byte strings reflecting the audio data in the file.

    ``blocksize`` is the maximum number of bytes requested from
    CoreAudio per read; each yielded chunk is at most that size.
    """
    # Frame capacity of one block in the client format; ExtAudioFileRead
    # treats this as in/out (on return it holds the frames actually read).
    frames = ctypes.c_uint(blocksize // self._client_fmt.mBytesPerFrame)
    buf = ctypes.create_string_buffer(blocksize)

    # Describe a single reusable buffer for CoreAudio to fill.
    buflist = AudioBufferList()
    buflist.mNumberBuffers = 1
    buflist.mBuffers[0].mNumberChannels = \
        self._client_fmt.mChannelsPerFrame
    buflist.mBuffers[0].mDataByteSize = blocksize
    buflist.mBuffers[0].mData = ctypes.cast(buf, ctypes.c_void_p)

    while True:
        # `check` presumably raises on a non-zero OSStatus — confirm.
        check(_coreaudio.ExtAudioFileRead(
            self._obj, ctypes.byref(frames), ctypes.byref(buflist)
        ))
        assert buflist.mNumberBuffers == 1
        size = buflist.mBuffers[0].mDataByteSize
        if not size:
            # A zero-byte read signals end of file.
            break

        # Copy the bytes CoreAudio wrote into a Python bytes object.
        data = ctypes.cast(buflist.mBuffers[0].mData,
                           ctypes.POINTER(ctypes.c_char))
        blob = data[:size]
        yield blob
def log_indexing_error(cls, indexing_errors):
    """ Logs indexing errors and raises a general ElasticSearch Exception"""
    # Collect a readable representation of every error before raising.
    message = ', '.join(str(err) for err in indexing_errors)
    raise exceptions.ElasticsearchException(message)
def check_str(obj):
    """ Returns a string for various input types """
    # Strings pass through untouched.
    if isinstance(obj, str):
        return obj
    # Floats are truncated to their integer part before stringifying.
    if isinstance(obj, float):
        return str(int(obj))
    return str(obj)
def points(self):
    """ returns a pointer to the points as a numpy object """
    # Wrap the underlying VTK data array without copying it.
    data = self.GetPoints().GetData()
    return vtki_ndarray(vtk_to_numpy(data), data)
def unset_sentry_context(self, tag):
    """Remove a context tag from sentry

    :param tag: The context tag to remove
    :type tag: :class:`str`

    """
    client = self.sentry_client
    # No-op when Sentry is not configured; pop() tolerates missing tags.
    if client:
        client.tags.pop(tag, None)
def parse_sentry_configuration(filename):
    """Parse Sentry DSN out of an application or Sentry configuration file"""
    # Dispatch on the (lower-cased) file extension.
    filetype = os.path.splitext(filename)[-1][1:].lower()

    if filetype == 'ini':  # Pyramid, Pylons
        parser = ConfigParser()
        parser.read(filename)
        ini_key = 'dsn'
        ini_sections = ['sentry', 'filter:raven']
        for section in ini_sections:
            if section not in parser:
                continue
            print('- Using value from [{section}]:[{key}]'
                  .format(section=section, key=ini_key))
            try:
                return parser[section][ini_key]
            except KeyError:
                print('- Warning: Key "{key}" not found in section '
                      '[{section}]'.format(section=section, key=ini_key))
        raise SystemExit('No DSN found in {file}. Tried sections [{sec_list}]'
                         .format(
                             file=filename,
                             sec_list='], ['.join(ini_sections),
                         ))
    elif filetype == 'py':  # Django, Flask, Bottle, ...
        raise SystemExit('Parsing configuration from pure Python (Django,'
                         'Flask, Bottle, etc.) not implemented yet.')
    else:
        raise SystemExit('Configuration file type not supported for parsing: '
                         '%s' % filetype)
def mktar_from_dockerfile(fileobject: BinaryIO) -> IO:
    """
    Create a zipped tar archive from a Dockerfile

    **Remember to close the file object**

    Args:
        fileobj: a Dockerfile

    Returns:
        a NamedTemporaryFile() object
    """
    tmp = tempfile.NamedTemporaryFile()
    archive = tarfile.open(mode="w:gz", fileobj=tmp)

    if isinstance(fileobject, BytesIO):
        # In-memory buffers carry no stat info, so build the header by hand.
        info = tarfile.TarInfo("Dockerfile")
        info.size = len(fileobject.getvalue())
        fileobject.seek(0)
    else:
        info = archive.gettarinfo(fileobj=fileobject, arcname="Dockerfile")

    archive.addfile(info, fileobject)
    archive.close()
    # Rewind so the caller can read the archive from the start.
    tmp.seek(0)
    return tmp
def frontendediting_request_processor(page, request):
    """
    Sets the frontend editing state in the cookie depending on the
    ``frontend_editing`` GET parameter and the user's permissions.

    Returns ``None`` (no action) when the parameter is absent; otherwise
    returns a redirect to the same path with the parameter consumed.
    """
    if 'frontend_editing' not in request.GET:
        return

    response = HttpResponseRedirect(request.path)

    if request.user.has_module_perms('page'):
        # The parameter is guaranteed present here (guard above), so the
        # previously duplicated membership test has been removed.
        try:
            enable_fe = int(request.GET['frontend_editing']) > 0
        except ValueError:
            enable_fe = False

        if enable_fe:
            response.set_cookie(str('frontend_editing'), enable_fe)
        else:
            response.delete_cookie(str('frontend_editing'))
        clear_cache()
    else:
        # No permission: always drop the cookie, without touching the cache.
        response.delete_cookie(str('frontend_editing'))

    # Redirect to cleanup URLs
    return response
def __calculate_center(self, cluster):
    """!
    @brief Calculates new center.

    @return (list) New value of the center of the specified cluster.

    """
    # Dimensionality is taken from the first point of the cluster.
    dimension = len(self.__pointer_data[cluster[0]])
    center = [0] * dimension

    # Accumulate coordinate sums across all points in the cluster.
    for point_index in cluster:
        point = self.__pointer_data[point_index]
        for axis in range(dimension):
            center[axis] += point[axis]

    # Convert sums to means.
    size = len(cluster)
    for axis in range(dimension):
        center[axis] /= size

    return center
def proc_monomer(self, monomer_info, parent, mon_cls=False):
    """Processes a records into a `Monomer`.

    Parameters
    ----------
    monomer_info : (set, OrderedDict)
        Labels and data for a monomer.
    parent : ampal.Polymer
        `Polymer` used to assign `ampal_parent` on created
        `Monomer`.
    mon_cls : `Monomer class or subclass`, optional
        A `Monomer` class can be defined explicitly.
    """
    labels, data = monomer_info
    # A single monomer id must map to exactly one label.
    if len(labels) > 1:
        raise ValueError(
            'Malformed PDB, single monomer id with '
            'multiple labels. {}'.format(labels))
    label = list(labels)[0]

    # Choose the Monomer class: explicit override, standard residue,
    # or nucleotide; anything else is an error.
    if mon_cls:
        chosen_cls = mon_cls
        het = True
    elif label[0] == 'ATOM':
        het = False
        if label[2] in standard_amino_acids.values():
            chosen_cls = Residue
        else:
            chosen_cls = Nucleotide
    else:
        raise ValueError('Unknown Monomer type.')

    monomer = chosen_cls(
        atoms=None, mol_code=label[2], monomer_id=label[1],
        insertion_code=label[3], is_hetero=het, ampal_parent=parent
    )
    monomer.states = self.gen_states(data.values(), monomer)
    # Activate the lowest-keyed state by default.
    monomer._active_state = sorted(monomer.states.keys())[0]
    return monomer
def stop(self, timeout=None):
    """Requests device to stop running, waiting at most the given timout in seconds (fractional). Has no effect if
    `run()` was not called with background=True set. Returns True if successfully stopped (or already not running).
    """
    self.__shutdown.set()
    if not self.__bgthread:
        # Nothing running in the background: already stopped.
        return True
    logger.debug('Stopping bgthread')
    self.__bgthread.join(timeout)
    still_alive = self.__bgthread.is_alive()
    if still_alive:
        logger.warning('bgthread did not finish within timeout')
    self.__bgthread = None
    return not still_alive
def export_network(nw, mode=''):
"""
Export all nodes and lines of the network nw as DataFrames
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
mode: str
If 'MV' export only medium voltage nodes and lines
If 'LV' export only low voltage nodes and lines
else, exports MV and LV nodes and lines
Returns
-------
pandas.DataFrame
nodes_df : Dataframe containing nodes and its attributes
pandas.DataFrame
lines_df : Dataframe containing lines and its attributes
"""
# close circuit breakers
nw.control_circuit_breakers(mode='close')
# srid
srid = str(int(nw.config['geo']['srid']))
##############################
# check what to do
lv_info = True
mv_info = True
if mode == 'LV':
mv_info = False
if mode == 'MV':
lv_info = False
##############################
# from datetime import datetime
run_id = nw.metadata['run_id'] # datetime.now().strftime("%Y%m%d%H%M%S")
##############################
#############################
# go through the grid collecting info
lvgrid_idx = 0
lv_grid_dict = {}
lvloads_idx = 0
lv_loads_dict = {}
mvgrid_idx = 0
mv_grid_dict = {}
mvloads_idx = 0
mv_loads_dict = {}
mvgen_idx = 0
mv_gen_dict = {}
mvcb_idx = 0
mvcb_dict = {}
mvcd_idx = 0
mv_cd_dict = {}
mvstations_idx = 0
hvmv_stations_dict = {}
mvtrafos_idx = 0
hvmv_trafos_dict = {}
lvgen_idx = 0
lv_gen_dict = {}
lvcd_idx = 0
lv_cd_dict = {}
lvstations_idx = 0
mvlv_stations_dict = {}
lvtrafos_idx = 0
mvlv_trafos_dict = {}
areacenter_idx = 0
areacenter_dict = {}
lines_idx = 0
lines_dict = {}
LVMVmapping_idx = 0
mvlv_mapping_dict = {}
def aggregate_generators(gen, aggr):
"""Aggregate generation capacity per voltage level
Parameters
----------
gen: ding0.core.GeneratorDing0
Ding0 Generator object
aggr: dict
Aggregated generation capacity. For structure see
`_determine_aggregated_nodes()`.
Returns
-------
"""
if gen.v_level not in aggr['generation']:
aggr['generation'][gen.v_level] = {}
if gen.type not in aggr['generation'][gen.v_level]:
aggr['generation'][gen.v_level][gen.type] = {}
if gen.subtype not in aggr['generation'][gen.v_level][gen.type]:
aggr['generation'][gen.v_level][gen.type].update(
{gen.subtype: {'ids': [gen.id_db],
'capacity': gen.capacity}})
else:
aggr['generation'][gen.v_level][gen.type][gen.subtype][
'ids'].append(gen.id_db)
aggr['generation'][gen.v_level][gen.type][gen.subtype][
'capacity'] += gen.capacity
return aggr
def aggregate_loads(la_center, aggr):
"""Aggregate consumption in load area per sector
Parameters
----------
la_center: LVLoadAreaCentreDing0
Load area center object from Ding0
Returns
-------
"""
for s in ['retail', 'industrial', 'agricultural', 'residential']:
if s not in aggr['load']:
aggr['load'][s] = {}
for t in ['nominal','peak']:
if t not in aggr['load'][s]:
aggr['load'][s][t] = 0
aggr['load']['retail']['nominal'] += sum(
[_.sector_consumption_retail
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['industrial']['nominal'] += sum(
[_.sector_consumption_industrial
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['agricultural']['nominal'] += sum(
[_.sector_consumption_agricultural
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['residential']['nominal'] += sum(
[_.sector_consumption_residential
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['retail']['peak'] += sum(
[_.peak_load_retail
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['industrial']['peak'] += sum(
[_.peak_load_industrial
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['agricultural']['peak'] += sum(
[_.peak_load_agricultural
for _ in la_center.lv_load_area._lv_grid_districts])
aggr['load']['residential']['peak'] += sum(
[_.peak_load_residential
for _ in la_center.lv_load_area._lv_grid_districts])
return aggr
for mv_district in nw.mv_grid_districts():
mv_grid_id = mv_district.mv_grid.id_db
mv_grid_id_db = '_'.join(
[str(mv_district.mv_grid.__class__.__name__), 'MV', str(mv_grid_id), str(mv_district.mv_grid.id_db)])
if mv_info:
lv_grid_id = 0
# MV-grid
# ToDo: geom <- Polygon
mvgrid_idx += 1
mv_grid_dict[mvgrid_idx] = {
'MV_grid_id': mv_district.mv_grid.id_db,
'id_db': '_'.join([str(mv_district.mv_grid.__class__.__name__), 'MV', str(mv_grid_id),
str(mv_district.mv_grid.id_db)]),
# 'network': mv_district.mv_grid.network,
'geom': wkt_dumps(mv_district.geo_data),
'population': # None,
sum([_.zensus_sum
for _ in
mv_district._lv_load_areas # ding0_grid.grid_district._lv_load_areas
if not np.isnan(_.zensus_sum)]),
'voltage_nom': mv_district.mv_grid.v_level, # in kV
'run_id': run_id
}
# id_db: Classname_MV/LV_mvgridid/lvgridid_id
# excemptions: class LVStations: LVStationDing0_MV_mvgridid_id(=lvgridid)
# MVGrid
for node in mv_district.mv_grid.graph_nodes_sorted():
geom = wkt_dumps(node.geo_data)
# geom = from_shape(Point(node.geo_data), srid=srid)
db_id = node.id_db
# LVStation
if isinstance(node, LVStationDing0):
if not node.lv_load_area.is_aggregated:
lvstations_idx += 1
mvlv_stations_dict[lvstations_idx] = {
'id_db': '_'.join([str(node.__class__.__name__), 'MV', str(mv_grid_id), str(node.id_db)]),
'LV_grid_id_db': '_'.join(['LVGridDing0', 'LV', str(node.id_db), str(node.id_db)]),
'geom': geom,
'run_id': run_id,
}
# LV-MV mapping
LVMVmapping_idx += 1
mvlv_mapping_dict[LVMVmapping_idx] = {
'MV_grid_id': mv_grid_id,
'MV_grid_id_db': mv_grid_id_db,
'LV_grid_id': node.id_db,
'LV_grid_id_db': '_'.join(['LVGridDing0', 'LV', str(node.id_db), str(node.id_db)]),
'run_id': run_id,
}
# Trafos LV
for t in node.transformers():
lvtrafos_idx += 1
mvlv_trafos_dict[lvtrafos_idx] = {
'id_db': '_'.join([str(t.__class__.__name__), 'LV', str(mv_grid_id), str(node.id_db)]),
'geom': geom,
'LV_grid_id_db': '_'.join(['LVGridDing0', 'LV', str(node.id_db), str(node.id_db)]),
'voltage_op': t.v_level,
'S_nom': t.s_max_a,
'X': t.x,
'R': t.r,
'run_id': run_id,
}
# MVStation
elif isinstance(node, MVStationDing0):
mvstations_idx += 1
hvmv_stations_dict[mvstations_idx] = {
'id_db': '_'.join([str(node.__class__.__name__), 'MV', str(mv_grid_id), str(node.id_db)]),
'MV_grid_id_db': mv_grid_id_db,
'geom': geom,
'run_id': run_id,
}
# Trafos MV
for t in node.transformers():
mvtrafos_idx += 1
hvmv_trafos_dict[mvtrafos_idx] = {
'id_db': '_'.join([str(t.__class__.__name__), 'MV', str(mv_grid_id), str(node.id_db)]),
'geom': geom,
'MV_grid_id_db': mv_grid_id_db,
'voltage_op': t.v_level,
'S_nom': t.s_max_a,
'X': t.x,
'R': t.r,
'run_id': run_id,
}
# MVGenerator
elif isinstance(node, GeneratorDing0):
if node.subtype == None:
subtype = 'other'
else:
subtype = node.subtype
type = node.type
mvgen_idx += 1
mv_gen_dict[mvgen_idx] = {
'id_db': '_'.join([str(node.__class__.__name__), 'MV', str(mv_grid_id), str(node.id_db)]),
'MV_grid_id_db': mv_grid_id_db,
'geom': geom,
'type': type,
'subtype': subtype,
'v_level': node.v_level,
'nominal_capacity': node.capacity,
'run_id': run_id,
'is_aggregated': False,
}
# MVBranchTees
elif isinstance(node, MVCableDistributorDing0):
mvcd_idx += 1
mv_cd_dict[mvcd_idx] = {
'id_db': '_'.join([str(node.__class__.__name__), 'MV', str(mv_grid_id), str(node.id_db)]),
'MV_grid_id_db': mv_grid_id_db,
'geom': geom,
'run_id': run_id,
}
# LoadAreaCentre
elif isinstance(node, LVLoadAreaCentreDing0):
# type = 'Load area center of aggregated load area'
areacenter_idx += 1
aggr_lines = 0
aggr = {'generation': {}, 'load': {}, 'aggregates': []}
# Determine aggregated generation in LV grid
for lvgd in node.lv_load_area._lv_grid_districts:
for aggr_gen in lvgd.lv_grid.generators():
aggr = aggregate_generators(aggr_gen, aggr)
if aggr_gen.subtype == None:
subtype = 'other'
else:
subtype = aggr_gen.subtype
type = aggr_gen.type
# Determine aggregated load in MV grid
# -> Implement once loads in Ding0 MV grids exist
# Determine aggregated load in LV grid
aggr = aggregate_loads(node, aggr)
# Collect metadata of aggregated load areas
aggr['aggregates'] = {
'population': node.lv_load_area.zensus_sum,
'geom': node.lv_load_area.geo_area}
aggr_line_type = nw._static_data['MV_cables'].iloc[
nw._static_data['MV_cables']['I_max_th'].idxmax()]
geom = wkt_dumps(node.lv_load_area.geo_area)
for aggr_node in aggr:
if aggr_node == 'generation':
mvgenaggr_idx = 0
for v_level in aggr['generation']:
for type in aggr['generation'][v_level]:
for subtype in aggr['generation'][v_level][type]:
mvgen_idx += 1
mvgenaggr_idx += 1
mv_gen_dict[mvgen_idx] = {
'id_db': '_'.join(
[str(aggr_gen.__class__.__name__), 'MV', str(mv_grid_id),
str(aggr_gen.id_db), str(mvgenaggr_idx)]), # , str(mvgen_idx)
'MV_grid_id_db': mv_grid_id_db,
'geom': geom,#from_shape(Point(mv_district.mv_grid.station().geo_data), srid=srid),#lv_load_area.geo_area,#geom, #?? Polygon # hvmv_stations_dict[mvstations_idx]['geom'], #
'type': type,
'subtype': subtype,
'v_level': v_level,
'nominal_capacity': aggr['generation'][v_level][type][subtype]['capacity'],
'is_aggregated': True,
'run_id': run_id,
}
lines_idx += 1
aggr_lines += 1
lines_dict[lines_idx] = {
# ToDo: Rename edge_name
'edge_name': '_'.join(
[str(mv_grid_id), 'aggr', str(node.lv_load_area.id_db),
str(aggr_lines)]),
# , 'vlevel', str(v_level), 'subtype', str(subtype)]),#}'.format(v_level=v_level, subtype=subtype),
'grid_id_db': mv_grid_id_db,
# ToDo: read type_name from aggr_line_type
'type_name': 'NA2XS2Y 3x1x500 RM/35', # aggr_line_type.name,
'type_kind': 'cable', # branch['branch'].kind,
'length': 1,
'U_n': aggr_line_type.U_n,
'I_max_th': aggr_line_type.I_max_th,
'R': aggr_line_type.R,
'L': aggr_line_type.L,
'C': aggr_line_type.C,
'node1': '_'.join(
[str(aggr_gen.__class__.__name__), 'MV', str(mv_grid_id),
str(aggr_gen.id_db), str(mvgenaggr_idx)]),
'node2': '_'.join([
'MVStationDing0', 'MV', str(mv_grid_id), str(mv_grid_id)]),
'run_id': run_id,
}
elif aggr_node == 'load':
for type in aggr['load']:
mvloads_idx += 1
mv_loads_dict[mvloads_idx] = {
'id_db': '_'.join(
['AggregatedLoad', 'MV', str(mv_grid_id), str(mvloads_idx)]),
'MV_grid_id_db': mv_grid_id_db,
'geom': geom,
# from_shape(Point(mv_district.mv_grid.station().geo_data), srid=srid),
'consumption_{}'.format(type): aggr['load'][type]['nominal'],
'is_aggregated': True,
'run_id': run_id,
}
lines_idx += 1
aggr_lines += 1
lines_dict[lines_idx] = {
# ToDo: Rename edge_name
'edge_name': '_'.join(
[str(mv_grid_id), 'aggr', str(node.lv_load_area.id_db), str(aggr_lines)]),
# 'edge_name': '_'.join(
# ['line_aggr_load', str(node.lv_load_area), 'vlevel', str(v_level),
# 'subtype', str(subtype)]), # }'.format(v_level=v_level, subtype=subtype),
'grid_id_db': mv_grid_id_db,
# ToDo: read type_name from aggr_line_type
'type_name': 'NA2XS2Y 3x1x500 RM/35', # aggr_line_type.name,
'type_kind': 'cable', # branch['branch'].kind,
# 'type': aggr_line_type,
'length': 1e-3, # in km
'U_n': aggr_line_type.U_n,
'I_max_th': aggr_line_type.I_max_th,
'R': aggr_line_type.R,
'L': aggr_line_type.L,
'C': aggr_line_type.C,
'node1': '_'.join(
['AggregatedLoad', 'MV', str(mv_grid_id), str(mvloads_idx)]),
'node2': '_'.join([
'MVStationDing0', 'MV', str(mv_grid_id), str(mv_grid_id)]),
'run_id': run_id,
}
# areacenter_dict[areacenter_idx] = {
# 'id_db': '_'.join([str(node.__class__.__name__), 'MV', str(mv_grid_id), str(node.id_db)]),#node.id_db,
# 'MV_grid_id':node.grid,
# 'geom':node.geo_data,
# 'lv_load_area': node.lv_load_area,
# 'run_id': run_id,#
# }
# DisconnectingPoints
elif isinstance(node, CircuitBreakerDing0):
mvcb_idx += 1
mvcb_dict[mvcb_idx] = {
'id_db': '_'.join([str(node.__class__.__name__), 'MV', str(mv_grid_id), str(node.id_db)]),
'MV_grid_id': mv_grid_id,
'MV_grid_id_db': mv_grid_id_db,
'geom': geom,
'status': node.status,
'run_id': run_id,
}
else:
type = 'Unknown'
# MVedges
for branch in mv_district.mv_grid.graph_edges():
# geom = wkt_dumps(node.geo_data)
geom = from_shape(LineString([branch['adj_nodes'][0].geo_data, branch['adj_nodes'][1].geo_data]),
srid=srid)
if not any([isinstance(branch['adj_nodes'][0], LVLoadAreaCentreDing0),
isinstance(branch['adj_nodes'][1], LVLoadAreaCentreDing0)]):
lines_idx += 1
lines_dict[lines_idx] = {
'edge_name': branch['branch'].id_db,
'grid_id_db': mv_grid_id_db,
'type_name': branch['branch'].type['name'],
'type_kind': branch['branch'].kind,
'length': branch['branch'].length / 1e3,
'U_n': branch['branch'].type['U_n'],
'I_max_th': branch['branch'].type['I_max_th'],
'R': branch['branch'].type['R'],
'L': branch['branch'].type['L'],
'C': branch['branch'].type['C'],
'node1': '_'.join([str(branch['adj_nodes'][0].__class__.__name__), 'MV', str(mv_grid_id),
str(branch['adj_nodes'][0].id_db)]),
'node2': '_'.join([str(branch['adj_nodes'][1].__class__.__name__), 'MV', str(mv_grid_id),
str(branch['adj_nodes'][1].id_db)]),
'run_id': run_id,
}
if lv_info:
for LA in mv_district.lv_load_areas():
for lv_district in LA.lv_grid_districts():
if not lv_district.lv_grid.grid_district.lv_load_area.is_aggregated:
# ding0_grid.grid_district._lv_load_areas._lv_grid_districts _.lv_grid
# LV-grid
# ToDo: geom <- Polygon
lvgrid_idx += 1
lv_grid_dict[lvgrid_idx] = {
'LV_grid_id': lv_district.lv_grid.id_db,
'id_db': '_'.join(
[str(lv_district.lv_grid.__class__.__name__), 'LV', str(lv_district.lv_grid.id_db),
str(lv_district.lv_grid.id_db)]),
'geom': wkt_dumps(lv_district.geo_data),
'population': lv_district.population,
'voltage_nom': lv_district.lv_grid.v_level / 1e3,
'run_id': run_id
}
lv_grid_id = lv_district.lv_grid.id_db
lv_grid_id_db = '_'.join(
[str(lv_district.lv_grid.__class__.__name__), 'LV', str(lv_district.lv_grid.id_db),
str(lv_district.lv_grid.id_db)])
# geom = from_shape(Point(lv_district.lv_grid.station().geo_data), srid=srid)
# geom = wkt_dumps(lv_district.geo_data)# lv_grid.station() #ding0_lv_grid.grid_district.geo_data
for node in lv_district.lv_grid.graph_nodes_sorted():
# geom = wkt_dumps(node.geo_data)
# LVGenerator
if isinstance(node, GeneratorDing0):
if node.subtype == None:
subtype = 'other'
else:
subtype = node.subtype
type = node.type
lvgen_idx += 1
lv_gen_dict[lvgen_idx] = {
'id_db': '_'.join(
[str(node.__class__.__name__), 'LV', str(lv_grid_id), str(node.id_db)]),
'LV_grid_id_db': lv_grid_id_db,
'geom': wkt_dumps(node.geo_data),
'type': type,
'subtype': subtype,
'v_level': node.v_level,
'nominal_capacity': node.capacity,
'run_id': run_id,
}
# LVcd
elif isinstance(node, LVCableDistributorDing0):
lvcd_idx += 1
lv_cd_dict[lvcd_idx] = {
'id_db': '_'.join(
[str(node.__class__.__name__), 'LV', str(lv_grid_id), str(node.id_db)]),
'LV_grid_id_db': lv_grid_id_db,
'geom': None,
# wkt_dumps(lv_district.geo_data),#wkt_dumps(node.geo_data), Todo: why no geo_data?
'run_id': run_id,
}
# LVload
elif isinstance(node, LVLoadDing0):
consumption_dict = {}
for k in ['residential', 'retail', 'agricultural', 'industrial']:
if k in node.consumption.keys():
consumption_dict[k] = node.consumption[k]
else:
consumption_dict[k] = None
lvloads_idx += 1
lv_loads_dict[lvloads_idx] = {
'id_db': '_'.join(
[str(node.__class__.__name__), 'LV', str(lv_grid_id), str(node.id_db)]),
'LV_grid_id_db': lv_grid_id_db,
'geom': None,#wkt_dumps(lv_district.geo_data),#wkt_dumps(node.geo_data), Todo: why no geo_data?
# 'consumption': json.dumps(node.consumption),
'consumption_residential': consumption_dict['residential'],
'consumption_retail': consumption_dict['retail'],
'consumption_agricultural': consumption_dict['agricultural'],
'consumption_industrial': consumption_dict['industrial'],
'run_id': run_id,
}
del consumption_dict
else:
type = 'Unknown'
# LVedges
for branch in lv_district.lv_grid.graph_edges():
# geom = from_shape(
# LineString([branch['adj_nodes'][0].geo_data, branch['adj_nodes'][1].geo_data]), srid=srid)
if not any([isinstance(branch['adj_nodes'][0], LVLoadAreaCentreDing0),
isinstance(branch['adj_nodes'][1], LVLoadAreaCentreDing0)]):
lines_idx += 1
lines_dict[lines_idx] = {
'edge_name': branch['branch'].id_db,
'grid_id_db': lv_grid_id_db,
'type_name': branch['branch'].type.to_frame().columns[0],
'type_kind': branch['branch'].kind,
'length': branch['branch'].length / 1e3, # length in km
'U_n': branch['branch'].type['U_n'] / 1e3, # U_n in kV
'I_max_th': branch['branch'].type['I_max_th'],
'R': branch['branch'].type['R'],
'L': branch['branch'].type['L'],
'C': branch['branch'].type['C'],
'node1': '_'.join(
[str(branch['adj_nodes'][0].__class__.__name__), 'LV', str(lv_grid_id),
str(branch['adj_nodes'][0].id_db)])
if not isinstance(branch['adj_nodes'][0], LVStationDing0) else '_'.join(
[str(branch['adj_nodes'][0].__class__.__name__), 'MV', str(mv_grid_id),
str(branch['adj_nodes'][0].id_db)]),
'node2': '_'.join(
[str(branch['adj_nodes'][1].__class__.__name__), 'LV', str(lv_grid_id),
str(branch['adj_nodes'][1].id_db)])
if not isinstance(branch['adj_nodes'][1], LVStationDing0) else '_'.join(
[str(branch['adj_nodes'][1].__class__.__name__), 'MV', str(mv_grid_id),
str(branch['adj_nodes'][1].id_db)]),
'run_id': run_id,
}
lv_grid = pd.DataFrame.from_dict(lv_grid_dict, orient='index')
lv_gen = pd.DataFrame.from_dict(lv_gen_dict, orient='index')
lv_cd = pd.DataFrame.from_dict(lv_cd_dict, orient='index')
mvlv_stations = pd.DataFrame.from_dict(mvlv_stations_dict, orient='index')
mvlv_trafos = pd.DataFrame.from_dict(mvlv_trafos_dict, orient='index')
lv_loads = pd.DataFrame.from_dict(lv_loads_dict, orient='index')
mv_grid = pd.DataFrame.from_dict(mv_grid_dict, orient='index')
mv_gen = pd.DataFrame.from_dict(mv_gen_dict, orient='index')
# mv_cb = pd.DataFrame.from_dict(mvcb_dict, orient='index')
mv_cd = pd.DataFrame.from_dict(mv_cd_dict, orient='index')
hvmv_stations = pd.DataFrame.from_dict(hvmv_stations_dict, orient='index')
# mv_areacenter= pd.DataFrame.from_dict(areacenter_dict, orient='index')
hvmv_trafos = pd.DataFrame.from_dict(hvmv_trafos_dict, orient='index')
mv_loads = pd.DataFrame.from_dict(mv_loads_dict, orient='index')
lines = pd.DataFrame.from_dict(lines_dict, orient='index')
mvlv_mapping = pd.DataFrame.from_dict(mvlv_mapping_dict, orient='index')
lines = lines[sorted(lines.columns.tolist())]
return run_id, lv_grid, lv_gen, lv_cd, mvlv_stations, mvlv_trafos, lv_loads, mv_grid, mv_gen, mv_cd, \
hvmv_stations, hvmv_trafos, mv_loads, lines, mvlv_mapping | Export all nodes and lines of the network nw as DataFrames
Parameters
----------
nw: :any:`list` of NetworkDing0
The MV grid(s) to be studied
mode: str
If 'MV' export only medium voltage nodes and lines
If 'LV' export only low voltage nodes and lines
else, exports MV and LV nodes and lines
Returns
-------
pandas.DataFrame
nodes_df : Dataframe containing nodes and its attributes
pandas.DataFrame
lines_df : Dataframe containing lines and its attributes |
def print_rev_id(localRepoPath):
    """Print revision information about a local git repository to STDOUT.

    Expected method of execution: command-line or shell script call.

    Parameters
    ----------
    localRepoPath : string
        Local repository path.

    Returns
    -------
    Nothing as such. The process exits with status 0 if everything ran OK,
    and with status 111 if something went wrong.
    """
    start_path = os.getcwd()
    try:
        log.info("Local repository path: {}".format(localRepoPath))
        os.chdir(localRepoPath)
        log.info("\n== Remote URL")
        os.system('git remote -v')
        log.info("\n== Local Branches")
        os.system("git branch")
        log.info("\n== Most Recent Commit")
        os.system("git log |head -1")
        rv = 0
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; any other failure maps to exit code 111.
        rv = 111
        log.info("WARNING! get_git_rev_info.print_rev_id() encountered a problem and cannot continue.")
    finally:
        # Always restore the caller's working directory.
        os.chdir(start_path)
    if rv != 0:
        sys.exit(rv)
shell script call
Parameters
----------
localRepoPath: string
Local repository path.
Returns
=======
Nothing as such. subroutine will exit with a state of 0 if everything ran OK, and a value of '111' if
something went wrong. |
def get_user(uid):
    """Look up a user by UID.

    :param str uid: UID to find
    :return: the user, or ``None`` if no such user exists
    :rtype: User object or None
    """
    if db is None:
        # No Redis backend configured -- fall back to the static config map.
        record = app.config['COIL_USERS'].get(uid)
        if not record:
            return None
        return User(uid=uid, **record)
    # Redis backend: normalize a bytes uid to str first.
    try:
        uid = uid.decode('utf-8')
    except AttributeError:
        pass
    raw = db.hgetall('user:{0}'.format(uid))
    if not raw:
        return None
    decoded = {}
    # Strings everywhere: decode keys and values where possible.
    for key, value in raw.items():
        try:
            decoded[key.decode('utf-8')] = value.decode('utf-8')
        except AttributeError:
            try:
                decoded[key.decode('utf-8')] = value
            except AttributeError:
                decoded[key] = value
    # Permission flags are stored as '1'/'0' strings; coerce to bool.
    for perm in PERMISSIONS:
        decoded[perm] = decoded.get(perm) == '1'
    return User(uid=uid, **decoded)
:param str uid: UID to find
:return: the user
:rtype: User object
:raises ValueError: uid is not an integer
:raises KeyError: if user does not exist |
def evaluate(self, password=''):
    """Evaluate the development set.

    The password is sent as plain text.

    :return: the evaluation results.
    """
    submission = self
    dev_set = self['metadata'].get('evaluation_setting', {}).get('development_set', None)
    if dev_set:
        # Restrict evaluation to the development-set tokens; work on a deep
        # copy so the full submission is left untouched.
        submission = copy.deepcopy(self)
        submission['tokens'] = {token_id: token
                                for token_id, token in self['tokens'].items()
                                if token_id in dev_set}
    url = '{}/api/evaluate'.format(BASE_URL)
    try:
        r = requests.post(url,
                          data=submission.dumps(),
                          headers={'content-type': 'application/json'},
                          auth=(submission['metadata']['email'], password))
        response = r.json()
    except requests.exceptions.HTTPError as e:
        logging.error('Error while submitting the participation. {}'.format(e))
        return Job()
    if 'error' in response:
        logging.error('Error while processing the participation. {}'.format(response['error']))
        return Job()
    return Job(response)
The password is sent as plain text.
:return: the evaluation results. |
def _event_monitor_loop(region_name, vpc_id,
                        watcher_plugin, health_plugin,
                        iterations, sleep_time,
                        route_check_time_interval=30):
    """
    Monitor queues to receive updates about new route specs or any detected
    failed IPs.

    If any of those have updates, notify the health-monitor thread with a
    message on a special queue and also re-process the entire routing table.

    The 'iterations' argument allows us to limit the running time of the watch
    loop for test purposes. Not used during normal operation. Also, for faster
    tests, sleep_time can be set to values less than 1.

    The 'route_check_time_interval' argument specifies the number of seconds
    we allow to elapse before forcing a re-check of the VPC routes. This is so
    that accidentally deleted routes or manually broken route tables can be
    fixed back up again on their own.
    """
    q_route_spec = watcher_plugin.get_route_spec_queue()
    q_monitor_ips, q_failed_ips, q_questionable_ips = \
        health_plugin.get_queues()
    time.sleep(sleep_time)  # Wait to allow monitor to report results
    current_route_spec = {}  # The last route spec we have seen
    all_ips = []  # Cache of IP addresses we currently know about
    # Occasionally we want to recheck VPC routes even without other updates.
    # That way, if a route is manually deleted by someone, it will be
    # re-created on its own.
    last_route_check_time = time.time()
    while not CURRENT_STATE._stop_all:
        try:
            # Get the latest messages from the route-spec monitor and the
            # health-check monitor. At system start the route-spec queue should
            # immediately have been initialized with a first message.
            failed_ips = utils.read_last_msg_from_queue(q_failed_ips)
            questnbl_ips = utils.read_last_msg_from_queue(q_questionable_ips)
            new_route_spec = utils.read_last_msg_from_queue(q_route_spec)
            if failed_ips:
                # Store the failed IPs in the shared state
                CURRENT_STATE.failed_ips = failed_ips
            if questnbl_ips:
                # Store the questionable IPs in the shared state
                # NOTE(review): the attribute name 'questionble_ips' looks
                # misspelled -- confirm it matches the attribute actually
                # defined on CURRENT_STATE before "fixing" it.
                CURRENT_STATE.questionble_ips = questnbl_ips
            if new_route_spec:
                # Store the new route spec in the shared state
                CURRENT_STATE.route_spec = new_route_spec
                current_route_spec = new_route_spec
                # Need to communicate a new set of IPs to the health
                # monitoring thread, in case the list changed. The list of
                # addresses is extracted from the route spec. Pass in the old
                # version of the address list, so that this function can
                # compare to see if there are any changes to the host list.
                all_ips = _update_health_monitor_with_new_ips(new_route_spec,
                                                             all_ips,
                                                             q_monitor_ips)
            # Spec or list of failed or questionable IPs changed? Update
            # routes...
            # We pass in the last route spec we have seen, since we are also
            # here in case we only have failed/questionable IPs, but no new
            # route spec. This is also called occasionally on its own, so that
            # we can repair any damaged route tables in VPC.
            now = time.time()
            time_for_regular_recheck = \
                (now - last_route_check_time) > route_check_time_interval
            if new_route_spec or failed_ips or questnbl_ips or \
                    time_for_regular_recheck:
                if not new_route_spec and not (failed_ips or questnbl_ips):
                    # Only reason we are here is due to expired timer.
                    logging.debug("Time for regular route check")
                last_route_check_time = now
                vpc.handle_spec(region_name, vpc_id, current_route_spec,
                                failed_ips if failed_ips else [],
                                questnbl_ips if questnbl_ips else [])
            # If iterations are provided, count down and exit
            if iterations is not None:
                iterations -= 1
                if iterations == 0:
                    break
            time.sleep(sleep_time)
        except KeyboardInterrupt:
            # Allow exit via keyboard interrupt, useful during development
            return
        except Exception as e:
            # Of course we should never get here, but if we do, better to log
            # it and keep operating best we can...
            import traceback
            traceback.print_exc()
            logging.error("*** Uncaught exception 1: %s" % str(e))
            return
    logging.debug("event_monitor_loop ended: Global stop") | Monitor queues to receive updates about new route specs or any detected
failed IPs.
If any of those have updates, notify the health-monitor thread with a
message on a special queue and also re-process the entire routing table.
The 'iterations' argument allows us to limit the running time of the watch
loop for test purposes. Not used during normal operation. Also, for faster
tests, sleep_time can be set to values less than 1.
The 'route_check_time_interval' arguments specifies the number of seconds
we allow to elapse before forcing a re-check of the VPC routes. This is so
that accidentally deleted routes or manually broken route tables can be
fixed back up again on their own. |
def getLatency(self, instId: int) -> float:
    """
    Return the average request latency recorded for the given protocol
    instance, or 0.0 when no client latencies have been recorded at all.
    """
    if not self.clientAvgReqLatencies:
        return 0.0
    return self.clientAvgReqLatencies[instId].get_avg_latency()
def _set_member_vlan(self, v, load=False):
    """
    Setter method for member_vlan, mapped from YANG variable /topology_group/member_vlan (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_member_vlan is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_member_vlan() directly.

    :param v: value to assign to the member_vlan container
    :param load: unused here -- presumably True when loading from stored
        config rather than user input; TODO confirm against the generator
    :raises ValueError: if ``v`` cannot be coerced into the container type
    """
    # Auto-generated pyangbind-style setter: wrap the value in YANGDynClass
    # (carrying the YANG schema metadata) and store it on the instance.
    if hasattr(v, "_utype"):
        # Value carries its own type-coercion hook; apply it first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=member_vlan.member_vlan, is_container='container', presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member VLANs for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as ValueError with the structured error payload the
        # generated bindings expect.
        raise ValueError({
            'error-string': """member_vlan must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=member_vlan.member_vlan, is_container='container', presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member VLANs for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)""",
        })
    self.__member_vlan = t
    if hasattr(self, '_set'):
        self._set() | Setter method for member_vlan, mapped from YANG variable /topology_group/member_vlan (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_vlan is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_vlan() directly. |
def _get_or_open_file(filename):
'''If ``filename`` is a string or bytes object, open the
``filename`` and return the file object. If ``filename`` is
file-like (i.e., it has 'read' and 'write' attributes, return
``filename``.
Parameters
----------
filename : str, bytes, file
Raises
------
TypeError
If ``filename`` is not a string, bytes, or file-like
object.
File-likeness is determined by checking for 'read' and
'write' attributes.
'''
if isinstance(filename, (str, bytes)):
f = open(filename)
elif hasattr(filename, 'read') and hasattr(filename, 'write'):
f = filename
else:
raise TypeError('filename must be str or bytes, or a file')
return f | If ``filename`` is a string or bytes object, open the
``filename`` and return the file object. If ``filename`` is
file-like (i.e., it has 'read' and 'write' attributes, return
``filename``.
Parameters
----------
filename : str, bytes, file
Raises
------
TypeError
If ``filename`` is not a string, bytes, or file-like
object.
File-likeness is determined by checking for 'read' and
'write' attributes. |
def nl_send(sk, msg):
    """Transmit Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L416

    Sends the Netlink message `msg` over the socket `sk` via
    `nl_send_iovec()`.  The message is addressed to the peer configured on
    the socket (nl_socket_set_peer_port() / nl_socket_set_peer_groups())
    unless a destination was set on the message itself with nlmsg_set_dst().
    Credentials attached with nlmsg_set_creds() are included in the control
    buffer.  A send override installed with nl_cb_overwrite_send() takes
    precedence over the default path.  Triggers the `NL_CB_MSG_OUT` callback.

    ATTENTION: Unlike `nl_send_auto()`, this function does *not* finalize
    the message (no automatic flags or port numbers are filled in).

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    msg -- Netlink message (nl_msg class instance).

    Returns:
    Number of bytes sent on success or a negative error code.
    """
    override = sk.s_cb.cb_send_ow
    if override:
        # A custom send function was installed via nl_cb_overwrite_send().
        return override(sk, msg)
    header = nlmsg_hdr(msg)
    payload = header.bytearray[:header.nlmsg_len]
    return nl_send_iovec(sk, msg, payload, 1)
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L416
Transmits the Netlink message `msg` over the Netlink socket using the `socket.sendmsg()`. This function is based on
`nl_send_iovec()`.
The message is addressed to the peer as specified in the socket by either the nl_socket_set_peer_port() or
nl_socket_set_peer_groups() function. The peer address can be overwritten by specifying an address in the `msg`
object using nlmsg_set_dst().
If present in the `msg`, credentials set by the nlmsg_set_creds() function are added to the control buffer of the
message.
Calls to this function can be overwritten by providing an alternative using the nl_cb_overwrite_send() function.
This function triggers the `NL_CB_MSG_OUT` callback.
ATTENTION: Unlike `nl_send_auto()`, this function does *not* finalize the message in terms of automatically adding
needed flags or filling out port numbers.
Positional arguments:
sk -- Netlink socket (nl_sock class instance).
msg -- Netlink message (nl_msg class instance).
Returns:
Number of bytes sent on success or a negative error code. |
def _load_fsstat_data(self, timeout=3):
    """Using :command:`fsstat`, adds some additional information of the volume to the Volume.

    Runs the sleuthkit ``fsstat`` tool in a worker thread and parses its
    output line by line into ``self.info`` (keys: statfstype,
    lastmountpoint, label, version).  The worker is given ``timeout``
    seconds; after that the subprocess is terminated.
    """
    def stats_thread():
        try:
            cmd = ['fsstat', self.get_raw_path(), '-o', str(self.offset // self.disk.block_size)]
            # Setting the fstype explicitly makes fsstat much faster and more reliable
            # In some versions, the auto-detect yaffs2 check takes ages for large images
            fstype = {
                "ntfs": "ntfs", "fat": "fat", "ext": "ext", "iso": "iso9660", "hfs+": "hfs",
                "ufs": "ufs", "swap": "swap", "exfat": "exfat",
            }.get(self.fstype, None)
            if fstype:
                cmd.extend(["-f", fstype])
            logger.debug('$ {0}'.format(' '.join(cmd)))
            # The Popen handle is stashed on the function object so the
            # enclosing scope can terminate it on timeout.
            stats_thread.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            for line in iter(stats_thread.process.stdout.readline, b''):
                line = line.decode('utf-8')
                logger.debug('< {0}'.format(line))
                if line.startswith("File System Type:"):
                    self.info['statfstype'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Last Mount Point:") or line.startswith("Last mounted on:"):
                    self.info['lastmountpoint'] = line[line.index(':') + 2:].strip().replace("//", "/")
                elif line.startswith("Volume Name:") and not self.info.get('label'):
                    self.info['label'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Version:"):
                    self.info['version'] = line[line.index(':') + 2:].strip()
                elif line.startswith("Source OS:"):
                    self.info['version'] = line[line.index(':') + 2:].strip()
                elif 'CYLINDER GROUP INFORMATION' in line or 'BLOCK GROUP INFORMATION' in line:
                    # Everything after this section is not interesting; stop
                    # reading and kill the subprocess early.
                    # noinspection PyBroadException
                    try:
                        stats_thread.process.terminate()
                        logger.debug("Terminated fsstat at cylinder/block group information.")
                    except Exception:
                        pass
                    break

            # Combine last mount point and volume label into one friendly label.
            if self.info.get('lastmountpoint') and self.info.get('label'):
                self.info['label'] = "{0} ({1})".format(self.info['lastmountpoint'], self.info['label'])
            elif self.info.get('lastmountpoint') and not self.info.get('label'):
                self.info['label'] = self.info['lastmountpoint']
            elif not self.info.get('lastmountpoint') and self.info.get('label') and \
                    self.info['label'].startswith("/"):  # e.g. /boot1
                # Derive the mount point from a path-looking label, dropping a
                # trailing '1' (e.g. label '/boot1' -> mount point '/boot').
                if self.info['label'].endswith("1"):
                    self.info['lastmountpoint'] = self.info['label'][:-1]
                else:
                    self.info['lastmountpoint'] = self.info['label']
        except Exception:  # ignore any exceptions here.
            logger.exception("Error while obtaining stats.")

    stats_thread.process = None
    thread = threading.Thread(target=stats_thread)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # Timed out: terminate the subprocess so the worker thread can exit.
        # noinspection PyBroadException
        try:
            stats_thread.process.terminate()
        except Exception:
            pass
        thread.join()
        logger.debug("Killed fsstat after {0}s".format(timeout)) | Using :command:`fsstat`, adds some additional information of the volume to the Volume.
def connect(self, hardware: hc.API):
    """ Connect to a running hardware API.

    This can be either a simulator or a full hardware controller.

    Note that there is no true disconnected state for a
    :py:class:`.ProtocolContext`; :py:meth:`disconnect` simply creates
    a new simulator and replaces the current hardware with it.
    """
    manager = self._hw_manager
    manager.set_hw(hardware)
    # Re-scan attached instruments on the newly connected hardware.
    manager.hardware.cache_instruments()
This can be either a simulator or a full hardware controller.
Note that there is no true disconnected state for a
:py:class:`.ProtocolContext`; :py:meth:`disconnect` simply creates
a new simulator and replaces the current hardware with it. |
def start(path=None, host=None, port=None, color=None, cors=None, detach=False, nolog=False):
    """start web server

    :param path: directory to serve HTML from (defaults to current working
        directory when neither the argument nor app config provides one)
    :param host: interface to bind (default '0.0.0.0')
    :param port: TCP port (default 5001)
    :param color: accepted but not used in this function -- presumably
        consumed elsewhere; TODO confirm
    :param cors: if truthy, enable cross-origin requests via CORS(app)
    :param detach: if True, re-launch this command as a background process
        and return immediately
    :param nolog: if True, do not also log to the console
    """
    if detach:
        # Re-exec ourselves in the background, without '-d' and with
        # '--no-log' appended, then return.
        sys.argv.append('--no-log')
        # NOTE(review): assumes detach was requested with the literal '-d'
        # flag; a long-form spelling would make .index() raise ValueError --
        # confirm against the CLI parser.
        idx = sys.argv.index('-d')
        del sys.argv[idx]
        cmd = sys.executable + ' ' + ' '.join([sys.argv[0], 'start'] + sys.argv[1:])
        if os.name == 'nt':
            cmd = 'start /B %s' % cmd
        else:
            cmd = '%s &' % cmd
        os.system(cmd)
    else:
        if path:
            path = os.path.abspath(path)
        # Precedence: explicit argument > existing app config > default.
        app.config['PATH_HTML']= first_value(path, app.config.get('PATH_HTML',None), os.getcwd())
        app.config['HOST'] = first_value(host, app.config.get('HOST',None), '0.0.0.0')
        app.config['PORT'] = int(first_value(port, app.config.get('PORT',None), 5001))
        app.logger.setLevel(logging.DEBUG)
        # Keep an in-memory history of log records (used by the app).
        app.config['historylog'] = HistoryHandler()
        app.logger.addHandler(app.config['historylog'])
        if not nolog:
            app.logger.addHandler(StreamHandler())
        if cors: CORS(app)
        app.run(host = app.config['HOST'],
                port = app.config['PORT'],
                threaded = True) | start web server
def sqlite_to_csv(
    input_filename,
    table_name,
    output_filename,
    dialect=csv.excel,
    batch_size=10000,
    encoding="utf-8",
    callback=None,
    query=None,
):
    """Export a table inside a SQLite database to CSV.

    :param input_filename: path of the SQLite database file
    :param table_name: table to export (ignored when ``query`` is given)
    :param output_filename: destination CSV file (handled by
        ``open_compressed``, so it may be compressed)
    :param dialect: csv dialect name or instance
    :param batch_size: number of rows written per chunk
    :param encoding: text encoding of the output file
    :param callback: optional ``callback(written, total_written)`` invoked
        after each batch
    :param query: custom SQL query overriding the default ``SELECT *``
    """
    # TODO: should be able to specify fields
    if isinstance(dialect, six.text_type):
        dialect = csv.get_dialect(dialect)
    if query is None:
        query = "SELECT * FROM {}".format(table_name)
    connection = sqlite3.Connection(input_filename)
    try:
        cursor = connection.cursor()
        result = cursor.execute(query)
        header = [item[0] for item in cursor.description]
        fobj = open_compressed(output_filename, mode="w", encoding=encoding)
        try:
            writer = csv.writer(fobj, dialect=dialect)
            writer.writerow(header)
            total_written = 0
            for batch in rows.plugins.utils.ipartition(result, batch_size):
                writer.writerows(batch)
                written = len(batch)
                total_written += written
                if callback:
                    callback(written, total_written)
        finally:
            # Previously the output file was leaked if writing raised.
            fobj.close()
    finally:
        # Previously the database connection was never closed.
        connection.close()
def save(self, **kwargs):
    """
    Create the translation tasks for every selected instance.

    :param kwargs: unused; accepted for serializer-interface compatibility
    :return: ``{'status': 'ok'}`` on success
    :raises serializers.ValidationError: if anything goes wrong
    """
    try:
        manager = Manager()
        main_language = manager.get_main_language()
        queryset = self.model_class.objects.language(main_language).filter(pk__in=self.ids)
        for instance in queryset.all():
            # Enqueue (celery .delay) one translation task per instance.
            create_translations_for_item_and_its_children.delay(
                self.model_class, instance.pk, self.languages,
                update_item_languages=True)
        return {'status': 'ok'}
    except Exception as e:
        raise serializers.ValidationError(detail=str(e))
:param kwargs:
:return: |
def create_skeleton(shutit):
    """Creates module based on a pattern supplied as a git repo.

    Reads all skeleton settings from ``shutit.cfg['skeleton']``, validates
    them, creates the target directory, then dispatches to the
    pattern-specific setup function (bash / docker / vagrant / shutitfile).

    :param shutit: the shutit object carrying configuration and fail/log APIs
    """
    skel_path = shutit.cfg['skeleton']['path']
    skel_module_name = shutit.cfg['skeleton']['module_name']
    skel_domain = shutit.cfg['skeleton']['domain']
    skel_domain_hash = shutit.cfg['skeleton']['domain_hash']
    skel_depends = shutit.cfg['skeleton']['depends']
    skel_shutitfiles = shutit.cfg['skeleton']['shutitfiles']
    skel_delivery = shutit.cfg['skeleton']['delivery']
    skel_pattern = shutit.cfg['skeleton']['pattern']
    # For vagrant only
    skel_vagrant_num_machines = shutit.cfg['skeleton']['vagrant_num_machines']
    skel_vagrant_machine_prefix = shutit.cfg['skeleton']['vagrant_machine_prefix']
    skel_vagrant_ssh_access = shutit.cfg['skeleton']['vagrant_ssh_access']
    skel_vagrant_docker = shutit.cfg['skeleton']['vagrant_docker']
    skel_vagrant_snapshot = shutit.cfg['skeleton']['vagrant_snapshot']
    skel_vagrant_upload = shutit.cfg['skeleton']['vagrant_upload']
    skel_vagrant_image_name = shutit.cfg['skeleton']['vagrant_image_name']
    # Check setup: the target path must be absolute and must not exist yet,
    # and the module name must be a valid Python class name.
    if not skel_path or skel_path[0] != '/':
        shutit.fail('Must supply a directory and it must be absolute')  # pragma: no cover
    if os.path.exists(skel_path):
        shutit.fail(skel_path + ' already exists')  # pragma: no cover
    if not skel_module_name:
        shutit.fail('Must supply a name for your module, eg mymodulename')  # pragma: no cover
    if not re.match('^[a-zA-z_][0-9a-zA-Z_]+$', skel_module_name):
        shutit.fail('Module names must comply with python classname standards: cf: http://stackoverflow.com/questions/10120295/valid-characters-in-a-python-class-name name: ' + skel_module_name)  # pragma: no cover
    if not skel_domain:
        shutit.fail('Must supply a domain for your module, eg com.yourname.madeupdomainsuffix')  # pragma: no cover
    # Create folders and process pattern
    os.makedirs(skel_path)
    os.chdir(skel_path)
    # Dispatch on the chosen pattern; pattern modules are imported lazily so
    # only the one actually needed is loaded.
    if shutit.cfg['skeleton']['pattern'] == 'bash':
        from shutit_patterns import bash
        bash.setup_bash_pattern(shutit,
                                skel_path=skel_path,
                                skel_delivery=skel_delivery,
                                skel_domain=skel_domain,
                                skel_module_name=skel_module_name,
                                skel_shutitfiles=skel_shutitfiles,
                                skel_domain_hash=skel_domain_hash,
                                skel_depends=skel_depends)
    elif shutit.cfg['skeleton']['pattern'] == 'docker':
        from shutit_patterns import docker
        docker.setup_docker_pattern(shutit,
                                    skel_path=skel_path,
                                    skel_delivery=skel_delivery,
                                    skel_domain=skel_domain,
                                    skel_module_name=skel_module_name,
                                    skel_shutitfiles=skel_shutitfiles,
                                    skel_domain_hash=skel_domain_hash,
                                    skel_depends=skel_depends)
    elif shutit.cfg['skeleton']['pattern'] == 'vagrant':  # pragma: no cover
        from shutit_patterns import vagrant
        vagrant.setup_vagrant_pattern(shutit,
                                      skel_path=skel_path,
                                      skel_delivery=skel_delivery,
                                      skel_domain=skel_domain,
                                      skel_module_name=skel_module_name,
                                      skel_shutitfiles=skel_shutitfiles,
                                      skel_domain_hash=skel_domain_hash,
                                      skel_depends=skel_depends,
                                      skel_vagrant_num_machines=skel_vagrant_num_machines,
                                      skel_vagrant_machine_prefix=skel_vagrant_machine_prefix,
                                      skel_vagrant_ssh_access=skel_vagrant_ssh_access,
                                      skel_vagrant_docker=skel_vagrant_docker,
                                      skel_vagrant_snapshot=skel_vagrant_snapshot,
                                      skel_vagrant_upload=skel_vagrant_upload,
                                      skel_vagrant_image_name=skel_vagrant_image_name)
    elif shutit.cfg['skeleton']['pattern'] == 'shutitfile':
        shutitfile.setup_shutitfile_pattern(shutit,
                                            skel_path=skel_path,
                                            skel_delivery=skel_delivery,
                                            skel_pattern=skel_pattern,
                                            skel_domain=skel_domain,
                                            skel_module_name=skel_module_name,
                                            skel_vagrant_num_machines=skel_vagrant_num_machines,
                                            skel_vagrant_machine_prefix=skel_vagrant_machine_prefix,
                                            skel_vagrant_ssh_access=skel_vagrant_ssh_access,
                                            skel_vagrant_docker=skel_vagrant_docker)
    elif shutit.cfg['skeleton']['pattern'] == 'docker_tutorial':  # pragma: no cover
        shutit.fail('docker_tutorial not yet supported') | Creates module based on a pattern supplied as a git repo.
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[BlockNumber, int]:
    """
    Return a 2-tuple of (block_number, transaction_index) indicating which
    block the given transaction can be found in and at what index in the
    block transactions.

    Raises TransactionNotFound if the transaction_hash is not found in the
    canonical chain.
    """
    lookup_key = SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash)
    try:
        encoded_value = self.db[lookup_key]
    except KeyError:
        raise TransactionNotFound(
            "Transaction {} not found in canonical chain".format(encode_hex(transaction_hash)))
    tx_key = rlp.decode(encoded_value, sedes=TransactionKey)
    return (tx_key.block_number, tx_key.index)
block the given transaction can be found in and at what index in the
block transactions.
Raises TransactionNotFound if the transaction_hash is not found in the
canonical chain. |
def pacl_term(DiamTube, ConcClay, ConcAl, ConcNatOrgMat, NatOrgMat,
              coag, material, RatioHeightDiameter):
    """Return the fraction of the surface area that is covered with coagulant
    that is not covered with humic acid.

    :param DiamTube: Diameter of the dosing tube
    :type DiamTube: float
    :param ConcClay: Concentration of clay in solution
    :type ConcClay: float
    :param ConcAl: Concentration of aluminum in solution
    :type ConcAl: float
    :param ConcNatOrgMat: Concentration of natural organic matter in solution
    :type ConcNatOrgMat: float
    :param NatOrgMat: type of natural organic matter, e.g. floc_model.HumicAcid
    :type NatOrgMat: floc_model.Material
    :param coag: Type of coagulant in solution, e.g. floc_model.PACl
    :type coag: floc_model.Material
    :param material: Type of clay in suspension, e.g. floc_model.Clay
    :type material: floc_model.Material
    :param RatioHeightDiameter: Dimensionless ratio of clay height to clay diameter
    :type RatioHeightDiameter: float
    :return: fraction of the surface area that is covered with coagulant that is not covered with humic acid
    :rtype: float
    """
    coag_coverage = gamma_coag(ConcClay, ConcAl, coag, material, DiamTube,
                               RatioHeightDiameter)
    humic_fraction = gamma_humic_acid_to_coag(ConcAl, ConcNatOrgMat,
                                              NatOrgMat, coag)
    return coag_coverage * (1 - humic_fraction)
that is not covered with humic acid.
:param DiamTube: Diameter of the dosing tube
:type Diamtube: float
:param ConcClay: Concentration of clay in solution
:type ConcClay: float
:param ConcAl: Concentration of aluminum in solution
:type ConcAl: float
:param ConcNatOrgMat: Concentration of natural organic matter in solution
:type ConcNatOrgMat: float
:param NatOrgMat: type of natural organic matter, e.g. floc_model.HumicAcid
:type NatOrgMat: floc_model.Material
:param coag: Type of coagulant in solution, e.g. floc_model.PACl
:type coag: floc_model.Material
:param material: Type of clay in suspension, e.g. floc_model.Clay
:type material: floc_model.Material
:param RatioHeightDiameter: Dimensionless ratio of clay height to clay diameter
:type RatioHeightDiameter: float
:return: fraction of the surface area that is covered with coagulant that is not covered with humic acid
:rtype: float |
def reformat(found_sequences):
    '''Truncate the FASTA headers so that the first field is a 4-character ID.

    Mutates each sequence list in place: the header (first element) keeps
    only '>' plus the 4-character PDB ID in its first '|'-separated field.
    '''
    # BUG FIX: dict.iteritems() only exists on Python 2; items() works on
    # both Python 2 and 3 and iteration order is fixed by sorted() anyway.
    for (pdb_id, chain, file_name), sequence in sorted(found_sequences.items()):
        header = sequence[0]
        assert(header[0] == '>')
        tokens = header.split('|')
        # Keep '>' plus the 4-character PDB ID (5 characters total).
        tokens[0] = tokens[0][:5]
        assert(len(tokens[0]) == 5)
        sequence[0] = "|".join(tokens)
def plot(data: Dict[str, np.array], fields: List[str] = None, *args, **kwargs):
    """
    Plot simulation data.

    :data: A dictionary of arrays.
    :fields: A list of variables you want to plot (e.g. ['x', 'y', 'c'])
    """
    if plt is None:
        # matplotlib is unavailable -- plotting silently becomes a no-op.
        return
    selected = fields if fields is not None else ['x', 'y', 'm', 'c']
    plotted_lines = []
    plotted_labels = []
    for name in selected:
        # Skip fields whose array is empty along some axis.
        if min(data[name].shape) > 0:
            new_lines = plt.plot(data['t'], data[name], *args, **kwargs)
            plotted_lines.extend(new_lines)
            plotted_labels.extend(data['labels'][name])
    plt.legend(plotted_lines, plotted_labels, ncol=2, loc='best')
    plt.xlabel('t, sec')
    plt.grid()
:data: A dictionary of arrays.
:fields: A list of variables you want to plot (e.g. ['x', y', 'c']) |
def exclude_fields(self):
    """Remove serializer fields named in the request's ``exclude`` query parameter.

    The parameter is a comma-separated list of field names, e.g.
    ``?exclude=id,created``.  Unknown field names are ignored instead of
    raising ``KeyError`` (the parameter is user-supplied).
    """
    request = self.context.get('request')
    if not request:
        return
    exclude = request.query_params.get('exclude', None)
    if exclude is None:
        return
    for field in exclude.split(','):
        # pop with a default: a bogus name in the query string must not crash.
        self.fields.pop(field, None)
def download(self):
    """
    MLBAM dataset download.

    Downloads every entry in ``self.days`` in parallel using a process pool.
    """
    # Use the pool as a context manager so the worker processes are
    # properly terminated and joined even if a download raises
    # (the original left the pool open).
    with Pool() as p:
        p.map(self._download, self.days)
def setup(self, **kwargs):
'''
This is called during production de-trending, prior to
calling the :py:obj:`Detrender.run()` method.
:param inter piter: The number of iterations in the minimizer. \
Default 3
:param int pmaxf: The maximum number of function evaluations per \
iteration. Default 300
:param float ppert: The fractional amplitude of the perturbation on \
the initial guess. Default 0.1
'''
# Check for saved model
clobber = self.clobber
self.clobber = False
if not self.load_model('nPLD'):
raise Exception("Can't find `nPLD` model for target.")
self.clobber = clobber
# Powell iterations
self.piter = kwargs.get('piter', 3)
self.pmaxf = kwargs.get('pmaxf', 300)
self.ppert = kwargs.get('ppert', 0.1) | This is called during production de-trending, prior to
calling the :py:obj:`Detrender.run()` method.
:param inter piter: The number of iterations in the minimizer. \
Default 3
:param int pmaxf: The maximum number of function evaluations per \
iteration. Default 300
:param float ppert: The fractional amplitude of the perturbation on \
the initial guess. Default 0.1 |
def update(self):
    """Update this package's git checkout and run its update hook.

    Pulls the repository with ``git pull --rebase``, installs/updates
    dependencies, then executes the repo's ``_upkg/update`` script if
    one exists.

    :return: None
    """
    logger.debug("")
    repo_dir = self.repo_dir
    logger.debug("pkg path %s", repo_dir)
    if not repo_dir:
        print(
            "unable to find pkg '%s'. %s" % (self.name, did_u_mean(self.name))
        )
        # BUG FIX: previously fell through and called os.chdir(None).
        return
    cwd = os.getcwd()
    os.chdir(repo_dir)
    logger.debug("cwd: %s, updating %s ", cwd, repo_dir)
    try:
        p = git.pull('--rebase', '--progress',
                     _out=self._sh_stdout('blue'),
                     _err=self._sh_stderr('red'))
        p.wait()
    except Exception as e:
        # Best effort: a failed pull should not abort the update hook,
        # but it should not be silently swallowed either.
        logger.warning("git pull failed for %s: %s", self.name, e)
    finally:
        # Always restore the working directory, even if the pull raised.
        os.chdir(cwd)
    # Update or install any dependencies before running the
    # update script.
    self.install_update_deps()
    up = os.path.join(repo_dir, '_upkg', 'update')
    if os.path.exists(up):
        # We use subprocess instead of the sh module due to problems with
        # running shell scripts with sh
        cwd = os.getcwd()
        os.chdir(os.path.join(repo_dir, '_upkg'))
        try:
            self.pr_info("Running update script for {} @ {}", self.name, up)
            subprocess.check_call(up, shell=True)
        finally:
            os.chdir(cwd)
:return:
:rtype: |
def _ack(self, message_id, subscription_id, **kwargs):
"""Acknowledge receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message_id: ID of the message to be acknowledged
:param subscription: ID of the relevant subscriptiong
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if acknowledgement should be part of
a transaction
"""
self._conn.ack(message_id, subscription_id, **kwargs) | Acknowledge receipt of a message. This only makes sense when the
'acknowledgement' flag was set for the relevant subscription.
:param message_id: ID of the message to be acknowledged
:param subscription: ID of the relevant subscriptiong
:param **kwargs: Further parameters for the transport layer. For example
transaction: Transaction ID if acknowledgement should be part of
a transaction |
def _compute_magnitude(self, rup, C):
"""
Compute the first term of the equation described on p. 1144:
``c1 + c2 * (M - 6) + c3 * log(M / 6)``
"""
return C['c1'] + C['c2'] * (rup.mag - 6.0) +\
(C['c3'] * np.log(rup.mag / 6.0)) | Compute the first term of the equation described on p. 1144:
``c1 + c2 * (M - 6) + c3 * log(M / 6)`` |
def write_to_fp(self, fp):
    """Do the TTS API request and write bytes to a file-like object.

    The text is split into API-sized parts; each part becomes one HTTP
    request whose mp3 bytes are appended to ``fp`` in order.

    Args:
        fp (file object): Any file-like object to write the ``mp3`` to.

    Raises:
        :class:`gTTSError`: When there's an error with the API request.
        TypeError: When ``fp`` is not a file-like object that takes bytes.
    """
    # When disabling ssl verify in requests (for proxies and firewalls),
    # urllib3 prints an insecure warning on stdout. We disable that.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    text_parts = self._tokenize(self.text)
    log.debug("text_parts: %i", len(text_parts))
    # An empty tokenization means there is nothing to request.
    assert text_parts, 'No text to send to TTS API'
    for idx, part in enumerate(text_parts):
        try:
            # Calculate token (per-part request token required by the API)
            part_tk = self.token.calculate_token(part)
        except requests.exceptions.RequestException as e:  # pragma: no cover
            log.debug(str(e), exc_info=True)
            raise gTTSError(
                "Connection error during token calculation: %s" %
                str(e))
        payload = {'ie': 'UTF-8',
                   'q': part,
                   'tl': self.lang,
                   'ttsspeed': self.speed,
                   'total': len(text_parts),
                   'idx': idx,
                   'client': 'tw-ob',
                   'textlen': _len(part),
                   'tk': part_tk}
        log.debug("payload-%i: %s", idx, payload)
        try:
            # Request (verify=False pairs with the warning suppression above)
            r = requests.get(self.GOOGLE_TTS_URL,
                             params=payload,
                             headers=self.GOOGLE_TTS_HEADERS,
                             proxies=urllib.request.getproxies(),
                             verify=False)
            log.debug("headers-%i: %s", idx, r.request.headers)
            log.debug("url-%i: %s", idx, r.request.url)
            log.debug("status-%i: %s", idx, r.status_code)
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            # Request successful, bad response
            raise gTTSError(tts=self, response=r)
        except requests.exceptions.RequestException as e:  # pragma: no cover
            # Request failed
            raise gTTSError(str(e))
        try:
            # Write: stream the response body in 1 KiB chunks so the
            # whole mp3 is never held in memory at once.
            for chunk in r.iter_content(chunk_size=1024):
                fp.write(chunk)
            log.debug("part-%i written to %s", idx, fp)
        except (AttributeError, TypeError) as e:
            raise TypeError(
                "'fp' is not a file-like object or it does not take bytes: %s" %
                str(e))
Args:
fp (file object): Any file-like object to write the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
TypeError: When ``fp`` is not a file-like object that takes bytes. |
def _parse_count_range(count_range, range_name):
    # Helper: normalize a range spec to a (min, max) pair.
    # A list is [min, max]; an int means exactly that count; None means
    # unconstrained (both bounds None).
    if isinstance(count_range, list):
        return count_range[0], count_range[1]
    if isinstance(count_range, int) or count_range is None:
        return count_range, count_range
    raise RuntimeError('%s must be a list or an integer' % range_name)


def check_input_and_output_numbers(operator, input_count_range=None, output_count_range=None):
    '''
    Check if the number of input(s)/output(s) is correct

    :param operator: A Operator object
    :param input_count_range: A list of two integers or an integer. If it's a list the first/second element is the
    minimal/maximal number of inputs. If it's an integer, it is equivalent to specify that number twice in a list. For
    infinite ranges like 5 to infinity, you need to use [5, None].
    :param output_count_range: A list of two integers or an integer. See input_count_range for its format.
    :raises RuntimeError: when a range spec is malformed or a count is out of range
    '''
    min_input_count, max_input_count = _parse_count_range(
        input_count_range, 'input_count_range')
    min_output_count, max_output_count = _parse_count_range(
        output_count_range, 'output_count_range')
    if min_input_count is not None and len(operator.inputs) < min_input_count:
        raise RuntimeError(
            'For operator %s (type: %s), at least %s input(s) is(are) required but we got %s input(s) which are %s' \
            % (operator.full_name, operator.type, min_input_count, len(operator.inputs), operator.input_full_names))
    if max_input_count is not None and len(operator.inputs) > max_input_count:
        # BUG FIX: the message previously said "output(s)" while
        # reporting the *input* count and names.
        raise RuntimeError(
            'For operator %s (type: %s), at most %s input(s) is(are) supported but we got %s input(s) which are %s' \
            % (operator.full_name, operator.type, max_input_count, len(operator.inputs), operator.input_full_names))
    if min_output_count is not None and len(operator.outputs) < min_output_count:
        raise RuntimeError(
            'For operator %s (type: %s), at least %s output(s) is(are) produced but we got %s output(s) which are %s' \
            % (operator.full_name, operator.type, min_output_count, len(operator.outputs), operator.output_full_names))
    if max_output_count is not None and len(operator.outputs) > max_output_count:
        raise RuntimeError(
            'For operator %s (type: %s), at most %s outputs(s) is(are) supported but we got %s output(s) which are %s' \
            % (operator.full_name, operator.type, max_output_count, len(operator.outputs), operator.output_full_names))
:param operator: A Operator object
:param input_count_range: A list of two integers or an integer. If it's a list the first/second element is the
minimal/maximal number of inputs. If it's an integer, it is equivalent to specify that number twice in a list. For
infinite ranges like 5 to infinity, you need to use [5, None].
:param output_count_range: A list of two integers or an integer. See input_count_range for its format. |
def add_application(self, application_id, **kwargs):
    """
    Add an application to a group.

    `application_id` is the name of the application to add; any
    application options may be passed as keyword arguments.
    """
    payload = {'application_id': application_id}
    if kwargs:
        payload["options"] = kwargs
    endpoint = 'group/%s/application' % self.id
    self.api.request(endpoint, payload)
`application_id` is the name of the application to add. Any
application options can be specified as kwargs. |
def remove_accessibility_type(self, accessibility_type=None):
    """Removes an accessibility type.

    :param accessibility_type: accessibility type to remove
    :type accessibility_type: ``osid.type.Type``
    :raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
    :raise: ``NotFound`` -- accessibility type not found
    :raise: ``NullArgument`` -- ``accessibility_type`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    if accessibility_type is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['accessibility_type'])
    if metadata.is_read_only() or metadata.is_required():
        raise NoAccess()
    # BUG FIX: the membership check previously read the key
    # 'accessibility_type' (singular) while the removal below mutates
    # 'accessibility_types' (plural), so NotFound was raised spuriously.
    # Both operations now use the same key.  NOTE(review): confirm the
    # plural key matches the stored map schema.
    type_id = accessibility_type._my_map['id']
    if type_id not in self._my_map['accessibility_types']:
        raise NotFound()
    self._my_map['accessibility_types'].remove(type_id)
:param accessibility_type: accessibility type to remove
:type accessibility_type: ``osid.type.Type``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NotFound`` -- acessibility type not found
:raise: ``NullArgument`` -- ``accessibility_type`` is ``null``
*compliance: mandatory -- This method must be implemented.* |
def _analyze_ini_file(self, add_header=False):
    """
    Build an analyzer closure for INI-style files.

    :returns: same format as super().analyze()
    """
    def wrapped(file, filename):
        findings = {}
        # The non-quoted-string regex must be active both while the
        # parser is constructed and while values are iterated.
        with self.non_quoted_string_regex():
            parser = IniFileParser(
                file,
                add_header,
                exclude_lines_regex=self.exclude_lines_regex,
            )
            for value, lineno in parser.iterator():
                findings.update(
                    self.analyze_string(value, lineno, filename))
        return findings

    return wrapped
def export_request_rate_by_interval(
        self, parameters, location, custom_headers=None, raw=False, polling=True, **operation_config):
    """Export logs that show Api requests made by this subscription in the
    given time window to show throttling activities.

    :param parameters: Parameters supplied to the LogAnalytics
     getRequestRateByInterval Api.
    :type parameters:
     ~azure.mgmt.compute.v2018_04_01.models.RequestRateByIntervalInput
    :param location: The location upon which virtual-machine-sizes is
     queried.
    :type location: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns
     LogAnalyticsOperationResult or
     ClientRawResponse<LogAnalyticsOperationResult> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the long-running operation; raw=True is forced here so the
    # poller gets the raw HTTP response to drive polling with.
    raw_result = self._export_request_rate_by_interval_initial(
        parameters=parameters,
        location=location,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Deserialize the LRO's final response into the model type;
        # wrap it in ClientRawResponse when the caller asked for raw.
        deserialized = self._deserialize('LogAnalyticsOperationResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    # Per-call polling delay, falling back to the client-wide default.
    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
given time window to show throttling activities.
:param parameters: Parameters supplied to the LogAnalytics
getRequestRateByInterval Api.
:type parameters:
~azure.mgmt.compute.v2018_04_01.models.RequestRateByIntervalInput
:param location: The location upon which virtual-machine-sizes is
queried.
:type location: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
LogAnalyticsOperationResult or
ClientRawResponse<LogAnalyticsOperationResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.LogAnalyticsOperationResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` |
def encrypt_file(cls, key, in_filename, out_filename=None, chunksize=64 * 1024):
    """ Encrypts a file using AES (CBC mode) with the given key.

    key:
        The encryption key - bytes that must be either 16, 24 or
        32 bytes long. Longer keys are more secure.
    in_filename:
        Name of the input file
    out_filename:
        If None, '<in_filename>.enc' will be used.
    chunksize:
        Sets the size of the chunk which the function uses to read
        and encrypt the file. Larger chunk sizes can be faster for
        some files and machines. chunksize must be divisible by 16.

    Output layout: 8-byte little-endian plaintext size, then the
    16-byte IV, then the ciphertext. The final chunk is space-padded
    to a multiple of 16; the stored size lets the decryptor strip it.
    """
    if not out_filename:
        out_filename = in_filename + '.enc'
    # SECURITY FIX: the IV was built with random.randint (a predictable,
    # non-cryptographic RNG) and as a *str*, which AES.new rejects on
    # Python 3. Use the OS CSPRNG and bytes instead.
    iv = os.urandom(16)
    encryptor = AES.new(key, AES.MODE_CBC, iv)
    filesize = os.path.getsize(in_filename)
    with open(in_filename, 'rb') as infile:
        with open(out_filename, 'wb') as outfile:
            outfile.write(struct.pack('<Q', filesize))
            outfile.write(iv)
            while True:
                chunk = infile.read(chunksize)
                if len(chunk) == 0:
                    break
                elif len(chunk) % 16 != 0:
                    # Pad with bytes (not str) so concatenation with the
                    # binary chunk works on Python 3.
                    chunk += b' ' * (16 - len(chunk) % 16)
                outfile.write(encryptor.encrypt(chunk))
given key.
key:
The encryption key - a string that must be
either 16, 24 or 32 bytes long. Longer keys
are more secure.
in_filename:
Name of the input file
out_filename:
If None, '<in_filename>.enc' will be used.
chunksize:
Sets the size of the chunk which the function
uses to read and encrypt the file. Larger chunk
sizes can be faster for some files and machines.
chunksize must be divisible by 16. |
def format_kinds(raw):
    """Format a string representing the kinds.

    Each truthy entry of ``raw`` is a pair rendered as "a b"; the
    rendered pairs are joined with single spaces.
    """
    rendered = []
    for kind in raw:
        if kind:
            rendered.append('{} {}'.format(*kind))
    return ' '.join(rendered)
def load_contents(self):
    """
    Loads contents of Database from a filename database.csv.

    Each line has double quotes removed, is stripped of surrounding
    whitespace, split on commas, and stored as a row.

    NOTE(review): quotes are removed *before* splitting, so commas
    inside quoted fields are not supported; the stdlib ``csv`` module
    would handle those -- confirm the file format before switching.
    """
    with open(self.name + ".csv") as f:
        # Stream line by line instead of the old readlines() + nested
        # map/lambda pipeline: same cleanup (replace quotes, then strip),
        # but readable and without materializing the whole file.
        for line in f:
            cleaned = line.replace("\"", "").strip()
            self.put_row(make_row(self.columns, cleaned.split(',')))
def get_last_depth(self, symbol, _type):
    """
    Fetch market depth (decorator factory).

    :param symbol: trading pair symbol
    :param _type: one of { percent10, step0, step1, step2, step3, step4, step5 }
    :return: a decorator whose wrapped function receives the HTTP
        response of the depth request when called
    """
    request_params = {'symbol': symbol, 'type': _type}
    endpoint = u.MARKET_URL + '/market/depth'

    def _wrapper(_func):
        @wraps(_func)
        def handle():
            response = http_get_request(endpoint, request_params)
            _func(response)
        return handle

    return _wrapper
:param symbol
:param type: 可选值:{ percent10, step0, step1, step2, step3, step4, step5 }
:return: |
def generate(self, output_dir, work, ngrams, labels, minus_ngrams):
    """Generate HTML reports for each witness to `work`, showing its
    text with the n-grams in `ngrams` highlighted.

    Any n-grams in `minus_ngrams` have any highlighting of them (or
    subsets of them) removed.

    :param output_dir: directory to write report to
    :type output_dir: `str`
    :param work: name of work to highlight
    :type work: `str`
    :param ngrams: groups of n-grams to highlight
    :type ngrams: `list` of `list` of `str`
    :param labels: labels for the groups of n-grams
    :type labels: `list` of `str`
    :param minus_ngrams: n-grams to remove highlighting from
    :type minus_ngrams: `list` of `str`
    :rtype: `str`
    """
    template = self._get_template()
    colours = generate_colours(len(ngrams))
    for siglum in self._corpus.get_sigla(work):
        # Apply each highlight group in turn, then strip the
        # minus_ngrams highlighting from the result.
        highlighted = self._generate_base(work, siglum)
        for group in ngrams:
            highlighted = self._highlight(highlighted, group, True)
        highlighted = self._highlight(highlighted, minus_ngrams, False)
        self._ngrams_count = 1
        formatted = self._format_content(highlighted)
        report_name = '{}-{}.html'.format(work, siglum)
        self._write(work, siglum, formatted, output_dir, report_name,
                    template, ngram_data=zip(labels, ngrams),
                    minus_ngrams=minus_ngrams, colours=colours)
with the n-grams in `ngrams` highlighted.
Any n-grams in `minus_ngrams` have any highlighting of them
(or subsets of them) removed.
:param output_dir: directory to write report to
:type output_dir: `str`
:param work: name of work to highlight
:type work: `str`
:param ngrams: groups of n-grams to highlight
:type ngrams: `list` of `list` of `str`
:param labels: labels for the groups of n-grams
:type labels: `list` of `str`
:param minus_ngrams: n-grams to remove highlighting from
:type minus_ngrams: `list` of `str`
:rtype: `str` |
def _calculate_refund_amount(self, amount=None):
"""
:rtype: int
:return: amount that can be refunded, in CENTS
"""
eligible_to_refund = self.amount - (self.amount_refunded or 0)
if amount:
amount_to_refund = min(eligible_to_refund, amount)
else:
amount_to_refund = eligible_to_refund
return int(amount_to_refund * 100) | :rtype: int
:return: amount that can be refunded, in CENTS |
def prox_l1(v, alpha):
    r"""Compute the proximal operator of the :math:`\ell_1` norm (scalar
    shrinkage/soft thresholding)

    .. math::
        \mathrm{prox}_{\alpha f}(\mathbf{v}) =
        \mathcal{S}_{1,\alpha}(\mathbf{v}) = \mathrm{sign}(\mathbf{v})
        \odot \max(0, |\mathbf{v}| - \alpha)

    where :math:`f(\mathbf{x}) = \|\mathbf{x}\|_1`. There is no `axis`
    parameter (unlike :func:`norm_l1`) because the operator acts
    elementwise.

    Parameters
    ----------
    v : array_like
        Input array :math:`\mathbf{v}`
    alpha : float or array_like
        Parameter :math:`\alpha`

    Returns
    -------
    x : ndarray
        Output array
    """
    # Use numexpr for a single fused pass when available.
    if have_numexpr:
        return ne.evaluate(
            'where(abs(v)-alpha > 0, where(v >= 0, 1, -1) * (abs(v)-alpha), 0)'
        )
    # Pure-numpy fallback: sign(v) * max(0, |v| - alpha).
    return np.sign(v) * np.maximum(np.abs(v) - alpha, 0)
shrinkage/soft thresholding)
.. math::
\mathrm{prox}_{\alpha f}(\mathbf{v}) =
\mathcal{S}_{1,\alpha}(\mathbf{v}) = \mathrm{sign}(\mathbf{v})
\odot \max(0, |\mathbf{v}| - \alpha)
where :math:`f(\mathbf{x}) = \|\mathbf{x}\|_1`.
Unlike the corresponding :func:`norm_l1`, there is no need for an
`axis` parameter since the proximal operator of the :math:`\ell_1`
norm is the same when taken independently over each element, or
over their sum.
Parameters
----------
v : array_like
Input array :math:`\mathbf{v}`
alpha : float or array_like
Parameter :math:`\alpha`
Returns
-------
x : ndarray
Output array |
def parse_workflow_call_body(self, i):
    """
    Parse the body of a workflow call. Required.

    :param i: a wdl_parser Terminal, Ast, or AstList node
    :return: the call's io mappings (an OrderedDict), or the source
        string for a blank call
    """
    if isinstance(i, wdl_parser.Terminal):
        # A bare terminal represents just a blank call: no io mappings.
        return i.source_string
    if isinstance(i, wdl_parser.Ast):
        if i.name != 'CallBody':
            raise NotImplementedError
        # Declarations are parsed but have not been seen in use.
        self.parse_workflow_call_body_declarations(i.attr("declarations"))
        return self.parse_workflow_call_body_io(i.attr('io'))
    if isinstance(i, wdl_parser.AstList):
        raise NotImplementedError
    return OrderedDict()
:param i:
:return: |
def close(self):
    """Close the plot and release its memory.
    """
    from matplotlib.pyplot import close
    # Walk the axes back-to-front, resetting each before clearing.
    for axis in self.axes[::-1]:
        # reset scales to linear first to avoid matplotlib/matplotlib#9970
        axis.set_xscale('linear')
        axis.set_yscale('linear')
        # clear the axes
        axis.cla()
    # close the figure itself
    close(self)
def register_sizer(self, attr_name, sizedimage_cls):
    """
    Register a new SizedImage subclass (`sizedimage_cls`)
    under the attribute (`attr_name`).

    Raises UnallowedSizerName, InvalidSizedImageSubclass or
    AlreadyRegistered on invalid input.
    """
    # Reserved names: anything underscore-prefixed or explicitly banned.
    if attr_name.startswith('_') or attr_name in self.unallowed_sizer_names:
        raise UnallowedSizerName(
            "`%s` is an unallowed Sizer name. Sizer names cannot begin "
            "with an underscore or be named any of the "
            "following: %s." % (
                attr_name,
                ', '.join(self.unallowed_sizer_names)
            )
        )
    if not issubclass(sizedimage_cls, SizedImage):
        raise InvalidSizedImageSubclass(
            'Only subclasses of versatileimagefield.datastructures.'
            'SizedImage may be registered with register_sizer'
        )
    if attr_name in self._sizedimage_registry:
        raise AlreadyRegistered(
            'A SizedImage class is already registered to the `%s` '
            'attribute. If you would like to override this attribute, '
            'use the unregister method' % attr_name
        )
    self._sizedimage_registry[attr_name] = sizedimage_cls
To be used via the attribute (`attr_name`). |
def distance_restraint_force(self, atoms, distances, strengths):
    """
    Build a HarmonicBondForce restraining pairs of atoms.

    Parameters
    ----------
    atoms : tuple of tuple of int or str
        Pair of atom indices to be restrained, with shape (n, 2),
        like ((a1, a2), (a3, a4)). Items can be str compatible with MDTraj DSL.
    distances : tuple of float
        Equilibrium distances for each pair. The special value
        'current' uses the pair's distance in the current positions.
    strengths : tuple of float
        Force constant for each pair

    Returns
    -------
    force : mm.HarmonicBondForce
        The populated restraint force (not yet added to the system).
    """
    system = self.system
    force = mm.HarmonicBondForce()
    # Mirror the system's PBC setting so restraint distances wrap
    # consistently with the rest of the simulation.
    force.setUsesPeriodicBoundaryConditions(self.system.usesPeriodicBoundaryConditions())
    for pair, distance, strength in zip(atoms, distances, strengths):
        indices = []
        for atom in pair:
            if isinstance(atom, str):
                # Resolve a DSL selection string; it must match exactly
                # one atom to define an unambiguous restraint.
                index = self.subset(atom)
                if len(index) != 1:
                    raise ValueError('Distance restraint for selection `{}` returns != 1 atom!: {}'
                                     .format(atom, index))
                indices.append(int(index[0]))
            elif isinstance(atom, (int, float)):
                indices.append(int(atom))
            else:
                raise ValueError('Distance restraint atoms must be int or str DSL selections')
        if distance == 'current':
            # Use the distance in the current coordinates as the
            # equilibrium distance (self.positions takes precedence).
            pos = self.positions or system.positions
            distance = np.linalg.norm(pos[indices[0]] - pos[indices[1]])
        # NOTE(review): distance is treated as nanometers and strength
        # as kcal/mol/angstrom^2 -- confirm callers pass these units.
        force.addBond(indices[0], indices[1], distance*u.nanometers,
                      strength*u.kilocalories_per_mole/u.angstroms**2)
    return force
----------
atoms : tuple of tuple of int or str
Pair of atom indices to be restrained, with shape (n, 2),
like ((a1, a2), (a3, a4)). Items can be str compatible with MDTraj DSL.
distances : tuple of float
Equilibrium distances for each pair
strengths : tuple of float
Force constant for each pair |
def point3d(value, lon, lat, depth):
    """
    This is used to convert nodes of the form
    <hypocenter lon="LON" lat="LAT" depth="DEPTH"/>

    :param value: None
    :param lon: longitude string
    :param lat: latitude string
    :param depth: depth string
    :returns: a validated triple (lon, lat, depth)
    """
    validated_lon = longitude(lon)
    validated_lat = latitude(lat)
    validated_depth = positivefloat(depth)
    return validated_lon, validated_lat, validated_depth
<hypocenter lon="LON" lat="LAT" depth="DEPTH"/>
:param value: None
:param lon: longitude string
:param lat: latitude string
:returns: a validated triple (lon, lat, depth) |
def get_dummy_dynamic_run(nsamples, **kwargs):
    """Generate dummy data for a dynamic nested sampling run.

    Loglikelihood values of points are generated from a uniform distribution
    in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
    not -np.inf). Theta values of each point are each generated from a uniform
    distribution in (0, 1).

    Parameters
    ----------
    nsamples: int
        Number of samples in thread.
    nthread_init: int
        Number of threads in the inital run (starting at logl=-np.inf).
    nthread_dyn: int
        Number of threads in the inital run (starting at randomly chosen points
        in the initial run).
    ndim: int, optional
        Number of dimensions.
    seed: int, optional
        If not False, the seed is set with np.random.seed(seed).
    logl_start: float, optional
        logl at which thread starts.
    logl_range: float, optional
        Scale factor applied to logl values.
    """
    seed = kwargs.pop('seed', False)
    ndim = kwargs.pop('ndim', 2)
    nthread_init = kwargs.pop('nthread_init', 2)
    nthread_dyn = kwargs.pop('nthread_dyn', 3)
    logl_range = kwargs.pop('logl_range', 1)
    # Reject any unrecognized keyword arguments explicitly.
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # Initial run: threads starting from the prior (logl = -inf).
    # The seed (if any) is applied once here.
    init = get_dummy_run(nthread_init, nsamples, ndim=ndim, seed=seed,
                         logl_start=-np.inf, logl_range=logl_range)
    # Dynamic threads start at logl values drawn from the initial run.
    dyn_starts = list(np.random.choice(
        init['logl'], nthread_dyn, replace=True))
    threads = nestcheck.ns_run_utils.get_run_threads(init)
    # Seed must be False here so it is not set again for each thread
    threads += [get_dummy_thread(
        nsamples, ndim=ndim, seed=False, logl_start=start,
        logl_range=logl_range) for start in dyn_starts]
    # make sure the threads have unique labels and combine them
    for i, _ in enumerate(threads):
        threads[i]['thread_labels'] = np.full(nsamples, i)
    run = nestcheck.ns_run_utils.combine_threads(threads)
    # To make sure the thread labelling is same way it would when
    # processing a dead points file, tranform into dead points
    samples = nestcheck.write_polychord_output.run_dead_birth_array(run)
    return nestcheck.data_processing.process_samples_array(samples)
Loglikelihood values of points are generated from a uniform distribution
in (0, 1), sorted, scaled by logl_range and shifted by logl_start (if it is
not -np.inf). Theta values of each point are each generated from a uniform
distribution in (0, 1).
Parameters
----------
nsamples: int
Number of samples in thread.
nthread_init: int
Number of threads in the inital run (starting at logl=-np.inf).
nthread_dyn: int
Number of threads in the inital run (starting at randomly chosen points
in the initial run).
ndim: int, optional
Number of dimensions.
seed: int, optional
If not False, the seed is set with np.random.seed(seed).
logl_start: float, optional
logl at which thread starts.
logl_range: float, optional
Scale factor applied to logl values. |
def desbloquear_sat(self):
    """Override of :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat`
    that wraps the raw return value in a standard SAT response.

    :return: A standard SAT response.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    raw = super(ClienteSATLocal, self).desbloquear_sat()
    return RespostaSAT.desbloquear_sat(raw)
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT |
def create(name, **params):
    '''
    Function to create device in Server Density. For more info, see the `API
    docs`__.

    .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating

    CLI Example:

    .. code-block:: bash

        salt '*' serverdensity_device.create lama
        salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
    '''
    log.debug('Server Density params: %s', params)
    payload = _clean_salt_variables(params)
    payload['name'] = name
    api_response = requests.post(
        'https://api.serverdensity.io/inventory/devices/',
        params={'token': get_sd_auth('api_token')},
        data=payload
    )
    log.debug('Server Density API Response: %s', api_response)
    log.debug('Server Density API Response content: %s', api_response.content)
    # Guard clause: anything other than HTTP 200 yields None.
    if api_response.status_code != 200:
        return None
    try:
        return salt.utils.json.loads(api_response.content)
    except ValueError:
        log.error('Could not parse API Response content: %s', api_response.content)
        raise CommandExecutionError(
            'Failed to create, API Response: {0}'.format(api_response)
        )
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768 |
def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
                              min_epsilon=-10, max_epsilon=10,
                              num_points=21):
  """Generate linear extrapolation plot.

  Args:
    log_prob_adv_array: Numpy array containing log probabilities
    y: Tf placeholder for the labels
    file_name: Plot filename
    min_epsilon: Minimum value of epsilon over the interval
    max_epsilon: Maximum value of epsilon over the interval
    num_points: Number of points used to interpolate

  Returns:
    The matplotlib figure that was saved to ``file_name``.
  """
  import matplotlib
  matplotlib.use('Agg')
  import matplotlib.pyplot as plt

  # BUG FIX: the original created two figures, plotted into and saved the
  # second, but returned the first (empty) one. Create exactly one figure
  # and both save and return it.
  fig = plt.figure()
  fig.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')

  correct_idx = np.argmax(y, axis=0)
  plt.xlabel('Epsilon')
  plt.ylabel('Logits')
  x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
  plt.xlim(min_epsilon - 1, max_epsilon + 1)
  for i in range(y.shape[0]):
    if i == correct_idx:
      # Emphasize the correct class with a thick solid line.
      ls = '-'
      linewidth = 5
    else:
      ls = '--'
      linewidth = 2
    plt.plot(
        x_axis,
        log_prob_adv_array[:, i],
        ls=ls,
        linewidth=linewidth,
        label='{}'.format(i))
  plt.legend(loc='best', fontsize=14)
  plt.show()
  fig.savefig(file_name)
  plt.clf()
  return fig
Args:
log_prob_adv_array: Numpy array containing log probabilities
y: Tf placeholder for the labels
file_name: Plot filename
min_epsilon: Minimum value of epsilon over the interval
max_epsilon: Maximum value of epsilon over the interval
num_points: Number of points used to interpolate |
def gimbal_torque_cmd_report_send(self, target_system, target_component, rl_torque_cmd, el_torque_cmd, az_torque_cmd, force_mavlink1=False):
    '''
    100 Hz gimbal torque command telemetry

    target_system             : System ID (uint8_t)
    target_component          : Component ID (uint8_t)
    rl_torque_cmd             : Roll Torque Command (int16_t)
    el_torque_cmd             : Elevation Torque Command (int16_t)
    az_torque_cmd             : Azimuth Torque Command (int16_t)
    '''
    # Encode the message first, then hand it to the transport.
    message = self.gimbal_torque_cmd_report_encode(
        target_system, target_component,
        rl_torque_cmd, el_torque_cmd, az_torque_cmd)
    return self.send(message, force_mavlink1=force_mavlink1)
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
rl_torque_cmd : Roll Torque Command (int16_t)
el_torque_cmd : Elevation Torque Command (int16_t)
az_torque_cmd : Azimuth Torque Command (int16_t) |
def post_handler_err(self, function_arn, invocation_id, handler_err):
    """
    Post the error message from executing the function handler for :code:`function_arn`
    with specified :code:`invocation_id`

    :param function_arn: Arn of the Lambda function which has the handler error message.
    :type function_arn: string
    :param invocation_id: Invocation ID of the work that is being requested
    :type invocation_id: string
    :param handler_err: the error message caught from handler
    :type handler_err: string
    """
    url = self._get_work_url(function_arn)
    runtime_logger.info('Posting handler error for invocation id [{}] to {}'.format(invocation_id, url))
    body = {"errorMessage": handler_err}
    request = Request(url, json.dumps(body).encode('utf-8'))
    # Tag the error as a handled function error for this invocation.
    for header, value in (
            (HEADER_INVOCATION_ID, invocation_id),
            (HEADER_FUNCTION_ERR_TYPE, "Handled"),
            (HEADER_AUTH_TOKEN, self.auth_token),
    ):
        request.add_header(header, value)
    urlopen(request)
    runtime_logger.info('Posted handler error for invocation id [{}]'.format(invocation_id))
with specifid :code:`invocation_id`
:param function_arn: Arn of the Lambda function which has the handler error message.
:type function_arn: string
:param invocation_id: Invocation ID of the work that is being requested
:type invocation_id: string
:param handler_err: the error message caught from handler
:type handler_err: string |
def delete_activity(self, activity_id):
    """Deletes the ``Activity`` identified by the given ``Id``.
    arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
        to delete
    raise: NotFound - an ``Activity`` was not found identified by
        the given ``Id``
    raise: NullArgument - ``activity_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.delete_resource_template
    collection = JSONClientValidated('learning',
                                     collection='Activity',
                                     runtime=self._runtime)
    if not isinstance(activity_id, ABCId):
        raise errors.InvalidArgument('the argument is not a valid OSID Id')
    # Look the document up first, restricted by the session's view filter,
    # so a missing/filtered Activity surfaces as an error before any delete.
    activity_map = collection.find_one(
        dict({'_id': ObjectId(activity_id.get_identifier())},
             **self._view_filter()))
    # Instantiate the object so its own _delete() cleanup/cascade logic runs
    # before the raw document is removed from the collection.
    objects.Activity(osid_object_map=activity_map, runtime=self._runtime, proxy=self._proxy)._delete()
    collection.delete_one({'_id': ObjectId(activity_id.get_identifier())}) | Deletes the ``Activity`` identified by the given ``Id``.
arg: activity_id (osid.id.Id): the ``Id`` of the ``Activity``
to delete
raise: NotFound - an ``Activity`` was not found identified by
the given ``Id``
raise: NullArgument - ``activity_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def getOutputElementCount(self, name):
"""
Returns the size of the output array
"""
if name in ["activeCells", "learnableCells", "sensoryAssociatedCells"]:
return self.cellCount * self.moduleCount
else:
raise Exception("Invalid output name specified: " + name) | Returns the size of the output array |
def _open_interface(self, conn_id, iface, callback):
    """Open an interface on this device
    Args:
        conn_id (int): the unique identifier for the connection
        iface (string): the interface name to open
        callback (callback): Callback to be called when this command finishes
            callback(conn_id, adapter_id, success, failure_reason)
    """
    try:
        context = self.conns.get_context(conn_id)
    except ArgumentError:
        # Unknown connection: report failure through the callback (the
        # adapter's async error convention) instead of raising.
        callback(conn_id, self.id, False, "Could not find connection information")
        return
    # Register the pending operation (with its timeout) before publishing,
    # so a fast response cannot arrive for an operation we aren't tracking.
    self.conns.begin_operation(conn_id, 'open_interface', callback, self.get_config('default_timeout'))
    topics = context['topics']
    open_iface_message = {'key': context['key'], 'type': 'command', 'operation': 'open_interface', 'client': self.name, 'interface': iface}
    self.client.publish(topics.action, open_iface_message) | Open an interface on this device
Args:
conn_id (int): the unique identifier for the connection
iface (string): the interface name to open
callback (callback): Callback to be called when this command finishes
callback(conn_id, adapter_id, success, failure_reason) |
def movies_released_in(self, year):
"""Return list of movies that were released in certain year.
:param year: Release year
:type year: int
:rtype: list[movies.models.Movie]
:return: List of movie instances.
"""
return [movie for movie in self._movie_finder.find_all()
if movie.year == year] | Return list of movies that were released in certain year.
:param year: Release year
:type year: int
:rtype: list[movies.models.Movie]
:return: List of movie instances. |
def set_prev_hard(self):
    """
    Set the hardness/softness flag on the preceding consonant, based on
    the current letter.
    """
    prev = self.get_prev_letter()
    # Nothing to do at the start of the word.
    if not prev:
        return
    # Hardness/softness only applies to consonants.
    if not prev.is_consonant():
        return
    if self.is_softener(prev):
        # The current letter softens the preceding consonant.
        prev.set_hard(False)
    elif self.letter in self.vovels_set_hard:
        # A hard-indicating vowel: the preceding consonant is hard.
        prev.set_hard(True) | Выставляет параметры твёрдости/мягкости, для предыдущих согласных.
def parse_connection_option(
    header: str, pos: int, header_name: str
) -> Tuple[ConnectionOption, int]:
    """
    Parse a Connection option from ``header`` at the given position.
    Return the protocol value and the new position.
    Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs.
    """
    # A Connection option is syntactically a single HTTP token; the cast
    # only narrows the type for the checker, it does not change the value.
    item, pos = parse_token(header, pos, header_name)
    return cast(ConnectionOption, item), pos | Parse a Connection option from ``header`` at the given position.
Return the protocol value and the new position.
Raise :exc:`~websockets.exceptions.InvalidHeaderFormat` on invalid inputs. |
def _set_ext_src_vtep_ip_any(self, v, load=False):
    """
    Setter method for ext_src_vtep_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/ext_src_vtep_ip_any (empty)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ext_src_vtep_ip_any is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ext_src_vtep_ip_any() directly.
    """
    # NOTE(review): this setter appears to be machine-generated (pyangbind
    # style); keep edits mechanical and consistent with sibling setters.
    if hasattr(v, "_utype"):
        # Unwrap a typed wrapper into its underlying value first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ext-src-vtep-ip-any", rest_name="src-vtep-ip-any", parent=self, choice=(u'choice-ext-src-vtep-ip', u'case-ext-src-vtep-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: any', u'alt-name': u'src-vtep-ip-any', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
        # Report the expected YANG type alongside the generator expression
        # so callers can see exactly what a compatible value looks like.
        raise ValueError({
            'error-string': """ext_src_vtep_ip_any must be of a type compatible with empty""",
            'defined-type': "empty",
            'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ext-src-vtep-ip-any", rest_name="src-vtep-ip-any", parent=self, choice=(u'choice-ext-src-vtep-ip', u'case-ext-src-vtep-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: any', u'alt-name': u'src-vtep-ip-any', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""",
        })
    self.__ext_src_vtep_ip_any = t
    if hasattr(self, '_set'):
        self._set() | Setter method for ext_src_vtep_ip_any, mapped from YANG variable /overlay/access_list/type/vxlan/extended/ext_seq/ext_src_vtep_ip_any (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_ext_src_vtep_ip_any is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ext_src_vtep_ip_any() directly. |
def _encrypt(data):
    """Equivalent to OpenSSL using 256 bit AES in CBC mode"""
    BS = AES.block_size
    def pad(s):
        # PKCS#7-style padding: append n bytes, each of value n, where n is
        # the count needed to reach a multiple of the block size.
        n = BS - len(s) % BS
        char = chr(n).encode('utf8')
        return s + n * char
    password = settings.GECKOBOARD_PASSWORD
    # Mirror OpenSSL's salted layout: 8-byte "Salted__" magic + random salt
    # filling the rest of one block.
    salt = Random.new().read(BS - len('Salted__'))
    key, iv = _derive_key_and_iv(password, salt, 32, BS)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    encrypted = b'Salted__' + salt + cipher.encrypt(pad(data))
    return base64.b64encode(encrypted) | Equivalent to OpenSSL using 256 bit AES in CBC mode
def create(self, server):
"""Create the task on the server"""
if len(self.geometries) == 0:
raise Exception('no geometries')
return server.post(
'task_admin',
self.as_payload(),
replacements={
'slug': self.__challenge__.slug,
'identifier': self.identifier}) | Create the task on the server |
def _validate_all_tags_are_used(metadata):
"""Ensure all tags are used in some filter."""
tag_names = set([tag_name for tag_name, _ in metadata.tags])
filter_arg_names = set()
for location, _ in metadata.registered_locations:
for filter_info in metadata.get_filter_infos(location):
for filter_arg in filter_info.args:
if is_tag_argument(filter_arg):
filter_arg_names.add(get_directive_argument_name(filter_arg))
unused_tags = tag_names - filter_arg_names
if unused_tags:
raise GraphQLCompilationError(u'This GraphQL query contains @tag directives whose values '
u'are not used: {}. This is not allowed. Please either use '
u'them in a filter or remove them entirely.'
.format(unused_tags)) | Ensure all tags are used in some filter. |
def from_iter(self, iterable):
# type: (Any, Any) -> Any
'''Takes an object and an iterable and produces a new object that is
a copy of the original with data from ``iterable`` reincorporated. It
is intended as the inverse of the ``to_iter`` function. Any state in
``self`` that is not modelled by the iterable should remain unchanged.
The following equality should hold for your definition:
.. code-block:: python
from_iter(self, to_iter(self)) == self
This function is used by EachLens to synthesise states from iterables,
allowing it to focus every element of an iterable state.
The corresponding method call for this hook is
``obj._lens_from_iter(iterable)``.
There is no default implementation.
'''
try:
self._lens_from_iter
except AttributeError:
message = 'Don\'t know how to create instance of {} from iterable'
raise NotImplementedError(message.format(type(self)))
else:
return self._lens_from_iter(iterable) | Takes an object and an iterable and produces a new object that is
a copy of the original with data from ``iterable`` reincorporated. It
is intended as the inverse of the ``to_iter`` function. Any state in
``self`` that is not modelled by the iterable should remain unchanged.
The following equality should hold for your definition:
.. code-block:: python
from_iter(self, to_iter(self)) == self
This function is used by EachLens to synthesise states from iterables,
allowing it to focus every element of an iterable state.
The corresponding method call for this hook is
``obj._lens_from_iter(iterable)``.
There is no default implementation. |
def on_while(self, node): # ('test', 'body', 'orelse')
    """While blocks.

    Re-evaluates ``node.test`` before each pass and runs the body,
    honouring ``break``/``continue`` via the interpreter's ``_interrupt``
    flag. The ``orelse`` suite runs only when the loop finishes without a
    ``break``, mirroring Python's ``while ... else`` semantics.
    """
    while self.run(node.test):
        self._interrupt = None
        for tnode in node.body:
            self.run(tnode)
            # A break/continue inside the body stops this pass early.
            if self._interrupt is not None:
                break
        # Break terminates the whole loop (and skips the else suite below);
        # continue merely falls through to the next test evaluation.
        if isinstance(self._interrupt, ast.Break):
            break
    else:
        # Loop condition became false without a break: run the else suite.
        for tnode in node.orelse:
            self.run(tnode)
    # Clear the flag so it does not leak into the enclosing block.
    self._interrupt = None | While blocks.
def _set_packet_error_counters(self, v, load=False):
    """
    Setter method for packet_error_counters, mapped from YANG variable /mpls_state/rsvp/statistics/packet_error_counters (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_packet_error_counters is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_packet_error_counters() directly.
    YANG Description: RSVP error packet counters
    """
    # NOTE(review): this setter appears to be machine-generated (pyangbind
    # style); keep edits mechanical and consistent with sibling setters.
    if hasattr(v, "_utype"):
        # Unwrap a typed wrapper into its underlying value first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=packet_error_counters.packet_error_counters, is_container='container', presence=False, yang_name="packet-error-counters", rest_name="packet-error-counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-packet-error-counters', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Report the expected YANG type alongside the generator expression
        # so callers can see exactly what a compatible value looks like.
        raise ValueError({
            'error-string': """packet_error_counters must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=packet_error_counters.packet_error_counters, is_container='container', presence=False, yang_name="packet-error-counters", rest_name="packet-error-counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-packet-error-counters', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__packet_error_counters = t
    if hasattr(self, '_set'):
        self._set() | Setter method for packet_error_counters, mapped from YANG variable /mpls_state/rsvp/statistics/packet_error_counters (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_packet_error_counters is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_packet_error_counters() directly.
YANG Description: RSVP error packet counters |
def repr_data_size(size_in_bytes, precision=2): # pragma: no cover
"""Return human readable string represent of a file size. Doesn"t support
size greater than 1EB.
For example:
- 100 bytes => 100 B
- 100,000 bytes => 97.66 KB
- 100,000,000 bytes => 95.37 MB
- 100,000,000,000 bytes => 93.13 GB
- 100,000,000,000,000 bytes => 90.95 TB
- 100,000,000,000,000,000 bytes => 88.82 PB
...
Magnitude of data::
1000 kB kilobyte
1000 ** 2 MB megabyte
1000 ** 3 GB gigabyte
1000 ** 4 TB terabyte
1000 ** 5 PB petabyte
1000 ** 6 EB exabyte
1000 ** 7 ZB zettabyte
1000 ** 8 YB yottabyte
"""
if size_in_bytes < 1024:
return "%s B" % size_in_bytes
magnitude_of_data = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
index = 0
while 1:
index += 1
size_in_bytes, mod = divmod(size_in_bytes, 1024)
if size_in_bytes < 1024:
break
template = "{0:.%sf} {1}" % precision
s = template.format(size_in_bytes + mod / 1024.0, magnitude_of_data[index])
return s | Return human readable string represent of a file size. Doesn"t support
size greater than 1EB.
For example:
- 100 bytes => 100 B
- 100,000 bytes => 97.66 KB
- 100,000,000 bytes => 95.37 MB
- 100,000,000,000 bytes => 93.13 GB
- 100,000,000,000,000 bytes => 90.95 TB
- 100,000,000,000,000,000 bytes => 88.82 PB
...
Magnitude of data::
1000 kB kilobyte
1000 ** 2 MB megabyte
1000 ** 3 GB gigabyte
1000 ** 4 TB terabyte
1000 ** 5 PB petabyte
1000 ** 6 EB exabyte
1000 ** 7 ZB zettabyte
1000 ** 8 YB yottabyte |
def setLinkQuality(self, EUIadr, LinkQuality):
    """set custom LinkQualityIn for all receiving messages from the specified EUIadr
    Args:
        EUIadr: a given extended address
        LinkQuality: a given custom link quality
        link quality/link margin mapping table
        3: 21 - 255 (dB)
        2: 11 - 20 (dB)
        1: 3 - 9 (dB)
        0: 0 - 2 (dB)
    Returns:
        True: successful to set the link quality
        False: fail to set the link quality
    """
    # NOTE(review): Python 2 syntax (print statements, `except Exception, e`)
    # — this module predates Python 3.
    print '%s call setLinkQuality' % self.port
    print EUIadr
    print LinkQuality
    try:
        # process EUIadr: render the extended address as a 16-hex-digit,
        # zero-padded string without the '0x'/'L' adornments.
        euiHex = hex(EUIadr)
        euiStr = str(euiHex)
        euiStr = euiStr.rstrip('L')
        address64 = ''
        if '0x' in euiStr:
            address64 = euiStr.lstrip('0x')
            # prepend 0 at the beginning
            if len(address64) < 16:
                address64 = address64.zfill(16)
        print address64
        cmd = 'macfilter rss add-lqi %s %s' % (address64, str(LinkQuality))
        print cmd
        return self.__sendCommand(cmd)[0] == 'Done'
    except Exception, e:
        # Best-effort: log and fall through (implicitly returning None).
        ModuleHelper.WriteIntoDebugLogger("setLinkQuality() Error: " + str(e)) | set custom LinkQualityIn for all receiving messages from the specified EUIadr
Args:
EUIadr: a given extended address
LinkQuality: a given custom link quality
link quality/link margin mapping table
3: 21 - 255 (dB)
2: 11 - 20 (dB)
1: 3 - 9 (dB)
0: 0 - 2 (dB)
Returns:
True: successful to set the link quality
False: fail to set the link quality |
def submit(recaptcha_challenge_field,
           recaptcha_response_field,
           private_key,
           remoteip,
           use_ssl=False):
    """
    Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
    for the request
    recaptcha_challenge_field -- The value of recaptcha_challenge_field
        from the form
    recaptcha_response_field -- The value of recaptcha_response_field
        from the form
    private_key -- your reCAPTCHA private key
    remoteip -- the user's ip address
    use_ssl -- verify over HTTPS (ignored in NOCAPTCHA mode, which is
        always HTTPS)
    """
    # An empty challenge/response can never validate; fail without a
    # network round-trip.
    if not (recaptcha_response_field and recaptcha_challenge_field and
            len(recaptcha_response_field) and len(recaptcha_challenge_field)):
        return RecaptchaResponse(
            is_valid=False,
            error_code='incorrect-captcha-sol'
        )
    # NOCAPTCHA (reCAPTCHA v2) uses different parameter names and endpoint
    # than the legacy v1 API.
    if getattr(settings, "NOCAPTCHA", False):
        params = urlencode({
            'secret': want_bytes(private_key),
            'response': want_bytes(recaptcha_response_field),
            'remoteip': want_bytes(remoteip),
        })
    else:
        params = urlencode({
            'privatekey': want_bytes(private_key),
            'remoteip': want_bytes(remoteip),
            'challenge': want_bytes(recaptcha_challenge_field),
            'response': want_bytes(recaptcha_response_field),
        })
    if not PY2:
        params = params.encode('utf-8')
    if use_ssl:
        verify_url = 'https://%s/recaptcha/api/verify' % VERIFY_SERVER
    else:
        verify_url = 'http://%s/recaptcha/api/verify' % VERIFY_SERVER
    if getattr(settings, "NOCAPTCHA", False):
        verify_url = 'https://%s/recaptcha/api/siteverify' % VERIFY_SERVER
    req = Request(
        url=verify_url,
        data=params,
        headers={
            'Content-type': 'application/x-www-form-urlencoded',
            'User-agent': 'reCAPTCHA Python'
        }
    )
    httpresp = urlopen(req)
    # v2 answers with JSON ({"success": bool, ...}); v1 answers with two
    # plain-text lines: "true"/"false" followed by an error code.
    if getattr(settings, "NOCAPTCHA", False):
        data = json.loads(httpresp.read().decode('utf-8'))
        return_code = data['success']
        return_values = [return_code, None]
        # Normalise the JSON boolean to the v1-style string codes so the
        # final comparison below works for both API versions.
        if return_code:
            return_code = 'true'
        else:
            return_code = 'false'
    else:
        return_values = httpresp.read().splitlines()
        return_code = return_values[0]
    httpresp.close()
    if (return_code == "true"):
        return RecaptchaResponse(is_valid=True)
    else:
        return RecaptchaResponse(is_valid=False, error_code=return_values[1]) | Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_challenge_field -- The value of recaptcha_challenge_field
from the form
recaptcha_response_field -- The value of recaptcha_response_field
from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address |
def copy(self):
"""
Return a copy of this object.
"""
self_copy = self.dup()
self_copy._scopes = copy.copy(self._scopes)
return self_copy | Return a copy of this object. |
def get_var(self, name, recurse=True):
"""Return the first var of name ``name`` in the current
scope stack (remember, vars are the ones that parse the
input stream)
:name: The name of the id
:recurse: Whether parent scopes should also be searched (defaults to True)
:returns: TODO
"""
self._dlog("getting var '{}'".format(name))
return self._search("vars", name, recurse) | Return the first var of name ``name`` in the current
scope stack (remember, vars are the ones that parse the
input stream)
:name: The name of the id
:recurse: Whether parent scopes should also be searched (defaults to True)
:returns: TODO |
def draw(self, x, y):
    """Places the flattened canvas in NodeBox.
    Exports to a temporary PNG file.
    Draws the PNG in NodeBox using the image() command.
    Removes the temporary file.
    """
    # NOTE(review): Python 2-era code (the `md5` module was removed in
    # Python 3 in favour of hashlib).
    try:
        from time import time
        import md5
        from os import unlink
        # Hash the current time to get a (practically) unique temp filename.
        m = md5.new()
        m.update(str(time()))
        filename = "photobot" + str(m.hexdigest()) + ".png"
        self.export(filename)
        _ctx.image(filename, x, y)
        unlink(filename)
    except:
        # Deliberate best-effort: drawing failures are silently ignored so a
        # bad export never crashes the host sketch. Swallows all errors.
        pass | Places the flattened canvas in NodeBox.
Exports to a temporary PNG file.
Draws the PNG in NodeBox using the image() command.
Removes the temporary file. |
def normalize_response_value(rv):
""" Normalize the response value into a 3-tuple (rv, status, headers)
:type rv: tuple|*
:returns: tuple(rv, status, headers)
:rtype: tuple(Response|JsonResponse|*, int|None, dict|None)
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
return rv, status, headers | Normalize the response value into a 3-tuple (rv, status, headers)
:type rv: tuple|*
:returns: tuple(rv, status, headers)
:rtype: tuple(Response|JsonResponse|*, int|None, dict|None) |
def match_input_fmt(self, fmt_list):
"""Given a list of Fortran format specifiers, e.g., ['I5', '2X', 'F4.1'],
this function constructs a list of tuples for matching an input
string against those format specifiers."""
rexp_list = []
for fmt in fmt_list:
rexp_list.extend(self.match_input_fmt_1(fmt))
return rexp_list | Given a list of Fortran format specifiers, e.g., ['I5', '2X', 'F4.1'],
this function constructs a list of tuples for matching an input
string against those format specifiers. |
def delete(self, client=None, reload_data=False):
    """API call: delete the project via a ``DELETE`` request.
    See
    https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/delete
    This actually changes the status (``lifecycleState``) from ``ACTIVE``
    to ``DELETE_REQUESTED``.
    Later (it's not specified when), the project will move into the
    ``DELETE_IN_PROGRESS`` state, which means the deleting has actually
    begun.
    :type client: :class:`google.cloud.resource_manager.client.Client` or
                  :data:`NoneType <types.NoneType>`
    :param client: the client to use. If not passed, falls back to
                   the client stored on the current project.
    :type reload_data: bool
    :param reload_data: Whether to reload the project with the latest
                        state. If you want to get the updated status,
                        you'll want this set to :data:`True` as the DELETE
                        method doesn't send back the updated project.
                        Default: :data:`False`.
    """
    client = self._require_client(client)
    client._connection.api_request(method="DELETE", path=self.path)
    # If the reload flag is set, reload the project.
    if reload_data:
        self.reload() | API call: delete the project via a ``DELETE`` request.
See
https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/delete
This actually changes the status (``lifecycleState``) from ``ACTIVE``
to ``DELETE_REQUESTED``.
Later (it's not specified when), the project will move into the
``DELETE_IN_PROGRESS`` state, which means the deleting has actually
begun.
:type client: :class:`google.cloud.resource_manager.client.Client` or
:data:`NoneType <types.NoneType>`
:param client: the client to use. If not passed, falls back to
the client stored on the current project.
:type reload_data: bool
:param reload_data: Whether to reload the project with the latest
state. If you want to get the updated status,
you'll want this set to :data:`True` as the DELETE
method doesn't send back the updated project.
Default: :data:`False`. |
def find_and_reserve_fcp(self, assigner_id):
"""reserve the fcp to assigner_id
The function to reserve a fcp for user
1. Check whether assigner_id has a fcp already
if yes, make the reserve of that record to 1
2. No fcp, then find a fcp and reserve it
fcp will be returned, or None indicate no fcp
"""
fcp_list = self.db.get_from_assigner(assigner_id)
if not fcp_list:
new_fcp = self.db.find_and_reserve()
if new_fcp is None:
LOG.info("no more fcp to be allocated")
return None
LOG.debug("allocated %s fcp for %s assigner" %
(new_fcp, assigner_id))
return new_fcp
else:
# we got it from db, let's reuse it
old_fcp = fcp_list[0][0]
self.db.reserve(fcp_list[0][0])
return old_fcp | reserve the fcp to assigner_id
The function to reserve a fcp for user
1. Check whether assigner_id has a fcp already
if yes, make the reserve of that record to 1
2. No fcp, then find a fcp and reserve it
fcp will be returned, or None indicate no fcp |
def patch_ref(self, sha):
    """ Patch reference on the origin master branch
    :param sha: Sha to use for the branch
    :return: sha of the updated ref on success, or a ProxyError
    :rtype: str or self.ProxyError
    """
    uri = "{api}/repos/{origin}/git/refs/heads/{branch}".format(
        api=self.github_api_url,
        origin=self.origin,
        branch=self.master_fork
    )
    # "force": True allows a non-fast-forward update of the ref.
    data = {
        "sha": sha,
        "force": True
    }
    reply = self.request(
        "PATCH",
        uri,
        data=data
    )
    if reply.status_code == 200:
        dic = json.loads(reply.content.decode("utf-8"))
        return dic["object"]["sha"]
    else:
        # Wrap the GitHub error (status + message) with enough context
        # (uri, payload) for the caller to diagnose the failed patch.
        dic = json.loads(reply.content.decode("utf-8"))
        return self.ProxyError(
            reply.status_code,
            (dic, "message"),
            step="patch",
            context={
                "uri": uri,
                "data": data
            }
        ) | Patch reference on the origin master branch
:param sha: Sha to use for the branch
:return: Status of success
:rtype: str or self.ProxyError |
def main():
    """ Main entry point of the CLI.

    Dispatches on the first positional argument: 'prompt' and 'columns'
    launch the corresponding UIs (with lazy imports so their optional
    dependencies are only required when used); anything else — or no
    argument at all — runs the plain CLI application.
    """
    try:
        args = sys.argv[1:]
        try:
            _, args = getopt.getopt(args, MAIN_OPTS, MAIN_LONG_OPTS)
        except getopt.GetoptError as e:
            error(str(e))
            sys.exit(1)
        if args[0] == 'prompt':
            try:
                from topydo.ui.prompt.Prompt import PromptApplication
                PromptApplication().run()
            except ImportError:
                error("Some additional dependencies for prompt mode were not installed, please install with 'pip3 install topydo[prompt]'")
        elif args[0] == 'columns':
            try:
                from topydo.ui.columns.Main import UIApplication
                UIApplication().run()
            except ImportError:
                error("Some additional dependencies for column mode were not installed, please install with 'pip3 install topydo[columns]'")
            except NameError as err:
                if _WINDOWS:
                    error("Column mode is not supported on Windows.")
                else:
                    error("Could not load column mode: {}".format(err))
        else:
            CLIApplication().run()
    except IndexError:
        # No subcommand given (args[0] raised): fall back to the plain CLI.
        CLIApplication().run() | Main entry point of the CLI.
def _loadConfiguration(self):
    """
    Load module configuration files.
    :return: <void>
    """
    configPath = os.path.join(self.path, "config")
    # Modules without a "config" directory simply contribute no settings.
    if not os.path.isdir(configPath):
        return
    config = Config(configPath)
    # Fold this module's settings into the application-wide configuration.
    Config.mergeDictionaries(config.getData(), self.application.config) | Load module configuration files.
:return: <void> |
def rm(self, path):
    """Delete file or directory at ``path`` on the remote disk.

    Raises YaDiskException when the server reports anything other than
    success.
    """
    resp = self._sendRequest("DELETE", path)
    # The API documents 200 "OK", but the server has been observed to
    # return 204 "No Content"; either way the file/directory was removed,
    # so both are treated as success.
    if not (resp.status_code in (200, 204)):
        raise YaDiskException(resp.status_code, resp.content) | Delete file or directory.
def _check_status(func, read_exception, *args, **kwargs):
"""
Checks the status of a single component by
calling the func with the args. The func is expected to
return a dict with at least an `available=<bool>` key
value pair
:param func func: The function to call
:param read_exception: If an exception is thrown
should the exception message be passed as the
message parameter. If not a generic
message parameter will be added to the dict
:param tuple args: A list of arguments to pass to
to function
:param dict kwargs: a dict of keyword arguments
to pass to the function
:return: a dictionary that includes the state
of the component. At least an 'available'
key is guaranteed
:rtype: dict
"""
try:
return func(*args, **kwargs)
except Exception as e:
_LOG.exception(e)
message = str(e) if read_exception else 'An error occurred while checking the status'
return dict(message=message, available=False) | Checks the status of a single component by
calling the func with the args. The func is expected to
return a dict with at least an `available=<bool>` key
value pair
:param func func: The function to call
:param read_exception: If an exception is thrown
should the exception message be passed as the
message parameter. If not a generic
message parameter will be added to the dict
:param tuple args: A list of arguments to pass to
to function
:param dict kwargs: a dict of keyword arguments
to pass to the function
:return: a dictionary that includes the state
of the component. At least an 'available'
key is guaranteed
:rtype: dict |
def upper_diag_self_prodx(list_):
"""
upper diagnoal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
"""
return [(item1, item2)
for n1, item1 in enumerate(list_)
for n2, item2 in enumerate(list_) if n1 < n2] | upper diagnoal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)] |
def get_depth_pmf(self, depth_bins, default_depth=5.0, bootstrap=None):
    """
    Returns the depth distribution of the catalogue as a probability mass
    function

    :param depth_bins: bin edges used to histogram the depths
    :param default_depth: depth assigned (with probability 1.0) when the
        catalogue has no depth information
    :param bootstrap: passed through to ``get_depth_distribution``
    :returns: PMF of (probability, bin-centre depth) pairs
    """
    if len(self.data['depth']) == 0:
        # If depth information is missing
        return PMF([(1.0, default_depth)])
    # Get the depth distribution
    depth_hist = self.get_depth_distribution(depth_bins,
                                             normalisation=True,
                                             bootstrap=bootstrap)
    # If the histogram does not sum to 1.0 then remove the difference
    # from the lowest bin
    # NOTE(review): the comment above says "lowest bin" but the code adjusts
    # the LAST bin (depth_hist[-1]) — confirm which is intended. Presumably
    # the repeated round-to-3-d.p. makes this loop terminate; verify it
    # cannot oscillate for pathological float sums.
    depth_hist = np.around(depth_hist, 3)
    while depth_hist.sum() - 1.0:
        depth_hist[-1] -= depth_hist.sum() - 1.0
        depth_hist = np.around(depth_hist, 3)
    pmf_list = []
    for iloc, prob in enumerate(depth_hist):
        # Represent each histogram bin by its centre depth.
        pmf_list.append((prob,
                         (depth_bins[iloc] + depth_bins[iloc + 1]) / 2.0))
    return PMF(pmf_list) | Returns the depth distribution of the catalogue as a probability mass
function |
def authenticate(self):
    """
    Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
    subsequent logins will not cause additional e-mails from Apple.

    Raises PyiCloudFailedLoginException when the credentials are rejected.
    """
    logger.info("Authenticating as %s", self.user['apple_id'])
    data = dict(self.user)
    # We authenticate every time, so "remember me" is not needed
    data.update({'extended_login': False})
    try:
        req = self.session.post(
            self._base_login_url,
            params=self.params,
            data=json.dumps(data)
        )
    except PyiCloudAPIResponseError as error:
        msg = 'Invalid email/password combination.'
        raise PyiCloudFailedLoginException(msg, error)
    resp = req.json()
    # The dsid identifies this session on all subsequent webservice calls.
    self.params.update({'dsid': resp['dsInfo']['dsid']})
    # Persist cookies to disk so future runs reuse this login.
    if not os.path.exists(self._cookie_directory):
        os.mkdir(self._cookie_directory)
    self.session.cookies.save()
    logger.debug("Cookies saved to %s", self._get_cookiejar_path())
    self.data = resp
    self.webservices = self.data['webservices']
    logger.info("Authentication completed successfully")
    logger.debug(self.params) | Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
subsequent logins will not cause additional e-mails from Apple. |
def list_default_storage_policy_of_datastore(datastore, service_instance=None):
    '''
    Returns the default storage policy assigned to the given datastore.
    datastore
        Name of the datastore.
        The datastore needs to be visible to the VMware entity the proxy
        points to.
    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.
    .. code-block:: bash
        salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
    '''
    log.trace('Listing the default storage policy of datastore \'%s\'', datastore)
    # Find datastore
    target_ref = _get_proxy_target(service_instance)
    ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref,
                                               datastore_names=[datastore])
    if not ds_refs:
        raise VMwareObjectRetrievalError('Datastore \'{0}\' was not '
                                         'found'.format(datastore))
    # Resolve the policy through the Policy Based Management (PBM) manager
    # and return it as a plain dict.
    profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
    policy = salt.utils.pbm.get_default_storage_policy_of_datastore(
        profile_manager, ds_refs[0])
    return _get_policy_dict(policy) | Returns a list of datastores assign the the storage policies.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1 |
def send_global_velocity(velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors.
This uses the SET_POSITION_TARGET_GLOBAL_INT command with type mask enabling only
velocity components
(http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/#set_position_target_global_int).
Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
velocity persists until it is canceled. The code below should work on either version
(sending the message multiple times does not cause problems).
See the above link for information on the type_mask (0=enable, 1=ignore).
At time of writing, acceleration and yaw bits are ignored.
"""
msg = vehicle.message_factory.set_position_target_global_int_encode(
0, # time_boot_ms (not used)
0, 0, # target system, target component
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT, # frame
0b0000111111000111, # type_mask (only speeds enabled)
0, # lat_int - X Position in WGS84 frame in 1e7 * meters
0, # lon_int - Y Position in WGS84 frame in 1e7 * meters
0, # alt - Altitude in meters in AMSL altitude(not WGS84 if absolute or relative)
# altitude above terrain if GLOBAL_TERRAIN_ALT_INT
velocity_x, # X velocity in NED frame in m/s
velocity_y, # Y velocity in NED frame in m/s
velocity_z, # Z velocity in NED frame in m/s
0, 0, 0, # afx, afy, afz acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0,duration):
vehicle.send_mavlink(msg)
time.sleep(1) | Move vehicle in direction based on specified velocity vectors.
This uses the SET_POSITION_TARGET_GLOBAL_INT command with type mask enabling only
velocity components
(http://dev.ardupilot.com/wiki/copter-commands-in-guided-mode/#set_position_target_global_int).
Note that from AC3.3 the message should be re-sent every second (after about 3 seconds
with no message the velocity will drop back to zero). In AC3.2.1 and earlier the specified
velocity persists until it is canceled. The code below should work on either version
(sending the message multiple times does not cause problems).
See the above link for information on the type_mask (0=enable, 1=ignore).
At time of writing, acceleration and yaw bits are ignored. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.