| code (string, 75-104k chars) | docstring (string, 1-46.9k chars) |
|---|---|
def featurecounts_stats_table(self):
""" Take the parsed stats from the featureCounts report and add them to the
basic stats table at the top of the report """
headers = OrderedDict()
headers['percent_assigned'] = {
'title': '% Assigned',
'description': '% Assigned reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'RdYlGn'
}
headers['Assigned'] = {
'title': '{} Assigned'.format(config.read_count_prefix),
'description': 'Assigned reads ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'PuBu',
'modify': lambda x: float(x) * config.read_count_multiplier,
'shared_key': 'read_count'
}
self.general_stats_addcols(self.featurecounts_data, headers)
|
Take the parsed stats from the featureCounts report and add them to the
basic stats table at the top of the report
|
def argument_kind(args):
# type: (List[Argument]) -> Optional[str]
"""Return the kind of an argument, based on one or more descriptions of the argument.
Return None if the descriptions do not all have the same kind.
"""
kinds = set(arg.kind for arg in args)
if len(kinds) != 1:
return None
return kinds.pop()
|
Return the kind of an argument, based on one or more descriptions of the argument.
Return None if the descriptions do not all have the same kind.
|
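A minimal self-contained sketch of the behaviour described above; the `Argument` namedtuple below is a hypothetical stand-in for the real type, which only needs a `kind` attribute.

```python
from collections import namedtuple

# Hypothetical stand-in for the real Argument type; only `kind` matters here.
Argument = namedtuple("Argument", ["name", "kind"])

def argument_kind(args):
    """Return the shared kind of the argument descriptions, or None if they differ."""
    kinds = set(arg.kind for arg in args)
    return kinds.pop() if len(kinds) == 1 else None

print(argument_kind([Argument("x", "positional"), Argument("x", "positional")]))  # -> positional
print(argument_kind([Argument("x", "positional"), Argument("x", "keyword")]))     # -> None
```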
def slackbuild(self, name, sbo_file):
"""Read SlackBuild file
"""
return URL(self.sbo_url + name + sbo_file).reading()
|
Read SlackBuild file
|
def _import_protobuf_from_file(grpc_pyfile, method_name, service_name = None):
"""
Helper function that tries to import a method from the given _pb2_grpc.py file.
service_name should be provided only in case of a name conflict.
Returns (False, None) in case of failure.
Returns (True, (stub_class, request_class, response_class)) in case of success.
"""
prefix = grpc_pyfile[:-12]
pb2 = __import__("%s_pb2"%prefix)
pb2_grpc = __import__("%s_pb2_grpc"%prefix)
# we take all objects from pb2_grpc module which endswith "Stub", and we remove this postfix to get service_name
all_service_names = [stub_name[:-4] for stub_name in dir(pb2_grpc) if stub_name.endswith("Stub")]
# if service_name was specified we take only this service_name
if (service_name):
if (service_name not in all_service_names):
return False, None
all_service_names = [service_name]
found_services = []
for service_name in all_service_names:
service_descriptor = getattr(pb2, "DESCRIPTOR").services_by_name[service_name]
for method in service_descriptor.methods:
if(method.name == method_name):
request_class = method.input_type._concrete_class
response_class = method.output_type._concrete_class
stub_class = getattr(pb2_grpc, "%sStub"%service_name)
found_services.append(service_name)
if (len(found_services) == 0):
return False, None
if (len(found_services) > 1):
raise Exception("Error while loading protobuf. We found methods %s in multiply services [%s]."
" You should specify service_name."%(method_name, ", ".join(found_services)))
return True, (stub_class, request_class, response_class)
|
Helper function that tries to import a method from the given _pb2_grpc.py file.
service_name should be provided only in case of a name conflict.
Returns (False, None) in case of failure.
Returns (True, (stub_class, request_class, response_class)) in case of success.
|
def get(self, request):
"""
GET /consent/api/v1/data_sharing_consent?username=bob&course_id=id&enterprise_customer_uuid=uuid
*username*
The edX username from whom to get consent.
*course_id*
The course for which consent is granted.
*enterprise_customer_uuid*
The UUID of the enterprise customer that requires consent.
"""
try:
consent_record = self.get_consent_record(request)
if consent_record is None:
return self.get_no_record_response(request)
except ConsentAPIRequestError as invalid_request:
return Response({'error': str(invalid_request)}, status=HTTP_400_BAD_REQUEST)
return Response(consent_record.serialize(), status=HTTP_200_OK)
|
GET /consent/api/v1/data_sharing_consent?username=bob&course_id=id&enterprise_customer_uuid=uuid
*username*
The edX username from whom to get consent.
*course_id*
The course for which consent is granted.
*enterprise_customer_uuid*
The UUID of the enterprise customer that requires consent.
|
def _parse_proc_mount(self):
"""Parse /proc/mounts"""
"""
cgroup /cgroup/cpu cgroup rw,relatime,cpuacct,cpu,release_agent=/sbin/cgroup_clean 0 0
cgroup /cgroup/memory cgroup rw,relatime,memory 0 0
cgroup /cgroup/blkio cgroup rw,relatime,blkio 0 0
cgroup /cgroup/freezer cgroup rw,relatime,freezer 0 0
"""
for line in fileops.readlines('/proc/mounts'):
if 'cgroup' not in line:
continue
items = line.split(' ')
path = items[1]
opts = items[3].split(',')
name = None
for opt in opts:
if opt in self:
name = opt
self.paths[name] = path
if 'name=' in opt:
# We treat name=XXX as its name
name = opt
self.paths[name] = path
self[name] = {}
self[name]['name'] = name
self[name]['enabled'] = True
self[name]['hierarchy'] = 0
self[name]['num_cgroups'] = 0
# release_agent= may appear before name=
for opt in opts:
if 'release_agent=' in opt:
self[name]['release_agent'] = opt.replace('release_agent=', '')
|
Parse /proc/mounts
|
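A self-contained illustration of the field layout the parser relies on: the mount point is field 1 of each /proc/mounts line, and the comma-separated options (subsystem names, `name=`, `release_agent=`) are field 3.

```python
# Example cgroup line from /proc/mounts (as shown in the docstring above).
line = "cgroup /cgroup/cpu cgroup rw,relatime,cpuacct,cpu,release_agent=/sbin/cgroup_clean 0 0"

items = line.split(' ')
path = items[1]             # mount point: '/cgroup/cpu'
opts = items[3].split(',')  # ['rw', 'relatime', 'cpuacct', 'cpu', 'release_agent=/sbin/cgroup_clean']

subsystems = [o for o in opts if o in ('cpu', 'cpuacct', 'memory', 'blkio', 'freezer')]
release_agent = [o.split('=', 1)[1] for o in opts if o.startswith('release_agent=')]
print(path, subsystems, release_agent)  # /cgroup/cpu ['cpuacct', 'cpu'] ['/sbin/cgroup_clean']
```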
def _is_and_or_ternary(node):
"""
Returns true if node is of the form 'condition and true_value or false_value'.
None of condition, true_value or false_value may itself be a complex boolean expression.
"""
return (
isinstance(node, astroid.BoolOp)
and node.op == "or"
and len(node.values) == 2
and isinstance(node.values[0], astroid.BoolOp)
and not isinstance(node.values[1], astroid.BoolOp)
and node.values[0].op == "and"
and not isinstance(node.values[0].values[1], astroid.BoolOp)
and len(node.values[0].values) == 2
)
|
Returns true if node is of the form 'condition and true_value or false_value'.
None of condition, true_value or false_value may itself be a complex boolean expression.
|
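A usage sketch, assuming astroid is installed: `astroid.extract_node` parses a snippet and returns the expression node, whose `op` and `values` attributes are exactly what the check above inspects.

```python
import astroid

# The classic "and/or ternary" idiom.
node = astroid.extract_node("cond and true_value or false_value")

print(isinstance(node, astroid.BoolOp), node.op)                      # True 'or'
print(isinstance(node.values[0], astroid.BoolOp), node.values[0].op)  # True 'and'
# Feeding `node` to _is_and_or_ternary(node) would therefore return True.
```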
def sort_layout(thread, listfile, column=0):
"""
Sort the syntelog table according to chromosomal positions. First orient the
contents against threadbed, then for contents not in threadbed, insert to
the nearest neighbor.
"""
from jcvi.formats.base import DictFile
outfile = listfile.rsplit(".", 1)[0] + ".sorted.list"
threadorder = thread.order
fw = open(outfile, "w")
lt = DictFile(listfile, keypos=column, valuepos=None)
threaded = []
imported = set()
for t in thread:
accn = t.accn
if accn not in lt:
continue
imported.add(accn)
atoms = lt[accn]
threaded.append(atoms)
assert len(threaded) == len(imported)
total = sum(1 for x in open(listfile))
logging.debug("Total: {0}, currently threaded: {1}".format(total, len(threaded)))
fp = open(listfile)
for row in fp:
atoms = row.split()
accn = atoms[0]
if accn in imported:
continue
insert_into_threaded(atoms, threaded, threadorder)
for atoms in threaded:
print("\t".join(atoms), file=fw)
fw.close()
logging.debug("File `{0}` sorted to `{1}`.".format(outfile, thread.filename))
|
Sort the syntelog table according to chromosomal positions. First orient the
contents against threadbed, then for contents not in threadbed, insert to
the nearest neighbor.
|
def construct_formset(self):
"""
Returns an instance of the formset
"""
formset_class = self.get_formset()
if hasattr(self, 'get_extra_form_kwargs'):
klass = type(self).__name__
raise DeprecationWarning(
'Calling {0}.get_extra_form_kwargs is no longer supported. '
'Set `form_kwargs` in {0}.formset_kwargs or override '
'{0}.get_formset_kwargs() directly.'.format(klass),
)
return formset_class(**self.get_formset_kwargs())
|
Returns an instance of the formset
|
def list_patterns(refresh=False, root=None):
'''
List all known patterns from available repos.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
root
operate on a different root directory.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_patterns
'''
if refresh:
refresh_db(root)
return _get_patterns(root=root)
|
List all known patterns from available repos.
refresh
force a refresh if set to True.
If set to False (default) it depends on zypper if a refresh is
executed.
root
operate on a different root directory.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_patterns
|
def attrs(self):
"""provide a copy of this player's attributes as a dictionary"""
ret = dict(self.__dict__) # obtain copy of internal __dict__
del ret["_matches"] # match history is specifically distinguished from player information (and stored separately)
if self.type != c.COMPUTER: # difficulty only matters for computer players
del ret["difficulty"]
return ret
|
provide a copy of this player's attributes as a dictionary
|
def parse(expected, query):
"""
Parse query parameters.
:type expected: `dict` mapping `bytes` to `callable`
:param expected: Mapping of query argument names to argument parsing
callables.
:type query: `dict` mapping `bytes` to `list` of `bytes`
:param query: Mapping of query argument names to lists of argument values,
this is the form that Twisted Web's `IRequest.args
<twisted:twisted.web.iweb.IRequest.args>` value takes.
:rtype: `dict` mapping `bytes` to `object`
:return: Mapping of query argument names to parsed argument values.
"""
return dict(
(key, parser(query.get(key, [])))
for key, parser in expected.items())
|
Parse query parameters.
:type expected: `dict` mapping `bytes` to `callable`
:param expected: Mapping of query argument names to argument parsing
callables.
:type query: `dict` mapping `bytes` to `list` of `bytes`
:param query: Mapping of query argument names to lists of argument values,
this is the form that Twisted Web's `IRequest.args
<twisted:twisted.web.iweb.IRequest.args>` value takes.
:rtype: `dict` mapping `bytes` to `object`
:return: Mapping of query argument names to parsed argument values.
|
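A usage sketch of `parse` (restated here so the snippet is self-contained): `query` mirrors the shape of Twisted Web's `IRequest.args` (bytes keys mapped to lists of bytes), and each parser collapses that list into a single value.

```python
def parse(expected, query):
    """Map query argument names to parsed values (same body as above)."""
    return dict((key, parser(query.get(key, []))) for key, parser in expected.items())

expected = {
    b'name': lambda values: values[0].decode('utf-8') if values else None,
    b'count': lambda values: int(values[0]) if values else 0,
}
query = {b'name': [b'alice'], b'count': [b'3']}   # shape of IRequest.args

print(parse(expected, query))   # {b'name': 'alice', b'count': 3}
```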
def _linux_brdel(br):
'''
Internal, deletes the bridge
'''
brctl = _tool_path('brctl')
return __salt__['cmd.run']('{0} delbr {1}'.format(brctl, br),
python_shell=False)
|
Internal, deletes the bridge
|
def reset(self):
"""
Calls `reset` on all our Preprocessor objects.
Returns:
A list of tensors to be fetched.
"""
fetches = []
for processor in self.preprocessors:
fetches.extend(processor.reset() or [])
return fetches
|
Calls `reset` on all our Preprocessor objects.
Returns:
A list of tensors to be fetched.
|
def load_blotter_args(blotter_name=None, logger=None):
""" Load running blotter's settings (used by clients)
:Parameters:
blotter_name : str
Running Blotter's name (defaults to "auto-detect")
logger : object
Logger to be used (defaults to Blotter's)
:Returns:
args : dict
Running Blotter's arguments
"""
if logger is None:
logger = tools.createLogger(__name__, logging.WARNING)
# find specific name
if blotter_name is not None: # and blotter_name != 'auto-detect':
args_cache_file = tempfile.gettempdir() + "/" + blotter_name.lower() + ".qtpylib"
if not os.path.exists(args_cache_file):
logger.critical(
"Cannot connect to running Blotter [%s]", blotter_name)
if os.isatty(0):
sys.exit(0)
return []
# no name provided - connect to last running
else:
blotter_files = sorted(
glob.glob(tempfile.gettempdir() + "/*.qtpylib"), key=os.path.getmtime)
if not blotter_files:
logger.critical(
"Cannot connect to running Blotter [%s]", blotter_name)
if os.isatty(0):
sys.exit(0)
return []
args_cache_file = blotter_files[-1]
args = pickle.load(open(args_cache_file, "rb"))
args['as_client'] = True
return args
|
Load running blotter's settings (used by clients)
:Parameters:
blotter_name : str
Running Blotter's name (defaults to "auto-detect")
logger : object
Logger to be used (defaults to Blotter's)
:Returns:
args : dict
Running Blotter's arguments
|
def nlargest(n, mapping):
"""
Takes a mapping and returns the n keys associated with the largest values
in descending order. If the mapping has fewer than n items, all its keys
are returned.
Equivalent to:
``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))``
Returns
-------
list of up to n keys from the mapping
"""
try:
it = mapping.iteritems()
except AttributeError:
it = iter(mapping.items())
pq = minpq()
try:
for i in range(n):
pq.additem(*next(it))
except StopIteration:
pass
try:
while it:
pq.pushpopitem(*next(it))
except StopIteration:
pass
out = list(pq.popkeys())
out.reverse()
return out
|
Takes a mapping and returns the n keys associated with the largest values
in descending order. If the mapping has fewer than n items, all its keys
are returned.
Equivalent to:
``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))``
Returns
-------
list of up to n keys from the mapping
|
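A usage sketch, assuming `nlargest` above is in scope (it relies on a `minpq` priority queue from its own package); the stdlib expression below gives the same result and includes the `n` argument.

```python
scores = {'a': 3, 'b': 10, 'c': 7, 'd': 1}

# Using the function above (assumes it is importable from its module):
print(nlargest(2, scores))   # ['b', 'c']

# Pure-stdlib equivalent for comparison:
import heapq
print([k for k, _ in heapq.nlargest(2, scores.items(), key=lambda kv: kv[1])])  # ['b', 'c']
```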
def get_edge_type(self, edge_type):
"""Returns all edges with the specified edge type.
Parameters
----------
edge_type : int
An integer specifying what type of edges to return.
Returns
-------
out : list of 2-tuples
A list of 2-tuples representing the edges in the graph
with the specified edge type.
Examples
--------
Let's get type 2 edges from the following graph
>>> import queueing_tool as qt
>>> adjacency = {
... 0: {1: {'edge_type': 2}},
... 1: {2: {'edge_type': 1},
... 3: {'edge_type': 4}},
... 2: {0: {'edge_type': 2}},
... 3: {3: {'edge_type': 0}}
... }
>>> G = qt.QueueNetworkDiGraph(adjacency)
>>> ans = G.get_edge_type(2)
>>> ans.sort()
>>> ans
[(0, 1), (2, 0)]
"""
edges = []
for e in self.edges():
if self.adj[e[0]][e[1]].get('edge_type') == edge_type:
edges.append(e)
return edges
|
Returns all edges with the specified edge type.
Parameters
----------
edge_type : int
An integer specifying what type of edges to return.
Returns
-------
out : list of 2-tuples
A list of 2-tuples representing the edges in the graph
with the specified edge type.
Examples
--------
Let's get type 2 edges from the following graph
>>> import queueing_tool as qt
>>> adjacency = {
... 0: {1: {'edge_type': 2}},
... 1: {2: {'edge_type': 1},
... 3: {'edge_type': 4}},
... 2: {0: {'edge_type': 2}},
... 3: {3: {'edge_type': 0}}
... }
>>> G = qt.QueueNetworkDiGraph(adjacency)
>>> ans = G.get_edge_type(2)
>>> ans.sort()
>>> ans
[(0, 1), (2, 0)]
|
def get_hdulist_idx(self, ccdnum):
"""
The SourceCutout is a list of HDUs; this method returns the index of the HDU that corresponds to the given
CCD number. CCDs are numbered from 0, but the first CCD (CCDNUM=0) is often in extension 1 of an MEF.
@param ccdnum: the number of the CCD in the MEF that is being referenced.
@return: the index in self.hdulist that corresponds to the given CCD number.
"""
for (extno, hdu) in enumerate(self.hdulist):
if ccdnum == int(hdu.header.get('EXTVER', -1)) or str(ccdnum) in hdu.header.get('AMPNAME', ''):
return extno
raise ValueError("Failed to find requested CCD Number {} in cutout {}".format(ccdnum,
self))
|
The SourceCutout is a list of HDUs; this method returns the index of the HDU that corresponds to the given
CCD number. CCDs are numbered from 0, but the first CCD (CCDNUM=0) is often in extension 1 of an MEF.
@param ccdnum: the number of the CCD in the MEF that is being referenced.
@return: the index in self.hdulist that corresponds to the given CCD number.
|
def plot_cdf(fignum, data, xlab, sym, title, **kwargs):
""" Makes a plot of the cumulative distribution function.
Parameters
__________
fignum : matplotlib figure number
data : list of data to be plotted - doesn't need to be sorted
sym : matplotlib symbol for plotting, e.g., 'r--' for a red dashed line
**kwargs : optional dictionary with {'color': color, 'linewidth':linewidth}
Returns
__________
x : sorted list of data
y : fraction of cdf
"""
#
#if len(sym)==1:sym=sym+'-'
fig = plt.figure(num=fignum)
# sdata=np.array(data).sort()
sdata = []
for d in data:
sdata.append(d) # have to copy the data to avoid overwriting it!
sdata.sort()
X, Y = [], []
color = ""
for j in range(len(sdata)):
Y.append(old_div(float(j), float(len(sdata))))
X.append(sdata[j])
if 'color' in list(kwargs.keys()):
color = kwargs['color']
if 'linewidth' in list(kwargs.keys()):
lw = kwargs['linewidth']
else:
lw = 1
if color != "":
plt.plot(X, Y, color=sym, linewidth=lw)
else:
plt.plot(X, Y, sym, linewidth=lw)
plt.xlabel(xlab)
plt.ylabel('Cumulative Distribution')
plt.title(title)
return X, Y
|
Makes a plot of the cumulative distribution function.
Parameters
----------
fignum : matplotlib figure number
data : list of data to be plotted - doesn't need to be sorted
xlab : label for the x axis
sym : matplotlib symbol for plotting, e.g., 'r--' for a red dashed line
title : title for the plot
**kwargs : optional dictionary with {'color': color, 'linewidth': linewidth}
Returns
-------
x : sorted list of data
y : fraction of cdf
|
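A minimal usage sketch, assuming `plot_cdf` above is importable from this plotting module and matplotlib is installed; it draws the empirical CDF of random data on figure 1.

```python
import numpy as np
import matplotlib.pyplot as plt

data = np.random.normal(loc=0.0, scale=1.0, size=200).tolist()

# 'r--' draws a red dashed line; figure number 1 is reused if it already exists.
X, Y = plot_cdf(1, data, 'measurement', 'r--', 'Empirical CDF')
plt.show()
```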
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
|
A copy of sphinx.directives.CmdoptionDesc.parse_signature()
|
def prime(self, key, value):
# type: (Hashable, Any) -> DataLoader
"""
Adds the provided key and value to the cache. If the key already exists, no
change is made. Returns itself for method chaining.
"""
cache_key = self.get_cache_key(key)
# Only add the key if it does not already exist.
if cache_key not in self._promise_cache:
# Cache a rejected promise if the value is an Error, in order to match
# the behavior of load(key).
if isinstance(value, Exception):
promise = Promise.reject(value)
else:
promise = Promise.resolve(value)
self._promise_cache[cache_key] = promise
return self
|
Adds the provided key and value to the cache. If the key already exists, no
change is made. Returns itself for method chaining.
|
def content(self):
"""以处理过的Html代码形式返回答案内容.
:return: 答案内容
:rtype: str
"""
answer_wrap = self.soup.find('div', id='zh-question-answer-wrap')
content = answer_wrap.find('div', class_='zm-editable-content')
content = answer_content_process(content)
return content
|
Return the answer content as processed HTML.
:return: the answer content
:rtype: str
|
def enable(self):
"""
(Re)enable the cache
"""
logger.debug('enable()')
self.options.enabled = True
logger.info('cache enabled')
|
(Re)enable the cache
|
def fail(message, exception_data=None):
"""
Print a failure message and exit nonzero
"""
print(message, file=sys.stderr)
if exception_data:
print(repr(exception_data))
sys.exit(1)
|
Print a failure message and exit nonzero
|
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
# type: (bool, Container[str], bool, bool, bool) -> List[Distribution]
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``include_editables`` is False, don't report editables.
If ``editables_only`` is True, only report editables.
If ``user_only`` is True, only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
# because of pkg_resources vendoring, mypy cannot find stub in typeshed
return [d for d in pkg_resources.working_set # type: ignore
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
|
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``include_editables`` is False, don't report editables.
If ``editables_only`` is True, only report editables.
If ``user_only`` is True, only report installations in the user
site directory.
|
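A hedged usage sketch: this helper lives inside pip's internal utilities, and the exact import path below is an assumption that varies between pip versions.

```python
# Assumption: importable from pip's internals (path differs across pip versions).
from pip._internal.utils.misc import get_installed_distributions

for dist in get_installed_distributions(local_only=False):
    # Each entry is a pkg_resources.Distribution with project_name and version.
    print(dist.project_name, dist.version)
```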
def _fill_empty_sessions(self, fill_subjects, fill_visits):
"""
Fill in tree with additional empty subjects and/or visits to
allow the study to pull its inputs from external repositories
"""
if fill_subjects is None:
fill_subjects = [s.id for s in self.subjects]
if fill_visits is None:
fill_visits = [v.id for v in self.complete_visits]
for subject_id in fill_subjects:
try:
subject = self.subject(subject_id)
except ArcanaNameError:
subject = self._subjects[subject_id] = Subject(
subject_id, [], [], [])
for visit_id in fill_visits:
try:
subject.session(visit_id)
except ArcanaNameError:
session = Session(subject_id, visit_id, [], [])
subject._sessions[visit_id] = session
try:
visit = self.visit(visit_id)
except ArcanaNameError:
visit = self._visits[visit_id] = Visit(
visit_id, [], [], [])
visit._sessions[subject_id] = session
|
Fill in tree with additional empty subjects and/or visits to
allow the study to pull its inputs from external repositories
|
def _build_response(self):
"""
Builds the composite response to be output by the module by looping
through all events and formatting the necessary strings.
Returns: A composite containing the individual response for each event.
"""
responses = []
self.event_urls = []
for index, event in enumerate(self.events):
self.py3.threshold_get_color(index + 1, "event")
self.py3.threshold_get_color(index + 1, "time")
event_dict = {}
event_dict["summary"] = event.get("summary")
event_dict["location"] = event.get("location")
event_dict["description"] = event.get("description")
self.event_urls.append(event["htmlLink"])
if event["start"].get("date") is not None:
start_dt = self._gstr_to_date(event["start"].get("date"))
end_dt = self._gstr_to_date(event["end"].get("date"))
else:
start_dt = self._gstr_to_datetime(event["start"].get("dateTime"))
end_dt = self._gstr_to_datetime(event["end"].get("dateTime"))
if end_dt < datetime.datetime.now(tzlocal()):
continue
event_dict["start_time"] = self._datetime_to_str(start_dt, self.format_time)
event_dict["end_time"] = self._datetime_to_str(end_dt, self.format_time)
event_dict["start_date"] = self._datetime_to_str(start_dt, self.format_date)
event_dict["end_date"] = self._datetime_to_str(end_dt, self.format_date)
time_delta = self._delta_time(start_dt)
if time_delta["days"] < 0:
time_delta = self._delta_time(end_dt)
is_current = True
else:
is_current = False
event_dict["format_timer"] = self._format_timedelta(
index, time_delta, is_current
)
if self.warn_threshold > 0:
self._check_warn_threshold(time_delta, event_dict)
event_formatted = self.py3.safe_format(
self.format_event,
{
"is_toggled": self.button_states[index],
"summary": event_dict["summary"],
"location": event_dict["location"],
"description": event_dict["description"],
"start_time": event_dict["start_time"],
"end_time": event_dict["end_time"],
"start_date": event_dict["start_date"],
"end_date": event_dict["end_date"],
"format_timer": event_dict["format_timer"],
},
)
self.py3.composite_update(event_formatted, {"index": index})
responses.append(event_formatted)
self.no_update = False
format_separator = self.py3.safe_format(self.format_separator)
self.py3.composite_update(format_separator, {"index": "sep"})
responses = self.py3.composite_join(format_separator, responses)
return {"events": responses}
|
Builds the composite response to be output by the module by looping
through all events and formatting the necessary strings.
Returns: A composite containing the individual response for each event.
|
def _source_info():
"""
Get information from the user's code (two frames up)
to leave breadcrumbs for file, line, class and function.
"""
ofi = inspect.getouterframes(inspect.currentframe())[2]
try:
calling_class = ofi[0].f_locals['self'].__class__
except KeyError:
calling_class = None
# Tuple of file,line,calling_class,function_name
return ofi[1], ofi[2], calling_class, ofi[3]
|
Get information from the user's code (two frames up)
to leave breadcrumbs for file, line, class and function.
|
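A self-contained sketch of the same inspect-based technique, reading one frame up (the direct caller) instead of two.

```python
import inspect

def _caller_info():
    """Return (file, line, function) for the direct caller (one frame up)."""
    outer = inspect.getouterframes(inspect.currentframe())[1]
    # FrameInfo layout: frame, filename, lineno, function, code_context, index
    return outer[1], outer[2], outer[3]

def do_work():
    print(_caller_info())

do_work()   # prints this file, the call-site line number, and '<module>'
```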
def awd_lstm_lm_1150(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(),
root=os.path.join(get_home_dir(), 'models'), **kwargs):
r"""3-layer LSTM language model with weight-drop, variational dropout, and tied weights.
Embedding size is 400, and hidden layer size is 1150.
Parameters
----------
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from
the training set of the dataset.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
The pre-trained model achieves 73.32/69.74 ppl on Val and Test of wikitext-2 respectively.
vocab : gluonnlp.Vocab or None, default None
Vocab object to be used with the language model.
Required when dataset_name is not specified.
pretrained : bool, default False
Whether to load the pre-trained weights for model.
ctx : Context, default CPU
The context in which to load the pre-trained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
Returns
-------
gluon.Block, gluonnlp.Vocab
"""
predefined_args = {'embed_size': 400,
'hidden_size': 1150,
'mode': 'lstm',
'num_layers': 3,
'tie_weights': True,
'dropout': 0.4,
'weight_drop': 0.5,
'drop_h': 0.2,
'drop_i': 0.65,
'drop_e': 0.1}
mutable_args = frozenset(['dropout', 'weight_drop', 'drop_h', 'drop_i', 'drop_e'])
assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
'Cannot override predefined model settings.'
predefined_args.update(kwargs)
return _get_rnn_model(AWDRNN, 'awd_lstm_lm_1150', dataset_name, vocab, pretrained,
ctx, root, **predefined_args)
|
r"""3-layer LSTM language model with weight-drop, variational dropout, and tied weights.
Embedding size is 400, and hidden layer size is 1150.
Parameters
----------
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from
the training set of the dataset.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
The pre-trained model achieves 73.32/69.74 ppl on Val and Test of wikitext-2 respectively.
vocab : gluonnlp.Vocab or None, default None
Vocab object to be used with the language model.
Required when dataset_name is not specified.
pretrained : bool, default False
Whether to load the pre-trained weights for model.
ctx : Context, default CPU
The context in which to load the pre-trained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
Returns
-------
gluon.Block, gluonnlp.Vocab
|
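A usage sketch, assuming GluonNLP exposes this constructor as `gluonnlp.model.awd_lstm_lm_1150`; loading the pretrained wikitext-2 weights downloads them on first use.

```python
import mxnet as mx
import gluonnlp as nlp

# Load the pretrained wikitext-2 model together with its vocabulary.
model, vocab = nlp.model.awd_lstm_lm_1150(dataset_name='wikitext-2',
                                          pretrained=True, ctx=mx.cpu())
print(model)
print(len(vocab))
```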
def workflow_script_reject(self):
"""Copy real analyses to RejectAnalysis, with link to real
create a new worksheet, with the original analyses, and new
duplicates and references to match the rejected
worksheet.
"""
if skip(self, "reject"):
return
workflow = self.portal_workflow
def copy_src_fields_to_dst(src, dst):
# These will be ignored when copying field values between analyses
ignore_fields = [
'UID',
'id',
'title',
'allowDiscussion',
'subject',
'description',
'location',
'contributors',
'creators',
'effectiveDate',
'expirationDate',
'language',
'rights',
'creation_date',
'modification_date',
'Layout', # ws
'Analyses', # ws
]
fields = src.Schema().fields()
for field in fields:
fieldname = field.getName()
if fieldname in ignore_fields:
continue
getter = getattr(src, 'get' + fieldname,
src.Schema().getField(fieldname).getAccessor(src))
setter = getattr(dst, 'set' + fieldname,
dst.Schema().getField(fieldname).getMutator(dst))
if getter is None or setter is None:
# ComputedField
continue
setter(getter())
analysis_positions = {}
for item in self.getLayout():
analysis_positions[item['analysis_uid']] = item['position']
old_layout = []
new_layout = []
# New worksheet
worksheets = self.aq_parent
new_ws = _createObjectByType('Worksheet', worksheets, tmpID())
new_ws.unmarkCreationFlag()
new_ws_id = renameAfterCreation(new_ws)
copy_src_fields_to_dst(self, new_ws)
new_ws.edit(
Number=new_ws_id,
Remarks=self.getRemarks()
)
# Objects are being created inside other contexts, but we want their
# workflow handlers to be aware of which worksheet this is occurring in.
# We save the worksheet in request['context_uid'].
# We reset it again below.... be very sure that this is set to the
# UID of the containing worksheet before invoking any transitions on
# analyses.
self.REQUEST['context_uid'] = new_ws.UID()
# loop all analyses
analyses = self.getAnalyses()
new_ws_analyses = []
old_ws_analyses = []
for analysis in analyses:
# Skip published or verified analyses
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state in ['published', 'verified', 'retracted']:
old_ws_analyses.append(analysis.UID())
# Look up the slot this analysis occupied in the original layout
position = analysis_positions[analysis.UID()]
old_layout.append({'position': position,
'type': 'a',
'analysis_uid': analysis.UID(),
'container_uid': analysis.aq_parent.UID()})
continue
# Normal analyses:
# - Create matching RejectAnalysis inside old WS
# - Link analysis to new WS in same position
# - Copy all field values
# - Clear analysis result, and set Retested flag
if analysis.portal_type == 'Analysis':
reject = _createObjectByType('RejectAnalysis', self, tmpID())
reject.unmarkCreationFlag()
copy_src_fields_to_dst(analysis, reject)
reject.setAnalysis(analysis)
reject.reindexObject()
analysis.edit(
Result=None,
Retested=True,
)
analysis.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(reject.UID())
old_layout.append({'position': position,
'type': 'r',
'analysis_uid': reject.UID(),
'container_uid': self.UID()})
new_ws_analyses.append(analysis.UID())
new_layout.append({'position': position,
'type': 'a',
'analysis_uid': analysis.UID(),
'container_uid': analysis.aq_parent.UID()})
# Reference analyses
# - Create a new reference analysis in the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'ReferenceAnalysis':
service_uid = analysis.getServiceUID()
reference = analysis.aq_parent
new_reference = reference.addReferenceAnalysis(service_uid)
reference_type = new_reference.getReferenceType()
new_analysis_uid = api.get_uid(new_reference)
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type': reference_type,
'analysis_uid': analysis.UID(),
'container_uid': reference.UID()})
new_ws_analyses.append(new_analysis_uid)
new_layout.append({'position': position,
'type': reference_type,
'analysis_uid': new_analysis_uid,
'container_uid': reference.UID()})
workflow.doActionFor(analysis, 'reject')
analysis.reindexObject()
# Duplicate analyses
# - Create a new duplicate inside the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'DuplicateAnalysis':
duplicate_id = new_ws.generateUniqueId('DuplicateAnalysis')
new_duplicate = _createObjectByType('DuplicateAnalysis',
new_ws, duplicate_id)
new_duplicate.unmarkCreationFlag()
copy_src_fields_to_dst(analysis, new_duplicate)
new_duplicate.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type': 'd',
'analysis_uid': analysis.UID(),
'container_uid': self.UID()})
new_ws_analyses.append(new_duplicate.UID())
new_layout.append({'position': position,
'type': 'd',
'analysis_uid': new_duplicate.UID(),
'container_uid': new_ws.UID()})
workflow.doActionFor(analysis, 'reject')
analysis.reindexObject()
new_ws.setAnalyses(new_ws_analyses)
new_ws.setLayout(new_layout)
new_ws.replaces_rejected_worksheet = self.UID()
for analysis in new_ws.getAnalyses():
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state == 'to_be_verified':
# TODO Workflow - Analysis Retest transition within a Worksheet
changeWorkflowState(analysis, "bika_analysis_workflow", "assigned")
self.REQUEST['context_uid'] = self.UID()
self.setLayout(old_layout)
self.setAnalyses(old_ws_analyses)
self.replaced_by = new_ws.UID()
|
Copy real analyses to RejectAnalysis objects (linked back to the originals),
then create a new worksheet with the original analyses, and new
duplicates and references to match the rejected
worksheet.
|
def multiglob_compile(globs, prefix=False):
"""Generate a single "A or B or C" regex from a list of shell globs.
:param globs: Patterns to be processed by :mod:`fnmatch`.
:type globs: iterable of :class:`~__builtins__.str`
:param prefix: If ``True``, then :meth:`~re.RegexObject.match` will
perform prefix matching rather than exact string matching.
:type prefix: :class:`~__builtins__.bool`
:rtype: :class:`re.RegexObject`
"""
if not globs:
# An empty globs list should only match empty strings
return re.compile('^$')
elif prefix:
globs = [x + '*' for x in globs]
return re.compile('|'.join(fnmatch.translate(x) for x in globs))
|
Generate a single "A or B or C" regex from a list of shell globs.
:param globs: Patterns to be processed by :mod:`fnmatch`.
:type globs: iterable of :class:`~__builtins__.str`
:param prefix: If ``True``, then :meth:`~re.RegexObject.match` will
perform prefix matching rather than exact string matching.
:type prefix: :class:`~__builtins__.bool`
:rtype: :class:`re.RegexObject`
|
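A usage sketch for `multiglob_compile` (the function itself only needs `import re, fnmatch`); with `prefix=True` the compiled pattern matches anything beneath the globbed prefix.

```python
# Assumes multiglob_compile (defined above) is in scope.
matcher = multiglob_compile(['*.pyc', '*.swp'])
print(bool(matcher.match('module.pyc')))    # True
print(bool(matcher.match('notes.txt')))     # False

prefix_matcher = multiglob_compile(['/tmp/cache'], prefix=True)
print(bool(prefix_matcher.match('/tmp/cache/entry.dat')))   # True
```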
def _compute_mean(self, C, mag, r):
"""
Compute mean value according to equation 30, page 1021.
"""
mean = (C['c1'] +
self._compute_term1(C, mag) +
self._compute_term2(C, mag, r))
return mean
|
Compute mean value according to equation 30, page 1021.
|
def updateUserTone(conversationPayload, toneAnalyzerPayload, maintainHistory):
"""
updateUserTone processes the Tone Analyzer payload to pull out the emotion,
writing and social tones, and identify the meaningful tones (i.e.,
those tones that meet the specified thresholds).
The conversationPayload json object is updated to include these tones.
@param conversationPayload json object returned by the Watson Conversation
Service
@param toneAnalyzerPayload json object returned by the Watson Tone Analyzer
Service
@returns conversationPayload where the user object has been updated with tone
information from the toneAnalyzerPayload
"""
emotionTone = None
writingTone = None
socialTone = None
# if there is no context in the conversation payload, initialize it
if 'context' not in conversationPayload:
conversationPayload['context'] = {}
if 'user' not in conversationPayload['context']:
conversationPayload['context'] = initUser()
# For convenience sake, define a variable for the user object
user = conversationPayload['context']['user']
# Extract the tones - emotion, writing and social
if toneAnalyzerPayload and toneAnalyzerPayload['document_tone']:
for toneCategory in toneAnalyzerPayload['document_tone']['tone_categories']:
if toneCategory['category_id'] == EMOTION_TONE_LABEL:
emotionTone = toneCategory
if toneCategory['category_id'] == LANGUAGE_TONE_LABEL:
writingTone = toneCategory
if toneCategory['category_id'] == SOCIAL_TONE_LABEL:
socialTone = toneCategory
updateEmotionTone(user, emotionTone, maintainHistory)
updateWritingTone(user, writingTone, maintainHistory)
updateSocialTone(user, socialTone, maintainHistory)
conversationPayload['context']['user'] = user
return conversationPayload
|
updateUserTone processes the Tone Analyzer payload to pull out the emotion,
writing and social tones, and identify the meaningful tones (i.e.,
those tones that meet the specified thresholds).
The conversationPayload json object is updated to include these tones.
@param conversationPayload json object returned by the Watson Conversation
Service
@param toneAnalyzerPayload json object returned by the Watson Tone Analyzer
Service
@returns conversationPayload where the user object has been updated with tone
information from the toneAnalyzerPayload
|
def set_rich_menu_image(self, rich_menu_id, content_type, content, timeout=None):
"""Call upload rich menu image API.
https://developers.line.me/en/docs/messaging-api/reference/#upload-rich-menu-image
Uploads and attaches an image to a rich menu.
:param str rich_menu_id: IDs of the richmenu
:param str content_type: image/jpeg or image/png
:param content: image content as bytes, or file-like object
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
"""
self._post(
'/v2/bot/richmenu/{rich_menu_id}/content'.format(rich_menu_id=rich_menu_id),
data=content,
headers={'Content-Type': content_type},
timeout=timeout
)
|
Call upload rich menu image API.
https://developers.line.me/en/docs/messaging-api/reference/#upload-rich-menu-image
Uploads and attaches an image to a rich menu.
:param str rich_menu_id: IDs of the richmenu
:param str content_type: image/jpeg or image/png
:param content: image content as bytes, or file-like object
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
|
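A usage sketch with the LINE Messaging API SDK; the channel access token, rich menu ID and file name are placeholders.

```python
from linebot import LineBotApi

line_bot_api = LineBotApi('YOUR_CHANNEL_ACCESS_TOKEN')   # placeholder token

# Upload a PNG and attach it to an existing rich menu (placeholder ID).
with open('rich_menu.png', 'rb') as image:
    line_bot_api.set_rich_menu_image('richmenu-0000000000', 'image/png', image)
```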
def setRelay(self, seconds, relay, status, password="00000000"):
"""Serial call to set relay.
Args:
seconds (int): Seconds to hold, zero is hold forever. See :class:`~ekmmeters.RelayInterval`.
relay (int): Selected relay, see :class:`~ekmmeters.Relay`.
status (int): Status to set, see :class:`~ekmmeters.RelayState`
password (str): Optional password
Returns:
bool: True on completion and ACK.
"""
result = False
self.setContext("setRelay")
try:
self.clearCmdMsg()
if len(password) != 8:
self.writeCmdMsg("Invalid password length.")
self.setContext("")
return result
if seconds < 0 or seconds > 9999:
self.writeCmdMsg("Relay duration must be between 0 and 9999.")
self.setContext("")
return result
if not self.requestA():
self.writeCmdMsg("Bad read CRC on setting")
else:
if not self.serialCmdPwdAuth(password):
self.writeCmdMsg("Password failure")
else:
req_str = ""
req_str = ("01573102303038" +
binascii.hexlify(str(relay)).zfill(2) +
"28" +
binascii.hexlify(str(status)).zfill(2) +
binascii.hexlify(str(seconds).zfill(4)) + "2903")
req_str += self.calc_crc16(req_str[2:].decode("hex"))
self.m_serial_port.write(req_str.decode("hex"))
if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
self.writeCmdMsg("Success: 06 returned.")
result = True
self.serialPostEnd()
except:
ekm_log(traceback.format_exc(sys.exc_info()))
self.setContext("")
return result
|
Serial call to set relay.
Args:
seconds (int): Seconds to hold, zero is hold forever. See :class:`~ekmmeters.RelayInterval`.
relay (int): Selected relay, see :class:`~ekmmeters.Relay`.
status (int): Status to set, see :class:`~ekmmeters.RelayState`
password (str): Optional password
Returns:
bool: True on completion and ACK.
|
def resolve_remote(self, uri):
"""Resolve a uri or relative path to a schema."""
try:
return super(LocalRefResolver, self).resolve_remote(uri)
except ValueError:
return super(LocalRefResolver, self).resolve_remote(
'file://' + get_schema_path(uri.rsplit('.json', 1)[0])
)
|
Resolve a uri or relative path to a schema.
|
def _upload(param_dict, timeout, data):
"""
Calls upload either with a local audio file,
or a url. Returns a track object.
"""
param_dict['format'] = 'json'
param_dict['wait'] = 'true'
param_dict['bucket'] = 'audio_summary'
result = util.callm('track/upload', param_dict, POST = True, socket_timeout = 300, data = data)
return _track_from_response(result, timeout)
|
Calls upload either with a local audio file,
or a url. Returns a track object.
|
def blame_incremental(self, rev, file, **kwargs):
"""Iterator for blame information for the given file at the given revision.
Unlike .blame(), this does not return the actual file's contents, only
a stream of BlameEntry tuples.
:param rev: revision specifier, see git-rev-parse for viable options.
:return: lazy iterator of BlameEntry tuples, where the commit
indicates the commit to blame for the line, and range
indicates a span of line numbers in the resulting file.
If you combine all line number ranges outputted by this command, you
should get a continuous range spanning all line numbers in the file.
"""
data = self.git.blame(rev, '--', file, p=True, incremental=True, stdout_as_string=False, **kwargs)
commits = {}
stream = (line for line in data.split(b'\n') if line)
while True:
try:
line = next(stream) # when exhausted, causes a StopIteration, terminating this function
except StopIteration:
return
hexsha, orig_lineno, lineno, num_lines = line.split()
lineno = int(lineno)
num_lines = int(num_lines)
orig_lineno = int(orig_lineno)
if hexsha not in commits:
# Now read the next few lines and build up a dict of properties
# for this commit
props = {}
while True:
try:
line = next(stream)
except StopIteration:
return
if line == b'boundary':
# "boundary" indicates a root commit and occurs
# instead of the "previous" tag
continue
tag, value = line.split(b' ', 1)
props[tag] = value
if tag == b'filename':
# "filename" formally terminates the entry for --incremental
orig_filename = value
break
c = Commit(self, hex_to_bin(hexsha),
author=Actor(safe_decode(props[b'author']),
safe_decode(props[b'author-mail'].lstrip(b'<').rstrip(b'>'))),
authored_date=int(props[b'author-time']),
committer=Actor(safe_decode(props[b'committer']),
safe_decode(props[b'committer-mail'].lstrip(b'<').rstrip(b'>'))),
committed_date=int(props[b'committer-time']))
commits[hexsha] = c
else:
# Discard all lines until we find "filename" which is
# guaranteed to be the last line
while True:
try:
line = next(stream) # will fail if we reach the EOF unexpectedly
except StopIteration:
return
tag, value = line.split(b' ', 1)
if tag == b'filename':
orig_filename = value
break
yield BlameEntry(commits[hexsha],
range(lineno, lineno + num_lines),
safe_decode(orig_filename),
range(orig_lineno, orig_lineno + num_lines))
|
Iterator for blame information for the given file at the given revision.
Unlike .blame(), this does not return the actual file's contents, only
a stream of BlameEntry tuples.
:param rev: revision specifier, see git-rev-parse for viable options.
:return: lazy iterator of BlameEntry tuples, where the commit
indicates the commit to blame for the line, and range
indicates a span of line numbers in the resulting file.
If you combine all line number ranges outputted by this command, you
should get a continuous range spanning all line numbers in the file.
|
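A usage sketch with GitPython, which exposes this method as `Repo.blame_incremental`; each yielded `BlameEntry` carries the commit plus line ranges in the blamed and original file.

```python
from git import Repo

repo = Repo('.')   # assumes the current directory is a git repository

for entry in repo.blame_incremental('HEAD', 'README.md'):
    # entry.linenos / entry.orig_linenos are range objects; entry.commit is a Commit.
    print(entry.commit.hexsha[:8], entry.orig_path,
          entry.linenos.start, entry.linenos.stop - 1)
```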
def gen_image(img, width, height, outfile, img_type='grey'):
"""Save an image with the given parameters."""
assert len(img) == width * height or len(img) == width * height * 3
if img_type == 'grey':
misc.imsave(outfile, img.reshape(width, height))
elif img_type == 'color':
misc.imsave(outfile, img.reshape(3, width, height))
|
Save an image with the given parameters.
|
def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name='app'):
"""
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default.
:param storepass(str): keystore file storepass
:param keypass(str): keystore file keypass
:param keystore(str): keystore file path
:param apk(str): apk file path to be signed
:param alias(str): keystore file alias
:param name(str): signed apk name to be used by zipalign
"""
target = self.get_target()
build_tool = android_helper.get_highest_build_tool(target.split('-')[1])
if keystore is None:
(keystore, storepass, keypass, alias) = android_helper.get_default_keystore()
dist = '%s/%s.apk' % ('/'.join(apk.split('/')[:-1]), name)
android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path)
android_helper.zipalign(apk, dist, build_tool=build_tool, path=self.path)
|
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default.
:param storepass(str): keystore file storepass
:param keypass(str): keystore file keypass
:param keystore(str): keystore file path
:param apk(str): apk file path to be signed
:param alias(str): keystore file alias
:param name(str): signed apk name to be used by zipalign
|
def validate(tool_class, model_class):
"""
Does basic ObjectTool option validation.
"""
if not hasattr(tool_class, 'name'):
raise ImproperlyConfigured("No 'name' attribute found for tool %s." % (
tool_class.__name__
))
if not hasattr(tool_class, 'label'):
raise ImproperlyConfigured("No 'label' attribute found for tool %s." % (
tool_class.__name__
))
if not hasattr(tool_class, 'view'):
raise NotImplementedError("No 'view' method found for tool %s." % (
tool_class.__name__
))
|
Does basic ObjectTool option validation.
|
def run(self):
"""Run directive."""
try:
language = self.arguments[0]
except IndexError:
language = ''
code = '\n'.join(self.content)
literal = docutils.nodes.literal_block(code, code)
literal['classes'].append('code-block')
literal['language'] = language
return [literal]
|
Run directive.
|
def _setup_metric_group_definitions(self):
"""
Return the dict of MetricGroupDefinition objects for this metrics
context, by processing its 'metric-group-infos' property.
"""
# Dictionary of MetricGroupDefinition objects, by metric group name
metric_group_definitions = dict()
for mg_info in self.properties['metric-group-infos']:
mg_name = mg_info['group-name']
mg_def = MetricGroupDefinition(
name=mg_name,
resource_class=_resource_class_from_group(mg_name),
metric_definitions=dict())
for i, m_info in enumerate(mg_info['metric-infos']):
m_name = m_info['metric-name']
m_def = MetricDefinition(
index=i,
name=m_name,
type=_metric_type(m_info['metric-type']),
unit=_metric_unit_from_name(m_name))
mg_def.metric_definitions[m_name] = m_def
metric_group_definitions[mg_name] = mg_def
return metric_group_definitions
|
Return the dict of MetricGroupDefinition objects for this metrics
context, by processing its 'metric-group-infos' property.
|
def register(device, data, facet):
"""
Register a U2F device
data = {
"version": "U2F_V2",
"challenge": string, //b64 encoded challenge
"appId": string, //app_id
}
"""
if isinstance(data, string_types):
data = json.loads(data)
if data['version'] != VERSION:
raise ValueError('Unsupported U2F version: %s' % data['version'])
app_id = data.get('appId', facet)
verify_facet(app_id, facet)
app_param = sha256(app_id.encode('utf8')).digest()
client_data = {
'typ': 'navigator.id.finishEnrollment',
'challenge': data['challenge'],
'origin': facet
}
client_data = json.dumps(client_data)
client_param = sha256(client_data.encode('utf8')).digest()
request = client_param + app_param
p1 = 0x03
p2 = 0
response = device.send_apdu(INS_ENROLL, p1, p2, request)
return {
'registrationData': websafe_encode(response),
'clientData': websafe_encode(client_data)
}
|
Register a U2F device
data = {
"version": "U2F_V2",
"challenge": string, //b64 encoded challenge
"appId": string, //app_id
}
|
def convert_uen(pinyin):
"""uen 转换,还原原始的韵母
iou,uei,uen前面加声母的时候,写成iu,ui,un。
例如niu(牛),gui(归),lun(论)。
"""
return UN_RE.sub(lambda m: m.group(1) + UN_MAP[m.group(2)], pinyin)
|
uen conversion: restore the original final.
When iou, uei, uen follow an initial, they are written as iu, ui, un.
For example niu (牛), gui (归), lun (论).
|
def _buffer_decode(self, input, errors, final):
"""
Decode bytes that may be arriving in a stream, following the Codecs
API.
`input` is the incoming sequence of bytes. `errors` tells us how to
handle errors, though we delegate all error-handling cases to the real
UTF-8 decoder to ensure correct behavior. `final` indicates whether
this is the end of the sequence, in which case we should raise an
error given incomplete input.
Returns as much decoded text as possible, and the number of bytes
consumed.
"""
# decoded_segments are the pieces of text we have decoded so far,
# and position is our current position in the byte string. (Bytes
# before this position have been consumed, and bytes after it have
# yet to be decoded.)
decoded_segments = []
position = 0
while True:
# Use _buffer_decode_step to decode a segment of text.
decoded, consumed = self._buffer_decode_step(
input[position:],
errors,
final
)
if consumed == 0:
# Either there's nothing left to decode, or we need to wait
# for more input. Either way, we're done for now.
break
# Append the decoded text to the list, and update our position.
decoded_segments.append(decoded)
position += consumed
if final:
# _buffer_decode_step must consume all the bytes when `final` is
# true.
assert position == len(input)
return ''.join(decoded_segments), position
|
Decode bytes that may be arriving in a stream, following the Codecs
API.
`input` is the incoming sequence of bytes. `errors` tells us how to
handle errors, though we delegate all error-handling cases to the real
UTF-8 decoder to ensure correct behavior. `final` indicates whether
this is the end of the sequence, in which case we should raise an
error given incomplete input.
Returns as much decoded text as possible, and the number of bytes
consumed.
|
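The same streaming contract can be observed with Python's built-in incremental UTF-8 decoder: an incomplete byte sequence yields nothing until the remaining bytes arrive.

```python
import codecs

decoder = codecs.getincrementaldecoder('utf-8')()

# '€' is the 3-byte sequence b'\xe2\x82\xac'; feed it in two chunks.
print(repr(decoder.decode(b'\xe2\x82', final=False)))   # ''  -> waiting for more input
print(repr(decoder.decode(b'\xac', final=False)))       # '€' -> sequence completed
```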
def staticproperty(func):
"""Use as a decorator on a method definition to make it a class-level attribute (without binding).
This decorator can be applied to a method or a staticmethod. This decorator does not bind any
arguments.
Usage:
>>> other_x = 'value'
>>> class Foo(object):
... @staticproperty
... def x():
... return other_x
...
>>> Foo.x
'value'
Setting or deleting the attribute of this name will overwrite this property.
The docstring of the staticproperty `x` for a class `C` can be obtained by
`C.__dict__['x'].__doc__`.
"""
doc = func.__doc__
if not isinstance(func, staticmethod):
func = staticmethod(func)
return ClassPropertyDescriptor(func, doc)
|
Use as a decorator on a method definition to make it a class-level attribute (without binding).
This decorator can be applied to a method or a staticmethod. This decorator does not bind any
arguments.
Usage:
>>> other_x = 'value'
>>> class Foo(object):
... @staticproperty
... def x():
... return other_x
...
>>> Foo.x
'value'
Setting or deleting the attribute of this name will overwrite this property.
The docstring of the staticproperty `x` for a class `C` can be obtained by
`C.__dict__['x'].__doc__`.
|
def get(company='', company_uri=''):
"""Performs a HTTP GET for a glassdoor page and returns json"""
if not company and not company_uri:
raise Exception("glassdoor.gd.get(company='', company_uri=''): "\
" company or company_uri required")
payload = {}
if not company_uri:
payload.update({'clickSource': 'searchBtn',
'sc.keyword': company
})
uri = '%s/%s' % (GLASSDOOR_API, REVIEWS_URL)
else:
uri = '%s%s' % (GLASSDOOR_API, company_uri)
r = requests.get(uri, params=payload)
soup = BeautifulSoup(r.content)
results = parse(soup)
return results
|
Performs an HTTP GET for a glassdoor page and returns json
|
def from_df(cls, ratings:DataFrame, valid_pct:float=0.2, user_name:Optional[str]=None, item_name:Optional[str]=None,
rating_name:Optional[str]=None, test:DataFrame=None, seed:int=None, path:PathOrStr='.', bs:int=64,
val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None,
device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False) -> 'CollabDataBunch':
"Create a `DataBunch` suitable for collaborative filtering from `ratings`."
user_name = ifnone(user_name, ratings.columns[0])
item_name = ifnone(item_name, ratings.columns[1])
rating_name = ifnone(rating_name,ratings.columns[2])
cat_names = [user_name,item_name]
src = (CollabList.from_df(ratings, cat_names=cat_names, procs=Categorify)
.split_by_rand_pct(valid_pct=valid_pct, seed=seed).label_from_df(cols=rating_name))
if test is not None: src.add_test(CollabList.from_df(test, cat_names=cat_names))
return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device,
collate_fn=collate_fn, no_check=no_check)
|
Create a `DataBunch` suitable for collaborative filtering from `ratings`.
|
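A usage sketch, assuming the fastai v1 collaborative-filtering API (`CollabDataBunch` importable from `fastai.collab`) and a ratings DataFrame whose first three columns are user, item and rating.

```python
import pandas as pd
from fastai.collab import CollabDataBunch   # fastai v1 API (assumption)

ratings = pd.DataFrame({
    'user':   [1, 1, 2, 2, 3, 3],
    'movie':  [10, 20, 10, 30, 20, 30],
    'rating': [4.0, 3.5, 5.0, 2.0, 4.5, 3.0],
})

# Column names default to the first three columns; 20% of rows go to validation.
data = CollabDataBunch.from_df(ratings, valid_pct=0.2, bs=4, seed=42)
```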
def query_subdevice2index(self, ncfile) -> Subdevice2Index:
"""Return a |Subdevice2Index| that maps the (sub)device names to
their position within the given NetCDF file.
Method |NetCDFVariableBase.query_subdevice2index| is based on
|NetCDFVariableBase.query_subdevices|. The returned
|Subdevice2Index| object remembers the NetCDF file the
(sub)device names stem from, allowing for clear error messages:
>>> from hydpy.core.netcdftools import NetCDFVariableBase, str2chars
>>> from hydpy import make_abc_testable, TestIO
>>> from hydpy.core.netcdftools import netcdf4
>>> with TestIO():
... ncfile = netcdf4.Dataset('model.nc', 'w')
>>> Var = make_abc_testable(NetCDFVariableBase)
>>> Var.subdevicenames = [
... 'element3', 'element1', 'element1_1', 'element2']
>>> var = Var('flux_prec', isolate=True, timeaxis=1)
>>> var.insert_subdevices(ncfile)
>>> subdevice2index = var.query_subdevice2index(ncfile)
>>> subdevice2index.get_index('element1_1')
2
>>> subdevice2index.get_index('element3')
0
>>> subdevice2index.get_index('element5')
Traceback (most recent call last):
...
OSError: No data for sequence `flux_prec` and (sub)device \
`element5` in NetCDF file `model.nc` available.
Additionally, |NetCDFVariableBase.query_subdevice2index|
checks for duplicates:
>>> ncfile['station_id'][:] = str2chars(
... ['element3', 'element1', 'element1_1', 'element1'])
>>> var.query_subdevice2index(ncfile)
Traceback (most recent call last):
...
OSError: The NetCDF file `model.nc` contains duplicate (sub)device \
names for variable `flux_prec` (the first found duplicate is `element1`).
>>> ncfile.close()
"""
subdevices = self.query_subdevices(ncfile)
self._test_duplicate_exists(ncfile, subdevices)
subdev2index = {subdev: idx for (idx, subdev) in enumerate(subdevices)}
return Subdevice2Index(subdev2index, self.name, get_filepath(ncfile))
|
Return a |Subdevice2Index| that maps the (sub)device names to
their position within the given NetCDF file.
Method |NetCDFVariableBase.query_subdevice2index| is based on
|NetCDFVariableBase.query_subdevices|. The returned
|Subdevice2Index| object remembers the NetCDF file the
(sub)device names stem from, allowing for clear error messages:
>>> from hydpy.core.netcdftools import NetCDFVariableBase, str2chars
>>> from hydpy import make_abc_testable, TestIO
>>> from hydpy.core.netcdftools import netcdf4
>>> with TestIO():
... ncfile = netcdf4.Dataset('model.nc', 'w')
>>> Var = make_abc_testable(NetCDFVariableBase)
>>> Var.subdevicenames = [
... 'element3', 'element1', 'element1_1', 'element2']
>>> var = Var('flux_prec', isolate=True, timeaxis=1)
>>> var.insert_subdevices(ncfile)
>>> subdevice2index = var.query_subdevice2index(ncfile)
>>> subdevice2index.get_index('element1_1')
2
>>> subdevice2index.get_index('element3')
0
>>> subdevice2index.get_index('element5')
Traceback (most recent call last):
...
OSError: No data for sequence `flux_prec` and (sub)device \
`element5` in NetCDF file `model.nc` available.
Additionally, |NetCDFVariableBase.query_subdevice2index|
checks for duplicates:
>>> ncfile['station_id'][:] = str2chars(
... ['element3', 'element1', 'element1_1', 'element1'])
>>> var.query_subdevice2index(ncfile)
Traceback (most recent call last):
...
OSError: The NetCDF file `model.nc` contains duplicate (sub)device \
names for variable `flux_prec` (the first found duplicate is `element1`).
>>> ncfile.close()
|
def set_levels(self, levels):
"""
Replace the levels of a categorical column.
New levels must be aligned with the old domain. This call has copy-on-write semantics.
:param List[str] levels: A list of strings specifying the new levels. The number of new
levels must match the number of old levels.
:returns: A single-column H2OFrame with the desired levels.
"""
assert_is_type(levels, [str])
return H2OFrame._expr(expr=ExprNode("setDomain", self, False, levels), cache=self._ex._cache)
|
Replace the levels of a categorical column.
New levels must be aligned with the old domain. This call has copy-on-write semantics.
:param List[str] levels: A list of strings specifying the new levels. The number of new
levels must match the number of old levels.
:returns: A single-column H2OFrame with the desired levels.
|
def _to_power_basis_degree8(nodes1, nodes2):
r"""Compute the coefficients of an **intersection polynomial**.
Helper for :func:`to_power_basis` in the case that B |eacute| zout's
`theorem`_ tells us the **intersection polynomial** is degree
:math:`8`. This happens if the two curves have degrees one and eight
or have degrees two and four.
.. note::
This uses a least-squares fit to the function evaluated at the
Chebyshev nodes (scaled and shifted onto ``[0, 1]``). Hence, the
coefficients may be less stable than those produced for smaller
degrees.
Args:
nodes1 (numpy.ndarray): The nodes in the first curve.
nodes2 (numpy.ndarray): The nodes in the second curve.
Returns:
numpy.ndarray: ``9``-array of coefficients.
"""
evaluated = [
eval_intersection_polynomial(nodes1, nodes2, t_val) for t_val in _CHEB9
]
return polynomial.polyfit(_CHEB9, evaluated, 8)
|
r"""Compute the coefficients of an **intersection polynomial**.
Helper for :func:`to_power_basis` in the case that B |eacute| zout's
`theorem`_ tells us the **intersection polynomial** is degree
:math:`8`. This happens if the two curves have degrees one and eight
or have degrees two and four.
.. note::
This uses a least-squares fit to the function evaluated at the
Chebyshev nodes (scaled and shifted onto ``[0, 1]``). Hence, the
coefficients may be less stable than those produced for smaller
degrees.
Args:
nodes1 (numpy.ndarray): The nodes in the first curve.
nodes2 (numpy.ndarray): The nodes in the second curve.
Returns:
numpy.ndarray: ``9``-array of coefficients.
|
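A standalone sketch of the fitting idea used above, not of the bezier internals: evaluate a target function at 9 Chebyshev nodes scaled onto [0, 1] and least-squares fit a degree-8 polynomial. How _CHEB9 is actually built (first-kind points, here) is an assumption.
# Sketch of the Chebyshev-node least-squares fit; the stand-in target function
# replaces eval_intersection_polynomial(nodes1, nodes2, t).
import numpy as np
from numpy.polynomial import chebyshev, polynomial

# 9 Chebyshev points on [-1, 1], shifted and scaled onto [0, 1] (assumed layout).
cheb9 = 0.5 * (chebyshev.chebpts1(9) + 1.0)

def target(t):
    return (t - 0.25) * (t - 0.5) ** 3 * (t - 0.75) ** 4  # a degree-8 test function

evaluated = [target(t) for t in cheb9]
coeffs = polynomial.polyfit(cheb9, evaluated, 8)  # 9 coefficients, degree 8
print(np.allclose(polynomial.polyval(cheb9, coeffs), evaluated))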
def _raise_error_routes(iface, option, expected):
'''
Log and raise an error with a logical formatted message.
'''
msg = _error_msg_routes(iface, option, expected)
log.error(msg)
raise AttributeError(msg)
|
Log and raise an error with a logical formatted message.
|
def convert_coordinates(coords, origin, wgs84, wrapped):
""" Convert coordinates from one crs to another """
if isinstance(coords, list) or isinstance(coords, tuple):
try:
if isinstance(coords[0], list) or isinstance(coords[0], tuple):
return [convert_coordinates(list(c), origin, wgs84, wrapped) for c in coords]
elif isinstance(coords[0], float):
c = list(transform(origin, wgs84, *coords))
if wrapped and c[0] < -170:
c[0] = c[0] + 360
return c
except IndexError:
pass
return None
|
Convert coordinates from one crs to another
|
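A usage sketch with the convert_coordinates function above in scope, assuming the legacy pyproj API that its `transform(origin, wgs84, *coords)` call appears to use; the EPSG codes and coordinates are illustrative only.
# Hedged usage sketch; `convert_coordinates` from above must be importable/in scope.
from pyproj import Proj, transform

origin = Proj(init='epsg:32633')   # UTM zone 33N (illustrative)
wgs84 = Proj(init='epsg:4326')     # lon/lat

point = [500000.0, 4649776.0]
nested = [[500000.0, 4649776.0], [510000.0, 4650000.0]]

print(convert_coordinates(point, origin, wgs84, wrapped=False))
print(convert_coordinates(nested, origin, wgs84, wrapped=False))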
def stringify_summary(summary):
""" stringify summary, in order to dump json file and generate html report.
"""
for index, suite_summary in enumerate(summary["details"]):
if not suite_summary.get("name"):
suite_summary["name"] = "testcase {}".format(index)
for record in suite_summary.get("records"):
meta_datas = record['meta_datas']
__stringify_meta_datas(meta_datas)
meta_datas_expanded = []
__expand_meta_datas(meta_datas, meta_datas_expanded)
record["meta_datas_expanded"] = meta_datas_expanded
record["response_time"] = __get_total_response_time(meta_datas_expanded)
|
Stringify the summary in order to dump a JSON file and generate an HTML report.
|
def __rst2graph(self, rs3_xml_tree):
"""
Reads an RST tree (from an ElementTree representation of an RS3
XML file) and adds all segments (nodes representing text) and
groups (nonterminal nodes in an RST tree) as well as the
relationships that hold between them (typed edges) to this
RSTGraph.
Parameters
----------
rs3_xml_tree : lxml.etree._ElementTree
lxml ElementTree representation of an RS3 XML file
tokenize : bool
If True, the RST segments (i.e. nuclei and satellites) will
be tokenized and added as additional token nodes to the
document graph (with edges from the respective RST segments).
If False, each RST segment will be labeled with the text it
represents.
"""
doc_root = rs3_xml_tree.getroot()
for segment in doc_root.iter('segment'):
self.__add_segment(segment)
for group in doc_root.iter('group'):
self.__add_group(group)
|
Reads an RST tree (from an ElementTree representation of an RS3
XML file) and adds all segments (nodes representing text) and
groups (nonterminal nodes in an RST tree) as well as the
relationships that hold between them (typed edges) to this
RSTGraph.
Parameters
----------
rs3_xml_tree : lxml.etree._ElementTree
lxml ElementTree representation of an RS3 XML file
tokenize : bool
If True, the RST segments (i.e. nuclei and satellites) will
be tokenized and added as additional token nodes to the
document graph (with edges from the respective RST segments).
If False, each RST segment will be labeled with the text it
represents.
|
def _merge_meta(self, encoded_meta, meta):
"""
Merge new meta dict into encoded meta. Returns new encoded meta.
"""
new_meta = None
if meta:
_meta = self._decode_meta(encoded_meta)
for key, value in six.iteritems(meta):
if value is None:
_meta.pop(key, None)
else:
_meta[key] = value
new_meta = self._encode_meta(_meta)
return new_meta
|
Merge new meta dict into encoded meta. Returns new encoded meta.
|
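The merge semantics of _merge_meta shown on plain dicts, skipping the encode/decode round-trip: a value of None deletes the key, any other value overwrites or adds it.
# Same merge rule as above, on plain dicts.
stored = {"owner": "alice", "ttl": "60", "tag": "old"}
update = {"ttl": "120", "tag": None, "color": "blue"}

merged = dict(stored)
for key, value in update.items():
    if value is None:
        merged.pop(key, None)
    else:
        merged[key] = value

print(merged)  # {'owner': 'alice', 'ttl': '120', 'color': 'blue'}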
def modify(self, modification, parameters):
"""
Apply a modification to the underlying point sources, with the
same parameters for all sources
"""
for src in self:
src.modify(modification, parameters)
|
Apply a modification to the underlying point sources, with the
same parameters for all sources
|
def insert(self, table, columns, values, execute=True):
"""Insert a single row into a table."""
# TODO: Can't accept lists?
# Concatenate statement
cols, vals = get_col_val_str(columns)
statement = "INSERT INTO {0} ({1}) VALUES ({2})".format(wrap(table), cols, vals)
# Execute statement
if execute:
self._cursor.execute(statement, values)
self._commit()
self._printer('\tMySQL row successfully inserted into {0}'.format(table))
# Only return statement
else:
return statement
|
Insert a single row into a table.
|
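A sketch of the statement the insert method builds, assuming get_col_val_str returns the column list plus a matching run of %s placeholders and that wrap() quotes the table name with backticks (both are assumptions; those helpers are not shown here).
# Sketch of the statement construction only; values are bound by the cursor, not formatted in.
columns = ['name', 'email']
values = ('Ada', 'ada@example.com')

cols = ', '.join(columns)
vals = ', '.join(['%s'] * len(columns))
statement = "INSERT INTO {0} ({1}) VALUES ({2})".format('`customers`', cols, vals)
print(statement)   # INSERT INTO `customers` (name, email) VALUES (%s, %s)
# cursor.execute(statement, values) would then bind the values safely.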
def get_random_user(self):
"""
Gets a random user from the provider
:returns: Dictionary
"""
from provider.models import User
u = User.objects.order_by('?')[0]
return {"username": u.username, "password": u.password, "fullname": u.fullname}
|
Gets a random user from the provider
:returns: Dictionary
|
def pre_save(self, model_instance, add):
"""Updates username created on ADD only."""
value = super(UserField, self).pre_save(model_instance, add)
if not value and not add:
# fall back to OS user if not accessing through browser
# better than nothing ...
value = self.get_os_username()
setattr(model_instance, self.attname, value)
return value
return value
|
Updates username created on ADD only.
|
def ci(data, statfunction=None, alpha=0.05, n_samples=10000,
method='bca', output='lowhigh', epsilon=0.001, multi=None,
_iter=True):
"""
Given a set of data ``data``, and a statistics function ``statfunction`` that
applies to that data, computes the bootstrap confidence interval for
``statfunction`` on that data. Data points are assumed to be delineated by
axis 0.
Parameters
----------
data: array_like, shape (N, ...) OR tuple of array_like all with shape (N, ...)
Input data. Data points are assumed to be delineated by axis 0. Beyond this,
the shape doesn't matter, so long as ``statfunction`` can be applied to the
array. If a tuple of array_likes is passed, then samples from each array (along
axis 0) are passed in order as separate parameters to the statfunction. The
type of data (single array or tuple of arrays) can be explicitly specified
by the multi parameter.
statfunction: function (data, weights=(weights, optional)) -> value
This function should accept samples of data from ``data``. It is applied
to these samples individually.
If using the ABC method, the function _must_ accept a named ``weights``
parameter which will be an array_like with weights for each sample, and
must return a _weighted_ result. Otherwise this parameter is not used
or required. Note that numpy's np.average accepts this. (default=np.average)
alpha: float or iterable, optional
The percentiles to use for the confidence interval (default=0.05). If this
is a float, the returned values are (alpha/2, 1-alpha/2) percentile confidence
intervals. If it is an iterable, alpha is assumed to be an iterable of
each desired percentile.
n_samples: float, optional
The number of bootstrap samples to use (default=10000)
method: string, optional
The method to use: one of 'pi', 'bca', or 'abc' (default='bca')
output: string, optional
The format of the output. 'lowhigh' gives low and high confidence interval
values. 'errorbar' gives transposed abs(value-confidence interval value) values
that are suitable for use with matplotlib's errorbar function. (default='lowhigh')
epsilon: float, optional (only for ABC method)
The step size for finite difference calculations in the ABC method. Ignored for
all other methods. (default=0.001)
multi: boolean, optional
If False, assume data is a single array. If True, assume data is a tuple/other
iterable of arrays of the same length that should be sampled together. If None,
decide based on whether the data is an actual tuple. (default=None)
Returns
-------
confidences: tuple of floats
The confidence percentiles specified by alpha
Calculation Methods
-------------------
'pi': Percentile Interval (Efron 13.3)
The percentile interval method simply returns the 100*alphath bootstrap
sample's values for the statistic. This is an extremely simple method of
confidence interval calculation. However, it has several disadvantages
compared to the bias-corrected accelerated method, which is the default.
'bca': Bias-Corrected Accelerated (BCa) Non-Parametric (Efron 14.3) (default)
This method is much more complex to explain. However, it gives considerably
better results, and is generally recommended for normal situations. Note
that in cases where the statistic is smooth, and can be expressed with
weights, the ABC method will give approximated results much, much faster.
Note that in a case where the statfunction results in equal output for every
bootstrap sample, the BCa confidence interval is technically undefined, as
the acceleration value is undefined. To match the percentile interval method
and give reasonable output, the implementation of this method returns a
confidence interval of zero width using the 0th bootstrap sample in this
case, and warns the user.
'abc': Approximate Bootstrap Confidence (Efron 14.4, 22.6)
This method provides approximated bootstrap confidence intervals without
actually taking bootstrap samples. This requires that the statistic be
smooth, and allow for weighting of individual points with a weights=
parameter (note that np.average allows this). This is _much_ faster
than all other methods for situations where it can be used.
Examples
--------
To calculate the confidence intervals for the mean of some numbers:
>> boot.ci( np.randn(100), np.average )
Given some data points in arrays x and y calculate the confidence intervals
for all linear regression coefficients simultaneously:
>> boot.ci( (x,y), scipy.stats.linregress )
References
----------
Efron, An Introduction to the Bootstrap. Chapman & Hall 1993
"""
# Deal with the alpha values
if np.iterable(alpha):
alphas = np.array(alpha)
else:
alphas = np.array([alpha/2, 1-alpha/2])
if multi is None:
if isinstance(data, tuple):
multi = True
else:
multi = False
if statfunction is None:
if _iter:
statfunction = np.average
else:
def statfunc_wrapper(x, *args, **kwargs):
return np.average(x, axis=-1, *args, **kwargs)
statfunction = statfunc_wrapper
# Ensure that the data is actually an array. This isn't nice to pandas,
# but pandas seems much much slower and the indexes become a problem.
if not multi:
data = np.array(data)
tdata = (data,)
else:
tdata = tuple( np.array(x) for x in data )
# Deal with ABC *now*, as it doesn't need samples.
if method == 'abc':
n = tdata[0].shape[0]*1.0
nn = tdata[0].shape[0]
I = np.identity(nn)
ep = epsilon / n*1.0
p0 = np.repeat(1.0/n,nn)
try:
t0 = statfunction(*tdata,weights=p0)
except TypeError as e:
raise TypeError("statfunction does not accept correct arguments for ABC ({0})".format(e.message))
di_full = I - p0
tp = np.fromiter((statfunction(*tdata, weights=p0+ep*di)
for di in di_full), dtype=np.float)
tm = np.fromiter((statfunction(*tdata, weights=p0-ep*di)
for di in di_full), dtype=np.float)
t1 = (tp-tm)/(2*ep)
t2 = (tp-2*t0+tm)/ep**2
sighat = np.sqrt(np.sum(t1**2))/n
a = (np.sum(t1**3))/(6*n**3*sighat**3)
delta = t1/(n**2*sighat)
cq = (statfunction(*tdata,weights=p0+ep*delta)-2*t0+statfunction(*tdata,weights=p0-ep*delta))/(2*sighat*ep**2)
bhat = np.sum(t2)/(2*n**2)
curv = bhat/sighat-cq
z0 = nppf(2*ncdf(a)*ncdf(-curv))
Z = z0+nppf(alphas)
za = Z/(1-a*Z)**2
# stan = t0 + sighat * nppf(alphas)
abc = np.zeros_like(alphas)
for i in range(0,len(alphas)):
abc[i] = statfunction(*tdata,weights=p0+za[i]*delta)
if output == 'lowhigh':
return abc
elif output == 'errorbar':
return abs(abc-statfunction(tdata))[np.newaxis].T
else:
raise ValueError("Output option {0} is not supported.".format(output))
# We don't need to generate actual samples; that would take more memory.
# Instead, we can generate just the indexes, and then apply the statfun
# to those indexes.
if _iter:
bootindexes = bootstrap_indexes(tdata[0], n_samples)
stat = np.array([statfunction(*(x[indexes] for x in tdata))
for indexes in bootindexes])
else:
bootindexes = bootstrap_indexes_array(tdata[0], n_samples)
stat = statfunction(*(x[bootindexes] for x in tdata))
stat.sort(axis=0)
# Percentile Interval Method
if method == 'pi':
avals = alphas
# Bias-Corrected Accelerated Method
elif method == 'bca':
# The value of the statistic function applied just to the actual data.
ostat = statfunction(*tdata)
# The bias correction value.
z0 = nppf( ( 1.0*np.sum(stat < ostat, axis=0) ) / n_samples )
# Statistics of the jackknife distribution
jackindexes = jackknife_indexes(tdata[0])
jstat = [statfunction(*(x[indexes] for x in tdata)) for indexes in jackindexes]
jmean = np.mean(jstat,axis=0)
# Temporarily kill numpy warnings:
oldnperr = np.seterr(invalid='ignore')
# Acceleration value
a = np.sum((jmean - jstat)**3, axis=0) / (
6.0 * np.sum((jmean - jstat)**2, axis=0)**1.5)
if np.any(np.isnan(a)):
nanind = np.nonzero(np.isnan(a))
warnings.warn("BCa acceleration values for indexes {} were undefined. \
Statistic values were likely all equal. Affected CI will \
be inaccurate.".format(nanind), InstabilityWarning, stacklevel=2)
zs = z0 + nppf(alphas).reshape(alphas.shape+(1,)*z0.ndim)
avals = ncdf(z0 + zs/(1-a*zs))
np.seterr(**oldnperr)
else:
raise ValueError("Method {0} is not supported.".format(method))
nvals = np.round((n_samples-1)*avals)
oldnperr = np.seterr(invalid='ignore')
if np.any(np.isnan(nvals)):
warnings.warn("Some values were NaN; results are probably unstable " +
"(all values were probably equal)", InstabilityWarning,
stacklevel=2)
if np.any(nvals == 0) or np.any(nvals == n_samples-1):
warnings.warn("Some values used extremal samples; " +
"results are probably unstable.",
InstabilityWarning, stacklevel=2)
elif np.any(nvals < 10) or np.any(nvals >= n_samples-10):
warnings.warn("Some values used top 10 low/high samples; " +
"results may be unstable.",
InstabilityWarning, stacklevel=2)
np.seterr(**oldnperr)
nvals = np.nan_to_num(nvals).astype('int')
if output == 'lowhigh':
if nvals.ndim == 1:
# All nvals are the same. Simple broadcasting
return stat[nvals]
else:
# Nvals are different for each data point. Not simple broadcasting.
# Each set of nvals along axis 0 corresponds to the data at the same
# point in other axes.
return stat[(nvals, np.indices(nvals.shape)[1:].squeeze())]
elif output == 'errorbar':
if nvals.ndim == 1:
return abs(statfunction(data)-stat[nvals])[np.newaxis].T
else:
return abs(statfunction(data)-stat[(nvals, np.indices(nvals.shape)[1:])])[np.newaxis].T
else:
raise ValueError("Output option {0} is not supported.".format(output))
|
Given a set of data ``data``, and a statistics function ``statfunction`` that
applies to that data, computes the bootstrap confidence interval for
``statfunction`` on that data. Data points are assumed to be delineated by
axis 0.
Parameters
----------
data: array_like, shape (N, ...) OR tuple of array_like all with shape (N, ...)
Input data. Data points are assumed to be delineated by axis 0. Beyond this,
the shape doesn't matter, so long as ``statfunction`` can be applied to the
array. If a tuple of array_likes is passed, then samples from each array (along
axis 0) are passed in order as separate parameters to the statfunction. The
type of data (single array or tuple of arrays) can be explicitly specified
by the multi parameter.
statfunction: function (data, weights=(weights, optional)) -> value
This function should accept samples of data from ``data``. It is applied
to these samples individually.
If using the ABC method, the function _must_ accept a named ``weights``
parameter which will be an array_like with weights for each sample, and
must return a _weighted_ result. Otherwise this parameter is not used
or required. Note that numpy's np.average accepts this. (default=np.average)
alpha: float or iterable, optional
The percentiles to use for the confidence interval (default=0.05). If this
is a float, the returned values are (alpha/2, 1-alpha/2) percentile confidence
intervals. If it is an iterable, alpha is assumed to be an iterable of
each desired percentile.
n_samples: float, optional
The number of bootstrap samples to use (default=10000)
method: string, optional
The method to use: one of 'pi', 'bca', or 'abc' (default='bca')
output: string, optional
The format of the output. 'lowhigh' gives low and high confidence interval
values. 'errorbar' gives transposed abs(value-confidence interval value) values
that are suitable for use with matplotlib's errorbar function. (default='lowhigh')
epsilon: float, optional (only for ABC method)
The step size for finite difference calculations in the ABC method. Ignored for
all other methods. (default=0.001)
multi: boolean, optional
If False, assume data is a single array. If True, assume data is a tuple/other
iterable of arrays of the same length that should be sampled together. If None,
decide based on whether the data is an actual tuple. (default=None)
Returns
-------
confidences: tuple of floats
The confidence percentiles specified by alpha
Calculation Methods
-------------------
'pi': Percentile Interval (Efron 13.3)
The percentile interval method simply returns the 100*alphath bootstrap
sample's values for the statistic. This is an extremely simple method of
confidence interval calculation. However, it has several disadvantages
compared to the bias-corrected accelerated method, which is the default.
'bca': Bias-Corrected Accelerated (BCa) Non-Parametric (Efron 14.3) (default)
This method is much more complex to explain. However, it gives considerably
better results, and is generally recommended for normal situations. Note
that in cases where the statistic is smooth, and can be expressed with
weights, the ABC method will give approximated results much, much faster.
Note that in a case where the statfunction results in equal output for every
bootstrap sample, the BCa confidence interval is technically undefined, as
the acceleration value is undefined. To match the percentile interval method
and give reasonable output, the implementation of this method returns a
confidence interval of zero width using the 0th bootstrap sample in this
case, and warns the user.
'abc': Approximate Bootstrap Confidence (Efron 14.4, 22.6)
This method provides approximated bootstrap confidence intervals without
actually taking bootstrap samples. This requires that the statistic be
smooth, and allow for weighting of individual points with a weights=
parameter (note that np.average allows this). This is _much_ faster
than all other methods for situations where it can be used.
Examples
--------
To calculate the confidence intervals for the mean of some numbers:
>> boot.ci( np.randn(100), np.average )
Given some data points in arrays x and y calculate the confidence intervals
for all linear regression coefficients simultaneously:
>> boot.ci( (x,y), scipy.stats.linregress )
References
----------
Efron, An Introduction to the Bootstrap. Chapman & Hall 1993
|
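A from-scratch sketch of what the percentile-interval ('pi') method above computes: resample with replacement, apply the statistic, sort the bootstrap statistics, and pick the alpha/2 and 1 - alpha/2 entries.
# Percentile-interval bootstrap for the mean, written out in plain numpy.
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(loc=5.0, scale=2.0, size=100)

n_samples, alpha = 10000, 0.05
idx = rng.integers(0, len(data), size=(n_samples, len(data)))  # bootstrap indexes
boot_stats = np.sort(data[idx].mean(axis=1))                   # sorted bootstrap means

low = boot_stats[int(np.round((n_samples - 1) * alpha / 2))]
high = boot_stats[int(np.round((n_samples - 1) * (1 - alpha / 2)))]
print(low, high)   # roughly a 95% confidence interval for the mean of `data`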
def show(self):
"""
Display (with a pretty print) this object
"""
off = 0
for n, i in enumerate(self.get_instructions()):
print("{:8d} (0x{:08x}) {:04x} {:30} {}".format(n, off, i.get_op_value(), i.get_name(), i.get_output(self.idx)))
off += i.get_length()
|
Display (with a pretty print) this object
|
def get_worksheet(self, index):
"""Returns a worksheet with specified `index`.
:param index: An index of a worksheet. Indexes start from zero.
:type index: int
:returns: an instance of :class:`gspread.models.Worksheet`
or `None` if the worksheet is not found.
Example. To get first worksheet of a spreadsheet:
>>> sht = client.open('My fancy spreadsheet')
>>> worksheet = sht.get_worksheet(0)
"""
sheet_data = self.fetch_sheet_metadata()
try:
properties = sheet_data['sheets'][index]['properties']
return Worksheet(self, properties)
except (KeyError, IndexError):
return None
|
Returns a worksheet with specified `index`.
:param index: An index of a worksheet. Indexes start from zero.
:type index: int
:returns: an instance of :class:`gspread.models.Worksheet`
or `None` if the worksheet is not found.
Example. To get first worksheet of a spreadsheet:
>>> sht = client.open('My fancy spreadsheet')
>>> worksheet = sht.get_worksheet(0)
|
def as_params(self):
""" Returns the filters, orders, select, expands and search as query parameters
:rtype: dict
"""
params = {}
if self.has_filters:
params['$filter'] = self.get_filters()
if self.has_order:
params['$orderby'] = self.get_order()
if self.has_selects:
params['$select'] = self.get_selects()
if self.has_expands:
params['$expand'] = self.get_expands()
if self._search:
params['$search'] = self._search
params.pop('$filter', None)
params.pop('$orderby', None)
return params
|
Returns the filters, orders, select, expands and search as query parameters
:rtype: dict
|
def convert_sed_cols(tab):
"""Cast SED column names to lowercase."""
# Update Column names
for colname in list(tab.columns.keys()):
newname = colname.lower()
newname = newname.replace('dfde', 'dnde')
if tab.columns[colname].name == newname:
continue
tab.columns[colname].name = newname
return tab
|
Cast SED column names to lowercase.
|
def add(self, search):
"""
Adds a new :class:`~elasticsearch_dsl.Search` object to the request::
ms = MultiSearch(index='my-index')
ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
ms = ms.add(Search(doc_type=Blog))
"""
ms = self._clone()
ms._searches.append(search)
return ms
|
Adds a new :class:`~elasticsearch_dsl.Search` object to the request::
ms = MultiSearch(index='my-index')
ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
ms = ms.add(Search(doc_type=Blog))
|
def _get_goid2dbids(associations):
"""Return gene2go data for user-specified taxids."""
go2ids = cx.defaultdict(set)
for ntd in associations:
go2ids[ntd.GO_ID].add(ntd.DB_ID)
return dict(go2ids)
|
Return gene2go data for user-specified taxids.
|
def _close_prepared_statement(self):
"""
Close the prepared statement on the server.
"""
self.prepared_sql = None
self.flush_to_query_ready()
self.connection.write(messages.Close('prepared_statement', self.prepared_name))
self.connection.write(messages.Flush())
self._message = self.connection.read_expected_message(messages.CloseComplete)
self.connection.write(messages.Sync())
|
Close the prepared statement on the server.
|
def delete_managed_disk(call=None, kwargs=None): # pylint: disable=unused-argument
'''
Delete a managed disk from a resource group.
'''
compconn = get_conn(client_type='compute')
try:
compconn.disks.delete(kwargs['resource_group'], kwargs['blob'])
except Exception as exc:
log.error('Error deleting managed disk %s - %s', kwargs.get('blob'), six.text_type(exc))
return False
return True
|
Delete a managed disk from a resource group.
|
def distVersion():
"""
The distribution version identifying a published release on PyPI.
"""
from pkg_resources import parse_version
build_number = buildNumber()
parsedBaseVersion = parse_version(baseVersion)
if isinstance(parsedBaseVersion, tuple):
raise RuntimeError("Setuptools version 8.0 or newer required. Update by running "
"'pip install setuptools --upgrade'")
if build_number is not None and parsedBaseVersion.is_prerelease:
return baseVersion + '.dev' + build_number
else:
return baseVersion
|
The distribution version identifying a published release on PyPI.
|
def region_size(im):
r"""
Replace each voxel with size of region to which it belongs
Parameters
----------
im : ND-array
Either a boolean image with ``True`` indicating the features of
interest, in which case ``scipy.ndimage.label`` will be applied to
find regions, or a greyscale image with integer values indicating
regions.
Returns
-------
image : ND-array
A copy of ``im`` with each voxel value indicating the size of the
region to which it belongs. This is particularly useful for finding
chord sizes on the image produced by ``apply_chords``.
"""
if im.dtype == bool:
im = spim.label(im)[0]
counts = sp.bincount(im.flatten())
counts[0] = 0
chords = counts[im]
return chords
|
r"""
Replace each voxel with size of region to which it belongs
Parameters
----------
im : ND-array
Either a boolean image with ``True`` indicating the features of
interest, in which case ``scipy.ndimage.label`` will be applied to
find regions, or a greyscale image with integer values indicating
regions.
Returns
-------
image : ND-array
A copy of ``im`` with each voxel value indicating the size of the
region to which it belongs. This is particularly useful for finding
chord sizes on the image produced by ``apply_chords``.
|
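A small 2-D demo of the label/bincount/fancy-indexing trick used above: every foreground pixel ends up holding the size of its connected region.
# Two 3-pixel blobs; each True pixel is replaced by its region size (3), background by 0.
import numpy as np
from scipy import ndimage as spim

im = np.array([[1, 1, 0, 0],
               [1, 0, 0, 1],
               [0, 0, 1, 1]], dtype=bool)

labels = spim.label(im)[0]          # 1 for the left blob, 2 for the right blob
counts = np.bincount(labels.flatten())
counts[0] = 0                        # background keeps size 0
sizes = counts[labels]               # each voxel now holds the size of its region
print(sizes)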
def normalize_curves_eb(curves):
"""
A more sophisticated version of normalize_curves, used in the event
based calculator.
:param curves: a list of pairs (losses, poes)
:returns: first losses, all_poes
"""
# we assume non-decreasing losses, so losses[-1] is the maximum loss
non_zero_curves = [(losses, poes)
for losses, poes in curves if losses[-1] > 0]
if not non_zero_curves: # no damage. all zero curves
return curves[0][0], numpy.array([poes for _losses, poes in curves])
else: # standard case
max_losses = [losses[-1] for losses, _poes in non_zero_curves]
reference_curve = non_zero_curves[numpy.argmax(max_losses)]
loss_ratios = reference_curve[0]
curves_poes = [interpolate.interp1d(
losses, poes, bounds_error=False, fill_value=0)(loss_ratios)
for losses, poes in curves]
# fix degenerated case with flat curve
for cp in curves_poes:
if numpy.isnan(cp[0]):
cp[0] = 0
return loss_ratios, numpy.array(curves_poes)
|
A more sophisticated version of normalize_curves, used in the event
based calculator.
:param curves: a list of pairs (losses, poes)
:returns: first losses, all_poes
|
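A tiny sketch of the interpolation step above: every (losses, poes) curve is re-sampled onto the loss grid of the curve with the largest maximum loss, with 0 used outside a curve's range.
# Two toy curves; the first one (max loss 20.0) serves as the reference grid.
import numpy
from scipy import interpolate

curves = [
    (numpy.array([0.0, 10.0, 20.0]), numpy.array([0.9, 0.5, 0.1])),
    (numpy.array([0.0, 5.0, 15.0]), numpy.array([0.8, 0.4, 0.05])),
]
reference_losses = curves[0][0]

curves_poes = [
    interpolate.interp1d(losses, poes, bounds_error=False, fill_value=0)(reference_losses)
    for losses, poes in curves
]
print(reference_losses)
print(numpy.array(curves_poes))  # second curve gets 0 at loss 20.0, outside its range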
def remote_mgmt_addr_uneq_store(self, remote_mgmt_addr):
"""This function saves the MGMT address, if different from stored. """
if remote_mgmt_addr != self.remote_mgmt_addr:
self.remote_mgmt_addr = remote_mgmt_addr
return True
return False
|
This function saves the MGMT address, if different from stored.
|
def run(*steps):
"""
Helper to run one or more async functions synchronously, with graceful
handling of SIGINT / Ctrl-C.
Returns the return value of the last function.
"""
if not steps:
return
task = None
run._sigint = False # function attr to allow setting from closure
loop = asyncio.get_event_loop()
def abort():
task.cancel()
run._sigint = True
added = False
try:
loop.add_signal_handler(signal.SIGINT, abort)
added = True
except (ValueError, OSError, RuntimeError) as e:
# add_signal_handler doesn't work in a thread
if 'main thread' not in str(e):
raise
try:
for step in steps:
task = loop.create_task(step)
loop.run_until_complete(asyncio.wait([task], loop=loop))
if run._sigint:
raise KeyboardInterrupt()
if task.exception():
raise task.exception()
return task.result()
finally:
if added:
loop.remove_signal_handler(signal.SIGINT)
|
Helper to run one or more async functions synchronously, with graceful
handling of SIGINT / Ctrl-C.
Returns the return value of the last function.
|
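A usage sketch with the run helper above in scope: two coroutines execute back to back on the same loop, and the return value of the last one is returned (requires a Unix main thread for the SIGINT handler path).
# Hedged usage sketch; `run` from above must be in scope.
import asyncio

async def fetch_config():
    await asyncio.sleep(0.1)
    return {"debug": True}

async def start_server(delay=0.1):
    await asyncio.sleep(delay)
    return "server started"

result = run(fetch_config(), start_server())
print(result)  # "server started"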
def NewOutputModule(cls, name, output_mediator):
"""Creates a new output module object for the specified output format.
Args:
name (str): name of the output module.
output_mediator (OutputMediator): output mediator.
Returns:
OutputModule: output module.
Raises:
KeyError: if there is no output class found with the supplied name.
ValueError: if name is not a string.
"""
output_class = cls.GetOutputClass(name)
return output_class(output_mediator)
|
Creates a new output module object for the specified output format.
Args:
name (str): name of the output module.
output_mediator (OutputMediator): output mediator.
Returns:
OutputModule: output module.
Raises:
KeyError: if there is no output class found with the supplied name.
ValueError: if name is not a string.
|
def meth_set_acl(args):
""" Assign an ACL role to a list of users for a workflow. """
acl_updates = [{"user": user, "role": args.role} \
for user in set(expand_fc_groups(args.users)) \
if user != fapi.whoami()]
id = args.snapshot_id
if not id:
# get the latest snapshot_id for this method from the methods repo
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method)
fapi._check_response_code(r, 200)
versions = r.json()
if len(versions) == 0:
if fcconfig.verbosity:
eprint("method {0}/{1} not found".format(args.namespace,
args.method))
return 1
latest = sorted(versions, key=lambda m: m['snapshotId'])[-1]
id = latest['snapshotId']
r = fapi.update_repository_method_acl(args.namespace, args.method, id,
acl_updates)
fapi._check_response_code(r, 200)
if fcconfig.verbosity:
print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.method,
id))
return 0
|
Assign an ACL role to a list of users for a workflow.
|
def p_example_multiline(self, p):
"""example_field : ID EQ NL INDENT ex_map NL DEDENT"""
p[0] = AstExampleField(
self.path, p.lineno(1), p.lexpos(1), p[1], p[5])
|
example_field : ID EQ NL INDENT ex_map NL DEDENT
|
def _CSI(self, cmd):
"""
Control sequence introducer
"""
sys.stdout.write('\x1b[')
sys.stdout.write(cmd)
|
Control sequence introducer
|
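A standalone sketch of the same idea: write the two-byte Control Sequence Introducer (ESC followed by '[') and then a command. The SGR sequences shown are standard ANSI/VT100 codes, independent of the class above.
# Minimal CSI writer; '31m' sets red foreground, '0m' resets attributes.
import sys

def csi(cmd):
    sys.stdout.write('\x1b[')
    sys.stdout.write(cmd)

csi('31m')
sys.stdout.write('error')  # rendered in red on ANSI-capable terminals
csi('0m')
sys.stdout.write('\n')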
def get_args_parser():
"""Return a parser for command line options."""
parser = argparse.ArgumentParser(
description='Marabunta: Migrating ants for Odoo')
parser.add_argument('--migration-file', '-f',
action=EnvDefault,
envvar='MARABUNTA_MIGRATION_FILE',
required=True,
help='The yaml file containing the migration steps')
parser.add_argument('--database', '-d',
action=EnvDefault,
envvar='MARABUNTA_DATABASE',
required=True,
help="Odoo's database")
parser.add_argument('--db-user', '-u',
action=EnvDefault,
envvar='MARABUNTA_DB_USER',
required=True,
help="Odoo's database user")
parser.add_argument('--db-password', '-w',
action=EnvDefault,
envvar='MARABUNTA_DB_PASSWORD',
required=True,
help="Odoo's database password")
parser.add_argument('--db-port', '-p',
default=os.environ.get('MARABUNTA_DB_PORT', 5432),
help="Odoo's database port")
parser.add_argument('--db-host', '-H',
default=os.environ.get('MARABUNTA_DB_HOST',
'localhost'),
help="Odoo's database host")
parser.add_argument('--mode',
action=EnvDefault,
envvar='MARABUNTA_MODE',
required=False,
help="Specify the mode in which we run the migration,"
"such as 'demo' or 'prod'. Additional operations "
"of this mode will be executed after the main "
"operations and the addons list of this mode "
"will be merged with the main addons list.")
parser.add_argument('--allow-serie',
action=BoolEnvDefault,
required=False,
envvar='MARABUNTA_ALLOW_SERIE',
help='Allow to run more than 1 version upgrade at a '
'time.')
parser.add_argument('--force-version',
required=False,
default=os.environ.get('MARABUNTA_FORCE_VERSION'),
help='Force upgrade of a version, even if it has '
'already been applied.')
group = parser.add_argument_group(
title='Web',
description='Configuration related to the internal web server, '
'used to publish a maintenance page during the migration.',
)
group.add_argument('--web-host',
required=False,
default=os.environ.get('MARABUNTA_WEB_HOST', '0.0.0.0'),
help='Host for the web server')
group.add_argument('--web-port',
required=False,
default=os.environ.get('MARABUNTA_WEB_PORT', 8069),
help='Port for the web server')
group.add_argument('--web-custom-html',
required=False,
default=os.environ.get(
'MARABUNTA_WEB_CUSTOM_HTML'
),
help='Path to a custom html file to publish')
return parser
|
Return a parser for command line options.
|
def find_near_matches_no_deletions_ngrams(subsequence, sequence, search_params):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
* the maximum allowed number of character substitutions
* the maximum allowed number of new characters inserted
* no deletions are allowed
* the total number of substitutions and insertions must not exceed the maximum allowed Levenshtein distance
"""
if not subsequence:
raise ValueError('Given subsequence is empty!')
max_substitutions, max_insertions, max_deletions, max_l_dist = search_params.unpacked
max_substitutions = min(max_substitutions, max_l_dist)
max_insertions = min(max_insertions, max_l_dist)
subseq_len = len(subsequence)
seq_len = len(sequence)
ngram_len = subseq_len // (max_substitutions + max_insertions + 1)
if ngram_len == 0:
raise ValueError(
"The subsequence's length must be greater than max_subs + max_ins!"
)
matches = []
matched_indexes = set()
for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len):
ngram_end = ngram_start + ngram_len
subseq_before = subsequence[:ngram_start]
subseq_before_reversed = subseq_before[::-1]
subseq_after = subsequence[ngram_end:]
start_index = max(0, ngram_start - max_insertions)
end_index = min(seq_len, seq_len - (subseq_len - ngram_end) + max_insertions)
for index in search_exact(
subsequence[ngram_start:ngram_end], sequence,
start_index, end_index,
):
if index - ngram_start in matched_indexes:
continue
seq_after = sequence[index + ngram_len:index + subseq_len - ngram_start + max_insertions]
if seq_after.startswith(subseq_after):
matches_after = [(0, 0)]
else:
matches_after = _expand(subseq_after, seq_after,
max_substitutions, max_insertions, max_l_dist)
if not matches_after:
continue
_max_substitutions = max_substitutions - min(m[0] for m in matches_after)
_max_insertions = max_insertions - min(m[1] for m in matches_after)
_max_l_dist = max_l_dist - min(m[0] + m[1] for m in matches_after)
seq_before = sequence[index - ngram_start - _max_insertions:index]
if seq_before.endswith(subseq_before):
matches_before = [(0, 0)]
else:
matches_before = _expand(
subseq_before_reversed, seq_before[::-1],
_max_substitutions, _max_insertions, _max_l_dist,
)
for (subs_before, ins_before) in matches_before:
for (subs_after, ins_after) in matches_after:
if (
subs_before + subs_after <= max_substitutions and
ins_before + ins_after <= max_insertions and
subs_before + subs_after + ins_before + ins_after <= max_l_dist
):
matches.append(Match(
start=index - ngram_start - ins_before,
end=index - ngram_start + subseq_len + ins_after,
dist=subs_before + subs_after + ins_before + ins_after,
))
matched_indexes |= set(range(
index - ngram_start - ins_before,
index - ngram_start - ins_before + max_insertions + 1,
))
return sorted(matches, key=lambda match: match.start)
|
search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
* the maximum allowed number of character substitutions
* the maximum allowed number of new characters inserted
* no deletions are allowed
* the total number of substitutions and insertions must not exceed the maximum allowed Levenshtein distance
|
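A sketch of the pigeonhole step only: with at most max_subs + max_ins edits and no deletions, at least one of the equal-length n-grams of the subsequence must occur verbatim in the sequence. str.find stands in for the library's search_exact helper, and the window restriction by start_index/end_index is omitted.
# Find candidate anchor positions by exact n-gram search.
def candidate_anchors(subsequence, sequence, max_subs, max_ins):
    ngram_len = len(subsequence) // (max_subs + max_ins + 1)
    anchors = []
    for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len):
        ngram = subsequence[ngram_start:ngram_start + ngram_len]
        index = sequence.find(ngram)
        while index != -1:
            anchors.append((ngram_start, index))
            index = sequence.find(ngram, index + 1)
    return anchors

# "PAXTERN" is "PATTERN" with one substitution; two of the three n-grams still match exactly.
print(candidate_anchors("PATTERN", "xxPAXTERNxx", max_subs=1, max_ins=1))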
def taskGroupCreationRequested(self, *args, **kwargs):
"""
tc-gh requested the Queue service to create all the tasks in a group.
This is supposed to signal that the taskCreate API has been called for every task in the task group
for this particular repo and this particular organization; it is
currently used for creating initial status indicators in the GitHub UI using the Statuses API.
This particular exchange can also be bound to RabbitMQ queues by custom routes - for that,
pass in the array of routes as a second argument to the publish method. Currently, we do
use the statuses routes to bind the handler that creates the initial status.
This exchange outputs: ``v1/task-group-creation-requested.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'task-group-creation-requested',
'name': 'taskGroupCreationRequested',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/task-group-creation-requested.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs)
|
tc-gh requested the Queue service to create all the tasks in a group.
This is supposed to signal that the taskCreate API has been called for every task in the task group
for this particular repo and this particular organization; it is
currently used for creating initial status indicators in the GitHub UI using the Statuses API.
This particular exchange can also be bound to RabbitMQ queues by custom routes - for that,
pass in the array of routes as a second argument to the publish method. Currently, we do
use the statuses routes to bind the handler that creates the initial status.
This exchange outputs: ``v1/task-group-creation-requested.json#``. This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
|
def pretty_print(self):
"""
Print the error message to stdout with colors and borders
"""
print(colored.blue("-" * 40))
print(colored.red("datacats: problem was encountered:"))
print(self.message)
print(colored.blue("-" * 40))
|
Print the error message to stdout with colors and borders
|
def unix_time(self, dt):
"""Returns the number of seconds since the UNIX epoch for the given
datetime (dt).
PARAMETERS:
dt -- datetime
"""
epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return int(delta.total_seconds())
|
Returns the number of seconds since the UNIX epoch for the given
datetime (dt).
PARAMETERS:
dt -- datetime
|
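A quick check of the epoch arithmetic above: one day after the epoch is 86400 seconds. The datetimes are naive and interpreted as UTC, matching utcfromtimestamp(0).
# Worked example of unix_time's delta calculation.
from datetime import datetime

epoch = datetime.utcfromtimestamp(0)
one_day_later = datetime(1970, 1, 2, 0, 0, 0)
print(int((one_day_later - epoch).total_seconds()))  # 86400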
def ipv6_link_local(self, **kwargs):
"""Configure ipv6 link local address on interfaces on vdx switches
Args:
int_type: Interface type on which the ipv6 link local needs to be
configured.
name: 'Ve' or 'loopback' interface name.
rbridge_id (str): rbridge-id for device.
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-learning. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name` is not passed.
ValueError: if `int_type`, `name` is invalid.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ipv6_link_local(name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(get=True,name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(delete=True,
... name='500', int_type='ve', rbridge_id='1')
"""
int_type = kwargs.pop('int_type').lower()
ve_name = kwargs.pop('name')
rbridge_id = kwargs.pop('rbridge_id', '1')
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['loopback', 've']
if int_type not in valid_int_types:
raise ValueError('`int_type` must be one of: %s' %
repr(valid_int_types))
link_args = dict(name=ve_name, rbridge_id=rbridge_id,
int_type=int_type)
method_name = 'rbridge_id_interface_%s_ipv6_ipv6_config_address_' \
'use_link_local_only' % int_type
method_class = self._rbridge
v6_link_local = getattr(method_class, method_name)
config = v6_link_local(**link_args)
if kwargs.pop('get', False):
output = callback(config, handler='get_config')
item = output.data.find('.//{*}use-link-local-only')
if item is not None:
return True
if kwargs.pop('delete', False):
config.find('.//*use-link-local-only').set('operation', 'delete')
return callback(config)
|
Configure ipv6 link local address on interfaces on vdx switches
Args:
int_type: Interface type on which the ipv6 link local needs to be
configured.
name: 'Ve' or 'loopback' interface name.
rbridge_id (str): rbridge-id for device.
get (bool): Get config instead of editing config. (True, False)
delete (bool): True, delete the mac-learning. (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name` is not passed.
ValueError: if `int_type`, `name` is invalid.
Examples:
>>> import pynos.device
>>> conn = ('10.24.39.211', '22')
>>> auth = ('admin', 'password')
>>> with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.ipv6_link_local(name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(get=True,name='500',
... int_type='ve',rbridge_id='1')
... output = dev.interface.ipv6_link_local(delete=True,
... name='500', int_type='ve', rbridge_id='1')
|
def getIdent(self, node):
"""
Get the graph identifier for a node
"""
ident = self.getRawIdent(node)
if ident is not None:
return ident
node = self.findNode(node)
if node is None:
return None
return node.graphident
|
Get the graph identifier for a node
|
def bbox_to_resolution(bbox, width, height):
""" Calculates pixel resolution in meters for a given bbox of a given width and height.
:param bbox: bounding box
:type bbox: geometry.BBox
:param width: width of bounding box in pixels
:type width: int
:param height: height of bounding box in pixels
:type height: int
:return: resolution east-west at north and south, and resolution north-south in meters for given CRS
:rtype: float, float
:raises: ValueError if CRS is not supported
"""
utm_bbox = to_utm_bbox(bbox)
east1, north1 = utm_bbox.lower_left
east2, north2 = utm_bbox.upper_right
return abs(east2 - east1) / width, abs(north2 - north1) / height
|
Calculates pixel resolution in meters for a given bbox of a given width and height.
:param bbox: bounding box
:type bbox: geometry.BBox
:param width: width of bounding box in pixels
:type width: int
:param height: height of bounding box in pixels
:type height: int
:return: resolution east-west at north and south, and resolution north-south in meters for given CRS
:rtype: float, float
:raises: ValueError if CRS is not supported
|
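The final arithmetic on its own: a UTM bounding box 10240 m wide and 5120 m tall rendered at 1024 x 512 pixels gives a 10 m resolution in both directions (the corner coordinates are illustrative).
# Resolution = bbox extent in metres divided by the output size in pixels.
east1, north1 = 500000.0, 4649000.0   # lower-left corner (metres)
east2, north2 = 510240.0, 4654120.0   # upper-right corner (metres)
width, height = 1024, 512             # output size in pixels

res_x = abs(east2 - east1) / width    # 10240 / 1024 = 10.0 m/px east-west
res_y = abs(north2 - north1) / height # 5120 / 512 = 10.0 m/px north-south
print(res_x, res_y)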
def mark_offer_as_lose(self, offer_id):
"""
Mark offer as lose
:param offer_id: the offer id
:return Response
"""
return self._create_put_request(
resource=OFFERS,
billomat_id=offer_id,
command=LOSE,
)
|
Mark offer as lose
:param offer_id: the offer id
:return Response
|
def run(self):
""" This defines the sequence of actions that are taken when the preemptive concurrency state is executed
:return:
"""
logger.debug("Starting execution of {0}{1}".format(self, " (backwards)" if self.backward_execution else ""))
self.setup_run()
try:
concurrency_history_item = self.setup_forward_or_backward_execution()
concurrency_queue = self.start_child_states(concurrency_history_item)
#######################################################
# wait for the first threads to finish
#######################################################
finished_thread_id = concurrency_queue.get()
finisher_state = self.states[finished_thread_id]
finisher_state.join()
# preempt all child states
if not self.backward_execution:
for state_id, state in self.states.items():
state.recursively_preempt_states()
# join all states
for history_index, state in enumerate(self.states.values()):
self.join_state(state, history_index, concurrency_history_item)
self.add_state_execution_output_to_scoped_data(state.output_data, state)
self.update_scoped_variables_with_output_dictionary(state.output_data, state)
# add the data of the first state now to overwrite data of the preempted states
self.add_state_execution_output_to_scoped_data(finisher_state.output_data, finisher_state)
self.update_scoped_variables_with_output_dictionary(finisher_state.output_data, finisher_state)
#######################################################
# handle backward execution case
#######################################################
if self.states[finished_thread_id].backward_execution:
return self.finalize_backward_execution()
else:
self.backward_execution = False
#######################################################
# handle no transition
#######################################################
transition = self.get_transition_for_outcome(self.states[finished_thread_id],
self.states[finished_thread_id].final_outcome)
if transition is None:
# final outcome is set here
transition = self.handle_no_transition(self.states[finished_thread_id])
# it the transition is still None, then the state was preempted or aborted, in this case return
if transition is None:
self.output_data["error"] = RuntimeError("state aborted")
else:
if 'error' in self.states[finished_thread_id].output_data:
self.output_data["error"] = self.states[finished_thread_id].output_data['error']
self.final_outcome = self.outcomes[transition.to_outcome]
return self.finalize_concurrency_state(self.final_outcome)
except Exception as e:
logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc())))
self.output_data["error"] = e
self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE
return self.finalize(Outcome(-1, "aborted"))
|
This defines the sequence of actions that are taken when the preemptive concurrency state is executed
:return:
|
def taskdir(self):
""" Return the directory under which all artefacts are stored. """
return os.path.join(self.BASE, self.TAG, self.task_family)
|
Return the directory under which all artefacts are stored.
|
def get_required(self, name):
"""
Gets all required dependencies by their name.
At least one dependency must be present. If no dependencies were found, it throws a [[ReferenceException]]
:param name: the dependency name to locate.
:return: a list with found dependencies.
"""
locator = self._locate(name)
if locator is None:
raise ReferenceException(None, name)
return self._references.get_required(locator)
|
Gets all required dependencies by their name.
At least one dependency must be present. If no dependencies were found, it throws a [[ReferenceException]]
:param name: the dependency name to locate.
:return: a list with found dependencies.
|
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
"""
codes = self.get_opcodes()
if not codes:
codes = [("equal", 0, 1, 0, 1)]
# Fixup leading and trailing groups if they show no changes.
if codes[0][0] == 'equal':
tag, i1, i2, j1, j2 = codes[0]
codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
if codes[-1][0] == 'equal':
tag, i1, i2, j1, j2 = codes[-1]
codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
nn = n + n
group = []
for tag, i1, i2, j1, j2 in codes:
# End the current group and start a new one whenever
# there is a large range with no changes.
if tag == 'equal' and i2-i1 > nn:
group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
yield group
group = []
i1, j1 = max(i1, i2-n), max(j1, j2-n)
group.append((tag, i1, i2, j1 ,j2))
if group and not (len(group)==1 and group[0][0] == 'equal'):
yield group
|
Isolate change clusters by eliminating ranges with no changes.
Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
>>> a = map(str, range(1,40))
>>> b = a[:]
>>> b[8:8] = ['i'] # Make an insertion
>>> b[20] += 'x' # Make a replacement
>>> b[23:28] = [] # Make a deletion
>>> b[30] += 'y' # Make another replacement
>>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
[[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
[('equal', 16, 19, 17, 20),
('replace', 19, 20, 20, 21),
('equal', 20, 22, 21, 23),
('delete', 22, 27, 23, 23),
('equal', 27, 30, 23, 26)],
[('equal', 31, 34, 27, 30),
('replace', 34, 35, 30, 31),
('equal', 35, 38, 31, 34)]]
|
def get_managed(
name,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
saltenv,
context,
defaults,
skip_verify=False,
**kwargs):
'''
Return the managed file data for file.managed
name
location where the file lives on the server
template
template format
source
managed source file
source_hash
hash of the source file
source_hash_name
When ``source_hash`` refers to a remote file, this specifies the
filename to look for in that file.
.. versionadded:: 2016.3.5
user
Owner of file
group
Group owner of file
mode
Permissions of file
attrs
Attributes of file
.. versionadded:: 2018.3.0
context
Variables to add to the template context
defaults
Default values of for context_dict
skip_verify
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
'''
# Copy the file to the minion and templatize it
sfn = ''
source_sum = {}
def _get_local_file_source_sum(path):
'''
DRY helper for getting the source_sum value from a locally cached
path.
'''
return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}
# If we have a source defined, let's figure out what the hash is
if source:
urlparsed_source = _urlparse(source)
if urlparsed_source.scheme in salt.utils.files.VALID_PROTOS:
parsed_scheme = urlparsed_source.scheme
else:
parsed_scheme = ''
parsed_path = os.path.join(
urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
unix_local_source = parsed_scheme in ('file', '')
if parsed_scheme == '':
parsed_path = sfn = source
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg
elif parsed_scheme == 'file':
sfn = parsed_path
if not os.path.exists(sfn):
msg = 'Local file source {0} does not exist'.format(sfn)
return '', {}, msg
if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
parsed_path = ':'.join([parsed_scheme, parsed_path])
parsed_scheme = 'file'
if parsed_scheme == 'salt':
source_sum = __salt__['cp.hash_file'](source, saltenv)
if not source_sum:
return '', {}, 'Source file {0} not found in saltenv \'{1}\''.format(source, saltenv)
elif not source_hash and unix_local_source:
source_sum = _get_local_file_source_sum(parsed_path)
elif not source_hash and source.startswith(os.sep):
# This should happen on Windows
source_sum = _get_local_file_source_sum(source)
else:
if not skip_verify:
if source_hash:
try:
source_sum = get_source_sum(name,
source,
source_hash,
source_hash_name,
saltenv)
except CommandExecutionError as exc:
return '', {}, exc.strerror
else:
msg = (
'Unable to verify upstream hash of source file {0}, '
'please set source_hash or set skip_verify to True'
.format(salt.utils.url.redact_http_basic_auth(source))
)
return '', {}, msg
if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
# Check if we have the template or remote file cached
cache_refetch = False
cached_dest = __salt__['cp.is_cached'](source, saltenv)
if cached_dest and (source_hash or skip_verify):
htype = source_sum.get('hash_type', 'sha256')
cached_sum = get_hash(cached_dest, form=htype)
if skip_verify:
# previously: `if skip_verify or cached_sum == source_sum['hsum']:`,
# but the `cached_sum == source_sum['hsum']` check is redundant under the preceding `if`
sfn = cached_dest
source_sum = {'hsum': cached_sum, 'hash_type': htype}
elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
cache_refetch = True
else:
sfn = cached_dest
# If we didn't have the template or remote file, or the file has been
# updated and the cache has to be refreshed, download the file.
if not sfn or cache_refetch:
try:
sfn = __salt__['cp.cache_file'](
source,
saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
# A 404 or other error code may raise an exception, catch it
# and return a comment that will fail the calling state.
_source = salt.utils.url.redact_http_basic_auth(source)
return '', {}, 'Failed to cache {0}: {1}'.format(_source, exc)
# If cache failed, sfn will be False, so do a truth check on sfn first
# as invoking os.path.exists() on a bool raises a TypeError.
if not sfn or not os.path.exists(sfn):
_source = salt.utils.url.redact_http_basic_auth(source)
return sfn, {}, 'Source file \'{0}\' not found'.format(_source)
if sfn == name:
raise SaltInvocationError(
'Source file cannot be the same as destination'
)
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
context_dict = defaults if defaults else {}
if context:
context_dict = salt.utils.dictupdate.merge(context_dict, context)
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
sfn,
name=name,
source=source,
user=user,
group=group,
mode=mode,
attrs=attrs,
saltenv=saltenv,
context=context_dict,
salt=__salt__,
pillar=__pillar__,
grains=__opts__['grains'],
opts=__opts__,
**kwargs)
else:
return sfn, {}, ('Specified template format {0} is not supported'
).format(template)
if data['result']:
sfn = data['data']
hsum = get_hash(sfn, form='sha256')
source_sum = {'hash_type': 'sha256',
'hsum': hsum}
else:
__clean_tmp(sfn)
return sfn, {}, data['data']
return sfn, source_sum, ''
|
Return the managed file data for file.managed
name
location where the file lives on the server
template
template format
source
managed source file
source_hash
hash of the source file
source_hash_name
When ``source_hash`` refers to a remote file, this specifies the
filename to look for in that file.
.. versionadded:: 2016.3.5
user
Owner of file
group
Group owner of file
mode
Permissions of file
attrs
Attributes of file
.. versionadded:: 2018.3.0
context
Variables to add to the template context
defaults
Default values of for context_dict
skip_verify
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
|
def load(self, name):
"""
If not yet in the cache, load the named template and compiles it,
placing it into the cache.
If in cache, return the cached template.
"""
if self.reload:
self._maybe_purge_cache()
template = self.cache.get(name)
if template:
return template
path = self.resolve(name)
if not path:
raise OSError(errno.ENOENT, "File not found: %s" % name)
with codecs.open(path, 'r', encoding='UTF-8') as f:
contents = f.read()
mtime = os.fstat(f.fileno()).st_mtime
template = self.load_string(contents, filename=path)
template.mtime = mtime
template.path = path
self.cache[name] = template
return template
|
If not yet in the cache, load the named template and compiles it,
placing it into the cache.
If in cache, return the cached template.
|
def rst2md(text):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the IPython notebooks"""
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'$${0}$$'.format(match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+)`')
text = re.sub(inline_math, r'$\1$', text)
return text
|
Converts the RST text from the examples docstrings and comments
into markdown text for the IPython notebooks
|
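A small self-contained check of the three substitutions (top heading, display math, inline math); the output comments are approximate.
rst = (
    "=====\n"
    "Title\n"
    "=====\n"
    "\n"
    "Inline :math:`x^2` and a display equation:\n"
    "\n"
    ".. math:: e^{i\\pi} + 1 = 0\n"
)
print(rst2md(rst))
# # Title
#
# Inline $x^2$ and a display equation:
#
# $$e^{i\pi} + 1 = 0$$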
def field2choices(self, field, **kwargs):
"""Return the dictionary of OpenAPI field attributes for valid choices definition
:param Field field: A marshmallow field.
:rtype: dict
"""
attributes = {}
comparable = [
validator.comparable
for validator in field.validators
if hasattr(validator, "comparable")
]
if comparable:
attributes["enum"] = comparable
else:
choices = [
OrderedSet(validator.choices)
for validator in field.validators
if hasattr(validator, "choices")
]
if choices:
attributes["enum"] = list(functools.reduce(operator.and_, choices))
return attributes
|
Return the dictionary of OpenAPI field attributes for valid choices definition
:param Field field: A marshmallow field.
:rtype: dict
|
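A hedged sketch of the ``choices`` branch; it assumes a marshmallow field and that the method is reachable on an apispec converter instance, here called ``converter`` (an assumption).
from marshmallow import fields, validate

color = fields.Str(validate=validate.OneOf(["red", "green", "blue"]))
print(converter.field2choices(color))  # expected: {'enum': ['red', 'green', 'blue']}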
def md5sum(filename, blocksize=8192):
"""Get the MD5 checksum of a file."""
with open(filename, 'rb') as fh:
m = hashlib.md5()
while True:
data = fh.read(blocksize)
if not data:
break
m.update(data)
return m.hexdigest()
|
Get the MD5 checksum of a file.
|
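A quick self-contained usage check; the digest shown is the well-known MD5 of b"hello world".
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
print(md5sum(tmp.name))  # 5eb63bbbe01eeed093cb22bb8f5acdc3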
def init_default_config(self, path):
''' Initialize the config object and load the default configuration.
The path to the config file must be provided. The name of the
application is read from the config file.
The config file stores the description and the default values for
all configurations including the application name.
        @param path: The path to the default config file.
'''
if not (os.path.exists(path) and os.path.isfile(path)):
            raise AppConfigValueException('The given default config file does '
                                          'not exist. ({0})'.format(path))
        with open(path, 'r') as cfl:
            data = json.load(cfl)
for key in data.keys():
if 'application_name' == key:
self.application_name = data[key].lower()
continue
if 'application_author' == key:
self.application_author = data[key].lower()
continue
if 'application_version' == key:
self.application_version = data[key].lower()
continue
self._add_section_default(key, data[key])
|
Initialize the config object and load the default configuration.
The path to the config file must be provided. The name of the
application is read from the config file.
The config file stores the description and the default values for
all configurations including the application name.
@param path: The path to the default config file.
|
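A hypothetical usage sketch; the owning class is not shown here, so its name (``AppConfig``) and the exact per-section shape handed to ``_add_section_default`` are assumptions.
import json
import tempfile

defaults = {
    "application_name": "MyApp",
    "application_author": "Example Corp",
    "application_version": "1.0",
    # Every other key is passed to _add_section_default(); this shape is assumed.
    "general": {"verbose": {"default": False, "description": "Enable verbose output"}},
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(defaults, fh)

cfg = AppConfig()                 # assumed class owning the method above
cfg.init_default_config(fh.name)
print(cfg.application_name)       # "myapp" -- names are lower-cased on load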
def check(text):
"""Check the text."""
err = "airlinese.misc"
msg = u"'{}' is airlinese."
airlinese = [
"enplan(?:e|ed|ing|ement)",
"deplan(?:e|ed|ing|ement)",
"taking off momentarily",
]
return existence_check(text, airlinese, err, msg)
|
Check the text.
|
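A hedged usage sketch; ``existence_check`` comes from the surrounding proselint-style module and is expected to return one hit per matched phrase.
hits = check("We will be taking off momentarily, right after enplaning is complete.")
print(len(hits))  # expected: 2 -- "taking off momentarily" and "enplaning"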
def create_sqlite_backup_db(audit_tables):
"""
    Create a fresh sqlite backup database containing copies of the given audit tables.
"""
#we always want to create a whole new DB, so delete the old one first
#if it exists.
try:
Popen("rm %s"%(config.get('sqlite', 'backup_url')), shell=True)
logging.warn("Old sqlite backup DB removed")
except Exception as e:
logging.warn(e)
try:
aux_dir = config.get('DEFAULT', 'hydra_aux_dir')
os.mkdir(aux_dir)
logging.warn("%s created", aux_dir)
except Exception as e:
logging.warn(e)
try:
backup_dir = config.get('db', 'export_target')
os.mkdir(backup_dir)
logging.warn("%s created", backup_dir)
except Exception as e:
logging.warn(e)
db = create_engine(sqlite_engine, echo=True)
db.connect()
metadata = MetaData(db)
for main_audit_table in audit_tables:
cols = []
for c in main_audit_table.columns:
col = c.copy()
if col.type.python_type == Decimal:
col.type = DECIMAL()
cols.append(col)
Table(main_audit_table.name, metadata, *cols, sqlite_autoincrement=True)
metadata.create_all(db)
|
Create a fresh sqlite backup database containing copies of the given audit tables.
|
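A hypothetical call sketch showing the expected input shape; it assumes the module-level ``config`` and ``sqlite_engine`` used above are already set up.
from sqlalchemy import Column, Integer, MetaData, Numeric, String, Table

meta = MetaData()
audit_tables = [
    Table("project_audit", meta,
          Column("id", Integer, primary_key=True),
          Column("name", String(60)),
          Column("budget", Numeric)),  # Decimal-typed columns are mapped to DECIMAL for sqlite
]
create_sqlite_backup_db(audit_tables)  # recreates the sqlite backup DB with matching tables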