def _sum(data, start=0):
"""_sum(data [, start]) -> value
Return a high-precision sum of the given numeric data. If optional
argument ``start`` is given, it is added to the total. If ``data`` is
empty, ``start`` (defaulting to 0) is returned.
Examples
--------
>>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
11.0
Some sources of round-off error will be avoided:
>>> _sum([1e50, 1, -1e50] * 1000) # Built-in sum returns zero.
1000.0
Fractions and Decimals are also supported:
>>> from fractions import Fraction as F
>>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
Fraction(63, 20)
>>> from decimal import Decimal as D
>>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
>>> _sum(data)
Decimal('0.6963')
Mixed types are currently treated as an error, except that int is
allowed.
"""
data = iter(data)
n = _first(data)
if n is not None:
data = chain([n], data)
if isinstance(n, float):
# math.fsum tracks exact partial sums, avoiding the float round-off
# that the built-in sum() suffers (see the 1e50 example above)
return math.fsum(chain([start], data))
return sum(data, start)
return start
def lrem(self, key, value, count=0):
"""Emulate lrem."""
value = self._encode(value)
redis_list = self._get_list(key, 'LREM')
removed_count = 0
if self._encode(key) in self.redis:
if count == 0:
# Remove all occurrences
while redis_list.count(value):
redis_list.remove(value)
removed_count += 1
elif count > 0:
counter = 0
# remove first 'count' occurrences
while redis_list.count(value):
redis_list.remove(value)
counter += 1
removed_count += 1
if counter >= count:
break
elif count < 0:
# remove last 'count' occurrences
counter = -count
new_list = []
for v in reversed(redis_list):
if v == value and counter > 0:
counter -= 1
removed_count += 1
else:
new_list.append(v)
redis_list[:] = list(reversed(new_list))
if removed_count > 0 and len(redis_list) == 0:
self.delete(key)
return removed_count
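# A minimal standalone sketch (not part of the emulator) of the count < 0
# branch above: drop the last abs(count) occurrences by scanning in reverse.
def _lrem_tail(values, value, count):
    counter, kept = -count, []
    for v in reversed(values):
        if v == value and counter > 0:
            counter -= 1
        else:
            kept.append(v)
    return list(reversed(kept))
assert _lrem_tail(['a', 'b', 'a', 'c', 'a'], 'a', -2) == ['a', 'b', 'c']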
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
the indiviudal links """
config_yaml = args['config']
config_dict = load_yaml(config_yaml)
data = config_dict.get('data')
comp = config_dict.get('comp')
dry_run = args.get('dry_run', False)
self._set_link('prepare', SplitAndMktimeChain,
comp=comp, data=data,
ft1file=config_dict['ft1file'],
ft2file=config_dict['ft2file'],
hpx_order_ccube=config_dict.get('hpx_order_ccube', 7),
hpx_order_expcube=config_dict.get('hpx_order_expcube', 7),
mktime=config_dict.get('mktimefitler', None),
do_ltsum=config_dict.get('do_ltsum', False),
scratch=config_dict.get('scratch', None),
dry_run=dry_run)
self._set_link('residual-cr', ResidualCR_SG,
comp=comp, data=data,
mktimefilter=config_dict.get('mktimefitler', None),
hpx_order=config_dict.get('hpx_order_fitting', 4),
clean=config_dict.get('clean_class', None),
dirty=config_dict.get('dirty_class', None),
select_factor=config_dict.get('select_factor', None),
mask_factor=config_dict.get('mask_factor', None),
sigma=config_dict.get('sigma', None),
full_output=config_dict.get('full_output', False),
dry_run=dry_run)
def start(self):
""" Initialize websockets, say hello, and start listening for events
"""
self.connect()
if not self.isAlive():
super(WAMPClient,self).start()
self.hello()
return self
def import_trade(self, trade):
"""
trade是一个可迭代的list/generator
"""
for item in trade:
self.make_deal(item.code, item.datetime, item.amount,
item.towards, item.price, item.order_model, item.amount_model)
def init_app(self, app,
entry_point_group='invenio_workflows.workflows',
**kwargs):
"""Flask application initialization."""
app.config.setdefault(
"WORKFLOWS_OBJECT_CLASS",
"invenio_workflows.api.WorkflowObject"
)
state = _WorkflowState(
app, entry_point_group=entry_point_group, **kwargs
)
app.extensions['invenio-workflows'] = state
return state
def qualified_name_import(cls):
"""Full name of a class, including the module. Like qualified_class_name, but when you already have a class """
parts = qualified_name(cls).split('.')
return "from {} import {}".format('.'.join(parts[:-1]), parts[-1]) | Full name of a class, including the module. Like qualified_class_name, but when you already have a class |
def db_import(self, urls=None, force_download=False):
"""Updates the CTD database
1. downloads all files from CTD
2. drops all tables in database
3. creates all tables in database
4. import all data from CTD files
:param iter[str] urls: An iterable of URL strings
:param bool force_download: force method to download
"""
if not urls:
urls = [
defaults.url_base + table_conf.tables[model]['file_name']
for model in table_conf.tables
]
log.info('Update CTD database from %s', urls)
self.drop_all()
self.download_urls(urls=urls, force_download=force_download)
self.create_all()
self.import_tables()
self.session.close()
def count_mismatches_before_variant(reference_prefix, cdna_prefix):
"""
Computes the number of mismatching nucleotides between two cDNA sequences before a variant
locus.
Parameters
----------
reference_prefix : str
cDNA sequence of a reference transcript before a variant locus
cdna_prefix : str
cDNA sequence detected from RNAseq before a variant locus
"""
if len(reference_prefix) != len(cdna_prefix):
raise ValueError(
"Expected reference prefix '%s' to be same length as %s" % (
reference_prefix, cdna_prefix))
return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix))
def _find(expr, sub, start=0, end=None):
"""
Return lowest indexes in each strings in the sequence or scalar
where the substring is fully contained between [start:end]. Return -1 on failure.
Equivalent to standard str.find().
:param expr:
:param sub: substring being searched
:param start: left edge index
:param end: right edge index
:return: sequence or scalar
"""
return _string_op(expr, Find, output_type=types.int64,
_sub=sub, _start=start, _end=end)
def groupuninstall(group, options=None):
"""
Remove an existing software group.
Extra *options* may be passed to ``yum`` if necessary.
"""
manager = MANAGER
if options is None:
options = []
elif isinstance(options, str):
options = [options]
options = " ".join(options)
run_as_root('%(manager)s %(options)s groupremove "%(group)s"' % locals())
def is_function_or_method(obj):
"""Check if an object is a function or method.
Args:
obj: The Python object in question.
Returns:
True if the object is a function or method.
"""
return inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj)
def is_interesting(entry):
"""Is this entry interesting?
``entry`` is an XML node representing one entry of the svn status
XML output. It looks like this::
<entry path="unchanged.txt">
<wc-status item="normal" revision="1" props="none">
<commit revision="1">
<author>mg</author>
<date>2015-02-06T07:52:38.163516Z</date>
</commit>
</wc-status>
</entry>
<entry path="added-but-not-committed.txt">
<wc-status item="added" revision="-1" props="none"></wc-status>
</entry>
<entry path="ext">
<wc-status item="external" props="none"></wc-status>
</entry>
<entry path="unknown.txt">
<wc-status props="none" item="unversioned"></wc-status>
</entry>
"""
if entry.get('path') == '.':
return False
status = entry.find('wc-status')
if status is None:
warning('svn status --xml parse error: <entry path="%s"> without'
' <wc-status>' % entry.get('path'))
return False
# For SVN externals we get two entries: one mentioning the
# existence of the external, and one about the status of the external.
if status.get('item') in ('unversioned', 'external'):
return False
return True
def _configure_manager(self):
"""
Creates a manager to handle the instances, and another
to handle flavors.
"""
self._manager = CloudDNSManager(self, resource_class=CloudDNSDomain,
response_key="domains", plural_response_key="domains",
uri_base="domains") | Creates a manager to handle the instances, and another
to handle flavors. |
def load_model_from_link(name, **overrides):
"""Load a model from a shortcut link, or directory in spaCy data path."""
path = get_data_path() / name / "__init__.py"
try:
cls = import_file(name, path)
except AttributeError:
raise IOError(Errors.E051.format(name=name))
return cls.load(**overrides)
def _get_mechanism(self, rup, coeffs):
"""
Compute fifth term of equation (1) on p. 1200:
``b6 * H``
"""
is_strike_slip = self.get_fault_type_dummy_variables(rup)
return coeffs['b6']*is_strike_slip
def logout_all(self,
command='exit',
note=None,
echo=None,
timeout=shutit_global.shutit_global_object.default_timeout,
nonewline=False,
loglevel=logging.DEBUG):
"""Logs the user out of all pexpect sessions within this ShutIt object.
@param command: Command to run to log out (default=exit)
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
for key in self.shutit_pexpect_sessions:
shutit_pexpect_session = self.shutit_pexpect_sessions[key]
shutit_pexpect_session.logout_all(ShutItSendSpec(shutit_pexpect_session,
send=command,
note=note,
timeout=timeout,
nonewline=nonewline,
loglevel=loglevel,
echo=echo))
return True
def compute_score(self):
"""Calculate the overall test score using the configuration."""
# LOGGER.info("Begin scoring")
cases = self.get_configured_tests() | set(self.result.cases)
scores = DataFrame({"score": 0.0, "max": 1.0},
index=sorted(cases))
self.result.setdefault("score", dict())
self.result["score"]["sections"] = list()
# Calculate the scores for each test individually.
for test, result in iteritems(self.result.cases):
# LOGGER.info("Calculate score for test: '%s'.", test)
# Test metric may be a dictionary for a parametrized test.
metric = result["metric"]
if hasattr(metric, "items"):
result["score"] = test_score = dict()
total = 0.0
for key, value in iteritems(metric):
value = 1.0 - value
total += value
test_score[key] = value
# For some reason there are parametrized tests without cases.
if len(metric) == 0:
metric = 0.0
else:
metric = total / len(metric)
else:
metric = 1.0 - metric
scores.at[test, "score"] = metric
scores.loc[test, :] *= self.config["weights"].get(test, 1.0)
score = 0.0
maximum = 0.0
# Calculate the scores for each section considering the individual test
# case scores.
for section_id, card in iteritems(
self.config['cards']['scored']['sections']
):
# LOGGER.info("Calculate score for section: '%s'.", section_id)
cases = card.get("cases", None)
if cases is None:
continue
card_score = scores.loc[cases, "score"].sum()
card_total = scores.loc[cases, "max"].sum()
# Format results nicely to work immediately with Vega Bar Chart.
section_score = {"section": section_id,
"score": card_score / card_total}
self.result["score"]["sections"].append(section_score)
# Calculate the final score for the entire model.
weight = card.get("weight", 1.0)
score += card_score * weight
maximum += card_total * weight
self.result["score"]["total_score"] = score / maximum | Calculate the overall test score using the configuration. |
def get_coverage(config: CoverageConfig) -> 'Coverage':
"""
Returns a Coverage instance.
:param config: Coverage configuration.
:return: Instance of Coverage.
"""
if config.type == C.COVERAGE_COUNT or config.type == C.COVERAGE_FERTILITY:
utils.check_condition(config.num_hidden == 1, "Count or fertility coverage requires coverage_num_hidden==1")
if config.type == C.GRU_TYPE:
return GRUCoverage(config.num_hidden, config.layer_normalization)
elif config.type in {C.TANH, C.SIGMOID, C.RELU, C.SOFT_RELU}:
return ActivationCoverage(config.num_hidden, config.type, config.layer_normalization)
elif config.type == C.COVERAGE_COUNT:
return CountCoverage()
elif config.type == C.COVERAGE_FERTILITY:
return FertilityCoverage(config.max_fertility)
else:
raise ValueError("Unknown coverage type %s" % config.type) | Returns a Coverage instance.
:param config: Coverage configuration.
:return: Instance of Coverage. |
def parse(self):
"""Parse the metadata.rb into a dict."""
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
depends = {}
for line in data:
if not len(line) == 2:
continue
key, value = line
if key == 'depends':
value = value.split(',')
lib = utils.ruby_strip(value[0])
detail = [utils.ruby_strip(j) for j in value[1:]]
depends[lib] = detail
datamap = {key: utils.ruby_strip(val) for key, val in data}
if depends:
datamap['depends'] = depends
self.seek(0)
return datamap
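# Illustrative round trip, assuming utils.ruby_strip removes the quoting:
# a metadata.rb containing
#   name 'mycook'
#   depends 'apt', '>= 2.0'
# parses to {'name': 'mycook', 'depends': {'apt': ['>= 2.0']}}.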
def replace_suffixes_1(self, word):
"""
Find the longest suffix among the ones specified
and perform the required action.
"""
length = len(word)
if word.endswith("sses"):
return word[:-2]
elif word.endswith("ied") or word.endswith("ies"):
word = word[:-3]
if len(word) == 1:
word += 'ie'
else:
word += 'i'
return word
# This ensures that words like conspicuous stem properly
elif word.endswith('us') or word.endswith('ss'):
return word
# From spec: 'delete if the preceding word part contains a vowel
# not immediately before the s (so gas and this retain the s,
# gaps and kiwis lose it)
elif word[length - 1] == 's':
for letter in word[:-2]:
if letter in self.vowels:
return word[:-1]
return word
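# Quick sanity checks for the suffix rules above (assuming self.vowels
# contains 'aeiou'): 'stresses' -> 'stress', 'ties' -> 'tie',
# 'cries' -> 'cri', 'gaps' -> 'gap', but 'gas' -> 'gas' and 'this' -> 'this'.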
def is_contradictory(self, other):
"""
Can these two strings coexist ?
"""
other = StringCell.coerce(other)
if self.value is None or other.value is None:
# None = empty, and won't contradict anything
return False
def sequence_in(s1, s2):
"""Does `s1` appear in sequence in `s2`?"""
return bool(re.search(".*".join(s1), s2))
return not sequence_in(self.value, other.value) and \
not sequence_in(other.value, self.value)
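# sequence_in builds a regex such as "a.*b.*c", so the test is order-
# preserving subsequence containment rather than plain substring search:
import re
assert re.search(".*".join("abc"), "xaybzc")   # 'abc' appears in sequence
assert not re.search(".*".join("abc"), "acb")  # order is broken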
def combine_proximals(*factory_list):
r"""Combine proximal operators into a diagonal product space operator.
This assumes the functional to be separable across variables in order to
make use of the separable sum property of proximal operators.
Parameters
----------
factory_list : sequence of callables
Proximal operator factories to be combined.
Returns
-------
diag_op : function
Returns a diagonal product space operator factory to be initialized
with the same step size parameter
Notes
-----
That two functionals :math:`F` and :math:`G` are separable across variables
means that :math:`F((x, y)) = F(x)` and :math:`G((x, y)) = G(y)`, and in
this case the proximal operator of the sum is given by
.. math::
\mathrm{prox}_{\sigma (F(x) + G(y))}(x, y) =
(\mathrm{prox}_{\sigma F}(x), \mathrm{prox}_{\sigma G}(y)).
"""
def diag_op_factory(sigma):
"""Diagonal matrix of operators.
Parameters
----------
sigma : positive float or sequence of positive floats
Step size parameter(s), if a sequence, the length must match
the length of the ``factory_list``.
Returns
-------
diag_op : `DiagonalOperator`
"""
if np.isscalar(sigma):
sigma = [sigma] * len(factory_list)
return DiagonalOperator(
*[factory(sigmai)
for sigmai, factory in zip(sigma, factory_list)])
return diag_op_factory
def lookup_facade(name, version):
"""
Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file.
"""
for _version in range(int(version), 0, -1):
try:
facade = getattr(CLIENTS[str(_version)], name)
return facade
except (KeyError, AttributeError):
continue
else:
raise ImportError("No supported version for facade: "
"{}".format(name)) | Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file. |
def get_ref_bedtool(ref_file, config, chrom=None):
"""Retrieve a pybedtool BedTool object with reference sizes from input reference.
"""
broad_runner = broad.runner_from_path("picard", config)
ref_dict = broad_runner.run_fn("picard_index_ref", ref_file)
ref_lines = []
with pysam.Samfile(ref_dict, "r") as ref_sam:
for sq in ref_sam.header["SQ"]:
if not chrom or sq["SN"] == chrom:
ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"]))
return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
def get_wrap_size_limit(self, output_size, conf_req=True, qop_req=C.GSS_C_QOP_DEFAULT):
"""
Calculates the maximum size of message that can be fed to :meth:`wrap` so that the size of
the resulting wrapped token (message plus wrapping overhead) is no more than a given
maximum output size.
:param output_size: The maximum output size (in bytes) of a wrapped token
:type output_size: int
:param conf_req: Whether to calculate the wrapping overhead for confidentiality protection
(if True) or just integrity protection (if False).
:type conf_req: bool
:returns: The maximum input size (in bytes) of message that can be passed to :meth:`wrap`
:rtype: int
"""
minor_status = ffi.new('OM_uint32[1]')
max_input_size = ffi.new('OM_uint32[1]')
retval = C.gss_wrap_size_limit(
minor_status,
self._ctx[0],
ffi.cast('int', conf_req),
ffi.cast('gss_qop_t', qop_req),
ffi.cast('OM_uint32', output_size),
max_input_size
)
if GSS_ERROR(retval):
if minor_status[0] and self.mech_type:
raise _exception_for_status(retval, minor_status[0], self.mech_type)
else:
raise _exception_for_status(retval, minor_status[0])
return max_input_size[0]
def get_index_nested(x, i):
"""
Description:
Returns the first index of the array (vector) x containing the value i.
Parameters:
x: one-dimensional array
i: search value
"""
for ind in range(len(x)):
if i == x[ind]:
return ind
return -1
def Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection):
"""
This function may not work reliably when there are nested calls. Therefore,
this function has been replaced by the L{Wow64DisableWow64FsRedirection}
and L{Wow64RevertWow64FsRedirection} functions.
@see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/aa365744(v=vs.85).aspx}
"""
_Wow64EnableWow64FsRedirection = windll.kernel32.Wow64EnableWow64FsRedirection
_Wow64EnableWow64FsRedirection.argtypes = [BOOLEAN]
_Wow64EnableWow64FsRedirection.restype = BOOLEAN
_Wow64EnableWow64FsRedirection.errcheck = RaiseIfZero
# Invoke the bound API; without this call the ctypes binding is configured
# but the redirection state is never actually changed.
return _Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection)
def input(self, _in, out, **kwargs):
"""Process individual translation file."""
language_code = _re_language_code.search(_in.read()).group(
'language_code'
)
_in.seek(0)  # move to the beginning after matching the language
catalog = read_po(_in)
out.write('gettextCatalog.setStrings("{0}", '.format(language_code))
out.write(json.dumps({
key: value.string for key, value in catalog._messages.items()
if key and value.string
}))
out.write(');')
def create_index_tuple(group_ids):
"""An helper function to create index tuples for fast lookup in HDF5Pump"""
max_group_id = np.max(group_ids)
start_idx_arr = np.full(max_group_id + 1, 0)
n_items_arr = np.full(max_group_id + 1, 0)
current_group_id = group_ids[0]
current_idx = 0
item_count = 0
for group_id in group_ids:
if group_id != current_group_id:
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
current_idx += item_count
item_count = 0
current_group_id = group_id
item_count += 1
else:
# for/else: after the loop finishes, record the final group's slice
start_idx_arr[current_group_id] = current_idx
n_items_arr[current_group_id] = item_count
return (start_idx_arr, n_items_arr)
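# Worked example: group_ids [0, 0, 1, 2, 2, 2] yields start_idx_arr
# [0, 2, 3] and n_items_arr [2, 1, 3]; group g's rows then live at
# start_idx_arr[g] : start_idx_arr[g] + n_items_arr[g].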
def make_present_participles(verbs):
"""Make the list of verbs into present participles
E.g.:
empower -> empowering
drive -> driving
"""
res = []
for verb in verbs:
parts = verb.split()
if parts[0].endswith("e"):
parts[0] = parts[0][:-1] + "ing"
else:
parts[0] = parts[0] + "ing"
res.append(" ".join(parts))
return res
def swipe_left(self, width: int = 1080, length: int = 1920) -> None:
'''Swipe left.'''
self.swipe(0.8*width, 0.5*length, 0.2*width, 0.5*length)
def get_printoptions():
"""Return the current print options.
Returns
-------
dict
Dictionary of current print options with keys
- short : bool
- xml : bool
- codestream : bool
For a full description of these options, see `set_printoptions`.
See also
--------
set_printoptions
"""
warnings.warn('Use get_option instead of get_printoptions.',
DeprecationWarning)
d = {}
for key in ['short', 'xml', 'codestream']:
d[key] = _options['print.' + key]
return d
def drop_zero_priors(self):
'''
Returns
-------
PriorFactory
'''
self.term_doc_mat = self.term_doc_mat.remove_terms(
self.priors[self.priors == 0].index
)
self._reindex_priors()
return self
def handle_stats(name, obj):
"""
Stats object handler.
:param name: Unused String
:param obj: GitPython Stats
:return: Dictionary of attributes.
"""
return {'total_deletions': obj.total['deletions'],
'total_insertions': obj.total['insertions'],
'total_lines': obj.total['lines'],
'total_files': obj.total['files'],
'changes': obj.files}
def draw_peaks_inverted(self, x, peaks, line_color):
"""Draw 2 inverted peaks at x"""
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
if self.previous_y and x < self.image_width - 1:
if y1 < y2:
self.draw.line((x, 0, x, y1), line_color)
self.draw.line((x, self.image_height, x, y2), line_color)
else:
self.draw.line((x, 0, x, y2), line_color)
self.draw.line((x, self.image_height, x, y1), line_color)
else:
self.draw.line((x, 0, x, self.image_height), line_color)
self.draw_anti_aliased_pixels(x, y1, y2, line_color)
self.previous_x, self.previous_y = x, y1
def load_from_db(cls, callback_etat=print, out=None):
"""Launch data fetching then load data received.
The method _load_remote_db should be overridden.
If out is given, the data is loaded into it instead of returning a new base object.
"""
dic = cls._load_remote_db(callback_etat)
callback_etat("Chargement...", 2, 3)
if out is None:
return cls(dic)
cls.__init__(out, datas=dic)
def public_pair_to_sec(public_pair, compressed=True):
"""Convert a public pair (a pair of bignums corresponding to a public key) to the
gross internal sec binary format used by OpenSSL."""
x_str = to_bytes_32(public_pair[0])
if compressed:
return int2byte((2 + (public_pair[1] & 1))) + x_str
y_str = to_bytes_32(public_pair[1])
return b'\4' + x_str + y_str
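# SEC layout for reference: compressed keys are 33 bytes -- a 0x02 (even y)
# or 0x03 (odd y) prefix followed by the 32-byte big-endian x coordinate;
# uncompressed keys are 65 bytes -- a 0x04 prefix followed by x then y.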
def put(self, key, value):
"""
>>> c = LRUCache()
>>> c.put(1, 'one')
>>> c.get(1)
'one'
>>> c.size()
1
>>> c.put(2, 'two')
>>> c.put(3, 'three')
>>> c.put(4, 'four')
>>> c.put(5, 'five')
>>> c.get(5)
'five'
>>> c.size()
5
"""
self._cache[key] = value
self._order.push(key)
self._size += 1
def unpackexe(exefile, destdir):
"""Unpack the given exefile into destdir, using 7z"""
nullfd = open(os.devnull, "w")
exefile = cygpath(os.path.abspath(exefile))
try:
check_call([SEVENZIP, 'x', exefile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking exe %s to %s", exefile, destdir)
raise
finally:
# close the devnull handle even when extraction fails
nullfd.close()
def j2(x):
""" A fast j2 defined in terms of other special functions """
to_return = 2./(x+1e-15)*j1(x) - j0(x)
to_return[x==0] = 0
return to_return
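# This is the Bessel recurrence J_{n+1}(x) = (2n/x) * J_n(x) - J_{n-1}(x)
# at n = 1; the 1e-15 guard and the x == 0 overwrite handle the origin.
# A quick sanity check, assuming j0/j1 come from scipy.special:
#   import numpy as np; from scipy.special import jn
#   x = np.linspace(0.1, 10.0, 50); assert np.allclose(j2(x), jn(2, x))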
def find_service_by_id(self, service_id):
"""
Get service for a given service_id.
:param service_id: Service id, str
:return: Service
"""
service_id_key = 'serviceDefinitionId'
service_id = str(service_id)
for service in self._services:
if service_id_key in service.values and str(
service.values[service_id_key]) == service_id:
return service
try:
# If service_id is int or can be converted to int then we couldn't find it
int(service_id)
return None
except ValueError:
pass
# try to find by type
return self.find_service_by_type(service_id)
def _write(self, cmd, *datas):
"""Helper function to simplify writing."""
cmd = Command(write=cmd)
cmd.write(self._transport, self._protocol, *datas)
def instance_query_movie_ids(self) -> List[str]:
"""Demonstrates showing tabular hinting of tab completion information"""
completions_with_desc = []
# Sort the movie id strings with a natural sort since they contain numbers
for movie_id in utils.natural_sort(self.MOVIE_DATABASE_IDS):
if movie_id in self.MOVIE_DATABASE:
movie_entry = self.MOVIE_DATABASE[movie_id]
completions_with_desc.append(argparse_completer.CompletionItem(movie_id, movie_entry['title']))
# Mark that we already sorted the matches
self.matches_sorted = True
return completions_with_desc
def do_execute(self):
"""
The actual execution of the actor.
:return: None if successful, otherwise error message
:rtype: str
"""
for s in self.resolve_option("strings"):
self._output.append(Token(s))
return None
def ReadPostingLists(self,
keywords,
start_time=FIRST_TIMESTAMP,
end_time=LAST_TIMESTAMP,
last_seen_map=None):
"""Finds all objects associated with any of the keywords.
Args:
keywords: A collection of keywords that we are interested in.
start_time: Only considers keywords added at or after this point in time.
end_time: Only considers keywords at or before this point in time.
last_seen_map: If present, is treated as a dict and populated to map pairs
(keyword, name) to the timestamp of the latest connection found.
Returns:
A dict mapping each keyword to a set of relevant names.
"""
return data_store.DB.IndexReadPostingLists(
self.urn, keywords, start_time, end_time, last_seen_map=last_seen_map)
def FinalizeTaskStorage(self, task):
"""Finalizes a processed task storage.
Moves the task storage file from its temporary directory to the processed
directory.
Args:
task (Task): task.
Raises:
IOError: if the storage type is not supported or
if the storage file cannot be renamed.
OSError: if the storage type is not supported or
if the storage file cannot be renamed.
"""
if self._storage_type != definitions.STORAGE_TYPE_SESSION:
raise IOError('Unsupported storage type.')
storage_file_path = self._GetTaskStorageFilePath(task)
processed_storage_file_path = self._GetProcessedStorageFilePath(task)
try:
os.rename(storage_file_path, processed_storage_file_path)
except OSError as exception:
raise IOError((
'Unable to rename task storage file: {0:s} with error: '
'{1!s}').format(storage_file_path, exception))
def get_group_name_nonetrick(self, group_name = None):
""" In all getter function in case of single payment group, group_name can be None """
groups = self.m["groups"]
if (len(groups) == 0):
raise Exception("Cannot find any groups in metadata")
if (not group_name):
if (len(groups) > 1):
raise Exception("We have more than one payment group in metadata, so group_name should be specified")
return groups[0]["group_name"]
return group_name
def _gcs_list(args, _):
""" List the buckets or the contents of a bucket.
This command is a bit different in that we allow wildchars in the bucket name and will list
the buckets that match.
"""
target = args['objects']
project = args['project']
if target is None:
return _gcs_list_buckets(project, '*') # List all buckets.
bucket_name, key = google.datalab.storage._bucket.parse_name(target)
if bucket_name is None:
raise Exception('Cannot list %s; not a valid bucket name' % target)
# If a target was specified, list keys inside it
if target:
if not re.search(r'\?|\*|\[', target):
# If no wild characters are present in the key string, append a '/*' suffix to show all keys
key = key.strip('/') + '/*' if key else '*'
if project:
# Only list if the bucket is in the project
for bucket in google.datalab.storage.Buckets(_make_context(project)):
if bucket.name == bucket_name:
break
else:
raise Exception('%s does not exist in project %s' % (target, project))
else:
bucket = google.datalab.storage.Bucket(bucket_name)
if bucket.exists():
return _gcs_list_keys(bucket, key)
else:
raise Exception('Bucket %s does not exist' % target)
else:
# Treat the bucket name as a pattern and show matches. We don't use bucket_name as that
# can strip off wildchars and so we need to strip off gs:// here.
return _gcs_list_buckets(project, target.strip('/')[5:])
def vector_poly_data(orig, vec):
""" Creates a vtkPolyData object composed of vectors """
# shape, dimension checking
if not isinstance(orig, np.ndarray):
orig = np.asarray(orig)
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec)
if orig.ndim != 2:
orig = orig.reshape((-1, 3))
elif orig.shape[1] != 3:
raise Exception('orig array must be 3D')
if vec.ndim != 2:
vec = vec.reshape((-1, 3))
elif vec.shape[1] != 3:
raise Exception('vec array must be 3D')
# Create vtk points and cells objects
vpts = vtk.vtkPoints()
vpts.SetData(numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
npts = orig.shape[0]
cells = np.hstack((np.ones((npts, 1), 'int'),
np.arange(npts).reshape((-1, 1))))
if cells.dtype != ctypes.c_int64 or not cells.flags.c_contiguous:  # convert on wrong dtype or non-contiguous layout
cells = np.ascontiguousarray(cells, ctypes.c_int64)
cells = np.reshape(cells, (2*npts))
vcells = vtk.vtkCellArray()
vcells.SetCells(npts, numpy_to_vtkIdTypeArray(cells, deep=True))
# Create vtkPolyData object
pdata = vtk.vtkPolyData()
pdata.SetPoints(vpts)
pdata.SetVerts(vcells)
# Add vectors to polydata
name = 'vectors'
vtkfloat = numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveVectors(name)
# Add magnitude of vectors to polydata
name = 'mag'
scalars = (vec * vec).sum(1)**0.5
vtkfloat = numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveScalars(name)
return vtki.PolyData(pdata)
def same_log10_order_of_magnitude(x, delta=0.1):
"""
Return true if range is approximately in same order of magnitude
For example these sequences are in the same order of magnitude:
- [1, 8, 5] # [1, 10)
- [35, 20, 80] # [10 100)
- [232, 730] # [100, 1000)
Parameters
----------
x : array-like
Values in base 10. Must be size 2 and
``rng[0] <= rng[1]``.
delta : float
Fuzz factor for approximation. It is multiplicative.
"""
dmin = np.log10(np.min(x)*(1-delta))
dmax = np.log10(np.max(x)*(1+delta))
return np.floor(dmin) == np.floor(dmax)
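# Worked example: x = [35, 80] with delta = 0.1 gives log10(31.5) ~ 1.498
# and log10(88) ~ 1.944; both floor to 1, so the range stays inside [10, 100).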
def parse_network_osm_query(data):
"""
Convert OSM query data to DataFrames of ways and way-nodes.
Parameters
----------
data : dict
Result of an OSM query.
Returns
-------
nodes, ways, waynodes : pandas.DataFrame
"""
if len(data['elements']) == 0:
raise RuntimeError('OSM query results contain no data.')
nodes = []
ways = []
waynodes = []
for e in data['elements']:
if e['type'] == 'node':
nodes.append(process_node(e))
elif e['type'] == 'way':
w, wn = process_way(e)
ways.append(w)
waynodes.extend(wn)
nodes = pd.DataFrame.from_records(nodes, index='id')
ways = pd.DataFrame.from_records(ways, index='id')
waynodes = pd.DataFrame.from_records(waynodes, index='way_id')
return (nodes, ways, waynodes)
def help(self, *args):
"""Get help for a remote interface method.
Examples
--------
help('ginga', `method`)
name of the method for which you want help
help('channel', `chname`, `method`)
name of the method in the channel for which you want help
Returns
-------
help : string
a help message
"""
if len(args) == 0:
return help_msg
which = args[0].lower()
if which == 'ginga':
method = args[1]
_method = getattr(self.fv, method)
return _method.__doc__
elif which == 'channel':
chname = args[1]
method = args[2]
chinfo = self.fv.get_channel(chname)
_method = getattr(chinfo.viewer, method)
return _method.__doc__
else:
return ("Please use 'help ginga <method>' or "
"'help channel <chname> <method>'") | Get help for a remote interface method.
Examples
--------
help('ginga', `method`)
name of the method for which you want help
help('channel', `chname`, `method`)
name of the method in the channel for which you want help
Returns
-------
help : string
a help message |
def last(iterable, default=_marker):
"""Return the last item of *iterable*, or *default* if *iterable* is
empty.
>>> last([0, 1, 2, 3])
3
>>> last([], 'some default')
'some default'
If *default* is not provided and there are no items in the iterable,
raise ``ValueError``.
"""
try:
try:
# Try to access the last item directly
return iterable[-1]
except (TypeError, AttributeError, KeyError):
# If not slice-able, iterate entirely using length-1 deque
return deque(iterable, maxlen=1)[0]
except IndexError: # If the iterable was empty
if default is _marker:
raise ValueError('last() was called on an empty iterable, and no '
'default value was provided.')
return default
def from_bytes_list(cls, function_descriptor_list):
"""Create a FunctionDescriptor instance from list of bytes.
This function is used to create the function descriptor from
backend data.
Args:
cls: Current class which is required argument for classmethod.
function_descriptor_list: list of bytes to represent the
function descriptor.
Returns:
The FunctionDescriptor instance created from the bytes list.
"""
assert isinstance(function_descriptor_list, list)
if len(function_descriptor_list) == 0:
# This is a function descriptor of driver task.
return FunctionDescriptor.for_driver_task()
elif (len(function_descriptor_list) == 3
or len(function_descriptor_list) == 4):
module_name = ensure_str(function_descriptor_list[0])
class_name = ensure_str(function_descriptor_list[1])
function_name = ensure_str(function_descriptor_list[2])
if len(function_descriptor_list) == 4:
return cls(module_name, function_name, class_name,
function_descriptor_list[3])
else:
return cls(module_name, function_name, class_name)
else:
raise Exception(
"Invalid input for FunctionDescriptor.from_bytes_list") | Create a FunctionDescriptor instance from list of bytes.
This function is used to create the function descriptor from
backend data.
Args:
cls: Current class which is required argument for classmethod.
function_descriptor_list: list of bytes to represent the
function descriptor.
Returns:
The FunctionDescriptor instance created from the bytes list. |
def reset(cls, newObjs):
'''
reset - Remove all stored data associated with this model (i.e. all objects of this type),
and then save all the provided objects in #newObjs , all in one atomic transaction.
Use this method to move from one complete set of objects to another, where any querying applications
will only see the complete before or complete after.
@param newObjs list<IndexedRedisModel objs> - A list of objects that will replace the current dataset
To just replace a specific subset of objects in a single transaction, you can do MyModel.saver.save(objs)
and just the objs in "objs" will be inserted/updated in one atomic step.
This method, on the other hand, will delete all previous objects and add the newly provided objects in a single atomic step,
and also reset the primary key ID generator
@return list<int> - The new primary keys associated with each object (same order as provided #newObjs list)
'''
conn = cls.objects._get_new_connection()
transaction = conn.pipeline()
transaction.eval("""
local matchingKeys = redis.call('KEYS', '%s*')
for _,key in ipairs(matchingKeys) do
redis.call('DEL', key)
end
""" %( ''.join([INDEXED_REDIS_PREFIX, cls.KEY_NAME, ':']), ), 0)
saver = IndexedRedisSave(cls)
nextID = 1
for newObj in newObjs:
saver.save(newObj, False, forceID=nextID, conn=transaction)
nextID += 1
transaction.set(saver._get_next_id_key(), nextID)
transaction.execute()
return list( range( 1, nextID, 1) )
def make_wsgi_app(registry=REGISTRY):
"""Create a WSGI app which serves the metrics from a registry."""
def prometheus_app(environ, start_response):
params = parse_qs(environ.get('QUERY_STRING', ''))
r = registry
encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT'))
if 'name[]' in params:
r = r.restricted_registry(params['name[]'])
output = encoder(r)
status = str('200 OK')
headers = [(str('Content-type'), content_type)]
start_response(status, headers)
return [output]
return prometheus_app
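# A minimal serving sketch using the stdlib WSGI reference server
# (the port is illustrative):
#   from wsgiref.simple_server import make_server
#   httpd = make_server('', 8000, make_wsgi_app())
#   httpd.serve_forever()  # metrics exposed on http://localhost:8000/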
def maskMatch(self, mask):
"""
Determine whether this sequence matches the given mask.
:param mask: string to match against. Ns in the mask are considered to
match anything in the sequence -- all other chars must
match exactly.
:return: True if the mask matches at all places, otherwise false
"""
if len(mask) > len(self.sequenceData):
return False
lim = len(mask)
for i in range(0, lim):
if mask[i] == "N" or mask[i] == "n":
continue
if mask[i] != self.sequenceData[i]:
return False
return True
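# Example: the mask "NNAC" matches any sequence whose third and fourth
# characters are "AC", so "TGACGGGG" matches while "TGATGGGG" does not.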
def initialize(self, emt_id, emt_pass):
"""Manual initialization of the interface attributes.
This is useful when the interface must be declared but initialized later
on with parsed configuration values.
Args:
emt_id (str): ID given by the server upon registration
emt_pass (str): Token given by the server upon registration
"""
self._emt_id = emt_id
self._emt_pass = emt_pass
# Initialize modules
self.bus = BusApi(self)
self.geo = GeoApi(self)
self.parking = ParkingApi(self)
def estimate(self, observations, weights):
"""
Maximum likelihood estimation of output model given the observations and weights
Parameters
----------
observations : [ ndarray(T_k) ] with K elements
A list of K observation trajectories, each having length T_k
weights : [ ndarray(T_k, N) ] with K elements
A list of K weight matrices, each having length T_k and containing the probability of any of the states in
the given time step
Examples
--------
Generate an observation model and samples from each state.
>>> import numpy as np
>>> ntrajectories = 3
>>> nobs = 1000
>>> B = np.array([[0.5,0.5],[0.1,0.9]])
>>> output_model = DiscreteOutputModel(B)
>>> from scipy import stats
>>> nobs = 1000
>>> obs = np.empty(nobs, dtype = object)
>>> weights = np.empty(nobs, dtype = object)
>>> gens = [stats.rv_discrete(values=(range(len(B[i])), B[i])) for i in range(B.shape[0])]
>>> obs = [gens[i].rvs(size=nobs) for i in range(B.shape[0])]
>>> weights = [np.zeros((nobs, B.shape[1])) for i in range(B.shape[0])]
>>> for i in range(B.shape[0]): weights[i][:, i] = 1.0
Update the observation model parameters by a maximum-likelihood fit.
>>> output_model.estimate(obs, weights)
"""
# sizes
N, M = self._output_probabilities.shape
K = len(observations)
# initialize output probability matrix
self._output_probabilities = np.zeros((N, M))
# update output probability matrix (numerator)
if self.__impl__ == self.__IMPL_C__:
for k in range(K):
dc.update_pout(observations[k], weights[k], self._output_probabilities, dtype=config.dtype)
elif self.__impl__ == self.__IMPL_PYTHON__:
for k in range(K):
for o in range(M):
times = np.where(observations[k] == o)[0]
self._output_probabilities[:, o] += np.sum(weights[k][times, :], axis=0)
else:
raise RuntimeError('Implementation '+str(self.__impl__)+' not available')
# normalize
self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[:, None]
def add_dirrecord(self, rec):
# type: (Union[dr.DirectoryRecord, udfmod.UDFFileEntry]) -> None
'''
A method to add a Directory Record to this Boot Catalog.
Parameters:
rec - The DirectoryRecord object to associate with this Boot Catalog.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Boot Catalog not yet initialized')
self.dirrecords.append(rec)
def p_matcharglist(p):
'''
matcharglist : matcharg
| matcharglist COMMA matcharg
'''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[0].append(p[3])
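# This is the standard PLY idiom for a left-recursive, comma-separated
# list: a lone matcharg reduces to [arg], and each "matcharglist COMMA
# matcharg" step appends, so "a, b, c" builds up [a, b, c].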
def check_string(value, min_length=None, max_length=None, pattern=None):
"""
verify that a string has a particular size and conforms
to a particular alphabet
>>> check_string(1)
False
>>> check_string(None)
False
>>> check_string(True)
False
>>> check_string({})
False
>>> check_string([])
False
>>> check_string((1,2))
False
>>> check_string('abc')
True
>>> check_string('')
True
>>> check_string(u'')
True
>>> check_string('abc', min_length=0, max_length=3)
True
>>> check_string('abc', min_length=3, max_length=3)
True
>>> check_string('abc', min_length=4, max_length=5)
False
>>> check_string('abc', min_length=0, max_length=2)
False
>>> check_string('abc', pattern='^abc$')
True
>>> check_string('abc', pattern='^abd$')
False
"""
if type(value) not in [str, unicode]:
return False
if min_length and len(value) < min_length:
return False
if max_length and len(value) > max_length:
return False
if pattern and not re.match(pattern, value):
return False
return True
def list_pools(**kwargs):
'''
List all storage pools.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_pools
'''
conn = __get_conn(**kwargs)
try:
return [pool.name() for pool in conn.listAllStoragePools()]
finally:
conn.close()
def get_extra_info(self, name, default=None):
"""
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport.
The values of ``name`` recognized directly by
``FramerProtocol`` are:
=============== ============================================
Value Description
=============== ============================================
send_framer The active framer for the send direction.
send_state The state for the send framer.
recv_framer The active framer for the receive direction.
recv_state The state for the receive framer.
recv_buf The current receive buffer.
recv_paused ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport The underlying transport.
=============== ============================================
:param name: A string representing the piece of
transport-specific information to get.
:param default: The value to return if the information doesn't
exist.
:returns: The requested data.
"""
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
return _time.mktime((self.year, self.month, self.day,
self.hour, self.minute, self.second,
-1, -1, -1)) + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def add_filter(self, name, query, color=None, item_order=None):
"""Create a new filter.
.. warning:: Requires Todoist premium.
:param name: The name of the filter.
:param query: The query to search for.
:param color: The color of the filter.
:param item_order: The filter's order in the filter list.
:return: The newly created filter.
:rtype: :class:`pytodoist.todoist.Filter`
>>> from pytodoist import todoist
>>> user = todoist.login('john.doe@gmail.com', 'password')
>>> overdue_filter = user.add_filter('Overdue', todoist.Query.OVERDUE)
"""
args = {
'name': name,
'query': query,
'color': color,
'item_order': item_order
}
_perform_command(self, 'filter_add', args)
return self.get_filter(name)
def export(self, name, columns, points):
"""Write the points in Riemann."""
for i in range(len(columns)):
# skip non-numeric points; Riemann metrics must be numbers
if not isinstance(points[i], Number):
continue
data = {'host': self.hostname, 'service': name + " " + columns[i], 'metric': points[i]}
logger.debug(data)
try:
self.client.send(data)
except Exception as e:
logger.error("Cannot export stats to Riemann (%s)" % e) | Write the points in Riemann. |
def askyesno(title=None, message=None, **options):
"""Original doc: Ask a question; return true if the answer is yes"""
return psidialogs.ask_yes_no(title=title, message=message)
def discover(email, credentials):
"""
Performs the autodiscover dance and returns the primary SMTP address of the account and a Protocol on success. The
autodiscover and EWS server might not be the same, so we use a different Protocol to do the autodiscover request,
and return a hopefully-cached Protocol to the callee.
"""
log.debug('Attempting autodiscover on email %s', email)
if not isinstance(credentials, Credentials):
raise ValueError("'credentials' %r must be a Credentials instance" % credentials)
domain = get_domain(email)
# We may be using multiple different credentials and changing our minds on TLS verification. This key combination
# should be safe.
autodiscover_key = (domain, credentials)
# Use lock to guard against multiple threads competing to cache information
log.debug('Waiting for _autodiscover_cache_lock')
with _autodiscover_cache_lock:
# Don't recurse while holding the lock!
log.debug('_autodiscover_cache_lock acquired')
if autodiscover_key in _autodiscover_cache:
protocol = _autodiscover_cache[autodiscover_key]
if not isinstance(protocol, AutodiscoverProtocol):
raise ValueError('Unexpected autodiscover cache contents: %s' % protocol)
log.debug('Cache hit for domain %s credentials %s: %s', domain, credentials, protocol.server)
try:
# This is the main path when the cache is primed
return _autodiscover_quick(credentials=credentials, email=email, protocol=protocol)
except AutoDiscoverFailed:
# Autodiscover no longer works with this domain. Clear cache and try again after releasing the lock
del _autodiscover_cache[autodiscover_key]
except AutoDiscoverRedirect as e:
log.debug('%s redirects to %s', email, e.redirect_email)
if email.lower() == e.redirect_email.lower():
raise_from(AutoDiscoverCircularRedirect('Redirect to same email address: %s' % email), None)
# Start over with the new email address after releasing the lock
email = e.redirect_email
else:
log.debug('Cache miss for domain %s credentials %s', domain, credentials)
log.debug('Cache contents: %s', _autodiscover_cache)
try:
# This eventually fills the cache in _autodiscover_hostname
return _try_autodiscover(hostname=domain, credentials=credentials, email=email)
except AutoDiscoverRedirect as e:
if email.lower() == e.redirect_email.lower():
raise_from(AutoDiscoverCircularRedirect('Redirect to same email address: %s' % email), None)
log.debug('%s redirects to %s', email, e.redirect_email)
# Start over with the new email address after releasing the lock
email = e.redirect_email
log.debug('Released autodiscover_cache_lock')
# We fell out of the with statement, so either cache was filled by someone else, or autodiscover redirected us to
# another email address. Start over after releasing the lock.
return discover(email=email, credentials=credentials) | Performs the autodiscover dance and returns the primary SMTP address of the account and a Protocol on success. The
autodiscover and EWS server might not be the same, so we use a different Protocol to do the autodiscover request,
and return a hopefully-cached Protocol to the caller. |
def alloc(self):
"""
Allocate an ID value and return it.
Raises:
ValueError: Out of capacity in ID pool.
"""
if not self._free:
self._expand()
id = self._free.pop()
self._used.add(id)
return id | Allocate an ID value and return it.
Raises:
ValueError: Out of capacity in ID pool. |
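``_free``, ``_used`` and ``_expand`` belong to the surrounding class and are not shown. A minimal self-contained sketch of the same allocation pattern, with an assumed capacity-bounded ``_expand``:

class IdPool:
    def __init__(self, capacity=256, block=16):
        self._capacity = capacity   # assumed hard limit for the pool
        self._block = block         # assumed growth step per expansion
        self._next = 0
        self._free = set()
        self._used = set()

    def _expand(self):
        if self._next >= self._capacity:
            raise ValueError('Out of capacity in ID pool.')
        top = min(self._next + self._block, self._capacity)
        self._free.update(range(self._next, top))
        self._next = top

    def alloc(self):
        if not self._free:
            self._expand()
        id = self._free.pop()
        self._used.add(id)
        return id

pool = IdPool()
print(pool.alloc(), pool.alloc())   # two distinct IDs; set pop order is arbitrary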
def _validate(self):
"""Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level.
"""
# A JP2 file must contain certain boxes. The 2nd box must be a file
# type box.
if not isinstance(self.box[1], FileTypeBox):
msg = "{filename} does not contain a valid File Type box."
msg = msg.format(filename=self.filename)
raise IOError(msg)
# A jp2-branded file cannot contain an "any ICC profile" color method
ftyp = self.box[1]
if ftyp.brand == 'jp2 ':
jp2h = [box for box in self.box if box.box_id == 'jp2h'][0]
colrs = [box for box in jp2h.box if box.box_id == 'colr']
for colr in colrs:
if colr.method not in (core.ENUMERATED_COLORSPACE,
core.RESTRICTED_ICC_PROFILE):
msg = ("Color Specification box method must specify "
"either an enumerated colorspace or a restricted "
"ICC profile if the file type box brand is 'jp2 '.")
warnings.warn(msg, UserWarning) | Validate the JPEG 2000 outermost superbox. These checks must be
done at a file level. |
def get_asset_lookup_session(self):
"""Gets the ``OsidSession`` associated with the asset lookup service.
return: (osid.repository.AssetLookupSession) - the new
``AssetLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_lookup()`` is ``true``.*
"""
if not self.supports_asset_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AssetLookupSession(runtime=self._runtime) | Gets the ``OsidSession`` associated with the asset lookup service.
return: (osid.repository.AssetLookupSession) - the new
``AssetLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_lookup()`` is ``true``.* |
def aggregate(self, search):
"""
Add aggregations representing the facets selected, including potential
filters.
"""
for f, facet in iteritems(self.facets):
agg = facet.get_aggregation()
agg_filter = MatchAll()
for field, filter in iteritems(self._filters):
if f == field:
continue
agg_filter &= filter
search.aggs.bucket(
'_filter_' + f,
'filter',
filter=agg_filter
).bucket(f, agg) | Add aggregations representing the facets selected, including potential
filters. |
def bbox2wktpolygon(bbox):
"""
Return OGC WKT Polygon of a simple bbox list of strings
"""
minx = float(bbox[0])
miny = float(bbox[1])
maxx = float(bbox[2])
maxy = float(bbox[3])
return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
% (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny) | Return OGC WKT Polygon of a simple bbox list of strings |
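For example, a whole-world bbox yields a five-point ring that closes on its first vertex:

>>> bbox2wktpolygon(['-180', '-90', '180', '90'])
'POLYGON((-180.00 -90.00, -180.00 90.00, 180.00 90.00, 180.00 -90.00, -180.00 -90.00))'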
def on(message):
'''Decorator that registers a class method as a callback for a message.'''
def decorator(function):
try:
function._callback_messages.append(message)
except AttributeError:
function._callback_messages = [message]
return function
return decorator | Decorator that registers a class method as a callback for a message. |
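Since the decorator only tags the function with a ``_callback_messages`` list, it stacks cleanly; the dispatch loop below is an assumption for illustration, not part of the source:

class Handler:
    @on('connect')
    @on('reconnect')
    def handle_conn(self, payload):
        print('link up:', payload)

# Decorators apply bottom-up, so 'reconnect' was appended first:
print(Handler.handle_conn._callback_messages)   # ['reconnect', 'connect']

# A registry-driven dispatcher could route a message like this:
handler, message = Handler(), 'connect'
for name in dir(Handler):
    method = getattr(Handler, name)
    if message in getattr(method, '_callback_messages', ()):
        method(handler, {'session': 42})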
def list_logtail_config(self, project_name, offset=0, size=100):
""" list logtail config name in a project
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type offset: int
:param offset: the offset of all config names
:type size: int
:param size: the max return names count, -1 means all
:return: ListLogtailConfigResponse
:raise: LogException
"""
# need to use extended method to get more
if int(size) == -1 or int(size) > MAX_LIST_PAGING_SIZE:
return list_more(self.list_logtail_config, int(offset), int(size), MAX_LIST_PAGING_SIZE, project_name)
headers = {}
params = {}
resource = "/configs"
params['offset'] = str(offset)
params['size'] = str(size)
(resp, header) = self._send("GET", project_name, None, resource, params, headers)
return ListLogtailConfigResponse(resp, header) | list logtail config name in a project
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type offset: int
:param offset: the offset of all config names
:type size: int
:param size: the max return names count, -1 means all
:return: ListLogtailConfigResponse
:raise: LogException |
def lv_unpack(txt):
"""
Deserializes a string of the length:value format
:param txt: The input string
:return: a list of values
"""
txt = txt.strip()
res = []
while txt:
l, v = txt.split(':', 1)
res.append(v[:int(l)])
txt = v[int(l):]
return res | Deserializes a string of the length:value format
:param txt: The input string
:return: a list of values |
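Because every element carries its own length prefix, values containing colons survive the round trip:

>>> lv_unpack('3:abc4:defg')
['abc', 'defg']
>>> lv_unpack('5:ab:cd2:ef')
['ab:cd', 'ef']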
def conditional_entropy(X, Y, base=2):
"""Calculates the conditional entropy, H(X|Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the conditional entropy
Y: array-like (# samples)
An array of values for which to compute the conditional entropy
base: integer (default: 2)
The base in which to calculate conditional entropy
Returns
----------
conditional_entropy: float
The conditional entropy calculated according to the equation H(X|Y) = H(X,Y) - H(Y)
"""
return joint_entropy(X, Y, base=base) - entropy(Y, base=base) | Calculates the conditional entropy, H(X|Y), in the given base
Parameters
----------
X: array-like (# samples)
An array of values for which to compute the conditional entropy
Y: array-like (# samples)
An array of values for which to compute the conditional entropy
base: integer (default: 2)
The base in which to calculate conditional entropy
Returns
----------
conditional_entropy: float
The conditional entropy calculated according to the equation H(X|Y) = H(X,Y) - H(Y) |
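``entropy`` and ``joint_entropy`` are referenced but not shown; a minimal sketch of compatible helpers, assuming empirical (plug-in) estimates over hashable samples:

from collections import Counter
import numpy as np

def entropy(X, base=2):
    # Empirical entropy: H(X) = -sum_x p(x) * log_base p(x)
    counts = np.array(list(Counter(X).values()), dtype=float)
    p = counts / counts.sum()
    return float(-(p * np.log(p)).sum() / np.log(base))

def joint_entropy(X, Y, base=2):
    # Treat each (x, y) pair as a single symbol and reuse entropy()
    return entropy(list(zip(X, Y)), base=base)

X, Y = [0, 0, 1, 1], [0, 1, 0, 1]
print(joint_entropy(X, Y) - entropy(Y))   # H(X|Y) = 1.0 bit: here Y tells us nothing about X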
def get_mobilenet(multiplier, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""MobileNet model from the
`"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
<https://arxiv.org/abs/1704.04861>`_ paper.
Parameters
----------
multiplier : float
The width multiplier for controlling the model size. Only multipliers that are no
less than 0.25 are supported. The actual number of channels is equal to the original
channel size multiplied by this multiplier.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = MobileNet(multiplier, **kwargs)
if pretrained:
from ..model_store import get_model_file
version_suffix = '{0:.2f}'.format(multiplier)
if version_suffix in ('1.00', '0.50'):
version_suffix = version_suffix[:-1]
net.load_parameters(
get_model_file('mobilenet%s' % version_suffix, root=root), ctx=ctx)
return net | r"""MobileNet model from the
`"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
<https://arxiv.org/abs/1704.04861>`_ paper.
Parameters
----------
multiplier : float
The width multiplier for controlling the model size. Only multipliers that are no
less than 0.25 are supported. The actual number of channels is equal to the original
channel size multiplied by this multiplier.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters. |
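A usage sketch assuming the usual Gluon workflow and the default 1000-class ImageNet head:

import mxnet as mx

net = get_mobilenet(multiplier=0.25)            # smallest supported width
net.initialize(ctx=mx.cpu())
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))
print(net(x).shape)                             # (1, 1000) with the default head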
def parse_date(self, item, field_name, source_name):
"""
Converts a date given in the format "Thu 03".
As only the day is provided, tries to find the best match
based on the current date, assuming that all dates lie in
the past.
"""
# Get the current date
now = datetime.now().date()
# Get the date from the source
val = self.get_value(item, source_name)
week_day, day = val.split()
day = int(day)
# If the current day of month is less than the item's day,
# go back one month
if now.day < day:
if now.month == 1:
now = now.replace(month=12, year=now.year-1)
else:
now = now.replace(month=now.month-1)
# Finally, replace the source day in the current date
# and return
now = now.replace(day=day)
return now | Converts a date given in the format "Thu 03".
As only the day is provided, tries to find the best match
based on the current date, assuming that all dates lie in
the past. |
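The month-rollback rule is easiest to see in isolation. ``best_past_date`` below is a hypothetical standalone helper, not part of the source; like the original, it can raise if the day does not exist in the previous month:

from datetime import date

def best_past_date(val, today):
    # Same rollback rule as parse_date, minus the item plumbing.
    _, day = val.split()
    day = int(day)
    if today.day < day:
        if today.month == 1:
            today = today.replace(month=12, year=today.year - 1)
        else:
            today = today.replace(month=today.month - 1)
    return today.replace(day=day)

print(best_past_date('Thu 03', date(2024, 5, 10)))   # 2024-05-03 (same month)
print(best_past_date('Sat 25', date(2024, 5, 10)))   # 2024-04-25 (rolled back)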
def _ws_on_open(self, ws: websocket.WebSocketApp):
"""Callback for sending the initial authentication data
This "payload" contains the required data to authenticate this websocket
client as a suitable bot connection to the Discord websocket.
Args:
ws: websocket connection
"""
payload = {
'op': WebSocketEvent.IDENTIFY.value,
'd': {
'token': self.token,
'properties': {
'$os': sys.platform,
'$browser': 'Pycord',
'$device': 'Pycord',
'$referrer': '',
'$referring_domain': ''
},
'compress': True,
'large_threshold': 250
}
}
self.logger.debug('Sending identify payload')
ws.send(json.dumps(payload))
self.connected = True | Callback for sending the initial authentication data
This "payload" contains the required data to authenticate this websocket
client as a suitable bot connection to the Discord websocket.
Args:
ws: websocket connection |
def _write_model_stats(self, iteration:int)->None:
"Writes gradient statistics to Tensorboard."
# We don't want to write stats when model is not iterated on and hence has zeroed out gradients
gen_mode = self.learn.gan_trainer.gen_mode
if gen_mode and not self.gen_stats_updated: self._write_gen_model_stats(iteration=iteration)
if not gen_mode and not self.crit_stats_updated: self._write_critic_model_stats(iteration=iteration) | Writes gradient statistics to Tensorboard. |
def get_last(self, table=None):
"""Just the last entry."""
if table is None: table = self.main_table
query = 'SELECT * FROM "%s" ORDER BY ROWID DESC LIMIT 1;' % table
return self.own_cursor.execute(query).fetchone() | Just the last entry. |
def sort_segment_points(Aps, Bps):
"""Takes two line segments and sorts all their points,
so that they form a continuous path
Args:
Aps: Array of tracktotrip.Point
Bps: Array of tracktotrip.Point
Returns:
Array with points ordered
"""
mid = []
j = 0
mid.append(Aps[0])
for i in range(len(Aps)-1):
dist = distance_tt_point(Aps[i], Aps[i+1])
for m in range(j, len(Bps)):
distm = distance_tt_point(Aps[i], Bps[m])
if dist > distm:
direction = dot(normalize(line(Aps[i].gen2arr(), Aps[i+1].gen2arr())), normalize(Bps[m].gen2arr()))
if direction > 0:
j = m + 1
mid.append(Bps[m])
break
mid.append(Aps[i+1])
for m in range(j, len(Bps)):
mid.append(Bps[m])
return mid | Takes two line segments and sorts all their points,
so that they form a continuous path
Args:
Aps: Array of tracktotrip.Point
Bps: Array of tracktotrip.Point
Returns:
Array with points ordered |
def query_statements(self, query):
"""Query the LRS for statements with specified parameters
:param query: Dictionary of query parameters and their values
:type query: dict
:return: LRS Response object with the returned StatementsResult object as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
.. note::
Optional query parameters are\n
**statementId:** (*str*) ID of the Statement to fetch
**voidedStatementId:** (*str*) ID of the voided Statement to fetch
**agent:** (*Agent* | *Group*) Filter to return Statements for which the
specified Agent or Group is the Actor
**verb:** (*Verb id IRI*) Filter to return Statements matching the verb id
**activity:** (*Activity id IRI*) Filter to return Statements for which the
specified Activity is the Object
**registration:** (*UUID*) Filter to return Statements matching the specified registration ID
**related_activities:** (*bool*) Include Statements for which the Object,
Context Activities or any Sub-Statement
properties match the specified Activity
**related_agents:** (*bool*) Include Statements for which the Actor, Object,
Authority, Instructor, Team, or any Sub-Statement properties match the specified Agent
**since:** (*datetime*) Filter to return Statements stored since the specified datetime
**until:** (*datetime*) Filter to return Statements stored at or before the specified datetime
**limit:** (*positive int*) Allow <limit> Statements to be returned. 0 indicates the
maximum supported by the LRS
**format:** (*str* {"ids"|"exact"|"canonical"}) Manipulates how the LRS handles
importing and returning the statements
**attachments:** (*bool*) If true, the LRS will use multipart responses and include
all attachment data per Statement returned.
Otherwise, application/json is used and no attachment information will be returned
**ascending:** (*bool*) If true, the LRS will return results in ascending order of
stored time (oldest first)
"""
params = {}
param_keys = [
"registration",
"since",
"until",
"limit",
"ascending",
"related_activities",
"related_agents",
"format",
"attachments",
]
for k, v in query.items():
if v is not None:
if k == "agent":
params[k] = v.to_json(self.version)
elif k == "verb" or k == "activity":
params[k] = v.id
elif k in param_keys:
params[k] = v
request = HTTPRequest(
method="GET",
resource="statements"
)
request.query_params = params
lrs_response = self._send_request(request)
if lrs_response.success:
lrs_response.content = StatementsResult.from_json(lrs_response.data)
return lrs_response | Query the LRS for statements with specified parameters
:param query: Dictionary of query parameters and their values
:type query: dict
:return: LRS Response object with the returned StatementsResult object as content
:rtype: :class:`tincan.lrs_response.LRSResponse`
.. note::
Optional query parameters are\n
**statementId:** (*str*) ID of the Statement to fetch
**voidedStatementId:** (*str*) ID of the voided Statement to fetch
**agent:** (*Agent* | *Group*) Filter to return Statements for which the
specified Agent or Group is the Actor
**verb:** (*Verb id IRI*) Filter to return Statements matching the verb id
**activity:** (*Activity id IRI*) Filter to return Statements for which the
specified Activity is the Object
**registration:** (*UUID*) Filter to return Statements matching the specified registration ID
**related_activities:** (*bool*) Include Statements for which the Object,
Context Activities or any Sub-Statement
properties match the specified Activity
**related_agents:** (*bool*) Include Statements for which the Actor, Object,
Authority, Instructor, Team, or any Sub-Statement properties match the specified Agent
**since:** (*datetime*) Filter to return Statements stored since the specified datetime
**until:** (*datetime*) Filter to return Statements stored at or before the specified datetime
**limit:** (*positive int*) Allow <limit> Statements to be returned. 0 indicates the
maximum supported by the LRS
**format:** (*str* {"ids"|"exact"|"canonical"}) Manipulates how the LRS handles
importing and returning the statements
**attachments:** (*bool*) If true, the LRS will use multipart responses and include
all attachment data per Statement returned.
Otherwise, application/json is used and no attachment information will be returned
**ascending:** (*bool*) If true, the LRS will return results in ascending order of
stored time (oldest first) |
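A hedged usage sketch; ``lrs`` and ``completed_verb`` are assumed to exist (``completed_verb`` being any object with an ``.id`` IRI, as the method expects for ``verb``):

from datetime import datetime, timedelta

query = {
    'verb': completed_verb,                         # assumed: has an .id IRI
    'since': datetime.utcnow() - timedelta(days=7),
    'limit': 25,
    'ascending': True,
}
response = lrs.query_statements(query)              # lrs: assumed RemoteLRS-like client
if response.success:
    for statement in response.content.statements:
        print(statement.id)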
def check_state(self):
"""Tracks differences in the device state."""
if self.dpad:
x_state, y_state = self.handle_dpad()
else:
x_state, y_state = self.handle_abs()
# pylint: disable=no-member
new_state = set((
x_state,
y_state,
('Key', 0x130, int(self.microbit.button_a.is_pressed())),
('Key', 0x131, int(self.microbit.button_b.is_pressed())),
('Key', 0x13a, int(self.microbit.pin0.is_touched())),
('Key', 0x133, int(self.microbit.pin1.is_touched())),
('Key', 0x134, int(self.microbit.pin2.is_touched())),
))
events = new_state - self.state
self.state = new_state
return events | Tracks differences in the device state. |
def text(self):
"""Linearize the bib source according to the rules of the unified style.
Book:
author. year. booktitle. (series, volume.) address: publisher.
Article:
author. year. title. journal volume(issue). pages.
Incollection:
author. year. title. In editor (ed.), booktitle, pages. address: publisher.
.. seealso::
http://celxj.org/downloads/UnifiedStyleSheet.pdf
https://github.com/citation-style-language/styles/blob/master/\
unified-style-linguistics.csl
"""
genre = getattr(self.genre, 'value', self.genre)
pages_at_end = genre in (
'book',
'phdthesis',
'mastersthesis',
'misc',
'techreport')
thesis = genre in ('phdthesis', 'mastersthesis')
if self.get('editor'):
editors = self['editor']
affix = 'eds' if ' and ' in editors or '&' in editors else 'ed'
editors = " %s (%s.)" % (editors, affix)
else:
editors = None
res = [self.get('author', editors), self.get('year', 'n.d')]
if genre == 'book':
res.append(self.get_with_translation('booktitle') or
self.get_with_translation('title'))
series = ', '.join(filter(None, [self.get('series'), self.get('volume')]))
if series:
res.append('(%s.)' % series)
elif genre == 'misc':
# in case of misc records, we use the note field in case a title is missing.
res.append(self.get_with_translation('title') or self.get('note'))
else:
res.append(self.get_with_translation('title'))
if genre == 'article':
atom = ' '.join(filter(None, [self.get('journal'), self.get('volume')]))
if self.get('issue'):
atom += '(%s)' % self['issue']
res.append(atom)
res.append(self.get('pages'))
elif genre == 'incollection' or genre == 'inproceedings':
prefix = 'In'
atom = ''
if editors:
atom += editors
if self.get('booktitle'):
if atom:
atom += ','
atom += " %s" % self.get_with_translation('booktitle')
if self.get('pages'):
atom += ", %s" % self['pages']
res.append(prefix + atom)
else:
# check for author to make sure we haven't included the editors yet.
if editors and self.get('author'):
res.append("In %s" % editors)
for attr in [
'journal',
'volume' if genre != 'book' else None,
]:
if attr and self.get(attr):
res.append(self.get(attr))
if self.get('issue'):
res.append("(%s)" % self['issue'])
if not pages_at_end and self.get('pages'):
res.append(self['pages'])
if self.get('publisher'):
res.append(": ".join(filter(None, [self.get('address'), self['publisher']])))
else:
if genre == 'misc' and self.get('howpublished'):
res.append(self.get('howpublished'))
if not thesis and pages_at_end and self.get('pages'):
res.append(self['pages'] + 'pp')
note = self.get('note') or self._genre_note.get(genre)
if note and note not in res:
if thesis:
joiner = ','
if self.get('school'):
note += '{0} {1}'.format(joiner, self.get('school'))
joiner = ';'
if self.get('pages'):
note += '{0} {1}pp.'.format(joiner, self.get('pages'))
res.append('(%s)' % note)
return ' '.join(
x if x.endswith(('.', '.)')) else '%s.' % x for x in res if x) | Linearize the bib source according to the rules of the unified style.
Book:
author. year. booktitle. (series, volume.) address: publisher.
Article:
author. year. title. journal volume(issue). pages.
Incollection:
author. year. title. In editor (ed.), booktitle, pages. address: publisher.
.. seealso::
http://celxj.org/downloads/UnifiedStyleSheet.pdf
https://github.com/citation-style-language/styles/blob/master/\
unified-style-linguistics.csl |
def register_method(self, func):
"""
Register a function to be available as RPC method.
The given function will be inspected to find external_name, protocol and entry_point values set by the decorator
@rpc_method.
:param func: A function previously decorated using @rpc_method
:return: The name of registered method
"""
if not getattr(func, 'modernrpc_enabled', False):
raise ImproperlyConfigured('Error: trying to register {} as RPC method, but it has not been decorated.'
.format(func.__name__))
# Define the external name of the function
name = getattr(func, 'modernrpc_name', func.__name__)
logger.debug('Register RPC method "{}"'.format(name))
if name.startswith('rpc.'):
raise ImproperlyConfigured('According to RPC standard, method names starting with "rpc." are reserved for '
'system extensions and must not be used. See '
'http://www.jsonrpc.org/specification#extensions for more information.')
# Encapsulate the function in a RPCMethod object
method = RPCMethod(func)
# Ensure method names are unique in the registry
existing_method = self.get_method(method.name, ALL, ALL)
if existing_method is not None:
# Trying to register many times the same function is OK, because if a method is decorated
# with @rpc_method(), it could be imported in different places of the code
if method == existing_method:
return method.name
# But if we try to use the same name to register 2 different methods, we
# must inform the developer there is an error in the code
else:
raise ImproperlyConfigured("A RPC method with name {} has already been registered".format(method.name))
# Store the method
self._registry[method.name] = method
logger.debug('Method registered. len(registry): {}'.format(len(self._registry)))
return method.name | Register a function to be available as RPC method.
The given function will be inspected to find external_name, protocol and entry_point values set by the decorator
@rpc_method.
:param func: A function previously decorated using @rpc_method
:return: The name of registered method |
def dump_code_line(disassembly_line, bShowAddress = True,
bShowDump = True,
bLowercase = True,
dwDumpWidth = None,
dwCodeWidth = None,
bits = None):
"""
Dump a single line of code. To dump a block of code use L{dump_code}.
@type disassembly_line: tuple( int, int, str, str )
@param disassembly_line: Single item of the list returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type bShowAddress: bool
@param bShowAddress: (Optional) If C{True} show the memory address.
@type bShowDump: bool
@param bShowDump: (Optional) If C{True} show the hexadecimal dump.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type dwDumpWidth: int or None
@param dwDumpWidth: (Optional) Width in characters of the hex dump.
@type dwCodeWidth: int or None
@param dwCodeWidth: (Optional) Width in characters of the code.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if bits is None:
address_size = HexDump.address_size
else:
address_size = bits / 4
(addr, size, code, dump) = disassembly_line
dump = dump.replace(' ', '')
result = list()
fmt = ''
if bShowAddress:
result.append( HexDump.address(addr, bits) )
fmt += '%%%ds:' % address_size
if bShowDump:
result.append(dump)
if dwDumpWidth:
fmt += ' %%-%ds' % dwDumpWidth
else:
fmt += ' %s'
if bLowercase:
code = code.lower()
result.append(code)
if dwCodeWidth:
fmt += ' %%-%ds' % dwCodeWidth
else:
fmt += ' %s'
return fmt % tuple(result) | Dump a single line of code. To dump a block of code use L{dump_code}.
@type disassembly_line: tuple( int, int, str, str )
@param disassembly_line: Single item of the list returned by
L{Process.disassemble} or L{Thread.disassemble_around_pc}.
@type bShowAddress: bool
@param bShowAddress: (Optional) If C{True} show the memory address.
@type bShowDump: bool
@param bShowDump: (Optional) If C{True} show the hexadecimal dump.
@type bLowercase: bool
@param bLowercase: (Optional) If C{True} convert the code to lowercase.
@type dwDumpWidth: int or None
@param dwDumpWidth: (Optional) Width in characters of the hex dump.
@type dwCodeWidth: int or None
@param dwCodeWidth: (Optional) Width in characters of the code.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging. |
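A hedged example; assuming ``HexDump.address`` renders a 32-bit address as eight hex digits, one disassembly tuple formats roughly as follows:

line = (0x401000, 2, 'JMP EAX', 'FF E0')
print(dump_code_line(line, bits=32))
# -> '00401000: FFE0 jmp eax'  (spaces are stripped from the dump,
#                               and the code is lowercased by default)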
def handle_startendtag(self, tag, attrs):
"""Function called for empty tags (e.g. <br />)"""
if tag.lower() in self.allowed_tag_whitelist:
self.result += '<' + tag
for (attr, value) in attrs:
if attr.lower() in self.allowed_attribute_whitelist:
self.result += ' %s="%s"' % \
(attr, self.handle_attribute_value(value))
self.result += ' />'
else:
if self.render_unallowed_tags:
self.result += '<' + cgi.escape(tag)
for (attr, value) in attrs:
self.result += ' %s="%s"' % \
(attr, cgi.escape(value, True))
self.result += ' />' | Function called for empty tags (e.g. <br />) |
def setRecord(self, record):
"""
Sets the record instance linked with this widget.
:param record | <orb.Table>
"""
self._record = record
if record is not None:
self.loadValues(record.recordValues(autoInflate=True))
else:
self.loadValues({}) | Sets the record instance linked with this widget.
:param record | <orb.Table> |
def layer_width(layer):
'''get layer width.
'''
if is_layer(layer, "Dense"):
return layer.units
if is_layer(layer, "Conv"):
return layer.filters
raise TypeError("The layer should be either Dense or Conv layer.") | get layer width. |
def update(self):
"""Update processes stats using the input method."""
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local':
# Update stats using the standard system lib
# Note: Update is done in the processcount plugin
# Just return the processes list
stats = glances_processes.getlist()
elif self.input_method == 'snmp':
# No SNMP grab for processes
pass
# Update the stats
self.stats = stats
# Get the max values (dict)
# Use Deep copy to avoid change between update and display
self.max_values = copy.deepcopy(glances_processes.max_values())
return self.stats | Update processes stats using the input method. |
def get_cli_version():
"""
Get the CLI version. If a VERSION file exists, use the version
it contains; otherwise fall back to :meth:`.get_setup_version`
:return: the CLI version
:rtype: str
"""
directory = os.path.dirname(os.path.abspath(__file__))
version_path = os.path.join(directory, 'VERSION')
if os.path.exists(version_path):
with open(version_path) as f:
ver = f.read()
return ver
return get_setup_version() | Get the CLI version. If a VERSION file exists, use the version
it contains; otherwise fall back to :meth:`.get_setup_version`
:return: the CLI version
:rtype: str |
def describe(lcdict, returndesc=False, offsetwith=None):
'''This describes the light curve object and columns present.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
If True, returns the description string as an str instead of just
printing it to stdout.
offsetwith : str
This is a character to offset the output description lines by. This is
useful to add comment characters like '#' to the output description
lines.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing.
'''
# transparently read LCC CSV format description
if 'lcformat' in lcdict and 'lcc-csv' in lcdict['lcformat'].lower():
return describe_lcc_csv(lcdict, returndesc=returndesc)
# figure out the columndefs part of the header string
columndefs = []
for colind, column in enumerate(lcdict['columns']):
if '_' in column:
colkey, colap = column.split('_')
coldesc = COLUMNDEFS[colkey][0] % colap
else:
coldesc = COLUMNDEFS[column][0]
columndefstr = '%03i - %s - %s' % (colind,
column,
coldesc)
columndefs.append(columndefstr)
columndefs = '\n'.join(columndefs)
# figure out the filterdefs
filterdefs = []
for row in lcdict['filters']:
filterid, filtername, filterdesc = row
filterdefstr = '%s - %s - %s' % (filterid,
filtername,
filterdesc)
filterdefs.append(filterdefstr)
filterdefs = '\n'.join(filterdefs)
# figure out the apertures
aperturedefs = []
for key in sorted(lcdict['lcapertures'].keys()):
aperturedefstr = '%s - %.2f px' % (key, lcdict['lcapertures'][key])
aperturedefs.append(aperturedefstr)
aperturedefs = '\n'.join(aperturedefs)
# now fill in the description
description = DESCTEMPLATE.format(
objectid=lcdict['objectid'],
hatid=lcdict['objectinfo']['hatid'],
twomassid=lcdict['objectinfo']['twomassid'].strip(),
ra=lcdict['objectinfo']['ra'],
decl=lcdict['objectinfo']['decl'],
pmra=lcdict['objectinfo']['pmra'],
pmra_err=lcdict['objectinfo']['pmra_err'],
pmdecl=lcdict['objectinfo']['pmdecl'],
pmdecl_err=lcdict['objectinfo']['pmdecl_err'],
jmag=lcdict['objectinfo']['jmag'],
hmag=lcdict['objectinfo']['hmag'],
kmag=lcdict['objectinfo']['kmag'],
bmag=lcdict['objectinfo']['bmag'],
vmag=lcdict['objectinfo']['vmag'],
sdssg=lcdict['objectinfo']['sdssg'],
sdssr=lcdict['objectinfo']['sdssr'],
sdssi=lcdict['objectinfo']['sdssi'],
ndet=lcdict['objectinfo']['ndet'],
lcsortcol=lcdict['lcsortcol'],
lcbestaperture=json.dumps(lcdict['lcbestaperture'],ensure_ascii=True),
network=lcdict['objectinfo']['network'],
stations=lcdict['objectinfo']['stations'],
lastupdated=lcdict['lastupdated'],
datarelease=lcdict['datarelease'],
lcversion=lcdict['lcversion'],
lcserver=lcdict['lcserver'],
comment=lcdict['comment'],
lcfiltersql=(lcdict['lcfiltersql'] if 'lcfiltersql' in lcdict else ''),
lcnormcols=(lcdict['lcnormcols'] if 'lcnormcols' in lcdict else ''),
filterdefs=filterdefs,
columndefs=columndefs,
aperturedefs=aperturedefs
)
if offsetwith is not None:
description = textwrap.indent(
description,
'%s ' % offsetwith,
lambda line: True
)
print(description)
if returndesc:
return description | This describes the light curve object and columns present.
Parameters
----------
lcdict : dict
The input lcdict to parse for column and metadata info.
returndesc : bool
If True, returns the description string as an str instead of just
printing it to stdout.
offsetwith : str
This is a character to offset the output description lines by. This is
useful to add comment characters like '#' to the output description
lines.
Returns
-------
str or None
If returndesc is True, returns the description lines as a str, otherwise
returns nothing. |
def add_version(self, project, version, egg):
"""
Adds a new project egg to the Scrapyd service. First class, maps to
Scrapyd's add version endpoint.
"""
url = self._build_url(constants.ADD_VERSION_ENDPOINT)
data = {
'project': project,
'version': version
}
files = {
'egg': egg
}
json = self.client.post(url, data=data, files=files,
timeout=self.timeout)
return json['spiders'] | Adds a new project egg to the Scrapyd service. First class, maps to
Scrapyd's add version endpoint. |
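A usage sketch, assuming a configured client instance and a previously built project egg:

with open('myproject-1.0-py2.7.egg', 'rb') as egg:
    spider_count = client.add_version('myproject', '1.0', egg)
print(spider_count)   # Scrapyd reports the number of spiders found in the egg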
def dot_product_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
make_image_summary=True,
save_weights_to=None,
dropout_broadcast_dims=None,
activation_dtype=None,
weight_dtype=None,
hard_attention_k=0):
"""Dot-product attention.
Args:
q: Tensor with shape [..., length_q, depth_k].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
bias: bias Tensor (see attention_bias())
dropout_rate: a float.
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
make_image_summary: True if you want an image summary.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than rank of q.
Specifies in which dimensions to broadcast the dropout decisions.
activation_dtype: Used to define function activation dtype when using
mixed precision.
weight_dtype: The dtype weights are stored in when using mixed precision
hard_attention_k: integer, if > 0 triggers hard attention (picking top-k)
Returns:
Tensor with shape [..., length_q, depth_v].
"""
with tf.variable_scope(
name, default_name="dot_product_attention", values=[q, k, v]) as scope:
logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv]
if bias is not None:
bias = common_layers.cast_like(bias, logits)
logits += bias
# If logits are fp16, upcast before softmax
logits = maybe_upcast(logits, activation_dtype, weight_dtype)
weights = tf.nn.softmax(logits, name="attention_weights")
if hard_attention_k > 0:
weights = harden_attention_weights(weights, hard_attention_k)
weights = common_layers.cast_like(weights, q)
if save_weights_to is not None:
save_weights_to[scope.name] = weights
save_weights_to[scope.name + "/logits"] = logits
# Drop out attention links for each head.
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and make_image_summary:
attention_image_summary(weights, image_shapes)
return tf.matmul(weights, v) | Dot-product attention.
Args:
q: Tensor with shape [..., length_q, depth_k].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
bias: bias Tensor (see attention_bias())
dropout_rate: a float.
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
make_image_summary: True if you want an image summary.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than rank of q.
Specifies in which dimensions to broadcast the dropout decisions.
activation_dtype: Used to define function activation dtype when using
mixed precision.
weight_dtype: The dtype weights are stored in when using mixed precision
hard_attention_k: integer, if > 0 triggers hard attention (picking top-k)
Returns:
Tensor with shape [..., length_q, depth_v]. |
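Stripped of dropout, summaries and dtype handling, the core computation is softmax(QK^T + bias) . V. A minimal NumPy sketch of just that math:

import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)    # stabilise before exponentiating
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

rng = np.random.default_rng(0)
q = rng.normal(size=(1, 2, 4))                 # [batch, length_q, depth_k]
k = rng.normal(size=(1, 3, 4))                 # [batch, length_kv, depth_k]
v = rng.normal(size=(1, 3, 4))                 # [batch, length_kv, depth_v]

logits = q @ k.transpose(0, 2, 1)              # [1, 2, 3] = [..., length_q, length_kv]
weights = softmax(logits)                      # each row sums to 1
out = weights @ v                              # [1, 2, 4] = [..., length_q, depth_v]
print(out.shape)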
def exp_and_normalise(lw):
"""Exponentiate, then normalise (so that sum equals one).
Arguments
---------
lw: ndarray
log weights.
Returns
-------
W: ndarray of the same shape as lw
W = exp(lw) / sum(exp(lw))
Note
----
uses the log_sum_exp trick to avoid overflow (i.e. subtract the max
before exponentiating)
See also
--------
log_sum_exp
log_mean_exp
"""
w = np.exp(lw - lw.max())
return w / w.sum() | Exponentiate, then normalise (so that sum equals one).
Arguments
---------
lw: ndarray
log weights.
Returns
-------
W: ndarray of the same shape as lw
W = exp(lw) / sum(exp(lw))
Note
----
uses the log_sum_exp trick to avoid overflow (i.e. subtract the max
before exponentiating)
See also
--------
log_sum_exp
log_mean_exp |
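The max-subtraction is what makes this safe for large log weights, where a naive ``np.exp`` would overflow:

import numpy as np

lw = np.array([1000., 1001., 1002.])   # np.exp(lw) alone overflows to inf
W = exp_and_normalise(lw)
print(W)          # [0.09003057 0.24472847 0.66524096]
print(W.sum())    # 1.0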
def run(self, scenario=None, only=None, **kwargs):
"""
Run MAGICC and parse the output.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run. If None MAGICC will simply run with whatever config has
already been set.
only : list of str
If not None, only extract variables in this list.
kwargs
Other config values to pass to MAGICC for the run
Returns
-------
:obj:`pymagicc.io.MAGICCData`
MAGICCData object containing that data in its ``df`` attribute and
metadata and parameters (depending on the value of ``include_parameters``)
in its ``metadata`` attribute.
Raises
------
ValueError
If no output is found which matches the list specified in ``only``.
"""
if not exists(self.root_dir):
raise FileNotFoundError(self.root_dir)
if self.executable is None:
raise ValueError(
"MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`".format(
self.version
)
)
if scenario is not None:
kwargs = self.set_emission_scenario_setup(scenario, kwargs)
yr_config = {}
if "startyear" in kwargs:
yr_config["startyear"] = kwargs.pop("startyear")
if "endyear" in kwargs:
yr_config["endyear"] = kwargs.pop("endyear")
if yr_config:
self.set_years(**yr_config)
# should be able to do some other nice metadata stuff re how magicc was run
# etc. here
kwargs.setdefault("rundate", get_date_time_string())
self.update_config(**kwargs)
self.check_config()
exec_dir = basename(self.original_dir)
command = [join(self.root_dir, exec_dir, self.binary_name)]
if not IS_WINDOWS and self.binary_name.endswith(".exe"): # pragma: no cover
command.insert(0, "wine")
# On Windows shell=True is required.
subprocess.check_call(command, cwd=self.run_dir, shell=IS_WINDOWS)
outfiles = self._get_output_filenames()
read_cols = {"climate_model": ["MAGICC{}".format(self.version)]}
if scenario is not None:
read_cols["model"] = scenario["model"].unique().tolist()
read_cols["scenario"] = scenario["scenario"].unique().tolist()
else:
read_cols.setdefault("model", ["unspecified"])
read_cols.setdefault("scenario", ["unspecified"])
mdata = None
for filepath in outfiles:
try:
openscm_var = _get_openscm_var_from_filepath(filepath)
if only is None or openscm_var in only:
tempdata = MAGICCData(
join(self.out_dir, filepath), columns=deepcopy(read_cols)
)
mdata = mdata.append(tempdata) if mdata is not None else tempdata
except (NoReaderWriterError, InvalidTemporalResError):
continue
if mdata is None:
error_msg = "No output found for only={}".format(only)
raise ValueError(error_msg)
try:
run_paras = self.read_parameters()
self.config = run_paras
mdata.metadata["parameters"] = run_paras
except FileNotFoundError:
pass
return mdata | Run MAGICC and parse the output.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run. If None MAGICC will simply run with whatever config has
already been set.
only : list of str
If not None, only extract variables in this list.
kwargs
Other config values to pass to MAGICC for the run
Returns
-------
:obj:`pymagicc.io.MAGICCData`
MAGICCData object containing that data in its ``df`` attribute and
metadata and parameters (depending on the value of ``include_parameters``)
in its ``metadata`` attribute.
Raises
------
ValueError
If no output is found which matches the list specified in ``only``. |