code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def fit_linear(xdata, ydata):
    """
    Fit a straight line y = a*x + b to the supplied data and return
    (slope, intercept).

    Parameters
    ----------
    xdata, ydata:
        Arrays of x data and y data (having matching lengths).

    Returns
    -------
    (slope, intercept) of the least-squares best-fit line.
    """
    x = _n.array(xdata)
    y = _n.array(ydata)
    # Least-squares solution expressed through first and second moments.
    mean_x = _n.average(x)
    mean_y = _n.average(y)
    mean_xx = _n.average(x * x)
    mean_xy = _n.average(y * x)
    a = (mean_xy - mean_y * mean_x) / (mean_xx - mean_x * mean_x)
    b = mean_y - a * mean_x
    return a, b
def renew(gandi, domain, duration, background):
    """Renew a domain registration.

    Delegates to the Gandi API client; when running in the background the
    API result is pretty-printed before being returned.
    """
    outcome = gandi.domain.renew(domain, duration, background)
    if background:
        gandi.pretty_echo(outcome)
    return outcome
def bloquear_sat(self):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.bloquear_sat` (blocks the
    SAT device via the HTTP bridge).

    :return: A standard SAT response.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    payload = self._http_post('bloquearsat').json()
    return RespostaSAT.bloquear_sat(payload.get('retorno'))
def calibrate(self, calib_ps, analytes=None):
    """
    Apply calibration to data.

    The calibration parameters (`calib_ps`) must be calculated at the
    `analyse` level, and passed to this calibrate function.

    Parameters
    ----------
    calib_ps : dict
        A dict of calibration parameters to apply to each analyte:
        per-analyte slope `m` and, optionally, intercept `c`.
    analytes : iterable of str, optional
        Analytes to calibrate; defaults to all of `self.analytes`.

    Returns
    -------
    None
    """
    # can have calibration function stored in self and pass *coefs?
    if analytes is None:
        analytes = self.analytes
    if 'calibrated' not in self.data.keys():
        self.data['calibrated'] = Bunch()
    for a in analytes:
        # Slope (and optional intercept) are re-sampled onto this sample's
        # time axis before applying calibrated = ratios * m + c.
        m = calib_ps[a]['m'].new(self.uTime)
        if 'c' in calib_ps[a]:
            c = calib_ps[a]['c'].new(self.uTime)
        else:
            c = 0
        self.data['calibrated'][a] = self.data['ratios'][a] * m + c
    if self.internal_standard not in analytes:
        # NOTE(review): np.empty leaves the values uninitialised -- looks
        # like a placeholder so the key exists; confirm downstream use.
        self.data['calibrated'][self.internal_standard] = \
            np.empty(len(self.data['ratios'][self.internal_standard]))
    self.setfocus('calibrated')
    return
def libvlc_audio_equalizer_get_band_frequency(u_index):
    '''Get a particular equalizer band frequency.
    This value can be used, for example, to create a label for an equalizer band control
    in a user interface.
    @param u_index: index of the band, counting from zero.
    @return: equalizer band frequency (Hz), or -1 if there is no such band.
    @version: LibVLC 2.2.0 or later.
    '''
    # Lazily resolve the ctypes binding (cached in _Cfunctions):
    # float libvlc_audio_equalizer_get_band_frequency(unsigned u_index)
    f = _Cfunctions.get('libvlc_audio_equalizer_get_band_frequency', None) or \
        _Cfunction('libvlc_audio_equalizer_get_band_frequency', ((1,),), None,
                    ctypes.c_float, ctypes.c_uint)
    return f(u_index)
def foldx_dir(self):
    """str: path to the FoldX folder under the root directory, or None
    (with a warning logged) when no root directory has been set."""
    if not self.root_dir:
        log.warning('Root directory not set')
        return None
    return op.join(self.root_dir, self._foldx_dirname)
def read_connections(file_name, point_names):
    """Read a file detailing which markers should be connected to which for
    motion capture data.

    Parameters
    ----------
    file_name : str
        Path to a text file with one connection per line, formatted as
        ``name1, name2``.
    point_names : sequence of str
        Marker names; the returned matrix is indexed in this order.

    Returns
    -------
    numpy.ndarray
        Symmetric boolean matrix where ``connect[i, j]`` is True when the
        file links ``point_names[i]`` and ``point_names[j]``.
    """
    connections = []
    # Context manager guarantees the file handle is closed (the original
    # opened it and never closed it).
    with open(file_name, 'r') as fid:
        for line in fid:
            pair = np.array(line.split(','))
            pair[0] = pair[0].strip()
            pair[1] = pair[1].strip()
            connections.append(pair)
    # O(1) membership lookups instead of re-scanning the connection list
    # for every (i, j) pair.
    linked = {(c[0], c[1]) for c in connections}
    connect = np.zeros((len(point_names), len(point_names)), dtype=bool)
    for i, name_i in enumerate(point_names):
        for j, name_j in enumerate(point_names):
            if (name_i, name_j) in linked:
                connect[i, j] = True
                connect[j, i] = True
    return connect
def proc_chain(self, chain_info, parent):
    """Converts a chain into a `Polymer` type object.

    Parameters
    ----------
    chain_info : (set, OrderedDict)
        Contains a set of chain labels and atom records.
    parent : ampal.Assembly
        `Assembly` used to assign `ampal_parent` on created
        `Polymer`.

    Raises
    ------
    ValueError
        Raised if multiple or unknown atom types found
        within the same chain.
    AttributeError
        Raised if unknown `Monomer` type encountered.
    """
    # Filters that decide whether a HETATM record really belongs on the
    # chain (currently: non-canonical amino acids).
    hetatom_filters = {
        'nc_aas': self.check_for_non_canonical
    }
    polymer = False
    chain_labels, chain_data = chain_info
    chain_label = list(chain_labels)[0]
    # Monomer type codes in this chain; the branches below treat 'P' as
    # polypeptide, 'N' as polynucleotide and 'H' as hetero/ligand.
    monomer_types = {x[2] for x in chain_labels if x[2]}
    if ('P' in monomer_types) and ('N' in monomer_types):
        raise ValueError(
            'Malformed PDB, multiple "ATOM" types in a single chain.')
    # Changes Polymer type based on chain composition
    if 'P' in monomer_types:
        polymer_class = Polypeptide
        polymer = True
    elif 'N' in monomer_types:
        polymer_class = Polynucleotide
        polymer = True
    elif 'H' in monomer_types:
        polymer_class = LigandGroup
    else:
        raise AttributeError('Malformed parse tree, check inout PDB.')
    chain = polymer_class(polymer_id=chain_label[0], ampal_parent=parent)
    # Changes where the ligands should go based on the chain composition:
    # real polymers get a separate LigandGroup, a pure-ligand "chain" keeps
    # its hetero monomers on itself.
    if polymer:
        chain.ligands = LigandGroup(
            polymer_id=chain_label[0], ampal_parent=parent)
        ligands = chain.ligands
    else:
        ligands = chain
    for residue in chain_data.values():
        res_info = list(residue[0])[0]
        if res_info[0] == 'ATOM':
            chain._monomers.append(self.proc_monomer(residue, chain))
        elif res_info[0] == 'HETATM':
            mon_cls = None
            on_chain = False
            for filt_func in hetatom_filters.values():
                filt_res = filt_func(residue)
                if filt_res:
                    mon_cls, on_chain = filt_res
                    break
            # NOTE(review): this unconditionally overwrites any `mon_cls`
            # a matching filter returned above, so only `on_chain` from
            # the filter is honoured -- confirm whether this is intended
            # or should be `if mon_cls is None: mon_cls = Ligand`.
            mon_cls = Ligand
            if on_chain:
                chain._monomers.append(self.proc_monomer(
                    residue, chain, mon_cls=mon_cls))
            else:
                ligands._monomers.append(self.proc_monomer(
                    residue, chain, mon_cls=mon_cls))
        else:
            raise ValueError('Malformed PDB, unknown record type for data')
    return chain
def add_slice_db(self, fid, slice_end, md5):
    '''Record an upload-task slice (chunk) in the database.'''
    self.cursor.execute(
        'INSERT INTO slice VALUES(?, ?, ?)', (fid, slice_end, md5))
    self.check_commit()
def broker_url(self):
    """ Returns a "broker URL" for use with Celery. """
    template = 'amqp://{}:{}@{}/{}'
    return template.format(self.user, self.password, self.name, self.vhost)
async def merge_imports_tree(cache, imports, target_trees, base_tree=None):
    '''Take an Imports struct and a dictionary of resolved trees and merge the
    unified imports tree. If base_tree is supplied, merge that too. There are a
    couple reasons for structuring this function the way it is:
    - We want to cache merged trees, so that we don't have to do expensive
      git operations just to check whether a module is in cache.
    - We want tree merging to know about target names, so that it can write
      good error messages when there are conflicts.
    - We need to use this for both toplevel imports and recursive module
      imports.
    '''
    # Cache hit: this exact (imports, trees, base) combination was merged
    # before.
    key = _cache_key(imports, target_trees, base_tree)
    if key in cache.keyval:
        return cache.keyval[key]
    # We always want to merge imports in the same order, so that any conflicts
    # we run into will be deterministic. Sort the imports alphabetically by
    # target name.
    # NOTE(review): no explicit sort happens here -- this relies on
    # `imports` already iterating in target-name order; confirm that
    # guarantee at the call sites.
    unified_tree = base_tree or (await cache.get_empty_tree())
    for target, paths in imports.items():
        for path in paths:
            try:
                unified_tree = await cache.merge_trees(
                    unified_tree, target_trees[target], path)
            except MergeConflictError as e:
                # Enrich the conflict message with the offending import,
                # then let the exception propagate.
                message = 'Merge conflict in import "{}" at "{}":\n\n{}'
                e.message = message.format(target, path,
                                           textwrap.indent(e.message, ' '))
                raise
    cache.keyval[key] = unified_tree
    return unified_tree
def create_table(name=str('_Table'), base=Table, options=None):
    """Create and return a new table class.

    `name` sets the generated class's name.  `base` may be a single class
    or an iterable of base classes.  `options` is forwarded to the
    metaclass (an empty dict when omitted).
    """
    try:
        bases = tuple(base)
    except TypeError:
        # A single class was given rather than an iterable of classes.
        bases = (base,)
    return TableMeta(name, bases, options or {})
def index_modules(root) -> Dict:
    """Map each top-level program unit in the Fortran AST to its tag and
    position, so that each module/program can later be written out into a
    separate Python file."""
    wanted = ("module", "program", "subroutine")
    return {
        entry["name"]: (entry.get("tag"), position)
        for position, entry in enumerate(root)
        if entry.get("tag") in wanted
    }
def open_outside_spyder(self, fnames):
    """Open file outside Spyder with the appropriate application.

    If that fails, fall back to opening the (unknown) file inside Spyder
    as a text file, via the `sig_edit` signal."""
    for fname in sorted(fnames):
        uri = file_uri(fname)
        if not programs.start_file(uri):
            self.sig_edit.emit(uri)
def _build_mappings(
self, classes: Sequence[type]
) -> Tuple[Mapping[type, Sequence[type]], Mapping[type, Sequence[type]]]:
"""
Collect all bases and organize into parent/child mappings.
"""
parents_to_children: MutableMapping[type, Set[type]] = {}
children_to_parents: MutableMapping[type, Set[type]] = {}
visited_classes: Set[type] = set()
class_stack = list(classes)
while class_stack:
class_ = class_stack.pop()
if class_ in visited_classes:
continue
visited_classes.add(class_)
for base in class_.__bases__:
if base not in visited_classes:
class_stack.append(base)
parents_to_children.setdefault(base, set()).add(class_)
children_to_parents.setdefault(class_, set()).add(base)
sorted_parents_to_children: MutableMapping[
type, List[type]
] = collections.OrderedDict()
for parent, children in sorted(
parents_to_children.items(), key=lambda x: (x[0].__module__, x[0].__name__)
):
sorted_parents_to_children[parent] = sorted(
children, key=lambda x: (x.__module__, x.__name__)
)
sorted_children_to_parents: MutableMapping[
type, List[type]
] = collections.OrderedDict()
for child, parents in sorted(
children_to_parents.items(), key=lambda x: (x[0].__module__, x[0].__name__)
):
sorted_children_to_parents[child] = sorted(
parents, key=lambda x: (x.__module__, x.__name__)
)
return sorted_parents_to_children, sorted_children_to_parents | Collect all bases and organize into parent/child mappings. |
def verify_response(response, status_code, content_type=None):
    """Verifies that a response has the expected status and content type.

    Args:
        response: The ResponseTuple to be checked.
        status_code: An int, the HTTP status code to be compared with
            response status.
        content_type: A string with the acceptable Content-Type header
            value.  None allows any content type.

    Returns:
        True if both status_code and content_type match, else False.
    """
    actual_status = int(response.status.split(' ', 1)[0])
    if actual_status != status_code:
        return False
    if content_type is None:
        return True
    for name, value in response.headers:
        if name.lower() == 'content-type':
            return value == content_type
    # No Content-Type header was present at all, so the check fails.
    return False
def follower_ids(self, user):
    """
    Yields Twitter user ids (as strings) for the specified user's
    followers.  A user can be specified using either their screen_name
    or user_id.
    """
    user = str(user)
    user = user.lstrip('@')
    url = 'https://api.twitter.com/1.1/followers/ids.json'
    # All-digit input is treated as a numeric user_id, anything else as a
    # screen name.
    if re.match(r'^\d+$', user):
        params = {'user_id': user, 'cursor': -1}
    else:
        params = {'screen_name': user, 'cursor': -1}
    # cursor == 0 marks the last page of the paginated API.
    while params['cursor'] != 0:
        try:
            resp = self.get(url, params=params, allow_404=True)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                # BUG FIX: previously referenced the undefined name
                # `screen_name`, raising NameError instead of logging.
                log.info("no users matching %s", user)
            raise e
        user_ids = resp.json()
        for user_id in user_ids['ids']:
            yield str_type(user_id)
        params['cursor'] = user_ids['next_cursor']
def internal_get_frame(dbg, seq, thread_id, frame_id):
    ''' Converts request into python variable: serializes the locals of the
    requested frame to XML and queues the reply (or an error message) on
    the debugger's writer. '''
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        if frame is not None:
            hidden_ns = pydevconsole.get_ipython_hidden_vars()
            xml = "<xml>"
            xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
            # Drop the frame reference promptly so this local does not keep
            # the whole frame graph alive.
            del frame
            xml += "</xml>"
            cmd = dbg.cmd_factory.make_get_frame_message(seq, xml)
            dbg.writer.add_command(cmd)
        else:
            # pydevd_vars.dump_frames(thread_id)
            # don't print this error: frame not found: means that the client is not synchronized (but that's ok)
            cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id))
            dbg.writer.add_command(cmd)
    except:
        # Bare except: presumably deliberate in a debugger -- any failure is
        # reported back to the client rather than propagated.
        cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id))
        dbg.writer.add_command(cmd)
def process_parsed_args(opts: Namespace, error_fun: Optional[Callable], connect: bool=True) -> Namespace:
    """
    Set the defaults for the crc and ontology schemas.

    :param opts: parsed arguments
    :param error_fun: function to call if required connection arguments
        are missing
    :param connect: actually connect. (For debugging)
    :return: namespace with additional elements added
    """
    def fill_default(attr: str, default: object) -> None:
        # Only fill attributes that exist but are unset/falsy.
        assert attr in opts, "Unknown option"
        if not getattr(opts, attr):
            setattr(opts, attr, default)

    required = (getattr(opts, 'dburl'), getattr(opts, 'user'), getattr(opts, 'password'))
    if error_fun and any(v is None for v in required):
        error_fun("db url, user id and password must be supplied")
    for prefix in ('crc', 'onto'):
        fill_default(prefix + 'db', opts.dburl)
        fill_default(prefix + 'user', opts.user)
        fill_default(prefix + 'password', opts.password)
    if connect:
        opts.tables = I2B2Tables(opts)
    # TODO: This approach needs to be re-thought. As i2b2tablenames is a
    # singleton, any changes here impact the entire testing harness
    if opts.onttable:
        i2b2tablenames.ontology_table = opts.onttable
    return opts
def start_standby(cls, webdriver=None, max_time=WTF_TIMEOUT_MANAGER.EPIC, sleep=5):
    """
    Create a BrowserStandBy instance that is already running, and return it.
    This is best used in a 'with' block.

    Example::
        with BrowserStandBy.start_standby():
            # Browser is kept in standby for everything inside this block.
            # ...
        # Once the block is exited, the browser standby has ended.
    """
    standby = cls(webdriver=webdriver, max_time=max_time, sleep=sleep,
                  _autostart=True)
    return standby
def compare(self, buf, offset=0, length=1, ignore=""):
    """Compare buffer contents against `buf` over `length` items starting
    at `offset`.  Returns 1 on the first differing item, else 0.

    NOTE(review): for ctypes Union/Structure members this delegates to the
    module-level `compare()` helper (not this method); `ignore` is
    presumably a field filter passed through to it -- confirm against that
    helper's definition.
    """
    for i in range(offset, offset + length):
        # True when self.m_types holds ctypes Union/Structure *classes*
        # (isinstance against their metaclasses).
        if isinstance(self.m_types, (type(Union), type(Structure))):
            if compare(self.m_buf[i], buf[i], ignore=ignore):
                return 1
        elif self.m_buf[i] != buf[i]:
            return 1
    return 0
def quast_contigs_barplot(self):
    """ Make a bar plot showing the number and length of contigs for each assembly """
    # Raw-string regex: '\(' and '\d' are invalid escape sequences in a
    # plain literal (SyntaxWarning on modern Python).  Compiled once here
    # instead of on every key of every sample.
    contig_re = re.compile(r'# contigs \(>= (\d+) bp\)')
    # Prep the data
    data = dict()
    categories = []
    for s_name, d in self.quast_data.items():
        nums_by_t = dict()
        for k, v in d.items():
            m = contig_re.match(k)
            if m and v != '-':
                nums_by_t[int(m.groups()[0])] = int(v)
        # Thresholds largest-first; the input counts are cumulative
        # ("contigs >= N bp"), so adjacent differences give per-bin counts.
        thresholds = sorted(nums_by_t.keys(), reverse=True)
        p = dict()
        cats = []
        for i, t in enumerate(thresholds):
            if i == 0:
                c = '>= ' + str(t) + ' bp'
                cats.append(c)
                p[c] = nums_by_t[t]
            else:
                c = str(t) + '-' + str(thresholds[i - 1]) + ' bp'
                cats.append(c)
                p[c] = nums_by_t[t] - nums_by_t[thresholds[i - 1]]
        if not categories:
            categories = cats
        data[s_name] = p
    pconfig = {
        'id': 'quast_num_contigs',
        'title': 'QUAST: Number of Contigs',
        'ylab': '# Contigs',
        'yDecimals': False
    }
    return bargraph.plot(data, categories, pconfig)
def destination(self):
    "the bus name that the message is to be sent to, or None when unset."
    result = dbus.dbus_message_get_destination(self._dbobj)
    # `is not None` rather than `!= None` (PEP 8 / E711); the C binding's
    # return value is decoded to str when present.
    if result is not None:
        result = result.decode()
    return result
def main():
    """Check the given files in parallel; return 0 on success, 1 when any
    file produced errors or an I/O problem occurred."""
    args = parse_args()
    if not args.files:
        return 0
    with enable_sphinx_if_possible():
        status = 0
        # Context manager guarantees the worker pool is cleaned up even on
        # error (the original created it and never closed/joined it).
        with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
            try:
                if len(args.files) > 1:
                    results = pool.map(
                        _check_file,
                        [(name, args) for name in args.files])
                else:
                    # This is for the case where we read from standard in.
                    results = [_check_file((args.files[0], args))]
                for (filename, errors) in results:
                    for error in errors:
                        line_number = error[0]
                        message = error[1]
                        # Normalize messages that lack a severity prefix.
                        if not re.match(r'\([A-Z]+/[0-9]+\)', message):
                            message = '(ERROR/3) ' + message
                        output_message('{}:{}: {}'.format(filename,
                                                          line_number,
                                                          message))
                        status = 1
            except (IOError, UnicodeError) as exception:
                output_message(exception)
                status = 1
        return status
def process_model_scores(self, model_names, root_cache,
                         include_features=False):
    """
    Generates a score map for a set of models based on a `root_cache`.
    This method performs no substantial IO, but may incur substantial CPU
    usage.

    :Parameters:
        model_names : `set` ( `str` )
            A set of models to score
        root_cache : `dict` ( `str` --> `mixed` )
            A cache of pre-computed root_dependencies for a specific
            revision.  See `extract_root_dependency_caches()`
        include_features : `bool`
            If True, include a map of basic features used in scoring along
            with the model score.  If False, just generate the scores.
    """
    scores = {}
    for name in model_names:
        # Scoring is the CPU-heavy part; the feature map below is
        # essentially free by comparison.
        entry = {'score': self._process_score(
            name, dependency_cache=root_cache)}
        if include_features:
            entry['features'] = self._solve_base_feature_map(
                name, dependency_cache=root_cache)
        scores[name] = entry
    return scores
def get_sqltext(self, format_=1):
    """Return the queries currently running on the server (Oracle).

    :param format_: 1 -> per-session SQL text from v$sql / v$session;
                    2 -> execution history from dba_users / v$sqlarea.
    :return: result of ``psql.read_sql`` over ``self.conn``.
    :raises ValueError: if ``format_`` is neither 1 nor 2 (previously this
        fell through to an UnboundLocalError on ``_sql``).
    """
    if format_ == 1:
        _sql = """SELECT u.sid, substr(u.username,1,12) user_name, s.sql_text
    FROM v$sql s,v$session u
    WHERE s.hash_value = u.sql_hash_value
    AND sql_text NOT LIKE '%from v$sql s, v$session u%'
    AND u.username NOT LIKE 'None'
    ORDER BY u.sid"""
    elif format_ == 2:
        _sql = """SELECT u.username, s.first_load_time, s.executions, s.sql_text
    FROM dba_users u,v$sqlarea s
    WHERE u.user_id=s.parsing_user_id
    AND u.username LIKE 'LIONEL'
    AND sql_text NOT LIKE '%FROM dba_users u,v$sqlarea s%'
    ORDER BY s.first_load_time"""
    else:
        raise ValueError('format_ must be 1 or 2')
    return psql.read_sql(_sql, self.conn)
def json_file_response(obj=None, pid=None, record=None, status=None):
    """JSON Files/File serializer.

    :param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or
        a :class:`invenio_records_files.api.FilesIterator` if it's a list of
        files.
    :param pid: PID value. (not used)
    :param record: The record metadata. (not used)
    :param status: The HTTP status code.
    :returns: A Flask response with JSON data.
    :rtype: :py:class:`flask.Response`.
    """
    from invenio_records_files.api import FilesIterator
    # Pick the list-of-files serializer for iterators, the single-file one
    # otherwise.
    serialize = (json_files_serializer if isinstance(obj, FilesIterator)
                 else json_file_serializer)
    return serialize(obj, status=status)
def find_exact(self, prefix):
    '''
    Find the exact child with the given prefix.

    Returns the single match whose prefix equals `prefix`, or None when
    there are zero matches, multiple matches, or only a partial match.
    '''
    candidates = self.find_all(prefix)
    if len(candidates) != 1:
        return None
    candidate = candidates.pop()
    return candidate if candidate.prefix == prefix else None
def count_repetitions(self, ctx, maxcount):
    """Returns the number of repetitions of a single item, starting from the
    current string position. The code pointer is expected to point to a
    REPEAT_ONE operation (with the repeated 4 ahead)."""
    count = 0
    # Cap the repetition count at the characters actually remaining.
    real_maxcount = ctx.state.end - ctx.string_position
    if maxcount < real_maxcount and maxcount != MAXREPEAT:
        real_maxcount = maxcount
    # XXX could special case every single character pattern here, as in C.
    # This is a general solution, a bit hackisch, but works and should be
    # efficient.
    # Save the context state so the speculative matching below can be
    # fully undone before returning.
    code_position = ctx.code_position
    string_position = ctx.string_position
    ctx.skip_code(4)
    reset_position = ctx.code_position
    while count < real_maxcount:
        # this works because the single character pattern is followed by
        # a success opcode
        ctx.code_position = reset_position
        self.dispatch(ctx.peek_code(), ctx)
        if ctx.has_matched is False: # could be None as well
            break
        count += 1
    # Restore the saved state: this probe must leave the context exactly
    # as it found it.
    ctx.has_matched = None
    ctx.code_position = code_position
    ctx.string_position = string_position
    return count
def load_token(cls, token, force=False):
    """Validate a secret link token (non-expiring + expiring).

    Tries every supported digest algorithm with both the plain and the
    timed serializer, returning the first successfully decoded payload.
    A parsed-but-expired signature raises SignatureExpired; a token no
    serializer can decode yields None.
    """
    for algorithm in SUPPORTED_DIGEST_ALGORITHMS:
        serializers = (
            SecretLinkSerializer(algorithm_name=algorithm),
            TimedSecretLinkSerializer(algorithm_name=algorithm),
        )
        for serializer in serializers:
            try:
                data = serializer.load_token(token, force=force)
            except SignatureExpired:
                raise  # signature was parsed and is expired
            except BadData:
                continue
            if data:
                return data
def parse_multi_lrvalue_string(search_string, split_string,
                               delimiter=":"):
    '''
    Extension of the parse_lrvalue_string() API for multi-record output.

    The function takes a multi-line output/string of the format
    "Category: xyz
    name: foo
    id: bar
    Category: abc
    name: foox
    id: barx
    "
    The input is split on `split_string` (eg "Category"), and each
    resulting section of "name: value" lines is converted by
    parse_lrvalue_string() into a dictionary object with key value pairs.
    The key is built from the name (LV) part,
    eg "First Name: Behzad" --> dict[first_name] = "Behzad".
    Returns the list of per-section dictionaries.
    '''
    return [
        parse_lrvalue_string(split_string + section, delimiter=delimiter)
        for section in search_string.split(split_string)
    ]
def draw(self, surface):
    """ Draw all sprites and map onto the surface

    :param surface: pygame surface to draw to
    :type surface: pygame.surface.Surface
    """
    # Camera offset: sprite rects are in map coordinates and must be
    # shifted into screen coordinates before blitting.
    ox, oy = self._map_layer.get_center_offset()
    new_surfaces = list()
    spritedict = self.spritedict
    gl = self.get_layer_of_sprite
    # Bind to locals once; this loop runs every frame for every sprite.
    new_surfaces_append = new_surfaces.append
    for spr in self.sprites():
        new_rect = spr.rect.move(ox, oy)
        try:
            new_surfaces_append((spr.image, new_rect, gl(spr), spr.blendmode))
        except AttributeError:  # generally should only fail when no blendmode available
            new_surfaces_append((spr.image, new_rect, gl(spr)))
        spritedict[spr] = new_rect
    self.lostsprites = []
    return self._map_layer.draw(surface, surface.get_rect(), new_surfaces)
def exit_config_mode(self, exit_config="exit", pattern=""):
    """Exit config_mode, defaulting the expected prompt pattern to the
    escaped base prompt before delegating to the generic implementation."""
    pattern = pattern or re.escape(self.base_prompt)
    return super(CiscoWlcSSH, self).exit_config_mode(exit_config, pattern)
def error(self, relative_to='AME2003'):
    """
    Calculate the error difference against another mass table.

    Parameters
    ----------
    relative_to : string,
        a valid mass table name.

    Example:
    ----------
    >>> Table('DUZU').error(relative_to='AME2003')
    """
    difference = self.df - Table(relative_to).df
    return Table(df=difference)
def _find_valid_block(self, table, worksheet, flags, units, used_cells, start_pos, end_pos):
    '''
    Searches for the next location where a valid block could reside and constructs the block
    object representing that location.  Returns the TableBlock, or None
    (implicitly) when no valid block exists in the search window.
    '''
    for row_index in range(len(table)):
        # Restrict the scan to the [start_pos, end_pos] window.
        if row_index < start_pos[0] or row_index > end_pos[0]:
            continue
        convRow = table[row_index]
        used_row = used_cells[row_index]
        for column_index, conv in enumerate(convRow):
            if (column_index < start_pos[1] or column_index > end_pos[1] or used_row[column_index]):
                continue
            # Is non empty cell?
            if not is_empty_cell(conv):
                # A non-empty, unused cell anchors a candidate block.
                block_start, block_end = self._find_block_bounds(table, used_cells,
                    (row_index, column_index), start_pos, end_pos)
                if (block_end[0] > block_start[0] and
                    block_end[1] > block_start[1]):
                    try:
                        return TableBlock(table, used_cells, block_start, block_end, worksheet,
                            flags, units, self.assume_complete_blocks, self.max_title_rows)
                    except InvalidBlockError:
                        # Candidate turned out invalid; fall through and
                        # mark the anchor cell used below.
                        pass
                # Prevent infinite loops if something goes wrong
                used_cells[row_index][column_index] = True
def add_link(self):
    "Create a new internal link and return its (1-based) index."
    link_id = len(self.links) + 1
    self.links[link_id] = (0, 0)
    return link_id
def fourier_fit_magseries(times, mags, errs, period,
                          fourierorder=None,
                          fourierparams=None,
                          sigclip=3.0,
                          magsarefluxes=False,
                          plotfit=False,
                          ignoreinitfail=True,
                          verbose=True):
    '''This fits a Fourier series to a mag/flux time series.

    Parameters
    ----------

    times,mags,errs : np.array
        The input mag/flux time-series to fit a Fourier cosine series to.

    period : float
        The period to use for the Fourier fit.

    fourierorder : None or int
        If this is an int, will be interpreted as the Fourier order of the
        series to fit to the input mag/flux times-series. If this is None and
        `fourierparams` is specified, `fourierparams` will be used directly to
        generate the fit Fourier series. If `fourierparams` is also None, this
        function will try to fit a Fourier cosine series of order 3 to the
        mag/flux time-series.

    fourierparams : list of floats or None
        If this is specified as a list of floats, it must be of the form below::

            [fourier_amp1, fourier_amp2, fourier_amp3,...,fourier_ampN,
             fourier_phase1, fourier_phase2, fourier_phase3,...,fourier_phaseN]

        to specify a Fourier cosine series of order N. If this is None and
        `fourierorder` is specified, the Fourier order specified there will be
        used to construct the Fourier cosine series used to fit the input
        mag/flux time-series. If both are None, this function will try to fit a
        Fourier cosine series of order 3 to the input mag/flux time-series.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    magsarefluxes : bool
        If True, will treat the input values of `mags` as fluxes for purposes of
        plotting the fit and sig-clipping.

    plotfit : str or False
        If this is a string, this function will make a plot for the fit to the
        mag/flux time-series and writes the plot to the path specified here.

    ignoreinitfail : bool
        If this is True, ignores the initial failure to find a set of optimized
        Fourier parameters using the global optimization function and proceeds
        to do a least-squares fit anyway.

    verbose : bool
        If True, will indicate progress and warn of any problems.

    Returns
    -------

    dict
        This function returns a dict containing the model fit parameters, the
        minimized chi-sq value and the reduced chi-sq value. The form of this
        dict is mostly standardized across all functions in this module::

            {
                'fittype':'fourier',
                'fitinfo':{
                    'finalparams': the list of final model fit params,
                    'leastsqfit':the full tuple returned by scipy.leastsq,
                    'fitmags': the model fit mags,
                    'fitepoch': the epoch of minimum light for the fit,
                    ... other fit function specific keys ...
                },
                'fitchisq': the minimized value of the fit's chi-sq,
                'fitredchisq':the reduced chi-sq value,
                'fitplotfile': the output fit plot if fitplot is not None,
                'magseries':{
                    'times':input times in phase order of the model,
                    'phase':the phases of the model mags,
                    'mags':input mags/fluxes in the phase order of the model,
                    'errs':errs in the phase order of the model,
                    'magsarefluxes':input value of magsarefluxes kwarg
                }
            }

        NOTE: the returned value of 'fitepoch' in the 'fitinfo' dict returned by
        this function is the time value of the first observation since this is
        where the LC is folded for the fit procedure. To get the actual time of
        minimum epoch as calculated by a spline fit to the phased LC, use the
        key 'actual_fitepoch' in the 'fitinfo' dict.

    '''
    # Sigma-clip and drop non-finite values first.
    stimes, smags, serrs = sigclip_magseries(times, mags, errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)
    # get rid of zero errs (they would blow up the chi-sq weights below)
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
    # Fold the LC at the given period; mintime is the fold epoch
    # (time of the first observation).
    phase, pmags, perrs, ptimes, mintime = (
        get_phased_quantities(stimes, smags, serrs, period)
    )
    # get the fourier order either from the scalar order kwarg...
    if fourierorder and fourierorder > 0 and not fourierparams:
        fourieramps = [0.6] + [0.2]*(fourierorder - 1)
        fourierphas = [0.1] + [0.1]*(fourierorder - 1)
        fourierparams = fourieramps + fourierphas
    # or from the fully specified coeffs vector
    elif not fourierorder and fourierparams:
        fourierorder = int(len(fourierparams)/2)
    else:
        # NOTE(review): this branch also fires when BOTH kwargs are given, in
        # which case the user-supplied fourierparams are discarded in favor of
        # order-3 defaults — the warning text suggests this is intentional.
        LOGWARNING('specified both/neither Fourier order AND Fourier coeffs, '
                   'using default Fourier order of 3')
        fourierorder = 3
        fourieramps = [0.6] + [0.2]*(fourierorder - 1)
        fourierphas = [0.1] + [0.1]*(fourierorder - 1)
        fourierparams = fourieramps + fourierphas
    if verbose:
        LOGINFO('fitting Fourier series of order %s to '
                'mag series with %s observations, '
                'using period %.6f, folded at %.6f' % (fourierorder,
                                                       len(phase),
                                                       period,
                                                       mintime))
    # initial minimize call to find global minimum in chi-sq
    initialfit = spminimize(_fourier_chisq,
                            fourierparams,
                            method='BFGS',
                            args=(phase, pmags, perrs))
    # make sure this initial fit succeeds before proceeding
    if initialfit.success or ignoreinitfail:
        if verbose:
            LOGINFO('initial fit done, refining...')
        leastsqparams = initialfit.x
        try:
            leastsqfit = spleastsq(_fourier_residual,
                                   leastsqparams,
                                   args=(phase, pmags))
        except Exception as e:
            leastsqfit = None
        # if the fit succeeded, then we can return the final parameters
        # (integer flags 1-4 from scipy.optimize.leastsq mean success)
        if leastsqfit and leastsqfit[-1] in (1,2,3,4):
            finalparams = leastsqfit[0]
            # calculate the chisq and reduced chisq
            fitmags = _fourier_func(finalparams, phase, pmags)
            fitchisq = npsum(
                ((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
            )
            fitredchisq = fitchisq/(len(pmags) - len(finalparams) - 1)
            if verbose:
                LOGINFO(
                    'final fit done. chisq = %.5f, reduced chisq = %.5f' %
                    (fitchisq,fitredchisq)
                )
            # figure out the time of light curve minimum (i.e. the fit epoch)
            # this is when the fit mag is maximum (i.e. the faintest)
            # or if magsarefluxes = True, then this is when fit flux is minimum
            if not magsarefluxes:
                fitmagminind = npwhere(fitmags == npmax(fitmags))
            else:
                fitmagminind = npwhere(fitmags == npmin(fitmags))
            if len(fitmagminind[0]) > 1:
                # Ties broken by taking the first index.
                fitmagminind = (fitmagminind[0][0],)
            # assemble the returndict
            returndict = {
                'fittype':'fourier',
                'fitinfo':{
                    'fourierorder':fourierorder,
                    'finalparams':finalparams,
                    'initialfit':initialfit,
                    'leastsqfit':leastsqfit,
                    'fitmags':fitmags,
                    'fitepoch':mintime,
                    'actual_fitepoch':ptimes[fitmagminind]
                },
                'fitchisq':fitchisq,
                'fitredchisq':fitredchisq,
                'fitplotfile':None,
                'magseries':{
                    'times':ptimes,
                    'phase':phase,
                    'mags':pmags,
                    'errs':perrs,
                    'magsarefluxes':magsarefluxes
                },
            }
            # make the fit plot if required
            if plotfit and isinstance(plotfit, str):
                make_fit_plot(phase, pmags, perrs, fitmags,
                              period, mintime, mintime,
                              plotfit,
                              magsarefluxes=magsarefluxes)
                returndict['fitplotfile'] = plotfit
            return returndict
        # if the leastsq fit did not succeed, return Nothing
        else:
            LOGERROR('fourier-fit: least-squared fit to the light curve failed')
            return {
                'fittype':'fourier',
                'fitinfo':{
                    'fourierorder':fourierorder,
                    'finalparams':None,
                    'initialfit':initialfit,
                    'leastsqfit':None,
                    'fitmags':None,
                    'fitepoch':None
                },
                'fitchisq':npnan,
                'fitredchisq':npnan,
                'fitplotfile':None,
                'magseries':{
                    'times':ptimes,
                    'phase':phase,
                    'mags':pmags,
                    'errs':perrs,
                    'magsarefluxes':magsarefluxes
                }
            }
    # if the fit didn't succeed, we can't proceed
    else:
        LOGERROR('initial Fourier fit did not succeed, '
                 'reason: %s, returning scipy OptimizeResult'
                 % initialfit.message)
        return {
            'fittype':'fourier',
            'fitinfo':{
                'fourierorder':fourierorder,
                'finalparams':None,
                'initialfit':initialfit,
                'leastsqfit':None,
                'fitmags':None,
                'fitepoch':None
            },
            'fitchisq':npnan,
            'fitredchisq':npnan,
            'fitplotfile':None,
            'magseries':{
                'times':ptimes,
                'phase':phase,
                'mags':pmags,
                'errs':perrs,
                'magsarefluxes':magsarefluxes
            }
        }
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a Fourier cosine series to.
period : float
The period to use for the Fourier fit.
fourierorder : None or int
If this is an int, will be interpreted as the Fourier order of the
series to fit to the input mag/flux times-series. If this is None and
`fourierparams` is specified, `fourierparams` will be used directly to
generate the fit Fourier series. If `fourierparams` is also None, this
function will try to fit a Fourier cosine series of order 3 to the
mag/flux time-series.
fourierparams : list of floats or None
If this is specified as a list of floats, it must be of the form below::
[fourier_amp1, fourier_amp2, fourier_amp3,...,fourier_ampN,
fourier_phase1, fourier_phase2, fourier_phase3,...,fourier_phaseN]
to specify a Fourier cosine series of order N. If this is None and
`fourierorder` is specified, the Fourier order specified there will be
used to construct the Fourier cosine series used to fit the input
mag/flux time-series. If both are None, this function will try to fit a
Fourier cosine series of order 3 to the input mag/flux time-series.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
ignoreinitfail : bool
If this is True, ignores the initial failure to find a set of optimized
Fourier parameters using the global optimization function and proceeds
to do a least-squares fit anyway.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'fourier',
'fitinfo':{
'finalparams': the list of final model fit params,
'leastsqfit':the full tuple returned by scipy.leastsq,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
... other fit function specific keys ...
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
NOTE: the returned value of 'fitepoch' in the 'fitinfo' dict returned by
this function is the time value of the first observation since this is
where the LC is folded for the fit procedure. To get the actual time of
minimum epoch as calculated by a spline fit to the phased LC, use the
key 'actual_fitepoch' in the 'fitinfo' dict. |
def _get_formatter(self, json):
    '''
    Choose the log formatter to use.

    @param json: Boolean value; when truthy a JSON formatter is returned,
        otherwise a plain formatter built from ``self.format_string``.
    '''
    if not json:
        return logging.Formatter(self.format_string)
    return jsonlogger.JsonFormatter()
@param json: Boolean value |
def is_cellular_component(self, go_term):
    """
    Return True if `go_term` is, or has an is_a/part_of ancestor of, the
    cellular-component root term GO:0005575.
    """
    cc_root = "GO:0005575"
    # The root trivially qualifies; otherwise check the is_a closure.
    return go_term == cc_root or cc_root in self.get_isa_closure(go_term)
def delete(self, *args, **kwargs):
    """Delete the data model.

    After the Data row is removed, Storage objects that are no longer
    referenced by any Data object are deleted as well.
    """
    # Store ids in memory as relations are also deleted with the Data object.
    storage_ids = list(self.storages.values_list('pk', flat=True))  # pylint: disable=no-member
    super().delete(*args, **kwargs)
    # Drop storages that are now orphaned (no remaining `data` relations).
    Storage.objects.filter(pk__in=storage_ids, data=None).delete()
def _handle_ctrl_c(self, *args):
    """Signal handler for keyboard interrupts (CTRL+C).

    The first interrupt requests a graceful stop after the current cycle;
    a second interrupt forces an immediate shutdown via SystemExit.
    """
    if self.anybar:
        self.anybar.change("exclamation")
    if self._stop:
        # Second CTRL+C: bail out immediately.
        print("\nForced shutdown...")
        raise SystemExit
    banner = 42 * '='
    print(
        '\n' + banner + "\nGot CTRL+C, waiting for current cycle...\n"
        "Press CTRL+C again if you're in hurry!\n" + banner
    )
    self._stop = True
def eval(self, expression, identify_erros=True):
    """Evaluate a matlab expression synchronously.

    When `identify_erros` is true and the matlab output contains a '???'
    error marker, a MatlabError is raised carrying the text following the
    marker.  Otherwise the raw matlab output is returned.
    """
    self._check_open()
    output = self.client.Execute(expression)
    if identify_erros:
        marker = output.rfind('???')
        if marker != -1:
            # Skip past '??? ' to get just the error text.
            raise MatlabError(output[marker + 4:])
    return output
If identify_erros is true, and the last output line after evaluating the
expression begins with '???', an exception is thrown with the matlab error
following the '???'.
The return value of the function is the matlab output following the call. |
def post(self, request, bot_id, format=None):
    """
    Add a new hook
    ---
    serializer: HookSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    # Delegate to the generic list view's POST handler; this override exists
    # only to attach the swagger documentation above.
    return super(HookList, self).post(request, bot_id, format)
---
serializer: HookSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request |
def get_summary(self):
    """
    Summarise the contents of this ResultList as a plain dictionary.

    :return: dict with total/pass/fail/skip/inconclusive counts, the retry
        count and the total duration.
    """
    keys = ("count", "pass", "fail", "skip", "inconclusive", "retries",
            "duration")
    values = (
        self.count(),
        self.success_count(),
        self.failure_count(),
        self.skip_count(),
        self.inconclusive_count(),
        self.retry_count(),
        self.total_duration(),
    )
    return dict(zip(keys, values))
:return: dictionary |
def loadEL(dbpath=None, recpath=None, remove_subs=None, wordpool=None, groupby=None, experiments=None,
           filters=None):
    '''
    Function that loads sql files generated by autoFR Experiment.

    Parameters
    ----------
    dbpath : str or list of str
        Path(s) to the psiTurk sqlite database file(s) to load.
    recpath : str
        Folder containing the per-subject recall transcription files.
    remove_subs : list of str or None
        Unique subject ids to exclude from the result.
    wordpool : str
        Path to the wordpool CSV file (expects WORD, CATEGORY and SIZE columns).
    groupby : dict or None
        Dict of the form ``{'exp_version': [...]}``; when given, subjects are
        grouped by experiment version and one Egg is built per group.
    experiments : list of str
        Experiment code versions to keep.
    filters : list of callables or None
        Extra dataframe filters applied after the built-in ones.

    Returns
    -------
    Egg or list of Egg
        A single Egg when there is one group, otherwise a list of Eggs.
    '''
    assert (dbpath is not None), "You must specify a db file or files."
    assert (recpath is not None), "You must specify a recall folder."
    assert (wordpool is not None), "You must specify a wordpool file."
    assert (experiments is not None), "You must specify a list of experiments"
    ############################################################################
    # subfunctions #############################################################
    def db2df(db, filter_func=None):
        '''
        Loads db file and converts to dataframe
        '''
        db_url = "sqlite:///" + db
        table_name = 'turkdemo'
        data_column_name = 'datastring'
        # boilerplace sqlalchemy setup
        engine = create_engine(db_url)
        metadata = MetaData()
        metadata.bind = engine
        table = Table(table_name, metadata, autoload=True)
        # make a query and loop through
        s = table.select()
        rows = s.execute()
        data = []
        for row in rows:
            data.append(row[data_column_name])
        # parse each participant's datastring as json object
        # and take the 'data' sub-object
        data = [json.loads(part)['data'] for part in data if part is not None]
        # remove duplicate subject data for debugXG82XV:debug7XPXQA
        # data[110] = data[110][348:]
        # insert uniqueid field into trialdata in case it wasn't added
        # in experiment:
        for part in data:
            for record in part:
                if type(record['trialdata']) is list:
                    record['trialdata'] = {record['trialdata'][0]:record['trialdata'][1]}
                record['trialdata']['uniqueid'] = record['uniqueid']
        # flatten nested list so we just have a list of the trialdata recorded
        # each time psiturk.recordTrialData(trialdata) was called.
        def isNotNumber(s):
            # True when `s` cannot be parsed as a float (i.e. a real field name).
            try:
                float(s)
                return False
            except ValueError:
                return True
        data = [record['trialdata'] for part in data for record in part]
        # filter out fields that we dont want using isNotNumber function
        filtered_data = [{k:v for (k,v) in list(part.items()) if isNotNumber(k)} for part in data]
        # Put all subjects' trial data into a dataframe object from the
        # 'pandas' python library: one option among many for analysis
        data_frame = pd.DataFrame(filtered_data)
        data_column_name = 'codeversion'
        # boilerplace sqlalchemy setup (second pass: read the code versions)
        engine = create_engine(db_url)
        metadata = MetaData()
        metadata.bind = engine
        table = Table(table_name, metadata, autoload=True)
        # make a query and loop through
        s = table.select()
        rows = s.execute()
        versions = []
        version_dict = {}
        for row in rows:
            version_dict[row[0]] = row[data_column_name]
        # Attach the experiment version to every trial row for each subject.
        version_col = []
        for idx, sub in enumerate(data_frame['uniqueid'].unique()):
            for i in range(sum(data_frame['uniqueid'] == sub)):
                version_col.append(version_dict[sub])
        data_frame['exp_version'] = version_col
        if filter_func:
            for idx, filt in enumerate(filter_func):
                data_frame = filt(data_frame)
        return data_frame
    # custom filter to clean db file: keep only rows whose Q1 response is a
    # recognized experimenter name.
    def experimenter_filter(data_frame):
        data = []
        indexes = []
        # NOTE(review): if parsing fails on the very first row, `delete` is
        # referenced before assignment below -- TODO confirm intended behavior.
        for line in data_frame.iterrows():
            try:
                if json.loads(line[1]['responses'])['Q1'].lower() in ['kirsten','allison','allison\nallison','marisol', 'marisol ','marisiol', 'maddy','campbell', 'campbell field', 'kirsten\nkirsten', 'emily', 'bryan', 'armando', 'armando ortiz',
                'maddy/lucy','paxton', 'lucy','campbell\ncampbell','madison','darya','rachael']:
                    delete = False
                else:
                    delete = True
            except:
                pass
            if delete:
                indexes.append(line[0])
        return data_frame.drop(indexes)
    # Placeholder filter: currently keeps every row regardless of 'Q2'.
    def adaptive_filter(data_frame):
        data = []
        indexes = []
        subjcb = {}
        for line in data_frame.iterrows():
            try:
                if 'Q2' in json.loads(line[1]['responses']):
                    delete = False
                else:
                    delete = False
            except:
                pass
            if delete:
                indexes.append(line[0])
        return data_frame.drop(indexes)
    # Keep only rows belonging to the requested experiment versions.
    def experiments_filter(data_frame):
        indexes = []
        for line in data_frame.iterrows():
            try:
                if line[1]['exp_version'] in experiments:
                    delete = False
                else:
                    delete = True
            except:
                pass
            if delete:
                indexes.append(line[0])
        return data_frame.drop(indexes)
    # this function takes the data frame and returns subject specific data
    # based on the subid variable
    def filterData(data_frame, subid):
        filtered_stim_data = data_frame[data_frame['stimulus'].notnull() & data_frame['listNumber'].notnull()]
        filtered_stim_data = filtered_stim_data[filtered_stim_data['trial_type'] == 'single-stim']
        filtered_stim_data = filtered_stim_data[filtered_stim_data['uniqueid'] == subid]
        return filtered_stim_data
    # Parse the rendered HTML stimuli into dicts of presentation features.
    def createStimDict(data):
        stimDict = []
        for index, row in data.iterrows():
            try:
                stimDict.append({
                    'text': str(re.findall('>(.+)<', row['stimulus'])[0]),
                    'color' : { 'r' : int(re.findall('rgb\((.+)\)', row['stimulus'])[0].split(',')[0]),
                                'g' : int(re.findall('rgb\((.+)\)', row['stimulus'])[0].split(',')[1]),
                                'b' : int(re.findall('rgb\((.+)\)', row['stimulus'])[0].split(',')[2])
                    },
                    'location' : {
                        'top': float(re.findall('top:(.+)\%;', row['stimulus'])[0]),
                        'left' : float(re.findall('left:(.+)\%', row['stimulus'])[0])
                    },
                    'category' : wordpool['CATEGORY'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<', row['stimulus'])[0]))],
                    'size' : wordpool['SIZE'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<', row['stimulus'])[0]))],
                    'wordLength' : len(str(re.findall('>(.+)<', row['stimulus'])[0])),
                    'firstLetter' : str(re.findall('>(.+)<', row['stimulus'])[0])[0],
                    'listnum' : row['listNumber']
                })
            except:
                # Stimulus without color/location markup: fall back to defaults.
                stimDict.append({
                    'text': str(re.findall('>(.+)<', row['stimulus'])[0]),
                    'color' : { 'r' : 0,
                                'g' : 0,
                                'b' : 0
                    },
                    'location' : {
                        'top': 50,
                        'left' : 50
                    },
                    'category' : wordpool['CATEGORY'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<', row['stimulus'])[0]))],
                    'size' : wordpool['SIZE'].iloc[list(wordpool['WORD'].values).index(str(re.findall('>(.+)<', row['stimulus'])[0]))],
                    'wordLength' : len(str(re.findall('>(.+)<', row['stimulus'])[0])),
                    'firstLetter' : str(re.findall('>(.+)<', row['stimulus'])[0])[0],
                    'listnum' : row['listNumber']
                })
        return stimDict
    # this function loads in the recall data into an array of arrays, where
    # each array represents a list of words
    def loadRecallData(subid):
        recalledWords = []
        for i in range(0, 16):
            try:
                f = open(recpath + subid + '/' + subid + '-' + str(i) + '.wav.txt', 'r')
                spamreader = csv.reader(f, delimiter=',', quotechar='|')
            except (IOError, OSError) as e:
                # Fall back to the flat (non-subfoldered) file layout.
                try:
                    f = open(recpath + subid + '-' + str(i) + '.wav.txt', 'r')
                    spamreader = csv.reader(f, delimiter=',', quotechar='|')
                except (IOError, OSError) as e:
                    print(e)
            try:
                words = []
                altformat = True
                for row in spamreader:
                    if len(row) > 1:
                        # One comma-separated row holds the whole list.
                        recalledWords.append(row)
                        altformat = False
                        break
                    else:
                        # Alternate format: one word per row.
                        try:
                            words.append(row[0])
                        except:
                            pass
                if altformat:
                    recalledWords.append(words)
            except:
                print('couldnt process ' + recpath + subid + '/' + subid + '-' + str(i) + '.wav.txt')
        return recalledWords
    # this function computes accuracy for a series of lists
    def computeListAcc(stimDict, recalledWords):
        accVec = []
        for i in range(0, 16):
            stim = [stim['text'] for stim in stimDict if stim['listnum'] == i]
            recalled = recalledWords[i]
            acc = 0
            tmpstim = stim[:]
            for word in recalled:
                if word in tmpstim:
                    tmpstim.remove(word)
                    acc += 1
            accVec.append(acc / len(stim))
        return accVec
    # Reshape the stim dicts into per-list feature dicts (16 items per list).
    def getFeatures(stimDict):
        stimDict_copy = stimDict[:]
        for item in stimDict_copy:
            item['location'] = [item['location']['top'], item['location']['left']]
            item['color'] = [item['color']['r'], item['color']['g'], item['color']['b']]
            item.pop('text', None)
            item.pop('listnum', None)
        stimDict_copy = [stimDict_copy[i:i + 16] for i in range(0, len(stimDict_copy), 16)]
        return stimDict_copy
    ############################################################################
    # main program #############################################################
    # if its not a list, make it one
    if type(dbpath) is not list:
        dbpath = [dbpath]
    # read in stimulus library
    wordpool = pd.read_csv(wordpool)
    # add custom filters
    if filters:
        # BUGFIX: this previously referenced `experimeter_filter` (undefined
        # name), raising a NameError whenever custom filters were supplied.
        filter_func = [adaptive_filter, experimenter_filter, experiments_filter] + filters
    else:
        filter_func = [adaptive_filter, experimenter_filter, experiments_filter]
    # load in dbs and convert to df, and filter
    dfs = [db2df(db, filter_func=filter_func) for db in dbpath]
    # concatenate the db files
    df = pd.concat(dfs)
    # subjects who have completed the exp
    subids = list(df[df['listNumber'] == 15]['uniqueid'].unique())
    # remove problematic subjects
    if remove_subs:
        for sub in remove_subs:
            try:
                subids.remove(sub)
            except:
                print('Could not find subject: ' + sub + ', skipping...')
    # set up data structure to load in subjects
    if groupby:
        pres = [[] for i in range(len(groupby['exp_version']))]
        rec = [[] for i in range(len(groupby['exp_version']))]
        features = [[] for i in range(len(groupby['exp_version']))]
        subs = [[] for i in range(len(groupby['exp_version']))]
        # make each groupby item a list
        groupby = [exp if type(exp) is list else [exp] for exp in groupby['exp_version']]
    else:
        pres = [[]]
        rec = [[]]
        features = [[]]
        subs = [[]]
    # for each subject that completed the experiment
    for idx, sub in enumerate(subids):
        # get the subjects data
        filteredStimData = filterData(df, sub)
        if filteredStimData['exp_version'].values[0] in experiments:
            # create stim dict
            stimDict = createStimDict(filteredStimData)
            sub_data = pd.DataFrame(stimDict)
            sub_data['subject'] = idx
            sub_data['experiment'] = filteredStimData['exp_version'].values[0]
            sub_data = sub_data[['experiment','subject','listnum','text','category','color','location','firstLetter','size','wordLength']]
            # get features from stim dict
            feats = getFeatures(stimDict)
            # load in the recall data
            recalledWords = loadRecallData(sub)
            # get experiment version
            exp_version = filteredStimData['exp_version'].values[0]
            # find the idx of the experiment for this subjects
            if groupby:
                exp_idx = list(np.where([exp_version in item for item in groupby])[0])
            else:
                exp_idx = [0]
            if exp_idx != []:
                pres[exp_idx[0]].append([list(sub_data[sub_data['listnum'] == lst]['text'].values) for lst in sub_data['listnum'].unique()])
                rec[exp_idx[0]].append(recalledWords)
                features[exp_idx[0]].append(feats)
                subs[exp_idx[0]].append(sub)
    eggs = [Egg(pres=ipres, rec=irec, features=ifeatures, meta={'ids' : isub}) for ipres, irec, ifeatures, isub in zip(pres, rec, features, subs)]
    # map feature dictionaries in pres df to rec df
    def checkword(x):
        # Replace a bare recalled item with its full feature dict when known.
        if x is None:
            return x
        else:
            try:
                return stim_dict[x['item']]
            except:
                return x
    # convert utf-8 bytes type to string
    def update_types(egg):
        featlist = list(egg.pres.loc[0].loc[0].values.tolist()[0].keys())
        def update1df(df):
            for sub in range(egg.n_subjects):
                for liszt in range(egg.n_lists):
                    for item in range(len(df.loc[sub].loc[liszt].values.tolist())):
                        for feat in featlist:
                            if feat in df.loc[sub].loc[liszt].values.tolist()[item].keys():
                                if isinstance(df.loc[sub].loc[liszt].values.tolist()[item][feat], np.bytes_):
                                    try:
                                        df.loc[sub].loc[liszt].values.tolist()[item][feat] = str(df.loc[sub].loc[liszt].values.tolist()[item][feat], 'utf-8')
                                    except:
                                        print("Subject " + str(sub) + ", list " + str(liszt) + ", item " + str(item) + ", feature " + str(feat) + ": Could not convert type " + str(type(egg.rec.loc[sub].loc[liszt].values.tolist()[item][feat])) + " to string.")
        update1df(egg.pres)
        update1df(egg.rec)
    for egg in eggs:
        update_types(egg)
        old_meta = egg.meta
        temp_eggs = [egg]
        for i in range(egg.n_subjects):
            e = egg.crack(subjects=[i])
            stim = e.pres.values.ravel()
            # checkword (above) closes over stim_dict, so applymap must run
            # before stim_dict is rebound on the next iteration.
            stim_dict = {str(x['item']) : {k: v for k, v in iter(x.items())} for x in stim}
            e.rec = e.rec.applymap(lambda x: checkword(x))
            temp_eggs.append(e)
        edited_egg = stack_eggs(temp_eggs)
        mapped_egg = edited_egg.crack(subjects=[i for i in range(egg.n_subjects, egg.n_subjects * 2)])
        mapped_egg.meta = old_meta
        eggs[eggs.index(egg)] = mapped_egg
    if len(eggs) > 1:
        return eggs
    else:
        return eggs[0]
def chunk(self, lenient=False):
    """
    Read the next chunk from the underlying PNG stream.

    Returns a ``(type, data)`` tuple: *type* is the 4-byte chunk type and
    *data* is the chunk payload, both as byte strings.  When `lenient`
    evaluates true, CRC mismatches produce a ``RuntimeWarning`` instead of
    raising ``ChunkError``.
    """
    self.validate_signature()
    # http://www.w3.org/TR/PNG/#5Chunk-layout
    if not self.atchunk:
        self.atchunk = self._chunk_len_type()
        if not self.atchunk:
            raise ChunkError("No more chunks.")
    length, ctype = self.atchunk
    self.atchunk = None
    payload = self.file.read(length)
    if len(payload) != length:
        raise ChunkError(
            'Chunk %s too short for required %i octets.'
            % (ctype, length))
    stored_crc = self.file.read(4)
    if len(stored_crc) != 4:
        raise ChunkError('Chunk %s too short for checksum.' % ctype)
    # CRC covers the chunk type followed by the payload.
    computed = zlib.crc32(payload, zlib.crc32(ctype))
    # Whether the output from zlib.crc32 is signed or not varies
    # according to hideous implementation details, see
    # http://bugs.python.org/issue1202 .
    # Coerce it to be positive here (in a way which works on
    # Python 2.3 and older).
    computed &= 2**32 - 1
    computed_crc = struct.pack('!I', computed)
    if stored_crc != computed_crc:
        (a, ) = struct.unpack('!I', stored_crc)
        (b, ) = struct.unpack('!I', computed_crc)
        message = ("Checksum error in %s chunk: 0x%08X != 0x%08X."
                   % (ctype.decode('ascii'), a, b))
        if lenient:
            warnings.warn(message, RuntimeWarning)
        else:
            raise ChunkError(message)
    return ctype, payload
returns a (*type*, *data*) tuple.
*type* is the chunk's type as a byte string
(all PNG chunk types are 4 bytes long).
*data* is the chunk's data content, as a byte string.
If the optional `lenient` argument evaluates to `True`,
checksum failures will raise warnings rather than exceptions. |
def inquire(self, name=True, lifetime=True, usage=True, mechs=True):
    """Inspect these credentials for information.

    Args:
        name (bool): get the name associated with the credentials
        lifetime (bool): get the remaining lifetime for the credentials
        usage (bool): get the usage for the credentials
        mechs (bool): get the mechanisms associated with the credentials

    Returns:
        InquireCredResult: the requested information; fields whose
        corresponding argument was False are None

    Raises:
        MissingCredentialsError
        InvalidCredentialsError
        ExpiredCredentialsError
    """
    res = rcreds.inquire_cred(self, name, lifetime, usage, mechs)
    # Wrap the raw name (when present) in the high-level Name type.
    wrapped_name = names.Name(res.name) if res.name is not None else None
    return tuples.InquireCredResult(wrapped_name, res.lifetime,
                                    res.usage, res.mechs)
This method inspects these credentials for information about them.
Args:
name (bool): get the name associated with the credentials
lifetime (bool): get the remaining lifetime for the credentials
usage (bool): get the usage for the credentials
mechs (bool): get the mechanisms associated with the credentials
Returns:
InquireCredResult: the information about the credentials,
with None used when the corresponding argument was False
Raises:
MissingCredentialsError
InvalidCredentialsError
ExpiredCredentialsError |
def save(self, path, format, binary=False, use_load_condition=False):
    """
    Save object as word embedding file. For most arguments, you should refer
    to :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.

    Args:
        use_load_condition (bool): If `True`, options from
            :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`
            is used (``format`` and ``binary`` are ignored).

    Raises:
        ValueError: ``use_load_condition == True`` but the object is not
            initialized via
            :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
    """
    if use_load_condition:
        if self._load_cond is None:
            # NOTE(review): these are bytes literals; exception messages are
            # conventionally str — presumably unintentional, left unchanged.
            raise ValueError(
                b"use_load_condition was specified but the object is not "
                b"loaded from a file")
        # Use load condition
        mod = self._load_cond
    else:
        mod = _select_module(format, binary)
    if self.freqs is None:
        # No frequency info: write words in vocabulary-index order.
        itr = list(
            sorted(six.iteritems(self.vocab), key=lambda k_v: k_v[1]))
    else:
        # Write words from most to least frequent.
        itr = list(
            sorted(six.iteritems(self.vocab),
                   key=lambda k_v: self.freqs[k_v[0]], reverse=True)
        )
    with open(path, mode='wb') as f:
        mod.saver.save(f, self.vectors, itr)
to :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
Args:
use_load_condition (bool): If `True`, options from
:func:`~word_embedding_loader.word_embedding.WordEmbedding.load`
is used.
Raises:
ValueError: ``use_load_condition == True`` but the object is not
initialized via
:func:`~word_embedding_loader.word_embedding.WordEmbedding.load`. |
def __print(self, msg):
    """Shortcut for printing with the distutils logger.

    :param msg: message to emit at INFO level via ``self.announce``.
    """
    self.announce(msg, level=distutils.log.INFO)
def _set_version(self, version):
    '''
    Set up this object based on the capabilities of the
    known versions of Redmine.

    :param version: Redmine version as a number (e.g. 2.2), or None/0 when
        the server version is unknown.
    :raises RedmineError: if an explicit version lower than 1.0 is given.
    '''
    # Store the version we are evaluating
    self.version = version or None
    # To evaluate the version capabilities,
    # assume the best-case if no version is provided.
    version_check = version or 9999.0
    if version_check < 1.0:
        raise RedmineError('This library will only work with '
                           'Redmine version 1.0 and higher.')
    ## SECURITY AUGMENTATION
    # All versions support the key in the request
    # (http://server/stuff.json?key=blah)
    # But versions 1.1 and higher can put the key in a header field
    # for better security.
    # If no version was provided (0.0) then assume we should
    # set the key with the request.
    # BUGFIX: comparing `version` directly raised TypeError on Python 3 when
    # version is None; treat a missing version as 0.0 as the comment intends.
    self.key_in_header = (version or 0.0) >= 1.1
    # it puts the key in the header or
    # it gets the hose, but not for 1.0.
    self.impersonation_supported = version_check >= 2.2
    self.has_project_memberships = version_check >= 1.4
    self.has_project_versions = version_check >= 1.3
    self.has_wiki_pages = version_check >= 2.2
    ## ITEM MANAGERS
    # Step through all the item managers by version
    # and instantiate an item manager for each supported item.
    for manager_version in self._item_managers_by_version:
        if version_check >= manager_version:
            managers = self._item_managers_by_version[manager_version]
            for attribute_name, item in managers.iteritems():
                setattr(self, attribute_name,
                        Redmine_Items_Manager(self, item))
known versions of Redmine |
def make_grid(rect, cells=None, num_rows=0, num_cols=0, padding=None,
        inner_padding=None, outer_padding=None, row_heights=None,
        col_widths=None, default_row_height='expand',
        default_col_width='expand'):
    """
    Return rectangles for each cell in the specified grid. The rectangles are
    returned in a dictionary where the keys are (row, col) tuples.

    The dict parameters (*cells*, *row_heights*, *col_widths*) default to
    None rather than a literal ``{}``: a mutable default is created once
    and silently shared between every call, so any caller (or callee)
    mutation would leak across invocations.  Each None is replaced with a
    fresh empty dict here, which is backward compatible for all callers.
    """
    # Guard against the mutable-default-argument pitfall.
    if cells is None:
        cells = {}
    if row_heights is None:
        row_heights = {}
    if col_widths is None:
        col_widths = {}
    grid = Grid(
        bounding_rect=rect,
        min_cell_rects=cells,
        num_rows=num_rows,
        num_cols=num_cols,
        padding=padding,
        inner_padding=inner_padding,
        outer_padding=outer_padding,
        row_heights=row_heights,
        col_widths=col_widths,
        default_row_height=default_row_height,
        default_col_width=default_col_width,
    )
    return grid.make_cells()
returned in a dictionary where the keys are (row, col) tuples. |
def build_filter(predicate: Callable[[Any], bool] = None, *,
                 unpack: bool = False):
    """ Decorator turning a predicate function into a Filter factory.

    :param predicate: function to be wrapped
    :param unpack: value from emits will be unpacked (*value)
    """
    def _decorate(func: Callable[[Any], bool]):
        @wraps(func)
        def _factory(*args, **kwargs) -> Filter:
            # ``unpack`` is fixed at decoration time; reject attempts to
            # override it at call time.
            if 'unpack' in kwargs:
                raise TypeError('"unpack" has to be defined by decorator')
            return Filter(func, *args, unpack=unpack, **kwargs)
        return _factory

    # Support usage both with and without decorator arguments.
    if predicate:
        return _decorate(predicate)
    return _decorate
:param predicate: function to be wrapped
:param unpack: value from emits will be unpacked (*value) |
def key(self):
    """Embedded supports curies.

    Returns the plain name when no curie is attached, otherwise the
    curie-qualified form ``<curie name>:<name>``.
    """
    curie = self.curie
    if curie is not None:
        return ":".join((curie.name, self.name))
    return self.name
def find_stop(self, query, direction=""):
    """
    Search the list of stops, optionally in a direction (inbound or
    outbound), for the term passed to the function. Case insensitive,
    searches both the stop name and ID.

    Returns a list of matching stop objects.  (The previous docstring
    claimed a generator was yielded; the function has always built and
    returned a list.)

    Any *direction* other than "inbound"/"outbound" searches both
    directions.
    """
    direction = direction.lower()
    if direction == "inbound":
        stops = self.inbound_stops
    elif direction == "outbound":
        stops = self.outbound_stops
    else:
        stops = self.inbound_stops + self.outbound_stops
    # Normalise the query once -- it is loop-invariant (the original
    # recomputed it per stop, and also kept an unused _directions list).
    q = str(query).lower()
    return [stop for stop in stops
            if q in stop.name.lower() or q in str(stop.id).lower()]
for the term passed to the function. Case insensitive, searches both the
stop name and ID. Yields a generator.
Defaults to both directions. |
def defer(self, func, *args, **kwargs):
    """
    Arrange for `func(*args, **kwargs)` to be invoked in the context of a
    service pool thread.
    """
    # Capture the call as a zero-argument closure; the pool invokes it.
    def _invoke():
        return func(*args, **kwargs)
    self._ipc_latch.put(_invoke)
service pool thread. |
def _parse_alt_url(html_chunk):
    """
    Parse URL from alternative location if not found where it should be.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with details.

    Returns:
        str: Book's URL, or ``None`` when no suitable ``<a href>`` remains
        after filtering out author links.
    """
    # List comprehensions instead of map()/filter(): on Python 3 those
    # return lazy iterators, which would break both the emptiness test
    # below (a filter object is always truthy) and the [0] indexing.
    anchors = html_chunk.find("a", fn=has_param("href"))
    url_list = [a.params["href"] for a in anchors]
    url_list = [u for u in url_list if not u.startswith("autori/")]
    if not url_list:
        return None
    return normalize_url(BASE_URL, url_list[0])
Args:
html_chunk (obj): HTMLElement containing slice of the page with details.
Returns:
str: Book's URL. |
def getResponse(self, context=""):
    """ Poll for finished block or first byte ACK.

    Args:
        context (str): internal serial call context.

    Returns:
        string: Response, implicit cast from byte array.  An empty string
        is returned on timeout (``m_max_waits`` idle polls) or error.
    """
    waits = 0  # allowed interval counter
    response_str = ""  # returned bytes in string default
    try:
        waits = 0  # allowed interval counter
        while (waits < self.m_max_waits):
            bytes_to_read = self.m_ser.inWaiting()
            if bytes_to_read > 0:
                next_chunk = str(self.m_ser.read(bytes_to_read)).encode('ascii', 'ignore')
                response_str += next_chunk
                # A complete meter block is exactly 255 bytes.
                if (len(response_str) == 255):
                    time.sleep(self.m_force_wait)
                    return response_str
                # A single 0x06 byte is the ACK for a one-byte reply.
                # NOTE(review): str.encode('hex') is Python 2 only --
                # this module is Py2 code; confirm before porting.
                if (len(response_str) == 1) and (response_str.encode('hex') == '06'):
                    time.sleep(self.m_force_wait)
                    return response_str
            else:  # hang out -- half shortest expected interval (50 ms)
                waits += 1
                time.sleep(self.m_force_wait)
        response_str = ""
    except Exception:
        # FIX: narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt still propagate.  Also, format_exc() takes no
        # exc_info tuple -- passing sys.exc_info() abused its ``limit``
        # parameter (and is a TypeError on Python 3).
        ekm_log(traceback.format_exc())
    return response_str
Args:
context (str): internal serial call context.
Returns:
string: Response, implict cast from byte array. |
def _compute_faulting_style_term(self, C, rake):
"""
Compute faulting style term as a function of rake angle value as given
in equation 5 page 465.
"""
if rake > -120.0 and rake <= -60.0:
return C['aN']
elif rake > 30.0 and rake <= 150.0:
return C['aR']
else:
return C['aS'] | Compute faulting style term as a function of rake angle value as given
in equation 5 page 465. |
def step_impl07(context, len_list):
    """Check assertions.

    :param len_list: expected number of variants.
    :param context: test context.
    """
    variants = context.fuzzed_string_list
    assert len(variants) == len_list
    seed = context.seed
    for variant in variants:
        # Every fuzzed variant must keep the seed's length.
        assert len(variant) == len(seed)
        modified = number_of_modified_bytes(seed, variant)
        assert modified >= 0
:param len_list: expected number of variants.
:param context: test context. |
def _fromData(cls, header, tflags, data):
    """Construct this ID3 frame from raw string data.

    Decodes the per-frame flag layers (data-length prefix, unsynch,
    encryption, zlib compression) according to the tag version in
    *header*, then hands the cleaned payload to the subclass's
    ``_readData``.

    Raises:
        ID3JunkFrameError in case parsing failed
        NotImplementedError in case parsing isn't implemented
        ID3EncryptionUnsupportedError in case the frame is encrypted.
    """
    if header.version >= header._V24:
        if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN):
            # The data length int is syncsafe in 2.4 (but not 2.3).
            # However, we don't actually need the data length int,
            # except to work around a QL 0.12 bug, and in that case
            # all we need are the raw bytes.
            datalen_bytes = data[:4]
            data = data[4:]
        if tflags & Frame.FLAG24_UNSYNCH or header.f_unsynch:
            try:
                data = unsynch.decode(data)
            except ValueError:
                # Some things write synch-unsafe data with either the frame
                # or global unsynch flag set. Try to load them as is.
                # https://github.com/quodlibet/mutagen/issues/210
                # https://github.com/quodlibet/mutagen/issues/223
                pass
        if tflags & Frame.FLAG24_ENCRYPT:
            raise ID3EncryptionUnsupportedError
        if tflags & Frame.FLAG24_COMPRESS:
            try:
                data = zlib.decompress(data)
            except zlib.error:
                # the initial mutagen that went out with QL 0.12 did not
                # write the 4 bytes of uncompressed size. Compensate.
                data = datalen_bytes + data
                try:
                    data = zlib.decompress(data)
                except zlib.error as err:
                    raise ID3JunkFrameError(
                        'zlib: %s: %r' % (err, data))
    elif header.version >= header._V23:
        if tflags & Frame.FLAG23_COMPRESS:
            # v2.3 stores a plain (non-syncsafe) 32-bit uncompressed
            # size before the compressed payload; it is read but unused.
            usize, = unpack('>L', data[:4])
            data = data[4:]
        if tflags & Frame.FLAG23_ENCRYPT:
            raise ID3EncryptionUnsupportedError
        if tflags & Frame.FLAG23_COMPRESS:
            try:
                data = zlib.decompress(data)
            except zlib.error as err:
                raise ID3JunkFrameError('zlib: %s: %r' % (err, data))
    # All flag layers removed; let the concrete frame parse the payload.
    frame = cls()
    frame._readData(header, data)
    return frame
Raises:
ID3JunkFrameError in case parsing failed
NotImplementedError in case parsing isn't implemented
ID3EncryptionUnsupportedError in case the frame is encrypted. |
def on_created(self, event, dry_run=False, remove_uploaded=True):
    """Called when a file (or directory) is created.

    Delegates to the superclass handler and logs the event.

    NOTE(review): ``dry_run`` and ``remove_uploaded`` are accepted but
    unused here -- presumably consumed by subclasses or kept for
    interface compatibility; confirm before removing.
    """
    super(ArchiveEventHandler, self).on_created(event)
    log.info("created: %s", event)
def help_center_section_subscriptions(self, section_id, locale=None, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-section-subscriptions"
    # Pick the locale-scoped endpoint only when a locale was given.
    if locale:
        template = "/api/v2/help_center/{locale}/sections/{section_id}/subscriptions.json"
    else:
        template = "/api/v2/help_center/sections/{section_id}/subscriptions.json"
    api_path = template.format(section_id=section_id, locale=locale)
    return self.call(api_path, **kwargs)
def select_grid_model_residential(lvgd):
    """Selects typified model grid based on population

    Parameters
    ----------
    lvgd : LVGridDistrictDing0
        Low-voltage grid district object

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Parameters of the selected strings of the typified model grid,
        one row per selected string type, plus an ``occurence`` column
        giving how often each string appears.  (Earlier documentation
        also promised transformer parameters; this function returns only
        the string DataFrame.)

    Notes
    -----
    In total 196 distinct LV grid topologies are available that are chosen
    by population in the LV grid district. Population is translated to
    number of house branches. Each grid model fits a number of house
    branches. If this number exceeds 196, still the grid topology of 196
    house branches is used. The peak load of the LV grid district is
    uniformly distributed across house branches.
    """
    # Load properties of LV typified model grids
    string_properties = lvgd.lv_grid.network.static_data['LV_model_grids_strings']
    # Load relational table of apartment count and strings of model grid
    apartment_string = lvgd.lv_grid.network.static_data[
        'LV_model_grids_strings_per_grid']
    # load assumtions
    # NOTE(review): apartment_house_branch_ratio is loaded but never used
    # below -- confirm whether it is obsolete or a missing calculation.
    apartment_house_branch_ratio = cfg_ding0.get("assumptions",
                                                 "apartment_house_branch_ratio")
    population_per_apartment = cfg_ding0.get("assumptions",
                                             "population_per_apartment")
    # calc count of apartments to select string types
    apartments = round(lvgd.population / population_per_apartment)
    # Cap at the largest available model grid (196 house branches).
    if apartments > 196:
        apartments = 196
    # select set of strings that represent one type of model grid
    strings = apartment_string.loc[apartments]
    selected_strings = [int(s) for s in strings[strings >= 1].index.tolist()]
    # slice dataframe of string parameters
    selected_strings_df = string_properties.loc[selected_strings]
    # add number of occurences of each branch to df
    occurence_selector = [str(i) for i in selected_strings]
    selected_strings_df['occurence'] = strings.loc[occurence_selector].tolist()
    return selected_strings_df
Parameters
----------
lvgd : LVGridDistrictDing0
Low-voltage grid district object
Returns
-------
:pandas:`pandas.DataFrame<dataframe>`
Selected string of typified model grid
:pandas:`pandas.DataFrame<dataframe>`
Parameters of chosen Transformer
Notes
-----
In total 196 distinct LV grid topologies are available that are chosen
by population in the LV grid district. Population is translated to
number of house branches. Each grid model fits a number of house
branches. If this number exceeds 196, still the grid topology of 196
house branches is used. The peak load of the LV grid district is
uniformly distributed across house branches. |
def _writeLinks(self, links, fileObject, replaceParamFile):
"""
Write Link Lines to File Method
"""
for link in links:
linkType = link.type
fileObject.write('LINK %s\n' % link.linkNumber)
# Cases
if 'TRAP' in linkType or 'TRAPEZOID' in linkType or 'BREAKPOINT' in linkType:
self._writeCrossSectionLink(link, fileObject, replaceParamFile)
elif linkType == 'STRUCTURE':
self._writeStructureLink(link, fileObject, replaceParamFile)
elif linkType in ('RESERVOIR', 'LAKE'):
self._writeReservoirLink(link, fileObject, replaceParamFile)
else:
log.error('OOPS: CIF LINE 417') # THIS SHOULDN'T HAPPEN
fileObject.write('\n') | Write Link Lines to File Method |
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.

    :param socket_options: optional list of (level, optname, value)
        tuples applied to each candidate socket before connecting.
    :raises socket.error: the last per-address failure when every
        resolved address fails, or a generic error when getaddrinfo
        returns an empty list.
    """
    host, port = address
    if host.startswith('['):
        # Strip the brackets from IPv6 literals such as "[::1]".
        host = host.strip('[]')
    err = None
    # Using the value from allowed_gai_family() in the context of getaddrinfo lets
    # us select whether to work with IPv4 DNS records, IPv6 records, or both.
    # The original create_connection function always returns all records.
    family = allowed_gai_family()
    # Try each resolved address in order; first successful connect wins.
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            # If provided, set socket level options before connecting.
            _set_socket_options(sock, socket_options)
            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except socket.error as e:
            # Remember the failure and fall through to the next address.
            err = e
            if sock is not None:
                sock.close()
                sock = None
    if err is not None:
        raise err
    raise socket.error("getaddrinfo returns an empty list")
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
An host of '' or port 0 tells the OS to use the default. |
def show(module):
    '''
    Show information about a specific Perl module

    CLI Example:

    .. code-block:: bash

        salt '*' cpan.show Template::Alloy
    '''
    ret = {}
    ret['name'] = module
    # This section parses out details from CPAN, if possible
    cmd = 'cpan -D {0}'.format(module)
    out = __salt__['cmd.run'](cmd).splitlines()
    # Skip everything up to (and including) the dashed separator line,
    # then collect the detail lines that follow it.
    mode = 'skip'
    info = []
    for line in out:
        if line.startswith('-------------'):
            mode = 'parse'
            continue
        if mode == 'skip':
            continue
        info.append(line)
    if len(info) == 6:
        # If the module is not installed, we'll be short a line
        info.insert(2, '')
    if len(info) < 6:
        # This must not be a real package
        ret['error'] = 'This package does not seem to exist'
        return ret
    # NOTE(review): the fields below are parsed positionally from cpan's
    # human-readable output; a cpan output-format change breaks this.
    ret['description'] = info[0].strip()
    ret['cpan file'] = info[1].strip()
    if info[2].strip():
        ret['installed file'] = info[2].strip()
    else:
        ret['installed file'] = None
    comps = info[3].split(':')
    if len(comps) > 1:
        ret['installed version'] = comps[1].strip()
    if 'installed version' not in ret or not ret['installed version']:
        ret['installed version'] = None
    comps = info[4].split(':')
    comps = comps[1].split()
    ret['cpan version'] = comps[0].strip()
    ret['author name'] = info[5].strip()
    ret['author email'] = info[6].strip()
    # Check and see if there are cpan build directories
    config = show_config()
    build_dir = config.get('build_dir', None)
    if build_dir is not None:
        ret['cpan build dirs'] = []
        builds = os.listdir(build_dir)
        # CPAN names build dirs after the dist: Foo::Bar -> Foo-Bar-<ver>.
        pfile = module.replace('::', '-')
        for file_ in builds:
            if file_.startswith(pfile):
                ret['cpan build dirs'].append(os.path.join(build_dir, file_))
    return ret
CLI Example:
.. code-block:: bash
salt '*' cpan.show Template::Alloy |
def _generate_anchor_for(self, element, data_attribute, anchor_class):
    """
    Generate an anchor for the element.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param data_attribute: The name of attribute that links the element
        with the anchor.
    :type data_attribute: str
    :param anchor_class: The HTML class of anchor.
    :type anchor_class: str
    :return: The anchor, or None when an anchor for this element already
        exists in the document.
    :rtype: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    self.id_generator.generate_id(element)
    # Only create an anchor if no element is already linked to this one
    # through the given data attribute.
    if self.parser.find(
        '[' + data_attribute + '="' + element.get_attribute('id') + '"]'
    ).first_result() is None:
        if element.get_tag_name() == 'A':
            # A link can act as its own anchor.
            anchor = element
        else:
            # Otherwise insert a fresh <a> just before the element.
            anchor = self.parser.create_element('a')
            self.id_generator.generate_id(anchor)
            anchor.set_attribute('class', anchor_class)
            element.insert_before(anchor)
        if not anchor.has_attribute('name'):
            anchor.set_attribute('name', anchor.get_attribute('id'))
        anchor.set_attribute(data_attribute, element.get_attribute('id'))
        return anchor
    return None
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param data_attribute: The name of attribute that links the element
with the anchor.
:type data_attribute: str
:param anchor_class: The HTML class of anchor.
:type anchor_class: str
:return: The anchor.
:rtype: hatemile.util.html.htmldomelement.HTMLDOMElement |
def _map_segmentation_mask_to_stft_domain(mask, times, frequencies, stft_times, stft_frequencies):
"""
Maps the given `mask`, which is in domain (`frequencies`, `times`) to the new domain (`stft_frequencies`, `stft_times`)
and returns the result.
"""
assert mask.shape == (frequencies.shape[0], times.shape[0]), "Times is shape {} and frequencies is shape {}, but mask is shaped {}".format(
times.shape, frequencies.shape, mask.shape
)
result = np.zeros((stft_frequencies.shape[0], stft_times.shape[0]))
if len(stft_times) > len(times):
all_j = [j for j in range(len(stft_times))]
idxs = [int(i) for i in np.linspace(0, len(times) - 1, num=len(stft_times))]
all_i = [all_j[idx] for idx in idxs]
else:
all_i = [i for i in range(len(times))]
idxs = [int(i) for i in np.linspace(0, len(stft_times) - 1, num=len(times))]
all_j = [all_i[idx] for idx in idxs]
for i, j in zip(all_i, all_j):
result[:, j] = np.interp(stft_frequencies, frequencies, mask[:, i])
return result | Maps the given `mask`, which is in domain (`frequencies`, `times`) to the new domain (`stft_frequencies`, `stft_times`)
and returns the result. |
def bbox_rotate(bbox, angle, rows, cols, interpolation):
    """Rotates a bounding box by angle degrees

    Args:
        bbox (tuple): A tuple (x_min, y_min, x_max, y_max) in normalized
            coordinates.
        angle (int): Angle of rotation in degrees
        rows (int): Image rows.
        cols (int): Image cols.
        interpolation (int): interpolation method (unused here; kept for
            interface compatibility with sibling transforms).

    return a tuple (x_min, y_min, x_max, y_max)
    """
    aspect = cols / float(rows)
    theta = np.deg2rad(angle)
    # All four corners, shifted so rotation pivots on the image centre.
    xs = np.array([bbox[0], bbox[2], bbox[2], bbox[0]]) - 0.5
    ys = np.array([bbox[1], bbox[1], bbox[3], bbox[3]]) - 0.5
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Rotate in pixel-aspect-corrected space, then shift back.
    rot_x = (cos_t * xs * aspect + sin_t * ys) / aspect + 0.5
    rot_y = (-sin_t * xs * aspect + cos_t * ys) + 0.5
    # Axis-aligned hull of the rotated corners.
    return [min(rot_x), min(rot_y), max(rot_x), max(rot_y)]
Args:
bbox (tuple): A tuple (x_min, y_min, x_max, y_max).
angle (int): Angle of rotation in degrees
rows (int): Image rows.
cols (int): Image cols.
interpolation (int): interpolation method.
return a tuple (x_min, y_min, x_max, y_max) |
def getOntoCategory(curie, alwaysFetch=False):
    """
    Return the ontology categories for *curie*.

    Accessing the web-based ontology service is too slow to do on every
    call, so results are cached in a pickle file next to this module and
    the service is only queried when the information has not already been
    cached (or when *alwaysFetch* is True).
    """
    fileName = os.path.join(os.path.dirname(__file__), "ontoCategories.bin")
    # FIX: ontoCat must be bound even when alwaysFetch=True -- the
    # original only assigned it inside the ``not alwaysFetch`` branch,
    # so alwaysFetch=True raised NameError at the first assignment below.
    ontoCat = {}
    if not alwaysFetch:
        try:
            with open(fileName, "rb") as catFile:
                ontoCat = pickle.load(catFile)
            if curie in ontoCat:
                return ontoCat[curie]
        except Exception:
            # Missing or corrupt cache file: start fresh and refetch.
            ontoCat = {}
    base = bases["KS"]
    query = base + "/vocabulary/id/" + curie
    response = requests.get(query)
    if not response.ok:
        ontoCat[curie] = []
    else:
        try:
            concepts = response.json()
        except ValueError:
            print(query)
            print(response)
            raise
        if len(concepts["categories"]):
            ontoCat[curie] = concepts["categories"]
        else:
            ontoCat[curie] = []
    try:
        # Cache write failures are non-fatal (best effort).
        with open(fileName, "wb") as catFile:
            pickle.dump(ontoCat, catFile)
    except Exception:
        pass
    return ontoCat[curie]
information in a pickle file and query the services only if the info
has not already been cached. |
def generate_ecdsa_key(scheme='ecdsa-sha2-nistp256'):
    """
    <Purpose>
      Generate a fresh ECDSA keypair (NIST P-256 + SHA256 by default)
      together with a keyid identifier.  The returned dictionary conforms
      to 'securesystemslib.formats.ECDSAKEY_SCHEMA':

      {'keytype': 'ecdsa-sha2-nistp256',
       'scheme': 'ecdsa-sha2-nistp256',
       'keyid': keyid,
       'keyval': {'public': '...', 'private': '...'},
       'keyid_hash_algorithms': [...]}

    <Arguments>
      scheme:
        The ECDSA signature scheme; must be one of the supported schemes.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'scheme' is improperly
      formatted or not a supported ECDSA signature scheme.

    <Side Effects>
      None.

    <Returns>
      A dictionary containing the ECDSA keys and identifying information.
    """
    # Validate the requested scheme up front; raises FormatError if it is
    # malformed or unsupported.
    securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
    keytype = 'ecdsa-sha2-nistp256'
    # Create the actual keypair via the crypto backend.
    public, private = \
        securesystemslib.ecdsa_keys.generate_public_and_private(scheme)
    # The keyid is derived from the public half only; normalise Windows
    # '\r\n' newlines first so the identifier is platform-independent.
    key_value = {'public': public.replace('\r\n', '\n'),
                 'private': ''}
    keyid = _get_keyid(keytype, scheme, key_value)
    # Now that the keyid is fixed, fill in the private half.
    key_value['private'] = private
    # 'keyid_hash_algorithms' lets equal keys with different keyids be
    # associated through the supported hash algorithms.
    return {
        'keytype': keytype,
        'scheme': scheme,
        'keyid': keyid,
        'keyval': key_value,
        'keyid_hash_algorithms': securesystemslib.settings.HASH_ALGORITHMS,
    }
Generate public and private ECDSA keys, with NIST P-256 + SHA256 (for
hashing) being the default scheme. In addition, a keyid identifier for the
ECDSA key is generated. The object returned conforms to
'securesystemslib.formats.ECDSAKEY_SCHEMA' and has the form:
{'keytype': 'ecdsa-sha2-nistp256',
'scheme', 'ecdsa-sha2-nistp256',
'keyid': keyid,
'keyval': {'public': '',
'private': ''}}
The public and private keys are strings in TODO format.
>>> ecdsa_key = generate_ecdsa_key(scheme='ecdsa-sha2-nistp256')
>>> securesystemslib.formats.ECDSAKEY_SCHEMA.matches(ecdsa_key)
True
<Arguments>
scheme:
The ECDSA signature scheme. By default, ECDSA NIST P-256 is used, with
SHA256 for hashing.
<Exceptions>
securesystemslib.exceptions.FormatError, if 'scheme' is improperly
formatted or invalid (i.e., not one of the supported ECDSA signature
schemes).
<Side Effects>
None.
<Returns>
A dictionary containing the ECDSA keys and other identifying information.
Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'. |
def readline(self):
    """
    Read until a new-line character is encountered (or the underlying
    chunk stream is exhausted, in which case the remainder is returned).
    """
    buffered = ""
    newline_at = -1
    try:
        # Keep pulling chunks until one of them contains a newline.
        while newline_at < 0:
            buffered += self.next_chunk()
            newline_at = buffered.find('\n')
    except StopIteration:
        # Stream exhausted; fall through with whatever we have.
        pass
    if newline_at < 0:
        return buffered
    # Return through the newline; push anything beyond it back.
    cut = newline_at + 1
    self.unshift(buffered[cut:])
    return buffered[:cut]
def gather(self, analysis, node):
    """High-level helper: run an `analysis` class on a `node`."""
    assert issubclass(analysis, Analysis)
    # Instantiate, bind to this pass manager, then execute.
    instance = analysis()
    instance.attach(self)
    return instance.run(node)
def walk(self, *types):
    """
    Iterator which visits all suites and suite files, yielding the
    items whose class is in *types* (all robot item types when no
    types are given).
    """
    # No filter given -> accept every known robot item type.
    wanted = list(types) if types else [SuiteFile, ResourceFile, SuiteFolder, Testcase, Keyword]
    for item in self.robot_files:
        if item.__class__ in wanted:
            yield item
        if isinstance(item, SuiteFolder):
            # Folders recurse without a filter; filter their output here.
            for descendant in item.walk():
                if descendant.__class__ in wanted:
                    yield descendant
        else:
            # Files apply the filter themselves.
            for descendant in item.walk(*types):
                yield descendant
yielding test cases and keywords |
def export_tour(tour_steps, name=None, filename="my_tour.js", url=None):
    """ Exports a tour as a JS file.
    It will include necessary resources as well, such as jQuery.
    You'll be able to copy the tour directly into the Console of
    any web browser to play the tour outside of SeleniumBase runs.

    Args:
        tour_steps: dict mapping a tour name to its list of JS step
            strings; the first entry is inspected to detect which tour
            library ("Bootstrap", "Hopscotch", "IntroJS" or "Shepherd")
            the steps were generated for.
        name: key into tour_steps; defaults to "default".
        filename: output file name; must end in ".js".
        url: page the exported tour should start on ("data:," if unset).

    Raises:
        Exception: for an unknown tour name, a filename not ending in
            ".js", or an undetectable tour type.
    """
    if not name:
        name = "default"
    if name not in tour_steps:
        raise Exception("Tour {%s} does not exist!" % name)
    if not filename.endswith('.js'):
        raise Exception('Tour file must end in ".js"!')
    if not url:
        url = "data:,"
    # Detect the tour library from a marker string in the first step.
    tour_type = None
    if "Bootstrap" in tour_steps[name][0]:
        tour_type = "bootstrap"
    elif "Hopscotch" in tour_steps[name][0]:
        tour_type = "hopscotch"
    elif "IntroJS" in tour_steps[name][0]:
        tour_type = "introjs"
    elif "Shepherd" in tour_steps[name][0]:
        tour_type = "shepherd"
    else:
        raise Exception('Unknown tour type!')
    # Preamble: JS helpers that let the exported file inject its own
    # CSS/JS resources when pasted into a browser console.
    instructions = (
        '''//////// Load Tour Start Page (if not there now) ////////\n\n'''
        '''if (window.location.href != "%s") {\n'''
        ''' window.location.href="%s";\n'''
        '''}\n\n'''
        '''//////// Resources ////////\n\n'''
        '''function injectCSS(css_link) {'''
        '''var head = document.getElementsByTagName("head")[0];'''
        '''var link = document.createElement("link");'''
        '''link.rel = "stylesheet";'''
        '''link.type = "text/css";'''
        '''link.href = css_link;'''
        '''link.crossorigin = "anonymous";'''
        '''head.appendChild(link);'''
        '''};\n'''
        '''function injectJS(js_link) {'''
        '''var head = document.getElementsByTagName("head")[0];'''
        '''var script = document.createElement("script");'''
        '''script.src = js_link;'''
        '''script.defer;'''
        '''script.type="text/javascript";'''
        '''script.crossorigin = "anonymous";'''
        '''script.onload = function() { null };'''
        '''head.appendChild(script);'''
        '''};\n'''
        '''function injectStyle(css) {'''
        '''var head = document.getElementsByTagName("head")[0];'''
        '''var style = document.createElement("style");'''
        '''style.type = "text/css";'''
        '''style.appendChild(document.createTextNode(css));'''
        '''head.appendChild(style);'''
        '''};\n''' % (url, url))
    # Per-library resource loading (CSS/JS links and backdrop styles).
    if tour_type == "bootstrap":
        jquery_js = constants.JQuery.MIN_JS
        bootstrap_tour_css = constants.BootstrapTour.MIN_CSS
        bootstrap_tour_js = constants.BootstrapTour.MIN_JS
        backdrop_style = style_sheet.bt_backdrop_style
        backdrop_style = backdrop_style.replace('\n', '')
        backdrop_style = js_utils.escape_quotes_if_needed(backdrop_style)
        instructions += 'injectJS("%s");' % jquery_js
        instructions += '\n\n//////// Resources - Load 2 ////////\n\n'
        instructions += 'injectCSS("%s");\n' % bootstrap_tour_css
        instructions += 'injectStyle("%s");\n' % backdrop_style
        instructions += 'injectJS("%s");' % bootstrap_tour_js
    elif tour_type == "hopscotch":
        hopscotch_css = constants.Hopscotch.MIN_CSS
        hopscotch_js = constants.Hopscotch.MIN_JS
        backdrop_style = style_sheet.hops_backdrop_style
        backdrop_style = backdrop_style.replace('\n', '')
        backdrop_style = js_utils.escape_quotes_if_needed(backdrop_style)
        instructions += 'injectCSS("%s");\n' % hopscotch_css
        instructions += 'injectStyle("%s");\n' % backdrop_style
        instructions += 'injectJS("%s");' % hopscotch_js
    elif tour_type == "introjs":
        intro_css = constants.IntroJS.MIN_CSS
        intro_js = constants.IntroJS.MIN_JS
        instructions += 'injectCSS("%s");\n' % intro_css
        instructions += 'injectJS("%s");' % intro_js
    elif tour_type == "shepherd":
        jquery_js = constants.JQuery.MIN_JS
        shepherd_js = constants.Shepherd.MIN_JS
        sh_theme_arrows_css = constants.Shepherd.THEME_ARROWS_CSS
        sh_theme_arrows_fix_css = constants.Shepherd.THEME_ARR_FIX_CSS
        sh_theme_default_css = constants.Shepherd.THEME_DEFAULT_CSS
        sh_theme_dark_css = constants.Shepherd.THEME_DARK_CSS
        sh_theme_sq_css = constants.Shepherd.THEME_SQ_CSS
        sh_theme_sq_dark_css = constants.Shepherd.THEME_SQ_DK_CSS
        tether_js = constants.Tether.MIN_JS
        spinner_css = constants.Messenger.SPINNER_CSS
        backdrop_style = style_sheet.sh_backdrop_style
        backdrop_style = backdrop_style.replace('\n', '')
        backdrop_style = js_utils.escape_quotes_if_needed(backdrop_style)
        instructions += 'injectCSS("%s");\n' % spinner_css
        instructions += 'injectJS("%s");\n' % jquery_js
        instructions += 'injectJS("%s");' % tether_js
        instructions += '\n\n//////// Resources - Load 2 ////////\n\n'
        instructions += 'injectCSS("%s");' % sh_theme_arrows_css
        instructions += 'injectCSS("%s");' % sh_theme_arrows_fix_css
        instructions += 'injectCSS("%s");' % sh_theme_default_css
        instructions += 'injectCSS("%s");' % sh_theme_dark_css
        instructions += 'injectCSS("%s");' % sh_theme_sq_css
        instructions += 'injectCSS("%s");\n' % sh_theme_sq_dark_css
        instructions += 'injectStyle("%s");\n' % backdrop_style
        instructions += 'injectJS("%s");' % shepherd_js
    # Append the actual tour steps, then the per-library start-up code.
    instructions += '\n\n//////// Tour Code ////////\n\n'
    for tour_step in tour_steps[name]:
        instructions += tour_step
    if tour_type == "bootstrap":
        instructions += (
            """]);
            // Initialize the tour
            tour.init();
            // Start the tour
            tour.start();
            $tour = tour;
            $tour.restart();\n""")
    elif tour_type == "hopscotch":
        instructions += (
            """]
            };
            // Start the tour!
            hopscotch.startTour(tour);
            $tour = hopscotch;\n""")
    elif tour_type == "introjs":
        instructions += (
            """]
            });
            intro.setOption("disableInteraction", true);
            intro.setOption("overlayOpacity", .29);
            intro.setOption("scrollToElement", true);
            intro.setOption("keyboardNavigation", true);
            intro.setOption("exitOnEsc", false);
            intro.setOption("exitOnOverlayClick", false);
            intro.setOption("showStepNumbers", false);
            intro.setOption("showProgress", false);
            intro.start();
            $tour = intro;
            };
            startIntro();\n""")
    elif tour_type == "shepherd":
        instructions += (
            """
            tour.start();
            $tour = tour;\n""")
    else:
        pass
    # Write the assembled script under the exported-tours folder,
    # creating the folder on first use.
    exported_tours_folder = EXPORTED_TOURS_FOLDER
    if exported_tours_folder.endswith("/"):
        exported_tours_folder = exported_tours_folder[:-1]
    if not os.path.exists(exported_tours_folder):
        try:
            os.makedirs(exported_tours_folder)
        except Exception:
            pass
    import codecs
    file_path = exported_tours_folder + "/" + filename
    out_file = codecs.open(file_path, "w+")
    out_file.writelines(instructions)
    out_file.close()
    print('\n>>> [%s] was saved!\n' % file_path)
It will include necessary resources as well, such as jQuery.
You'll be able to copy the tour directly into the Console of
any web browser to play the tour outside of SeleniumBase runs. |
def __get_stack_trace(self, depth = 16, bUseLabels = True,
                      bMakePretty = True):
    """
    Tries to get a stack trace for the current function using the debug
    helper API (dbghelp.dll).

    @type  depth: int
    @param depth: Maximum depth of stack trace.

    @type  bUseLabels: bool
    @param bUseLabels: C{True} to use labels, C{False} to use addresses.

    @type  bMakePretty: bool
    @param bMakePretty:
        C{True} for user readable labels,
        C{False} for labels that can be passed to L{Process.resolve_label}.

        "Pretty" labels look better when producing output for the user to
        read, while pure labels are more useful programatically.

    @rtype:  tuple of tuple( int, int, str )
    @return: Stack trace of the thread as a tuple of
        ( return address, frame pointer address, module filename )
        when C{bUseLabels} is C{True}, or a tuple of
        ( return address, frame pointer label )
        when C{bUseLabels} is C{False}.

    @raise WindowsError: Raises an exception on error.
    """
    aProcess = self.get_process()
    arch = aProcess.get_arch()
    bits = aProcess.get_bits()
    # Map the target architecture to the dbghelp machine type constant.
    if arch == win32.ARCH_I386:
        MachineType = win32.IMAGE_FILE_MACHINE_I386
    elif arch == win32.ARCH_AMD64:
        MachineType = win32.IMAGE_FILE_MACHINE_AMD64
    elif arch == win32.ARCH_IA64:
        MachineType = win32.IMAGE_FILE_MACHINE_IA64
    else:
        msg = "Stack walking is not available for this architecture: %s"
        raise NotImplementedError(msg % arch)
    hProcess = aProcess.get_handle( win32.PROCESS_VM_READ |
                                    win32.PROCESS_QUERY_INFORMATION )
    hThread = self.get_handle( win32.THREAD_GET_CONTEXT |
                               win32.THREAD_QUERY_INFORMATION )
    # Seed the walk with the thread's current PC / frame / stack pointers.
    StackFrame = win32.STACKFRAME64()
    StackFrame.AddrPC = win32.ADDRESS64( self.get_pc() )
    StackFrame.AddrFrame = win32.ADDRESS64( self.get_fp() )
    StackFrame.AddrStack = win32.ADDRESS64( self.get_sp() )
    trace = list()
    while win32.StackWalk64(MachineType, hProcess, hThread, StackFrame):
        if depth <= 0:
            break
        # FIX: 'depth' was never decremented, so the maximum-depth
        # argument silently had no effect -- the walk previously only
        # stopped when StackWalk64 failed or a null return address hit.
        depth -= 1
        fp = StackFrame.AddrFrame.Offset
        # NOTE(review): reads the return address at fp+4, i.e. 4-byte
        # (x86) stack slots, even though AMD64/IA64 machine types are
        # accepted above -- confirm against the 64-bit calling convention.
        ra = aProcess.peek_pointer(fp + 4)
        if ra == 0:
            break
        lib = aProcess.get_module_at_address(ra)
        if lib is None:
            lib = ""
        else:
            if lib.fileName:
                lib = lib.fileName
            else:
                lib = "%s" % HexDump.address(lib.lpBaseOfDll, bits)
        if bUseLabels:
            label = aProcess.get_label_at_address(ra)
            if bMakePretty:
                label = '%s (%s)' % (HexDump.address(ra, bits), label)
            trace.append( (fp, label) )
        else:
            trace.append( (fp, ra, lib) )
        # NOTE(review): this fp chain-walk result is overwritten at the
        # top of the next iteration (fp is re-read from StackFrame); it
        # looks like dead code -- confirm before removing.
        fp = aProcess.peek_pointer(fp)
    return tuple(trace)
helper API (dbghelp.dll).
@type depth: int
@param depth: Maximum depth of stack trace.
@type bUseLabels: bool
@param bUseLabels: C{True} to use labels, C{False} to use addresses.
@type bMakePretty: bool
@param bMakePretty:
C{True} for user readable labels,
C{False} for labels that can be passed to L{Process.resolve_label}.
"Pretty" labels look better when producing output for the user to
read, while pure labels are more useful programatically.
@rtype: tuple of tuple( int, int, str )
@return: Stack trace of the thread as a tuple of
( return address, frame pointer address, module filename )
when C{bUseLabels} is C{True}, or a tuple of
( return address, frame pointer label )
when C{bUseLabels} is C{False}.
@raise WindowsError: Raises an exception on error. |
def retinotopy_data(m, source='any'):
    '''
    retinotopy_data(m) yields a dict containing a retinotopy dataset with the keys 'polar_angle',
    'eccentricity', and any other related fields for the given retinotopy type; for example,
    'pRF_size' and 'variance_explained' may be included for measured retinotopy datasets and
    'visual_area' may be included for atlas or model datasets. The coordinates are always in the
    'visual' retinotopy style, but can be reinterpreted with as_retinotopy.

    retinotopy_data(m, source) may be used to specify a particular source for the data; this may be
    either 'empirical', 'model', or 'any'; or it may be a prefix/suffix beginning/ending with
    an _ character.

    Raises ValueError if no interpretation of the requested source can be
    found among the properties of m.
    '''
    if pimms.is_map(source):
        if all(k in source for k in ['polar_angle', 'eccentricity']): return source
    if geo.is_vset(m): return retinotopy_data(m.properties, source=source)
    source = source.lower()
    model_rets = ['predicted', 'model', 'template', 'atlas', 'inferred']
    empir_rets = ['empirical', 'measured', 'prf', 'data']
    wild = False
    extra_fields = {'empirical':('radius','variance_explained'), 'model':('radius','visual_area')}
    check_fields = []
    if source in empir_rets:
        fixes = empir_rets
        check_fields = extra_fields['empirical']
    elif source in model_rets:
        fixes = model_rets
        check_fields = extra_fields['model']
    elif source in ['any', '*', 'all']:
        fixes = model_rets + empir_rets
        check_fields = extra_fields['model'] + extra_fields['empirical']
        wild = True
    elif source in ['none', 'basic']:
        fixes = []
        check_fields = extra_fields['model'] + extra_fields['empirical']
        wild = True
    else: fixes = []
    # first, try all the fixes as prefixes then suffixes
    (z, prefix, suffix) = (None, None, None)
    if wild:
        try: z = as_retinotopy(m, 'visual')
        except Exception: pass
    for fix in fixes:
        if z: break
        try:
            z = as_retinotopy(m, 'visual', prefix=(fix + '_'))
            prefix = fix + '_'
        except Exception: pass
    for fix in fixes:
        if z: break
        try:
            z = as_retinotopy(m, 'visual', suffix=('_' + fix))
            # BUGFIX: record the suffix in the form it was matched
            # ('_' + fix); it was previously stored as (fix + '_'),
            # which broke the extra-field lookups below (f = f + suffix).
            suffix = '_' + fix
        except Exception: pass
    # if none of those worked, try with no prefix/suffix
    if not z:
        try:
            pref = source if source.endswith('_') else (source + '_')
            z = as_retinotopy(m, 'visual', prefix=pref)
            prefix = pref
            check_fields = extra_fields['model'] + extra_fields['empirical']
        except Exception:
            # BUGFIX: was ``raise``, which aborted here and made the
            # suffix interpretation below and the explicit ValueError
            # unreachable whenever the prefix attempt failed.
            pass
        try:
            suff = source if source.startswith('_') else ('_' + source)
            z = as_retinotopy(m, 'visual', suffix=suff)
            suffix = suff
            check_fields = extra_fields['model'] + extra_fields['empirical']
        except Exception: pass
    # if still not z... we couldn't figure it out
    if not z: raise ValueError('Could not find an interpretation for source %s' % source)
    # okay, we found it; make it into a dict
    res = {'polar_angle': z[0], 'eccentricity': z[1]}
    # check for extra fields if relevant
    pnames = {k.lower():k for k in m} if check_fields else {}
    for fname in set(check_fields):
        for (aliases, trfn) in retinotopic_property_aliases.get(fname, []):
            if trfn is None: trfn = lambda x:x
            for f in aliases:
                if prefix: f = prefix + f
                if suffix: f = f + suffix
                f = f.lower()
                if f in pnames:
                    res[fname] = trfn(m[pnames[f]])
                    trfn = None
                    break
            if trfn is None: break
    # That's it
    return res
'eccentricity', and any other related fields for the given retinotopy type; for example,
'pRF_size' and 'variance_explained' may be included for measured retinotopy datasets and
'visual_area' may be included for atlas or model datasets. The coordinates are always in the
'visual' retinotopy style, but can be reinterpreted with as_retinotopy.
retinotopy_data(m, source) may be used to specify a particular source for the data; this may be
either 'empirical', 'model', or 'any'; or it may be a prefix/suffix beginning/ending with
an _ character. |
def _parse_annotations(sbase):
    """Extract cobra annotations from a given SBase object.

    Annotations are dictionaries keyed by provider; a provider maps to a
    single identifier string, or to a list once a second identifier for
    the same provider is encountered.

    Parameters
    ----------
    sbase : libsbml.SBase
        SBase from which the SBML annotations are read

    Returns
    -------
    dict (annotation dictionary)

    FIXME: annotation format must be updated (this is a big collection of
    fixes) - see: https://github.com/opencobra/cobrapy/issues/684)
    """
    annotation = {}
    # SBO term, if one is set on the object.
    if sbase.isSetSBOTerm():
        # FIXME: correct handling of annotations
        annotation["sbo"] = sbase.getSBOTermID()
    # RDF controlled-vocabulary terms.
    terms = sbase.getCVTerms()
    if terms is None:
        return annotation
    for term in terms:  # type: libsbml.CVTerm
        for idx in range(term.getNumResources()):
            # FIXME: read and store the qualifier
            resource = term.getResourceURI(idx)
            parsed = URL_IDENTIFIERS_PATTERN.match(resource)
            if not parsed:
                LOGGER.warning("%s does not conform to "
                               "http(s)://identifiers.org/collection/id",
                               resource)
                continue
            provider, identifier = parsed.group(1), parsed.group(2)
            if provider not in annotation:
                # FIXME: always in list
                annotation[provider] = identifier
            else:
                existing = annotation[provider]
                if isinstance(existing, string_types):
                    # Promote the single identifier to a list.
                    existing = [existing]
                    annotation[provider] = existing
                # FIXME: use a list
                if identifier not in existing:
                    existing.append(identifier)
    return annotation
Annotations are dictionaries with the providers as keys.
Parameters
----------
sbase : libsbml.SBase
SBase from which the SBML annotations are read
Returns
-------
dict (annotation dictionary)
FIXME: annotation format must be updated (this is a big collection of
fixes) - see: https://github.com/opencobra/cobrapy/issues/684) |
def get_relationship(self, attribute):
    """
    Return the domain relationship object for the given resource
    attribute, creating and caching it on first access.
    """
    key = attribute.entity_attr
    rel = self.__relationships.get(key)
    if rel is None:
        # Build lazily and memoize for subsequent lookups.
        rel = LazyDomainRelationship(self, attribute,
                                     direction=self.relationship_direction)
        self.__relationships[key] = rel
    return rel
attribute. |
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text) | Process Markdown `<pre><code>` blocks. |
def get_genomes(cfg):
    """
    Installs genomes: downloads (if needed) and indexes the reference
    genome FASTA and GFF3 annotation for the configured host species.

    :param cfg: configuration dict; reads 'host', 'genomeassembly',
        'genomerelease', 'genomed', 'test', 'bwa', 'samtools'.
    :returns: cfg updated with 'genomep' (genome FASTA path) and
        'genomegffp' (genome GFF3 path).

    Side effects: may prompt interactively, download from the Ensembl
    FTP site, and run external commands (wget, gunzip, bwa, samtools);
    exits the process if the user declines a required download.
    """
    if cfg['host'] == 'homo_sapiens':
        # NOTE(review): this hard-coded human contig list is overwritten
        # unconditionally by the pyensembl-derived list below -- confirm
        # whether the human shortcut is still intended.
        contigs = [str(i) for i in range(1, 23)] + ['X', 'Y']
    else:
        # Let pyensembl fetch and register the assembly for other hosts.
        runbashcmd(f"pyensembl install --reference-name {cfg['genomeassembly']} --release {cfg['genomerelease']} --species {cfg['host']}")
    import pyensembl
    ensembl = pyensembl.EnsemblRelease(species=pyensembl.species.Species.register(
        latin_name=cfg['host'],
        synonyms=[cfg['host']],
        reference_assemblies={
            cfg['genomeassembly']: (cfg['genomerelease'], cfg['genomerelease']),
        }), release=cfg['genomerelease'])
    # Keep only primary chromosomes: drop scaffolds (names containing
    # '.'), very long names, and mitochondrial contigs.
    contig_mito = ['MTDNA', 'MITO', 'MT']
    contigs = [c for c in ensembl.contigs()
               if ((not '.' in c) and (len(c) < 5) and (c not in contig_mito))]
    if not contigs:
        # Fall back to every contig when the filter removed everything.
        contigs = [c for c in ensembl.contigs()]
    logging.info(f"{len(contigs)} contigs/chromosomes in the genome")
    logging.info(contigs)
    # Normalize common host aliases before building Ensembl paths.
    if 'human' in cfg['host'].lower():
        cfg['host'] = 'homo_sapiens'
    if 'yeast' in cfg['host'].lower():
        cfg['host'] = 'saccharomyces_cerevisiae'
    # GRCh37 lives under a legacy directory on the Ensembl FTP site.
    if 'GRCh37' in cfg['genomeassembly']:
        ensembl_fastad = f"pub/grch37/update/fasta/{cfg['host']}/dna/"
    else:
        ensembl_fastad = f"pub/release-{cfg['genomerelease']}/fasta/{cfg['host']}/dna/"
    genome_fastad = f"{cfg['genomed']}/{ensembl_fastad}"
    cfg['genomep'] = f'{genome_fastad}/genome.fa'
    if not exists(cfg['genomep']):
        logging.error(f"not found: {cfg['genomep']}")
        ifdlref = input(f"Genome file are not there at {genome_fastad}.\n Download?[Y/n]: ")
        if ifdlref == 'Y':
            # #FIXME download contigs and cat and get index, sizes
            for contig in contigs:
                fn = f"{cfg['host'].capitalize()}.{cfg['genomeassembly']}.dna_sm.chromosome.{contig}.fa.gz"
                fp = f'{ensembl_fastad}/{fn}'
                if not exists(f"{cfg['genomed']}{fp.replace('.gz','')}"):
                    if not exists(f"{cfg['genomed']}{fp}"):
                        logging.info(f'downloading {fp}')
                        cmd = f"wget -q -x -nH ftp://ftp.ensembl.org/{fp} -P {cfg['genomed']}"
                        try:
                            runbashcmd(cmd, test=cfg['test'])
                        except Exception:
                            # BUGFIX: narrowed a bare ``except:`` so that
                            # KeyboardInterrupt/SystemExit still propagate.
                            # Per-chromosome file unavailable: fall back to
                            # the single top-level FASTA and stop looping.
                            fn = f"{cfg['host'].capitalize()}.{cfg['genomeassembly']}.dna_sm.toplevel.fa.gz"
                            fp = '{}/{}'.format(ensembl_fastad, fn)
                            if not exists(fp):
                                cmd = f"wget -q -x -nH ftp://ftp.ensembl.org/{fp} -P {cfg['genomed']}"
                                runbashcmd(cmd, test=cfg['test'])
                            break
            # make the fa ready: decompress and concatenate into genome.fa
            if not exists(cfg['genomep']):
                cmd = f"gunzip {genome_fastad}*.fa.gz;cat {genome_fastad}/*.fa > {genome_fastad}/genome.fa;"
                runbashcmd(cmd, test=cfg['test'])
        else:
            logging.error('abort')
            sys.exit(1)
    # Build the bwa/samtools indices and contig size table, once each.
    if not exists(cfg['genomep'] + '.bwt'):
        cmd = f"{cfg['bwa']} index {cfg['genomep']}"
        runbashcmd(cmd, test=cfg['test'])
    else:
        logging.info('bwa index is present')
    if not exists(cfg['genomep'] + '.fai'):
        cmd = f"{cfg['samtools']} faidx {cfg['genomep']}"
        runbashcmd(cmd, test=cfg['test'])
    else:
        logging.info('samtools index is present')
    if not exists(cfg['genomep'] + '.sizes'):
        cmd = f"cut -f1,2 {cfg['genomep']}.fai > {cfg['genomep']}.sizes"
        runbashcmd(cmd, test=cfg['test'])
    else:
        logging.info('sizes of contigs are present')
    # Annotation (GFF3) next.
    if 'GRCh37' in cfg['genomeassembly']:
        # ftp://ftp.ensembl.org/pub/grch37/update/gff3/homo_sapiens/Homo_sapiens.GRCh37.87.gff3.gz
        ensembl_gff3d = f"pub/grch37/update/gff3/{cfg['host']}/"
    else:
        ensembl_gff3d = f"pub/release-{cfg['genomerelease']}/gff3/{cfg['host']}/"
    genome_gff3d = f"{cfg['genomed']}/{ensembl_gff3d}"
    cfg['genomegffp'] = f'{genome_gff3d}/genome.gff3'
    if not exists(cfg['genomegffp']):
        logging.error('not found: {}'.format(cfg['genomegffp']))
        ifdlref = input("Download genome annotations at {}?[Y/n]: ".format(genome_gff3d))
        if ifdlref == 'Y':
            # #FIXME download contigs and cat and get index, sizes
            fn = f"{cfg['host'].capitalize()}.{cfg['genomeassembly']}.{cfg['genomerelease']}.gff3.gz"
            fp = f"{ensembl_gff3d}/{fn}"
            if not exists(fp):
                cmd = f"wget -x -nH ftp://ftp.ensembl.org/{fp} -P {cfg['genomed']}"
                runbashcmd(cmd, test=cfg['test'])
            # move to genome.gff3
            cmd = f"cp {genome_gff3d}/{fn} {cfg['genomegffp']}"
            runbashcmd(cmd, test=cfg['test'])
        else:
            logging.error('abort')
            sys.exit(1)
    logging.info('genomes are installed!')
    return cfg
:param cfg: configuration dict |
def logical_volume(cls, file_path, sudo=False):
    """ Return logical volume that stores the given path

    :param file_path: target path to search
    :param sudo: same as 'sudo' in :meth:`.WLogicalVolume.__init__`
    :return: WLogicalVolume or None (if file path is outside current mount points
    or the mount's device is not a device-mapper volume)
    """
    mp = WMountPoint.mount_point(file_path)
    if mp is None:
        return None
    name_file = '/sys/block/%s/dm/name' % mp.device_name()
    if not os.path.exists(name_file):
        return None
    # BUGFIX: close the sysfs file instead of leaking the handle.
    with open(name_file) as f:
        lv_path = '/dev/mapper/%s' % f.read().strip()
    return WLogicalVolume(lv_path, sudo=sudo)
:param file_path: target path to search
:param sudo: same as 'sudo' in :meth:`.WLogicalVolume.__init__`
:return: WLogicalVolume or None (if file path is outside current mount points) |
def src2obj(srcpath, CompilerRunner_=None, objpath=None,
            only_update=False, cwd=None, out_ext=None, inc_py=False,
            **kwargs):
    """
    Compile a single source file into an object file.

    Files ending with '.pyx' are assumed to be cython files and
    are dispatched to pyx2obj.

    Parameters
    ----------
    srcpath: path string
        path to source file
    CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
        Default: deduced from extension of srcpath
    objpath: path string (optional)
        path to generated object. default: deduced from srcpath
    only_update: bool
        only compile if source is newer than objpath. default: False
    cwd: path string (optional)
        working directory and root of relative paths. default: current dir.
    out_ext: string
        set when objpath is a dir and you want to override defaults
        ('.o'/'.obj' for Unix/Windows).
    inc_py: bool
        add Python include path to include_dirs. default: False
    **kwargs: dict
        keyword arguments passed onto CompilerRunner_ or pyx2obj
    """
    stem, ext = os.path.splitext(os.path.basename(srcpath))
    ext = ext.lower()

    # Deduce the output path when not given explicitly.
    if objpath is None:
        objpath = '.' if os.path.isabs(srcpath) else os.path.dirname(srcpath)
        objpath = objpath or '.'  # guard against '' from dirname()
    if os.path.isdir(objpath):
        objpath = os.path.join(objpath, stem + (out_ext or objext))

    include_dirs = kwargs.pop('include_dirs', [])
    if inc_py:
        # Add the CPython header directory if it is not already present.
        from distutils.sysconfig import get_python_inc
        py_inc_dir = get_python_inc()
        if py_inc_dir not in include_dirs:
            include_dirs.append(py_inc_dir)

    # Cython sources are compiled through a different pipeline entirely.
    if ext == '.pyx':
        return pyx2obj(srcpath, objpath=objpath,
                       include_dirs=include_dirs, cwd=cwd,
                       only_update=only_update, **kwargs)

    if CompilerRunner_ is None:
        CompilerRunner_, std = extension_mapping[ext]
        kwargs.setdefault('std', std)

    # src2obj implies not running the linker...
    run_linker = kwargs.pop('run_linker', False)
    if run_linker:
        raise CompilationError("src2obj called with run_linker=True")

    if only_update and not missing_or_other_newer(objpath, srcpath, cwd=cwd):
        msg = "Found {0}, did not recompile.".format(objpath)
        logger = kwargs.get('logger', None)
        if logger:
            logger.info(msg)
        else:
            print(msg)
        return objpath

    compiler = CompilerRunner_(
        [srcpath], objpath, include_dirs=include_dirs,
        run_linker=run_linker, cwd=cwd, **kwargs)
    compiler.run()
    return objpath
Files ending with '.pyx' assumed to be cython files and
are dispatched to pyx2obj.
Parameters
----------
srcpath: path string
path to source file
CompilerRunner_: pycompilation.CompilerRunner subclass (optional)
Default: deduced from extension of srcpath
objpath: path string (optional)
path to generated object. defualt: deduced from srcpath
only_update: bool
only compile if source is newer than objpath. default: False
cwd: path string (optional)
working directory and root of relative paths. default: current dir.
out_ext: string
set when objpath is a dir and you want to override defaults
('.o'/'.obj' for Unix/Windows).
inc_py: bool
add Python include path to include_dirs. default: False
**kwargs: dict
keyword arguments passed onto CompilerRunner_ or pyx2obj |
def get_area(self):
    """Calculate area of bounding box (width times height)."""
    width = self.p2.x - self.p1.x
    height = self.p2.y - self.p1.y
    return width * height
def start_event_loop(self, timeout=0):
    """
    Start an event loop. This is used to start a blocking event
    loop so that interactive functions, such as ginput and
    waitforbuttonpress, can wait for events. This should not be
    confused with the main GUI event loop, which is always running
    and has nothing to do with this.

    Call signature::

        start_event_loop(self,timeout=0)

    This call blocks until a callback function triggers
    stop_event_loop() or *timeout* is reached. If *timeout* is
    <=0, never timeout.

    Raises RuntimeError if event loop is already running.
    """
    if hasattr(self, '_event_loop'):
        raise RuntimeError("Event loop already running")
    # One-shot timer that fires stop_event_loop when the timeout expires
    # (renamed from ``id`` to avoid shadowing the builtin).
    timer_id = wx.NewId()
    timeout_timer = wx.Timer(self, id=timer_id)
    if timeout > 0:
        timeout_timer.Start(timeout * 1000, oneShot=True)
        bind(self, wx.EVT_TIMER, self.stop_event_loop, id=timer_id)
    # Run a nested event loop until stop_event_loop tears it down.
    self._event_loop = wx.EventLoop()
    self._event_loop.Run()
    timeout_timer.Stop()
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
Call signature::
start_event_loop(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running. |
def ReadHuntObjects(self,
                    offset,
                    count,
                    with_creator=None,
                    created_after=None,
                    with_description_match=None,
                    cursor=None):
    """Reads multiple hunt objects from the database."""
    # Each optional filter contributes one WHERE clause plus one argument.
    conditions = []
    args = []
    if with_creator is not None:
        conditions.append("creator = %s ")
        args.append(with_creator)
    if created_after is not None:
        conditions.append("create_timestamp > FROM_UNIXTIME(%s) ")
        args.append(mysql_utils.RDFDatetimeToTimestamp(created_after))
    if with_description_match is not None:
        conditions.append("description LIKE %s")
        args.append("%" + with_description_match + "%")
    query = "SELECT {columns} FROM hunts ".format(columns=_HUNT_COLUMNS_SELECT)
    if conditions:
        query += "WHERE " + " AND ".join(conditions)
    query += " ORDER BY create_timestamp DESC LIMIT %s OFFSET %s"
    args += [count, offset]
    cursor.execute(query, args)
    return [self._HuntObjectFromRow(row) for row in cursor.fetchall()]
def convert_dense(builder, layer, input_names, output_names, keras_layer):
    """Convert a dense (inner-product) layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name = input_names[0]
    output_name = output_names[0]
    has_bias = keras_layer.bias
    # Keras stores [weights, bias]; CoreML expects them transposed.
    W = keras_layer.get_weights()[0].T
    Wb = keras_layer.get_weights()[1].T if has_bias else None
    builder.add_inner_product(name=layer,
                              W=W,
                              b=Wb,
                              input_channels=keras_layer.input_dim,
                              output_channels=keras_layer.output_dim,
                              has_bias=has_bias,
                              input_name=input_name,
                              output_name=output_name)
Parameters
keras_layer: layer
----------
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object. |
def audio_set_format(self, format, rate, channels):
    '''Set decoded audio format.
    This only works in combination with L{audio_set_callbacks}(),
    and is mutually exclusive with L{audio_set_format_callbacks}().
    @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
    @param rate: sample rate (expressed in Hz).
    @param channels: channels count.
    @version: LibVLC 2.0.0 or later.
    '''
    # The C API expects the format tag as bytes, not str.
    encoded_format = str_to_bytes(format)
    return libvlc_audio_set_format(self, encoded_format, rate, channels)
This only works in combination with L{audio_set_callbacks}(),
and is mutually exclusive with L{audio_set_format_callbacks}().
@param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
@param rate: sample rate (expressed in Hz).
@param channels: channels count.
@version: LibVLC 2.0.0 or later. |
def _from_binary_obj_ace(cls, binary_stream):
    """See base class.

    Fixed-size prefix decoded by ``cls._REPR``::

        Access rights flags - 4
        Flags - 4
        Object type class identifier (GUID) - 16
        Inherited object type class identifier (GUID) - 16

    followed by a variable-length SID.
    """
    fixed_size = cls._REPR.size
    access_flags, flags, object_guid, inherited_guid = \
        cls._REPR.unpack(binary_stream[:fixed_size])
    sid = SID.create_from_binary(binary_stream[fixed_size:])
    return cls((ACEAccessFlags(access_flags), flags,
                UUID(bytes_le=object_guid), UUID(bytes_le=inherited_guid),
                sid))
def dropEvent(self, event):
    """
    Handles a drop event: uses a dropped local file directly, or
    downloads a remote icon into the temp directory first.
    """
    dropped_url = event.mimeData().urls()[0]
    url_path = nativestring(dropped_url.toString())
    if url_path.startswith('file:'):
        # Local file: strip the scheme and use the path as-is.
        self.setFilepath(url_path.replace('file://', ''))
        return
    # Remote URL: fetch the icon into a temporary file.
    target = os.path.join(nativestring(QDir.tempPath()),
                          os.path.basename(url_path))
    try:
        urllib.urlretrieve(url_path, target)
    except IOError:
        return
    self.setFilepath(target)
def run_check_kind(_):
    '''
    Running the script.
    '''
    # Print the UID of every post whose kind disagrees with the kind of
    # a category it is filed under.
    for kind_value in router_post:
        for cat_rec in MCategory.query_all(kind=kind_value):
            cat_info = MCategory.get_by_uid(cat_rec.uid)
            for post2tag in MPost2Catalog.query_by_catid(cat_rec.uid):
                post_info = MPost.get_by_uid(post2tag.post_id)
                if post_info.kind != cat_info.kind:
                    print(post_info.uid)
def add_style_opts(cls, component, new_options, backend=None):
    """
    Given a component such as an Element (e.g. Image, Curve) or a
    container (e.g Layout) specify new style options to be
    accepted by the corresponding plotting class.

    Note: This is supplied for advanced users who know which
    additional style keywords are appropriate for the
    corresponding plotting class.
    """
    if backend is None:
        backend = cls.current_backend
    registry = cls.registry[backend]
    if component not in registry:
        raise ValueError("Component %r not registered to a plotting class" % component)
    is_str_list = (isinstance(new_options, list)
                   and all(isinstance(el, str) for el in new_options))
    if not is_str_list:
        raise ValueError("Please supply a list of style option keyword strings")
    with param.logging_level('CRITICAL'):
        plot_class = registry[component]
        for option in new_options:
            # Keep style_opts sorted while adding each unseen keyword.
            if option not in plot_class.style_opts:
                plot_class.style_opts = sorted(plot_class.style_opts + [option])
        cls._options[backend][component.name] = Options(
            'style', merge_keywords=True, allowed_keywords=new_options)
def errors(self):
    """ Returns the list of recent errors.

    Returns:
        list: of :obj:`.ErrorEvent` tuples.
    """
    raw = self.get('error').get('errors', None) or list()
    assert isinstance(raw, list)
    # One ErrorEvent(when, message) per raw error entry.
    return [ErrorEvent(parse_datetime(entry.get('when', None)),
                       entry.get('message', ''))
            for entry in raw]
Returns:
list: of :obj:`.ErrorEvent` tuples. |
def search_process(process, pattern, minAddr = None,
                                     maxAddr = None,
                                     bufferPages = None,
                                     overlapping = False):
    """
    Search for the given pattern within the process memory.

    @type  process: L{Process}
    @param process: Process to search.

    @type  pattern: L{Pattern}
    @param pattern: Pattern to search for.
        It must be an instance of a subclass of L{Pattern}.

        The following L{Pattern} subclasses are provided by WinAppDbg:
         - L{BytePattern}
         - L{TextPattern}
         - L{RegExpPattern}
         - L{HexPattern}

        You can also write your own subclass of L{Pattern} for customized
        searches.

    @type  minAddr: int
    @param minAddr: (Optional) Start the search at this memory address.

    @type  maxAddr: int
    @param maxAddr: (Optional) Stop the search at this memory address.

    @type  bufferPages: int
    @param bufferPages: (Optional) Number of memory pages to buffer when
        performing the search. Valid values are:
         - C{0} or C{None}:
           Automatically determine the required buffer size. May not give
           complete results for regular expressions that match variable
           sized strings.
         - C{> 0}: Set the buffer size, in memory pages.
         - C{< 0}: Disable buffering entirely. This may give you a little
           speed gain at the cost of an increased memory usage. If the
           target process has very large contiguous memory regions it may
           actually be slower or even fail. It's also the only way to
           guarantee complete results for regular expressions that match
           variable sized strings.

    @type  overlapping: bool
    @param overlapping: C{True} to allow overlapping results, C{False}
        otherwise.

        Overlapping results yield the maximum possible number of results.

        For example, if searching for "AAAA" within "AAAAAAAA" at address
        C{0x10000}, when overlapping is turned off the following matches
        are yielded::
            (0x10000, 4, "AAAA")
            (0x10004, 4, "AAAA")
        If overlapping is turned on, the following matches are yielded::
            (0x10000, 4, "AAAA")
            (0x10001, 4, "AAAA")
            (0x10002, 4, "AAAA")
            (0x10003, 4, "AAAA")
            (0x10004, 4, "AAAA")
        As you can see, the middle results are overlapping the last two.

    @rtype:  iterator of tuple( int, int, str )
    @return: An iterator of tuples. Each tuple contains the following:
         - The memory address where the pattern was found.
         - The size of the data that matches the pattern.
         - The data that matches the pattern.

    @raise WindowsError: An error occurred when querying or reading the
        process memory.
    """
    # Do some namespace lookups of symbols we'll be using frequently.
    # NOTE(review): MEM_COMMIT and PAGE_GUARD appear unused below --
    # possibly leftovers from an earlier filtering approach; confirm.
    MEM_COMMIT = win32.MEM_COMMIT
    PAGE_GUARD = win32.PAGE_GUARD
    page = MemoryAddresses.pageSize
    read = pattern.read
    find = pattern.find
    # Calculate the address range.
    if minAddr is None:
        minAddr = 0
    if maxAddr is None:
        maxAddr = win32.LPVOID(-1).value  # XXX HACK
    # Calculate the buffer size from the number of pages.
    # One extra page is kept so matches spanning a page boundary between
    # two reads are still found.
    if bufferPages is None:
        try:
            size = MemoryAddresses.\
                align_address_to_page_end(len(pattern)) + page
        except NotImplementedError:
            size = None
    elif bufferPages > 0:
        size = page * (bufferPages + 1)
    else:
        size = None
    # Get the memory map of the process.
    memory_map = process.iter_memory_map(minAddr, maxAddr)
    # Perform search with buffering enabled.
    # NOTE(review): buffers are handled as str (seeded with ""); under
    # Python 3 this code would need bytes throughout -- confirm the
    # supported Python version before changing.
    if size:
        # Loop through all memory blocks containing data.
        buffer = ""     # buffer to hold the memory data
        prev_addr = 0   # previous memory block address
        last = 0        # position of the last match
        delta = 0       # delta of last read address and start of buffer
        for mbi in memory_map:
            # Skip blocks with no data to search on.
            if not mbi.has_content():
                continue
            # Get the address and size of this block.
            address = mbi.BaseAddress    # current address to search on
            block_size = mbi.RegionSize  # total size of the block
            if address >= maxAddr:
                break
            end = address + block_size   # end address of the block
            # If the block is contiguous to the previous block,
            # coalesce the new data in the buffer.
            if delta and address == prev_addr:
                buffer += read(process, address, page)
            # If not, clear the buffer and read new data.
            else:
                buffer = read(process, address, min(size, block_size))
                last = 0
                delta = 0
            # Search for the pattern in this block.
            while 1:
                # Yield each match of the pattern in the buffer.
                pos, length = find(buffer, last)
                while pos >= last:
                    # delta maps buffer offsets back to absolute addresses
                    # after pages are dropped from the front of the buffer.
                    match_addr = address + pos - delta
                    if minAddr <= match_addr < maxAddr:
                        result = pattern.found(
                            match_addr, length,
                            buffer [ pos : pos + length ] )
                        if result is not None:
                            yield result
                    if overlapping:
                        last = pos + 1
                    else:
                        last = pos + length
                    pos, length = find(buffer, last)
                # Advance to the next page.
                address = address + page
                block_size = block_size - page
                prev_addr = address
                # Fix the position of the last match.
                last = last - page
                if last < 0:
                    last = 0
                # Remove the first page in the buffer.
                buffer = buffer[ page : ]
                delta = page
                # If we haven't reached the end of the block yet,
                # read the next page in the block and keep seaching.
                if address < end:
                    buffer = buffer + read(process, address, page)
                # Otherwise, we're done searching this block.
                else:
                    break
    # Perform search with buffering disabled.
    else:
        # Loop through all memory blocks containing data.
        for mbi in memory_map:
            # Skip blocks with no data to search on.
            if not mbi.has_content():
                continue
            # Get the address and size of this block.
            address = mbi.BaseAddress
            block_size = mbi.RegionSize
            if address >= maxAddr:
                break;
            # Read the whole memory region.
            buffer = process.read(address, block_size)
            # Search for the pattern in this region.
            pos, length = find(buffer)
            last = 0
            while pos >= last:
                match_addr = address + pos
                if minAddr <= match_addr < maxAddr:
                    result = pattern.found(
                        match_addr, length,
                        buffer [ pos : pos + length ] )
                    if result is not None:
                        yield result
                if overlapping:
                    last = pos + 1
                else:
                    last = pos + length
                pos, length = find(buffer, last) | Search for the given pattern within the process memory.
@type process: L{Process}
@param process: Process to search.
@type pattern: L{Pattern}
@param pattern: Pattern to search for.
It must be an instance of a subclass of L{Pattern}.
The following L{Pattern} subclasses are provided by WinAppDbg:
- L{BytePattern}
- L{TextPattern}
- L{RegExpPattern}
- L{HexPattern}
You can also write your own subclass of L{Pattern} for customized
searches.
@type minAddr: int
@param minAddr: (Optional) Start the search at this memory address.
@type maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.
@type bufferPages: int
@param bufferPages: (Optional) Number of memory pages to buffer when
performing the search. Valid values are:
- C{0} or C{None}:
Automatically determine the required buffer size. May not give
complete results for regular expressions that match variable
sized strings.
- C{> 0}: Set the buffer size, in memory pages.
- C{< 0}: Disable buffering entirely. This may give you a little
speed gain at the cost of an increased memory usage. If the
target process has very large contiguous memory regions it may
actually be slower or even fail. It's also the only way to
guarantee complete results for regular expressions that match
variable sized strings.
@type overlapping: bool
@param overlapping: C{True} to allow overlapping results, C{False}
otherwise.
Overlapping results yield the maximum possible number of results.
For example, if searching for "AAAA" within "AAAAAAAA" at address
C{0x10000}, when overlapping is turned off the following matches
are yielded::
(0x10000, 4, "AAAA")
(0x10004, 4, "AAAA")
If overlapping is turned on, the following matches are yielded::
(0x10000, 4, "AAAA")
(0x10001, 4, "AAAA")
(0x10002, 4, "AAAA")
(0x10003, 4, "AAAA")
(0x10004, 4, "AAAA")
As you can see, the middle results are overlapping the last two.
@rtype: iterator of tuple( int, int, str )
@return: An iterator of tuples. Each tuple contains the following:
- The memory address where the pattern was found.
- The size of the data that matches the pattern.
- The data that matches the pattern.
@raise WindowsError: An error occurred when querying or reading the
process memory. |
def find_by_id(self, project_membership, params=None, **options):
    """Return the project membership record.

    Parameters
    ----------
    project_membership : str or int
        Globally unique identifier for the project membership.
    params : dict, optional
        Query parameters for the request.
    **options
        Extra request options forwarded to ``self.client.get``.
    """
    # Avoid a shared mutable default argument; each call gets a fresh dict.
    if params is None:
        params = {}
    path = "/project_memberships/%s" % (project_membership)
    return self.client.get(path, params, **options)
Parameters
----------
project_membership : {Id} Globally unique identifier for the project membership.
[params] : {Object} Parameters for the request |
def get_operator(self, op):
    """Resolve *op* to a binary operator function.

    If *op* is a key in ``self.OPERATORS``, the registered function is
    returned.  Otherwise *op* must itself be a callable accepting two
    arguments, in which case it is returned unchanged.

    Raises
    ------
    TypeError
        If *op* is neither a registered operator name nor a
        two-argument callable.
    ValueError
        If a signature cannot be determined for the callable.
    """
    if op in self.OPERATORS:
        return self.OPERATORS.get(op)
    try:
        # inspect.getargspec was deprecated and removed in Python 3.11;
        # inspect.signature is the supported replacement.  NOTE(review):
        # for bound methods the two counts can differ by the implicit
        # ``self`` parameter — confirm against callers if relevant.
        n_args = len(inspect.signature(op).parameters)
        if n_args != 2:
            raise TypeError
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:`` so unrelated errors
        # (e.g. KeyboardInterrupt) are not swallowed here.
        eprint('Error: invalid operator function. Operators must accept two args.')
        raise
    else:
        return op
def select(self):
    """
    Make this fit the selected fit on the GUI that is its parent.

    No-op when this fit has no parent GUI.  Otherwise updates the GUI's
    current fit, refreshes the bounds/PCA boxes when the corresponding
    attributes are set, redraws the figure if the Zijderveld plot does
    not exist yet, and recomputes the PCA parameters.
    (Note: may be moved into GUI soon)
    """
    # ``is None`` / ``is not None`` instead of ``==``/``!=`` comparisons:
    # identity checks are the correct (and PEP 8) way to test for None.
    if self.GUI is None:
        return
    self.GUI.current_fit = self
    if self.tmax is not None and self.tmin is not None:
        self.GUI.update_bounds_boxes()
    if self.PCA_type is not None:
        self.GUI.update_PCA_box()
    try:
        self.GUI.zijplot
    except AttributeError:
        # The plot has not been drawn yet; draw it for the current specimen.
        self.GUI.draw_figure(self.GUI.s)
    self.GUI.fit_box.SetStringSelection(self.name)
    self.GUI.get_new_PCA_parameters(-1)
(Note: may be moved into GUI soon) |
def path_url(self):
    """Return the path portion of ``self.url``, plus any query string.

    An empty path is normalized to ``'/'``.
    """
    parts = urlsplit(self.url)
    path = parts.path or '/'
    if parts.query:
        return '%s?%s' % (path, parts.query)
    return path
def search_tags(self, series_search_text=None, response_type=None, params=None):
    """
    Function to request the FRED tags for a series search.
    `<https://research.stlouisfed.org/docs/api/fred/series_search_tags.html>`_

    :arg str series_search_text: The words to match against economic data series. Required.
    :arg str response_type: File extension of response. Options are 'xml', 'json',
    'dict','df','numpy','csv','tab','pipe'. Required.
    :arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
    :arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
    :arg int limit: The maximum number of results to return. Options 1 to 1000
    :arg int offset: Data offset. Options >=0
    :arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
    'popularity', 'created', 'name', 'group_id'
    :arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
    :arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea"
    :arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
    :arg str tag_search_text: The words to find matching tags with.
    :arg bool ssl_verify: To verify HTTPs.
    """
    # Bug fix: ``params`` defaulted to None and was subscripted below,
    # raising a TypeError whenever the argument was omitted.
    if params is None:
        params = {}
    path = '/series/search/tags?'
    params['series_search_text'] = series_search_text
    response_type = response_type if response_type else self.response_type
    if response_type != 'xml':
        params['file_type'] = 'json'
    response = _get_request(self.url_root, self.api_key, path, response_type, params, self.ssl_verify)
    return response
`<https://research.stlouisfed.org/docs/api/fred/series_search_tags.html>`_
:arg str series_search_text: The words to match against economic data series. Required.
:arg str response_type: File extension of response. Options are 'xml', 'json',
'dict','df','numpy','csv','tab','pipe'. Required.
:arg str realtime_start: The start of the real-time period. Format "YYYY-MM-DD"
:arg str realtime_end: The end of the real-time period. Format "YYYY-MM-DD"
:arg int limit: The maximum number of results to return. Options 1 to 1000
:arg int offset: Data offset. Options >=0
:arg str order_by: Order results by values of the specified attribute. Options are 'series_count',
'popularity', 'created', 'name', 'group_id'
:arg str sort_order: Sort results for attribute values specified by order_by. Options are 'asc','desc'
:arg str tag_names: Tag names that series match. Separate with semicolon as in "income;bea"
:arg str tag_group_id: Tag ID to filter tags by. Options are 'freq', 'gen', 'geo', 'geot', 'rls', 'seas', 'src'
:arg str tag_search_text: The words to find matching tags with.
:arg bool ssl_verify: To verify HTTPs. |
def get_tunnel_info_input_filter_type_filter_by_adm_state_admin_state(self, **kwargs):
    """Build a ``get_tunnel_info`` request filtered by admin state.

    Constructs the element tree
    ``get_tunnel_info/input/filter-type/filter-by-adm-state/admin-state``
    and passes it to the callback.

    :arg str admin_state: Text for the ``admin-state`` leaf (required).
    :arg callable callback: Request handler; defaults to ``self._callback``.
    :returns: Whatever the callback returns for the generated tree.
    """
    get_tunnel_info = ET.Element("get_tunnel_info")
    # The request root is the get_tunnel_info element itself; the former
    # throwaway ET.Element("config") allocation was dead code and is gone.
    config = get_tunnel_info
    # Renamed from ``input`` to avoid shadowing the builtin.
    input_el = ET.SubElement(get_tunnel_info, "input")
    filter_type = ET.SubElement(input_el, "filter-type")
    filter_by_adm_state = ET.SubElement(filter_type, "filter-by-adm-state")
    admin_state = ET.SubElement(filter_by_adm_state, "admin-state")
    admin_state.text = kwargs.pop('admin_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.