| signature | body | docstring | id |
|---|---|---|---|
def get_function_doc(function, config=default_config):
|
if config.exclude_function:<EOL><INDENT>for ex in config.exclude_function:<EOL><INDENT>if ex.match(function.__name__):<EOL><INDENT>return None<EOL><DEDENT><DEDENT><DEDENT>return _doc_object(function, '<STR_LIT>', config=config)<EOL>
|
Return doc for a function.
|
f8758:m1
|
def get_class_doc(klass, config=default_config):
|
if config.exclude_class:<EOL><INDENT>for ex in config.exclude_class:<EOL><INDENT>if ex.match(klass.__name__):<EOL><INDENT>return None<EOL><DEDENT><DEDENT><DEDENT>nested_doc = []<EOL>class_dict = klass.__dict__<EOL>for item in dir(klass):<EOL><INDENT>if item in class_dict.keys():<EOL><INDENT>appended = None<EOL>if isinstance(class_dict[item], type) and config.nested_class:<EOL><INDENT>appended = get_class_doc(class_dict[item], config)<EOL><DEDENT>elif isinstance(class_dict[item], types.FunctionType):<EOL><INDENT>appended = get_function_doc(class_dict[item], config)<EOL><DEDENT>if appended is not None:<EOL><INDENT>nested_doc.append(appended)<EOL><DEDENT><DEDENT><DEDENT>return _doc_object(klass, '<STR_LIT:class>', nested_doc, config)<EOL>
|
Return doc for a class.
|
f8758:m2
|
def get_module_doc(module, config=default_config, already_met=None):
|
<EOL>if already_met is None:<EOL><INDENT>already_met = set()<EOL><DEDENT>if config.exclude_module:<EOL><INDENT>for ex in config.exclude_module:<EOL><INDENT>if ex.match(module.__name__):<EOL><INDENT>return None<EOL><DEDENT><DEDENT><DEDENT>if hasattr(module, '<STR_LIT>'):<EOL><INDENT>subm = [<EOL>modname for importer, modname, ispkg<EOL>in pkgutil.iter_modules(module.__path__)<EOL>]<EOL>__import__(module.__name__, fromlist=subm)<EOL><DEDENT>if hasattr(module, '<STR_LIT>'):<EOL><INDENT>module_file = module.__file__<EOL><DEDENT>else:<EOL><INDENT>module_file = inspect.getsourcefile(module)<EOL><DEDENT>path, ext = os.path.splitext(module_file)<EOL>if ext == '<STR_LIT>':<EOL><INDENT>module_file = path + '<STR_LIT>'<EOL><DEDENT>try:<EOL><INDENT>code = open(module_file).read()<EOL>body = ast.parse(code).body<EOL><DEDENT>except SyntaxError:<EOL><INDENT>code = open(module_file).read().encode('<STR_LIT:utf-8>')<EOL>body = ast.parse(code).body<EOL><DEDENT>imported = []<EOL>for node in body:<EOL><INDENT>if isinstance(node, (ast.Import, ast.ImportFrom)):<EOL><INDENT>imported.extend([n.name for n in node.names])<EOL><DEDENT><DEDENT>nested_doc = []<EOL>module_dict = module.__dict__<EOL>for item in dir(module):<EOL><INDENT>if item not in imported and item in module_dict.keys():<EOL><INDENT>if id(item) in already_met:<EOL><INDENT>continue<EOL><DEDENT>already_met.add(id(item))<EOL>appended = None<EOL>if isinstance(module_dict[item], types.ModuleType):<EOL><INDENT>appended = get_module_doc(module_dict[item], config, already_met) <EOL><DEDENT>elif isinstance(module_dict[item], type):<EOL><INDENT>appended = get_class_doc(module_dict[item], config)<EOL><DEDENT>elif isinstance(module_dict[item], types.FunctionType):<EOL><INDENT>appended = get_function_doc(module_dict[item], config)<EOL><DEDENT>if appended is not None:<EOL><INDENT>nested_doc.append(appended)<EOL><DEDENT><DEDENT><DEDENT>return _doc_object(module, '<STR_LIT>', nested_doc, config)<EOL>
|
Return doc for a module.
|
f8758:m3
|
def __init__(self, value, method=Method.PREFIX):
|
self.value = value<EOL>self.method = method<EOL>
|
Init method.
Args:
value (str): value to match.
method (const): Method constant, matching method.
|
f8758:c0:m0
|
def match(self, name):
|
if self.method == Ex.Method.PREFIX:<EOL><INDENT>return name.startswith(self.value)<EOL><DEDENT>elif self.method == Ex.Method.SUFFIX:<EOL><INDENT>return name.endswith(self.value)<EOL><DEDENT>elif self.method == Ex.Method.CONTAINS:<EOL><INDENT>return self.value in name<EOL><DEDENT>elif self.method == Ex.Method.EXACT:<EOL><INDENT>return self.value == name<EOL><DEDENT>elif self.method == Ex.Method.REGEX:<EOL><INDENT>return re.search(self.value, name)<EOL><DEDENT>return False<EOL>
|
Check if given name matches.
Args:
name (str): name to check.
Returns:
bool: matches name.
|
f8758:c0:m1
|
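The five matching methods above are easiest to see end to end. A minimal, self-contained reconstruction (the constants are referenced as `Ex.Method.*` but the `Method` class itself is not shown in this table, so its layout here is an assumption):

```python
import re

class Ex:
    """Name-matching rule; reconstructed sketch, class layout assumed."""
    class Method:
        PREFIX, SUFFIX, CONTAINS, EXACT, REGEX = range(5)

    def __init__(self, value, method=Method.PREFIX):
        self.value = value
        self.method = method

    def match(self, name):
        if self.method == Ex.Method.PREFIX:
            return name.startswith(self.value)
        elif self.method == Ex.Method.SUFFIX:
            return name.endswith(self.value)
        elif self.method == Ex.Method.CONTAINS:
            return self.value in name
        elif self.method == Ex.Method.EXACT:
            return self.value == name
        elif self.method == Ex.Method.REGEX:
            return re.search(self.value, name) is not None
        return False

assert Ex('_').match('_private')                         # default PREFIX
assert Ex('_test', Ex.Method.SUFFIX).match('foo_test')
assert Ex(r'^get_\w+_doc$', Ex.Method.REGEX).match('get_class_doc')
```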
def __init__(self,<EOL>exclude_module=None,<EOL>exclude_class=None,<EOL>exclude_function=None,<EOL>nested_class=False,<EOL>missing_doc=True):
|
self.exclude_module = exclude_module<EOL>self.exclude_class = exclude_class<EOL>self.exclude_function = exclude_function<EOL>self.nested_class = nested_class<EOL>self.missing_doc = missing_doc<EOL>
|
Init method.
Args:
exclude_module (list): list of Ex instances.
exclude_class (list): list of Ex instances.
exclude_function (list): list of Ex instances.
nested_class (bool): whether to get nested classes in classes.
missing_doc (bool): whether to get doc even when empty.
|
f8758:c1:m0
|
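Putting `Ex` and `Config` together, a plausible configuration for the doc walker above (usage sketch only; it assumes the `Ex` reconstruction from the previous block):

```python
# Skip private functions and any test modules; keep nested classes.
config = Config(
    exclude_module=[Ex('tests', Ex.Method.CONTAINS)],
    exclude_function=[Ex('_')],          # PREFIX is the default method
    nested_class=True,
    missing_doc=False,                   # drop objects with empty docstrings
)

import pprint
doc = get_module_doc(pprint, config=config)   # walk a stdlib module
```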
def read(*names, **kwargs):
|
return io.open(<EOL>join(dirname(__file__), *names),<EOL>encoding=kwargs.get('<STR_LIT>', '<STR_LIT:utf8>')<EOL>).read()<EOL>
|
Read a file in current directory.
|
f8759:m0
|
def build_toc_tree(title, input, output, content_directory):
|
LOGGER.info("<STR_LIT>".format(build_toc_tree.__name__,<EOL>output))<EOL>file = File(input)<EOL>file.cache()<EOL>existing_files = [foundations.strings.get_splitext_basename(item)<EOL>for item in glob.glob("<STR_LIT>".format(content_directory, FILES_EXTENSION))]<EOL>relative_directory = content_directory.replace("<STR_LIT>".format(os.path.dirname(output)), "<STR_LIT>")<EOL>toc_tree = ["<STR_LIT:\n>"]<EOL>for line in file.content:<EOL><INDENT>search = re.search(r"<STR_LIT>", line)<EOL>if not search:<EOL><INDENT>continue<EOL><DEDENT>item = search.groups()[<NUM_LIT:0>]<EOL>code = "<STR_LIT>".format(item[<NUM_LIT:0>].lower(), item.replace("<STR_LIT:U+0020>", "<STR_LIT>")[<NUM_LIT:1>:])<EOL>if code in existing_files:<EOL><INDENT>link = "<STR_LIT>".format(relative_directory, code)<EOL>data = "<STR_LIT>".format("<STR_LIT:U+0020>", "<STR_LIT:U+0020>" * line.index("<STR_LIT:->"), item, link)<EOL>LOGGER.info("<STR_LIT>".format(build_toc_tree.__name__,<EOL>data.replace("<STR_LIT:\n>", "<STR_LIT>")))<EOL>toc_tree.append(data)<EOL><DEDENT><DEDENT>toc_tree.append("<STR_LIT:\n>")<EOL>TOCTREE_TEMPLATE_BEGIN[<NUM_LIT:0>] = TOCTREE_TEMPLATE_BEGIN[<NUM_LIT:0>].format(title)<EOL>TOCTREE_TEMPLATE_BEGIN[<NUM_LIT:1>] = TOCTREE_TEMPLATE_BEGIN[<NUM_LIT:1>].format("<STR_LIT:=>" * len(TOCTREE_TEMPLATE_BEGIN[<NUM_LIT:0>]))<EOL>content = TOCTREE_TEMPLATE_BEGIN<EOL>content.extend(toc_tree)<EOL>content.extend(TOCTREE_TEMPLATE_END)<EOL>file = File(output)<EOL>file.content = content<EOL>file.write()<EOL>return True<EOL>
|
Builds the Sphinx documentation table of contents tree file.
:param title: Package title.
:type title: unicode
:param input: Input file to convert.
:type input: unicode
:param output: Output file.
:type output: unicode
:param content_directory: Directory containing the content to be included in the table of contents.
:type content_directory: unicode
:return: Definition success.
:rtype: bool
|
f8761:m0
|
def get_command_line_arguments():
|
parser = argparse.ArgumentParser(add_help=False)<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>action="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT:title>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT:input>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT:-c>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>if len(sys.argv) == <NUM_LIT:1>:<EOL><INDENT>parser.print_help()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return parser.parse_args()<EOL>
|
Retrieves command line arguments.
:return: Namespace.
:rtype: Namespace
|
f8761:m1
|
@foundations.decorators.system_exit<EOL>def main():
|
args = get_command_line_arguments()<EOL>return build_toc_tree(args.title,<EOL>args.input,<EOL>args.output,<EOL>args.content_directory)<EOL>
|
Starts the Application.
:return: Definition success.
:rtype: bool
|
f8761:m2
|
def bleach(file):
|
LOGGER.info("<STR_LIT>".format(__name__, file))<EOL>source_file = File(file)<EOL>content = source_file.read()<EOL>for pattern in STATEMENT_SUBSTITUTE:<EOL><INDENT>matches = [match for match in re.finditer(pattern, content, re.DOTALL)]<EOL>offset = <NUM_LIT:0><EOL>for match in matches:<EOL><INDENT>start, end = match.start("<STR_LIT>"), match.end("<STR_LIT>")<EOL>substitution = "<STR_LIT>".format(STATEMENT_UPDATE_MESSAGE,<EOL>re.sub("<STR_LIT:\n>", "<STR_LIT>".format(STATEMENT_UPDATE_MESSAGE),<EOL>match.group("<STR_LIT>")))<EOL>content = "<STR_LIT>".join((content[<NUM_LIT:0>: start + offset],<EOL>substitution,<EOL>content[end + offset:]))<EOL>offset += len(substitution) - len(match.group("<STR_LIT>"))<EOL><DEDENT><DEDENT>source_file.content = [content]<EOL>source_file.write()<EOL>return True<EOL>
|
Sanitizes given python module.
:param file: Python module file.
:type file: unicode
:return: Definition success.
:rtype: bool
|
f8762:m0
|
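The substitution loop in `bleach` keeps an explicit running `offset` because each replacement can change the string length, invalidating later match positions that were computed on the original text. A stripped-down illustration (the pattern, the `statement` group name, and the replacement text are placeholders; the originals are elided literals):

```python
import re

content = "keep\nDROP one\nkeep\nDROP two\n"
pattern = re.compile(r"(?P<statement>DROP [a-z]+)")

offset = 0
for match in list(pattern.finditer(content)):   # positions from the original text
    start, end = match.start("statement"), match.end("statement")
    substitution = "# removed"
    content = "".join((content[:start + offset],
                       substitution,
                       content[end + offset:]))
    # track how far subsequent matches have shifted
    offset += len(substitution) - len(match.group("statement"))

print(content)   # both DROP lines replaced, positions stayed consistent
```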
def readmodule(module, path=None):
|
res = {}<EOL>for key, value in list(_readmodule(module, path or []).items()):<EOL><INDENT>if isinstance(value, Class):<EOL><INDENT>res[key] = value<EOL><DEDENT><DEDENT>return res<EOL>
|
Backwards compatible interface.
Call readmodule_ex() and then only keep Class objects from the
resulting dictionary.
|
f8763:m0
|
def readmodule_ex(module, path=None):
|
return _readmodule(module, path or [])<EOL>
|
Read a module file and return a dictionary of classes.
Search for MODULE in PATH and sys.path, read and parse the
module and return a dictionary with one entry for each class
found in the module.
|
f8763:m1
|
def _readmodule(module, path, inpackage=None):
|
<EOL>if inpackage is not None:<EOL><INDENT>fullmodule = "<STR_LIT>" % (inpackage, module)<EOL><DEDENT>else:<EOL><INDENT>fullmodule = module<EOL><DEDENT>if fullmodule in _modules:<EOL><INDENT>return _modules[fullmodule]<EOL><DEDENT>dict = OrderedDict()<EOL>if module in sys.builtin_module_names and inpackage is None:<EOL><INDENT>_modules[module] = dict<EOL>return dict<EOL><DEDENT>i = module.rfind('<STR_LIT:.>')<EOL>if i >= <NUM_LIT:0>:<EOL><INDENT>package = module[:i]<EOL>submodule = module[i + <NUM_LIT:1>:]<EOL>parent = _readmodule(package, path, inpackage)<EOL>if inpackage is not None:<EOL><INDENT>package = "<STR_LIT>" % (inpackage, package)<EOL><DEDENT>return _readmodule(submodule, parent['<STR_LIT>'], package)<EOL><DEDENT>f = None<EOL>if inpackage is not None:<EOL><INDENT>f, fname, (_s, _m, ty) = imp.find_module(module, path)<EOL><DEDENT>else:<EOL><INDENT>f, fname, (_s, _m, ty) = imp.find_module(module, path + sys.path)<EOL><DEDENT>if ty == imp.PKG_DIRECTORY:<EOL><INDENT>dict['<STR_LIT>'] = [fname]<EOL>path = [fname] + path<EOL>f, fname, (_s, _m, ty) = imp.find_module('<STR_LIT>', [fname])<EOL><DEDENT>_modules[fullmodule] = dict<EOL>if ty != imp.PY_SOURCE:<EOL><INDENT>f.close()<EOL>return dict<EOL><DEDENT>stack = [] <EOL>g = tokenize.generate_tokens(f.readline)<EOL>try:<EOL><INDENT>for tokentype, token, start, _end, _line in g:<EOL><INDENT>if tokentype == DEDENT:<EOL><INDENT>lineno, thisindent = start<EOL>while stack and stack[-<NUM_LIT:1>][<NUM_LIT:1>] >= thisindent:<EOL><INDENT>del stack[-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>elif token == '<STR_LIT>':<EOL><INDENT>lineno, thisindent = start<EOL>while stack and stack[-<NUM_LIT:1>][<NUM_LIT:1>] >= thisindent:<EOL><INDENT>del stack[-<NUM_LIT:1>]<EOL><DEDENT>tokentype, meth_name, start = g.next()[<NUM_LIT:0>:<NUM_LIT:3>]<EOL>if tokentype != NAME:<EOL><INDENT>continue <EOL><DEDENT>if stack:<EOL><INDENT>cur_class = stack[-<NUM_LIT:1>][<NUM_LIT:0>]<EOL>if isinstance(cur_class, Class):<EOL><INDENT>cur_class._addmethod(meth_name, lineno)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dict[meth_name] = Function(fullmodule, meth_name,<EOL>fname, lineno)<EOL><DEDENT>stack.append((None, thisindent)) <EOL><DEDENT>elif token == '<STR_LIT:class>':<EOL><INDENT>lineno, thisindent = start<EOL>while stack and stack[-<NUM_LIT:1>][<NUM_LIT:1>] >= thisindent:<EOL><INDENT>del stack[-<NUM_LIT:1>]<EOL><DEDENT>tokentype, class_name, start = g.next()[<NUM_LIT:0>:<NUM_LIT:3>]<EOL>if tokentype != NAME:<EOL><INDENT>continue <EOL><DEDENT>tokentype, token, start = g.next()[<NUM_LIT:0>:<NUM_LIT:3>]<EOL>inherit = None<EOL>if token == '<STR_LIT:(>':<EOL><INDENT>names = [] <EOL>level = <NUM_LIT:1><EOL>super = [] <EOL>while True:<EOL><INDENT>tokentype, token, start = g.next()[<NUM_LIT:0>:<NUM_LIT:3>]<EOL>if token in ('<STR_LIT:)>', '<STR_LIT:U+002C>') and level == <NUM_LIT:1>:<EOL><INDENT>n = "<STR_LIT>".join(super)<EOL>if n in dict:<EOL><INDENT>n = dict[n]<EOL><DEDENT>else:<EOL><INDENT>c = n.split('<STR_LIT:.>')<EOL>if len(c) > <NUM_LIT:1>:<EOL><INDENT>m = c[-<NUM_LIT:2>]<EOL>c = c[-<NUM_LIT:1>]<EOL>if m in _modules:<EOL><INDENT>d = _modules[m]<EOL>if c in d:<EOL><INDENT>n = d[c]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>names.append(n)<EOL>super = []<EOL><DEDENT>if token == '<STR_LIT:(>':<EOL><INDENT>level += <NUM_LIT:1><EOL><DEDENT>elif token == '<STR_LIT:)>':<EOL><INDENT>level -= <NUM_LIT:1><EOL>if level == <NUM_LIT:0>:<EOL><INDENT>break<EOL><DEDENT><DEDENT>elif token == '<STR_LIT:U+002C>' and level == <NUM_LIT:1>:<EOL><INDENT>pass<EOL><DEDENT>elif tokentype in (NAME, OP) and level == <NUM_LIT:1>:<EOL><INDENT>super.append(token)<EOL><DEDENT><DEDENT>inherit = names<EOL><DEDENT>cur_class = Class(fullmodule, class_name, inherit,<EOL>fname, lineno)<EOL>if not stack:<EOL><INDENT>dict[class_name] = cur_class<EOL><DEDENT>stack.append((cur_class, thisindent))<EOL><DEDENT>elif token == '<STR_LIT>' and start[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>modules = _getnamelist(g)<EOL>for mod, _mod2 in modules:<EOL><INDENT>try:<EOL><INDENT>if inpackage is None:<EOL><INDENT>_readmodule(mod, path)<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>_readmodule(mod, path, inpackage)<EOL><DEDENT>except ImportError:<EOL><INDENT>_readmodule(mod, [])<EOL><DEDENT><DEDENT><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>elif token == '<STR_LIT>' and start[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>mod, token = _getname(g)<EOL>if not mod or token != "<STR_LIT>":<EOL><INDENT>continue<EOL><DEDENT>names = _getnamelist(g)<EOL>try:<EOL><INDENT>d = _readmodule(mod, path, inpackage)<EOL><DEDENT>except:<EOL><INDENT>continue<EOL><DEDENT>for n, n2 in names:<EOL><INDENT>if n in d:<EOL><INDENT>dict[n2 or n] = d[n]<EOL><DEDENT>elif n == '<STR_LIT:*>':<EOL><INDENT>for n in d:<EOL><INDENT>if n[<NUM_LIT:0>] != '<STR_LIT:_>':<EOL><INDENT>dict[n] = d[n]<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>elif tokentype == NAME and start[<NUM_LIT:1>] == <NUM_LIT:0>:<EOL><INDENT>name = token<EOL>line = _line<EOL>tokentype, token = g.next()[<NUM_LIT:0>:<NUM_LIT:2>]<EOL>if tokentype == OP and token == "<STR_LIT:=>":<EOL><INDENT>dict[name] = Global(fullmodule, name, fname, _line)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>except StopIteration:<EOL><INDENT>pass<EOL><DEDENT>f.close()<EOL>return dict<EOL>
|
Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
|
f8763:m2
|
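These three functions track the standard library's `pyclbr` module closely (note the `imp` module and the `g.next()` calls, which date this code to Python 2). On Python 3 the equivalent stdlib call looks like this:

```python
import pyclbr

# Map top-level names in a module to Class/Function descriptors.
info = pyclbr.readmodule_ex('json')
for name, obj in info.items():
    print(name, type(obj).__name__, getattr(obj, 'lineno', None))
```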
def reStructuredText_to_html(input, output, css_file):
|
LOGGER.info("<STR_LIT>".format(<EOL>reStructuredText_to_html.__name__, input))<EOL>os.system("<STR_LIT>".format(RST2HTML,<EOL>os.path.join(os.path.dirname(__file__), css_file),<EOL>input,<EOL>output))<EOL>LOGGER.info("<STR_LIT>".format("<STR_LIT>"))<EOL>os.system("<STR_LIT>".format(os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), output))<EOL>file = File(output)<EOL>file.cache()<EOL>LOGGER.info("<STR_LIT>".format(reStructuredText_to_html.__name__))<EOL>file.content = [line.replace("<STR_LIT:U+0020>" * <NUM_LIT:4>, "<STR_LIT:\t>") for line in file.content]<EOL>file.write()<EOL>return True<EOL>
|
Outputs a reStructuredText file to HTML.
:param input: Input reStructuredText file to convert.
:type input: unicode
:param output: Output HTML file.
:type output: unicode
:param css_file: CSS file.
:type css_file: unicode
:return: Definition success.
:rtype: bool
|
f8765:m0
|
def get_command_line_arguments():
|
parser = argparse.ArgumentParser(add_help=False)<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>action="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT:input>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT:-c>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>if len(sys.argv) == <NUM_LIT:1>:<EOL><INDENT>parser.print_help()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return parser.parse_args()<EOL>
|
Retrieves command line arguments.
:return: Namespace.
:rtype: Namespace
|
f8765:m1
|
@foundations.decorators.system_exit<EOL>def main():
|
args = get_command_line_arguments()<EOL>args.css_file = args.css_file if foundations.common.path_exists(args.css_file) else CSS_FILE<EOL>return reStructuredText_to_html(args.input,<EOL>args.output,<EOL>args.css_file)<EOL>
|
Starts the Application.
:return: Definition success.
:rtype: bool
|
f8765:m2
|
def import_sanitizer(sanitizer):
|
directory = os.path.dirname(sanitizer)<EOL>not directory in sys.path and sys.path.append(directory)<EOL>namespace = __import__(foundations.strings.get_splitext_basename(sanitizer))<EOL>if hasattr(namespace, "<STR_LIT>"):<EOL><INDENT>return namespace<EOL><DEDENT>else:<EOL><INDENT>raise foundations.exceptions.ProgrammingError(<EOL>"<STR_LIT>".format(sanitizer))<EOL><DEDENT>
|
Imports the sanitizer python module.
:param sanitizer: Sanitizer python module file.
:type sanitizer: unicode
:return: Module.
:rtype: object
|
f8766:m0
|
def build_api(packages, input, output, sanitizer, excluded_modules=None):
|
LOGGER.info("<STR_LIT>".format(build_api.__name__))<EOL>sanitizer = import_sanitizer(sanitizer)<EOL>if os.path.exists(input):<EOL><INDENT>shutil.rmtree(input)<EOL>os.makedirs(input)<EOL><DEDENT>excluded_modules = [] if excluded_modules is None else excluded_modules<EOL>packages_modules = {"<STR_LIT>": [],<EOL>"<STR_LIT>": []}<EOL>for package in packages:<EOL><INDENT>package = __import__(package)<EOL>path = foundations.common.get_first_item(package.__path__)<EOL>package_directory = os.path.dirname(path)<EOL>for file in sorted(<EOL>list(foundations.walkers.files_walker(package_directory, filters_in=("<STR_LIT>".format(path),)))):<EOL><INDENT>LOGGER.info("<STR_LIT>".format(build_api.__name__, file))<EOL>target_directory = os.path.dirname(file).replace(package_directory, "<STR_LIT>")<EOL>directory = "<STR_LIT>".format(input, target_directory)<EOL>if not foundations.common.path_exists(directory):<EOL><INDENT>os.makedirs(directory)<EOL><DEDENT>source = os.path.join(directory, os.path.basename(file))<EOL>shutil.copyfile(file, source)<EOL><DEDENT>modules = []<EOL>for file in sorted(<EOL>list(foundations.walkers.files_walker(package_directory, filters_in=("<STR_LIT>".format(path),),<EOL>filters_out=excluded_modules))):<EOL><INDENT>LOGGER.info("<STR_LIT>".format(build_api.__name__, file))<EOL>module = "<STR_LIT>".format(("<STR_LIT:.>".join(os.path.dirname(file).replace(package_directory, "<STR_LIT>").split("<STR_LIT:/>"))),<EOL>foundations.strings.get_splitext_basename(file)).strip("<STR_LIT:.>")<EOL>LOGGER.info("<STR_LIT>".format(build_api.__name__, module))<EOL>directory = os.path.dirname(os.path.join(input, module.replace("<STR_LIT:.>", "<STR_LIT:/>")))<EOL>if not foundations.common.path_exists(directory):<EOL><INDENT>os.makedirs(directory)<EOL><DEDENT>source = os.path.join(directory, os.path.basename(file))<EOL>shutil.copyfile(file, source)<EOL>sanitizer.bleach(source)<EOL>if "<STR_LIT>" in file:<EOL><INDENT>continue<EOL><DEDENT>rst_file_path = "<STR_LIT>".format(module, FILES_EXTENSION)<EOL>LOGGER.info("<STR_LIT>".format(build_api.__name__, rst_file_path))<EOL>rst_file = File(os.path.join(output, rst_file_path))<EOL>header = ["<STR_LIT>".format(module),<EOL>"<STR_LIT>".format("<STR_LIT:=>" * len(module)),<EOL>"<STR_LIT:\n>",<EOL>"<STR_LIT>".format(module),<EOL>"<STR_LIT:\n>"]<EOL>rst_file.content.extend(header)<EOL>functions = OrderedDict()<EOL>classes = OrderedDict()<EOL>module_attributes = OrderedDict()<EOL>for member, object in module_browser._readmodule(module, [source, ]).iteritems():<EOL><INDENT>if object.__class__ == module_browser.Function:<EOL><INDENT>if not member.startswith("<STR_LIT:_>"):<EOL><INDENT>functions[member] = ["<STR_LIT>".format(member)]<EOL><DEDENT><DEDENT>elif object.__class__ == module_browser.Class:<EOL><INDENT>classes[member] = ["<STR_LIT>".format(member),<EOL>"<STR_LIT>",<EOL>"<STR_LIT>"]<EOL><DEDENT>elif object.__class__ == module_browser.Global:<EOL><INDENT>if not member.startswith("<STR_LIT:_>"):<EOL><INDENT>module_attributes[member] = ["<STR_LIT>".format(module, member)]<EOL><DEDENT><DEDENT><DEDENT>module_attributes and rst_file.content.append("<STR_LIT>")<EOL>for module_attribute in module_attributes.itervalues():<EOL><INDENT>rst_file.content.extend(module_attribute)<EOL>rst_file.content.append("<STR_LIT:\n>")<EOL><DEDENT>functions and rst_file.content.append("<STR_LIT>")<EOL>for function in functions.itervalues():<EOL><INDENT>rst_file.content.extend(function)<EOL>rst_file.content.append("<STR_LIT:\n>")<EOL><DEDENT>classes and rst_file.content.append("<STR_LIT>")<EOL>for class_ in classes.itervalues():<EOL><INDENT>rst_file.content.extend(class_)<EOL>rst_file.content.append("<STR_LIT:\n>")<EOL><DEDENT>rst_file.write()<EOL>modules.append(module)<EOL><DEDENT>packages_modules["<STR_LIT>"].extend([module for module in modules if not "<STR_LIT>" in module])<EOL>packages_modules["<STR_LIT>"].extend([module for module in modules if "<STR_LIT>" in module])<EOL><DEDENT>api_file = File("<STR_LIT>".format(output, FILES_EXTENSION))<EOL>api_file.content.extend(TOCTREE_TEMPLATE_BEGIN)<EOL>for module in packages_modules["<STR_LIT>"]:<EOL><INDENT>api_file.content.append("<STR_LIT>".format(module, "<STR_LIT>".format(module)))<EOL><DEDENT>for module in packages_modules["<STR_LIT>"]:<EOL><INDENT>api_file.content.append("<STR_LIT>".format(module, "<STR_LIT>".format(module)))<EOL><DEDENT>api_file.content.extend(TOCTREE_TEMPLATE_END)<EOL>api_file.write()<EOL>return True<EOL>
|
Builds the Sphinx documentation API.
:param packages: Packages to include in the API.
:type packages: list
:param input: Input modules directory.
:type input: unicode
:param output: Output reStructuredText files directory.
:type output: unicode
:param sanitizer: Sanitizer python module.
:type sanitizer: unicode
:param excluded_modules: Excluded modules.
:type excluded_modules: list
:return: Definition success.
:rtype: bool
|
f8766:m1
|
def get_command_line_arguments():
|
parser = argparse.ArgumentParser(add_help=False)<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>action="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>dest="<STR_LIT>",<EOL>nargs="<STR_LIT:+>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT:input>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>dest="<STR_LIT>",<EOL>nargs="<STR_LIT:*>",<EOL>help="<STR_LIT>")<EOL>if len(sys.argv) == <NUM_LIT:1>:<EOL><INDENT>parser.print_help()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return parser.parse_args()<EOL>
|
Retrieves command line arguments.
:return: Namespace.
:rtype: Namespace
|
f8766:m2
|
@foundations.decorators.system_exit<EOL>def main():
|
args = get_command_line_arguments()<EOL>args.sanitizer = args.sanitizer if foundations.common.path_exists(args.sanitizer) else SANITIZER<EOL>args.excluded_modules = args.excluded_modules if all(args.excluded_modules) else []<EOL>return build_api(args.packages,<EOL>args.input,<EOL>args.output,<EOL>args.sanitizer,<EOL>args.excluded_modules)<EOL>
|
Starts the Application.
:return: Definition success.
:rtype: bool
|
f8766:m3
|
def slice_reStructuredText(input, output):
|
LOGGER.info("<STR_LIT>".format(slice_reStructuredText.__name__, input))<EOL>file = File(input)<EOL>file.cache()<EOL>slices = OrderedDict()<EOL>for i, line in enumerate(file.content):<EOL><INDENT>search = re.search(r"<STR_LIT>", line)<EOL>if search:<EOL><INDENT>slices[search.groups()[<NUM_LIT:0>]] = i + SLICE_ATTRIBUTE_INDENT<EOL><DEDENT><DEDENT>index = <NUM_LIT:0><EOL>for slice, slice_start in slices.iteritems():<EOL><INDENT>slice_file = File(os.path.join(output, "<STR_LIT>".format(slice, OUTPUT_FILES_EXTENSION)))<EOL>LOGGER.info("<STR_LIT>".format(slice_reStructuredText.__name__, slice_file.path))<EOL>slice_end = index < (len(slices.values()) - <NUM_LIT:1>) and slices.values()[index + <NUM_LIT:1>] - SLICE_ATTRIBUTE_INDENT or len(file.content)<EOL>for i in range(slice_start, slice_end):<EOL><INDENT>skip_line = False<EOL>for item in CONTENT_DELETION:<EOL><INDENT>if re.search(item, file.content[i]):<EOL><INDENT>LOGGER.info("<STR_LIT>".format(slice_reStructuredText.__name__,<EOL>i,<EOL>item))<EOL>skip_line = True<EOL>break<EOL><DEDENT><DEDENT>if skip_line:<EOL><INDENT>continue<EOL><DEDENT>line = file.content[i]<EOL>for pattern, value in STATEMENT_SUBSTITUTE.iteritems():<EOL><INDENT>line = re.sub(pattern, value, line)<EOL><DEDENT>search = re.search(r"<STR_LIT>", line)<EOL>if search:<EOL><INDENT>LOGGER.info("<STR_LIT>".format(slice_reStructuredText.__name__,<EOL>i,<EOL>search.groups()[<NUM_LIT:0>]))<EOL>line = "<STR_LIT>".format(search.groups()[<NUM_LIT:0>])<EOL><DEDENT>slice_file.content.append(line)<EOL><DEDENT>slice_file.write()<EOL>index += <NUM_LIT:1><EOL><DEDENT>return True<EOL>
|
Slices given reStructuredText file.
:param input: ReStructuredText file to slice.
:type input: unicode
:param output: Directory to output sliced reStructuredText files.
:type output: unicode
:return: Definition success.
:rtype: bool
|
f8767:m0
|
def get_command_line_arguments():
|
parser = argparse.ArgumentParser(add_help=False)<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>action="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT:input>",<EOL>help="<STR_LIT>")<EOL>parser.add_argument("<STR_LIT>",<EOL>"<STR_LIT>",<EOL>type=unicode,<EOL>dest="<STR_LIT>",<EOL>help="<STR_LIT>")<EOL>if len(sys.argv) == <NUM_LIT:1>:<EOL><INDENT>parser.print_help()<EOL>sys.exit(<NUM_LIT:1>)<EOL><DEDENT>return parser.parse_args()<EOL>
|
Retrieves command line arguments.
:return: Namespace.
:rtype: Namespace
|
f8767:m1
|
@foundations.decorators.system_exit<EOL>def main():
|
args = get_command_line_arguments()<EOL>return slice_reStructuredText(args.input,<EOL>args.output)<EOL>
|
Starts the Application.
:return: Definition success.
:rtype: bool
|
f8767:m2
|
def bleach(file):
|
LOGGER.info("<STR_LIT>".format(__name__, file))<EOL>source_file = File(file)<EOL>content = source_file.read()<EOL>for pattern in STATEMENT_SUBSTITUTE:<EOL><INDENT>matches = [match for match in re.finditer(pattern, content, re.DOTALL)]<EOL>offset = <NUM_LIT:0><EOL>for match in matches:<EOL><INDENT>start, end = match.start("<STR_LIT>"), match.end("<STR_LIT>")<EOL>substitution = "<STR_LIT>".format(STATEMENT_UPDATE_MESSAGE,<EOL>re.sub("<STR_LIT:\n>", "<STR_LIT>".format(STATEMENT_UPDATE_MESSAGE),<EOL>match.group("<STR_LIT>")))<EOL>content = "<STR_LIT>".join((content[<NUM_LIT:0>: start + offset],<EOL>substitution,<EOL>content[end + offset:]))<EOL>offset += len(substitution) - len(match.group("<STR_LIT>"))<EOL><DEDENT><DEDENT>source_file.content = [content]<EOL>source_file.write()<EOL>return True<EOL>
|
Sanitizes given python module.
:param file: Python module file.
:type file: unicode
:return: Definition success.
:rtype: bool
|
f8768:m0
|
def get_long_description():
|
description = []<EOL>with open("<STR_LIT>") as file:<EOL><INDENT>for line in file:<EOL><INDENT>if "<STR_LIT>" in line and len(description) >= <NUM_LIT:2>:<EOL><INDENT>blockLine = description[-<NUM_LIT:2>]<EOL>if re.search(r"<STR_LIT>", blockLine) and not re.search(r"<STR_LIT>", blockLine):<EOL><INDENT>description[-<NUM_LIT:2>] = "<STR_LIT>".join(blockLine.rsplit("<STR_LIT::>", <NUM_LIT:1>))<EOL><DEDENT>continue<EOL><DEDENT>description.append(line)<EOL><DEDENT><DEDENT>return "<STR_LIT>".join(description)<EOL>
|
Returns the Package long description.
:return: Package long description.
:rtype: unicode
|
f8769:m0
|
def predict(self, fitted):
|
if fitted.shape[<NUM_LIT:0>] != len(self.modalities):<EOL><INDENT>raise ValueError("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT>return fitted.idxmin()<EOL>
|
Assign the most likely modality given the fitted data
Parameters
----------
fitted : pandas.DataFrame or pandas.Series
Either a (n_modalities, features) DataFrame or (n_modalities,)
Series, either of which will return the best modality for each
feature.
|
f8771:c0:m2
|
def __init__(self, alphas, betas, ylabel='<STR_LIT>'):
|
if not isinstance(alphas, Iterable) and not isinstance(betas,<EOL>Iterable):<EOL><INDENT>alphas = [alphas]<EOL>betas = [betas]<EOL><DEDENT>self.ylabel = ylabel<EOL>self.alphas = np.array(alphas) if isinstance(alphas, Iterable) else np.ones(len(betas)) * alphas<EOL>self.betas = np.array(betas) if isinstance(betas, Iterable) else np.ones(len(alphas)) * betas<EOL>self.rvs = [stats.beta(a, b) for a, b in<EOL>zip(self.alphas, self.betas)]<EOL>self.scores = np.ones(self.alphas.shape).astype(float)<EOL>self.prob_parameters = self.scores/self.scores.sum()<EOL>
|
Model a family of beta distributions
Parameters
----------
alphas : float or list-like
List of values for the alpha parameter of the Beta distribution. If
this is a single value (not a list), it will be assumed that this
value is constant, and will be propagated through to have as many
values as the "betas" parameter
betas : float or list-like
List of values for the beta parameter of the Beta distribution. If
this is a single value (not a list), it will be assumed that this
value is constant, and will be propagated through to have as many
values as the "alphas" parameter
ylabel : str, optional
Name of the value you're estimating. Originally developed for
alternative splicing "percent spliced in"/"Psi" scores, the default
is the Greek letter Psi
|
f8773:c0:m0
|
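The scalar broadcasting described above, in isolation: a single alpha is repeated to pair with every beta, and each (alpha, beta) pair freezes one scipy Beta distribution. A minimal sketch with local names:

```python
import numpy as np
from scipy import stats

alphas, betas = 2.0, [1.0, 2.0, 5.0]        # scalar alpha, list of betas
alphas = np.ones(len(betas)) * alphas       # -> array([2., 2., 2.])
rvs = [stats.beta(a, b) for a, b in zip(alphas, betas)]
print([round(rv.mean(), 3) for rv in rvs])  # [0.667, 0.5, 0.286]
```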
def __eq__(self, other):
|
return np.all(self.alphas == other.alphas) and np.all(self.betas == other.betas) and np.all(self.prob_parameters == other.prob_parameters)<EOL>
|
Test equality with other model
|
f8773:c0:m1
|
def __ne__(self, other):
|
return not self.__eq__(other)<EOL>
|
Test not equality with other model
|
f8773:c0:m2
|
def logliks(self, x):
|
x = x.copy()<EOL>x[x == <NUM_LIT:0>] = VERY_SMALL_NUMBER<EOL>x[x == <NUM_LIT:1>] = <NUM_LIT:1> - VERY_SMALL_NUMBER<EOL>return np.array([np.log(prob) + rv.logpdf(x[np.isfinite(x)]).sum()<EOL>for prob, rv in<EOL>zip(self.prob_parameters, self.rvs)])<EOL>
|
Calculate log-likelihood of a feature x for each model
Converts all values that are exactly 1 or exactly 0 to 0.999 and 0.001
because they are out of range of the beta distribution.
Parameters
----------
x : numpy.array-like
A single vector to estimate the log-likelihood of the models on
Returns
-------
logliks : numpy.array
Log-likelihood of these data in each member of the model's family
|
f8773:c0:m3
|
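The clamping described in the docstring, shown concretely: exact 0s and 1s would give a log-pdf of negative infinity under a Beta distribution, so they are nudged inside the open interval first. A sketch; `VERY_SMALL_NUMBER` is taken from the docstring's 0.001:

```python
import numpy as np
from scipy import stats

VERY_SMALL_NUMBER = 0.001
x = np.array([0.0, 0.25, 0.9, 1.0])
x[x == 0] = VERY_SMALL_NUMBER              # 0 -> 0.001
x[x == 1] = 1 - VERY_SMALL_NUMBER          # 1 -> 0.999

rv = stats.beta(2, 2)
print(rv.logpdf(x[np.isfinite(x)]).sum())  # finite total log-likelihood
```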
def logsumexp_logliks(self, x):
|
return logsumexp(self.logliks(x))<EOL>
|
Calculate how well this model fits these data
Parameters
----------
x : numpy.array-like
A single vector to estimate the log-likelihood of the models on
Returns
-------
logsumexp_logliks : float
Total log-likelihood of this model given this data
|
f8773:c0:m5
|
@staticmethod<EOL><INDENT>def nice_number_string(number, decimal_places=<NUM_LIT:2>):<DEDENT>
|
if number == np.round(number):<EOL><INDENT>return str(int(number))<EOL><DEDENT>elif number < <NUM_LIT:1> and number > <NUM_LIT:0>:<EOL><INDENT>inverse = <NUM_LIT:1> / number<EOL>if int(inverse) == np.round(inverse):<EOL><INDENT>return r'<STR_LIT>'.format(int(inverse))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>template = '<STR_LIT>'.format(decimal_places)<EOL>return template.format(number)<EOL><DEDENT>
|
Convert floats to either integers or a nice looking fraction
|
f8773:c0:m6
|
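A cleaned-up sketch of the formatting logic above. The string templates are elided (`'<STR_LIT>'`) in this table, so plain-text output is assumed here, and the reciprocal test uses `np.isclose` rather than the original's `int(inverse) == np.round(inverse)` comparison, which also accepts non-integer reciprocals:

```python
import numpy as np

def nice_number_string(number, decimal_places=2):
    """Render whole numbers as '2', unit fractions as '1/4', else fixed-point."""
    if number == np.round(number):
        return str(int(number))
    if 0 < number < 1 and np.isclose(1 / number, np.round(1 / number)):
        return '1/{}'.format(int(np.round(1 / number)))
    return '{:.{}f}'.format(number, decimal_places)

assert nice_number_string(2.0) == '2'
assert nice_number_string(0.25) == '1/4'
assert nice_number_string(0.47) == '0.47'
```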
def violinplot(self, n=<NUM_LIT:1000>, **kwargs):
|
kwargs.setdefault('<STR_LIT>', '<STR_LIT>')<EOL>dfs = []<EOL>for rv in self.rvs:<EOL><INDENT>psi = rv.rvs(n)<EOL>df = pd.Series(psi, name=self.ylabel).to_frame()<EOL>alpha, beta = rv.args<EOL>alpha = self.nice_number_string(alpha, decimal_places=<NUM_LIT:2>)<EOL>beta = self.nice_number_string(beta, decimal_places=<NUM_LIT:2>)<EOL>df['<STR_LIT>'] = '<STR_LIT>'.format(<EOL>alpha, beta)<EOL>dfs.append(df)<EOL><DEDENT>data = pd.concat(dfs)<EOL>if '<STR_LIT>' not in kwargs:<EOL><INDENT>fig, ax = plt.subplots(figsize=(len(self.alphas)*<NUM_LIT>, <NUM_LIT:4>))<EOL><DEDENT>else:<EOL><INDENT>ax = kwargs.pop('<STR_LIT>')<EOL><DEDENT>ax = violinplot(x='<STR_LIT>', y=self.ylabel, data=data,<EOL>ax=ax, **kwargs)<EOL>sns.despine(ax=ax)<EOL>return ax<EOL>
|
Plot violins of each distribution in the model family
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
ax : matplotlib.Axes object
Axes object with violins plotted
|
f8773:c0:m7
|
def __init__(self, one_parameter_models=ONE_PARAMETER_MODELS,<EOL>two_parameter_models=TWO_PARAMETER_MODELS,<EOL>logbf_thresh=<NUM_LIT:10>):
|
self.logbf_thresh = logbf_thresh<EOL>self.one_param_models = {k: ModalityModel(**v)<EOL>for k, v in one_parameter_models.items()}<EOL>self.two_param_models = {k: ModalityModel(**v)<EOL>for k, v in two_parameter_models.items()}<EOL>self.models = self.one_param_models.copy()<EOL>self.models.update(self.two_param_models)<EOL>
|
Initialize an object with models to estimate splicing modality
Parameters
----------
one_parameter_models : dict
Mapping of modality name to keyword arguments for constructing a
ModalityModel with one free parameter
two_parameter_models : dict
Mapping of modality name to keyword arguments for constructing a
ModalityModel with two free parameters
logbf_thresh : float
Minimum threshold at which the bayes factor difference is defined
to be significant
|
f8779:c0:m0
|
def _single_feature_logliks_one_step(self, feature, models):
|
x_non_na = feature[~feature.isnull()]<EOL>if x_non_na.empty:<EOL><INDENT>return pd.DataFrame()<EOL><DEDENT>else:<EOL><INDENT>dfs = []<EOL>for name, model in models.items():<EOL><INDENT>df = model.single_feature_logliks(feature)<EOL>df['<STR_LIT>'] = name<EOL>dfs.append(df)<EOL><DEDENT>return pd.concat(dfs, ignore_index=True)<EOL><DEDENT>
|
Get log-likelihood of models at each parameterization for given data
Parameters
----------
feature : pandas.Series
Percent-based values of a single feature. May contain NAs, but only
non-NA values are used.
Returns
-------
logliks : pandas.DataFrame
|
f8779:c0:m1
|
@staticmethod<EOL><INDENT>def assert_non_negative(x):<DEDENT>
|
assert np.all(x[np.isfinite(x)] >= <NUM_LIT:0>)<EOL>
|
Ensure all finite values are greater than or equal to zero
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is less than 0
|
f8779:c0:m2
|
@staticmethod<EOL><INDENT>def assert_less_than_or_equal_1(x):<DEDENT>
|
assert np.all(x[np.isfinite(x)] <= <NUM_LIT:1>)<EOL>
|
Ensure all finite values are less than or equal to 1
Parameters
----------
x : array_like
A numpy array
Raises
------
AssertionError
If any value in ``x`` is greater than 1
|
f8779:c0:m3
|
def fit(self, data):
|
self.assert_less_than_or_equal_1(data.values.flat)<EOL>self.assert_non_negative(data.values.flat)<EOL>if isinstance(data, pd.DataFrame):<EOL><INDENT>log2_bayes_factors = data.apply(self.single_feature_fit)<EOL><DEDENT>elif isinstance(data, pd.Series):<EOL><INDENT>log2_bayes_factors = self.single_feature_fit(data)<EOL><DEDENT>log2_bayes_factors.name = self.score_name<EOL>return log2_bayes_factors<EOL>
|
Get the modality assignments of each splicing event in the data
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_events) dataframe of splicing events' PSI scores.
Must be psi scores which range from 0 to 1
Returns
-------
log2_bayes_factors : pandas.DataFrame
A (n_modalities, n_events) dataframe of the estimated log2
bayes factor for each splicing event, for each modality
Raises
------
AssertionError
If any value in ``data`` does not fall only between 0 and 1.
|
f8779:c0:m4
|
def predict(self, log2_bayes_factors, reset_index=False):
|
if reset_index:<EOL><INDENT>x = log2_bayes_factors.reset_index(level=<NUM_LIT:0>, drop=True)<EOL><DEDENT>else:<EOL><INDENT>x = log2_bayes_factors<EOL><DEDENT>if isinstance(x, pd.DataFrame):<EOL><INDENT>not_na = (x.notnull() > <NUM_LIT:0>).any()<EOL>not_na_columns = not_na[not_na].index<EOL>x.ix[NULL_MODEL, not_na_columns] = self.logbf_thresh<EOL><DEDENT>elif isinstance(x, pd.Series):<EOL><INDENT>x[NULL_MODEL] = self.logbf_thresh<EOL><DEDENT>return x.idxmax()<EOL>
|
Guess the most likely modality for each event
For each event that has at least one non-NA value, if no modalities
have logsumexp'd logliks greater than the log Bayes factor threshold,
then they are assigned the 'multimodal' modality, because we cannot
reject the null hypothesis that these did not come from the uniform
distribution.
Parameters
----------
log2_bayes_factors : pandas.DataFrame
A (4, n_events) dataframe with bayes factors for the Psi~1, Psi~0,
bimodal, and middle modalities. If an event has no bayes factors
for any of those modalities, it is ignored
reset_index : bool
If True, remove the first level of the index from the dataframe.
Useful if you are using this function to apply to a grouped
dataframe where the first level is something other than the
modality, e.g. the celltype
Returns
-------
modalities : pandas.Series
A (n_events,) series with the most likely modality for each event
|
f8779:c0:m5
|
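The decision rule in `predict`, reduced to one event: the null (multimodal) model is pinned at the Bayes-factor threshold, so it wins `idxmax` unless some modality clears that bar. A sketch; the modality names follow the docstring, and the constant values are assumptions:

```python
import pandas as pd

logbf_thresh = 10                          # threshold from the estimator
scores = pd.Series({'Psi~0': 3.2, 'Psi~1': 14.7,
                    'bimodal': 1.0, 'middle': 0.4})
scores['multimodal'] = logbf_thresh        # the pinned null model
print(scores.idxmax())                     # 'Psi~1' clears the threshold
```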
def fit_predict(self, data):
|
return self.predict(self.fit(data))<EOL>
|
Convenience function to assign modalities directly from data
|
f8779:c0:m6
|
def single_feature_logliks(self, feature):
|
self.assert_less_than_or_equal_1(feature.values)<EOL>self.assert_non_negative(feature.values)<EOL>logliks = self._single_feature_logliks_one_step(<EOL>feature, self.one_param_models)<EOL>logsumexps = self.logliks_to_logsumexp(logliks)<EOL>if (logsumexps <= self.logbf_thresh).all():<EOL><INDENT>logliks_two_params = self._single_feature_logliks_one_step(<EOL>feature, self.two_param_models)<EOL>logliks = pd.concat([logliks, logliks_two_params])<EOL><DEDENT>return logliks<EOL>
|
Calculate log-likelihoods of each modality's parameterization
Used for plotting the estimates of a single feature
Parameters
----------
feature : pandas.Series
A single feature's values. All values must range from 0 to 1.
Returns
-------
logliks : pandas.DataFrame
The log-likelihood the data, for each model, for each
parameterization
Raises
------
AssertionError
If any value in ``x`` does not fall only between 0 and 1.
|
f8779:c0:m7
|
def single_feature_fit(self, feature):
|
if np.isfinite(feature).sum() == <NUM_LIT:0>:<EOL><INDENT>series = pd.Series(index=MODALITY_ORDER)<EOL><DEDENT>else:<EOL><INDENT>logbf_one_param = pd.Series(<EOL>{k: v.logsumexp_logliks(feature) for<EOL>k, v in self.one_param_models.items()})<EOL>if (logbf_one_param <= self.logbf_thresh).all():<EOL><INDENT>logbf_two_param = pd.Series(<EOL>{k: v.logsumexp_logliks(feature)<EOL>for k, v in self.two_param_models.items()})<EOL>series = pd.concat([logbf_one_param, logbf_two_param])<EOL>series[NULL_MODEL] = self.logbf_thresh<EOL><DEDENT>else:<EOL><INDENT>series = logbf_one_param<EOL><DEDENT><DEDENT>series.index.name = '<STR_LIT>'<EOL>series.name = self.score_name<EOL>return series<EOL>
|
Get the log2 bayes factor of the fit for each modality
|
f8779:c0:m9
|
def violinplot(self, n=<NUM_LIT:1000>, figsize=None, **kwargs):
|
if figsize is None:<EOL><INDENT>nrows = len(self.models)<EOL>width = max(len(m.rvs) for name, m in self.models.items())*<NUM_LIT><EOL>height = nrows*<NUM_LIT><EOL>figsize = width, height<EOL><DEDENT>fig, axes = plt.subplots(nrows=nrows, figsize=figsize)<EOL>for ax, model_name in zip(axes, MODALITY_ORDER):<EOL><INDENT>try:<EOL><INDENT>model = self.models[model_name]<EOL>cmap = MODALITY_TO_CMAP[model_name]<EOL>palette = cmap(np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, len(model.rvs)))<EOL>model.violinplot(n=n, ax=ax, palette=palette, **kwargs)<EOL>ax.set(title=model_name, xlabel='<STR_LIT>')<EOL><DEDENT>except KeyError:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>fig.tight_layout()<EOL>
|
r"""Visualize all modality family members with parameters
Use violinplots to visualize distributions of modality family members
Parameters
----------
n : int
Number of random variables to generate
kwargs : dict or keywords
Any keyword arguments to seaborn.violinplot
Returns
-------
fig : matplotlib.Figure object
Figure object with violins plotted
|
f8779:c0:m11
|
def violinplot(x=None, y=None, data=None, bw=<NUM_LIT>, scale='<STR_LIT:width>',<EOL>inner=None, ax=None, **kwargs):
|
if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>sns.violinplot(x, y, data=data, bw=bw, scale=scale, inner=inner, ax=ax,<EOL>**kwargs)<EOL>ax.set(ylim=(<NUM_LIT:0>, <NUM_LIT:1>), yticks=(<NUM_LIT:0>, <NUM_LIT:0.5>, <NUM_LIT:1>))<EOL>return ax<EOL>
|
Wrapper around Seaborn's Violinplot specifically for [0, 1] ranged data
What's different:
- bw = 0.2: Sets bandwidth to be small and the same between datasets
- scale = 'width': Sets the width of all violinplots to be the same
- inner = None: Don't plot a boxplot or points inside the violinplot
|
f8780:m0
|
def bar(self, counts, phenotype_to_color=None, ax=None, percentages=True):
|
if percentages:<EOL><INDENT>counts = <NUM_LIT:100> * (counts.T / counts.T.sum()).T<EOL><DEDENT>if ax is None:<EOL><INDENT>ax = plt.gca()<EOL><DEDENT>full_width = <NUM_LIT><EOL>width = full_width / counts.shape[<NUM_LIT:0>]<EOL>for i, (group, series) in enumerate(counts.iterrows()):<EOL><INDENT>left = np.arange(len(self.modality_order)) + i * width<EOL>height = [series[i] if i in series else <NUM_LIT:0><EOL>for i in self.modality_order]<EOL>color = phenotype_to_color[group]<EOL>ax.bar(left, height, width=width, color=color, label=group,<EOL>linewidth=<NUM_LIT>, edgecolor='<STR_LIT:k>')<EOL><DEDENT>ylabel = '<STR_LIT>' if percentages else '<STR_LIT>'<EOL>ax.set_ylabel(ylabel)<EOL>ax.set_xticks(np.arange(len(self.modality_order)) + full_width / <NUM_LIT:2>)<EOL>ax.set_xticklabels(self.modality_order)<EOL>ax.set_xlabel('<STR_LIT>')<EOL>ax.set_xlim(<NUM_LIT:0>, len(self.modality_order))<EOL>ax.legend(loc='<STR_LIT>')<EOL>ax.grid(axis='<STR_LIT:y>', linestyle='<STR_LIT:->', linewidth=<NUM_LIT:0.5>)<EOL>sns.despine()<EOL>
|
Draw bar plots of the modality percentage per group, grouped by modality
Parameters
----------
Returns
-------
Raises
------
|
f8780:c1:m0
|
def event_estimation(self, event, logliks, logsumexps, renamed='<STR_LIT>'):
|
plotter = _ModelLoglikPlotter()<EOL>plotter.plot(event, logliks, logsumexps, self.modality_to_color,<EOL>renamed=renamed)<EOL>return plotter<EOL>
|
Show the values underlying Bayesian modality estimation of an event
Parameters
----------
Returns
-------
Raises
------
|
f8780:c1:m1
|
def plot_best_worst_fits(assignments_df, data, modality_col='<STR_LIT>',<EOL>score='<STR_LIT>'):
|
ncols = <NUM_LIT:2><EOL>nrows = len(assignments_df.groupby(modality_col).groups.keys())<EOL>fig, axes = plt.subplots(nrows=nrows, ncols=ncols,<EOL>figsize=(nrows*<NUM_LIT:4>, ncols*<NUM_LIT:6>))<EOL>axes_iter = axes.flat<EOL>fits = '<STR_LIT>', '<STR_LIT>'<EOL>for modality, df in assignments_df.groupby(modality_col):<EOL><INDENT>df = df.sort_values(score)<EOL>color = MODALITY_TO_COLOR[modality]<EOL>for fit in fits:<EOL><INDENT>if fit == '<STR_LIT>':<EOL><INDENT>ids = df['<STR_LIT>'][-<NUM_LIT:10>:]<EOL><DEDENT>else:<EOL><INDENT>ids = df['<STR_LIT>'][:<NUM_LIT:10>]<EOL><DEDENT>fit_psi = data[ids]<EOL>tidy_fit_psi = fit_psi.stack().reset_index()<EOL>tidy_fit_psi = tidy_fit_psi.rename(columns={'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>':<EOL>'<STR_LIT>',<EOL><NUM_LIT:0>: '<STR_LIT>'})<EOL>if tidy_fit_psi.empty:<EOL><INDENT>continue<EOL><DEDENT>ax = six.next(axes_iter)<EOL>violinplot(x='<STR_LIT>', y='<STR_LIT>', data=tidy_fit_psi,<EOL>color=color, ax=ax)<EOL>ax.set(title='<STR_LIT>'.format(fit, score, modality), xticks=[])<EOL><DEDENT><DEDENT>sns.despine()<EOL>fig.tight_layout()<EOL>
|
Violinplots of the highest and lowest scoring of each modality
|
f8781:m2
|
def bin_range_strings(bins, fmt='<STR_LIT>'):
|
return [('<STR_LIT:{>' + fmt + '<STR_LIT>' + fmt + '<STR_LIT:}>').format(i, j)<EOL>for i, j in zip(bins, bins[<NUM_LIT:1>:])]<EOL>
|
Given a list of bins, make a list of strings of those bin ranges
Parameters
----------
bins : list_like
List of anything, usually values of bin edges
Returns
-------
bin_ranges : list
List of bin ranges
>>> bin_range_strings((0, 0.5, 1))
['0-0.5', '0.5-1']
|
f8782:m0
|
def binify(data, bins):
|
if bins is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if isinstance(data, pd.DataFrame):<EOL><INDENT>binned = data.apply(lambda x: pd.Series(np.histogram(x, bins=bins,<EOL>range=(<NUM_LIT:0>, <NUM_LIT:1>))[<NUM_LIT:0>]))<EOL><DEDENT>elif isinstance(data, pd.Series):<EOL><INDENT>binned = pd.Series(np.histogram(data, bins=bins, range=(<NUM_LIT:0>, <NUM_LIT:1>))[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>binned.index = bin_range_strings(bins)<EOL>binned = binned / binned.sum().astype(float)<EOL>return binned<EOL>
|
Makes a histogram of each column with the provided bins
Parameters
----------
data : pandas.DataFrame
A samples x features dataframe. Each feature (column) will be binned
into the provided bins
bins : iterable
Bins you would like to use for this data. Must include the final bin
value, e.g. (0, 0.5, 1) for the two bins (0, 0.5) and (0.5, 1).
nbins = len(bins) - 1
Returns
-------
binned : pandas.DataFrame
An nbins x features DataFrame of each column binned across rows
|
f8782:m2
|
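A worked example of `binify`'s normalization: each column becomes a histogram over the given bins, then is divided by its sum so every column is a probability distribution. A self-contained sketch mirroring the code above:

```python
import numpy as np
import pandas as pd

data = pd.DataFrame({'eventA': [0.1, 0.2, 0.9, 0.95],
                     'eventB': [0.5, 0.5, 0.4, 0.6]})
bins = (0, 0.5, 1)

binned = data.apply(lambda x: pd.Series(
    np.histogram(x, bins=bins, range=(0, 1))[0]))
binned.index = ['0-0.5', '0.5-1']             # cf. bin_range_strings above
binned = binned / binned.sum().astype(float)
print(binned)                                 # eventA: [0.5, 0.5]; eventB: [0.25, 0.75]
```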
def kld(p, q):
|
try:<EOL><INDENT>_check_prob_dist(p)<EOL>_check_prob_dist(q)<EOL><DEDENT>except ValueError:<EOL><INDENT>return np.nan<EOL><DEDENT>p = p.replace(<NUM_LIT:0>, np.nan)<EOL>q = q.replace(<NUM_LIT:0>, np.nan)<EOL>return (np.log2(p / q) * p).sum(axis=<NUM_LIT:0>)<EOL>
|
Kullback-Leibler divergence of two probability distributions, given as
pandas dataframes p and q
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
q : pandas.DataFrame
An nbins x features DataFrame, or (nbins,) Series
Returns
-------
kld : pandas.Series
Kullback-Leibler divergence of the common columns between the
dataframes, e.g. between the 1st column in p and the 1st column in q,
and the 2nd column in p and the 2nd column in q.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
Notes
-----
The input to this function must be probability distributions, not raw
values. Otherwise, the output makes no sense.
|
f8782:m3
|
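In symbols, the return line above computes D(p||q) = sum_i p_i * log2(p_i / q_i) per column, in bits; zeros are replaced with NaN first so that empty bins drop out of the sum instead of producing infinities. A one-column check:

```python
import numpy as np
import pandas as pd

p = pd.Series([0.5, 0.5]).replace(0, np.nan)
q = pd.Series([0.25, 0.75]).replace(0, np.nan)
print((np.log2(p / q) * p).sum(axis=0))   # 0.5*1 + 0.5*log2(2/3) ~= 0.2075 bits
```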
def jsd(p, q):
|
try:<EOL><INDENT>_check_prob_dist(p)<EOL>_check_prob_dist(q)<EOL><DEDENT>except ValueError:<EOL><INDENT>return np.nan<EOL><DEDENT>weight = <NUM_LIT:0.5><EOL>m = weight * (p + q)<EOL>result = weight * kld(p, m) + (<NUM_LIT:1> - weight) * kld(q, m)<EOL>return result<EOL>
|
Finds the per-column JSD between dataframes p and q
Jensen-Shannon divergence of two probability distributions, given as pandas
dataframes, p and q. These distributions are usually created by running
binify() on the dataframe.
Parameters
----------
p : pandas.DataFrame
An nbins x features DataFrame.
q : pandas.DataFrame
An nbins x features DataFrame.
Returns
-------
jsd : pandas.Series
Jensen-Shannon divergence of each column with the same names between
p and q
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
|
f8782:m4
|
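The Jensen-Shannon divergence above mixes the two distributions into m = (p + q)/2 and averages the two KL terms; with base-2 logarithms it is bounded in [0, 1], and its square root (used by `binify_and_jsd` below) is a proper metric. Continuing the one-column example:

```python
import numpy as np
import pandas as pd

p = pd.Series([0.5, 0.5])
q = pd.Series([0.25, 0.75])
m = 0.5 * (p + q)
jsd = 0.5 * (np.log2(p / m) * p).sum() + 0.5 * (np.log2(q / m) * q).sum()
print(jsd, np.sqrt(jsd))   # ~0.0488, and its metric square root
```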
def entropy(binned, base=<NUM_LIT:2>):
|
try:<EOL><INDENT>_check_prob_dist(binned)<EOL><DEDENT>except ValueError:<EOL><INDENT>return np.nan<EOL><DEDENT>return -((np.log(binned) / np.log(base)) * binned).sum(axis=<NUM_LIT:0>)<EOL>
|
Find the entropy of each column of a dataframe
Parameters
----------
binned : pandas.DataFrame
A nbins x features DataFrame of probability distributions, where each
column sums to 1
base : numeric
The log-base of the entropy. Default is 2, so the resulting entropy
is in bits.
Returns
-------
entropy : pandas.Series
Entropy values for each column of the dataframe.
Raises
------
ValueError
If the data provided is not a probability distribution, i.e. it has
negative values or its columns do not sum to 1, raise ValueError
|
f8782:m5
|
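Column-wise Shannon entropy as computed above, H = -sum_i p_i * log_base(p_i); with the default base 2 the result is in bits:

```python
import numpy as np
import pandas as pd

binned = pd.DataFrame({'uniform': [0.5, 0.5], 'skewed': [0.9, 0.1]})
base = 2
print(-((np.log(binned) / np.log(base)) * binned).sum(axis=0))
# uniform -> 1.000 bit, skewed -> ~0.469 bits
```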
def binify_and_jsd(df1, df2, bins, pair=None):
|
binned1 = binify(df1, bins=bins).dropna(how='<STR_LIT:all>', axis=<NUM_LIT:1>)<EOL>binned2 = binify(df2, bins=bins).dropna(how='<STR_LIT:all>', axis=<NUM_LIT:1>)<EOL>binned1, binned2 = binned1.align(binned2, axis=<NUM_LIT:1>, join='<STR_LIT>')<EOL>series = np.sqrt(jsd(binned1, binned2))<EOL>series.name = pair<EOL>return series<EOL>
|
Binify and calculate Jensen-Shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
|
f8782:m6
|
def cross_phenotype_jsd(data, groupby, bins, n_iter=<NUM_LIT:100>):
|
grouped = data.groupby(groupby)<EOL>jsds = []<EOL>seen = set([])<EOL>for phenotype1, df1 in grouped:<EOL><INDENT>for phenotype2, df2 in grouped:<EOL><INDENT>pair = tuple(sorted([phenotype1, phenotype2]))<EOL>if pair in seen:<EOL><INDENT>continue<EOL><DEDENT>seen.add(pair)<EOL>if phenotype1 == phenotype2:<EOL><INDENT>seriess = []<EOL>bs = cross_validation.Bootstrap(df1.shape[<NUM_LIT:0>], n_iter=n_iter,<EOL>train_size=<NUM_LIT:0.5>)<EOL>for i, (ind1, ind2) in enumerate(bs):<EOL><INDENT>df1_subset = df1.iloc[ind1, :]<EOL>df2_subset = df2.iloc[ind2, :]<EOL>seriess.append(<EOL>binify_and_jsd(df1_subset, df2_subset, bins, pair))<EOL><DEDENT>series = pd.concat(seriess, axis=<NUM_LIT:1>, names=None).mean(axis=<NUM_LIT:1>)<EOL>series.name = pair<EOL>jsds.append(series)<EOL><DEDENT>else:<EOL><INDENT>series = binify_and_jsd(df1, df2, bins, pair)<EOL>jsds.append(series)<EOL><DEDENT><DEDENT><DEDENT>return pd.concat(jsds, axis=<NUM_LIT:1>)<EOL>
|
Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
bins : array-like
Bins to use when binifying the data
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
|
f8782:m7
|
def jsd_df_to_2d(jsd_df):
|
jsd_2d = jsd_df.mean().reset_index()<EOL>jsd_2d = jsd_2d.rename(<EOL>columns={'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': '<STR_LIT>', <NUM_LIT:0>: '<STR_LIT>'})<EOL>jsd_2d = jsd_2d.pivot(index='<STR_LIT>', columns='<STR_LIT>',<EOL>values='<STR_LIT>')<EOL>return jsd_2d + np.tril(jsd_2d.T, -<NUM_LIT:1>)<EOL>
|
Transform a tall JSD dataframe to a square matrix of mean JSDs
Parameters
----------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
Returns
-------
jsd_2d : pandas.DataFrame
A (n_phenotypes, n_phenotypes) symmetric dataframe of the mean JSD
between and within phenotypes
|
f8782:m8
|
def build_parser():
|
import argparse<EOL>description = "<STR_LIT>"<EOL>parser = argparse.ArgumentParser(description=description)<EOL>parser.add_argument('<STR_LIT>', metavar='<STR_LIT>', action='<STR_LIT:store>',<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT:store_true>',<EOL>default=False, help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>', action='<STR_LIT>',<EOL>default=True, help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT>', default=True,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>if sys.version_info >= (<NUM_LIT:2>, <NUM_LIT:6>):<EOL><INDENT>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL><DEDENT>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT:-c>', dest='<STR_LIT>',<EOL>action='<STR_LIT>', default=True,<EOL>help="<STR_LIT>")<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store_true>', default=False,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT:store>', default=<NUM_LIT:10>, type=int,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', metavar='<STR_LIT>', dest='<STR_LIT>',<EOL>action='<STR_LIT>', default=[], type=str,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', '<STR_LIT>', metavar='<STR_LIT>',<EOL>dest='<STR_LIT>', action='<STR_LIT>', default=[],<EOL>type=str,<EOL>help='<STR_LIT>')<EOL>parser.add_argument('<STR_LIT>', metavar='<STR_LIT>', nargs='<STR_LIT:?>',<EOL>default=getcwd(), help='<STR_LIT>')<EOL>return parser<EOL>
|
Returns an argparse.ArgumentParser instance to parse the command line
arguments for lk
|
f8785:m0
|
def get_file_contents(path, binary=False):
|
<EOL>f = open(path, '<STR_LIT:r>')<EOL>file_contents = f.read()<EOL>f.close()<EOL>if not binary and file_contents.find('<STR_LIT>') >= <NUM_LIT:0>:<EOL><INDENT>raise IOError('<STR_LIT>')<EOL><DEDENT>return file_contents<EOL>
|
Return the contents of the text file at path.
If it is a binary file, raise an IOError.
|
f8785:m1
|
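The binary check above compares against an elided literal; the most plausible reading is a NUL byte, which rarely occurs in text files. A sketch under that assumption, restructured with a context manager:

```python
def get_file_contents(path, binary=False):
    # Assumption: the elided '<STR_LIT>' sentinel is a NUL byte.
    with open(path, 'r') as f:
        file_contents = f.read()
    if not binary and '\0' in file_contents:
        raise IOError('refusing to treat a binary file as text')
    return file_contents
```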
def main():
|
parser = build_parser()<EOL>args = parser.parse_args()<EOL>flags = re.LOCALE<EOL>if args.dot_all:<EOL><INDENT>flags |= re.DOTALL<EOL><DEDENT>if args.ignorecase:<EOL><INDENT>flags |= re.IGNORECASE<EOL><DEDENT>if args.str:<EOL><INDENT>flags |= re.UNICODE<EOL><DEDENT>if args.multiline:<EOL><INDENT>flags |= re.MULTILINE<EOL><DEDENT>exclude_path_flags = re.UNICODE | re.LOCALE<EOL>exclude_path_regexes = [ re.compile(pattern, exclude_path_flags)<EOL>for pattern in args.exclude_path_patterns ]<EOL>pattern = re.escape(args.pattern) if args.escape else args.pattern<EOL>try:<EOL><INDENT>search_manager = SearchManager(regex=re.compile(pattern, flags),<EOL>number_processes=args.number_processes,<EOL>search_hidden=args.search_hidden,<EOL>follow_links=args.follow_links,<EOL>search_binary=args.search_binary,<EOL>use_ansi_colors=args.use_ansi_colors,<EOL>print_stats=args.print_stats,<EOL>exclude_path_regexes=exclude_path_regexes,<EOL>command_strings=args.command_strings)<EOL>search_manager.enqueue_directory(args.directory)<EOL>search_manager.process_queue()<EOL><DEDENT>except (KeyboardInterruptError, KeyboardInterrupt):<EOL><INDENT>sys.stdout.write('<STR_LIT:\n>')<EOL>exit(<NUM_LIT:1>)<EOL><DEDENT>
|
Run when lk.py is executed as a script.
|
f8785:m2
|
def enqueue_directory(self, directory):
|
exclude_path_regexes = self.exclude_path_regexes[:]<EOL>if not self.search_hidden:<EOL><INDENT>exclude_path_regexes.append(self.hidden_file_regex)<EOL><DEDENT>else:<EOL><INDENT>exclude_path_regexes.remove(self.hidden_file_regex)<EOL><DEDENT>self.mark = datetime.datetime.now()<EOL>def is_path_excluded(path):<EOL><INDENT>"""<STR_LIT>"""<EOL>for exclude_path_regex in exclude_path_regexes:<EOL><INDENT>for found in exclude_path_regex.finditer(path):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL><DEDENT>def search_walk():<EOL><INDENT>try:<EOL><INDENT>walk_generator = walk(directory, followlinks=self.follow_links)<EOL><DEDENT>except TypeError:<EOL><INDENT>walk_generator = walk(directory)<EOL><DEDENT>for packed in walk_generator:<EOL><INDENT>directory_path, directory_names, file_names = packed<EOL>directory_names[:] = list(filter(is_path_excluded, directory_names))<EOL>file_names[:] = list(filter(is_path_excluded, file_names))<EOL>yield directory_path, directory_names, file_names<EOL><DEDENT><DEDENT>writer = ColorWriter(sys.stdout, self.use_ansi_colors)<EOL>def print_directory_result(directory_result):<EOL><INDENT>writer.print_result(directory_result)<EOL>for command_string in self.command_strings:<EOL><INDENT>if command_string.find('<STR_LIT:%s>') < <NUM_LIT:0>:<EOL><INDENT>command_string += '<STR_LIT>'<EOL><DEDENT>for file_name, line_result in directory_result.iter_line_results_items():<EOL><INDENT>file_path = path.join(directory_result.directory_path, file_name)<EOL>Popen(command_string % file_path, shell=True)<EOL>break<EOL><DEDENT><DEDENT><DEDENT>for directory_path, directory_names, file_names in search_walk():<EOL><INDENT>process = Process(target=self.search_worker,<EOL>args=(self.regex,<EOL>directory_path,<EOL>file_names,<EOL>self.search_binary,<EOL>print_directory_result))<EOL>self.queue.append(process)<EOL><DEDENT>
|
Add a search of the directory to the queue.
|
f8785:c0:m1
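The traversal above prunes excluded names in place with slice assignment, which prevents os.walk from descending into excluded directories (note that the source's is_path_excluded returns False on a regex hit, so it effectively acts as an inclusion predicate). A standalone sketch of the pattern, with illustrative names:

```python
import os
import re

def filtered_walk(directory, exclude_regexes, follow_links=False):
    """Yield os.walk triples with excluded names pruned in place."""
    def is_included(name):
        # Keep a name only if no exclusion regex matches it.
        return not any(r.search(name) for r in exclude_regexes)

    for dirpath, dirnames, filenames in os.walk(directory, followlinks=follow_links):
        # Slice assignment mutates the list os.walk holds, pruning descent.
        dirnames[:] = [d for d in dirnames if is_included(d)]
        filenames[:] = [f for f in filenames if is_included(f)]
        yield dirpath, dirnames, filenames

# e.g. skip hidden files and directories:
# for triple in filtered_walk('.', [re.compile(r'^\.')]): print(triple)
```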
|
def search_worker(self, regex, directory_path, names, binary=False,<EOL>callback=None):
|
try:<EOL><INDENT>result = DirectoryResult(directory_path)<EOL>def find_matches(name):<EOL><INDENT>full_path = path.join(directory_path, name)<EOL>file_contents = get_file_contents(full_path, binary)<EOL>start = <NUM_LIT:0><EOL>match = regex.search(file_contents, start)<EOL>while match:<EOL><INDENT>result.put(name, file_contents, match)<EOL>start = match.end()<EOL>match = regex.search(file_contents, start)<EOL><DEDENT><DEDENT>for name in names:<EOL><INDENT>try:<EOL><INDENT>find_matches(name)<EOL><DEDENT>except IOError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if callback:<EOL><INDENT>callback(result)<EOL><DEDENT><DEDENT>except KeyboardInterrupt as e:<EOL><INDENT>exit(<NUM_LIT:1>)<EOL><DEDENT>
|
Build a DirectoryResult for the given regex, directory path, and file names.
|
f8785:c0:m2
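The inner scan restarts each regex search at match.end(), so matches never overlap. A self-contained sketch of that loop (a zero-width pattern would spin forever here, just as in the original):

```python
import re

def iter_matches(regex, text):
    """Yield successive non-overlapping matches, resuming at match.end()."""
    start = 0
    match = regex.search(text, start)
    while match:
        yield match
        start = match.end()
        match = regex.search(text, start)

# [m.group() for m in iter_matches(re.compile(r'\d+'), 'a1b22c333')]
# -> ['1', '22', '333']
```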
|
def print_result(self, directory_result):
|
for file_name, line_results_dict in directory_result.iter_line_results_items():<EOL><INDENT>full_path = path.join(directory_result.directory_path, file_name)<EOL>self.write(full_path, '<STR_LIT>')<EOL>self.write('<STR_LIT:\n>')<EOL>for line_number, line_results in sorted(line_results_dict.items()):<EOL><INDENT>self.write('<STR_LIT>' % (line_results[<NUM_LIT:0>].line_number))<EOL>out = list(line_results[<NUM_LIT:0>].left_of_group + line_results[<NUM_LIT:0>].group + line_results[<NUM_LIT:0>].right_of_group)<EOL>offset = <NUM_LIT:0><EOL>for line_result in line_results:<EOL><INDENT>group_length = len(line_result.group)<EOL>out.insert(offset+line_result.left_offset-<NUM_LIT:1>, self.colors['<STR_LIT>'])<EOL>out.insert(offset+line_result.left_offset+group_length, self.colors['<STR_LIT:end>'])<EOL>offset += group_length + <NUM_LIT:1><EOL><DEDENT>self.write('<STR_LIT>'.join(out)+'<STR_LIT:\n>')<EOL><DEDENT>self.write('<STR_LIT:\n>')<EOL><DEDENT>
|
Print out the contents of the directory result, using ANSI color codes if
supported
|
f8785:c1:m4
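Highlighting works by exploding the line into a list of characters and splicing ANSI escape sequences around each match span; every insertion shifts later indices by one, which is what the offset bookkeeping above compensates for. A simplified runnable illustration (the escape codes are standard ANSI, not taken from the masked source):

```python
line = 'error: file not found'
start, end = 0, 5  # span of the match 'error'

out = list(line)
out.insert(start, '\033[31m')   # color start before the match
out.insert(end + 1, '\033[0m')  # reset after it; +1 accounts for the
                                # element inserted just above
print(''.join(out))             # prints the line with 'error' in red
```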
|
def get(key, default=None):
|
try:<EOL><INDENT>return ast.literal_eval(os.environ.get(key.upper(), default))<EOL><DEDENT>except (ValueError, SyntaxError):<EOL><INDENT>return os.environ.get(key.upper(), default)<EOL><DEDENT>
|
Searches os.environ. If the key is found, try evaluating its type;
otherwise return the string.
returns: key -> value (type as defined by ast.literal_eval)
|
f8790:m0
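Because the raw environment string goes straight into ast.literal_eval, values like '8000' or 'True' come back typed while plain words fall through as strings. A sketch of the same behaviour:

```python
import ast
import os

def get(key, default=None):
    """Look up KEY in os.environ, coercing with ast.literal_eval when possible."""
    raw = os.environ.get(key.upper(), default)
    try:
        return ast.literal_eval(raw)
    except (ValueError, SyntaxError):
        return raw

os.environ['PORT'] = '8000'
os.environ['NAME'] = 'demo'
print(get('port'))  # -> 8000 (int)
print(get('name'))  # -> 'demo' (literal_eval raises ValueError, falls back)
```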
|
def save(filepath=None, **kwargs):
|
if filepath is None:<EOL><INDENT>filepath = os.path.join('<STR_LIT>')<EOL><DEDENT>with open(filepath, '<STR_LIT:wb>') as file_handle:<EOL><INDENT>file_handle.writelines(<EOL>'<STR_LIT>'.format(key.upper(), val)<EOL>for key, val in kwargs.items()<EOL>)<EOL><DEDENT>
|
Saves keyword arguments as environment variables to a file.
If no filepath is given, defaults to the standard `.env` file.
|
f8790:m1
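The writer presumably emits KEY=value lines; the format string and the default `.env` path are masked, so both are assumptions below. Note the original opens the file in 'wb' while writing str lines, which would raise TypeError on Python 3; the sketch uses text mode:

```python
import os

def save(filepath=None, **kwargs):
    """Write keyword arguments as KEY=value lines to an env file."""
    if filepath is None:
        filepath = os.path.join('.env')  # assumed default file name
    with open(filepath, 'w') as fh:      # text mode; the original used 'wb'
        fh.writelines('{}={}\n'.format(key.upper(), val)
                      for key, val in kwargs.items())

# save(port=8000, name='demo') writes:
#   PORT=8000
#   NAME=demo
```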
|
def load(filepath=None):
|
if filepath and os.path.exists(filepath):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>if not os.path.exists('<STR_LIT>'):<EOL><INDENT>return False<EOL><DEDENT>filepath = os.path.join('<STR_LIT>')<EOL><DEDENT>for key, value in _get_line_(filepath):<EOL><INDENT>os.environ.setdefault(key, str(value))<EOL><DEDENT>return True<EOL>
|
Reads a .env file into os.environ.
For a given filepath, open the file and read its contents into os.environ.
If filepath is not set, look in the current directory for a .env file.
|
f8790:m2
|
def _get_line_(filepath):
|
for line in open(filepath):<EOL><INDENT>line = line.strip()<EOL>if line.startswith('<STR_LIT:#>') or '<STR_LIT:=>' not in line:<EOL><INDENT>continue<EOL><DEDENT>key, value = line.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>key = key.strip().upper()<EOL>value = value.strip()<EOL>if not (key and value):<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>value = ast.literal_eval(value)<EOL><DEDENT>except (ValueError, SyntaxError):<EOL><INDENT>pass<EOL><DEDENT>yield (key, value)<EOL><DEDENT>
|
Gets each line from the file and parses the data.
Attempts to translate each value into a Python type where possible
(falls back to string).
|
f8790:m3
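Assuming `_get_line_` from the row above is in scope, a .env file parses like this:

```python
with open('.env', 'w') as fh:  # illustrative file contents
    fh.write('# comment lines and lines without = are skipped\n'
             'PORT=8000\n'
             'NAME = demo\n')

print(list(_get_line_('.env')))
# -> [('PORT', 8000), ('NAME', 'demo')]
#    literal_eval types '8000'; 'demo' falls back to a plain string
```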
|
def get_config_path():
|
dir_path = (os.getenv('<STR_LIT>') if os.name == "<STR_LIT>"<EOL>else os.path.expanduser('<STR_LIT>'))<EOL>return os.path.join(dir_path, '<STR_LIT>')<EOL>
|
Put together the default configuration path based on the OS.
|
f8792:m0
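A runnable sketch with assumed values for every masked literal: the environment variable is presumably APPDATA with an os.name check for 'nt', and the fallback directory and file name are guesses:

```python
import os

def get_config_path(filename='config.ini'):  # file name is hypothetical
    dir_path = (os.getenv('APPDATA') if os.name == 'nt'  # assumed values
                else os.path.expanduser('~'))            # assumed fallback
    return os.path.join(dir_path, filename)
```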
|
def read_config():
|
config = configparser.RawConfigParser(allow_no_value=True)<EOL>config.read(get_config_path())<EOL>if not config.has_section('<STR_LIT>'):<EOL><INDENT>config.add_section('<STR_LIT>')<EOL>config.set('<STR_LIT>', '<STR_LIT:key>', '<STR_LIT>')<EOL>config.set('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>write_config(config)<EOL><DEDENT>return config<EOL>
|
Read configuration file
|
f8792:m1
|
def write_config(config):
|
with open(get_config_path(), '<STR_LIT:w>') as configfile:<EOL><INDENT>config.write(configfile)<EOL><DEDENT>
|
Write configuration file
|
f8792:m2
|
def print_table(document, *columns):
|
headers = []<EOL>for _, header in columns:<EOL><INDENT>headers.append(header)<EOL><DEDENT>table = []<EOL>for element in document:<EOL><INDENT>row = []<EOL>for item, _ in columns:<EOL><INDENT>if item in element:<EOL><INDENT>row.append(element[item])<EOL><DEDENT>else:<EOL><INDENT>row.append(None)<EOL><DEDENT><DEDENT>table.append(row)<EOL><DEDENT>print(tabulate.tabulate(table, headers))<EOL>
|
Print a JSON document as a table.
|
f8792:m3
|
def print_trip_table(document):
|
headers = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT:Name>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>table = []<EOL>altnr = <NUM_LIT:0><EOL>for alternative in document:<EOL><INDENT>altnr += <NUM_LIT:1><EOL>first_trip_in_alt = True<EOL>if not isinstance(alternative['<STR_LIT>'], list):<EOL><INDENT>alternative['<STR_LIT>'] = [alternative['<STR_LIT>']]<EOL><DEDENT>for part in alternative['<STR_LIT>']:<EOL><INDENT>orig = part['<STR_LIT>']<EOL>dest = part['<STR_LIT>']<EOL>row = [<EOL>altnr if first_trip_in_alt else None,<EOL>part['<STR_LIT:name>'],<EOL>orig['<STR_LIT>'] if '<STR_LIT>' in orig else orig['<STR_LIT:time>'],<EOL>orig['<STR_LIT>'],<EOL>part['<STR_LIT>'] if '<STR_LIT>' in part else None,<EOL>dest['<STR_LIT:name>'],<EOL>dest['<STR_LIT>'],<EOL>dest['<STR_LIT>'] if '<STR_LIT>' in dest else dest['<STR_LIT:time>'],<EOL>]<EOL>table.append(row)<EOL>first_trip_in_alt = False<EOL><DEDENT><DEDENT>print(tabulate.tabulate(table, headers))<EOL>
|
Print trip table
|
f8792:m4
|
def main():
|
config = read_config()<EOL>key = config.get('<STR_LIT>', '<STR_LIT:key>')<EOL>secret = config.get('<STR_LIT>', '<STR_LIT>')<EOL>parser = argparse.ArgumentParser(<EOL>description=u'<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:?>' if key else None,<EOL>default=key,<EOL>help='<STR_LIT>')<EOL>parser.add_argument(<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>nargs='<STR_LIT:?>' if secret else None,<EOL>default=secret,<EOL>help='<STR_LIT>')<EOL>service_parser = parser.add_subparsers(<EOL>dest='<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>service_parser.add_parser(<EOL>'<STR_LIT:store>',<EOL>help='<STR_LIT>')<EOL>location_parser = service_parser.add_parser(<EOL>'<STR_LIT:location>',<EOL>help='<STR_LIT>')<EOL>location_subparser = location_parser.add_subparsers(<EOL>help='<STR_LIT>',<EOL>dest='<STR_LIT>')<EOL>location_name_parser = location_subparser.add_parser(<EOL>'<STR_LIT:name>',<EOL>help='<STR_LIT>')<EOL>location_name_parser.add_argument(<EOL>'<STR_LIT:name>',<EOL>help='<STR_LIT>')<EOL>location_subparser.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>location_nearbystops_parser = location_subparser.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>location_nearbystops_parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>location_nearbystops_parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>location_nearbystops_parser = location_subparser.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>location_nearbystops_parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>location_nearbystops_parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>arrival_parser = service_parser.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>arrival_parser.add_argument(<EOL>'<STR_LIT:id>',<EOL>help='<STR_LIT>')<EOL>arrival_parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>nargs='<STR_LIT:?>',<EOL>help='<STR_LIT>')<EOL>arrival_parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>nargs='<STR_LIT:?>',<EOL>help='<STR_LIT>')<EOL>arrival_parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=None,<EOL>help='<STR_LIT>')<EOL>departure_parser = service_parser.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT:id>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>nargs='<STR_LIT:?>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>nargs='<STR_LIT:?>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT>',<EOL>default=None,<EOL>help='<STR_LIT>')<EOL>departure_parser = service_parser.add_parser(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>nargs='<STR_LIT:?>',<EOL>help='<STR_LIT>')<EOL>departure_parser.add_argument(<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>nargs='<STR_LIT:?>',<EOL>help='<STR_LIT>')<EOL>args = parser.parse_args()<EOL>planner = JournyPlanner(<EOL>key=args.key,<EOL>secret=args.secret)<EOL>def name_to_id(attribute):<EOL><INDENT>"""<STR_LIT>"""<EOL>if not hasattr(args, attribute):<EOL><INDENT>return<EOL><DEDENT>value = getattr(args, attribute)<EOL>if not value or value.isdigit():<EOL><INDENT>return<EOL><DEDENT>setattr(args, attribute, planner.location_name(value)[<NUM_LIT:0>]['<STR_LIT:id>'])<EOL><DEDENT>name_to_id('<STR_LIT:id>')<EOL>name_to_id('<STR_LIT>')<EOL>name_to_id('<STR_LIT>')<EOL>name_to_id('<STR_LIT>')<EOL>date = datetime.now()<EOL>if hasattr(args, '<STR_LIT:date>') and args.date:<EOL><INDENT>newdate = datetime.strptime(args.date, '<STR_LIT>')<EOL>date = date.replace(<EOL>year=newdate.year,<EOL>month=newdate.month,<EOL>day=newdate.day)<EOL><DEDENT>if hasattr(args, '<STR_LIT:time>') and args.time:<EOL><INDENT>newtime = datetime.strptime(args.time, '<STR_LIT>')<EOL>date = date.replace(<EOL>hour=newtime.hour,<EOL>minute=newtime.minute)<EOL><DEDENT>if args.service == '<STR_LIT:store>':<EOL><INDENT>config.set('<STR_LIT>', '<STR_LIT:key>', args.key)<EOL>config.set('<STR_LIT>', '<STR_LIT>', args.secret)<EOL>write_config(config)<EOL><DEDENT>if args.service == '<STR_LIT:location>':<EOL><INDENT>if args.location_method == '<STR_LIT>':<EOL><INDENT>print_table(<EOL>planner.location_allstops(),<EOL>('<STR_LIT:id>', '<STR_LIT>'),<EOL>('<STR_LIT:name>', '<STR_LIT:Name>'),<EOL>('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>if args.location_method == '<STR_LIT:name>':<EOL><INDENT>print_table(<EOL>planner.location_name(args.name),<EOL>('<STR_LIT:id>', '<STR_LIT>'),<EOL>('<STR_LIT:name>', '<STR_LIT:Name>'))<EOL><DEDENT>if args.location_method == '<STR_LIT>':<EOL><INDENT>print_table(<EOL>planner.location_nearbystops(args.lat, args.lon),<EOL>('<STR_LIT:id>', '<STR_LIT>'),<EOL>('<STR_LIT:name>', '<STR_LIT:Name>'),<EOL>('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>if args.location_method == '<STR_LIT>':<EOL><INDENT>print_table(<EOL>[planner.location_nearbyaddress(args.lat, args.lon)],<EOL>('<STR_LIT:name>', '<STR_LIT:Name>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT><DEDENT>elif args.service == '<STR_LIT>':<EOL><INDENT>print_table(<EOL>planner.arrivalboard(<EOL>args.id,<EOL>date=date,<EOL>direction=args.direction),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT:time>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>elif args.service == '<STR_LIT>':<EOL><INDENT>print_table(<EOL>planner.departureboard(<EOL>args.id,<EOL>date=date,<EOL>direction=args.direction),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT:time>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'),<EOL>('<STR_LIT>', '<STR_LIT>'))<EOL><DEDENT>elif args.service == '<STR_LIT>':<EOL><INDENT>print_trip_table(<EOL>planner.trip(<EOL>args.originId,<EOL>args.destinationId,<EOL>date=date))<EOL><DEDENT>else:<EOL><INDENT>parser.print_help()<EOL><DEDENT>
|
Main function
|
f8792:m5
|
def _get_node(response, *ancestors):
|
document = response<EOL>for ancestor in ancestors:<EOL><INDENT>if ancestor not in document:<EOL><INDENT>return {}<EOL><DEDENT>else:<EOL><INDENT>document = document[ancestor]<EOL><DEDENT><DEDENT>return document<EOL>
|
Traverse tree to node
|
f8793:m0
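Two illustrative calls (the key names are made up; the real response keys are masked):

```python
response = {'LocationList': {'StopLocation': [{'id': 1}]}}

_get_node(response, 'LocationList', 'StopLocation')  # -> [{'id': 1}]
_get_node(response, 'LocationList', 'missing')       # -> {} on a missing key
```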
|
def update_token(self):
|
headers = {<EOL>'<STR_LIT:Content-Type>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>' + base64.b64encode(<EOL>(self._key + '<STR_LIT::>' + self._secret).encode()).decode()<EOL>}<EOL>data = {'<STR_LIT>': '<STR_LIT>'}<EOL>response = requests.post(TOKEN_URL, data=data, headers=headers)<EOL>obj = json.loads(response.content.decode('<STR_LIT>'))<EOL>self._token = obj['<STR_LIT>']<EOL>self._token_expire_date = (<EOL>datetime.now() +<EOL>timedelta(minutes=self._expiery))<EOL>
|
Get token from key and secret
|
f8793:c1:m1
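This is the standard OAuth2 client-credentials exchange: key and secret are Base64-encoded into a Basic auth header and posted to the token endpoint. A sketch in which the URL, grant type, and response field name are all assumptions:

```python
import base64
import json
from datetime import datetime, timedelta

import requests

TOKEN_URL = 'https://example.com/token'  # placeholder endpoint

def fetch_token(key, secret, expiry_minutes=20):
    """Return (token, expiry) via the client-credentials flow."""
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'Basic ' + base64.b64encode(
            (key + ':' + secret).encode()).decode(),
    }
    data = {'grant_type': 'client_credentials'}  # assumed grant type
    response = requests.post(TOKEN_URL, data=data, headers=headers)
    obj = json.loads(response.content.decode('utf-8'))
    # 'access_token' is an assumed response field name
    return obj['access_token'], datetime.now() + timedelta(minutes=expiry_minutes)
```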
|
def location_allstops(self):
|
response = self._request(<EOL>'<STR_LIT>')<EOL>return _get_node(response, '<STR_LIT>', '<STR_LIT>')<EOL>
|
location.allstops
|
f8793:c1:m2
|
def location_nearbystops(self, origin_coord_lat, origin_coord_long):
|
response = self._request(<EOL>'<STR_LIT>',<EOL>originCoordLat=origin_coord_lat,<EOL>originCoordLong=origin_coord_long)<EOL>return _get_node(response, '<STR_LIT>', '<STR_LIT>')<EOL>
|
location.nearbystops
|
f8793:c1:m3
|
def location_nearbyaddress(self, origin_coord_lat, origin_coord_long):
|
response = self._request(<EOL>'<STR_LIT>',<EOL>originCoordLat=origin_coord_lat,<EOL>originCoordLong=origin_coord_long)<EOL>return _get_node(response, '<STR_LIT>', '<STR_LIT>')<EOL>
|
location.nearbyaddress
|
f8793:c1:m4
|
def location_name(self, name):
|
response = self._request(<EOL>'<STR_LIT>',<EOL>input=name)<EOL>return _get_node(response, '<STR_LIT>', '<STR_LIT>')<EOL>
|
location.name
|
f8793:c1:m5
|
def arrivalboard(self, stop_id, date=None, direction=None):
|
date = date if date else datetime.now()<EOL>request_parameters = {<EOL>'<STR_LIT:id>': stop_id,<EOL>'<STR_LIT:date>': date.strftime(DATE_FORMAT),<EOL>'<STR_LIT:time>': date.strftime(TIME_FORMAT)<EOL>}<EOL>if direction:<EOL><INDENT>request_parameters['<STR_LIT>'] = direction<EOL><DEDENT>response = self._request(<EOL>'<STR_LIT>',<EOL>**request_parameters)<EOL>return _get_node(response, '<STR_LIT>', '<STR_LIT>')<EOL>
|
arrivalBoard
|
f8793:c1:m6
|
def departureboard(self, stop_id, date=None, direction=None):
|
date = date if date else datetime.now()<EOL>request_parameters = {<EOL>'<STR_LIT:id>': stop_id,<EOL>'<STR_LIT:date>': date.strftime(DATE_FORMAT),<EOL>'<STR_LIT:time>': date.strftime(TIME_FORMAT)<EOL>}<EOL>if direction:<EOL><INDENT>request_parameters['<STR_LIT>'] = direction<EOL><DEDENT>response = self._request(<EOL>'<STR_LIT>',<EOL>**request_parameters)<EOL>return _get_node(response, '<STR_LIT>', '<STR_LIT>')<EOL>
|
departureBoard
|
f8793:c1:m7
|
def trip(self, origin_id, dest_id, date=None):
|
date = date if date else datetime.now()<EOL>response = self._request(<EOL>'<STR_LIT>',<EOL>originId=origin_id,<EOL>destId=dest_id,<EOL>date=date.strftime(DATE_FORMAT),<EOL>time=date.strftime(TIME_FORMAT))<EOL>return _get_node(response, '<STR_LIT>', '<STR_LIT>')<EOL>
|
trip
|
f8793:c1:m8
|
def _request(self, service, **parameters):
|
urlformat = "<STR_LIT>"<EOL>url = urlformat.format(<EOL>baseurl=API_BASE_URL,<EOL>service=service,<EOL>parameters="<STR_LIT:&>".join([<EOL>"<STR_LIT>".format(key, value) for key, value in parameters.items()<EOL>]))<EOL>if datetime.now() > self._token_expire_date:<EOL><INDENT>self.update_token()<EOL><DEDENT>headers = {'<STR_LIT>': '<STR_LIT>' + self._token}<EOL>res = requests.get(url, headers=headers)<EOL>if res.status_code == <NUM_LIT:200>:<EOL><INDENT>return json.loads(res.content.decode('<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>raise Error('<STR_LIT>' + str(res.status_code) +<EOL>str(res.content))<EOL><DEDENT>
|
request builder
|
f8793:c1:m9
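URL assembly reduces to joining key=value pairs with '&'; the masked format strings are presumably '{baseurl}/{service}?{parameters}' and '{}={}'. A sketch:

```python
def build_url(baseurl, service, **parameters):
    """Assemble the request URL the way _request does (format strings assumed)."""
    query = '&'.join('{}={}'.format(key, value)
                     for key, value in parameters.items())
    return '{baseurl}/{service}?{parameters}'.format(
        baseurl=baseurl, service=service, parameters=query)

# build_url('https://api.example.com', 'trip', originId=1, destId=2)
# -> 'https://api.example.com/trip?originId=1&destId=2'
```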
|
@click.command()<EOL>@click.option('<STR_LIT>', is_flag=True, help="<STR_LIT>")<EOL>@click.option('<STR_LIT>', type=click.Choice(['<STR_LIT>', '<STR_LIT>']), help="<STR_LIT>")<EOL>@pass_wio<EOL>def cli(wio, get_debug, debug):
|
if debug:<EOL><INDENT>if debug == "<STR_LIT>":<EOL><INDENT>cmd = "<STR_LIT>"<EOL><DEDENT>elif debug == "<STR_LIT>":<EOL><INDENT>cmd = "<STR_LIT>"<EOL><DEDENT>if not cmd:<EOL><INDENT>return debug_error()<EOL><DEDENT>result = udp.send(cmd)<EOL>if not result:<EOL><INDENT>return debug_error()<EOL><DEDENT>click.echo("<STR_LIT>")<EOL><DEDENT>elif get_debug:<EOL><INDENT>try:<EOL><INDENT>result = udp.udp_debug()<EOL><DEDENT>except Exception as e:<EOL><INDENT>return get_debug_error()<EOL><DEDENT>if result == "<STR_LIT:1>":<EOL><INDENT>click.echo("<STR_LIT>")<EOL><DEDENT>elif result == '<STR_LIT:0>':<EOL><INDENT>click.echo("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>return get_debug_error()<EOL><DEDENT><DEDENT>else:<EOL><INDENT>click.echo("<STR_LIT>")<EOL>click.echo("<STR_LIT>")<EOL>click.echo("<STR_LIT>")<EOL>click.echo()<EOL>click.echo("<STR_LIT>")<EOL>click.echo("<STR_LIT>")<EOL>click.echo("<STR_LIT>")<EOL><DEDENT>
|
Change settings of the device.
\b
DOES:
The config command lets you change settings of the device through UDP.
1. Ensure your device is in Configure Mode.
2. Change your computer network to Wio's AP.
\b
EXAMPLE:
wio config --debug [on|off], enable/disable wio debug
wio config --get-debug, get wio debug status
|
f8797:m0
|
@click.command()<EOL>@click.option('<STR_LIT>', nargs=<NUM_LIT:1>, type=unicode, help="<STR_LIT>")<EOL>@pass_wio<EOL>def cli(wio, send):
|
command = send<EOL>click.echo("<STR_LIT>".format(command))<EOL>result = udp.common_send(command)<EOL>if result is None:<EOL><INDENT>return debug_error()<EOL><DEDENT>else:<EOL><INDENT>click.echo(result)<EOL><DEDENT>
|
Sends a UDP command to the wio device.
\b
DOES:
Support "VERSION", "SCAN", "Blank?", "DEBUG", "ENDEBUG: 1", "ENDEBUG: 0"
"APCFG: AP\\tPWDs\\tTOKENs\\tSNs\\tSERVER_Domains\\tXSERVER_Domain\\t\\r\\n",
Note:
1. Ensure your device is in Configure Mode.
2. Change your computer network to Wio's AP.
\b
EXAMPLE:
wio udp --send [command], send UDP command
|
f8798:m0
|
@click.command()<EOL>@pass_wio<EOL>def cli(wio):
|
user_token = wio.config.get("<STR_LIT>", None)<EOL>mserver_url = wio.config.get("<STR_LIT>", None)<EOL>if not mserver_url or not user_token:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>'))<EOL>return<EOL><DEDENT>email = wio.config.get("<STR_LIT:email>",None)<EOL>server = wio.config.get("<STR_LIT>",None)<EOL>token = wio.config.get("<STR_LIT>",None)<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>" + click.style(server, fg='<STR_LIT>', bold=True) + '<STR_LIT:U+002CU+0020>' +<EOL>click.style(mserver_url, fg='<STR_LIT>', bold=True))<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>" + click.style(email, fg='<STR_LIT>', bold=True))<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>" + click.style(token, fg='<STR_LIT>', bold=True))<EOL>
|
Login state.
\b
DOES:
Display login email, token, and server URL.
\b
USE:
wio state
|
f8799:m0
|
@click.command()<EOL>@pass_wio<EOL>def cli(wio):
|
user_token = wio.config.get("<STR_LIT>", None)<EOL>api_prefix = wio.config.get("<STR_LIT>", None)<EOL>if not api_prefix or not user_token:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>'))<EOL>return<EOL><DEDENT>thread = termui.waiting_echo("<STR_LIT>")<EOL>thread.daemon = True<EOL>thread.start()<EOL>params = {"<STR_LIT>":user_token}<EOL>try:<EOL><INDENT>r = requests.get("<STR_LIT>" %(api_prefix, node_list_endpoint), params=params, timeout=<NUM_LIT:10>, verify=verify)<EOL>r.raise_for_status()<EOL>json_response = r.json()<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>if r.status_code == <NUM_LIT>:<EOL><INDENT>error = r.json().get("<STR_LIT:error>", None)<EOL>click.secho("<STR_LIT>" %error, fg='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>except Exception as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>return<EOL><DEDENT>nodes = json_response.get("<STR_LIT>", None)<EOL>thread.message("<STR_LIT>")<EOL>node_list = []<EOL>for n in nodes:<EOL><INDENT>if n['<STR_LIT:name>'] == '<STR_LIT>':<EOL><INDENT>params = {"<STR_LIT>":user_token, "<STR_LIT>":n['<STR_LIT>']}<EOL>try:<EOL><INDENT>r = requests.post("<STR_LIT>" %(api_prefix, nodes_delete_endpoint), params=params, timeout=<NUM_LIT:10>, verify=verify)<EOL>r.raise_for_status()<EOL>json_response = r.json()<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>if r.status_code == <NUM_LIT>:<EOL><INDENT>error = r.json().get("<STR_LIT:error>", None)<EOL>click.secho("<STR_LIT>" %error, fg='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>except Exception as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>return<EOL><DEDENT>continue<EOL><DEDENT>if n["<STR_LIT>"]:<EOL><INDENT>params = {"<STR_LIT>":n["<STR_LIT>"]}<EOL>try:<EOL><INDENT>r = requests.get("<STR_LIT>" %(api_prefix, well_known_endpoint), params=params, timeout=<NUM_LIT:15>, verify=verify)<EOL>r.raise_for_status()<EOL>json_response = r.json()<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>if r.status_code == <NUM_LIT>:<EOL><INDENT>error = r.json().get("<STR_LIT:error>", None)<EOL>click.secho("<STR_LIT>" %error, fg='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL><DEDENT>n['<STR_LIT>'] = []<EOL><DEDENT>except Exception as e:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>n['<STR_LIT>'] = []<EOL><DEDENT>else:<EOL><INDENT>well_known = json_response["<STR_LIT>"] <EOL>n['<STR_LIT>'] = well_known<EOL><DEDENT>n['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>n['<STR_LIT>'] = []<EOL>n['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>n['<STR_LIT>'] = "<STR_LIT>" %(api_prefix, node_resources_endpoint, n['<STR_LIT>'])<EOL>node_list.append(n)<EOL><DEDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>termui.tree(node_list)<EOL>
|
Displays a list of your devices.
\b
DOES:
Displays a list of your devices, as well as their APIs
\b
USE:
wio list
|
f8800:m0
|
@click.command()<EOL>@pass_wio<EOL>def cli(wio):
|
mserver = wio.config.get("<STR_LIT>", None)<EOL>if mserver:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>" +<EOL>click.style(mserver, fg='<STR_LIT>'))<EOL>if click.confirm(click.style('<STR_LIT>', bold=True), default=False):<EOL><INDENT>choise_server(wio)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>choise_server(wio)<EOL><DEDENT>if wio.config.get("<STR_LIT>") == '<STR_LIT>':<EOL><INDENT>email = click.prompt(click.style('<STR_LIT>', fg='<STR_LIT>') +<EOL>click.style('<STR_LIT>', bold=True), type=str)<EOL>password = click.prompt(click.style('<STR_LIT>', fg='<STR_LIT>') +<EOL>click.style('<STR_LIT>', bold=True), hide_input=True, type=str)<EOL>server_url = wio.config.get("<STR_LIT>")<EOL>thread = termui.waiting_echo("<STR_LIT>")<EOL>thread.daemon = True<EOL>thread.start()<EOL>try:<EOL><INDENT>json_response = login_wio(server_url, email, password)<EOL>token = json_response['<STR_LIT>']<EOL><DEDENT>except Exception as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>return<EOL><DEDENT><DEDENT>else:<EOL><INDENT>token = click.prompt(click.style('<STR_LIT>', fg='<STR_LIT>') +<EOL>click.style('<STR_LIT>', bold=True) +<EOL>click.style('<STR_LIT>', bold=True, fg='<STR_LIT>') +<EOL>click.style('<STR_LIT>', fg='<STR_LIT>') +<EOL>click.style('<STR_LIT>', bold=True), type=str)<EOL>email = '<STR_LIT>'<EOL>thread = termui.waiting_echo("<STR_LIT>")<EOL>thread.daemon = True<EOL>thread.start()<EOL>try:<EOL><INDENT>check_token(wio.config.get("<STR_LIT>"), token)<EOL><DEDENT>except Exception as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>return<EOL><DEDENT><DEDENT>wio.set_config('<STR_LIT:email>', email)<EOL>wio.set_config('<STR_LIT>', token)<EOL>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>click.secho("<STR_LIT>", fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>")<EOL>
|
Login with your Wio account.
\b
DOES:
Login and save an access token for interacting with your account on the Wio.
\b
USE:
wio login
|
f8801:m0
|
@click.command()<EOL>@click.argument('<STR_LIT>')<EOL>@click.argument('<STR_LIT>')<EOL>@click.argument('<STR_LIT>')<EOL>@pass_wio<EOL>def cli(wio, method, endpoint, token,):
|
user_token = wio.config.get("<STR_LIT>", None)<EOL>api_prefix = wio.config.get("<STR_LIT>", None)<EOL>if not api_prefix or not user_token:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>'))<EOL>return<EOL><DEDENT>api = "<STR_LIT>" %(api_prefix, endpoint, token)<EOL>try:<EOL><INDENT>if method == "<STR_LIT:GET>":<EOL><INDENT>r = requests.get(api, timeout=<NUM_LIT:15>, verify=verify)<EOL><DEDENT>elif method == "<STR_LIT:POST>":<EOL><INDENT>r = requests.post(api, timeout=<NUM_LIT:15>, verify=verify)<EOL><DEDENT>else:<EOL><INDENT>click.secho("<STR_LIT>" %method, fg='<STR_LIT>')<EOL>return<EOL><DEDENT>r.raise_for_status()<EOL>json_response = r.json()<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>if r.status_code == <NUM_LIT>:<EOL><INDENT>click.echo(r.json())<EOL><DEDENT>else:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>except Exception as e:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>return<EOL><DEDENT>click.echo(r.json())<EOL>
|
Request an API and return JSON.
\b
DOES:
Call an API on your device.
token: device_token
method: GET or POST
endpoint: device_path, such as: /v1/node/GroveTempHumProD0/temperature
wio call <device_token> <request_method> <device_path>
\b
EXAMPLE:
wio call 98dd464bd268d4dc4cb9b37e4e779313 GET /v1/node/GroveTempHumProD0/temperature
|
f8802:m0
|
@click.command()<EOL>@click.argument('<STR_LIT>')<EOL>@pass_wio<EOL>def cli(wio, sn):
|
user_token = wio.config.get("<STR_LIT>", None)<EOL>api_prefix = wio.config.get("<STR_LIT>", None)<EOL>if not api_prefix or not user_token:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>'))<EOL>return<EOL><DEDENT>params = {"<STR_LIT>":user_token, "<STR_LIT>":sn}<EOL>try:<EOL><INDENT>r = requests.post("<STR_LIT>" %(api_prefix, nodes_delete_endpoint), params=params, timeout=<NUM_LIT:10>, verify=verify)<EOL>r.raise_for_status()<EOL>json_response = r.json()<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>if r.status_code == <NUM_LIT>:<EOL><INDENT>error = r.json().get("<STR_LIT:error>", None)<EOL>click.secho("<STR_LIT>" %error, fg='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>except Exception as e:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>return<EOL><DEDENT>click.secho('<STR_LIT>', fg='<STR_LIT>')<EOL>
|
Delete a device.
\b
DOES:
Delete a device.
sn: device_sn
wio delete <device_sn>
\b
EXAMPLE:
wio delete 2885b2cab8abc5fb8e229e4a77bf5e4d
|
f8803:m0
|
def get_new(mserver_url, token, board):
|
thread = termui.waiting_echo("<STR_LIT>")<EOL>thread.daemon = True<EOL>thread.start()<EOL>try:<EOL><INDENT>params = {"<STR_LIT:name>":"<STR_LIT>", "<STR_LIT>":board, "<STR_LIT>":token}<EOL>r = requests.post("<STR_LIT>" %(mserver_url, nodes_create_endpoint), params=params, timeout=<NUM_LIT:10>, verify=verify)<EOL>r.raise_for_status()<EOL>json_response = r.json()<EOL><DEDENT>except requests.exceptions.HTTPError as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>if r.status_code == <NUM_LIT>:<EOL><INDENT>error = r.json().get("<STR_LIT:error>", None)<EOL>click.secho("<STR_LIT>" %error, fg='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL><DEDENT>return None<EOL><DEDENT>except Exception as e:<EOL><INDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>click.secho("<STR_LIT>" %e, fg='<STR_LIT>')<EOL>return None<EOL><DEDENT>thread.stop('<STR_LIT>')<EOL>thread.join()<EOL>return json_response<EOL>
|
Get node SN and key.
|
f8804:m0
|
@click.command()<EOL>@pass_wio<EOL>def cli(wio):
|
token = wio.config.get("<STR_LIT>", None)<EOL>mserver_url = wio.config.get("<STR_LIT>", None)<EOL>msvr_ip = wio.config.get("<STR_LIT>", None)<EOL>if not mserver_url or not token:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>'))<EOL>return<EOL><DEDENT>msvr = urlparse(mserver_url).hostname<EOL>xsvr = msvr<EOL>xsvr_ip = msvr_ip<EOL>board = '<STR_LIT>'<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>")<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>') +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>') +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>'))<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>') +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>') +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>') +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>'))<EOL>click.echo()<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>if not click.confirm(click.style('<STR_LIT>', bold=True), default=True):<EOL><INDENT>click.echo('<STR_LIT>')<EOL>return<EOL><DEDENT>while <NUM_LIT:1>:<EOL><INDENT>for x in range(len(boards)):<EOL><INDENT>click.echo("<STR_LIT>" %(x, boards[x]))<EOL><DEDENT>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>value = click.prompt(click.style('<STR_LIT>', bold=True), type=int)<EOL>if value >= <NUM_LIT:0> and value < len(boards):<EOL><INDENT>board = boards[value]<EOL>break<EOL><DEDENT>else:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>")<EOL><DEDENT><DEDENT>r = get_new(mserver_url, token, board)<EOL>if not r:<EOL><INDENT>return<EOL><DEDENT>node_key = r["<STR_LIT>"]<EOL>node_sn = r["<STR_LIT>"]<EOL>if board == WIO_LINK_V1_0:<EOL><INDENT>try:<EOL><INDENT>ports = serial_list.serial_ports()<EOL><DEDENT>except serial.SerialException as e:<EOL><INDENT>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo(e)<EOL>if e.errno == <NUM_LIT>:<EOL><INDENT>click.echo("<STR_LIT>")<EOL><DEDENT>return<EOL><DEDENT>count = len(ports)<EOL>port = None<EOL>if count == <NUM_LIT:0>:<EOL><INDENT>pass <EOL><DEDENT>elif count == <NUM_LIT:1>:<EOL><INDENT>port = ports[<NUM_LIT:0>]<EOL><DEDENT>elif count >= <NUM_LIT:2>:<EOL><INDENT>while <NUM_LIT:1>:<EOL><INDENT>for x in range(len(ports)):<EOL><INDENT>click.echo("<STR_LIT>" %(x, ports[x]))<EOL><DEDENT>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>value = click.prompt(click.style('<STR_LIT>', bold=True), type=int)<EOL>if value >= <NUM_LIT:0> and value < len(ports):<EOL><INDENT>port = ports[value]<EOL>break<EOL><DEDENT>else:<EOL><INDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>")<EOL><DEDENT><DEDENT><DEDENT>if not port:<EOL><INDENT>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>click.echo("<STR_LIT>")<EOL>click.secho('<STR_LIT>', fg='<STR_LIT>', nl=False)<EOL>value = click.confirm(<EOL>click.style('<STR_LIT>', bold=True), default=True)<EOL>r = upd_send(msvr, msvr_ip, xsvr, xsvr_ip, node_sn, node_key)<EOL>if not r:<EOL><INDENT>return<EOL><DEDENT>d_name = r['<STR_LIT:name>']<EOL>check_connect(mserver_url, token, node_sn, d_name)<EOL>return<EOL><DEDENT>click.echo(click.style('<STR_LIT>', fg='<STR_LIT>') + "<STR_LIT>" +<EOL>click.style("<STR_LIT>", fg='<STR_LIT>') + "<STR_LIT>")<EOL>r = serial_send(msvr, msvr_ip, xsvr, xsvr_ip, node_sn, node_key, port)<EOL>if not r:<EOL><INDENT>return<EOL><DEDENT>d_name = r['<STR_LIT:name>']<EOL>check_connect(mserver_url, token, node_sn, d_name)<EOL><DEDENT>elif board == WIO_NODE_V1_0:<EOL><INDENT>r = upd_send(msvr, msvr_ip, xsvr, xsvr_ip, node_sn, node_key)<EOL>if not r:<EOL><INDENT>return<EOL><DEDENT>d_name = r['<STR_LIT:name>']<EOL>check_connect(mserver_url, token, node_sn, d_name)<EOL><DEDENT>
|
Add a new device over a USB connection.
\b
DOES:
Guides you through setting up a new device, and getting it on your network.
\b
USE:
wio setup
|
f8804:m4
|
def serial_ports():
|
if sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>ports = ['<STR_LIT>' % (i + <NUM_LIT:1>) for i in range(<NUM_LIT>)]<EOL><DEDENT>elif sys.platform.startswith('<STR_LIT>') or sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>ports = glob.glob('<STR_LIT>') <EOL><DEDENT>elif sys.platform.startswith('<STR_LIT>'):<EOL><INDENT>ports = glob.glob('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise EnvironmentError('<STR_LIT>')<EOL><DEDENT>result = []<EOL>for port in ports:<EOL><INDENT>try:<EOL><INDENT>s = serial.Serial(port)<EOL>s.close()<EOL>result.append(port)<EOL><DEDENT>except serial.SerialException as e:<EOL><INDENT>if e.errno == <NUM_LIT>:<EOL><INDENT>raise e<EOL><DEDENT>pass<EOL><DEDENT>except OSError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return result<EOL>
|
Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
|
f8806:m0
|
@click.command(cls=ComplexCLI, context_settings=CONTEXT_SETTINGS)<EOL>@click.version_option(version)<EOL>@click.pass_context<EOL>def cli(ctx):
|
ctx.obj = Wio()<EOL>cur_dir = os.path.abspath(os.path.expanduser("<STR_LIT>"))<EOL>if not os.path.exists(cur_dir):<EOL><INDENT>text = {"<STR_LIT:email>":"<STR_LIT>", "<STR_LIT>":"<STR_LIT>"}<EOL>os.mkdir(cur_dir)<EOL>open("<STR_LIT>"%cur_dir,"<STR_LIT:w>").write(json.dumps(text))<EOL><DEDENT>db_file_path = '<STR_LIT>' % cur_dir<EOL>config = json.load(open(db_file_path))<EOL>ctx.obj.config = config<EOL>signal.signal(signal.SIGINT, sigint_handler)<EOL>if not verify:<EOL><INDENT>requests.packages.urllib3.disable_warnings(InsecureRequestWarning)<EOL><DEDENT>
|
\b
Welcome to the Wio Command line utility!
https://github.com/Seeed-Studio/wio-cli
For more information Run: wio <command_name> --help
|
f8807:m1
|
def split(inp_str, sep_char, maxsplit=-<NUM_LIT:1>, escape_char='<STR_LIT:\\>'):
|
word_chars = []<EOL>word_chars_append = word_chars.append<EOL>inp_str_iter = iter(inp_str)<EOL>for c in inp_str_iter:<EOL><INDENT>word_chars_append(c)<EOL>if c == escape_char:<EOL><INDENT>try:<EOL><INDENT>next_char = next(inp_str_iter)<EOL><DEDENT>except StopIteration:<EOL><INDENT>continue<EOL><DEDENT>if next_char == sep_char:<EOL><INDENT>word_chars[-<NUM_LIT:1>] = next_char<EOL><DEDENT>else:<EOL><INDENT>word_chars.append(next_char)<EOL><DEDENT><DEDENT>elif c == sep_char:<EOL><INDENT>word_chars.pop()<EOL>yield '<STR_LIT>'.join(word_chars)<EOL>maxsplit -= <NUM_LIT:1><EOL>if maxsplit == <NUM_LIT:0>:<EOL><INDENT>yield '<STR_LIT>'.join(inp_str_iter)<EOL>return<EOL><DEDENT>del word_chars[:]<EOL><DEDENT><DEDENT>yield '<STR_LIT>'.join(word_chars)<EOL>
|
Separates a string on a character, taking into account escapes.
:param str inp_str: string to split.
:param str sep_char: separator character.
:param int maxsplit: maximum number of times to split from left.
:param str escape_char: escape character.
:rtype: __generator[str]
:return: sub-strings generator separated on the `sep_char`.
|
f8812:m0
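Assuming the masked join literal is the empty string, the generator behaves like this (with `split` from the row above in scope):

```python
print(list(split(r'a.b\.c.d', '.')))
# -> ['a', 'b.c', 'd']   the escaped dot survives, unescaped dots split

print(list(split('a.b.c', '.', maxsplit=1)))
# -> ['a', 'b.c']        once maxsplit is spent, the remainder is yielded whole
```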
|
def _full_sub_array(data_obj, xj_path, create_dict_path):
|
if isinstance(data_obj, list):<EOL><INDENT>if xj_path:<EOL><INDENT>res = []<EOL>for d in data_obj:<EOL><INDENT>val, exists = path_lookup(d, xj_path, create_dict_path)<EOL>if exists:<EOL><INDENT>res.append(val)<EOL><DEDENT><DEDENT>return tuple(res), True<EOL><DEDENT>else:<EOL><INDENT>return tuple(data_obj), True<EOL><DEDENT><DEDENT>elif isinstance(data_obj, dict):<EOL><INDENT>if xj_path:<EOL><INDENT>res = []<EOL>for d in data_obj.values():<EOL><INDENT>val, exists = path_lookup(d, xj_path, create_dict_path)<EOL>if exists:<EOL><INDENT>res.append(val)<EOL><DEDENT><DEDENT>return tuple(res), True<EOL><DEDENT>else:<EOL><INDENT>return tuple(data_obj.values()), True<EOL><DEDENT><DEDENT>else:<EOL><INDENT>return None, False<EOL><DEDENT>
|
Retrieves all array or dictionary elements for '*' JSON path marker.
:param dict|list data_obj: The current data object.
:param str xj_path: A json path.
:param bool create_dict_path: create a dict path.
:return: tuple with two values: first is a result and second
a boolean flag telling if this value exists or not.
|
f8812:m1
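With no remaining path the '*' helper flattens the current level into a tuple; with a remaining path it maps the lookup across elements. Directly, for the no-path case:

```python
_full_sub_array([1, 2, 3], '', False)         # -> ((1, 2, 3), True)
_full_sub_array({'a': 1, 'b': 2}, '', False)  # -> ((1, 2), True), dict values
_full_sub_array(42, '', False)                # -> (None, False), not a container
```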
|
def _get_array_index(array_path):
|
if not array_path.startswith('<STR_LIT:@>'):<EOL><INDENT>raise XJPathError('<STR_LIT>')<EOL><DEDENT>array_path = array_path[<NUM_LIT:1>:]<EOL>if array_path == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT:1><EOL><DEDENT>if array_path == '<STR_LIT>':<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if array_path.isdigit() or (array_path.startswith('<STR_LIT:->')<EOL>and array_path[<NUM_LIT:1>:].isdigit()):<EOL><INDENT>return int(array_path)<EOL><DEDENT>else:<EOL><INDENT>raise XJPathError('<STR_LIT>', (array_path,))<EOL><DEDENT>
|
Translates @first @last @1 @-1 expressions into an actual array index.
:param str array_path: Array path in XJ notation.
:rtype: int
:return: Array index.
|
f8812:m2
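Following the docstring's '@first @last @1 @-1' notation (the two masked literals are assumed to be 'first' and 'last'):

```python
_get_array_index('@first')  # -> 0
_get_array_index('@last')   # -> -1
_get_array_index('@-2')     # -> -2
_get_array_index('first')   # raises XJPathError: no leading '@'
```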
|
def _single_array_element(data_obj, xj_path, array_path, create_dict_path):
|
val_type, array_path = _clean_key_type(array_path)<EOL>array_idx = _get_array_index(array_path)<EOL>if data_obj and isinstance(data_obj, (list, tuple)):<EOL><INDENT>try:<EOL><INDENT>value = data_obj[array_idx]<EOL>if val_type is not None and not isinstance(value, val_type):<EOL><INDENT>raise XJPathError('<STR_LIT>'<EOL>'<STR_LIT>' %<EOL>(array_idx, type(value).__name__,<EOL>val_type.__name__))<EOL><DEDENT>if xj_path:<EOL><INDENT>return path_lookup(value, xj_path, create_dict_path)<EOL><DEDENT>else:<EOL><INDENT>return value, True<EOL><DEDENT><DEDENT>except IndexError:<EOL><INDENT>return None, False<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if val_type is not None:<EOL><INDENT>raise XJPathError('<STR_LIT>' %<EOL>type(data_obj).__name__)<EOL><DEDENT>return None, False<EOL><DEDENT>
|
Retrieves a single array for a '@' JSON path marker.
:param list data_obj: The current data object.
:param str xj_path: A json path.
:param str array_path: A lookup key.
:param bool create_dict_path: create a dict path.
|
f8812:m3
|
def _split_path(xj_path):
|
res = xj_path.rsplit('<STR_LIT:.>', <NUM_LIT:1>)<EOL>root_key = res[<NUM_LIT:0>]<EOL>if len(res) > <NUM_LIT:1>:<EOL><INDENT>return root_key, res[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>if root_key and root_key != '<STR_LIT:.>':<EOL><INDENT>return None, root_key<EOL><DEDENT>else:<EOL><INDENT>raise XJPathError('<STR_LIT>', (xj_path,))<EOL><DEDENT><DEDENT>
|
Extract the last piece of XJPath.
:param str xj_path: A XJPath expression.
:rtype: tuple[str|None, str]
:return: A tuple where first element is a root XJPath and the second is
a last piece of key.
|
f8812:m4
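A few calls showing the split behaviour:

```python
_split_path('routes.@first.name')  # -> ('routes.@first', 'name')
_split_path('name')                # -> (None, 'name'), no dot at all
_split_path('')                    # raises XJPathError
```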
|
def validate_path(xj_path):
|
if not isinstance(xj_path, str):<EOL><INDENT>raise XJPathError('<STR_LIT>')<EOL><DEDENT>for path in split(xj_path, '<STR_LIT:.>'):<EOL><INDENT>if path == '<STR_LIT:*>':<EOL><INDENT>continue<EOL><DEDENT>if path.startswith('<STR_LIT:@>'):<EOL><INDENT>if path == '<STR_LIT>' or path == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>int(path[<NUM_LIT:1>:])<EOL><DEDENT>except ValueError:<EOL><INDENT>raise XJPathError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>
|
Validates XJ path.
:param str xj_path: XJ Path
:raise: XJPathError if validation fails.
|
f8812:m5
|
def _clean_key_type(key_name, escape_char=ESCAPE_SEQ):
|
for i in (<NUM_LIT:2>, <NUM_LIT:1>):<EOL><INDENT>if len(key_name) < i:<EOL><INDENT>return None, key_name<EOL><DEDENT>type_v = key_name[-i:]<EOL>if type_v in _KEY_SPLIT:<EOL><INDENT>if len(key_name) <= i:<EOL><INDENT>return _KEY_SPLIT[type_v], '<STR_LIT>'<EOL><DEDENT>esc_cnt = <NUM_LIT:0><EOL>for pos in range(-i - <NUM_LIT:1>, -len(key_name) - <NUM_LIT:1>, -<NUM_LIT:1>):<EOL><INDENT>if key_name[pos] == escape_char:<EOL><INDENT>esc_cnt += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if esc_cnt % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>return _KEY_SPLIT[type_v], key_name[:-i]<EOL><DEDENT>else:<EOL><INDENT>return None, key_name<EOL><DEDENT><DEDENT><DEDENT>return None, key_name<EOL>
|
Removes the type specifier, returning the detected type and
the key name without the specifier.
:param str key_name: A key name containing type postfix.
:rtype: tuple[type|None, str]
:returns: Type definition and cleaned key name.
|
f8812:m7
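_KEY_SPLIT is masked, so the mapping below is hypothetical; the escape-counting rule is that an even number of preceding escape characters leaves the type suffix active:

```python
# hypothetical mapping for illustration only:
# _KEY_SPLIT = {'$i': int}

_clean_key_type('count$i')     # -> (int, 'count')        suffix consumed
_clean_key_type('count\\$i')   # -> (None, 'count\\$i')   odd escapes: literal
_clean_key_type('$i')          # -> (int, '')             bare suffix
_clean_key_type('count')       # -> (None, 'count')       no suffix
```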
|