sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def handle(self, name):
    """
    Call all advices at the provided name.

    This has an analogue in the join point in aspected oriented
    programming, but the analogy is a weak one as we don't have the
    proper metaobject protocol to support this.

    Implementation that make use of this system should make it clear
    that they will call this method with name associated with its
    group before and after its execution, or that the method at hand
    that want this invoked be called by this other conductor method.

    For the Toolchain standard steps (prepare, compile, assemble, link
    and finalize), this handle method will only be called by invoking
    the toolchain as a callable.  Calling those methods piecemeal will
    not trigger the invocation, even though it probably should.

    Modules, classes and methods that desire to call their own handler
    should instead follow the convention where the handle be called
    before and after with the appropriate names.  For instance:

        def test(self, spec):
            spec.handle(BEFORE_TEST)
            # do the things
            spec.handle(AFTER_TEST)

    This arrangement will need to be revisited when a proper system is
    written at the metaclass level.

    Arguments:

    name
        The name of the advices group.  All the callables registered
        to this group will be invoked, last-in-first-out.
    """

    if name in self._called:
        logger.warning(
            "advice group '%s' has been called for this spec %r",
            name, self,
        )
        # only now ensure checking
        self.__advice_stack_frame_protection(currentframe())
    else:
        self._called.add(name)

    # Get a complete clone, so indirect manipulation done to the
    # reference that others have access to will not have an effect
    # within the scope of this execution.  Please refer to the
    # test_toolchain, test_spec_advice_no_infinite_pop test case.
    advices = []
    advices.extend(self._advices.get(name, []))

    # consistency fix: every other debug check in this method uses the
    # DEBUG spec key constant; the bare 'debug' literal was an outlier.
    if advices and self.get(DEBUG):
        logger.debug(
            "handling %d advices in group '%s' ", len(advices), name)

    while advices:
        try:
            # cleanup basically done lifo (last in first out)
            values = advices.pop()
            advice, a, kw = values
            if not ((callable(advice)) and isinstance(a, tuple) and
                    isinstance(kw, dict)):
                raise TypeError
        except ValueError:
            logger.info('Spec advice extraction error: got %s', values)
        except TypeError:
            logger.info('Spec advice malformed: got %s', values)
        else:
            try:
                try:
                    advice(*a, **kw)
                except Exception as e:
                    # get that back by the id.
                    frame = self._frames.get(id(values))
                    if frame:
                        logger.info('Spec advice exception: %r', e)
                        logger.info(
                            'Traceback for original advice:\n%s', frame)
                    # continue on for the normal exception
                    raise
            except AdviceCancel as e:
                # a benign, advice-initiated cancellation; only this
                # advice stops, the group continues.
                logger.info(
                    "advice %s in group '%s' signaled its cancellation "
                    "during its execution: %s", advice, name, e
                )
                if self.get(DEBUG):
                    logger.debug(
                        'showing traceback for cancellation',
                        exc_info=1,
                    )
            except AdviceAbort as e:
                # this is a signaled error with a planned abortion
                logger.warning(
                    "advice %s in group '%s' encountered a known error "
                    "during its execution: %s; continuing with toolchain "
                    "execution", advice, name, e
                )
                if self.get(DEBUG):
                    logger.warning(
                        'showing traceback for error',
                        exc_info=1,
                    )
            except ToolchainCancel:
                # this is the safe cancel
                raise
            except ToolchainAbort as e:
                logger.critical(
                    "an advice in group '%s' triggered an abort: %s",
                    name, str(e)
                )
                raise
            except KeyboardInterrupt:
                raise ToolchainCancel('interrupted')
            except Exception as e:
                # a completely unplanned failure
                logger.critical(
                    "advice %s in group '%s' terminated due to an "
                    "unexpected exception: %s", advice, name, e
                )
                if self.get(DEBUG):
                    logger.critical(
                        'showing traceback for error',
                        exc_info=1,
                    )
Call all advices at the provided name. This has an analogue in the join point in aspected oriented programming, but the analogy is a weak one as we don't have the proper metaobject protocol to support this. Implementation that make use of this system should make it clear that they will call this method with name associated with its group before and after its execution, or that the method at hand that want this invoked be called by this other conductor method. For the Toolchain standard steps (prepare, compile, assemble, link and finalize), this handle method will only be called by invoking the toolchain as a callable. Calling those methods piecemeal will not trigger the invocation, even though it probably should. Modules, classes and methods that desire to call their own handler should instead follow the convention where the handle be called before and after with the appropriate names. For instance: def test(self, spec): spec.handle(BEFORE_TEST) # do the things spec.handle(AFTER_TEST) This arrangement will need to be revisited when a proper system is written at the metaclass level. Arguments: name The name of the advices group. All the callables registered to this group will be invoked, last-in-first-out.
entailment
def realpath(self, spec, key):
    """
    Resolve and update the path key in the spec with its realpath,
    based on the working directory.
    """
    if key not in spec:
        # absent key: nothing to resolve
        return
    original = spec[key]
    if not original:
        logger.warning(
            "cannot resolve realpath of '%s' as it is not defined", key)
        return
    resolved = realpath(join(spec.get(WORKING_DIR, ''), original))
    if resolved != original:
        # only rewrite (and warn) when resolution actually changed it
        spec[key] = resolved
        logger.warning(
            "realpath of '%s' resolved to '%s', spec is updated",
            key, resolved
        )
    return resolved
Resolve and update the path key in the spec with its realpath, based on the working directory.
entailment
def setup_prefix_suffix(self):
    """
    Set up the compile prefix, sourcepath and the targetpath suffix
    attributes, which are the prefix to the function name and the
    suffixes to retrieve the values from for creating the generator
    function.
    """
    # the compile step composes method names as <prefix><name> and
    # spec keys as <key><suffix> from these attributes
    for attr, value in (
            ('compile_prefix', 'compile_'),
            ('sourcepath_suffix', '_sourcepath'),
            ('modpath_suffix', '_modpaths'),
            ('targetpath_suffix', '_targetpaths')):
        setattr(self, attr, value)
Set up the compile prefix, sourcepath and the targetpath suffix attributes, which are the prefix to the function name and the suffixes to retrieve the values from for creating the generator function.
entailment
def _validate_build_target(self, spec, target):
    """
    Essentially validate that the target is inside the build_dir.

    Raises ValueError when the resolved target lies outside of
    spec[BUILD_DIR].
    """
    build_dir = spec[BUILD_DIR]
    resolved = realpath(target)
    # A bare prefix check would wrongly accept sibling paths such as
    # '/tmp/build-extra' for a build_dir of '/tmp/build'; anchor the
    # comparison at a path separator boundary (join(build_dir, '')
    # yields build_dir plus the trailing separator), while still
    # accepting build_dir itself.
    if not (resolved == build_dir or
            resolved.startswith(join(build_dir, ''))):
        raise ValueError('build_target %s is outside build_dir' % target)
Essentially validate that the target is inside the build_dir.
entailment
def transpile_modname_source_target(self, spec, modname, source, target):
    """
    The function that gets called by compile_transpile_entry for
    processing the provided JavaScript source file provided by some
    Python package through the transpiler instance.
    """
    if isinstance(self.transpiler, BaseUnparser):
        # the supported, non-deprecated path.
        return self._transpile_modname_source_target(
            spec, modname, source, target)
    # legacy transpiler callable: warn, then fall back.
    _deprecation_warning(
        'transpiler callable assigned to %r must be an instance of '
        'calmjs.parse.unparsers.base.BaseUnparser by calmjs-4.0.0; '
        'if the original transpile behavior is to be retained, the '
        'subclass may instead override this method to call '
        '`simple_transpile_modname_source_target` directly, as '
        'this fallback behavior will be removed by calmjs-4.0.0' % (
            self,
        )
    )
    return self.simple_transpile_modname_source_target(
        spec, modname, source, target)
The function that gets called by compile_transpile_entry for processing the provided JavaScript source file provided by some Python package through the transpiler instance.
entailment
def simple_transpile_modname_source_target(
        self, spec, modname, source, target):
    """
    The original simple transpile method called by compile_transpile
    on each target.
    """
    opener = self.opener
    # resolve the actual write target under the build directory
    bd_target = self._generate_transpile_target(spec, target)
    logger.info('Transpiling %s to %s', source, bd_target)
    with opener(source, 'r') as reader, opener(bd_target, 'w') as _writer:
        # the SourceWriter wrapper collects sourcemap mappings while
        # the transpiler writes through it
        writer = SourceWriter(_writer)
        self.transpiler(spec, reader, writer)
        if writer.mappings and spec.get(GENERATE_SOURCE_MAP):
            # write the sourcemap beside the target, then append the
            # reference comment to the transpiled output
            source_map_path = bd_target + '.map'
            with open(source_map_path, 'w') as sm_fd:
                self.dump(encode_sourcemap(
                    filename=bd_target,
                    mappings=writer.mappings,
                    sources=[source],
                ), sm_fd)
            # just use basename
            source_map_url = basename(source_map_path)
            _writer.write('\n//# sourceMappingURL=')
            _writer.write(source_map_url)
            _writer.write('\n')
The original simple transpile method called by compile_transpile on each target.
entailment
def compile_transpile_entry(self, spec, entry):
    """
    Handler for each entry for the transpile method of the compile
    process.  This invokes the transpiler that was set up to transpile
    the input files into the build directory.
    """
    modname, source, target, modpath = entry
    self.transpile_modname_source_target(spec, modname, source, target)
    # a transpiled module always maps onto its modpath/target and is
    # always part of the exported module names
    return {modname: modpath}, {modname: target}, [modname]
Handler for each entry for the transpile method of the compile process. This invokes the transpiler that was set up to transpile the input files into the build directory.
entailment
def compile_bundle_entry(self, spec, entry):
    """
    Handler for each entry for the bundle method of the compile
    process.  This copies the source file or directory into the build
    directory.
    """
    modname, source, target, modpath = entry
    exported = []
    if isfile(source):
        # single files are copied to their target under build_dir and
        # count as an exported module name
        exported.append(modname)
        destination = join(spec[BUILD_DIR], target)
        parent = dirname(destination)
        if not exists(parent):
            makedirs(parent)
        shutil.copy(source, destination)
    elif isdir(source):
        # whole directories are copied under the module name instead,
        # and are not recorded as exported module names
        shutil.copytree(source, join(spec[BUILD_DIR], modname))
    return {modname: modpath}, {modname: target}, exported
Handler for each entry for the bundle method of the compile process. This copies the source file or directory into the build directory.
entailment
def compile_loaderplugin_entry(self, spec, entry):
    """
    Generic loader plugin entry handler.

    The default implementation assumes that everything up to the first
    '!' symbol resolves to some known loader plugin within the
    registry.  The registry instance responsible for the resolution of
    the loader plugin handlers must be available in the spec under
    CALMJS_LOADERPLUGIN_REGISTRY
    """
    modname, source, target, modpath = entry
    handler = spec[CALMJS_LOADERPLUGIN_REGISTRY].get(modname)
    if not handler:
        # no handler registered: log and produce empty results
        logger.warning(
            "no loaderplugin handler found for plugin entry '%s'", modname)
        return {}, {}, []
    return handler(self, spec, modname, source, target, modpath)
Generic loader plugin entry handler. The default implementation assumes that everything up to the first '!' symbol resolves to some known loader plugin within the registry. The registry instance responsible for the resolution of the loader plugin handlers must be available in the spec under CALMJS_LOADERPLUGIN_REGISTRY
entailment
def modname_source_to_target(self, spec, modname, source):
    """
    Create a target file name from the input module name and its
    source file name.  The result should be a path relative to the
    build_dir, and this is derived directly from the modname with NO
    implicit convers of path separators (i.e. '/' or any other) into a
    system or OS specific form (e.g. '\\').  The rationale for this
    choice is that there exists Node.js/JavaScript tools that handle
    this internally and/or these paths and values are directly exposed
    on the web and thus these separators must be preserved.

    If the specific implementation requires this to be done,
    implementations may override by wrapping the result of this using
    os.path.normpath.  For the generation of transpile write targets,
    this will be done in _generate_transpile_target.

    Default is to append the module name with the filename_suffix
    assigned to this instance (setup by setup_filename_suffix), iff
    the provided source also end with this filename suffix.

    However, certain tools have issues dealing with loader plugin
    syntaxes showing up on the filesystem (and certain filesystems
    definitely do not like some of the characters), so the usage of
    the loaderplugin registry assigned to the spec may be used for
    lookup if available.

    Called by generator method `_gen_modname_source_target_modpath`.
    """
    registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY)
    if registry and '!' in modname:
        # loader plugin syntax; delegate if a handler is registered
        handler = registry.get(modname)
        if handler:
            return handler.modname_source_to_target(
                self, spec, modname, source)
    suffix = self.filename_suffix
    if source.endswith(suffix) and not modname.endswith(suffix):
        return modname + suffix
    # assume that modname IS the filename
    return modname
Create a target file name from the input module name and its source file name. The result should be a path relative to the build_dir, and this is derived directly from the modname with NO implicit convers of path separators (i.e. '/' or any other) into a system or OS specific form (e.g. '\\'). The rationale for this choice is that there exists Node.js/JavaScript tools that handle this internally and/or these paths and values are directly exposed on the web and thus these separators must be preserved. If the specific implementation requires this to be done, implementations may override by wrapping the result of this using os.path.normpath. For the generation of transpile write targets, this will be done in _generate_transpile_target. Default is to append the module name with the filename_suffix assigned to this instance (setup by setup_filename_suffix), iff the provided source also end with this filename suffix. However, certain tools have issues dealing with loader plugin syntaxes showing up on the filesystem (and certain filesystems definitely do not like some of the characters), so the usage of the loaderplugin registry assigned to the spec may be used for lookup if available. Called by generator method `_gen_modname_source_target_modpath`.
entailment
def modname_source_target_modnamesource_to_modpath(
        self, spec, modname, source, target, modname_source):
    """
    Typical JavaScript tools will get confused if '.js' is added, so
    by default the same modname is returned as path rather than the
    target file for the module path to be written to the output file
    for linkage by tools.  Some other tools may desire the target to
    be returned instead, or construct some other string that is more
    suitable for the tool that will do the assemble and link step.

    The modname and source argument provided to aid pedantic tools,
    but really though this provides more consistency to method
    signatures.

    Same as `self.modname_source_target_to_modpath`, but includes the
    original raw key-value as a 2-tuple.

    Called by generator method `_gen_modname_source_target_modpath`.
    """
    # the extra modname_source 2-tuple exists for signature
    # consistency only; delegate to the shorter form unchanged.
    delegate = self.modname_source_target_to_modpath
    return delegate(spec, modname, source, target)
Typical JavaScript tools will get confused if '.js' is added, so by default the same modname is returned as path rather than the target file for the module path to be written to the output file for linkage by tools. Some other tools may desire the target to be returned instead, or construct some other string that is more suitable for the tool that will do the assemble and link step. The modname and source argument provided to aid pedantic tools, but really though this provides more consistency to method signatures. Same as `self.modname_source_target_to_modpath`, but includes the original raw key-value as a 2-tuple. Called by generator method `_gen_modname_source_target_modpath`.
entailment
def _gen_modname_source_target_modpath(self, spec, d):
    """
    Private generator that will consume those above functions.  This
    should NOT be overridden.

    Produces the following 4-tuple on iteration with the input dict;
    the definition is written at the module level documention for
    calmjs.toolchain, but in brief:

    modname
        The JavaScript module name.
    source
        Stands for sourcepath - path to some JavaScript source file.
    target
        Stands for targetpath - the target path relative to
        spec[BUILD_DIR] where the source file will be written to using
        the method that genearted this entry.
    modpath
        The module path that is compatible with tool referencing the
        target.  While this is typically identical with modname, some
        tools require certain modifications or markers in additional
        to what is presented (e.g. such as the addition of a '?'
        symbol to ensure absolute lookup).
    """
    for modname_source in d.items():
        try:
            modname = self.modname_source_to_modname(spec, *modname_source)
            source = self.modname_source_to_source(spec, *modname_source)
            target = self.modname_source_to_target(spec, *modname_source)
            modpath = self.modname_source_target_modnamesource_to_modpath(
                spec, modname, source, target, modname_source)
        except ValueError as e:
            # figure out which of the above 3 functions failed by
            # acquiring the name from one frame down.
            f_name = sys.exc_info()[2].tb_next.tb_frame.f_code.co_name
            if isinstance(e, ValueSkip):
                # a purposely benign failure.
                log = partial(
                    logger.info,
                    "toolchain purposely skipping on '%s', "
                    "reason: %s, where modname='%s', source='%s'",
                )
            else:
                # an unexpected resolution failure; warn and skip the
                # entry rather than aborting the whole generation.
                log = partial(
                    logger.warning,
                    "toolchain failed to acquire name with '%s', "
                    "reason: %s, where modname='%s', source='%s'; "
                    "skipping",
                )
            log(f_name, e, *modname_source)
            continue
        yield modname, source, target, modpath
Private generator that will consume those above functions. This should NOT be overridden. Produces the following 4-tuple on iteration with the input dict; the definition is written at the module level documention for calmjs.toolchain, but in brief: modname The JavaScript module name. source Stands for sourcepath - path to some JavaScript source file. target Stands for targetpath - the target path relative to spec[BUILD_DIR] where the source file will be written to using the method that genearted this entry. modpath The module path that is compatible with tool referencing the target. While this is typically identical with modname, some tools require certain modifications or markers in additional to what is presented (e.g. such as the addition of a '?' symbol to ensure absolute lookup).
entailment
def compile(self, spec):
    """
    Generic step that compiles from a spec to build the specified
    things into the build directory `build_dir`, by gathering all the
    files and feed them through the transpilation process or by
    simple copying.
    """
    # ensure the export module names accumulator exists and is a list
    spec[EXPORT_MODULE_NAMES] = export_module_names = spec.get(
        EXPORT_MODULE_NAMES, [])
    if not isinstance(export_module_names, list):
        raise TypeError(
            "spec provided a '%s' but it is not of type list "
            "(got %r instead)" % (EXPORT_MODULE_NAMES, export_module_names)
        )

    def compile_entry(method, read_key, store_key):
        # derive the concrete spec keys from the configured affixes
        spec_read_key = read_key + self.sourcepath_suffix
        spec_modpath_key = store_key + self.modpath_suffix
        spec_target_key = store_key + self.targetpath_suffix
        if _check_key_exists(spec, [spec_modpath_key, spec_target_key]):
            # NOTE: `entry` here is the enclosing loop's variable, not
            # a parameter of this closure
            logger.error(
                "aborting compile step %r due to existing key", entry,
            )
            return
        sourcepath_dict = spec.get(spec_read_key, {})
        entries = self._gen_modname_source_target_modpath(
            spec, sourcepath_dict)
        # the method produces the modpath map, target map and the new
        # export module names for this entry
        (spec[spec_modpath_key], spec[spec_target_key],
            new_module_names) = method(spec, entries)
        logger.debug(
            "entry %r "
            "wrote %d entries to spec[%r], "
            "wrote %d entries to spec[%r], "
            "added %d export_module_names",
            entry,
            len(spec[spec_modpath_key]), spec_modpath_key,
            len(spec[spec_target_key]), spec_target_key,
            len(new_module_names),
        )
        export_module_names.extend(new_module_names)

    for entry in self.compile_entries:
        if isinstance(entry, ToolchainSpecCompileEntry):
            # new-style entry object: build the overwrite logger (if
            # configured) and dispatch through the generic handler
            log = partial(
                logging.getLogger(entry.logger).log, entry.log_level, (
                    entry.store_key + "%s['%s'] is being rewritten from "
                    "'%s' to '%s'; configuration may now be invalid"
                ),
            ) if entry.logger else None
            compile_entry(partial(
                toolchain_spec_compile_entries, self,
                process_name=entry.process_name, overwrite_log=log,
            ), entry.read_key, entry.store_key)
            continue
        # legacy 3-tuple entry: (method-or-name, read_key, store_key)
        m, read_key, store_key = entry
        if callable(m):
            method = m
        else:
            # resolve by name through the compile_ prefix convention
            method = getattr(self, self.compile_prefix + m, None)
            if not callable(method):
                logger.error(
                    "'%s' not a callable attribute for %r from "
                    "compile_entries entry %r; skipping", m, self, entry
                )
                continue
        compile_entry(method, read_key, store_key)
Generic step that compiles from a spec to build the specified things into the build directory `build_dir`, by gathering all the files and feed them through the transpilation process or by simple copying.
entailment
def _calf(self, spec):
    """
    The main call, assuming the base spec is prepared.

    Also, no advices will be triggered.
    """
    # run the standard toolchain steps in their fixed order, with no
    # advice handling around any of them
    for step in (self.prepare, self.compile, self.assemble,
                 self.link, self.finalize):
        step(spec)
The main call, assuming the base spec is prepared. Also, no advices will be triggered.
entailment
def calf(self, spec):
    """
    Typical safe usage is this, which sets everything that could be
    problematic up.

    Requires the filename which everything will be produced to.
    """
    if not isinstance(spec, Spec):
        raise TypeError('spec must be of type Spec')

    if not spec.get(BUILD_DIR):
        # no build_dir provided: create a temporary one and schedule
        # its removal for the CLEANUP advice group
        tempdir = realpath(mkdtemp())
        spec.advise(CLEANUP, shutil.rmtree, tempdir)
        build_dir = join(tempdir, 'build')
        mkdir(build_dir)
        spec[BUILD_DIR] = build_dir
    else:
        # normalize the provided build_dir and ensure it exists
        build_dir = self.realpath(spec, BUILD_DIR)
        if not isdir(build_dir):
            logger.error("build_dir '%s' is not a directory", build_dir)
            raise_os_error(errno.ENOTDIR, build_dir)

    self.realpath(spec, EXPORT_TARGET)

    # Finally, handle setup which may set up the deferred advices,
    # as all the toolchain (and its runtime and/or its parent
    # runtime and related toolchains) spec advises should have been
    # done.
    spec.handle(SETUP)

    try:
        # run each standard step wrapped with its before_/after_
        # advice groups; SUCCESS only fires if all steps complete
        process = ('prepare', 'compile', 'assemble', 'link', 'finalize')
        for p in process:
            spec.handle('before_' + p)
            getattr(self, p)(spec)
            spec.handle('after_' + p)
        spec.handle(SUCCESS)
    except ToolchainCancel:
        # quietly handle the issue and move on out of here.
        pass
    finally:
        # CLEANUP always runs, even on cancellation or error
        spec.handle(CLEANUP)
Typical safe usage is this, which sets everything that could be problematic up. Requires the filename which everything will be produced to.
entailment
def transpile_modname_source_target(self, spec, modname, source, target):
    """
    Calls the original version.
    """
    # plain passthrough to the simple (pre-BaseUnparser) behavior
    result = self.simple_transpile_modname_source_target(
        spec, modname, source, target)
    return result
Calls the original version.
entailment
def get_bin_version_str(bin_path, version_flag='-v', kw=None):
    """
    Get the version string through the binary.

    Arguments:

    bin_path
        Path (or name) of the binary to interrogate.
    version_flag
        The flag passed to the binary to make it print its version;
        defaults to '-v'.
    kw
        Optional dict of keyword arguments forwarded to the binary
        resolution and subprocess invocation.

    Returns the matched version string, or None on failure.
    """
    # avoid the shared mutable default argument; None means "no extra
    # keyword arguments" and behaves identically to the old {} default
    kw = {} if kw is None else kw
    try:
        prog = _get_exec_binary(bin_path, kw)
        version_str = version_expr.search(
            check_output([prog, version_flag], **kw).decode(locale)
        ).groups()[0]
    except OSError:
        logger.warning("failed to execute '%s'", bin_path)
        return None
    except Exception:
        logger.exception(
            "encountered unexpected error while trying to find version of "
            "'%s':", bin_path
        )
        return None
    logger.info("'%s' is version '%s'", bin_path, version_str)
    return version_str
Get the version string through the binary.
entailment
def get_bin_version(bin_path, version_flag='-v', kw=None):
    """
    Get the version string through the binary and return a tuple of
    integers.

    Returns None when the version string could not be acquired.
    """
    # avoid the shared mutable default argument; forward an empty dict
    # when the caller did not supply one, matching the old behavior
    version_str = get_bin_version_str(
        bin_path, version_flag, {} if kw is None else kw)
    if version_str:
        return tuple(int(i) for i in version_str.split('.'))
Get the version string through the binary and return a tuple of integers.
entailment
def node(self, source, args=(), env={}):
    """
    Calls node with an inline source.

    Returns decoded output of stdout and stderr; decoding determine by
    locale.
    """
    # delegate execution of the inline source to the generic executor
    binary = self.node_bin
    return self._exec(binary, source, args=args, env=env)
Calls node with an inline source. Returns decoded output of stdout and stderr; decoding determine by locale.
entailment
def create_for_module_vars(cls, scope_vars):
    """
    This was originally designed to be invoked at the module level for
    packages that implement specific support, but this can be used to
    create an instance that has the Node.js backed executable be found
    via current directory's node_modules or NODE_PATH.
    """
    inst = cls()
    if not inst._set_env_path_with_node_modules():
        # binary could not be located; warn loudly but still return a
        # (non-functional) instance so module import does not fail
        import warnings
        msg = (
            "Unable to locate the '%(binary)s' binary or runtime; default "
            "module level functions will not work. Please either provide "
            "%(PATH)s and/or update %(PATH)s environment variable "
            "with one that provides '%(binary)s'; or specify a "
            "working %(NODE_PATH)s environment variable with "
            "%(binary)s installed; or have install '%(binary)s' into "
            "the current working directory (%(cwd)s) either through "
            "npm or calmjs framework for this package. Restart or "
            "reload this module once that is done. Alternatively, "
            "create a manual Driver instance for '%(binary)s' with "
            "explicitly defined arguments." % {
                'binary': inst.binary,
                'PATH': 'PATH',
                'NODE_PATH': 'NODE_PATH',
                'cwd': inst.join_cwd(),
            }
        )
        warnings.warn(msg, RuntimeWarning)
    # export the instance's aliases into the caller's namespace;
    # presumably these are the module-level convenience callables --
    # TODO confirm against _aliases definition
    scope_vars.update(inst._aliases)
    return inst
This was originally designed to be invoked at the module level for packages that implement specific support, but this can be used to create an instance that has the Node.js backed executable be found via current directory's node_modules or NODE_PATH.
entailment
def pkg_manager_view( self, package_names, stream=None, explicit=False, **kw): """ Returns the manifest JSON for the Python package name. Default npm implementation calls for package.json. If this class is initiated using standard procedures, this will mimic the functionality of ``npm view`` but mostly for showing the dependencies. This is done as a default action. Arguments: package_names The names of the python packages with their requirements to source the package.json from. stream If specified, the generated package.json will be written to there. explicit If True, the package names specified are the explicit list to search for - no dependency resolution will then be done. Returns the manifest json as a dict. """ # For looking up the pkg_name to dist converter for explicit to_dists = { False: find_packages_requirements_dists, True: pkg_names_to_dists, } # assuming string, and assume whitespaces are invalid. pkg_names, malformed = convert_package_names(package_names) if malformed: msg = 'malformed package name(s) specified: %s' % ', '.join( malformed) raise ValueError(msg) if len(pkg_names) == 1: logger.info( "generating a flattened '%s' for '%s'", self.pkgdef_filename, pkg_names[0], ) else: logger.info( "generating a flattened '%s' for packages {%s}", self.pkgdef_filename, ', '.join(pkg_names), ) # remember the filename is in the context of the distribution, # not the filesystem. dists = to_dists[explicit](pkg_names) pkgdef_json = flatten_dist_egginfo_json( dists, filename=self.pkgdef_filename, dep_keys=self.dep_keys, ) if pkgdef_json.get( self.pkg_name_field, NotImplemented) is NotImplemented: # use the last item. pkg_name = Requirement.parse(pkg_names[-1]).project_name pkgdef_json[self.pkg_name_field] = pkg_name if stream: self.dump(pkgdef_json, stream) stream.write('\n') return pkgdef_json
Returns the manifest JSON for the Python package name. Default npm implementation calls for package.json. If this class is initiated using standard procedures, this will mimic the functionality of ``npm view`` but mostly for showing the dependencies. This is done as a default action. Arguments: package_names The names of the python packages with their requirements to source the package.json from. stream If specified, the generated package.json will be written to there. explicit If True, the package names specified are the explicit list to search for - no dependency resolution will then be done. Returns the manifest json as a dict.
entailment
def pkg_manager_init(
        self, package_names, overwrite=False, merge=False,
        callback=None, **kw):
    """
    Note: default implementation calls for npm and package.json,
    please note that it may not be the case for this instance of
    Driver.

    If this class is initiated using standard procedures, this will
    emulate the functionality of ``npm init`` for the generation of a
    working ``package.json``, but without asking users for input but
    instead uses information available through the distribution
    packages within ``setuptools``.

    Arguments:

    package_names
        The names of the python packages with their requirements to
        source the package.json from.
    overwrite
        Boolean flag; if set, overwrite package.json with the newly
        generated ``package.json``;
    merge
        Boolean flag; if set, implies overwrite, but does not ignore
        interactive setting.  However this will keep details defined
        in existing ``package.json`` and only merge dependencies /
        devDependencies defined by the specified Python package.
    callback
        A callable.  If this is passed, the value for overwrite will
        be derived from its result; it will be called with arguments
        (original_json, pkgdef_json, pkgdef_path, dumps=self.dumps).
        Typically the calmjs.ui.prompt_overwrite_json is passed into
        this argument; refer to its documentation on details.

    Returns generated definition file if successful; can be achieved
    by writing a new file or that the existing one matches with the
    expected version.  Returns False otherwise.
    """
    # this will be modified in place
    original_json = {}
    pkgdef_json = self.pkg_manager_view(package_names, **kw)
    # Now we figure out the actual file we want to work with.
    pkgdef_path = self.join_cwd(self.pkgdef_filename)
    existed = exists(pkgdef_path)
    if existed:
        try:
            with open(pkgdef_path, 'r') as fd:
                original_json = json.load(fd)
        except ValueError:
            # malformed content is treated as if it were empty
            logger.warning(
                "ignoring existing malformed '%s'", pkgdef_path)
        except (IOError, OSError):
            logger.error(
                "reading of existing '%s' failed; "
                "please confirm that it is a file and/or permissions to "
                "read and write is permitted before retrying.", pkgdef_path
            )
            # Cowardly giving up.
            raise
    if merge:
        # Merge the generated on top of the original.
        updates = generate_merge_dict(
            self.dep_keys, original_json, pkgdef_json,
        )
        final = {}
        final.update(original_json)
        final.update(pkgdef_json)
        final.update(updates)
        pkgdef_json = final
    if original_json == pkgdef_json:
        # Well, if original existing one is identical with the
        # generated version, we have reached our target.
        return pkgdef_json
    if not overwrite and callable(callback):
        # let the callback (e.g. an interactive prompt) decide
        overwrite = callback(
            original_json, pkgdef_json, pkgdef_path, dumps=self.dumps)
    else:
        # here the implied settings due to non-interactive mode
        # are finally set
        if merge:
            overwrite = True
    if not overwrite:
        logger.warning("not overwriting existing '%s'", pkgdef_path)
        return False
    with open(pkgdef_path, 'w') as fd:
        self.dump(pkgdef_json, fd)
    logger.info("wrote '%s'", pkgdef_path)
    return pkgdef_json
Note: default implementation calls for npm and package.json, please note that it may not be the case for this instance of Driver. If this class is initiated using standard procedures, this will emulate the functionality of ``npm init`` for the generation of a working ``package.json``, but without asking users for input but instead uses information available through the distribution packages within ``setuptools``. Arguments: package_names The names of the python packages with their requirements to source the package.json from. overwrite Boolean flag; if set, overwrite package.json with the newly generated ``package.json``; merge Boolean flag; if set, implies overwrite, but does not ignore interactive setting. However this will keep details defined in existing ``package.json`` and only merge dependencies / devDependencies defined by the specified Python package. callback A callable. If this is passed, the value for overwrite will be derived from its result; it will be called with arguments (original_json, pkgdef_json, pkgdef_path, dumps=self.dumps). Typically the calmjs.ui.prompt_overwrite_json is passed into this argument; refer to its documentation on details. Returns generated definition file if successful; can be achieved by writing a new file or that the existing one matches with the expected version. Returns False otherwise.
entailment
def pkg_manager_install( self, package_names=None, production=None, development=None, args=(), env={}, **kw): """ This will install all dependencies into the current working directory for the specific Python package from the selected JavaScript package manager; this requires that this package manager's package definition file to be properly generated first, otherwise the process will be aborted. If the production argument is supplied, it will be passed to the underlying package manager binary as a true or false value with the --production flag, otherwise it will not be set. Likewise for development. However, the production flag has priority. If the argument 'args' is supplied as a tuple, those will be passed through to the package manager install command as its arguments. This will be very specific to the underlying program; use with care as misuse can result in an environment that is not expected by the other parts of the framework. If the argument 'env' is supplied, they will be additional environment variables that are not already defined by the framework, which are 'NODE_PATH' and 'PATH'. Values set for those will have highest precedence, then the ones passed in through env, then finally whatever was already defined before the execution of this program. All other arguments to this method will be passed forward to the pkg_manager_init method, if the package_name is supplied for the Python package. If no package_name was supplied then just continue with the process anyway, to still enable the shorthand calling. If the package manager could not be invoked, it will simply not be. Arguments: package_names The names of the Python package to generate the manifest for. args The arguments to pass into the command line install. 
""" if not package_names: logger.warning( "no package name supplied, not continuing with '%s %s'", self.pkg_manager_bin, self.install_cmd, ) return result = self.pkg_manager_init(package_names, **kw) if result is False: logger.warning( "not continuing with '%s %s' as the generation of " "'%s' failed", self.pkg_manager_bin, self.install_cmd, self.pkgdef_filename ) return call_kw = self._gen_call_kws(**env) logger.debug( "invoking '%s %s'", self.pkg_manager_bin, self.install_cmd) if self.env_path: logger.debug( "invoked with env_path '%s'", self.env_path) if self.working_dir: logger.debug( "invoked from working directory '%s'", self.working_dir) try: cmd = [self._get_exec_binary(call_kw), self.install_cmd] cmd.extend(self._prodev_flag( production, development, result.get(self.devkey))) cmd.extend(args) logger.info('invoking %s', ' '.join(cmd)) call(cmd, **call_kw) except (IOError, OSError): logger.error( "invocation of the '%s' binary failed; please ensure it and " "its dependencies are installed and available.", self.binary ) # Still raise the exception as this is a lower level API. raise return True
This will install all dependencies into the current working directory for the specific Python package from the selected JavaScript package manager; this requires that this package manager's package definition file to be properly generated first, otherwise the process will be aborted. If the production argument is supplied, it will be passed to the underlying package manager binary as a true or false value with the --production flag, otherwise it will not be set. Likewise for development. However, the production flag has priority. If the argument 'args' is supplied as a tuple, those will be passed through to the package manager install command as its arguments. This will be very specific to the underlying program; use with care as misuse can result in an environment that is not expected by the other parts of the framework. If the argument 'env' is supplied, they will be additional environment variables that are not already defined by the framework, which are 'NODE_PATH' and 'PATH'. Values set for those will have highest precedence, then the ones passed in through env, then finally whatever was already defined before the execution of this program. All other arguments to this method will be passed forward to the pkg_manager_init method, if the package_name is supplied for the Python package. If no package_name was supplied then just continue with the process anyway, to still enable the shorthand calling. If the package manager could not be invoked, it will simply not be. Arguments: package_names The names of the Python package to generate the manifest for. args The arguments to pass into the command line install.
entailment
def run(self, args=(), env={}):
    """
    Invoke the package manager binary with the supplied arguments.

    Returns the decoded (stdout, stderr) pair; the decoding is
    determined by the locale.
    """
    # _exec resolves the concrete executable via _get_exec_binary.
    return self._exec(self.binary, args=args, env=env)
Calls the package manager with the arguments. Returns decoded output of stdout and stderr; decoding determine by locale.
entailment
def _get_exec_binary(binary, kw):
    """
    Resolve the target binary manually through ``which``.

    On win32 the subprocess module can only reliably resolve the target
    if it is an actual binary; for a Node.js script it appears to work
    only when shell=True is set, which is a security risk, hence the
    manual resolution here.

    ``kw`` is the keyword argument dict destined for the respective
    subprocess.Popen family of calls; its PATH environment variable is
    consulted when present.
    """
    search_path = kw.get('env', {}).get('PATH')
    resolved = which(binary, path=search_path)
    if resolved is None:
        raise_os_error(errno.ENOENT)
    return resolved
On win32, the subprocess module can only reliably resolve the target binary if it's actually a binary; as for a Node.js script it seems to only work iff shell=True was specified, presenting a security risk. Resolve the target manually through which will account for that. The kw argument is the keyword arguments that will be passed into whatever respective subprocess.Popen family of methods. The PATH environment variable will be used if available.
entailment
def _init_entry_points(self, entry_points): """ Default initialization loop. """ logger.debug( "registering %d entry points for registry '%s'", len(entry_points), self.registry_name, ) for entry_point in entry_points: try: logger.debug( "registering entry point '%s' from '%s'", entry_point, entry_point.dist, ) self._init_entry_point(entry_point) except ImportError: logger.warning( 'ImportError: %s not found; skipping registration', entry_point.module_name) except Exception: logger.exception( "registration of entry point '%s' from '%s' to registry " "'%s' failed with the following exception", entry_point, entry_point.dist, self.registry_name, )
Default initialization loop.
entailment
def store_records_for_package(self, entry_point, records):
    """
    Store the records so they may be looked up by package.
    """
    # Duplicate records are deliberately kept: a package may declare
    # multiple keys for the same namespace, and this default
    # implementation makes no assumption about whether that is allowed.
    self._dist_to_package_module_map(entry_point).extend(records)
Store the records in a way that permit lookup by package
entailment
def register_entry_point(self, entry_point):
    """
    Register a single entry_point.

    Raises ImportError when the entry_point references a module that
    cannot be imported.
    """
    self._register_entry_point_module(
        entry_point, _import_module(entry_point.module_name))
Register a lone entry_point Will raise ImportError if the entry_point leads to an invalid import.
entailment
def _register_entry_point_module(self, entry_point, module): """ Private method that registers an entry_point with a provided module. """ records_map = self._map_entry_point_module(entry_point, module) self.store_records_for_package(entry_point, list(records_map.keys())) for module_name, records in records_map.items(): if module_name in self.records: logger.info( "module '%s' was already declared in registry '%s'; " "applying new records on top.", module_name, self.registry_name, ) logger.debug("overwriting keys: %s", sorted( set(self.records[module_name].keys()) & set(records.keys()) )) self.records[module_name].update(records) else: logger.debug( "adding records for module '%s' to registry '%s'", module_name, self.registry_name, ) self.records[module_name] = records
Private method that registers an entry_point with a provided module.
entailment
def get_record(self, name):
    """
    Return a copy of the record registered under ``name``.

    An unknown name yields an empty dict; the returned dict is
    detached from internal state so callers may mutate it freely.
    """
    return dict(self.records.get(name, {}))
Get a record by name
entailment
def get_records_for_package(self, package_name):
    """
    Return all records identified by ``package_name``, merged into a
    single new dict.
    """
    combined = {}
    for module_name in self.package_module_map.get(package_name, []):
        combined.update(self.get_record(module_name))
    return combined
Get all records identified by package.
entailment
def resolve_parent_registry_name(self, registry_name, suffix):
    """
    Derive the parent registry name by stripping ``suffix``.

    Subclasses should override to supply the default suffix, as the
    invocation is done without one.  Raises ValueError when the name
    does not carry the expected suffix.
    """
    if registry_name.endswith(suffix):
        return registry_name[:len(registry_name) - len(suffix)]
    raise ValueError(
        "child module registry name defined with invalid suffix "
        "('%s' does not end with '%s')" % (registry_name, suffix))
Subclasses should override to specify the default suffix, as the invocation is done without a suffix.
entailment
def get_record(self, name):
    """
    Return the record for ``name``: a fresh set of the matching
    desired "module names" for the given path (empty for unknown
    names, never a reference to internal state).
    """
    return set(self.records.get(name, set()))
Get a record for the registered name, which will be a set of matching desired "module names" for the given path.
entailment
def get_records_for_package(self, package_name):
    """
    Get all records identified by package.

    Returns a new list; an unknown package yields an empty list.
    """
    result = []
    # Fix: provide a default for the lookup — previously a missing
    # package returned None from .get(), and extend(None) raised
    # TypeError instead of returning an empty result.
    result.extend(self.package_module_map.get(package_name, []))
    return result
Get all records identified by package.
entailment
def which(self):
    """
    Resolve the path of the binary this instance will execute.

    Returns None when no binary is configured or it cannot be found
    on the configured env_path.
    """
    return None if self.binary is None else which(
        self.binary, path=self.env_path)
Figure out which binary this will execute. Returns None if the binary is not found.
entailment
def find_node_modules_basedir(self):
    """
    Return the list of node_modules directories accessible through
    this driver instance.

    Only the direct locations are considered — this does not walk up
    parent directories the way Node.js resolution does.
    """
    paths = []
    # working directory first
    local_node_path = self.join_cwd(NODE_MODULES)
    if isdir(local_node_path):
        paths.append(local_node_path)
    # NODE_PATH entries last, mirroring how Node.js appears to resolve
    # these just before the global locations.
    if self.node_path:
        paths += self.node_path.split(pathsep)
    return paths
Find all node_modules directories configured to be accessible through this driver instance. This is typically used for adding the direct instance, and does not traverse the parent directories like what Node.js does. Returns a list of directories that contain a 'node_modules' directory.
entailment
def which_with_node_modules(self): """ Which with node_path and node_modules """ if self.binary is None: return None # first, log down the pedantic things... if isdir(self.join_cwd(NODE_MODULES)): logger.debug( "'%s' instance will attempt to locate '%s' binary from " "%s%s%s%s%s, located through the working directory", self.__class__.__name__, self.binary, self.join_cwd(), sep, NODE_MODULES, sep, NODE_MODULES_BIN, ) if self.node_path: logger.debug( "'%s' instance will attempt to locate '%s' binary from " "its %s of %s", self.__class__.__name__, self.binary, NODE_PATH, self.node_path, ) paths = self.find_node_modules_basedir() whichpaths = pathsep.join(join(p, NODE_MODULES_BIN) for p in paths) if paths: logger.debug( "'%s' instance located %d possible paths to the '%s' binary, " "which are %s", self.__class__.__name__, len(paths), self.binary, whichpaths, ) return which(self.binary, path=whichpaths)
Which with node_path and node_modules
entailment
def _set_env_path_with_node_modules(self): """ Attempt to locate and set the paths to the binary with the working directory defined for this instance. """ modcls_name = ':'.join(( self.__class__.__module__, self.__class__.__name__)) if self.binary is None: raise ValueError( "binary undefined for '%s' instance" % modcls_name) logger.debug( "locating '%s' node binary for %s instance...", self.binary, modcls_name, ) default = self.which() if default is not None: logger.debug( "found '%s'; " "not modifying PATH environment variable in instance of '%s'.", realpath(default), modcls_name) return True target = self.which_with_node_modules() if target: # Only setting the path specific for the binary; side effect # will be whoever else borrowing the _exec in here might not # get the binary they want. That's why it's private. self.env_path = dirname(target) logger.debug( "located '%s' binary at '%s'; setting PATH environment " "variable for '%s' instance.", self.binary, self.env_path, modcls_name ) return True else: logger.debug( "Unable to locate '%s'; not modifying PATH environment " "variable for instance of '%s'.", self.binary, modcls_name ) return False
Attempt to locate and set the paths to the binary with the working directory defined for this instance.
entailment
def _exec(self, binary, stdin='', args=(), env={}):
    """
    Execute with stdin, args and extra environment variables.

    Returns a (stdout, stderr) tuple; the format follows the input
    text (str or bytes), with str encoding determined by the locale
    this module was imported under.

    NOTE(review): the ``binary`` argument is not consulted — the
    executable is resolved through _get_exec_binary from the call
    keywords; confirm this is intentional.
    """
    kw = self._gen_call_kws(**env)
    command = [self._get_exec_binary(kw)] + list(args)
    return fork_exec(command, stdin, **kw)
Executes the binary using stdin and args with environment variables. Returns a tuple of stdout, stderr. Format determined by the input text (either str or bytes), and the encoding of str will be determined by the locale this module was imported in.
entailment
def dump(self, blob, stream):
    """
    Serialize ``blob`` into ``stream`` via json.dump, using this
    instance's indent and separators, with keys sorted.
    """
    json.dump(
        blob, stream,
        indent=self.indent,
        separators=self.separators,
        sort_keys=True,
    )
Call json.dump with the attributes of this instance as arguments.
entailment
def dumps(self, blob):
    """
    Serialize ``blob`` to a string via json.dumps, using this
    instance's indent and separators, with keys sorted.
    """
    return json.dumps(
        blob,
        indent=self.indent,
        separators=self.separators,
        sort_keys=True,
    )
Call json.dumps with the attributes of this instance as arguments.
entailment
def join_cwd(self, path=None):
    """
    Join ``path`` with the current working directory.

    The instance's working_dir is used when set; otherwise the
    process-wide current directory is used.  With no path, the
    resolved directory itself is returned.
    """
    if self.working_dir:
        cwd = self.working_dir
        logger.debug(
            "'%s' instance 'working_dir' set to '%s' for join_cwd",
            type(self).__name__, cwd,
        )
    else:
        cwd = getcwd()
        logger.debug(
            "'%s' instance 'working_dir' unset; "
            "default to process '%s' for join_cwd",
            type(self).__name__, cwd,
        )
    return join(cwd, path) if path else cwd
Join the path with the current working directory. If it is specified for this instance of the object it will be used, otherwise rely on the global value.
entailment
def modname_source_to_target( self, toolchain, spec, modname, source): """ This is called by the Toolchain for modnames that contain a '!' as that signifies a loaderplugin syntax. This will be used by the toolchain (which will also be supplied as the first argument) to resolve the copy target, which must be a path relative to the spec[WORKING_DIR]. If the provided modname points contains a chain of loaders, the registry associated with this handler instance will be used to resolve the subsequent handlers until none are found, which that handler will be used to return this result. """ stripped_modname = self.unwrap(modname) chained = ( self.registry.get_record(stripped_modname) if '!' in stripped_modname else None) if chained: # ensure the stripped_modname is provided as by default the # handler will only deal with its own kind return chained.modname_source_to_target( toolchain, spec, stripped_modname, source) return stripped_modname
This is called by the Toolchain for modnames that contain a '!' as that signifies a loaderplugin syntax. This will be used by the toolchain (which will also be supplied as the first argument) to resolve the copy target, which must be a path relative to the spec[WORKING_DIR]. If the provided modname points contains a chain of loaders, the registry associated with this handler instance will be used to resolve the subsequent handlers until none are found, which that handler will be used to return this result.
entailment
def unwrap(self, value):
    """
    Strip this handler's loaderplugin fragment from ``value``
    (typically a modname) and return the remainder.

    Filter chaining is highly specific to each loader plugin and its
    toolchain, so this default makes no attempt to consume everything
    at once.  Subclasses beware: if an override fails to actually
    remove the fragment, defaults such as modname_source_to_target
    can recurse forever.
    """
    head, sep, rest = value.partition('!')
    # only unwrap when a '!' is present and the leading fragment
    # (minus any '?' options) names this plugin
    if sep and head.split('?', 1)[0] == self.name:
        return rest
    return value
A helper method for unwrapping the loaderplugin fragment out of the provided value (typically a modname) and return it. Note that the filter chaining is very implementation specific to each and every loader plugin and their specific toolchain, so this default implementation is not going to attempt to consume everything in one go. Another note: if this is to be subclassed and if the return value does not actually remove the loaderplugin fragment, issues like default implmenetation of ``modname_source_to_target`` in this class to recurse forever.
entailment
def _unicode_handler(obj):
    """
    Fallback serialization handler that renders an object through its
    ``isoformat`` method (dates, times, datetimes).

    NOTE(review): despite the name, no UTF-8 transformation happens
    here — the code only calls ``obj.isoformat()``; objects without
    that method are rejected.

    :param obj: object to serialize
    :return: the ISO 8601 string representation of the object
    :raise TypeError: if the object has no ``isoformat`` method
    """
    try:
        result = obj.isoformat()
    except AttributeError:
        raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
    return result
Serializes an object through its ``isoformat`` method (dates, times, datetimes); objects without that method are rejected. :param obj: object to serialize :return: the ISO 8601 string representation of the object :raise TypeError: if the object has no ``isoformat`` method
entailment
def encode(self, entity): """ Encodes the data, creating a JSON structure from an instance from the domain model. :param entity: the instance to encode :return: a JSON structure created from the received data """ encoded = self._dict_encoder.encode(entity) if sys.version_info[0] == 2: result = json.dumps(encoded, ensure_ascii=False, default=_iso_handler, encoding='latin1') else: # For Python 3 result = json.dumps(encoded, ensure_ascii=False, default=_iso_handler) return result
Encodes the data, creating a JSON structure from an instance from the domain model. :param entity: the instance to encode :return: a JSON structure created from the received data
entailment
def ipi_base_number(name=None):
    """
    IPI Base Number field.

    An IPI Base Number follows the pattern C-NNNNNNNNN-M: a header
    character, nine digits and a control digit, e.g. I-000000229-7.
    A plain thirteen digit numeric form is also accepted.

    :param name: name for the field
    :return: a parser for the IPI Base Number field
    """
    if name is None:
        name = 'IPI Base Number Field'
    pattern_form = pp.Regex('I-[0-9]{9}-[0-9]')
    pattern_form.setName(name)
    numeric_form = basic.numeric(13)
    numeric_form.setName(name)
    combined = pattern_form | numeric_form
    # white space is significant and must not be stripped
    combined.leaveWhitespace()
    return combined.setResultsName('ipi_base_n')
IPI Base Number field. An IPI Base Number code written on a field follows the Pattern C-NNNNNNNNN-M. This being: - C: header, a character. - N: numeric value. - M: control digit. So, for example, an IPI Base Number code field can contain I-000000229-7. :param name: name for the field :return: a parser for the IPI Base Number field
entailment
def ipi_name_number(name=None):
    """
    IPI Name Number field.

    An IPI Name Number is an eleven digit numeric code, such as
    00014107338.

    :param name: name for the field
    :return: a parser for the IPI Name Number field
    """
    field = basic.numeric(11)
    field.setName(name if name is not None else 'IPI Name Number Field')
    return field.setResultsName('ipi_name_n')
IPI Name Number field. An IPI Name Number is composed of eleven digits. So, for example, an IPI Name Number code field can contain 00014107338. :param name: name for the field :return: a parser for the IPI Name Number field
entailment
def iswc(name=None):
    """
    ISWC field.

    An ISWC code follows the pattern TNNNNNNNNNC: a literal 'T'
    header, nine digits and a control digit, e.g. T0345246801.

    :param name: name for the field
    :return: a parser for the ISWC field
    """
    field = pp.Regex('T[0-9]{10}')
    field.setName(name if name is not None else 'ISWC Field')
    # white space is significant and must not be stripped
    field.leaveWhitespace()
    return field.setResultsName('iswc')
ISWC field. A ISWC code written on a field follows the Pattern TNNNNNNNNNC. This being: - T: header, it is always T. - N: numeric value. - C: control digit. So, for example, an ISWC code field can contain T0345246801. :param name: name for the field :return: a parser for the ISWC field
entailment
def percentage(columns, maximum=100, name=None):
    """
    Creates the grammar for a Numeric (N) field storing a percentage
    and accepting only the specified number of characters.

    It is possible to set the maximum allowed value.  By default this
    is 100 (for 100%), and if modified it is expected to be reduced,
    not increased.

    The three first digits will be for the integer value, so the
    columns can't be lower than 3.

    :param columns: number of columns for this field
    :param maximum: maximum allowed value
    :param name: name for the field
    :return: grammar for the float numeric field
    """
    if name is None:
        name = 'Percentage Field'
    if columns < 3:
        # Fix: the previous message ('The values can not be lower than
        # 3') misdescribed the constraint — it is the column count
        # that may not be below 3, per the docstring.
        message = 'The columns can not be lower than 3'
        raise pp.ParseException(message)
    field = basic.numeric_float(columns, 3)
    field.addParseAction(lambda v: _assert_is_percentage(v[0], maximum))
    field.setName(name)
    return field
Creates the grammar for a Numeric (N) field storing a percentage and accepting only the specified number of characters. It is possible to set the maximum allowed value. By default this is 100 (for 100%), and if modified it is expected to be reduced, not increased. The three first digits will be for the integer value. The columns can't be lower than 3. :param columns: number of columns for this field :param maximum: maximum allowed value :param name: name for the field :return: grammar for the float numeric field
entailment
def _assert_is_percentage(value, maximum=100):
    """
    Validate that ``value`` lies within the percentage range
    [0, maximum]; otherwise an exception is thrown.

    :param value: the value to check
    :param maximum: upper bound of the accepted range
    """
    if not (0 <= value <= maximum):
        raise pp.ParseException(
            'The value on a percentage field should be between 0 and %s'
            % maximum)
Makes sure the received value is a percentage. Otherwise an exception is thrown. :param value: the value to check
entailment
def ean_13(name=None):
    """
    Creates the grammar for an EAN 13 code, the numeric code found on
    thirteen digit barcodes.

    :param name: name for the field
    :return: grammar for an EAN 13 field
    """
    field = basic.numeric(13).setName(
        name if name is not None else 'EAN 13 Field')
    return field.setResultsName('ean_13')
Creates the grammar for an EAN 13 code. These are the codes on thirteen digits barcodes. :param name: name for the field :return: grammar for an EAN 13 field
entailment
def isrc(name=None): """ Creates the grammar for an ISRC code. ISRC stands for International Standard Recording Code, which is the standard ISO 3901. This stores information identifying a particular recording. :param name: name for the field :return: grammar for an ISRC field """ if name is None: name = 'ISRC Field' field = _isrc_short(name) | _isrc_long(name) field.setName(name) return field.setResultsName('isrc')
Creates the grammar for an ISRC code. ISRC stands for International Standard Recording Code, which is the standard ISO 3901. This stores information identifying a particular recording. :param name: name for the field :return: grammar for an ISRC field
entailment
def _isrc_long(name=None): """ Creates the grammar for a short ISRC code. ISRC stands for International Standard Recording Code, which is the standard ISO 3901. This stores information identifying a particular recording. This variant contain no separator for the parts, and follows the pattern: CCXXXYYNNNNN Where each code means: - CC: country code - XXX: registrant - YY: year - NNNNN: work id :param name: name for the field :return: grammar for an ISRC field """ config = CWRTables() if name is None: name = 'ISRC Field' country = config.get_data('isrc_country_code') # registrant = basic.alphanum(3) # year = pp.Regex('[0-9]{2}') # work_id = pp.Regex('[0-9]{5}') country_regex = '' for c in country: if len(country_regex) > 0: country_regex += '|' country_regex += c country_regex = '(' + country_regex + ')' field = pp.Regex(country_regex + '.{3}[0-9]{2}[0-9]{5}') # country.setName('ISO-2 Country Code') # registrant.setName('Registrant') # year.setName('Year') # work_id.setName('Work ID') field.setName(name) return field.setResultsName('isrc')
Creates the grammar for a short ISRC code. ISRC stands for International Standard Recording Code, which is the standard ISO 3901. This stores information identifying a particular recording. This variant contain no separator for the parts, and follows the pattern: CCXXXYYNNNNN Where each code means: - CC: country code - XXX: registrant - YY: year - NNNNN: work id :param name: name for the field :return: grammar for an ISRC field
entailment
def visan(name=None):
    """
    Creates the grammar for a V-ISAN code, a variation on the ISAN
    (International Standard Audiovisual Number): twenty-five digits.

    :param name: name for the field
    :return: grammar for a V-ISAN field
    """
    field = pp.Regex('[0-9]{25}')
    field.setName(name if name is not None else 'V-ISAN Field')
    return field.setResultsName('visan')
Creates the grammar for a V-ISAN code. This is a variation on the ISAN (International Standard Audiovisual Number) :param name: name for the field :return: grammar for an ISRC field
entailment
def audio_visual_key(name=None): """ Creates the grammar for an Audio Visual Key code. This is a variation on the ISAN (International Standard Audiovisual Number) :param name: name for the field :return: grammar for an ISRC field """ if name is None: name = 'AVI Field' society_code = basic.numeric(3) society_code = society_code.setName('Society Code') \ .setResultsName('society_code') av_number = basic.alphanum(15, extended=True, isLast=True) field_empty = pp.Regex('[ ]{15}') field_empty.setParseAction(pp.replaceWith('')) av_number = av_number | field_empty av_number = av_number.setName('Audio-Visual Number') \ .setResultsName('av_number') field = pp.Group(society_code + pp.Optional(av_number)) field.setParseAction(lambda v: _to_avi(v[0])) field = field.setName(name) return field.setResultsName('audio_visual_key')
Creates the grammar for an Audio Visual Key code. This is a variation on the ISAN (International Standard Audiovisual Number) :param name: name for the field :return: grammar for an ISRC field
entailment
def date_time(name=None): """ Creates the grammar for a date and time field, which is a combination of the Date (D) and Time or Duration field (T). This field requires first a Date, and then a Time, without any space in between. :param name: name for the field :return: grammar for a Date and Time field """ if name is None: name = 'Date and Time Field' date = basic.date('Date') time = basic.time('Time') date = date.setResultsName('date') time = time.setResultsName('time') field = pp.Group(date + time) field.setParseAction(lambda d: _combine_date_time(d[0])) field.setName(name) return field.setResultsName('date_time')
Creates the grammar for a date and time field, which is a combination of the Date (D) and Time or Duration field (T). This field requires first a Date, and then a Time, without any space in between. :param name: name for the field :return: grammar for a Date and Time field
entailment
def lookup_int(values, name=None):
    """
    Lookup field whose parsed result is converted to an integer.

    :param values: values allowed
    :param name: name for the field
    :return: grammar for the lookup field
    """
    field = basic.lookup(values, name)
    field.addParseAction(lambda tokens: int(tokens[0]))
    return field
Lookup field which transforms the result into an integer. :param values: values allowed :param name: name for the field :return: grammar for the lookup field
entailment
def extract_function_argument(text, f_name, f_argn, f_argt=asttypes.String):
    """
    Extract a specific argument from calls to a named function.

    Arguments:

    text
        The source text.
    f_name
        The name of the function.
    f_argn
        The argument position.
    f_argt
        The argument type from calmjs.parse.asttypes; default:
        calmjs.parse.asttypes.String
    """
    return list(filter_function_argument(parse(text), f_name, f_argn, f_argt))
Extract a specific argument from a specific function name. Arguments: text The source text. f_name The name of the function f_argn The argument position f_argt The argument type from calmjs.parse.asttypes; default: calmjs.parse.asttypes.String
entailment
def yield_amd_require_string_arguments(
        node, pos,
        reserved_module=reserved_module, wrapped=define_wrapped):
    """
    Yield only the strings within the list provided at argument
    position ``pos`` of a function call node (which must be of the
    FunctionCall type), skipping reserved module names and the
    positionally wrapped define names.

    Originally implemented to yield the list of module names to be
    imported as represented by the given node.
    """
    for i, child in enumerate(node.args.items[pos]):
        if isinstance(child, asttypes.String):
            result = to_str(child)
            # Fix: consult the ``wrapped`` parameter rather than the
            # module-level ``define_wrapped`` — the old code silently
            # ignored any caller-supplied mapping (defaults unchanged,
            # so default behavior is identical).
            if (result not in reserved_module
                    and result != wrapped.get(i)):
                yield result
This yields only strings within the lists provided in the argument list at the specified position from a function call. Originally, this was implemented for yield a list of module names to be imported as represented by this given node, which must be of the FunctionCall type.
entailment
def yield_string_argument(node, pos):
    """
    Yield the argument at ``pos`` of the function call, but only when
    it is a String node.
    """
    arg = node.args.items[pos]
    if isinstance(arg, asttypes.String):
        yield to_str(arg)
Yield just a string argument from position of the function call.
entailment
def yield_module_imports(root, checks=string_imports()):
    """
    Gather all require and define calls from unbundled JavaScript
    source files and yield all module names.  The imports can either
    be of the CommonJS or AMD syntax.
    """
    # NOTE: the default ``checks`` is evaluated once at import time
    # and shared across calls — presumably string_imports() returns an
    # immutable sequence of (extractor, predicate) pairs; confirm.
    if not isinstance(root, asttypes.Node):
        raise TypeError('provided root must be a node')
    for child in yield_function(root, deep_filter):
        for f, condition in checks:
            if condition(child):
                for name in f(child):
                    yield name
                # NOTE(review): this ``continue`` is a no-op (last
                # statement of the loop body).  If the intent was to
                # stop after the first matching check — avoiding
                # duplicate yields when several checks match — this
                # should be ``break``; confirm before changing.
                continue
Gather all require and define calls from unbundled JavaScript source files and yield all module names. The imports can either be of the CommonJS or AMD syntax.
entailment
def yield_module_imports_nodes(root, checks=import_nodes()): """ Yield all nodes that provide an import """ if not isinstance(root, asttypes.Node): raise TypeError('provided root must be a node') for child in yield_function(root, deep_filter): for f, condition in checks: if condition(child): for name in f(child): yield name continue
Yield all nodes that provide an import
entailment
def open_fasta_index(self): """ custom type for file made w/ buildFastaIndex, fai for ones made with samtools """ index = self.fasta_index try: handle = open(index, 'rb') except (IOError, TypeError): sys.stderr.write('index not found, creating it\n') try: self.build_fasta_index() return except IOError: raise IOError("Index File "+self.fasta_index+"can't be found nor created, check file permissions") self.sequence_index = {} _seq_dict = self.sequence_index for row in handle: entry = row.decode('utf-8').strip().split('\t') #stored as: {header: length, # of chars to end of this header, length of fasta lines, length of each line including breakchar} _seq_dict[entry[0]] = (entry[1], entry[2], entry[3], entry[4])
custom type for file made w/ buildFastaIndex, fai for ones made with samtools
entailment
def get_sequence(self, chrom, start, end, strand='+', indexing=(-1, 0)): """ chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11', start/end are coordinates, which default to python style [0,1) internally. So positions should be entered with (1,1) indexing. This can be changed with the indexing keyword. The default is for everything to be relative to the positive strand """ try: divisor = int(self.sequence_index[chrom][2]) except KeyError: self.open_fasta_index() try: divisor = int(self.sequence_index[chrom][2]) except KeyError: sys.stderr.write("%s cannot be found within the fasta index file.\n" % chrom) return "" start+=indexing[0] end+=indexing[1] #is it a valid position? if ( start < 0 or end > int(self.sequence_index[chrom][0] )): raise ValueError("The range %d-%d is invalid. Valid range for this feature is 1-%d." % (start-indexing[0], end-indexing[1], int(self.sequence_index[chrom][0]))) #go to start of chromosome seekpos = int(self.sequence_index[chrom][1]) #find how many newlines we have seekpos += start+start/divisor slen = end-start endpos = int(slen + (slen/divisor) + 1) #a hack of sorts but it works and is easy self.fasta_file.seek(seekpos, 0) output = self.fasta_file.read(endpos) output = output.replace('\n', '') out = output[:slen] if strand == '+' or strand == 1: return out if strand == '-' or strand == -1: return _reverse_complement(out)
chromosome is entered relative to the file it was built with, so it can be 'chr11' or '11', start/end are coordinates, which default to python style [0,1) internally. So positions should be entered with (1,1) indexing. This can be changed with the indexing keyword. The default is for everything to be relative to the positive strand
entailment
def resolve_child_module_registries_lineage(registry): """ For a given child module registry, attempt to resolve the lineage. Return an iterator, yielding from parent down to the input registry, inclusive of the input registry. """ children = [registry] while isinstance(registry, BaseChildModuleRegistry): if registry.parent in children: # this should never normally occur under normal usage where # classes have been properly subclassed with methods defined # to specificiation and with standard entry point usage, but # non-standard definitions/usage can definitely trigger this # self-referential loop. raise TypeError( "registry '%s' was already recorded in the lineage, " "indicating that it may be some (grand)child of itself, which " "is an illegal reference in the registry system; previously " "resolved lineage is: %r" % (registry.parent.registry_name, [ r.registry_name for r in reversed(children) ]) ) pl = len(registry.parent.registry_name) if len(registry.parent.registry_name) > len(registry.registry_name): logger.warning( "the parent registry '%s' somehow has a longer name than its " "child registry '%s'; the underlying registry class may be " "constructed in an invalid manner", registry.parent.registry_name, registry.registry_name, ) elif registry.registry_name[:pl] != registry.parent.registry_name: logger.warning( "child registry '%s' does not share the same common prefix as " "its parent registry '%s'; there may be errors with how the " "related registries are set up or constructed", registry.registry_name, registry.parent.registry_name, ) children.append(registry.parent) registry = registry.parent # the lineage down from parent to child. return iter(reversed(children))
For a given child module registry, attempt to resolve the lineage. Return an iterator, yielding from parent down to the input registry, inclusive of the input registry.
entailment
def addModification(self, aa,position, modMass, modType): """ !!!!MODIFICATION POSITION IS 0 BASED!!!!!! Modifications are stored internally as a tuple with this format: (amino acid modified, index in peptide of amino acid, modification type, modification mass) ie (M, 7, Oxidation, 15.9...) such as: M35(o) for an oxidized methionine at residue 35 """ #clean up xtandem if not modType: #try to figure out what it is tmass = abs(modMass) smass = str(tmass) prec = len(str(tmass-int(tmass)))-2 precFormat = '%'+'0.%df'%prec # modType = "" # masses = config.MODIFICATION_MASSES # for i in masses: # if tmass in masses[i] or smass == precFormat%masses[i][0]: # #found it # modType = i # if not modType: # sys.stderr.write('mod not found %s\n'%modMass) self.mods.add((aa,str(position),str(modMass),str(modType)))
!!!!MODIFICATION POSITION IS 0 BASED!!!!!! Modifications are stored internally as a tuple with this format: (amino acid modified, index in peptide of amino acid, modification type, modification mass) ie (M, 7, Oxidation, 15.9...) such as: M35(o) for an oxidized methionine at residue 35
entailment
def resource_filename_mod_dist(module_name, dist): """ Given a module name and a distribution, attempt to resolve the actual path to the module. """ try: return pkg_resources.resource_filename( dist.as_requirement(), join(*module_name.split('.'))) except pkg_resources.DistributionNotFound: logger.warning( "distribution '%s' not found, falling back to resolution using " "module_name '%s'", dist, module_name, ) return pkg_resources.resource_filename(module_name, '')
Given a module name and a distribution, attempt to resolve the actual path to the module.
entailment
def resource_filename_mod_entry_point(module_name, entry_point): """ If a given package declares a namespace and also provide submodules nested at that namespace level, and for whatever reason that module is needed, Python's import mechanism will not have a path associated with that module. However, if given an entry_point, this path can be resolved through its distribution. That said, the default resource_filename function does not accept an entry_point, and so we have to chain that back together manually. """ if entry_point.dist is None: # distribution missing is typically caused by mocked entry # points from tests; silently falling back to basic lookup result = pkg_resources.resource_filename(module_name, '') else: result = resource_filename_mod_dist(module_name, entry_point.dist) if not result: logger.warning( "resource path cannot be found for module '%s' and entry_point " "'%s'", module_name, entry_point ) return None if not exists(result): logger.warning( "resource path found at '%s' for module '%s' and entry_point " "'%s', but it does not exist", result, module_name, entry_point, ) return None return result
If a given package declares a namespace and also provide submodules nested at that namespace level, and for whatever reason that module is needed, Python's import mechanism will not have a path associated with that module. However, if given an entry_point, this path can be resolved through its distribution. That said, the default resource_filename function does not accept an entry_point, and so we have to chain that back together manually.
entailment
def modgen( module, entry_point, modpath='pkg_resources', globber='root', fext=JS_EXT, registry=_utils): """ JavaScript styled module location listing generator. Arguments: module The Python module to start fetching from. entry_point This is the original entry point that has a distribution reference such that the resource_filename API call may be used to locate the actual resources. Optional Arguments: modpath The name to the registered modpath function that will fetch the paths belonging to the module. Defaults to 'pkg_resources'. globber The name to the registered file globbing function. Defaults to one that will only glob the local path. fext The filename extension to match. Defaults to `.js`. registry The "registry" to extract the functions from Yields 3-tuples of - raw list of module name fragments - the source base path to the python module (equivalent to module) - the relative path to the actual module For each of the module basepath and source files the globber finds. """ globber_f = globber if callable(globber) else registry['globber'][globber] modpath_f = modpath if callable(modpath) else registry['modpath'][modpath] logger.debug( 'modgen generating file listing for module %s', module.__name__, ) module_frags = module.__name__.split('.') module_base_paths = modpath_f(module, entry_point) for module_base_path in module_base_paths: logger.debug('searching for *%s files in %s', fext, module_base_path) for path in globber_f(module_base_path, '*' + fext): mod_path = (relpath(path, module_base_path)) yield ( module_frags + mod_path[:-len(fext)].split(sep), module_base_path, mod_path, )
JavaScript styled module location listing generator. Arguments: module The Python module to start fetching from. entry_point This is the original entry point that has a distribution reference such that the resource_filename API call may be used to locate the actual resources. Optional Arguments: modpath The name to the registered modpath function that will fetch the paths belonging to the module. Defaults to 'pkg_resources'. globber The name to the registered file globbing function. Defaults to one that will only glob the local path. fext The filename extension to match. Defaults to `.js`. registry The "registry" to extract the functions from Yields 3-tuples of - raw list of module name fragments - the source base path to the python module (equivalent to module) - the relative path to the actual module For each of the module basepath and source files the globber finds.
entailment
def register(util_type, registry=_utils): """ Crude, local registration decorator for a crude local registry of all utilities local to this module. """ def marker(f): mark = util_type + '_' if not f.__name__.startswith(mark): raise TypeError( 'not registering %s to %s' % (f.__name__, util_type)) registry[util_type][f.__name__[len(mark):]] = f return f return marker
Crude, local registration decorator for a crude local registry of all utilities local to this module.
entailment
def modpath_all(module, entry_point): """ Provides the raw __path__. Incompatible with PEP 302-based import hooks and incompatible with zip_safe packages. Deprecated. Will be removed by calmjs-4.0. """ module_paths = getattr(module, '__path__', []) if not module_paths: logger.warning( "module '%s' does not appear to be a namespace module or does not " "export available paths onto the filesystem; JavaScript source " "files cannot be extracted from this module.", module.__name__ ) return module_paths
Provides the raw __path__. Incompatible with PEP 302-based import hooks and incompatible with zip_safe packages. Deprecated. Will be removed by calmjs-4.0.
entailment
def modpath_last(module, entry_point): """ Provides the raw __path__. Incompatible with PEP 302-based import hooks and incompatible with zip_safe packages. Deprecated. Will be removed by calmjs-4.0. """ module_paths = modpath_all(module, entry_point) if len(module_paths) > 1: logger.info( "module '%s' has multiple paths, default selecting '%s' as base.", module.__name__, module_paths[-1], ) return module_paths[-1:]
Provides the raw __path__. Incompatible with PEP 302-based import hooks and incompatible with zip_safe packages. Deprecated. Will be removed by calmjs-4.0.
entailment
def modpath_pkg_resources(module, entry_point): """ Goes through pkg_resources for compliance with various PEPs. This one accepts a module as argument. """ result = [] try: path = resource_filename_mod_entry_point(module.__name__, entry_point) except ImportError: logger.warning("module '%s' could not be imported", module.__name__) except Exception: logger.warning("%r does not appear to be a valid module", module) else: if path: result.append(path) return result
Goes through pkg_resources for compliance with various PEPs. This one accepts a module as argument.
entailment
def mapper(module, entry_point, modpath='pkg_resources', globber='root', modname='es6', fext=JS_EXT, registry=_utils): """ General mapper Loads components from the micro registry. """ modname_f = modname if callable(modname) else _utils['modname'][modname] return { modname_f(modname_fragments): join(base, subpath) for modname_fragments, base, subpath in modgen( module, entry_point=entry_point, modpath=modpath, globber=globber, fext=fext, registry=_utils) }
General mapper Loads components from the micro registry.
entailment
def mapper_python(module, entry_point, globber='root', fext=JS_EXT): """ Default mapper using python style globber Finds the latest path declared for the module at hand and extract a list of importable JS modules using the es6 module import format. """ return mapper( module, entry_point=entry_point, modpath='pkg_resources', globber=globber, modname='python', fext=fext)
Default mapper using python style globber Finds the latest path declared for the module at hand and extract a list of importable JS modules using the es6 module import format.
entailment
def _printable_id_code(self): """ Returns the code in a printable form, filling with zeros if needed. :return: the ID code in a printable form """ code = str(self.id_code) while len(code) < self._code_size: code = '0' + code return code
Returns the code in a printable form, filling with zeros if needed. :return: the ID code in a printable form
entailment
def _printable_id_code(self): """ Returns the code in a printable form, separating it into groups of three characters using a point between them. :return: the ID code in a printable form """ code = super(ISWCCode, self)._printable_id_code() code1 = code[:3] code2 = code[3:6] code3 = code[-3:] return '%s.%s.%s' % (code1, code2, code3)
Returns the code in a printable form, separating it into groups of three characters using a point between them. :return: the ID code in a printable form
entailment
def write(self, s): """ Standard write, for standard sources part of the original file. """ lines = s.splitlines(True) for line in lines: self.current_mapping.append( (self.generated_col, self.index, self.row, self.col_last)) self.stream.write(line) if line[-1] in '\r\n': # start again. self._newline() self.row = 1 self.col_current = 0 else: self.col_current += len(line) self.generated_col = self.col_last = len(line)
Standard write, for standard sources part of the original file.
entailment
def discard(self, s): """ Discard from original file. """ lines = s.splitlines(True) for line in lines: if line[-1] not in '\r\n': if not self.warn: logger.warning( 'partial line discard UNSUPPORTED; source map ' 'generated will not match at the column level' ) self.warn = True else: # simply increment row self.row += 1
Discard from original file.
entailment
def write_padding(self, s): """ Write string that are not part of the original file. """ lines = s.splitlines(True) for line in lines: self.stream.write(line) if line[-1] in '\r\n': self._newline() else: # this is the last line self.generated_col += len(line)
Write string that are not part of the original file.
entailment
def format_currency(number, currency, format, locale=babel.numbers.LC_NUMERIC, force_frac=None, format_type='standard'): """Same as ``babel.numbers.format_currency``, but has ``force_frac`` argument instead of ``currency_digits``. If the ``force_frac`` argument is given, the argument is passed down to ``pattern.apply``. """ locale = babel.core.Locale.parse(locale) if format: pattern = babel.numbers.parse_pattern(format) else: try: pattern = locale.currency_formats[format_type] except KeyError: raise babel.numbers.UnknownCurrencyFormatError( "%r is not a known currency format type" % format_type) if force_frac is None: fractions = babel.core.get_global('currency_fractions') try: digits = fractions[currency][0] except KeyError: digits = fractions['DEFAULT'][0] frac = (digits, digits) else: frac = force_frac return pattern.apply(number, locale, currency=currency, force_frac=frac)
Same as ``babel.numbers.format_currency``, but has ``force_frac`` argument instead of ``currency_digits``. If the ``force_frac`` argument is given, the argument is passed down to ``pattern.apply``.
entailment
def run(unihan_options={}): """Wrapped so we can test in tests/test_examples.py""" print("This example prints variant character data.") c = Cihai() if not c.unihan.is_bootstrapped: # download and install Unihan to db c.unihan.bootstrap(unihan_options) c.unihan.add_plugin( 'cihai.data.unihan.dataset.UnihanVariants', namespace='variants' ) print("## ZVariants") variant_list(c.unihan, "kZVariant") print("## kSemanticVariant") variant_list(c.unihan, "kSemanticVariant") print("## kSpecializedSemanticVariant") variant_list(c.unihan, "kSpecializedSemanticVariant")
Wrapped so we can test in tests/test_examples.py
entailment
def reflect_db(self): """ No-op to reflect db info. This is available as a method so the database can be reflected outside initialization (such bootstrapping unihan during CLI usage). """ self.metadata.reflect(views=True, extend_existing=True) self.base = automap_base(metadata=self.metadata) self.base.prepare()
No-op to reflect db info. This is available as a method so the database can be reflected outside initialization (such bootstrapping unihan during CLI usage).
entailment
def parse(s, element=Element, atomicstring=lambda s: s): """ Translates from ASCIIMathML (an easy to type and highly readable way to represent math formulas) into MathML (a w3c standard directly displayable by some web browsers). The function `parse()` generates a tree of elements: >>> import asciimathml >>> asciimathml.parse('sqrt 2') <Element math at b76fb28c> The tree can then be manipulated using the standard python library. For example we can generate its string representation: >>> from xml.etree.ElementTree import tostring >>> tostring(asciimathml.parse('sqrt 2')) '<math><mstyle><msqrt><mn>2</mn></msqrt></mstyle></math>' """ global Element_, AtomicString_ Element_ = element AtomicString_ = atomicstring s, nodes = parse_exprs(s) remove_invisible(nodes) nodes = map(remove_private, nodes) return El('math', El('mstyle', *nodes))
Translates from ASCIIMathML (an easy to type and highly readable way to represent math formulas) into MathML (a w3c standard directly displayable by some web browsers). The function `parse()` generates a tree of elements: >>> import asciimathml >>> asciimathml.parse('sqrt 2') <Element math at b76fb28c> The tree can then be manipulated using the standard python library. For example we can generate its string representation: >>> from xml.etree.ElementTree import tostring >>> tostring(asciimathml.parse('sqrt 2')) '<math><mstyle><msqrt><mn>2</mn></msqrt></mstyle></math>'
entailment
def trace_parser(p): """ Decorator for tracing the parser. Use it to decorate functions with signature: string -> (string, nodes) and a trace of the progress made by the parser will be printed to stderr. Currently parse_exprs(), parse_expr() and parse_m() have the right signature. """ def nodes_to_string(n): if isinstance(n, list): result = '[ ' for m in map(nodes_to_string, n): result += m result += ' ' result += ']' return result else: try: return tostring(remove_private(copy(n))) except Exception as e: return n def print_trace(*args): import sys sys.stderr.write(" " * tracing_level) for arg in args: sys.stderr.write(str(arg)) sys.stderr.write(' ') sys.stderr.write('\n') sys.stderr.flush() def wrapped(s, *args, **kwargs): global tracing_level print_trace(p.__name__, repr(s)) tracing_level += 1 s, n = p(s, *args, **kwargs) tracing_level -= 1 print_trace("-> ", repr(s), nodes_to_string(n)) return s, n return wrapped
Decorator for tracing the parser. Use it to decorate functions with signature: string -> (string, nodes) and a trace of the progress made by the parser will be printed to stderr. Currently parse_exprs(), parse_expr() and parse_m() have the right signature.
entailment
def remove_symbol_from_dist(dist, index): ''' prob is a ndarray representing a probability distribution. index is a number between 0 and and the number of symbols ( len(prob)-1 ) return the probability distribution if the element at 'index' was no longer available ''' if type(dist) is not Distribution: raise TypeError("remove_symbol_from_dist got an object ot type {0}".format(type(dist))) new_prob = dist.prob.copy() new_prob[index]=0 new_prob /= sum(new_prob) return Distribution(new_prob)
prob is a ndarray representing a probability distribution. index is a number between 0 and and the number of symbols ( len(prob)-1 ) return the probability distribution if the element at 'index' was no longer available
entailment
def change_response(x, prob, index): ''' change every response in x that matches 'index' by randomly sampling from prob ''' #pdb.set_trace() N = (x==index).sum() #x[x==index]=9 x[x==index] = dist.sample(N)
change every response in x that matches 'index' by randomly sampling from prob
entailment
def toy_example(): """ Make a toy example where x is uniformly distributed with N bits and y follows x but with symbol dependent noise. x=0 -> y=0 x=1 -> y=1 + e x=2 -> y=2 + 2*e ... x=n -> y=n + n*e where by n*e I am saying that the noise grows """ #pdb.set_trace() N=4 m = 100 x = np.zeros(m*(2**N)) y = np.zeros(m*(2**N)) for i in range(1, 2**N): x[i*m:(i+1)*m] = i y[i*m:(i+1)*m] = i + np.random.randint(0, 2*i, m) diff = differentiate_mi(x,y) return x, y, diff
Make a toy example where x is uniformly distributed with N bits and y follows x but with symbol dependent noise. x=0 -> y=0 x=1 -> y=1 + e x=2 -> y=2 + 2*e ... x=n -> y=n + n*e where by n*e I am saying that the noise grows
entailment
def differentiate_mi(x, y): ''' for each symbol in x, change x such that there are no more of such symbols (replacing by a random distribution with the same proba of all other symbols) and compute mi(new_x, y) ''' #pdb.set_trace() dist = Distribution(discrete.symbols_to_prob(x)) diff = np.zeros(len(dist.prob)) for i in range(len(dist.prob)): i = int(i) dist = Distribution(remove_symbol_from_dist(dist, i).prob) new_x = change_response(x, dist, i) diff[i] = discrete.mi(x,y) return diff
for each symbol in x, change x such that there are no more of such symbols (replacing by a random distribution with the same proba of all other symbols) and compute mi(new_x, y)
entailment
def sample(self, *args): ''' generate a random number in [0,1) and return the index into self.prob such that self.prob[index] <= random_number but self.prob[index+1] > random_number implementation note: the problem is identical to finding the index into self.cumsum where the random number should be inserted to keep the array sorted. This is exactly what searchsorted does. usage: myDist = Distribution(array(0.5, .25, .25)) x = myDist.sample() # generates 1 sample x = myDist.sample(100) # generates 100 samples x = myDist.sample(10,10) # generates a 10x10 ndarray ''' return self.cumsum.searchsorted(np.random.rand(*args))
generate a random number in [0,1) and return the index into self.prob such that self.prob[index] <= random_number but self.prob[index+1] > random_number implementation note: the problem is identical to finding the index into self.cumsum where the random number should be inserted to keep the array sorted. This is exactly what searchsorted does. usage: myDist = Distribution(array(0.5, .25, .25)) x = myDist.sample() # generates 1 sample x = myDist.sample(100) # generates 100 samples x = myDist.sample(10,10) # generates a 10x10 ndarray
entailment
def default_log_template(self, record): """Return the prefix for the log message. Template for Formatter. :param: record: :py:class:`logging.LogRecord` object. this is passed in from inside the :py:meth:`logging.Formatter.format` record. """ reset = Style.RESET_ALL levelname = [ LEVEL_COLORS.get(record.levelname), Style.BRIGHT, '(%(levelname)s)', Style.RESET_ALL, ' ', ] asctime = [ '[', Fore.BLACK, Style.DIM, Style.BRIGHT, '%(asctime)s', Fore.RESET, Style.RESET_ALL, ']', ] name = [ ' ', Fore.WHITE, Style.DIM, Style.BRIGHT, '%(name)s', Fore.RESET, Style.RESET_ALL, ' ', ] tpl = "".join(reset + levelname + asctime + name + reset) return tpl
Return the prefix for the log message. Template for Formatter. :param: record: :py:class:`logging.LogRecord` object. this is passed in from inside the :py:meth:`logging.Formatter.format` record.
entailment
def merge_dict(base, additional): """ Combine two dictionary-like objects. Notes ----- Code from https://github.com/pypa/warehouse Copyright 2013 Donald Stufft Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ if base is None: return additional if additional is None: return base if not ( isinstance(base, collections.Mapping) and isinstance(additional, collections.Mapping) ): return additional merged = base for key, value in additional.items(): if isinstance(value, collections.Mapping): merged[key] = merge_dict(merged.get(key), value) else: merged[key] = value return merged
Combine two dictionary-like objects. Notes ----- Code from https://github.com/pypa/warehouse Copyright 2013 Donald Stufft Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
entailment
def expand_config(d, dirs): """ Expand configuration XDG variables, environmental variables, and tildes. Parameters ---------- d : dict config information dirs : appdirs.AppDirs XDG application mapping Notes ----- *Environmentable variables* are expanded via :py:func:`os.path.expandvars`. So ``${PWD}`` would be replaced by the current PWD in the shell, ``${USER}`` would be the user running the app. *XDG variables* are expanded via :py:meth:`str.format`. These do not have a dollar sign. They are: - ``{user_cache_dir}`` - ``{user_config_dir}`` - ``{user_data_dir}`` - ``{user_log_dir}`` - ``{site_config_dir}`` - ``{site_data_dir}`` See Also -------- os.path.expanduser, os.path.expandvars : Standard library functions for expanding variables. Same concept, used inside. """ context = { 'user_cache_dir': dirs.user_cache_dir, 'user_config_dir': dirs.user_config_dir, 'user_data_dir': dirs.user_data_dir, 'user_log_dir': dirs.user_log_dir, 'site_config_dir': dirs.site_config_dir, 'site_data_dir': dirs.site_data_dir, } for k, v in d.items(): if isinstance(v, dict): expand_config(v, dirs) if isinstance(v, string_types): d[k] = os.path.expanduser(os.path.expandvars(d[k])) d[k] = d[k].format(**context)
Expand configuration XDG variables, environmental variables, and tildes. Parameters ---------- d : dict config information dirs : appdirs.AppDirs XDG application mapping Notes ----- *Environmentable variables* are expanded via :py:func:`os.path.expandvars`. So ``${PWD}`` would be replaced by the current PWD in the shell, ``${USER}`` would be the user running the app. *XDG variables* are expanded via :py:meth:`str.format`. These do not have a dollar sign. They are: - ``{user_cache_dir}`` - ``{user_config_dir}`` - ``{user_data_dir}`` - ``{user_log_dir}`` - ``{site_config_dir}`` - ``{site_data_dir}`` See Also -------- os.path.expanduser, os.path.expandvars : Standard library functions for expanding variables. Same concept, used inside.
entailment
def bootstrap_unihan(metadata, options={}): """Download, extract and import unihan to database.""" options = merge_dict(UNIHAN_ETL_DEFAULT_OPTIONS.copy(), options) p = unihan.Packager(options) p.download() data = p.export() table = create_unihan_table(UNIHAN_FIELDS, metadata) metadata.create_all() metadata.bind.execute(table.insert(), data)
Download, extract and import unihan to database.
entailment
def is_bootstrapped(metadata): """Return True if cihai is correctly bootstrapped.""" fields = UNIHAN_FIELDS + DEFAULT_COLUMNS if TABLE_NAME in metadata.tables.keys(): table = metadata.tables[TABLE_NAME] if set(fields) == set(c.name for c in table.columns): return True else: return False else: return False
Return True if cihai is correctly bootstrapped.
entailment
def create_unihan_table(columns, metadata): """Create table and return :class:`sqlalchemy.Table`. Parameters ---------- columns : list columns for table, e.g. ``['kDefinition', 'kCantonese']`` metadata : :class:`sqlalchemy.schema.MetaData` Instance of sqlalchemy metadata Returns ------- :class:`sqlalchemy.schema.Table` : Newly created table with columns and index. """ if TABLE_NAME not in metadata.tables: table = Table(TABLE_NAME, metadata) table.append_column(Column('char', String(12), primary_key=True)) table.append_column(Column('ucn', String(12), primary_key=True)) for column_name in columns: col = Column(column_name, String(256), nullable=True) table.append_column(col) return table else: return Table(TABLE_NAME, metadata)
Create table and return :class:`sqlalchemy.Table`. Parameters ---------- columns : list columns for table, e.g. ``['kDefinition', 'kCantonese']`` metadata : :class:`sqlalchemy.schema.MetaData` Instance of sqlalchemy metadata Returns ------- :class:`sqlalchemy.schema.Table` : Newly created table with columns and index.
entailment
def get_address(pk, main_net=True, prefix=None): """ compute the nem-py address from the public one """ if isinstance(pk, str): pk = unhexlify(pk.encode()) assert len(pk) == 32, 'PK is 32bytes {}'.format(len(pk)) k = keccak_256(pk).digest() ripe = RIPEMD160.new(k).digest() if prefix is None: body = (b"\x68" if main_net else b"\x98") + ripe else: assert isinstance(prefix, bytes), 'Set prefix 1 bytes' body = prefix + ripe checksum = keccak_256(body).digest()[0:4] return b32encode(body + checksum).decode()
compute the nem-py address from the public one
entailment
def lookup_char(self, char): """Return character information from datasets. Parameters ---------- char : str character / string to lookup Returns ------- :class:`sqlalchemy.orm.query.Query` : list of matches """ Unihan = self.sql.base.classes.Unihan return self.sql.session.query(Unihan).filter_by(char=char)
Return character information from datasets. Parameters ---------- char : str character / string to lookup Returns ------- :class:`sqlalchemy.orm.query.Query` : list of matches
entailment
def reverse_char(self, hints): """Return QuerySet of objects from SQLAlchemy of results. Parameters ---------- hints: list of str strings to lookup Returns ------- :class:`sqlalchemy.orm.query.Query` : reverse matches """ if isinstance(hints, string_types): hints = [hints] Unihan = self.sql.base.classes.Unihan columns = Unihan.__table__.columns return self.sql.session.query(Unihan).filter( or_(*[column.contains(hint) for column in columns for hint in hints]) )
Return QuerySet of objects from SQLAlchemy of results. Parameters ---------- hints: list of str strings to lookup Returns ------- :class:`sqlalchemy.orm.query.Query` : reverse matches
entailment