sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def init_argparser_loaderplugin_registry(
        self, argparser, default=None, help=(
            'the name of the registry to use for the handling of loader '
            'plugins that may be loaded from the given Python packages')):
    """
    Default helper for setting up the loaderplugin registries flags.

    Note that this is NOT part of the init_argparser due to
    implementation specific requirements.  Subclasses should consider
    modifying the default value help message to cater to the toolchain
    it encapsulates.

    :param argparser: the ArgumentParser (or subparser) to extend
    :param default: default registry name stored when the flag is absent
    :param help: help text shown for the flag
    """
    # gather the option keywords in one place, then register the flag
    option_kwargs = {
        'default': default,
        'dest': CALMJS_LOADERPLUGIN_REGISTRY_NAME,
        'action': 'store',
        'metavar': metavar('registry'),
        'help': help,
    }
    argparser.add_argument('--loaderplugin-registry', **option_kwargs)
Default helper for setting up the loaderplugin registries flags. Note that this is NOT part of the init_argparser due to implementation specific requirements. Subclasses should consider modifying the default value help message to cater to the toolchain it encapsulates.
entailment
def init_argparser(self, argparser):
    """
    Other runtimes (or users of ArgumentParser) can pass their
    subparser into here to collect the arguments here for a
    subcommand.

    :param argparser: the ArgumentParser (or subparser) to set up
    """
    # set up flags provided by the parent runtime first, then the
    # source-registry and package-name arguments specific to source
    # package toolchains.
    super(SourcePackageToolchainRuntime, self).init_argparser(argparser)
    self.init_argparser_source_registry(argparser)
    self.init_argparser_package_names(argparser)
Other runtimes (or users of ArgumentParser) can pass their subparser into here to collect the arguments here for a subcommand.
entailment
def init_argparser(self, argparser):
    """
    Other runtimes (or users of ArgumentParser) can pass their
    subparser into here to collect the arguments here for a
    subcommand.

    :param argparser: the ArgumentParser (or subparser) to set up
    """
    super(PackageManagerRuntime, self).init_argparser(argparser)

    # Ideally, we could use more subparsers for each action (i.e.
    # init and install).  However, this is complicated by the fact
    # that setuptools has its own calling conventions through the
    # setup.py file, and to present a consistent cli to end-users
    # from both calmjs entry point and setuptools using effectively
    # the same codebase will require a bit of creative handling.

    # provide this for the setuptools command class.
    actions = argparser.add_argument_group('action arguments')
    count = 0

    # each pkg_manager_option is a (long-name, short-name, help) triple
    for full, short, desc in self.pkg_manager_options:
        # build '-s'/'--full' style flags, skipping empty names
        args = [
            dash + key
            for dash, key in zip(('-', '--'), (short, full))
            if key
        ]
        # default is singular, but for the argparsed version in our
        # runtime permits multiple packages.
        desc = desc.replace('Python package', 'Python package(s)')
        if not short:
            # long-only options may map onto a driver method named
            # '<binary>_<option>' (e.g. npm_install); if such a
            # callable exists, register it as a dispatchable action.
            f = getattr(self.cli_driver, '%s_%s' % (
                self.cli_driver.binary, full), None)
            if callable(f):
                count += 1
                actions.add_argument(
                    *args, help=desc, action=PackageManagerAction,
                    dest=self.action_key, const=(count, f)
                )
                # the first action found becomes the default action
                if self.default_action is None:
                    self.default_action = f
                continue  # pragma: no cover
        # plain boolean flag for options without a backing action
        argparser.add_argument(*args, help=desc, action='store_true')

    # positional: one or more Python package names feeding the
    # generation of the manager's package definition file.
    argparser.add_argument(
        'package_names', metavar=metavar('package'), nargs='+',
        help="python packages to be used for the generation of '%s'" % (
            self.cli_driver.pkgdef_filename,
        ),
    )
Other runtimes (or users of ArgumentParser) can pass their subparser into here to collect the arguments here for a subcommand.
entailment
def alphanum(columns, name=None, extended=False, isLast=False):
    """
    Creates the grammar for an Alphanumeric (A) field, accepting only
    the specified number of characters.

    By default Alphanumeric fields accept only ASCII characters,
    excluding lowercases.  If the extended flag is set to True, then
    non-ASCII characters are allowed, but the no ASCII lowercase
    constraint is kept.

    This can be a compulsory field, in which case the empty string is
    disallowed.  The text will be stripped of heading and trailing
    whitespaces.

    :param columns: number of columns for this field
    :param name: name for the field
    :param extended: indicates if this is the exceptional case where
        non-ASCII are allowed
    :param isLast: if True the field may be shorter than `columns`
    :return: grammar for this Alphanumeric field
    :raises ValueError: if columns is negative
    """
    if name is None:
        name = 'Alphanumeric Field'

    if columns < 0:
        # Can't have negative size.  Raise an informative ValueError
        # instead of a bare BaseException (still caught by existing
        # `except BaseException`/`except Exception` handlers).
        raise ValueError('Number of columns should not be negative')

    if isLast:
        # the last field of a line may be shorter than declared, so the
        # repetition count becomes a '1,columns' range
        columns = str('1,' + str(columns))

    # Checks if non-ASCII characters are allowed
    if not extended:
        # the character classes skip the ASCII lowercase range \x61-\x7A
        field = pp.Regex('([\x00-\x60]|[\x7B-\x7F]){' + str(columns) + '}')
    else:
        # lowercase ASCII is still forbidden, but any non-ASCII
        # character ([^\x00-\x7F]) is accepted
        field = pp.Regex('([\x00-\x09]|[\x0E-\x60]|[\x7B-\x7F]|[^\x00-\x7F]){' + str(columns) + '}')

    # strip heading and trailing whitespace from the matched text
    field.setParseAction(lambda s: s[0].strip())

    # Compulsory field validation action: a non-zero width must not
    # parse as an empty value
    if columns:
        field.addParseAction(lambda s: _check_not_empty(s[0]))

    # White spaces are not removed
    field.leaveWhitespace()

    field.setName(name)

    return field
Creates the grammar for an Alphanumeric (A) field, accepting only the specified number of characters. By default Alphanumeric fields accept only ASCII characters, excluding lowercases. If the extended flag is set to True, then non-ASCII characters are allowed, but the no ASCII lowercase constraint is kept. This can be a compulsory field, in which case the empty string is disallowed. The text will be stripped of heading and trailing whitespaces. :param columns: number of columns for this field :param name: name for the field :param extended: indicates if this is the exceptional case where non-ASCII are allowed :return: grammar for this Alphanumeric field
entailment
def _check_not_empty(string): """ Checks that the string is not empty. If it is empty an exception is raised, stopping the validation. This is used for compulsory alphanumeric fields. :param string: the field value """ string = string.strip() if len(string) == 0: message = 'The string should not be empty' raise pp.ParseException(message)
Checks that the string is not empty. If it is empty an exception is raised, stopping the validation. This is used for compulsory alphanumeric fields. :param string: the field value
entailment
def numeric(columns, name=None):
    """
    Creates the grammar for a Numeric (N) field, accepting only the
    specified number of characters.

    This version only allows integers.

    :param columns: number of columns for this field
    :param name: name for the field
    :return: grammar for the integer numeric field
    :raises ValueError: if columns is not positive
    """
    if name is None:
        name = 'Numeric Field'

    if columns <= 0:
        # Can't be empty or have negative size.  Raise ValueError with
        # a message (consistent with numeric_float) instead of a bare
        # BaseException; existing broad handlers still catch it.
        raise ValueError('Number of columns should be positive')

    # Only numbers are accepted
    field = pp.Regex('[0-9]{' + str(columns) + '}')

    # Parse action: convert the matched digits into an int
    field.setParseAction(_to_int)

    # White spaces are not removed
    field.leaveWhitespace()

    field.setName(name)

    return field
Creates the grammar for a Numeric (N) field, accepting only the specified number of characters. This version only allows integers. :param columns: number of columns for this field :param name: name for the field :return: grammar for the integer numeric field
entailment
def numeric_float(columns, nums_int, name=None):
    """
    Creates the grammar for a Numeric (N) field, accepting only the
    specified number of characters.  This version only allows floats.

    As nothing in the string itself indicates how many of the
    characters are for the integer and the decimal sections, this
    should be specified with the nums_int parameter.  This will
    indicate the number of characters, starting from the left, to be
    used for the integer value.  All the remaining ones will be used
    for the decimal value.

    :param columns: number of columns for this field
    :param nums_int: characters, counting from the left, for the
        integer value
    :param name: name for the field
    :return: grammar for the float numeric field
    :raises ValueError: on invalid column/integer counts
    """
    if name is None:
        name = 'Numeric Field'

    if columns <= 0:
        # Can't be empty or have negative size; ValueError instead of
        # a bare BaseException (messages preserved from the original).
        raise ValueError('Number of columns should be positive')

    if nums_int < 0:
        # Integer columns can't have negative size
        raise ValueError('Number of integer values should be positive or '
                         'zero')

    if columns < nums_int:
        # There are more integer numbers than columns
        message = 'The number of columns is %s and should be higher or ' \
                  'equal than the integers: %s' % (columns, nums_int)
        raise ValueError(message)

    # Basic field: exactly `columns` digits
    field = pp.Word(pp.nums, exact=columns)

    # Parse action: split the digits into integer/decimal sections
    field.setParseAction(lambda n: _to_numeric_float(n[0], nums_int))

    # Compulsory field validation action: value must not be negative
    field.addParseAction(lambda s: _check_above_value_float(s[0], 0))

    field.setName(name)

    return field
Creates the grammar for a Numeric (N) field, accepting only the specified number of characters. This version only allows floats. As nothing in the string itself indicates how many of the characters are for the integer and the decimal sections, this should be specified with the nums_int parameter. This will indicate the number of characters, starting from the left, to be used for the integer value. All the remaining ones will be used for the decimal value. :param columns: number of columns for this field :param name: name for the field :param nums_int: characters, counting from the left, for the integer value :return: grammar for the float numeric field
entailment
def _to_numeric_float(number, nums_int): """ Transforms a string into a float. The nums_int parameter indicates the number of characters, starting from the left, to be used for the integer value. All the remaining ones will be used for the decimal value. :param number: string with the number :param nums_int: characters, counting from the left, for the integer value :return: a float created from the string """ index_end = len(number) - nums_int return float(number[:nums_int] + '.' + number[-index_end:])
Transforms a string into a float. The nums_int parameter indicates the number of characters, starting from the left, to be used for the integer value. All the remaining ones will be used for the decimal value. :param number: string with the number :param nums_int: characters, counting from the left, for the integer value :return: a float created from the string
entailment
def _check_above_value_float(string, minimum): """ Checks that the number parsed from the string is above a minimum. This is used on compulsory numeric fields. If the value is not above the minimum an exception is thrown. :param string: the field value :param minimum: minimum value """ value = float(string) if value < minimum: message = 'The Numeric Field value should be above %s' % minimum raise pp.ParseException(message)
Checks that the number parsed from the string is above a minimum. This is used on compulsory numeric fields. If the value is not above the minimum an exception is thrown. :param string: the field value :param minimum: minimum value
entailment
def boolean(name=None):
    """
    Creates the grammar for a Boolean (B) field, accepting only 'Y'
    or 'N'.

    :param name: name for the field
    :return: grammar for the boolean field
    """
    field_label = 'Boolean Field' if name is None else name

    # only the two literal flags are accepted
    field = pp.Regex('[YN]')
    # map the matched character onto True/False
    field.setParseAction(lambda b: _to_boolean(b[0]))
    field.setName(field_label)

    # NOTE(review): unlike flag()/date()/time(), this grammar does not
    # call leaveWhitespace(); confirm whether that asymmetry is
    # intentional.
    return field
Creates the grammar for a Boolean (B) field, accepting only 'Y' or 'N' :param name: name for the field :return: grammar for the flag field
entailment
def _to_boolean(string): """ Transforms a string into a boolean value. If a value which is not 'Y' or 'N' is received, a ParseException is thrown. :param: string: the string to transform :return: True if the string is 'Y', False if it is 'N' """ if string == 'Y': result = True elif string == 'N': result = False else: raise pp.ParseException(string, msg='Is not a valid boolean value') return result
Transforms a string into a boolean value. If a value which is not 'Y' or 'N' is received, a ParseException is thrown. :param: string: the string to transform :return: True if the string is 'Y', False if it is 'N'
entailment
def flag(name=None):
    """
    Creates the grammar for a Flag (F) field, accepting only 'Y', 'N'
    or 'U'.

    :param name: name for the field
    :return: grammar for the flag field
    """
    field_label = name if name is not None else 'Flag Field'

    # a single character out of the three allowed flags; the raw
    # character is returned as-is (no boolean conversion here)
    field = pp.Regex('[YNU]')
    field.setName(field_label)
    # surrounding whitespace is significant and must not be skipped
    field.leaveWhitespace()
    return field
Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'. :param name: name for the field :return: grammar for the flag field
entailment
def date(name=None):
    """
    Creates the grammar for a Date (D) field, accepting only numbers
    in the YYYYMMDD pattern.

    :param name: name for the field
    :return: grammar for the date field
    """
    field_label = 'Date Field' if name is None else name

    # This regex allows values from 00000101 to 99991231: four year
    # digits, a month in 01-12 and a day in 01-31.
    pattern = ('[0-9][0-9][0-9][0-9](0[1-9]|1[0-2])'
               '(0[1-9]|[1-2][0-9]|3[0-1])')
    field = pp.Regex(pattern)

    # convert the matched text into a datetime.date
    field.setParseAction(
        lambda d: datetime.datetime.strptime(d[0], '%Y%m%d').date())

    field.setName(field_label)
    # White spaces are not removed
    field.leaveWhitespace()

    return field
Creates the grammar for a Date (D) field, accepting only numbers in a certain pattern. :param name: name for the field :return: grammar for the date field
entailment
def time(name=None):
    """
    Creates the grammar for a Time or Duration (T) field, accepting
    only numbers in the HHMMSS pattern.

    :param name: name for the field
    :return: grammar for the time field
    """
    field_label = 'Time Field' if name is None else name

    # This regex allows values from 000000 to 235959: an hour in
    # 00-23 followed by minutes and seconds in 00-59.
    field = pp.Regex('(0[0-9]|1[0-9]|2[0-3])[0-5][0-9][0-5][0-9]')

    # convert the matched text into a datetime.time
    field.setParseAction(
        lambda t: datetime.datetime.strptime(t[0], '%H%M%S').time())

    # White spaces are not removed
    field.leaveWhitespace()

    field.setName(field_label)

    return field
Creates the grammar for a Time or Duration (T) field, accepting only numbers in a certain pattern. :param name: name for the field :return: grammar for the date field
entailment
def lookup(values, name=None):
    """
    Creates the grammar for a Lookup (L) field, accepting only values
    from a list.

    Like in the Alphanumeric field, the result will be stripped of all
    heading and trailing whitespaces.

    :param values: values allowed
    :param name: name for the field
    :return: grammar for the lookup field
    :raises ValueError: if values is None
    """
    if name is None:
        name = 'Lookup Field'

    if values is None:
        # fixed the typo in the original error message ('can no be')
        raise ValueError('The values can not be None')

    # TODO: This should not be needed, it is just a patch. Fix this.
    # Unwrap pyparsing ParseResults into a plain list; plain
    # lists/tuples pass through unchanged (the original's
    # `except AttributeError: values = values` branch was a no-op).
    if hasattr(values, 'asList'):
        values = values.asList()

    # Only the specified values are allowed
    lookup_field = pp.oneOf(values)
    lookup_field.setName(name)
    # strip heading/trailing whitespace from the matched value
    lookup_field.setParseAction(lambda s: s[0].strip())
    lookup_field.leaveWhitespace()

    return lookup_field
Creates the grammar for a Lookup (L) field, accepting only values from a list. Like in the Alphanumeric field, the result will be stripped of all heading and trailing whitespaces. :param values: values allowed :param name: name for the field :return: grammar for the lookup field
entailment
def blank(columns=1, name=None):
    """
    Creates the grammar for a blank field.

    These are for constant empty strings which should be ignored, as
    they are used just as fillers.

    :param columns: number of columns, which is the required number of
        whitespaces
    :param name: name for the field
    :return: grammar for the blank field
    """
    if name is None:
        name = 'Blank Field'

    field = pp.Regex('[ ]{' + str(columns) + '}')
    field.leaveWhitespace()
    # ParserElement.suppress() returns a NEW suppressed element rather
    # than mutating in place; the original discarded the return value,
    # so the blank text leaked into parse results.  Keep the wrapper.
    field = field.suppress()
    field.setName(name)

    return field
Creates the grammar for a blank field. These are for constant empty strings which should be ignored, as they are used just as fillers. :param columns: number of columns, which is the required number of whitespaces :param name: name for the field :return: grammar for the blank field
entailment
def get_attribute(self, attribute, value=None, features=False):
    """This returns a list of GFF objects (or GFF Features) with the
    given attribute and if supplied, those attributes with the
    specified value

    :param attribute: The 'info' field attribute we are querying
    :param value: Optional keyword, only return attributes equal to this value
    :param features: Optional keyword, return GFF Features instead of GFF Objects
    :return: A list of GFF objects (or GFF features if requested)
    """
    if attribute in self.filters:
        # fast path: this attribute is pre-indexed in fast_attributes
        valid_gff_objects = self.fast_attributes[attribute] if not value else \
            [i for i in self.fast_attributes[attribute]
             if i.attributes.get(attribute, False) == value]
        if features:
            # map each object back to its feature via the id tag;
            # objects without an id are dropped
            valid_ids = [gff_object.attributes.get(self.id_tag, None)
                         for gff_object in valid_gff_objects]
            return [self.feature_map[gff_id] for gff_id in valid_ids if gff_id]
        else:
            return valid_gff_objects
    else:
        # slow path: scan every object of every feature for the
        # requested attribute
        valid_gff_objects = [gff_object
                             for gff_feature in self.feature_map.values()
                             for gff_object in gff_feature.features
                             if gff_object.attributes.get(attribute, False)]
        # optionally narrow down to the exact attribute value
        valid_gff_objects = valid_gff_objects if not value else \
            [gff_object for gff_object in valid_gff_objects
             if gff_object.attributes[attribute] == value]
        if features:
            valid_ids = [gff_object.attributes.get(self.id_tag, None)
                         for gff_object in valid_gff_objects]
            return [self.feature_map[gff_id] for gff_id in valid_ids if gff_id]
        else:
            return valid_gff_objects
This returns a list of GFF objects (or GFF Features) with the given attribute and if supplied, those attributes with the specified value :param attribute: The 'info' field attribute we are querying :param value: Optional keyword, only return attributes equal to this value :param features: Optional keyword, return GFF Features instead of GFF Objects :return: A list of GFF objects (or GFF features if requested)
entailment
def contains(self, seqid, start, end, overlap=True):
    """This returns a list of GFF objects which cover a specified location.

    :param seqid: The landmark identifier (usually a chromosome)
    :param start: The 1-based position of the start of the range we are querying
    :param end: The 1-based position of the end of the range we are querying
    :param overlap: A boolean value, if true we allow features to
        overlap the query range. For instance, overlap=True with the
        range (5,10), will return a GFF object spanning from (8,15).
        overlap=False will only return objects fully containing the range.
    :return: A list of GFF objects
    """
    intervals = self.positions.get(seqid, [])
    matches = []
    for gff_start, gff_end in intervals:
        if overlap:
            # any intersection of the two half-open ranges counts
            hit = not (end <= gff_start or start >= gff_end)
        else:
            # the stored interval must fully contain the query
            hit = gff_start <= start and gff_end >= end
        if hit:
            matches.extend(intervals[(gff_start, gff_end)])
    return matches
This returns a list of GFF objects which cover a specified location. :param seqid: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a GFF object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of GFF objects
entailment
def contains(self, chrom, start, end, overlap=True):
    """This returns a list of VCFEntry objects which cover a specified location.

    :param chrom: The landmark identifier (usually a chromosome)
    :param start: The 1-based position of the start of the range we are querying
    :param end: The 1-based position of the end of the range we are querying
    :param overlap: A boolean value, if true we allow features to
        overlap the query range. For instance, overlap=True with the
        range (5,10), will return a VCFEntry object spanning from
        (8,15). overlap=False will only return objects fully containing
        the range.
    :return: A list of VCFEntry objects
    """
    intervals = self.positions.get(chrom, [])
    matches = []
    for vcf_start, vcf_end in intervals:
        if overlap:
            # inclusive-boundary intersection (note: uses < / > rather
            # than the GFF variant's <= / >=)
            hit = not (end < vcf_start or start > vcf_end)
        else:
            # the stored interval must fully contain the query
            hit = vcf_start <= start and vcf_end >= end
        if hit:
            matches.extend(intervals[(vcf_start, vcf_end)])
    return matches
This returns a list of VCFEntry objects which cover a specified location. :param chrom: The landmark identifier (usually a chromosome) :param start: The 1-based position of the start of the range we are querying :param end: The 1-based position of the end of the range we are querying :param overlap: A boolean value, if true we allow features to overlap the query range. For instance, overlap=True with the range (5,10), will return a VCFEntry object spanning from (8,15). overlap=False will only return objects fully containing the range. :return: A list of VCFEntry objects
entailment
def remove_variants(self, variants): """Remove a list of variants from the positions we are scanning""" chroms = set([i.chrom for i in variants]) for chrom in chroms: if self.append_chromosome: chrom = 'chr%s' % chrom to_delete = [pos for pos in self.positions[chrom] if pos in variants] for pos in to_delete: del self.positions[chrom][pos]
Remove a list of variants from the positions we are scanning
entailment
def generate_handler_sourcepath( self, toolchain, spec, loaderplugin_sourcepath): """ The default implementation is a recursive lookup method, which subclasses may make use of. Subclasses must implement this to return a mapping of modnames the the absolute path of the desired sourcefiles. Example: return { 'text': '/tmp/src/example_module/text/index.js', 'json': '/tmp/src/example_module/json/index.js', } Subclasses of this implementation must accept the same arguments, and they should invoke this implementation via super and merge its results (e.g. using dict.update) with one provided by this one. Also, this implementation depends on a correct unwrap implementation for the loaderplugin at hand, if required. """ # since the loaderplugin_sourcepath values is the complete # modpath with the loader plugin, the values must be stripped # before making use of the filtering helper function for # grouping the inner mappings fake_spec = {} registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY) if registry: fake_spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry spec_update_sourcepath_filter_loaderplugins(fake_spec, { self.unwrap(k): v for k, v in loaderplugin_sourcepath.items() }, 'current', 'nested') result = {} for plugin_name, sourcepath in fake_spec['nested'].items(): if sourcepath == loaderplugin_sourcepath: logger.warning( "loaderplugin '%s' extracted same sourcepath of while " "locating chain loaders: %s; skipping", self.name, sourcepath ) continue plugin = self.registry.get_record(plugin_name) if not plugin: logger.warning( "loaderplugin '%s' from registry '%s' cannot find " "sibling loaderplugin handler for '%s'; processing " "may fail for the following nested/chained sources: " "%s", self.name, self.registry.registry_name, plugin_name, sourcepath, ) continue result.update(plugin.generate_handler_sourcepath( toolchain, spec, sourcepath)) return result
The default implementation is a recursive lookup method, which subclasses may make use of. Subclasses must implement this to return a mapping of modnames the the absolute path of the desired sourcefiles. Example: return { 'text': '/tmp/src/example_module/text/index.js', 'json': '/tmp/src/example_module/json/index.js', } Subclasses of this implementation must accept the same arguments, and they should invoke this implementation via super and merge its results (e.g. using dict.update) with one provided by this one. Also, this implementation depends on a correct unwrap implementation for the loaderplugin at hand, if required.
entailment
def generate_handler_sourcepath(
        self, toolchain, spec, loaderplugin_sourcepath):
    """
    Attempt to locate the plugin source; returns a mapping of
    modnames to the absolute path of the located sources.
    """
    # TODO calmjs-4.0.0 consider formalizing to the method instead
    # resolve the npm package that provides this loader plugin, either
    # declared on the class or discovered dynamically
    npm_pkg_name = (
        self.node_module_pkg_name
        if self.node_module_pkg_name else
        self.find_node_module_pkg_name(toolchain, spec)
    )
    if not npm_pkg_name:
        cls = type(self)
        registry_name = getattr(
            self.registry, 'registry_name', '<invalid_registry/handler>')
        # tailor the error: the base class needs subclassing, a
        # subclass that still failed is itself suspect
        if cls is NPMLoaderPluginHandler:
            logger.error(
                "no npm package name specified or could be resolved for "
                "loaderplugin '%s' of registry '%s'; please subclass "
                "%s:%s such that the npm package name become specified",
                self.name, registry_name, cls.__module__, cls.__name__,
            )
        else:
            logger.error(
                "no npm package name specified or could be resolved for "
                "loaderplugin '%s' of registry '%s'; implementation of "
                "%s:%s may be at fault",
                self.name, registry_name, cls.__module__, cls.__name__,
            )
        return {}

    working_dir = spec.get(WORKING_DIR, None)
    if working_dir is None:
        logger.info(
            "attempting to derive working directory using %s, as the "
            "provided spec is missing working_dir", toolchain
        )
        working_dir = toolchain.join_cwd()

    logger.debug("deriving npm loader plugin from '%s'", working_dir)
    target = locate_package_entry_file(working_dir, npm_pkg_name)
    if target:
        logger.debug('picked %r for loader plugin %r', target, self.name)
        # use the parent recursive lookup.
        result = super(
            NPMLoaderPluginHandler, self).generate_handler_sourcepath(
                toolchain, spec, loaderplugin_sourcepath)
        result.update({self.name: target})
        return result

    # the expected package file is not found, use the logger to show
    # why.
    # Also note that any inner/chained loaders will be dropped.
    if exists(join(
            working_dir, 'node_modules', npm_pkg_name, 'package.json')):
        # package is installed but its manifest has no usable entry point
        logger.warning(
            "'package.json' for the npm package '%s' does not contain a "
            "valid entry point: sources required for loader plugin '%s' "
            "cannot be included automatically; the build process may fail",
            npm_pkg_name, self.name,
        )
    else:
        # package is simply not installed in this working directory
        logger.warning(
            "could not locate 'package.json' for the npm package '%s' "
            "which was specified to contain the loader plugin '%s' in the "
            "current working directory '%s'; the missing package may "
            "be installed by running 'npm install %s' for the mean time "
            "as a workaround, though the package that owns that source "
            "file that has this requirement should declare an explicit "
            "dependency; the build process may fail",
            npm_pkg_name, self.name, working_dir, npm_pkg_name,
        )
    return {}
Attempt to locate the plugin source; returns a mapping of modnames to the absolute path of the located sources.
entailment
def store_records_for_package(self, entry_point, records): """ Given that records are based on the parent, and the same entry point(s) will reference those same records multiple times, the actual stored records must be limited. """ pkg_records_entry = self._dist_to_package_module_map(entry_point) pkg_records_entry.extend( rec for rec in records if rec not in pkg_records_entry) # TODO figure out a more efficient way to do this with a bit # more reuse. if entry_point.dist is not None: if entry_point.dist.project_name not in self.package_loader_map: self.package_loader_map[entry_point.dist.project_name] = [] self.package_loader_map[entry_point.dist.project_name].append( entry_point.name)
Given that records are based on the parent, and the same entry point(s) will reference those same records multiple times, the actual stored records must be limited.
entailment
def default_file_encoder():
    """
    Build the default CWR file encoder.

    Loads the table and common field configurations, resolves the
    allowed-value lists for table-backed fields, and wires everything
    into a CwrFileEncoder.

    :return: a default CwrFileEncoder instance
    """
    config = CWRConfiguration()

    # merge the table and common field configurations
    field_configs = config.load_field_config('table')
    field_configs.update(config.load_field_config('common'))

    # fill in the concrete allowed values for every field that names a
    # source table
    field_values = CWRTables()
    for field_entry in field_configs.values():
        if 'source' in field_entry:
            field_entry['values'] = field_values.get_data(
                field_entry['source'])

    record_configs = config.load_record_config('common')

    return CwrFileEncoder(record_configs, field_configs)
Get default encoder cwr file :return:
entailment
def encode(self, tag): """ Parses a CWR file name from a FileTag object. The result will be a string following the format CWyynnnnsss_rrr.Vxx, where the numeric sequence will have the length set on the encoder's constructor. :param tag: FileTag to parse :return: a string file name parsed from the FileTag """ # Acquires sequence number sequence = str(tag.sequence_n) # If the sequence is bigger the max, it is cut if len(sequence) > self._sequence_l: sequence = sequence[:self._sequence_l] # If the sequence is smaller the max, it is padded with zeroes while len(sequence) < self._sequence_l: sequence = '0' + sequence # Acquires version version = str(tag.version) # If the version is too long only the first and last number are taken, # to remove decimal separator if len(version) > 2: version = version[:1] + version[-1:] # If the version is too short, it is padded with zeroes while len(version) < 2: version = '0' + version # Acquires year # Only the two last digits of the year are used year = str(tag.year)[-2:] # Acquires sender and receiver sender = tag.sender[:3] receiver = tag.receiver[:3] rule = self._header + year + sequence + sender rule = rule + self._ip_delimiter + receiver + ".V" + version return rule
Parses a CWR file name from a FileTag object. The result will be a string following the format CWyynnnnsss_rrr.Vxx, where the numeric sequence will have the length set on the encoder's constructor. :param tag: FileTag to parse :return: a string file name parsed from the FileTag
entailment
def encode(self, transmission): """ Encodes the data, creating a CWR structure from an instance from the domain model. :param entity: the instance to encode :return: a cwr string structure created from the received data """ data = '' data += self._record_encode(transmission.header) for group in transmission.groups: data += self._record_encode(group.group_header) for transaction in group.transactions: for record in transaction: data += self._record_encode(record) data += self._record_encode(group.group_trailer) data += self._record_encode(transmission.trailer) return data
Encodes the data, creating a CWR structure from an instance from the domain model. :param entity: the instance to encode :return: a cwr string structure created from the received data
entailment
def getScans(self, modifications=True, fdr=True): """ get a random scan """ if not self.scans: for i in self: yield i else: for i in self.scans.values(): yield i yield None
get a random scan
entailment
def getScan(self, title, peptide=None): """ allows random lookup """ if self.ra.has_key(title): self.filename.seek(self.ra[title][0],0) toRead = self.ra[title][1]-self.ra[title][0] info = self.filename.read(toRead) scan = self.parseScan(info) else: return None return scan
allows random lookup
entailment
def parseScan(self, scan):
    """
    Parse one MGF scan body into a ScanObject.

    All input follows the BEGIN IONS row and ends before END IONS.
    A scan is only returned once its CHARGE, PEPMASS and TITLE headers
    have all been seen; otherwise None is returned.
    """
    setupScan = True  # NOTE(review): never read afterwards
    foundCharge = False
    foundMass = False
    foundTitle = False
    scanObj = ScanObject()
    # MGF bodies are fragmentation spectra, hence MS level 2
    scanObj.ms_level = 2
    for row in scan.split('\n'):
        if not row:
            continue
        entry = row.strip().split('=')
        if len(entry) >= 2:
            # header line of the form KEY=VALUE
            if entry[0] == 'PEPMASS':
                # assumes PEPMASS carries a single float value — a
                # 'mass intensity' pair would break float(); TODO confirm
                scanObj.mass = float(entry[1])
                foundMass = True
            elif entry[0] == 'CHARGE':
                # charge is kept as the raw string (e.g. '2+')
                scanObj.charge = entry[1]
                foundCharge = True
            elif entry[0] == 'TITLE':
                # if self.titleMap:
                #     pos = entry[1].find(',')
                #     title = self.titleMap[int(entry[1][:entry[1].find(',')])]
                # else:
                # re-join in case the title itself contained '='
                title = '='.join(entry[1:])
                foundTitle = True
                scanObj.title = title
                scanObj.id = title
            elif entry[0] == 'RTINSECONDS':
                scanObj.rt = float(entry[1])
        else:
            # peak line: m/z and intensity separated by self.scanSplit
            mz, intensity = self.scanSplit.split(row.strip())
            scanObj.scans.append((float(mz), float(intensity)))
    if foundCharge and foundMass and foundTitle:
        return scanObj
    return None
All input follows the BEGIN IONS row and ends before END IONS
entailment
def getScan(self, specId, peptide=None): """ get a random scan """ sql = self.base_sql + " where sh.SpectrumID = %d and p.Sequence = '%s'"%(int(specId),peptide) self.cur.execute(sql) i = self.cur.fetchone() if not i: return None scan = self.parseFullScan(i) scan.spectrumId = specId return scan
get a random scan
entailment
def getScans(self, modifications=False, fdr=True): """ get a random scan """ if fdr: sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} and p.SearchEngineRank <= {} {}".format(self.clvl, self.srank, self.extra) try: self.cur.execute(sql) except sqlite3.OperationalError: sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} {}".format(self.clvl, self.extra) self.cur.execute(sql) else: sql = self.base_sql self.cur.execute(sql) while True: # results = self.cur.fetchmany(1000) # if not results: # break try: tup = self.cur.fetchone() except: sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc())) else: while tup is not None: if tup is None: break if tup[1] is not None: scan = self.parseFullScan(tup, modifications=modifications) scan.spectrumId = tup[3] yield scan try: tup = self.cur.fetchone() except: sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc())) if tup is None: break yield None
get a random scan
entailment
def parseFullScan(self, i, modifications=False): """ parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml """ scanObj = PeptideObject() peptide = str(i[1]) pid=i[2] scanObj.acc = self.protein_map.get(i[4], i[4]) if pid is None: return None if modifications: sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s'%pid for row in self.conn.execute(sql): scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0]) else: mods = self.mods.get(int(pid)) if mods is not None: for modId, modPosition in zip(mods[0].split(','),mods[1].split(',')): modEntry = self.modTable[str(modId)] scanObj.addModification(peptide[int(modPosition)], modPosition, modEntry[1], modEntry[0]) tmods = self.tmods.get(int(pid)) if tmods is not None: for modIds in tmods: for modId in modIds.split(','): modEntry = self.modTable[str(modId)] scanObj.addModification('[', 0, modEntry[1], modEntry[0]) scanObj.peptide = peptide if self.decompressScanInfo(scanObj, i[0]): return scanObj return None
parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml
entailment
def getScans(self, modifications=True, fdr=True): """ get a random scan """ if fdr: sql = "select sp.Spectrum, p.Sequence, p.PeptideID, p.SpectrumID from spectrumheaders sh left join spectra sp on (sp.UniqueSpectrumID=sh.UniqueSpectrumID) left join peptides p on (sh.SpectrumID=p.SpectrumID) WHERE p.ConfidenceLevel >= %d and p.SearchEngineRank <= %d" % (self.clvl, self.srank) try: self.cur.execute(sql) except sqlite3.OperationalError: sql = "select sp.Spectrum, p.Sequence, p.PeptideID, p.SpectrumID from spectrumheaders sh left join spectra sp on (sp.UniqueSpectrumID=sh.UniqueSpectrumID) left join peptides p on (sh.SpectrumID=p.SpectrumID) WHERE p.ConfidenceLevel >= %d" % self.clvl self.cur.execute(sql) else: sql = "select sp.Spectrum, p.Sequence, p.PeptideID, p.SpectrumID from spectrumheaders sh left join spectra sp on (sp.UniqueSpectrumID=sh.UniqueSpectrumID) left join peptides p on (sh.SpectrumID=p.SpectrumID)" self.cur.execute(sql) while True: results = self.cur.fetchmany(1000) if not results: break for tup in results: scan = self.parseFullScan(tup, modifications=modifications) scan.spectrumId = tup[3] yield scan yield None
get a random scan
entailment
def parseFullScan(self, i, modifications=True): """ parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml """ scanObj = PeptideObject() peptide = str(i[1]) pid=i[2] if modifications: sql = 'select aam.ModificationName,pam.Position,aam.DeltaMass from peptidesaminoacidmodifications pam left join aminoacidmodifications aam on (aam.AminoAcidModificationID=pam.AminoAcidModificationID) where pam.PeptideID=%s'%pid for row in self.conn.execute(sql): scanObj.addModification(peptide[row[1]], str(row[1]), str(row[2]), row[0]) scanObj.peptide = peptide if self.decompressScanInfo(scanObj, i[0]): return scanObj return None
parses scan info for giving a Spectrum Obj for plotting. takes significantly longer since it has to unzip/parse xml
entailment
def _cls_lookup_dist(cls): """ Attempt to resolve the distribution from the provided class in the most naive way - this assumes the Python module path to the class contains the name of the package that provided the module and class. """ frags = cls.__module__.split('.') for name in ('.'.join(frags[:x]) for x in range(len(frags), 0, -1)): dist = find_pkg_dist(name) if dist: return dist
Attempt to resolve the distribution from the provided class in the most naive way - this assumes the Python module path to the class contains the name of the package that provided the module and class.
entailment
def verify_builder(builder): """ To ensure that the provided builder has a signature that is at least compatible. """ try: d = getcallargs(builder, package_names=[], export_target='some_path') except TypeError: return False return d == {'package_names': [], 'export_target': 'some_path'}
To ensure that the provided builder has a signature that is at least compatible.
entailment
def extract_builder_result(builder_result, toolchain_cls=Toolchain): """ Extract the builder result to produce a ``Toolchain`` and ``Spec`` instance. """ try: toolchain, spec = builder_result except Exception: return None, None if not isinstance(toolchain, toolchain_cls) or not isinstance(spec, Spec): return None, None return toolchain, spec
Extract the builder result to produce a ``Toolchain`` and ``Spec`` instance.
entailment
def trace_toolchain(toolchain): """ Trace the versions of the involved packages for the provided toolchain instance. """ pkgs = [] for cls in getmro(type(toolchain)): if not issubclass(cls, Toolchain): continue dist = _cls_lookup_dist(cls) value = { 'project_name': dist.project_name, 'version': dist.version, } if dist else {} key = '%s:%s' % (cls.__module__, cls.__name__) pkgs.append({key: value}) return pkgs
Trace the versions of the involved packages for the provided toolchain instance.
entailment
def get_artifact_filename(self, package_name, artifact_name): """ Similar to pkg_resources.resource_filename, however this works with the information cached in this registry instance, and arguments are not quite the same. Arguments: package_name The name of the package to get the artifact from artifact_name The exact name of the artifact. Returns the path of where the artifact should be if it has been declared, otherwise None. """ project_name = self.packages.normalize(package_name) return self.records.get((project_name, artifact_name))
Similar to pkg_resources.resource_filename, however this works with the information cached in this registry instance, and arguments are not quite the same. Arguments: package_name The name of the package to get the artifact from artifact_name The exact name of the artifact. Returns the path of where the artifact should be if it has been declared, otherwise None.
entailment
def resolve_artifacts_by_builder_compat( self, package_names, builder_name, dependencies=False): """ Yield the list of paths to the artifacts in the order of the dependency resolution Arguments: package_names The names of the packages to probe the dependency graph, to be provided as a list of strings. artifact_name The exact name of the artifact. dependencies Trace dependencies. Default is off. Returns the path of where the artifact should be if it has been declared, otherwise None. """ paths = self.compat_builders.get(builder_name) if not paths: # perhaps warn, but just return return resolver = ( # traces dependencies for distribution. find_packages_requirements_dists if dependencies else # just get grabs the distribution. pkg_names_to_dists ) for distribution in resolver(package_names): path = paths.get(distribution.project_name) if path: yield path
Yield the list of paths to the artifacts in the order of the dependency resolution Arguments: package_names The names of the packages to probe the dependency graph, to be provided as a list of strings. artifact_name The exact name of the artifact. dependencies Trace dependencies. Default is off. Returns the path of where the artifact should be if it has been declared, otherwise None.
entailment
def get_artifact_metadata(self, package_name): """ Return metadata of the artifacts built through this registry. """ filename = self.metadata.get(package_name) if not filename or not exists(filename): return {} with open(filename, encoding='utf8') as fd: contents = fd.read() try: is_json_compat(contents) except ValueError: logger.info("artifact metadata file '%s' is invalid", filename) return {} return json.loads(contents)
Return metadata of the artifacts built through this registry.
entailment
def generate_metadata_entry(self, entry_point, toolchain, spec): """ After the toolchain and spec have been executed, this may be called to generate the artifact export entry for persistence into the metadata file. """ export_target = spec['export_target'] toolchain_bases = trace_toolchain(toolchain) toolchain_bin_path = spec.get(TOOLCHAIN_BIN_PATH) toolchain_bin = ([ basename(toolchain_bin_path), # bin_name get_bin_version_str(toolchain_bin_path), # bin_version ] if toolchain_bin_path else []) return {basename(export_target): { 'toolchain_bases': toolchain_bases, 'toolchain_bin': toolchain_bin, 'builder': '%s:%s' % ( entry_point.module_name, '.'.join(entry_point.attrs)), }}
After the toolchain and spec have been executed, this may be called to generate the artifact export entry for persistence into the metadata file.
entailment
def iter_records_for(self, package_name): """ Iterate records for a specific package. """ entry_points = self.packages.get(package_name, NotImplemented) if entry_points is NotImplemented: logger.debug( "package '%s' has not declared any entry points for the '%s' " "registry for artifact construction", package_name, self.registry_name, ) return iter([]) logger.debug( "package '%s' has declared %d entry points for the '%s' " "registry for artifact construction", package_name, len(entry_points), self.registry_name, ) return iter(entry_points.values())
Iterate records for a specific package.
entailment
def generate_builder(self, entry_point, export_target): """ Yields exactly one builder if both the provided entry point and export target satisfies the checks required. """ try: builder = entry_point.resolve() except ImportError: logger.error( "unable to import the target builder for the entry point " "'%s' from package '%s' to generate artifact '%s'", entry_point, entry_point.dist, export_target, ) return if not self.verify_builder(builder): logger.error( "the builder referenced by the entry point '%s' " "from package '%s' has an incompatible signature", entry_point, entry_point.dist, ) return # CLEANUP see deprecation notice below verifier = self.verify_export_target(export_target) if not verifier: logger.error( "the export target '%s' has been rejected", export_target) return toolchain, spec = self.extract_builder_result(builder( [entry_point.dist.project_name], export_target=export_target)) if not toolchain: logger.error( "the builder referenced by the entry point '%s' " "from package '%s' failed to produce a valid " "toolchain", entry_point, entry_point.dist, ) return if spec.get(EXPORT_TARGET) != export_target: logger.error( "the builder referenced by the entry point '%s' " "from package '%s' failed to produce a spec with the " "expected export_target", entry_point, entry_point.dist, ) return if callable(verifier): warnings.warn( "%s:%s.verify_export_target returned a callable, which " "will no longer be passed to spec.advise by calmjs-4.0.0; " "please instead override 'setup_export_location' or " "'prepare_export_location' in that class" % ( self.__class__.__module__, self.__class__.__name__), DeprecationWarning ) spec.advise(BEFORE_PREPARE, verifier, export_target) else: spec.advise( BEFORE_PREPARE, self.prepare_export_location, export_target) yield entry_point, toolchain, spec
Yields exactly one builder if both the provided entry point and export target satisfies the checks required.
entailment
def execute_builder(self, entry_point, toolchain, spec): """ Accepts the arguments provided by the builder and executes them. """ toolchain(spec) if not exists(spec['export_target']): logger.error( "the entry point '%s' from package '%s' failed to " "generate an artifact at '%s'", entry_point, entry_point.dist, spec['export_target'] ) return {} return self.generate_metadata_entry(entry_point, toolchain, spec)
Accepts the arguments provided by the builder and executes them.
entailment
def process_package(self, package_name): """ Build artifacts declared for the given package. """ metadata = super(ArtifactRegistry, self).process_package(package_name) if metadata: self.update_artifact_metadata(package_name, metadata)
Build artifacts declared for the given package.
entailment
def alphanum_variable(min_size, max_size, name=None): """ Creates the grammar for an alphanumeric code where the size ranges between two values. :param min_size: minimum size :param max_size: maximum size :param name: name for the field :return: grammar for an alphanumeric field of a variable size """ if name is None: name = 'Alphanumeric Field' if min_size < 0: # Can't have negative min raise BaseException() if max_size < min_size: # Max can't be lower than min raise BaseException() field = pp.Word(pp.alphanums, min=min_size, max=max_size) # Parse action field.setParseAction(lambda s: s[0].strip()) # White spaces are not removed field.leaveWhitespace() # Name field.setName(name) return field
Creates the grammar for an alphanumeric code where the size ranges between two values. :param min_size: minimum size :param max_size: maximum size :param name: name for the field :return: grammar for an alphanumeric field of a variable size
entailment
def year(columns, name=None): """ Creates the grammar for a field containing a year. :param columns: the number of columns for the year :param name: the name of the field :return: """ if columns < 0: # Can't have negative size raise BaseException() field = numeric(columns, name) # Parse action field.addParseAction(_to_year) return field
Creates the grammar for a field containing a year. :param columns: the number of columns for the year :param name: the name of the field :return:
entailment
def is_json_compat(value): """ Check that the value is either a JSON decodable string or a dict that can be encoded into a JSON. Raises ValueError when validation fails. """ try: value = json.loads(value) except ValueError as e: raise ValueError('JSON decoding error: ' + str(e)) except TypeError: # Check that the value can be serialized back into json. try: json.dumps(value) except TypeError as e: raise ValueError( 'must be a JSON serializable object: ' + str(e)) if not isinstance(value, dict): raise ValueError( 'must be specified as a JSON serializable dict or a ' 'JSON deserializable string' ) return True
Check that the value is either a JSON decodable string or a dict that can be encoded into a JSON. Raises ValueError when validation fails.
entailment
def validate_json_field(dist, attr, value): """ Check for json validity. """ try: is_json_compat(value) except ValueError as e: raise DistutilsSetupError("%r %s" % (attr, e)) return True
Check for json validity.
entailment
def validate_line_list(dist, attr, value): """ Validate that the value is compatible """ # does not work as reliably in Python 2. if isinstance(value, str): value = value.split() value = list(value) try: check = (' '.join(value)).split() if check == value: return True except Exception: pass raise DistutilsSetupError("%r must be a list of valid identifiers" % attr)
Validate that the value is compatible
entailment
def write_json_file(argname, cmd, basename, filename): """ Write JSON captured from the defined argname into the package's egg-info directory using the specified filename. """ value = getattr(cmd.distribution, argname, None) if isinstance(value, dict): value = json.dumps( value, indent=4, sort_keys=True, separators=(',', ': ')) cmd.write_or_delete_file(argname, filename, value, force=True)
Write JSON captured from the defined argname into the package's egg-info directory using the specified filename.
entailment
def write_line_list(argname, cmd, basename, filename): """ Write out the retrieved value as list of lines. """ values = getattr(cmd.distribution, argname, None) if isinstance(values, list): values = '\n'.join(values) cmd.write_or_delete_file(argname, filename, values, force=True)
Write out the retrieved value as list of lines.
entailment
def find_pkg_dist(pkg_name, working_set=None): """ Locate a package's distribution by its name. """ working_set = working_set or default_working_set req = Requirement.parse(pkg_name) return working_set.find(req)
Locate a package's distribution by its name.
entailment
def convert_package_names(package_names): """ Convert package names, which can be a string of a number of package names or requirements separated by spaces. """ results = [] errors = [] for name in ( package_names.split() if hasattr(package_names, 'split') else package_names): try: Requirement.parse(name) except ValueError: errors.append(name) else: results.append(name) return results, errors
Convert package names, which can be a string of a number of package names or requirements separated by spaces.
entailment
def find_packages_requirements_dists(pkg_names, working_set=None): """ Return the entire list of dependency requirements, reversed from the bottom. """ working_set = working_set or default_working_set requirements = [ r for r in (Requirement.parse(req) for req in pkg_names) if working_set.find(r) ] return list(reversed(working_set.resolve(requirements)))
Return the entire list of dependency requirements, reversed from the bottom.
entailment
def find_packages_parents_requirements_dists(pkg_names, working_set=None): """ Leverages the `find_packages_requirements_dists` but strip out the distributions that matches pkg_names. """ dists = [] # opting for a naive implementation targets = set(pkg_names) for dist in find_packages_requirements_dists(pkg_names, working_set): if dist.project_name in targets: continue dists.append(dist) return dists
Leverages the `find_packages_requirements_dists` but strip out the distributions that matches pkg_names.
entailment
def read_dist_egginfo_json(dist, filename=DEFAULT_JSON): """ Safely get a json within an egginfo from a distribution. """ # use the given package's distribution to acquire the json file. if not dist.has_metadata(filename): logger.debug("no '%s' for '%s'", filename, dist) return try: result = dist.get_metadata(filename) except IOError: logger.error("I/O error on reading of '%s' for '%s'.", filename, dist) return try: obj = json.loads(result) except (TypeError, ValueError): logger.error( "the '%s' found in '%s' is not a valid json.", filename, dist) return logger.debug("found '%s' for '%s'.", filename, dist) return obj
Safely get a json within an egginfo from a distribution.
entailment
def read_egginfo_json(pkg_name, filename=DEFAULT_JSON, working_set=None): """ Read json from egginfo of a package identified by `pkg_name` that's already installed within the current Python environment. """ working_set = working_set or default_working_set dist = find_pkg_dist(pkg_name, working_set=working_set) return read_dist_egginfo_json(dist, filename)
Read json from egginfo of a package identified by `pkg_name` that's already installed within the current Python environment.
entailment
def flatten_dist_egginfo_json( source_dists, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None): """ Flatten a distribution's egginfo json, with the depended keys to be flattened. Originally this was done for this: Resolve a distribution's (dev)dependencies through the working set and generate a flattened version package.json, returned as a dict, from the resolved distributions. Default working set is the one from pkg_resources. The generated package.json dict is done by grabbing all package.json metadata from all parent Python packages, starting from the highest level and down to the lowest. The current distribution's dependencies will be layered on top along with its other package information. This has the effect of child packages overriding node/npm dependencies which is by the design of this function. If nested dependencies are desired, just rely on npm only for all dependency management. Flat is better than nested. """ working_set = working_set or default_working_set obj = {} # TODO figure out the best way to explicitly report back to caller # how the keys came to be (from which dist). Perhaps create a # detailed function based on this, retain this one to return the # distilled results. depends = {dep: {} for dep in dep_keys} # Go from the earliest package down to the latest one, as we will # flatten children's d(evD)ependencies on top of parent's. for dist in source_dists: obj = read_dist_egginfo_json(dist, filename) if not obj: continue logger.debug("merging '%s' for required '%s'", filename, dist) for dep in dep_keys: depends[dep].update(obj.get(dep, {})) if obj is None: # top level object does not have egg-info defined return depends for dep in dep_keys: # filtering out all the nulls. obj[dep] = {k: v for k, v in depends[dep].items() if v is not None} return obj
Flatten a distribution's egginfo json, with the depended keys to be flattened. Originally this was done for this: Resolve a distribution's (dev)dependencies through the working set and generate a flattened version package.json, returned as a dict, from the resolved distributions. Default working set is the one from pkg_resources. The generated package.json dict is done by grabbing all package.json metadata from all parent Python packages, starting from the highest level and down to the lowest. The current distribution's dependencies will be layered on top along with its other package information. This has the effect of child packages overriding node/npm dependencies which is by the design of this function. If nested dependencies are desired, just rely on npm only for all dependency management. Flat is better than nested.
entailment
def flatten_egginfo_json( pkg_names, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None): """ A shorthand calling convention where the package name is supplied instead of a distribution. Originally written for this: Generate a flattened package.json with packages `pkg_names` that are already installed within the current Python environment (defaults to the current global working_set which should have been set up correctly by pkg_resources). """ working_set = working_set or default_working_set # Ensure only grabbing packages that exists in working_set dists = find_packages_requirements_dists( pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=filename, dep_keys=dep_keys, working_set=working_set)
A shorthand calling convention where the package name is supplied instead of a distribution. Originally written for this: Generate a flattened package.json with packages `pkg_names` that are already installed within the current Python environment (defaults to the current global working_set which should have been set up correctly by pkg_resources).
entailment
def build_helpers_egginfo_json( json_field, json_key_registry, json_filename=None): """ Return a tuple of functions that will provide the usage of the JSON egginfo based around the provided field. """ json_filename = ( json_field + '.json' if json_filename is None else json_filename) # Default calmjs core implementation specific functions, to be used by # integrators intended to use this as a distribution. def get_extras_json(pkg_names, working_set=None): """ Only extract the extras_json information for the given packages 'pkg_names'. """ working_set = working_set or default_working_set dep_keys = set(get(json_key_registry).iter_records()) dists = pkg_names_to_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def _flatten_extras_json(pkg_names, find_dists, working_set): # registry key must be explicit here as it was designed for this. dep_keys = set(get(json_key_registry).iter_records()) dists = find_dists(pkg_names, working_set=working_set) return flatten_dist_egginfo_json( dists, filename=json_filename, dep_keys=dep_keys, working_set=working_set ) def flatten_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_requirements_dists, working_set) def flatten_parents_extras_json(pkg_names, working_set=None): """ Traverses through the dependency graph of packages 'pkg_names' and flattens all the egg_info json information for parents of the specified packages. """ working_set = working_set or default_working_set return _flatten_extras_json( pkg_names, find_packages_parents_requirements_dists, working_set) write_extras_json = partial(write_json_file, json_field) return ( get_extras_json, flatten_extras_json, flatten_parents_extras_json, write_extras_json, )
Return a tuple of functions that will provide the usage of the JSON egginfo based around the provided field.
entailment
def build_helpers_module_registry_dependencies(registry_name='calmjs.module'): """ Return a tuple of funtions that will provide the functions that return the relevant sets of module registry records based on the dependencies defined for the provided packages. """ def get_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Get dependencies for the given package names from module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve the exported location for just the package. """ working_set = working_set or default_working_set registry = get(registry_name) if not isinstance(registry, BaseModuleRegistry): return {} result = {} for pkg_name in pkg_names: result.update(registry.get_records_for_package(pkg_name)) return result def _flatten_module_registry_dependencies( pkg_names, registry_name, find_dists, working_set): """ Flatten dependencies for the given package names from module registry identified by registry name using the find_dists function on the given working_set. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. """ result = {} registry = get(registry_name) if not isinstance(registry, BaseModuleRegistry): return result dists = find_dists(pkg_names, working_set=working_set) for dist in dists: result.update(registry.get_records_for_package(dist.project_name)) return result def flatten_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Flatten dependencies for the specified packages from the module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. 
""" working_set = working_set or default_working_set return _flatten_module_registry_dependencies( pkg_names, registry_name, find_packages_requirements_dists, working_set) def flatten_parents_module_registry_dependencies( pkg_names, registry_name=registry_name, working_set=None): """ Flatten dependencies for the parents of the specified packages from the module registry identified by registry name. For the given packages 'pkg_names' and the registry identified by 'registry_name', resolve and flatten all the exported locations. """ working_set = working_set or default_working_set return _flatten_module_registry_dependencies( pkg_names, registry_name, find_packages_parents_requirements_dists, working_set) return ( get_module_registry_dependencies, flatten_module_registry_dependencies, flatten_parents_module_registry_dependencies, )
Return a tuple of funtions that will provide the functions that return the relevant sets of module registry records based on the dependencies defined for the provided packages.
entailment
def has_calmjs_artifact_declarations(cmd, registry_name='calmjs.artifacts'): """ For a distutils command to verify that the artifact build step is possible. """ return any(get(registry_name).iter_records_for( cmd.distribution.get_name()))
For a distutils command to verify that the artifact build step is possible.
entailment
def build_calmjs_artifacts(dist, key, value, cmdclass=BuildCommand): """ Trigger the artifact build process through the setuptools. """ if value is not True: return build_cmd = dist.get_command_obj('build') if not isinstance(build_cmd, cmdclass): logger.error( "'build' command in Distribution is not an instance of " "'%s:%s' (got %r instead)", cmdclass.__module__, cmdclass.__name__, build_cmd) return build_cmd.sub_commands.append((key, has_calmjs_artifact_declarations))
Trigger the artifact build process through the setuptools.
entailment
def get_rule(self, field_id): """ Returns the rule for the field identified by the id. If it is set as not being compulsory, the rule will be adapted to accept string composed only of white characters. :param field_id: unique id in the system for the field :return: the rule of a field """ if field_id in self._fields: # Field already exists field = self._fields[field_id] else: # Field does not exist # It is created field = self._create_field(field_id) # Field is saved self._fields[field_id] = field return field
Returns the rule for the field identified by the id. If it is set as not being compulsory, the rule will be adapted to accept string composed only of white characters. :param field_id: unique id in the system for the field :return: the rule of a field
entailment
def _create_field(self, field_id): """ Creates the field with the specified parameters. :param field_id: identifier for the field :return: the basic rule for the field """ # Field configuration info config = self._field_configs[field_id] adapter = self._adapters[config['type']] if 'name' in config: name = config['name'] else: name = None if 'size' in config: columns = config['size'] else: columns = None if 'values' in config: values = config['values'] else: values = None field = adapter.get_field(name, columns, values) if 'results_name' in config: field = field.setResultsName(config['results_name']) else: field = field.setResultsName(field_id) return field
Creates the field with the specified parameters. :param field_id: identifier for the field :return: the basic rule for the field
entailment
def read_csv_file(self, file_name): """ Parses a CSV file into a list. :param file_name: name of the CSV file :return: a list with the file's contents """ result = [] with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as csvfile: headers_reader = csv.reader(csvfile, delimiter=',', quotechar='|') for type_row in headers_reader: for t in type_row: result.append(t) return result
Parses a CSV file into a list. :param file_name: name of the CSV file :return: a list with the file's contents
entailment
def read_yaml_file(self, file_name): """ Parses a YAML file into a matrix. :param file_name: name of the YAML file :return: a matrix with the file's contents """ with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as yamlfile: return yaml.load(yamlfile)
Parses a YAML file into a matrix. :param file_name: name of the YAML file :return: a matrix with the file's contents
entailment
def get_data(self, file_id): """ Acquires the data from the table identified by the id. The file is read only once, consecutive calls to this method will return the sale collection. :param file_id: identifier for the table :return: all the values from the table """ if file_id not in self._file_values: file_contents = 'cwr_%s.csv' % file_id self._file_values[file_id] = self._reader.read_csv_file( file_contents) return self._file_values[file_id]
Acquires the data from the table identified by the id. The file is read only once, consecutive calls to this method will return the sale collection. :param file_id: identifier for the table :return: all the values from the table
entailment
def record_type(values): """ Creates a record type field. These serve as the header field on records, identifying them. Usually this field can be only an specific value, but sometimes a small range of codes is allowed. This is specified by the 'values' parameter. While it is possible to set this field as optional, it is expected to be compulsory. :param values: allowed record type codes :return: grammar for the record type field """ field = basic.lookup(values, name='Record Type (one of %s)' % values) return field.setResultsName('record_type')
Creates a record type field. These serve as the header field on records, identifying them. Usually this field can be only an specific value, but sometimes a small range of codes is allowed. This is specified by the 'values' parameter. While it is possible to set this field as optional, it is expected to be compulsory. :param values: allowed record type codes :return: grammar for the record type field
entailment
def record_prefix(required_type, factory): """ Creates a record prefix for the specified record type. :param required_type: the type of the record using this prefix :param factory: field factory :return: the record prefix """ field = record_type(required_type) field += factory.get_rule('transaction_sequence_n') field += factory.get_rule('record_sequence_n') # field.leaveWhitespace() return field
Creates a record prefix for the specified record type. :param required_type: the type of the record using this prefix :param factory: field factory :return: the record prefix
entailment
def expand_entity(self, entity): """ Search and return entity or sub entity that contain value of this field. :param entity: :return: entity :raise KeyError """ if self.name in entity: return entity for key, value in entity.items(): if isinstance(value, dict): if self.name in value: return value raise KeyError("The field %s (%s) not found in %s" % (self.name, self._rule['type'], entity))
Search and return entity or sub entity that contain value of this field. :param entity: :return: entity :raise KeyError
entailment
def encode(self, entity): """ Encode this :param entity: :return: cwr string """ entity = self.expand_entity(entity) value = entity[self.name] result = self.format(value) return result
Encode this :param entity: :return: cwr string
entailment
def read_config_file(self, file_name): """ Reads a CWR grammar config file. :param file_name: name of the text file :return: the file's contents """ with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as file_config: return self._parser.parseString(file_config.read())
Reads a CWR grammar config file. :param file_name: name of the text file :return: the file's contents
entailment
def _load_cwr_defaults(self): """ Loads the CWR default values file, creating a matrix from it, and then returns this data. The file will only be loaded once. :return: the CWR default values matrix """ if self._cwr_defaults is None: self._cwr_defaults = self._reader.read_yaml_file( self._file_defaults) return self._cwr_defaults
Loads the CWR default values file, creating a matrix from it, and then returns this data. The file will only be loaded once. :return: the CWR default values matrix
entailment
def load_field_config(self, file_id): """ Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration """ if file_id not in self._field_configs: self._field_configs[file_id] = self._reader.read_yaml_file( 'field_config_%s.yml' % file_id) return self._field_configs[file_id]
Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration
entailment
def load_group_config(self, file_id): """ Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration """ if file_id not in self._group_configs: self._group_configs[file_id] = self._reader.read_config_file( 'group_config_%s.cml' % file_id) return self._group_configs[file_id]
Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration
entailment
def load_record_config(self, file_id): """ Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration """ if file_id not in self._record_configs: self._record_configs[file_id] = self._reader.read_config_file( 'record_config_%s.cml' % file_id) return self._record_configs[file_id]
Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration
entailment
def load_transaction_config(self, file_id): """ Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration """ if file_id not in self._transaction_configs: self._transaction_configs[file_id] = self._reader.read_config_file( 'transaction_config_%s.cml' % file_id) return self._transaction_configs[file_id]
Loads the configuration fields file for the id. :param file_id: the id for the field :return: the fields configuration
entailment
def load_acknowledge_config(self, file_id): """ Loads the CWR acknowledge config :return: the values matrix """ if self._cwr_defaults is None: self._cwr_defaults = self._reader.read_yaml_file( 'acknowledge_config_%s.yml' % file_id) return self._cwr_defaults
Loads the CWR acknowledge config :return: the values matrix
entailment
def soft_error(self, message): """ Same as error, without the dying in a fire part. """ self.print_usage(sys.stderr) args = {'prog': self.prog, 'message': message} self._print_message( _('%(prog)s: error: %(message)s\n') % args, sys.stderr)
Same as error, without the dying in a fire part.
entailment
def default_filename_decoder(): """ Creates a decoder which parses CWR filenames following the old or the new convention. :return: a CWR filename decoder for the old and the new conventions """ factory = default_filename_grammar_factory() grammar_old = factory.get_rule('filename_old') grammar_new = factory.get_rule('filename_new') return FileNameDecoder(grammar_old, grammar_new)
Creates a decoder which parses CWR filenames following the old or the new convention. :return: a CWR filename decoder for the old and the new conventions
entailment
def decode(self, data): """ Parses the file, creating a CWRFile from it. It requires a dictionary with two values: - filename, containing the filename - contents, containing the file contents :param data: dictionary with the data to parse :return: a CWRFile instance """ file_name = self._filename_decoder.decode(data['filename']) file_data = data['contents'] i = 0 max_size = len(file_data) while file_data[i:i + 1] != 'H' and i < max_size: i += 1 if i > 0: data['contents'] = file_data[i:] transmission = self._file_decoder.decode(data['contents'])[0] return CWRFile(file_name, transmission)
Parses the file, creating a CWRFile from it. It requires a dictionary with two values: - filename, containing the filename - contents, containing the file contents :param data: dictionary with the data to parse :return: a CWRFile instance
entailment
def decode(self, file_name): """ Parses the filename, creating a FileTag from it. It will try both the old and the new conventions, if the filename does not conform any of them, then an empty FileTag will be returned. :param file_name: filename to parse :return: a FileTag instance """ try: file_tag = self._filename_decoder_new.decode(file_name) except: try: file_tag = self._filename_decoder_old.decode(file_name) except: file_tag = FileTag(0, 0, '', '', '') return file_tag
Parses the filename, creating a FileTag from it. It will try both the old and the new conventions, if the filename does not conform any of them, then an empty FileTag will be returned. :param file_name: filename to parse :return: a FileTag instance
entailment
def enable_pretty_logging(logger='calmjs', level=logging.DEBUG, stream=None): """ Shorthand to enable pretty logging """ def cleanup(): logger.removeHandler(handler) logger.level = old_level if not isinstance(logger, logging.Logger): logger = logging.getLogger(logger) old_level = logger.level handler = logging.StreamHandler(stream) handler.setFormatter(logging.Formatter( u'%(asctime)s %(levelname)s %(name)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) return cleanup
Shorthand to enable pretty logging
entailment
def finalize_env(env): """ Produce a platform specific env for passing into subprocess.Popen family of external process calling methods, and the supplied env will be updated on top of it. Returns a new env. """ keys = _PLATFORM_ENV_KEYS.get(sys.platform, []) if 'PATH' not in keys: # this MUST be available due to Node.js (and others really) # needing something to look for binary locations when it shells # out to other binaries. keys.append('PATH') results = { key: os.environ.get(key, '') for key in keys } results.update(env) return results
Produce a platform specific env for passing into subprocess.Popen family of external process calling methods, and the supplied env will be updated on top of it. Returns a new env.
entailment
def fork_exec(args, stdin='', **kwargs): """ Do a fork-exec through the subprocess.Popen abstraction in a way that takes a stdin and return stdout. """ as_bytes = isinstance(stdin, bytes) source = stdin if as_bytes else stdin.encode(locale) p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs) stdout, stderr = p.communicate(source) if as_bytes: return stdout, stderr return (stdout.decode(locale), stderr.decode(locale))
Do a fork-exec through the subprocess.Popen abstraction in a way that takes a stdin and return stdout.
entailment
def raise_os_error(_errno, path=None): """ Helper for raising the correct exception under Python 3 while still being able to raise the same common exception class in Python 2.7. """ msg = "%s: '%s'" % (strerror(_errno), path) if path else strerror(_errno) raise OSError(_errno, msg)
Helper for raising the correct exception under Python 3 while still being able to raise the same common exception class in Python 2.7.
entailment
def which(cmd, mode=os.F_OK | os.X_OK, path=None): """ Given cmd, check where it is on PATH. Loosely based on the version in python 3.3. """ if os.path.dirname(cmd): if os.path.isfile(cmd) and os.access(cmd, mode): return cmd if path is None: path = os.environ.get('PATH', defpath) if not path: return None paths = path.split(pathsep) if sys.platform == 'win32': # oh boy if curdir not in paths: paths = [curdir] + paths # also need to check the fileexts... pathext = os.environ.get('PATHEXT', '').split(pathsep) if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # sanity files = [cmd] seen = set() for p in paths: normpath = normcase(p) if normpath in seen: continue seen.add(normpath) for f in files: fn = os.path.join(p, f) if os.path.isfile(fn) and os.access(fn, mode): return fn return None
Given cmd, check where it is on PATH. Loosely based on the version in python 3.3.
entailment
def _init(self): """ Turn the records into actual usable keys. """ self._entry_points = {} for entry_point in self.raw_entry_points: if entry_point.dist.project_name != self.reserved.get( entry_point.name, entry_point.dist.project_name): logger.error( "registry '%s' for '%s' is reserved for package '%s'", entry_point.name, self.registry_name, self.reserved[entry_point.name], ) continue if self.get_record(entry_point.name): logger.warning( "registry '%s' for '%s' is already registered.", entry_point.name, self.registry_name, ) existing = self._entry_points[entry_point.name] logger.debug( "registered '%s' from '%s'", existing, existing.dist) logger.debug( "discarded '%s' from '%s'", entry_point, entry_point.dist) continue logger.debug( "recording '%s' from '%s'", entry_point, entry_point.dist) self._entry_points[entry_point.name] = entry_point
Turn the records into actual usable keys.
entailment
def dict_update_overwrite_check(base, fresh): """ For updating a base dict with a fresh one, returning a list of 3-tuples containing the key, previous value (base[key]) and the fresh value (fresh[key]) for all colliding changes (reassignment of identical values are omitted). """ result = [ (key, base[key], fresh[key]) for key in set(base.keys()) & set(fresh.keys()) if base[key] != fresh[key] ] base.update(fresh) return result
For updating a base dict with a fresh one, returning a list of 3-tuples containing the key, previous value (base[key]) and the fresh value (fresh[key]) for all colliding changes (reassignment of identical values are omitted).
entailment
def spec_update_loaderplugin_registry(spec, default=None): """ Resolve a BasePluginLoaderRegistry instance from spec, and update spec[CALMJS_LOADERPLUGIN_REGISTRY] with that value before returning it. """ registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY) if isinstance(registry, BaseLoaderPluginRegistry): logger.debug( "loaderplugin registry '%s' already assigned to spec", registry.registry_name) return registry elif not registry: # resolving registry registry = get_registry(spec.get(CALMJS_LOADERPLUGIN_REGISTRY_NAME)) if isinstance(registry, BaseLoaderPluginRegistry): logger.info( "using loaderplugin registry '%s'", registry.registry_name) spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry return registry # acquire the real default instance, if possible. if not isinstance(default, BaseLoaderPluginRegistry): default = get_registry(default) if not isinstance(default, BaseLoaderPluginRegistry): logger.info( "provided default is not a valid loaderplugin registry") default = None if default is None: default = BaseLoaderPluginRegistry('<default_loaderplugins>') # TODO determine the best way to optionally warn about this for # toolchains that require this. if registry: logger.info( "object referenced in spec is not a valid loaderplugin registry; " "using default loaderplugin registry '%s'", default.registry_name) else: logger.info( "no loaderplugin registry referenced in spec; " "using default loaderplugin registry '%s'", default.registry_name) spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry = default return registry
Resolve a BasePluginLoaderRegistry instance from spec, and update spec[CALMJS_LOADERPLUGIN_REGISTRY] with that value before returning it.
entailment
def spec_update_sourcepath_filter_loaderplugins( spec, sourcepath_map, sourcepath_map_key, loaderplugin_sourcepath_map_key=LOADERPLUGIN_SOURCEPATH_MAPS): """ Take an existing spec and a sourcepath mapping (that could be produced via calmjs.dist.*_module_registry_dependencies functions) and split out the keys that does not contain loaderplugin syntax and assign it to the spec under sourcepath_key. For the parts with loader plugin syntax (i.e. modnames (keys) that contain a '!' character), they are instead stored under a different mapping under its own mapping identified by the plugin_name. The mapping under loaderplugin_sourcepath_map_key will contain all mappings of this type. The resolution for the handlers will be done through the loader plugin registry provided via spec[CALMJS_LOADERPLUGIN_REGISTRY] if available, otherwise the registry instance will be acquired through the main registry using spec[CALMJS_LOADERPLUGIN_REGISTRY_NAME]. For the example sourcepath_map input: sourcepath = { 'module': 'something', 'plugin!inner': 'inner', 'plugin!other': 'other', 'plugin?query!question': 'question', 'plugin!plugin2!target': 'target', } The following will be stored under the following keys in spec: spec[sourcepath_key] = { 'module': 'something', } spec[loaderplugin_sourcepath_map_key] = { 'plugin': { 'plugin!inner': 'inner', 'plugin!other': 'other', 'plugin?query!question': 'question', 'plugin!plugin2!target': 'target', }, } The goal of this function is to aid in processing each of the plugin types by batch, one level at a time. It is up to the handler itself to trigger further lookups as there are implementations of loader plugins that do not respect the chaining mechanism, thus a generic lookup done at once may not be suitable. Note that nested/chained loaderplugins are not immediately grouped as they must be individually handled given that the internal syntax are generally proprietary to the outer plugin. 
The handling will be dealt with at the Toolchain.compile_loaderplugin_entry method through the associated handler call method. Toolchain implementations may either invoke this directly as part of the prepare step on the required sourcepaths values stored in the spec, or implement this at a higher level before invocating the toolchain instance with the spec. """ default = dict_setget_dict(spec, sourcepath_map_key) registry = spec_update_loaderplugin_registry(spec) # it is more loaderplugin_sourcepath_maps plugins = dict_setget_dict(spec, loaderplugin_sourcepath_map_key) for modname, sourcepath in sourcepath_map.items(): parts = modname.split('!', 1) if len(parts) == 1: # default default[modname] = sourcepath continue # don't actually do any processing yet. plugin_name = registry.to_plugin_name(modname) plugin = dict_setget_dict(plugins, plugin_name) plugin[modname] = sourcepath
Take an existing spec and a sourcepath mapping (that could be produced via calmjs.dist.*_module_registry_dependencies functions) and split out the keys that does not contain loaderplugin syntax and assign it to the spec under sourcepath_key. For the parts with loader plugin syntax (i.e. modnames (keys) that contain a '!' character), they are instead stored under a different mapping under its own mapping identified by the plugin_name. The mapping under loaderplugin_sourcepath_map_key will contain all mappings of this type. The resolution for the handlers will be done through the loader plugin registry provided via spec[CALMJS_LOADERPLUGIN_REGISTRY] if available, otherwise the registry instance will be acquired through the main registry using spec[CALMJS_LOADERPLUGIN_REGISTRY_NAME]. For the example sourcepath_map input: sourcepath = { 'module': 'something', 'plugin!inner': 'inner', 'plugin!other': 'other', 'plugin?query!question': 'question', 'plugin!plugin2!target': 'target', } The following will be stored under the following keys in spec: spec[sourcepath_key] = { 'module': 'something', } spec[loaderplugin_sourcepath_map_key] = { 'plugin': { 'plugin!inner': 'inner', 'plugin!other': 'other', 'plugin?query!question': 'question', 'plugin!plugin2!target': 'target', }, } The goal of this function is to aid in processing each of the plugin types by batch, one level at a time. It is up to the handler itself to trigger further lookups as there are implementations of loader plugins that do not respect the chaining mechanism, thus a generic lookup done at once may not be suitable. Note that nested/chained loaderplugins are not immediately grouped as they must be individually handled given that the internal syntax are generally proprietary to the outer plugin. The handling will be dealt with at the Toolchain.compile_loaderplugin_entry method through the associated handler call method. 
Toolchain implementations may either invoke this directly as part of the prepare step on the required sourcepaths values stored in the spec, or implement this at a higher level before invocating the toolchain instance with the spec.
entailment
def toolchain_spec_prepare_loaderplugins( toolchain, spec, loaderplugin_read_key, handler_sourcepath_key, loaderplugin_sourcepath_map_key=LOADERPLUGIN_SOURCEPATH_MAPS): """ A standard helper function for combining the filtered (e.g. using ``spec_update_sourcepath_filter_loaderplugins``) loaderplugin sourcepath mappings back into one that is usable with the standard ``toolchain_spec_compile_entries`` function. Arguments: toolchain The toolchain spec The spec loaderplugin_read_key The read_key associated with the loaderplugin process as set up for the Toolchain that implemented this. If the toolchain has this in its compile_entries: ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink') The loaderplugin_read_key it must use will be 'plugsrc'. handler_sourcepath_key All found handlers will have their handler_sourcepath method be invoked, and the combined results will be a dict stored in the spec under that key. loaderplugin_sourcepath_map_key It must be the same key to the value produced by ``spec_update_sourcepath_filter_loaderplugins`` """ # ensure the registry is applied to the spec registry = spec_update_loaderplugin_registry( spec, default=toolchain.loaderplugin_registry) # this one is named like so for the compile entry method plugin_sourcepath = dict_setget_dict( spec, loaderplugin_read_key + '_sourcepath') # the key is supplied by the toolchain that might make use of this if handler_sourcepath_key: handler_sourcepath = dict_setget_dict(spec, handler_sourcepath_key) else: # provide a null value for this. handler_sourcepath = {} for key, value in spec.get(loaderplugin_sourcepath_map_key, {}).items(): handler = registry.get(key) if handler: # assume handler will do the job. logger.debug("found handler for '%s' loader plugin", key) plugin_sourcepath.update(value) logger.debug( "plugin_sourcepath updated with %d keys", len(value)) # TODO figure out how to address the case where the actual # JavaScript module for the handling wasn't found. 
handler_sourcepath.update( handler.generate_handler_sourcepath(toolchain, spec, value)) else: logger.warning( "loaderplugin handler for '%s' not found in loaderplugin " "registry '%s'; as arguments associated with loader plugins " "are specific, processing is disabled for this group; the " "sources referenced by the following names will not be " "compiled into the build target: %s", key, registry.registry_name, sorted(value.keys()), )
A standard helper function for combining the filtered (e.g. using ``spec_update_sourcepath_filter_loaderplugins``) loaderplugin sourcepath mappings back into one that is usable with the standard ``toolchain_spec_compile_entries`` function. Arguments: toolchain The toolchain spec The spec loaderplugin_read_key The read_key associated with the loaderplugin process as set up for the Toolchain that implemented this. If the toolchain has this in its compile_entries: ToolchainSpecCompileEntry('loaderplugin', 'plugsrc', 'plugsink') The loaderplugin_read_key it must use will be 'plugsrc'. handler_sourcepath_key All found handlers will have their handler_sourcepath method be invoked, and the combined results will be a dict stored in the spec under that key. loaderplugin_sourcepath_map_key It must be the same key to the value produced by ``spec_update_sourcepath_filter_loaderplugins``
entailment
def toolchain_spec_compile_entries( toolchain, spec, entries, process_name, overwrite_log=None): """ The standardized Toolchain Spec Entries compile function This function accepts a toolchain instance, the spec to be operated with and the entries provided for the process name. The standard flow is to deferr the actual processing to the toolchain method `compile_{process_name}_entry` for each entry in the entries list. The generic compile entries method for the compile process. Arguments: toolchain The toolchain to be used for the operation. spec The spec to be operated with. entries The entries for the source. process_name The name of the specific compile process of the provided toolchain. overwrite_log A callable that will accept a 4-tuple of suffix, key, original and new value, if monitoring of overwritten values are required. suffix is derived from the modpath_suffix or targetpath_suffix of the toolchain instance, key is the key on any of the keys on either of those mappings, original and new are the original and the replacement value. """ processor = getattr(toolchain, 'compile_%s_entry' % process_name) modpath_logger = ( partial(overwrite_log, toolchain.modpath_suffix) if callable(overwrite_log) else None) targetpath_logger = ( partial(overwrite_log, toolchain.targetpath_suffix) if callable(overwrite_log) else None) return process_compile_entries( processor, spec, entries, modpath_logger, targetpath_logger)
The standardized Toolchain Spec Entries compile function This function accepts a toolchain instance, the spec to be operated with and the entries provided for the process name. The standard flow is to deferr the actual processing to the toolchain method `compile_{process_name}_entry` for each entry in the entries list. The generic compile entries method for the compile process. Arguments: toolchain The toolchain to be used for the operation. spec The spec to be operated with. entries The entries for the source. process_name The name of the specific compile process of the provided toolchain. overwrite_log A callable that will accept a 4-tuple of suffix, key, original and new value, if monitoring of overwritten values are required. suffix is derived from the modpath_suffix or targetpath_suffix of the toolchain instance, key is the key on any of the keys on either of those mappings, original and new are the original and the replacement value.
entailment
def process_compile_entries( processor, spec, entries, modpath_logger=None, targetpath_logger=None): """ The generalized raw spec entry process invocation loop. """ # Contains a mapping of the module name to the compiled file's # relative path starting from the base build_dir. all_modpaths = {} all_targets = {} # List of exported module names, should be equal to all keys of # the compiled and bundled sources. all_export_module_names = [] def update(base, fresh, logger): if callable(logger): for dupes in dict_update_overwrite_check(base, fresh): logger(*dupes) else: base.update(fresh) for entry in entries: modpaths, targetpaths, export_module_names = processor(spec, entry) update(all_modpaths, modpaths, modpath_logger) update(all_targets, targetpaths, targetpath_logger) all_export_module_names.extend(export_module_names) return all_modpaths, all_targets, all_export_module_names
The generalized raw spec entry process invocation loop.
entailment
def update_selected(self, other, selected): """ Like update, however a list of selected keys must be provided. """ self.update({k: other[k] for k in selected})
Like update, however a list of selected keys must be provided.
entailment
def __advice_stack_frame_protection(self, frame): """ Overriding of this is only permitted if and only if your name is Megumin and you have a pet/familiar named Chomusuke. """ if frame is None: logger.debug( 'currentframe() returned None; frame protection disabled') return f_back = frame.f_back while f_back: if f_back.f_code is self.handle.__code__: raise RuntimeError( "indirect invocation of '%s' by 'handle' is forbidden" % frame.f_code.co_name, ) f_back = f_back.f_back
Overriding of this is only permitted if and only if your name is Megumin and you have a pet/familiar named Chomusuke.
entailment
def advise(self, name, f, *a, **kw): """ Add an advice that will be handled later by the handle method. Arguments: name The name of the advice group f A callable method or function. The rest of the arguments will be passed as arguments and keyword arguments to f when it's invoked. """ if name is None: return advice = (f, a, kw) debug = self.get(DEBUG) frame = currentframe() if frame is None: logger.debug('currentframe() failed to return frame') else: if name in self._called: self.__advice_stack_frame_protection(frame) if debug: logger.debug( "advise '%s' invoked by %s:%d", name, frame.f_back.f_code.co_filename, frame.f_back.f_lineno, ) if debug > 1: # use the memory address of the tuple which should # be stable self._frames[id(advice)] = ''.join( format_stack(frame.f_back)) self._advices[name] = self._advices.get(name, []) self._advices[name].append(advice)
Add an advice that will be handled later by the handle method. Arguments: name The name of the advice group f A callable method or function. The rest of the arguments will be passed as arguments and keyword arguments to f when it's invoked.
entailment