_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q28000
TargetAdaptor.field_adaptors
train
def field_adaptors(self):
    """Returns a tuple of Fields for captured fields which need additional treatment."""
    with exception_logging(logger, 'Exception in `field_adaptors` property'):
        globs_with_conjunction = self.get_sources()
        if globs_with_conjunction is None:
            return tuple()
        non_path_globs = globs_with_conjunction.non_path_globs
        if not non_path_globs:
            return tuple()
        spec_path = self.address.spec_path
        base_globs = BaseGlobs.from_sources_field(non_path_globs, spec_path)
        path_globs = base_globs.to_path_globs(spec_path, globs_with_conjunction.conjunction)
        sources_field = SourcesField(
            self.address,
            'sources',
            base_globs.filespecs,
            base_globs,
            path_globs,
            self.validate_sources,
        )
        return (sources_field,)
python
{ "resource": "" }
q28001
BaseGlobs.from_sources_field
train
def from_sources_field(sources, spec_path):
    """Build a BaseGlobs for the given sources field value.

    `sources` may be None, a list/tuple/set, a string or a BaseGlobs instance.
    """
    if sources is None:
        return Files(spec_path=spec_path)
    if isinstance(sources, BaseGlobs):
        # Already parsed; pass through unchanged.
        return sources
    if isinstance(sources, string_types):
        return Files(sources, spec_path=spec_path)
    is_string_collection = (
        isinstance(sources, (MutableSet, MutableSequence, tuple)) and
        all(isinstance(item, string_types) for item in sources)
    )
    if is_string_collection:
        return Files(*sources, spec_path=spec_path)
    raise ValueError('Expected either a glob or list of literal sources: got: {}'.format(sources))
python
{ "resource": "" }
q28002
BaseGlobs.filespecs
train
def filespecs(self):
    """Return a filespecs dict representing both globs and excludes."""
    result = {'globs': self._file_globs}
    excludes = self._exclude_filespecs
    if excludes:
        result['exclude'] = excludes
    return result
python
{ "resource": "" }
q28003
BaseGlobs.to_path_globs
train
def to_path_globs(self, relpath, conjunction):
    """Return a PathGlobs representing the included and excluded Files for these patterns."""
    # Re-root every pattern under relpath before handing them to the engine.
    included = [os.path.join(relpath, glob) for glob in self._file_globs]
    excluded = [os.path.join(relpath, glob) for glob in self._excluded_file_globs]
    return PathGlobs(include=tuple(included),
                     exclude=tuple(excluded),
                     conjunction=conjunction)
python
{ "resource": "" }
q28004
ConsolidateClasspath._consolidate_classpath
train
def _consolidate_classpath(self, targets, classpath_products): """Convert loose directories in classpath_products into jars. """ # TODO: find a way to not process classpath entries for valid VTs. # NB: It is very expensive to call to get entries for each target one at a time. # For performance reasons we look them all up at once. entries_map = defaultdict(list) for (cp, target) in classpath_products.get_product_target_mappings_for_targets(targets, True): entries_map[target].append(cp) with self.invalidated(targets=targets, invalidate_dependents=True) as invalidation: for vt in invalidation.all_vts: entries = entries_map.get(vt.target, []) for index, (conf, entry) in enumerate(entries): if ClasspathUtil.is_dir(entry.path): jarpath = os.path.join(vt.results_dir, 'output-{}.jar'.format(index)) # Regenerate artifact for invalid vts. if not vt.valid: with self.open_jar(jarpath, overwrite=True, compressed=False) as jar: jar.write(entry.path) # Replace directory classpath entry with its jarpath. classpath_products.remove_for_target(vt.target, [(conf, entry.path)]) classpath_products.add_for_target(vt.target, [(conf, jarpath)])
python
{ "resource": "" }
q28005
_convert
train
def _convert(val, acceptable_types):
    """Ensure that val is one of the acceptable types, converting it if needed.

    :param val: The value we're parsing (either a string or one of the acceptable types).
    :param acceptable_types: A tuple of expected types for val.
    :returns: The parsed value.
    :raises :class:`pants.options.errors.ParseError`: if there was a problem parsing the val
                                                      as an acceptable type.
    """
    # Strings (and anything else unexpected) get parsed; acceptable values pass through.
    if not isinstance(val, acceptable_types):
        return parse_expression(val, acceptable_types, raise_type=ParseError)
    return val
python
{ "resource": "" }
q28006
ListValueComponent.create
train
def create(cls, value):
    """Interpret value as either a list or something to extend another list with.

    Note that we accept tuple literals, but the internal value is always a list.

    :param value: The value to convert.  Can be an instance of ListValueComponent, a list, a
                  tuple, a string representation of a list or tuple (possibly prefixed by + or -
                  indicating modification instead of replacement), or any allowed member_type.
                  May also be a comma-separated sequence of modifications.
    :rtype: `ListValueComponent`
    """
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    if isinstance(value, str):
        comma_separated_exprs = cls._split_modifier_expr(value)
        if len(comma_separated_exprs) > 1:
            return cls.merge([cls.create(x) for x in comma_separated_exprs])

    action = cls.MODIFY
    appends = []
    filters = []
    if isinstance(value, cls):  # Ensure idempotency.
        action = value._action
        appends = value._appends
        filters = value._filters
    elif isinstance(value, (list, tuple)):  # Ensure we can handle list-typed default values.
        action = cls.REPLACE
        appends = value
    elif isinstance(value, str):
        # Only strings support the [...], +[...], -[...] literal syntaxes.
        if value.startswith('[') or value.startswith('('):
            action = cls.REPLACE
            appends = _convert(value, (list, tuple))
        elif value.startswith('+[') or value.startswith('+('):
            appends = _convert(value[1:], (list, tuple))
        elif value.startswith('-[') or value.startswith('-('):
            filters = _convert(value[1:], (list, tuple))
        else:
            appends = [value]
    else:
        # A single non-string member value (e.g. an int): wrap it in a list literal.
        # NB: Guarding the .startswith() calls with isinstance(value, str) above fixes an
        # AttributeError the previous code raised here for non-string scalar values, which
        # made this branch unreachable.
        appends = _convert('[{}]'.format(value), list)
    return cls(action, list(appends), list(filters))
python
{ "resource": "" }
q28007
DictValueComponent.create
train
def create(cls, value):
    """Interpret value as either a dict or something to extend another dict with.

    :param value: The value to convert.  Can be an instance of DictValueComponent, a dict,
                  or a string representation (possibly prefixed by +) of a dict.
    :rtype: `DictValueComponent`
    """
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    if isinstance(value, cls):
        # Already a component: reuse its parts so create() is idempotent.
        action, val = value.action, value.val
    elif isinstance(value, dict):
        # A dict-typed default value replaces outright.
        action, val = cls.REPLACE, value
    elif value.startswith('{'):
        action, val = cls.REPLACE, _convert(value, dict)
    elif value.startswith('+{'):
        action, val = cls.EXTEND, _convert(value[1:], dict)
    else:
        raise ParseError('Invalid dict value: {}'.format(value))
    return cls(action, dict(val))
python
{ "resource": "" }
q28008
TemplateData.extend
train
def extend(self, **kwargs):
    """Returns a new instance with this instance's data overlayed by the key-value args."""
    merged = self.copy()
    merged.update(kwargs)
    return TemplateData(**merged)
python
{ "resource": "" }
q28009
Scm.github
train
def github(cls, user, repo):
    """Creates an `Scm` for a github repo.

    :param string user: The github user or organization name the repo is hosted under.
    :param string repo: The repository name.
    :returns: An `Scm` representing the github repo.
    """
    # For the url format, see: http://maven.apache.org/scm/git.html
    connection = 'scm:git:git@github.com:{user}/{repo}.git'.format(user=user, repo=repo)
    url = 'https://github.com/{user}/{repo}'.format(user=user, repo=repo)
    return cls(connection=connection, developer_connection=connection, url=url)
python
{ "resource": "" }
q28010
Scm.tagged
train
def tagged(self, tag):
    """Creates a new `Scm` identical to this `Scm` but with the given `tag`."""
    # Copy every connection detail from this instance, overriding only the tag.
    return Scm(
        self.connection,
        self.developer_connection,
        self.url,
        tag=tag,
    )
python
{ "resource": "" }
q28011
Cookies.update
train
def update(self, cookies):
    """Add specified cookies to our cookie jar, and persists it.

    :param cookies: Any iterable that yields http.cookiejar.Cookie instances, such as a CookieJar.
    """
    jar = self.get_cookie_jar()
    for cookie in cookies:
        jar.set_cookie(cookie)
    # Serialize under the inter-process lock so concurrent writers don't clobber each other.
    with self._lock:
        jar.save()
python
{ "resource": "" }
q28012
Cookies.get_cookie_jar
train
def get_cookie_jar(self):
    """Returns our cookie jar."""
    cookie_file = self._get_cookie_file()
    cookie_jar = LWPCookieJar(cookie_file)
    if os.path.exists(cookie_file):
        # Load previously persisted cookies from disk.
        cookie_jar.load()
    else:
        safe_mkdir_for(cookie_file)
        # Save an empty cookie jar so we can change the file perms on it before writing data to it.
        with self._lock:
            cookie_jar.save()
            # Restrict the cookie file to owner read/write since it may hold credentials.
            os.chmod(cookie_file, 0o600)
    return cookie_jar
python
{ "resource": "" }
q28013
Cookies._lock
train
def _lock(self):
    """An identity-keyed inter-process lock around the cookie file."""
    lock_path = '{}.lock'.format(self._get_cookie_file())
    safe_mkdir_for(lock_path)
    return OwnerPrintingInterProcessFileLock(lock_path)
python
{ "resource": "" }
q28014
GoTargetGenerator.generate
train
def generate(self, local_go_targets):
    """Automatically generates a Go target graph for the given local go targets.

    :param iter local_go_targets: The target roots to fill in a target graph for.
    :raises: :class:`GoTargetGenerator.GenerationError` if any missing targets cannot be generated.
    """
    # Seed the visited map with the roots themselves, keyed by import path.
    visited = {target.import_path: target.address for target in local_go_targets}
    with temporary_dir() as gopath:
        for target in local_go_targets:
            dependencies = self._list_deps(gopath, target.address)
            self._generate_missing(gopath, target.address, dependencies, visited)
    return list(visited.items())
python
{ "resource": "" }
q28015
GoBuildgen.generate_targets
train
def generate_targets(self, local_go_targets=None):
    """Generate Go targets in memory to form a complete Go graph.

    :param local_go_targets: The local Go targets to fill in a complete target graph for.  If
                             `None`, then all local Go targets under the Go source root are used.
    :type local_go_targets: :class:`collections.Iterable` of
                            :class:`pants.contrib.go.targets.go_local_source import GoLocalSource`
    :returns: A generation result if targets were generated, else `None`.
    :rtype: :class:`GoBuildgen.GenerationResult`
    """
    # TODO(John Sirois): support multiple source roots like GOPATH does?
    # The GOPATH's 1st element is read-write, the rest are read-only; ie: their sources build to
    # the 1st element's pkg/ and bin/ dirs.

    # Bucket all Go source roots by category (source / test / thirdparty / unknown).
    go_roots_by_category = defaultdict(list)
    # TODO: Add "find source roots for lang" functionality to SourceRoots and use that instead.
    for sr in self.context.source_roots.all_roots():
        if 'go' in sr.langs:
            go_roots_by_category[sr.category].append(sr.path)

    if go_roots_by_category[SourceRootCategories.TEST]:
        raise self.InvalidLocalRootsError('Go buildgen does not support test source roots.')
    if go_roots_by_category[SourceRootCategories.UNKNOWN]:
        raise self.InvalidLocalRootsError('Go buildgen does not support source roots of '
                                          'unknown category.')

    # Exactly one local (SOURCE-category) root is required.
    local_roots = go_roots_by_category[SourceRootCategories.SOURCE]
    if not local_roots:
        raise self.NoLocalRootsError('Can only BUILD gen if a Go local sources source root is '
                                     'defined.')
    if len(local_roots) > 1:
        raise self.InvalidLocalRootsError('Can only BUILD gen for a single Go local sources source '
                                          'root, found:\n\t{}'
                                          .format('\n\t'.join(sorted(local_roots))))
    local_root = local_roots.pop()

    if local_go_targets:
        # Every explicitly-given root target must live under the single local source root.
        unrooted_locals = {t for t in local_go_targets if t.target_base != local_root}
        if unrooted_locals:
            raise self.UnrootedLocalSourceError('Cannot BUILD gen until the following targets are '
                                               'relocated to the source root at {}:\n\t{}'
                                               .format(local_root,
                                                       '\n\t'.join(sorted(t.address.reference()
                                                                          for t in unrooted_locals))))
    else:
        # No roots given: scan for all local Go targets under the local source root.
        root = os.path.join(get_buildroot(), local_root)
        local_go_targets = self.context.scan(root=root).targets(self.is_local_src)
        if not local_go_targets:
            return None

    # At most one remote (THIRDPARTY-category) root is allowed.
    remote_roots = go_roots_by_category[SourceRootCategories.THIRDPARTY]
    if len(remote_roots) > 1:
        raise self.InvalidRemoteRootsError('Can only BUILD gen for a single Go remote library source '
                                           'root, found:\n\t{}'
                                           .format('\n\t'.join(sorted(remote_roots))))
    remote_root = remote_roots.pop() if remote_roots else None

    generator = GoTargetGenerator(self.import_oracle,
                                  self.context.build_graph,
                                  local_root,
                                  self.get_fetcher_factory(),
                                  generate_remotes=self.get_options().remote,
                                  remote_root=remote_root)
    with self.context.new_workunit('go.buildgen', labels=[WorkUnitLabel.MULTITOOL]):
        try:
            generated = generator.generate(local_go_targets)
            return self.GenerationResult(generated=generated,
                                         local_root=local_root,
                                         remote_root=remote_root)
        except generator.GenerationError as e:
            raise self.GenerationError(e)
python
{ "resource": "" }
q28016
SingleAddress.address_target_pairs_from_address_families
train
def address_target_pairs_from_address_families(self, address_families):
    """Return the pair for the single target matching the single AddressFamily, or error.

    :raises: :class:`SingleAddress._SingleAddressResolutionError` if no targets could be found for a
             :class:`SingleAddress` instance.
    :return: list of (Address, Target) pairs with exactly one element.
    """
    single_af = assert_single_element(address_families)
    matches = [(addr, tgt)
               for addr, tgt in single_af.addressables.items()
               if addr.target_name == self.name]
    if not matches:
        raise self._SingleAddressResolutionError(single_af, self.name)
    # There will be at most one target with a given name in a single AddressFamily.
    assert(len(matches) == 1)
    return matches
python
{ "resource": "" }
q28017
Bootstrapper.ivy
train
def ivy(self, bootstrap_workunit_factory=None):
    """Returns an ivy instance bootstrapped by this bootstrapper.

    :param bootstrap_workunit_factory: the optional workunit to bootstrap under.
    :raises: Bootstrapper.Error if ivy could not be bootstrapped
    """
    classpath = self._get_classpath(bootstrap_workunit_factory)
    subsystem = self._ivy_subsystem
    return Ivy(classpath,
               ivy_settings=subsystem.get_options().ivy_settings,
               ivy_resolution_cache_dir=subsystem.resolution_cache_dir(),
               extra_jvm_options=subsystem.extra_jvm_options())
python
{ "resource": "" }
q28018
Bootstrapper._get_classpath
train
def _get_classpath(self, workunit_factory):
    """Returns the bootstrapped ivy classpath as a list of jar paths.

    :raises: Bootstrapper.Error if the classpath could not be bootstrapped
    """
    # Memoize: bootstrap at most once per instance.
    if self._classpath:
        return self._classpath
    self._classpath = self._bootstrap_ivy_classpath(workunit_factory)
    return self._classpath
python
{ "resource": "" }
q28019
Target.maybe_readable_combine_ids
train
def maybe_readable_combine_ids(cls, ids):
    """Generates combined id for a set of ids, but if the set is a single id, just use that.

    :API: public
    """
    ids = list(ids)  # We can't len a generator.
    if len(ids) == 1:
        return ids[0]
    return cls.combine_ids(ids)
python
{ "resource": "" }
q28020
Target.closure_for_targets
train
def closure_for_targets(cls, target_roots, exclude_scopes=None, include_scopes=None, bfs=None, postorder=None, respect_intransitive=False):
    """Computes the closure of the given targets respecting the given input scopes.

    :API: public

    :param list target_roots: The list of Targets to start from. These targets will always be
      included in the closure, regardless of scope settings.
    :param Scope exclude_scopes: If present and non-empty, only dependencies which have none of
      the scope names in this Scope will be traversed.
    :param Scope include_scopes: If present and non-empty, only dependencies which have at least
      one of the scope names in this Scope will be traversed.
    :param bool bfs: Whether to traverse in breadth-first or depth-first order. (Defaults to
      True).
    :param bool postorder: Whether a depth-first walk visits dependencies before dependents; only
      meaningful when `bfs` is falsy.
    :param bool respect_intransitive: If True, any dependencies which have the 'intransitive'
      scope will not be included unless they are direct dependencies of one of the root targets.
      (Defaults to False).
    """
    target_roots = list(target_roots)  # Sometimes generators are passed into this function.
    if not target_roots:
        return OrderedSet()
    # All roots share one build graph; borrow it from the first root.
    build_graph = target_roots[0]._build_graph
    addresses = [target.address for target in target_roots]
    # Build the scope-aware predicate that decides which dependency edges to follow.
    dep_predicate = cls._closure_dep_predicate(target_roots,
                                               include_scopes=include_scopes,
                                               exclude_scopes=exclude_scopes,
                                               respect_intransitive=respect_intransitive)
    closure = OrderedSet()
    if not bfs:
        build_graph.walk_transitive_dependency_graph(
            addresses=addresses,
            work=closure.add,
            postorder=postorder,
            dep_predicate=dep_predicate,
        )
    else:
        closure.update(build_graph.transitive_subgraph_of_addresses_bfs(
            addresses=addresses,
            dep_predicate=dep_predicate,
        ))
    # Make sure all the roots made it into the closure.
    closure.update(target_roots)
    return closure
python
{ "resource": "" }
q28021
Target.mark_invalidation_hash_dirty
train
def mark_invalidation_hash_dirty(self):
    """Invalidates memoized fingerprints for this target, including those in payloads.

    Exposed for testing.

    :API: public
    """
    # Drop all memoized fingerprint and dependency caches so they're recomputed on next access.
    self._cached_fingerprint_map = {}
    self._cached_all_transitive_fingerprint_map = {}
    self._cached_direct_transitive_fingerprint_map = {}
    self._cached_strict_dependencies_map = {}
    self._cached_exports_addresses = None
    # Let subclasses clear any extra memoized state, then dirty the payload's own fingerprints.
    self.mark_extra_invalidation_hash_dirty()
    self.payload.mark_dirty()
python
{ "resource": "" }
q28022
Target.has_sources
train
def has_sources(self, extension=None):
    """Return `True` if this target owns sources; optionally of the given `extension`.

    :API: public

    :param string extension: Optional suffix of filenames to test for.
    :return: `True` if the target contains sources that match the optional extension suffix.
    :rtype: bool
    """
    paths = self._sources_field.source_paths
    if not paths:
        return False
    if extension:
        return any(path.endswith(extension) for path in paths)
    return True
python
{ "resource": "" }
q28023
Target.derived_from_chain
train
def derived_from_chain(self):
    """Returns all targets that this target was derived from.

    If this target was not derived from another, returns an empty sequence.

    :API: public
    """
    node = self
    while True:
        parent = node.derived_from
        if parent is node:
            # A target that derives from itself terminates the chain.
            break
        yield parent
        node = parent
python
{ "resource": "" }
q28024
Target.walk
train
def walk(self, work, predicate=None):
    """Walk of this target's dependency graph, DFS preorder traversal, visiting each node exactly
    once.

    If a predicate is supplied it will be used to test each target before handing the target to
    work and descending. Work can return targets in which case these will be added to the walk
    candidate set if not already walked.

    :API: public

    :param work: Callable that takes a :py:class:`pants.build_graph.target.Target`
      as its single argument.
    :param predicate: Callable that takes a :py:class:`pants.build_graph.target.Target`
      as its single argument and returns True if the target should passed to ``work``.
    """
    # Validate the callables up front so errors surface before the graph walk starts.
    if not callable(work):
        raise ValueError('work must be callable but was {}'.format(work))
    if predicate and not callable(predicate):
        raise ValueError('predicate must be callable but was {}'.format(predicate))
    self._build_graph.walk_transitive_dependency_graph([self.address], work, predicate)
python
{ "resource": "" }
q28025
Target.create_sources_field
train
def create_sources_field(self, sources, sources_rel_path, key_arg=None):
    """Factory method to create a SourcesField appropriate for the type of the sources object.

    Note that this method is called before the call to Target.__init__ so don't expect fields to
    be populated!

    :API: public

    :return: a payload field object representing the sources parameter
    :rtype: SourcesField
    """
    if sources:
        if not isinstance(sources, FilesetWithSpec):
            key_arg_section = "'{}' to be ".format(key_arg) if key_arg else ""
            raise TargetDefinitionException(
                self,
                "Expected {}a glob, an address or a list, but was {}".format(key_arg_section,
                                                                             type(sources)))
    else:
        # Normalize falsy values to an empty fileset rooted at the rel path.
        sources = FilesetWithSpec.empty(sources_rel_path)
    return SourcesField(sources=sources)
python
{ "resource": "" }
q28026
SourceRootFactory.create
train
def create(self, relpath, langs, category):
    """Return a source root at the given `relpath` for the given `langs` and `category`.

    :returns: :class:`SourceRoot`.
    """
    canonical_langs = tuple(self._canonicalize_langs(langs))
    return SourceRoot(relpath, canonical_langs, category)
python
{ "resource": "" }
q28027
SourceRoots.add_source_root
train
def add_source_root(self, path, langs=tuple(), category=SourceRootCategories.UNKNOWN):
    """Add the specified fixed source root, which must be relative to the buildroot.

    Useful in a limited set of circumstances, e.g., when unpacking sources from a jar with
    unknown structure.  Tests should prefer to use dirs that match our source root patterns
    instead of explicitly setting source roots here.

    :param path: The fixed source root path, relative to the buildroot.
    :param langs: Languages hosted under this root.
    :param category: A :class:`SourceRootCategories` value for the root.
    """
    # Fixed roots bypass pattern matching and are registered directly on the trie.
    self._trie.add_fixed(path, langs, category)
python
{ "resource": "" }
q28028
SourceRoots.find_by_path
train
def find_by_path(self, path):
    """Find the source root for the given path, or None.

    :param path: Find the source root for this path, relative to the buildroot.
    :return: A SourceRoot instance, or None if the path is not located under a source root
             and `unmatched==fail`.
    """
    matched = self._trie.find(path)
    if matched:
        return matched
    unmatched_behavior = self._options.unmatched
    if unmatched_behavior == 'fail':
        return None
    if unmatched_behavior == 'create':
        # If no source root is found, use the path directly.
        # TODO: Remove this logic. It should be an error to have no matching source root.
        return SourceRoot(path, [], SourceRootCategories.UNKNOWN)
python
{ "resource": "" }
q28029
SourceRoots.all_roots
train
def all_roots(self):
    """Return all known source roots.

    Returns a generator over (source root, list of langs, category) triples.

    Note: Requires a directory walk to match actual directories against patterns.
    However we don't descend into source roots, once found, so this should be fast in practice.
    Note: Does not follow symlinks.
    """
    project_tree = get_project_tree(self._options)

    # First yield the fixed roots that actually exist on disk.
    fixed_roots = set()
    for root, langs, category in self._trie.fixed():
        if project_tree.exists(root):
            yield self._source_root_factory.create(root, langs, category)
            fixed_roots.add(root)

    # Then walk the tree matching directories against the trie's patterns.
    for relpath, dirnames, _ in project_tree.walk('', topdown=True):
        match = self._trie.find(relpath)
        if match:
            if not any(fixed_root.startswith(relpath) for fixed_root in fixed_roots):
                yield match  # Found a source root not a prefix of any fixed roots.
            # Don't descend into a found source root.
            del dirnames[:]
python
{ "resource": "" }
q28030
SourceRootConfig.create_trie
train
def create_trie(self):
    """Create a trie of source root patterns from options.

    :returns: :class:`SourceRootTrie`
    """
    trie = SourceRootTrie(self.source_root_factory)
    options = self.get_options()
    for category in SourceRootCategories.ALL:
        # Register glob-style patterns for this category.
        for pattern in options.get('{}_root_patterns'.format(category), []):
            trie.add_pattern(pattern, category)
        # Register explicitly-configured fixed roots for this category.
        for path, langs in options.get('{}_roots'.format(category), {}).items():
            trie.add_fixed(path, langs, category)
    return trie
python
{ "resource": "" }
q28031
SourceRootTrie.add_pattern
train
def add_pattern(self, pattern, category=SourceRootCategories.UNKNOWN):
    """Add a pattern to the trie.

    :param pattern: A source root pattern, e.g. 'src/*'.
    :param category: A :class:`SourceRootCategories` value for roots matched by this pattern.
    """
    # Patterns carry no langs of their own; an empty tuple is recorded for them.
    self._do_add_pattern(pattern, tuple(), category)
python
{ "resource": "" }
q28032
SourceRootTrie.add_fixed
train
def add_fixed(self, path, langs, category=SourceRootCategories.UNKNOWN):
    """Add a fixed source root to the trie."""
    if '*' in path:
        raise self.InvalidPath(path, 'fixed path cannot contain the * character')
    # Fixed paths are anchored under the synthetic '^' root key so they only
    # match from the buildroot.
    if path:
        anchored_path = os.path.join('^', path)
    else:
        anchored_path = '^'
    self._do_add_pattern(anchored_path, tuple(langs), category)
python
{ "resource": "" }
q28033
SourceRootTrie.fixed
train
def fixed(self):
    """Returns a list of just the fixed source roots in the trie."""
    # Fixed roots all live under the synthetic '^' child of the trie root.
    for key, child in self._root.children.items():
        if key != '^':
            continue
        return list(child.subpatterns())
    return []
python
{ "resource": "" }
q28034
SourceRootTrie.find
train
def find(self, path):
    """Find the source root for the given path.

    Tries a match starting at every position in the path, so a pattern may match a suffix of
    the path, not just its start.  Returns a SourceRoot for the first terminal match found,
    or None.
    """
    keys = ['^'] + path.split(os.path.sep)
    for i in range(len(keys)):
        # See if we have a match at position i.  We have such a match if following the path
        # segments into the trie, from the root, leads us to a terminal.
        node = self._root
        langs = set()
        j = i
        while j < len(keys):
            # get_child accumulates matched langs into the `langs` set as a side effect.
            child = node.get_child(keys[j], langs)
            if child is None:
                break
            else:
                node = child
                j += 1
        if node.is_terminal:
            if j == 1:
                # The match was on the root itself.
                path = ''
            else:
                # Re-join the matched segments (dropping the synthetic '^' anchor).
                path = os.path.join(*keys[1:j])
            return self._source_root_factory.create(path, langs, node.category)
        # Otherwise, try the next value of i.
    return None
python
{ "resource": "" }
q28035
BinaryToolBase.version
train
def version(self, context=None):
    """Returns the version of the specified binary tool.

    If replaces_scope and replaces_name are defined, then the caller must pass in
    a context, otherwise no context should be passed.

    # TODO: Once we're migrated, get rid of the context arg.

    :API: public
    """
    if self.replaces_scope and self.replaces_name:
        if context:
            # If the old option is provided explicitly, let it take precedence.
            old_opts = context.options.for_scope(self.replaces_scope)
            if old_opts.get(self.replaces_name) and not old_opts.is_default(self.replaces_name):
                return old_opts.get(self.replaces_name)
        else:
            # NB: logger.warn() is a deprecated alias of logger.warning(); use the
            # supported spelling.
            logger.warning('Cannot resolve version of {} from deprecated option {} in scope {} '
                           'without a context!'.format(self._get_name(), self.replaces_name,
                                                       self.replaces_scope))
    return self.get_options().version
python
{ "resource": "" }
q28036
Reporting.initialize
train
def initialize(self, run_tracker, all_options, start_time=None):
    """Initialize with the given RunTracker.

    Sets up capturing/HTML/Zipkin reporters and starts the run's report.

    TODO: See `RunTracker.start`.
    """
    run_id, run_uuid = run_tracker.initialize(all_options)
    run_dir = os.path.join(self.get_options().reports_dir, run_id)

    html_dir = os.path.join(run_dir, 'html')
    safe_mkdir(html_dir)
    relative_symlink(run_dir, os.path.join(self.get_options().reports_dir, 'latest'))

    report = Report()

    # Capture initial console reporting into a buffer. We'll do something with it once
    # we know what the cmd-line flag settings are.
    outfile = BytesIO()
    errfile = BytesIO()
    capturing_reporter_settings = PlainTextReporter.Settings(
        outfile=outfile, errfile=errfile, log_level=Report.INFO,
        color=False, indent=True, timing=False,
        cache_stats=False,
        label_format=self.get_options().console_label_format,
        tool_output_format=self.get_options().console_tool_output_format)
    capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
    report.add_reporter('capturing', capturing_reporter)

    # Set up HTML reporting. We always want that.
    html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
                                                   html_dir=html_dir,
                                                   template_dir=self.get_options().template_dir)
    html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
    report.add_reporter('html', html_reporter)

    # Set up Zipkin reporting.
    zipkin_endpoint = self.get_options().zipkin_endpoint
    trace_id = self.get_options().zipkin_trace_id
    parent_id = self.get_options().zipkin_parent_id
    sample_rate = self.get_options().zipkin_sample_rate

    # Validate the zipkin flag combinations before constructing the reporter.
    if zipkin_endpoint is None and trace_id is not None and parent_id is not None:
        raise ValueError(
            "The zipkin-endpoint flag must be set if zipkin-trace-id and zipkin-parent-id flags are given."
        )
    if (trace_id is None) != (parent_id is None):
        raise ValueError(
            "Flags zipkin-trace-id and zipkin-parent-id must both either be set or not set."
        )

    # If trace_id isn't set by a flag, use UUID from run_id
    if trace_id is None:
        trace_id = run_uuid

    if trace_id and (len(trace_id) != 16 and len(trace_id) != 32 or not is_hex_string(trace_id)):
        raise ValueError(
            "Value of the flag zipkin-trace-id must be a 16-character or 32-character hex string. "
            + "Got {}.".format(trace_id)
        )
    if parent_id and (len(parent_id) != 16 or not is_hex_string(parent_id)):
        raise ValueError(
            "Value of the flag zipkin-parent-id must be a 16-character hex string. "
            + "Got {}.".format(parent_id)
        )

    if zipkin_endpoint is not None:
        zipkin_reporter_settings = ZipkinReporter.Settings(log_level=Report.INFO)
        zipkin_reporter = ZipkinReporter(
            run_tracker, zipkin_reporter_settings, zipkin_endpoint, trace_id, parent_id, sample_rate
        )
        report.add_reporter('zipkin', zipkin_reporter)

    # Add some useful RunInfo.
    run_tracker.run_info.add_info('default_report', html_reporter.report_path())
    port = ReportingServerManager().socket
    if port:
        run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))

    # And start tracking the run.
    run_tracker.start(report, start_time)
python
{ "resource": "" }
q28037
Reporting.update_reporting
train
def update_reporting(self, global_options, is_quiet, run_tracker):
    """Updates reporting config once we've parsed cmd-line flags.

    Replaces the buffering 'capturing' reporter (installed by `initialize`) with a real
    console reporter, optionally adds a plaintext logfile reporter, and returns the
    invalidation report (or None).
    """
    # Get any output silently buffered in the old console reporter, and remove it.
    removed_reporter = run_tracker.report.remove_reporter('capturing')
    buffered_out = self._consume_stringio(removed_reporter.settings.outfile)
    buffered_err = self._consume_stringio(removed_reporter.settings.errfile)

    log_level = Report.log_level_from_string(global_options.level or 'info')
    # Ideally, we'd use terminfo or somesuch to discover whether a
    # terminal truly supports color, but most that don't set TERM=dumb.
    color = global_options.colors and (os.getenv('TERM') != 'dumb')
    timing = global_options.time
    cache_stats = global_options.time  # TODO: Separate flag for this?

    if is_quiet:
        console_reporter = QuietReporter(run_tracker,
                                         QuietReporter.Settings(log_level=log_level, color=color,
                                                                timing=timing,
                                                                cache_stats=cache_stats))
    else:
        # Set up the new console reporter.
        stdout = sys.stdout.buffer if PY3 else sys.stdout
        stderr = sys.stderr.buffer if PY3 else sys.stderr
        settings = PlainTextReporter.Settings(log_level=log_level, outfile=stdout, errfile=stderr,
                                              color=color, indent=True, timing=timing,
                                              cache_stats=cache_stats,
                                              label_format=self.get_options().console_label_format,
                                              tool_output_format=self.get_options().console_tool_output_format)
        console_reporter = PlainTextReporter(run_tracker, settings)
        # Replay any output captured while flags were still being parsed.
        console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
        console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
        console_reporter.flush()
        run_tracker.report.add_reporter('console', console_reporter)

    if global_options.logdir:
        # Also write plaintext logs to a file. This is completely separate from the html reports.
        safe_mkdir(global_options.logdir)
        run_id = run_tracker.run_info.get_info('id')
        outfile = open(os.path.join(global_options.logdir, '{}.log'.format(run_id)), 'wb')
        errfile = open(os.path.join(global_options.logdir, '{}.err.log'.format(run_id)), 'wb')

        settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile,
                                              errfile=errfile, color=False, indent=True,
                                              timing=True, cache_stats=True,
                                              label_format=self.get_options().console_label_format,
                                              tool_output_format=self.get_options().console_tool_output_format)
        logfile_reporter = PlainTextReporter(run_tracker, settings)
        # Replay the buffered output into the logfile as well.
        logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
        logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
        logfile_reporter.flush()
        run_tracker.report.add_reporter('logfile', logfile_reporter)

    invalidation_report = self._get_invalidation_report()
    if invalidation_report:
        run_id = run_tracker.run_info.get_info('id')
        outfile = os.path.join(self.get_options().reports_dir, run_id, 'invalidation-report.csv')
        invalidation_report.set_filename(outfile)

    return invalidation_report
python
{ "resource": "" }
q28038
Pinger.ping
train
def ping(self, url):
    """Time a single roundtrip to the url.

    :param url to ping.
    :returns: the fastest ping time for a given netloc and number of tries. or Pinger.UNREACHABLE
      if ping times out.
    :rtype: float

    Note that we don't use actual ICMP pings, because cmd-line ping is inflexible and
    platform-dependent, so shelling out to it is annoying, and the ICMP python lib can only be
    called by the superuser.
    """
    timeout = self._timeout
    tries = self._tries
    return self._get_ping_time(url, timeout, tries)
python
{ "resource": "" }
q28039
BestUrlSelector.select_best_url
train
def select_best_url(self):
  """Yield the presumed-best url.

  Since urls are pre-sorted w.r.t. their ping times, the head of the list is always
  served. The same url keeps being returned until more than the allowed number of
  consecutive failures is observed for it; at that point the list is rotated left by
  one (the failed url moves to the tail) so the next-best url becomes the head.
  """
  candidate = self.parsed_urls[0]
  try:
    yield candidate
  except Exception:
    # Not thread-safe, but the pool used by the cache is subprocess-based, so no race.
    self.unsuccessful_calls[candidate] += 1
    if self.unsuccessful_calls[candidate] > self.max_failures:
      # Demote the failing url to the back of the line and give it a clean slate.
      self.parsed_urls.rotate(-1)
      self.unsuccessful_calls[candidate] = 0
    raise
  else:
    # Success resets the consecutive-failure counter.
    self.unsuccessful_calls[candidate] = 0
python
{ "resource": "" }
q28040
ExceptionSink.reset_log_location
train
def reset_log_location(cls, new_log_location): """Re-acquire file handles to error logs based in the new location. Class state: - Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and `cls._shared_error_fileobj`. OS state: - May create a new directory. - Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2). :raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not writable. """ # We could no-op here if the log locations are the same, but there's no reason not to have the # additional safety of re-acquiring file descriptors each time (and erroring out early if the # location is no longer writable). # Create the directory if possible, or raise if not writable. cls._check_or_create_new_destination(new_log_location) pid_specific_error_stream, shared_error_stream = cls._recapture_fatal_error_log_streams( new_log_location) # NB: mutate process-global state! if faulthandler.is_enabled(): logger.debug('re-enabling faulthandler') # Call Py_CLEAR() on the previous error stream: # https://github.com/vstinner/faulthandler/blob/master/faulthandler.c faulthandler.disable() # Send a stacktrace to this file if interrupted by a fatal error. faulthandler.enable(file=pid_specific_error_stream, all_threads=True) # NB: mutate the class variables! cls._log_dir = new_log_location cls._pid_specific_error_fileobj = pid_specific_error_stream cls._shared_error_fileobj = shared_error_stream
python
{ "resource": "" }
q28041
ExceptionSink.exceptions_log_path
train
def exceptions_log_path(cls, for_pid=None, in_dir=None):
  """Get the path to either the shared or pid-specific fatal errors log file.

  :param for_pid: If given, an IntegerForPid naming the pid-specific log; if None, the path
    of the shared (all-pids) log is returned.
  :param in_dir: Base directory for the log; defaults to the currently-registered log dir.
  :returns: The absolute path to the requested exceptions log file.
  """
  if for_pid is None:
    intermediate_filename_component = ''
  else:
    # NB: assert is stripped under `python -O`; callers are expected to pass a valid
    # IntegerForPid wrapper.
    assert(isinstance(for_pid, IntegerForPid))
    intermediate_filename_component = '.{}'.format(for_pid)
  in_dir = in_dir or cls._log_dir
  return os.path.join(
    in_dir,
    'logs',
    'exceptions{}.log'.format(intermediate_filename_component))
python
{ "resource": "" }
q28042
ExceptionSink.log_exception
train
def log_exception(cls, msg):
  """Try to log an error message to this process's error log and the shared error log.

  NB: Doesn't raise (logs an error instead).

  :param msg: The already-formatted message to append to both logs.
  """
  pid = os.getpid()
  fatal_error_log_entry = cls._format_exception_message(msg, pid)

  # We care more about this log than the shared log, so write to it first.
  try:
    cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
  except Exception as e:
    # Swallow and report via the normal logger: this method must never raise.
    logger.error(
      "Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}"
      .format(msg, cls._log_dir, pid, e))

  # Write to the shared log.
  try:
    # TODO: we should probably guard this against concurrent modification by other pants
    # subprocesses somehow.
    cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
  except Exception as e:
    logger.error(
      "Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}"
      .format(msg, cls._log_dir, pid, e))
python
{ "resource": "" }
q28043
ExceptionSink.trapped_signals
train
def trapped_signals(cls, new_signal_handler):
  """A contextmanager which temporarily overrides signal handling.

  :param new_signal_handler: The handler to install for the duration of the block.
  """
  # Acquire the previous handler *outside* the try block: if installation itself fails,
  # there is nothing to restore, and referencing an unbound `previous_signal_handler`
  # in the finally clause would raise a NameError that masks the real error.
  previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
  try:
    yield
  finally:
    cls.reset_signal_handler(previous_signal_handler)
python
{ "resource": "" }
q28044
ExceptionSink._log_unhandled_exception_and_exit
train
def _log_unhandled_exception_and_exit(cls, exc_class=None, exc=None, tb=None, add_newline=False):
  """A sys.excepthook implementation which logs the error and exits with failure.

  :param exc_class: Exception class; defaults to the currently-handled exception's class.
  :param exc: Exception instance; defaults to the currently-handled exception.
  :param tb: Traceback; defaults to the currently-handled exception's traceback.
  :param bool add_newline: Whether to append a trailing newline to the formatted report.
  """
  exc_class = exc_class or sys.exc_info()[0]
  exc = exc or sys.exc_info()[1]
  tb = tb or sys.exc_info()[2]

  # This exception was raised by a signal handler with the intent to exit the program.
  if exc_class == SignalHandler.SignalHandledNonLocalExit:
    return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)

  extra_err_msg = None
  try:
    # Always output the unhandled exception details into a log file, including the traceback.
    exception_log_entry = cls._format_unhandled_exception_log(exc, tb, add_newline,
                                                              should_print_backtrace=True)
    cls.log_exception(exception_log_entry)
  except Exception as e:
    # Even formatting/logging the exception can fail; remember why so we can surface it below.
    extra_err_msg = 'Additional error logging unhandled exception {}: {}'.format(exc, e)
    logger.error(extra_err_msg)

  # Generate an unhandled exception report fit to be printed to the terminal (respecting the
  # Exiter's should_print_backtrace field).
  stderr_printed_error = cls._format_unhandled_exception_log(
    exc, tb, add_newline,
    should_print_backtrace=cls._should_print_backtrace_to_terminal)
  if extra_err_msg:
    stderr_printed_error = '{}\n{}'.format(stderr_printed_error, extra_err_msg)
  cls._exit_with_failure(stderr_printed_error)
python
{ "resource": "" }
q28045
ExceptionSink._handle_signal_gracefully
train
def _handle_signal_gracefully(cls, signum, signame, traceback_lines): """Signal handler for non-fatal signals which raises or logs an error and exits with failure.""" # Extract the stack, and format an entry to be written to the exception log. formatted_traceback = cls._format_traceback(traceback_lines=traceback_lines, should_print_backtrace=True) signal_error_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format( signum=signum, signame=signame, formatted_traceback=formatted_traceback) # TODO: determine the appropriate signal-safe behavior here (to avoid writing to our file # descriptors re-entrantly, which raises an IOError). # This method catches any exceptions raised within it. cls.log_exception(signal_error_log_entry) # Create a potentially-abbreviated traceback for the terminal or other interactive stream. formatted_traceback_for_terminal = cls._format_traceback( traceback_lines=traceback_lines, should_print_backtrace=cls._should_print_backtrace_to_terminal) terminal_log_entry = cls._CATCHABLE_SIGNAL_ERROR_LOG_FORMAT.format( signum=signum, signame=signame, formatted_traceback=formatted_traceback_for_terminal) # Exit, printing the output to the terminal. cls._exit_with_failure(terminal_log_entry)
python
{ "resource": "" }
q28046
DaemonExiter.exit
train
def exit(self, result=0, msg=None, *args, **kwargs):
  """Exit the runtime.

  Runs the finalizer (if any), flushes msg and the exit code to the nailgun client over
  the socket, tears the socket down, and finally delegates to the parent Exiter.

  :param result: The exit code to send to the client.
  :param msg: An optional final message to write to the client's stderr.
  """
  if self._finalizer:
    try:
      self._finalizer()
    except Exception as e:
      try:
        NailgunProtocol.send_stderr(
          self._socket,
          '\nUnexpected exception in finalizer: {!r}\n'.format(e)
        )
      except Exception:
        # Best-effort: the client socket may already be unusable.
        pass

  try:
    # Write a final message to stderr if present.
    if msg:
      NailgunProtocol.send_stderr(self._socket, msg)

    # Send an Exit chunk with the result.
    NailgunProtocol.send_exit_with_code(self._socket, result)

    # Shutdown the connected socket.
    teardown_socket(self._socket)
  finally:
    # Always run the parent's exit logic, even if the socket writes above failed.
    super(DaemonExiter, self).exit(result=result, *args, **kwargs)
python
{ "resource": "" }
q28047
DaemonPantsRunner._tty_stdio
train
def _tty_stdio(cls, env): """Handles stdio redirection in the case of all stdio descriptors being the same tty.""" # If all stdio is a tty, there's only one logical I/O device (the tty device). This happens to # be addressable as a file in OSX and Linux, so we take advantage of that and directly open the # character device for output redirection - eliminating the need to directly marshall any # interactive stdio back/forth across the socket and permitting full, correct tty control with # no middle-man. stdin_ttyname, stdout_ttyname, stderr_ttyname = NailgunProtocol.ttynames_from_env(env) assert stdin_ttyname == stdout_ttyname == stderr_ttyname, ( 'expected all stdio ttys to be the same, but instead got: {}\n' 'please file a bug at http://github.com/pantsbuild/pants' .format([stdin_ttyname, stdout_ttyname, stderr_ttyname]) ) with open(stdin_ttyname, 'rb+', 0) as tty: tty_fileno = tty.fileno() with stdio_as(stdin_fd=tty_fileno, stdout_fd=tty_fileno, stderr_fd=tty_fileno): def finalizer(): termios.tcdrain(tty_fileno) yield finalizer
python
{ "resource": "" }
q28048
DaemonPantsRunner.nailgunned_stdio
train
def nailgunned_stdio(cls, sock, env, handle_stdin=True): """Redirects stdio to the connected socket speaking the nailgun protocol.""" # Determine output tty capabilities from the environment. stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(env) is_tty_capable = all((stdin_isatty, stdout_isatty, stderr_isatty)) if is_tty_capable: with cls._tty_stdio(env) as finalizer: yield finalizer else: with cls._pipe_stdio( sock, stdin_isatty, stdout_isatty, stderr_isatty, handle_stdin ) as finalizer: yield finalizer
python
{ "resource": "" }
q28049
DaemonPantsRunner._raise_deferred_exc
train
def _raise_deferred_exc(self):
  """Raises deferred exceptions from the daemon's synchronous path in the post-fork client.

  `_deferred_exception` is expected to be either a (type, value, traceback) triple or a
  bare exception instance; no-op when it is unset.
  """
  if self._deferred_exception:
    try:
      # Expect `_deferred_exception` to be a 3-item tuple of the values returned by
      # sys.exc_info(), and re-raise with the original traceback preserved.
      exc_type, exc_value, exc_traceback = self._deferred_exception
      raise_with_traceback(exc_value, exc_traceback)
    except TypeError:
      # If `_deferred_exception` isn't a 3-item tuple (raising a TypeError on the above
      # destructuring), treat it like a bare exception.
      raise self._deferred_exception
python
{ "resource": "" }
q28050
NpmResolver._scoped_package_name
train
def _scoped_package_name(node_task, package_name, node_scope): """Apply a node_scope to the package name. Overrides any existing package_name if already in a scope :return: A package_name with prepended with a node scope via '@' """ if not node_scope: return package_name scoped_package_name = package_name chunk = package_name.split('/', 1) if len(chunk) > 1 and chunk[0].startswith('@'): scoped_package_name = os.path.join('@{}'.format(node_scope), chunk[1:]) else: scoped_package_name = os.path.join('@{}'.format(node_scope), package_name) node_task.context.log.debug( 'Node package "{}" will be resolved with scope "{}".'.format(package_name, scoped_package_name)) return scoped_package_name
python
{ "resource": "" }
q28051
JvmCompile.compile
train
def compile(self, ctx, args, dependency_classpath, upstream_analysis,
            settings, compiler_option_sets, zinc_file_manager,
            javac_plugin_map, scalac_plugin_map):
  """Invoke the compiler.

  Subclasses must implement. Must raise TaskError on compile failure.

  :param CompileContext ctx: A CompileContext for the target to compile.
  :param list args: Arguments to the compiler (such as javac or zinc).
  :param list dependency_classpath: List of classpath entries of type ClasspathEntry for
    dependencies.
  :param upstream_analysis: A map from classpath entry to analysis file for dependencies.
  :param JvmPlatformSettings settings: platform settings determining the -source, -target,
    etc for javac to use.
  :param list compiler_option_sets: The compiler_option_sets flags for the target.
  :param zinc_file_manager: whether to use zinc provided file manager.
  :param javac_plugin_map: Map of names of javac plugins to use to their arguments.
  :param scalac_plugin_map: Map of names of scalac plugins to use to their arguments.
  """
  # Abstract hook: concrete compile tasks (e.g. zinc/javac wrappers) implement this.
  raise NotImplementedError()
python
{ "resource": "" }
q28052
JvmCompile.do_compile
train
def do_compile(self, invalidation_check, compile_contexts, classpath_product):
  """Executes compilations for the invalid targets contained in a single chunk.

  :param invalidation_check: Partition of targets into valid and invalid VersionedTargets.
  :param compile_contexts: Map from target to its compile context(s).
  :param classpath_product: Product to which compiled classpaths are registered.
  """
  invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
  valid_targets = [vt.target for vt in invalidation_check.all_vts if vt.valid]

  if self.execution_strategy == self.HERMETIC:
    # Hermetic execution needs digests for already-valid outputs too.
    self._set_directory_digests_for_valid_target_classpath_directories(valid_targets,
                                                                       compile_contexts)

  # Register classpaths for the targets that were already up to date.
  for valid_target in valid_targets:
    cc = self.select_runtime_context(compile_contexts[valid_target])
    classpath_product.add_for_target(
      valid_target,
      [(conf, self._classpath_for_context(cc)) for conf in self._confs],
    )
  self.register_extra_products_from_contexts(valid_targets, compile_contexts)

  if not invalid_targets:
    return

  # This ensures the workunit for the worker pool is set before attempting to compile.
  with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self.name())) \
          as workunit:
    # This uses workunit.parent as the WorkerPool's parent so that child workunits
    # of different pools will show up in order in the html output. This way the current running
    # workunit is on the bottom of the page rather than possibly in the middle.
    worker_pool = WorkerPool(workunit.parent, self.context.run_tracker, self._worker_count)

  # Prepare the output directory for each invalid target, and confirm that analysis is valid.
  for target in invalid_targets:
    cc = self.select_runtime_context(compile_contexts[target])
    safe_mkdir(cc.classes_dir.path)

  # Now create compile jobs for each invalid target one by one, using the classpath
  # generated by upstream JVM tasks and our own prepare_compile().
  jobs = self._create_compile_jobs(compile_contexts,
                                   invalid_targets,
                                   invalidation_check.invalid_vts,
                                   classpath_product)

  exec_graph = ExecutionGraph(jobs, self.get_options().print_exception_stacktrace)
  try:
    exec_graph.execute(worker_pool, self.context.log)
  except ExecutionFailure as e:
    raise TaskError("Compilation failure: {}".format(e))
python
{ "resource": "" }
q28053
JvmCompile._compile_vts
train
def _compile_vts(self, vts, ctx, upstream_analysis, dependency_classpath, progress_message,
                 settings, compiler_option_sets, zinc_file_manager, counter):
  """Compiles sources for the given vts into the given output dir.

  :param vts: VersionedTargetSet with one entry for the target.
  :param ctx: - A CompileContext instance for the target.
  :param dependency_classpath: A list of classpath entries of type ClasspathEntry for
    dependencies.
  :param progress_message: Human-readable description included in the progress line.
  :param counter: Callable/stateful counter used to render the '[n/total]' progress prefix.

  May be invoked concurrently on independent target sets.

  Postcondition: The individual targets in vts are up-to-date, as if each were
                 compiled individually.
  """
  if not ctx.sources:
    self.context.log.warn('Skipping {} compile for targets with no sources:\n {}'
                          .format(self.name(), vts.targets))
  else:
    counter_val = str(counter()).rjust(counter.format_length(), ' ')
    counter_str = '[{}/{}] '.format(counter_val, counter.size)
    # Do some reporting.
    self.context.log.info(
      counter_str,
      'Compiling ',
      items_to_report_element(ctx.sources, '{} source'.format(self.name())),
      ' in ',
      items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
      ' (',
      progress_message,
      ').')
    with self.context.new_workunit('compile', labels=[WorkUnitLabel.COMPILER]) as compile_workunit:
      try:
        # Delegate to the subclass-provided compiler invocation.
        directory_digest = self.compile(
          ctx,
          self._args,
          dependency_classpath,
          upstream_analysis,
          settings,
          compiler_option_sets,
          zinc_file_manager,
          self._get_plugin_map('javac', Java.global_instance(), ctx.target),
          self._get_plugin_map('scalac', ScalaPlatform.global_instance(), ctx.target),
        )
        self._capture_logs(compile_workunit, ctx.log_dir)
        return directory_digest
      except TaskError:
        # On failure, optionally scan this task's own logs for missing-dep suggestions.
        if self.get_options().suggest_missing_deps:
          logs = [path
                  for _, name, _, path in self._find_logs(compile_workunit)
                  if name == self.name()]
          if logs:
            self._find_missing_deps(logs, ctx.target)
        raise
python
{ "resource": "" }
q28054
JvmCompile._get_plugin_map
train
def _get_plugin_map(self, compiler, options_src, target):
  """Returns a map of plugin to args, for the given compiler.

  Only plugins that must actually be activated will be present as keys in the map.
  Plugins with no arguments will have an empty list as a value.

  Active plugins and their args will be gathered from (in order of precedence):
  - The <compiler>_plugins and <compiler>_plugin_args fields of the target, if it has them.
  - The <compiler>_plugins and <compiler>_plugin_args options of this task, if it has them.
  - The <compiler>_plugins and <compiler>_plugin_args fields of this task, if it has them.

  Note that in-repo plugins will not be returned, even if requested, when building
  themselves.  Use published versions of those plugins for that.

  See:
  - examples/src/java/org/pantsbuild/example/javac/plugin/README.md.
  - examples/src/scala/org/pantsbuild/example/scalac/plugin/README.md

  :param compiler: one of 'javac', 'scalac'.
  :param options_src: A JvmToolMixin instance providing plugin options.
  :param target: The target whose plugins we compute.
  """
  # Note that we get() options and getattr() target fields and task methods,
  # so we're robust when those don't exist (or are None).
  plugins_key = '{}_plugins'.format(compiler)
  requested_plugins = (
    tuple(getattr(self, plugins_key, []) or []) +
    tuple(options_src.get_options().get(plugins_key, []) or []) +
    tuple((getattr(target, plugins_key, []) or []))
  )
  # Allow multiple flags and also comma-separated values in a single flag.
  requested_plugins = {p for val in requested_plugins for p in val.split(',')}

  plugin_args_key = '{}_plugin_args'.format(compiler)
  available_plugin_args = {}
  # Later update()s win, matching the precedence documented above.
  available_plugin_args.update(getattr(self, plugin_args_key, {}) or {})
  available_plugin_args.update(options_src.get_options().get(plugin_args_key, {}) or {})
  available_plugin_args.update(getattr(target, plugin_args_key, {}) or {})

  # From all available args, pluck just the ones for the selected plugins.
  plugin_map = {}
  for plugin in requested_plugins:
    # Don't attempt to use a plugin while building that plugin.
    # This avoids a bootstrapping problem.  Note that you can still
    # use published plugins on themselves, just not in-repo plugins.
    if target not in self._plugin_targets(compiler).get(plugin, {}):
      plugin_map[plugin] = available_plugin_args.get(plugin, [])
  return plugin_map
python
{ "resource": "" }
q28055
JvmCompile._find_logs
train
def _find_logs(self, compile_workunit): """Finds all logs under the given workunit.""" for idx, workunit in enumerate(compile_workunit.children): for output_name, outpath in workunit.output_paths().items(): if output_name in ('stdout', 'stderr'): yield idx, workunit.name, output_name, outpath
python
{ "resource": "" }
q28056
JvmCompile._upstream_analysis
train
def _upstream_analysis(self, compile_contexts, classpath_entries): """Returns tuples of classes_dir->analysis_file for the closure of the target.""" # Reorganize the compile_contexts by class directory. compile_contexts_by_directory = {} for compile_context in compile_contexts.values(): compile_context = self.select_runtime_context(compile_context) compile_contexts_by_directory[compile_context.classes_dir.path] = compile_context # If we have a compile context for the target, include it. for entry in classpath_entries: path = entry.path if not path.endswith('.jar'): compile_context = compile_contexts_by_directory.get(path) if not compile_context: self.context.log.debug('Missing upstream analysis for {}'.format(path)) else: yield compile_context.classes_dir.path, compile_context.analysis_file
python
{ "resource": "" }
q28057
JvmCompile.should_compile_incrementally
train
def should_compile_incrementally(self, vts, ctx):
  """Check to see if the compile should try to re-use the existing analysis.

  :returns: True if we should try to compile the target incrementally.
  """
  # Only incremental targets can reuse analysis at all. When invalid analysis is not
  # being cleared we always reuse; otherwise reuse only if the analysis file exists.
  if vts.is_incremental:
    return (not self._clear_invalid_analysis) or os.path.exists(ctx.analysis_file)
  return False
python
{ "resource": "" }
q28058
JvmCompile._create_context_jar
train
def _create_context_jar(self, compile_context):
  """Jar up the compile_context to its output jar location.

  TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
  allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
  compile inputs would make the compiler's analysis useless.
  see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars

  :param compile_context: Context whose classes_dir contents are archived into its jar.
  """
  root = compile_context.classes_dir.path
  with compile_context.open_jar(mode='w') as jar:
    # Walk the whole classes tree; each entry's archive name is its path relative to root.
    for abs_sub_dir, dirnames, filenames in safe_walk(root):
      for name in dirnames + filenames:
        abs_filename = os.path.join(abs_sub_dir, name)
        arcname = fast_relpath(abs_filename, root)
        jar.write(abs_filename, arcname)
python
{ "resource": "" }
q28059
JvmCompile._extra_compile_time_classpath
train
def _extra_compile_time_classpath(self): """Compute any extra compile-time-only classpath elements.""" def extra_compile_classpath_iter(): for conf in self._confs: for jar in self.extra_compile_time_classpath_elements(): yield (conf, jar) return list(extra_compile_classpath_iter())
python
{ "resource": "" }
q28060
JvmCompile._plugin_targets
train
def _plugin_targets(self, compiler):
  """Returns a map from plugin name to the targets that build that plugin.

  :param compiler: one of 'javac', 'scalac'.
  :raises TaskError: for any other compiler name.
  """
  if compiler == 'javac':
    plugin_cls = JavacPlugin
  elif compiler == 'scalac':
    plugin_cls = ScalacPlugin
  else:
    raise TaskError('Unknown JVM compiler: {}'.format(compiler))
  plugin_tgts = self.context.targets(predicate=lambda t: isinstance(t, plugin_cls))
  # Map each plugin name to the full closure of its building target, so a plugin is
  # also considered "in-repo" for anything in that closure.
  return {t.plugin: t.closure() for t in plugin_tgts}
python
{ "resource": "" }
q28061
CppCompile.execute
train
def execute(self):
  """Compile all sources in a given target to object files."""

  def is_cc(source):
    # A source participates in compilation iff its extension is a configured C/C++ one.
    _, ext = os.path.splitext(source)
    return ext in self.get_options().cc_extensions

  targets = self.context.targets(self.is_cpp)

  # Compile source files to objects.
  with self.invalidated(targets, invalidate_dependents=True) as invalidation_check:
    obj_mapping = self.context.products.get('objs')
    for vt in invalidation_check.all_vts:
      for source in vt.target.sources_relative_to_buildroot():
        if is_cc(source):
          if not vt.valid:
            with self.context.new_workunit(name='cpp-compile', labels=[WorkUnitLabel.MULTITOOL]):
              # TODO: Parallelise the compilation.
              # TODO: Only recompile source files that have changed since the
              #       object file was last written. Also use the output from
              #       gcc -M to track dependencies on headers.
              self._compile(vt.target, vt.results_dir, source)
          # Register the object path even for valid (cached) targets.
          objpath = self._objpath(vt.target, vt.results_dir, source)
          obj_mapping.add(vt.target, vt.results_dir).append(objpath)
python
{ "resource": "" }
q28062
CppCompile._compile
train
def _compile(self, target, results_dir, source):
  """Compile given source to an object file.

  :param target: The cpp target owning the source.
  :param results_dir: Directory that receives the produced object file.
  :param source: Buildroot-relative path of the source file to compile.
  """
  obj = self._objpath(target, results_dir, source)
  safe_mkdir_for(obj)

  abs_source = os.path.join(get_buildroot(), source)

  # TODO: include dir should include dependent work dir when headers are copied there.
  include_dirs = []
  for dep in target.dependencies:
    if self.is_library(dep):
      include_dirs.extend([os.path.join(get_buildroot(), dep.target_base)])

  # Build the compiler command line: compile-only (-c), include paths, output, then
  # any user-configured extra options.
  cmd = [self.cpp_toolchain.compiler]
  cmd.extend(['-c'])
  cmd.extend(('-I{0}'.format(i) for i in include_dirs))
  cmd.extend(['-o' + obj, abs_source])
  cmd.extend(self.get_options().cc_options)

  # TODO: submit_async_work with self.run_command, [(cmd)] as a Work object.
  with self.context.new_workunit(name='cpp-compile', labels=[WorkUnitLabel.COMPILER]) as workunit:
    self.run_command(cmd, workunit)

  self.context.log.info('Built c++ object: {0}'.format(obj))
python
{ "resource": "" }
q28063
PantsHandler.do_GET
train
def do_GET(self):
  """GET method implementation for BaseHTTPRequestHandler."""
  if not self._client_allowed():
    return

  try:
    (_, _, path, query, _) = urlsplit(self.path)
    params = parse_qs(query)
    # Give each handler a chance to respond.
    for prefix, handler in self._GET_handlers:
      if self._maybe_handle(prefix, handler, path, params):
        return
    # If no path specified, default to showing the list of all runs.
    if path == '/':
      self._handle_runs('', {})
      return

    # NB: no trailing comma here - a stray comma previously made `content` a 1-tuple,
    # tripping _send_content's bytes assertion so the 400 page was never sent.
    content = 'Invalid GET request {}'.format(self.path).encode('utf-8')
    self._send_content(content, 'text/html', code=400)
  except (IOError, ValueError):
    # Client went away or sent a malformed request; nothing useful to do.
    pass
python
{ "resource": "" }
q28064
PantsHandler._handle_runs
train
def _handle_runs(self, relpath, params): """Show a listing of all pants runs since the last clean-all.""" runs_by_day = self._partition_runs_by_day() args = self._default_template_args('run_list.html') args['runs_by_day'] = runs_by_day content = self._renderer.render_name('base.html', args).encode("utf-8") self._send_content(content, 'text/html')
python
{ "resource": "" }
q28065
PantsHandler._handle_run
train
def _handle_run(self, relpath, params):
  """Show the report for a single pants run.

  :param relpath: The run id (or 'latest') extracted from the url path.
  """
  args = self._default_template_args('run.html')
  run_id = relpath
  run_info = self._get_run_info_dict(run_id)
  if run_info is None:
    # Unknown run id: render the 'no such run' variant of the page.
    args['no_such_run'] = relpath
    if run_id == 'latest':
      args['is_latest'] = 'none'
  else:
    report_abspath = run_info['default_report']
    report_relpath = os.path.relpath(report_abspath, self._root)
    report_dir = os.path.dirname(report_relpath)
    self_timings_path = os.path.join(report_dir, 'self_timings')
    cumulative_timings_path = os.path.join(report_dir, 'cumulative_timings')
    artifact_cache_stats_path = os.path.join(report_dir, 'artifact_cache_stats')
    run_info['timestamp_text'] = \
      datetime.fromtimestamp(float(run_info['timestamp'])).strftime('%H:%M:%S on %A, %B %d %Y')

    # Build the collapsible sections for timings and cache stats.
    timings_and_stats = '\n'.join([
      self._collapsible_fmt_string.format(id='cumulative-timings-collapsible',
                                          title='Cumulative timings',
                                          class_prefix='aggregated-timings'),
      self._collapsible_fmt_string.format(id='self-timings-collapsible',
                                          title='Self timings',
                                          class_prefix='aggregated-timings'),
      self._collapsible_fmt_string.format(id='artifact-cache-stats-collapsible',
                                          title='Artifact cache stats',
                                          class_prefix='artifact-cache-stats')
    ])

    args.update({'run_info': run_info,
                 'report_path': report_relpath,
                 'self_timings_path': self_timings_path,
                 'cumulative_timings_path': cumulative_timings_path,
                 'artifact_cache_stats_path': artifact_cache_stats_path,
                 'timings_and_stats': timings_and_stats})
    if run_id == 'latest':
      args['is_latest'] = run_info['id']

  content = self._renderer.render_name('base.html', args).encode("utf-8")
  self._send_content(content, 'text/html')
python
{ "resource": "" }
q28066
PantsHandler._handle_browse
train
def _handle_browse(self, relpath, params): """Handle requests to browse the filesystem under the build root.""" abspath = os.path.normpath(os.path.join(self._root, relpath)) if not abspath.startswith(self._root): raise ValueError # Prevent using .. to get files from anywhere other than root. if os.path.isdir(abspath): self._serve_dir(abspath, params) elif os.path.isfile(abspath): self._serve_file(abspath, params)
python
{ "resource": "" }
q28067
PantsHandler._handle_content
train
def _handle_content(self, relpath, params):
  """Render file content for pretty display.

  Text-like files are rendered with source prettification; anything else is shown as an
  escaped hex/repr dump split into fixed-width lines.
  """
  abspath = os.path.normpath(os.path.join(self._root, relpath))
  if os.path.isfile(abspath):
    with open(abspath, 'rb') as infile:
      content = infile.read()
  else:
    content = 'No file found at {}'.format(abspath).encode('utf-8')
  content_type = mimetypes.guess_type(abspath)[0] or 'text/plain'
  if not content_type.startswith('text/') and not content_type == 'application/xml':
    # Binary file. Display it as hex, split into lines.
    n = 120  # Display lines of this max size.
    content = repr(content)[1:-1]  # Will escape non-printables etc, dropping surrounding quotes.
    content = '\n'.join([content[i:i + n] for i in range(0, len(content), n)])
    prettify = False
    prettify_extra_langs = []
  else:
    prettify = True
    if self._settings.assets_dir:
      prettify_extra_dir = os.path.join(self._settings.assets_dir, 'js', 'prettify_extra_langs')
      prettify_extra_langs = [{'name': x} for x in os.listdir(prettify_extra_dir)]
    else:
      # TODO: Find these from our package, somehow.
      prettify_extra_langs = []
  linenums = True
  args = {'prettify_extra_langs': prettify_extra_langs, 'content': content,
          'prettify': prettify, 'linenums': linenums}
  content = self._renderer.render_name('file_content.html', args).encode("utf-8")
  self._send_content(content, 'text/html')
python
{ "resource": "" }
q28068
PantsHandler._handle_poll
train
def _handle_poll(self, relpath, params): """Handle poll requests for raw file contents.""" request = json.loads(params.get('q')[0]) ret = {} # request is a polling request for multiple files. For each file: # - id is some identifier assigned by the client, used to differentiate the results. # - path is the file to poll. # - pos is the last byte position in that file seen by the client. for poll in request: _id = poll.get('id', None) path = poll.get('path', None) pos = poll.get('pos', 0) if path: abspath = os.path.normpath(os.path.join(self._root, path)) if os.path.isfile(abspath): with open(abspath, 'rb') as infile: if pos: infile.seek(pos) content = infile.read() ret[_id] = content.decode("utf-8") content = json.dumps(ret).encode("utf-8") self._send_content(content, 'application/json')
python
{ "resource": "" }
q28069
PantsHandler._partition_runs_by_day
train
def _partition_runs_by_day(self): """Split the runs by day, so we can display them grouped that way.""" run_infos = self._get_all_run_infos() for x in run_infos: ts = float(x['timestamp']) x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S') def date_text(dt): delta_days = (date.today() - dt).days if delta_days == 0: return 'Today' elif delta_days == 1: return 'Yesterday' elif delta_days < 7: return dt.strftime('%A') # Weekday name. else: d = dt.day % 10 suffix = 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th' return dt.strftime('%B %d') + suffix # E.g., October 30th. keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp'])) sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True) return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]} for dt, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())]
python
{ "resource": "" }
q28070
PantsHandler._get_run_info_dict
train
def _get_run_info_dict(self, run_id):
  """Get the RunInfo for a run, as a dict.

  :param run_id: The id of the run whose info file is looked up.
  :returns: The run's info as a dict, or None if no info file exists for that id.
  """
  run_info_path = os.path.join(self._settings.info_dir, run_id, 'info')
  if os.path.exists(run_info_path):
    # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.
    return RunInfo(run_info_path).get_as_dict()
  else:
    return None
python
{ "resource": "" }
q28071
PantsHandler._get_all_run_infos
train
def _get_all_run_infos(self):
  """Find the RunInfos for all runs since the last clean-all.

  :returns: A list of run-info dicts; empty if the info dir doesn't exist.
  """
  info_dir = self._settings.info_dir
  if not os.path.isdir(info_dir):
    return []
  paths = [os.path.join(info_dir, x) for x in os.listdir(info_dir)]

  # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.
  # We filter only those that have a timestamp, to avoid a race condition with writing
  # that field.
  return [d for d in
          [RunInfo(os.path.join(p, 'info')).get_as_dict() for p in paths
           if os.path.isdir(p) and not os.path.islink(p)]
          if 'timestamp' in d]
python
{ "resource": "" }
q28072
PantsHandler._serve_dir
train
def _serve_dir(self, abspath, params):
  """Show a directory listing.

  :param abspath: Absolute path of the directory to list (under the root).
  """
  relpath = os.path.relpath(abspath, self._root)
  breadcrumbs = self._create_breadcrumbs(relpath)
  # One entry per directory member, each linking to its own browse page.
  entries = [{'link_path': os.path.join(relpath, e), 'name': e}
             for e in os.listdir(abspath)]
  args = self._default_template_args('dir.html')
  args.update({'root_parent': os.path.dirname(self._root),
               'breadcrumbs': breadcrumbs,
               'entries': entries,
               'params': params})
  content = self._renderer.render_name('base.html', args).encode("utf-8")
  self._send_content(content, 'text/html')
python
{ "resource": "" }
q28073
PantsHandler._serve_file
train
def _serve_file(self, abspath, params):
  """Show a file.

  The actual content of the file is rendered by _handle_content.

  :param abspath: Absolute path of the file to show (under the root).
  """
  relpath = os.path.relpath(abspath, self._root)
  breadcrumbs = self._create_breadcrumbs(relpath)
  # Preserve the query params in the content link.
  link_path = urlunparse(['', '', relpath, '', urlencode(params), ''])
  args = self._default_template_args('file.html')
  args.update({'root_parent': os.path.dirname(self._root),
               'breadcrumbs': breadcrumbs,
               'link_path': link_path})
  content = self._renderer.render_name('base.html', args).encode("utf-8")
  self._send_content(content, 'text/html')
python
{ "resource": "" }
q28074
PantsHandler._send_content
train
def _send_content(self, content, content_type, code=200):
  """Send content to client.

  :param bytes content: the response body to write.
  :param content_type: value for the Content-Type header.
  :param int code: HTTP status code, 200 by default.
  """
  assert isinstance(content, bytes)
  self.send_response(code)
  for header, value in (('Content-Type', content_type),
                        ('Content-Length', str(len(content)))):
    self.send_header(header, value)
  self.end_headers()
  self.wfile.write(content)
python
{ "resource": "" }
q28075
PantsHandler._client_allowed
train
def _client_allowed(self):
  """Check if client is allowed to connect to this server.

  Sends a 'forbidden' page to the client if it is not allowed.

  :returns: True if the client's IP is in the allowed-clients list, or if 'ALL'
            clients are allowed; False otherwise.
  :rtype: bool
  """
  client_ip = self._client_address[0]
  allowed = self._settings.allowed_clients
  # Idiom fix: use `x not in xs` rather than `not x in xs`.
  if client_ip not in allowed and 'ALL' not in allowed:
    content = 'Access from host {} forbidden.'.format(client_ip).encode('utf-8')
    self._send_content(content, 'text/html')
    return False
  return True
python
{ "resource": "" }
q28076
PantsHandler._maybe_handle
train
def _maybe_handle(self, prefix, handler, path, params, data=None):
  """Apply the handler if the prefix matches.

  :param prefix: URL path prefix this handler is mounted at.
  :param handler: callable invoked with (relpath, params[, data]) on a match.
  :param path: the request path to test.
  :returns: True if the handler was applied, False otherwise.
  """
  if not path.startswith(prefix):
    return False
  relpath = path[len(prefix):]
  if data:
    handler(relpath, params, data)
  else:
    handler(relpath, params)
  return True
python
{ "resource": "" }
q28077
PantsHandler._create_breadcrumbs
train
def _create_breadcrumbs(self, relpath):
  """Create filesystem browsing breadcrumb navigation.

  That is, make each path segment into a clickable element that takes you to that dir.

  :param relpath: path of the browsed entry relative to self._root; '.' means the root itself.
  :returns: a list of {'link_path': ..., 'name': ...} dicts, ordered root-first.
  """
  if relpath == '.':
    # The root itself gets no breadcrumbs.
    breadcrumbs = []
  else:
    # First segment is the root dir's own name; the rest are relpath's components.
    path_parts = [os.path.basename(self._root)] + relpath.split(os.path.sep)
    # Link for segment i is the root-relative path up to and including that segment
    # (excluding the root name itself); the root crumb (i == 0) gets an empty link.
    path_links = ['/'.join(path_parts[1:i + 1]) for i, name in enumerate(path_parts)]
    breadcrumbs = [{'link_path': link_path, 'name': name}
                   for link_path, name in zip(path_links, path_parts)]
  return breadcrumbs
python
{ "resource": "" }
q28078
PantsHandler._default_template_args
train
def _default_template_args(self, content_template):
  """Initialize template args.

  :param content_template: name of the template that renders the page's main content.
  :returns: an args dict containing 'content_template' and an 'include' function
            callable from within templates.
  """
  def include(text, args):
    # Render the text first, so the template name itself may contain mustache vars.
    template_name = pystache.render(text, args)
    return self._renderer.render_name(template_name, args)
  # Our base template calls include on the content_template.
  ret = {'content_template': content_template}
  # NB: the lambda closes over ret itself, so included templates see the complete
  # (and possibly later-updated) args dict.
  ret['include'] = lambda text: include(text, ret)
  return ret
python
{ "resource": "" }
q28079
ArgSplitter._consume_flags
train
def _consume_flags(self):
  """Read flags until we encounter the first token that isn't a flag.

  :returns: the list of consumed flags, excluding any recognized as help requests.
  """
  flags = []
  while self._at_flag():
    arg = self._unconsumed_args.pop()
    # Flags recognized by _check_for_help_request are filtered out of the result.
    if not self._check_for_help_request(arg):
      flags.append(arg)
  return flags
python
{ "resource": "" }
q28080
ArgSplitter._descope_flag
train
def _descope_flag(self, flag, default_scope):
  """If the flag is prefixed by its scope, in the old style, extract the scope.

  Otherwise assume it belongs to default_scope.

  returns a pair (scope, flag).
  """
  # Try each known scope prefix, with and without boolean negation ('--no-').
  for scope_prefix, scope_info in self._known_scoping_prefixes:
    for flag_prefix in ['--', '--no-']:
      prefix = flag_prefix + scope_prefix
      if flag.startswith(prefix):
        scope = scope_info.scope
        if scope_info.category == ScopeInfo.SUBSYSTEM and default_scope != GLOBAL_SCOPE:
          # We allow goal.task --subsystem-foo to refer to the task-level subsystem instance,
          # i.e., as if qualified by --subsystem-goal-task-foo.
          # Note that this means that we can't set a task option on the cmd-line if its
          # name happens to start with a subsystem scope.
          # TODO: Either fix this or at least detect such options and warn.
          task_subsystem_scope = '{}.{}'.format(scope_info.scope, default_scope)
          if task_subsystem_scope in self._known_scopes:
            # Such a task subsystem actually exists.
            scope = task_subsystem_scope
        # Rebuild the flag with the scope prefix stripped but the '--'/'--no-' kept.
        return scope, flag_prefix + flag[len(prefix):]
  return default_scope, flag
python
{ "resource": "" }
q28081
Git.detect_worktree
train
def detect_worktree(cls, binary='git', subdir=None):
  """Detect the git working tree above cwd and return it; else, return None.

  :param string binary: The path to the git binary to use, 'git' by default.
  :param string subdir: The path to start searching for a git repo.
  :returns: path to the directory where the git working tree is rooted.
  :rtype: string
  """
  # TODO(John Sirois): This is only used as a factory for a Git instance in
  # pants.base.build_environment.get_scm, encapsulate in a true factory method.
  rev_parse_cmd = [binary, 'rev-parse', '--show-toplevel']
  try:
    if subdir:
      with pushd(subdir):
        git_process, output = cls._invoke(rev_parse_cmd)
    else:
      git_process, output = cls._invoke(rev_parse_cmd)
    cls._check_result(rev_parse_cmd, git_process.returncode, raise_type=Scm.ScmException)
  except Scm.ScmException:
    # Not inside a git working tree (or git failed); signal by returning None.
    return None
  else:
    return cls._cleanse(output)
python
{ "resource": "" }
q28082
Git.clone
train
def clone(cls, repo_url, dest, binary='git'):
  """Clone the repo at repo_url into dest.

  :param string binary: The path to the git binary to use, 'git' by default.
  :returns: an instance of this class representing the cloned repo.
  :rtype: Git
  """
  clone_cmd = [binary, 'clone', repo_url, dest]
  # The clone's stdout is not interesting; only the return code is checked.
  git_process, _ = cls._invoke(clone_cmd)
  cls._check_result(clone_cmd, git_process.returncode)
  return cls(binary=binary, worktree=dest)
python
{ "resource": "" }
q28083
Git._invoke
train
def _invoke(cls, cmd):
  """Invoke the given command, and return a tuple of process and raw binary output.

  stderr flows to wherever its currently mapped for the parent process - generally to
  the terminal where the user can see the error.

  :param list cmd: The command in the form of a list of strings
  :returns: The completed process object and its standard output.
  :raises: Scm.LocalException if there was a problem exec'ing the command at all.
  """
  try:
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE)
  except OSError as err:
    # The binary doesn't exist or is not executable.
    raise cls.LocalException('Failed to execute command {}: {}'.format(' '.join(cmd), err))
  stdout_data, _ = child.communicate()
  return child, stdout_data
python
{ "resource": "" }
q28084
Git._get_upstream
train
def _get_upstream(self):
  """Return the remote and remote merge branch for the current branch.

  Results are cached on self._remote / self._branch after the first lookup.

  :raises: Scm.LocalException if the local branch cannot be determined.
  """
  if not (self._remote and self._branch):
    current_branch = self.branch_name
    if not current_branch:
      raise Scm.LocalException('Failed to determine local branch')

    def lookup(key):
      # `git config` output carries a trailing newline; strip it.
      value = self._check_output(['config', '--local', '--get', key],
                                 raise_type=Scm.LocalException)
      return value.strip()

    self._remote = self._remote or lookup('branch.{}.remote'.format(current_branch))
    self._branch = self._branch or lookup('branch.{}.merge'.format(current_branch))
  return self._remote, self._branch
python
{ "resource": "" }
q28085
GitRepositoryReader.listdir
train
def listdir(self, relpath):
  """Like os.listdir, but reads from the git repository.

  :returns: a list of relative filenames
  """
  path = self._realpath(relpath)
  # _realpath marks directories with a trailing slash.
  if not path.endswith('/'):
    raise self.NotADirException(self.rev, relpath)
  # Absolute or ../-prefixed paths escaped the repo; list the real filesystem.
  if path.startswith(('/', '../')):
    return os.listdir(path)
  return list(self._read_tree(path[:-1]).keys())
python
{ "resource": "" }
q28086
GitRepositoryReader.open
train
def open(self, relpath):
  """Read a file out of the repository at a certain revision.

  This is complicated because, unlike vanilla git cat-file, this follows symlinks in
  the repo.  If a symlink points outside repo, the file is read from the filesystem;
  that's because presumably whoever put that symlink there knew what they were doing.

  Yields a binary file-like object (generator-as-context-manager style).
  """
  path = self._realpath(relpath)
  # _realpath marks directories with a trailing slash.
  if path.endswith('/'):
    raise self.IsDirException(self.rev, relpath)

  if path.startswith('../') or path[0] == '/':
    # The path escaped the repo via a symlink; read from the real filesystem.
    # NB: this is the builtin open(), not self.open().
    yield open(path, 'rb')
    return

  object_type, data = self._read_object_from_repo(rev=self.rev, relpath=path)
  if object_type == b'tree':
    raise self.IsDirException(self.rev, relpath)
  assert object_type == b'blob'
  yield io.BytesIO(data)
python
{ "resource": "" }
q28087
GitRepositoryReader._realpath
train
def _realpath(self, relpath):
  """Follow symlinks to find the real path to a file or directory in the repo.

  :returns: if the expanded path points to a file, the relative path
            to that file; if a directory, the relative path + '/'; if
            a symlink outside the repo, a path starting with / or ../.
  :raises: SymlinkLoopException if the symlink chain never terminates.
  """
  final_obj, resolved_path = self._read_object(relpath, MAX_SYMLINKS_IN_REALPATH)
  # Still sitting on a symlink after the maximum number of hops means a loop.
  if isinstance(final_obj, self.Symlink):
    raise self.SymlinkLoopException(self.rev, relpath)
  return resolved_path
python
{ "resource": "" }
q28088
GitRepositoryReader._read_tree
train
def _read_tree(self, path):
  """Given a revision and path, parse the tree data out of git cat-file output.

  Results are cached in self._trees.

  :returns: a dict from filename -> Symlink, Dir, or File object
  """
  path = self._fixup_dot_relative(path)

  tree = self._trees.get(path)
  if tree:
    return tree
  tree = {}
  object_type, tree_data = self._read_object_from_repo(rev=self.rev, relpath=path)
  assert object_type == b'tree'
  # The tree data here is (mode ' ' filename \0 20-byte-sha)*
  # It's transformed to a list of byte chars to allow iteration.
  # See http://python-future.org/compatible_idioms.html#byte-string-literals.
  tree_data = [bytes([b]) for b in tree_data]
  i = 0
  while i < len(tree_data):
    start = i
    # Scan the octal mode up to the separating space.
    while tree_data[i] != b' ':
      i += 1
    mode = b''.join(tree_data[start:i])
    i += 1  # skip space
    start = i
    # Scan the filename up to the NUL terminator.
    while tree_data[i] != NUL:
      i += 1
    name = b''.join(tree_data[start:i])
    # The 20 raw bytes after the NUL are the entry's sha; keep it hex-encoded.
    sha = b''.join(tree_data[i + 1:i + 1 + GIT_HASH_LENGTH])
    sha_hex = binascii.hexlify(sha)
    i += 1 + GIT_HASH_LENGTH
    if mode == b'120000':
      # Mode 120000 is a symlink entry.
      tree[name] = self.Symlink(name, sha_hex)
    elif mode == b'40000':
      # Mode 40000 is a subdirectory (tree) entry.
      tree[name] = self.Dir(name, sha_hex)
    else:
      tree[name] = self.File(name, sha_hex)
  self._trees[path] = tree
  return tree
python
{ "resource": "" }
q28089
BundleMixin.register_options
train
def register_options(cls, register):
  """Register options common to all bundle tasks.

  :param register: the option registration callback supplied by the options system.
  """
  super(BundleMixin, cls).register_options(register)
  register('--archive', choices=list(archive.TYPE_NAMES), fingerprint=True,
           help='Create an archive of this type from the bundle. '
                'This option is also defined in app target. '
                'Precedence is CLI option > target option > pants.ini option.')
  # `target.id` ensures global uniqueness, this flag is provided primarily for
  # backward compatibility.
  register('--use-basename-prefix', advanced=True, type=bool,
           help='Use target basename to prefix bundle folder or archive; otherwise a unique '
                'identifier derived from target will be used.')
python
{ "resource": "" }
q28090
BundleMixin.resolved_option
train
def resolved_option(options, target, key):
  """Get value for option "key".

  Resolution precedence is CLI option > target option > pants.ini option.

  :param options: Options returned by `task.get_option()`
  :param target: Target
  :param key: Key to get using the resolution precedence
  """
  cli_or_config_value = options.get(key)
  # Non-app targets carry no per-target option fields, and an explicitly passed
  # CLI flag always wins over any target-level setting.
  if not isinstance(target, AppBase) or options.is_flagged(key):
    return cli_or_config_value
  target_value = target.payload.get_field_value(key, None)
  if target_value is None:
    return cli_or_config_value
  return target_value
python
{ "resource": "" }
q28091
BundleMixin.symlink_bundles
train
def symlink_bundles(self, app, bundle_dir):
  """For each bundle in the given app, symlinks relevant matched paths.

  Validates that at least one path was matched by a bundle.

  :raises: TargetDefinitionException if a bundle matched no files at all.
  """
  for bundle_index, app_bundle in enumerate(app.bundles):
    # An empty filemap means the bundle spec matched nothing.
    if not app_bundle.filemap:
      raise TargetDefinitionException(app.target,
                                      'Bundle index {} of "bundles" field '
                                      'does not match any files.'.format(bundle_index))
    for source_path, bundle_relpath in app_bundle.filemap.items():
      dest_path = os.path.join(bundle_dir, bundle_relpath)
      if os.path.exists(dest_path):
        continue
      if os.path.isfile(source_path):
        safe_mkdir(os.path.dirname(dest_path))
        os.symlink(source_path, dest_path)
      elif os.path.isdir(source_path):
        safe_mkdir(dest_path)
python
{ "resource": "" }
q28092
BundleMixin.publish_results
train
def publish_results(self, dist_dir, use_basename_prefix, vt, bundle_dir, archivepath, id, archive_ext):
  """Publish a copy of the bundle and archive from the results dir in dist."""
  # TODO (from mateor) move distdir management somewhere more general purpose.
  name = vt.target.basename if use_basename_prefix else id

  def log_copy(kind, copy_path):
    self.context.log.info(
      'created {} copy {}'.format(kind, os.path.relpath(copy_path, get_buildroot())))

  bundle_copy = os.path.join(dist_dir, '{}-bundle'.format(name))
  absolute_symlink(bundle_dir, bundle_copy)
  log_copy('bundle', bundle_copy)

  if archivepath:
    ext = archive.archive_extensions.get(archive_ext, archive_ext)
    archive_copy = os.path.join(dist_dir, '{}.{}'.format(name, ext))
    safe_mkdir_for(archive_copy)  # Ensure the parent dir exists.
    atomic_copy(archivepath, archive_copy)
    log_copy('archive', archive_copy)
python
{ "resource": "" }
q28093
NodeResolve.prepare
train
def prepare(cls, options, round_manager):
  """Allow each resolver to declare additional product requirements."""
  super(NodeResolve, cls).prepare(options, round_manager)
  # Give every registered resolver a chance to declare its own requirements.
  for node_resolver in cls._resolver_by_type.values():
    node_resolver.prepare(options, round_manager)
python
{ "resource": "" }
q28094
NodeResolve._topological_sort
train
def _topological_sort(self, targets):
  """Topologically order a list of targets.

  :returns: the requested targets only, in reversed sort_targets order.
  """
  requested = set(targets)
  # sort_targets may include extra targets; keep only the ones we were asked about.
  return [tgt for tgt in reversed(sort_targets(targets)) if tgt in requested]
python
{ "resource": "" }
q28095
safe_kill
train
def safe_kill(pid, signum):
  """Kill a process with the specified signal, catching nonfatal errors.

  ESRCH (no such process) and EPERM (no permission) are swallowed; EINVAL is
  surfaced as a ValueError; anything else propagates.
  """
  assert isinstance(pid, IntegerForPid)
  assert isinstance(signum, int)

  try:
    os.kill(pid, signum)
  except (IOError, OSError) as e:
    if e.errno in (errno.ESRCH, errno.EPERM):
      # Process is gone, or we lack permission to signal it: nonfatal.
      pass
    elif e.errno == errno.EINVAL:
      raise ValueError("Invalid signal number {}: {}".format(signum, e), e)
    else:
      raise
python
{ "resource": "" }
q28096
IvyResolveResult.all_linked_artifacts_exist
train
def all_linked_artifacts_exist(self):
  """All of the artifact paths for this resolve point to existing files.

  :returns: True iff the resolve produced artifacts and every resolved path is an
            existing regular file.
  :rtype: bool
  """
  if not self.has_resolved_artifacts:
    return False
  # Replaces a manual loop with a dangling for/else; all() short-circuits on the
  # first missing file, just as the original early return did.
  return all(os.path.isfile(path) for path in self.resolved_artifact_paths)
python
{ "resource": "" }
q28097
IvyResolveResult.resolved_jars_for_each_target
train
def resolved_jars_for_each_target(self, conf, targets):
  """Yields the resolved jars for each passed JarLibrary.

  If there is no report for the requested conf, yields nothing.

  :param conf: The ivy conf to load jars for.
  :param targets: The collection of JarLibrary targets to find resolved jars for.
  :yield: target, resolved_jars
  :raises IvyTaskMixin.UnresolvedJarError
  """
  ivy_info = self._ivy_info_for(conf)
  if not ivy_info:
    return
  # Non-JarLibrary targets contribute no jars and are skipped.
  jar_library_targets = [t for t in targets if isinstance(t, JarLibrary)]
  # Shared across all targets; presumably memoizes work in _resolved_jars_with_hardlinks
  # so common dependencies are only processed once — confirm against that helper.
  ivy_jar_memo = {}
  for target in jar_library_targets:
    # Add the artifacts from each dependency module.
    resolved_jars = self._resolved_jars_with_hardlinks(conf, ivy_info, ivy_jar_memo,
                                                       self._jar_dependencies_for_target(conf, target),
                                                       target)
    yield target, resolved_jars
python
{ "resource": "" }
q28098
IvyInfo.traverse_dependency_graph
train
def traverse_dependency_graph(self, ref, collector, memo=None):
  """Traverses module graph, starting with ref, collecting values for each ref into the sets
  created by the collector function.

  :param ref an IvyModuleRef to start traversing the ivy dependency graph
  :param collector a function that takes a ref and returns a new set of values to collect for
         that ref, which will also be updated with all the dependencies accumulated values
  :param memo is a dict of ref -> set that memoizes the results of each node in the graph.
         If provided, allows for retaining cache across calls.
  :returns the accumulated set for ref
  """
  # Swap in the ref that actually won version resolution, if there is one.
  winning_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
  if winning_ref:
    ref = winning_ref
  results_memo = dict() if memo is None else memo
  return self._do_traverse_dependency_graph(ref, collector, results_memo, set())
python
{ "resource": "" }
q28099
IvyInfo.get_resolved_jars_for_coordinates
train
def get_resolved_jars_for_coordinates(self, coordinates, memo=None):
  """Collects jars for the passed coordinates.

  Because artifacts are only fetched for the "winning" version of a module, the artifacts
  will not always represent the version originally declared by the library.

  This method is transitive within the passed coordinates dependencies.

  :param coordinates collections.Iterable: Collection of coordinates to collect transitive
                                           resolved jars for.
  :param memo: See `traverse_dependency_graph`.
  :returns: All the artifacts for all of the jars for the provided coordinates,
            including transitive dependencies.
  :rtype: list of :class:`pants.java.jar.ResolvedJar`
  """
  def to_resolved_jar(jar_ref, jar_path):
    # Wrap a graph ref + artifact path as a ResolvedJar with a full M2 coordinate.
    return ResolvedJar(coordinate=M2Coordinate(org=jar_ref.org, name=jar_ref.name,
                                               rev=jar_ref.rev, classifier=jar_ref.classifier,
                                               ext=jar_ref.ext),
                       cache_path=jar_path)
  resolved_jars = OrderedSet()
  def create_collection(dep):
    return OrderedSet([dep])
  for jar in coordinates:
    # For non-default confs, the conf name takes the place of the jar's classifier.
    classifier = jar.classifier if self._conf == 'default' else self._conf
    jar_module_ref = IvyModuleRef(jar.org, jar.name, jar.rev, classifier, jar.ext)
    # Walk the transitive dependency graph rooted at this coordinate, collecting
    # every artifact path recorded for each (unversioned) module encountered.
    for module_ref in self.traverse_dependency_graph(jar_module_ref, create_collection, memo):
      for artifact_path in self._artifacts_by_ref[module_ref.unversioned]:
        resolved_jars.add(to_resolved_jar(module_ref, artifact_path))
  return resolved_jars
python
{ "resource": "" }