_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q27700 | JvmBinaryTask.list_external_jar_dependencies | train | def list_external_jar_dependencies(self, binary):
  """Returns the external jar dependencies of the given binary.

  :param binary: The jvm binary target to list transitive external dependencies for.
  :type binary: :class:`pants.backend.jvm.targets.jvm_binary.JvmBinary`
  :returns: A list of (jar path, coordinate) tuples.
  :rtype: list of (string, :class:`pants.java.jar.M2Coordinate`)
  """
  # Resolved artifact entries live in the 'runtime_classpath' product.
  classpath_products = self.context.products.get_data('runtime_classpath')
  # Gather artifact entries over the binary's transitive closure, restricted to
  # JVM runtime scopes and honoring intransitive dependencies.
  classpath_entries = classpath_products.get_artifact_classpath_entries_for_targets(
    binary.closure(bfs=True, include_scopes=Scopes.JVM_RUNTIME_SCOPES,
                   respect_intransitive=True))
  # Keep only entries from the 'default' conf, de-duplicated in stable order.
  external_jars = OrderedSet(jar_entry for conf, jar_entry in classpath_entries
                             if conf == 'default')
  # Drop any jar the binary explicitly excludes from deployment.
  return [(entry.path, entry.coordinate) for entry in external_jars
          if not entry.is_excluded_by(binary.deploy_excludes)] | python | {
"resource": ""
} |
q27701 | JvmBinaryTask.monolithic_jar | train | def monolithic_jar(self, binary, path, manifest_classpath=None):
  """Creates a jar containing all the dependencies for a jvm_binary target.

  Yields a handle to the open jarfile, so the caller can add to the jar if needed.

  The yielded jar file either has all the class files for the jvm_binary target as
  a fat jar, or includes those dependencies in the `Class-Path` field of its
  Manifest.

  :param binary: The jvm_binary target to operate on.
  :param path: Write the output jar here, overwriting an existing file, if any.
  :param iterable manifest_classpath: If set output jar will set as its manifest's
    classpath, otherwise output jar will simply include class files.
  """
  # TODO(benjy): There's actually nothing here that requires 'binary' to be a jvm_binary.
  # It could be any target. And that might actually be useful.
  with self.context.new_workunit(name='create-monolithic-jar'):
    with self.open_jar(path,
                       jar_rules=binary.deploy_jar_rules,
                       overwrite=True,
                       compressed=True) as monolithic_jar:
      if manifest_classpath:
        # Thin-jar mode: reference dependencies via the manifest's Class-Path.
        monolithic_jar.append_classpath(manifest_classpath)
      else:
        # Fat-jar mode: copy the binary's own (and internal dep) classes in,
        # then dump every external jar dependency into the output jar.
        with self.context.new_workunit(name='add-internal-classes'):
          with self.create_jar_builder(monolithic_jar) as jar_builder:
            jar_builder.add_target(binary, recursive=True)
        # NB(gmalmquist): Shading each jar dependency with its own prefix would be a nice feature,
        # but is not currently possible with how things are set up. It may not be possible to do
        # in general, at least efficiently.
        with self.context.new_workunit(name='add-dependency-jars'):
          dependencies = self.list_external_jar_dependencies(binary)
          for jar, coordinate in dependencies:
            self.context.log.debug(' dumping {} from {}'.format(coordinate, jar))
            monolithic_jar.writejar(jar)
      # Hand the still-open jar to the caller so it can add more entries.
      yield monolithic_jar
    # Shading runs after the jar is closed, rewriting the file at `path` in place.
    if binary.shading_rules:
      with self.context.new_workunit('shade-monolithic-jar'):
        self.shade_jar(binary.shading_rules, jar_path=path) | python | {
"resource": ""
} |
q27702 | JvmBinaryTask.shade_jar | train | def shade_jar(self, shading_rules, jar_path):
  """Shades a jar using the shading rules from the given jvm_binary.

  This *overwrites* the existing jar file at ``jar_path``.

  :param shading_rules: predefined rules for shading
  :param jar_path: The filepath to the jar that should be shaded.
  :returns: ``jar_path``, now pointing at the shaded jar.
  :raises TaskError: if the shading tool fails or produces no output jar.
  """
  self.context.log.debug('Shading {}.'.format(jar_path))
  # Shade into a temp dir first so the original jar is only replaced on success.
  with temporary_dir() as tempdir:
    output_jar = os.path.join(tempdir, os.path.basename(jar_path))
    with self.shader.binary_shader_for_rules(output_jar, jar_path, shading_rules) as shade_runner:
      result = execute_runner(shade_runner, workunit_factory=self.context.new_workunit,
                              workunit_name='jarjar')
      if result != 0:
        raise TaskError('Shading tool failed to shade {0} (error code {1})'.format(jar_path,
                                                                                   result))
      # Guard against a tool that exits 0 without writing its output.
      if not os.path.exists(output_jar):
        raise TaskError('Shading tool returned success for {0}, but '
                        'the output jar was not found at {1}'.format(jar_path, output_jar))
      # Atomic replace: readers never observe a partially-written jar.
      atomic_copy(output_jar, jar_path)
    return jar_path | python | {
"resource": ""
} |
q27703 | parse_address_family | train | def parse_address_family(address_mapper, directory):
  """Given an AddressMapper and a directory, return an AddressFamily.

  The AddressFamily may be empty, but it will not be None.

  :raises ResolveError: if the directory contains no BUILD files at all.
  """
  # Build globs for this directory's BUILD files, honoring the mapper's ignore patterns.
  patterns = tuple(join(directory.path, p) for p in address_mapper.build_patterns)
  path_globs = PathGlobs(include=patterns,
                         exclude=address_mapper.build_ignore_patterns)
  # NB: this is an engine rule coroutine; `yield Get(...)` requests a product
  # from the engine rather than producing a value for a caller.
  snapshot = yield Get(Snapshot, PathGlobs, path_globs)
  files_content = yield Get(FilesContent, Digest, snapshot.directory_digest)
  if not files_content:
    raise ResolveError('Directory "{}" does not contain any BUILD files.'.format(directory.path))
  # Parse each BUILD file into an AddressMap, then merge them into one family.
  address_maps = []
  for filecontent_product in files_content:
    address_maps.append(AddressMap.parse(filecontent_product.path,
                                         filecontent_product.content,
                                         address_mapper.parser))
  # The final yield is the rule's product.
  yield AddressFamily.create(directory.path, address_maps) | python | {
"resource": ""
} |
q27704 | addresses_from_address_families | train | def addresses_from_address_families(address_mapper, specs):
  """Given an AddressMapper and list of Specs, return matching BuildFileAddresses.

  :raises: :class:`ResolveError` if:
     - there were no matching AddressFamilies, or
     - the Spec matches no addresses for SingleAddresses.
  :raises: :class:`AddressLookupError` if no targets are matched for non-SingleAddress specs.
  """
  # Capture a Snapshot covering all paths for these Specs, then group by directory.
  snapshot = yield Get(Snapshot, PathGlobs, _spec_to_globs(address_mapper, specs))
  dirnames = {dirname(f) for f in snapshot.files}
  address_families = yield [Get(AddressFamily, Dir(d)) for d in dirnames]
  address_family_by_directory = {af.namespace: af for af in address_families}
  matched_addresses = OrderedSet()
  for spec in specs:
    # NB: if a spec is provided which expands to some number of targets, but those targets match
    # --exclude-target-regexp, we do NOT fail! This is why we wait to apply the tag and exclude
    # patterns until we gather all the targets the spec would have matched without them.
    try:
      addr_families_for_spec = spec.matching_address_families(address_family_by_directory)
    except Spec.AddressFamilyResolutionError as e:
      # NOTE(review): `raise_from` itself raises, so the outer `raise` here is
      # never reached — confirm against the future.utils docs before simplifying.
      raise raise_from(ResolveError(e), e)
    try:
      all_addr_tgt_pairs = spec.address_target_pairs_from_address_families(addr_families_for_spec)
    except Spec.AddressResolutionError as e:
      raise raise_from(AddressLookupError(e), e)
    except SingleAddress._SingleAddressResolutionError as e:
      _raise_did_you_mean(e.single_address_family, e.name, source=e)
    # Apply the tag/exclude matcher only after the spec's full expansion (see NB above).
    matched_addresses.update(
      addr for (addr, tgt) in all_addr_tgt_pairs
      if specs.matcher.matches_target_address_pair(addr, tgt)
    )
  # NB: This may be empty, as the result of filtering by tag and exclude patterns!
  yield BuildFileAddresses(tuple(matched_addresses)) | python | {
"resource": ""
} |
q27705 | _spec_to_globs | train | def _spec_to_globs(address_mapper, specs):
"""Given a Specs object, return a PathGlobs object for the build files that it matches."""
patterns = set()
for spec in specs:
patterns.update(spec.make_glob_patterns(address_mapper))
return PathGlobs(include=patterns, exclude=address_mapper.build_ignore_patterns) | python | {
"resource": ""
} |
q27706 | create_graph_rules | train | def create_graph_rules(address_mapper):
  """Creates tasks used to parse Structs from BUILD files.

  :param address_mapper: The AddressMapper instance exposed to the rule graph as a singleton.
  :returns: A list of rules for BUILD file parsing and spec resolution.
  """
  # Expose the captured AddressMapper as a zero-dependency singleton rule.
  @rule(AddressMapper, [])
  def address_mapper_singleton():
    return address_mapper
  return [
    address_mapper_singleton,
    # BUILD file parsing.
    hydrate_struct,
    parse_address_family,
    # Spec handling: locate directories that contain build files, and request
    # AddressFamilies for each of them.
    addresses_from_address_families,
    # Root rules representing parameters that might be provided via root subjects.
    RootRule(Address),
    RootRule(BuildFileAddress),
    RootRule(BuildFileAddresses),
    RootRule(Specs),
  ] | python | {
"resource": ""
} |
q27707 | PythonArtifact.with_binaries | train | def with_binaries(self, *args, **kw):
"""Add binaries tagged to this artifact.
For example: ::
provides = setup_py(
name = 'my_library',
zip_safe = True
).with_binaries(
my_command = ':my_library_bin'
)
This adds a console_script entry_point for the python_binary target
pointed at by :my_library_bin. Currently only supports
python_binaries that specify entry_point explicitly instead of source.
Also can take a dictionary, e.g.
with_binaries({'my-command': ':my_library_bin'})
"""
for arg in args:
if isinstance(arg, dict):
self._binaries.update(arg)
self._binaries.update(kw)
return self | python | {
"resource": ""
} |
q27708 | Confluence.getpage | train | def getpage(self, wiki_space, page_title):
""" Fetches a page object.
Returns None if the page does not exist or otherwise could not be fetched.
"""
try:
return self._api_entrypoint.getPage(self._session_token, wiki_space, page_title)
except XMLRPCError as e:
log.warning('Failed to fetch page %s: %s' % (page_title, e))
return None | python | {
"resource": ""
} |
q27709 | Confluence.storepage | train | def storepage(self, page):
"""Stores a page object, updating the page if it already exists.
returns the stored page, or None if the page could not be stored.
"""
try:
return self._api_entrypoint.storePage(self._session_token, page)
except XMLRPCError as e:
log.error('Failed to store page %s: %s' % (page.get('title', '[unknown title]'), e))
return None | python | {
"resource": ""
} |
q27710 | Confluence.removepage | train | def removepage(self, page):
"""Deletes a page from confluence.
raises ConfluenceError if the page could not be removed.
"""
try:
self._api_entrypoint.removePage(self._session_token, page)
except XMLRPCError as e:
raise ConfluenceError('Failed to delete page: %s' % e) | python | {
"resource": ""
} |
q27711 | Confluence.create | train | def create(self, space, title, content, parent_page=None, **pageoptions):
  """ Create a new confluence page with the given title and content. Additional page options
  available in the xmlrpc api can be specified as kwargs.

  :returns: the created page or None if the page could not be stored.
  :raises ConfluenceError: if a parent page was specified but could not be found.
  """
  # Base page definition; extra xmlrpc page options may override these fields below.
  pagedef = dict(
    space = space,
    title = title,
    url = Confluence.get_url(self._server_url, space, title),
    content = content,
    contentStatus = 'current',
    current = True
  )
  pagedef.update(**pageoptions)
  if parent_page:
    # Get the parent page id.
    parent_page_obj = self.getpage(space, parent_page)
    if parent_page_obj is None:
      raise ConfluenceError('Failed to find parent page %s in space %s' % (parent_page, space))
    pagedef['parentId'] = parent_page_obj['id']
  # Now create the page
  return self.storepage(pagedef) | python | {
"resource": ""
} |
q27712 | SchedulerService._get_snapshot | train | def _get_snapshot(self):
"""Returns a Snapshot of the input globs"""
return self._scheduler_session.product_request(
Snapshot, subjects=[PathGlobs(self._invalidation_globs)])[0] | python | {
"resource": ""
} |
q27713 | SchedulerService.setup | train | def setup(self, services):
  """Service setup.

  :param services: The running services, forwarded to the superclass setup.
  """
  super(SchedulerService, self).setup(services)
  # Register filesystem event handlers on an FSEventService instance.
  self._fs_event_service.register_all_files_handler(self._enqueue_fs_event)
  # N.B. We compute the invalidating fileset eagerly at launch with an assumption that files
  # that exist at startup are the only ones that can affect the running daemon.
  if self._invalidation_globs:
    self._invalidating_snapshot = self._get_snapshot()
    self._invalidating_files = self._invalidating_snapshot.files
    self._logger.info('watching invalidating files: {}'.format(self._invalidating_files))
  # Watch the daemon's own pidfile so a clobbered pidfile can invalidate the scheduler.
  if self._pantsd_pidfile:
    self._fs_event_service.register_pidfile_handler(self._pantsd_pidfile, self._enqueue_fs_event) | python | {
"resource": ""
} |
q27714 | SchedulerService._process_event_queue | train | def _process_event_queue(self):
  """File event notification queue processor.

  Pops at most one watchman event per call; returns without side effects when
  the queue is empty or the event is malformed.
  """
  try:
    event = self._event_queue.get(timeout=0.05)
  except queue.Empty:
    return
  try:
    # Watchman delivers file names as bytes on Python 2; decode them there only.
    subscription, is_initial_event, files = (event['subscription'],
                                             event['is_fresh_instance'],
                                             event['files'] if PY3 else [f.decode('utf-8') for f in event['files']])
  except (KeyError, UnicodeDecodeError) as e:
    self._logger.warn('%r raised by invalid watchman event: %s', e, event)
    return
  self._logger.debug('processing {} files for subscription {} (first_event={})'
                     .format(len(files), subscription, is_initial_event))
  # The first watchman event is a listing of all files - ignore it.
  if not is_initial_event:
    if subscription == self._fs_event_service.PANTS_PID_SUBSCRIPTION_NAME:
      self._maybe_invalidate_scheduler_pidfile()
    else:
      self._handle_batch_event(files)
  # Any processed event proves watchman is up; flip the flag once.
  if not self._watchman_is_running.is_set():
    self._watchman_is_running.set()
  self._event_queue.task_done() | python | {
"resource": ""
} |
q27715 | SchedulerService.prefork | train | def prefork(self, options, options_bootstrapper):
  """Runs all pre-fork logic in the process context of the daemon.

  :param options: The parsed options for this run.
  :param options_bootstrapper: The OptionsBootstrapper, forwarded to the prefork body.
  :returns: `(LegacyGraphSession, TargetRoots, exit_code)`
  """
  # If any nodes exist in the product graph, wait for the initial watchman event to avoid
  # racing watchman startup vs invalidation events.
  graph_len = self._scheduler.graph_len()
  if graph_len > 0:
    self._logger.debug('graph len was {}, waiting for initial watchman event'.format(graph_len))
    self._watchman_is_running.wait()
  v2_ui = options.for_global_scope().v2_ui
  zipkin_trace_v2 = options.for_scope('reporting').zipkin_trace_v2
  session = self._graph_helper.new_session(zipkin_trace_v2, v2_ui)
  # `--loop` runs the prefork body repeatedly until the loop condition triggers.
  if options.for_global_scope().loop:
    prefork_fn = self._prefork_loop
  else:
    prefork_fn = self._prefork_body
  target_roots, exit_code = prefork_fn(session, options, options_bootstrapper)
  return session, target_roots, exit_code | python | {
"resource": ""
} |
q27716 | SchedulerService.run | train | def run(self):
"""Main service entrypoint."""
while not self._state.is_terminating:
self._process_event_queue()
self._state.maybe_pause() | python | {
"resource": ""
} |
q27717 | LoopCondition.wait | train | def wait(self, timeout):
"""Waits for the condition for at most the given timeout and returns True if the condition triggered.
Generally called in a loop until the condition triggers.
"""
with self._condition:
previous_iteration = self._iteration
self._condition.wait(timeout)
return previous_iteration != self._iteration | python | {
"resource": ""
} |
q27718 | Context._set_target_root_count_in_runtracker | train | def _set_target_root_count_in_runtracker(self):
"""Sets the target root count in the run tracker's daemon stats object."""
# N.B. `self._target_roots` is always an expanded list of `Target` objects as
# provided by `GoalRunner`.
target_count = len(self._target_roots)
self.run_tracker.pantsd_stats.set_target_root_size(target_count)
return target_count | python | {
"resource": ""
} |
q27719 | Context._set_affected_target_count_in_runtracker | train | def _set_affected_target_count_in_runtracker(self):
"""Sets the realized target count in the run tracker's daemon stats object."""
target_count = len(self.build_graph)
self.run_tracker.pantsd_stats.set_affected_targets_size(target_count)
return target_count | python | {
"resource": ""
} |
q27720 | Context.subproc_map | train | def subproc_map(self, f, items):
  """Map function `f` over `items` in subprocesses and return the result.

  :API: public
  :param f: A multiproc-friendly (importable) work function.
  :param items: A iterable of pickleable arguments to f.
  :returns: The list of results, in input order.
  :raises KeyboardInterrupt: after shutting down the subprocess pool.
  """
  try:
    # Pool.map (and async_map().get() w/o timeout) can miss SIGINT.
    # See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
    # Instead, we map_async(...), wait *with a timeout* until ready, then .get()
    # NB: in 2.x, wait() with timeout wakes up often to check, burning CPU. Oh well.
    res = SubprocPool.foreground().map_async(f, items)
    while not res.ready():
      res.wait(60) # Repeatedly wait for up to a minute.
      if not res.ready():
        self.log.debug('subproc_map result still not ready...')
    return res.get()
  except KeyboardInterrupt:
    # Tear the pool down hard so Ctrl-C actually stops the work, then re-raise.
    SubprocPool.shutdown(True)
    raise | python | {
"resource": ""
} |
q27721 | Context.new_workunit | train | def new_workunit(self, name, labels=None, cmd='', log_config=None):
"""Create a new workunit under the calling thread's current workunit.
:API: public
"""
with self.run_tracker.new_workunit(name=name, labels=labels, cmd=cmd, log_config=log_config) as workunit:
yield workunit | python | {
"resource": ""
} |
q27722 | Context.acquire_lock | train | def acquire_lock(self):
""" Acquire the global lock for the root directory associated with this context. When
a goal requires serialization, it will call this to acquire the lock.
:API: public
"""
if self.options.for_global_scope().lock:
if not self._lock.acquired:
self._lock.acquire() | python | {
"resource": ""
} |
q27723 | Context.release_lock | train | def release_lock(self):
"""Release the global lock if it's held.
Returns True if the lock was held before this call.
:API: public
"""
if not self._lock.acquired:
return False
else:
self._lock.release()
return True | python | {
"resource": ""
} |
q27724 | Context.add_new_target | train | def add_new_target(self, address, target_type, target_base=None, dependencies=None,
                   derived_from=None, **kwargs):
  """Creates a new target, adds it to the context and returns it.

  This method ensures the target resolves files against the given target_base, creating the
  directory if needed and registering a source root.

  :API: public
  :param address: Address for the new target.
  :param target_type: The Target subclass to instantiate.
  :param target_base: Directory the target resolves sources against; defaults to the
    address's spec_path.
  :param dependencies: Optional iterable of Target dependencies.
  :param derived_from: Optional target this synthetic target is derived from.
  """
  rel_target_base = target_base or address.spec_path
  abs_target_base = os.path.join(get_buildroot(), rel_target_base)
  if not os.path.exists(abs_target_base):
    os.makedirs(abs_target_base)
  # TODO: Adding source roots on the fly like this is yucky, but hopefully this
  # method will go away entirely under the new engine. It's primarily used for injecting
  # synthetic codegen targets, and that isn't how codegen will work in the future.
  if not self.source_roots.find_by_path(rel_target_base):
    # TODO: Set the lang and root category (source/test/thirdparty) based on the target type?
    self.source_roots.add_source_root(rel_target_base)
  # The build graph wants addresses, not Target objects.
  if dependencies:
    dependencies = [dep.address for dep in dependencies]
  self.build_graph.inject_synthetic_target(address=address,
                                           target_type=target_type,
                                           dependencies=dependencies,
                                           derived_from=derived_from,
                                           **kwargs)
  new_target = self.build_graph.get_target(address)
  return new_target | python | {
"resource": ""
} |
q27725 | Context.targets | train | def targets(self, predicate=None, **kwargs):
  """Selects targets in-play in this run from the target roots and their transitive dependencies.

  Also includes any new synthetic targets created from the target roots or their transitive
  dependencies during the course of the run.

  See Target.closure_for_targets for remaining parameters.

  :API: public
  :param predicate: If specified, the predicate will be used to narrow the scope of targets
                    returned.
  :param bool postorder: `True` to gather transitive dependencies with a postorder traversal;
                        `False` or preorder by default.
  :returns: A list of matching targets.
  """
  target_set = self._collect_targets(self.target_roots, **kwargs)
  # Pick up synthetic targets whose concrete 'derived from' target is already in play,
  # then expand their closures too.
  synthetics = OrderedSet()
  for synthetic_address in self.build_graph.synthetic_addresses:
    if self.build_graph.get_concrete_derived_from(synthetic_address) in target_set:
      synthetics.add(self.build_graph.get_target(synthetic_address))
  target_set.update(self._collect_targets(synthetics, **kwargs))
  # filter(None, ...) returns everything, so an omitted predicate selects all targets.
  return list(filter(predicate, target_set)) | python | {
"resource": ""
} |
q27726 | Context.dependents | train | def dependents(self, on_predicate=None, from_predicate=None):
"""Returns a map from targets that satisfy the from_predicate to targets they depend on that
satisfy the on_predicate.
:API: public
"""
core = set(self.targets(on_predicate))
dependees = defaultdict(set)
for target in self.targets(from_predicate):
for dependency in target.dependencies:
if dependency in core:
dependees[target].add(dependency)
return dependees | python | {
"resource": ""
} |
q27727 | Context.scan | train | def scan(self, root=None):
"""Scans and parses all BUILD files found under ``root``.
Only BUILD files found under ``root`` are parsed as roots in the graph, but any dependencies of
targets parsed in the root tree's BUILD files will be followed and this may lead to BUILD files
outside of ``root`` being parsed and included in the returned build graph.
:API: public
:param string root: The path to scan; by default, the build root.
:returns: A new build graph encapsulating the targets found.
"""
build_graph = self.build_graph.clone_new()
for address in self.address_mapper.scan_addresses(root):
build_graph.inject_address_closure(address)
return build_graph | python | {
"resource": ""
} |
q27728 | Context.execute_process_synchronously_or_raise | train | def execute_process_synchronously_or_raise(self, execute_process_request, name, labels=None):
"""Execute process synchronously, and throw if the return code is not 0.
See execute_process_synchronously for the api docs.
"""
fallible_result = self.execute_process_synchronously_without_raising(execute_process_request, name, labels)
return fallible_to_exec_result_or_raise(
fallible_result,
execute_process_request
) | python | {
"resource": ""
} |
q27729 | BundleCreate.bundle | train | def bundle(self, app, results_dir):
  """Create a self-contained application bundle.

  The bundle will contain the target classes, dependencies and resources.

  :param app: The BundleCreate.App to bundle.
  :param results_dir: Directory in which the bundle dir is created.
  :returns: The path of the created bundle directory.
  """
  assert(isinstance(app, BundleCreate.App))
  bundle_dir = self.get_bundle_dir(app.id, results_dir)
  self.context.log.debug('creating {}'.format(os.path.relpath(bundle_dir, get_buildroot())))
  safe_mkdir(bundle_dir, clean=True)
  classpath = OrderedSet()
  # Create symlinks for both internal and external dependencies under `lib_dir`. This is
  # only needed when not creating a deployjar
  lib_dir = os.path.join(bundle_dir, self.LIBS_DIR)
  if not app.deployjar:
    os.mkdir(lib_dir)
    consolidated_classpath = self.context.products.get_data('consolidated_classpath')
    classpath.update(ClasspathProducts.create_canonical_classpath(
      consolidated_classpath,
      app.target.closure(bfs=True, **self._target_closure_kwargs),
      lib_dir,
      internal_classpath_only=False,
      excludes=app.binary.deploy_excludes,
    ))
  bundle_jar = os.path.join(bundle_dir, '{}.jar'.format(app.binary.basename))
  # In manifest-classpath mode the jar references `classpath`; otherwise it is a fat jar.
  with self.monolithic_jar(app.binary, bundle_jar,
                           manifest_classpath=classpath) as jar:
    self.add_main_manifest_entry(jar, app.binary)
  # Make classpath complete by adding the monolithic jar.
  classpath.update([jar.path])
  if app.binary.shading_rules:
    for jar_path in classpath:
      # In case `jar_path` is a symlink, this is still safe, shaded jar will overwrite jar_path,
      # original file `jar_path` linked to remains untouched.
      # TODO run in parallel to speed up
      self.shade_jar(shading_rules=app.binary.shading_rules, jar_path=jar_path)
  self.symlink_bundles(app, bundle_dir)
  return bundle_dir | python | {
"resource": ""
} |
q27730 | BundleCreate.check_basename_conflicts | train | def check_basename_conflicts(self, targets):
"""Apps' basenames are used as bundle directory names. Ensure they are all unique."""
basename_seen = {}
for target in targets:
if target.basename in basename_seen:
raise self.BasenameConflictError('Basename must be unique, found two targets use '
"the same basename: {}'\n\t{} and \n\t{}"
.format(target.basename,
basename_seen[target.basename].address.spec,
target.address.spec))
basename_seen[target.basename] = target | python | {
"resource": ""
} |
q27731 | Reproducer.create_repro | train | def create_repro(self):
"""Return a Repro instance for capturing a repro of the current workspace state.
:return: a Repro instance, or None if no repro was requested.
:rtype: `pants.bin.repro.Repro`
"""
path = self.get_options().capture
if path is None:
return None
buildroot = get_buildroot()
# Ignore a couple of common cases. Note: If we support SCMs other than git in the future,
# add their (top-level only) metadata dirs here if relevant.
ignore = ['.git', os.path.relpath(self.get_options().pants_distdir, buildroot)]
if self.get_options().ignore:
ignore.extend(self.get_options().ignore)
return Repro(path, buildroot, ignore) | python | {
"resource": ""
} |
q27732 | BuildDictionaryInfoExtracter.get_target_args | train | def get_target_args(self, alias):
"""Returns a list of FunctionArgs for the specified target_type."""
target_types = list(self._buildfile_aliases.target_types_by_alias.get(alias))
if not target_types:
raise TaskError('No such target type: {}'.format(alias))
return self.get_args_for_target_type(target_types[0]) | python | {
"resource": ""
} |
q27733 | RunTracker.initialize | train | def initialize(self, all_options):
  """Create run_info and relevant directories, and return the run id.

  Must be called before `start`.

  :param all_options: The fully parsed options for this run, retained for later reporting.
  :returns: A `(run_id, run_uuid)` tuple.
  :raises AssertionError: if called more than once.
  """
  if self.run_info:
    raise AssertionError('RunTracker.initialize must not be called multiple times.')
  # Initialize the run.
  # Select a globally unique ID for the run, that sorts by time.
  millis = int((self._run_timestamp * 1000) % 1000)
  # run_uuid is used as a part of run_id and also as a trace_id for Zipkin tracing
  run_uuid = uuid.uuid4().hex
  run_id = 'pants_run_{}_{}_{}'.format(
    time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self._run_timestamp)),
    millis,
    run_uuid
  )
  info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
  self.run_info_dir = os.path.join(info_dir, run_id)
  self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
  self.run_info.add_basic_info(run_id, self._run_timestamp)
  self.run_info.add_info('cmd_line', self._cmd_line)
  # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
  link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')
  relative_symlink(self.run_info_dir, link_to_latest)
  # Time spent in a workunit, including its children.
  self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                           'cumulative_timings'))
  # Time spent in a workunit, not including its children.
  self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))
  # Hit/miss stats for the artifact cache.
  self.artifact_cache_stats = ArtifactCacheStats(os.path.join(self.run_info_dir,
                                                              'artifact_cache_stats'))
  # Daemon stats.
  self.pantsd_stats = PantsDaemonStats()
  self._all_options = all_options
  return (run_id, run_uuid) | python | {
"resource": ""
} |
q27734 | RunTracker.start | train | def start(self, report, run_start_time=None):
  """Start tracking this pants run using the given Report.

  `RunTracker.initialize` must have been called first to create the run_info_dir and
  run_info. TODO: This lifecycle represents a delicate dance with the `Reporting.initialize`
  method, and portions of the `RunTracker` should likely move to `Reporting` instead.

  :param report: an instance of pants.reporting.Report.
  :param run_start_time: Optional true start time, used e.g. by the daemon.
  :raises AssertionError: if `initialize` was not called first.
  """
  if not self.run_info:
    raise AssertionError('RunTracker.initialize must be called before RunTracker.start.')
  self.report = report
  # Set up the JsonReporter for V2 stats.
  if self.get_options().stats_version == 2:
    json_reporter_settings = JsonReporter.Settings(log_level=Report.INFO)
    self.json_reporter = JsonReporter(self, json_reporter_settings)
    report.add_reporter('json', self.json_reporter)
  self.report.open()
  # And create the workunit.
  self._main_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                                      name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)
  self.register_thread(self._main_root_workunit)
  # Set the true start time in the case of e.g. the daemon.
  self._main_root_workunit.start(run_start_time)
  self.report.start_workunit(self._main_root_workunit)
  # Log reporting details.
  url = self.run_info.get_info('report_url')
  if url:
    self.log(Report.INFO, 'See a report at: {}'.format(url))
  else:
    self.log(Report.INFO, '(To run a reporting server: ./pants server)') | python | {
"resource": ""
} |
q27735 | RunTracker.log | train | def log(self, level, *msg_elements):
"""Log a message against the current workunit."""
self.report.log(self._threadlocal.current_workunit, level, *msg_elements) | python | {
"resource": ""
} |
q27736 | RunTracker.post_stats | train | def post_stats(cls, stats_url, stats, timeout=2, auth_provider=None):
"""POST stats to the given url.
:return: True if upload was successful, False otherwise.
"""
def error(msg):
# Report aleady closed, so just print error.
print('WARNING: Failed to upload stats to {}. due to {}'.format(stats_url, msg),
file=sys.stderr)
return False
# TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
# values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
# But this will first require changing the upload receiver at every shop that uses this.
params = {k: cls._json_dump_options(v) for (k, v) in stats.items()}
cookies = Cookies.global_instance()
auth_provider = auth_provider or '<provider>'
# We can't simply let requests handle redirects, as we only allow them for specific codes:
# 307 and 308 indicate that the redirected request must use the same method, POST in this case.
# So they indicate a true redirect of the POST itself, and we allow them.
# The other redirect codes either must, or in practice do, cause the user agent to switch the
# method to GET. So when they are encountered on a POST, it indicates an auth problem (a
# redirection to a login page).
def do_post(url, num_redirects_allowed):
if num_redirects_allowed < 0:
return error('too many redirects.')
r = requests.post(url, data=params, timeout=timeout,
cookies=cookies.get_cookie_jar(), allow_redirects=False)
if r.status_code in {307, 308}:
return do_post(r.headers['location'], num_redirects_allowed - 1)
elif r.status_code != 200:
error('HTTP error code: {}. Reason: {}.'.format(r.status_code, r.reason))
if 300 <= r.status_code < 400 or r.status_code == 401:
print('Use `path/to/pants login --to={}` to authenticate against the stats '
'upload service.'.format(auth_provider), file=sys.stderr)
return False
return True
try:
return do_post(stats_url, num_redirects_allowed=6)
except Exception as e: # Broad catch - we don't want to fail the build over upload errors.
return error('Error: {}'.format(e)) | python | {
"resource": ""
} |
q27737 | RunTracker.write_stats_to_json | train | def write_stats_to_json(cls, file_name, stats):
"""Write stats to a local json file."""
params = cls._json_dump_options(stats)
mode = 'w' if PY3 else 'wb'
try:
safe_file_dump(file_name, params, mode=mode)
except Exception as e: # Broad catch - we don't want to fail in stats related failure.
print('WARNING: Failed to write stats to {} due to Error: {}'.format(file_name, e),
file=sys.stderr) | python | {
"resource": ""
} |
q27738 | RunTracker.run_information | train | def run_information(self):
"""Basic information about this run."""
run_information = self.run_info.get_as_dict()
target_data = run_information.get('target_data', None)
if target_data:
run_information['target_data'] = ast.literal_eval(target_data)
return run_information | python | {
"resource": ""
} |
q27739 | RunTracker.store_stats | train | def store_stats(self):
"""Store stats about this run in local and optionally remote stats dbs."""
stats = self._stats()
# Write stats to user-defined json file.
stats_json_file_name = self.get_options().stats_local_json_file
if stats_json_file_name:
self.write_stats_to_json(stats_json_file_name, stats)
# Upload to remote stats db.
stats_upload_urls = copy.copy(self.get_options().stats_upload_urls)
timeout = self.get_options().stats_upload_timeout
for stats_url, auth_provider in stats_upload_urls.items():
self.post_stats(stats_url, stats, timeout=timeout, auth_provider=auth_provider) | python | {
"resource": ""
} |
q27740 | RunTracker.end | train | def end(self):
"""This pants run is over, so stop tracking it.
Note: If end() has been called once, subsequent calls are no-ops.
:return: PANTS_SUCCEEDED_EXIT_CODE or PANTS_FAILED_EXIT_CODE
"""
if self._end_memoized_result is not None:
return self._end_memoized_result
if self._background_worker_pool:
if self._aborted:
self.log(Report.INFO, "Aborting background workers.")
self._background_worker_pool.abort()
else:
self.log(Report.INFO, "Waiting for background workers to finish.")
self._background_worker_pool.shutdown()
self.end_workunit(self._background_root_workunit)
self.shutdown_worker_pool()
# Run a dummy work unit to write out one last timestamp.
with self.new_workunit("complete"):
pass
self.end_workunit(self._main_root_workunit)
outcome = self._main_root_workunit.outcome()
if self._background_root_workunit:
outcome = min(outcome, self._background_root_workunit.outcome())
outcome_str = WorkUnit.outcome_string(outcome)
log_level = RunTracker._log_levels[outcome]
self.log(log_level, outcome_str)
if self.run_info.get_info('outcome') is None:
# If the goal is clean-all then the run info dir no longer exists, so ignore that error.
self.run_info.add_info('outcome', outcome_str, ignore_errors=True)
if self._target_to_data:
self.run_info.add_info('target_data', self._target_to_data)
self.report.close()
self.store_stats()
run_failed = outcome in [WorkUnit.FAILURE, WorkUnit.ABORTED]
result = PANTS_FAILED_EXIT_CODE if run_failed else PANTS_SUCCEEDED_EXIT_CODE
self._end_memoized_result = result
return self._end_memoized_result | python | {
"resource": ""
} |
q27741 | RunTracker._create_dict_with_nested_keys_and_val | train | def _create_dict_with_nested_keys_and_val(cls, keys, value):
"""Recursively constructs a nested dictionary with the keys pointing to the value.
For example:
Given the list of keys ['a', 'b', 'c', 'd'] and a primitive
value 'hello world', the method will produce the nested dictionary
{'a': {'b': {'c': {'d': 'hello world'}}}}. The number of keys in the list
defines the depth of the nested dict. If the list of keys is ['a'] and
the value is 'hello world', then the result would be {'a': 'hello world'}.
:param list of string keys: A list of keys to be nested as a dictionary.
:param primitive value: The value of the information being stored.
:return: dict of nested keys leading to the value.
"""
if len(keys) > 1:
new_keys = keys[:-1]
new_val = {keys[-1]: value}
return cls._create_dict_with_nested_keys_and_val(new_keys, new_val)
elif len(keys) == 1:
return {keys[0]: value}
else:
raise ValueError('Keys must contain at least one key.') | python | {
"resource": ""
} |
q27742 | RunTracker._merge_list_of_keys_into_dict | train | def _merge_list_of_keys_into_dict(cls, data, keys, value, index=0):
"""Recursively merge list of keys that points to the given value into data.
Will override a primitive value with another primitive value, but will not
override a primitive with a dictionary.
For example:
Given the dictionary {'a': {'b': {'c': 1}}, {'x': {'y': 100}}}, the keys
['a', 'b', 'd'] and the value 2, the updated dictionary would be
{'a': {'b': {'c': 1, 'd': 2}}, {'x': {'y': 100}}}. Given this newly updated
dictionary, the keys ['a', 'x', 'y', 'z'] and the value 200, the method would raise
an error because we would be trying to override the primitive value 100 with the
dict {'z': 200}.
:param dict data: Dictionary to be updated.
:param list of string keys: The keys that point to where the value should be stored.
Will recursively find the correct place to store in the nested dicts.
:param primitive value: The value of the information being stored.
:param int index: The index into the list of keys (starting from the beginning).
"""
if len(keys) == 0 or index < 0 or index >= len(keys):
raise ValueError('Keys must contain at least one key and index must be'
'an integer greater than 0 and less than the number of keys.')
if len(keys) < 2 or not data:
new_data_to_add = cls._create_dict_with_nested_keys_and_val(keys, value)
data.update(new_data_to_add)
this_keys_contents = data.get(keys[index])
if this_keys_contents:
if isinstance(this_keys_contents, dict):
cls._merge_list_of_keys_into_dict(this_keys_contents, keys, value, index + 1)
elif index < len(keys) - 1:
raise ValueError('Keys must point to a dictionary.')
else:
data[keys[index]] = value
else:
new_keys = keys[index:]
new_data_to_add = cls._create_dict_with_nested_keys_and_val(new_keys, value)
data.update(new_data_to_add) | python | {
"resource": ""
} |
q27743 | RunTracker.report_target_info | train | def report_target_info(self, scope, target, keys, val):
"""Add target information to run_info under target_data.
Will Recursively construct a nested dict with the keys provided.
Primitive values can be overwritten with other primitive values,
but a primitive value cannot be overwritten with a dictionary.
For example:
Where the dictionary being updated is {'a': {'b': 16}}, reporting the value
15 with the key list ['a', 'b'] will result in {'a': {'b':15}};
but reporting the value 20 with the key list ['a', 'b', 'c'] will throw
an error.
:param string scope: The scope for which we are reporting the information.
:param target: The target for which we want to store information.
:type target: :class:`pants.build_graph.target.Target`
:param list of string keys: The keys that will be recursively
nested and pointing to the information being stored.
:param primitive val: The value of the information being stored.
:API: public
"""
new_key_list = [target.address.spec, scope]
new_key_list += keys
self._merge_list_of_keys_into_dict(self._target_to_data, new_key_list, val, 0) | python | {
"resource": ""
} |
q27744 | Executor.runner | train | def runner(self, classpath, main, jvm_options=None, args=None, cwd=None):
"""Returns an `Executor.Runner` for the given java command."""
return self._runner(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd)) | python | {
"resource": ""
} |
q27745 | Executor.execute | train | def execute(self, classpath, main, jvm_options=None, args=None, stdout=None, stderr=None,
cwd=None):
"""Launches the java program defined by the classpath and main.
:param list classpath: the classpath for the java program
:param string main: the fully qualified class name of the java program's entry point
:param list jvm_options: an optional sequence of options for the underlying jvm
:param list args: an optional sequence of args to pass to the java program
:param string cwd: optionally set the working directory
Returns the exit code of the java program.
Raises Executor.Error if there was a problem launching java itself.
"""
runner = self.runner(classpath=classpath, main=main, jvm_options=jvm_options, args=args,
cwd=cwd)
return runner.run(stdout=stdout, stderr=stderr) | python | {
"resource": ""
} |
q27746 | SubprocessExecutor.spawn | train | def spawn(self, classpath, main, jvm_options=None, args=None, cwd=None, **subprocess_args):
"""Spawns the java program passing any extra subprocess kwargs on to subprocess.Popen.
Returns the Popen process object handle to the spawned java program subprocess.
:API: public
:raises: :class:`Executor.Error` if there is a problem spawning the subprocess.
"""
cwd = cwd or os.getcwd()
cmd = self._create_command(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd))
return self._spawn(cmd, cwd, **subprocess_args) | python | {
"resource": ""
} |
q27747 | NailgunClientSession._set_exit_timeout | train | def _set_exit_timeout(self, timeout, reason):
"""Set a timeout for the remainder of the session, along with an exception to raise.
which is implemented by NailgunProtocol.
This method may be called by a signal handler to set a timeout for the remainder of the
session. If the session completes before the timeout does, the exception in `reason` is
raised. Otherwise, `NailgunProtocol.ProcessStreamTimeout` is raised.
:param float timeout: The length of time to time out, in seconds.
:param Exception reason: The exception to raise if the session completes before the timeout
occurs.
"""
self._exit_timeout_start_time = time.time()
self._exit_timeout = timeout
self._exit_reason = reason | python | {
"resource": ""
} |
q27748 | NailgunClientSession.maybe_timeout_options | train | def maybe_timeout_options(self):
"""Implements the NailgunProtocol.TimeoutProvider interface."""
if self._exit_timeout_start_time:
return NailgunProtocol.TimeoutOptions(self._exit_timeout_start_time, self._exit_timeout)
else:
return None | python | {
"resource": ""
} |
q27749 | NailgunClientSession._process_session | train | def _process_session(self):
"""Process the outputs of the nailgun session.
:raises: :class:`NailgunProtocol.ProcessStreamTimeout` if a timeout set from a signal handler
with .set_exit_timeout() completes.
:raises: :class:`Exception` if the session completes before the timeout, the `reason` argument
to .set_exit_timeout() will be raised."""
try:
for chunk_type, payload in self.iter_chunks(self._sock, return_bytes=True,
timeout_object=self):
# TODO(#6579): assert that we have at this point received all the chunk types in
# ChunkType.REQUEST_TYPES, then require PID and PGRP (exactly once?), and then allow any of
# ChunkType.EXECUTION_TYPES.
if chunk_type == ChunkType.STDOUT:
self._write_flush(self._stdout, payload)
elif chunk_type == ChunkType.STDERR:
self._write_flush(self._stderr, payload)
elif chunk_type == ChunkType.EXIT:
self._write_flush(self._stdout)
self._write_flush(self._stderr)
return int(payload)
elif chunk_type == ChunkType.PID:
self.remote_pid = int(payload)
self.remote_process_cmdline = psutil.Process(self.remote_pid).cmdline()
if self._remote_pid_callback:
self._remote_pid_callback(self.remote_pid)
elif chunk_type == ChunkType.PGRP:
self.remote_pgrp = int(payload)
if self._remote_pgrp_callback:
self._remote_pgrp_callback(self.remote_pgrp)
elif chunk_type == ChunkType.START_READING_INPUT:
self._maybe_start_input_writer()
else:
raise self.ProtocolError('received unexpected chunk {} -> {}'.format(chunk_type, payload))
except NailgunProtocol.ProcessStreamTimeout as e:
assert(self.remote_pid is not None)
# NB: We overwrite the process title in the pantsd-runner process, which causes it to have an
# argv with lots of empty spaces for some reason. We filter those out and pretty-print the
# rest here.
filtered_remote_cmdline = safe_shlex_join(
arg for arg in self.remote_process_cmdline if arg != '')
logger.warning(
"timed out when attempting to gracefully shut down the remote client executing \"{}\". "
"sending SIGKILL to the remote client at pid: {}. message: {}"
.format(filtered_remote_cmdline, self.remote_pid, e))
finally:
# Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in
# NailgunProtocol.iter_chunks(). This ensures the NailgunStreamWriter is always stopped.
self._maybe_stop_input_writer()
# If an asynchronous error was set at any point (such as in a signal handler), we want to make
# sure we clean up the remote process before exiting with error.
if self._exit_reason:
if self.remote_pgrp:
safe_kill(self.remote_pgrp, signal.SIGKILL)
if self.remote_pid:
safe_kill(self.remote_pid, signal.SIGKILL)
raise self._exit_reason | python | {
"resource": ""
} |
q27750 | NailgunClient.try_connect | train | def try_connect(self):
"""Creates a socket, connects it to the nailgun and returns the connected socket.
:returns: a connected `socket.socket`.
:raises: `NailgunClient.NailgunConnectionError` on failure to connect.
"""
sock = RecvBufferedSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
try:
sock.connect(self._address)
except (socket.error, socket.gaierror) as e:
logger.debug('Encountered socket exception {!r} when attempting connect to nailgun'.format(e))
sock.close()
raise self.NailgunConnectionError(
address=self._address_string,
pid=self._maybe_last_pid(),
pgrp=self._maybe_last_pgrp(),
wrapped_exc=e,
)
else:
return sock | python | {
"resource": ""
} |
q27751 | NailgunClient.execute | train | def execute(self, main_class, cwd=None, *args, **environment):
"""Executes the given main_class with any supplied args in the given environment.
:param string main_class: the fully qualified class name of the main entrypoint
:param string cwd: Set the working directory for this command
:param list args: any arguments to pass to the main entrypoint
:param dict environment: an env mapping made available to native nails via the nail context
:returns: the exit code of the main_class.
:raises: :class:`NailgunClient.NailgunError` if there was an error during execution.
"""
environment = dict(**environment)
environment.update(self.ENV_DEFAULTS)
cwd = cwd or self._workdir
sock = self.try_connect()
# TODO(#6579): NailgunClientSession currently requires callbacks because it can't depend on
# having received these chunks, so we need to avoid clobbering these fields until we initialize
# a new session.
self._current_remote_pid = None
self._current_remote_pgrp = None
self._session = NailgunClientSession(
sock=sock,
in_file=self._stdin,
out_file=self._stdout,
err_file=self._stderr,
exit_on_broken_pipe=self._exit_on_broken_pipe,
remote_pid_callback=self._receive_remote_pid,
remote_pgrp_callback=self._receive_remote_pgrp)
try:
return self._session.execute(cwd, main_class, *args, **environment)
except (socket.error, NailgunProtocol.ProtocolError) as e:
raise self.NailgunError(
address=self._address_string,
pid=self._maybe_last_pid(),
pgrp=self._maybe_last_pgrp(),
wrapped_exc=e,
)
finally:
sock.close()
self._session = None | python | {
"resource": ""
} |
q27752 | GoFetch._fetch_pkg | train | def _fetch_pkg(self, gopath, pkg, rev):
"""Fetch the package and setup symlinks."""
fetcher = self._get_fetcher(pkg)
root = fetcher.root()
root_dir = os.path.join(self.workdir, 'fetches', root, rev)
# Only fetch each remote root once.
if not os.path.exists(root_dir):
with temporary_dir() as tmp_fetch_root:
with self.context.new_workunit('fetch {}'.format(pkg)):
fetcher.fetch(dest=tmp_fetch_root, rev=rev)
safe_mkdir(root_dir)
for path in os.listdir(tmp_fetch_root):
shutil.move(os.path.join(tmp_fetch_root, path), os.path.join(root_dir, path))
# TODO(John Sirois): Circle back and get get rid of this symlink tree.
# GoWorkspaceTask will further symlink a single package from the tree below into a
# target's workspace when it could just be linking from the fetch_dir. The only thing
# standing in the way is a determination of what we want to artifact cache. If we don't
# want to cache fetched zips, linking straight from the fetch_dir works simply. Otherwise
# thought needs to be applied to using the artifact cache directly or synthesizing a
# canonical owner target for the fetched files that 'child' targets (subpackages) can
# depend on and share the fetch from.
dest_dir = os.path.join(gopath, 'src', root)
# We may have been `invalidate`d and not `clean-all`ed so we need a new empty symlink
# chroot to avoid collision; thus `clean=True`.
safe_mkdir(dest_dir, clean=True)
for path in os.listdir(root_dir):
os.symlink(os.path.join(root_dir, path), os.path.join(dest_dir, path)) | python | {
"resource": ""
} |
q27753 | GoFetch._resolve | train | def _resolve(self, dependent_remote_lib, address, pkg, rev, implicit_ok):
"""Resolves the GoRemoteLibrary at `address` defining the given `pkg`.
If `implicit_ok` is True, then a GoRemoteLibrary to own `pkg` is always synthesized if it does
not already exist; otherwise the address must already exist in the build graph (a BUILD file
must exist on disk that owns the given `pkg` and declares a `rev` for it).
:param dependent_remote_lib: The remote library that depends on the remote `pkg`.
:type: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
:param address: The address of the remote library that should own `pkg`.
:type: :class:`pants.base.Address`
:param string pkg: The remote package path whose owning target needs to be resolved.
:param string rev: The revision of the package. None defaults to `master`.
:param bool implicit_ok: `False` if the given `address` must be defined in a BUILD file on disk;
otherwise a remote library to own `pkg` will always be created and
returned.
:returns: The resulting resolved remote library after injecting it in the build graph.
:rtype: :class:`pants.contrib.go.targets.go_remote_library.GoRemoteLibrary`
:raises: :class:`GoFetch.UndeclaredRemoteLibError`: If no BUILD file exists for the remote root
`pkg` lives in.
"""
try:
self.context.build_graph.inject_address_closure(address)
except AddressLookupError:
if implicit_ok:
self.context.add_new_target(address=address,
target_base=dependent_remote_lib.target_base,
target_type=GoRemoteLibrary,
pkg=pkg,
rev=rev)
else:
raise self.UndeclaredRemoteLibError(address)
return self.context.build_graph.get_target(address) | python | {
"resource": ""
} |
q27754 | GoFetch._get_remote_import_paths | train | def _get_remote_import_paths(self, pkg, gopath=None):
"""Returns the remote import paths declared by the given remote Go `pkg`.
NB: This only includes production code imports, no test code imports.
"""
import_listing = self.import_oracle.list_imports(pkg, gopath=gopath)
return [imp for imp in import_listing.imports
if (not self.import_oracle.is_go_internal_import(imp) and
# We assume relative imports are local to the package and skip attempts to
# recursively resolve them.
not self._is_relative(imp))] | python | {
"resource": ""
} |
q27755 | GoFetch._write_import_root_map_file | train | def _write_import_root_map_file(path, import_root_map):
"""Writes a file mapping import paths to roots."""
with safe_concurrent_creation(path) as safe_path:
with open(safe_path, 'w') as fp:
for import_path, root in sorted(import_root_map.items()):
fp.write('{}\t{}\n'.format(import_path, root)) | python | {
"resource": ""
} |
q27756 | declares_namespace_package | train | def declares_namespace_package(filename):
"""Given a filename, walk its ast and determine if it declares a namespace package."""
import ast
with open(filename) as fp:
init_py = ast.parse(fp.read(), filename)
calls = [node for node in ast.walk(init_py) if isinstance(node, ast.Call)]
for call in calls:
if len(call.args) != 1:
continue
if isinstance(call.func, ast.Attribute) and call.func.attr != 'declare_namespace':
continue
if isinstance(call.func, ast.Name) and call.func.id != 'declare_namespace':
continue
if isinstance(call.args[0], ast.Name) and call.args[0].id == '__name__':
return True
return False | python | {
"resource": ""
} |
q27757 | TargetAncestorIterator.iter_target_siblings_and_ancestors | train | def iter_target_siblings_and_ancestors(self, target):
"""Produces an iterator over a target's siblings and ancestor lineage.
:returns: A target iterator yielding the target and its siblings and then it ancestors from
nearest to furthest removed.
"""
def iter_targets_in_spec_path(spec_path):
try:
siblings = SiblingAddresses(spec_path)
for address in self._build_graph.inject_specs_closure([siblings]):
yield self._build_graph.get_target(address)
except AddressLookupError:
# A spec path may not have any addresses registered under it and that's ok.
# For example:
# a:a
# a/b/c:c
#
# Here a/b contains no addresses.
pass
def iter_siblings_and_ancestors(spec_path):
for sibling in iter_targets_in_spec_path(spec_path):
yield sibling
parent_spec_path = os.path.dirname(spec_path)
if parent_spec_path != spec_path:
for parent in iter_siblings_and_ancestors(parent_spec_path):
yield parent
for target in iter_siblings_and_ancestors(target.address.spec_path):
yield target | python | {
"resource": ""
} |
q27758 | ExportedTargetDependencyCalculator._walk | train | def _walk(self, target, visitor):
"""Walks the dependency graph for the given target.
:param target: The target to start the walk from.
:param visitor: A function that takes a target and returns `True` if its dependencies should
also be visited.
"""
visited = set()
def walk(current):
if current not in visited:
visited.add(current)
keep_going = visitor(current)
if keep_going:
for dependency in self.dependencies(current):
walk(dependency)
walk(target) | python | {
"resource": ""
} |
q27759 | ExportedTargetDependencyCalculator._closure | train | def _closure(self, target):
"""Return the target closure as defined by this dependency calculator's definition of a walk."""
closure = set()
def collect(current):
closure.add(current)
return True
self._walk(target, collect)
return closure | python | {
"resource": ""
} |
q27760 | ExportedTargetDependencyCalculator.reduced_dependencies | train | def reduced_dependencies(self, exported_target):
"""Calculates the reduced transitive dependencies for an exported target.
The reduced set of dependencies will be just those transitive dependencies "owned" by
the `exported_target`.
A target is considered "owned" if:
1. It's "3rdparty" and "directly reachable" from `exported_target` by at least 1 path.
2. It's not "3rdparty" and not "directly reachable" by any of `exported_target`'s "3rdparty"
dependencies.
Here "3rdparty" refers to targets identified as either `is_third_party` or `is_exported`.
And in this context "directly reachable" means the target can be reached by following a series
of dependency links from the `exported_target`, never crossing another exported target and
staying within the `exported_target` address space. It's the latter restriction that allows for
unambiguous ownership of exportable targets and mirrors the BUILD file convention of targets
only being able to own sources in their filesystem subtree. The single ambiguous case that can
arise is when there is more than one exported target in the same BUILD file family that can
"directly reach" a target in its address space.
:raises: `UnExportedError` if the given `exported_target` is not, in-fact, exported.
:raises: `NoOwnerError` if a transitive dependency is found with no proper owning exported
target.
:raises: `AmbiguousOwnerError` if there is more than one viable exported owner target for a
given transitive dependency.
"""
# The strategy adopted requires 3 passes:
# 1.) Walk the exported target to collect provisional owned exportable targets, but _not_
# 3rdparty since these may be introduced by exported subgraphs we discover in later steps!
# 2.) Determine the owner of each target collected in 1 by walking the ancestor chain to find
# the closest exported target. The ancestor chain is just all targets whose spec path is
# a prefix of the descendant. In other words, all targets in descendant's BUILD file family
# (its siblings), all targets in its parent directory BUILD file family, and so on.
# 3.) Finally walk the exported target once more, replacing each visited dependency with its
# owner.
if not self.is_exported(exported_target):
raise self.UnExportedError('Cannot calculate reduced dependencies for a non-exported '
'target, given: {}'.format(exported_target))
owner_by_owned_python_target = OrderedDict()
# Only check ownership on the original target graph.
original_exported_target = exported_target.derived_from
def collect_potentially_owned_python_targets(current):
if current.is_original:
owner_by_owned_python_target[current] = None # We can't know the owner in the 1st pass.
return (current == exported_target) or not self.is_exported(current)
self._walk(original_exported_target, collect_potentially_owned_python_targets)
for owned in owner_by_owned_python_target:
if self.requires_export(owned) and not self.is_exported(owned):
potential_owners = set()
for potential_owner in self._ancestor_iterator.iter_target_siblings_and_ancestors(owned):
if self.is_exported(potential_owner) and owned in self._closure(potential_owner):
potential_owners.add(potential_owner)
if not potential_owners:
raise self.NoOwnerError('No exported target owner found for {}'.format(owned))
owner = potential_owners.pop()
if potential_owners:
ambiguous_owners = [o for o in potential_owners
if o.address.spec_path == owner.address.spec_path]
if ambiguous_owners:
raise self.AmbiguousOwnerError('Owners for {} are ambiguous. Found {} and '
'{} others: {}'.format(owned,
owner,
len(ambiguous_owners),
ambiguous_owners))
owner_by_owned_python_target[owned] = owner
reduced_dependencies = OrderedSet()
def collect_reduced_dependencies(current):
if current == exported_target:
return True
else:
# The provider will be one of:
# 1. `None`, ie: a 3rdparty requirement we should collect.
# 2. `exported_target`, ie: a local exportable target owned by `exported_target` that we
# should collect
# 3. Or else a local exportable target owned by some other exported target in which case
# we should collect the exported owner.
owner = owner_by_owned_python_target.get(current)
if owner is None or owner == exported_target:
reduced_dependencies.add(current)
else:
reduced_dependencies.add(owner)
return owner == exported_target or not self.requires_export(current)
self._walk(exported_target, collect_reduced_dependencies)
return OrderedSet(d for d in reduced_dependencies if d.is_original) | python | {
"resource": ""
} |
q27761 | SetupPy.iter_entry_points | train | def iter_entry_points(cls, target):
"""Yields the name, entry_point pairs of binary targets in this PythonArtifact."""
for name, binary_target in target.provided_binaries.items():
concrete_target = binary_target
if not isinstance(concrete_target, PythonBinary) or concrete_target.entry_point is None:
raise TargetDefinitionException(target,
'Cannot add a binary to a PythonArtifact if it does not contain an entry_point.')
yield name, concrete_target.entry_point | python | {
"resource": ""
} |
q27762 | SetupPy.nearest_subpackage | train | def nearest_subpackage(cls, package, all_packages):
"""Given a package, find its nearest parent in all_packages."""
def shared_prefix(candidate):
zipped = zip(package.split('.'), candidate.split('.'))
matching = itertools.takewhile(lambda pair: pair[0] == pair[1], zipped)
return [pair[0] for pair in matching]
shared_packages = [_f for _f in map(shared_prefix, all_packages) if _f]
return '.'.join(max(shared_packages, key=len)) if shared_packages else package | python | {
"resource": ""
} |
q27763 | SetupPy.find_packages | train | def find_packages(self, root_target, chroot):
"""Detect packages, namespace packages and resources from an existing chroot.
:returns: a tuple of:
set(packages)
set(namespace_packages)
map(package => set(files))
"""
base = os.path.join(chroot.path(), self.SOURCE_ROOT)
packages, namespace_packages = set(), set()
resources = defaultdict(set)
def iter_files():
for root, _, files in safe_walk(base):
module = os.path.relpath(root, base).replace(os.path.sep, '.')
for filename in files:
yield module, filename, os.path.join(root, filename)
# establish packages, namespace packages in first pass
inits_to_check = {}
for module, filename, real_filename in iter_files():
if filename != '__init__.py':
continue
packages.add(module)
inits_to_check[real_filename] = module
namespace_packages = {inits_to_check[init]
for init in self.filter_namespace_packages(root_target,
inits_to_check.keys())}
# second pass establishes non-source content (resources)
for module, filename, real_filename in iter_files():
if filename.endswith('.py'):
if module not in packages:
# TODO(wickman) Consider changing this to a full-on error as it could indicate bad BUILD
# hygiene.
# raise cls.UndefinedSource('{} is source but does not belong to a package!'
# .format(filename))
self.context.log.warn('{} is source but does not belong to a package.'
.format(real_filename))
else:
continue
submodule = self.nearest_subpackage(module, packages)
if submodule == module:
resources[submodule].add(filename)
else:
assert module.startswith(submodule + '.')
relative_module = module[len(submodule) + 1:]
relative_filename = os.path.join(relative_module.replace('.', os.path.sep), filename)
resources[submodule].add(relative_filename)
return packages, namespace_packages, resources | python | {
"resource": ""
} |
def write_contents(self, root_target, reduced_dependencies, chroot):
    """Write contents of the target.

    Copies the sources of `root_target` (and of any reduced dependency that
    contributes sources rather than a published distribution) into the chroot
    under SOURCE_ROOT, along with the parent `__init__.py` files needed to make
    the copied modules importable.

    :param root_target: The target whose setup.py chroot is being populated.
    :param reduced_dependencies: The already-reduced dependency targets of `root_target`.
    :param chroot: The chroot to copy files into.
    """
    def write_target_source(target, src):
        # Copy the source file itself into the chroot's source root.
        chroot.copy(os.path.join(get_buildroot(), target.target_base, src),
                    os.path.join(self.SOURCE_ROOT, src))

        # check parent __init__.pys to see if they also need to be copied.  this is to allow
        # us to determine if they belong to regular packages or namespace packages.
        while True:
            src = os.path.dirname(src)
            if not src:
                # Do not allow the repository root to leak (i.e. '.' should not be a package in setup.py)
                break
            if os.path.exists(os.path.join(target.target_base, src, '__init__.py')):
                chroot.copy(os.path.join(target.target_base, src, '__init__.py'),
                            os.path.join(self.SOURCE_ROOT, src, '__init__.py'))

    def write_target(target):
        # We want to operate on the final sources target owns, so we potentially replace it with
        # the target derived from it (by a codegen task).
        subject = self.derived_by_original.get(target, target)
        for rel_source in subject.sources_relative_to_buildroot():
            abs_source_path = os.path.join(get_buildroot(), rel_source)
            abs_source_root_path = os.path.join(get_buildroot(), subject.target_base)
            # Re-root the source relative to its source root so the chroot layout
            # matches the package structure rather than the repo layout.
            source_root_relative_path = os.path.relpath(abs_source_path, abs_source_root_path)
            write_target_source(subject, source_root_relative_path)

    write_target(root_target)

    for dependency in reduced_dependencies:
        if self.is_python_target(dependency) and not dependency.provides:
            # Source-bearing python targets without `provides` are folded into this
            # distribution; targets with `provides` become external requirements instead.
            write_target(dependency)
        elif self.is_resources_target(dependency):
            write_target(dependency)
"resource": ""
} |
def write_setup(self, root_target, reduced_dependencies, chroot):
    """Write the setup.py of a target.

    Must be run after writing the contents to the chroot.

    Assembles the setuptools keyword dict from the target's `provides` clause,
    the packages/namespace packages/resources discovered in the chroot, the
    install requirements derived from `reduced_dependencies`, and any console
    script entry points, then renders it into `setup.py` plus a `MANIFEST.in`.
    """
    # Start from the keywords declared on the target's `provides` clause.
    setup_keywords = root_target.provides.setup_py_keywords.copy()

    # All sources were copied under SOURCE_ROOT, so that is the package root.
    package_dir = {'': self.SOURCE_ROOT}
    packages, namespace_packages, resources = self.find_packages(root_target, chroot)

    if namespace_packages:
        setup_keywords['namespace_packages'] = list(sorted(namespace_packages))

    if packages:
        setup_keywords.update(
            package_dir=package_dir,
            packages=list(sorted(packages)),
            package_data=dict((str(package), list(map(str, rs)))
                              for (package, rs) in resources.items()))

    setup_keywords['install_requires'] = list(self.install_requires(reduced_dependencies))

    # Accumulate console_scripts entry points, creating the nested dict/list
    # structure lazily so declared setup_py keywords are preserved.
    for binary_name, entry_point in self.iter_entry_points(root_target):
        if 'entry_points' not in setup_keywords:
            setup_keywords['entry_points'] = {}
        if 'console_scripts' not in setup_keywords['entry_points']:
            setup_keywords['entry_points']['console_scripts'] = []
        setup_keywords['entry_points']['console_scripts'].append(
            '{} = {}'.format(binary_name, entry_point))

    # Render the keywords into the setup.py boilerplate, recording which
    # target address the file was generated from.
    setup_py = self._setup_boilerplate().format(setup_dict=distutils_repr(setup_keywords),
                                                setup_target=root_target.address.reference())
    chroot.write(ensure_binary(setup_py), 'setup.py')

    # Make sure that `setup.py` is included.
    chroot.write('include *.py', 'MANIFEST.in', mode='w')
"resource": ""
} |
def get_all(self):
    """Return per-cache statistics as a list of dicts.

    Each entry carries the cache name, hit/miss counts, and the recorded
    hit/miss target lists for that cache.
    """
    return [
        {
            'cache_name': cache_name,
            'num_hits': len(stat.hit_targets),
            'num_misses': len(stat.miss_targets),
            'hits': stat.hit_targets,
            'misses': stat.miss_targets
        }
        for cache_name, stat in self.stats_per_cache.items()
    ]
"resource": ""
} |
def _tool_classpath(self, tool, products, scheduler):
    """Resolve the classpath for *tool* at the configured scala version.

    Absolute entries from the product map are converted to buildroot-relative
    paths before being handed to the memoized classpath constructor.
    """
    versioned_name = self.versioned_tool_name(tool, self.version)
    absolute_entries = self.tool_classpath_from_products(products, versioned_name,
                                                         scope=self.options_scope)
    buildroot = get_buildroot()
    relative_entries = tuple(fast_relpath(entry, buildroot) for entry in absolute_entries)
    return self._memoized_scalac_classpath(relative_entries, scheduler)
"resource": ""
} |
def style_classpath(self, products, scheduler):
    """Return the scalastyle tool classpath as a list of path strings."""
    return [entry.path for entry in self._tool_classpath('scalastyle', products, scheduler)]
"resource": ""
} |
def suffix_version(self, name):
    """Append the configured scala platform version to *name*.

    Raises ValueError if *name* already ends with the version, and
    RuntimeError when a custom platform is in use without a suffix version.
    """
    if self.version == 'custom':
        suffix = self.get_options().suffix_version
        if not suffix:
            raise RuntimeError('Suffix version must be specified if using a custom scala version. '
                               'Suffix version is used for bootstrapping jars. If a custom '
                               'scala version is not specified, then the version specified in '
                               '--scala-suffix-version is used. For example for Scala '
                               '2.10.7 you would use the suffix version "2.10".')
        return '{0}_{1}'.format(name, suffix)
    if name.endswith(self.version):
        raise ValueError('The name "{0}" should not be suffixed with the scala platform version '
                         '({1}): it will be added automatically.'.format(name, self.version))
    return '{0}_{1}'.format(name, self.version)
"resource": ""
} |
def _get_all_cmd_line_scopes():
    """Return every scope that may be named explicitly on the command line.

    Only the global scope and task scopes qualify: other scopes (e.g.
    subsystem scopes) aren't specifiable on the cmd line.
    """
    scopes = {GLOBAL_SCOPE}
    scopes.update(task.get_scope_info().scope
                  for goal in Goal.all()
                  for task in goal.task_types())
    return scopes
"resource": ""
} |
def get_autocomplete_options_by_scope(self):
    """Return all cmd-line options.

    These are of two types: scoped and unscoped. Scoped options are explicitly scoped
    (e.g., --goal-task-foo-bar) and may appear anywhere on the cmd line. Unscoped options
    may only appear in the appropriate cmd line scope (e.g., ./pants goal.task --foo-bar).

    Technically, any scoped option can appear anywhere, but in practice, having so many
    autocomplete options is more confusing than useful. So, as a heuristic:

    1. In global scope we only autocomplete globally-registered options.
    2. In a goal scope we only autocomplete options registered by any task in that goal.
    3. In a task scope we only autocomplete options registered by that task.

    :return: A map of scope -> options to complete at that scope.
    """
    autocomplete_options_by_scope = defaultdict(set)

    def get_from_parser(parser):
        # Extract structured help info for this parser's scope.
        oschi = HelpInfoExtracter.get_option_scope_help_info_from_parser(parser)
        # We ignore advanced options, as they aren't intended to be used on the cmd line.
        option_help_infos = oschi.basic + oschi.recursive
        for ohi in option_help_infos:
            # An option completes in its own scope under both spellings.
            autocomplete_options_by_scope[oschi.scope].update(ohi.unscoped_cmd_line_args)
            autocomplete_options_by_scope[oschi.scope].update(ohi.scoped_cmd_line_args)
            # Autocomplete to this option in the enclosing goal scope, but exclude options registered
            # on us, but not by us, e.g., recursive options (which are registered by
            # GlobalOptionsRegisterer).
            # We exclude those because they are already registered on the goal scope anyway
            # (via the recursion) and it would be confusing and superfluous to have autocompletion
            # to both --goal-recursive-opt and --goal-task-recursive-opt in goal scope.
            if issubclass(ohi.registering_class, TaskBase):
                goal_scope = oschi.scope.partition('.')[0]
                autocomplete_options_by_scope[goal_scope].update(ohi.scoped_cmd_line_args)

    # Visit every registered parser, accumulating into the map above.
    self.context.options.walk_parsers(get_from_parser)
    return autocomplete_options_by_scope
"resource": ""
} |
def register_tool(self, tool, name=None):
    """Check tool and see if it is installed in the local cpp toolchain.

    All cpp tasks should request their tools using this method. Tools are validated
    and cached for quick lookup.

    :param string tool: Name or path of program tool, eg 'g++'
    :param string name: Logical name of tool, eg 'compiler'. If not supplied defaults to basename
                        of `tool`
    :returns: Absolute path of the validated tool executable.
    :rtype: string
    :raises: `self.Error` when the tool cannot be located.
    """
    # Local import keeps this drop-in even if the module doesn't import shutil.
    import shutil

    name = name or os.path.basename(tool)
    if name in self._validated_tools:
        return self._validated_tools[name]

    # shutil.which replaces the hand-rolled PATH search this method used to
    # carry: it returns `tool` itself when it is a path to an executable file,
    # and otherwise searches the PATH directories for an executable match.
    tool_path = shutil.which(tool)
    if tool_path is None:
        raise self.Error('Failed to locate {0}. Please install.'.format(tool))
    self._validated_tools[name] = tool_path
    return tool_path
"resource": ""
} |
def retry_on_exception(func, max_retries, exception_types, backoff_func=lambda n: 0):
    """Invoke *func*, retrying when it raises one of *exception_types*.

    :param callable func: The callable to retry.
    :param int max_retries: Maximum number of attempts; the exception raised on the
                            final failed attempt is re-raised to the caller.
    :param tuple exception_types: The exception types that trigger a retry.
    :param callable backoff_func: Maps the current attempt count to the number of
                                  seconds to sleep before that retry. E.g. with
                                  max_retries=4 and backoff_func=lambda n: n * n the
                                  sleeps between retries are [1, 4, 9]. Defaults to
                                  no backoff.
    """
    attempt = 0
    while attempt < max_retries:
        if attempt:
            time.sleep(backoff_func(attempt))
        try:
            return func()
        except exception_types as exc:
            logger.debug('encountered exception on retry #{}: {!r}'.format(attempt, exc))
            if attempt == max_retries - 1:
                raise
        attempt += 1
"resource": ""
} |
def get_dependencies(self):
    """Return the data dependencies as a set of producer infos.

    Each required product type is expanded to the producer infos that can
    supply it; the union over all requirements is returned.
    """
    lookup = self._get_producer_infos_by_product_type
    return {info
            for product_type in self._dependencies
            for info in lookup(product_type)}
"resource": ""
} |
def unversioned(cls, coord):
    """Return *coord* with its version (rev) stripped.

    :param M2Coordinate coord: an M2Coordinate or JarDependency.
    :return: an equivalent coordinate with no rev; the input coordinate is
             returned as-is when it is already unversioned.
    :rtype: M2Coordinate
    """
    normalized = cls.create(coord)
    if normalized.rev is not None:
        return M2Coordinate(org=normalized.org, name=normalized.name,
                            classifier=normalized.classifier, ext=normalized.ext)
    return normalized
"resource": ""
} |
def artifact_filename(self):
    """Returns the canonical maven-style filename for an artifact pointed at by this coordinate.

    :API: public
    :rtype: string
    """
    # Fixed the misspelled local helper name ('maybe_compenent').
    def maybe_component(component):
        # Optional pieces (rev, classifier) render as '-<value>' or are omitted.
        return '-{}'.format(component) if component else ''

    return '{org}-{name}{rev}{classifier}.{ext}'.format(org=self.org,
                                                        name=self.name,
                                                        rev=maybe_component(self.rev),
                                                        classifier=maybe_component(self.classifier),
                                                        ext=self.ext)
"resource": ""
} |
def copy(self, **replacements):
    """Return a clone of this coordinate with *replacements* overlaid on its fields."""
    kwargs = dict(org=self.org, name=self.name, ext=self.ext,
                  classifier=self.classifier, rev=self.rev)
    kwargs.update(replacements)
    return type(self)(**kwargs)
"resource": ""
} |
def handle_log(self, workunit, level, *msg_elements):
    """Handle a message logged by pants code.

    level: One of the log level constants.

    Each element in msg_elements is either a message or a (message, detail) pair.
    A subclass must show the message, but may choose to show the detail in some
    sensible way (e.g., when the message text is clicked on in a browser).

    This convenience implementation filters by log level and then delegates to
    do_handle_log.
    """
    threshold = self.level_for_workunit(workunit, self.settings.log_level)
    if level > threshold:
        return
    self.do_handle_log(workunit, level, *msg_elements)
"resource": ""
} |
def attempt(self, explain):
    """Attempts to execute the goal's tasks in installed order.

    :param bool explain: If ``True`` then the goal plan will be explained instead of being
                         executed.
    """
    # Each goal gets its own workdir under the global pants workdir.
    goal_workdir = os.path.join(self._context.options.for_global_scope().pants_workdir,
                                self._goal.name)
    with self._context.new_workunit(name=self._goal.name, labels=[WorkUnitLabel.GOAL]):
        # Tasks run in reverse registration order, each in its own workunit
        # with per-task log level/color configuration.
        for name, task_type in reversed(list(self._tasktypes_by_name.items())):
            task_workdir = os.path.join(goal_workdir, name)
            task = task_type(self._context, task_workdir)
            log_config = WorkUnit.LogConfig(level=task.get_options().level, colors=task.get_options().colors)
            with self._context.new_workunit(name=name, labels=[WorkUnitLabel.TASK], log_config=log_config):
                if explain:
                    # In explain mode nothing executes; the plan is printed below.
                    self._context.log.debug('Skipping execution of {} in explain mode'.format(name))
                elif task.skip_execution:
                    self._context.log.info('Skipping {}'.format(name))
                else:
                    task.execute()
        if explain:
            # Print the goal -> task plan, mirroring the execution order above.
            reversed_tasktypes_by_name = reversed(list(self._tasktypes_by_name.items()))
            goal_to_task = ', '.join(
                '{}->{}'.format(name, task_type.__name__) for name, task_type in reversed_tasktypes_by_name)
            print('{goal} [{goal_to_task}]'.format(goal=self._goal.name, goal_to_task=goal_to_task))
"resource": ""
} |
def build_exception_map(cls, tokens):
    """Map line number -> column ranges in which trailing whitespace is accepted.

    Only COMMENT and STRING tokens contribute ranges: a single-line token covers
    its own span; a multi-line token covers its first line from its start column
    onward, every interior line fully, and its last line up to its end column.
    """
    allowed = defaultdict(list)
    for tok in tokens:
        tok_type, _, (start_line, start_col), (end_line, end_col) = tok[0:4]
        if tok_type not in (tokenize.COMMENT, tokenize.STRING):
            continue
        if start_line == end_line:
            allowed[start_line].append((start_col, end_col))
        else:
            allowed[start_line].append((start_col, sys.maxsize))
            for interior_line in range(start_line + 1, end_line):
                allowed[interior_line].append((0, sys.maxsize))
            allowed[end_line].append((0, end_col))
    return allowed
"resource": ""
} |
def ensure_workspace(self, target):
    """Ensures that an up-to-date Go workspace exists for the given target.

    Creates any necessary symlinks to source files based on the target and its transitive
    dependencies, and removes any symlinks which do not correspond to any needed dep.
    """
    gopath = self.get_gopath(target)
    # Lay out the standard Go workspace skeleton.
    for d in ('bin', 'pkg', 'src'):
        safe_mkdir(os.path.join(gopath, d))
    # Collect the full set of links this workspace needs; the symlink helpers
    # record every link they create into `required_links`.
    required_links = set()
    for dep in target.closure():
        if not isinstance(dep, GoTarget):
            # Non-Go deps contribute nothing to the workspace.
            continue
        if self.is_remote_lib(dep):
            self._symlink_remote_lib(gopath, dep, required_links)
        else:
            self._symlink_local_src(gopath, dep, required_links)
    # Drop stale links from previous runs that are no longer required.
    self.remove_unused_links(os.path.join(gopath, 'src'), required_links)
"resource": ""
} |
def remove_unused_links(dirpath, required_links):
    """Recursively delete symlinks under *dirpath* that are not in *required_links*.

    :param str dirpath: Absolute path of directory to search.
    :param container required_links: Absolute paths of "in use" links which should
                                     not be removed.
    """
    for root, dirs, files in os.walk(dirpath):
        for entry in chain(dirs, files):
            full_path = os.path.join(root, entry)
            if not os.path.islink(full_path):
                continue
            if full_path not in required_links:
                os.unlink(full_path)
"resource": ""
} |
def _symlink_local_src(self, gopath, go_local_src, required_links):
    """Symlink the source files of a local Go package into *gopath*.

    Mirrors the directory structure leading to the package's sources under the
    workspace for isolation, and adds the created source links to
    *required_links*.
    """
    buildroot = get_buildroot()
    abs_sources = (os.path.join(buildroot, src)
                   for src in go_local_src.sources_relative_to_buildroot())
    rel_sources = go_local_src.sources_relative_to_target_base()
    return self._symlink_lib(gopath, go_local_src, zip(abs_sources, rel_sources),
                             required_links)
"resource": ""
} |
def _symlink_remote_lib(self, gopath, go_remote_lib, required_links):
    """Symlink the fetched sources of a remote Go library into *gopath*.

    Any regular file in the fetched source directory is linked — not just .go
    files — since a Go package may carry .c, .cc, etc. files that are all
    needed for installation. Created links are added to *required_links*.
    """
    def iter_sources():
        src_dir = self.context.products.get_data('go_remote_lib_src')[go_remote_lib]
        for entry in os.listdir(src_dir):
            candidate = os.path.join(src_dir, entry)
            if os.path.isfile(candidate):
                yield candidate, os.path.basename(entry)

    return self._symlink_lib(gopath, go_remote_lib, iter_sources(), required_links)
"resource": ""
} |
def iterate(self, scopes):
    """Yield ScopeInfo instances for *scopes*, plus relevant related scopes.

    Relevant scopes are:
    - All tasks in a requested goal.
    - All subsystems tied to a requested scope.

    Yields in a sensible order: sorted by scope, but with subsystems tied to a
    requested scope following that scope, e.g.,

    goal1
    goal1.task11
    subsys.goal1.task11
    goal1.task12
    goal2.task21
    ...
    """
    expanded_infos = [self._scope_to_info[scope] for scope in self._expand_tasks(scopes)]
    for info in self._expand_subsystems(expanded_infos):
        yield info
"resource": ""
} |
def _expand_tasks(self, scopes):
    """Add all task scopes living under any requested goal.

    Returns the requested scopes plus the added task scopes, sorted by name.
    """
    expanded = set(scopes)

    def under_requested_scope(scope):
        # Walk outward through enclosing scopes; include the task when any
        # strict ancestor (short of the global scope) has been requested.
        ancestor = enclosing_scope(scope)
        while ancestor != GLOBAL_SCOPE:
            if ancestor in expanded:
                return True
            ancestor = enclosing_scope(ancestor)
        return False

    for scope, info in self._scope_to_info.items():
        if info.category == ScopeInfo.TASK and under_requested_scope(scope):
            expanded.add(scope)
    return sorted(expanded)
"resource": ""
} |
def _expand_subsystems(self, scope_infos):
    """Add all subsystems tied to a scope, right after that scope."""
    # Get non-global subsystem dependencies of the specified subsystem client.
    def subsys_deps(subsystem_client_cls):
        # Depth-first recursion: yield each non-globally-scoped dependency's
        # ScopeInfo, then that dependency's own subsystem dependencies.
        for dep in subsystem_client_cls.subsystem_dependencies_iter():
            if dep.scope != GLOBAL_SCOPE:
                yield self._scope_to_info[dep.options_scope]
                for x in subsys_deps(dep.subsystem_cls):
                    yield x

    for scope_info in scope_infos:
        # Always yield the requested scope itself first.
        yield scope_info
        if scope_info.optionable_cls is not None:
            # We don't currently subclass GlobalOptionsRegistrar, and I can't think of any reason why
            # we would, but might as well be robust.
            if issubclass(scope_info.optionable_cls, GlobalOptionsRegistrar):
                # We were asked for global help, so also yield for all global subsystems.
                for scope, info in self._scope_to_info.items():
                    if info.category == ScopeInfo.SUBSYSTEM and enclosing_scope(scope) == GLOBAL_SCOPE:
                        yield info
                        for subsys_dep in subsys_deps(info.optionable_cls):
                            yield subsys_dep
            elif issubclass(scope_info.optionable_cls, SubsystemClientMixin):
                # A regular subsystem client: follow its non-global deps.
                for subsys_dep in subsys_deps(scope_info.optionable_cls):
                    yield subsys_dep
"resource": ""
} |
def generate_doc(self, language_predicate, create_jvmdoc_command):
    """Generate documentation for all matching targets.

    language_predicate: a function that accepts a target and returns True if the target is of that
                        language
    create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
                           documentation for targets
    """
    # Whether downstream tasks asked for per-target doc products.
    catalog = self.context.products.isrequired(self.jvmdoc().product_type)
    if catalog and self.combined:
        raise TaskError(
            'Cannot provide {} target mappings for combined output'.format(self.jvmdoc().product_type))

    def docable(target):
        # A target is documented only if it matches the language, passes the
        # codegen filter, and matches no exclude pattern.
        if not language_predicate(target):
            self.context.log.debug('Skipping [{}] because it is does not pass the language predicate'.format(target.address.spec))
            return False
        if not self._include_codegen and target.is_synthetic:
            self.context.log.debug('Skipping [{}] because it is a synthetic target'.format(target.address.spec))
            return False
        for pattern in self._exclude_patterns:
            if pattern.search(target.address.spec):
                self.context.log.debug(
                    "Skipping [{}] because it matches exclude pattern '{}'".format(target.address.spec, pattern.pattern))
                return False
        return True

    targets = self.get_targets(predicate=docable)
    if not targets:
        return

    with self.invalidated(targets, invalidate_dependents=self.combined) as invalidation_check:
        def find_invalid_targets():
            invalid_targets = set()
            for vt in invalidation_check.invalid_vts:
                invalid_targets.update(vt.targets)
            return invalid_targets

        invalid_targets = list(find_invalid_targets())
        if invalid_targets:
            # Combined mode regenerates over all targets; individual mode only
            # regenerates the stale ones.
            if self.combined:
                self._generate_combined(targets, create_jvmdoc_command)
            else:
                self._generate_individual(invalid_targets, create_jvmdoc_command)

    if self.open and self.combined:
        # Optionally open the combined docs in the user's browser/UI.
        try:
            desktop.ui_open(os.path.join(self.workdir, 'combined', 'index.html'))
        except desktop.OpenError as e:
            raise TaskError(e)

    if catalog:
        # Publish the generated doc files (relative to each gendir) as products.
        for target in targets:
            gendir = self._gendir(target)
            jvmdocs = []
            for root, dirs, files in safe_walk(gendir):
                jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files)
            self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs)
"resource": ""
} |
def classpath(self, targets, classpath_prefix=None, classpath_product=None, exclude_scopes=None,
              include_scopes=None):
    """Build a transitive classpath for the given targets.

    Optionally includes a classpath prefix or builds from a non-default
    classpath product.

    :param targets: the targets for which to build the transitive classpath.
    :param classpath_prefix: optional additional entries to prepend to the classpath.
    :param classpath_product: an optional ClasspathProduct from which to build the classpath;
      defaults to the runtime_classpath product.
    :param :class:`pants.build_graph.target_scopes.Scope` exclude_scopes: Exclude targets which
      have at least one of these scopes on the classpath.
    :param :class:`pants.build_graph.target_scopes.Scope` include_scopes: Only include targets which
      have at least one of these scopes on the classpath. Defaults to Scopes.JVM_RUNTIME_SCOPES.
    :return: a list of classpath strings.
    """
    if include_scopes is None:
        include_scopes = Scopes.JVM_RUNTIME_SCOPES
    classpath_product = classpath_product or self.context.products.get_data('runtime_classpath')
    transitive_targets = BuildGraph.closure(targets, bfs=True, include_scopes=include_scopes,
                                            exclude_scopes=exclude_scopes,
                                            respect_intransitive=True)
    target_entries = ClasspathUtil.classpath(transitive_targets, classpath_product, self.confs)
    result = list(classpath_prefix or ())
    result.extend(target_entries)
    return result
"resource": ""
} |
def atomic_copy(src, dst):
    """Copy the file src to dst, overwriting dst atomically.

    The contents are first copied to a temporary file in dst's directory (so the
    final rename stays on one filesystem), the source's permission bits are
    applied, and the temp file is then renamed over dst.
    """
    with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
        shutil.copyfile(src, tmp_dst.name)
        # Preserve the source's mode bits on the destination.
        os.chmod(tmp_dst.name, os.stat(src).st_mode)
        # NOTE(review): after this rename the temp path no longer exists;
        # assumes temporary_file tolerates cleaning up a missing file — confirm.
        os.rename(tmp_dst.name, dst)
"resource": ""
} |
def safe_temp_edit(filename):
    """Safely modify a file within a context that automatically reverts any changes afterwards.

    The file mutation occurs in place. The file is backed up in a temporary file before edits
    occur and when the context is closed, the mutated file is discarded and replaced with the backup.

    WARNING: There may be a chance that the file may not be restored (e.g. if the process dies
    between the edit and the restore) and this method should be used carefully with the known risk.
    """
    with temporary_file() as tmp_file:
        try:
            # Back up the original contents before handing the path to the caller.
            shutil.copyfile(filename, tmp_file.name)
            yield filename
        finally:
            # Restore the backup even if the managed block raised.
            shutil.copyfile(tmp_file.name, filename)
"resource": ""
} |
def create_size_estimators():
    """Create a dict of name to a function that returns an estimated size for given sources.

    The estimated size is used to build the largest targets first (subject to dependency
    constraints). Choose 'random' to choose random sizes for each target, which may be
    useful for distributed builds.

    :returns: Dict of a name to a function that returns an estimated size.
    """
    def line_count(filename):
        # Count physical lines without loading the whole file into memory.
        with open(filename, 'rb') as fh:
            return sum(1 for line in fh)

    def total_line_count(srcs):
        return sum(line_count(src) for src in srcs)

    def total_file_size(srcs):
        return sum(os.path.getsize(src) for src in srcs)

    return {
        'linecount': total_line_count,
        'filecount': len,
        'filesize': total_file_size,
        'nosize': lambda srcs: 0,
        'random': lambda srcs: random.randint(0, 10000),
    }
"resource": ""
} |
def fallible_to_exec_result_or_raise(fallible_result, request):
    """Convert a FallibleExecuteProcessResult into an ExecuteProcessResult.

    :raises ProcessExecutionFailure: when the process exited non-zero; the failure
      carries the exit code, the captured stdout/stderr, and the request's description.
    """
    if fallible_result.exit_code != 0:
        raise ProcessExecutionFailure(
            fallible_result.exit_code,
            fallible_result.stdout,
            fallible_result.stderr,
            request.description
        )
    return ExecuteProcessResult(
        fallible_result.stdout,
        fallible_result.stderr,
        fallible_result.output_directory_digest
    )
"resource": ""
} |
def parse(cls, filename, root=None):
    """Parse the file at *filename* and return a PythonFile.

    If *root* is specified, the file is opened at root/filename and *filename*
    must be relative. The idea is to allow errors to contain a friendlier file
    path than the full absolute path.
    """
    if root is None:
        full_filename = filename
    else:
        if os.path.isabs(filename):
            raise ValueError("filename must be a relative path if root is specified")
        full_filename = os.path.join(root, filename)

    with io.open(full_filename, 'rb') as fp:
        blob = fp.read()
    return cls(blob=blob, tree=cls._parse(blob, filename), root=root, filename=filename)
"resource": ""
} |
def translate_logical_line(start, end, contents, indent_stack, endmarker=False):
    """Collapse raw physical lines into a logical-line record.

    Strips leading and trailing blank lines (mutating *contents* in place),
    computes the indentation from the indent stack — or from the line itself
    for the endmarker token — and returns (start, stop, indent) with an
    exclusive stop.
    """
    # Drop blank lines at the front, advancing the start line number.
    while contents[0] == '\n':
        start += 1
        contents.pop(0)
    # Drop blank lines at the back, retreating the end line number.
    while contents[-1] == '\n':
        end -= 1
        contents.pop()
    if endmarker:
        indent = len(contents[0])
    else:
        indent = len(indent_stack[-1]) if indent_stack else 0
    return start, end + 1, indent
"resource": ""
} |
def line_range(self, line_number):
    """Return the slice of physical lines covered by *line_number*.

    A line that begins a logical line maps to that logical line's full span;
    any other valid line maps to just itself.

    :raises IndexError: for line numbers outside 1..len(self.lines).
    """
    if not 0 < line_number <= len(self.lines):
        raise IndexError('NOTE: Python file line numbers are offset by 1.')
    logical = self.logical_lines.get(line_number)
    if logical is None:
        return slice(line_number, line_number + 1)
    start, stop, _ = logical
    return slice(start, stop)
"resource": ""
} |
def strict_deps_for_target(self, target, predicate=None):
    """Get the dependencies of `target` filtered by `predicate`, accounting for 'strict_deps'.

    If 'strict_deps' is on, instead of using the transitive closure of dependencies, targets will
    only be able to see their immediate dependencies declared in the BUILD file. The 'strict_deps'
    setting is obtained from the result of `get_compile_settings()`.

    NB: This includes the current target in the result.

    :returns: A list of dependency targets (previously the non-strict branch returned a
      one-shot `filter` iterator, so callers iterating twice silently got nothing the
      second time; both branches now return a list).
    """
    if self._native_build_settings.get_strict_deps_value_for_target(target):
        strict_deps = target.strict_dependencies(DependencyContext())
        if predicate:
            filtered_deps = list(filter(predicate, strict_deps))
        else:
            filtered_deps = strict_deps
        deps = [target] + filtered_deps
    else:
        deps = self.context.build_graph.transitive_subgraph_of_addresses(
            [target.address], predicate=predicate)
        # Filter out the beginning target depending on whether it matches the predicate.
        # TODO: There should be a cleaner way to do this.
        deps = list(filter(predicate, deps))
    return deps
"resource": ""
} |
def _rearrange_output_for_package(self, target_workdir, java_package):
    """Rearrange the output files to match a standard Java structure.

    Antlr emits a directory structure based on the relative path provided
    for the grammar file. If the source root of the file is different from
    the Pants build root, then the Java files end up with undesired parent
    directories. This flattens every generated file into the directory
    derived from `java_package` and prunes the leftover empty directories.
    """
    package_dir_rel = java_package.replace('.', os.path.sep)
    package_dir = os.path.join(target_workdir, package_dir_rel)
    safe_mkdir(package_dir)
    for root, dirs, files in safe_walk(target_workdir):
        if root == package_dir:
            # Files already at the destination need no move. (Bug fix: this
            # previously compared the absolute walk root against the *relative*
            # package path, so the skip never fired and in-place files were
            # renamed onto themselves.)
            continue
        for f in files:
            os.rename(
                os.path.join(root, f),
                os.path.join(package_dir, f)
            )
    # Remove any empty directories that were left behind.
    for root, dirs, files in safe_walk(target_workdir, topdown=False):
        for d in dirs:
            full_dir = os.path.join(root, d)
            if not os.listdir(full_dir):
                os.rmdir(full_dir)
"resource": ""
} |
def _scrub_generated_timestamps(self, target_workdir):
    """Remove the first line of comment from each generated file if it contains a timestamp."""
    for root, _, filenames in safe_walk(target_workdir):
        for filename in filenames:
            source = os.path.join(root, filename)

            with open(source, 'r') as f:
                lines = f.readlines()
            if not lines:
                # Nothing to scrub; move on to the next file. (Bug fix: this
                # previously `return`ed, silently skipping every remaining file
                # in the walk once one empty file was encountered.)
                continue
            with open(source, 'w') as f:
                # Keep the first line only when it is not a timestamp comment.
                if not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0]):
                    f.write(lines[0])
                f.writelines(lines[1:])
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.