_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def load_file_contents(cls, file_contents, seed_values=None):
  """Loads config from the given string payloads.

  A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT
  section, and be available for use in substitutions. The caller may override some of these
  seed values.

  :param list[FileContents] file_contents: Load from these FileContents. Later instances take
                                           precedence over earlier ones. If empty, returns an
                                           empty config.
  :param seed_values: A dict with optional override seed values for buildroot, pants_workdir,
                      pants_supportdir and pants_distdir.
  """
  @contextmanager
  def in_memory_opener(file_content):
    # Present the raw bytes payload as a file-like object, as _meta_load expects.
    buffer = io.BytesIO(file_content.content)
    try:
      yield buffer
    finally:
      buffer.close()
  return cls._meta_load(in_memory_opener, file_contents, seed_values)
"resource": ""
} |
def load(cls, config_paths, seed_values=None):
  """Loads config from the given paths.

  A handful of seed values will be set to act as if specified in the loaded config file's DEFAULT
  section, and be available for use in substitutions. The caller may override some of these
  seed values.

  :param list config_paths: Load from these paths. Later instances take precedence over earlier
                            ones. If empty, returns an empty config.
  :param seed_values: A dict with optional override seed values for buildroot, pants_workdir,
                      pants_supportdir and pants_distdir.
  """
  @contextmanager
  def file_opener(path):
    # Open in binary mode; _meta_load handles decoding.
    handle = open(path, 'rb')
    try:
      yield handle
    finally:
      handle.close()
  return cls._meta_load(file_opener, config_paths, seed_values)
"resource": ""
} |
def dlog(msg, log_path=DEFAULT_LOG_PATH):
  """A handy log utility for debugging multi-process, multi-threaded activities.

  Appends a single line tagged with the current pid and thread name to `log_path`.
  """
  line = '\n{}@{}: {}'.format(os.getpid(), threading.current_thread().name, msg)
  # Append mode so concurrent writers from other processes are not clobbered.
  with open(log_path, 'a') as log_file:
    log_file.write(line)
"resource": ""
} |
def get_passthru_args(self):
  """Returns the passthru args for this task, if it supports them.

  :API: public
  :raises TaskError: If this task does not support passthru args.
  """
  if self.supports_passthru_args():
    return self.context.options.passthru_args_for_scope(self.options_scope)
  raise TaskError('{0} Does not support passthru args.'.format(self.stable_name()))
"resource": ""
} |
def get_targets(self, predicate=None):
  """Returns the candidate targets this task should act on.

  This method is a convenience for processing optional transitivity. Tasks may bypass it
  and make their own decisions on which targets to act on.

  NOTE: This method was introduced in 2018, so at the time of writing few tasks consult it.
  Instead, they query self.context.targets directly.
  TODO: Fix up existing targets to consult this method, for uniformity.

  Note that returned targets have not been checked for invalidation. The caller should do
  so as needed, typically by calling self.invalidated().

  :API: public
  """
  if self.act_transitively:
    candidates = self.context.targets(predicate)
  else:
    # Non-transitive: only consider the explicitly-requested roots.
    candidates = list(filter(predicate, self.context.target_roots))
  if self.target_filtering_enabled:
    return self._filter_targets(candidates)
  return candidates
"resource": ""
} |
def versioned_workdir(self):
  """The Task.workdir suffixed with a fingerprint of the Task implementation version.

  When choosing whether to store values directly in `self.workdir` or below it in
  the directory returned by this property, you should generally prefer this value.

  :API: public
  """
  path = os.path.join(self.workdir, self.implementation_version_slug())
  # Ensure the directory exists before handing it to callers.
  safe_mkdir(path)
  return path
"resource": ""
} |
def fingerprint(self):
  """Returns a fingerprint for the identity of the task.

  A task fingerprint is composed of the options the task is currently running under.
  Useful for invalidating unchanging targets being executed beneath changing task
  options that affect outputted artifacts.

  A task's fingerprint is only valid after the task has been fully initialized.
  """
  # Roll in the task name, its own options, its implementation version, and the options of
  # every subsystem it transitively depends on.
  components = [self.stable_name(),
                self._options_fingerprint(self.options_scope),
                self.implementation_version_str()]
  components.extend(self._options_fingerprint(dep.options_scope)
                    for dep in self.subsystem_closure_iter())
  hasher = sha1()
  for component in components:
    hasher.update(component.encode('utf-8'))
  digest = hasher.hexdigest()
  return digest if PY3 else digest.decode('utf-8')
"resource": ""
} |
def invalidated(self,
                targets,
                invalidate_dependents=False,
                silent=False,
                fingerprint_strategy=None,
                topological_order=False):
  """Checks targets for invalidation, first checking the artifact cache.

  Subclasses call this to figure out what to work on.

  :API: public

  :param targets: The targets to check for changes.
  :param invalidate_dependents: If True then any targets depending on changed targets are
                                invalidated.
  :param silent: If true, suppress logging information about target invalidation.
  :param fingerprint_strategy: A FingerprintStrategy instance, which can do per task,
                               finer grained fingerprinting of a given Target.
  :param topological_order: Whether to invalidate in dependency order.

  If no exceptions are thrown by work in the block, the build cache is updated for the targets.
  Note: the artifact cache is not updated. That must be done manually.

  NOTE(review): this is a generator that yields exactly once — presumably consumed via
  `with self.invalidated(...) as invalidation_check:` (i.e. wrapped with @contextmanager
  outside this view) — confirm against the enclosing class.

  :returns: Yields an InvalidationCheck object reflecting the targets.
  :rtype: InvalidationCheck
  """
  # Fingerprint all targets and partition them into valid/invalid sets.
  invalidation_check = self._do_invalidation_check(fingerprint_strategy,
                                                   invalidate_dependents,
                                                   targets,
                                                   topological_order)
  self._maybe_create_results_dirs(invalidation_check.all_vts)
  # Try to satisfy invalid targets from the artifact cache before doing any work.
  if invalidation_check.invalid_vts and self.artifact_cache_reads_enabled():
    with self.context.new_workunit('cache'):
      cached_vts, uncached_vts, uncached_causes = \
        self.check_artifact_cache(self.check_artifact_cache_for(invalidation_check))
    if cached_vts:
      cached_targets = [vt.target for vt in cached_vts]
      self.context.run_tracker.artifact_cache_stats.add_hits(self._task_name, cached_targets)
      if not silent:
        self._report_targets('Using cached artifacts for ', cached_targets, '.')
    if uncached_vts:
      uncached_targets = [vt.target for vt in uncached_vts]
      self.context.run_tracker.artifact_cache_stats.add_misses(self._task_name,
                                                               uncached_targets,
                                                               uncached_causes)
      if not silent:
        self._report_targets('No cached artifacts for ', uncached_targets, '.')
    # Now that we've checked the cache, re-partition whatever is still invalid.
    invalidation_check = InvalidationCheck(invalidation_check.all_vts, uncached_vts)
  if not silent:
    targets = []
    for vt in invalidation_check.invalid_vts:
      targets.extend(vt.targets)
    if len(targets):
      target_address_references = [t.address.reference() for t in targets]
      msg_elements = [
        'Invalidated ',
        items_to_report_element(target_address_references, 'target'),
        '.',
      ]
      self.context.log.info(*msg_elements)
  self._update_invalidation_report(invalidation_check, 'pre-check')
  # Cache has been checked to create the full list of invalid VTs.
  # Only copy previous_results for this subset of VTs.
  if self.incremental:
    for vts in invalidation_check.invalid_vts:
      vts.copy_previous_results()
  # This may seem odd: why would we need to invalidate a VersionedTargetSet that is already
  # invalid? But the name force_invalidate() is slightly misleading in this context - what it
  # actually does is delete the key file created at the end of the last successful task run.
  # This is necessary to avoid the following scenario:
  #
  # 1) In state A: Task succeeds and writes some output. Key is recorded by the invalidator.
  # 2) In state B: Task fails, but writes some output. Key is not recorded.
  # 3) After reverting back to state A: The current key is the same as the one recorded at the
  #    end of step 1), so it looks like no work needs to be done, but actually the task
  #    must re-run, to overwrite the output written in step 2.
  #
  # Deleting the file ensures that if a task fails, there is no key for which we might think
  # we're in a valid state.
  for vts in invalidation_check.invalid_vts:
    vts.force_invalidate()
  # Yield the result, and then mark the targets as up to date.
  yield invalidation_check
  self._update_invalidation_report(invalidation_check, 'post-check')
  for vt in invalidation_check.invalid_vts:
    vt.update()
  # Background work to clean up previous builds.
  if self.context.options.for_global_scope().workdir_max_build_entries is not None:
    self._launch_background_workdir_cleanup(invalidation_check.all_vts)
"resource": ""
} |
def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
  """Checks the artifact cache for the specified list of VersionedTargetSets.

  Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
  satisfied/unsatisfied from the cache.

  :param vts: VersionedTargetSets to look up in the read cache.
  :param post_process_cached_vts: Optional callable invoked with the flattened list of
                                  cache-satisfied VersionedTargets before they are updated.
  """
  if not vts:
    return [], [], []
  read_cache = self._cache_factory.get_read_cache()
  items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None)
           for vt in vts]
  # Fetch cache entries in parallel; `res` is ordered like `items` (and therefore like `vts`).
  res = self.context.subproc_map(call_use_cached_files, items)
  cached_vts = []
  uncached_vts = []
  uncached_causes = []
  # Note that while the input vts may represent multiple targets (for tasks that override
  # check_artifact_cache_for), the ones we return must represent single targets.
  # Once flattened, cached/uncached vts are in separate lists. Each uncached vts is paired
  # with why it is missed for stat reporting purpose.
  for vt, was_in_cache in zip(vts, res):
    if was_in_cache:
      cached_vts.extend(vt.versioned_targets)
    else:
      uncached_vts.extend(vt.versioned_targets)
      # `was_in_cache` doubles as the miss cause (False or an error object).
      uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets)))
      if isinstance(was_in_cache, UnreadableArtifact):
        # Remember bad keys so later cache writes overwrite the corrupt entries.
        self._cache_key_errors.update(was_in_cache.key)
  if post_process_cached_vts:
    post_process_cached_vts(cached_vts)
  for vt in cached_vts:
    vt.update()
  return cached_vts, uncached_vts, uncached_causes
"resource": ""
} |
def update_artifact_cache(self, vts_artifactfiles_pairs):
  """Write to the artifact cache, if we're configured to.

  vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
    - vts is single VersionedTargetSet.
    - artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet.
  """
  work = self._get_update_artifact_cache_work(vts_artifactfiles_pairs)
  if not work:
    return
  # Cache writes happen in the background so the build does not block on them.
  self.context.submit_background_work_chain([work], parent_workunit_name='cache')
"resource": ""
} |
def _get_update_artifact_cache_work(self, vts_artifactfiles_pairs):
  """Create a Work instance to update an artifact cache, if we're configured to.

  vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
    - vts is single VersionedTargetSet.
    - artifactfiles is a list of paths to artifacts for the VersionedTargetSet.

  :returns: A Work instance, or None if cache writes are disabled or there is nothing to cache.
  """
  cache = self._cache_factory.get_write_cache()
  if not cache:
    return None
  if not vts_artifactfiles_pairs:
    return None
  # Report which targets are about to be cached.
  targets = set()
  for vts, _ in vts_artifactfiles_pairs:
    targets.update(vts.targets)
  self._report_targets(
    'Caching artifacts for ',
    list(targets),
    '.',
    logger=self.context.log.debug,
  )
  always_overwrite = self._cache_factory.overwrite()
  # Build one insert call per pair; force-overwrite entries we previously failed to read.
  args_tuples = []
  for vts, artifactfiles in vts_artifactfiles_pairs:
    overwrite = always_overwrite or vts.cache_key in self._cache_key_errors
    args_tuples.append((cache, vts.cache_key, artifactfiles, overwrite))
  return Work(lambda x: self.context.subproc_map(call_insert, x), [(args_tuples,)], 'insert')
"resource": ""
} |
def require_single_root_target(self):
  """If a single target was specified on the cmd line, returns that target.

  Otherwise throws TaskError.

  :API: public
  :raises TaskError: If zero or more than one target root was specified.
  """
  target_roots = self.context.target_roots
  if not target_roots:
    raise TaskError('No target specified.')
  if len(target_roots) > 1:
    raise TaskError('Multiple targets specified: {}'
                    .format(', '.join([repr(t) for t in target_roots])))
  return target_roots[0]
"resource": ""
} |
def determine_target_roots(self, goal_name):
  """Helper for tasks that scan for default target roots.

  :param string goal_name: The goal name to use for any warning emissions.
  :returns: The context's target roots (possibly empty).
  """
  roots = self.context.target_roots
  if not roots:
    # For the v2 path, e.g. `./pants list` is a functional no-op. This matches the v2 mode
    # behavior of e.g. `./pants --changed-parent=HEAD list` (w/ no changes) returning an
    # empty result.
    print('WARNING: No targets were matched in goal `{}`.'.format(goal_name), file=sys.stderr)
  return roots
"resource": ""
} |
def create_archiver(typename):
  """Returns Archivers in common configurations.

  :API: public

  The typename must correspond to one of the following:
  'tar'   Returns a tar archiver that applies no compression and emits .tar files.
  'tgz'   Returns a tar archiver that applies gzip compression and emits .tar.gz files.
  'tbz2'  Returns a tar archiver that applies bzip2 compression and emits .tar.bz2 files.
  'zip'   Returns a zip archiver that applies standard compression and emits .zip files.
  'jar'   Returns a jar archiver that applies no compression and emits .jar files.
    Note this is provided as a light way of zipping input files into a jar, without the
    need to prepare Manifest etc. For more advanced usages, please refer to :class:
    `pants.backend.jvm.subsystems.jar_tool.JarTool` or :class:
    `pants.backend.jvm.tasks.jar_task.JarTask`.

  :raises ValueError: If no archiver is registered for `typename`.
  """
  archiver = _ARCHIVER_BY_TYPE.get(typename)
  if archiver:
    return archiver
  raise ValueError('No archiver registered for {!r}'.format(typename))
"resource": ""
} |
def archiver_for_path(path_name):
  """Returns an Archiver for the given path name.

  :API: public

  :param string path_name: The path name of the archive - need not exist.
  :raises: :class:`ValueError` If the path name does not uniquely identify a supported archive type.
  """
  # Double extensions must be checked before the generic single-extension fallback.
  if path_name.endswith('.tar.gz'):
    return TGZ
  if path_name.endswith('.tar.bz2'):
    return TBZ2
  ext = os.path.splitext(path_name)[1]
  ext = ext[1:] if ext else ext  # Trim the leading '.'.
  if not ext:
    raise ValueError('Could not determine archive type of path {}'.format(path_name))
  return create_archiver(ext)
"resource": ""
} |
def extract(self, path, outdir, concurrency_safe=False, **kwargs):
  """Extracts an archive's contents to the specified outdir with an optional filter.

  Keyword arguments are forwarded to the instance's self._extract() method.

  :API: public

  :param string path: path to the zipfile to extract from
  :param string outdir: directory to extract files into
  :param bool concurrency_safe: True to use concurrency safe method. Concurrency safe
    extraction will be performed on a temporary directory and the extracted directory will
    then be renamed atomically to the outdir. As a side effect, concurrency safe extraction
    will not allow overlay of extracted contents onto an existing outdir.
  """
  if not concurrency_safe:
    # Default behavior: extract in place, allowing overlay onto existing outdir contents.
    self._extract(path, outdir, **kwargs)
    return
  # Extract into a scratch dir, then atomically rename it into place.
  with temporary_dir() as scratch_dir:
    self._extract(path, scratch_dir, **kwargs)
    safe_concurrent_rename(scratch_dir, outdir)
"resource": ""
} |
def _invoke_xz(self, xz_input_file):
  """Run the xz command and yield a file object for its stdout.

  This allows streaming the decompressed tar archive directly into a tar decompression stream,
  which is significantly faster in practice than making a temporary file.

  :param string xz_input_file: Path to the .xz file to decompress.
  :raises XZArchiverError: If xz cannot be launched, or exits with a non-zero code.
  """
  # TODO: --threads=0 is supposed to use "the number of processor cores on the machine", but I
  # see no more than 100% cpu used at any point. This seems like it could be a bug? If performance
  # is an issue, investigate further.
  cmd = [self._xz_binary_path, '--decompress', '--stdout', '--keep', '--threads=0', xz_input_file]
  try:
    # Pipe stderr to our own stderr, but leave stdout open so we can yield it.
    process = subprocess.Popen(
      cmd,
      stdout=subprocess.PIPE,
      stderr=sys.stderr)
  except OSError as e:
    raise self.XZArchiverError(
      "Error invoking xz with command {} for input file {}: {}"
      .format(cmd, xz_input_file, e),
      e)
  # This is a file object.
  # NOTE(review): this is a generator — presumably the caller wraps it as a context manager
  # so control returns here (and wait() runs) after the stream is consumed; confirm.
  yield process.stdout
  rc = process.wait()
  if rc != 0:
    raise self.XZArchiverError(
      "Error decompressing xz input with command {} for input file {}. Exit code was: {}. "
      .format(cmd, xz_input_file, rc))
"resource": ""
} |
def _extract(self, path, outdir, filter_func=None):
  """Extract from a zip file, with an optional filter.

  :param function filter_func: optional filter with the filename as the parameter. Returns True
    if the file should be extracted.
  :raises ValueError: If an archive entry has an unsafe (absolute or parent-relative) path.
  """
  with open_zip(path) as archive_file:
    for name in archive_file.namelist():
      # Reject absolute paths and names starting with '..'.
      # NOTE(review): a leading check only — entries like 'a/../../b' are left to
      # ZipFile.extract's own path sanitization; confirm that is sufficient.
      if name.startswith(('/', '..')):
        raise ValueError('Zip file contains unsafe path: {}'.format(name))
      if filter_func and not filter_func(name):
        continue
      archive_file.extract(name, outdir)
"resource": ""
} |
def add_directory_digests_for_jars(self, targets_and_jars):
  """For each target, get DirectoryDigests for its jars and return them zipped with the jars.

  :param targets_and_jars: List of tuples of the form
    (Target, [pants.java.jar.jar_dependency_utils.ResolveJar])
  :return: list[tuple[(Target, list[pants.java.jar.jar_dependency_utils.ResolveJar])]
  """
  targets_and_jars=list(targets_and_jars)
  # No-op when there is nothing to snapshot or snapshot capture is disabled by option.
  if not targets_and_jars or not self.get_options().capture_snapshots:
    return targets_and_jars
  # Flatten every jar path (across all targets) into one list, relative to the buildroot.
  jar_paths = []
  for target, jars_to_snapshot in targets_and_jars:
    for jar in jars_to_snapshot:
      jar_paths.append(fast_relpath(jar.pants_path, get_buildroot()))
  # One single-path glob per jar, captured in one batch call.
  snapshots = self.context._scheduler.capture_snapshots(
    tuple(
      PathGlobsAndRoot(PathGlobs([jar]), get_buildroot()) for jar in jar_paths
    ))
  # We want to map back the list[Snapshot] to targets_and_jars
  # We assume that (1) jars_to_snapshot has the same number of ResolveJars as snapshots does Snapshots,
  # and that (2) capture_snapshots preserves ordering.
  digests = [snapshot.directory_digest for snapshot in snapshots]
  digest_iterator = iter(digests)
  # Rebuild each ResolvedJar with its digest attached, consuming digests in flattening order.
  snapshotted_targets_and_jars = []
  for target, jars_to_snapshot in targets_and_jars:
    snapshotted_jars = [ResolvedJar(coordinate=jar.coordinate,
                                    cache_path=jar.cache_path,
                                    pants_path=jar.pants_path,
                                    directory_digest=next(digest_iterator)) for jar in jars_to_snapshot]
    snapshotted_targets_and_jars.append((target, snapshotted_jars))
  return snapshotted_targets_and_jars
"resource": ""
} |
def add_basic_info(self, run_id, timestamp):
  """Adds basic build info: run id, time, user, machine, buildroot and pants version."""
  formatted_time = time.strftime('%A %b %d, %Y %H:%M:%S', time.localtime(timestamp))
  buildroot = get_buildroot()
  info = [
    ('id', run_id),
    ('timestamp', timestamp),
    ('datetime', formatted_time),
    ('user', getpass.getuser()),
    ('machine', socket.gethostname()),
    # TODO: Get rid of the redundant 'path' key once everyone is off it.
    ('path', buildroot),
    ('buildroot', buildroot),
    ('version', version.VERSION),
  ]
  self.add_infos(*info)
"resource": ""
} |
def add_scm_info(self):
  """Adds SCM-related info: revision and branch, or 'none' placeholders without an SCM."""
  scm = get_scm()
  if not scm:
    revision, branch = 'none', 'none'
  else:
    revision = scm.commit_id
    # Fall back to the revision when the SCM reports no branch (e.g. detached HEAD).
    branch = scm.branch_name or revision
  self.add_infos(('revision', revision), ('branch', branch))
"resource": ""
} |
def _get_python_thrift_library_sources(self, py_thrift_targets):
  """Get file contents for python thrift library targets.

  :param py_thrift_targets: Iterable of python thrift library targets.
  :returns: An OrderedDict mapping each target to a list of (path, content) tuples, one per
            source file.
  """
  # Capture each target's sources as a digest, keyed by target (order preserved).
  target_snapshots = OrderedDict(
    (t, t.sources_snapshot(scheduler=self.context._scheduler).directory_digest)
    for t in py_thrift_targets)
  # Batch-request file contents for all digests; product_request preserves the request order,
  # so zipping against the same keys() realigns results with their targets.
  filescontent_by_target = OrderedDict(zip(
    target_snapshots.keys(),
    self.context._scheduler.product_request(FilesContent, target_snapshots.values())))
  thrift_file_sources_by_target = OrderedDict(
    (t, [(file_content.path, file_content.content) for file_content in all_content.dependencies])
    for t, all_content in filescontent_by_target.items())
  return thrift_file_sources_by_target
"resource": ""
} |
q27222 | JaxbGen._guess_package | train | def _guess_package(self, path):
"""Used in execute_codegen to actually invoke the compiler with the proper arguments, and in
_sources_to_be_generated to declare what the generated files will be.
"""
supported_prefixes = ('com', 'org', 'net',)
package = ''
slash = path.rfind(os.path.sep)
prefix_with_slash = max(path.rfind(os.path.join('', prefix, ''))
for prefix in supported_prefixes)
if prefix_with_slash < 0:
package = path[:slash]
elif prefix_with_slash >= 0:
package = path[prefix_with_slash:slash]
package = package.replace(os.path.sep, ' ')
package = package.strip().replace(' ', '.')
return package | python | {
"resource": ""
} |
def load(cls, directory):
  """Load a Digest from a `.digest` file adjacent to the given directory.

  :return: A Digest, or None if the Digest did not exist.
  """
  serialized = maybe_read_file(cls._path(directory))
  if not serialized:
    return None
  # The serialized form is '<fingerprint>:<serialized_bytes_length>' (see dump()).
  fingerprint, length = serialized.split(':')
  return Digest(fingerprint, int(length))
"resource": ""
} |
def dump(self, directory):
  """Dump this Digest object adjacent to the given directory."""
  # Serialized form is '<fingerprint>:<serialized_bytes_length>', matching what load() parses.
  serialized = '{}:{}'.format(self.fingerprint, self.serialized_bytes_length)
  safe_file_dump(self._path(directory), payload=serialized)
"resource": ""
} |
def snapshot(self, scheduler=None):
  """
  Returns a Snapshot containing the sources, relative to the build root.

  This API is experimental, and subject to change.
  """
  # Eager filesets may already carry a snapshot; prefer it over re-capturing.
  if isinstance(self._sources, EagerFilesetWithSpec):
    existing_snapshot = self._sources.snapshot
    if existing_snapshot is not None:
      return existing_snapshot
  globs = PathGlobs(tuple(self.relative_to_buildroot()))
  return scheduler.product_request(Snapshot, [globs])[0]
"resource": ""
} |
def combine_hashes(hashes):
  """A simple helper function to combine other hashes. Sorts the hashes before rolling them in."""
  # Sorting makes the combined digest independent of input ordering.
  hasher = sha1()
  for item in sorted(hashes):
    hasher.update(ensure_binary(item))
  digest = hasher.hexdigest()
  return digest if PY3 else digest.decode('utf-8')
"resource": ""
} |
def fingerprint(self):
  """A memoized sha1 hexdigest hashing the contents of this PayloadField

  The fingerprint returns either a string or None. If the return is None, consumers of the
  fingerprint may choose to elide this PayloadField from their combined hash computation.

  :API: public
  """
  # Compute at most once; subsequent calls return the cached value. (A None result is not
  # cached, matching the contract above.)
  memo = self._fingerprint_memo
  if memo is None:
    memo = self._compute_fingerprint()
    self._fingerprint_memo = memo
  return memo
"resource": ""
} |
def parse_failed_targets(test_registry, junit_xml_path, error_handler):
  """Parses junit xml reports and maps targets to the set of individual tests that failed.

  Targets with no failed tests are omitted from the returned mapping and failed tests with no
  identifiable owning target are keyed under `None`.

  :param test_registry: A registry of tests that were run.
  :type test_registry: :class:`RegistryOfTests`
  :param string junit_xml_path: A path to a file or directory containing test junit xml reports
                                to analyze.
  :param error_handler: An error handler that will be called with any junit xml parsing errors.
  :type error_handler: callable that accepts a single :class:`ParseError` argument.
  :returns: A mapping from targets to the set of individual tests that failed. Any failed tests
            that belong to no identifiable target will be mapped to `None`.
  :rtype: dict from :class:`pants.build_graph.target.Target` to a set of :class:`Test`
  """
  failed_targets = defaultdict(set)
  def parse_junit_xml_file(path):
    # Parse errors are routed through error_handler rather than raised, so one bad report
    # does not abort processing of the remaining reports.
    try:
      xml = XmlParser.from_file(path)
      failures = int(xml.get_attribute('testsuite', 'failures'))
      errors = int(xml.get_attribute('testsuite', 'errors'))
      # Only walk the testcases when the suite-level counters say something went wrong.
      if failures or errors:
        for testcase in xml.parsed.getElementsByTagName('testcase'):
          test_failed = testcase.getElementsByTagName('failure')
          test_errored = testcase.getElementsByTagName('error')
          if test_failed or test_errored:
            test = Test(classname=testcase.getAttribute('classname'),
                        methodname=testcase.getAttribute('name'))
            # May be None if no owning target can be identified.
            target = test_registry.get_owning_target(test)
            failed_targets[target].add(test)
    except (XmlParser.XmlError, ValueError) as e:
      error_handler(ParseError(path, e))
  if os.path.isdir(junit_xml_path):
    # A directory: scan recursively for surefire-style TEST-*.xml reports.
    for root, _, files in safe_walk(junit_xml_path):
      for junit_xml_file in fnmatch.filter(files, 'TEST-*.xml'):
        parse_junit_xml_file(os.path.join(root, junit_xml_file))
  else:
    parse_junit_xml_file(junit_xml_path)
  return dict(failed_targets)
"resource": ""
} |
def injectables_specs_for_key(self, key):
  """Given a key, yield all relevant injectable spec addresses.

  :API: public
  :param key: A key into the `injectables_spec_mapping` property.
  :returns: A list of spec strings.
  :raises NoMappingForKey: If there is no mapping entry for `key`.
  :raises TypeError: If the mapping entry for `key` is not a list.
  """
  mapping = self.injectables_spec_mapping
  if key not in mapping:
    raise self.NoMappingForKey(key)
  specs = mapping[key]
  # An explicit raise instead of `assert`: asserts are stripped under `python -O`, which
  # would silently skip this validation of subclass-provided mappings.
  if not isinstance(specs, list):
    raise TypeError(
      'invalid `injectables_spec_mapping` on {!r} for key "{}". '
      'expected a `list` but instead found a `{}`: {}'
      .format(self, key, type(specs), specs))
  return [Address.parse(s).spec for s in specs]
"resource": ""
} |
def injectables_spec_for_key(self, key):
  """Given a key, yield a singular spec representing that key.

  :API: public
  :returns: The single spec for `key`, or None if the key maps to no specs.
  :raises TooManySpecsForKey: If the key maps to more than one spec.
  """
  specs = self.injectables_specs_for_key(key)
  if not specs:
    return None
  if len(specs) > 1:
    raise self.TooManySpecsForKey('injectables spec mapping for key included {} elements, '
                                  'expected 1'.format(len(specs)))
  return specs[0]
"resource": ""
} |
q27231 | JvmDependencyCheck._skip | train | def _skip(options):
"""Return true if the task should be entirely skipped, and thus have no product requirements."""
values = [options.missing_direct_deps, options.unnecessary_deps]
return all(v == 'off' for v in values) | python | {
"resource": ""
} |
def check(self, src_tgt, actual_deps):
  """Check for missing deps.

  See docstring for _compute_missing_deps for details.

  :param src_tgt: The target whose declared dependencies are being checked.
  :param actual_deps: Map of source file -> dep files actually used, as noted by the compiler.
  :raises TaskError: If a check configured as 'fatal' finds violations.
  """
  if self._check_missing_direct_deps or self._check_unnecessary_deps:
    missing_file_deps, missing_direct_tgt_deps = \
      self._compute_missing_deps(src_tgt, actual_deps)
    buildroot = get_buildroot()
    def shorten(path):  # Make the output easier to read.
      if path.startswith(buildroot):
        return os.path.relpath(path, buildroot)
      return path
    def filter_whitelisted(missing_deps):
      # Removing any targets that exist in the whitelist from the list of dependency issues.
      return [(tgt_pair, evidence) for (tgt_pair, evidence) in missing_deps
              if tgt_pair[0].address not in self._target_whitelist]
    missing_direct_tgt_deps = filter_whitelisted(missing_direct_tgt_deps)
    if self._check_missing_direct_deps and missing_direct_tgt_deps:
      # 'fatal' logs at error and raises after reporting every violation; anything else warns.
      log_fn = (self.context.log.error if self._check_missing_direct_deps == 'fatal'
                else self.context.log.warn)
      for (tgt_pair, evidence) in missing_direct_tgt_deps:
        evidence_str = '\n'.join([' {} uses {}'.format(shorten(e[0]), shorten(e[1]))
                                  for e in evidence])
        log_fn('Missing direct BUILD dependency {} -> {} because:\n{}'
               .format(tgt_pair[0].address.spec, tgt_pair[1].address.spec, evidence_str))
      if self._check_missing_direct_deps == 'fatal':
        raise TaskError('Missing direct deps.')
    if self._check_unnecessary_deps:
      log_fn = (self.context.log.error if self._check_unnecessary_deps == 'fatal'
                else self.context.log.warn)
      had_unused = self._do_check_unnecessary_deps(src_tgt, actual_deps, log_fn)
      if had_unused and self._check_unnecessary_deps == 'fatal':
        raise TaskError('Unnecessary deps.')
"resource": ""
} |
def _compute_missing_deps(self, src_tgt, actual_deps):
  """Computes deps that are used by the compiler but not specified in a BUILD file.

  These deps are bugs waiting to happen: the code may happen to compile because the dep was
  brought in some other way (e.g., by some other root target), but that is obviously fragile.

  Note that in practice we're OK with reliance on indirect deps that are only brought in
  transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
  cases aren't as fragile as a completely missing dependency. It's still a good idea to have
  explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
  easy to find and reason about.

  - actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
    compiler.

  Returns a tuple (missing_file_deps, missing_direct_tgt_deps) where:
  - missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
    to map to a target (because its target isn't in the total set of targets in play,
    and we don't want to parse every BUILD file in the workspace just to find it).
  - missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
    on dep_tgt but has a transitive dep on it.

  All paths in the input and output are absolute.
  """
  analyzer = self._analyzer
  def must_be_explicit_dep(dep):
    # We don't require explicit deps on the java runtime, so we shouldn't consider that
    # a missing dep.
    return (dep not in analyzer.bootstrap_jar_classfiles
            and not dep.startswith(DistributionLocator.cached().real_home))
  def target_or_java_dep_in_targets(target, targets):
    # We want to check if the target is in the targets collection
    #
    # However, for the special case of scala_library that has a java_sources
    # reference we're ok if that exists in targets even if the scala_library does not.
    if target in targets:
      return True
    elif isinstance(target, ScalaLibrary):
      return any(t in targets for t in target.java_sources)
    else:
      return False
  # Find deps that are actual but not specified.
  missing_file_deps = OrderedSet()  # (src, src).
  missing_direct_tgt_deps_map = defaultdict(list)  # The same, but for direct deps.
  targets_by_file = analyzer.targets_by_file(self.context.targets())
  buildroot = get_buildroot()
  abs_srcs = [os.path.join(buildroot, src) for src in src_tgt.sources_relative_to_buildroot()]
  for src in abs_srcs:
    for actual_dep in filter(must_be_explicit_dep, actual_deps.get(src, [])):
      actual_dep_tgts = targets_by_file.get(actual_dep)
      # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
      # to be in our declared deps to be OK.
      if actual_dep_tgts is None:
        # No known target owns this file: report it as an unmappable file dep.
        missing_file_deps.add((src_tgt, actual_dep))
      elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
        # Obviously intra-target deps are fine.
        canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
        if canonical_actual_dep_tgt not in src_tgt.dependencies:
          # The canonical dep is the only one a direct dependency makes sense on.
          missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
            (src, actual_dep))
  return (list(missing_file_deps),
          list(missing_direct_tgt_deps_map.items()))
"resource": ""
} |
def _compute_unnecessary_deps(self, target, actual_deps):
  """Computes unused deps for the given Target.

  :param target: The Target whose declared DEFAULT-scoped deps are checked for use.
  :param actual_deps: Map of src -> deps actually consumed, as reported by the compiler.
  :returns: A dict of directly declared but unused targets, to sets of suggested replacements.
  """
  # Flatten the product deps of this target.
  product_deps = set()
  for dep_entries in actual_deps.values():
    product_deps.update(dep_entries)

  # Determine which of the DEFAULT deps in the declared set of this target were used.
  used = set()
  unused = set()
  for dep, _ in self._analyzer.resolve_aliases(target, scope=Scopes.DEFAULT):
    if dep in used or dep in unused:
      continue
    # TODO: What's a better way to accomplish this check? Filtering by `has_sources` would
    # incorrectly skip "empty" `*_library` targets, which could then be used as a loophole.
    if isinstance(dep, (Resources, UnpackedJars)):
      continue
    # If any of the target's jars or classfiles were used, consider it used.
    if product_deps.isdisjoint(self._analyzer.files_for_target(dep)):
      unused.add(dep)
    else:
      used.add(dep)

  # If there were no unused deps, break.
  if not unused:
    return {}

  # For any deps that were used, count their derived-from targets used as well.
  # TODO: Refactor to do some of this above once tests are in place.
  for dep in list(used):
    for derived_from in dep.derived_from_chain:
      if derived_from in unused:
        unused.remove(derived_from)
        used.add(derived_from)
  # Prune derived targets that would be in the set twice.
  for dep in list(unused):
    if set(dep.derived_from_chain) & unused:
      unused.remove(dep)

  if not unused:
    return {}

  # For any deps that were not used, determine whether their transitive deps were used, and
  # recommend those as replacements.
  replacements = {}
  for dep in unused:
    replacements[dep] = set()
    for t in dep.closure():
      if t in used or t in unused:
        continue
      if not product_deps.isdisjoint(self._analyzer.files_for_target(t)):
        replacements[dep].add(t.concrete_derived_from)
  return replacements
def find_includes(basedirs, source, log=None):
  """Finds all thrift files included by the given thrift source.

  :param basedirs: A set of thrift source file base directories to look for includes in.
  :param source: The thrift source file to scan for includes.
  :param log: An optional logger.
  :returns: The set of existing include file paths, resolved against all base directories.
  :raises ValueError: If an included path cannot be found under any base directory.
  """
  # The source's own directory is always an implicit include base.
  all_basedirs = [os.path.dirname(source)]
  all_basedirs.extend(basedirs)

  includes = set()
  with open(source, 'r') as thrift:
    # Iterate the file lazily instead of materializing it with readlines().
    for line in thrift:
      match = INCLUDE_PARSER.match(line)
      if not match:
        continue
      capture = match.group(1)
      added = False
      for basedir in all_basedirs:
        candidate = os.path.join(basedir, capture)
        if os.path.exists(candidate):
          if log:
            log.debug('{} has include {}'.format(source, candidate))
          includes.add(candidate)
          added = True
      if not added:
        # Bug fix: report the raw include path (`capture`) rather than the stale loop
        # variable, which only held the join against the *last* base directory tried.
        raise ValueError("{} included in {} not found in bases {}"
                         .format(capture, source, all_basedirs))
  return includes
def find_root_thrifts(basedirs, sources, log=None):
  """Finds the root thrift files in the graph formed by sources and their recursive includes.

  :param basedirs: A set of thrift source file base directories to look for includes in.
  :param sources: Seed thrift files to examine.
  :param log: An optional logger.
  :returns: The subset of sources that no other seed source includes.
  """
  roots = set(sources)
  # Any source that appears among another seed's includes is not a root.
  for thrift_source in sources:
    roots -= find_includes(basedirs, thrift_source, log=log)
  return roots
def calculate_include_paths(targets, is_thrift_target):
  """Calculates the set of import paths for the given targets.

  :param targets: The targets to examine.
  :param is_thrift_target: A predicate to pick out thrift targets for consideration in the
    analysis.
  :returns: Include basedirs for the targets.
  """
  include_basedirs = set()

  def record_base(tgt):
    # Each thrift target contributes its source base as an include root.
    include_basedirs.add(tgt.target_base)

  for root in targets:
    root.walk(record_base, predicate=is_thrift_target)
  return include_basedirs
def home(self):
  """Returns the distribution JAVA_HOME, located lazily on first access."""
  if not self._home:
    java_home = self._get_system_properties(self.java)['java.home']
    # A JDK's embedded `jre/bin/java` reports the jre subdir as `java.home`; prefer the
    # enclosing jdk dir when it contains a usable javac.
    if os.path.basename(java_home) == 'jre':
      parent_dir = os.path.dirname(java_home)
      if self._is_executable(os.path.join(parent_dir, 'bin', 'javac')):
        java_home = parent_dir
    self._home = java_home
  return text_type(self._home)
def binary(self, name):
  """Returns the path to the command of the given name for this distribution.

  For example: ::

      >>> d = Distribution()
      >>> jar = d.binary('jar')
      >>> jar
      '/usr/bin/jar'
      >>>

  If this distribution has no valid command of the given name raises Distribution.Error.
  If this distribution is a JDK, both `bin` and `jre/bin` are consulted for the binary.

  :param name: The simple name of the binary, e.g. 'jar' or 'javac'.
  """
  if isinstance(name, str):
    self.validate()
    return self._validated_executable(name)
  raise ValueError('name must be a binary name, given {} of type {}'.format(name, type(name)))
def validate(self):
  """Validates this distribution against its configured constraints.

  Raises Distribution.Error if this distribution is not valid according to the configured
  constraints.
  """
  if self._validated_binaries:
    # A prior successful validation is cached; nothing to re-check.
    return

  with self._valid_executable('java') as java:
    if self._minimum_version:
      version = self._get_version(java)
      if version < self._minimum_version:
        raise self.Error('The java distribution at {} is too old; expecting at least {} and'
                         ' got {}'.format(java, self._minimum_version, version))
    if self._maximum_version:
      version = self._get_version(java)
      if version > self._maximum_version:
        raise self.Error('The java distribution at {} is too new; expecting no older than'
                         ' {} and got {}'.format(java, self._maximum_version, version))

  # We might be a JDK discovered by the embedded jre `java` executable.
  # If so reset the bin path to the true JDK home dir for full access to all binaries.
  self._bin_path = os.path.join(self.home, 'bin')
  try:
    self._validated_executable('javac')  # Calling purely for the check and cache side effects
    self._is_jdk = True
  except self.Error as e:
    # Missing javac only invalidates the distribution when a JDK was explicitly required;
    # a bare JRE remains acceptable otherwise.
    if self._jdk:
      logger.debug('Failed to validate javac executable. Please check you have a JDK '
                   'installed. Original error: {}'.format(e))
      raise
q27241 | _Locator._scan_constraint_match | train | def _scan_constraint_match(self, minimum_version, maximum_version, jdk):
"""Finds a cached version matching the specified constraints
:param Revision minimum_version: minimum jvm version to look for (eg, 1.7).
:param Revision maximum_version: maximum jvm version to look for (eg, 1.7.9999).
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution, or None if no matching distribution is in the cache.
:rtype: :class:`pants.java.distribution.Distribution`
"""
for dist in self._cache.values():
if minimum_version and dist.version < minimum_version:
continue
if maximum_version and dist.version > maximum_version:
continue
if jdk and not dist.jdk:
continue
return dist | python | {
"resource": ""
} |
def _locate(self, minimum_version=None, maximum_version=None, jdk=False):
  """Finds a java distribution that meets any given constraints and returns it.

  :param minimum_version: minimum jvm version to look for (eg, 1.7).
  :param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
  :param bool jdk: whether the found java distribution is required to have a jdk.
  :return: the located Distribution.
  :rtype: :class:`Distribution`
  :raises: :class:`Distribution.Error` if no suitable java distribution could be found.
  """
  # Fail fast on contradictory constraints rather than fruitlessly probing (and failing
  # validation on) every candidate JVM location first, as the original code did.
  if (minimum_version is not None
      and maximum_version is not None
      and maximum_version < minimum_version):
    raise self.Error('Pants configuration/options led to impossible constraints for {} '
                     'distribution: minimum_version {}, maximum_version {}'
                     .format('JDK' if jdk else 'JRE', minimum_version, maximum_version))

  # Note: the original wrapped this single iterable in a pointless itertools.chain.
  for location in self._distribution_environment.jvm_locations:
    try:
      dist = Distribution(home_path=location.home_path,
                          bin_path=location.bin_path,
                          minimum_version=minimum_version,
                          maximum_version=maximum_version,
                          jdk=jdk)
      dist.validate()
      logger.debug('Located {} for constraints: minimum_version {}, maximum_version {}, jdk {}'
                   .format(dist, minimum_version, maximum_version, jdk))
      return dist
    except (ValueError, Distribution.Error) as e:
      # Keep scanning; this candidate simply doesn't satisfy the constraints.
      logger.debug('{} is not a valid distribution because: {}'
                   .format(location.home_path, str(e)))

  raise self.Error('Failed to locate a {} distribution with minimum_version {}, '
                   'maximum_version {}'.format('JDK' if jdk else 'JRE',
                                               minimum_version, maximum_version))
q27243 | GoCompile._get_build_flags | train | def _get_build_flags(cls, build_flags_from_option, is_flagged, target):
"""Merge build flags with global < target < command-line order
Build flags can be defined as globals (in `pants.ini`), as arguments to a Target, and
via the command-line.
"""
# If self.get_options().build_flags returns a quoted string, remove the outer quotes,
# which happens for flags passed from the command-line.
if (build_flags_from_option.startswith('\'') and build_flags_from_option.endswith('\'')) or \
(build_flags_from_option.startswith('"') and build_flags_from_option.endswith('"')):
bfo = build_flags_from_option[1:-1]
else:
bfo = build_flags_from_option
global_build_flags, ephemeral_build_flags = ('', bfo) if is_flagged else (bfo, '')
target_build_flags = target.build_flags if getattr(target, 'build_flags', None) else ''
joined_build_flags = ' '.join([global_build_flags, target_build_flags, ephemeral_build_flags])
return cls._split_build_flags(joined_build_flags) | python | {
"resource": ""
} |
def _go_install(self, target, gopath, build_flags):
  """Create and execute a `go install` command for the given target.

  :raises TaskError: If the `go install` invocation exits non-zero.
  """
  exit_code, go_cmd = self.go_dist.execute_go_cmd(
    'install',
    gopath=gopath,
    args=build_flags + [target.import_path],
    workunit_factory=self.context.new_workunit,
    workunit_name='install {}'.format(target.import_path),
    workunit_labels=[WorkUnitLabel.COMPILER])
  if exit_code != 0:
    raise TaskError('{} failed with exit code {}'.format(go_cmd, exit_code))
def _sync_binary_dep_links(self, target, gopath, lib_binary_map):
  """Syncs symlinks under gopath to the library binaries of target's transitive dependencies.

  :param Target target: Target whose transitive dependencies must be linked.
  :param str gopath: $GOPATH of target whose "pkg/" directory must be populated with links
                     to library binaries.
  :param dict<Target, str> lib_binary_map: Dictionary mapping a remote/local Go library to the
                                           path of the compiled binary (the ".a" file) of the
                                           library.

  Required links to binary dependencies under gopath's "pkg/" dir are either created if
  non-existent, or refreshed if the link is older than the underlying binary. Any pre-existing
  links within gopath's "pkg/" dir that do not correspond to a transitive dependency of target
  are deleted.
  """
  required_links = set()
  for dep in target.closure():
    if dep == target:
      continue
    if not isinstance(dep, GoTarget):
      continue
    lib_binary = lib_binary_map[dep]
    # The link location mirrors the binary's path relative to the dependency's own GOPATH.
    lib_binary_link = os.path.join(gopath, os.path.relpath(lib_binary, self.get_gopath(dep)))
    safe_mkdir(os.path.dirname(lib_binary_link))
    if os.path.islink(lib_binary_link):
      if os.stat(lib_binary).st_mtime > os.lstat(lib_binary_link).st_mtime:
        # The binary under the link was updated after the link was created. Refresh
        # the link so the mtime (modification time) of the link is greater than the
        # mtime of the binary. This stops Go from needlessly re-compiling the library.
        os.unlink(lib_binary_link)
        os.symlink(lib_binary, lib_binary_link)
    else:
      os.symlink(lib_binary, lib_binary_link)
    required_links.add(lib_binary_link)
  # Stale links not backing any current transitive dependency are removed.
  self.remove_unused_links(os.path.join(gopath, 'pkg'), required_links)
def target_types_by_alias(self):
  """Returns a mapping from target alias to the target types produced for that alias.

  Normally there is 1 target type per alias, but macros can expand a single alias to several
  target types.

  :API: public
  :rtype: dict
  """
  mapping = {}
  for alias, target_type in self.target_types.items():
    mapping.setdefault(alias, set()).add(target_type)
  # Macro factories may fan one alias out to several target types.
  for alias, factory in self.target_macro_factories.items():
    mapping.setdefault(alias, set()).update(factory.target_types)
  return mapping
def merge(self, other):
  """Merges a set of build file aliases and returns a new set of aliases containing both.

  Any duplicate aliases from `other` will trump.

  :API: public
  :param other: The BuildFileAliases to merge in.
  :type other: :class:`BuildFileAliases`
  :returns: A new BuildFileAliases containing `other`'s aliases merged into ours.
  :rtype: :class:`BuildFileAliases`
  """
  if not isinstance(other, BuildFileAliases):
    raise TypeError('Can only merge other BuildFileAliases, given {0}'.format(other))

  def combined(*dicts):
    # Later dicts win on key collisions, giving `other` precedence.
    result = {}
    for d in dicts:
      result.update(d)
    return result

  return BuildFileAliases(
    targets=combined(self.target_types, self.target_macro_factories,
                     other.target_types, other.target_macro_factories),
    objects=combined(self.objects, other.objects),
    context_aware_object_factories=combined(self.context_aware_object_factories,
                                            other.context_aware_object_factories))
def _compile_target(self, vt):
  """'Compiles' a python target.

  'Compiling' means forming an isolated chroot of its sources and transitive deps and then
  attempting to import each of the target's sources in the case of a python library or else the
  entry point in the case of a python binary.

  For a library with sources lib/core.py and lib/util.py a "compiler" main file would look like:

    if __name__ == '__main__':
      import lib.core
      import lib.util

  For a binary with entry point lib.bin:main the "compiler" main file would look like:

    if __name__ == '__main__':
      from lib.bin import main

  In either case the main file is executed within the target chroot to reveal missing BUILD
  dependencies.

  :param vt: The versioned target to compile.
  :returns: The subprocess return code; 0 on success.
  """
  target = vt.target
  with self.context.new_workunit(name=target.address.spec):
    modules = self._get_modules(target)
    if not modules:
      # Nothing to eval, so a trivial compile success.
      return 0

    interpreter = self._get_interpreter_for_target_closure(target)
    reqs_pex = self._resolve_requirements_for_versioned_target_closure(interpreter, vt)
    srcs_pex = self._source_pex_for_versioned_target_closure(interpreter, vt)

    # Create the executable pex.
    exec_pex_parent = os.path.join(self.workdir, 'executable_pex')
    executable_file_content = self._get_executable_file_content(exec_pex_parent, modules)

    # Key the executable pex dir by a hash of its inputs so it is rebuilt only when
    # the requirements pex, sources pex, or generated entry point change.
    hasher = hashlib.sha1()
    hasher.update(reqs_pex.path().encode('utf-8'))
    hasher.update(srcs_pex.path().encode('utf-8'))
    hasher.update(executable_file_content.encode('utf-8'))
    exec_file_hash = hasher.hexdigest()
    exec_pex_path = os.path.realpath(os.path.join(exec_pex_parent, exec_file_hash))
    if not os.path.isdir(exec_pex_path):
      with safe_concurrent_creation(exec_pex_path) as safe_path:
        # Write the entry point.
        safe_mkdir(safe_path)
        with open(os.path.join(safe_path, '{}.py'.format(self._EXEC_NAME)), 'w') as outfile:
          outfile.write(executable_file_content)
        pex_info = (target.pexinfo if isinstance(target, PythonBinary) else None) or PexInfo()
        # Override any user-specified entry point, under the assumption that the
        # executable_file_content does what the user intends (including, probably, calling that
        # underlying entry point).
        pex_info.entry_point = self._EXEC_NAME
        pex_info.pex_path = ':'.join(pex.path() for pex in (reqs_pex, srcs_pex) if pex)
        builder = PEXBuilder(safe_path, interpreter, pex_info=pex_info)
        builder.freeze()

    pex = PEX(exec_pex_path, interpreter)

    with self.context.new_workunit(name='eval',
                                   labels=[WorkUnitLabel.COMPILER, WorkUnitLabel.RUN,
                                           WorkUnitLabel.TOOL],
                                   cmd=' '.join(pex.cmdline())) as workunit:
      returncode = pex.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
      workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
      if returncode != 0:
        self.context.log.error('Failed to eval {}'.format(target.address.spec))
      return returncode
def subprocess(cls, cmd, **kwargs):
  """Creates an xargs engine that uses subprocess.call to execute the given cmd array with extra
  arg chunks.

  :param cmd: The base command array each chunk of args is appended to.
  :param kwargs: Extra keyword arguments forwarded to subprocess.call.
  """
  return cls(lambda args: subprocess.call(cmd + args, **kwargs))
def execute(self, args):
  """Executes the configured cmd passing args in one or more rounds xargs style.

  :param list args: Extra arguments to pass to cmd.
  :returns: The (first non-zero) exit code of the underlying command invocation(s).
  """
  pending = list(args)
  try:
    return self._cmd(pending)
  except OSError as e:
    # E2BIG means the argument list exceeded the OS limit; bisect and retry each half.
    if e.errno != errno.E2BIG:
      raise
    first_half, second_half = self._split_args(pending)
    result = self.execute(first_half)
    return result if result != 0 else self.execute(second_half)
def close(self):
  """End the report."""
  # Strip the span-ingestion suffix to recover the Zipkin UI base URL.
  base_url = self.endpoint.replace("/api/v1/spans", "")
  logger.debug(
    "Zipkin trace may be located at this URL {}/traces/{}".format(base_url, self.trace_id))
def bulk_record_workunits(self, engine_workunits):
  """Records a collection of workunits from the v2 engine as Zipkin spans.

  :param engine_workunits: Iterable of workunit dicts carrying 'name', 'span_id',
    'start_timestamp' and 'end_timestamp' keys (and optionally 'parent_id').
  """
  for workunit in engine_workunits:
    # Zipkin spans carry a duration rather than an explicit end time.
    duration = workunit['end_timestamp'] - workunit['start_timestamp']
    span = zipkin_span(
      service_name="pants",
      span_name=workunit['name'],
      duration=duration,
      span_storage=self.span_storage,
    )
    span.zipkin_attrs = ZipkinAttrs(
      trace_id=self.trace_id,
      span_id=workunit['span_id'],
      # TODO change it when we properly pass parent_id to the v2 engine Nodes
      # TODO Pass parent_id with ExecutionRequest when v2 engine is called by a workunit
      # TODO pass parent_id when v2 engine Node is called by another v2 engine Node
      parent_span_id=workunit.get("parent_id", self.parent_id),
      flags='0',  # flags: stores flags header. Currently unused
      is_sampled=True,
    )
    # Drive the span lifecycle; the start timestamp is overwritten after start() so the
    # recorded span reflects the workunit's real start time, not the time of this call.
    span.start()
    span.start_timestamp = workunit['start_timestamp']
    span.stop()
def get_field_value(self, key, default=None):
  """Retrieves the value in the payload field if the field exists, otherwise returns the default.

  :API: public
  :param string key: The field key to look up.
  :param default: Returned when the field is absent or falsy.
  """
  payload_field = self._fields.get(key)
  # Falsy (including None) fields fall back to the default, matching the field-presence check.
  return payload_field.value if payload_field else default
def add_fields(self, field_dict):
  """Add a mapping of field names to PayloadField instances.

  :API: public
  :param dict field_dict: Maps field keys to PayloadField instances.
  """
  for field_key, payload_field in field_dict.items():
    self.add_field(field_key, payload_field)
def add_field(self, key, field):
  """Add a field to the Payload.

  :API: public
  :param string key: The key for the field. Fields can be accessed using attribute access as
                     well as `get_field` using `key`.
  :param PayloadField field: A PayloadField instance. None is an allowable value for `field`,
                             in which case it will be skipped during hashing.
  """
  # Guard clauses: reject duplicates and mutation of a frozen payload up front.
  if key in self._fields:
    raise PayloadFieldAlreadyDefinedError(
      'Key {key} is already set on this payload. The existing field was {existing_field}.'
      ' Tried to set new field {field}.'
      .format(key=key, existing_field=self._fields[key], field=field))
  if self._frozen:
    raise PayloadFrozenError(
      'Payload is frozen, field with key {key} cannot be added to it.'
      .format(key=key))
  self._fields[key] = field
  # Any cached fingerprint is now stale.
  self._fingerprint_memo = None
def fingerprint(self, field_keys=None):
  """A memoizing fingerprint that rolls together the fingerprints of underlying PayloadFields.

  If no fields were hashed (or all fields opted out of being hashed by returning `None`), then
  `fingerprint()` also returns `None`.

  :param iterable<string> field_keys: A subset of fields to use for the fingerprint. Defaults
                                      to all fields.
  """
  # A frozenset key makes memoization order-insensitive over the requested field subset.
  keys = frozenset(field_keys or self._fields.keys())
  memo = self._fingerprint_memo_map
  if keys not in memo:
    memo[keys] = self._compute_fingerprint(keys)
  return memo[keys]
def mark_dirty(self):
  """Invalidates memoized fingerprints for this payload.

  Exposed for testing.

  :API: public
  """
  self._fingerprint_memo_map = {}
  # Each field maintains its own fingerprint memo; invalidate those too.
  for payload_field in self._fields.values():
    payload_field.mark_dirty()
def create_canonical_classpath(cls, classpath_products, targets, basedir,
                               save_classpath_file=False,
                               internal_classpath_only=True,
                               excludes=None):
  """Create a stable classpath of symlinks with standardized names.

  By default symlinks are created for each target under `basedir` based on its `target.id`.
  Unique suffixes are added to further disambiguate classpath products from the same target.

  It also optionally saves the classpath products to be used externally (by intellij plugin),
  one output file for each target.

  Note calling this function will refresh the symlinks and output files for the target under
  `basedir` if they exist, but it will NOT delete/cleanup the contents for *other* targets.
  Caller wants that behavior can make the similar calls for other targets or just remove
  the `basedir` first.

  :param classpath_products: Classpath products.
  :param targets: Targets to create canonical classpath for.
  :param basedir: Directory to create symlinks.
  :param save_classpath_file: An optional file with original classpath entries that symlinks
                              are created from.
  :param internal_classpath_only: whether to create symlinks just for internal classpath or
                                  all classpath.
  :param excludes: classpath entries should be excluded.

  :returns: Converted canonical classpath.
  :rtype: list of strings
  """
  def delete_old_target_output_files(classpath_prefix):
    """Delete existing output files or symlinks for target."""
    directory, basename = os.path.split(classpath_prefix)
    # Matches both the numbered entry links (e.g. `<id>-0.jar`) and the classpath.txt file.
    pattern = re.compile(r'^{basename}(([0-9]+)(\.jar)?|classpath\.txt)$'
                         .format(basename=re.escape(basename)))
    files = [filename for filename in os.listdir(directory) if pattern.match(filename)]
    for rel_path in files:
      path = os.path.join(directory, rel_path)
      if os.path.islink(path) or os.path.isfile(path):
        safe_delete(path)

  def prepare_target_output_folder(basedir, target):
    """Prepare directory that will contain canonical classpath for the target.

    This includes creating directories if it does not already exist, cleaning up
    previous classpath output related to the target.
    """
    output_dir = basedir
    # TODO(peiyu) improve readability once we deprecate the old naming style.
    # For example, `-` is commonly placed in string format as opposed to here.
    classpath_prefix_for_target = '{basedir}/{target_id}-'.format(basedir=basedir,
                                                                  target_id=target.id)
    if os.path.exists(output_dir):
      delete_old_target_output_files(classpath_prefix_for_target)
    else:
      os.makedirs(output_dir)
    return classpath_prefix_for_target

  excludes = excludes or set()
  canonical_classpath = []
  target_to_classpath = ClasspathUtil.classpath_by_targets(targets, classpath_products)

  processed_entries = set()
  for target, classpath_entries_for_target in target_to_classpath.items():
    if internal_classpath_only:
      classpath_entries_for_target = [entry for entry in classpath_entries_for_target
                                      if ClasspathEntry.is_internal_classpath_entry(entry)]
    if len(classpath_entries_for_target) > 0:
      classpath_prefix_for_target = prepare_target_output_folder(basedir, target)

      # Note: for internal targets pants has only one classpath entry, but user plugins
      # might generate additional entries, for example, build.properties for the target.
      # Also it's common to have multiple classpath entries associated with 3rdparty targets.
      for (index, entry) in enumerate(classpath_entries_for_target):
        if entry.is_excluded_by(excludes):
          continue

        # Avoid creating symlink for the same entry twice, only the first entry on
        # classpath will get a symlink. The resulted symlinks as a whole are still stable,
        # but may have non-consecutive suffixes because the 'missing' ones are those
        # have already been created symlinks by previous targets.
        if entry in processed_entries:
          continue
        processed_entries.add(entry)

        # Create a unique symlink path by prefixing the base file name with a monotonic
        # increasing `index` to avoid name collisions.
        _, ext = os.path.splitext(entry.path)
        symlink_path = '{}{}{}'.format(classpath_prefix_for_target, index, ext)
        real_entry_path = os.path.realpath(entry.path)
        if not os.path.exists(real_entry_path):
          raise MissingClasspathEntryError('Could not find {realpath} when attempting to link '
                                           '{src} into {dst}'
                                           .format(realpath=real_entry_path, src=entry.path, dst=symlink_path))

        os.symlink(real_entry_path, symlink_path)
        canonical_classpath.append(symlink_path)

      if save_classpath_file:
        classpath = [entry.path for entry in classpath_entries_for_target]
        with safe_open('{}classpath.txt'.format(classpath_prefix_for_target), 'w') as classpath_file:
          classpath_file.write(os.pathsep.join(classpath))
          classpath_file.write('\n')

  return canonical_classpath
def copy(self):
  """Returns a copy of this ClasspathProducts.

  Edits to the copy's classpaths or exclude associations will not affect the classpaths or
  excludes in the original. The copy is shallow though, so edits to the copy's product values
  will mutate the original's product values. See `UnionProducts.copy`.

  :API: public
  :rtype: :class:`ClasspathProducts`
  """
  # Copy the per-target association maps, but share the product values themselves.
  duplicate_classpaths = self._classpaths.copy()
  duplicate_excludes = self._excludes.copy()
  return ClasspathProducts(pants_workdir=self._pants_workdir,
                           classpaths=duplicate_classpaths,
                           excludes=duplicate_excludes)
def add_for_targets(self, targets, classpath_elements):
  """Adds the given classpath path elements to the products of every provided target."""
  for tgt in targets:
    self.add_for_target(tgt, classpath_elements)
def add_for_target(self, target, classpath_elements):
  """Adds classpath path elements to the products of the provided target.

  :param target: The target for which to add the classpath elements.
  :param classpath_elements: List of tuples, either (conf, filename) or
                             (conf, pants.backend.jvm.tasks.ClasspathEntry)
  """
  wrapped_elements = self._wrap_path_elements(classpath_elements)
  self._add_elements_for_target(target, wrapped_elements)
def add_jars_for_targets(self, targets, conf, resolved_jars):
  """Adds jar classpath elements to the products of the provided targets.

  The resolved jars are added in a way that works with excludes.

  :param targets: The targets to add the jars for.
  :param conf: The configuration.
  :param resolved_jars: A list of ResolvedJars.
  """
  entries = []
  for jar in resolved_jars:
    # A jar without a resolved on-disk path cannot be placed on any classpath.
    if not jar.pants_path:
      raise TaskError('Jar: {!s} has no specified path.'.format(jar.coordinate))
    entries.append((conf, ArtifactClasspathEntry(jar.pants_path, jar.coordinate,
                                                 jar.cache_path, jar.directory_digest)))

  for target in targets:
    self._add_elements_for_target(target, entries)
def remove_for_target(self, target, classpath_elements):
  """Removes the given entries for the target."""
  wrapped_elements = self._wrap_path_elements(classpath_elements)
  self._classpaths.remove_for_target(target, wrapped_elements)
def get_product_target_mappings_for_targets(self, targets, respect_excludes=True):
  """Gets the classpath products-target associations for the given targets.

  Product-target tuples are returned in order, optionally respecting target excludes.

  :param targets: The targets to lookup classpath products for.
  :param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
  :returns: The ordered (classpath products, target) tuples.
  """
  mappings = self._classpaths.get_product_target_mappings_for_targets(targets)
  if not respect_excludes:
    return mappings
  return self._filter_by_excludes(mappings, targets)
def get_artifact_classpath_entries_for_targets(self, targets, respect_excludes=True):
  """Gets the artifact classpath products for the given targets.

  Products are returned in order, optionally respecting target excludes, and the products only
  include external artifact classpath elements (ie: resolved jars).

  :param targets: The targets to lookup classpath products for.
  :param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
  :returns: The ordered (conf, classpath entry) tuples.
  :rtype: list of (string, :class:`ArtifactClasspathEntry`)
  """
  all_entries = self.get_classpath_entries_for_targets(targets,
                                                       respect_excludes=respect_excludes)
  # Keep only entries backed by external resolved artifacts.
  return [(conf, entry) for conf, entry in all_entries
          if ClasspathEntry.is_artifact_classpath_entry(entry)]
def get_internal_classpath_entries_for_targets(self, targets, respect_excludes=True):
  """Gets the internal classpath products for the given targets.

  Products are returned in order, optionally respecting target excludes, and the products only
  include internal artifact classpath elements (ie: no resolved jars).

  :param targets: The targets to lookup classpath products for.
  :param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
  :returns: The ordered (conf, classpath entry) tuples.
  :rtype: list of (string, :class:`ClasspathEntry`)
  """
  all_entries = self.get_classpath_entries_for_targets(targets,
                                                       respect_excludes=respect_excludes)
  # Keep only entries built within this repo (no external resolved jars).
  return [(conf, entry) for conf, entry in all_entries
          if ClasspathEntry.is_internal_classpath_entry(entry)]
def update(self, other):
  """Adds the contents of `other` to this ClasspathProducts.

  :param other: Another ClasspathProducts instance to merge into this one.
  :raises ValueError: If `other` was created against a different pants workdir.
  """
  if other._pants_workdir != self._pants_workdir:
    raise ValueError('Other ClasspathProducts from a different pants workdir {}'.format(other._pants_workdir))
  # Merge both the classpath and the exclude product mappings, target by target.
  merges = ((self._classpaths, other._classpaths), (self._excludes, other._excludes))
  for destination, source in merges:
    for target, products in source._products_by_target.items():
      destination.add_for_target(target, products)
"resource": ""
} |
def _validate_classpath_tuples(self, classpath, target):
  """Validates that every classpath entry sits under the pants working directory.

  Keeping all entries inside the workdir simplifies later relativization.

  :param classpath: A sequence of (ivy_conf, ClasspathEntry) 2-tuples.
  :param target: The target that the classpath tuples are being registered for.
  :raises: `TaskError` when any entry's path falls outside the work directory.
  """
  for conf, entry in classpath:
    # A relative path escaping the workdir must begin with os.pardir ('..').
    if os.path.relpath(entry.path, self._pants_workdir).startswith(os.pardir):
      raise TaskError(
        'Classpath entry {} for target {} is located outside the working directory "{}".'
        .format(entry.path, target.address.spec, self._pants_workdir))
"resource": ""
} |
def registrar_for_scope(cls, goal):
  """Returns a subclass of this registrar suitable for registering on the specified goal.

  Allows reuse of the same registrar for multiple goals, and also allows us to decouple task
  code from knowing which goal(s) the task is to be registered in.

  :param string goal: The goal name; it becomes the new subclass's `options_scope`.
  :returns: A dynamically-created subclass of `cls` bound to `goal`.
  """
  subclass_name = '{}_{}'.format(cls.__name__, goal)
  if PY2:
    # Python 2 requires `type()` names to be bytes rather than unicode.
    subclass_name = subclass_name.encode('utf-8')
  return type(subclass_name, (cls,), {'options_scope': goal})
"resource": ""
} |
def teardown_socket(s):
  """Shuts down and closes a socket.

  Half-closes the write side first (best effort), then always closes the socket.

  :param s: The socket to tear down.
  """
  try:
    try:
      s.shutdown(socket.SHUT_WR)
    except socket.error:
      # The peer may already have disconnected; closing is all that matters.
      pass
  finally:
    s.close()
"resource": ""
} |
q27271 | RecvBufferedSocket.recv | train | def recv(self, bufsize):
"""Buffers up to _chunk_size bytes when the internal buffer has less than `bufsize` bytes."""
assert bufsize > 0, 'a positive bufsize is required'
# Only touch the socket when the buffer cannot satisfy the request on its own.
if len(self._buffer) < bufsize:
# Wait (up to _select_timeout) for the socket to become readable before recv'ing.
readable, _, _ = safe_select([self._socket], [], [], self._select_timeout)
if readable:
# Read at least a full chunk to amortize syscalls across subsequent recv() calls.
recvd = self._socket.recv(max(self._chunk_size, bufsize))
self._buffer = self._buffer + recvd
# Serve the first `bufsize` bytes (may be fewer if the socket returned less) and keep the rest.
return_buf, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return return_buf | python | {
"resource": ""
} |
q27272 | JavascriptStyleBase._install_eslint | train | def _install_eslint(self, bootstrap_dir):
"""Install the ESLint distribution into the given bootstrap directory.
:param string bootstrap_dir: The directory to run the package-manager install in.
:returns: The bootstrap directory ESLint was installed to.
:rtype: string
:raises TaskError: If the install command exits non-zero.
"""
with pushd(bootstrap_dir):
# Install via the yarnpkg package manager, reported as a PREP workunit.
result, install_command = self.install_module(
package_manager=self.node_distribution.get_package_manager(package_manager=PACKAGE_MANAGER_YARNPKG),
workunit_name=self.INSTALL_JAVASCRIPTSTYLE_TARGET_NAME,
workunit_labels=[WorkUnitLabel.PREP])
if result != 0:
raise TaskError('Failed to install ESLint\n'
'\t{} failed with exit code {}'.format(install_command, result))
self.context.log.debug('Successfully installed ESLint to {}'.format(bootstrap_dir))
return bootstrap_dir | python | {
"resource": ""
} |
q27273 | PantsDaemon.shutdown | train | def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop.
:param dict service_thread_map: A mapping of service instance -> the thread running it.
"""
# Hold the lifecycle lock so teardown is serialized with other service lifecycle operations.
with self._services.lifecycle_lock:
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
self._logger.info('terminating pantsd')
# Signal the main run loop to exit.
self._kill_switch.set() | python | {
"resource": ""
} |
q27274 | PantsDaemon._close_stdio | train | def _close_stdio():
"""Close stdio streams to avoid output in the tty that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
# Capture the descriptor number before closing the Python file object.
file_no = fd.fileno()
fd.flush()
fd.close()
# NOTE(review): fd.close() normally closes the underlying descriptor already; confirm the
# extra os.close() is intended here (it may raise EBADF if the descriptor is already gone).
os.close(file_no) | python | {
"resource": ""
} |
q27275 | PantsDaemon._pantsd_logging | train | def _pantsd_logging(self):
"""A context manager that runs with pantsd logging.
Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that
we can safely reuse those fd numbers.
Yields a (log stream, native log filename) tuple for the daemon's log handler.
"""
# Ensure that stdio is closed so that we can safely reuse those file descriptors.
for fd in (0, 1, 2):
try:
# os.fdopen only succeeds on an open descriptor; success means stdio was not closed first.
os.fdopen(fd)
raise AssertionError(
'pantsd logging cannot initialize while stdio is open: {}'.format(fd))
except OSError:
pass
# Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
# for further forks.
with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
# Reinitialize logging for the daemon context.
init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
result = setup_logging(self._log_level, log_dir=self._log_dir, log_name=self.LOG_NAME, native=self._native)
# Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
# TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
# for `1,2`, and allow them to be used via `stdio_as`.
sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler)
sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_handler)
self._logger.debug('logging initialized')
yield (result.log_handler.stream, result.log_handler.native_filename) | python | {
"resource": ""
} |
q27276 | PantsDaemon._run_services | train | def _run_services(self, pants_services):
"""Service runner main loop.
Starts a thread per service, writes the daemon's pid/fingerprint metadata, then polls the
service threads for liveness until the kill switch is set or a service dies.
:param pants_services: The PantsServices container whose `.services` should be run.
"""
if not pants_services.services:
self._logger.critical('no services to run, bailing!')
return
service_thread_map = {service: self._make_thread(service)
for service in pants_services.services}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info('starting service {}'.format(service))
try:
service_thread.start()
except (RuntimeError, FSEventService.ServiceError):
# A single failed start tears down everything already running.
self.shutdown(service_thread_map)
raise PantsDaemon.StartupFailure('service {} failed to start, shutting down!'.format(service))
# Once all services are started, write our pid.
self.write_pid()
self.write_metadata_by_name('pantsd', self.FINGERPRINT_KEY, ensure_text(self.options_fingerprint))
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise PantsDaemon.RuntimeFailure('service failure for {}, shutting down!'.format(service))
else:
# Avoid excessive CPU utilization.
# (The timed join doubles as the poll interval between liveness checks.)
service_thread.join(self.JOIN_TIMEOUT_SECONDS) | python | {
"resource": ""
} |
def _write_named_sockets(self, socket_map):
  """Write multiple named sockets using a socket mapping.

  :param dict socket_map: A mapping of socket name -> socket info to persist.
  """
  for name, info in socket_map.items():
    self.write_named_socket(name, info)
"resource": ""
} |
q27278 | PantsDaemon.run_sync | train | def run_sync(self):
"""Synchronously run pantsd.
Closes stdio, installs daemon logging/exception handling, sets the process title, writes
service socket info, and then blocks in the service runner loop.
"""
# Switch log output to the daemon's log stream from here forward.
self._close_stdio()
with self._pantsd_logging() as (log_stream, log_filename):
# Register an exiter using os._exit to ensure we only close stdio streams once.
ExceptionSink.reset_exiter(Exiter(exiter=os._exit))
# We don't have any stdio streams to log to anymore, so we log to a file.
# We don't override the faulthandler destination because the stream we get will proxy things
# via the rust logging code, and faulthandler needs to be writing directly to a real file
# descriptor. When pantsd logging was originally initialised, we already set up faulthandler
# to log to the correct file descriptor, so don't override it.
#
# We can get tracebacks of the pantsd process by tailing the pantsd log and sending it
# SIGUSR2.
ExceptionSink.reset_interactive_output_stream(
log_stream,
override_faulthandler_destination=False,
)
# Reset the log location and the backtrace preference from the global bootstrap options.
global_bootstrap_options = self._bootstrap_options.for_global_scope()
ExceptionSink.reset_should_print_backtrace_to_terminal(
global_bootstrap_options.print_exception_stacktrace)
ExceptionSink.reset_log_location(global_bootstrap_options.pants_workdir)
self._native.set_panic_handler()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title('pantsd [{}]'.format(self._build_root))
# Write service socket information to .pids.
self._write_named_sockets(self._services.port_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services) | python | {
"resource": ""
} |
def needs_launch(self):
  """Determines if pantsd needs to be launched.

  N.B. This should always be called under care of the `lifecycle_lock`.

  :returns: True if the daemon needs launching, False otherwise.
  :rtype: bool
  """
  proposed_fingerprint = self.options_fingerprint
  self._logger.debug(
    'pantsd: is_alive={} new_fingerprint={} current_fingerprint={}'.format(
      self.is_alive(), proposed_fingerprint, self.fingerprint))
  return self.needs_restart(proposed_fingerprint)
"resource": ""
} |
q27280 | PantsDaemon.launch | train | def launch(self):
"""Launches pantsd in a subprocess.
N.B. This should always be called under care of the `lifecycle_lock`.
:returns: A Handle for the pantsd instance.
:rtype: PantsDaemon.Handle
"""
# Tear down any existing pantsd (leaving watchman running) and ensure watchman is up.
self.terminate(include_watchman=False)
self.watchman_launcher.maybe_launch()
self._logger.debug('launching pantsd')
self.daemon_spawn()
# Wait up to 60 seconds for pantsd to write its pidfile.
pantsd_pid = self.await_pid(60)
listening_port = self.read_named_socket('pailgun', int)
self._logger.debug('pantsd is running at pid {}, pailgun port is {}'
.format(self.pid, listening_port))
return self.Handle(pantsd_pid, listening_port, text_type(self._metadata_base_dir)) | python | {
"resource": ""
} |
q27281 | PantsDaemon.terminate | train | def terminate(self, include_watchman=True):
"""Terminates pantsd and watchman.
N.B. This should always be called under care of the `lifecycle_lock`.
:param bool include_watchman: `True` (the default) to also terminate watchman.
"""
# Delegate pantsd teardown to the superclass, then optionally stop watchman.
super(PantsDaemon, self).terminate()
if include_watchman:
self.watchman_launcher.terminate() | python | {
"resource": ""
} |
def _parse(cls, scope):
  """Parses the input scope into a normalized set of strings.

  :param scope: A string or tuple containing zero or more scope names.
  :return: A set of scope name strings, or a tuple with the default scope name.
  :rtype: set
  """
  if not scope:
    return ('default',)
  # A bare string is treated as a space-separated list of scope names.
  names = scope.split(' ') if isinstance(scope, string_types) else scope
  normalized = {str(name).lower() for name in names if name}
  return normalized or ('default',)
"resource": ""
} |
def in_scope(self, exclude_scopes=None, include_scopes=None):
  """Whether this scope should be included by the given inclusion and exclusion rules.

  :param Scope exclude_scopes: An optional Scope containing scope names to exclude. None (the
    default value) indicates that no filtering should be done based on exclude_scopes.
  :param Scope include_scopes: An optional Scope containing scope names to include. None (the
    default value) indicates that no filtering should be done based on include_scopes.
  :return: True if none of the input scopes are in `exclude_scopes`, and either (a) no include
    scopes are provided, or (b) at least one input scope is included in the `include_scopes` list.
  :rtype: bool
  :raises ValueError: If a non-None filter is not a Scope instance.
  """
  # Validate filter types up front (include first, preserving the historical error ordering).
  for param_name, param in (('include_scopes', include_scopes),
                            ('exclude_scopes', exclude_scopes)):
    if param is not None and not isinstance(param, Scope):
      raise ValueError('{} must be a Scope instance but was {}.'.format(param_name, type(param)))
  if exclude_scopes and any(name in exclude_scopes for name in self):
    return False
  if include_scopes and not any(name in include_scopes for name in self):
    return False
  return True
"resource": ""
} |
q27284 | Ivy.execute | train | def execute(self, jvm_options=None, args=None, executor=None,
workunit_factory=None, workunit_name=None, workunit_labels=None):
"""Executes the ivy commandline client with the given args.
Raises Ivy.Error if the command fails for any reason.
:param list jvm_options: An optional sequence of options for the underlying jvm.
:param list args: An optional sequence of args to pass to the ivy client.
:param executor: Java executor to run ivy with.
:param workunit_factory: An optional callable that can produce a workunit context.
:param string workunit_name: An optional name for the work unit.
:param list workunit_labels: An optional sequence of labels for the work unit.
"""
# NB(gmalmquist): It should be OK that we can't declare a subsystem_dependency in this file
# (because it's just a plain old object), because Ivy is only constructed by Bootstrapper, which
# makes an explicit call to IvySubsystem.global_instance() in its constructor, which in turn has
# a declared dependency on DistributionLocator.
executor = executor or SubprocessExecutor(DistributionLocator.cached())
runner = self.runner(jvm_options=jvm_options, args=args, executor=executor)
try:
# Serialize invocations under the resolution lock (presumably guarding the shared
# resolution cache — confirm against IvySubsystem).
with self.resolution_lock:
result = util.execute_runner(runner, workunit_factory, workunit_name, workunit_labels)
if result != 0:
raise self.Error('Ivy command failed with exit code {}{}'.format(
result, ': ' + ' '.join(args) if args else ''))
except executor.Error as e:
raise self.Error('Problem executing ivy: {}'.format(e)) | python | {
"resource": ""
} |
q27285 | Ivy.runner | train | def runner(self, jvm_options=None, args=None, executor=None):
"""Creates an ivy commandline client runner for the given args.
:param list jvm_options: An optional sequence of options for the underlying jvm.
:param list args: An optional sequence of args to pass to the ivy client.
:param executor: An optional `Executor`; defaults to a SubprocessExecutor.
:returns: A runner that will invoke org.apache.ivy.Main with the configured settings.
:raises ValueError: If `executor` is not an Executor instance.
"""
args = args or []
# Only inject our settings/cache flags when the caller has not supplied their own.
if self._ivy_settings and '-settings' not in args:
args = ['-settings', self._ivy_settings] + args
options = list(jvm_options) if jvm_options else []
if self._ivy_resolution_cache_dir and '-cache' not in args:
# TODO(John Sirois): Currently this is a magic property to support hand-crafted <caches/> in
# ivysettings.xml. Ideally we'd support either simple -caches or these hand-crafted cases
# instead of just hand-crafted. Clean this up by taking over ivysettings.xml and generating
# it from BUILD constructs.
options += ['-Divy.cache.dir={}'.format(self._ivy_resolution_cache_dir)]
options += self._extra_jvm_options
executor = executor or SubprocessExecutor(DistributionLocator.cached())
if not isinstance(executor, Executor):
raise ValueError('The executor argument must be an Executor instance, given {} of type {}'.format(
executor, type(executor)))
return executor.runner(classpath=self._classpath, main='org.apache.ivy.Main',
jvm_options=options, args=args) | python | {
"resource": ""
} |
q27286 | _get_runner | train | def _get_runner(classpath, main, jvm_options, args, executor,
cwd, distribution,
create_synthetic_jar, synthetic_jar_dir):
"""Gets the java runner for execute_java and execute_java_async.
:param list classpath: The classpath for the java program.
:param string main: The fully qualified class name of the program's entry point.
:param list jvm_options: Options for the underlying jvm.
:param list args: Args to pass to the java program.
:param executor: An optional java executor; defaults to a SubprocessExecutor of `distribution`.
:param string cwd: Optional working directory for the spawned process.
:param distribution: The java distribution used when no executor is supplied.
:param bool create_synthetic_jar: Whether to bundle the classpath into a synthetic jar.
:param string synthetic_jar_dir: Optional directory to store the synthetic jar in.
"""
executor = executor or SubprocessExecutor(distribution)
safe_cp = classpath
if create_synthetic_jar:
# Bundling into a single jar keeps the command line under platform ARG_MAX limits.
safe_cp = safe_classpath(classpath, synthetic_jar_dir)
logger.debug('Bundling classpath {} into {}'.format(':'.join(classpath), safe_cp))
return executor.runner(safe_cp, main, args=args, jvm_options=jvm_options, cwd=cwd) | python | {
"resource": ""
} |
q27287 | execute_java | train | def execute_java(classpath, main, jvm_options=None, args=None, executor=None,
workunit_factory=None, workunit_name=None, workunit_labels=None,
cwd=None, workunit_log_config=None, distribution=None,
create_synthetic_jar=True, synthetic_jar_dir=None, stdin=None):
"""Executes the java program defined by the classpath and main.
If `workunit_factory` is supplied, does so in the context of a workunit.
:param list classpath: the classpath for the java program
:param string main: the fully qualified class name of the java program's entry point
:param list jvm_options: an optional sequence of options for the underlying jvm
:param list args: an optional sequence of args to pass to the java program
:param executor: an optional java executor to use to launch the program; defaults to a subprocess
spawn of the default java distribution
:param workunit_factory: an optional callable that can produce a workunit context
:param string workunit_name: an optional name for the work unit; defaults to the main
:param list workunit_labels: an optional sequence of labels for the work unit
:param string cwd: optionally set the working directory
:param WorkUnit.LogConfig workunit_log_config: an optional tuple of options affecting reporting
:param bool create_synthetic_jar: whether to create a synthentic jar that includes the original
classpath in its manifest.
:param string synthetic_jar_dir: an optional directory to store the synthetic jar, if `None`
a temporary directory will be provided and cleaned up upon process exit.
:param file stdin: The stdin handle to use: by default None, meaning that stdin will
not be propagated into the process.
Returns the exit code of the java program.
Raises `pants.java.Executor.Error` if there was a problem launching java itself.
"""
runner = _get_runner(classpath, main, jvm_options, args, executor, cwd, distribution,
create_synthetic_jar, synthetic_jar_dir)
# Default the workunit name to the entry-point class when none was given.
workunit_name = workunit_name or main
return execute_runner(runner,
workunit_factory=workunit_factory,
workunit_name=workunit_name,
workunit_labels=workunit_labels,
workunit_log_config=workunit_log_config,
stdin=stdin) | python | {
"resource": ""
} |
q27288 | execute_java_async | train | def execute_java_async(classpath, main, jvm_options=None, args=None, executor=None,
workunit_factory=None, workunit_name=None, workunit_labels=None,
cwd=None, workunit_log_config=None, distribution=None,
create_synthetic_jar=True, synthetic_jar_dir=None):
"""This is just like execute_java except that it returns a ProcessHandler rather than a return code.
If `workunit_factory` is supplied, does so in the context of a workunit.
:param list classpath: the classpath for the java program
:param string main: the fully qualified class name of the java program's entry point
:param list jvm_options: an optional sequence of options for the underlying jvm
:param list args: an optional sequence of args to pass to the java program
:param executor: an optional java executor to use to launch the program; defaults to a subprocess
spawn of the default java distribution
:param workunit_factory: an optional callable that can produce a workunit context
:param string workunit_name: an optional name for the work unit; defaults to the main
:param list workunit_labels: an optional sequence of labels for the work unit
:param string cwd: optionally set the working directory
:param WorkUnit.LogConfig workunit_log_config: an optional tuple of options affecting reporting
:param bool create_synthetic_jar: whether to create a synthentic jar that includes the original
classpath in its manifest.
:param string synthetic_jar_dir: an optional directory to store the synthetic jar, if `None`
a temporary directory will be provided and cleaned up upon process exit.
Returns a ProcessHandler to the java program.
Raises `pants.java.Executor.Error` if there was a problem launching java itself.
"""
runner = _get_runner(classpath, main, jvm_options, args, executor, cwd, distribution,
create_synthetic_jar, synthetic_jar_dir)
# Default the workunit name to the entry-point class when none was given.
workunit_name = workunit_name or main
return execute_runner_async(runner,
workunit_factory=workunit_factory,
workunit_name=workunit_name,
workunit_labels=workunit_labels,
workunit_log_config=workunit_log_config) | python | {
"resource": ""
} |
q27289 | execute_runner | train | def execute_runner(runner, workunit_factory=None, workunit_name=None, workunit_labels=None,
workunit_log_config=None, stdin=None):
"""Executes the given java runner.
If `workunit_factory` is supplied, does so in the context of a workunit.
:param runner: the java runner to run
:param workunit_factory: an optional callable that can produce a workunit context
:param string workunit_name: an optional name for the work unit; defaults to the main
:param list workunit_labels: an optional sequence of labels for the work unit
:param WorkUnit.LogConfig workunit_log_config: an optional tuple of task options affecting reporting
:param file stdin: The stdin handle to use: by default None, meaning that stdin will
not be propagated into the process.
Returns the exit code of the java runner.
Raises `pants.java.Executor.Error` if there was a problem launching java itself.
"""
if not isinstance(runner, Executor.Runner):
raise ValueError('The runner argument must be a java Executor.Runner instance, '
'given {} of type {}'.format(runner, type(runner)))
if workunit_factory is None:
return runner.run(stdin=stdin)
else:
# Tag the workunit as a TOOL run, distinguishing nailgun-backed from plain JVM execution.
workunit_labels = [
WorkUnitLabel.TOOL,
WorkUnitLabel.NAILGUN if isinstance(runner.executor, NailgunExecutor) else WorkUnitLabel.JVM
] + (workunit_labels or [])
with workunit_factory(name=workunit_name, labels=workunit_labels,
cmd=runner.cmd, log_config=workunit_log_config) as workunit:
# Stream the child's stdout/stderr into the workunit's outputs.
ret = runner.run(stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'),
stdin=stdin)
workunit.set_outcome(WorkUnit.FAILURE if ret else WorkUnit.SUCCESS)
return ret | python | {
"resource": ""
} |
q27290 | execute_runner_async | train | def execute_runner_async(runner, workunit_factory=None, workunit_name=None, workunit_labels=None,
workunit_log_config=None):
"""Executes the given java runner asynchronously.
We can't use 'with' here because the workunit_generator's __exit__ function
must be called after the process exits, in the return_code_handler.
The wrapper around process.wait() needs to handle the same exceptions
as the contextmanager does, so we have code duplication.
We're basically faking the 'with' call to deal with asynchronous
results.
If `workunit_factory` is supplied, does so in the context of a workunit.
:param runner: the java runner to run
:param workunit_factory: an optional callable that can produce a workunit context
:param string workunit_name: an optional name for the work unit; defaults to the main
:param list workunit_labels: an optional sequence of labels for the work unit
:param WorkUnit.LogConfig workunit_log_config: an optional tuple of task options affecting reporting
Returns a ProcessHandler to the java process that is spawned.
Raises `pants.java.Executor.Error` if there was a problem launching java itself.
"""
if not isinstance(runner, Executor.Runner):
raise ValueError('The runner argument must be a java Executor.Runner instance, '
'given {} of type {}'.format(runner, type(runner)))
if workunit_factory is None:
return SubprocessProcessHandler(runner.spawn())
else:
workunit_labels = [
WorkUnitLabel.TOOL,
WorkUnitLabel.NAILGUN if isinstance(runner.executor, NailgunExecutor) else WorkUnitLabel.JVM
] + (workunit_labels or [])
# Manually enter the workunit; it is exited later in WorkUnitProcessHandler.wait below.
workunit_generator = workunit_factory(name=workunit_name, labels=workunit_labels,
cmd=runner.cmd, log_config=workunit_log_config)
workunit = workunit_generator.__enter__()
process = runner.spawn(stdout=workunit.output('stdout'), stderr=workunit.output('stderr'))
# N.B. `_` below is used as the conventional `self` parameter name for this throwaway class.
class WorkUnitProcessHandler(ProcessHandler):
def wait(_, timeout=None):
try:
ret = process.wait(timeout=timeout)
workunit.set_outcome(WorkUnit.FAILURE if ret else WorkUnit.SUCCESS)
workunit_generator.__exit__(None, None, None)
return ret
except BaseException:
# Give the workunit a chance to handle/record the failure; re-raise if it doesn't.
if not workunit_generator.__exit__(*sys.exc_info()):
raise
def kill(_):
return process.kill()
def terminate(_):
return process.terminate()
def poll(_):
return process.poll()
return WorkUnitProcessHandler() | python | {
"resource": ""
} |
def relativize_classpath(classpath, root_dir, followlinks=True):
  """Convert a classpath into paths relative to a directory.

  The relativized entries are eventually used by a jar file located in `root_dir` as its
  manifest attribute Class-Path. See
  https://docs.oracle.com/javase/7/docs/technotes/guides/extensions/spec.html#bundled

  :param list classpath: Classpath to be relativized.
  :param string root_dir: Directory to relativize urls in the classpath against; does not
    have to exist yet.
  :param bool followlinks: Whether to resolve symlinks before computing relative paths.
  :returns: Converted classpath of the same size as the input classpath.
  :rtype: list of strings
  """
  def _relativize(url):
    # Resolve symlinks first: e.g. on mac `/var` is a symlink to `/private/var`, and a relpath
    # computed between symlinked and real paths can miss a level of `..`, making root_dir +
    # relpath unlocatable. (Not needed only when all paths are relative.)
    real_url = os.path.realpath(url) if followlinks else url
    real_root = os.path.realpath(root_dir) if followlinks else root_dir
    relativized = os.path.relpath(real_url, real_root)
    # Directories get a trailing '/'; entries without one are assumed to be jars.
    # Note isdir follows symlinks, which is what we need here.
    if os.path.isdir(real_url):
      relativized += '/'
    return relativized
  return [_relativize(url) for url in classpath]
"resource": ""
} |
def safe_classpath(classpath, synthetic_jar_dir, custom_name=None):
  """Wraps a classpath into one synthetic jar that names the original entries
  in its manifest.

  The synthetic jar carries the original classpath in its manifest Class-Path
  attribute, ensuring the classpath length never exceeds the platform ARG_MAX.

  :param list classpath: Classpath to be bundled.
  :param string synthetic_jar_dir: Where to store the synthetic jar. If falsy,
    a temp directory is provided and cleaned up upon process exit; otherwise
    the jar remains in the supplied directory, only for debugging purpose.
  :param custom_name: Optional filename for the synthetic jar to be created.
  :returns: A classpath (singleton list with just the synthetic jar).
  :rtype: list of strings
  """
  if synthetic_jar_dir:
    safe_mkdir(synthetic_jar_dir)
  else:
    synthetic_jar_dir = safe_mkdtemp()

  # Quote each relativized entry so reserved characters can be safely passed
  # through the JVM classloader's manifest parsing.
  quoted_entries = [parse.quote(entry)
                    for entry in relativize_classpath(classpath, synthetic_jar_dir)]

  manifest = Manifest()
  manifest.addentry(Manifest.CLASS_PATH, ' '.join(quoted_entries))

  with temporary_file(root_dir=synthetic_jar_dir, cleanup=False, suffix='.jar') as jar_file:
    with open_zip(jar_file, mode='w', compression=ZIP_STORED) as jar:
      jar.writestr(Manifest.PATH, manifest.contents())
    if custom_name:
      custom_path = os.path.join(synthetic_jar_dir, custom_name)
      safe_concurrent_rename(jar_file.name, custom_path)
      return [custom_path]
    return [jar_file.name]
"resource": ""
} |
def create_relocate(cls, from_pattern, shade_pattern=None, shade_prefix=None):
  """Creates a rule which shades jar entries from one pattern to another.

  Examples: ::

      # Rename everything in the org.foobar.example package
      # to __shaded_by_pants__.org.foobar.example.
      shading_relocate('org.foobar.example.**')

      # Rename org.foobar.example.Main to org.foobar.example.NotMain
      shading_relocate('org.foobar.example.Main', 'org.foobar.example.NotMain')

      # Rename org.foobar package to com.barfoo package
      shading_relocate('org.foobar.**', 'com.barfoo.@1')

      # Rename everything in org.foobar.example package to __hello__.org.foobar.example
      shading_relocate('org.foobar.example.**', shade_prefix='__hello__')

  :param string from_pattern: Any fully-qualified classname which matches this pattern will be
    shaded. '*' is a wildcard that matches any individual package component, and '**' is a
    wildcard that matches any trailing pattern (ie the rest of the string).
  :param string shade_pattern: The shaded pattern to use, where ``@1``, ``@2``, ``@3``, etc are
    references to the groups matched by wildcards (groups are numbered from left to right). If
    omitted, this pattern is inferred from the input pattern, prefixed by the ``shade_prefix``
    (if provided). (Eg, a ``from_pattern`` of ``com.*.foo.bar.**`` implies a default
    ``shade_pattern`` of ``__shaded_by_pants__.com.@1.foo.@2``)
  :param string shade_prefix: Prefix to prepend when generating a ``shade_pattern`` (if a
    ``shade_pattern`` is not provided by the user). Defaults to '``__shaded_by_pants__.``'.
  """
  # NB: An `is None` test is required here rather than `or`, because the empty
  # string is a valid prefix which must not be replaced by the default prefix.
  if shade_prefix is None:
    shade_prefix = Shading.SHADE_PREFIX
  return RelocateRule.new(from_pattern, shade_pattern, shade_prefix)
"resource": ""
} |
def create_keep_package(cls, package_name, recursive=True):
  """Convenience constructor for a package keep rule.

  Essentially equivalent to just using ``shading_keep('package_name.**')``.

  :param string package_name: Package name to keep (eg, ``org.pantsbuild.example``).
  :param bool recursive: Whether to keep everything under any subpackage of ``package_name``,
    or just direct children of the package. (Defaults to True).
  """
  package_glob = cls._format_package_glob(package_name, recursive)
  return cls.create_keep(package_glob)
"resource": ""
} |
def create_zap_package(cls, package_name, recursive=True):
  """Convenience constructor for a package zap rule.

  Essentially equivalent to just using ``shading_zap('package_name.**')``.

  :param string package_name: Package name to remove (eg, ``org.pantsbuild.example``).
  :param bool recursive: Whether to remove everything under any subpackage of ``package_name``,
    or just direct children of the package. (Defaults to True).
  """
  package_glob = cls._format_package_glob(package_name, recursive)
  return cls.create_zap(package_glob)
"resource": ""
} |
def create_relocate_package(cls, package_name, shade_prefix=None, recursive=True):
  """Convenience constructor for a package relocation rule.

  Essentially equivalent to just using ``shading_relocate('package_name.**')``.

  :param string package_name: Package name to shade (eg, ``org.pantsbuild.example``).
  :param string shade_prefix: Optional prefix to apply to the package. Defaults to
    ``__shaded_by_pants__.``.
  :param bool recursive: Whether to rename everything under any subpackage of ``package_name``,
    or just direct children of the package. (Defaults to True).
  """
  package_glob = cls._format_package_glob(package_name, recursive)
  return cls.create_relocate(from_pattern=package_glob, shade_prefix=shade_prefix)
"resource": ""
} |
def exclude_package(cls, package_name=None, recursive=False):
  """Excludes the given fully qualified package name from shading.

  :param unicode package_name: A fully qualified package_name; eg: `org.pantsbuild`; `None` for
    the java default (root) package.
  :param bool recursive: `True` to exclude any package with `package_name` as a proper prefix;
    `False` by default.
  :returns: A `Shader.Rule` describing the shading exclusion.
  """
  if package_name:
    return Shading.create_exclude_package(package_name, recursive=recursive)
  # No package given: exclude the java default (root) package.
  pattern = '**' if recursive else '*'
  return Shading.create_exclude(pattern)
"resource": ""
} |
def shade_package(cls, package_name=None, recursive=False):
  """Includes the given fully qualified package name in shading.

  :param unicode package_name: A fully qualified package_name; eg: `org.pantsbuild`; `None` for
    the java default (root) package.
  :param bool recursive: `True` to include any package with `package_name` as a proper prefix;
    `False` by default.
  :returns: A `Shader.Rule` describing the packages to be shaded.
  """
  if package_name:
    return Shading.create_relocate_package(package_name, recursive=recursive)
  # No package given: shade the java default (root) package.
  pattern = '**' if recursive else '*'
  return Shading.create_relocate(pattern)
"resource": ""
} |
def assemble_binary_rules(self, main, jar, custom_rules=None):
  """Creates an ordered list of rules suitable for fully shading the given binary.

  The default rules will ensure the `main` class name is un-changed along with a minimal set of
  support classes but that everything else will be shaded.

  Any `custom_rules` are given highest precedence and so they can interfere with this automatic
  binary shading. In general it's safe to add exclusion rules to open up classes that need to be
  shared between the binary and the code it runs over. An example would be excluding the
  `org.junit.Test` annotation class from shading since a tool running junit needs to be able
  to scan for this annotation inside the user code it tests.

  :param unicode main: The main class to preserve as the entry point.
  :param unicode jar: The path of the binary jar the `main` class lives in.
  :param list custom_rules: An optional list of custom `Shader.Rule`s.
  :returns: a precedence-ordered list of `Shader.Rule`s
  """
  # Ordering is the whole game here: when a class matches multiple rules, the
  # 1st lexical match wins (see:
  # https://code.google.com/p/jarjar/wiki/CommandLineDocs#Rules_file_format).
  # So: custom rules first, then exclusions for the main class's package and
  # the jre packages, and finally shading rules for everything else at lowest
  # precedence.
  rules = list(custom_rules or [])

  # Keep the whole package holding `main` un-shaded: the main class may access
  # package-private classes, so the entire package must be preserved.
  main_package, dot, _ = main.rpartition('.')
  if not dot:
    # There is no package component; the main class is in the root (default)
    # package.
    main_package = None
  rules.append(self.exclude_package(main_package))

  for system_pkg in self._binary_package_excludes:
    rules.append(self.exclude_package(system_pkg, recursive=True))

  # Shade everything else.
  #
  # NB: A single wildcard catch-all (`rule **.* _shaded_.@1.@2`) would be
  # simpler, but as of jarjar 1.4 it improperly transforms resources in the
  # `main_package`. The jarjar binary jar itself stores its command line help
  # text as a resource in its main's package, so the catch-all breaks
  # recursively shading jarjar with itself. Enumerating the non-main packages
  # explicitly avoids this.
  for pkg in sorted(self._iter_jar_packages(jar)):
    if pkg != main_package:
      rules.append(self.shade_package(pkg))
  return rules
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.