_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def for_compiler(self, compiler, platform):
  """Return a Linker object intended to be compatible with the given `compiler`.

  :param compiler: The compiler whose executable and path entries the linker must agree with.
  :param platform: The platform being targeted (accepted but not used by this implementation).
  """
  # TODO(#6143): describe why the compiler needs to be first on the PATH!
  sequenced = self.linker.sequence(compiler, exclude_list_fields=['extra_args', 'path_entries'])
  with_compiler_paths = sequenced.prepend_field('path_entries', compiler.path_entries)
  return with_compiler_paths.copy(exe_filename=compiler.exe_filename)
def is_module_on_std_lib_path(cls, module):
  """Check whether `module` lives on the interpreter's standard-library path.

  Sometimes .py files are symlinked to the real python files (e.g. in a virtualenv),
  while the .pyc files are created under the virtualenv directory rather than under
  cls.STANDARD_LIB_PATH; hence both the module file and, for .pyc files, the
  corresponding .py file are checked.

  :param module: a module object (must expose `__file__`).
  :return: True if the module is on the interpreter's stdlib path, False otherwise.
  """
  real_path = os.path.realpath(module.__file__)
  if real_path.startswith(cls.STANDARD_LIB_PATH):
    return True
  base, ext = os.path.splitext(real_path)
  if ext == '.pyc':
    # The .pyc may live outside the stdlib while the .py it was compiled from is inside it.
    return os.path.realpath(base + '.py').startswith(cls.STANDARD_LIB_PATH)
  return False
def iter_import_chunks(self):
  """Yield the blank-line-separated chunks of import statements in a file.

  Each yielded chunk is a list of ast.Import/ast.ImportFrom nodes whose logical
  lines are contiguous; a gap between logical lines starts a new chunk.
  """
  group = []
  prev_logical = None  # Logical-line tuple of the most recent import seen.
  for node in self.python_file.tree.body:
    if not isinstance(node, (ast.Import, ast.ImportFrom)):
      continue
    if prev_logical and node.lineno != prev_logical[1]:
      # A previous chunk exists and this import is not adjacent to it: flush it.
      yield group
      group = [node]
    else:
      # Either the first import seen, or contiguous with the previous one.
      group.append(node)
    prev_logical = self.python_file.logical_lines[node.lineno]
  if group:
    yield group
def compiler(self, target):
  """Return the thrift compiler to use for the given target.

  :param target: The JavaThriftLibrary target to extract the thrift compiler from.
  :returns: The compiler declared on the target, or the configured default.
  :rtype: string
  """
  self._check_target(target)
  declared = target.compiler
  return declared if declared else self._default_compiler
def language(self, target):
  """Return the target language to generate thrift stubs for.

  :param target: The JavaThriftLibrary target to extract the language from.
  :returns: The language declared on the target, or the configured default.
  :rtype: string
  """
  self._check_target(target)
  declared = target.language
  return declared if declared else self._default_language
def namespace_map(self, target):
  """Return the namespace_map used for Thrift generation.

  :param target: The JavaThriftLibrary target to extract the namespace_map from.
  :returns: The namespaces to remap (old to new), from the target or the default.
  :rtype: dictionary
  """
  self._check_target(target)
  declared = target.namespace_map
  return declared if declared else self._default_namespace_map
def compiler_args(self, target):
  """Return the compiler_args used for Thrift generation.

  :param target: The JavaThriftLibrary target to extract the compiler args from.
  :returns: Extra arguments for the thrift compiler, from the target or the default.
  :rtype: list
  """
  self._check_target(target)
  declared = target.compiler_args
  return declared if declared else self._default_compiler_args
def default_java_namespace(self, target):
  """Return the default_java_namespace used for Thrift generation.

  :param target: The JavaThriftLibrary target to extract the default_java_namespace from.
  :returns: The default Java namespace used when not specified in the IDL.
  :rtype: string
  """
  self._check_target(target)
  declared = target.default_java_namespace
  return declared if declared else self._default_default_java_namespace
q27808 | UnpackRemoteSourcesBase._calculate_unpack_filter | train | def _calculate_unpack_filter(cls, includes=None, excludes=None, spec=None):
"""Take regex patterns and return a filter function.
:param list includes: List of include patterns to pass to _file_filter.
:param list excludes: List of exclude patterns to pass to _file_filter.
"""
include_patterns = cls.compile_patterns(includes or [],
field_name='include_patterns',
spec=spec)
logger.debug('include_patterns: {}'
.format(list(p.pattern for p in include_patterns)))
exclude_patterns = cls.compile_patterns(excludes or [],
field_name='exclude_patterns',
spec=spec)
logger.debug('exclude_patterns: {}'
.format(list(p.pattern for p in exclude_patterns)))
return lambda f: cls._file_filter(f, include_patterns, exclude_patterns) | python | {
"resource": ""
} |
def _resolve_deps(self, depmap):
  """Resolve a map of gen-key => target specs into a map of gen-key => targets.

  :param depmap: Mapping from category key to an iterable of target spec strings.
  :return: defaultdict mapping each category to an OrderedSet of resolved targets.
  :raises AddressLookupError: If any spec cannot be resolved in the build graph.
  """
  resolved = defaultdict(OrderedSet)
  for category, depspecs in depmap.items():
    bucket = resolved[category]
    for depspec in depspecs:
      address = Address.parse(depspec)
      try:
        self.context.build_graph.maybe_inject_address_closure(address)
        bucket.add(self.context.build_graph.get_target(address))
      except AddressLookupError as e:
        raise AddressLookupError('{}\n referenced from {} scope'.format(e, self.options_scope))
  return resolved
def parse_specs(cls, target_specs, build_root=None, exclude_patterns=None, tags=None):
  """Parse string specs into unique `Spec` objects.

  :param iterable target_specs: An iterable of string specs.
  :param string build_root: The path to the build root; detected when not given.
  :returns: A `Specs` object.
  """
  root = build_root or get_buildroot()
  parser = CmdLineSpecParser(root)
  unique_specs = tuple(OrderedSet(parser.parse_spec(s) for s in target_specs))
  return Specs(
    dependencies=unique_specs,
    exclude_patterns=exclude_patterns or tuple(),
    tags=tags)
def resolve_requirements(self, interpreter, req_libs):
  """Requirements resolution for PEX files.

  Resolves the given requirement libraries into a cached PEX on disk, keyed by the
  interpreter identity and a hash of the versioned targets, and returns a PEX
  wrapping that path.

  :param interpreter: Resolve against this :class:`PythonInterpreter`.
  :param req_libs: A list of :class:`PythonRequirementLibrary` targets to resolve.
  :returns: a PEX containing target requirements and any specified python dist targets.
  """
  with self.invalidated(req_libs) as invalidation_check:
    # If there are no relevant targets, we still go through the motions of resolving
    # an empty set of requirements, to prevent downstream tasks from having to check
    # for this special case.
    if invalidation_check.all_vts:
      target_set_id = VersionedTargetSet.from_versioned_targets(
        invalidation_check.all_vts).cache_key.hash
    else:
      target_set_id = 'no_targets'
    # We need to ensure that we are resolving for only the current platform if we are
    # including local python dist targets that have native extensions.
    targets_by_platform = pex_build_util.targets_by_platform(self.context.targets(), self._python_setup)
    if self._python_native_code_settings.check_build_for_current_platform_only(targets_by_platform):
      platforms = ['current']
    else:
      platforms = list(sorted(targets_by_platform.keys()))
    # Cache dir is keyed by interpreter and target-set hash.
    path = os.path.realpath(os.path.join(self.workdir, str(interpreter.identity), target_set_id))
    # Note that we check for the existence of the directory, instead of for invalid_vts,
    # to cover the empty case.
    if not os.path.isdir(path):
      # safe_concurrent_creation builds into a temp path and renames on success, so a
      # partially-built pex is never observed at `path`.
      with safe_concurrent_creation(path) as safe_path:
        pex_builder = PexBuilderWrapper.Factory.create(
          builder=PEXBuilder(path=safe_path, interpreter=interpreter, copy=True),
          log=self.context.log)
        pex_builder.add_requirement_libs_from(req_libs, platforms=platforms)
        pex_builder.freeze()
    return PEX(path, interpreter=interpreter)
def resolve_requirement_strings(self, interpreter, requirement_strings):
  """Resolve a list of pip-style requirement strings.

  Resolves into a cached PEX on disk, keyed by interpreter identity and an id
  derived from the (sorted) requirement strings, and returns a PEX wrapping it.

  :param interpreter: Resolve against this :class:`PythonInterpreter`.
  :param requirement_strings: Iterable of pip-style requirement strings.
  :returns: a PEX containing the resolved requirements.
  """
  requirement_strings = sorted(requirement_strings)
  # Pick a stable, human-friendly id where possible; hash only when there are many.
  if len(requirement_strings) == 0:
    req_strings_id = 'no_requirements'
  elif len(requirement_strings) == 1:
    req_strings_id = requirement_strings[0]
  else:
    req_strings_id = hash_all(requirement_strings)
  path = os.path.realpath(os.path.join(self.workdir, str(interpreter.identity), req_strings_id))
  if not os.path.isdir(path):
    reqs = [PythonRequirement(req_str) for req_str in requirement_strings]
    # Build into a temp dir and rename, so a partial build is never visible at `path`.
    with safe_concurrent_creation(path) as safe_path:
      pex_builder = PexBuilderWrapper.Factory.create(
        builder=PEXBuilder(path=safe_path, interpreter=interpreter, copy=True),
        log=self.context.log)
      pex_builder.add_resolved_requirements(reqs)
      pex_builder.freeze()
  return PEX(path, interpreter=interpreter)
def merged_pex(cls, path, pex_info, interpreter, pexes, interpeter_constraints=None):
  """Yields a pex builder at path with the given pexes already merged.

  Context manager: builds into a temp path that is renamed to `path` on success.

  :param path: Destination path for the merged pex.
  :param pex_info: PexInfo to seed the builder with; copied before mutation.
  :param interpreter: The interpreter for the merged pex.
  :param pexes: Pexes whose paths are merged onto the new pex's PEX_PATH.
  :param interpeter_constraints: Optional interpreter constraints to add.
    NOTE(review): this keyword name is misspelled, but it is part of the public
    keyword interface and so is left unchanged.
  :rtype: :class:`pex.pex_builder.PEXBuilder`
  """
  pex_paths = [pex.path() for pex in pexes if pex]
  if pex_paths:
    # Copy before merging so the caller's pex_info is not mutated.
    pex_info = pex_info.copy()
    pex_info.merge_pex_path(':'.join(pex_paths))
  with safe_concurrent_creation(path) as safe_path:
    builder = PEXBuilder(safe_path, interpreter, pex_info=pex_info)
    if interpeter_constraints:
      for constraint in interpeter_constraints:
        builder.add_interpreter_constraint(constraint)
    yield builder
def merge_pexes(cls, path, pex_info, interpreter, pexes, interpeter_constraints=None):
  """Generate a merged, frozen pex at `path` from the given pexes."""
  merged = cls.merged_pex(path, pex_info, interpreter, pexes, interpeter_constraints)
  with merged as builder:
    builder.freeze()
def artifact_cache_dir(self):
  """Where to cache resolution artifacts; unrelated to the general pants artifact cache."""
  configured = self.get_options().artifact_cache_dir
  return configured or os.path.join(self.scratch_dir, 'artifacts')
def compatibility_or_constraints(self, target):
  """Return the target's compatibility, or the configured interpreter constraints.

  Interpreter constraints supplied via the CLI flag take precedence over anything
  declared on the target.
  """
  flag_supplied = self.get_options().is_flagged('interpreter_constraints')
  if flag_supplied:
    return tuple(self.interpreter_constraints)
  return tuple(target.compatibility or self.interpreter_constraints)
def get_pex_python_paths():
  """Return the interpreter paths defined by PEX_PYTHON_PATH in a pexrc file.

  PEX_PYTHON_PATH (read from '/etc/pexrc' or '~/.pexrc') is a separator-delimited
  list of paths to interpreters that a pex can be built and run against.
  """
  ppp = Variables.from_rc().get('PEX_PYTHON_PATH')
  return ppp.split(os.pathsep) if ppp else []
def get_pyenv_paths(pyenv_root_func=None):
  """Return a sorted list of `bin` dirs of Python interpreters managed by pyenv.

  :param pyenv_root_func: A no-arg function that returns the pyenv root (or None when
    pyenv is absent). Defaults to running `pyenv root`, but can be overridden for testing.
  :return: List of '<pyenv root>/versions/<version>/bin' paths that exist on disk.
  """
  pyenv_root_func = pyenv_root_func or get_pyenv_root
  pyenv_root = pyenv_root_func()
  if pyenv_root is None:
    return []
  versions_dir = os.path.join(pyenv_root, 'versions')
  # A pyenv root with no installed versions has no `versions` dir; treat that the
  # same as pyenv being absent instead of letting os.listdir raise OSError.
  if not os.path.isdir(versions_dir):
    return []
  paths = []
  for version in sorted(os.listdir(versions_dir)):
    path = os.path.join(versions_dir, version, 'bin')
    if os.path.isdir(path):
      paths.append(path)
  return paths
def authenticate(self, provider, creds=None, cookies=None):
  """Authenticate against the specified provider.

  On success, the server's auth cookies are stored in `cookies` for later requests.

  :param str provider: Authorize against this provider.
  :param pants.auth.basic_auth.BasicAuthCreds creds: The creds to use.
    If unspecified, assumes that creds are set in the netrc file.
  :param pants.auth.cookies.Cookies cookies: Store the auth cookies in this instance.
    If unspecified, uses the global instance.
  :raises pants.auth.basic_auth.BasicAuthException: If auth fails due to misconfiguration or
    rejection by the server.
  """
  cookies = cookies or Cookies.global_instance()
  # Fail fast on any configuration gap before making a network call.
  if not provider:
    raise BasicAuthException('No basic auth provider specified.')
  provider_config = self.get_options().providers.get(provider)
  if not provider_config:
    raise BasicAuthException('No config found for provider {}.'.format(provider))
  url = provider_config.get('url')
  if not url:
    raise BasicAuthException('No url found in config for provider {}.'.format(provider))
  # Refuse to send basic-auth credentials over plaintext http unless explicitly allowed.
  if not self.get_options().allow_insecure_urls and not url.startswith('https://'):
    raise BasicAuthException('Auth url for provider {} is not secure: {}.'.format(provider, url))
  if creds:
    auth = requests.auth.HTTPBasicAuth(creds.username, creds.password)
  else:
    auth = None  # requests will use the netrc creds.
  response = requests.get(url, auth=auth)
  if response.status_code != requests.codes.ok:
    # On a 401 carrying a Basic challenge, surface the realm to the caller.
    if response.status_code == requests.codes.unauthorized:
      parsed = www_authenticate.parse(response.headers.get('WWW-Authenticate', ''))
      if 'Basic' in parsed:
        raise Challenged(url, response.status_code, response.reason, parsed['Basic']['realm'])
    raise BasicAuthException(url, response.status_code, response.reason)
  cookies.update(response.cookies)
def glob1(self, dir_relpath, glob):
  """Return the non-ignored paths in `dir_relpath` that match `glob`."""
  if self.isignored(dir_relpath, directory=True):
    return []
  matches = self._glob1_raw(dir_relpath, glob)
  prefix = self._relpath_no_dot(dir_relpath)
  return self._filter_ignored(matches, selector=lambda name: os.path.join(prefix, name))
def scandir(self, relpath):
  """Return non-ignored entries, relative to the root, in the directory at `relpath`.

  Raises (via _raise_access_ignored) if `relpath` itself is ignored.
  """
  if self.isignored(relpath, directory=True):
    self._raise_access_ignored(relpath)
  entries = self._scandir_raw(relpath)
  return self._filter_ignored(entries, selector=lambda entry: entry.path)
def isdir(self, relpath):
  """Return True if `relpath` is a directory and is not ignored."""
  if not self._isdir_raw(relpath):
    return False
  return not self.isignored(relpath, directory=True)
def isfile(self, relpath):
  """Return True if `relpath` is a file and is not ignored."""
  return not self.isignored(relpath) and self._isfile_raw(relpath)
def exists(self, relpath):
  """Return True if `relpath` exists and is not ignored."""
  # Check the dir-slashed form so directory ignore patterns apply.
  candidate = self._append_slash_if_dir_path(relpath)
  return not self.isignored(candidate) and self._exists_raw(relpath)
def content(self, file_relpath):
  """Returns the content for the file at `file_relpath`.

  :raises: (via _raise_access_ignored) if the path is ignored.
  """
  if self.isignored(file_relpath):
    self._raise_access_ignored(file_relpath)
  return self._content_raw(file_relpath)
def relative_readlink(self, relpath):
  """Execute `readlink` for `relpath`; the result may be a relative path.

  Raises (via _raise_access_ignored) if the path is ignored.
  """
  candidate = self._append_slash_if_dir_path(relpath)
  if self.isignored(candidate):
    self._raise_access_ignored(relpath)
  return self._relative_readlink_raw(relpath)
def walk(self, relpath, topdown=True):
  """Walk the file tree rooted at `relpath`.

  Works like os.walk but the returned root value is a relative path, and ignored
  paths are pruned from the results.
  """
  for root, dirs, files in self._walk_raw(relpath, topdown):
    ignored_dirs = self.ignore.match_files(
      [os.path.join(root, "{}/".format(d)) for d in dirs])
    ignored_files = self.ignore.match_files([os.path.join(root, f) for f in files])
    # Prune in place so that, for topdown walks, pruned dirs are not descended into.
    for ignored_dir in ignored_dirs:
      dirs.remove(fast_relpath(ignored_dir, root).rstrip('/'))
    for ignored_file in ignored_files:
      files.remove(fast_relpath(ignored_file, root))
    yield root, dirs, files
def isignored(self, relpath, directory=False):
  """Return True if `relpath` matches a pants ignore pattern.

  :param directory: When True, match against the trailing-slash (dir) form of the path.
  """
  normalized = self._relpath_no_dot(relpath)
  if directory:
    normalized = self._append_trailing_slash(normalized)
  return self.ignore.match_file(normalized)
q27829 | ProjectTree._filter_ignored | train | def _filter_ignored(self, entries, selector=None):
"""Given an opaque entry list, filter any ignored entries.
:param entries: A list or generator that produces entries to filter.
:param selector: A function that computes a path for an entry relative to the root of the
ProjectTree, or None to use identity.
"""
selector = selector or (lambda x: x)
prefixed_entries = [(self._append_slash_if_dir_path(selector(entry)), entry)
for entry in entries]
ignored_paths = set(self.ignore.match_files(path for path, _ in prefixed_entries))
return [entry for path, entry in prefixed_entries if path not in ignored_paths] | python | {
"resource": ""
} |
q27830 | ProjectTree._append_slash_if_dir_path | train | def _append_slash_if_dir_path(self, relpath):
"""For a dir path return a path that has a trailing slash."""
if self._isdir_raw(relpath):
return self._append_trailing_slash(relpath)
return relpath | python | {
"resource": ""
} |
def compute_dependency_specs(cls, kwargs=None, payload=None):
  """Tack imported_target_specs onto the traversable_specs generator for this target.

  :param kwargs: Optional target kwargs to derive specs from.
  :param payload: Optional target payload to derive specs from.
  """
  # First re-yield every spec the superclass contributes.
  for spec in super(ImportRemoteSourcesMixin, cls).compute_dependency_specs(kwargs, payload):
    yield spec
  # Then append the specs of the remote-source targets this target imports.
  imported_target_specs = cls.imported_target_specs(kwargs=kwargs, payload=payload)
  for spec in imported_target_specs:
    yield spec
def _tmpfile(self, cache_key, use):
  """Allocate tempfile on same device as cache with a suffix chosen to prevent collisions.

  Context manager yielding the open temp file. Rooting it under the cache root keeps
  it on the same filesystem as the cache.

  :param cache_key: Cache key whose id is mixed into the temp file suffix.
  :param use: Short usage tag (e.g. 'read'/'write') also mixed into the suffix.
  """
  with temporary_file(suffix=cache_key.id + use, root_dir=self._cache_root,
                      permissions=self._permissions) as tmpfile:
    yield tmpfile
def insert_paths(self, cache_key, paths):
  """Gather paths into artifact, store it, and yield the path to stored artifact tarball.

  Context manager: the paths are collected into a temp file, stored under `cache_key`,
  and the stored tarball's path is yielded to the caller.
  """
  with self._tmpfile(cache_key, 'write') as tmp:
    self._artifact(tmp.name).collect(paths)
    yield self._store_tarball(cache_key, tmp.name)
def store_and_use_artifact(self, cache_key, src, results_dir=None):
  """Store and then extract the artifact from the given `src` iterator for the given cache_key.

  :param cache_key: Cache key for the artifact.
  :param src: Iterator over binary data to store for the artifact.
  :param str results_dir: The path to the expected destination of the artifact extraction: will
    be cleared both before extraction, and after a failure to extract.
  :returns: True on success.
  """
  with self._tmpfile(cache_key, 'read') as tmp:
    # Spool the incoming bytes to a temp file, then store that as the cached tarball.
    for chunk in src:
      tmp.write(chunk)
    tmp.close()
    tarball = self._store_tarball(cache_key, tmp.name)
    artifact = self._artifact(tarball)
    # NOTE(mateo): The two clean=True args passed in this method are likely safe, since the cache will by
    # definition be dealing with unique results_dir, as opposed to the stable vt.results_dir (aka 'current').
    # But if by chance it's passed the stable results_dir, safe_makedir(clean=True) will silently convert it
    # from a symlink to a real dir and cause mysterious 'Operation not permitted' errors until the workdir is cleaned.
    if results_dir is not None:
      safe_mkdir(results_dir, clean=True)
    try:
      artifact.extract()
    except Exception:
      # Do our best to clean up after a failed artifact extraction. If a results_dir has been
      # specified, it is "expected" to represent the output destination of the extracted
      # artifact, and so removing it should clear any partially extracted state.
      if results_dir is not None:
        safe_mkdir(results_dir, clean=True)
      # Also delete the stored tarball so a corrupt artifact is not served later.
      safe_delete(tarball)
      raise
    return True
def prune(self, root):
  """Prune stale cache files under `root`.

  If the option --cache-target-max-entry is greater than zero, all but the N newest
  cache files for each target/task are removed.

  :param str root: The path under which cacheable artifacts will be cleaned.
  """
  limit = self._max_entries_per_target
  if limit and os.path.isdir(root):
    safe_rm_oldest_items_in_dir(root, limit)
def hash_all(strs, digest=None):
  """Return a hex hash of the concatenation of all the strings in `strs`.

  If a hashlib message digest is not supplied, a new sha1 message digest is used.
  """
  hasher = digest or hashlib.sha1()
  for s in strs:
    hasher.update(ensure_binary(s))
  hexed = hasher.hexdigest()
  return hexed if PY3 else hexed.decode('utf-8')
def hash_file(path, digest=None):
  """Hash the contents of the file at `path` and return the hash digest in hex form.

  If a hashlib message digest is not supplied, a new sha1 message digest is used.
  """
  hasher = digest or hashlib.sha1()
  with open(path, 'rb') as fd:
    # Read in fixed-size chunks until EOF (read() returns b'').
    for chunk in iter(lambda: fd.read(8192), b''):
      hasher.update(chunk)
  hexed = hasher.hexdigest()
  return hexed if PY3 else hexed.decode('utf-8')
def json_hash(obj, digest=None, encoder=None):
  """Hash `obj` by dumping it to canonical (sorted-key, ascii) JSON.

  :param obj: An object that can be rendered to json using the given `encoder`.
  :param digest: An optional `hashlib` compatible message digest. Defaults to `hashlib.sha1`.
  :param encoder: An optional custom json encoder.
  :type encoder: :class:`json.JSONEncoder`
  :returns: A hash of the given `obj` according to the given `encoder`.
  :rtype: str

  :API: public
  """
  canonical = json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True, cls=encoder)
  return hash_all(canonical, digest=digest)
def is_in_shard(self, s):
  """Return True iff the string `s` is in this shard.

  :param string s: The string to check.
  """
  shard_of_s = self.compute_shard(s, self._nshards)
  return shard_of_s == self._shard
def classpath(cls, targets, classpath_products, confs=('default',)):
  """Return the classpath as a list of paths covering all the passed targets.

  :param targets: Targets to build an aggregated classpath for.
  :param ClasspathProducts classpath_products: Product containing classpath elements.
  :param confs: The list of confs for use by this classpath.
  :returns: The classpath as a list of path elements.
  :rtype: list of string
  """
  entries = classpath_products.get_for_targets(targets)
  return list(cls._classpath_iter(entries, confs=confs))
def internal_classpath(cls, targets, classpath_products, confs=('default',)):
  """Return the internal classpath entries for a classpath covering all `targets`.

  Any classpath entries contributed by external dependencies are omitted.

  :param targets: Targets to build an aggregated classpath for.
  :param ClasspathProducts classpath_products: Product containing classpath elements.
  :param confs: The list of confs for use by this classpath.
  :returns: The classpath as a list of path elements.
  :rtype: list of string
  """
  internal_tuples = classpath_products.get_internal_classpath_entries_for_targets(targets)
  accepted = cls._filtered_classpath_by_confs_iter(internal_tuples, confs)
  return [entry.path for entry in cls._entries_iter(accepted)]
def classpath_by_targets(cls, targets, classpath_products, confs=('default',)):
  """Return classpath entries grouped by their targets for the given `targets`.

  :param targets: The targets to lookup classpath products for.
  :param ClasspathProducts classpath_products: Product containing classpath elements.
  :param confs: The list of confs for use by this classpath.
  :returns: The ordered (target, classpath) mappings.
  :rtype: OrderedDict
  """
  classpath_target_tuples = classpath_products.get_product_target_mappings_for_targets(targets)
  # Each item is ((conf, entry), target); keep only those whose conf is accepted.
  filtered_items_iter = filter(cls._accept_conf_filter(confs, lambda x: x[0][0]),
                               classpath_target_tuples)
  # Group classpath entries by target, preserving first-seen target order.
  # (Replaces the non-idiomatic `if not target in ...` membership dance with setdefault.)
  target_to_classpath = OrderedDict()
  for (_conf, entry), target in filtered_items_iter:
    target_to_classpath.setdefault(target, []).append(entry)
  return target_to_classpath
def _setup_pailgun(self):
  """Sets up a PailgunServer instance.

  :returns: A PailgunServer bound to this service's address, wired to construct a
    runner per request and to guard teardown with the daemon lifecycle lock.
  """
  # Constructs and returns a runnable PantsRunner.
  def runner_factory(sock, arguments, environment):
    return self._runner_class.create(
      sock,
      arguments,
      environment,
      self.services,
      self._scheduler_service,
    )
  # Plumb the daemon's lifecycle lock to the `PailgunServer` to safeguard teardown.
  # This indirection exists to allow the server to be created before PantsService.setup
  # has been called to actually initialize the `services` field.
  @contextmanager
  def lifecycle_lock():
    with self.services.lifecycle_lock:
      yield
  return PailgunServer(self._bind_addr, runner_factory, lifecycle_lock, self._request_complete_callback)
q27844 | HtmlReporter._emit | train | def _emit(self, s):
"""Append content to the main report file."""
if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.
self._report_file.write(s)
self._report_file.flush() | python | {
"resource": ""
} |
q27845 | HtmlReporter._overwrite | train | def _overwrite(self, filename, func, force=False):
"""Overwrite a file with the specified contents.
Write times are tracked, too-frequent overwrites are skipped, for performance reasons.
:param filename: The path under the html dir to write to.
:param func: A no-arg function that returns the contents to write.
:param force: Whether to force a write now, regardless of the last overwrite time.
"""
now = int(time.time() * 1000)
last_overwrite_time = self._last_overwrite_time.get(filename) or now
# Overwrite only once per second.
if (now - last_overwrite_time >= 1000) or force:
if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.
with open(os.path.join(self._html_dir, filename), 'w') as f:
f.write(func())
self._last_overwrite_time[filename] = now | python | {
"resource": ""
} |
def _htmlify_text(self, s):
  """Escape `s` for HTML, colorize its ANSI codes, and linkify paths/URLs."""
  escaped = html.escape(s)
  colored = self._handle_ansi_color_codes(escaped)
  linked = linkify(self._buildroot, colored, self._linkify_memo)
  # NOTE(review): '</br>' is not the standard void-element form ('<br/>'); browsers
  # tolerate it, so it is preserved here — confirm before changing.
  return linked.replace('\n', '</br>')
q27847 | HtmlReporter._handle_ansi_color_codes | train | def _handle_ansi_color_codes(self, s):
"""Replace ansi escape sequences with spans of appropriately named css classes."""
parts = HtmlReporter._ANSI_COLOR_CODE_RE.split(s)
ret = []
span_depth = 0
# Note that len(parts) is always odd: text, code, text, code, ..., text.
for i in range(0, len(parts), 2):
ret.append(parts[i])
if i + 1 < len(parts):
for code in parts[i + 1].split(';'):
if code == 0: # Reset.
while span_depth > 0:
ret.append('</span>')
span_depth -= 1
else:
ret.append('<span class="ansi-{}">'.format(code))
span_depth += 1
while span_depth > 0:
ret.append('</span>')
span_depth -= 1
return ''.join(ret) | python | {
"resource": ""
} |
def create(cls, exiter, args, env, target_roots=None, daemon_graph_session=None,
           options_bootstrapper=None):
  """Creates a new LocalPantsRunner instance by parsing options.

  :param Exiter exiter: The Exiter instance to use for this run.
  :param list args: The arguments (e.g. sys.argv) for this run.
  :param dict env: The environment (e.g. os.environ) for this run.
  :param TargetRoots target_roots: The target roots for this run.
  :param LegacyGraphSession daemon_graph_session: The graph helper for this session.
  :param OptionsBootstrapper options_bootstrapper: The OptionsBootstrapper instance to reuse.
  """
  build_root = get_buildroot()
  options, build_config, options_bootstrapper = cls.parse_options(
    args,
    env,
    setup_logging=True,
    options_bootstrapper=options_bootstrapper,
  )
  global_options = options.for_global_scope()
  # Option values are usually computed lazily on demand,
  # but command line options are eagerly computed for validation.
  for scope in options.scope_to_flags.keys():
    options.for_scope(scope)
  # Verify configs.
  if global_options.verify_config:
    options_bootstrapper.verify_configs_against_options(options)
  # If we're running with the daemon, we'll be handed a session from the
  # resident graph helper - otherwise initialize a new one here.
  graph_session = cls._maybe_init_graph_session(
    daemon_graph_session,
    options_bootstrapper,
    build_config,
    options
  )
  target_roots = cls._maybe_init_target_roots(
    target_roots,
    graph_session,
    options,
    build_root
  )
  # PANTS_PROFILE points at a file to write cProfile stats to, when set.
  profile_path = env.get('PANTS_PROFILE')
  return cls(
    build_root,
    exiter,
    options,
    options_bootstrapper,
    build_config,
    target_roots,
    graph_session,
    # Presence of a daemon session marks this as a daemon-hosted run.
    daemon_graph_session is not None,
    profile_path
  )
q27849 | LocalPantsRunner._maybe_handle_help | train | def _maybe_handle_help(self):
"""Handle requests for `help` information."""
if self._options.help_request:
help_printer = HelpPrinter(self._options)
result = help_printer.print_help()
self._exiter(result) | python | {
"resource": ""
} |
q27850 | LocalPantsRunner._compute_final_exit_code | train | def _compute_final_exit_code(*codes):
"""Returns the exit code with higher abs value in case of negative values."""
max_code = None
for code in codes:
if max_code is None or abs(max_code) < abs(code):
max_code = code
return max_code | python | {
"resource": ""
} |
def import_oracle(self):
  """Return an import oracle that can help look up and categorize imports.

  :rtype: :class:`ImportOracle`
  """
  workunit_factory = self.context.new_workunit
  return ImportOracle(go_dist=self.go_dist, workunit_factory=workunit_factory)
def go_stdlib(self):
  """Return the set of all Go standard library import paths.

  :rtype: frozenset of string
  """
  listing = self._go_dist.create_go_cmd('list', args=['std']).check_output()
  return frozenset(listing.decode('utf-8').strip().split())
def list_imports(self, pkg, gopath=None):
  """Return a listing of the dependencies of the given package.

  Runs `go list -json <pkg>` inside a workunit and parses its JSON output.

  :param string pkg: The package whose files to list all dependencies of.
  :param string gopath: An optional $GOPATH which points to a Go workspace containing `pkg`.
  :returns: The import listing for `pkg` that represents all its dependencies.
  :rtype: :class:`ImportOracle.ImportListing`
  :raises: :class:`ImportOracle.ListDepsError` if there was a problem listing the dependencies
    of `pkg`.
  """
  go_cmd = self._go_dist.create_go_cmd('list', args=['-json', pkg], gopath=gopath)
  with self._workunit_factory('list {}'.format(pkg), cmd=str(go_cmd),
                              labels=[WorkUnitLabel.TOOL]) as workunit:
    # TODO(John Sirois): It would be nice to be able to tee the stdout to the workunit to we have
    # a capture of the json available for inspection in the server console.
    process = go_cmd.spawn(stdout=subprocess.PIPE, stderr=workunit.output('stderr'))
    out, _ = process.communicate()
    returncode = process.returncode
    workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
    if returncode != 0:
      raise self.ListDepsError('Problem listing imports for {}: {} failed with exit code {}'
                               .format(pkg, go_cmd, returncode))
    data = json.loads(out.decode('utf-8'))
    # XTestImports are for black box tests. These test files live inside the package dir but
    # declare a different package and thus can only access the public members of the package's
    # production code. This style of test necessarily means the test file will import the main
    # package. For pants, this would lead to a cyclic self-dependency, so we omit the main
    # package as implicitly included as its own dependency.
    x_test_imports = [i for i in data.get('XTestImports', []) if i != pkg]
    return self.ImportListing(pkg_name=data.get('Name'),
                              imports=data.get('Imports', []),
                              test_imports=data.get('TestImports', []),
                              x_test_imports=x_test_imports)
def linkify(buildroot, s, memoized_urls):
  """Augment text by heuristically finding URL and file references and turning them into links.

  http(s) URLs are linked as-is; paths inside the buildroot are linked to the
  reporting server's /browse/ endpoint, with BUILD-target specs resolved to their
  BUILD file when possible.

  :param string buildroot: The base directory of the project.
  :param string s: The text to insert links into.
  :param dict memoized_urls: A cache of text to links so repeated substitutions don't require
    additional file stats calls.
  """
  def memoized_to_url(m):
    # to_url uses None to signal not to replace the text,
    # so we use a different sentinel value.
    value = memoized_urls.get(m.group(0), _NO_URL)
    if value is _NO_URL:
      value = to_url(m)
      memoized_urls[m.group(0)] = value
    return value
  def to_url(m):
    if m.group(1):
      return m.group(0)  # It's an http(s) url.
    path = m.group(0)
    if path.startswith('/'):
      # Absolute path: make it relative to the buildroot for the /browse/ endpoint.
      path = os.path.relpath(path, buildroot)
    elif path.startswith('..'):
      # The path is not located inside the buildroot, so it's definitely not a BUILD file.
      return None
    else:
      # The path is located in the buildroot: see if it's a reference to a target in a BUILD file.
      parts = path.split(':')
      if len(parts) == 2:
        putative_dir = parts[0]
      else:
        putative_dir = path
      if os.path.isdir(os.path.join(buildroot, putative_dir)):
        build_files = list(BuildFile.get_build_files_family(
          FileSystemProjectTree(buildroot),
          putative_dir))
        if build_files:
          path = build_files[0].relpath
        else:
          return None
    if os.path.exists(os.path.join(buildroot, path)):
      # The reporting server serves file content at /browse/<path_from_buildroot>.
      return '/browse/{}'.format(path)
    else:
      return None
  def maybe_add_link(url, text):
    return '<a target="_blank" href="{}">{}</a>'.format(url, text) if url else text
  return _PATH_RE.sub(lambda m: maybe_add_link(memoized_to_url(m), m.group(0)), s)
def assert_list(obj, expected_type=string_types, can_be_none=True, default=(), key_arg=None,
                allowable=(list, Fileset, OrderedSet, set, tuple), raise_type=ValueError):
    """Ensure a parameter set by a user in a BUILD file is an acceptable list type.

    :API: public

    :param obj: The object that may be a list; passes if its type is in `allowable`.
    :param expected_type: The expected type of the returned list's contents.
    :param can_be_none: Whether `obj` may be None; if True and `obj` is None, `default` is used.
    :param default: The value returned when `can_be_none` is True and `obj` is None.
    :param key_arg: The name of the key `obj` belongs to, used in error messages.
    :param allowable: Acceptable container types for `obj` (deliberately excludes
      arbitrary iterables such as strings).
    :param raise_type: The error type raised when validation fails.
    """
    key_msg = '' if key_arg is None else "In key '{}': ".format(key_arg)
    allowable = tuple(allowable)
    if obj is None:
        if not can_be_none:
            raise raise_type(
                '{}Expected an object of acceptable type {}, received None and can_be_none is False'
                .format(key_msg, allowable))
        return list(default)
    if not isinstance(obj, allowable):
        raise raise_type(
            '{}Expected an object of acceptable type {}, received {} instead'
            .format(key_msg, allowable, obj))
    checked = list(obj)
    for element in checked:
        if not isinstance(element, expected_type):
            raise raise_type(
                '{}Expected a list containing values of type {}, instead got a value {} of {}'
                .format(key_msg, expected_type, element, element.__class__))
    return checked
def add_timing(self, label, secs, is_tool=False):
    """Accumulate a timing against `label`.

    :param label: Aggregation key for the timing.
    :param secs: Duration in seconds (a float, so fractional seconds are allowed).
    :param is_tool: True if this label represents a tool invocation.
    """
    self._timings_by_path[label] += secs
    if is_tool:
        self._tool_labels.add(label)
    # Check directory existence in case we're a clean-all: in that case we don't
    # want to write anything.
    if self._path and os.path.exists(os.path.dirname(self._path)):
        with open(self._path, 'w') as outfile:
            for entry in self.get_all():
                outfile.write('{label}: {timing}\n'.format(**entry))
def get_all(self):
    """Return all timings, sorted by decreasing duration.

    Each element is a dict: {'label': <label>, 'timing': <seconds>, 'is_tool': <bool>}.
    """
    ordered = sorted(self._timings_by_path.items(), key=lambda item: item[1], reverse=True)
    return [{'label': label, 'timing': timing, 'is_tool': label in self._tool_labels}
            for label, timing in ordered]
def files_for_target(self, target):
    """Return the set of abs paths of source, class or jar files provided by `target`.

    The runtime classpath for a target must already have been finalized in order to
    compute its provided files.
    """
    def iter_paths():
        # Compute src -> target.
        if isinstance(target, JvmTarget):
            for src in target.sources_relative_to_buildroot():
                yield os.path.join(self.buildroot, src)
        # TODO(Tejal Desai): pantsbuild/pants/65: Remove java_sources attribute for ScalaLibrary
        if isinstance(target, ScalaLibrary):
            for java_source in target.java_sources:
                for src in java_source.sources_relative_to_buildroot():
                    yield os.path.join(self.buildroot, src)
        # Compute classfile -> target and jar -> target.
        for classfile in ClasspathUtil.classpath_contents((target,), self.runtime_classpath):
            yield classfile
        # And jars; for binary deps, zinc doesn't emit precise deps (yet).
        for entry in ClasspathUtil.classpath((target,), self.runtime_classpath):
            if ClasspathUtil.is_jar(entry):
                yield entry
    return set(iter_paths())
def targets_by_file(self, targets):
    """Map abs path of each source, class or jar file to an OrderedSet of providing targets.

    The value is usually a singleton, because a source or class file belongs to a single
    target. A single jar, however, may be provided (transitively or intransitively) by
    multiple JarLibrary targets; if a JarLibrary depends on a jar directly, that
    "canonical" target is first in the set.
    """
    mapping = defaultdict(OrderedSet)
    for tgt in targets:
        for path in self.files_for_target(tgt):
            mapping[path].add(tgt)
    return mapping
def targets_for_class(self, target, classname):
    """Search `target`'s transitive closure for targets whose classes contain `classname`."""
    matches = set()
    # NB: use a distinct loop variable rather than shadowing the `target` parameter.
    for candidate in target.closure():
        if any(classname in provided for provided in self._target_classes(candidate)):
            matches.add(candidate)
    return matches
def _target_classes(self, target):
    """Return the set of class names provided by `target`.

    Called at the target level so results memoize efficiently.
    """
    contents = ClasspathUtil.classpath_contents((target,), self.runtime_classpath)
    candidate_names = (ClasspathUtil.classname_for_rel_classfile(f) for f in contents)
    # classname_for_rel_classfile yields a falsy value for non-classfiles; drop those.
    return {name for name in candidate_names if name}
def _jar_classfiles(self, jar_file):
    """Yield the classfile entries contained in `jar_file`."""
    for entry in ClasspathUtil.classpath_entries_contents([jar_file]):
        if entry.endswith('.class'):
            yield entry
def bootstrap_jar_classfiles(self):
    """Return the set of classfiles found across all JVM bootstrap jars."""
    return {classfile
            for jar in self._find_all_bootstrap_jars()
            for classfile in self._jar_classfiles(jar)}
def compute_transitive_deps_by_target(self, targets):
    """Map each target to the set of all targets it depends on, transitively."""
    transitive_deps_by_target = defaultdict(set)
    # Walk from least to most dependent so each dep's closure is complete when read.
    for tgt in reversed(sort_targets(targets)):
        closure = set()
        for dep in tgt.dependencies:
            closure.update(transitive_deps_by_target.get(dep, []))
            closure.add(dep)
        # A java_sources target may itself have dependencies -- in particular it can
        # depend back on the original target -- so record those explicitly.
        if hasattr(tgt, 'java_sources'):
            for java_source_target in tgt.java_sources:
                for transitive_dep in java_source_target.dependencies:
                    transitive_deps_by_target[java_source_target].add(transitive_dep)
        transitive_deps_by_target[tgt] = closure
    return transitive_deps_by_target
def resolve_aliases(self, target, scope=None):
    """Resolve aliases in the direct dependencies of `target`.

    :param target: The direct dependencies of this target are included.
    :param scope: When specified, only deps with this scope are included. This is more
      than a filter: it prunes the subgraphs represented by aliases with un-matched scopes.
    :returns: An iterator of (resolved_dependency, resolved_from) tuples, where
      `resolved_from` is the top level target alias that depends on `resolved_dependency`,
      or None if `resolved_dependency` is not behind an alias.
    """
    for declared in target.dependencies:
        if scope is not None and declared.scope != scope:
            # Only deps with the requested scope are eligible (e.g. only `DEFAULT`
            # scoped deps for the unused dep check).
            continue
        if type(declared) in (AliasTarget, Target):
            # An alias: recurse to expand its own dependencies.
            for resolved, _ in self.resolve_aliases(declared, scope=scope):
                yield resolved, declared
        else:
            yield declared, None
def ivy_classpath(self, targets, silent=True, workunit_name=None):
    """Create and return the resolved artifact classpath for the passed targets.

    :API: public

    :param targets: A collection of targets to resolve a classpath for.
    :type targets: collection.Iterable
    """
    resolve_result = self._ivy_resolve(targets, silent=silent, workunit_name=workunit_name)
    return resolve_result.resolved_artifact_paths
def _ivy_resolve(self,
                 targets,
                 executor=None,
                 silent=False,
                 workunit_name=None,
                 confs=None,
                 extra_args=None,
                 invalidate_dependents=False,
                 pinned_artifacts=None):
    """Resolves external dependencies for the given targets.

    If there are no targets suitable for jvm transitive dependency resolution, an empty result is
    returned.

    :param targets: The targets to resolve jvm dependencies for.
    :type targets: :class:`collections.Iterable` of :class:`pants.build_graph.target.Target`
    :param executor: A java executor to run ivy with.
    :type executor: :class:`pants.java.executor.Executor`
    :param confs: The ivy configurations to resolve; ('default',) by default.
    :type confs: :class:`collections.Iterable` of string
    :param extra_args: Any extra command line arguments to pass to ivy.
    :type extra_args: list of string
    :param bool invalidate_dependents: `True` to invalidate dependents of targets that needed to be
      resolved.
    :returns: The result of the resolve.
    :rtype: IvyResolveResult
    """
    # If there are no targets, we don't need to do a resolve.
    if not targets:
        return NO_RESOLVE_RUN_RESULT
    confs = confs or ('default',)
    fingerprint_strategy = IvyResolveFingerprintStrategy(confs)
    with self.invalidated(targets,
                          invalidate_dependents=invalidate_dependents,
                          silent=silent,
                          fingerprint_strategy=fingerprint_strategy) as invalidation_check:
        # In case all the targets were filtered out because they didn't participate in
        # fingerprinting.
        if not invalidation_check.all_vts:
            return NO_RESOLVE_RUN_RESULT
        # A single VersionedTargetSet covering all targets; its cache key hash names the resolve.
        resolve_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
        resolve_hash_name = resolve_vts.cache_key.hash
        # NB: This used to be a global directory, but is now specific to each task that includes
        # this mixin.
        ivy_workdir = os.path.join(self.versioned_workdir, 'ivy')
        targets = resolve_vts.targets
        # A fetch checks out previously-resolved artifacts; a resolve does a fresh ivy resolution.
        fetch = IvyFetchStep(confs,
                             resolve_hash_name,
                             pinned_artifacts,
                             self.get_options().soft_excludes,
                             self.ivy_resolution_cache_dir,
                             self.ivy_repository_cache_dir,
                             ivy_workdir)
        resolve = IvyResolveStep(confs,
                                 resolve_hash_name,
                                 pinned_artifacts,
                                 self.get_options().soft_excludes,
                                 self.ivy_resolution_cache_dir,
                                 self.ivy_repository_cache_dir,
                                 ivy_workdir)
        return self._perform_resolution(fetch, resolve, executor, extra_args, invalidation_check,
                                        resolve_vts, targets, workunit_name)
def create_pex(self, pex_info=None):
    """Returns a wrapped pex that "merges" the other pexes via PEX_PATH.

    :param pex_info: Optional PexInfo to seed the merged pex with.
    :returns: A PEX over the merged chroot for the context's relevant python targets.
    """
    relevant_targets = self.context.targets(
        lambda tgt: isinstance(tgt, (
            PythonDistribution, PythonRequirementLibrary, PythonTarget, Files)))
    with self.invalidated(relevant_targets) as invalidation_check:
        # If there are no relevant targets, we still go through the motions of resolving
        # an empty set of requirements, to prevent downstream tasks from having to check
        # for this special case.
        if invalidation_check.all_vts:
            target_set_id = VersionedTargetSet.from_versioned_targets(
                invalidation_check.all_vts).cache_key.hash
        else:
            target_set_id = 'no_targets'
        interpreter = self.context.products.get_data(PythonInterpreter)
        # The merged chroot is keyed by interpreter identity and target-set hash.
        path = os.path.realpath(os.path.join(self.workdir, str(interpreter.identity), target_set_id))
        # Note that we check for the existence of the directory, instead of for invalid_vts,
        # to cover the empty case.
        if not os.path.isdir(path):
            pexes = [
                self.context.products.get_data(ResolveRequirements.REQUIREMENTS_PEX),
                self.context.products.get_data(GatherSources.PYTHON_SOURCES)
            ]
            if self.extra_requirements():
                extra_requirements_pex = self.resolve_requirement_strings(
                    interpreter, self.extra_requirements())
                # Add the extra requirements first, so they take precedence over any colliding
                # version in the target set's dependency closure.
                pexes = [extra_requirements_pex] + pexes
            constraints = {constraint for rt in relevant_targets if is_python_target(rt)
                           for constraint in PythonSetup.global_instance().compatibility_or_constraints(rt)}
            with self.merged_pex(path, pex_info, interpreter, pexes, constraints) as builder:
                for extra_file in self.extra_files():
                    extra_file.add_to(builder)
                builder.freeze()
    return PEX(path, interpreter)
def get_pants_cachedir():
    """Return the pants global cache directory."""
    # Follow the XDG base directory spec:
    # http://standards.freedesktop.org/basedir-spec/latest/index.html.
    cache_home = os.environ.get('XDG_CACHE_HOME') or '~/.cache'
    return os.path.expanduser(os.path.join(cache_home, 'pants'))
def get_pants_configdir():
    """Return the pants global config directory."""
    # Follow the XDG base directory spec:
    # http://standards.freedesktop.org/basedir-spec/latest/index.html.
    config_home = os.environ.get('XDG_CONFIG_HOME') or '~/.config'
    return os.path.expanduser(os.path.join(config_home, 'pants'))
def get_scm():
    """Return the pants Scm, if any, auto-detecting a git worktree on first use.

    :API: public
    """
    # TODO(John Sirois): Extract a module/class to carry the bootstrap logic.
    global _SCM
    if _SCM:
        return _SCM
    from pants.scm.git import Git
    # We know about git, so attempt an auto-configure.
    worktree = Git.detect_worktree()
    if worktree and os.path.isdir(worktree):
        git = Git(worktree=worktree)
        try:
            logger.debug('Detected git repository at {} on branch {}'.format(worktree, git.branch_name))
            set_scm(git)
        except git.LocalException as e:
            logger.info('Failed to load git repository at {}: {}'.format(worktree, e))
    return _SCM
def set_scm(scm):
    """Set the pants Scm.

    :param scm: An Scm instance, or None (a no-op).
    :raises ValueError: If `scm` is neither None nor an Scm instance.
    """
    if scm is None:
        return
    if not isinstance(scm, Scm):
        raise ValueError('The scm must be an instance of Scm, given {}'.format(scm))
    global _SCM
    _SCM = scm
def get_sources_headers_for_target(self, target):
    """Return a list of file arguments to provide to the compiler.

    NB: the result contains both header and source files!

    :raises: :class:`NativeCompile.NativeCompileError` if there is an error processing
      the sources.
    """
    # Get source paths relative to the target base so the exception message with the
    # target and paths makes sense.
    target_relative_sources = target.sources_relative_to_target_base()
    rel_root = target_relative_sources.rel_root
    # Object files are dumped into a single directory, and the compiler silently
    # produces a single object file for non-unique basenames -- so basenames must be
    # unique within a target.
    # TODO: add some shading to file names so we can remove this check.
    # NB: It shouldn't matter if header files have the same name, but this raises for
    # them too; no shading of header file names will be needed.
    sources_by_basename = defaultdict(list)
    for src in target_relative_sources:
        sources_by_basename[os.path.basename(src)].append(src)
    duplicate_msgs = ["filename: {}, paths: {}".format(fname, paths)
                      for fname, paths in sources_by_basename.items()
                      if len(paths) > 1]
    if duplicate_msgs:
        raise self.NativeCompileError(
            "Error in target '{}': source files must have a unique filename within a '{}' target. "
            "Conflicting filenames:\n{}"
            .format(target.address.spec, target.alias(), '\n'.join(duplicate_msgs)))
    return [os.path.join(get_buildroot(), rel_root, src) for src in target_relative_sources]
def _make_compile_argv(self, compile_request):
    """Return the argv used to compile sources. Subclasses can override and append."""
    sources_minus_headers = list(self._iter_sources_minus_headers(compile_request))
    if not sources_minus_headers:
        raise self._HeaderOnlyLibrary()
    compiler = compile_request.compiler
    # Execution happens in the target output dir, so all paths must be absolute.
    buildroot = get_buildroot()
    include_args = ['-I{}'.format(os.path.join(buildroot, inc_dir))
                    for inc_dir in compile_request.include_dirs]
    source_args = [os.path.join(buildroot, src) for src in sources_minus_headers]
    # TODO: add -v to every compiler and linker invocation!
    # TODO: If we need to produce static libs, don't add -fPIC! (could use Variants -- see #5788).
    argv = ([compiler.exe_filename]
            + compiler.extra_args
            + ['-c', '-fPIC']
            + compile_request.compiler_options
            + include_args
            + source_args)
    self.context.log.info("selected compiler exe name: '{}'".format(compiler.exe_filename))
    self.context.log.debug("compile argv: {}".format(argv))
    return argv
def _compile(self, compile_request):
    """Perform the process of compilation, writing object files to the request's 'output_dir'.

    NB: This method must arrange the output files so that `collect_cached_objects()` can collect
    all of the results (or vice versa)!

    :raises: :class:`NativeCompile.NativeCompileError` if the compiler cannot be invoked or
      exits non-zero.
    """
    try:
        argv = self._make_compile_argv(compile_request)
    except self._HeaderOnlyLibrary:
        # A header-only library has no sources to compile; nothing to do.
        self.context.log.debug('{} is a header-only library'.format(compile_request))
        return
    compiler = compile_request.compiler
    output_dir = compile_request.output_dir
    env = compiler.invocation_environment_dict
    with self.context.new_workunit(
            name=self.workunit_label, labels=[WorkUnitLabel.COMPILER]) as workunit:
        try:
            # Run with cwd=output_dir so the compiler writes object files there.
            process = subprocess.Popen(
                argv,
                cwd=output_dir,
                stdout=workunit.output('stdout'),
                stderr=workunit.output('stderr'),
                env=env)
        except OSError as e:
            # The compiler executable itself could not be invoked.
            workunit.set_outcome(WorkUnit.FAILURE)
            raise self.NativeCompileError(
                "Error invoking '{exe}' with command {cmd} and environment {env} for request {req}: {err}"
                .format(exe=compiler.exe_filename, cmd=argv, env=env, req=compile_request, err=e))
        rc = process.wait()
        if rc != 0:
            workunit.set_outcome(WorkUnit.FAILURE)
            raise self.NativeCompileError(
                "Error in '{section_name}' with command {cmd} and environment {env} for request {req}. "
                "Exit code was: {rc}."
                .format(section_name=self.workunit_label, cmd=argv, env=env, req=compile_request, rc=rc))
def collect_cached_objects(self, versioned_target):
    """Scan `versioned_target`'s results directory and return the output files found there.

    :return: :class:`ObjectFiles`
    """
    results_dir = versioned_target.results_dir
    return ObjectFiles(results_dir, os.listdir(results_dir))
def all_enclosing_scopes(scope, allow_global=True):
    """Generate `scope` and every scope enclosing it, up to (optionally) the global scope."""
    _validate_full_scope(scope)

    # TODO: validate scopes here and/or in `enclosing_scope()` instead of assuming correctness.
    def in_range(candidate):
        if candidate is None:
            return False
        return allow_global or candidate != GLOBAL_SCOPE

    while in_range(scope):
        yield scope
        scope = None if scope == GLOBAL_SCOPE else enclosing_scope(scope)
def _resolve_jars_info(self, targets, classpath_products):
    """Consult ivy_jar_products to export the external libraries.

    :return: mapping of jar_id -> {classifier: <jar_file>} where classifier is 'default',
      'sources', 'javadoc', or any other conf.
    """
    mapping = defaultdict(dict)
    jar_products = classpath_products.get_artifact_classpath_entries_for_targets(
        targets, respect_excludes=False)
    for _conf, jar_entry in jar_products:
        # Key by the coordinate's classifier, falling back to 'default'.
        classifier = jar_entry.coordinate.classifier or 'default'
        mapping[self._jar_id(jar_entry.coordinate)][classifier] = jar_entry.cache_path
    return mapping
def _get_pants_target_alias(self, pants_target_type):
    """Return the pants BUILD-file alias for the given target type.

    Falls back to the fully qualified '<module>.<name>' when no alias is registered.
    """
    try:
        return self.target_aliases_map[pants_target_type]
    except KeyError:
        return "{}.{}".format(pants_target_type.__module__, pants_target_type.__name__)
def all_dependencies(self, target):
    """Yield every transitive dependency of the context's target, in BFS order."""
    for dependency in target.closure(bfs=True, **self.target_closure_kwargs):
        yield dependency
def defaulted_property(self, target, option_name):
    """Compute a language property setting for the given JvmTarget.

    Dispatches to the Java or Scala platform subsystem based on the target's sources;
    a target of neither language falls back to its own `option_name` attribute.
    """
    if target.has_sources('.java'):
        subsystem = Java.global_instance()
    elif target.has_sources('.scala'):
        subsystem = ScalaPlatform.global_instance()
    else:
        return getattr(target, option_name)
    return subsystem.get_scalar_mirrored_target_option(option_name, target)
def dist(self):
    """Return the `Distribution` selected for Zinc based on execution strategy.

    :rtype: pants.java.distribution.distribution.Distribution
    """
    underlying_dist = self.underlying_dist
    if self._execution_strategy == NailgunTaskBase.HERMETIC:
        return underlying_dist
    # symlink .pants.d/.jdk -> /some/java/home/
    jdk_home_symlink = os.path.relpath(
        os.path.join(self._zinc_factory.get_options().pants_workdir, '.jdk'),
        get_buildroot())
    # Since this code can be run in multi-threading mode due to multiple
    # zinc workers, we need to make sure the file operations below are atomic.
    with self._lock:
        # NB: use lexists/readlink here: os.path.exists follows the link and reports
        # False for a dangling symlink (e.g. the JDK it pointed to was removed), which
        # would make the subsequent os.symlink raise FileExistsError.
        if not os.path.lexists(jdk_home_symlink):
            os.symlink(underlying_dist.home, jdk_home_symlink)
        # Recreate if the symlink exists but does not match `underlying_dist.home`.
        elif os.readlink(jdk_home_symlink) != underlying_dist.home:
            os.remove(jdk_home_symlink)
            os.symlink(underlying_dist.home, jdk_home_symlink)
    return Distribution(home_path=jdk_home_symlink)
def _compiler_bridge_cache_dir(self):
    """A directory where compiled copies of the `compiler-bridge` can be stored.

    The compiler-bridge is specific to each scala version. It is currently compiled only
    once, while bootstrapping, then stored in the working directory under
    .pants.d/zinc/compiler-bridge/<cachekey>, where <cachekey> is calculated from the
    locations of zinc, the compiler interface, and the compiler bridge.
    """
    hasher = sha1()
    for cp_entry in (self.zinc, self.compiler_interface, self.compiler_bridge):
        hasher.update(os.path.relpath(cp_entry, self._workdir()).encode('utf-8'))
    cache_key = hasher.hexdigest()[:12]
    return os.path.join(self._workdir(), 'zinc', 'compiler-bridge', cache_key)
def compile_compiler_bridge(self, context):
    """Compile the compiler bridge to be used by zinc, using our scala bootstrapper.

    It will compile and cache the jar, and materialize it if not already there.

    :param context: The context of the task trying to compile the bridge.
      This is mostly needed to use its scheduler to create digests of the relevant jars.
    :return: A ClasspathEntry for the compiled scala-compiler-bridge jar.
    """
    bridge_jar_name = 'scala-compiler-bridge.jar'
    bridge_jar = os.path.join(self._compiler_bridge_cache_dir, bridge_jar_name)
    global_bridge_cache_dir = os.path.join(self._zinc_factory.get_options().pants_bootstrapdir, fast_relpath(self._compiler_bridge_cache_dir, self._workdir()))
    globally_cached_bridge_jar = os.path.join(global_bridge_cache_dir, bridge_jar_name)
    # Workaround to avoid recompiling the bridge for every integration test.
    # We check the bootstrapdir (.cache) for the bridge.
    # If it exists, we make a copy to the buildroot.
    #
    # TODO Remove when action caches are implemented.
    if os.path.exists(globally_cached_bridge_jar):
        # Cache the bridge jar under buildroot, to allow snapshotting.
        safe_mkdir(self._relative_to_buildroot(self._compiler_bridge_cache_dir))
        safe_hardlink_or_copy(globally_cached_bridge_jar, bridge_jar)
    if not os.path.exists(bridge_jar):
        # No cached copy anywhere: compile the bridge and materialize the result.
        res = self._run_bootstrapper(bridge_jar, context)
        context._scheduler.materialize_directories((
            DirectoryToMaterialize(get_buildroot(), res.output_directory_digest),
        ))
        # For the workaround above to work, we need to store a copy of the bridge in
        # the bootstrapdir cache (.cache).
        safe_mkdir(global_bridge_cache_dir)
        safe_hardlink_or_copy(bridge_jar, globally_cached_bridge_jar)
        return ClasspathEntry(bridge_jar, res.output_directory_digest)
    else:
        # The jar already exists on disk; snapshot it to obtain its digest.
        bridge_jar_snapshot = context._scheduler.capture_snapshots((PathGlobsAndRoot(
            PathGlobs((self._relative_to_buildroot(bridge_jar),)),
            text_type(get_buildroot())
        ),))[0]
        bridge_jar_digest = bridge_jar_snapshot.directory_digest
        return ClasspathEntry(bridge_jar, bridge_jar_digest)
def _compiler_plugins_cp_entries(self):
    """Return global compiletime classpath entries for compiler plugins."""
    def tool_cp(subsystem, toolname):
        # Each subsystem resolves its plugin dep within its own options scope.
        return subsystem.tool_classpath_from_products(
            self._products, toolname, scope=subsystem.options_scope)

    classpaths = (tool_cp(Java.global_instance(), 'javac-plugin-dep')
                  + tool_cp(ScalaPlatform.global_instance(), 'scalac-plugin-dep'))
    return [(conf, ClasspathEntry(jar)) for conf in self.DEFAULT_CONFS for jar in classpaths]
def compile_classpath(self, classpath_product_key, target, extra_cp_entries=None):
    """Compute the compile classpath for the given target."""
    entries = [entry.path for entry in self.compile_classpath_entries(
        classpath_product_key, target, extra_cp_entries)]
    # Verify that all classpath entries are under the build root.
    buildroot = get_buildroot()
    for path in entries:
        assert path.startswith(buildroot), \
            "Classpath entry does not start with buildroot: {}".format(path)
    return entries
def get(cls):
    """Return the set of subsystem types used outside of any task."""
    return {
        SourceRootConfig,
        Reporting,
        Reproducer,
        RunTracker,
        Changed,
        BinaryUtil.Factory,
        Subprocess.Factory
    }
def _get_template_text_from_package(self, template_name):
    """Load the named mustache template embedded in our package.

    :raises MustacheError: if no package is configured, or the template is missing.
    """
    if self._package_name is None:
        raise self.MustacheError('No package specified for template loading.')
    resource_path = os.path.join('templates', template_name + '.mustache')
    raw_template = pkgutil.get_data(self._package_name, resource_path)
    if raw_template is None:
        raise self.MustacheError(
            'could not find template {} in package {}'.format(resource_path, self._package_name))
    return raw_template.decode('utf8')
def union(cls):
    """A class decorator which other classes can specify that they can resolve to with `UnionRule`.

    Annotating a class with @union allows other classes to use a UnionRule() instance to
    indicate that they can be resolved to this base union class. The decorated class is
    never instantiated and should have no members -- it is used only as a tag, and is
    replaced with whatever object is passed as the subject of a `yield Get(...)`:

      @union
      class UnionBase(object): pass

      @rule(B, [X])
      def get_some_union_type(x):
        result = yield Get(ResultType, UnionBase, x.f())
        # ...

    If there is a single path from (whatever type `x.f()` returns) -> `ResultType` in the
    rule graph, the engine executes that path to produce a `ResultType` from `x.f()`. The
    type returned by `x.f()` must also have been registered as a member of `UnionBase`
    via a `UnionRule`.

    Unions let @rule bodies be written without knowledge of what types may eventually be
    provided as input -- the engine checks that a valid path to the desired result exists.
    """
    # TODO: Check that the union base type is used as a tag and nothing else (e.g. no attributes)!
    assert isinstance(cls, type)
    return type(cls.__name__, (cls,), {'_is_union': True})
q27890 | _RuleVisitor._maybe_end_of_stmt_list | train | def _maybe_end_of_stmt_list(attr_value):
"""If `attr_value` is a non-empty iterable, return its final element."""
if (attr_value is not None) and isinstance(attr_value, Iterable):
result = list(attr_value)
if len(result) > 0:
return result[-1]
return None | python | {
"resource": ""
} |
q27891 | NailgunTaskBase.create_java_executor | train | def create_java_executor(self, dist=None):
"""Create java executor that uses this task's ng daemon, if allowed.
Call only in execute() or later. TODO: Enforce this.
"""
dist = dist or self.dist
if self.execution_strategy == self.NAILGUN:
classpath = os.pathsep.join(self.tool_classpath('nailgun-server'))
return NailgunExecutor(self._identity,
self._executor_workdir,
classpath,
dist,
startup_timeout=self.get_options().nailgun_subprocess_startup_timeout,
connect_timeout=self.get_options().nailgun_timeout_seconds,
connect_attempts=self.get_options().nailgun_connect_attempts)
else:
return SubprocessExecutor(dist) | python | {
"resource": ""
} |
q27892 | NailgunTaskBase.runjava | train | def runjava(self, classpath, main, jvm_options=None, args=None, workunit_name=None,
workunit_labels=None, workunit_log_config=None, dist=None):
"""Runs the java main using the given classpath and args.
If --execution-strategy=subprocess is specified then the java main is run in a freshly spawned
subprocess, otherwise a persistent nailgun server dedicated to this Task subclass is used to
speed up amortized run times.
:API: public
"""
executor = self.create_java_executor(dist=dist)
# Creating synthetic jar to work around system arg length limit is not necessary
# when `NailgunExecutor` is used because args are passed through socket, therefore turning off
# creating synthetic jar if nailgun is used.
create_synthetic_jar = self.execution_strategy != self.NAILGUN
try:
return util.execute_java(classpath=classpath,
main=main,
jvm_options=jvm_options,
args=args,
executor=executor,
workunit_factory=self.context.new_workunit,
workunit_name=workunit_name,
workunit_labels=workunit_labels,
workunit_log_config=workunit_log_config,
create_synthetic_jar=create_synthetic_jar,
synthetic_jar_dir=self._executor_workdir)
except executor.Error as e:
raise TaskError(e) | python | {
"resource": ""
} |
q27893 | safe_filename | train | def safe_filename(name, extension=None, digest=None, max_length=_MAX_FILENAME_LENGTH):
"""Creates filename from name and extension ensuring that the final length is within the
max_length constraint.
By default the length is capped to work on most filesystems and the fallback to achieve
shortening is a sha1 hash of the proposed name.
Raises ValueError if the proposed name is not a simple filename but a file path.
Also raises ValueError when the name is simple but cannot be satisfactorily shortened with the
given digest.
:API: public
name: the proposed filename without extension
extension: an optional extension to append to the filename
digest: the digest to fall back on for too-long name, extension concatenations - should
support the hashlib digest api of update(string) and hexdigest
max_length: the maximum desired file name length
"""
if os.path.basename(name) != name:
raise ValueError('Name must be a filename, handed a path: {}'.format(name))
ext = extension or ''
filename = name + ext
if len(filename) <= max_length:
return filename
else:
digest = digest or hashlib.sha1()
digest.update(filename.encode('utf-8'))
hexdigest = digest.hexdigest()[:16]
# Prefix and suffix length: max length less 2 periods, the extension length, and the digest length.
ps_len = max(0, (max_length - (2 + len(ext) + len(hexdigest))) // 2)
sep = '.' if ps_len > 0 else ''
prefix = name[:ps_len]
suffix = name[-ps_len:] if ps_len > 0 else ''
safe_name = '{}{}{}{}{}{}'.format(prefix, sep, hexdigest, sep, suffix, ext)
if len(safe_name) > max_length:
raise ValueError('Digest {} failed to produce a filename <= {} '
'characters for {} - got {}'.format(digest, max_length, filename, safe_name))
return safe_name | python | {
"resource": ""
} |
q27894 | expand_path | train | def expand_path(path):
"""Returns ``path`` as an absolute path with ~user and env var expansion applied.
:API: public
"""
return os.path.abspath(os.path.expandvars(os.path.expanduser(path))) | python | {
"resource": ""
} |
q27895 | RemotePantsRunner._run_pants_with_retry | train | def _run_pants_with_retry(self, pantsd_handle, retries=3):
"""Runs pants remotely with retry and recovery for nascent executions.
:param PantsDaemon.Handle pantsd_handle: A Handle for the daemon to connect to.
"""
attempt = 1
while 1:
logger.debug(
'connecting to pantsd on port {} (attempt {}/{})'
.format(pantsd_handle.port, attempt, retries)
)
try:
return self._connect_and_execute(pantsd_handle)
except self.RECOVERABLE_EXCEPTIONS as e:
if attempt > retries:
raise self.Fallback(e)
self._backoff(attempt)
logger.warn(
'pantsd was unresponsive on port {}, retrying ({}/{})'
.format(pantsd_handle.port, attempt, retries)
)
# One possible cause of the daemon being non-responsive during an attempt might be if a
# another lifecycle operation is happening concurrently (incl teardown). To account for
# this, we won't begin attempting restarts until at least 1 second has passed (1 attempt).
if attempt > 1:
pantsd_handle = self._restart_pantsd()
attempt += 1
except NailgunClient.NailgunError as e:
# Ensure a newline.
logger.fatal('')
logger.fatal('lost active connection to pantsd!')
raise_with_traceback(self._extract_remote_exception(pantsd_handle.pid, e)) | python | {
"resource": ""
} |
q27896 | IvySubsystem.http_proxy | train | def http_proxy(self):
"""Set ivy to use an http proxy.
Expects a string of the form http://<host>:<port>
"""
if os.getenv('HTTP_PROXY'):
return os.getenv('HTTP_PROXY')
if os.getenv('http_proxy'):
return os.getenv('http_proxy')
return self.get_options().http_proxy | python | {
"resource": ""
} |
q27897 | IvySubsystem.https_proxy | train | def https_proxy(self):
"""Set ivy to use an https proxy.
Expects a string of the form http://<host>:<port>
"""
if os.getenv('HTTPS_PROXY'):
return os.getenv('HTTPS_PROXY')
if os.getenv('https_proxy'):
return os.getenv('https_proxy')
return self.get_options().https_proxy | python | {
"resource": ""
} |
q27898 | GoImportMetaTagReader.get_imported_repo | train | def get_imported_repo(self, import_path):
"""Looks for a go-import meta tag for the provided import_path.
Returns an ImportedRepo instance with the information in the meta tag,
or None if no go-import meta tag is found.
"""
try:
session = requests.session()
# TODO: Support https with (optional) fallback to http, as Go does.
# See https://github.com/pantsbuild/pants/issues/3503.
session.mount("http://",
requests.adapters.HTTPAdapter(max_retries=self.get_options().retries))
page_data = session.get('http://{import_path}?go-get=1'.format(import_path=import_path))
except requests.ConnectionError:
return None
if not page_data:
return None
# Return the first match, rather than doing some kind of longest prefix search.
# Hopefully no one returns multiple valid go-import meta tags.
for (root, vcs, url) in self.find_meta_tags(page_data.text):
if root and vcs and url:
# Check to make sure returned root is an exact match to the provided import path. If it is
# not then run a recursive check on the returned and return the values provided by that call.
if root == import_path:
return ImportedRepo(root, vcs, url)
elif import_path.startswith(root):
return self.get_imported_repo(root)
return None | python | {
"resource": ""
} |
q27899 | MutableBuildGraph._target_addressable_to_target | train | def _target_addressable_to_target(self, address, addressable):
"""Realizes a TargetAddressable into a Target at `address`.
:param TargetAddressable addressable:
:param Address address:
"""
try:
# TODO(John Sirois): Today - in practice, Addressable is unusable. BuildGraph assumes
# addressables are in fact TargetAddressables with dependencies (see:
# `inject_address_closure` for example), ie: leaf nameable things with - by definition - no
# deps cannot actually be used. Clean up BuildGraph to handle addressables as they are
# abstracted today which does not necessarily mean them having dependencies and thus forming
# graphs. They may only be multiply-referred to leaf objects.
target = addressable.instantiate(build_graph=self, address=address)
return target
except Exception:
traceback.print_exc()
logger.exception('Failed to instantiate Target with type {target_type} with name "{name}"'
' at address {address}'
.format(target_type=addressable.addressed_type,
name=addressable.addressed_name,
address=address))
raise | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.