_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q27900 | Depmap._dep_id | train | def _dep_id(self, dependency):
"""Returns a tuple of dependency_id, is_internal_dep."""
params = dict(sep=self.separator)
if isinstance(dependency, JarDependency):
# TODO(kwilson): handle 'classifier' and 'type'.
params.update(org=dependency.org, name=dependency.name, rev=dependency.rev)
is_internal_dep = False
else:
params.update(org='internal', name=dependency.id)
is_internal_dep = True
return ('{org}{sep}{name}{sep}{rev}' if params.get('rev') else
'{org}{sep}{name}').format(**params), is_internal_dep | python | {
"resource": ""
} |
q27901 | Depmap._output_dependency_tree | train | def _output_dependency_tree(self, target):
"""Plain-text depmap output handler."""
def make_line(dep, indent, is_dupe=False):
indent_join, indent_chars = ('--', ' |') if self.should_tree else ('', ' ')
dupe_char = '*' if is_dupe else ''
return ''.join((indent * indent_chars, indent_join, dupe_char, dep))
def output_deps(dep, indent, outputted, stack):
dep_id, internal = self._dep_id(dep)
if self.is_minimal and dep_id in outputted:
return
if self.output_candidate(internal):
yield make_line(dep_id,
0 if self.is_external_only else indent,
is_dupe=dep_id in outputted)
outputted.add(dep_id)
for sub_dep in self._enumerate_visible_deps(dep, self.output_candidate):
for item in output_deps(sub_dep, indent + 1, outputted, stack + [(dep_id, indent)]):
yield item
for item in output_deps(target, 0, set(), []):
yield item | python | {
"resource": ""
} |
q27902 | Depmap._output_digraph | train | def _output_digraph(self, target):
"""Graphviz format depmap output handler."""
color_by_type = {}
def maybe_add_type(dep, dep_id):
"""Add a class type to a dependency id if --show-types is passed."""
return dep_id if not self.show_types else '\\n'.join((dep_id, dep.__class__.__name__))
def make_node(dep, dep_id, internal):
line_fmt = ' "{id}" [style=filled, fillcolor={color}{internal}];'
int_shape = ', shape=ellipse' if not internal else ''
dep_class = dep.__class__.__name__
if dep_class not in color_by_type:
color_by_type[dep_class] = len(color_by_type.keys()) + 1
return line_fmt.format(id=dep_id, internal=int_shape, color=color_by_type[dep_class])
def make_edge(from_dep_id, to_dep_id, internal):
style = ' [style=dashed]' if not internal else ''
return ' "{}" -> "{}"{};'.format(from_dep_id, to_dep_id, style)
def output_deps(dep, parent, parent_id, outputted):
dep_id, internal = self._dep_id(dep)
if dep_id not in outputted:
yield make_node(dep, maybe_add_type(dep, dep_id), internal)
outputted.add(dep_id)
for sub_dep in self._enumerate_visible_deps(dep, self.output_candidate):
for item in output_deps(sub_dep, dep, dep_id, outputted):
yield item
if parent:
edge_id = (parent_id, dep_id)
if edge_id not in outputted:
yield make_edge(maybe_add_type(parent, parent_id), maybe_add_type(dep, dep_id), internal)
outputted.add(edge_id)
yield 'digraph "{}" {{'.format(target.id)
yield ' node [shape=rectangle, colorscheme=set312;];'
yield ' rankdir=LR;'
for line in output_deps(target, parent=None, parent_id=None, outputted=set()):
yield line
yield '}' | python | {
"resource": ""
} |
q27903 | _algebraic_data | train | def _algebraic_data(metaclass):
"""A class decorator to pull out `_list_fields` from a mixin class for use with a `datatype`."""
def wrapper(cls):
cls.__bases__ += (metaclass,)
cls._list_fields = metaclass._list_fields
return cls
return wrapper | python | {
"resource": ""
} |
q27904 | _ExtensibleAlgebraic.prepend_field | train | def prepend_field(self, field_name, list_value):
"""Return a copy of this object with `list_value` prepended to the field named `field_name`."""
return self._single_list_field_operation(field_name, list_value, prepend=True) | python | {
"resource": ""
} |
q27905 | _ExtensibleAlgebraic.append_field | train | def append_field(self, field_name, list_value):
"""Return a copy of this object with `list_value` appended to the field named `field_name`."""
return self._single_list_field_operation(field_name, list_value, prepend=False) | python | {
"resource": ""
} |
q27906 | _ExtensibleAlgebraic.sequence | train | def sequence(self, other, exclude_list_fields=None):
"""Return a copy of this object which combines all the fields common to both `self` and `other`.
List fields will be concatenated.
The return type of this method is the type of `self` (or whatever `.copy()` returns), but the
`other` argument can be any `_ExtensibleAlgebraic` instance.
"""
exclude_list_fields = frozenset(exclude_list_fields or [])
overwrite_kwargs = {}
nonexistent_excluded_fields = exclude_list_fields - self._list_fields
if nonexistent_excluded_fields:
raise self.AlgebraicDataError(
"Fields {} to exclude from a sequence() were not found in this object's list fields: {}. "
"This object is {}, the other object is {}."
.format(nonexistent_excluded_fields, self._list_fields, self, other))
shared_list_fields = (self._list_fields
& other._list_fields
- exclude_list_fields)
if not shared_list_fields:
raise self.AlgebraicDataError(
"Objects to sequence have no shared fields after excluding {}. "
"This object is {}, with list fields: {}. "
"The other object is {}, with list fields: {}."
.format(exclude_list_fields, self, self._list_fields, other, other._list_fields))
for list_field_name in shared_list_fields:
lhs_value = getattr(self, list_field_name)
rhs_value = getattr(other, list_field_name)
overwrite_kwargs[list_field_name] = lhs_value + rhs_value
return self.copy(**overwrite_kwargs) | python | {
"resource": ""
} |
q27907 | _Executable.invocation_environment_dict | train | def invocation_environment_dict(self):
"""A dict to use as this _Executable's execution environment.
This isn't made into an "algebraic" field because its contents (the keys of the dict) are
generally known to the specific class which is overriding this property. Implementations of this
property can then make use of the data in the algebraic fields to populate this dict.
:rtype: dict of string -> string
"""
lib_env_var = self._platform.resolve_for_enum_variant({
'darwin': 'DYLD_LIBRARY_PATH',
'linux': 'LD_LIBRARY_PATH',
})
return {
'PATH': create_path_env_var(self.path_entries),
lib_env_var: create_path_env_var(self.runtime_library_dirs),
} | python | {
"resource": ""
} |
q27908 | LibcDev._get_host_libc_from_host_compiler | train | def _get_host_libc_from_host_compiler(self):
"""Locate the host's libc-dev installation using a specified host compiler's search dirs."""
compiler_exe = self.get_options().host_compiler
# Implicitly, we are passing in the environment of the executing pants process to
# `get_compiler_library_dirs()`.
# These directories are checked to exist!
library_dirs = self._parse_search_dirs.get_compiler_library_dirs(compiler_exe)
libc_crti_object_file = None
for libc_dir_candidate in library_dirs:
maybe_libc_crti = os.path.join(libc_dir_candidate, self._LIBC_INIT_OBJECT_FILE)
if os.path.isfile(maybe_libc_crti):
libc_crti_object_file = maybe_libc_crti
break
if not libc_crti_object_file:
raise self.HostLibcDevResolutionError(
"Could not locate {fname} in library search dirs {dirs} from compiler: {compiler!r}. "
"You may need to install a libc dev package for the current system. "
"For many operating systems, this package is named 'libc-dev' or 'libc6-dev'."
.format(fname=self._LIBC_INIT_OBJECT_FILE, dirs=library_dirs, compiler=compiler_exe))
return HostLibcDev(crti_object=libc_crti_object_file,
fingerprint=hash_file(libc_crti_object_file)) | python | {
"resource": ""
} |
q27909 | RewriteBase.execute | train | def execute(self):
"""Runs the tool on all source files that are located."""
relevant_targets = self._get_non_synthetic_targets(self.get_targets())
if self.sideeffecting:
# Always execute sideeffecting tasks without invalidation.
self._execute_for(relevant_targets)
else:
# If the task is not sideeffecting we can use invalidation.
with self.invalidated(relevant_targets) as invalidation_check:
self._execute_for([vt.target for vt in invalidation_check.invalid_vts]) | python | {
"resource": ""
} |
q27910 | NodeDistribution._install_node | train | def _install_node(self):
"""Install the Node distribution from pants support binaries.
:returns: The Node distribution bin path.
:rtype: string
"""
node_package_path = self.select()
# Todo: https://github.com/pantsbuild/pants/issues/4431
# This line depends on repacked node distribution.
# Should change it from 'node/bin' to 'dist/bin'
node_bin_path = os.path.join(node_package_path, 'node', 'bin')
if not is_readable_dir(node_bin_path):
# The binary was pulled from nodejs and not our S3, in which
# case it's installed under a different directory.
return os.path.join(node_package_path, os.listdir(node_package_path)[0], 'bin')
return node_bin_path | python | {
"resource": ""
} |
q27911 | NodeDistribution._install_yarnpkg | train | def _install_yarnpkg(self):
"""Install the Yarnpkg distribution from pants support binaries.
:returns: The Yarnpkg distribution bin path.
:rtype: string
"""
yarnpkg_package_path = YarnpkgDistribution.scoped_instance(self).select()
yarnpkg_bin_path = os.path.join(yarnpkg_package_path, 'dist', 'bin')
return yarnpkg_bin_path | python | {
"resource": ""
} |
q27912 | NodeDistribution.node_command | train | def node_command(self, args=None, node_paths=None):
"""Creates a command that can run `node`, passing the given args to it.
:param list args: An optional list of arguments to pass to `node`.
:param list node_paths: An optional list of paths to node_modules.
:returns: A `node` command that can be run later.
:rtype: :class:`NodeDistribution.Command`
"""
# NB: We explicitly allow no args for the `node` command unlike the `npm` command since running
# `node` with no arguments is useful, it launches a REPL.
return command_gen([self._install_node], 'node', args=args, node_paths=node_paths) | python | {
"resource": ""
} |
q27913 | NodeDistribution.eslint_supportdir | train | def eslint_supportdir(self, task_workdir):
""" Returns the path where the ESLint is bootstrapped.
:param string task_workdir: The task's working directory
:returns: The path where ESLint is bootstrapped and whether or not it is configured
:rtype: (string, bool)
"""
bootstrapped_support_path = os.path.join(task_workdir, 'eslint')
# TODO(nsaechao): Should only have to check if the "eslint" dir exists in the task_workdir
# assuming fingerprinting works as intended.
# If the eslint_setupdir is not provided or missing required files, then
# clean up the directory so that Pants can install a pre-defined eslint version later on.
# Otherwise, if there is no configurations changes, rely on the cache.
# If there is a config change detected, use the new configuration.
if self.eslint_setupdir:
configured = all(os.path.exists(os.path.join(self.eslint_setupdir, f))
for f in self._eslint_required_files)
else:
configured = False
if not configured:
safe_mkdir(bootstrapped_support_path, clean=True)
else:
try:
installed = all(filecmp.cmp(
os.path.join(self.eslint_setupdir, f), os.path.join(bootstrapped_support_path, f))
for f in self._eslint_required_files)
except OSError:
installed = False
if not installed:
self._configure_eslinter(bootstrapped_support_path)
return bootstrapped_support_path, configured | python | {
"resource": ""
} |
q27914 | GoRemoteLibrary.remote_root | train | def remote_root(self):
"""The remote package root prefix portion of the the full `import_path`"""
return os.path.relpath(self.address.spec_path, self.target_base) | python | {
"resource": ""
} |
q27915 | GoRemoteLibrary.import_path | train | def import_path(self):
"""The full remote import path as used in import statements in `.go` source files."""
return os.path.join(self.remote_root, self.pkg) if self.pkg else self.remote_root | python | {
"resource": ""
} |
q27916 | JvmDependencyUsage.create_dep_usage_graph | train | def create_dep_usage_graph(self, targets):
"""Creates a graph of concrete targets, with their sum of products and dependencies.
Synthetic targets contribute products and dependencies to their concrete target.
"""
with self.invalidated(targets,
invalidate_dependents=True) as invalidation_check:
target_to_vts = {}
for vts in invalidation_check.all_vts:
target_to_vts[vts.target] = vts
if not self.get_options().use_cached:
node_creator = self.calculating_node_creator(
self.context.products.get_data('classes_by_source'),
self.context.products.get_data('runtime_classpath'),
self.context.products.get_data('product_deps_by_src'),
target_to_vts)
else:
node_creator = self.cached_node_creator(target_to_vts)
return DependencyUsageGraph(self.create_dep_usage_nodes(targets, node_creator),
self.size_estimators[self.get_options().size_estimator]) | python | {
"resource": ""
} |
q27917 | JvmDependencyUsage.calculating_node_creator | train | def calculating_node_creator(self, classes_by_source, runtime_classpath, product_deps_by_src,
target_to_vts):
"""Strategy directly computes dependency graph node based on
`classes_by_source`, `runtime_classpath`, `product_deps_by_src` parameters and
stores the result to the build cache.
"""
analyzer = JvmDependencyAnalyzer(get_buildroot(), runtime_classpath)
targets = self.context.targets()
targets_by_file = analyzer.targets_by_file(targets)
transitive_deps_by_target = analyzer.compute_transitive_deps_by_target(targets)
def creator(target):
transitive_deps = set(transitive_deps_by_target.get(target))
node = self.create_dep_usage_node(target,
analyzer,
product_deps_by_src,
classes_by_source,
targets_by_file,
transitive_deps)
vt = target_to_vts[target]
mode = 'w' if PY3 else 'wb'
with open(self.nodes_json(vt.results_dir), mode=mode) as fp:
json.dump(node.to_cacheable_dict(), fp, indent=2, sort_keys=True)
vt.update()
return node
return creator | python | {
"resource": ""
} |
q27918 | JvmDependencyUsage.cached_node_creator | train | def cached_node_creator(self, target_to_vts):
"""Strategy restores dependency graph node from the build cache.
"""
def creator(target):
vt = target_to_vts[target]
if vt.valid and os.path.exists(self.nodes_json(vt.results_dir)):
try:
with open(self.nodes_json(vt.results_dir), 'r') as fp:
return Node.from_cacheable_dict(json.load(fp),
lambda spec: next(self.context.resolve(spec).__iter__()))
except Exception:
self.context.log.warn("Can't deserialize json for target {}".format(target))
return Node(target.concrete_derived_from)
else:
self.context.log.warn("No cache entry for {}".format(target))
return Node(target.concrete_derived_from)
return creator | python | {
"resource": ""
} |
q27919 | DependencyUsageGraph.to_summary | train | def to_summary(self):
"""Outputs summarized dependencies ordered by a combination of max usage and cost."""
# Aggregate inbound edges by their maximum product usage ratio.
max_target_usage = defaultdict(lambda: 0.0)
for target, node in self._nodes.items():
for dep_target, edge in node.dep_edges.items():
if target == dep_target:
continue
used_ratio = self._used_ratio(dep_target, edge)
max_target_usage[dep_target] = max(max_target_usage[dep_target], used_ratio)
# Calculate a score for each.
Score = namedtuple('Score', ('badness', 'max_usage', 'cost_transitive', 'target'))
scores = []
for target, max_usage in max_target_usage.items():
cost_transitive = self._trans_cost(target)
score = int(max(cost_transitive, 1) / (max_usage if max_usage > 0.0 else 1.0))
scores.append(Score(score, max_usage, cost_transitive, target.address.spec))
# Output in order by score.
yield '[\n'
first = True
for score in sorted(scores, key=lambda s: s.badness):
yield '{} {}'.format('' if first else ',\n', json.dumps(score._asdict()))
first = False
yield '\n]\n' | python | {
"resource": ""
} |
q27920 | DependencyUsageGraph.to_json | train | def to_json(self):
"""Outputs the entire graph."""
res_dict = {}
def gen_dep_edge(node, edge, dep_tgt, aliases):
return {
'target': dep_tgt.address.spec,
'dependency_type': self._edge_type(node.concrete_target, edge, dep_tgt),
'products_used': len(edge.products_used),
'products_used_ratio': self._used_ratio(dep_tgt, edge),
'aliases': [alias.address.spec for alias in aliases],
}
for node in self._nodes.values():
res_dict[node.concrete_target.address.spec] = {
'cost': self._cost(node.concrete_target),
'cost_transitive': self._trans_cost(node.concrete_target),
'products_total': node.products_total,
'dependencies': [gen_dep_edge(node, edge, dep_tgt, node.dep_aliases.get(dep_tgt, {}))
for dep_tgt, edge in node.dep_edges.items()],
}
yield str(json.dumps(res_dict, indent=2, sort_keys=True)) | python | {
"resource": ""
} |
q27921 | rst_to_html | train | def rst_to_html(in_rst, stderr):
"""Renders HTML from an RST fragment.
:param string in_rst: An rst formatted string.
:param stderr: An open stream to use for docutils stderr output.
:returns: A tuple of (html rendered rst, return code)
"""
if not in_rst:
return '', 0
# Unfortunately, docutils is really setup for command line use.
# We're forced to patch the bits of sys its hardcoded to use so that we can call it in-process
# and still reliably determine errors.
# TODO(John Sirois): Move to a subprocess execution model utilizing a docutil chroot/pex.
orig_sys_exit = sys.exit
orig_sys_stderr = sys.stderr
returncodes = []
try:
sys.exit = returncodes.append
sys.stderr = stderr
pp = publish_parts(in_rst,
writer_name='html',
# Report and exit at level 2 (warnings) or higher.
settings_overrides=dict(exit_status_level=2, report_level=2),
enable_exit_status=True)
finally:
sys.exit = orig_sys_exit
sys.stderr = orig_sys_stderr
return_value = ''
if 'title' in pp and pp['title']:
return_value += '<title>{0}</title>\n<p style="font: 200% bold">{0}</p>\n'.format(pp['title'])
return_value += pp['body'].strip()
return return_value, returncodes.pop() if returncodes else 0 | python | {
"resource": ""
} |
q27922 | SubsystemDependency.options_scope | train | def options_scope(self):
"""The subscope for options of `subsystem_cls` scoped to `scope`.
This is the scope that option values are read from when initializing the instance
indicated by this dependency.
"""
if self.is_global():
return self.subsystem_cls.options_scope
else:
return self.subsystem_cls.subscope(self.scope) | python | {
"resource": ""
} |
q27923 | SubsystemClientMixin.subsystem_dependencies_iter | train | def subsystem_dependencies_iter(cls):
"""Iterate over the direct subsystem dependencies of this Optionable."""
for dep in cls.subsystem_dependencies():
if isinstance(dep, SubsystemDependency):
yield dep
else:
yield SubsystemDependency(dep, GLOBAL_SCOPE, removal_version=None, removal_hint=None) | python | {
"resource": ""
} |
q27924 | SubsystemClientMixin.subsystem_closure_iter | train | def subsystem_closure_iter(cls):
"""Iterate over the transitive closure of subsystem dependencies of this Optionable.
:rtype: :class:`collections.Iterator` of :class:`SubsystemDependency`
:raises: :class:`pants.subsystem.subsystem_client_mixin.SubsystemClientMixin.CycleException`
if a dependency cycle is detected.
"""
seen = set()
dep_path = OrderedSet()
def iter_subsystem_closure(subsystem_cls):
if subsystem_cls in dep_path:
raise cls.CycleException(list(dep_path) + [subsystem_cls])
dep_path.add(subsystem_cls)
for dep in subsystem_cls.subsystem_dependencies_iter():
if dep not in seen:
seen.add(dep)
yield dep
for d in iter_subsystem_closure(dep.subsystem_cls):
yield d
dep_path.remove(subsystem_cls)
for dep in iter_subsystem_closure(cls):
yield dep | python | {
"resource": ""
} |
q27925 | SubsystemClientMixin.known_scope_infos | train | def known_scope_infos(cls):
"""Yield ScopeInfo for all known scopes for this optionable, in no particular order.
:rtype: set of :class:`pants.option.scope.ScopeInfo`
:raises: :class:`pants.subsystem.subsystem_client_mixin.SubsystemClientMixin.CycleException`
if a dependency cycle is detected.
"""
known_scope_infos = set()
optionables_path = OrderedSet() # To check for cycles at the Optionable level, ignoring scope.
def collect_scope_infos(optionable_cls, scoped_to, removal_version=None, removal_hint=None):
if optionable_cls in optionables_path:
raise cls.CycleException(list(optionables_path) + [optionable_cls])
optionables_path.add(optionable_cls)
scope = (optionable_cls.options_scope if scoped_to == GLOBAL_SCOPE
else optionable_cls.subscope(scoped_to))
scope_info = ScopeInfo(
scope,
optionable_cls.options_scope_category,
optionable_cls,
removal_version=removal_version,
removal_hint=removal_hint
)
if scope_info not in known_scope_infos:
known_scope_infos.add(scope_info)
for dep in scope_info.optionable_cls.subsystem_dependencies_iter():
# A subsystem always exists at its global scope (for the purpose of options
# registration and specification), even if in practice we only use it scoped to
# some other scope.
#
# NB: We do not apply deprecations to this implicit global copy of the scope, because if
# the intention was to deprecate the entire scope, that could be accomplished by
# deprecating all options in the scope.
collect_scope_infos(dep.subsystem_cls, GLOBAL_SCOPE)
if not dep.is_global():
collect_scope_infos(dep.subsystem_cls,
scope,
removal_version=dep.removal_version,
removal_hint=dep.removal_hint)
optionables_path.remove(scope_info.optionable_cls)
collect_scope_infos(cls, GLOBAL_SCOPE)
return known_scope_infos | python | {
"resource": ""
} |
q27926 | PexBuilderWrapper.extract_single_dist_for_current_platform | train | def extract_single_dist_for_current_platform(self, reqs, dist_key):
"""Resolve a specific distribution from a set of requirements matching the current platform.
:param list reqs: A list of :class:`PythonRequirement` to resolve.
:param str dist_key: The value of `distribution.key` to match for a `distribution` from the
resolved requirements.
:return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
:raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given
`dist_key`.
"""
distributions = self._resolve_distributions_by_platform(reqs, platforms=['current'])
try:
matched_dist = assert_single_element(list(
dist
for _, dists in distributions.items()
for dist in dists
if dist.key == dist_key
))
except (StopIteration, ValueError) as e:
raise self.SingleDistExtractionError(
"Exactly one dist was expected to match name {} in requirements {}: {}"
.format(dist_key, reqs, e))
return matched_dist | python | {
"resource": ""
} |
q27927 | NodeModule.bin_executables | train | def bin_executables(self):
"""A normalized map of bin executable names and local path to an executable
:rtype: dict
"""
if isinstance(self.payload.bin_executables, string_types):
# In this case, the package_name is the bin name
return { self.package_name: self.payload.bin_executables }
return self.payload.bin_executables | python | {
"resource": ""
} |
q27928 | Fetcher.fetch | train | def fetch(self, url, listener, chunk_size_bytes=None, timeout_secs=None):
"""Fetches data from the given URL notifying listener of all lifecycle events.
:param string url: the url to GET data from
:param listener: the listener to notify of all download lifecycle events
:param chunk_size_bytes: the chunk size to use for buffering data, 10 KB by default
:param timeout_secs: the maximum time to wait for data to be available, 1 second by default
:raises: Fetcher.Error if there was a problem fetching all data from the given url
"""
if not isinstance(listener, self.Listener):
raise ValueError('listener must be a Listener instance, given {}'.format(listener))
chunk_size_bytes = chunk_size_bytes or 10 * 1024
timeout_secs = timeout_secs or 1.0
with closing(self._fetch(url, timeout_secs=timeout_secs)) as resp:
if resp.status_code != requests.codes.ok:
listener.status(resp.status_code)
raise self.PermanentError('Fetch of {} failed with status code {}'
.format(url, resp.status_code),
response_code=resp.status_code)
listener.status(resp.status_code, content_length=resp.size)
read_bytes = 0
for data in resp.iter_content(chunk_size_bytes=chunk_size_bytes):
listener.recv_chunk(data)
read_bytes += len(data)
if resp.size and read_bytes != resp.size:
raise self.Error('Expected {} bytes, read {}'.format(resp.size, read_bytes))
listener.finished() | python | {
"resource": ""
} |
q27929 | Fetcher.download | train | def download(self, url, listener=None, path_or_fd=None, chunk_size_bytes=None, timeout_secs=None):
"""Downloads data from the given URL.
By default data is downloaded to a temporary file.
:param string url: the url to GET data from
:param listener: an optional listener to notify of all download lifecycle events
:param path_or_fd: an optional file path or open file descriptor to write data to
:param chunk_size_bytes: the chunk size to use for buffering data
:param timeout_secs: the maximum time to wait for data to be available
:returns: the path to the file data was downloaded to.
:raises: Fetcher.Error if there was a problem downloading all data from the given url.
"""
@contextmanager
def download_fp(_path_or_fd):
if _path_or_fd and not isinstance(_path_or_fd, six.string_types):
yield _path_or_fd, _path_or_fd.name
else:
if not _path_or_fd:
fd, _path_or_fd = tempfile.mkstemp()
os.close(fd)
with safe_open(_path_or_fd, 'wb') as fp:
yield fp, _path_or_fd
with download_fp(path_or_fd) as (fp, path):
listener = self.DownloadListener(fp).wrap(listener)
self.fetch(url, listener, chunk_size_bytes=chunk_size_bytes, timeout_secs=timeout_secs)
return path | python | {
"resource": ""
} |
q27930 | NailgunProcessGroup.killall | train | def killall(self, everywhere=False):
"""Kills all nailgun servers started by pants.
:param bool everywhere: If ``True``, kills all pants-started nailguns on this machine;
otherwise restricts the nailguns killed to those started for the
current build root.
"""
with self._NAILGUN_KILL_LOCK:
for proc in self._iter_nailgun_instances(everywhere):
logger.info('killing nailgun server pid={pid}'.format(pid=proc.pid))
proc.terminate() | python | {
"resource": ""
} |
q27931 | NailgunExecutor._fingerprint | train | def _fingerprint(jvm_options, classpath, java_version):
"""Compute a fingerprint for this invocation of a Java task.
:param list jvm_options: JVM options passed to the java invocation
:param list classpath: The -cp arguments passed to the java invocation
:param Revision java_version: return value from Distribution.version()
:return: a hexstring representing a fingerprint of the java invocation
"""
digest = hashlib.sha1()
# TODO(John Sirois): hash classpath contents?
encoded_jvm_options = [option.encode('utf-8') for option in sorted(jvm_options)]
encoded_classpath = [cp.encode('utf-8') for cp in sorted(classpath)]
encoded_java_version = repr(java_version).encode('utf-8')
for item in (encoded_jvm_options, encoded_classpath, encoded_java_version):
digest.update(str(item).encode('utf-8'))
return digest.hexdigest() if PY3 else digest.hexdigest().decode('utf-8') | python | {
"resource": ""
} |
q27932 | NailgunExecutor._await_socket | train | def _await_socket(self, timeout):
"""Blocks for the nailgun subprocess to bind and emit a listening port in the nailgun stdout."""
with safe_open(self._ng_stdout, 'r') as ng_stdout:
start_time = time.time()
accumulated_stdout = ''
while 1:
# TODO: share the decreasing timeout logic here with NailgunProtocol.iter_chunks() by adding
# a method to pants.util.contextutil!
remaining_time = time.time() - (start_time + timeout)
if remaining_time > 0:
stderr = read_file(self._ng_stderr, binary_mode=True)
raise self.InitialNailgunConnectTimedOut(
timeout=timeout,
stdout=accumulated_stdout,
stderr=stderr,
)
readable, _, _ = select.select([ng_stdout], [], [], (-1 * remaining_time))
if readable:
line = ng_stdout.readline() # TODO: address deadlock risk here.
try:
return self._NG_PORT_REGEX.match(line).group(1)
except AttributeError:
pass
accumulated_stdout += line | python | {
"resource": ""
} |
q27933 | NailgunExecutor.ensure_connectable | train | def ensure_connectable(self, nailgun):
"""Ensures that a nailgun client is connectable or raises NailgunError."""
attempt_count = 1
while 1:
try:
with closing(nailgun.try_connect()) as sock:
logger.debug('Verified new ng server is connectable at {}'.format(sock.getpeername()))
return
except nailgun.NailgunConnectionError:
if attempt_count >= self._connect_attempts:
logger.debug('Failed to connect to ng after {} attempts'.format(self._connect_attempts))
raise # Re-raise the NailgunConnectionError which provides more context to the user.
attempt_count += 1
time.sleep(self.WAIT_INTERVAL_SEC) | python | {
"resource": ""
} |
q27934 | NailgunExecutor._spawn_nailgun_server | train | def _spawn_nailgun_server(self, fingerprint, jvm_options, classpath, stdout, stderr, stdin):
"""Synchronously spawn a new nailgun server."""
# Truncate the nailguns stdout & stderr.
safe_file_dump(self._ng_stdout, b'', mode='wb')
safe_file_dump(self._ng_stderr, b'', mode='wb')
jvm_options = jvm_options + [self._PANTS_NG_BUILDROOT_ARG,
self._create_owner_arg(self._workdir),
self._create_fingerprint_arg(fingerprint)]
post_fork_child_opts = dict(fingerprint=fingerprint,
jvm_options=jvm_options,
classpath=classpath,
stdout=stdout,
stderr=stderr)
logger.debug('Spawning nailgun server {i} with fingerprint={f}, jvm_options={j}, classpath={cp}'
.format(i=self._identity, f=fingerprint, j=jvm_options, cp=classpath))
self.daemon_spawn(post_fork_child_opts=post_fork_child_opts)
# Wait for and write the port information in the parent so we can bail on exception/timeout.
self.await_pid(self._startup_timeout)
self.write_socket(self._await_socket(self._connect_timeout))
logger.debug('Spawned nailgun server {i} with fingerprint={f}, pid={pid} port={port}'
.format(i=self._identity, f=fingerprint, pid=self.pid, port=self.socket))
client = self._create_ngclient(self.socket, stdout, stderr, stdin)
self.ensure_connectable(client)
return client | python | {
"resource": ""
} |
q27935 | XmlParser._parse | train | def _parse(cls, xml_path):
"""Parse .xml file and return parsed text as a DOM Document.
:param string xml_path: File path of xml file to be parsed.
:returns xml.dom.minidom.Document parsed_xml: Document instance containing parsed xml.
"""
try:
parsed_xml = parse(xml_path)
# Minidom is a frontend for various parsers, only Exception covers ill-formed .xml for them all.
except Exception as e:
raise cls.XmlError('Error parsing xml file at {0}: {1}'.format(xml_path, e))
return parsed_xml | python | {
"resource": ""
} |
q27936 | XmlParser.from_file | train | def from_file(cls, xml_path):
"""Parse .xml file and create a XmlParser object."""
try:
parsed_xml = cls._parse(xml_path)
except OSError as e:
raise XmlParser.XmlError("Problem reading xml file at {}: {}".format(xml_path, e))
return cls(xml_path, parsed_xml) | python | {
"resource": ""
} |
q27937 | XmlParser.get_attribute | train | def get_attribute(self, element, attribute):
"""Retrieve the value of an attribute that is contained by the tag element.
:param string element: Name of an xml element.
:param string attribute: Name of the attribute that is to be returned.
:return: Desired attribute value.
:rtype: string
"""
parsed_element = self.parsed.getElementsByTagName(element)
if not parsed_element:
raise self.XmlError("There is no '{0}' element in "
"xml file at: {1}".format(element, self.xml_path))
parsed_attribute = parsed_element[0].getAttribute(attribute)
if not parsed_attribute:
raise self.XmlError("There is no '{0}' attribute in "
"xml at: {1}".format(attribute, self.xml_path))
return parsed_attribute | python | {
"resource": ""
} |
q27938 | XmlParser.get_optional_attribute | train | def get_optional_attribute(self, element, attribute):
"""Attempt to retrieve an optional attribute from the xml and return None on failure."""
try:
return self.get_attribute(element, attribute)
except self.XmlError:
return None | python | {
"resource": ""
} |
q27939 | RankedValue.get_rank_value | train | def get_rank_value(cls, name):
"""Returns the integer constant value for the given rank name.
:param string rank: the string rank name (E.g., 'HARDCODED').
:returns: the integer constant value of the rank.
:rtype: int
"""
if name in cls._RANK_NAMES.values():
return getattr(cls, name, None)
return None | python | {
"resource": ""
} |
q27940 | RankedValue.prioritized_iter | train | def prioritized_iter(cls, flag_val, env_val, config_val, config_default_val,
hardcoded_val, default):
"""Yield the non-None values from highest-ranked to lowest, wrapped in RankedValue instances."""
if flag_val is not None:
yield RankedValue(cls.FLAG, flag_val)
if env_val is not None:
yield RankedValue(cls.ENVIRONMENT, env_val)
if config_val is not None:
yield RankedValue(cls.CONFIG, config_val)
if config_default_val is not None:
yield RankedValue(cls.CONFIG_DEFAULT, config_default_val)
if hardcoded_val is not None:
yield RankedValue(cls.HARDCODED, hardcoded_val)
yield RankedValue(cls.NONE, default) | python | {
"resource": ""
} |
q27941 | call_use_cached_files | train | def call_use_cached_files(tup):
"""Importable helper for multi-proc calling of ArtifactCache.use_cached_files on a cache instance.
Multiprocessing map/apply/etc require functions which can be imported, not bound methods.
To call a bound method, instead call a helper like this and pass tuple of the instance and args.
The helper can then call the original method on the deserialized instance.
:param tup: A tuple of an ArtifactCache and args (eg CacheKey) for ArtifactCache.use_cached_files.
"""
try:
cache, key, results_dir = tup
res = cache.use_cached_files(key, results_dir)
if res:
sys.stderr.write('.')
else:
sys.stderr.write(' ')
sys.stderr.flush()
return res
except NonfatalArtifactCacheError as e:
logger.warn('Error calling use_cached_files in artifact cache: {0}'.format(e))
return False | python | {
"resource": ""
} |
q27942 | call_insert | train | def call_insert(tup):
"""Importable helper for multi-proc calling of ArtifactCache.insert on an ArtifactCache instance.
See docstring on call_use_cached_files explaining why this is useful.
:param tup: A 4-tuple of an ArtifactCache and the 3 args passed to ArtifactCache.insert:
eg (some_cache_instance, cache_key, [some_file, another_file], False)
"""
try:
cache, key, files, overwrite = tup
return cache.insert(key, files, overwrite)
except NonfatalArtifactCacheError as e:
logger.warn('Error while inserting into artifact cache: {0}'.format(e))
return False | python | {
"resource": ""
} |
q27943 | ArtifactCache.insert | train | def insert(self, cache_key, paths, overwrite=False):
"""Cache the output of a build.
By default, checks cache.has(key) first, only proceeding to create and insert an artifact
if it is not already in the cache (though `overwrite` can be used to skip the check and
unconditionally insert).
:param CacheKey cache_key: A CacheKey object.
:param list<str> paths: List of absolute paths to generated dirs/files.
These must be under the artifact_root.
:param bool overwrite: Skip check for existing, insert even if already in cache.
"""
missing_files = [f for f in paths if not os.path.exists(f)]
if missing_files:
raise ArtifactCacheError('Tried to cache nonexistent files {0}'.format(missing_files))
if not overwrite:
if self.has(cache_key):
logger.debug('Skipping insert of existing artifact: {0}'.format(cache_key))
return False
try:
self.try_insert(cache_key, paths)
return True
except NonfatalArtifactCacheError as e:
logger.error('Error while writing to artifact cache: {0}'.format(e))
return False | python | {
"resource": ""
} |
q27944 | LineOriented.line_oriented | train | def line_oriented(cls, line_oriented_options, console):
"""Given Goal.Options and a Console, yields functions for writing to stdout and stderr, respectively.
The passed options instance will generally be the `Goal.Options` of a `LineOriented` `Goal`.
"""
if type(line_oriented_options) != cls.Options:
raise AssertionError(
'Expected Options for `{}`, got: {}'.format(cls.__name__, line_oriented_options))
output_file = line_oriented_options.values.output_file
sep = line_oriented_options.values.sep.encode('utf-8').decode('unicode_escape')
stdout, stderr = console.stdout, console.stderr
if output_file:
stdout = open(output_file, 'w')
try:
print_stdout = lambda msg: print(msg, file=stdout, end=sep)
print_stderr = lambda msg: print(msg, file=stderr)
yield print_stdout, print_stderr
finally:
if output_file:
stdout.close()
else:
stdout.flush()
stderr.flush() | python | {
"resource": ""
} |
q27945 | Goal.register | train | def register(cls, name, description, options_registrar_cls=None):
"""Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal`
"""
goal = cls.by_name(name)
goal._description = description
goal._options_registrar_cls = (options_registrar_cls.registrar_for_scope(name)
if options_registrar_cls else None)
return goal | python | {
"resource": ""
} |
q27946 | Goal.by_name | train | def by_name(cls, name):
"""Returns the unique object representing the goal of the specified name.
:API: public
"""
if name not in cls._goal_by_name:
cls._goal_by_name[name] = _Goal(name)
return cls._goal_by_name[name] | python | {
"resource": ""
} |
q27947 | Goal.all | train | def all():
"""Returns all active registered goals, sorted alphabetically by name.
:API: public
"""
return [goal for _, goal in sorted(Goal._goal_by_name.items()) if goal.active] | python | {
"resource": ""
} |
q27948 | Goal.subsystems | train | def subsystems(cls):
"""Returns all subsystem types used by all tasks, in no particular order.
:API: public
"""
ret = set()
for goal in cls.all():
ret.update(goal.subsystems())
return ret | python | {
"resource": ""
} |
q27949 | _Goal.install | train | def install(self, task_registrar, first=False, replace=False, before=None, after=None):
"""Installs the given task in this goal.
The placement of the task in this goal's execution list defaults to the end but its position
can be influenced by specifying exactly one of the following arguments:
first: Places the task 1st in the execution list.
replace: Removes all existing tasks in this goal and installs this task.
before: Places the task before the named task in the execution list.
after: Places the task after the named task in the execution list.
:API: public
"""
if [bool(place) for place in [first, replace, before, after]].count(True) > 1:
raise GoalError('Can only specify one of first, replace, before or after')
otn = self._ordered_task_names
if replace:
for tt in self.task_types():
tt.options_scope = None
del otn[:]
self._task_type_by_name = {}
task_name = task_registrar.name
if task_name in self._task_type_by_name:
raise GoalError(
'Can only specify a task name once per goal, saw multiple values for {} in goal {}'.format(
task_name,
self.name))
Optionable.validate_scope_name_component(task_name)
options_scope = Goal.scope(self.name, task_name)
task_type = _create_stable_task_type(task_registrar.task_type, options_scope)
if first:
otn.insert(0, task_name)
elif before in otn:
otn.insert(otn.index(before), task_name)
elif after in otn:
otn.insert(otn.index(after) + 1, task_name)
else:
otn.append(task_name)
self._task_type_by_name[task_name] = task_type
if task_registrar.serialize:
self.serialize = True
return self | python | {
"resource": ""
} |
q27950 | _Goal.uninstall_task | train | def uninstall_task(self, name):
"""Removes the named task from this goal.
Allows external plugins to modify the execution plan. Use with caution.
Note: Does not relax a serialization requirement that originated
from the uninstalled task's install() call.
:API: public
"""
if name in self._task_type_by_name:
self._task_type_by_name[name].options_scope = None
del self._task_type_by_name[name]
self._ordered_task_names = [x for x in self._ordered_task_names if x != name]
else:
raise GoalError('Cannot uninstall unknown task: {0}'.format(name)) | python | {
"resource": ""
} |
q27951 | _Goal.subsystems | train | def subsystems(self):
"""Returns all subsystem types used by tasks in this goal, in no particular order."""
ret = set()
for task_type in self.task_types():
ret.update([dep.subsystem_cls for dep in task_type.subsystem_dependencies_iter()])
return ret | python | {
"resource": ""
} |
q27952 | Options.complete_scopes | train | def complete_scopes(cls, scope_infos):
"""Expand a set of scopes to include all enclosing scopes.
E.g., if the set contains `foo.bar.baz`, ensure that it also contains `foo.bar` and `foo`.
Also adds any deprecated scopes.
"""
ret = {GlobalOptionsRegistrar.get_scope_info()}
original_scopes = dict()
for si in scope_infos:
ret.add(si)
if si.scope in original_scopes:
raise cls.DuplicateScopeError('Scope `{}` claimed by {}, was also claimed by {}.'.format(
si.scope, si, original_scopes[si.scope]
))
original_scopes[si.scope] = si
if si.deprecated_scope:
ret.add(ScopeInfo(si.deprecated_scope, si.category, si.optionable_cls))
original_scopes[si.deprecated_scope] = si
# TODO: Once scope name validation is enforced (so there can be no dots in scope name
# components) we can replace this line with `for si in scope_infos:`, because it will
# not be possible for a deprecated_scope to introduce any new intermediate scopes.
for si in copy.copy(ret):
for scope in all_enclosing_scopes(si.scope, allow_global=False):
if scope not in original_scopes:
ret.add(ScopeInfo(scope, ScopeInfo.INTERMEDIATE))
return ret | python | {
"resource": ""
} |
q27953 | Options.create | train | def create(cls, env, config, known_scope_infos, args=None, bootstrap_option_values=None):
"""Create an Options instance.
:param env: a dict of environment variables.
:param :class:`pants.option.config.Config` config: data from a config file.
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:param args: a list of cmd-line args; defaults to `sys.argv` if None is supplied.
:param bootstrap_option_values: An optional namespace containing the values of bootstrap
options. We can use these values when registering other options.
"""
# We need parsers for all the intermediate scopes, so inherited option values
# can propagate through them.
complete_known_scope_infos = cls.complete_scopes(known_scope_infos)
splitter = ArgSplitter(complete_known_scope_infos)
args = sys.argv if args is None else args
goals, scope_to_flags, target_specs, passthru, passthru_owner, unknown_scopes = splitter.split_args(args)
option_tracker = OptionTracker()
if bootstrap_option_values:
target_spec_files = bootstrap_option_values.target_spec_files
if target_spec_files:
for spec in target_spec_files:
with open(spec, 'r') as f:
target_specs.extend([line for line in [line.strip() for line in f] if line])
help_request = splitter.help_request
parser_hierarchy = ParserHierarchy(env, config, complete_known_scope_infos, option_tracker)
bootstrap_option_values = bootstrap_option_values
known_scope_to_info = {s.scope: s for s in complete_known_scope_infos}
return cls(goals, scope_to_flags, target_specs, passthru, passthru_owner, help_request,
parser_hierarchy, bootstrap_option_values, known_scope_to_info,
option_tracker, unknown_scopes) | python | {
"resource": ""
} |
q27954 | Options.drop_flag_values | train | def drop_flag_values(self):
"""Returns a copy of these options that ignores values specified via flags.
Any pre-cached option values are cleared and only option values that come from option defaults,
the config or the environment are used.
"""
# An empty scope_to_flags to force all values to come via the config -> env hierarchy alone
# and empty values in case we already cached some from flags.
no_flags = {}
return Options(self._goals,
no_flags,
self._target_specs,
self._passthru,
self._passthru_owner,
self._help_request,
self._parser_hierarchy,
self._bootstrap_option_values,
self._known_scope_to_info,
self._option_tracker,
self._unknown_scopes) | python | {
"resource": ""
} |
q27955 | Options.register | train | def register(self, scope, *args, **kwargs):
"""Register an option in the given scope."""
self._assert_not_frozen()
self.get_parser(scope).register(*args, **kwargs)
deprecated_scope = self.known_scope_to_info[scope].deprecated_scope
if deprecated_scope:
self.get_parser(deprecated_scope).register(*args, **kwargs) | python | {
"resource": ""
} |
q27956 | Options.registration_function_for_optionable | train | def registration_function_for_optionable(self, optionable_class):
"""Returns a function for registering options on the given scope."""
self._assert_not_frozen()
# TODO(benjy): Make this an instance of a class that implements __call__, so we can
# docstring it, and so it's less weird than attatching properties to a function.
def register(*args, **kwargs):
kwargs['registering_class'] = optionable_class
self.register(optionable_class.options_scope, *args, **kwargs)
# Clients can access the bootstrap option values as register.bootstrap.
register.bootstrap = self.bootstrap_option_values()
# Clients can access the scope as register.scope.
register.scope = optionable_class.options_scope
return register | python | {
"resource": ""
} |
q27957 | Options._check_and_apply_deprecations | train | def _check_and_apply_deprecations(self, scope, values):
"""Checks whether a ScopeInfo has options specified in a deprecated scope.
There are two related cases here. Either:
1) The ScopeInfo has an associated deprecated_scope that was replaced with a non-deprecated
scope, meaning that the options temporarily live in two locations.
2) The entire ScopeInfo is deprecated (as in the case of deprecated SubsystemDependencies),
meaning that the options live in one location.
In the first case, this method has the sideeffect of merging options values from deprecated
scopes into the given values.
"""
si = self.known_scope_to_info[scope]
# If this Scope is itself deprecated, report that.
if si.removal_version:
explicit_keys = self.for_scope(scope, inherit_from_enclosing_scope=False).get_explicit_keys()
if explicit_keys:
warn_or_error(
removal_version=si.removal_version,
deprecated_entity_description='scope {}'.format(scope),
hint=si.removal_hint,
)
# Check if we're the new name of a deprecated scope, and clone values from that scope.
# Note that deprecated_scope and scope share the same Optionable class, so deprecated_scope's
# Optionable has a deprecated_options_scope equal to deprecated_scope. Therefore we must
# check that scope != deprecated_scope to prevent infinite recursion.
deprecated_scope = si.deprecated_scope
if deprecated_scope is not None and scope != deprecated_scope:
# Do the deprecation check only on keys that were explicitly set on the deprecated scope
# (and not on its enclosing scopes).
explicit_keys = self.for_scope(deprecated_scope,
inherit_from_enclosing_scope=False).get_explicit_keys()
if explicit_keys:
# Update our values with those of the deprecated scope (now including values inherited
# from its enclosing scope).
# Note that a deprecated val will take precedence over a val of equal rank.
# This makes the code a bit neater.
values.update(self.for_scope(deprecated_scope))
warn_or_error(
removal_version=self.known_scope_to_info[scope].deprecated_scope_removal_version,
deprecated_entity_description='scope {}'.format(deprecated_scope),
hint='Use scope {} instead (options: {})'.format(scope, ', '.join(explicit_keys))
) | python | {
"resource": ""
} |
q27958 | Options.for_scope | train | def for_scope(self, scope, inherit_from_enclosing_scope=True):
"""Return the option values for the given scope.
Values are attributes of the returned object, e.g., options.foo.
Computed lazily per scope.
:API: public
"""
# First get enclosing scope's option values, if any.
if scope == GLOBAL_SCOPE or not inherit_from_enclosing_scope:
values = OptionValueContainer()
else:
values = copy.copy(self.for_scope(enclosing_scope(scope)))
# Now add our values.
flags_in_scope = self._scope_to_flags.get(scope, [])
self._parser_hierarchy.get_parser_by_scope(scope).parse_args(flags_in_scope, values)
# Check for any deprecation conditions, which are evaluated using `self._flag_matchers`.
if inherit_from_enclosing_scope:
self._check_and_apply_deprecations(scope, values)
return values | python | {
"resource": ""
} |
q27959 | OptionTracker.record_option | train | def record_option(self, scope, option, value, rank, deprecation_version=None, details=None):
"""Records that the given option was set to the given value.
:param string scope: scope of the option.
:param string option: name of the option.
:param string value: value the option was set to.
:param int rank: the rank of the option (Eg, RankedValue.HARDCODED), to keep track of where the
option came from.
:param deprecation_version: Deprecation version for this option.
:param string details: optional additional details about how the option was set (eg, the name
of a particular config file, if the rank is RankedValue.CONFIG).
"""
scoped_options = self.option_history_by_scope[scope]
if option not in scoped_options:
scoped_options[option] = self.OptionHistory()
scoped_options[option].record_value(value, rank, deprecation_version, details) | python | {
"resource": ""
} |
q27960 | FingerprintStrategy.fingerprint_target | train | def fingerprint_target(self, target):
"""Consumers of subclass instances call this to get a fingerprint labeled with the name"""
fingerprint = self.compute_fingerprint(target)
if fingerprint:
return '{fingerprint}-{name}'.format(fingerprint=fingerprint, name=type(self).__name__)
else:
return None | python | {
"resource": ""
} |
q27961 | UnpackWheelsFingerprintStrategy.compute_fingerprint | train | def compute_fingerprint(self, target):
"""UnpackedWheels targets need to be re-unpacked if any of its configuration changes or any of
the jars they import have changed.
"""
if isinstance(target, UnpackedWheels):
hasher = sha1()
for cache_key in sorted(req.cache_key() for req in target.all_imported_requirements):
hasher.update(cache_key.encode('utf-8'))
hasher.update(target.payload.fingerprint().encode('utf-8'))
return hasher.hexdigest() if PY3 else hasher.hexdigest().decode('utf-8')
return None | python | {
"resource": ""
} |
q27962 | UnpackWheels._get_matching_wheel | train | def _get_matching_wheel(self, pex_path, interpreter, requirements, module_name):
"""Use PexBuilderWrapper to resolve a single wheel from the requirement specs using pex."""
with self.context.new_workunit('extract-native-wheels'):
with safe_concurrent_creation(pex_path) as chroot:
pex_builder = PexBuilderWrapper.Factory.create(
builder=PEXBuilder(path=chroot, interpreter=interpreter),
log=self.context.log)
return pex_builder.extract_single_dist_for_current_platform(requirements, module_name) | python | {
"resource": ""
} |
q27963 | Exiter.exit | train | def exit(self, result=PANTS_SUCCEEDED_EXIT_CODE, msg=None, out=None):
"""Exits the runtime.
:param result: The exit status. Typically either PANTS_SUCCEEDED_EXIT_CODE or
PANTS_FAILED_EXIT_CODE, but can be a string as well. (Optional)
:param msg: A string message to print to stderr or another custom file desciptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional)
"""
if msg:
out = out or sys.stderr
if PY3 and hasattr(out, 'buffer'):
out = out.buffer
msg = ensure_binary(msg)
try:
out.write(msg)
out.write(b'\n')
# TODO: Determine whether this call is a no-op because the stream gets flushed on exit, or
# if we could lose what we just printed, e.g. if we get interrupted by a signal while
# exiting and the stream is buffered like stdout.
out.flush()
except Exception as e:
# If the file is already closed, or any other error occurs, just log it and continue to
# exit.
if msg:
logger.warning("Encountered error when trying to log this message: {}".format(msg))
# In pantsd, this won't go anywhere, because there's really nowhere for us to log if we
# can't log :(
# Not in pantsd, this will end up in sys.stderr.
traceback.print_stack()
logger.exception(e)
self._exit(result) | python | {
"resource": ""
} |
q27964 | Exiter.exit_and_fail | train | def exit_and_fail(self, msg=None, out=None):
"""Exits the runtime with a nonzero exit code, indicating failure.
:param msg: A string message to print to stderr or another custom file desciptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional)
"""
self.exit(result=PANTS_FAILED_EXIT_CODE, msg=msg, out=out) | python | {
"resource": ""
} |
q27965 | ResponseParser.parse | train | def parse(self, content):
"""Parse raw response content for a list of remote artifact cache URLs.
:API: public
"""
if self.format == 'json_map':
try:
return assert_list(json.loads(content.decode(self.encoding))[self.index])
except (KeyError, UnicodeDecodeError, ValueError) as e:
raise self.ResponseParserError("Error while parsing response content: {0}".format(str(e)))
# Should never get here.
raise ValueError('Unknown content format: "{}"'.format(self.format)) | python | {
"resource": ""
} |
q27966 | ExecutionGraph._compute_job_priorities | train | def _compute_job_priorities(self, job_list):
"""Walks the dependency graph breadth-first, starting from the most dependent tasks,
and computes the job priority as the sum of the jobs sizes along the critical path."""
job_size = {job.key: job.size for job in job_list}
job_priority = defaultdict(int)
bfs_queue = deque()
for job in job_list:
if len(self._dependees[job.key]) == 0:
job_priority[job.key] = job_size[job.key]
bfs_queue.append(job.key)
satisfied_dependees_count = defaultdict(int)
while len(bfs_queue) > 0:
job_key = bfs_queue.popleft()
for dependency_key in self._dependencies[job_key]:
job_priority[dependency_key] = \
max(job_priority[dependency_key],
job_size[dependency_key] + job_priority[job_key])
satisfied_dependees_count[dependency_key] += 1
if satisfied_dependees_count[dependency_key] == len(self._dependees[dependency_key]):
bfs_queue.append(dependency_key)
return job_priority | python | {
"resource": ""
} |
q27967 | JvmPlatform.preferred_jvm_distribution | train | def preferred_jvm_distribution(cls, platforms, strict=False):
"""Returns a jvm Distribution with a version that should work for all the platforms.
Any one of those distributions whose version is >= all requested platforms' versions
can be returned unless strict flag is set.
:param iterable platforms: An iterable of platform settings.
:param bool strict: If true, only distribution whose version matches the minimum
required version can be returned, i.e, the max target_level of all the requested
platforms.
:returns: Distribution one of the selected distributions.
"""
if not platforms:
return DistributionLocator.cached()
min_version = max(platform.target_level for platform in platforms)
max_version = Revision(*(min_version.components + [9999])) if strict else None
return DistributionLocator.cached(minimum_version=min_version, maximum_version=max_version) | python | {
"resource": ""
} |
q27968 | JvmPlatform.get_platform_by_name | train | def get_platform_by_name(self, name, for_target=None):
"""Finds the platform with the given name.
If the name is empty or None, returns the default platform.
If not platform with the given name is defined, raises an error.
:param str name: name of the platform.
:param JvmTarget for_target: optionally specified target we're looking up the platform for.
Only used in error message generation.
:return: The jvm platform object.
:rtype: JvmPlatformSettings
"""
if not name:
return self.default_platform
if name not in self.platforms_by_name:
raise self.UndefinedJvmPlatform(for_target, name, self.platforms_by_name)
return self.platforms_by_name[name] | python | {
"resource": ""
} |
q27969 | JvmPlatform.get_platform_for_target | train | def get_platform_for_target(self, target):
"""Find the platform associated with this target.
:param JvmTarget target: target to query.
:return: The jvm platform object.
:rtype: JvmPlatformSettings
"""
if not target.payload.platform and target.is_synthetic:
derived_from = target.derived_from
platform = derived_from and getattr(derived_from, 'platform', None)
if platform:
return platform
return self.get_platform_by_name(target.payload.platform, target) | python | {
"resource": ""
} |
q27970 | Serializable.is_serializable | train | def is_serializable(obj):
"""Return `True` if the given object conforms to the Serializable protocol.
:rtype: bool
"""
if inspect.isclass(obj):
return Serializable.is_serializable_type(obj)
return isinstance(obj, Serializable) or hasattr(obj, '_asdict') | python | {
"resource": ""
} |
q27971 | Serializable.is_serializable_type | train | def is_serializable_type(type_):
"""Return `True` if the given type's instances conform to the Serializable protocol.
:rtype: bool
"""
if not inspect.isclass(type_):
return Serializable.is_serializable(type_)
return issubclass(type_, Serializable) or hasattr(type_, '_asdict') | python | {
"resource": ""
} |
q27972 | HelpFormatter.format_options | train | def format_options(self, scope, description, option_registrations_iter):
"""Return a help message for the specified options.
:param option_registrations_iter: An iterator over (args, kwargs) pairs, as passed in to
options registration.
"""
oshi = HelpInfoExtracter(self._scope).get_option_scope_help_info(option_registrations_iter)
lines = []
def add_option(category, ohis):
if ohis:
lines.append('')
display_scope = scope or 'Global'
if category:
lines.append(self._maybe_blue('{} {} options:'.format(display_scope, category)))
else:
lines.append(self._maybe_blue('{} options:'.format(display_scope)))
if description:
lines.append(description)
lines.append(' ')
for ohi in ohis:
lines.extend(self.format_option(ohi))
add_option('', oshi.basic)
if self._show_recursive:
add_option('recursive', oshi.recursive)
if self._show_advanced:
add_option('advanced', oshi.advanced)
return lines | python | {
"resource": ""
} |
q27973 | HelpFormatter.format_option | train | def format_option(self, ohi):
"""Format the help output for a single option.
:param OptionHelpInfo ohi: Extracted information for option to print
:return: Formatted help text for this option
:rtype: list of string
"""
lines = []
choices = 'one of: [{}] '.format(ohi.choices) if ohi.choices else ''
arg_line = ('{args} {dflt}'
.format(args=self._maybe_cyan(', '.join(ohi.display_args)),
dflt=self._maybe_green('({}default: {})'.format(choices, ohi.default))))
lines.append(arg_line)
indent = ' '
lines.extend(['{}{}'.format(indent, s) for s in wrap(ohi.help, 76)])
if ohi.deprecated_message:
lines.append(self._maybe_red('{}{}.'.format(indent, ohi.deprecated_message)))
if ohi.removal_hint:
lines.append(self._maybe_red('{}{}'.format(indent, ohi.removal_hint)))
return lines | python | {
"resource": ""
} |
q27974 | pants_setup_py | train | def pants_setup_py(name, description, additional_classifiers=None, **kwargs):
"""Creates the setup_py for a pants artifact.
:param str name: The name of the package.
:param str description: A brief description of what the package provides.
:param list additional_classifiers: Any additional trove classifiers that apply to the package,
see: https://pypi.org/pypi?%3Aaction=list_classifiers
:param kwargs: Any additional keyword arguments to be passed to `setuptools.setup
<https://pythonhosted.org/setuptools/setuptools.html>`_.
:returns: A setup_py suitable for building and publishing pants components.
"""
if not name.startswith('pantsbuild.pants'):
raise ValueError("Pants distribution package names must start with 'pantsbuild.pants', "
"given {}".format(name))
standard_classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
# We know for a fact these OSs work but, for example, know Windows
# does not work yet. Take the conservative approach and only list OSs
# we know pants works with for now.
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Software Development :: Build Tools']
classifiers = OrderedSet(standard_classifiers + (additional_classifiers or []))
notes = PantsReleases.global_instance().notes_for_version(PANTS_SEMVER)
return PythonArtifact(
name=name,
version=VERSION,
description=description,
long_description=(_read_contents('src/python/pants/ABOUT.rst') + notes),
url='https://github.com/pantsbuild/pants',
license='Apache License, Version 2.0',
zip_safe=True,
classifiers=list(classifiers),
**kwargs) | python | {
"resource": ""
} |
q27975 | contrib_setup_py | train | def contrib_setup_py(name, description, additional_classifiers=None, **kwargs):
"""Creates the setup_py for a pants contrib plugin artifact.
:param str name: The name of the package; must start with 'pantsbuild.pants.contrib.'.
:param str description: A brief description of what the plugin provides.
:param list additional_classifiers: Any additional trove classifiers that apply to the plugin,
see: https://pypi.org/pypi?%3Aaction=list_classifiers
:param kwargs: Any additional keyword arguments to be passed to `setuptools.setup
<https://pythonhosted.org/setuptools/setuptools.html>`_.
:returns: A setup_py suitable for building and publishing pants components.
"""
if not name.startswith('pantsbuild.pants.contrib.'):
raise ValueError("Contrib plugin package names must start with 'pantsbuild.pants.contrib.', "
"given {}".format(name))
return pants_setup_py(name,
description,
additional_classifiers=additional_classifiers,
namespace_packages=['pants', 'pants.contrib'],
**kwargs) | python | {
"resource": ""
} |
q27976 | PantsReleases._branch_name | train | def _branch_name(cls, version):
"""Defines a mapping between versions and branches.
In particular, `-dev` suffixed releases always live on master. Any other (modern) release
lives in a branch.
"""
suffix = version.public[len(version.base_version):]
components = version.base_version.split('.') + [suffix]
if suffix == '' or suffix.startswith('rc'):
# An un-suffixed, or suffixed-with-rc version is a release from a stable branch.
return '{}.{}.x'.format(*components[:2])
elif suffix.startswith('.dev'):
# Suffixed `dev` release version in master.
return 'master'
else:
raise ValueError('Unparseable pants version number: {}'.format(version)) | python | {
"resource": ""
} |
q27977 | PantsReleases.notes_for_version | train | def notes_for_version(self, version):
"""Given the parsed Version of pants, return its release notes.
TODO: This method should parse out the specific version from the resulting file:
see https://github.com/pantsbuild/pants/issues/1708
"""
branch_name = self._branch_name(version)
branch_notes_file = self._branch_notes.get(branch_name, None)
if branch_notes_file is None:
raise ValueError(
'Version {} lives in branch {}, which is not configured in {}.'.format(
version, branch_name, self._branch_notes))
return _read_contents(branch_notes_file) | python | {
"resource": ""
} |
q27978 | JarLibrary.managed_dependencies | train | def managed_dependencies(self):
"""The managed_jar_dependencies target this jar_library specifies, or None.
:API: public
"""
if self.payload.managed_dependencies:
address = Address.parse(self.payload.managed_dependencies,
relative_to=self.address.spec_path)
self._build_graph.inject_address_closure(address)
return self._build_graph.get_target(address)
return None | python | {
"resource": ""
} |
q27979 | BuildConfigInitializer.setup | train | def setup(self):
"""Load backends and plugins.
:returns: A `BuildConfiguration` object constructed during backend/plugin loading.
"""
return self._load_plugins(
self._working_set,
self._bootstrap_options.pythonpath,
self._bootstrap_options.plugins,
self._bootstrap_options.backend_packages
) | python | {
"resource": ""
} |
q27980 | OptionsInitializer._construct_options | train | def _construct_options(options_bootstrapper, build_configuration):
"""Parse and register options.
:returns: An Options object representing the full set of runtime options.
"""
# Now that plugins and backends are loaded, we can gather the known scopes.
# Gather the optionables that are not scoped to any other. All known scopes are reachable
# via these optionables' known_scope_infos() methods.
top_level_optionables = (
{GlobalOptionsRegistrar} |
GlobalSubsystems.get() |
build_configuration.optionables() |
set(Goal.get_optionables())
)
# Now that we have the known scopes we can get the full options. `get_full_options` will
# sort and de-duplicate these for us.
known_scope_infos = [si
for optionable in top_level_optionables
for si in optionable.known_scope_infos()]
return options_bootstrapper.get_full_options(known_scope_infos) | python | {
"resource": ""
} |
q27981 | create_filters | train | def create_filters(predicate_params, predicate_factory):
"""Create filter functions from a list of string parameters.
:param predicate_params: A list of predicate_param arguments as in `create_filter`.
:param predicate_factory: As in `create_filter`.
"""
filters = []
for predicate_param in predicate_params:
filters.append(create_filter(predicate_param, predicate_factory))
return filters | python | {
"resource": ""
} |
q27982 | create_filter | train | def create_filter(predicate_param, predicate_factory):
"""Create a filter function from a string parameter.
:param predicate_param: Create a filter for this param string. Each string is a
comma-separated list of arguments to the predicate_factory.
If the entire comma-separated list is prefixed by a '-' then the
sense of the resulting filter is inverted.
:param predicate_factory: A function that takes a parameter and returns a predicate, i.e., a
function that takes a single parameter (of whatever type the filter
operates on) and returns a boolean.
:return: A filter function of one argument that is the logical OR of the predicates for each of
the comma-separated arguments. If the comma-separated list was prefixed by a '-',
the sense of the filter is inverted.
"""
# NOTE: Do not inline this into create_filters above. A separate function is necessary
# in order to capture the different closure on each invocation.
modifier, param = _extract_modifier(predicate_param)
predicates = [predicate_factory(p) for p in param.split(',')]
def filt(x):
return modifier(any(pred(x) for pred in predicates))
return filt | python | {
"resource": ""
} |
q27983 | wrap_filters | train | def wrap_filters(filters):
"""Returns a single filter that short-circuit ANDs the specified filters.
:API: public
"""
def combined_filter(x):
for filt in filters:
if not filt(x):
return False
return True
return combined_filter | python | {
"resource": ""
} |
q27984 | AppBase.binary | train | def binary(self):
"""Returns the binary this target references."""
dependencies = self.dependencies
if len(dependencies) != 1:
raise TargetDefinitionException(self, 'An app must define exactly one binary '
'dependency, have: {}'.format(dependencies))
binary = dependencies[0]
if not isinstance(binary, self.binary_target_type()):
raise TargetDefinitionException(self, 'Expected binary dependency to be a {} '
'target, found {}'.format(self.binary_target_type(),
binary))
return binary | python | {
"resource": ""
} |
q27985 | UnionProducts.copy | train | def copy(self):
"""Returns a copy of this UnionProducts.
Edits to the copy's mappings will not affect the product mappings in the original.
The copy is shallow though, so edits to the copy's product values will mutate the original's
product values.
:API: public
:rtype: :class:`UnionProducts`
"""
products_by_target = defaultdict(OrderedSet)
for key, value in self._products_by_target.items():
products_by_target[key] = OrderedSet(value)
return UnionProducts(products_by_target=products_by_target) | python | {
"resource": ""
} |
q27986 | UnionProducts.add_for_targets | train | def add_for_targets(self, targets, products):
"""Updates the products for the given targets, adding to existing entries.
:API: public
"""
# TODO: This is a temporary helper for use until the classpath has been split.
for target in targets:
self.add_for_target(target, products) | python | {
"resource": ""
} |
q27987 | UnionProducts.remove_for_target | train | def remove_for_target(self, target, products):
"""Updates the products for a particular target, removing the given existing entries.
:API: public
:param target: The target to remove the products for.
:param products: A list of tuples (conf, path) to remove.
"""
for product in products:
self._products_by_target[target].discard(product) | python | {
"resource": ""
} |
q27988 | UnionProducts.get_for_targets | train | def get_for_targets(self, targets):
"""Gets the union of the products for the given targets, preserving the input order.
:API: public
"""
products = OrderedSet()
for target in targets:
products.update(self._products_by_target[target])
return products | python | {
"resource": ""
} |
q27989 | UnionProducts.get_product_target_mappings_for_targets | train | def get_product_target_mappings_for_targets(self, targets):
"""Gets the product-target associations for the given targets, preserving the input order.
:API: public
:param targets: The targets to lookup products for.
:returns: The ordered (product, target) tuples.
"""
product_target_mappings = []
for target in targets:
for product in self._products_by_target[target]:
product_target_mappings.append((product, target))
return product_target_mappings | python | {
"resource": ""
} |
q27990 | UnionProducts.target_for_product | train | def target_for_product(self, product):
"""Looks up the target key for a product.
:API: public
:param product: The product to search for
:return: None if there is no target for the product
"""
for target, products in self._products_by_target.items():
if product in products:
return target
return None | python | {
"resource": ""
} |
q27991 | Products.register_data | train | def register_data(self, typename, value):
"""Registers a data product, raising if a product was already registered.
:API: public
:param typename: The type of product to register a value for.
:param value: The data product to register under `typename`.
:returns: The registered `value`.
:raises: :class:`ProductError` if a value for the given product `typename` is already
registered.
"""
if typename in self.data_products:
raise ProductError('Already have a product registered for {}, cannot over-write with {}'
.format(typename, value))
return self.safe_create_data(typename, lambda: value) | python | {
"resource": ""
} |
q27992 | Products.get_data | train | def get_data(self, typename, init_func=None):
"""Returns a data product.
:API: public
If the product isn't found, returns None, unless init_func is set, in which case the product's
value is set to the return value of init_func(), and returned.
"""
if typename not in self.data_products:
if not init_func:
return None
self.data_products[typename] = init_func()
return self.data_products.get(typename) | python | {
"resource": ""
} |
q27993 | Products.get_only | train | def get_only(self, product_type, target):
"""If there is exactly one product for the given product type and target, returns the
full filepath of said product.
Otherwise, raises a ProductError.
Useful for retrieving the filepath for the executable of a binary target.
:API: public
"""
product_mapping = self.get(product_type).get(target)
if len(product_mapping) != 1:
raise ProductError('{} directories in product mapping: requires exactly 1.'
.format(len(product_mapping)))
for _, files in product_mapping.items():
if len(files) != 1:
raise ProductError('{} files in target directory: requires exactly 1.'
.format(len(files)))
return files[0] | python | {
"resource": ""
} |
q27994 | PythonAWSLambda.binary | train | def binary(self):
"""Returns the binary that builds the pex for this lambda."""
dependencies = self.dependencies
if len(dependencies) != 1:
raise TargetDefinitionException(self, 'An app must define exactly one binary '
'dependency, have: {}'.format(dependencies))
binary = dependencies[0]
if not isinstance(binary, PythonBinary):
raise TargetDefinitionException(self, 'Expected binary dependency to be a python_binary '
'target, found {}'.format(binary))
return binary | python | {
"resource": ""
} |
q27995 | load_backends_and_plugins | train | def load_backends_and_plugins(plugins, working_set, backends, build_configuration=None):
"""Load named plugins and source backends
:param list<str> plugins: Plugins to load (see `load_plugins`). Plugins are loaded after
backends.
:param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from.
:param list<str> backends: Source backends to load (see `load_build_configuration_from_source`).
"""
build_configuration = build_configuration or BuildConfiguration()
load_build_configuration_from_source(build_configuration, backends)
load_plugins(build_configuration, plugins or [], working_set)
return build_configuration | python | {
"resource": ""
} |
q27996 | load_plugins | train | def load_plugins(build_configuration, plugins, working_set):
"""Load named plugins from the current working_set into the supplied build_configuration
"Loading" a plugin here refers to calling registration methods -- it is assumed each plugin
is already on the path and an error will be thrown if it is not. Plugins should define their
entrypoints in the `pantsbuild.plugin` group when configuring their distribution.
Like source backends, the `build_file_aliases`, `global_subsystems` and `register_goals` methods
are called if those entry points are defined.
* Plugins are loaded in the order they are provided. *
This is important as loading can add, remove or replace existing tasks installed by other plugins.
If a plugin needs to assert that another plugin is registered before it, it can define an
entrypoint "load_after" which can return a list of plugins which must have been loaded before it
can be loaded. This does not change the order or what plugins are loaded in any way -- it is
purely an assertion to guard against misconfiguration.
:param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
:param list<str> plugins: A list of plugin names optionally with versions, in requirement format.
eg ['widgetpublish', 'widgetgen==1.2'].
:param WorkingSet working_set: A pkg_resources.WorkingSet to load plugins from.
"""
loaded = {}
for plugin in plugins:
req = Requirement.parse(plugin)
dist = working_set.find(req)
if not dist:
raise PluginNotFound('Could not find plugin: {}'.format(req))
entries = dist.get_entry_map().get('pantsbuild.plugin', {})
if 'load_after' in entries:
deps = entries['load_after'].load()()
for dep_name in deps:
dep = Requirement.parse(dep_name)
if dep.key not in loaded:
raise PluginLoadOrderError('Plugin {0} must be loaded after {1}'.format(plugin, dep))
if 'build_file_aliases' in entries:
aliases = entries['build_file_aliases'].load()()
build_configuration.register_aliases(aliases)
if 'register_goals' in entries:
entries['register_goals'].load()()
if 'global_subsystems' in entries:
subsystems = entries['global_subsystems'].load()()
build_configuration.register_optionables(subsystems)
if 'rules' in entries:
rules = entries['rules'].load()()
build_configuration.register_rules(rules)
loaded[dist.as_requirement().key] = dist | python | {
"resource": ""
} |
q27997 | load_build_configuration_from_source | train | def load_build_configuration_from_source(build_configuration, backends=None):
"""Installs pants backend packages to provide BUILD file symbols and cli goals.
:param BuildConfiguration build_configuration: The BuildConfiguration (for adding aliases).
:param backends: An optional list of additional packages to load backends from.
:raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
the build configuration.
"""
# pants.build_graph and pants.core_task must always be loaded, and before any other backends.
# TODO: Consider replacing the "backend" nomenclature here. pants.build_graph and
# pants.core_tasks aren't really backends.
backend_packages = OrderedSet(['pants.build_graph', 'pants.core_tasks'] + (backends or []))
for backend_package in backend_packages:
load_backend(build_configuration, backend_package) | python | {
"resource": ""
} |
q27998 | load_backend | train | def load_backend(build_configuration, backend_package):
"""Installs the given backend package into the build configuration.
:param build_configuration the :class:``pants.build_graph.build_configuration.BuildConfiguration`` to
install the backend plugin into.
:param string backend_package: the package name containing the backend plugin register module that
provides the plugin entrypoints.
:raises: :class:``pants.base.exceptions.BuildConfigurationError`` if there is a problem loading
the build configuration."""
backend_module = backend_package + '.register'
try:
module = importlib.import_module(backend_module)
except ImportError as e:
traceback.print_exc()
raise BackendConfigurationError('Failed to load the {backend} backend: {error}'
.format(backend=backend_module, error=e))
def invoke_entrypoint(name):
entrypoint = getattr(module, name, lambda: None)
try:
return entrypoint()
except TypeError as e:
traceback.print_exc()
raise BackendConfigurationError(
'Entrypoint {entrypoint} in {backend} must be a zero-arg callable: {error}'
.format(entrypoint=name, backend=backend_module, error=e))
build_file_aliases = invoke_entrypoint('build_file_aliases')
if build_file_aliases:
build_configuration.register_aliases(build_file_aliases)
subsystems = invoke_entrypoint('global_subsystems')
if subsystems:
build_configuration.register_optionables(subsystems)
rules = invoke_entrypoint('rules')
if rules:
build_configuration.register_rules(rules)
invoke_entrypoint('register_goals') | python | {
"resource": ""
} |
q27999 | TargetAdaptor.get_sources | train | def get_sources(self):
"""Returns target's non-deferred sources if exists or the default sources if defined.
:rtype: :class:`GlobsWithConjunction`
NB: once ivy is implemented in the engine, we can fetch sources natively here, and/or
refactor how deferred sources are implemented.
see: https://github.com/pantsbuild/pants/issues/2997
"""
source = getattr(self, 'source', None)
sources = getattr(self, 'sources', None)
if source is not None and sources is not None:
raise Target.IllegalArgument(
self.address.spec,
'Cannot specify both source and sources attribute.'
)
if source is not None:
if not isinstance(source, string_types):
raise Target.IllegalArgument(
self.address.spec,
'source must be a string containing a path relative to the target, but got {} of type {}'
.format(source, type(source))
)
sources = [source]
# N.B. Here we check specifically for `sources is None`, as it's possible for sources
# to be e.g. an explicit empty list (sources=[]).
if sources is None:
if self.default_sources_globs is not None:
globs = Globs(*self.default_sources_globs,
spec_path=self.address.spec_path,
exclude=self.default_sources_exclude_globs or [])
conjunction_globs = GlobsWithConjunction(globs, GlobExpansionConjunction.any_match)
else:
globs = None
conjunction_globs = None
else:
globs = BaseGlobs.from_sources_field(sources, self.address.spec_path)
conjunction_globs = GlobsWithConjunction(globs, GlobExpansionConjunction.all_match)
return conjunction_globs | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.