_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q27300
ChangedRequest.from_options
train
def from_options(cls, options):
  """Build a `ChangedRequest` from the corresponding fields of an `Options` object."""
  return cls(
    options.changes_since,
    options.diffspec,
    options.include_dependees,
    options.fast,
  )
python
{ "resource": "" }
q27301
ensure_arg
train
def ensure_arg(args, arg, param=None):
  """Make sure the arg is present in the list of args.

  If arg is not present, adds the arg and the optional param.
  If present and param != None, sets the parameter following the arg to param
  (appending it when arg is the final element).

  :param list args: strings representing an argument list.
  :param string arg: argument to make sure is present in the list.
  :param string param: parameter to add or update after arg in the list.
  :return: possibly modified list of args.
  """
  for idx, found_arg in enumerate(args):
    if found_arg == arg:
      if param is not None:
        if idx + 1 < len(args):
          args[idx + 1] = param
        else:
          # Bug fix: the original indexed args[idx + 1] unconditionally, raising
          # IndexError when arg was the last element; append the param instead.
          args.append(param)
      return args
  args.append(arg)
  if param is not None:
    args.append(param)
  return args
python
{ "resource": "" }
q27302
remove_arg
train
def remove_arg(args, arg, has_param=False):
  """Removes the first instance of the specified arg from the list of args.

  If the arg is present and has_param is set, also removes the parameter that follows the arg.

  :param list args: strings representing an argument list.
  :param string arg: argument to remove from the list.
  :param bool has_param: if true, also remove the parameter that follows arg in the list.
  :return: possibly modified list of args.
  """
  # Fixes the docstring typo ":param staring arg:" from the original.
  for idx, found_arg in enumerate(args):
    if found_arg == arg:
      # Drop the arg itself plus, when has_param is set, its trailing parameter.
      slice_idx = idx + (2 if has_param else 1)
      args = args[:idx] + args[slice_idx:]
      break
  return args
python
{ "resource": "" }
q27303
setup_logging_to_stderr
train
def setup_logging_to_stderr(python_logger, level):
  """Hook the given python logger up to the native stderr log handler.

  Python-side filtering is left as loose as possible so that the Rust side
  performs the actual level filtering.
  """
  native = Native()
  numeric_level = get_numeric_level(level)
  stderr_handler = create_native_stderr_log_handler(numeric_level, native, stream=sys.stderr)
  python_logger.addHandler(stderr_handler)
  # Let the rust side filter levels; try to have the python side send everything to the rust logger.
  python_logger.setLevel("TRACE")
python
{ "resource": "" }
q27304
setup_logging
train
def setup_logging(level, console_stream=None, log_dir=None, scope=None, log_name=None, native=None):
  """Configures logging for a given scope, by default the global scope.

  :param str level: The logging level to enable, must be one of the level names listed here:
                    https://docs.python.org/2/library/logging.html#levels
  :param file console_stream: The stream to use for default (console) logging. If None (default),
                              this will disable console logging.
  :param str log_dir: An optional directory to emit logs files in.  If unspecified, no disk logging
                      will occur.  If supplied, the directory will be created if it does not already
                      exist and all logs will be tee'd to a rolling set of log files in that
                      directory.
  :param str scope: A logging scope to configure.  The scopes are hierarchical logger names, with
                    The '.' separator providing the scope hierarchy.  By default the root logger is
                    configured.
  :param str log_name: The base name of the log file (defaults to 'pants.log').
  :param Native native: An instance of the Native FFI lib, to register rust logging.
  :returns: The full path to the main log file if file logging is configured or else `None`.
  :rtype: str
  """

  # TODO(John Sirois): Consider moving to straight python logging.  The divide between the
  # context/work-unit logging and standard python logging doesn't buy us anything.

  # TODO(John Sirois): Support logging.config.fileConfig so a site can setup fine-grained
  # logging control and we don't need to be the middleman plumbing an option for each python
  # standard logging knob.

  log_filename = None
  file_handler = None

  # A custom log handler for sub-debug trace logging.
  # NOTE(review): TRACE appears to be a module-level constant defined elsewhere in this file.
  def trace(self, message, *args, **kwargs):
    if self.isEnabledFor(TRACE):
      self._log(TRACE, message, args, **kwargs)

  # Monkey-patch a `trace` method onto every Logger instance.
  logging.Logger.trace = trace

  logger = logging.getLogger(scope)
  # Remove any handlers installed by a prior call so we don't double-log.
  for handler in logger.handlers:
    logger.removeHandler(handler)

  if console_stream:
    native_handler = create_native_stderr_log_handler(level, native, stream=console_stream)
    logger.addHandler(native_handler)

  if log_dir:
    safe_mkdir(log_dir)
    log_filename = os.path.join(log_dir, log_name or 'pants.log')
    native_handler = create_native_pantsd_file_log_handler(level, native, log_filename)
    file_handler = native_handler
    logger.addHandler(native_handler)

  logger.setLevel(level)

  # This routes warnings through our loggers instead of straight to raw stderr.
  logging.captureWarnings(True)

  _maybe_configure_extended_logging(logger)

  return LoggingSetupResult(log_filename, file_handler)
python
{ "resource": "" }
q27305
ProtobufGen._jars_to_directories
train
def _jars_to_directories(self, target):
  """Extracts and maps jars to directories containing their contents.

  :returns: a set of filepaths to directories containing the contents of jar.
  """
  jar_import_products = self.context.products.get_data(JarImportProducts)
  return {self._extract_jar(coordinate, jar)
          for coordinate, jar in jar_import_products.imports(target)}
python
{ "resource": "" }
q27306
PythonInterpreterCache.partition_targets_by_compatibility
train
def partition_targets_by_compatibility(self, targets):
  """Partition targets by their compatibility constraints.

  :param targets: a list of `PythonTarget` objects
  :returns: (tgts_by_compatibilities, filters): a dict that maps compatibility constraints
    to a list of matching targets, and the aggregate set of compatibility constraints
    imposed by the target set
  :rtype: (dict(str, list), set)
  """
  tgts_by_compatibilities = defaultdict(list)
  filters = set()

  # Non-PythonTarget entries contribute nothing to either result.
  for target in (t for t in targets if isinstance(t, PythonTarget)):
    constraints = self.python_setup.compatibility_or_constraints(target)
    tgts_by_compatibilities[constraints].append(target)
    filters.update(constraints)
  return tgts_by_compatibilities, filters
python
{ "resource": "" }
q27307
PythonInterpreterCache._setup_cached
train
def _setup_cached(self, filters=()): """Find all currently-cached interpreters.""" for interpreter_dir in os.listdir(self._cache_dir): pi = self._interpreter_from_relpath(interpreter_dir, filters=filters) if pi: logger.debug('Detected interpreter {}: {}'.format(pi.binary, str(pi.identity))) yield pi
python
{ "resource": "" }
q27308
PythonInterpreterCache._setup_paths
train
def _setup_paths(self, paths, filters=()):
  """Find interpreters under paths, and cache them."""
  for interpreter in self._matching(PythonInterpreter.all(paths), filters=filters):
    identity_str = str(interpreter.identity)
    cached = self._interpreter_from_relpath(identity_str, filters=filters)
    if cached is None:
      # Not cached yet: set the interpreter up, then re-read it from the cache.
      self._setup_interpreter(interpreter, identity_str)
      cached = self._interpreter_from_relpath(identity_str, filters=filters)
    if cached:
      yield cached
python
{ "resource": "" }
q27309
PythonInterpreterCache.setup
train
def setup(self, filters=()): """Sets up a cache of python interpreters. :param filters: A sequence of strings that constrain the interpreter compatibility for this cache, using the Requirement-style format, e.g. ``'CPython>=3', or just ['>=2.7','<3']`` for requirements agnostic to interpreter class. :returns: A list of cached interpreters :rtype: list of :class:`pex.interpreter.PythonInterpreter` """ # We filter the interpreter cache itself (and not just the interpreters we pull from it) # because setting up some python versions (e.g., 3<=python<3.3) crashes, and this gives us # an escape hatch. filters = filters if any(filters) else self.python_setup.interpreter_constraints setup_paths = self.python_setup.interpreter_search_paths logger.debug( 'Initializing Python interpreter cache matching filters `{}` from paths `{}`'.format( ':'.join(filters), ':'.join(setup_paths))) interpreters = [] def unsatisfied_filters(): return [f for f in filters if len(list(self._matching(interpreters, [f]))) == 0] with OwnerPrintingInterProcessFileLock(path=os.path.join(self._cache_dir, '.file_lock')): interpreters.extend(self._setup_cached(filters=filters)) if not interpreters or unsatisfied_filters(): interpreters.extend(self._setup_paths(setup_paths, filters=filters)) for filt in unsatisfied_filters(): logger.debug('No valid interpreters found for {}!'.format(filt)) matches = list(self._matching(interpreters, filters=filters)) if len(matches) == 0: logger.debug('Found no valid interpreters!') logger.debug( 'Initialized Python interpreter cache with {}'.format(', '.join([x.binary for x in matches]))) return matches
python
{ "resource": "" }
q27310
BaseZincCompile.validate_arguments
train
def validate_arguments(log, whitelisted_args, args):
  """Validate that all arguments match whitelisted regexes."""
  valid_patterns = {re.compile(pattern): takes_arg
                    for pattern, takes_arg in whitelisted_args.items()}

  def consumed_slots(idx):
    # How many argv slots the argument at `idx` occupies (2 when it takes a value).
    current = args[idx]
    for pattern, takes_arg in valid_patterns.items():
      if pattern.match(current):
        return 2 if takes_arg else 1
    log.warn("Zinc argument '{}' is not supported, and is subject to change/removal!".format(current))
    return 1

  position = 0
  while position < len(args):
    position += consumed_slots(position)
python
{ "resource": "" }
q27311
BaseZincCompile._format_zinc_arguments
train
def _format_zinc_arguments(settings, distribution): """Extracts and formats the zinc arguments given in the jvm platform settings. This is responsible for the symbol substitution which replaces $JAVA_HOME with the path to an appropriate jvm distribution. :param settings: The jvm platform settings from which to extract the arguments. :type settings: :class:`JvmPlatformSettings` """ zinc_args = [ '-C-source', '-C{}'.format(settings.source_level), '-C-target', '-C{}'.format(settings.target_level), ] if settings.args: settings_args = settings.args if any('$JAVA_HOME' in a for a in settings.args): logger.debug('Substituting "$JAVA_HOME" with "{}" in jvm-platform args.' .format(distribution.home)) settings_args = (a.replace('$JAVA_HOME', distribution.home) for a in settings.args) zinc_args.extend(settings_args) return zinc_args
python
{ "resource": "" }
q27312
BaseZincCompile.scalac_classpath_entries
train
def scalac_classpath_entries(self):
  """Returns classpath entries for the scalac classpath."""
  platform = ScalaPlatform.global_instance()
  return platform.compiler_classpath_entries(self.context.products, self.context._scheduler)
python
{ "resource": "" }
q27313
BaseZincCompile.write_extra_resources
train
def write_extra_resources(self, compile_context):
  """Override write_extra_resources to produce plugin and annotation processor files."""
  target = compile_context.target
  classes_dir = compile_context.classes_dir.path
  if isinstance(target, ScalacPlugin):
    self._write_scalac_plugin_info(classes_dir, target)
  elif isinstance(target, JavacPlugin):
    self._write_javac_plugin_info(classes_dir, target)
  elif isinstance(target, AnnotationProcessor) and target.processors:
    processor_info_file = os.path.join(classes_dir, _PROCESSOR_INFO_FILE)
    self._write_processor_info(processor_info_file, target.processors)
python
{ "resource": "" }
q27314
BaseZincCompile._find_scalac_plugins
train
def _find_scalac_plugins(self, scalac_plugins, classpath):
  """Returns a map from plugin name to list of plugin classpath entries.

  The first entry in each list is the classpath entry containing the plugin metadata.
  The rest are the internal transitive deps of the plugin.

  This allows us to have in-repo plugins with dependencies (unlike javac, scalac doesn't load
  plugins or their deps from the regular classpath, so we have to provide these entries
  separately, in the -Xplugin: flag).

  Note that we don't currently support external plugins with dependencies, as we can't know which
  external classpath elements are required, and we'd have to put the entire external classpath
  on each -Xplugin: flag, which seems excessive. Instead, external plugins should be published as
  "fat jars" (which appears to be the norm, since SBT doesn't support plugins with dependencies
  anyway).
  """
  # Allow multiple flags and also comma-separated values in a single flag.
  plugin_names = {p for val in scalac_plugins for p in val.split(',')}
  if not plugin_names:
    return {}

  active_plugins = {}
  buildroot = get_buildroot()

  cp_product = self.context.products.get_data('runtime_classpath')
  for classpath_element in classpath:
    name = self._maybe_get_plugin_name(classpath_element)
    if name in plugin_names:
      plugin_target_closure = self._plugin_targets('scalac').get(name, [])
      # It's important to use relative paths, as the compiler flags get embedded in the zinc
      # analysis file, and we port those between systems via the artifact cache.
      rel_classpath_elements = [
        os.path.relpath(cpe, buildroot) for cpe in
        ClasspathUtil.internal_classpath(plugin_target_closure, cp_product, self._confs)]
      # If the plugin is external then rel_classpath_elements will be empty, so we take
      # just the external jar itself.
      rel_classpath_elements = rel_classpath_elements or [classpath_element]
      # Some classpath elements may be repeated, so we allow for that here.
      if active_plugins.get(name, rel_classpath_elements) != rel_classpath_elements:
        raise TaskError('Plugin {} defined in {} and in {}'.format(name, active_plugins[name],
                                                                   classpath_element))
      active_plugins[name] = rel_classpath_elements
      if len(active_plugins) == len(plugin_names):
        # We've found all the plugins, so return now to spare us from processing
        # of the rest of the classpath for no reason.
        return active_plugins

  # If we get here we must have unresolved plugins.
  unresolved_plugins = plugin_names - set(active_plugins.keys())
  raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins)))
python
{ "resource": "" }
q27315
BaseZincCompile._maybe_get_plugin_name
train
def _maybe_get_plugin_name(cls, classpath_element):
  """If classpath_element is a scalac plugin, returns its name.

  Returns None otherwise.
  """
  def process_info_file(cp_elem, info_file):
    # Parse the scalac plugin descriptor XML and pull out its <name> element.
    plugin_info = ElementTree.parse(info_file).getroot()
    if plugin_info.tag != 'plugin':
      raise TaskError('File {} in {} is not a valid scalac plugin descriptor'.format(
          _SCALAC_PLUGIN_INFO_FILE, cp_elem))
    return plugin_info.find('name').text

  if os.path.isdir(classpath_element):
    # Loose directory on the classpath: the descriptor, if any, is a plain file.
    try:
      with open(os.path.join(classpath_element, _SCALAC_PLUGIN_INFO_FILE), 'r') as plugin_info_file:
        return process_info_file(classpath_element, plugin_info_file)
    except IOError as e:
      # A missing descriptor just means "not a plugin"; any other IO error is real.
      if e.errno != errno.ENOENT:
        raise
  else:
    # Jar on the classpath: the descriptor, if any, is a zip entry.
    with open_zip(classpath_element, 'r') as jarfile:
      try:
        with closing(jarfile.open(_SCALAC_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
          return process_info_file(classpath_element, plugin_info_file)
      except KeyError:
        # No such entry in the jar: not a plugin.
        pass
  return None
python
{ "resource": "" }
q27316
clean_global_runtime_state
train
def clean_global_runtime_state(reset_subsystem=False):
  """Resets the global runtime state of a pants runtime for cleaner forking.

  :param bool reset_subsystem: Whether or not to clean Subsystem global state.
  """
  # Optionally reset Subsystem global state.
  if reset_subsystem:
    Subsystem.reset()
  # Reset Goals and Tasks.
  Goal.clear()
  # Reset global plugin state.
  BuildConfigInitializer.reset()
python
{ "resource": "" }
q27317
find_paths_breadth_first
train
def find_paths_breadth_first(from_target, to_target, log): """Yields the paths between from_target to to_target if they exist. The paths are returned ordered by length, shortest first. If there are cycles, it checks visited edges to prevent recrossing them.""" log.debug('Looking for all paths from {} to {}'.format(from_target.address.reference(), to_target.address.reference())) if from_target == to_target: yield [from_target] return visited_edges = set() to_walk_paths = deque([[from_target]]) while len(to_walk_paths) > 0: cur_path = to_walk_paths.popleft() target = cur_path[-1] if len(cur_path) > 1: prev_target = cur_path[-2] else: prev_target = None current_edge = (prev_target, target) if current_edge not in visited_edges: for dep in target.dependencies: dep_path = cur_path + [dep] if dep == to_target: yield dep_path else: to_walk_paths.append(dep_path) visited_edges.add(current_edge)
python
{ "resource": "" }
q27318
BuildGraph.apply_injectables
train
def apply_injectables(self, targets):
  """Given an iterable of `Target` instances, apply their transitive injectables."""
  target_types = {type(t) for t in targets}
  # Aggregate every subsystem declared by any of the target types.
  subsystem_deps = {s for t in target_types for s in t.subsystems()}
  for subsystem in subsystem_deps:
    # TODO: The is_initialized() check is primarily for tests and would be nice to do away with.
    if issubclass(subsystem, InjectablesMixin) and subsystem.is_initialized():
      subsystem.global_instance().injectables(self)
python
{ "resource": "" }
q27319
BuildGraph.reset
train
def reset(self):
  """Clear out the state of the BuildGraph, in particular Target mappings and dependencies.

  :API: public
  """
  self._target_by_address = OrderedDict()
  # Forward and reverse dependency edges, keyed by Address.
  self._target_dependencies_by_address = defaultdict(OrderedSet)
  self._target_dependees_by_address = defaultdict(OrderedSet)
  self._derived_from_by_derivative = {}  # Address -> Address.
  self._derivatives_by_derived_from = defaultdict(list)  # Address -> list of Address.
  # Addresses of targets injected synthetically (e.g. by codegen).
  self.synthetic_addresses = set()
python
{ "resource": "" }
q27320
BuildGraph.get_target_from_spec
train
def get_target_from_spec(self, spec, relative_to=''):
  """Converts `spec` into an address and returns the result of `get_target`

  :API: public
  """
  address = Address.parse(spec, relative_to=relative_to)
  return self.get_target(address)
python
{ "resource": "" }
q27321
BuildGraph.dependencies_of
train
def dependencies_of(self, address):
  """Returns the dependencies of the Target at `address`.

  This method asserts that the address given is actually in the BuildGraph.

  :API: public
  """
  known_targets = self._target_by_address
  assert address in known_targets, (
    'Cannot retrieve dependencies of {address} because it is not in the BuildGraph.'
    .format(address=address)
  )
  return self._target_dependencies_by_address[address]
python
{ "resource": "" }
q27322
BuildGraph.dependents_of
train
def dependents_of(self, address):
  """Returns the addresses of the targets that depend on the target at `address`.

  This method asserts that the address given is actually in the BuildGraph.

  :API: public
  """
  known_targets = self._target_by_address
  assert address in known_targets, (
    'Cannot retrieve dependents of {address} because it is not in the BuildGraph.'
    .format(address=address)
  )
  return self._target_dependees_by_address[address]
python
{ "resource": "" }
q27323
BuildGraph.get_derived_from
train
def get_derived_from(self, address):
  """Get the target the specified target was derived from.

  If a Target was injected programmatically, e.g. from codegen, this allows us to trace its
  ancestry.  If a Target is not derived, default to returning itself.

  :API: public
  """
  # Non-derived targets fall back to their own address.
  origin_address = self._derived_from_by_derivative.get(address, address)
  return self.get_target(origin_address)
python
{ "resource": "" }
q27324
BuildGraph.get_direct_derivatives
train
def get_direct_derivatives(self, address):
  """Get all targets derived directly from the specified target.

  Note that the specified target itself is not returned.

  :API: public
  """
  derivative_addresses = self._derivatives_by_derived_from.get(address, [])
  return [self.get_target(derivative) for derivative in derivative_addresses]
python
{ "resource": "" }
q27325
BuildGraph.get_all_derivatives
train
def get_all_derivatives(self, address):
  """Get all targets derived directly or indirectly from the specified target.

  Note that the specified target itself is not returned.

  :API: public
  """
  # All direct derivatives first, then each child's subtree, preserving the
  # original breadth-then-depth ordering.
  result = []
  children = self.get_direct_derivatives(address)
  result.extend(children)
  for child in children:
    result.extend(self.get_all_derivatives(child.address))
  return result
python
{ "resource": "" }
q27326
BuildGraph.inject_target
train
def inject_target(self, target, dependencies=None, derived_from=None, synthetic=False):
  """Injects a fully realized Target into the BuildGraph.

  :API: public

  :param Target target: The Target to inject.
  :param list<Address> dependencies: The Target addresses that `target` depends on.
  :param Target derived_from: The Target that `target` was derived from, usually as a result
    of codegen.
  :param bool synthetic: Whether to flag this target as synthetic, even if it isn't derived from
    another target.
  """
  if self.contains_address(target.address):
    raise ValueError('Attempted to inject synthetic {target} derived from {derived_from}'
                     ' into the BuildGraph with address {address}, but there is already a Target'
                     ' {existing_target} with that address'
                     .format(target=target, derived_from=derived_from, address=target.address,
                             existing_target=self.get_target(target.address)))

  dependencies = dependencies or frozenset()
  address = target.address

  # NOTE(review): this duplicates the contains_address() guard above; both raise on the
  # same condition.
  if address in self._target_by_address:
    raise ValueError('A Target {existing_target} already exists in the BuildGraph at address'
                     ' {address}. Failed to insert {target}.'
                     .format(existing_target=self._target_by_address[address], address=address,
                             target=target))

  if derived_from:
    if not self.contains_address(derived_from.address):
      raise ValueError('Attempted to inject synthetic {target} derived from {derived_from}'
                       ' into the BuildGraph, but {derived_from} was not in the BuildGraph.'
                       ' Synthetic Targets must be derived from no Target (None) or from a'
                       ' Target already in the BuildGraph.'
                       .format(target=target, derived_from=derived_from))
    # Record the derivation link in both directions.
    self._derived_from_by_derivative[target.address] = derived_from.address
    self._derivatives_by_derived_from[derived_from.address].append(target.address)

  if derived_from or synthetic:
    self.synthetic_addresses.add(address)

  self._target_by_address[address] = target

  # Wire up the dependency edges last, once the target itself is registered.
  for dependency_address in dependencies:
    self.inject_dependency(dependent=address, dependency=dependency_address)
python
{ "resource": "" }
q27327
BuildGraph.inject_dependency
train
def inject_dependency(self, dependent, dependency):
  """Injects a dependency from `dependent` onto `dependency`.

  It is an error to inject a dependency if the dependent doesn't already exist, but the reverse
  is not an error.

  :API: public

  :param Address dependent: The (already injected) address of a Target to which `dependency`
    is being added.
  :param Address dependency: The dependency to be injected.
  """
  if dependent not in self._target_by_address:
    raise ValueError('Cannot inject dependency from {dependent} on {dependency} because the'
                     ' dependent is not in the BuildGraph.'
                     .format(dependent=dependent, dependency=dependency))

  # TODO(pl): Unfortunately this is an unhelpful time to error due to a cycle.  Instead, we warn
  # and allow the cycle to appear.  It is the caller's responsibility to call sort_targets on the
  # entire graph to generate a friendlier CycleException that actually prints the cycle.
  # Alternatively, we could call sort_targets after every inject_dependency/inject_target, but
  # that could have nasty performance implications.  Alternative 2 would be to have an internal
  # data structure of the topologically sorted graph which would have acceptable amortized
  # performance for inserting new nodes, and also cycle detection on each insert.
  if dependency not in self._target_by_address:
    logger.warning('Injecting dependency from {dependent} on {dependency}, but the dependency'
                   ' is not in the BuildGraph.  This probably indicates a dependency cycle, but'
                   ' it is not an error until sort_targets is called on a subgraph containing'
                   ' the cycle.'
                   .format(dependent=dependent, dependency=dependency))

  if dependency in self.dependencies_of(dependent):
    logger.debug('{dependent} already depends on {dependency}'
                 .format(dependent=dependent, dependency=dependency))
  else:
    # Maintain both the forward and reverse edge indices.
    self._target_dependencies_by_address[dependent].add(dependency)
    self._target_dependees_by_address[dependency].add(dependent)
python
{ "resource": "" }
q27328
BuildGraph._walk_factory
train
def _walk_factory(self, dep_predicate): """Construct the right context object for managing state during a transitive walk.""" walk = None if dep_predicate: walk = self.DepPredicateWalk(dep_predicate) else: walk = self.NoDepPredicateWalk() return walk
python
{ "resource": "" }
q27329
BuildGraph.walk_transitive_dependency_graph
train
def walk_transitive_dependency_graph(self, addresses, work, predicate=None, postorder=False,
                                     dep_predicate=None, prelude=None, epilogue=None):
  """Given a work function, walks the transitive dependency closure of `addresses` using DFS.

  :API: public

  :param list<Address> addresses: The closure of `addresses` will be walked.
  :param function work: The function that will be called on every target in the closure using
    the specified traversal order.
  :param bool postorder: When ``True``, the traversal order is postorder (children before
    parents), else it is preorder (parents before children).
  :param function predicate: If this parameter is not given, no Targets will be filtered
    out of the closure.  If it is given, any Target which fails the predicate will not be
    walked, nor will its dependencies.  Thus predicate effectively trims out any subgraph
    that would only be reachable through Targets that fail the predicate.
  :param function dep_predicate: Takes two parameters, the current target and the dependency of
    the current target.  If this parameter is not given, no dependencies will be filtered
    when traversing the closure.  If it is given, when the predicate fails, the edge to the
    dependency will not be expanded.
  :param function prelude: Function to run before any dependency expansion.
    It takes the currently explored target as an argument.
    If ``postorder`` is ``False``, it will run after the current node is visited.
    It is not affected by ``dep_predicate``.
    It is not run if ``predicate`` does not succeed.
  :param function epilogue: Function to run after children nodes are visited.
    It takes the currently explored target as an argument.
    If ``postorder`` is ``True``, it runs before visiting the current node.
    It is not affected by ``dep_predicate``.
    It is not run if ``predicate`` is not passed.
  """
  # The walk object tracks expansion/work state for the duration of this traversal.
  walk = self._walk_factory(dep_predicate)

  def _walk_rec(addr, level=0):
    # If we've followed an edge to this address, stop recursing.
    if not walk.expand_once(addr, level):
      return

    target = self._target_by_address[addr]

    # A failing predicate prunes this target and its whole subgraph.
    if predicate and not predicate(target):
      return

    if not postorder and walk.do_work_once(addr):
      work(target)

    if prelude:
      prelude(target)

    for dep_address in self._target_dependencies_by_address[addr]:
      if walk.expanded_or_worked(dep_address):
        continue
      if walk.dep_predicate(target, self._target_by_address[dep_address], level):
        _walk_rec(dep_address, level + 1)

    if epilogue:
      epilogue(target)

    if postorder and walk.do_work_once(addr):
      work(target)

  for address in addresses:
    _walk_rec(address)
python
{ "resource": "" }
q27330
BuildGraph.transitive_dependees_of_addresses
train
def transitive_dependees_of_addresses(self, addresses, predicate=None, postorder=False):
  """Returns all transitive dependees of `addresses`.

  Note that this uses `walk_transitive_dependee_graph` and the predicate is passed through,
  hence it trims graphs rather than just filtering out Targets that do not match the predicate.
  See `walk_transitive_dependee_graph for more detail on `predicate`.

  :API: public

  :param list<Address> addresses: The root addresses to transitively close over.
  :param function predicate: The predicate passed through to `walk_transitive_dependee_graph`.
  """
  collected = OrderedSet()
  self.walk_transitive_dependee_graph(addresses, collected.add, predicate=predicate,
                                      postorder=postorder)
  return collected
python
{ "resource": "" }
q27331
BuildGraph.transitive_subgraph_of_addresses
train
def transitive_subgraph_of_addresses(self, addresses, *vargs, **kwargs):
  """Returns all transitive dependencies of `addresses`.

  Note that this uses `walk_transitive_dependencies_graph` and the predicate is passed through,
  hence it trims graphs rather than just filtering out Targets that do not match the predicate.
  See `walk_transitive_dependency_graph for more detail on `predicate`.

  :API: public

  :param list<Address> addresses: The root addresses to transitively close over.
  :param function predicate: The predicate passed through to
    `walk_transitive_dependencies_graph`.
  :param bool postorder: When ``True``, the traversal order is postorder (children before
    parents), else it is preorder (parents before children).
  :param function predicate: If this parameter is not given, no Targets will be filtered
    out of the closure.  If it is given, any Target which fails the predicate will not be
    walked, nor will its dependencies.  Thus predicate effectively trims out any subgraph
    that would only be reachable through Targets that fail the predicate.
  :param function dep_predicate: Takes two parameters, the current target and the dependency of
    the current target.  If this parameter is not given, no dependencies will be filtered
    when traversing the closure.  If it is given, when the predicate fails, the edge to the
    dependency will not be expanded.
  """
  collected = OrderedSet()
  self.walk_transitive_dependency_graph(addresses, collected.add, *vargs, **kwargs)
  return collected
python
{ "resource": "" }
q27332
BuildGraph.transitive_subgraph_of_addresses_bfs
train
def transitive_subgraph_of_addresses_bfs(self, addresses, predicate=None, dep_predicate=None):
  """Returns the transitive dependency closure of `addresses` using BFS.

  :API: public

  :param list<Address> addresses: The closure of `addresses` will be walked.
  :param function predicate: If this parameter is not given, no Targets will be filtered
    out of the closure.  If it is given, any Target which fails the predicate will not be
    walked, nor will its dependencies.  Thus predicate effectively trims out any subgraph
    that would only be reachable through Targets that fail the predicate.
  :param function dep_predicate: Takes two parameters, the current target and the dependency of
    the current target.  If this parameter is not given, no dependencies will be filtered
    when traversing the closure.  If it is given, when the predicate fails, the edge to the
    dependency will not be expanded.
  """
  # The walk object tracks expansion/work state for the duration of this traversal.
  walk = self._walk_factory(dep_predicate)

  ordered_closure = OrderedSet()
  # Each queue entry carries its BFS depth so the walk/dep_predicate can see the level.
  to_walk = deque((0, addr) for addr in addresses)
  while len(to_walk) > 0:
    level, address = to_walk.popleft()

    if not walk.expand_once(address, level):
      continue

    target = self._target_by_address[address]
    # A failing predicate prunes this target and its whole subgraph.
    if predicate and not predicate(target):
      continue
    if walk.do_work_once(address):
      ordered_closure.add(target)
    for dep_address in self._target_dependencies_by_address[address]:
      if walk.expanded_or_worked(dep_address):
        continue
      if walk.dep_predicate(target, self._target_by_address[dep_address], level):
        to_walk.append((level + 1, dep_address))
  return ordered_closure
python
{ "resource": "" }
q27333
IndexableJavaTargets.get
train
def get(self, context):
  """Return the indexable targets in the given context.

  Computes them lazily from the given context.  They are then fixed for the duration of the
  run, even if this method is called again with a different context.
  """
  if self.get_options().recursive:
    requested_targets = context.targets(exclude_scopes=Scope(self.get_options().exclude_scopes))
  else:
    requested_targets = list(context.target_roots)

  # We want to act on targets derived from the specified, e.g., if acting on a binary
  # jar_library we actually want to act on the derived java_library wrapping the decompiled
  # sources.
  expanded_targets = list(requested_targets)
  for requested in requested_targets:
    expanded_targets.extend(context.build_graph.get_all_derivatives(requested.address))

  indexable = [t for t in expanded_targets
               if isinstance(t, JvmTarget) and t.has_sources('.java')]
  return tuple(sorted(indexable, key=lambda t: t.address.spec))
python
{ "resource": "" }
q27334
OptionsBootstrapper.get_config_file_paths
train
def get_config_file_paths(env, args):
  """Get the location of the config files.

  The locations are specified by the --pants-config-files option. However we need to load the
  config in order to process the options. This method special-cases --pants-config-files
  in order to solve this chicken-and-egg problem.

  Note that, obviously, it's not possible to set the location of config files in a config file.
  Doing so will have no effect.
  """
  # This exactly mirrors the logic applied in Option to all regular options. Note that we'll
  # also parse --pants-config as a regular option later, but there's no harm in that. In fact,
  # it's preferable, so that any code that happens to want to know where we read config from
  # can inspect the option.
  flag = '--pants-config-files='
  evars = ['PANTS_GLOBAL_PANTS_CONFIG_FILES', 'PANTS_PANTS_CONFIG_FILES', 'PANTS_CONFIG_FILES']

  components = []
  default_config_file = get_default_pants_config_file()
  if os.path.isfile(default_config_file):
    components.append(ListValueComponent.create(default_config_file))
  for evar in evars:
    if evar in env:
      components.append(ListValueComponent.create(env[evar]))
      break  # Only the highest-precedence env var present is honored.

  for arg in args:
    # Technically this is very slightly incorrect, as we don't check scope. But it's
    # very unlikely that any task or subsystem will have an option named --pants-config-files.
    # TODO: Enforce a ban on options with a --pants- prefix outside our global options?
    if arg.startswith(flag):
      components.append(ListValueComponent.create(arg[len(flag):]))

  return ListValueComponent.merge(components).val
python
{ "resource": "" }
q27335
OptionsBootstrapper.create
train
def create(cls, env=None, args=None):
  """Parses the minimum amount of configuration necessary to create an OptionsBootstrapper.

  :param env: An environment dictionary, or None to use `os.environ`.
  :param args: An args array, or None to use `sys.argv`.
  :returns: A constructed instance of `cls` holding the env, args, and fully loaded Config.
  """
  # Only PANTS_* env vars can influence options.
  env = {k: v for k, v in (os.environ if env is None else env).items() if k.startswith('PANTS_')}
  args = tuple(sys.argv if args is None else args)

  flags = set()
  short_flags = set()

  def filecontent_for(path):
    return FileContent(ensure_text(path), read_file(path, binary_mode=True))

  # Passed to the registrar below in place of a real register function, purely to
  # harvest the set of recognized bootstrap flags (including --no- forms of booleans).
  def capture_the_flags(*args, **kwargs):
    for arg in args:
      flags.add(arg)
      if len(arg) == 2:
        short_flags.add(arg)
      elif kwargs.get('type') == bool:
        flags.add('--no-{}'.format(arg[2:]))

  GlobalOptionsRegistrar.register_bootstrap_options(capture_the_flags)

  def is_bootstrap_option(arg):
    components = arg.split('=', 1)
    if components[0] in flags:
      return True
    # Short flags (e.g. -l) may have their value fused on (e.g. -ldebug).
    for flag in short_flags:
      if arg.startswith(flag):
        return True
    return False

  # Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
  # Stop before '--' since args after that are pass-through and may have duplicate names to our
  # bootstrap options.
  bargs = tuple(filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != '--', args)))

  config_file_paths = cls.get_config_file_paths(env=env, args=args)
  config_files_products = [filecontent_for(p) for p in config_file_paths]
  pre_bootstrap_config = Config.load_file_contents(config_files_products)

  initial_bootstrap_options = cls.parse_bootstrap_options(env, bargs, pre_bootstrap_config)
  bootstrap_option_values = initial_bootstrap_options.for_global_scope()

  # Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
  # from (typically pants.ini), then config override, then rcfiles.
  full_configpaths = pre_bootstrap_config.sources()
  if bootstrap_option_values.pantsrc:
    rcfiles = [os.path.expanduser(str(rcfile)) for rcfile in bootstrap_option_values.pantsrc_files]
    existing_rcfiles = list(filter(os.path.exists, rcfiles))
    full_configpaths.extend(existing_rcfiles)

  full_config_files_products = [filecontent_for(p) for p in full_configpaths]
  post_bootstrap_config = Config.load_file_contents(
    full_config_files_products,
    seed_values=bootstrap_option_values
  )

  # Sorted for a deterministic (hashable) identity for this bootstrapper.
  env_tuples = tuple(sorted(iteritems(env), key=lambda x: x[0]))
  return cls(env_tuples=env_tuples, bootstrap_args=bargs, args=args, config=post_bootstrap_config)
python
{ "resource": "" }
q27336
OptionsBootstrapper.bootstrap_options
train
def bootstrap_options(self):
  """The post-bootstrap options, computed from the env, args, and fully discovered Config.

  Re-computing options once Config has been fully expanded lets us pick up bootstrap
  values (such as backends) from a config override file. Because this is derivable from
  the in-memory env, args, and config, it is not part of the object's identity.
  """
  env, args, config = self.env, self.bootstrap_args, self.config
  return self.parse_bootstrap_options(env, args, config)
python
{ "resource": "" }
q27337
OptionsBootstrapper.verify_configs_against_options
train
def verify_configs_against_options(self, options):
  """Verify all loaded configs have correct scopes and options.

  :param options: Fully bootstrapped valid options.
  :return: None.
  """
  errors = []
  for config in self.config.configs():
    for section in config.sections():
      scope = GLOBAL_SCOPE if section == GLOBAL_SCOPE_CONFIG_SECTION else section
      try:
        # Only catch ConfigValidationError. Other exceptions will be raised directly.
        valid_options_under_scope = set(options.for_scope(scope))
      except Config.ConfigValidationError:
        errors.append("Invalid scope [{}] in {}".format(section, config.configpath))
      else:
        # All the options specified under [`section`] in `config` excluding bootstrap defaults.
        specified = (set(config.configparser.options(section)) -
                     set(config.configparser.defaults()))
        for option in specified:
          if option not in valid_options_under_scope:
            errors.append("Invalid option '{}' under [{}] in {}".format(option, section,
                                                                        config.configpath))

  if errors:
    for error in errors:
      logger.error(error)
    raise Config.ConfigValidationError("Invalid config entries detected. "
                                       "See log for details on which entries to update or remove.\n"
                                       "(Specify --no-verify-config to disable this check.)")
python
{ "resource": "" }
q27338
CmdLineSpecParser.parse_spec
train
def parse_spec(self, spec):
  """Parse the given spec into a `specs.Spec` object.

  :param spec: a single spec string.
  :return: a single specs.Specs object.
  :raises: CmdLineSpecParser.BadSpecError if the address selector could not be parsed.
  """
  if spec.endswith('::'):
    # 'path::' selects the path and everything below it.
    return DescendantAddresses(self._normalize_spec_path(spec[:-len('::')]))
  if spec.endswith(':'):
    # 'path:' selects all targets directly in the path.
    return SiblingAddresses(self._normalize_spec_path(spec[:-len(':')]))
  # 'path:name' selects one target; a bare 'path' defaults the name to the basename.
  path_part, sep, name_part = spec.rpartition(':')
  if sep:
    return SingleAddress(self._normalize_spec_path(path_part), name_part)
  spec_path = self._normalize_spec_path(spec)
  return SingleAddress(spec_path, os.path.basename(spec_path))
python
{ "resource": "" }
q27339
MutexTaskMixin._require_homogeneous_roots
train
def _require_homogeneous_roots(self, accept_predicate, reject_predicate):
  """Ensures that there is no ambiguity in the context according to the given predicates.

  If any targets in the context satisfy the accept_predicate, and no targets satisfy the
  reject_predicate, returns the accepted targets.

  If no targets satisfy the accept_predicate, returns None.

  Otherwise throws TaskError.
  """
  if len(self.context.target_roots) == 0:
    raise self.NoActivationsError('No target specified.')

  def resolve(targets):
    # Recursively resolve target aliases. The exact-type check is deliberate: a plain
    # `Target` is an alias to expand, whereas subclasses are concrete targets to yield.
    for t in targets:
      if type(t) == Target:
        for r in resolve(t.dependencies):
          yield r
      else:
        yield t

  expanded_roots = list(resolve(self.context.target_roots))

  accepted = list(filter(accept_predicate, expanded_roots))
  rejected = list(filter(reject_predicate, expanded_roots))
  if len(accepted) == 0:
    # no targets were accepted, regardless of rejects
    return None
  elif len(rejected) == 0:
    # we have at least one accepted target, and no rejected targets
    return accepted
  else:
    # both accepted and rejected targets
    # TODO: once https://github.com/pantsbuild/pants/issues/425 lands, we should add
    # language-specific flags that would resolve the ambiguity here
    def render_target(target):
      return '{} (a {})'.format(target.address.reference(), target.type_alias)
    raise self.IncompatibleActivationsError('Mutually incompatible targets specified: {} vs {} '
                                            '(and {} others)'
                                            .format(render_target(accepted[0]),
                                                    render_target(rejected[0]),
                                                    len(accepted) + len(rejected) - 2))
python
{ "resource": "" }
q27340
check_header
train
def check_header(filename, is_newly_created=False):
  """Raises `HeaderCheckFailure` if the header doesn't match.

  :param filename: path of the python file whose header to validate.
  :param is_newly_created: if True, the copyright year must be exactly the current year;
    otherwise any year matching the current-century pattern is accepted.
  """
  try:
    with open(filename, 'r') as pyfile:
      buf = ""
      # The expected header is 6 lines long (after any shebang).
      for lineno in range(1,7):
        line = pyfile.readline()
        # Skip shebang line
        if lineno == 1 and line.startswith('#!'):
          line = pyfile.readline()
        # Check if the copyright year can be parsed as within the current century, or the current
        # year if it is a new file.
        if line.startswith("# Copyright"):
          # "# Copyright " is 12 chars, so [12:16] is the 4-digit year.
          year = line[12:16]
          if is_newly_created:
            if not year == _current_year:
              raise HeaderCheckFailure('{}: copyright year must be {} (was {})'
                                       .format(filename, _current_year, year))
          else:
            if not _current_century_regex.match(year):
              raise HeaderCheckFailure(
                "{}: copyright year must match '{}' (was {}): current year is {}"
                .format(filename, _current_century_regex.pattern, year, _current_year))
          # Normalize the year so the buffer can be compared against the fixed template.
          line = "# Copyright YYYY" + line[16:]
        buf += line
      if buf != EXPECTED_HEADER:
        raise HeaderCheckFailure('{}: failed to parse header at all'
                                 .format(filename))
  except IOError as e:
    raise HeaderCheckFailure('{}: error while reading input ({})'
                             .format(filename, str(e)))
python
{ "resource": "" }
q27341
check_dir
train
def check_dir(directory, newly_created_files):
  """Check the headers of all python files under `directory`.

  :param directory: root directory to walk for `*.py` files (``__init__.py`` is skipped).
  :param newly_created_files: collection of paths known to be newly created; these must carry
    the exact current copyright year rather than any current-century year.
  :returns: list of error messages for files that fail the check.
  """
  header_parse_failures = []
  for root, dirs, files in os.walk(directory):
    for f in files:
      if f.endswith('.py') and os.path.basename(f) != '__init__.py':
        filename = os.path.join(root, f)
        try:
          check_header(filename, filename in newly_created_files)
        except HeaderCheckFailure as e:
          # `e.message` was removed in Python 3; str(e) is portable across 2 and 3.
          header_parse_failures.append(str(e))
  return header_parse_failures
python
{ "resource": "" }
q27342
NodeTask.get_package_manager
train
def get_package_manager(self, target=None):
  """Returns package manager for target argument or global config."""
  requested = None
  if target:
    # A target may override the global package manager via its 'package_manager' field.
    field = target.payload.get_field('package_manager')
    if field:
      requested = field.value
  return self.node_distribution.get_package_manager(package_manager=requested)
python
{ "resource": "" }
q27343
NodeTask.execute_node
train
def execute_node(self, args, workunit_name, workunit_labels=None, node_paths=None):
  """Executes node passing the given args.

  :param list args: The command line args to pass to `node`.
  :param string workunit_name: A name for the execution's work unit; defaults to 'node'.
  :param list workunit_labels: Any extra :class:`pants.base.workunit.WorkUnitLabel`s to apply.
  :param list node_paths: A list of node module paths to be included.
  :returns: A tuple of (returncode, command).
  :rtype: A tuple of (int,
          :class:`pants.contrib.node.subsystems.node_distribution.NodeDistribution.Command`)
  """
  command = self.node_distribution.node_command(args=args, node_paths=node_paths)
  return self._execute_command(command,
                               workunit_name=workunit_name,
                               workunit_labels=workunit_labels)
python
{ "resource": "" }
q27344
NodeTask.add_package
train
def add_package(
  self, target=None, package_manager=None, package=None,
  type_option=None, version_option=None,
  node_paths=None, workunit_name=None, workunit_labels=None):
  """Add an additional package using requested package_manager.

  Falls back to the package manager implied by `target` (or global config) when
  `package_manager` is not given explicitly.
  """
  pm = package_manager or self.get_package_manager(target=target)
  command = pm.add_package(
    package,
    type_option=type_option,
    version_option=version_option,
    node_paths=node_paths,
  )
  return self._execute_command(
    command, workunit_name=workunit_name, workunit_labels=workunit_labels)
python
{ "resource": "" }
q27345
NodeTask.install_module
train
def install_module(
  self, target=None, package_manager=None,
  install_optional=False, production_only=False, force=False,
  node_paths=None, frozen_lockfile=None,
  workunit_name=None, workunit_labels=None):
  """Installs node module using requested package_manager.

  Falls back to the package manager implied by `target` (or global config) when
  `package_manager` is not given explicitly.
  """
  pm = package_manager or self.get_package_manager(target=target)
  command = pm.install_module(
    install_optional=install_optional,
    force=force,
    production_only=production_only,
    node_paths=node_paths,
    frozen_lockfile=frozen_lockfile
  )
  return self._execute_command(
    command, workunit_name=workunit_name, workunit_labels=workunit_labels)
python
{ "resource": "" }
q27346
NodeTask._execute_command
train
def _execute_command(self, command, workunit_name=None, workunit_labels=None):
  """Executes a node or npm command via self._run_node_distribution_command.

  :param NodeDistribution.Command command: The command to run.
  :param string workunit_name: A name for the execution's work unit; default command.executable.
  :param list workunit_labels: Any extra :class:`pants.base.workunit.WorkUnitLabel`s to apply.
  :returns: A tuple of (returncode, command).
  :rtype: A tuple of (int,
          :class:`pants.contrib.node.subsystems.node_distribution.NodeDistribution.Command`)
  """
  name = workunit_name or command.executable
  # The TOOL label is always applied; callers may add more.
  labels = {WorkUnitLabel.TOOL}.union(workunit_labels or ())
  with self.context.new_workunit(name=name, labels=labels, cmd=str(command)) as workunit:
    returncode = self._run_node_distribution_command(command, workunit)
    outcome = WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE
    workunit.set_outcome(outcome)
    return returncode, command
python
{ "resource": "" }
q27347
NodeTask._run_node_distribution_command
train
def _run_node_distribution_command(self, command, workunit): """Runs a NodeDistribution.Command for _execute_command and returns its return code. Passes any additional kwargs to command.run (which passes them, modified, to subprocess.Popen). Override this in a Task subclass to do something more complicated than just calling command.run() and returning the result of wait(). :param NodeDistribution.Command command: The command to run. :param WorkUnit workunit: The WorkUnit the command is running under. :returns: returncode :rtype: int """ process = command.run(stdout=workunit.output('stdout'), stderr=workunit.output('stderr')) return process.wait()
python
{ "resource": "" }
q27348
load_config
train
def load_config(json_path):
  """Load config info from a .json file and return it.

  :param json_path: path to the JSON config file.
  :returns: the parsed config dict.
  :raises AssertionError: if the config's first 'tree' entry is not the 'index' page.
  """
  with open(json_path, 'r') as json_file:
    # json.load streams directly from the file object; no need to read() first.
    config = json.load(json_file)
  # sanity-test the config, with a message that points at the offending file:
  assert config['tree'][0]['page'] == 'index', (
    'Invalid config in {}: first page in "tree" must be "index"'.format(json_path))
  return config
python
{ "resource": "" }
q27349
load_soups
train
def load_soups(config):
  """Generate BeautifulSoup AST for each page listed in config.

  :returns: dict mapping page name -> parsed soup.
  """
  def parse(path):
    with open(path, 'r') as src:
      return beautiful_soup(src.read(), features='html.parser')

  return {page: parse(path) for page, path in config['sources'].items()}
python
{ "resource": "" }
q27350
fixup_internal_links
train
def fixup_internal_links(config, soups): """Find href="..." links that link to pages in our docset; fix them up. We don't preserve relative paths between files as we copy-transform them from source to dest. So adjust the paths to work with new locations. """ # Pages can come from different dirs; they can go to different dirs. # Thus, there's some relative-path-computing here. reverse_directory = {} for d, s in config['sources'].items(): reverse_directory[s] = d for name, soup in soups.items(): old_src_dir = os.path.dirname(config['sources'][name]) for tag in soup.find_all(True): if not 'href' in tag.attrs: continue old_rel_path = tag['href'].split('#')[0] old_dst = os.path.normpath(os.path.join(old_src_dir, old_rel_path)) if not old_dst in reverse_directory: continue new_dst = reverse_directory[old_dst] + '.html' new_rel_path = rel_href(name, new_dst) # string replace instead of assign to not loose anchor in foo.html#anchor tag['href'] = tag['href'].replace(old_rel_path, new_rel_path, 1)
python
{ "resource": "" }
q27351
transform_soups
train
def transform_soups(config, soups, precomputed):
  """Mutate our soups to be better when we write them out later.

  Note: the call order below matters; each step feeds the next.
  """
  fixup_internal_links(config, soups)
  ensure_headings_linkable(soups)

  # Do this after ensure_headings_linkable so that there will be links.
  generate_page_tocs(soups, precomputed)
  link_pantsrefs(soups, precomputed)
python
{ "resource": "" }
q27352
get_title
train
def get_title(soup):
  """Given a soup, pick out a title.

  Prefers the <title> tag, falls back to the first <h1>, else the empty string.
  """
  for candidate in (soup.title, soup.h1):
    if candidate:
      return candidate.string
  return ''
python
{ "resource": "" }
q27353
hdepth
train
def hdepth(tag):
  """Compute an h tag's "outline depth".

  E.g., h1 at top level is 1, h1 in a section is 2, h2 at top level is 2.
  """
  if not _heading_re.search(tag.name):
    raise TaskError("Can't compute heading depth of non-heading {}".format(tag))
  depth = int(tag.name[1], 10)  # get the 2 from 'h2'
  # Each enclosing <section> pushes the heading one level deeper.
  ancestor = tag.parent
  while ancestor:
    if ancestor.name == 'section':
      depth += 1
    ancestor = ancestor.parent
  return depth
python
{ "resource": "" }
q27354
CoursierMixin._compute_jars_to_resolve_and_pin
train
def _compute_jars_to_resolve_and_pin(raw_jars, artifact_set, manager):
  """
  This method provides settled lists of jar dependencies and coordinates
  based on conflict management.

  :param raw_jars: a collection of `JarDependencies`
  :param artifact_set: PinnedJarArtifactSet
  :param manager: JarDependencyManagement
  :return: (list of settled `JarDependency`, set of pinned `M2Coordinate`)
  """
  if artifact_set is None:
    artifact_set = PinnedJarArtifactSet()

  # Pinned coordinates that no raw jar referenced; whatever remains after the loop
  # below must still be force-resolved, so it is returned to the caller.
  untouched_pinned_artifact = {M2Coordinate.create(x) for x in artifact_set}
  jar_list = list(raw_jars)

  for i, dep in enumerate(jar_list):
    direct_coord = M2Coordinate.create(dep)

    # Portion to manage pinned jars in case of conflict
    if direct_coord in artifact_set:
      managed_coord = artifact_set[direct_coord]
      untouched_pinned_artifact.remove(managed_coord)

      if direct_coord.rev != managed_coord.rev:
        # It may be necessary to actually change the version number of the jar we want to resolve
        # here, because overrides do not apply directly (they are exclusively transitive). This is
        # actually a good thing, because it gives us more control over what happens.
        coord = manager.resolve_version_conflict(managed_coord, direct_coord, force=dep.force)

        # Once a version is settled, we force it anyway
        jar_list[i] = dep.copy(rev=coord.rev, force=True)

  return jar_list, untouched_pinned_artifact
python
{ "resource": "" }
q27355
CoursierMixin.resolve
train
def resolve(self, targets, compile_classpath, sources, javadoc, executor):
  """
  This is the core function for coursier resolve.

  Validation strategy:

  1. All targets are going through the `invalidated` to get fingerprinted in the target level.
     No cache is fetched at this stage because it is disabled.
  2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
     are fingerprinted together, because each run of 3rdparty resolve is context sensitive.

  Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation
  of each coursier run happened within that context.

  Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
  Currently it is disabled due to absolute paths in the coursier results.

  :param targets: a collection of targets to do 3rdparty resolve against
  :param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
  :param sources: if True, fetch sources for 3rdparty
  :param javadoc: if True, fetch javadoc for 3rdparty
  :param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
  :return: n/a
  """
  manager = JarDependencyManagement.global_instance()

  jar_targets = manager.targets_by_artifact_set(targets)

  executor = executor or SubprocessExecutor(DistributionLocator.cached())
  if not isinstance(executor, Executor):
    raise ValueError('The executor argument must be an Executor instance, given {} of type {}'.format(
      executor, type(executor)))

  for artifact_set, target_subset in jar_targets.items():
    # TODO(wisechengyi): this is the only place we are using IvyUtil method, which isn't specific to ivy really.
    raw_jar_deps, global_excludes = IvyUtils.calculate_classpath(target_subset)

    # ['sources'] * False = [], ['sources'] * True = ['sources']
    confs_for_fingerprint = ['sources'] * sources + ['javadoc'] * javadoc
    fp_strategy = CoursierResolveFingerprintStrategy(confs_for_fingerprint)

    compile_classpath.add_excludes_for_targets(target_subset)

    with self.invalidated(target_subset,
                          invalidate_dependents=False,
                          silent=False,
                          fingerprint_strategy=fp_strategy) as invalidation_check:

      if not invalidation_check.all_vts:
        continue

      resolve_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)

      vt_set_results_dir = self._prepare_vts_results_dir(resolve_vts)
      pants_jar_base_dir = self._prepare_workdir()
      coursier_cache_dir = CoursierSubsystem.global_instance().get_options().cache_dir

      # If a report is requested, do not proceed with loading validated result.
      if not self.get_options().report:
        # Check each individual target without context first
        # If the individuals are valid, check them as a VersionedTargetSet
        if not invalidation_check.invalid_vts and resolve_vts.valid:
          # Load up from the results dir
          success = self._load_from_results_dir(compile_classpath, vt_set_results_dir,
                                                coursier_cache_dir, invalidation_check,
                                                pants_jar_base_dir)
          if success:
            # NOTE(review): this `return` exits resolve() entirely, skipping any remaining
            # artifact sets in jar_targets — presumably intended for the common single-set
            # case; confirm whether `continue` was meant here.
            return

      jars_to_resolve, pinned_coords = self._compute_jars_to_resolve_and_pin(raw_jar_deps,
                                                                             artifact_set,
                                                                             manager)

      results = self._get_result_from_coursier(jars_to_resolve, global_excludes, pinned_coords,
                                               coursier_cache_dir, sources, javadoc, executor)

      for conf, result_list in results.items():
        for result in result_list:
          self._load_json_result(conf, compile_classpath, coursier_cache_dir,
                                 invalidation_check, pants_jar_base_dir, result,
                                 self._override_classifiers_for_conf(conf))

      self._populate_results_dir(vt_set_results_dir, results)
      resolve_vts.update()
python
{ "resource": "" }
q27356
CoursierMixin._prepare_vts_results_dir
train
def _prepare_vts_results_dir(self, vts):
  """Create (if needed) and return the results dir for a `VersionedTargetSet`."""
  results_dir = os.path.join(self.versioned_workdir, 'results', vts.cache_key.hash)
  safe_mkdir(results_dir)
  return results_dir
python
{ "resource": "" }
q27357
CoursierMixin._prepare_workdir
train
def _prepare_workdir(self):
  """Prepare the location in our task workdir to store all the hardlinks to coursier cache dir."""
  cache_link_dir = os.path.join(self.versioned_workdir, 'cache')
  safe_mkdir(cache_link_dir)
  return cache_link_dir
python
{ "resource": "" }
q27358
CoursierMixin._get_result_from_coursier
train
def _get_result_from_coursier(self, jars_to_resolve, global_excludes, pinned_coords, coursier_cache_path, sources, javadoc, executor): """ Calling coursier and return the result per invocation. If coursier was called once for classifier '' and once for classifier 'tests', then the return value would be: {'default': [<first coursier output>, <second coursier output>]} :param jars_to_resolve: List of `JarDependency`s to resolve :param global_excludes: List of `M2Coordinate`s to exclude globally :param pinned_coords: List of `M2Coordinate`s that need to be pinned. :param coursier_cache_path: path to where coursier cache is stored. :param executor: An instance of `pants.java.executor.Executor` :return: The aggregation of results by conf from coursier. Each coursier call could return the following: { "conflict_resolution": { "org:name:version" (requested): "org:name:version" (reconciled) }, "dependencies": [ { "coord": "orgA:nameA:versionA", "file": <path>, "dependencies": [ // coodinates for its transitive dependencies <orgX:nameX:versionX>, <orgY:nameY:versionY>, ] }, { "coord": "orgB:nameB:jar:classifier:versionB", "file": <path>, "dependencies": [ // coodinates for its transitive dependencies <orgX:nameX:versionX>, <orgZ:nameZ:versionZ>, ] }, ... 
// more about orgX:nameX:versionX, orgY:nameY:versionY, orgZ:nameZ:versionZ ] } Hence the aggregation of the results will be in the following format, for example when default classifier and sources are fetched: { 'default': [<result from coursier call with default conf with classifier X>, <result from coursier call with default conf with classifier Y>], 'src_doc': [<result from coursier call with --sources and/or --javadoc>], } """ # Prepare coursier args coursier_subsystem_instance = CoursierSubsystem.global_instance() coursier_jar = coursier_subsystem_instance.bootstrap_coursier(self.context.new_workunit) repos = coursier_subsystem_instance.get_options().repos # make [repoX, repoY] -> ['-r', repoX, '-r', repoY] repo_args = list(itertools.chain(*list(zip(['-r'] * len(repos), repos)))) artifact_types_arg = ['-A', ','.join(coursier_subsystem_instance.get_options().artifact_types)] advanced_options = coursier_subsystem_instance.get_options().fetch_options common_args = ['fetch', # Print the resolution tree '-t', '--cache', coursier_cache_path ] + repo_args + artifact_types_arg + advanced_options coursier_work_temp_dir = os.path.join(self.versioned_workdir, 'tmp') safe_mkdir(coursier_work_temp_dir) results_by_conf = self._get_default_conf_results(common_args, coursier_jar, global_excludes, jars_to_resolve, coursier_work_temp_dir, pinned_coords, executor) if sources or javadoc: non_default_conf_results = self._get_non_default_conf_results(common_args, coursier_jar, global_excludes, jars_to_resolve, coursier_work_temp_dir, pinned_coords, sources, javadoc, executor) results_by_conf.update(non_default_conf_results) return results_by_conf
python
{ "resource": "" }
q27359
CoursierMixin._load_json_result
train
def _load_json_result(self, conf, compile_classpath, coursier_cache_path, invalidation_check,
                      pants_jar_path_base, result, override_classifiers=None):
  """
  Given a coursier run result, load it into compile_classpath by target.

  :param conf: conf under which the jars are registered (e.g. 'default', 'src_doc').
  :param compile_classpath: `ClasspathProducts` that will be modified
  :param coursier_cache_path: cache location that is managed by coursier
  :param invalidation_check: InvalidationCheck
  :param pants_jar_path_base: location under pants workdir that contains all the hardlinks to coursier cache
  :param result: result dict converted from the json produced by one coursier run
  :param override_classifiers: if given, these classifiers replace the ones requested on each jar.
  :return: n/a
  """
  # Parse the coursier result
  flattened_resolution = self._extract_dependencies_by_root(result)

  coord_to_resolved_jars = self._map_coord_to_resolved_jars(result, coursier_cache_path,
                                                            pants_jar_path_base)

  # Construct a map from org:name to the reconciled org:name:version coordinate
  # This is used when there won't be a conflict_resolution entry because the conflict
  # was resolved in pants.
  org_name_to_org_name_rev = {}
  for coord in coord_to_resolved_jars.keys():
    org_name_to_org_name_rev['{}:{}'.format(coord.org, coord.name)] = coord

  jars_per_target = []

  for vt in invalidation_check.all_vts:
    t = vt.target
    jars_to_digest = []
    if isinstance(t, JarLibrary):

      def get_transitive_resolved_jars(my_coord, resolved_jars):
        transitive_jar_path_for_coord = []
        coord_str = str(my_coord)
        if coord_str in flattened_resolution and my_coord in resolved_jars:
          transitive_jar_path_for_coord.append(resolved_jars[my_coord])

          for c in flattened_resolution[coord_str]:
            j = resolved_jars.get(self.to_m2_coord(c))
            if j:
              transitive_jar_path_for_coord.append(j)

        return transitive_jar_path_for_coord

      for jar in t.jar_dependencies:
        # if there are override classifiers, then force use of those.
        if override_classifiers:
          coord_candidates = [jar.coordinate.copy(classifier=c) for c in override_classifiers]
        else:
          coord_candidates = [jar.coordinate]

        # if conflict resolution entries, then update versions to the resolved ones.
        if jar.coordinate.simple_coord in result['conflict_resolution']:
          parsed_conflict = self.to_m2_coord(
            result['conflict_resolution'][jar.coordinate.simple_coord])
          coord_candidates = [c.copy(rev=parsed_conflict.rev) for c in coord_candidates]
        elif '{}:{}'.format(jar.coordinate.org, jar.coordinate.name) in org_name_to_org_name_rev:
          parsed_conflict = org_name_to_org_name_rev['{}:{}'.format(jar.coordinate.org,
                                                                    jar.coordinate.name)]
          coord_candidates = [c.copy(rev=parsed_conflict.rev) for c in coord_candidates]

        for coord in coord_candidates:
          transitive_resolved_jars = get_transitive_resolved_jars(coord, coord_to_resolved_jars)
          if transitive_resolved_jars:
            # extend() instead of an inner `for jar in ...` loop: the old inner loop
            # variable shadowed the enclosing `jar` loop variable.
            jars_to_digest.extend(transitive_resolved_jars)

    jars_per_target.append((t, jars_to_digest))

  for target, jars_to_add in self.add_directory_digests_for_jars(jars_per_target):
    compile_classpath.add_jars_for_targets([target], conf, jars_to_add)
python
{ "resource": "" }
q27360
CoursierMixin._load_from_results_dir
train
def _load_from_results_dir(self, compile_classpath, vts_results_dir,
                           coursier_cache_path, invalidation_check, pants_jar_path_base):
  """
  Given vts_results_dir, load the results which can be from multiple runs of coursier
  into compile_classpath.

  :return: True if success; False if any of the classpath is not valid anymore.
  """
  result_file_path = os.path.join(vts_results_dir, self.RESULT_FILENAME)
  if not os.path.exists(result_file_path):
    # No saved result to load. Return False explicitly (the old code fell off with a bare
    # `return`, i.e. None) to honor the documented True/False contract; callers only
    # test truthiness, so this is behavior-compatible.
    return False

  with open(result_file_path, 'r') as f:
    results = json.load(f)
    for conf, result_list in results.items():
      for result in result_list:
        try:
          self._load_json_result(conf, compile_classpath, coursier_cache_path,
                                 invalidation_check, pants_jar_path_base, result,
                                 self._override_classifiers_for_conf(conf))
        except CoursierResultNotFound:
          # A cached artifact path no longer exists; the saved result is stale.
          return False

  return True
python
{ "resource": "" }
q27361
CoursierMixin._extract_dependencies_by_root
train
def _extract_dependencies_by_root(cls, result): """ Only extracts the transitive dependencies for the given coursier resolve. Note the "dependencies" field is already transitive. Example: { "conflict_resolution": {}, "dependencies": [ { "coord": "a", "dependencies": ["b", "c"] "file": ... }, { "coord": "b", "dependencies": [] "file": ... }, { "coord": "c", "dependencies": [] "file": ... } ] } Should return { "a": ["b", "c"], "b": [], "c": [] } :param result: coursier result like the example. :return: a simplified view with the top artifact as the roots. """ flat_result = defaultdict(list) for artifact in result['dependencies']: flat_result[artifact['coord']].extend(artifact['dependencies']) return flat_result
python
{ "resource": "" }
q27362
GoalRunnerFactory._determine_v1_goals
train
def _determine_v1_goals(self, address_mapper, options):
  """Check and populate the requested goals for a given run.

  :param address_mapper: used to detect goal names that also resolve as target addresses.
  :param options: parsed options carrying the goals requested on the command line.
  :returns: list of `Goal` instances for the requested v1 (and ambiguous) goals.
  """
  v1_goals, ambiguous_goals, _ = options.goals_by_version
  requested_goals = v1_goals + ambiguous_goals

  spec_parser = CmdLineSpecParser(self._root_dir)
  for goal in requested_goals:
    # A bare word like 'server' could be either a goal or a target in the build root;
    # warn when it also parses as a valid address, since we treat it as a goal.
    if address_mapper.is_valid_single_address(spec_parser.parse_spec(goal)):
      logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be "
                     "a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))

  return [Goal.by_name(goal) for goal in requested_goals]
python
{ "resource": "" }
q27363
GoalRunnerFactory._roots_to_targets
train
def _roots_to_targets(self, build_graph, target_roots):
  """Populate the BuildGraph and target list from a set of input TargetRoots."""
  with self._run_tracker.new_workunit(name='parse', labels=[WorkUnitLabel.SETUP]):
    addresses = build_graph.inject_roots_closure(target_roots, self._fail_fast)
    return [build_graph.get_target(address) for address in addresses]
python
{ "resource": "" }
q27364
Report.log
train
def log(self, workunit, level, *msg_elements):
  """Log a message.

  Each element of msg_elements is either a message string or a (message, detail) pair.
  """
  # Hold the lock so reporters never see interleaved log calls.
  with self._lock:
    for active_reporter in self._reporters.values():
      active_reporter.handle_log(workunit, level, *msg_elements)
python
{ "resource": "" }
q27365
Cobertura.initialize_instrument_classpath
train
def initialize_instrument_classpath(output_dir, settings, targets, instrumentation_classpath):
  """Clones the existing runtime_classpath and corresponding binaries to instrumentation specific paths.

  :param targets: the targets for which we should create an instrumentation_classpath entry based
    on their runtime_classpath entry.
  """
  instrument_dir = os.path.join(output_dir, 'coverage', 'classes')
  # Start from a clean directory so stale instrumented classes from a prior run can't leak in.
  settings.safe_makedir(instrument_dir, clean=True)

  for target in targets:
    if not Cobertura.is_coverage_target(target):
      continue
    # Do not instrument transitive dependencies.
    paths = instrumentation_classpath.get_for_target(target)
    target_instrumentation_path = os.path.join(instrument_dir, target.id)
    for (index, (config, path)) in enumerate(paths):
      # There are two sorts of classpath entries we see in the compile classpath: jars and dirs.
      # The branches below handle the cloning of those respectively.
      entry_instrumentation_path = os.path.join(target_instrumentation_path, str(index))
      if settings.is_file(path):
        # Jar (single file): copy it into a fresh per-entry directory.
        settings.safe_makedir(entry_instrumentation_path, clean=True)
        settings.copy2(path, entry_instrumentation_path)
        new_path = os.path.join(entry_instrumentation_path, os.path.basename(path))
      else:
        # Class directory: clone the whole tree.
        settings.copytree(path, entry_instrumentation_path)
        new_path = entry_instrumentation_path

      # Swap the product entry so downstream consumers see the instrumented copy
      # instead of the original runtime classpath entry.
      instrumentation_classpath.remove_for_target(target, [(config, path)])
      instrumentation_classpath.add_for_target(target, [(config, new_path)])
      settings.log.debug(
        "runtime_classpath ({}) cloned to instrument_classpath ({})".format(path, new_path))
python
{ "resource": "" }
q27366
NailgunProtocol.send_request
train
def send_request(cls, sock, working_dir, command, *arguments, **environment):
  """Send the initial Nailgun request over the specified socket.

  Chunk emission order (arguments, environment, working dir, command) is part of the
  protocol handshake and is preserved here.
  """
  for arg in arguments:
    cls.write_chunk(sock, ChunkType.ARGUMENT, arg)
  for env_pair in environment.items():
    encoded_pair = cls.ENVIRON_SEP.join(cls._decode_unicode_seq(env_pair))
    cls.write_chunk(sock, ChunkType.ENVIRONMENT, encoded_pair)
  cls.write_chunk(sock, ChunkType.WORKING_DIR, working_dir)
  cls.write_chunk(sock, ChunkType.COMMAND, command)
python
{ "resource": "" }
q27367
NailgunProtocol.write_chunk
train
def write_chunk(cls, sock, chunk_type, payload=b''):
  """Serialize a single chunk and write it to the connected client in full."""
  sock.sendall(cls.construct_chunk(chunk_type, payload))
python
{ "resource": "" }
q27368
NailgunProtocol.construct_chunk
train
def construct_chunk(cls, chunk_type, payload, encoding='utf-8'):
  """Construct and return a single chunk: a packed header followed by the encoded payload.

  str payloads are encoded with `encoding`; bytes pass through; anything else is rejected.
  """
  if isinstance(payload, str):
    encoded_payload = payload.encode(encoding)
  elif isinstance(payload, bytes):
    encoded_payload = payload
  else:
    raise TypeError('cannot encode type: {}'.format(type(payload)))
  return struct.pack(cls.HEADER_FMT, len(encoded_payload), chunk_type) + encoded_payload
python
{ "resource": "" }
q27369
NailgunProtocol._read_until
train
def _read_until(cls, sock, desired_size): """Read a certain amount of content from a socket before returning.""" buf = b'' while len(buf) < desired_size: recv_bytes = sock.recv(desired_size - len(buf)) if not recv_bytes: raise cls.TruncatedRead('Expected {} bytes before socket shutdown, instead received {}' .format(desired_size, len(buf))) buf += recv_bytes return buf
python
{ "resource": "" }
q27370
NailgunProtocol.read_chunk
train
def read_chunk(cls, sock, return_bytes=False):
  """Read a single chunk from the connected client.

  A "chunk" is a variable-length block of data beginning with a 5-byte chunk header and
  followed by an optional payload. The chunk header consists of:

    1) The length of the chunk's payload (not including the header) as a four-byte
       big-endian unsigned long. The high-order byte is header[0] and the low-order
       byte is header[3].

    2) A single byte identifying the type of chunk.
  """
  # Read and unpack the chunk header.
  try:
    header = cls._read_until(sock, cls.HEADER_BYTES)
  except cls.TruncatedRead as e:
    raise cls.TruncatedHeaderError('Failed to read nailgun chunk header ({!r}).'.format(e))
  payload_len, chunk_type = struct.unpack(cls.HEADER_FMT, header)

  # Read the chunk payload.
  try:
    payload = cls._read_until(sock, payload_len)
  except cls.TruncatedRead as e:
    raise cls.TruncatedPayloadError('Failed to read nailgun chunk payload ({!r}).'.format(e))

  # Validate the chunk_type only after draining the payload, so a malformed chunk doesn't
  # leave a stale payload on the socket for subsequent reads.
  if chunk_type not in ChunkType.VALID_TYPES:
    raise cls.ProtocolError('invalid chunk type: {}'.format(chunk_type))

  return (chunk_type, payload) if return_bytes else (chunk_type, payload.decode('utf-8'))
python
{ "resource": "" }
q27371
NailgunProtocol.iter_chunks
train
def iter_chunks(cls, sock, return_bytes=False, timeout_object=None):
  """Generates chunks from a connected socket until an Exit chunk is sent or a timeout occurs.

  :param sock: the socket to read from.
  :param bool return_bytes: If False, decode the payload into a utf-8 string.
  :param cls.TimeoutProvider timeout_object: If provided, will be checked every iteration
    for a possible timeout.
  :raises: :class:`cls.ProcessStreamTimeout`
  """
  assert(timeout_object is None or isinstance(timeout_object, cls.TimeoutProvider))
  # Both stay None until the timeout_object first reports a deadline; once armed, every
  # subsequent iteration is checked against that single fixed deadline.
  orig_timeout_time = None
  timeout_interval = None
  while 1:
    if orig_timeout_time is not None:
      # A deadline is armed: a positive value means we are past it (overtime), so fail.
      remaining_time = time.time() - (orig_timeout_time + timeout_interval)
      if remaining_time > 0:
        original_timestamp = datetime.datetime.fromtimestamp(orig_timeout_time).isoformat()
        raise cls.ProcessStreamTimeout(
          "iterating over bytes from nailgun timed out with timeout interval {} starting at {}, "
          "overtime seconds: {}"
          .format(timeout_interval, original_timestamp, remaining_time))
      # NOTE(review): before the deadline this value is negative and is passed to
      # _set_socket_timeout below as-is -- presumably that helper tolerates negative
      # timeouts; confirm against its implementation.
    elif timeout_object is not None:
      # No deadline armed yet: ask the provider whether one should start now.
      opts = timeout_object.maybe_timeout_options()
      if opts:
        orig_timeout_time = opts.start_time
        timeout_interval = opts.interval
        # Re-enter the loop so the freshly armed deadline is evaluated immediately.
        continue
      remaining_time = None
    else:
      remaining_time = None

    with cls._set_socket_timeout(sock, timeout=remaining_time):
      chunk_type, payload = cls.read_chunk(sock, return_bytes)
      yield chunk_type, payload
      if chunk_type == ChunkType.EXIT:
        break
python
{ "resource": "" }
q27372
NailgunProtocol.send_stdout
train
def send_stdout(cls, sock, payload):
  """Write `payload` to the specified socket as a Stdout chunk."""
  cls.write_chunk(sock, ChunkType.STDOUT, payload)
python
{ "resource": "" }
q27373
NailgunProtocol.send_stderr
train
def send_stderr(cls, sock, payload):
  """Write `payload` to the specified socket as a Stderr chunk."""
  cls.write_chunk(sock, ChunkType.STDERR, payload)
python
{ "resource": "" }
q27374
NailgunProtocol.send_exit
train
def send_exit(cls, sock, payload=b''):
  """Write an Exit chunk carrying `payload` to the specified socket."""
  cls.write_chunk(sock, ChunkType.EXIT, payload)
python
{ "resource": "" }
q27375
NailgunProtocol.send_exit_with_code
train
def send_exit_with_code(cls, sock, code):
  """Send an Exit chunk over the specified socket, containing the specified return code."""
  cls.send_exit(sock, payload=cls.encode_int(code))
python
{ "resource": "" }
q27376
NailgunProtocol.send_pid
train
def send_pid(cls, sock, pid):
  """Send the PID chunk over the specified socket.

  PIDs must be positive integers (contrast with send_pgrp, which sends negative values).
  """
  assert(isinstance(pid, IntegerForPid) and pid > 0)
  cls.write_chunk(sock, ChunkType.PID, cls.encode_int(pid))
python
{ "resource": "" }
q27377
NailgunProtocol.send_pgrp
train
def send_pgrp(cls, sock, pgrp):
  """Send the PGRP chunk over the specified socket.

  Process group ids are transmitted as negative integers.
  """
  assert(isinstance(pgrp, IntegerForPid) and pgrp < 0)
  cls.write_chunk(sock, ChunkType.PGRP, cls.encode_int(pgrp))
python
{ "resource": "" }
q27378
NailgunProtocol.encode_int
train
def encode_int(cls, obj):
  """Verify the object is an int, and ASCII-encode it.

  :param int obj: An integer to be encoded.
  :raises: :class:`TypeError` if `obj` is not an integer.
  :return: A binary representation of the int `obj` suitable to pass as the `payload`
    to send_exit().
  """
  if isinstance(obj, int):
    return str(obj).encode('ascii')
  raise TypeError("cannot encode non-integer object in encode_int(): object was {} (type '{}')."
                  .format(obj, type(obj)))
python
{ "resource": "" }
q27379
NailgunProtocol.isatty_to_env
train
def isatty_to_env(cls, stdin, stdout, stderr):
  """Generate nailgun tty capability environment variables based on checking a set of fds.

  :param file stdin: The stream to check for stdin tty capabilities.
  :param file stdout: The stream to check for stdout tty capabilities.
  :param file stderr: The stream to check for stderr tty capabilities.
  :returns: A dict containing the tty capability environment variables.
  """
  def gen_env_vars():
    # Pair each standard stream with its descriptor id and emit one "is a tty" flag
    # variable per stream.
    for fd_id, fd in zip(STDIO_DESCRIPTORS, (stdin, stdout, stderr)):
      is_atty = fd.isatty()
      yield (cls.TTY_ENV_TMPL.format(fd_id), cls.encode_env_var_value(int(is_atty)))
      if is_atty:
        # tty-capable streams additionally get a path variable naming the terminal device.
        # NOTE(review): os.ttyname() returns str but the fallback here is the bytes
        # literal b'', so the value type differs between the two cases -- confirm
        # downstream consumers tolerate both.
        yield (cls.TTY_PATH_ENV.format(fd_id), os.ttyname(fd.fileno()) or b'')
  return dict(gen_env_vars())
python
{ "resource": "" }
q27380
NailgunProtocol.isatty_from_env
train
def isatty_from_env(cls, env):
  """Determine whether remote file descriptors are tty capable using std nailgunned env variables.

  :param dict env: A dictionary representing the environment.
  :returns: A tuple of boolean values indicating istty or not for (stdin, stdout, stderr).
  """
  def as_bool(raw_value):
    # Environment variable values should always be strings; anything non-numeric is False.
    return raw_value.isdigit() and bool(int(raw_value))

  return tuple(as_bool(env.get(cls.TTY_ENV_TMPL.format(fd_id), '0'))
               for fd_id in STDIO_DESCRIPTORS)
python
{ "resource": "" }
q27381
Engine.execute
train
def execute(self, context, goals):
  """Executes the supplied goals and their dependencies against the given context.

  :param context: The pants run context.
  :param list goals: A list of ``Goal`` objects representing the command line goals
    explicitly requested.
  :returns int: An exit code of 0 upon success and non-zero otherwise.
  """
  try:
    self.attempt(context, goals)
  except TaskError as e:
    message = str(e)
    if message:
      print('\nFAILURE: {0}\n'.format(message))
    else:
      print('\nFAILURE\n')
    return e.exit_code
  return 0
python
{ "resource": "" }
q27382
JarDependency.copy
train
def copy(self, **replacements):
  """Returns a clone of this JarDependency with the given replacements kwargs overlaid."""
  kwargs = self._asdict()
  for key, val in replacements.items():
    # Excludes need normalization before they can be stored on the clone.
    kwargs[key] = JarDependency._prepare_excludes(val) if key == 'excludes' else val
  # org and base_name are positional in the constructor, so pull them out of the kwargs.
  org = kwargs.pop('org')
  base_name = kwargs.pop('base_name')
  return type(self)(org, base_name, **kwargs)
python
{ "resource": "" }
q27383
JarDependency.coordinate
train
def coordinate(self):
  """Returns the maven coordinate of this jar.

  :rtype: :class:`pants.java.jar.M2Coordinate`
  """
  return M2Coordinate(org=self.org,
                      name=self.name,
                      rev=self.rev,
                      classifier=self.classifier,
                      ext=self.ext)
python
{ "resource": "" }
q27384
AnalysisExtraction._create_products_if_should_run
train
def _create_products_if_should_run(self):
  """If this task should run, initialize empty products that it will populate.

  Returns true if the task should run.
  """
  products = self.context.products
  should_run = False
  if products.is_required_data('classes_by_source'):
    should_run = True
    products.safe_create_data('classes_by_source',
                              lambda: defaultdict(MultipleRootedProducts))
  if products.is_required_data('product_deps_by_src'):
    should_run = True
    products.safe_create_data('product_deps_by_src', dict)
  return should_run
python
{ "resource": "" }
q27385
ScmPublishMixin.check_clean_master
train
def check_clean_master(self, commit=False):
  """Perform a sanity check on SCM publishing constraints.

  Checks for uncommitted tracked files and ensures we're on an allowed branch configured
  to push to an allowed server if `commit` is `True`.

  :param bool commit: `True` if a commit is in progress.
  :raise TaskError: on failure
  """
  if commit:
    if self.restrict_push_branches:
      # Publishing is only allowed from an explicitly configured set of branches.
      branch = self.scm.branch_name
      if branch not in self.restrict_push_branches:
        raise self.InvalidBranchError('Can only push from {}, currently on branch: {}'
                                      .format(' '.join(sorted(self.restrict_push_branches)),
                                              branch))

    if self.restrict_push_urls:
      # Likewise, the remote we'd push to must be in the configured allow-list.
      url = self.scm.server_url
      if url not in self.restrict_push_urls:
        raise self.InvalidRemoteError('Can only push to {}, currently the remote url is: {}'
                                      .format(' '.join(sorted(self.restrict_push_urls)), url))

    # Any locally modified tracked file would make the publish commit dirty.
    changed_files = self.scm.changed_files()
    if changed_files:
      raise self.DirtyWorkspaceError('Can only push from a clean branch, found : {}'
                                     .format(' '.join(changed_files)))
  elif self.scm:
    # NOTE(review): this branch runs whenever `commit` is falsy and an scm is configured;
    # the log message implies that only happens in test mode -- confirm callers uphold that.
    self.log.info('Skipping check for a clean {} branch in test mode.'
                  .format(self.scm.branch_name))
python
{ "resource": "" }
q27386
ScmPublishMixin.commit_pushdb
train
def commit_pushdb(self, coordinates, postscript=None):
  """Commit changes to the pushdb with a message containing the provided coordinates."""
  message = 'pants build committing publish data for push of {coordinates}{postscript}'.format(
    coordinates=coordinates, postscript=postscript or '')
  self.scm.commit(message, verify=self.get_options().verify_commit)
python
{ "resource": "" }
q27387
ScmPublishMixin.publish_pushdb_changes_to_remote_scm
train
def publish_pushdb_changes_to_remote_scm(self, pushdb_file, coordinate, tag_name, tag_message,
                                         postscript=None):
  """Push pushdb changes to the remote scm repository, and then tag the commit if it succeeds."""
  self._add_pushdb(pushdb_file)
  self.commit_pushdb(coordinate, postscript=postscript)
  full_tag_message = '{message}{postscript}'.format(message=tag_message,
                                                   postscript=postscript or '')
  self._push_and_tag_changes(tag_name=tag_name, tag_message=full_tag_message)
python
{ "resource": "" }
q27388
GoTarget.package_path
train
def package_path(cls, root, path):
  """Returns a normalized package path constructed from the given path and its root.

  A remote package path is the portion of the remote Go package's import path after the
  remote root path. For example, the remote import path
  'https://github.com/bitly/go-simplejson' has a remote root of
  'https://github.com/bitly/go-simplejson' and there is only 1 package in that remote root.
  The package path in this case is '' or '.' and is normalized to ''.

  Some remote roots have no root package and others have both a root and sub-packages.
  The remote root of 'github.com/docker/docker' is an example of the former. One of the
  packages you might import from it is 'github.com/docker/docker/daemon/events' and that
  package has a normalized remote package path of 'daemon/events'.

  :param string root: The portion of `path` pointing to the go source code root. This
    would be `src` under a standard $GOPATH root.
  :param string path: The full path to the package. This would be the full path to the
    package directory relative to the $GOPATH.
  :raises: `ValueError` if the path does not lay within the root.
  """
  return cls.normalize_package_path(os.path.relpath(path, root))
python
{ "resource": "" }
q27389
GoTarget.normalize_package_path
train
def normalize_package_path(cls, package_path):
  """Returns a normalized version of the given package path.

  The root package might be denoted by '' or '.' and is normalized to ''. All other
  packages are of the form 'path' or 'path/subpath', etc. If the given path is either
  absolute or relative (includes the parent dir path signifier, '..'), then an error is
  raised since the path cannot be normalized.

  :param string package_path: The Go package path to normalize.
  :raises: `ValueError` if the package path cannot be normalized.
  """
  if package_path.startswith(os.pardir + os.sep):
    raise ValueError('Relative package paths are not allowed. Given: {!r}'.format(package_path))
  if os.path.isabs(package_path):
    raise ValueError('Absolute package paths are not allowed. Given: {!r}'.format(package_path))
  if not package_path or package_path == os.curdir:
    return ''
  return package_path.lstrip('/')
python
{ "resource": "" }
q27390
_StoppableDaemonThread.join
train
def join(self, timeout=None):
  """Joins with a default timeout exposed on the class.

  Any falsy timeout (None or 0) falls back to JOIN_TIMEOUT.
  """
  effective_timeout = timeout or self.JOIN_TIMEOUT
  return super(_StoppableDaemonThread, self).join(effective_timeout)
python
{ "resource": "" }
q27391
NailgunStreamWriter.open
train
def open(cls, sock, chunk_type, isatty, chunk_eof_type=None, buf_size=None, select_timeout=None):
  """Yields the write side of a pipe that will copy appropriately chunked values to a socket.

  Single-stream convenience wrapper around open_multi.
  """
  with cls.open_multi(sock,
                      (chunk_type,),
                      (isatty,),
                      chunk_eof_type,
                      buf_size,
                      select_timeout) as ctx:
    yield ctx
python
{ "resource": "" }
q27392
NailgunStreamWriter.open_multi
train
def open_multi(cls, sock, chunk_types, isattys, chunk_eof_type=None, buf_size=None,
               select_timeout=None):
  """Yields the write sides of pipes that will copy appropriately chunked values to the socket."""
  # chunk_types and isattys must be parallel sequences: one entry per pipe to create.
  cls._assert_aligned(chunk_types, isattys)

  # N.B. This is purely to permit safe handling of a dynamic number of contextmanagers.
  with ExitStack() as stack:
    # Enter one _pipe() context per requested stream, then transpose the resulting
    # (read_fd, write_fd) pairs into parallel read_fds/write_fds tuples via zip(*...).
    read_fds, write_fds = list(zip(
      # Allocate one pipe pair per chunk type provided.
      *(stack.enter_context(_pipe(isatty)) for isatty in isattys)
    ))
    writer = NailgunStreamWriter(
      read_fds,
      sock,
      chunk_types,
      chunk_eof_type,
      buf_size=buf_size,
      select_timeout=select_timeout
    )
    # The caller holds the write ends while the writer runs against the read ends;
    # leaving this block stops the writer and unwinds the pipe contexts via the stack.
    with writer.running():
      yield write_fds, writer
python
{ "resource": "" }
q27393
CacheKey.combine_cache_keys
train
def combine_cache_keys(cls, cache_keys):
  """Returns a cache key for a list of target sets that already have cache keys.

  This operation is 'idempotent' in the sense that if cache_keys contains a single key
  then that key is returned. Note that this operation is commutative but not associative.
  We use the term 'combine' rather than 'merge' or 'union' to remind the user of this.
  Associativity is not a necessary property, in practice.
  """
  if len(cache_keys) == 1:
    return cache_keys[0]
  combined_id = Target.maybe_readable_combine_ids(key.id for key in cache_keys)
  # Sorting makes the combined hash independent of input ordering (commutativity).
  combined_hash = hash_all(sorted(key.hash for key in cache_keys))
  return cls(combined_id, combined_hash)
python
{ "resource": "" }
q27394
BuildInvalidator.previous_key
train
def previous_key(self, cache_key):
  """If there was a previous successful build for the given key, return the previous key.

  :param cache_key: A CacheKey object (as returned by CacheKeyGenerator.key_for().
  :returns: The previous cache_key, or None if there was not a previous build.
  """
  if not self.cacheable(cache_key):
    # We should never successfully cache an uncacheable CacheKey.
    return None
  previous_hash = self._read_sha(cache_key)
  return CacheKey(cache_key.id, previous_hash) if previous_hash else None
python
{ "resource": "" }
q27395
BuildInvalidator.needs_update
train
def needs_update(self, cache_key):
  """Check if the given cached item is invalid.

  :param cache_key: A CacheKey object (as returned by CacheKeyGenerator.key_for().
  :returns: True if the cached version of the item is out of date.
  """
  # An uncacheable CacheKey is always out of date; otherwise compare recorded vs current hash.
  return (not self.cacheable(cache_key)) or self._read_sha(cache_key) != cache_key.hash
python
{ "resource": "" }
q27396
BuildInvalidator.force_invalidate
train
def force_invalidate(self, cache_key):
  """Force-invalidate the cached item."""
  try:
    if self.cacheable(cache_key):
      os.unlink(self._sha_file(cache_key))
  except OSError as err:
    # A missing sha file just means there was nothing to invalidate.
    if err.errno != errno.ENOENT:
      raise
python
{ "resource": "" }
q27397
PackageManager.run_command
train
def run_command(self, args=None, node_paths=None):
  """Returns a command that when executed will run an arbitrary command via package manager."""
  return command_gen(self.tool_installations, self.name, args=args, node_paths=node_paths)
python
{ "resource": "" }
q27398
PackageManager.install_module
train
def install_module(self,
                   install_optional=False,
                   production_only=False,
                   force=False,
                   frozen_lockfile=True,
                   node_paths=None):
  """Returns a command that when executed will install node package.

  :param install_optional: True to install optional dependencies.
  :param production_only: True to only install production dependencies, i.e.
    ignore devDependencies.
  :param force: True to force re-download dependencies.
  :param frozen_lockfile: True to disallow automatic update of lock files.
  :param node_paths: A list of path that should be included in $PATH when running
    installation.
  """
  install_args = self._get_installation_args(install_optional=install_optional,
                                             production_only=production_only,
                                             force=force,
                                             frozen_lockfile=frozen_lockfile)
  return self.run_command(args=install_args, node_paths=node_paths)
python
{ "resource": "" }
q27399
PackageManager.run_script
train
def run_script(self, script_name, script_args=None, node_paths=None):
  """Returns a command to execute a package.json script.

  :param script_name: Name of the script to name. Note that script name 'test' can be used
    to run node tests.
  :param script_args: Args to be passed to package.json script.
  :param node_paths: A list of path that should be included in $PATH when running the script.
  """
  # TODO: consider add a pants.util function to manipulate command line.
  full_args = self._get_run_script_args() + [script_name]
  if script_args:
    # '--' separates package-manager flags from the script's own arguments.
    full_args.append('--')
    full_args.extend(script_args)
  return self.run_command(args=full_args, node_paths=node_paths)
python
{ "resource": "" }