_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q27600
PailgunHandler.handle
train
def handle(self):
    """Handle a single Pailgun request end-to-end."""
    # Split the raw Nailgun request into its components; working_dir and command are
    # deliberately dropped (see note below).
    _, _, arguments, environment = NailgunProtocol.parse_request(self.request)

    # N.B. the first and second nailgun request arguments (working_dir and command) are currently
    # ignored in favor of a get_buildroot() call within LocalPantsRunner.run() and an assumption
    # that anyone connecting to this nailgun server always intends to run pants itself.

    # Prepend './pants' so the argument list mirrors the sys.argv shape python expects
    # (e.g. ['list', '::'] -> ['./pants', 'list', '::']).
    arguments.insert(0, './pants')

    self.logger.info('handling pailgun request: `{}`'.format(' '.join(arguments)))
    self.logger.debug('pailgun request environment: %s', environment)

    # Execute the requested command, profiling daemon-side when PANTSD_PROFILE is set.
    with maybe_profiled(environment.get('PANTSD_PROFILE')):
        self._run_pants(self.request, arguments, environment)

    # NB: This represents the end of pantsd's involvement in the request, but the request will
    # continue to run post-fork.
    self.logger.info('pailgun request completed: `{}`'.format(' '.join(arguments)))
python
{ "resource": "" }
q27601
IdeaPluginGen._generate_to_tempfile
train
def _generate_to_tempfile(self, generator):
    """Run the given generator against a temp file and return that file's path.

    Generating into a temp file means any manual customizations are not lost on error.
    """
    with temporary_file(cleanup=False, binary_mode=False) as output:
        generator.write(output)
        return output.name
python
{ "resource": "" }
q27602
JavaThriftyGen._compute_include_paths
train
def _compute_include_paths(self, target):
    """Compute the ordered set of directories thrifty uses to look up imports.

    The IDL files under these paths are not compiled, but they are required to compile
    downstream IDL files.

    :param target: the JavaThriftyLibrary target to compile.
    :return: an ordered set of directories to pass along to thrifty.
    """
    include_paths = OrderedSet()
    include_paths.add(os.path.join(get_buildroot(), target.target_base))

    def accumulate(dep):
        # Only targets that own .thrift sources contribute an include root.
        if dep.has_sources('.thrift'):
            include_paths.add(os.path.join(get_buildroot(), dep.target_base))

    accumulate(target)
    target.walk(accumulate)
    return include_paths
python
{ "resource": "" }
q27603
ProcessGroup._instance_from_process
train
def _instance_from_process(self, process):
    """Default converter from psutil.Process to process instance classes for subclassing."""
    return ProcessManager(name=process.name(),
                          pid=process.pid,
                          process_name=process.name(),
                          metadata_base_dir=self._metadata_base_dir)
python
{ "resource": "" }
q27604
ProcessGroup.iter_processes
train
def iter_processes(self, proc_filter=None):
    """Yield psutil processes, optionally filtered, swallowing psutil errors.

    If a psutil exception is raised while evaluating the filter, that one process is skipped
    and iteration continues. If psutil.process_iter itself raises, iteration stops.
    """
    matches = proc_filter if proc_filter is not None else (lambda p: True)
    with swallow_psutil_exceptions():  # process_iter may raise.
        for proc in psutil.process_iter():
            with swallow_psutil_exceptions():  # proc_filter may raise.
                if matches(proc):
                    yield proc
python
{ "resource": "" }
q27605
ProcessMetadataManager._maybe_init_metadata_dir_by_name
train
def _maybe_init_metadata_dir_by_name(self, name):
    """Create the metadata directory for a named identity if it doesn't already exist."""
    metadata_dir = self.__class__._get_metadata_dir_by_name(name, self._metadata_base_dir)
    safe_mkdir(metadata_dir)
python
{ "resource": "" }
q27606
ProcessMetadataManager.read_metadata_by_name
train
def read_metadata_by_name(self, name, metadata_key, caster=None):
    """Read process metadata using a named identity.

    :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
    :param string metadata_key: The metadata key (e.g. 'pid').
    :param func caster: A casting callable to apply to the read value (e.g. `int`).
    :returns: The (optionally cast) metadata value, or None when the file is missing/unreadable.
    """
    file_path = self._metadata_file_path(name, metadata_key)
    try:
        raw_value = read_file(file_path).strip()
        return self._maybe_cast(raw_value, caster)
    except (IOError, OSError):
        # Missing or unreadable metadata is an expected state - surface it as None.
        return None
python
{ "resource": "" }
q27607
ProcessMetadataManager.write_metadata_by_name
train
def write_metadata_by_name(self, name, metadata_key, metadata_value):
    """Write process metadata using a named identity.

    :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
    :param string metadata_key: The metadata key (e.g. 'pid').
    :param string metadata_value: The metadata value (e.g. '1729').
    """
    # Make sure the identity's metadata dir exists before dumping into it.
    self._maybe_init_metadata_dir_by_name(name)
    target_path = self._metadata_file_path(name, metadata_key)
    safe_file_dump(target_path, metadata_value)
python
{ "resource": "" }
q27608
ProcessMetadataManager.await_metadata_by_name
train
def await_metadata_by_name(self, name, metadata_key, timeout, caster=None):
    """Block up to a timeout for process metadata to arrive on disk, then read it.

    :param string name: The ProcessMetadataManager identity/name (e.g. 'pantsd').
    :param string metadata_key: The metadata key (e.g. 'pid').
    :param int timeout: The deadline to write metadata.
    :param type caster: A type-casting callable to apply to the read value (e.g. int, str).
    :returns: The value of the metadata key (read from disk post-write).
    :raises: :class:`ProcessMetadataManager.Timeout` on timeout.
    """
    metadata_path = self._metadata_file_path(name, metadata_key)
    self._wait_for_file(metadata_path, timeout=timeout)
    return self.read_metadata_by_name(name, metadata_key, caster)
python
{ "resource": "" }
q27609
ProcessMetadataManager.purge_metadata_by_name
train
def purge_metadata_by_name(self, name):
    """Purge the metadata directory for a named identity.

    :raises: :class:`ProcessMetadataManager.MetadataError` when OSError is encountered on
             metadata dir removal.
    """
    meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
    logger.debug('purging metadata directory: {}'.format(meta_dir))
    try:
        rm_rf(meta_dir)
    except OSError as e:
        raise ProcessMetadataManager.MetadataError(
            'failed to purge metadata directory {}: {!r}'.format(meta_dir, e))
python
{ "resource": "" }
q27610
ProcessManager.lifecycle_lock
train
def lifecycle_lock(self):
    """An identity-keyed inter-process lock for safeguarding lifecycle and other operations."""
    safe_mkdir(self._metadata_base_dir)
    # N.B. This lock can't key into the actual named metadata dir (e.g. `.pids/pantsd/lock`
    # via `ProcessMetadataManager._get_metadata_dir_by_name()`) because of a need to purge
    # the named metadata dir on startup to avoid stale metadata reads.
    lock_path = os.path.join(self._metadata_base_dir, '.lock.{}'.format(self._name))
    return OwnerPrintingInterProcessFileLock(lock_path)
python
{ "resource": "" }
q27611
ProcessManager.get_subprocess_output
train
def get_subprocess_output(cls, command, ignore_stderr=True, **kwargs):
    """Execute a command and return its decoded, stripped output.

    :param command: An iterable representing the command to execute (e.g. ['ls', '-al']).
    :param ignore_stderr: Whether or not to ignore stderr output vs interleave it with stdout.
    :raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
    :returns: The output of the command.
    """
    # Interleave stderr into stdout only when the caller explicitly asks for it.
    if ignore_stderr is False:
        kwargs.setdefault('stderr', subprocess.STDOUT)
    try:
        raw_output = subprocess.check_output(command, **kwargs)
        return raw_output.decode('utf-8').strip()
    except (OSError, subprocess.CalledProcessError) as e:
        subprocess_output = getattr(e, 'output', '').strip()
        raise cls.ExecutionError(str(e), subprocess_output)
python
{ "resource": "" }
q27612
ProcessManager.await_socket
train
def await_socket(self, timeout):
    """Block up to `timeout` for the process to write its socket info, then return it."""
    socket_info = self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
    return socket_info
python
{ "resource": "" }
q27613
ProcessManager.write_pid
train
def write_pid(self, pid=None):
    """Write the given pid (defaulting to the current process's pid) to the pidfile location."""
    effective_pid = pid or os.getpid()
    self.write_metadata_by_name(self._name, 'pid', str(effective_pid))
python
{ "resource": "" }
q27614
ProcessManager.read_named_socket
train
def read_named_socket(self, socket_name, socket_type):
    """A multi-tenant, named alternative to ProcessManager.socket."""
    metadata_key = 'socket_{}'.format(socket_name)
    return self.read_metadata_by_name(self._name, metadata_key, socket_type)
python
{ "resource": "" }
q27615
ProcessManager._as_process
train
def _as_process(self): """Returns a psutil `Process` object wrapping our pid. NB: Even with a process object in hand, subsequent method calls against it can always raise `NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and do something sensible for the API. :returns: a psutil Process object or else None if we have no pid. :rtype: :class:`psutil.Process` :raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died. """ if self._process is None and self.pid: self._process = psutil.Process(self.pid) return self._process
python
{ "resource": "" }
q27616
ProcessManager.is_alive
train
def is_alive(self, extended_check=None):
    """Return a boolean indicating whether the process is running or not.

    :param func extended_check: An additional callable that will be invoked to perform an
                                extended liveness check. This callable should take a single
                                argument of a `psutil.Process` instance representing the
                                context-local process and return a boolean True/False to
                                indicate alive vs not alive.
    """
    try:
        process = self._as_process()
        if not process:
            # Can happen if we don't find our pid.
            return False
        if process.status() == psutil.STATUS_ZOMBIE:
            # Walkers don't count.
            return False
        if self.process_name and self.process_name != process.name():
            # A stale pid now owned by some other process.
            return False
        if extended_check and not extended_check(process):
            return False
        return True
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
        return False
python
{ "resource": "" }
q27617
ProcessManager._kill
train
def _kill(self, kill_sig): """Send a signal to the current process.""" if self.pid: os.kill(self.pid, kill_sig)
python
{ "resource": "" }
q27618
ProcessManager.daemonize
train
def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
              fork_context=None, write_pid=True):
    """Perform a double-fork, execute callbacks and write the child pid file.

    The double-fork here is necessary to truly daemonize the subprocess such that it can never
    take control of a tty. The initial fork and setsid() creates a new, isolated process group
    and also makes the first child a session leader (which can still acquire a tty). By forking
    a second time, we ensure that the second child can never acquire a controlling terminal
    because it's no longer a session leader - but it now has its own separate process group.

    Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
    the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
    below) due to the fact that the daemons that pants would run are typically personal user
    daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase
    to differ in their permissions without good reason - in this case, we want to inherit the
    umask.

    :param fork_context: A function which accepts and calls a function that will call fork.
                         This is not a contextmanager/generator because that would make
                         interacting with native code more challenging. If no fork_context is
                         passed, the fork function is called directly.
    """
    def double_fork():
        # Returns (is_parent, is_child) as seen by whichever process resumes here.
        logger.debug('forking %s', self)
        pid = os.fork()
        if pid == 0:
            os.setsid()
            second_pid = os.fork()
            if second_pid == 0:
                return False, True
            else:
                if write_pid:
                    self.write_pid(second_pid)
                return False, False
        else:
            # This prevents un-reaped, throw-away parent processes from lingering in the
            # process table.
            os.waitpid(pid, 0)
            return True, False

    fork_func = functools.partial(fork_context, double_fork) if fork_context else double_fork

    # Perform the double fork (optionally under the fork_context). Three outcomes are possible
    # after the double fork: we're either the original parent process, the middle double-fork
    # process, or the child. We assert below that a process is not somehow both the parent and
    # the child.
    self.purge_metadata()
    self.pre_fork(**pre_fork_opts or {})
    is_parent, is_child = fork_func()
    try:
        if not is_parent and not is_child:
            # Middle process.
            os._exit(0)
        elif is_parent:
            assert not is_child
            self.post_fork_parent(**post_fork_parent_opts or {})
        else:
            assert not is_parent
            os.chdir(self._buildroot)
            self.post_fork_child(**post_fork_child_opts or {})
    except Exception:
        logger.critical(traceback.format_exc())
        os._exit(0)
python
{ "resource": "" }
q27619
ProcessManager.daemon_spawn
train
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
    """Perform a single-fork to run a subprocess and write the child pid file.

    Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
    case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
    Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
    to the caller to allow for library-agnostic flexibility in subprocess execution.
    """
    self.purge_metadata()
    self.pre_fork(**pre_fork_opts or {})
    pid = os.fork()
    if pid == 0:
        # Child: detach into a new session and hand off to the post-fork callback.
        try:
            os.setsid()
            os.chdir(self._buildroot)
            self.post_fork_child(**post_fork_child_opts or {})
        except Exception:
            logger.critical(traceback.format_exc())
        finally:
            # The child must never return into the caller's stack.
            os._exit(0)
    else:
        # Parent: run its callback, logging (but not propagating) failures.
        try:
            self.post_fork_parent(**post_fork_parent_opts or {})
        except Exception:
            logger.critical(traceback.format_exc())
python
{ "resource": "" }
q27620
FingerprintedProcessManager.fingerprint
train
def fingerprint(self):
    """The fingerprint of the current process.

    Prefers the fingerprint parsed from the running process's psutil.Process.cmdline (if the
    managed process supports that), falling back to the `ProcessManager` metadata on disk.

    :returns: The fingerprint of the running process as read from the process table,
              ProcessManager metadata or `None`.
    :rtype: string
    """
    cmdline_fingerprint = self.parse_fingerprint(self.cmdline)
    if cmdline_fingerprint:
        return cmdline_fingerprint
    return self.read_metadata_by_name(self.name, self.FINGERPRINT_KEY)
python
{ "resource": "" }
q27621
FingerprintedProcessManager.parse_fingerprint
train
def parse_fingerprint(self, cmdline, key=None, sep=None):
    """Given a psutil.Process.cmdline, parse and return a fingerprint.

    :param list cmdline: The psutil.Process.cmdline of the current process.
    :param string key: The key for fingerprint discovery.
    :param string sep: The key/value separator for fingerprint discovery.
    :returns: The parsed fingerprint or `None`.
    :rtype: string or `None`
    """
    key = key or self.FINGERPRINT_CMD_KEY
    if not key:
        return None
    sep = sep or self.FINGERPRINT_CMD_SEP
    prefix = '{}{}'.format(key, sep)
    for cmd_part in (cmdline or []):
        if cmd_part.startswith(prefix):
            return cmd_part.split(sep)[1]
python
{ "resource": "" }
q27622
ManagedJarDependencies.library_specs
train
def library_specs(self):
    """Lists of specs to resolve to jar_libraries containing more jars."""
    rel_path = self.address.spec_path
    return [Address.parse(spec, relative_to=rel_path).spec
            for spec in self.payload.library_specs]
python
{ "resource": "" }
q27623
GoDistribution.go_env
train
def go_env(self, gopath=None):
    """Return an env dict that represents a proper Go environment mapping for this distribution."""
    # Forcibly nullify the GOPATH if the command does not need one - this can prevent bad user
    # GOPATHs from erroring out commands; see: https://github.com/pantsbuild/pants/issues/2321.
    # NB: As of go 1.8, when GOPATH is unset (set to ''), it defaults to ~/go (assuming HOME is
    # set - and we can't unset that since it might legitimately be used by the subcommand); so we
    # set the GOPATH here to a valid value that nonetheless will fail to work if GOPATH is
    # actually used by the subcommand.
    effective_gopath = gopath or os.devnull
    return OrderedDict(GOROOT=self.goroot, GOPATH=effective_gopath)
python
{ "resource": "" }
q27624
GoDistribution.create_go_cmd
train
def create_go_cmd(self, cmd, gopath=None, args=None):
    """Creates a Go command that is optionally targeted to a Go workspace.

    :param string cmd: Go command to execute, e.g. 'test' for `go test`
    :param string gopath: An optional $GOPATH which points to a valid Go workspace from which
                          to run the command.
    :param list args: A list of arguments and flags to pass to the Go command.
    :returns: A go command that can be executed later.
    :rtype: :class:`GoDistribution.GoCommand`
    """
    env = self.go_env(gopath=gopath)
    return self.GoCommand._create(self.goroot, cmd, go_env=env, args=args)
python
{ "resource": "" }
q27625
GoDistribution.execute_go_cmd
train
def execute_go_cmd(self, cmd, gopath=None, args=None, env=None,
                   workunit_factory=None, workunit_name=None, workunit_labels=None, **kwargs):
    """Runs a Go command that is optionally targeted to a Go workspace.

    If a `workunit_factory` is supplied the command will run in a work unit context.

    :param string cmd: Go command to execute, e.g. 'test' for `go test`
    :param string gopath: An optional $GOPATH which points to a valid Go workspace from which
                          to run the command.
    :param list args: An optional list of arguments and flags to pass to the Go command.
    :param dict env: A custom environment to launch the Go command in. If `None` the current
                     environment is used.
    :param workunit_factory: An optional callable that can produce a `WorkUnit` context
    :param string workunit_name: An optional name for the work unit; defaults to the `cmd`
    :param list workunit_labels: An optional sequence of labels for the work unit.
    :param kwargs: Keyword arguments to pass through to `subprocess.Popen`.
    :returns: A tuple of the exit code and the go command that was run.
              NOTE(review): when no `workunit_factory` is given, only the bare exit code is
              returned - callers of that branch appear to rely on this; confirm before changing.
    :rtype: (int, :class:`GoDistribution.GoCommand`)
    """
    go_cmd = self.create_go_cmd(cmd, gopath=gopath, args=args)

    if workunit_factory is None:
        return go_cmd.spawn(**kwargs).wait()

    name = workunit_name or cmd
    labels = [WorkUnitLabel.TOOL] + (workunit_labels or [])
    with workunit_factory(name=name, labels=labels, cmd=str(go_cmd)) as workunit:
        process = go_cmd.spawn(env=env,
                               stdout=workunit.output('stdout'),
                               stderr=workunit.output('stderr'),
                               **kwargs)
        returncode = process.wait()
        outcome = WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE
        workunit.set_outcome(outcome)
        return returncode, go_cmd
python
{ "resource": "" }
q27626
target_internal_dependencies
train
def target_internal_dependencies(target):
    """Returns internal Jarable dependencies that were "directly" declared.

    Directly declared deps are those that are explicitly listed in the definition of a target,
    rather than being depended on transitively. But in order to walk through aggregator targets
    such as `target`, `dependencies`, or `jar_library`, this recursively descends the dep graph
    and stops at Jarable instances.
    """
    for dep in target.dependencies:
        if isinstance(dep, Jarable):
            yield dep
        else:
            # An aggregator target: descend until we bottom out at Jarables.
            for transitive_dep in target_internal_dependencies(dep):
                yield transitive_dep
python
{ "resource": "" }
q27627
PushDb.load
train
def load(path):
    """Loads a pushdb maintained in a properties file at the given path."""
    with open(path, 'r') as props:
        return PushDb(Properties.load(props))
python
{ "resource": "" }
q27628
PushDb.get_entry
train
def get_entry(self, target):
    """Given an internal target, return a PushDb.Entry, which might contain defaults."""
    db_get, _ = self._accessors_for_target(target)

    def as_bool(key):
        # Stored booleans are the strings 'true'/'false'.
        return str(db_get(key, 'false')).lower() == 'true'

    sem_ver = Semver(int(db_get('revision.major', '0')),
                     int(db_get('revision.minor', '0')),
                     int(db_get('revision.patch', '0')),
                     snapshot=as_bool('revision.snapshot'))
    named_version = db_get('revision.named_version', None)
    named_ver = Namedver(named_version) if named_version else None
    return self.Entry(sem_ver,
                      named_ver,
                      as_bool('revision.named_is_latest'),
                      db_get('revision.sha', None),
                      db_get('revision.fingerprint', None))
python
{ "resource": "" }
q27629
PushDb.dump
train
def dump(self, path):
    """Saves the pushdb as a properties file to the given path."""
    with open(path, 'w') as props:
        Properties.dump(self._props, props)
python
{ "resource": "" }
q27630
PomWriter._as_versioned_jar
train
def _as_versioned_jar(self, internal_target): """Fetches the jar representation of the given target, and applies the latest pushdb version.""" jar, _ = internal_target.get_artifact_info() pushdb_entry = self._get_db(internal_target).get_entry(internal_target) return jar.copy(rev=pushdb_entry.version().version())
python
{ "resource": "" }
q27631
JarPublish.confirm_push
train
def confirm_push(self, coord, version):
    """Ask the user if a push should be done for a particular version of a particular coordinate.

    Return True if the push should be done.
    """
    if not self.get_options().prompt:
        return True
    try:
        isatty = os.isatty(sys.stdin.fileno())
    except ValueError:
        # In tests, sys.stdin might not have a fileno
        isatty = False
    if not isatty:
        # Nobody to ask; push unconditionally.
        return True
    push = input('\nPublish {} with revision {} ? [y|N] '.format(coord, version))
    print('\n')
    return push.strip().lower() == 'y'
python
{ "resource": "" }
q27632
JarPublish._ivy_jvm_options
train
def _ivy_jvm_options(self, repo): """Get the JVM options for ivy authentication, if needed.""" # Get authentication for the publish repo if needed. if not repo.get('auth'): # No need to copy here, as this list isn't modified by the caller. return self._jvm_options # Create a copy of the options, so that the modification is appropriately transient. jvm_options = copy(self._jvm_options) user = repo.get('username') password = repo.get('password') if user and password: jvm_options.append('-Dlogin={}'.format(user)) jvm_options.append('-Dpassword={}'.format(password)) else: raise TaskError('Unable to publish to {}. {}' .format(repo.get('resolver'), repo.get('help', ''))) return jvm_options
python
{ "resource": "" }
q27633
JarPublish.create_doc_jar
train
def create_doc_jar(self, target, open_jar, version): """Returns a doc jar if either scala or java docs are available for the given target.""" javadoc = self._java_doc(target) scaladoc = self._scala_doc(target) if javadoc or scaladoc: jar_path = self.artifact_path(open_jar, version, suffix='-javadoc') with self.open_jar(jar_path, overwrite=True, compressed=True) as open_jar: def add_docs(docs): if docs: for basedir, doc_files in docs.items(): for doc_file in doc_files: open_jar.write(os.path.join(basedir, doc_file), doc_file) add_docs(javadoc) add_docs(scaladoc) return jar_path else: return None
python
{ "resource": "" }
q27634
BuildConfiguration.registered_aliases
train
def registered_aliases(self):
    """Return the registered aliases exposed in BUILD files.

    These returned aliases aren't so useful for actually parsing BUILD files. They are useful
    for generating things like http://pantsbuild.github.io/build_dictionary.html.

    :returns: A new BuildFileAliases instance containing this BuildConfiguration's registered
              alias mappings.
    :rtype: :class:`pants.build_graph.build_file_aliases.BuildFileAliases`
    """
    # Merge direct target aliases with macro-factory aliases into a single mapping.
    target_factories = self._target_by_alias.copy()
    target_factories.update(self._target_macro_factory_by_alias)
    return BuildFileAliases(
        targets=target_factories,
        objects=self._exposed_object_by_alias.copy(),
        context_aware_object_factories=self._exposed_context_aware_object_factory_by_alias.copy())
python
{ "resource": "" }
q27635
BuildConfiguration.register_aliases
train
def register_aliases(self, aliases):
    """Registers the given aliases to be exposed in parsed BUILD files.

    :param aliases: The BuildFileAliases to register.
    :type aliases: :class:`pants.build_graph.build_file_aliases.BuildFileAliases`
    """
    if not isinstance(aliases, BuildFileAliases):
        raise TypeError('The aliases must be a BuildFileAliases, given {}'.format(aliases))

    for alias, target_type in aliases.target_types.items():
        self._register_target_alias(alias, target_type)

    for alias, factory in aliases.target_macro_factories.items():
        self._register_target_macro_factory_alias(alias, factory)

    for alias, obj in aliases.objects.items():
        self._register_exposed_object(alias, obj)

    for alias, caof in aliases.context_aware_object_factories.items():
        self._register_exposed_context_aware_object_factory(alias, caof)
python
{ "resource": "" }
q27636
BuildConfiguration.register_optionables
train
def register_optionables(self, optionables):
    """Registers the given subsystem types.

    :param optionables: The Optionable types to register.
    :type optionables: :class:`collections.Iterable` containing
                       :class:`pants.option.optionable.Optionable` subclasses.
    """
    if not isinstance(optionables, Iterable):
        raise TypeError('The optionables must be an iterable, given {}'.format(optionables))
    optionables = tuple(optionables)
    if not optionables:
        # Nothing to register.
        return

    invalid_optionables = [s for s in optionables
                           if not isinstance(s, type) or not issubclass(s, Optionable)]
    if invalid_optionables:
        raise TypeError('The following items from the given optionables are not Optionable '
                        'subclasses:\n\t{}'.format('\n\t'.join(str(i) for i in invalid_optionables)))

    self._optionables.update(optionables)
python
{ "resource": "" }
q27637
BuildConfiguration.register_rules
train
def register_rules(self, rules):
    """Registers the given rules.

    param rules: The rules to register.
    :type rules: :class:`collections.Iterable` containing
                 :class:`pants.engine.rules.Rule` instances.
    """
    if not isinstance(rules, Iterable):
        raise TypeError('The rules must be an iterable, given {!r}'.format(rules))

    # "Index" the rules to normalize them and expand their dependencies.
    normalized_rules = RuleIndex.create(rules).normalized_rules()
    indexed_rules = normalized_rules.rules
    union_rules = normalized_rules.union_rules

    # Store the rules and record their dependency Optionables.
    self._rules.update(indexed_rules)
    self._union_rules.update(union_rules)
    dependency_optionables = {do
                              for rule in indexed_rules
                              for do in rule.dependency_optionables
                              if rule.dependency_optionables}
    self.register_optionables(dependency_optionables)
python
{ "resource": "" }
q27638
BuildConfiguration.initialize_parse_state
train
def initialize_parse_state(self, build_file):
    """Creates a fresh parse state for the given build file.

    :param build_file: The BUILD file to set up a new ParseState for.
    :type build_file: :class:`pants.base.build_file.BuildFile`
    :returns: A fresh ParseState for parsing the given `build_file` with.
    :rtype: :class:`BuildConfiguration.ParseState`
    """
    # TODO(John Sirois): Introduce a factory method to seal the BuildConfiguration and add a
    # check there that all anonymous types are covered by context aware object factories that
    # are Macro instances. Without this, we could have non-Macro context aware object factories
    # being asked to be a BuildFileTargetFactory when they are not (in SourceRoot registration
    # context). See: https://github.com/pantsbuild/pants/issues/2125
    type_aliases = self._exposed_object_by_alias.copy()
    parse_context = ParseContext(rel_path=build_file.spec_path, type_aliases=type_aliases)

    def create_call_proxy(tgt_type, tgt_alias=None):
        def registration_callback(address, addressable):
            parse_context._storage.add(addressable, name=address.target_name)
        addressable_factory = self._get_addressable_factory(tgt_type, tgt_alias)
        return AddressableCallProxy(addressable_factory=addressable_factory,
                                    build_file=build_file,
                                    registration_callback=registration_callback)

    # Expose all aliased Target types.
    for alias, target_type in self._target_by_alias.items():
        type_aliases[alias] = create_call_proxy(target_type, alias)

    # Expose aliases for exposed objects and targets in the BUILD file.
    parse_globals = type_aliases.copy()

    # Now its safe to add mappings from both the directly exposed and macro-created target types
    # to their call proxies for context awares and macros to use to manufacture targets by type
    # instead of by alias.
    for alias, target_type in self._target_by_alias.items():
        type_aliases[target_type] = type_aliases[alias]
    for target_macro_factory in self._target_macro_factory_by_alias.values():
        for target_type in target_macro_factory.target_types:
            type_aliases[target_type] = create_call_proxy(target_type)

    for alias, object_factory in self._exposed_context_aware_object_factory_by_alias.items():
        parse_globals[alias] = object_factory(parse_context)

    for alias, target_macro_factory in self._target_macro_factory_by_alias.items():
        parse_globals[alias] = target_macro_factory.target_macro(parse_context)

    return self.ParseState(parse_context, parse_globals)
python
{ "resource": "" }
q27639
WorkUnit.end
train
def end(self):
    """Mark the time at which this workunit ended."""
    self.end_time = time.time()
    return (self.path(),
            self.duration(),
            self._self_time(),
            self.has_label(WorkUnitLabel.TOOL))
python
{ "resource": "" }
q27640
WorkUnit.set_outcome
train
def set_outcome(self, outcome):
    """Set the outcome of this work unit.

    We can set the outcome on a work unit directly, but that outcome will also be affected by
    those of its subunits. The right thing happens: The outcome of a work unit is the worst
    outcome of any of its subunits and any outcome set on it directly.
    """
    if outcome not in range(0, 5):
        raise Exception('Invalid outcome: {}'.format(outcome))
    # Outcomes only ever get worse (lower value), never better; propagate changes upward.
    if outcome < self._outcome:
        self._outcome = outcome
        if self.parent:
            self.parent.set_outcome(self._outcome)
python
{ "resource": "" }
q27641
WorkUnit.start_delta_string
train
def start_delta_string(self):
    """A convenient string representation of how long after the run started we started.

    :API: public
    """
    delta = int(self.start_time) - int(self.root().start_time)
    minutes, seconds = int(delta / 60), delta % 60
    return '{:02}:{:02}'.format(minutes, seconds)
python
{ "resource": "" }
q27642
WorkUnit.ancestors
train
def ancestors(self):
    """Returns a list consisting of this workunit and those enclosing it, up to the root.

    :API: public
    """
    chain = []
    node = self
    while node is not None:
        chain.append(node)
        node = node.parent
    return chain
python
{ "resource": "" }
q27643
WorkUnit.to_dict
train
def to_dict(self):
    """Useful for providing arguments to templates.

    :API: public
    """
    result = {}
    for key in ['name', 'cmd', 'id', 'start_time', 'end_time', 'outcome',
                'start_time_string', 'start_delta_string']:
        attr = getattr(self, key)
        # Zero-arg methods (e.g. outcome()) are invoked; plain attributes pass through as-is.
        result[key] = attr() if hasattr(attr, '__call__') else attr
    result['parent'] = self.parent.to_dict() if self.parent else None
    return result
python
{ "resource": "" }
q27644
WorkUnit._self_time
train
def _self_time(self): """Returns the time spent in this workunit outside of any children.""" return self.duration() - sum([child.duration() for child in self.children])
python
{ "resource": "" }
q27645
longest_dir_prefix
train
def longest_dir_prefix(path, prefixes):
    """Given a list of prefixes, return the one that is the longest prefix to the given path.

    Returns None if there are no matches.
    """
    best_len, best_prefix = 0, None
    for candidate in prefixes:
        # Skip candidates that can't beat the current best before doing the path check.
        if len(candidate) > best_len and fast_relpath_optional(path, candidate) is not None:
            best_len, best_prefix = len(candidate), candidate
    return best_prefix
python
{ "resource": "" }
q27646
safe_mkdir_for
train
def safe_mkdir_for(path, clean=False):
    """Ensure that the parent directory for a file is present.

    If it's not there, create it. If it is, no-op.
    """
    parent_dir = os.path.dirname(path)
    safe_mkdir(parent_dir, clean=clean)
python
{ "resource": "" }
q27647
safe_mkdir_for_all
train
def safe_mkdir_for_all(paths):
  """Make directories which would contain all of the passed paths.

  This avoids attempting to re-make the same directories, which may be noticeably expensive
  if many paths mostly fall in the same set of directories.

  :param list of str paths: The paths for which containing directories should be created.
  """
  parents_made = set()
  for path in paths:
    parent = os.path.dirname(path)
    if parent in parents_made:
      continue
    safe_mkdir(parent)
    parents_made.add(parent)
python
{ "resource": "" }
q27648
safe_file_dump
train
def safe_file_dump(filename, payload='', mode='w'):
  """Write a string to a file.

  This method is "safe" to the extent that `safe_open` is "safe". See the explanation on the
  method doc there.

  When `payload` is an empty string (the default), this method can be used as a concise way to
  create an empty file along with its containing directory (or truncate it if it already exists).

  :param string filename: The filename of the file to write to.
  :param string payload: The string to write to the file.
  :param string mode: A mode argument for the python `open` builtin which should be a write mode
                      variant. Defaults to 'w'.
  """
  with safe_open(filename, mode=mode) as outfile:
    outfile.write(payload)
python
{ "resource": "" }
q27649
mergetree
train
def mergetree(src, dst, symlinks=False, ignore=None, file_filter=None):
  """Just like `shutil.copytree`, except the `dst` dir may exist.

  The `src` directory will be walked and its contents copied into `dst`. If `dst` already exists
  the `src` tree will be overlayed in it; ie: existing files in `dst` will be over-written with
  files from `src` when they have the same subtree path.

  :param src: Root of the source tree to copy.
  :param dst: Root of the destination tree; created if missing.
  :param symlinks: If True, symlinks in `src` are re-created as symlinks in `dst` rather than
                   followed/copied. Note that the walk halts at a symlinked directory.
  :param ignore: Optional callable `(dir_path, names) -> names_to_ignore`, as for
                 `shutil.copytree`.
  :param file_filter: Optional predicate over src-relative file paths; files for which it
                      returns falsy are skipped.
  :raises ExistingFileError: If a source directory collides with a non-directory in `dst`.
  :raises ExistingDirError: If a source file collides with a non-file in `dst`.
  """
  safe_mkdir(dst)
  if not file_filter:
    # Default to copying everything.
    file_filter = lambda _: True
  for src_path, dirnames, filenames in safe_walk(src, topdown=True, followlinks=True):
    # Compute this walk level's ignore set, mirroring shutil.copytree's `ignore` contract.
    ignorenames = ()
    if ignore:
      to_ignore = ignore(src_path, dirnames + filenames)
      if to_ignore:
        ignorenames = frozenset(to_ignore)

    src_relpath = os.path.relpath(src_path, src)
    dst_path = os.path.join(dst, src_relpath)

    visit_dirs = []
    for dirname in dirnames:
      if dirname in ignorenames:
        continue
      src_dir = os.path.join(src_path, dirname)
      dst_dir = os.path.join(dst_path, dirname)
      if os.path.exists(dst_dir):
        if not os.path.isdir(dst_dir):
          raise ExistingFileError('While copying the tree at {} to {}, encountered directory {} in '
                                  'the source tree that already exists in the destination as a '
                                  'non-directory.'.format(src, dst, dst_dir))
        visit_dirs.append(dirname)
      elif symlinks and os.path.islink(src_dir):
        link = os.readlink(src_dir)
        os.symlink(link, dst_dir)
        # We need to halt the walk at a symlink dir; so we do not place dirname in visit_dirs
        # here.
      else:
        os.makedirs(dst_dir)
        visit_dirs.append(dirname)

    # In-place mutate dirnames to halt the walk when the dir is ignored by the caller.
    dirnames[:] = visit_dirs

    for filename in filenames:
      if filename in ignorenames:
        continue
      src_file_relpath = os.path.join(src_relpath, filename)
      if not file_filter(src_file_relpath):
        continue
      dst_filename = os.path.join(dst_path, filename)
      if os.path.exists(dst_filename):
        if not os.path.isfile(dst_filename):
          raise ExistingDirError('While copying the tree at {} to {}, encountered file {} in the '
                                 'source tree that already exists in the destination as a non-file.'
                                 .format(src, dst, dst_filename))
        else:
          # Remove the stale file so the copy below over-writes it cleanly.
          os.unlink(dst_filename)
      src_filename = os.path.join(src_path, filename)
      if symlinks and os.path.islink(src_filename):
        link = os.readlink(src_filename)
        os.symlink(link, dst_filename)
      else:
        shutil.copy2(src_filename, dst_filename)
python
{ "resource": "" }
q27650
safe_mkdtemp
train
def safe_mkdtemp(cleaner=_mkdtemp_atexit_cleaner, **kw):
  """Create a temporary directory that is cleaned up on process exit.

  Arguments are as to tempfile.mkdtemp.

  :API: public
  """
  # Proper lock sanitation on fork [issue 6721] would be desirable here.
  with _MKDTEMP_LOCK:
    new_dir = tempfile.mkdtemp(**kw)
    return register_rmtree(new_dir, cleaner=cleaner)
python
{ "resource": "" }
q27651
register_rmtree
train
def register_rmtree(directory, cleaner=_mkdtemp_atexit_cleaner):
  """Register an existing directory to be cleaned up at process exit.

  :returns: The `directory` argument, for call-chaining convenience.
  """
  with _MKDTEMP_LOCK:
    _mkdtemp_register_cleaner(cleaner)
    # Cleanup is tracked per-pid so forked children don't remove a parent's dirs.
    _MKDTEMP_DIRS[os.getpid()].add(directory)
  return directory
python
{ "resource": "" }
q27652
safe_open
train
def safe_open(filename, *args, **kwargs):
  """Open a file safely, ensuring that its directory exists.

  All positional and keyword arguments beyond `filename` are forwarded to the `open` builtin.

  :API: public
  """
  safe_mkdir_for(filename)
  return open(filename, *args, **kwargs)
python
{ "resource": "" }
q27653
safe_concurrent_rename
train
def safe_concurrent_rename(src, dst): """Rename src to dst, ignoring errors due to dst already existing. Useful when concurrent processes may attempt to create dst, and it doesn't matter who wins. """ # Delete dst, in case it existed (with old content) even before any concurrent processes # attempted this write. This ensures that at least one process writes the new content. if os.path.isdir(src): # Note that dst may not exist, so we test for the type of src. safe_rmtree(dst) else: safe_delete(dst) try: shutil.move(src, dst) except IOError as e: if e.errno != errno.EEXIST: raise
python
{ "resource": "" }
q27654
safe_concurrent_creation
train
def safe_concurrent_creation(target_path):
  """A contextmanager that yields a temporary path and renames it to a final target path when the
  contextmanager exits.

  Useful when concurrent processes may attempt to create a file, and it doesn't matter who wins.

  :param target_path: The final target path to rename the temporary path to.
  :yields: A temporary path containing the original path with a unique (uuid4) suffix.
  """
  safe_mkdir_for(target_path)
  tmp_path = '{}.tmp.{}'.format(target_path, uuid.uuid4().hex)
  try:
    yield tmp_path
  except Exception:
    # The caller's work failed: discard the partial temp output and re-raise.
    rm_rf(tmp_path)
    raise
  else:
    # Only publish if the caller actually produced something at the temp path.
    if os.path.exists(tmp_path):
      safe_concurrent_rename(tmp_path, target_path)
python
{ "resource": "" }
q27655
absolute_symlink
train
def absolute_symlink(source_path, target_path):
  """Create a symlink at target pointing to source using the absolute path.

  :param source_path: Absolute path to source file
  :param target_path: Absolute path to intended symlink
  :raises ValueError if source_path or link_path are not unique, absolute paths
  :raises OSError on failure UNLESS file already exists or no such file/directory
  """
  if not os.path.isabs(source_path):
    raise ValueError("Path for source : {} must be absolute".format(source_path))
  if not os.path.isabs(target_path):
    raise ValueError("Path for link : {} must be absolute".format(target_path))
  if source_path == target_path:
    raise ValueError("Path for link is identical to source : {}".format(source_path))
  try:
    if os.path.lexists(target_path):
      # Clear whatever currently occupies target_path: unlink links/files, rmtree real dirs.
      if os.path.islink(target_path) or os.path.isfile(target_path):
        os.unlink(target_path)
      else:
        shutil.rmtree(target_path)
    safe_mkdir_for(target_path)
    os.symlink(source_path, target_path)
  except OSError as e:
    # Another run may beat us to deletion or creation.
    if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
      raise
python
{ "resource": "" }
q27656
relative_symlink
train
def relative_symlink(source_path, link_path):
  """Create a symlink at link_path pointing to relative source

  :param source_path: Absolute path to source file
  :param link_path: Absolute path to intended symlink
  :raises ValueError if source_path or link_path are not unique, absolute paths
  :raises OSError on failure UNLESS file already exists or no such file/directory
  """
  if not os.path.isabs(source_path):
    raise ValueError("Path for source:{} must be absolute".format(source_path))
  if not os.path.isabs(link_path):
    raise ValueError("Path for link:{} must be absolute".format(link_path))
  if source_path == link_path:
    raise ValueError("Path for link is identical to source:{}".format(source_path))
  # The failure state below had a long life as an uncaught error. No behavior was changed here, it just adds a catch.
  # Raising an exception does differ from absolute_symlink, which takes the liberty of deleting existing directories.
  if os.path.isdir(link_path) and not os.path.islink(link_path):
    raise ValueError("Path for link would overwrite an existing directory: {}".format(link_path))
  try:
    if os.path.lexists(link_path):
      os.unlink(link_path)
    # Link via a path relative to the link's own directory so the link survives tree moves.
    rel_path = os.path.relpath(source_path, os.path.dirname(link_path))
    safe_mkdir_for(link_path)
    os.symlink(rel_path, link_path)
  except OSError as e:
    # Another run may beat us to deletion or creation.
    if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
      raise
python
{ "resource": "" }
q27657
get_basedir
train
def get_basedir(path):
  """Returns the base directory of a path.

  Examples:
    get_basedir('foo/bar/baz') --> 'foo'
    get_basedir('/foo/bar/baz') --> ''
    get_basedir('foo') --> 'foo'
  """
  separator_index = path.find(os.sep)
  if separator_index == -1:
    return path
  return path[:separator_index]
python
{ "resource": "" }
q27658
is_executable
train
def is_executable(path):
  """Returns whether a path names an existing executable file."""
  if not os.path.isfile(path):
    return False
  return os.access(path, os.X_OK)
python
{ "resource": "" }
q27659
check_no_overlapping_paths
train
def check_no_overlapping_paths(paths):
  """Given a list of paths, ensure that all are unique and do not have the same prefix.

  :param paths: A list of path strings to validate.
  :raises ValueError: If any path appears more than once, or if one path is a directory
                      prefix of another (e.g. '/a' and '/a/b').
  """
  # Duplicate detection in a single O(n) pass (the previous implementation copied the
  # list per element, making this O(n^2)).
  seen = set()
  for path in paths:
    if path in seen:
      raise ValueError('{} appeared more than once. All paths must be unique.'.format(path))
    seen.add(path)
  for path in paths:
    # os.path.join(path, '') appends a trailing separator, so we match only true directory
    # prefixes ('/a' vs '/a/b') and not mere substrings ('/a' vs '/ab', '/a/b' vs '/xa/b'),
    # which the previous `path in p` substring test incorrectly rejected.
    prefix = os.path.join(path, '')
    for other in paths:
      if other != path and other.startswith(prefix):
        raise ValueError('{} and {} have the same prefix. All paths must be unique and cannot overlap.'.format(path, other))
python
{ "resource": "" }
q27660
is_readable_dir
train
def is_readable_dir(path):
  """Returns whether a path names an existing directory we can list and read files from."""
  checks = (
    os.path.isdir(path),
    os.access(path, os.R_OK),
    os.access(path, os.X_OK),
  )
  return all(checks)
python
{ "resource": "" }
q27661
Scheduler._register_rules
train
def _register_rules(self, rule_index):
  """Record the given RuleIndex on `self._tasks`.

  :param rule_index: A RuleIndex whose `rules` maps output types to rule collections and whose
                     `union_rules` maps union base types to their members.
  :raises ValueError: If a rule that is not a TaskRule is encountered.
  """
  registered = set()
  for output_type, rules in rule_index.rules.items():
    for rule in rules:
      key = (output_type, rule)
      if key in registered:
        # Skip (output_type, rule) pairs already registered with the native engine.
        continue
      registered.add(key)
      if type(rule) is TaskRule:
        self._register_task(output_type, rule, rule_index.union_rules)
      else:
        raise ValueError('Unexpected Rule type: {}'.format(rule))
python
{ "resource": "" }
q27662
Scheduler._register_task
train
def _register_task(self, output_type, rule, union_rules):
  """Register the given TaskRule with the native scheduler.

  :param output_type: The product type the task produces.
  :param rule: The TaskRule being registered.
  :param union_rules: Mapping of union base type -> member types, used to expand Gets whose
                      declared subject is a union.
  """
  func = Function(self._to_key(rule.func))
  # Begin/select/get/end must be issued as a bracketed sequence against the native tasks object.
  self._native.lib.tasks_task_begin(self._tasks, func, self._to_type(output_type), rule.cacheable)
  for selector in rule.input_selectors:
    self._native.lib.tasks_add_select(self._tasks, self._to_type(selector))

  def add_get_edge(product, subject):
    # Helper: register a single Get edge (product requested for a concrete subject type).
    self._native.lib.tasks_add_get(self._tasks, self._to_type(product), self._to_type(subject))

  for the_get in rule.input_gets:
    union_members = union_rules.get(the_get.subject_declared_type, None)
    if union_members:
      # If the registered subject type is a union, add Get edges to all registered union members.
      for union_member in union_members:
        add_get_edge(the_get.product, union_member)
    else:
      # Otherwise, the Get subject is a "concrete" type, so add a single Get edge.
      add_get_edge(the_get.product, the_get.subject_declared_type)

  self._native.lib.tasks_task_end(self._tasks)
python
{ "resource": "" }
q27663
Scheduler.with_fork_context
train
def with_fork_context(self, func):
  """See the rustdocs for `scheduler_fork_context` for more information.

  :param func: A callable to invoke inside the native fork context.
  :returns: The result of `func`, unwrapped from the native result envelope.
  """
  res = self._native.lib.scheduler_fork_context(self._scheduler, Function(self._to_key(func)))
  # _raise_or_return converts a native error result into a raised exception.
  return self._raise_or_return(res)
python
{ "resource": "" }
q27664
Scheduler.capture_snapshots
train
def capture_snapshots(self, path_globs_and_roots):
  """Synchronously captures Snapshots for each matching PathGlobs rooted at a its root directory.

  This is a blocking operation, and should be avoided where possible.

  :param path_globs_and_roots tuple<PathGlobsAndRoot>: The PathGlobs to capture, and the root
         directory relative to which each should be captured.
  :returns: A tuple of Snapshots.
  """
  result = self._native.lib.capture_snapshots( self._scheduler, self._to_value(_PathGlobsAndRootCollection(path_globs_and_roots)), )
  # _raise_or_return converts a native error result into a raised exception.
  return self._raise_or_return(result)
python
{ "resource": "" }
q27665
Scheduler.merge_directories
train
def merge_directories(self, directory_digests):
  """Merges any number of directories.

  :param directory_digests: Tuple of DirectoryDigests.
  :return: A Digest.
  """
  result = self._native.lib.merge_directories( self._scheduler, self._to_value(_DirectoryDigests(directory_digests)), )
  # _raise_or_return converts a native error result into a raised exception.
  return self._raise_or_return(result)
python
{ "resource": "" }
q27666
Scheduler.materialize_directories
train
def materialize_directories(self, directories_paths_and_digests): """Creates the specified directories on the file system. :param directories_paths_and_digests tuple<DirectoryToMaterialize>: Tuple of the path and digest of the directories to materialize. :returns: Nothing or an error. """ # Ensure there isn't more than one of the same directory paths and paths do not have the same prefix. dir_list = [dpad.path for dpad in directories_paths_and_digests] check_no_overlapping_paths(dir_list) result = self._native.lib.materialize_directories( self._scheduler, self._to_value(_DirectoriesToMaterialize(directories_paths_and_digests)), ) return self._raise_or_return(result)
python
{ "resource": "" }
q27667
Scheduler.new_session
train
def new_session(self, zipkin_trace_v2, v2_ui=False):
  """Creates a new SchedulerSession for this Scheduler.

  :param zipkin_trace_v2: Passed through to the native session constructor; presumably toggles
                          v2 zipkin tracing — TODO confirm against native API.
  :param v2_ui: Whether to enable the v2 console UI for this session.
  """
  # The native session is sized to the local cpu count for parallelism.
  return SchedulerSession(self, self._native.new_session( self._scheduler, zipkin_trace_v2, v2_ui, multiprocessing.cpu_count()) )
python
{ "resource": "" }
q27668
SchedulerSession.trace
train
def trace(self, execution_request):
  """Yields a stringified 'stacktrace' starting from the scheduler's roots.

  :param execution_request: The ExecutionRequest whose native roots should be traced.
  :yields: One formatted trace line at a time, as produced by the native graph.
  """
  for line in self._scheduler.graph_trace(execution_request.native):
    yield line
python
{ "resource": "" }
q27669
SchedulerSession.execution_request
train
def execution_request(self, products, subjects):
  """Create and return an ExecutionRequest for the given products and subjects.

  The resulting ExecutionRequest object will contain keys tied to this scheduler's product Graph,
  and so it will not be directly usable with other scheduler instances without being re-created.

  NB: This method does a "cross product", mapping all subjects to all products. To create a
  request for just the given list of subject -> product tuples, use `execution_request_literal()`!

  :param products: A list of product types to request for the roots.
  :type products: list of types
  :param subjects: A list of Spec and/or PathGlobs objects.
  :type subject: list of :class:`pants.base.specs.Spec`, `pants.build_graph.Address`, and/or
    :class:`pants.engine.fs.PathGlobs` objects.
  :returns: An ExecutionRequest for the given products and subjects.
  """
  product_subject_pairs = tuple(
    (subject, product)
    for subject in subjects
    for product in products
  )
  return self.execution_request_literal(product_subject_pairs)
python
{ "resource": "" }
q27670
SchedulerSession.invalidate_files
train
def invalidate_files(self, direct_filenames):
  """Invalidates the given filenames in an internal product Graph instance.

  :param direct_filenames: The filenames to invalidate.
  :returns: The invalidation result from the underlying scheduler (the count of invalidated
            nodes, presumably — TODO confirm).
  """
  invalidated = self._scheduler.invalidate_files(direct_filenames)
  # Optionally re-render the graph visualization after the mutation.
  self._maybe_visualize()
  return invalidated
python
{ "resource": "" }
q27671
SchedulerSession.execute
train
def execute(self, execution_request):
  """Invoke the engine for the given ExecutionRequest, returning Return and Throw states.

  :param execution_request: The ExecutionRequest to run; its `roots` order is preserved in
                            the pairing with native results.
  :return: A tuple of (root, Return) tuples and (root, Throw) tuples.
  """
  start_time = time.time()
  # Pair each requested root with its computed state, relying on the native engine returning
  # results in root order.
  roots = list(zip(execution_request.roots, self._scheduler._run_and_return_roots(self._session, execution_request.native)))
  self._maybe_visualize()
  logger.debug( 'computed %s nodes in %f seconds. there are %s total nodes.', len(roots), time.time() - start_time, self._scheduler.graph_len() )
  # Partition results into successes and failures while preserving root order.
  returns = tuple((root, state) for root, state in roots if type(state) is Return)
  throws = tuple((root, state) for root, state in roots if type(state) is Throw)
  return returns, throws
python
{ "resource": "" }
q27672
SchedulerSession.product_request
train
def product_request(self, product, subjects):
  """Executes a request for a single product for some subjects, and returns the products.

  :param class product: A product type for the request.
  :param list subjects: A list of subjects or Params instances for the request.
  :returns: A list of the requested products, with length match len(subjects).
  :raises: Via `_trace_on_error` when any root failed — TODO confirm the raised type.
  """
  request = self.execution_request([product], subjects)
  returns, throws = self.execute(request)

  # Throw handling.
  if throws:
    unique_exceptions = tuple({t.exc for _, t in throws})
    self._trace_on_error(unique_exceptions, request)

  # Everything is a Return: we rely on the fact that roots are ordered to preserve subject
  # order in output lists.
  return [ret.value for _, ret in returns]
python
{ "resource": "" }
q27673
NodeResolverBase.parse_file_path
train
def parse_file_path(cls, file_path):
  """Parse a file address path without the file specifier.

  :param file_path: The raw file path string to match against `cls.file_regex`.
  :returns: The first capture group of the match, or None when the path does not match.
  """
  match = cls.file_regex.match(file_path)
  return match.group(1) if match else None
python
{ "resource": "" }
q27674
NodeResolverBase._copy_sources
train
def _copy_sources(self, target, results_dir):
  """Copy sources from a target to a results directory.

  :param NodePackage target: A subclass of NodePackage
  :param string results_dir: The results directory
  """
  buildroot = get_buildroot()
  # Source paths are buildroot-relative; re-root them at the target's spec_path so the
  # layout under results_dir mirrors the target's own directory.
  source_relative_to = target.address.spec_path
  for source in target.sources_relative_to_buildroot():
    dest = os.path.join(results_dir, os.path.relpath(source, source_relative_to))
    safe_mkdir(os.path.dirname(dest))
    shutil.copyfile(os.path.join(buildroot, source), dest)
python
{ "resource": "" }
q27675
NodeResolverBase._get_target_from_package_name
train
def _get_target_from_package_name(self, target, package_name, file_path):
  """Get a dependent target given the package name and relative file path.

  This will only traverse direct dependencies of the passed target. It is not necessary to
  traverse further than that because transitive dependencies will be resolved under the direct
  dependencies and every direct dependencies is symlinked to the target.
  Returns `None` if the target does not exist.

  :param NodePackage target: A subclass of NodePackage
  :param string package_name: A package.json name that is required to be the same as the target name
  :param string file_path: Relative filepath from target to the package in the format
    'file:<address_path>'
  """
  address_path = self.parse_file_path(file_path)
  if not address_path:
    # Not a 'file:' style path at all; nothing to resolve.
    return None
  # Resolve the relative path against the target's own spec_path to get the dependency's path.
  dep_spec_path = os.path.normpath(os.path.join(target.address.spec_path, address_path))
  for dep in target.dependencies:
    if dep.package_name == package_name and dep.address.spec_path == dep_spec_path:
      return dep
  return None
python
{ "resource": "" }
q27676
Duplicate.validate_action
train
def validate_action(cls, action):
  """Verifies the given action is a valid duplicate jar rule action.

  :returns: The action if it is valid.
  :raises: ``ValueError`` if the action is invalid.
  """
  if action in cls._VALID_ACTIONS:
    return action
  raise ValueError('The supplied action must be one of {valid}, given: {given}'
                   .format(valid=cls._VALID_ACTIONS, given=action))
python
{ "resource": "" }
q27677
JarRules.skip_signatures_and_duplicates_concat_well_known_metadata
train
def skip_signatures_and_duplicates_concat_well_known_metadata(cls, default_dup_action=None,
                                                              additional_rules=None):
  """Produces a rule set useful in many deploy jar creation contexts.

  The rule set skips duplicate entries by default, retaining the 1st encountered. In addition it
  has the following special handling:

  - jar signature metadata is dropped
  - jar indexing files INDEX.LIST are dropped
  - ``java.util.ServiceLoader`` provider-configuration files are concatenated in the order
    encountered

  :param default_dup_action: An optional default action to take for duplicates. Defaults to
    `Duplicate.SKIP` if not specified.
  :param additional_rules: Optionally one or more jar rules to add to those described above.
  :returns: JarRules
  """
  default_dup_action = Duplicate.validate_action(default_dup_action or Duplicate.SKIP)
  additional_rules = assert_list(additional_rules, expected_type=(Duplicate, Skip))
  rules = [Skip(r'^META-INF/[^/]+\.SF$'),  # signature file
           Skip(r'^META-INF/[^/]+\.DSA$'),  # default signature alg. file
           Skip(r'^META-INF/[^/]+\.RSA$'),  # default signature alg. file
           Skip(r'^META-INF/INDEX.LIST$'),  # interferes with Class-Path: see man jar for i option
           Duplicate(r'^META-INF/services/', Duplicate.CONCAT_TEXT)]  # 1 svc fqcn per line
  return JarRules(rules=rules + additional_rules, default_dup_action=default_dup_action)
python
{ "resource": "" }
q27678
JarRules.default
train
def default(cls):
  """Returns the default set of jar rules.

  Can be set with `set_default` but otherwise defaults to
  `skip_signatures_and_duplicates_concat_well_known_metadata`.

  :API: public
  """
  # Lazily construct and cache the site-wide default on first access.
  if cls._DEFAULT is None:
    cls._DEFAULT = cls.skip_signatures_and_duplicates_concat_well_known_metadata()
  return cls._DEFAULT
python
{ "resource": "" }
q27679
JarRules.set_default
train
def set_default(cls, rules):
  """Sets the default site-wide jar rules.

  :param rules: The JarRules instance to install as the default.
  :raises ValueError: If `rules` is not a JarRules instance.
  """
  if not isinstance(rules, JarRules):
    raise ValueError('The default rules must be a JarRules instance.')
  cls._DEFAULT = rules
python
{ "resource": "" }
q27680
BuildRoot.path
train
def path(self):
  """Returns the build root for the current workspace.

  The result is computed once and cached on `self._root_dir`; the env var override takes
  precedence over filesystem discovery via `find_buildroot`.
  """
  if self._root_dir is None:
    # This env variable is for testing purpose.
    override_buildroot = os.environ.get('PANTS_BUILDROOT_OVERRIDE', None)
    if override_buildroot:
      self._root_dir = override_buildroot
    else:
      self._root_dir = os.path.realpath(self.find_buildroot())
      if PY2:
        # realpath returns bytes-ish text under py2 here; normalize to unicode.
        self._root_dir = self._root_dir.decode('utf-8')
  return self._root_dir
python
{ "resource": "" }
q27681
BuildRoot.path
train
def path(self, root_dir):
  """Manually establishes the build root for the current workspace.

  :param root_dir: The directory to use as the build root; must exist.
  :raises ValueError: If the resolved directory does not exist.
  """
  resolved = os.path.realpath(root_dir)
  if not os.path.exists(resolved):
    raise ValueError('Build root does not exist: {}'.format(root_dir))
  self._root_dir = resolved
python
{ "resource": "" }
q27682
BuildRoot.temporary
train
def temporary(self, path):
  """Establishes a temporary build root, restoring the prior build root on exit.

  :param path: The build root to use for the duration of the context; must not be None.
  :raises ValueError: If `path` is None.
  """
  if path is None:
    raise ValueError('Can only temporarily establish a build root given a path.')
  prior = self._root_dir
  self._root_dir = path
  try:
    yield
  finally:
    # Always restore, even if the body raised.
    self._root_dir = prior
python
{ "resource": "" }
q27683
HelpPrinter.print_help
train
def print_help(self):
  """Print help to the console.

  Dispatches on the concrete type of `self._help_request`.

  :return: 0 on success, 1 on failure
  """
  def print_hint():
    # Shared trailer for the error cases below.
    print('Use `pants goals` to list goals.')
    print('Use `pants help` to get help.')
  if isinstance(self._help_request, VersionHelp):
    print(pants_version())
  elif isinstance(self._help_request, OptionsHelp):
    self._print_options_help()
  elif isinstance(self._help_request, GoalsHelp):
    self._print_goals_help()
  elif isinstance(self._help_request, UnknownGoalHelp):
    print('Unknown goals: {}'.format(', '.join(self._help_request.unknown_goals)))
    print_hint()
    return 1
  elif isinstance(self._help_request, NoGoalHelp):
    print('No goals specified.')
    print_hint()
    return 1
  return 0
python
{ "resource": "" }
q27684
HelpPrinter._print_options_help
train
def _print_options_help(self):
  """Print a help screen.

  Assumes that self._help_request is an instance of OptionsHelp.

  Note: Only useful if called after options have been registered.
  """
  show_all_help = self._help_request.all_scopes
  if show_all_help:
    help_scopes = list(self._options.known_scope_to_info.keys())
  else:
    # The scopes explicitly mentioned by the user on the cmd line.
    help_scopes = set(self._options.scope_to_flags.keys()) - {GLOBAL_SCOPE}

  scope_infos = list(ScopeInfoIterator(self._options.known_scope_to_info).iterate(help_scopes))

  if scope_infos:
    # Specific scopes were requested (or all): print per-scope help and stop.
    for scope_info in scope_infos:
      help_str = self._format_help(scope_info)
      if help_str:
        print(help_str)
    return
  else:
    # No scopes resolved: print the generic usage banner plus global-scope help.
    print(pants_release())
    print('\nUsage:')
    print(' ./pants [option ...] [goal ...] [target...] Attempt the specified goals.')
    print(' ./pants help Get help.')
    print(' ./pants help [goal] Get help for a goal.')
    print(' ./pants help-advanced [goal] Get help for a goal\'s advanced options.')
    print(' ./pants help-all Get help for all goals.')
    print(' ./pants goals List all installed goals.')
    print('')
    print(' [target] accepts two special forms:')
    print(' dir: to include all targets in the specified directory.')
    print(' dir:: to include all targets found recursively under the directory.')
    print('\nFriendly docs:\n http://pantsbuild.org/')

  print(self._format_help(ScopeInfo(GLOBAL_SCOPE, ScopeInfo.GLOBAL)))
python
{ "resource": "" }
q27685
HelpPrinter._format_help
train
def _format_help(self, scope_info):
  """Return a help message for the options registered on this object.

  Assumes that self._help_request is an instance of OptionsHelp.

  :param scope_info: Scope of the options.
  """
  scope = scope_info.scope
  description = scope_info.description
  # NOTE(review): both flags are driven by `advanced` — recursive display appears intentionally
  # coupled to advanced help; confirm this is not a copy/paste slip.
  show_recursive = self._help_request.advanced
  show_advanced = self._help_request.advanced
  # Only colorize when attached to a terminal.
  color = sys.stdout.isatty()
  help_formatter = HelpFormatter(scope, show_recursive, show_advanced, color)
  return '\n'.join(help_formatter.format_options(scope, description,
      self._options.get_parser(scope).option_registrations_iter()))
python
{ "resource": "" }
q27686
generate_travis_yml
train
def generate_travis_yml():
  """Generates content for a .travis.yml file from templates.

  Renders the mustache templates packaged alongside this module and prints the result to stdout.
  """
  def get_mustache_file(file_name):
    # Templates are packaged as module resources, so load them via pkg_resources.
    return pkg_resources.resource_string(__name__, file_name).decode('utf-8')

  template = get_mustache_file('travis.yml.mustache')
  before_install_linux = get_mustache_file('before_install_linux.mustache')
  before_install_osx = get_mustache_file('before_install_osx.mustache')
  env_osx_with_pyenv = get_mustache_file('env_osx_with_pyenv.mustache')
  docker_build_image = get_mustache_file('docker_build_image.mustache')
  docker_run_image = get_mustache_file('docker_run_image.mustache')

  context = { 'header': HEADER, 'integration_shards': range(0, num_integration_shards), 'integration_shards_length': num_integration_shards, }
  renderer = pystache.Renderer(partials={ 'before_install_linux': before_install_linux, 'before_install_osx': before_install_osx, 'env_osx_with_pyenv': env_osx_with_pyenv, 'docker_build_image': docker_build_image, 'docker_run_image': docker_run_image })
  print(renderer.render(template, context))
python
{ "resource": "" }
q27687
JvmPlatformAnalysisMixin._unfiltered_jvm_dependency_map
train
def _unfiltered_jvm_dependency_map(self, fully_transitive=False):
  """Jvm dependency map without filtering out non-JvmTarget keys, exposed for testing.

  Unfiltered because the keys in the resulting map include non-JvmTargets.

  See the explanation in the jvm_dependency_map() docs for what this method produces.

  :param fully_transitive: if true, the elements of the map will be the full set of transitive
    JvmTarget dependencies, not just the "direct" ones. (see jvm_dependency_map for the definition
    of "direct")
  :return: map of target -> set of JvmTarget "direct" dependencies.
  """
  targets = self.jvm_targets
  jvm_deps = defaultdict(set)

  def accumulate_jvm_deps(target):
    for dep in target.dependencies:
      if self._is_jvm_target(dep):
        jvm_deps[target].add(dep)
        # A jvm dep terminates a "direct" path unless we want full transitivity.
        if not fully_transitive:
          continue
      # If 'dep' isn't in jvm_deps, that means that it isn't in the `targets` list at all
      # (since this is a post-order traversal). If it's not in the targets list at all,
      # that means it cannot have any JvmTargets as transitive dependencies. In which case
      # we don't care about it, so it's fine that the line below is a no-op.
      #
      # Otherwise, we add in any transitive dependencies that were previously collected.
      jvm_deps[target].update(jvm_deps[dep])

  # Vanilla DFS runs in O(|V|+|E|), and the code inside the loop in accumulate_jvm_deps ends up
  # being run once for each in the graph over the course of the entire search, which means that
  # the total asymptotic runtime complexity is O(|V|+2|E|), which is still O(|V|+|E|).
  self.context.build_graph.walk_transitive_dependency_graph(
    addresses=[t.address for t in targets],
    work=accumulate_jvm_deps,
    postorder=True
  )

  return jvm_deps
python
{ "resource": "" }
q27688
JvmPlatformAnalysisMixin.jvm_dependency_map
train
def jvm_dependency_map(self):
  """A map of each JvmTarget in the context to the set of JvmTargets it depends on "directly".

  "Directly" is in quotes here because it isn't quite the same as its normal use, which would be
  filter(self._is_jvm_target, target.dependencies).

  For this method, we define the set of dependencies which `target` depends on "directly" as:

  { dep | dep is a JvmTarget and exists a directed path p from target to dep such that |p| = 1 }

  Where |p| is computed as the weighted sum of all edges in the path, where edges to a JvmTarget
  have weight 1, and all other edges have weight 0.

  In other words, a JvmTarget 'A' "directly" depends on a JvmTarget 'B' iff there exists a path
  in the directed dependency graph from 'A' to 'B' such that there are no internal vertices in
  the path that are JvmTargets.

  This set is a (not necessarily proper) subset of the set of all JvmTargets that the target
  transitively depends on. The algorithms using this map *would* operate correctly on the full
  transitive superset, but it is more efficient to use this subset.

  The intuition for why we can get away with using this subset: Consider targets A, b, C, D,
  such that A depends on b, which depends on C, which depends on D. Say A,C,D are JvmTargets.

  If A is on java 6 and C is on java 7, we obviously have a problem, and this will be correctly
  identified when verifying the jvm dependencies of A, because the path A->b->C has length 1.

  If instead, A is on java 6, and C is on java 6, but D is on java 7, we still have a problem.
  It will not be detected when processing A, because A->b->C->D has length 2. But when we process
  C, it will be picked up, because C->D has length 1.

  Unfortunately, we can't do something as simple as just using actual direct dependencies,
  because it's perfectly legal for a java 6 A to depend on b (which is a non-JvmTarget), and
  legal for b to depend on a java 7 C, so the transitive information is needed to correctly
  identify the problem.

  :return: the dict mapping JvmTarget -> set of JvmTargets.
  """
  # Keep only JvmTarget keys with non-empty dep sets; the unfiltered map also carries
  # intermediate non-jvm targets used during accumulation.
  jvm_deps = self._unfiltered_jvm_dependency_map()
  return {target: deps for target, deps in jvm_deps.items() if deps and self._is_jvm_target(target)}
python
{ "resource": "" }
q27689
JvmPlatformValidate.validate_platform_dependencies
train
def validate_platform_dependencies(self):
  """Check all jvm targets in the context, throwing an error or warning if there are bad targets.

  If there are errors, this method fails slow rather than fails fast -- that is, it continues
  checking the rest of the targets before spitting error messages. This is useful, because it's
  nice to have a comprehensive list of all errors rather than just the first one we happened to
  hit.
  """
  # Accumulates (target, [bad dependencies]) pairs across the whole graph (fail-slow).
  conflicts = []

  def is_conflicting(target, dependency):
    # A dependency is bad when it requires a newer jvm version than its dependee targets.
    return self.jvm_version(dependency) > self.jvm_version(target)

  # Bail out early if the graph is cyclic: the dependency analysis below assumes a DAG.
  try:
    sort_targets(self.jvm_targets)
  except CycleException:
    self.context.log.warn('Cannot validate dependencies when cycles exist in the build graph.')
    return

  try:
    with self.invalidated(self.jvm_targets,
                          fingerprint_strategy=self.PlatformFingerprintStrategy(),
                          invalidate_dependents=True) as vts:
      dependency_map = self.jvm_dependency_map
      # Only re-check targets whose platform fingerprint changed (or whose dependencies did).
      for vts_target in vts.invalid_vts:
        for target in vts_target.targets:
          if target in dependency_map:
            deps = dependency_map[target]
            invalid_dependencies = [dep for dep in deps if is_conflicting(target, dep)]
            if invalid_dependencies:
              conflicts.append((target, invalid_dependencies))
      if conflicts:
        # NB(gmalmquist): It's important to unconditionally raise an exception, then decide later
        # whether to continue raising it or just print a warning, to make sure the targets aren't
        # marked as valid if there are invalid platform dependencies.
        error_message = self._create_full_error_message(conflicts)
        raise self.IllegalJavaTargetLevelDependency(error_message)
  except self.IllegalJavaTargetLevelDependency as e:
    if self.check == 'fatal':
      raise e
    else:
      # In 'warn' mode we downgrade the failure to a log message but still skipped marking
      # the offending targets valid (the raise above aborted the invalidation block).
      assert self.check == 'warn'
      self.context.log.warn(error_message)
      return error_message
python
{ "resource": "" }
q27690
JvmPlatformExplain.possible_version_evaluation
train
def possible_version_evaluation(self):
  """Evaluate the possible range of versions for each target, yielding the output analysis.

  Generator of human-readable report lines: one summary line per relevant jvm target, plus
  (in detailed mode or for broken targets) an explanation of which dependencies/dependees
  pin the target's min/max allowable platform version.
  """
  only_broken = self.get_options().only_broken
  ranges = self._ranges
  yield 'Allowable JVM platform ranges (* = anything):'
  for target in sorted(filter(self._is_relevant, self.jvm_targets)):
    # Bounds implied by the target's neighborhood; either may be absent (unconstrained).
    min_version = ranges.min_allowed_version.get(target)
    max_version = ranges.max_allowed_version.get(target)
    current_valid = True
    # The target's declared platform must fall inside [min_version, max_version].
    if min_version and self.jvm_version(target) < min_version:
      current_valid = False
    if max_version and self.jvm_version(target) > max_version:
      current_valid = False
    current_text = str(self.jvm_version(target))
    if not current_valid:
      current_text = self._format_error(current_text)
    elif only_broken:
      # --only-broken: skip targets whose current platform is already acceptable.
      continue
    if min_version and max_version:
      range_text = '{} to {}'.format(min_version, max_version)
      if min_version > max_version:
        # Empty range: no platform version can satisfy both constraints.
        range_text = self._format_error(range_text)
    elif min_version:
      range_text = '{}+'.format(min_version)
    elif max_version:
      range_text = '<={}'.format(max_version)
    else:
      range_text = '*'
    yield '{address}: {range} (is {current})'.format(address=target.address.spec,
                                                     range=range_text,
                                                     current=current_text,)
    if self.get_options().detailed or not current_valid:
      # Explain which neighbors are responsible for each bound.
      if min_version:
        min_because = [t for t in ranges.target_dependencies[target]
                       if self.jvm_version(t) == min_version]
        yield ' min={} because of dependencies:'.format(min_version)
        for dep in sorted(min_because):
          yield ' {}'.format(dep.address.spec)
      if max_version:
        max_because = [t for t in ranges.target_dependees[target]
                       if self.jvm_version(t) == max_version]
        yield ' max={} because of dependees:'.format(max_version)
        for dep in sorted(max_because):
          yield ' {}'.format(dep.address.spec)
      yield ''
python
{ "resource": "" }
q27691
RscCompile._nailgunnable_combined_classpath
train
def _nailgunnable_combined_classpath(self):
  """The merged classpath of every component tool of the rsc compile task.

  Registering the component tools as one "combined" jvm tool lets a single nailgun
  instance invoke any of them (see #7089 and #7092). Outside of nailgun the individual
  classpaths are still invoked separately.
  """
  combined = list(self.tool_classpath('rsc'))
  # Zinc's classpath is folded in so zinc can run from the same nailgun instance.
  combined.extend(super(RscCompile, self).get_zinc_compiler_classpath())
  return combined
python
{ "resource": "" }
q27692
RscCompile._classify_target_compile_workflow
train
def _classify_target_compile_workflow(self, target): """Return the compile workflow to use for this target.""" if target.has_sources('.java') or target.has_sources('.scala'): return self.get_scalar_mirrored_target_option('workflow', target) return None
python
{ "resource": "" }
q27693
RscCompile._on_invalid_compile_dependency
train
def _on_invalid_compile_dependency(self, dep, compile_target, contexts): """Decide whether to continue searching for invalid targets to use in the execution graph. If a necessary dep is a rsc-then-zinc dep and the root is a zinc-only one, continue to recurse because otherwise we'll drop the path between Zinc compile of the zinc-only target and a Zinc compile of a transitive rsc-then-zinc dependency. This is only an issue for graphs like J -> S1 -> S2, where J is a zinc-only target, S1/2 are rsc-then-zinc targets and S2 must be on the classpath to compile J successfully. """ return contexts[compile_target][0].workflow.resolve_for_enum_variant({ 'zinc-only': lambda : contexts[dep][0].workflow == self.JvmCompileWorkflowType.rsc_then_zinc, 'rsc-then-zinc': lambda : False })()
python
{ "resource": "" }
q27694
TargetFilterTaskMixin.target_types_for_alias
train
def target_types_for_alias(self, alias):
  """Returns all the target types that might be produced by the given alias.

  Usually an alias maps to exactly one target type, but a macro can expand a single
  alias into several target types.

  :param string alias: The alias to look up associated target types for.
  :returns: The set of target types that can be produced by the given alias.
  :raises :class:`TargetFilterTaskMixin.InvalidTargetType`: when no target types
    correspond to the given `alias`.
  """
  aliases = self.context.build_configuration.registered_aliases()
  types = aliases.target_types_by_alias.get(alias)
  if types:
    return types
  raise self.InvalidTargetType('Not a target type: {}'.format(alias))
python
{ "resource": "" }
q27695
WorkerPool.submit_work_and_wait
train
def submit_work_and_wait(self, work, workunit_parent=None):
  """Submit work to this pool and block until it has completed.

  - work: The work to execute.
  - workunit_parent: If specified, work is accounted for under this workunit.

  Returns a list with the return value of each invocation, in order. Raises if
  any invocation does.
  """
  # map() hangs on 0-length iterables, so short-circuit the empty case.
  if work is None or len(work.args_tuples) == 0:
    return []

  def run_one(*args):
    return self._do_work(work.func, *args, workunit_name=work.workunit_name,
                         workunit_parent=workunit_parent)

  # The explicit (huge) timeout keeps the wait interruptible: python ignores SIGINT
  # while blocked on a bare condition-variable wait, so ctrl-c would otherwise hang.
  async_result = self._pool.map_async(run_one, work.args_tuples, chunksize=1)
  return async_result.get(timeout=1000000000)
python
{ "resource": "" }
q27696
command_gen
train
def command_gen(tool_installations, tool_executable, args=None, node_paths=None):
  """Build a Command with required tools installed and the runtime path set up.

  :param list tool_installations: Zero-arg functions that install a required tool (if it is
    not already installed) and return its installation path, to be added to the runtime path.
  :param tool_executable: Name of the tool to be executed.
  :param list args: A list of arguments to be passed to the executable.
  :param list node_paths: A list of paths to node_modules. node_modules/.bin will be appended
    to the run time path.
  :rtype: class: `Command`
  """
  bin_dir = 'node_modules/.bin'
  # Invoking each installation function triggers installation when needed.
  extra_paths = [install() for install in tool_installations]
  for node_path in (node_paths or []):
    if node_path.endswith(bin_dir):
      extra_paths.append(node_path)
    else:
      extra_paths.append(os.path.join(node_path, bin_dir))
  return Command(executable=tool_executable, args=args, extra_paths=extra_paths)
python
{ "resource": "" }
q27697
Command.run
train
def run(self, **kwargs):
  """Runs this command without waiting for it.

  :param kwargs: Any extra keyword arguments to pass along to `subprocess.Popen`.
  :returns: A handle to the running command.
  :rtype: :class:`subprocess.Popen`
  """
  env, popen_kwargs = self._prepare_env(kwargs)
  logger.debug('Running command {}'.format(self.cmd))
  process = subprocess.Popen(self.cmd, env=env, **popen_kwargs)
  return process
python
{ "resource": "" }
q27698
Command.check_output
train
def check_output(self, **kwargs):
  """Runs this command to completion, returning its captured stdout.

  :param kwargs: Any extra keyword arguments to pass along to `subprocess.Popen`.
  :returns: The captured standard output stream of the command.
  :rtype: string
  :raises: :class:`subprocess.CalledProcessError` if the command fails.
  """
  env, call_kwargs = self._prepare_env(kwargs)
  raw_stdout = subprocess.check_output(self.cmd, env=env, **call_kwargs)
  return raw_stdout.decode('utf-8')
python
{ "resource": "" }
q27699
JvmBinaryTask.add_main_manifest_entry
train
def add_main_manifest_entry(jar, binary):
  """Populates the jar manifest from the given binary.

  When the binary declares a main class, a 'Main-Class' manifest entry is added;
  otherwise the manifest is left untouched.
  """
  main_class = binary.main
  if main_class is None:
    return
  jar.main(main_class)
python
{ "resource": "" }