_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q27500
_FFISpecification.extern_generator_send
train
def extern_generator_send(self, context_handle, func, arg):
  """Given a generator, send it the given value and return a response.

  Bridges the native engine's generator protocol into python: resumes the python
  generator held by `func` with the value held by `arg`, then translates what the
  generator produces into a `PyGeneratorResponse` union (Get, GetMulti, Broke, or
  Throw) for the native side to consume.

  :param context_handle: CFFI handle to the ExternContext.
  :param func: single-element buffer holding the generator's Handle.
  :param arg: single-element buffer holding the Handle of the value to send.
  :return: a PyGeneratorResponse struct (the dereferenced cdata).
  """
  c = self._ffi.from_handle(context_handle)
  response = self._ffi.new('PyGeneratorResponse*')
  try:
    res = c.from_value(func[0]).send(c.from_value(arg[0]))
    if isinstance(res, Get):
      # Get: the generator requested a single product.
      response.tag = self._lib.Get
      response.get = (
          TypeId(c.to_id(res.product)),
          c.to_value(res.subject),
          c.identify(res.subject),
        )
    elif type(res) in (tuple, list):
      # GetMulti: a batch of requests, transposed into parallel buffers.
      response.tag = self._lib.GetMulti
      response.get_multi = (
          c.type_ids_buf([TypeId(c.to_id(g.product)) for g in res]),
          c.vals_buf([c.to_value(g.subject) for g in res]),
          c.identities_buf([c.identify(g.subject) for g in res]),
        )
    else:
      # Break: the generator yielded a final (non-request) value.
      response.tag = self._lib.Broke
      response.broke = (c.to_value(res),)
  except Exception as e:
    # Throw: surface the python exception (with its formatted traceback attached)
    # to the native side rather than letting it cross the FFI boundary raw.
    response.tag = self._lib.Throw
    val = e
    val._formatted_exc = traceback.format_exc()
    response.throw = (c.to_value(val),)
  return response[0]
python
{ "resource": "" }
q27501
_FFISpecification.extern_call
train
def extern_call(self, context_handle, func, args_ptr, args_len):
  """Given a callable, call it.

  :param context_handle: CFFI handle to the ExternContext.
  :param func: single-element buffer holding the callable's Handle.
  :param args_ptr: pointer to a C array of argument Handles.
  :param args_len: number of entries in `args_ptr`.
  :return: the result of self.call (the wrapped call result for the FFI).
  """
  c = self._ffi.from_handle(context_handle)
  runnable = c.from_value(func[0])
  # Unpack the C array of handles and resolve each back into a python object.
  args = tuple(c.from_value(arg[0]) for arg in self._ffi.unpack(args_ptr, args_len))
  return self.call(c, runnable, args)
python
{ "resource": "" }
q27502
_FFISpecification.extern_eval
train
def extern_eval(self, context_handle, python_code_str_ptr, python_code_str_len):
  """Given an evalable string, eval it and return a Handle for its result.

  :param context_handle: CFFI handle to the ExternContext.
  :param python_code_str_ptr: pointer to a utf8 buffer holding the code string.
  :param python_code_str_len: length of that buffer.
  :return: the result of self.call (a wrapped Handle for the eval result).
  """
  c = self._ffi.from_handle(context_handle)
  # SECURITY NOTE: this eval()s an arbitrary string. Callers are trusted
  # engine-internal code only; never route untrusted input through this extern.
  return self.call(c, eval, [self.to_py_str(python_code_str_ptr, python_code_str_len)])
python
{ "resource": "" }
q27503
ExternContext.identify
train
def identify(self, obj):
  """Return an Ident-shaped tuple ``(hash, TypeId)`` for the given object."""
  # The identity is the object's hash paired with the interned id of its type.
  return (hash(obj), TypeId(self.to_id(type(obj))))
python
{ "resource": "" }
q27504
Native.binary
train
def binary(self):
  """Load and return the path to the native engine binary.

  Extracts the packaged native library into a fresh temporary directory,
  stripping the two version-header lines prepended at build time, and
  returns the path to the extracted ``.so``.

  :raises: self.BinaryLocationError if the resource cannot be unpacked.
  """
  lib_name = '{}.so'.format(NATIVE_ENGINE_MODULE)
  lib_path = os.path.join(safe_mkdtemp(), lib_name)
  try:
    with closing(pkg_resources.resource_stream(__name__, lib_name)) as input_fp:
      # NB: The header stripping code here must be coordinated with header insertion code in
      # build-support/bin/native/bootstrap_code.sh
      engine_version = input_fp.readline().decode('utf-8').strip()
      repo_version = input_fp.readline().decode('utf-8').strip()
      logger.debug('using {} built at {}'.format(engine_version, repo_version))
      # Everything after the two header lines is the raw shared library.
      with open(lib_path, 'wb') as output_fp:
        output_fp.write(input_fp.read())
  except (IOError, OSError) as e:
    raise self.BinaryLocationError(
      "Error unpacking the native engine binary to path {}: {}".format(lib_path, e), e)
  return lib_path
python
{ "resource": "" }
q27505
Native.lib
train
def lib(self):
  """dlopen the native engine binary, register the CFFI externs on it, and return it."""
  engine_lib = self.ffi.dlopen(self.binary)
  spec = _FFISpecification(self.ffi, engine_lib)
  spec.register_cffi_externs()
  return engine_lib
python
{ "resource": "" }
q27506
Native._ffi_module
train
def _ffi_module(self):
  """Load the native engine as a python module and register CFFI externs."""
  bin_dir = os.path.dirname(self.binary)
  logger.debug('loading native engine python module from: %s', bin_dir)
  # Make the directory containing the extracted native module importable first.
  sys.path.insert(0, bin_dir)
  return importlib.import_module(NATIVE_ENGINE_MODULE)
python
{ "resource": "" }
q27507
Native.new_scheduler
train
def new_scheduler(self, tasks, root_subject_types, build_root, work_dir, local_store_dir,
                  ignore_patterns, execution_options, construct_directory_digest,
                  construct_snapshot, construct_file_content, construct_files_content,
                  construct_process_result, type_address, type_path_globs,
                  type_directory_digest, type_snapshot, type_merge_snapshots_request,
                  type_files_content, type_dir, type_file, type_link, type_process_request,
                  type_process_result, type_generator, type_url_to_fetch):
  """Create and return an ExternContext and native Scheduler.

  Registers the python-side constructors and types with the native engine (as
  Function/TypeId keys interned in the ExternContext), passes project-tree and
  remote-execution configuration down as utf8 buffers, and ties the returned
  native scheduler pointer to garbage collection for cleanup.
  """
  # Intern a python callable and wrap its key for the FFI boundary.
  def func(fn):
    return Function(self.context.to_key(fn))
  # Intern a python type and wrap its id for the FFI boundary.
  def ti(type_obj):
    return TypeId(self.context.to_id(type_obj))
  scheduler = self.lib.scheduler_create(
      tasks,
      # Constructors/functions.
      func(construct_directory_digest),
      func(construct_snapshot),
      func(construct_file_content),
      func(construct_files_content),
      func(construct_process_result),
      # Types.
      ti(type_address),
      ti(type_path_globs),
      ti(type_directory_digest),
      ti(type_snapshot),
      ti(type_merge_snapshots_request),
      ti(type_files_content),
      ti(type_dir),
      ti(type_file),
      ti(type_link),
      ti(type_process_request),
      ti(type_process_result),
      ti(type_generator),
      ti(type_url_to_fetch),
      ti(text_type),
      ti(binary_type),
      # Project tree.
      self.context.utf8_buf(build_root),
      self.context.utf8_buf(work_dir),
      self.context.utf8_buf(local_store_dir),
      self.context.utf8_buf_buf(ignore_patterns),
      self.to_ids_buf(root_subject_types),
      # Remote execution config.
      self.context.utf8_buf_buf(execution_options.remote_store_server),
      # We can't currently pass Options to the rust side, so we pass empty strings for None.
      self.context.utf8_buf(execution_options.remote_execution_server or ""),
      self.context.utf8_buf(execution_options.remote_execution_process_cache_namespace or ""),
      self.context.utf8_buf(execution_options.remote_instance_name or ""),
      self.context.utf8_buf(execution_options.remote_ca_certs_path or ""),
      self.context.utf8_buf(execution_options.remote_oauth_bearer_token_path or ""),
      execution_options.remote_store_thread_count,
      execution_options.remote_store_chunk_bytes,
      execution_options.remote_store_chunk_upload_timeout_seconds,
      execution_options.remote_store_rpc_retries,
      execution_options.process_execution_parallelism,
      execution_options.process_execution_cleanup_local_dirs,
  )
  # Destroy the native scheduler when the python wrapper is garbage collected.
  return self.gc(scheduler, self.lib.scheduler_destroy)
python
{ "resource": "" }
q27508
parse_spec
train
def parse_spec(spec, relative_to=None, subproject_roots=None):
  """Parses a target address spec and returns the path from the root of the repo to this Target
  and Target name.

  :API: public

  :param string spec: Target address spec.
  :param string relative_to: path to use for sibling specs, ie: ':another_in_same_build_family',
    interprets the missing spec_path part as `relative_to`.
  :param list subproject_roots: Paths that correspond with embedded build roots under
    the current build root.

  For Example::

    some_target(name='mytarget',
      dependencies=['path/to/buildfile:targetname']
    )

  Where ``path/to/buildfile:targetname`` is the dependent target address spec

  In case the target name is empty it returns the last component of the path as target name, ie::

    spec_path, target_name = parse_spec('path/to/buildfile/foo')

  Will return spec_path as 'path/to/buildfile/foo' and target_name as 'foo'.

  Optionally, specs can be prefixed with '//' to denote an absolute spec path.  This is normally
  not significant except when a spec referring to a root level target is needed from deeper in
  the tree. For example, in ``path/to/buildfile/BUILD``::

    some_target(name='mytarget',
      dependencies=[':targetname']
    )

  The ``targetname`` spec refers to a target defined in ``path/to/buildfile/BUILD*``. If instead
  you want to reference ``targetname`` in a root level BUILD file, use the absolute form.
  For example::

    some_target(name='mytarget',
      dependencies=['//:targetname']
    )
  """
  def normalize_absolute_refs(ref):
    # '//' only marks "absolute from the build root"; strip it before joining paths.
    return strip_prefix(ref, '//')

  subproject = longest_dir_prefix(relative_to, subproject_roots) if subproject_roots else None

  def prefix_subproject(spec_path):
    # Re-root the spec path under the enclosing subproject, if any.
    if not subproject:
      return spec_path
    elif spec_path:
      return os.path.join(subproject, spec_path)
    else:
      return os.path.normpath(subproject)

  spec_parts = spec.rsplit(':', 1)
  if len(spec_parts) == 1:
    # No ':' separator: the last path component doubles as the target name.
    default_target_spec = spec_parts[0]
    spec_path = prefix_subproject(normalize_absolute_refs(default_target_spec))
    target_name = os.path.basename(spec_path)
  else:
    spec_path, target_name = spec_parts
    if not spec_path and relative_to:
      # Sibling spec (':name'): the path part comes from `relative_to`.
      spec_path = fast_relpath(relative_to, subproject) if subproject else relative_to
    spec_path = prefix_subproject(normalize_absolute_refs(spec_path))
  return spec_path, target_name
python
{ "resource": "" }
q27509
Address.parse
train
def parse(cls, spec, relative_to='', subproject_roots=None):
  """Parses an address from its serialized form.

  :param string spec: An address in string form <path>:<name>.
  :param string relative_to: For sibling specs, ie: ':another_in_same_build_family', interprets
                             the missing spec_path part as `relative_to`.
  :param list subproject_roots: Paths that correspond with embedded build roots
                                under the current build root.
  :returns: A new address.
  :rtype: :class:`pants.base.address.Address`
  """
  # Delegate the string parsing, then wrap the (path, name) pair in an Address.
  path, name = parse_spec(spec, relative_to=relative_to, subproject_roots=subproject_roots)
  return cls(path, name)
python
{ "resource": "" }
q27510
Address.reference
train
def reference(self, referencing_path=None):
  """How to reference this address in a BUILD file.

  :API: public
  """
  # Sibling reference: the referrer lives in the same BUILD family.
  if referencing_path is not None and self._spec_path == referencing_path:
    return self.relative_spec
  # When the target name equals the basename of its path, the path alone suffices.
  if os.path.basename(self._spec_path) == self._target_name:
    return self._spec_path
  return self.spec
python
{ "resource": "" }
q27511
SimpleCodegenTask.is_gentarget
train
def is_gentarget(self, target):
  """Predicate which determines whether the target in question is relevant to this codegen task.

  E.g., the JaxbGen task considers JaxbLibrary targets to be relevant, and nothing else.

  :API: public
  :param Target target: The target to check.
  :return: True if this class can generate code for the given target, False otherwise.
  """
  # Subclasses must either set gentarget_type or override this method.
  if not self.gentarget_type:
    raise NotImplementedError
  return isinstance(target, self.gentarget_type)
python
{ "resource": "" }
q27512
SimpleCodegenTask._do_validate_sources_present
train
def _do_validate_sources_present(self, target): """Checks whether sources is empty, and either raises a TaskError or just returns False. The specifics of this behavior are defined by whether the user sets --allow-empty to True/False: --allow-empty=False will result in a TaskError being raised in the event of an empty source set. If --allow-empty=True, this method will just return false and log a warning. Shared for all SimpleCodegenTask subclasses to help keep errors consistent and descriptive. :param target: Target to validate. :return: True if sources is not empty, False otherwise. """ if not self.validate_sources_present: return True sources = target.sources_relative_to_buildroot() if not sources: message = ('Target {} has no sources.'.format(target.address.spec)) if not self.get_options().allow_empty: raise TaskError(message) else: logging.warn(message) return False return True
python
{ "resource": "" }
q27513
SimpleCodegenTask._inject_synthetic_target
train
def _inject_synthetic_target(self, vt, sources):
  """Create, inject, and return a synthetic target for the given target and workdir.

  :param vt: A codegen input VersionedTarget to inject a synthetic target for.
  :param sources: A FilesetWithSpec to inject for the target.
  """
  target = vt.target
  # NB: For stability, the injected target exposes the stable-symlinked `vt.results_dir`,
  # rather than the hash-named `vt.current_results_dir`.
  synthetic_target_dir = self.synthetic_target_dir(target, vt.results_dir)
  synthetic_target_type = self.synthetic_target_type(target)
  synthetic_extra_dependencies = self.synthetic_target_extra_dependencies(target, synthetic_target_dir)
  # Carry selected attributes from the original target over to the synthetic one.
  copied_attributes = {}
  for attribute in self._copy_target_attributes:
    copied_attributes[attribute] = getattr(target, attribute)
  if self._supports_exports(synthetic_target_type):
    extra_exports = self.synthetic_target_extra_exports(target, synthetic_target_dir)
    # Every extra export must also be an extra dependency; anything else is a bug.
    extra_exports_not_in_extra_dependencies = set(extra_exports).difference(
      set(synthetic_extra_dependencies))
    if len(extra_exports_not_in_extra_dependencies) > 0:
      raise self.MismatchedExtraExports(
        'Extra synthetic exports included targets not in the extra dependencies: {}. Affected target: {}'
        .format(extra_exports_not_in_extra_dependencies, target))
    extra_export_specs = {e.address.spec for e in extra_exports}
    original_export_specs = self._original_export_specs(target)
    union = set(original_export_specs).union(extra_export_specs)
    copied_attributes['exports'] = sorted(union)
  synthetic_target = self.context.add_new_target(
    address=self._get_synthetic_address(target, synthetic_target_dir),
    target_type=synthetic_target_type,
    dependencies=synthetic_extra_dependencies,
    sources=sources,
    derived_from=target,
    **copied_attributes
  )
  build_graph = self.context.build_graph
  # NB(pl): This bypasses the convenience function (Target.inject_dependency) in order
  # to improve performance.  Note that we can walk the transitive dependee subgraph once
  # for transitive invalidation rather than walking a smaller subgraph for every single
  # dependency injected.
  for dependent_address in build_graph.dependents_of(target.address):
    build_graph.inject_dependency(
      dependent=dependent_address,
      dependency=synthetic_target.address,
    )
  # NB(pl): See the above comment. The same note applies.
  for concrete_dependency_address in build_graph.dependencies_of(target.address):
    build_graph.inject_dependency(
      dependent=synthetic_target.address,
      dependency=concrete_dependency_address,
    )
  # Keep target_roots consistent: if the original was a root, the synthetic is too.
  if target in self.context.target_roots:
    self.context.target_roots.append(synthetic_target)
  return synthetic_target
python
{ "resource": "" }
q27514
SimpleCodegenTask._handle_duplicate_sources
train
def _handle_duplicate_sources(self, vt, sources):
  """Handles duplicate sources generated by the given gen target by either failure or deletion.

  This method should be called after all dependencies have been injected into the graph, but
  before injecting the synthetic version of this target.

  Returns a boolean indicating whether it modified the underlying filesystem.

  NB(gm): Some code generators may re-generate code that their dependent libraries generate.
  This results in targets claiming to generate sources that they really don't, so we try to
  filter out sources that were actually generated by dependencies of the target. This causes
  the code generated by the dependencies to 'win' over the code generated by dependees. By
  default, this behavior is disabled, and duplication in generated sources will raise a
  TaskError. This is controlled by the --allow-dups flag.
  """
  target = vt.target
  target_workdir = vt.results_dir

  # Walk dependency gentargets and record any sources owned by those targets that are also
  # owned by this target.
  duplicates_by_target = OrderedDict()
  def record_duplicates(dep):
    if dep == target or not self.is_gentarget(dep.concrete_derived_from):
      return False
    duped_sources = [s for s in dep.sources_relative_to_source_root() if s in sources.files and
                     not self.ignore_dup(target, dep, s)]
    if duped_sources:
      duplicates_by_target[dep] = duped_sources
  target.walk(record_duplicates)

  # If there were no dupes, we're done.
  if not duplicates_by_target:
    return False

  # If there were duplicates warn or error.
  messages = ['{target} generated sources that had already been generated by dependencies.'
              .format(target=target.address.spec)]
  for dep, duped_sources in duplicates_by_target.items():
    messages.append('\t{} also generated:'.format(dep.concrete_derived_from.address.spec))
    messages.extend(['\t\t{}'.format(source) for source in duped_sources])
  message = '\n'.join(messages)
  if self.get_options().allow_dups:
    # NOTE(review): logger.warn is a deprecated alias for logger.warning — consider updating.
    logger.warn(message)
  else:
    raise self.DuplicateSourceError(message)

  did_modify = False
  # Finally, remove duplicates from the workdir. This prevents us from having to worry
  # about them during future incremental compiles.
  for dep, duped_sources in duplicates_by_target.items():
    for duped_source in duped_sources:
      safe_delete(os.path.join(target_workdir, duped_source))
      did_modify = True
  if did_modify:
    # The on-disk results changed behind the cached digest, so invalidate it.
    Digest.clear(vt.current_results_dir)
  return did_modify
python
{ "resource": "" }
q27515
PythonNativeCode._get_targets_by_declared_platform_with_placeholders
train
def _get_targets_by_declared_platform_with_placeholders(self, targets_by_platform): """ Aggregates a dict that maps a platform string to a list of targets that specify the platform. If no targets have platforms arguments, return a dict containing platforms inherited from the PythonSetup object. :param tgts: a list of :class:`Target` objects. :returns: a dict mapping a platform string to a list of targets that specify the platform. """ if not targets_by_platform: for platform in self._python_setup.platforms: targets_by_platform[platform] = ['(No target) Platform inherited from either the ' '--platforms option or a pants.ini file.'] return targets_by_platform
python
{ "resource": "" }
q27516
PythonNativeCode.check_build_for_current_platform_only
train
def check_build_for_current_platform_only(self, targets):
  """
  Performs a check of whether the current target closure has native sources and if so, ensures
  that Pants is only targeting the current platform.

  :param tgts: a list of :class:`Target` objects.
  :return: a boolean value indicating whether the current target closure has native sources.
  :raises: :class:`pants.base.exceptions.IncompatiblePlatformsError`
  """
  if not self._any_targets_have_native_sources(targets):
    return False
  # Native code is present: every declared platform must be exactly 'current'.
  targets_by_platform = pex_build_util.targets_by_platform(targets, self._python_setup)
  platforms_with_sources = self._get_targets_by_declared_platform_with_placeholders(targets_by_platform)
  platform_names = list(platforms_with_sources.keys())
  if len(platform_names) < 1:
    # The placeholder helper backfills from global config, so an empty result here
    # indicates an internal inconsistency rather than a user error.
    raise self.PythonNativeCodeError(
      "Error: there should be at least one platform in the target closure, because "
      "we checked that there are native sources.")
  if platform_names == ['current']:
    return True
  raise IncompatiblePlatformsError(
    'The target set contains one or more targets that depend on '
    'native code. Please ensure that the platform arguments in all relevant targets and build '
    'options are compatible with the current platform. Found targets for platforms: {}'
    .format(str(platforms_with_sources)))
python
{ "resource": "" }
q27517
MissingDependencyFinder.find
train
def find(self, compile_failure_log, target):
  """Find missing deps on a best-effort basis from target's transitive dependencies.

  Returns (class2deps, no_dep_found) tuple. `class2deps` contains classname to deps
  that contain the class mapping. `no_dep_found` are the classnames that are unable
  to find the deps.
  """
  # Pull the unresolved class names out of the compiler output, then map each
  # to candidate targets from the transitive dependency closure.
  errors = self.compile_error_extractor.extract(compile_failure_log)
  missing_classnames = [error.classname for error in errors]
  return self._select_target_candidates_for_class(missing_classnames, target)
python
{ "resource": "" }
q27518
MissingDependencyFinder._select_target_candidates_for_class
train
def _select_target_candidates_for_class(self, classnames, target):
  """Select a target that contains the given classname.

  When multiple candidates are available, not uncommon in 3rdparty dependencies, they are
  ranked according to their string similarities with the classname because the way 3rdparty
  targets are conventionally named often shares similar naming structure.

  :param classnames: iterable of fully-qualified class names that failed to resolve.
  :param target: the target whose transitive dependencies are searched.
  :return: (class2deps, no_dep_found) — mapping of classname to an OrderedSet of candidate
    target specs, and the set of classnames for which no candidate was found.
  """
  class2deps, no_dep_found = {}, set()
  for classname in classnames:
    # Skip classnames already resolved (input may contain duplicates).
    if classname not in class2deps:
      candidates_for_class = []
      for tgt in self.dep_analyzer.targets_for_class(target, classname):
        # Attribute synthetic targets back to the target they were derived from.
        if tgt.is_synthetic and tgt.derived_from:
          tgt = tgt.derived_from
        candidates_for_class.append(tgt.address.spec)
      if candidates_for_class:
        candidates_for_class = StringSimilarityRanker(classname).sort(candidates_for_class)
        class2deps[classname] = OrderedSet(candidates_for_class)
      else:
        no_dep_found.add(classname)
  return class2deps, no_dep_found
python
{ "resource": "" }
q27519
PythonBinaryCreate._create_binary
train
def _create_binary(self, binary_tgt, results_dir): """Create a .pex file for the specified binary target.""" # Note that we rebuild a chroot from scratch, instead of using the REQUIREMENTS_PEX # and PYTHON_SOURCES products, because those products are already-built pexes, and there's # no easy way to merge them into a single pex file (for example, they each have a __main__.py, # metadata, and so on, which the merging code would have to handle specially). interpreter = self.context.products.get_data(PythonInterpreter) with temporary_dir() as tmpdir: # Create the pex_info for the binary. run_info_dict = self.context.run_tracker.run_info.get_as_dict() build_properties = PexInfo.make_build_properties() build_properties.update(run_info_dict) pex_info = binary_tgt.pexinfo.copy() pex_info.build_properties = build_properties pex_builder = PexBuilderWrapper.Factory.create( builder=PEXBuilder(path=tmpdir, interpreter=interpreter, pex_info=pex_info, copy=True), log=self.context.log) if binary_tgt.shebang: self.context.log.info('Found Python binary target {} with customized shebang, using it: {}' .format(binary_tgt.name, binary_tgt.shebang)) pex_builder.set_shebang(binary_tgt.shebang) else: self.context.log.debug('No customized shebang found for {}'.format(binary_tgt.name)) # Find which targets provide sources and which specify requirements. source_tgts = [] req_tgts = [] constraint_tgts = [] for tgt in binary_tgt.closure(exclude_scopes=Scopes.COMPILE): if has_python_sources(tgt) or has_resources(tgt): source_tgts.append(tgt) elif has_python_requirements(tgt): req_tgts.append(tgt) if is_python_target(tgt): constraint_tgts.append(tgt) # Add interpreter compatibility constraints to pex info. This will first check the targets for any # constraints, and if they do not have any will resort to the global constraints. pex_builder.add_interpreter_constraints_from(constraint_tgts) # Dump everything into the builder's chroot. 
for tgt in source_tgts: pex_builder.add_sources_from(tgt) # We need to ensure that we are resolving for only the current platform if we are # including local python dist targets that have native extensions. self._python_native_code_settings.check_build_for_current_platform_only(self.context.targets()) pex_builder.add_requirement_libs_from(req_tgts, platforms=binary_tgt.platforms) # Build the .pex file. pex_path = os.path.join(results_dir, '{}.pex'.format(binary_tgt.name)) pex_builder.build(pex_path) return pex_path
python
{ "resource": "" }
q27520
Matcher.matches
train
def matches(self, s):
  """Whether the pattern matches anywhere in the string s."""
  found = self.compiled_regex.search(s) is not None
  # An inverted matcher reports the complement of a raw regex hit.
  if self.inverted:
    return not found
  return found
python
{ "resource": "" }
q27521
MultiMatcher.check_content
train
def check_content(self, content_pattern_names, content, encoding):
  """Check which of the named patterns matches the given content.

  Returns a pair (matching, nonmatching), in which each element is a tuple of pattern names.

  :param iterable content_pattern_names: names of content patterns to check.
  :param bytes content: the content to check.
  :param str encoding: the expected encoding of content.
  """
  # Nothing to check, or no way to decode the bytes: trivially no matches.
  if not content_pattern_names or not encoding:
    return (), ()
  decoded = content.decode(encoding)
  matching, nonmatching = [], []
  for name in content_pattern_names:
    bucket = matching if self._content_matchers[name].matches(decoded) else nonmatching
    bucket.append(name)
  return tuple(matching), tuple(nonmatching)
python
{ "resource": "" }
q27522
MultiMatcher.get_applicable_content_pattern_names
train
def get_applicable_content_pattern_names(self, path):
  """Return the content patterns applicable to a given path.

  Returns a tuple (applicable_content_pattern_names, content_encoding).

  If path matches no path patterns, the returned content_encoding will be None
  (and applicable_content_pattern_names will be empty).
  """
  encodings = set()
  applicable = set()
  for path_pattern_name, content_pattern_names in self._required_matches.items():
    matcher = self._path_matchers[path_pattern_name]
    if matcher.matches(path):
      encodings.add(matcher.content_encoding)
      applicable.update(content_pattern_names)
  # All matching path patterns must agree on a single content encoding.
  if len(encodings) > 1:
    raise ValueError('Path matched patterns with multiple content encodings ({}): {}'.format(
      ', '.join(sorted(encodings)), path
    ))
  content_encoding = encodings.pop() if encodings else None
  return applicable, content_encoding
python
{ "resource": "" }
q27523
enum
train
def enum(all_values):
  """A datatype which can take on a finite set of values. This method is experimental and unstable.

  Any enum subclass can be constructed with its create() classmethod. This method will use the
  first element of `all_values` as the default value, but enum classes can override this behavior
  by setting `default_value` in the class body.

  If `all_values` contains only strings, then each variant is made into an attribute on the
  generated enum class object. This allows code such as the following:

  class MyResult(enum(['success', 'not-success'])):
    pass

  MyResult.success # The same as: MyResult('success')
  MyResult.not_success # The same as: MyResult('not-success')

  Note that like with option names, hyphenated ('-') enum values are converted into attribute names
  with underscores ('_').

  :param Iterable all_values: A nonempty iterable of objects representing all possible values for
                              the enum.  This argument must be a finite, non-empty iterable with
                              unique values.
  :raises: :class:`ValueError`
  """
  # namedtuple() raises a ValueError if you try to use a field with a leading underscore.
  field_name = 'value'
  # This call to list() will eagerly evaluate any `all_values` which would otherwise be lazy, such
  # as a generator.
  all_values_realized = list(all_values)
  unique_values = OrderedSet(all_values_realized)
  if len(unique_values) == 0:
    raise ValueError("all_values must be a non-empty iterable!")
  elif len(unique_values) < len(all_values_realized):
    raise ValueError("When converting all_values ({}) to a set, at least one duplicate "
                     "was detected. The unique elements of all_values were: {}."
                     .format(all_values_realized, list(unique_values)))

  class ChoiceDatatype(datatype([field_name]), ChoicesMixin):
    # Overriden from datatype() so providing an invalid variant is catchable as a TypeCheckError,
    # but more specific.
    type_check_error_type = EnumVariantSelectionError

    @memoized_classproperty
    def _singletons(cls):
      """Generate memoized instances of this enum wrapping each of this enum's allowed values.

      NB: The implementation of enum() should use this property as the source of truth for
      allowed values and enum instances from those values.
      """
      return OrderedDict((value, cls._make_singleton(value)) for value in all_values_realized)

    @classmethod
    def _make_singleton(cls, value):
      """
      We convert uses of the constructor to call create(), so we then need to go around __new__ to
      bootstrap singleton creation from datatype()'s __new__.
      """
      return super(ChoiceDatatype, cls).__new__(cls, value)

    @classproperty
    def _allowed_values(cls):
      """The values provided to the enum() type constructor, for use in error messages."""
      return list(cls._singletons.keys())

    def __new__(cls, value):
      """Create an instance of this enum.

      :param value: Use this as the enum value. If `value` is an instance of this class, return it,
                    otherwise it is checked against the enum's allowed values.
      """
      # Constructing an enum from an existing instance is the identity.
      if isinstance(value, cls):
        return value
      if value not in cls._singletons:
        raise cls.make_type_error(
          "Value {!r} must be one of: {!r}."
          .format(value, cls._allowed_values))
      return cls._singletons[value]

    # TODO: figure out if this will always trigger on primitives like strings, and what situations
    # won't call this __eq__ (and therefore won't raise like we want). Also look into whether there
    # is a way to return something more conventional like `NotImplemented` here that maintains the
    # extra caution we're looking for.
    def __eq__(self, other):
      """Redefine equality to avoid accidentally comparing against a non-enum."""
      if other is None:
        return False
      if type(self) != type(other):
        raise self.make_type_error(
          "when comparing {!r} against {!r} with type '{}': "
          "enum equality is only defined for instances of the same enum class!"
          .format(self, other, type(other).__name__))
      return super(ChoiceDatatype, self).__eq__(other)
    # Redefine the canary so datatype __new__ doesn't raise.
    __eq__._eq_override_canary = None

    # NB: as noted in datatype(), __hash__ must be explicitly implemented whenever __eq__ is
    # overridden. See https://docs.python.org/3/reference/datamodel.html#object.__hash__.
    def __hash__(self):
      return super(ChoiceDatatype, self).__hash__()

    def resolve_for_enum_variant(self, mapping):
      """Return the object in `mapping` with the key corresponding to the enum value.

      `mapping` is a dict mapping enum variant value -> arbitrary object. All variant values must
      be provided.

      NB: The objects in `mapping` should be made into lambdas if lazy execution is desired, as this
      will "evaluate" all of the values in `mapping`.
      """
      keys = frozenset(mapping.keys())
      if keys != frozenset(self._allowed_values):
        raise self.make_type_error(
          "pattern matching must have exactly the keys {} (was: {})"
          .format(self._allowed_values, list(keys)))
      match_for_variant = mapping[self.value]
      return match_for_variant

    @classproperty
    def all_variants(cls):
      """Iterate over all instances of this enum, in the declared order.

      NB: resolve_for_enum_variant() should be used instead of this method for performing
      conditional logic based on an enum instance's value.
      """
      return cls._singletons.values()

  # Python requires creating an explicit closure to save the value on each loop iteration.
  accessor_generator = lambda case: lambda cls: cls(case)
  for case in all_values_realized:
    # Only string variants become class-level accessor attributes.
    if _string_type_constraint.satisfied_by(case):
      accessor = classproperty(accessor_generator(case))
      attr_name = re.sub(r'-', '_', case)
      setattr(ChoiceDatatype, attr_name, accessor)
  return ChoiceDatatype
python
{ "resource": "" }
q27524
DatatypeMixin.make_type_error
train
def make_type_error(cls, msg, *args, **kwargs):
  """A helper method to generate an exception type for type checking errors.

  This method uses `cls.type_check_error_type` to ensure that type checking errors can be caught
  with a reliable exception type. The type returned by `cls.type_check_error_type` should ensure
  that the exception messages are prefixed with enough context to be useful and *not* confusing.
  """
  # The class name is always the first constructor argument for context.
  error_type = cls.type_check_error_type
  return error_type(cls.__name__, msg, *args, **kwargs)
python
{ "resource": "" }
q27525
TypeConstraint.validate_satisfied_by
train
def validate_satisfied_by(self, obj):
  """Return `obj` if the object satisfies this type constraint, or raise.

  :raises: `TypeConstraintError` if `obj` does not satisfy the constraint.
  """
  # Guard-clause form: fail fast on violation, otherwise pass the object through.
  if not self.satisfied_by(obj):
    raise self.make_type_constraint_error(obj, self)
  return obj
python
{ "resource": "" }
q27526
replace_in_file
train
def replace_in_file(workspace, src_file_path, from_str, to_str):
  """Replace from_str with to_str in the name and content of the given file.

  If any edits were necessary, returns the new filename (which may be the same as the old
  filename). Returns None when neither the file's content nor its name contains `from_str`.
  """
  # NOTE(review): assumes both needles are pure ASCII (e.g. version strings); non-ascii
  # input would raise UnicodeEncodeError here — confirm callers only pass versions.
  from_bytes = from_str.encode('ascii')
  to_bytes = to_str.encode('ascii')
  data = read_file(os.path.join(workspace, src_file_path), binary_mode=True)
  if from_bytes not in data and from_str not in src_file_path:
    return None
  dst_file_path = src_file_path.replace(from_str, to_str)
  safe_file_dump(os.path.join(workspace, dst_file_path), data.replace(from_bytes, to_bytes),
                 mode='wb')
  # If the rewrite renamed the file, remove the original so only the new name remains.
  if src_file_path != dst_file_path:
    os.unlink(os.path.join(workspace, src_file_path))
  return dst_file_path
python
{ "resource": "" }
q27527
fingerprint_file
train
def fingerprint_file(workspace, filename):
  """Given a relative filename located in a workspace, fingerprint the file.

  Returns a tuple of fingerprint string and size string.
  """
  content = read_file(os.path.join(workspace, filename), binary_mode=True)
  digest = hashlib.sha256(content).digest()
  encoded = base64.b64encode(digest).decode('utf-8')
  return 'sha256={}'.format(encoded), str(len(content))
python
{ "resource": "" }
q27528
rewrite_record_file
train
def rewrite_record_file(workspace, src_record_file, mutated_file_tuples):
  """Given a RECORD file and list of mutated file tuples, update the RECORD file in place.

  The RECORD file should always be a member of the mutated files, due to both
  containing versions, and having a version in its filename.
  """
  mutated_files = set()
  dst_record_file = None
  for src, dst in mutated_file_tuples:
    if src == src_record_file:
      dst_record_file = dst
    else:
      mutated_files.add(dst)
  if not dst_record_file:
    raise Exception('Malformed whl or bad globs: `{}` was not rewritten.'.format(src_record_file))

  output_records = []
  file_name = os.path.join(workspace, dst_record_file)
  for line in read_file(file_name).splitlines():
    # A RECORD row is `path,hash,size`. Split from the right with maxsplit=2 so
    # that a path containing commas still parses into exactly three fields (the
    # previous maxsplit of 3 could yield four values and fail to unpack).
    filename, fingerprint_str, size_str = line.rsplit(',', 2)
    if filename in mutated_files:
      fingerprint_str, size_str = fingerprint_file(workspace, filename)
      output_line = ','.join((filename, fingerprint_str, size_str))
    else:
      output_line = line
    output_records.append(output_line)

  safe_file_dump(file_name, '\r\n'.join(output_records) + '\r\n')
python
{ "resource": "" }
q27529
main
train
def main():
  """Given an input whl file and target version, create a copy of the whl with that version.

  This is accomplished via string replacement in files matching a list of globs.
  Pass the optional `--glob` argument to add additional globs: ie
  `--glob='thing-to-match*.txt'`.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('whl_file', help='The input whl file.')
  parser.add_argument('dest_dir', help='The destination directory for the output whl.')
  parser.add_argument('target_version', help='The target version of the output whl.')
  parser.add_argument(
      '--glob',
      action='append',
      default=['*.dist-info/*', '*-nspkg.pth'],
      help='Globs (fnmatch) to rewrite within the whl: may be specified multiple times.')
  reversion(parser.parse_args())
python
{ "resource": "" }
q27530
AddressMapper.is_valid_single_address
train
def is_valid_single_address(self, single_address):
  """Check if a potentially ambiguous single address spec really exists.

  :param single_address: A SingleAddress spec.
  :return: True if given spec exists, False otherwise.
  """
  if not isinstance(single_address, SingleAddress):
    raise TypeError('Parameter "{}" is of type {}, expecting type {}.'.format(
        single_address, type(single_address), SingleAddress))
  try:
    matches = self.scan_specs([single_address])
  except AddressLookupError:
    # A failed lookup simply means the spec does not exist.
    return False
  return bool(matches)
python
{ "resource": "" }
q27531
JarDependencyManagement.resolve_version_conflict
train
def resolve_version_conflict(self, managed_coord, direct_coord, force=False):
  """Resolves an artifact version conflict between directly specified and managed jars.

  This uses the user-defined --conflict-strategy to pick the appropriate artifact version
  (or to raise an error). This assumes the two conflict coordinates differ only by their
  version.

  :param M2Coordinate managed_coord: the artifact coordinate as defined by a
    managed_jar_dependencies object.
  :param M2Coordinate direct_coord: the artifact coordinate as defined by a jar_library target.
  :param bool force: Whether the artifact defined by the jar_library() was marked with
    force=True. This is checked only if one of the *_IF_FORCED conflict strategies is being
    used.
  :return: the coordinate of the artifact that should be resolved.
  :rtype: M2Coordinate
  :raises: JarDependencyManagement.DirectManagedVersionConflict if the versions are different
    and the --conflict-strategy is 'FAIL' (which is the default).
  """
  if M2Coordinate.unversioned(managed_coord) != M2Coordinate.unversioned(direct_coord):
    raise ValueError('Illegal arguments passed to resolve_version_conflict: managed_coord and '
                     'direct_coord must only differ by their version!\n'
                     '  Managed: {}\n  Direct: {}\n'.format(
                       M2Coordinate.unversioned(managed_coord),
                       M2Coordinate.unversioned(direct_coord),
                     ))

  # No conflict when the direct dependency is unversioned or already agrees.
  if direct_coord.rev is None or direct_coord.rev == managed_coord.rev:
    return managed_coord

  strategy = self.get_options().conflict_strategy
  message = dedent("""
    An artifact directly specified by a jar_library target has a different version than what
    is specified by managed_jar_dependencies.

      Artifact: jar(org={org}, name={name}, classifier={classifier}, ext={ext})
      Direct version:  {direct}
      Managed version: {managed}
  """).format(
    org=direct_coord.org,
    name=direct_coord.name,
    classifier=direct_coord.classifier,
    ext=direct_coord.ext,
    direct=direct_coord.rev,
    managed=managed_coord.rev,
  )

  if strategy == 'FAIL':
    raise self.DirectManagedVersionConflict(
      '{}\nThis raises an error due to the current --jar-dependency-management-conflict-strategy.'
      .format(message)
    )

  # Conflicts are reported at debug level when warnings are suppressed.
  is_silent = self.get_options().suppress_conflict_warnings
  log = logger.debug if is_silent else logger.warn

  if strategy == 'USE_DIRECT':
    log(message)
    log('[{}] Using direct version: {}'.format(strategy, direct_coord))
    return direct_coord

  if strategy == 'USE_DIRECT_IF_FORCED':
    log(message)
    if force:
      log('[{}] Using direct version, because force=True: {}'.format(strategy, direct_coord))
      return direct_coord
    else:
      log('[{}] Using managed version, because force=False: {}'.format(strategy, managed_coord))
      return managed_coord

  if strategy == 'USE_MANAGED':
    log(message)
    log('[{}] Using managed version: {}'.format(strategy, managed_coord))
    return managed_coord

  if strategy == 'USE_NEWER':
    newer = max([managed_coord, direct_coord], key=lambda coord: Revision.lenient(coord.rev))
    log(message)
    log('[{}] Using newer version: {}'.format(strategy, newer))
    return newer

  raise TaskError('Unknown value for --conflict-strategy: {}'.format(strategy))
python
{ "resource": "" }
q27532
JarDependencyManagement.targets_by_artifact_set
train
def targets_by_artifact_set(self, targets):
  """Partitions the input targets by the sets of pinned artifacts they are managed by.

  :param collections.Iterable targets: the input targets (typically just JarLibrary targets).
  :return: a mapping of PinnedJarArtifactSet -> list of targets.
  :rtype: dict
  """
  partitioned = {}
  for candidate in targets:
    partitioned.setdefault(self.for_target(candidate), []).append(candidate)
  return partitioned
python
{ "resource": "" }
q27533
JarDependencyManagement.for_target
train
def for_target(self, target):
  """Computes and returns the PinnedJarArtifactSet that should be used to manage the given target.

  This returns None if the target is not a JarLibrary.

  :param Target target: The jar_library for which to find the managed_jar_dependencies object.
  :return: The artifact set of the managed_jar_dependencies object for the target, or the
    default artifact set from --default-target.
  :rtype: PinnedJarArtifactSet
  """
  if not isinstance(target, JarLibrary):
    return None
  managed = target.managed_dependencies
  # Fall back to the global default when the library names no explicit manager.
  if not managed:
    return self.default_artifact_set
  return self._artifact_set_map[managed.id]
python
{ "resource": "" }
q27534
PinnedJarArtifactSet.id
train
def id(self):
  """A unique, stable, hashable id over the set of pinned artifacts."""
  # NB: not cheap to compute for large artifact sets -- cache the result and
  # invalidate whenever an artifact is added or changed (see put()).
  if not self._id:
    self._id = tuple(sorted(str(coord) for coord in self))
  return self._id
python
{ "resource": "" }
q27535
PinnedJarArtifactSet.put
train
def put(self, artifact):
  """Adds the given coordinate to the set, using its version to pin it.

  If this set already contains an artifact with the same coordinates other than the version,
  it is replaced by the new artifact.

  :param M2Coordinate artifact: the artifact coordinate.
  """
  artifact = M2Coordinate.create(artifact)
  if artifact.rev is None:
    raise self.MissingVersion('Cannot pin an artifact to version "None"! {}'.format(artifact))
  key = self._key(artifact)
  existing = self._artifacts_to_versions.get(key)
  self._artifacts_to_versions[key] = artifact
  # Membership actually changed, so the cached id is now stale.
  if existing != artifact:
    self._id = None
python
{ "resource": "" }
q27536
PinnedJarArtifactSet.get
train
def get(self, artifact):
  """Gets the coordinate with the correct version for the given artifact coordinate.

  :param M2Coordinate artifact: the coordinate to lookup.
  :return: a coordinate which is the same as the input, but with the correct pinned version.
    If this artifact set does not pin a version for the input artifact, this just returns the
    original coordinate.
  :rtype: M2Coordinate
  """
  key = self._key(artifact)
  try:
    return self._artifacts_to_versions[key]
  except KeyError:
    return artifact
python
{ "resource": "" }
q27537
JarDependencyManagementSetup._compute_artifact_set
train
def _compute_artifact_set(self, management_target):
  """Computes the set of pinned artifacts specified by this target, and any of its dependencies.

  An error is raised if a conflict exists between a pinned version between a
  ManagedJarDependencies target and any of its dependencies, or if two versions of a jar
  exist in the same ManagedJarDependencies target.

  :param Target management_target: a target object which is (or at least depends on) a
    ManagedJarDependencies target.
  :return: the computed transitive artifact set (approximately the union of all pinned
    artifacts in the transitive closure of the input target).
  :rtype: PinnedJarArtifactSet
  """
  artifact_set = PinnedJarArtifactSet()
  # Keeps track of where pinned artifacts came from for logging purposes.
  specs_by_coordinate = {}

  def handle_managed_jar_dependencies(target):
    # Collect this single target's pins, checking for intra-target duplicates.
    subset = PinnedJarArtifactSet()
    for jar in self._jar_iterator(target):
      if jar.rev is None:
        raise self.MissingVersion(target, jar)
      if jar in subset and subset[jar].rev != jar.rev:
        # Report the previously-pinned rev from this target's own subset: the
        # outer artifact_set has not been merged yet here and would misreport
        # the conflicting version as the jar's own rev.
        raise self.DuplicateCoordinateError(target, jar, subset[jar].rev, jar.rev)
      subset.put(jar)
    return subset

  def handle_conflict(artifact, target):
    previous_coord = artifact_set[artifact]
    previous_spec = specs_by_coordinate[previous_coord]
    message = ('Artifact {previous_coord} (from {previous_target}) overridden by {new_coord} '
               '(in {new_target}).'.format(previous_coord=previous_coord,
                                           previous_target=previous_spec,
                                           new_coord=artifact,
                                           new_target=target.address.spec))
    raise self.IllegalVersionOverride(message)

  def handle_target(target):
    if not isinstance(target, ManagedJarDependencies):
      return
    for artifact in handle_managed_jar_dependencies(target):
      if artifact.rev != artifact_set[artifact].rev:
        handle_conflict(artifact, target)
      specs_by_coordinate[M2Coordinate.create(artifact)] = target.address.spec
      artifact_set.put(artifact)

  self.context.build_graph.walk_transitive_dependency_graph(
    addresses=[management_target.address],
    work=handle_target,
    postorder=True,  # Process dependencies first.
  )
  return artifact_set
python
{ "resource": "" }
q27538
ui_open
train
def ui_open(*files):
  """Attempts to open the given files using the preferred desktop viewer or editor.

  :raises :class:`OpenError`: if there is a problem opening any of the files.
  """
  # No files: nothing to open, nothing to validate.
  if not files:
    return
  osname = get_os_name()
  opener = _OPENER_BY_OS.get(osname)
  if opener is None:
    raise OpenError('Open currently not supported for ' + osname)
  opener(files)
python
{ "resource": "" }
q27539
TargetAddressable.factory
train
def factory(cls, target_type, alias=None):
  """Creates an addressable factory for the given target type and alias.

  :returns: A factory that can capture :class:`TargetAddressable` instances.
  :rtype: :class:`Addressable.Factory`
  """
  # Close over target_type/alias in a one-off Factory subclass.
  class Factory(Addressable.Factory):
    @property
    def target_types(self):
      return (target_type,)

    def capture(self, *args, **kwargs):
      return TargetAddressable(alias, target_type, *args, **kwargs)

  return Factory()
python
{ "resource": "" }
q27540
WireGen.format_args_for_target
train
def format_args_for_target(self, target, target_workdir):
  """Calculate the arguments to pass to the command line for a single target."""
  args = ['--java_out={0}'.format(target_workdir)]

  # Add all params in payload to args.
  relative_sources, source_roots = self._compute_sources(target)
  payload = target.payload

  if payload.get_field_value('no_options'):
    args.append('--no_options')

  if payload.service_writer:
    args.append('--service_writer={}'.format(payload.service_writer))
  if payload.service_writer_options:
    for opt in payload.service_writer_options:
      args.extend(['--service_writer_opt', opt])

  if payload.registry_class:
    args.append('--registry_class={0}'.format(payload.registry_class))

  if payload.roots:
    args.append('--roots={0}'.format(','.join(payload.roots)))

  if payload.enum_options:
    args.append('--enum_options={0}'.format(','.join(payload.enum_options)))

  # Every source root becomes a proto include path, absolutized from the buildroot.
  for source_root in source_roots:
    args.append('--proto_path={0}'.format(os.path.join(get_buildroot(), source_root)))

  args.extend(relative_sources)
  return args
python
{ "resource": "" }
q27541
Jar.main
train
def main(self, main):
  """Specifies a Main-Class entry for this jar's manifest.

  :param string main: a fully qualified class name
  """
  valid = bool(main) and isinstance(main, string_types)
  if not valid:
    raise ValueError('The main entry must be a non-empty string')
  self._main = main
python
{ "resource": "" }
q27542
Jar.write
train
def write(self, src, dest=None):
  """Schedules a write of the file at ``src`` to the ``dest`` path in this jar.

  If the ``src`` is a file, then ``dest`` must be specified.

  If the ``src`` is a directory then by default all descendant files will be added to the jar
  as entries carrying their relative path.  If ``dest`` is specified it will be prefixed to
  each descendant's relative path to form its jar entry path.

  :param string src: the path to the pre-existing source file or directory
  :param string dest: the path the source file or directory should have in this jar
  """
  if not src or not isinstance(src, string_types):
    raise ValueError('The src path must be a non-empty string, got {} of type {}.'.format(
      src, type(src)))
  if dest and not isinstance(dest, string_types):
    raise ValueError('The dest entry path must be a non-empty string, got {} of type {}.'.format(
      dest, type(dest)))
  # A directory can derive entry names from its tree; a lone file cannot.
  if not dest and not os.path.isdir(src):
    raise self.Error('Source file {} must have a jar destination specified'.format(src))
  self._add_entry(self.FileSystemEntry(src, dest))
python
{ "resource": "" }
q27543
Jar.writejar
train
def writejar(self, jar):
  """Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest.

  :param string jar: the path to the pre-existing jar to graft into this jar
  """
  if not (jar and isinstance(jar, string_types)):
    raise ValueError('The jar path must be a non-empty string')
  self._jars.append(jar)
python
{ "resource": "" }
q27544
Jar._render_jar_tool_args
train
def _render_jar_tool_args(self, options):
  """Format the arguments to jar-tool.

  :param Options options:
  """
  args = []

  with temporary_dir() as manifest_stage_dir:
    # Relativize urls in the canonical classpath; this needs to be stable too, therefore do
    # not follow the symlinks because symlinks may vary from platform to platform.
    classpath = relativize_classpath(self.classpath,
                                     os.path.dirname(self._path),
                                     followlinks=False)

    def as_cli_entry(entry):
      src = entry.materialize(manifest_stage_dir)
      return '{}={}'.format(src, entry.dest) if entry.dest else src

    files = [as_cli_entry(entry) for entry in self._entries] if self._entries else []
    jars = self._jars or []

    with safe_args(classpath, options, delimiter=',') as classpath_args:
      with safe_args(files, options, delimiter=',') as files_args:
        with safe_args(jars, options, delimiter=',') as jars_args:

          # If you specify --manifest to jar-tool you cannot specify --main.
          if self._manifest_entry:
            manifest_file = self._manifest_entry.materialize(manifest_stage_dir)
          else:
            manifest_file = None

          if self._main and manifest_file:
            # Fold the main class into the explicit manifest instead of passing -main.
            main_arg = None
            with open(manifest_file, 'a') as f:
              f.write("Main-Class: {}\n".format(self._main))
          else:
            main_arg = self._main

          if main_arg:
            args.append('-main={}'.format(self._main))

          if classpath_args:
            args.append('-classpath={}'.format(','.join(classpath_args)))

          if manifest_file:
            args.append('-manifest={}'.format(manifest_file))

          if files_args:
            args.append('-files={}'.format(','.join(files_args)))

          if jars_args:
            args.append('-jars={}'.format(','.join(jars_args)))

          yield args
python
{ "resource": "" }
q27545
JarTask.open_jar
train
def open_jar(self, path, overwrite=False, compressed=True, jar_rules=None):
  """Yields a Jar that will be written when the context exits.

  :API: public

  :param string path: the path to the jar file
  :param bool overwrite: overwrite the file at ``path`` if it exists; ``False`` by default;
    ie: update the pre-existing jar at ``path``
  :param bool compressed: entries added to the jar should be compressed; ``True`` by default
  :param jar_rules: an optional set of rules for handling jar exclusions and duplicates
  """
  jar = Jar(path)
  try:
    yield jar
  except jar.Error as e:
    raise TaskError('Failed to write to jar at {}: {}'.format(path, e))

  with jar._render_jar_tool_args(self.get_options()) as args:
    if args:  # Don't build an empty jar
      args.append('-update={}'.format(self._flag(not overwrite)))
      args.append('-compress={}'.format(self._flag(compressed)))

      jar_rules = jar_rules or JarRules.default()
      args.append('-default_action={}'.format(self._action_name(jar_rules.default_dup_action)))

      skip_patterns = []
      duplicate_actions = []
      for rule in jar_rules.rules:
        if isinstance(rule, Skip):
          skip_patterns.append(rule.apply_pattern)
        elif isinstance(rule, Duplicate):
          duplicate_actions.append('{}={}'.format(rule.apply_pattern.pattern,
                                                  self._action_name(rule.action)))
        else:
          raise ValueError('Unrecognized rule: {}'.format(rule))

      if skip_patterns:
        args.append('-skip={}'.format(','.join(p.pattern for p in skip_patterns)))
      if duplicate_actions:
        args.append('-policies={}'.format(','.join(duplicate_actions)))

      args.append(path)

      if JarTool.global_instance().run(context=self.context, runjava=self.runjava, args=args):
        raise TaskError('jar-tool failed')
python
{ "resource": "" }
q27546
JarBuilderTask.create_jar_builder
train
def create_jar_builder(self, jar):
  """Creates a ``JarTask.JarBuilder`` ready for use.

  This method should be called during in `execute` context and only after ensuring
  `JarTask.JarBuilder.prepare` has already been called in `prepare` context.

  :param jar: An opened ``pants.backend.jvm.tasks.jar_task.Jar``.
  """
  builder = self.JarBuilder(self.context, jar)
  yield builder
  # Seal the manifest once the caller has finished adding entries.
  builder.commit_manifest(jar)
python
{ "resource": "" }
q27547
Watchman._make_client
train
def _make_client(self):
  """Create a new watchman client using the BSER protocol over a UNIX socket."""
  # The generous startup timeout is trimmed back later (see _attempt_set_timeout).
  self._logger.debug('setting initial watchman timeout to %s', self._startup_timeout)
  return StreamableWatchmanClient(sockpath=self.socket,
                                  transport='local',
                                  timeout=self._startup_timeout)
python
{ "resource": "" }
q27548
Watchman.launch
train
def launch(self):
  """Launch and synchronously write metadata.

  This is possible due to watchman's built-in async server startup - no double-forking
  required.
  """
  cmd = self._construct_cmd((self._watchman_path, 'get-pid'),
                            state_file=self._state_file,
                            sock_file=self._sock_file,
                            pid_file=self._pid_file,
                            log_file=self._log_file,
                            log_level=str(self._log_level))
  self._logger.debug('watchman cmd is: {}'.format(' '.join(cmd)))
  self._maybe_init_metadata()

  # Watchman is launched via its cli. By running the 'get-pid' command on the client we
  # implicitly launch the Watchman daemon. This approach is somewhat error-prone - in some
  # cases the client can successfully trigger the launch of the Watchman daemon, but fail to
  # return successfully for the 'get-pid' result due to server <-> daemon socket issues -
  # these can look like:
  #
  #   2016-04-01T17:31:23,820: [cli] unable to talk to your watchman
  #                            on .../watchman.sock! (Permission denied)
  #
  # This results in a subprocess execution failure and leaves us with no pid information to
  # write to the metadata directory - while in reality a Watchman daemon is actually running
  # but now untracked. To safeguard against this, we retry the (idempotent) 'get-pid' command
  # a few times to give the server-side socket setup a few chances to quiesce before
  # potentially orphaning it.
  get_output = functools.partial(self.get_subprocess_output, cmd)
  output = retry_on_exception(get_output, 3, (ProcessManager.ExecutionError,), lambda n: n * .5)

  # Parse the watchman PID from the cli output, then persist process metadata to disk.
  pid = self._parse_pid_from_output(output)
  self.write_pid(pid)
  self.write_socket(self._sock_file)
python
{ "resource": "" }
q27549
Watchman._attempt_set_timeout
train
def _attempt_set_timeout(self, timeout):
  """Sets a timeout on the inner watchman client's socket.

  Failures are logged at debug level rather than raised, since a failed timeout
  adjustment should not take down the caller.

  :param timeout: the timeout value to apply to the client socket.
  """
  try:
    self.client.setTimeout(timeout)
  except Exception:
    # Log the timeout actually being applied (the original logged the
    # configured self._timeout, which can differ from the argument).
    self._logger.debug('failed to set post-startup watchman timeout to %s', timeout)
  else:
    self._logger.debug('set post-startup watchman timeout to %s', timeout)
python
{ "resource": "" }
q27550
Watchman.watch_project
train
def watch_project(self, path):
  """Issues the watch-project command to watchman to begin watching the buildroot.

  :param string path: the path to the watchman project root/pants build root.
  """
  # TODO(kwlzn): Add a client.query(timeout=X) param to the upstream pywatchman project.
  try:
    return self.client.query('watch-project', os.path.realpath(path))
  finally:
    # Startup used a generous timeout; drop back to the steady-state timeout.
    self._attempt_set_timeout(self._timeout)
python
{ "resource": "" }
q27551
Watchman.subscribed
train
def subscribed(self, build_root, handlers):
  """Bulk subscribe generator for StreamableWatchmanClient.

  :param str build_root: the build_root for all subscriptions.
  :param iterable handlers: a sequence of Watchman.EventHandler namedtuple objects.
  :yields: a stream of tuples in the form (subscription_name: str, subscription_event: dict).
  """
  command_list = [['subscribe', build_root, handler.name, handler.metadata]
                  for handler in handlers]
  self._logger.debug('watchman command_list is: {}'.format(command_list))

  try:
    for event in self.client.stream_query(command_list):
      if event is None:
        # Idle tick from the client; surface it so callers can poll.
        yield None, None
      elif 'subscribe' in event:
        # Subscription confirmations carry no payload for consumers.
        self._logger.info('confirmed watchman subscription: {}'.format(event))
        yield None, None
      elif 'subscription' in event:
        yield event.get('subscription'), event
      else:
        self._logger.warning('encountered non-subscription event: {}'.format(event))
  except self.client.WatchmanError as e:
    raise self.WatchmanCrash(e)
python
{ "resource": "" }
q27552
BuildFileManipulator.add_dependency
train
def add_dependency(self, address):
  """Add a dependency to this target.  This will deduplicate existing dependencies."""
  existing = self._dependencies_by_address.get(address)
  if existing is not None:
    # Already present: a comment marks it as user-forced, which we never touch.
    if existing.has_comment():
      logger.warn('BuildFileManipulator would have added {address} as a dependency of '
                  '{target_address}, but that dependency was already forced with a comment.'
                  .format(address=address.spec, target_address=self.target_address.spec))
    return
  spec = address.reference(referencing_path=self.build_file.spec_path)
  self._dependencies_by_address[address] = DependencySpec(spec)
python
{ "resource": "" }
q27553
BuildFileManipulator.clear_unforced_dependencies
train
def clear_unforced_dependencies(self):
  """Remove all dependencies not forced by a comment.

  This is useful when existing analysis can infer exactly what the correct dependencies
  should be. Typical use is to call `clear_unforced_dependencies`, then call `add_dependency`
  for each dependency inferred from analysis. The resulting dependency set should be the
  pruned set of all dependencies, plus dependencies hand forced by a user comment.
  """
  self._dependencies_by_address = {
    address: dep
    for address, dep in self._dependencies_by_address.items()
    if dep.has_comment()
  }
python
{ "resource": "" }
q27554
BuildFileManipulator.build_file_lines
train
def build_file_lines(self):
  """Like `target_lines`, the entire BUILD file's lines after dependency manipulation."""
  begin, end = self._target_interval
  lines = list(self._build_file_source_lines)
  # Splice the regenerated target lines over the original target's span.
  lines[begin:end] = self.target_lines()
  return lines
python
{ "resource": "" }
q27555
BuildFileManipulator.diff_lines
train
def diff_lines(self):
  """A diff between the original BUILD file and the resulting BUILD file."""
  relpath = self.build_file.relpath
  return list(unified_diff(self._build_file_source_lines[:],
                           self.build_file_lines(),
                           fromfile=relpath,
                           tofile=relpath,
                           lineterm=''))
python
{ "resource": "" }
q27556
BuildFileManipulator.write
train
def write(self, dry_run=True):
  """Write out the changes made to the BUILD file, and print the diff to stderr.

  :param dry_run: Don't actually write out the BUILD file, but do print the diff to stderr.
  """
  start_lines = self._build_file_source_lines[:]
  end_lines = self.build_file_lines()
  diff_generator = unified_diff(start_lines,
                                end_lines,
                                fromfile=self.build_file.relpath,
                                tofile=self.build_file.relpath,
                                lineterm='')

  msg = ('DRY RUN, would have written this diff:' if dry_run
         else 'REAL RUN, about to write the following diff:')
  sys.stderr.write(msg + '\n')
  sys.stderr.write('*' * 40 + '\n')
  sys.stderr.write('target at: ')
  sys.stderr.write(str(self.target_address) + '\n')
  for line in diff_generator:
    sys.stderr.write(line + '\n')
  sys.stderr.write('*' * 40 + '\n')

  if not dry_run:
    with open(self.build_file.full_path, 'w') as bf:
      bf.write('\n'.join(end_lines))
    sys.stderr.write('WROTE to {full_path}\n'.format(full_path=self.build_file.full_path))
python
{ "resource": "" }
q27557
ConanFetch._remotes_txt_content
train
def _remotes_txt_content(self):
  """Generate a file containing overrides for Conan remotes which get applied to registry.json."""
  lines = []
  for name, url in self.get_options().conan_remotes.items():
    # A remote is considered SSL iff its url uses the https scheme.
    is_ssl = re.match(r'^https://', url) is not None
    lines.append('{name} {url} {is_ssl}'.format(name=name, url=url, is_ssl=is_ssl))
  return '{}\n'.format('\n'.join(lines))
python
{ "resource": "" }
q27558
ConanFetch._conan_user_home
train
def _conan_user_home(self, conan, in_workdir=False):
  """Create the CONAN_USER_HOME for this task fingerprint and initialize the Conan remotes.

  See https://docs.conan.io/en/latest/reference/commands/consumer/config.html#conan-config-install
  for docs on configuring remotes.
  """
  # This argument is exposed so tests don't leak out of the workdir.
  base_cache_dir = self.workdir if in_workdir else get_pants_cachedir()
  user_home_base = os.path.join(base_cache_dir, 'conan-support', 'conan-user-home')
  # Locate the subdirectory of the pants shared cachedir specific to this task's option values.
  user_home = os.path.join(user_home_base, self.fingerprint)
  conan_install_base = os.path.join(user_home, '.conan')

  # Conan doesn't copy remotes.txt into the .conan subdir after the "config install" command,
  # it simply edits registry.json. However, it is valid to have this file there, and Conan
  # won't touch it, so we use its presence to detect whether we have appropriately
  # initialized the Conan installation.
  remotes_txt_sentinel = os.path.join(conan_install_base, 'remotes.txt')
  if not os.path.isfile(remotes_txt_sentinel):
    safe_mkdir(conan_install_base)
    # Conan doesn't consume the remotes.txt file just by being in the conan directory -- we
    # need to create another directory containing any selection of files detailed in
    # https://docs.conan.io/en/latest/reference/commands/consumer/config.html#conan-config-install
    # and "install" from there to our desired conan directory.
    with temporary_dir() as remotes_install_dir:
      # Create an artificial conan configuration dir containing just remotes.txt.
      remotes_txt_for_install = os.path.join(remotes_install_dir, 'remotes.txt')
      safe_file_dump(remotes_txt_for_install, self._remotes_txt_content)
      # Configure the desired user home from this artificial config dir.
      argv = ['config', 'install', remotes_install_dir]
      workunit_factory = functools.partial(
        self.context.new_workunit,
        name='initial-conan-config',
        labels=[WorkUnitLabel.TOOL])
      env = {
        'CONAN_USER_HOME': user_home,
      }
      cmdline, exit_code = conan.run(workunit_factory, argv, env=env)
      if exit_code != 0:
        raise self.ConanConfigError(
          'Error configuring conan with argv {} and environment {}: exited non-zero ({}).'
          .format(cmdline, env, exit_code),
          exit_code=exit_code)
    # Generate the sentinel file so that we know the remotes have been successfully
    # configured for this particular task fingerprint in successive pants runs.
    safe_file_dump(remotes_txt_sentinel, self._remotes_txt_content)

  return user_home
python
{ "resource": "" }
q27559
ConanFetch.execute_codegen
train
def execute_codegen(self, target, target_workdir):
  """Invoke the conan pex to fetch conan packages specified by a `ExternalNativeLibrary` target.

  :param ExternalNativeLibrary target: a target containing conan package specifications.
  :param str target_workdir: where to copy the installed package contents to.
  """
  conan = self.context.products.get_data(ConanPrep.tool_instance_cls)

  # TODO: we should really be able to download all of these in one go, and we should make an
  # upstream PR to allow that against Conan if not.
  for conan_requirement in target.packages:
    # See https://docs.conan.io/en/latest/reference/commands/consumer/install.html for
    # documentation on the 'install' command.
    argv = [
      'install',
      conan_requirement.pkg_spec,
      '--settings', 'os={}'.format(self._conan_os_name),
    ]
    for remote in self.get_options().conan_remotes:
      argv.extend(['--remote', remote])

    workunit_factory = functools.partial(
      self.context.new_workunit,
      name='install-conan-{}'.format(conan_requirement.pkg_spec),
      labels=[WorkUnitLabel.TOOL])
    # CONAN_USER_HOME is somewhat documented at
    # https://docs.conan.io/en/latest/mastering/sharing_settings_and_config.html.
    user_home = self._conan_user_home(conan)
    env = {
      'CONAN_USER_HOME': user_home,
    }

    with conan.run_with(workunit_factory, argv, env=env) as (cmdline, exit_code, workunit):
      if exit_code != 0:
        raise self.ConanFetchError(
          'Error performing conan install with argv {} and environment {}: exited non-zero ({}).'
          .format(cmdline, env, exit_code),
          exit_code=exit_code)

      # Read the stdout from the read-write buffer, from the beginning of the output, and
      # convert to unicode.
      conan_install_stdout = workunit.output('stdout').read_from(0).decode('utf-8')
      pkg_sha = conan_requirement.parse_conan_stdout_for_pkg_sha(conan_install_stdout)

      installed_data_dir = os.path.join(
        user_home,
        '.conan', 'data',
        conan_requirement.directory_path,
        'package',
        pkg_sha)

      # Copy over the contents of the installed package into the target output directory.
      # These paths are currently hardcoded -- see `ExternalNativeLibrary`.
      mergetree(os.path.join(installed_data_dir, conan_requirement.include_relpath),
                os.path.join(target_workdir, 'include'))
      mergetree(os.path.join(installed_data_dir, conan_requirement.lib_relpath),
                os.path.join(target_workdir, 'lib'))
python
{ "resource": "" }
q27560
Properties.load
train
def load(data):
  """Loads properties from an open stream or the contents of a string.

  :API: public

  :param (string | open stream) data: An open stream or a string.
  :returns: A dict of parsed property data.
  :rtype: dict
  """
  # Strings are taken verbatim; anything readable is drained first.
  if isinstance(data, six.string_types):
    contents = data
  elif hasattr(data, 'read') and callable(data.read):
    contents = data.read()
  else:
    raise TypeError('Can only process data from a string or a readable object, given: %s' % data)
  return Properties._parse(contents.splitlines())
python
{ "resource": "" }
q27561
Properties.dump
train
def dump(props, output):
  """Dumps a dict of properties to the specified open stream or file path.

  :API: public
  """
  def escape(token):
    # Escape '=', ':' and whitespace -- the property key/value delimiters.
    return re.sub(r'([=:\s])', r'\\\1', token)

  def emit(out):
    for k, v in props.items():
      out.write('%s=%s\n' % (escape(str(k)), escape(str(v))))

  if hasattr(output, 'write') and callable(output.write):
    emit(output)
  elif isinstance(output, six.string_types):
    with open(output, 'w+') as out:
      emit(out)
  else:
    raise TypeError('Can only dump data to a path or a writable object, given: %s' % output)
python
{ "resource": "" }
q27562
classproperty
train
def classproperty(func):
  """Use as a decorator on a method definition to make it a class-level attribute.

  This decorator can be applied to a method, a classmethod, or a staticmethod. This decorator will
  bind the first argument to the class object.

  Usage:
  >>> class Foo(object):
  ...   @classproperty
  ...   def name(cls):
  ...     return cls.__name__
  ...
  >>> Foo.name
  'Foo'

  Setting or deleting the attribute of this name will overwrite this property.

  The docstring of the classproperty `x` for a class `C` can be obtained by
  `C.__dict__['x'].__doc__`.
  """
  # Capture the docstring before wrapping, since classmethod() would obscure it.
  doc = func.__doc__

  if not isinstance(func, (classmethod, staticmethod)):
    # Wrap plain functions so the descriptor can invoke them bound to the class.
    func = classmethod(func)

  return ClassPropertyDescriptor(func, doc)
python
{ "resource": "" }
q27563
MinimalCover._collect_internal_deps
train
def _collect_internal_deps(self, targets):
  """Collect one level of dependencies from the given targets, and then transitively walk.

  This is different from directly executing `Target.closure_for_targets`, because the resulting
  set will not include the roots unless the roots depend on one another.

  :param targets: Targets whose direct dependencies seed the transitive walk.
  :returns: The transitive closure of the targets' direct dependencies.
  """
  roots = set()
  for target in targets:
    roots.update(target.dependencies)
  return Target.closure_for_targets(roots)
python
{ "resource": "" }
q27564
safe_args
train
def safe_args(args, options, max_args=None, argfile=None, delimiter='\n', quoter=None,
              delete=True):
  """Yields args if there are less than a limit otherwise writes args to an argfile and yields an
  argument list with one argument formed from the path of the argfile.

  :param args: The args to work with.
  :param OptionValueContainer options: scoped options object for this task
  :param max_args: The maximum number of args to let though without writing an argfile.  If not
    specified then the maximum will be loaded from the --max-subprocess-args option.
  :param argfile: The file to write args to when there are too many; defaults to a temporary file.
  :param delimiter: The delimiter to insert between args written to the argfile, defaults to '\n'
  :param quoter: A function that can take the argfile path and return a single argument value;
    defaults to: <code>lambda f: '@' + f<code>
  :param delete: If True deletes any arg files created upon exit from this context; defaults to
    True.
  """
  max_args = max_args or options.max_subprocess_args
  if len(args) > max_args:
    def create_argfile(f):
      logger.debug('Creating argfile {} with contents {}'.format(f.name, ' '.join(args)))
      f.write(delimiter.join(args))
      f.close()
      # Hand back a single-element argv pointing at the argfile.
      return [quoter(f.name) if quoter else '@{}'.format(f.name)]

    if argfile:
      try:
        with safe_open(argfile, 'w') as fp:
          yield create_argfile(fp)
      finally:
        if delete and os.path.exists(argfile):
          os.unlink(argfile)
    else:
      # No explicit argfile given: fall back to a temporary file.
      with temporary_file(cleanup=delete, binary_mode=False) as fp:
        yield create_argfile(fp)
  else:
    yield args
python
{ "resource": "" }
q27565
JVM.get_jvm_options
train
def get_jvm_options(self):
  """Return the options to run this JVM with.

  These are options to the JVM itself, such as -Dfoo=bar, -Xmx=1g, -XX:-UseParallelGC and so on.

  Thus named because get_options() already exists (and returns this object's Pants options).
  """
  ret = []
  for opt in self.get_options().options:
    # Each --options value may itself hold several shell-quoted JVM flags.
    ret.extend(safe_shlex_split(opt))

  if (self.get_options().debug or
      self.get_options().is_flagged('debug_port') or
      self.get_options().is_flagged('debug_args')):
    debug_port = self.get_options().debug_port
    # Substitute the chosen port into each debug-arg template.
    ret.extend(arg.format(debug_port=debug_port) for arg in self.get_options().debug_args)
  return ret
python
{ "resource": "" }
q27566
JVM.get_program_args
train
def get_program_args(self):
  """Get the program args to run this JVM with.

  These are the arguments passed to main() and are program-specific.
  """
  ret = []
  for arg in self.get_options().program_args:
    # Each --program-args value may hold several shell-quoted arguments.
    ret.extend(safe_shlex_split(arg))
  return ret
python
{ "resource": "" }
q27567
VersionedTargetSet.ensure_legal
train
def ensure_legal(self): """Return True as long as the state does not break any internal contracts.""" # Do our best to provide complete feedback, it's easy to imagine the frustration of flipping between error states. if self._results_dir: errors = '' if not os.path.islink(self._results_dir): errors += '\nThe results_dir is no longer a symlink:\n\t* {}'.format(self._results_dir) if not os.path.isdir(self._current_results_dir): errors += '\nThe current_results_dir directory was not found\n\t* {}'.format(self._current_results_dir) if errors: raise self.IllegalResultsDir( '\nThe results_dirs state should not be manually cleaned or recreated by tasks.\n{}'.format(errors) ) return True
python
{ "resource": "" }
q27568
VersionedTargetSet.live_dirs
train
def live_dirs(self): """Yields directories that must exist for this VersionedTarget to function.""" # The only caller of this function is the workdir cleaning pipeline. It is not clear that the previous_results_dir # should be returned for that purpose. And, by the time this is called, the contents have already been copied. if self.has_results_dir: yield self.results_dir yield self.current_results_dir if self.has_previous_results_dir: yield self.previous_results_dir
python
{ "resource": "" }
q27569
VersionedTarget.create_results_dir
train
def create_results_dir(self):
  """Ensure that the empty results directory and a stable symlink exist for these versioned
  targets.
  """
  # The unstable dir is keyed to this specific cache key; the stable path is a symlink to it.
  self._current_results_dir = self._cache_manager._results_dir_path(self.cache_key, stable=False)
  self._results_dir = self._cache_manager._results_dir_path(self.cache_key, stable=True)

  if not self.valid:
    # Clean the workspace for invalid vts.
    safe_mkdir(self._current_results_dir, clean=True)
    relative_symlink(self._current_results_dir, self._results_dir)
  self.ensure_legal()
python
{ "resource": "" }
q27570
VersionedTarget.copy_previous_results
train
def copy_previous_results(self): """Use the latest valid results_dir as the starting contents of the current results_dir. Should be called after the cache is checked, since previous_results are not useful if there is a cached artifact. """ # TODO(mateo): This should probably be managed by the task, which manages the rest of the # incremental support. if not self.previous_cache_key: return None previous_path = self._cache_manager._results_dir_path(self.previous_cache_key, stable=False) if os.path.isdir(previous_path): self.is_incremental = True safe_rmtree(self._current_results_dir) shutil.copytree(previous_path, self._current_results_dir) safe_mkdir(self._current_results_dir) relative_symlink(self._current_results_dir, self.results_dir) # Set the self._previous last, so that it is only True after the copy completed. self._previous_results_dir = previous_path
python
{ "resource": "" }
q27571
InvalidationCacheManager.update
train
def update(self, vts):
  """Mark a changed or invalidated VersionedTargetSet as successfully processed."""
  # Validate each member target first, then the set as a whole.
  for vt in vts.versioned_targets:
    vt.ensure_legal()
    if not vt.valid:
      self._invalidator.update(vt.cache_key)
      vt.valid = True
      self._artifact_write_callback(vt)
  if not vts.valid:
    vts.ensure_legal()
    self._invalidator.update(vts.cache_key)
    vts.valid = True
    self._artifact_write_callback(vts)
python
{ "resource": "" }
q27572
InvalidationCacheManager.force_invalidate
train
def force_invalidate(self, vts):
  """Force invalidation of a VersionedTargetSet."""
  # Invalidate each member target as well as the set itself.
  for vt in vts.versioned_targets:
    self._invalidator.force_invalidate(vt.cache_key)
    vt.valid = False
  self._invalidator.force_invalidate(vts.cache_key)
  vts.valid = False
python
{ "resource": "" }
q27573
InvalidationCacheManager.check
train
def check(self, targets, topological_order=False):
  """Checks whether each of the targets has changed and invalidates it if so.

  Returns a list of VersionedTargetSet objects (either valid or invalid). The returned sets
  'cover' the input targets, with one caveat: if the FingerprintStrategy opted out of
  fingerprinting a target because it doesn't contribute to invalidation, then that target will be
  excluded from all_vts and invalid_vts.

  Callers can inspect these vts and rebuild the invalid ones, for example.
  """
  all_vts = self.wrap_targets(targets, topological_order=topological_order)
  invalid_vts = [vt for vt in all_vts if not vt.valid]
  return InvalidationCheck(all_vts, invalid_vts)
python
{ "resource": "" }
q27574
InvalidationCacheManager._results_dir_path
train
def _results_dir_path(self, key, stable): """Return a results directory path for the given key. :param key: A CacheKey to generate an id for. :param stable: True to use a stable subdirectory, false to use a portion of the cache key to generate a path unique to the key. """ # TODO: Shorten cache_key hashes in general? return os.path.join( self._results_dir_prefix, key.id, self._STABLE_DIR_NAME if stable else sha1(key.hash.encode('utf-8')).hexdigest()[:12] )
python
{ "resource": "" }
q27575
InvalidationCacheManager.wrap_targets
train
def wrap_targets(self, targets, topological_order=False):
  """Wrap targets and their computed cache keys in VersionedTargets.

  If the FingerprintStrategy opted out of providing a fingerprint for a target, that target will
  not have an associated VersionedTarget returned.

  Returns a list of VersionedTargets, each representing one input target.
  """
  def vt_iter():
    if topological_order:
      target_set = set(targets)
      # Reverse the topological sort, and drop any targets not in the input set.
      sorted_targets = [t for t in reversed(sort_targets(targets)) if t in target_set]
    else:
      sorted_targets = sorted(targets)
    for target in sorted_targets:
      target_key = self._key_for(target)
      # A None key means the fingerprint strategy opted this target out.
      if target_key is not None:
        yield VersionedTarget(self, target, target_key)
  return list(vt_iter())
python
{ "resource": "" }
q27576
hermetic_environment_as
train
def hermetic_environment_as(**kwargs):
  """Set the environment to the supplied values from an empty state."""
  # Snapshot the current environment so it can be fully restored on exit.
  old_environment = os.environ.copy() if PY3 else _copy_and_decode_env(os.environ)
  _purge_env()
  try:
    with environment_as(**kwargs):
      yield
  finally:
    # Clear anything the body may have set, then reinstate the snapshot.
    _purge_env()
    _restore_env(old_environment)
python
{ "resource": "" }
q27577
_stdio_stream_as
train
def _stdio_stream_as(src_fd, dst_fd, dst_sys_attribute, mode):
  """Replace the given dst_fd and attribute on `sys` with an open handle to the given src_fd."""
  if src_fd == -1:
    # A src_fd of -1 redirects the stream to /dev/null.
    src = open('/dev/null', mode)
    src_fd = src.fileno()

  # Capture the python and os level file handles.
  old_dst = getattr(sys, dst_sys_attribute)
  old_dst_fd = os.dup(dst_fd)
  if src_fd != dst_fd:
    os.dup2(src_fd, dst_fd)

  # Open up a new file handle to temporarily replace the python-level io object, then yield.
  new_dst = os.fdopen(dst_fd, mode)
  setattr(sys, dst_sys_attribute, new_dst)
  try:
    yield
  finally:
    new_dst.close()

    # Restore the python and os level file handles.
    os.dup2(old_dst_fd, dst_fd)
    setattr(sys, dst_sys_attribute, old_dst)
python
{ "resource": "" }
q27578
signal_handler_as
train
def signal_handler_as(sig, handler):
  """Temporarily replaces a signal handler for the given signal and restores the old handler.

  :param int sig: The target signal to replace the handler for (e.g. signal.SIGINT).
  :param func handler: The new temporary handler.
  """
  previous_handler = signal.signal(sig, handler)
  try:
    yield
  finally:
    # Always reinstate whatever handler was registered before we swapped it out.
    signal.signal(sig, previous_handler)
python
{ "resource": "" }
q27579
temporary_dir
train
def temporary_dir(root_dir=None, cleanup=True, suffix='', permissions=None, prefix=tempfile.template):
  """A with-context that creates a temporary directory.

  :API: public

  You may specify the following keyword args:
  :param string root_dir: The parent directory to create the temporary directory.
  :param bool cleanup: Whether or not to clean up the temporary directory.
  :param int permissions: If provided, sets the directory permissions to this mode.
  """
  tmpdir = tempfile.mkdtemp(dir=root_dir, suffix=suffix, prefix=prefix)
  try:
    if permissions is not None:
      os.chmod(tmpdir, permissions)
    yield tmpdir
  finally:
    if cleanup:
      # Best-effort removal: ignore errors rather than mask the body's outcome.
      shutil.rmtree(tmpdir, ignore_errors=True)
python
{ "resource": "" }
q27580
temporary_file_path
train
def temporary_file_path(root_dir=None, cleanup=True, suffix='', permissions=None):
  """A with-context that creates a temporary file and returns its path.

  :API: public

  You may specify the following keyword args:
  :param str root_dir: The parent directory to create the temporary file.
  :param bool cleanup: Whether or not to clean up the temporary file.
  :param str suffix: Filename suffix for the created file, passed through to `temporary_file`.
  :param int permissions: If provided, sets the file permissions; passed through to
    `temporary_file`.
  """
  with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as fd:
    # Close the open handle so callers receive just a usable path.
    fd.close()
    yield fd.name
python
{ "resource": "" }
q27581
temporary_file
train
def temporary_file(root_dir=None, cleanup=True, suffix='', permissions=None, binary_mode=True):
  """A with-context that creates a temporary file and returns a writeable file descriptor to it.

  You may specify the following keyword args:
  :param str root_dir: The parent directory to create the temporary file.
  :param bool cleanup: Whether or not to clean up the temporary file.
  :param str suffix: If suffix is specified, the file name will end with that suffix.
                     Otherwise there will be no suffix.
                     mkstemp() does not put a dot between the file name and the suffix;
                     if you need one, put it at the beginning of suffix.
                     See :py:class:`tempfile.NamedTemporaryFile`.
  :param int permissions: If provided, sets the file to use these permissions.
  :param bool binary_mode: Whether file opens in binary or text mode.
  """
  mode = 'w+b' if binary_mode else 'w+'  # tempfile's default is 'w+b'
  # delete=False so cleanup stays under our control in the finally block below.
  with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=mode) as fd:
    try:
      if permissions is not None:
        os.chmod(fd.name, permissions)
      yield fd
    finally:
      if cleanup:
        safe_delete(fd.name)
python
{ "resource": "" }
q27582
safe_file
train
def safe_file(path, suffix=None, cleanup=True):
  """A with-context that copies a file, and copies the copy back to the original file on success.

  This is useful for doing work on a file but only changing its state on success.

  :param str suffix: Use this suffix to create the copy. Otherwise use a random string.
  :param bool cleanup: Whether or not to clean up the copy.
  """
  safe_path = '{0}.{1}'.format(path, suffix or uuid.uuid4())
  if os.path.exists(path):
    shutil.copy(path, safe_path)
  try:
    yield safe_path
    if cleanup:
      # Success path: move the working copy back over the original.
      shutil.move(safe_path, path)
    else:
      shutil.copy(safe_path, path)
  finally:
    if cleanup:
      safe_delete(safe_path)
python
{ "resource": "" }
q27583
open_zip
train
def open_zip(path_or_file, *args, **kwargs):
  """A with-context for zip files.

  Passes through *args and **kwargs to zipfile.ZipFile.

  :API: public

  :param path_or_file: Full path to zip file.
  :param args: Any extra args accepted by `zipfile.ZipFile`.
  :param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`.
  :raises: `InvalidZipPath` if path_or_file is invalid.
  :raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file.
  :returns: `class 'contextlib.GeneratorContextManager`.
  """
  if not path_or_file:
    raise InvalidZipPath('Invalid zip location: {}'.format(path_or_file))
  # Default to zip64 support for large archives unless the caller opts out.
  allowZip64 = kwargs.pop('allowZip64', True)
  try:
    zf = zipfile.ZipFile(path_or_file, *args, allowZip64=allowZip64, **kwargs)
  except zipfile.BadZipfile as bze:
    # Use the realpath in order to follow symlinks back to the problem source file.
    raise zipfile.BadZipfile("Bad Zipfile {0}: {1}".format(os.path.realpath(path_or_file), bze))
  try:
    yield zf
  finally:
    zf.close()
python
{ "resource": "" }
q27584
open_tar
train
def open_tar(path_or_file, *args, **kwargs):
  """A with-context for tar files.  Passes through positional and kwargs to tarfile.open.

  If path_or_file is a file, caller must close it separately.
  """
  # Dispatch on input type: a string is treated as a path, anything else as a file object.
  (path, fileobj) = ((path_or_file, None) if isinstance(path_or_file, string_types)
                     else (None, path_or_file))
  # TODO(#6071): stop using six.string_types
  # This should only accept python3 `str`, not byte strings.
  with closing(TarFile.open(path, *args, fileobj=fileobj, **kwargs)) as tar:
    yield tar
python
{ "resource": "" }
q27585
maybe_profiled
train
def maybe_profiled(profile_path):
  """A profiling context manager.

  :param string profile_path: The path to write profile information to. If `None`, this will no-op.
  """
  if not profile_path:
    yield
    return

  # Imported lazily so the profiler is only loaded when actually requested.
  import cProfile
  profiler = cProfile.Profile()
  try:
    profiler.enable()
    yield
  finally:
    profiler.disable()
    profiler.dump_stats(profile_path)
    view_cmd = green('gprof2dot -f pstats {path} | dot -Tpng -o {path}.png && open {path}.png'
                     .format(path=profile_path))
    logging.getLogger().info(
      'Dumped profile data to: {}\nUse e.g. {} to render and view.'.format(profile_path, view_cmd)
    )
python
{ "resource": "" }
q27586
plugins
train
def plugins():
  """Returns a tuple of the plugin classes registered with the python style checker.

  :rtype: tuple of :class:`pants.contrib.python.checks.checker.common.CheckstylePlugin` subtypes
  """
  # NB: each entry is a plugin class (not an instance); callers instantiate per-file.
  return (
    ClassFactoring,
    ConstantLogic,
    ExceptStatements,
    FutureCompatibility,
    ImportOrder,
    Indentation,
    MissingContextManager,
    NewStyleClasses,
    Newlines,
    PrintStatements,
    TrailingWhitespace,
    PEP8VariableNames,
    PyflakesChecker,
    PyCodeStyleChecker,
  )
python
{ "resource": "" }
q27587
Checker._get_nits
train
def _get_nits(self, filename):
  """Iterate over the instances style checker and yield Nits.

  :param filename: str pointing to a file within the buildroot.
  """
  try:
    python_file = PythonFile.parse(filename, root=self._root_dir)
  except CheckSyntaxError as e:
    # A file that doesn't parse yields a single syntax-error nit and nothing else.
    yield e.as_nit()
    return

  if noqa_file_filter(python_file):
    return

  if self._excluder:
    # Filter out any suppressed plugins
    check_plugins = [(plugin_name, plugin_factory)
                     for plugin_name, plugin_factory in self._plugin_factories.items()
                     if self._excluder.should_include(filename, plugin_name)]
  else:
    check_plugins = self._plugin_factories.items()

  for plugin_name, plugin_factory in check_plugins:
    for i, nit in enumerate(plugin_factory(python_file)):
      if i == 0:
        # NB: Add debug log header for nits from each plugin, but only if there are nits from it.
        self.log.debug('Nits from plugin {} for {}'.format(plugin_name, filename))

      if not nit.has_lines_to_display:
        yield nit
        continue

      # Suppress the nit if any of its lines carry a noqa marker.
      if all(not line_contains_noqa(line) for line in nit.lines):
        yield nit
python
{ "resource": "" }
q27588
Checker._check_file
train
def _check_file(self, filename): """Process python file looking for indications of problems. :param filename: (str) Python source filename :return: (int) number of failures """ # If the user specifies an invalid severity use comment. log_threshold = Nit.SEVERITY.get(self._severity, Nit.COMMENT) failure_count = 0 fail_threshold = Nit.WARNING if self._strict else Nit.ERROR for i, nit in enumerate(self._get_nits(filename)): if i == 0: print() # Add an extra newline to clean up the output only if we have nits. if nit.severity >= log_threshold: print('{nit}\n'.format(nit=nit)) if nit.severity >= fail_threshold: failure_count += 1 return failure_count
python
{ "resource": "" }
q27589
BinaryToolFetcher._select_binary_stream
train
def _select_binary_stream(self, name, urls):
  """Download a file from a list of urls, yielding a stream after downloading the file.

  URLs are tried in order until they succeed.

  :raises: :class:`BinaryToolFetcher.BinaryNotFound` if requests to all the given urls fail.
  """
  downloaded_successfully = False
  accumulated_errors = []
  for url in OrderedSet(urls):  # De-dup URLS: we only want to try each URL once.
    logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url))
    try:
      with temporary_file() as dest:
        logger.debug("in BinaryToolFetcher: url={}, timeout_secs={}"
                     .format(url, self._timeout_secs))
        self._fetcher.download(url,
                               listener=Fetcher.ProgressListener(),
                               path_or_fd=dest,
                               timeout_secs=self._timeout_secs)
        logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url))
        downloaded_successfully = True
        # Rewind so the consumer reads the stream from the start.
        dest.seek(0)
        yield dest
        break
    except (IOError, Fetcher.Error, ValueError) as e:
      # Record the failure and fall through to the next candidate url.
      accumulated_errors.append('Failed to fetch binary from {url}: {error}'
                                .format(url=url, error=e))
  if not downloaded_successfully:
    raise self.BinaryNotFound(name, accumulated_errors)
python
{ "resource": "" }
q27590
BinaryToolFetcher.fetch_binary
train
def fetch_binary(self, fetch_request):
  """Fulfill a binary fetch request.

  :param fetch_request: A BinaryFetchRequest carrying the download path, file name and urls.
  :returns: The path to the bootstrapped binary on disk.
  """
  bootstrap_dir = os.path.realpath(os.path.expanduser(self._bootstrap_dir))
  bootstrapped_binary_path = os.path.join(bootstrap_dir, fetch_request.download_path)
  logger.debug("bootstrapped_binary_path: {}".format(bootstrapped_binary_path))
  file_name = fetch_request.file_name
  urls = fetch_request.urls

  # Download only when forced to, or when no previously-bootstrapped copy exists.
  if self._ignore_cached_download or not os.path.exists(bootstrapped_binary_path):
    self._do_fetch(bootstrapped_binary_path, file_name, urls)

  logger.debug('Selected {binary} binary bootstrapped to: {path}'
               .format(binary=file_name, path=bootstrapped_binary_path))
  return bootstrapped_binary_path
python
{ "resource": "" }
q27591
BinaryUtil.select
train
def select(self, binary_request):
  """Fetches a file, unpacking it if necessary."""
  logger.debug("binary_request: {!r}".format(binary_request))

  try:
    download_path = self._get_download_path(binary_request)
  except self.MissingMachineInfo as e:
    raise self.BinaryResolutionError(binary_request, e)

  try:
    url_generator = self._get_url_generator(binary_request)
  except self.NoBaseUrlsError as e:
    raise self.BinaryResolutionError(binary_request, e)

  urls = self._get_urls(url_generator, binary_request)
  if not isinstance(urls, list):
    # TODO: add test for this error!
    raise self.BinaryResolutionError(
      binary_request,
      TypeError("urls must be a list: was '{}'.".format(urls)))
  fetch_request = BinaryFetchRequest(
    download_path=download_path,
    urls=urls)

  logger.debug("fetch_request: {!r}".format(fetch_request))

  try:
    downloaded_file = self._binary_tool_fetcher.fetch_binary(fetch_request)
  except BinaryToolFetcher.BinaryNotFound as e:
    raise self.BinaryResolutionError(binary_request, e)

  # NB: we mark the downloaded file executable if it is not an archive.
  archiver = binary_request.archiver
  if archiver is None:
    chmod_plus_x(downloaded_file)
    return downloaded_file

  download_dir = os.path.dirname(downloaded_file)
  # Use the 'name' given in the request as the directory name to extract to.
  unpacked_dirname = os.path.join(download_dir, binary_request.name)
  if not os.path.isdir(unpacked_dirname):
    logger.info("Extracting {} to {} .".format(downloaded_file, unpacked_dirname))
    archiver.extract(downloaded_file, unpacked_dirname, concurrency_safe=True)
  return unpacked_dirname
python
{ "resource": "" }
q27592
safe_shlex_split
train
def safe_shlex_split(text_or_binary):
  """Split a string using shell-like syntax.

  Safe even on python versions whose shlex.split() method doesn't accept unicode.

  :param text_or_binary: Text or bytes to split into shell tokens.
  :returns: The list of shell-split tokens.
  """
  # shlex.split() wants text on py3 but bytes on py2, so normalize accordingly.
  value = ensure_text(text_or_binary) if PY3 else ensure_binary(text_or_binary)
  return shlex.split(value)
python
{ "resource": "" }
q27593
create_path_env_var
train
def create_path_env_var(new_entries, env=None, env_var='PATH', delimiter=':', prepend=False):
  """Join path entries, combining with an environment variable if specified.

  :param new_entries: Iterable of path entries to add.
  :param dict env: Environment to read the existing variable from; defaults to empty.
  :param str env_var: Name of the variable to combine with; defaults to 'PATH'.
  :param str delimiter: Separator between entries; defaults to ':'.
  :param bool prepend: If True, place the new entries before the existing ones.
  """
  existing_value = (env or {}).get(env_var)
  current_entries = [] if existing_value is None else existing_value.split(delimiter)
  additions = list(new_entries)
  combined = additions + current_entries if prepend else current_entries + additions
  return delimiter.join(combined)
python
{ "resource": "" }
q27594
pluralize
train
def pluralize(count, item_type):
  """Pluralizes the item_type if the count does not equal one.

  For example `pluralize(1, 'apple')` returns '1 apple', while
  `pluralize(0, 'apple') returns '0 apples'.

  :return The count and inflected item_type together as a string
  :rtype string
  """
  if count == 1:
    inflected = item_type
  elif item_type.endswith('s'):
    # Words already ending in 's' take an 'es' suffix.
    inflected = item_type + 'es'
  else:
    inflected = item_type + 's'
  return '{} {}'.format(count, inflected)
python
{ "resource": "" }
q27595
strip_prefix
train
def strip_prefix(string, prefix):
  """Returns a copy of the string from which the multi-character prefix has been stripped.

  Use strip_prefix() instead of lstrip() to remove a substring (instead of individual characters)
  from the beginning of a string, if the substring is present. lstrip() does not match substrings
  but rather treats a substring argument as a set of characters.

  :param str string: The string from which to strip the specified prefix.
  :param str prefix: The substring to strip from the left of string, if present.
  :return: The string with prefix stripped from the left, if present.
  :rtype: string
  """
  # Guard clause: nothing to do when the prefix is absent.
  if not string.startswith(prefix):
    return string
  return string[len(prefix):]
python
{ "resource": "" }
q27596
OptionableFactory.signature
train
def signature(cls):
  """Returns kwargs to construct a `TaskRule` that will construct the target Optionable.

  TODO: This indirection avoids a cycle between this module and the `rules` module.
  """
  snake_scope = cls.options_scope.replace('-', '_')
  partial_construct_optionable = functools.partial(_construct_optionable, cls)
  # Give the partial a descriptive name derived from the scope.
  partial_construct_optionable.__name__ = 'construct_scope_{}'.format(snake_scope)
  return dict(
    output_type=cls.optionable_cls,
    input_selectors=tuple(),
    func=partial_construct_optionable,
    input_gets=(Get.create_statically_for_rule_graph(ScopedOptions, Scope),),
    dependency_optionables=(cls.optionable_cls,),
  )
python
{ "resource": "" }
q27597
Optionable.get_scope_info
train
def get_scope_info(cls):
  """Returns a ScopeInfo instance representing this Optionable's options scope.

  :raises OptionsError: If the subclass did not set both options_scope and
    options_scope_category.
  """
  if cls.options_scope is None or cls.options_scope_category is None:
    raise OptionsError(
      '{} must set options_scope and options_scope_category.'.format(cls.__name__))
  return ScopeInfo(cls.options_scope, cls.options_scope_category, cls)
python
{ "resource": "" }
q27598
FilesetRelPathWrapper.to_filespec
train
def to_filespec(cls, args, root='', exclude=None):
  """Return a dict representation of this glob list, relative to the buildroot.

  The format of the dict is:
    {'globs': ['list', 'of', 'strings'],
     (optional) 'exclude': [{'globs': ...}, ...]}

  The globs are in zglobs format.

  :param args: Glob pattern strings, each joined below `root`.
  :param root: Path prefix joined onto each glob (and each excluded glob).
  :param exclude: Optional iterable of exclusions; each entry is either an object exposing a
    `filespec` attribute or an iterable of glob strings.
  """
  result = {'globs': [os.path.join(root, arg) for arg in args]}
  if exclude:
    excludes = []
    # NB: use a distinct loop variable -- the original shadowed the `exclude` parameter here,
    # which is fragile and confusing.
    for excluded in exclude:
      if hasattr(excluded, 'filespec'):
        excludes.append(excluded.filespec)
      else:
        excludes.append({'globs': [os.path.join(root, x) for x in excluded]})
    result['exclude'] = excludes
  return result
python
{ "resource": "" }
q27599
PailgunHandler._run_pants
train
def _run_pants(self, sock, arguments, environment):
  """Execute a given run with a pants runner.

  :param sock: The socket for this request; handed to the runner.
  :param arguments: Arguments for the run; presumably the client's command line -- TODO confirm.
  :param environment: Environment variables for the run.
  """
  self.server.runner_factory(sock, arguments, environment).run()
python
{ "resource": "" }