_id: string (length 2-7)
title: string (length 1-88)
partition: string (3 classes)
text: string (length 75-19.8k)
language: string (1 class)
meta_information: dict
q25000
ImpalaClient.insert
train
def insert(
    self,
    table_name,
    obj=None,
    database=None,
    overwrite=False,
    partition=None,
    values=None,
    validate=True,
):
    """
    Insert into existing table.

    See ImpalaTable.insert for other parameters.

    Parameters
    ----------
    table_name : string
    database : string, default None

    Examples
    --------
    >>> table = 'my_table'
    >>> con.insert(table, table_expr)  # doctest: +SKIP

    # Completely overwrite contents
    >>> con.insert(table, table_expr, overwrite=True)  # doctest: +SKIP
    """
    table = self.table(table_name, database=database)
    return table.insert(
        obj=obj,
        overwrite=overwrite,
        partition=partition,
        values=values,
        validate=validate,
    )
python
{ "resource": "" }
q25001
ImpalaClient.drop_table
train
def drop_table(self, table_name, database=None, force=False):
    """
    Drop an Impala table

    Parameters
    ----------
    table_name : string
    database : string, default None (optional)
    force : boolean, default False
      Database may throw exception if table does not exist

    Examples
    --------
    >>> table = 'my_table'
    >>> db = 'operations'
    >>> con.drop_table(table, database=db, force=True)  # doctest: +SKIP
    """
    statement = ddl.DropTable(
        table_name, database=database, must_exist=not force
    )
    self._execute(statement)
python
{ "resource": "" }
q25002
ImpalaClient.cache_table
train
def cache_table(self, table_name, database=None, pool='default'):
    """
    Caches a table in cluster memory in the given pool.

    Parameters
    ----------
    table_name : string
    database : string, default None (optional)
    pool : string, default 'default'
      The name of the pool in which to cache the table

    Examples
    --------
    >>> table = 'my_table'
    >>> db = 'operations'
    >>> pool = 'op_4GB_pool'
    >>> con.cache_table('my_table', database=db, pool=pool)  # noqa: E501 # doctest: +SKIP
    """
    statement = ddl.CacheTable(table_name, database=database, pool=pool)
    self._execute(statement)
python
{ "resource": "" }
q25003
ImpalaClient.create_function
train
def create_function(self, func, name=None, database=None):
    """
    Creates a function within Impala

    Parameters
    ----------
    func : ImpalaUDF or ImpalaUDA
      Created with wrap_udf or wrap_uda
    name : string (optional)
    database : string (optional)
    """
    if name is None:
        name = func.name
    database = database or self.current_database
    if isinstance(func, udf.ImpalaUDF):
        stmt = ddl.CreateUDF(func, name=name, database=database)
    elif isinstance(func, udf.ImpalaUDA):
        stmt = ddl.CreateUDA(func, name=name, database=database)
    else:
        raise TypeError(func)
    self._execute(stmt)
python
{ "resource": "" }
q25004
ImpalaClient.drop_udf
train
def drop_udf(
    self,
    name,
    input_types=None,
    database=None,
    force=False,
    aggregate=False,
):
    """
    Drops a UDF. If only name is given, this will search for the relevant
    UDF and drop it. To delete an overloaded UDF, give only a name and
    force=True

    Parameters
    ----------
    name : string
    input_types : list of strings (optional)
    force : boolean, default False
      Must be set to true to drop overloaded UDFs
    database : string, default None
    aggregate : boolean, default False
    """
    if not input_types:
        if not database:
            database = self.current_database
        result = self.list_udfs(database=database, like=name)
        if len(result) > 1:
            if force:
                for func in result:
                    self._drop_single_function(
                        func.name,
                        func.inputs,
                        database=database,
                        aggregate=aggregate,
                    )
                return
            else:
                raise Exception(
                    "More than one function with {0} found. "
                    "Please specify force=True".format(name)
                )
        elif len(result) == 1:
            func = result.pop()
            self._drop_single_function(
                func.name,
                func.inputs,
                database=database,
                aggregate=aggregate,
            )
            return
        else:
            raise Exception("No function found with name {0}".format(name))
    self._drop_single_function(
        name, input_types, database=database, aggregate=aggregate
    )
python
{ "resource": "" }
q25005
ImpalaClient.drop_uda
train
def drop_uda(self, name, input_types=None, database=None, force=False):
    """
    Drop aggregate function. See drop_udf for more information on the
    parameters.
    """
    return self.drop_udf(
        name, input_types=input_types, database=database, force=force
    )
python
{ "resource": "" }
q25006
ImpalaClient.list_udfs
train
def list_udfs(self, database=None, like=None):
    """
    Lists all UDFs associated with given database

    Parameters
    ----------
    database : string
    like : string for searching (optional)
    """
    if not database:
        database = self.current_database
    statement = ddl.ListFunction(database, like=like, aggregate=False)
    with self._execute(statement, results=True) as cur:
        result = self._get_udfs(cur, udf.ImpalaUDF)
    return result
python
{ "resource": "" }
q25007
ImpalaClient.list_udas
train
def list_udas(self, database=None, like=None):
    """
    Lists all UDAFs associated with a given database

    Parameters
    ----------
    database : string
    like : string for searching (optional)
    """
    if not database:
        database = self.current_database
    statement = ddl.ListFunction(database, like=like, aggregate=True)
    with self._execute(statement, results=True) as cur:
        result = self._get_udfs(cur, udf.ImpalaUDA)
    return result
python
{ "resource": "" }
q25008
ImpalaClient.exists_udf
train
def exists_udf(self, name, database=None):
    """
    Checks if a given UDF exists within a specified database

    Parameters
    ----------
    name : string, UDF name
    database : string, database name

    Returns
    -------
    if_exists : boolean
    """
    return len(self.list_udfs(database=database, like=name)) > 0
python
{ "resource": "" }
q25009
ImpalaClient.exists_uda
train
def exists_uda(self, name, database=None):
    """
    Checks if a given UDAF exists within a specified database

    Parameters
    ----------
    name : string, UDAF name
    database : string, database name

    Returns
    -------
    if_exists : boolean
    """
    return len(self.list_udas(database=database, like=name)) > 0
python
{ "resource": "" }
q25010
ImpalaClient.compute_stats
train
def compute_stats(self, name, database=None, incremental=False):
    """
    Issue COMPUTE STATS command for a given table

    Parameters
    ----------
    name : string
      Can be fully qualified (with database name)
    database : string, optional
    incremental : boolean, default False
      If True, issue COMPUTE INCREMENTAL STATS
    """
    maybe_inc = 'INCREMENTAL ' if incremental else ''
    cmd = 'COMPUTE {0}STATS'.format(maybe_inc)

    stmt = self._table_command(cmd, name, database=database)
    self._execute(stmt)
python
{ "resource": "" }
q25011
ImpalaClient.invalidate_metadata
train
def invalidate_metadata(self, name=None, database=None):
    """
    Issue INVALIDATE METADATA command, optionally only applying to a
    particular table. See Impala documentation.

    Parameters
    ----------
    name : string, optional
      Table name. Can be fully qualified (with database)
    database : string, optional
    """
    stmt = 'INVALIDATE METADATA'
    if name is not None:
        stmt = self._table_command(stmt, name, database=database)
    self._execute(stmt)
python
{ "resource": "" }
q25012
ImpalaClient.refresh
train
def refresh(self, name, database=None):
    """
    Reload HDFS block location metadata for a table, for example after
    ingesting data as part of an ETL pipeline. Related to INVALIDATE
    METADATA. See Impala documentation for more.

    Parameters
    ----------
    name : string
      Table name. Can be fully qualified (with database)
    database : string, optional
    """
    # TODO(wesm): can this statement be cancelled?
    stmt = self._table_command('REFRESH', name, database=database)
    self._execute(stmt)
python
{ "resource": "" }
q25013
ImpalaClient.describe_formatted
train
def describe_formatted(self, name, database=None):
    """
    Retrieve results of DESCRIBE FORMATTED command. See Impala
    documentation for more.

    Parameters
    ----------
    name : string
      Table name. Can be fully qualified (with database)
    database : string, optional
    """
    from ibis.impala.metadata import parse_metadata

    stmt = self._table_command(
        'DESCRIBE FORMATTED', name, database=database
    )
    query = ImpalaQuery(self, stmt)
    result = query.execute()

    # Leave formatting to pandas
    for c in result.columns:
        result[c] = result[c].str.strip()

    return parse_metadata(result)
python
{ "resource": "" }
q25014
ImpalaClient.show_files
train
def show_files(self, name, database=None):
    """
    Retrieve results of SHOW FILES command for a table. See Impala
    documentation for more.

    Parameters
    ----------
    name : string
      Table name. Can be fully qualified (with database)
    database : string, optional
    """
    stmt = self._table_command('SHOW FILES IN', name, database=database)
    return self._exec_statement(stmt)
python
{ "resource": "" }
q25015
ImpalaClient.table_stats
train
def table_stats(self, name, database=None):
    """
    Return results of SHOW TABLE STATS for indicated table. See also
    ImpalaTable.stats
    """
    stmt = self._table_command('SHOW TABLE STATS', name, database=database)
    return self._exec_statement(stmt)
python
{ "resource": "" }
q25016
ImpalaClient.column_stats
train
def column_stats(self, name, database=None):
    """
    Return results of SHOW COLUMN STATS for indicated table. See also
    ImpalaTable.column_stats
    """
    stmt = self._table_command(
        'SHOW COLUMN STATS', name, database=database
    )
    return self._exec_statement(stmt)
python
{ "resource": "" }
q25017
GroupedTableExpr.projection
train
def projection(self, exprs):
    """
    Like mutate, but do not include existing table columns
    """
    w = self._get_window()
    windowed_exprs = []
    exprs = self.table._resolve(exprs)
    for expr in exprs:
        expr = L.windowize_function(expr, w=w)
        windowed_exprs.append(expr)
    return self.table.projection(windowed_exprs)
python
{ "resource": "" }
q25018
GroupedTableExpr.over
train
def over(self, window):
    """
    Add a window clause to be applied to downstream analytic expressions
    """
    return GroupedTableExpr(
        self.table,
        self.by,
        having=self._having,
        order_by=self._order_by,
        window=window,
    )
python
{ "resource": "" }
q25019
bucket
train
def bucket(
    arg,
    buckets,
    closed='left',
    close_extreme=True,
    include_under=False,
    include_over=False,
):
    """
    Compute a discrete binning of a numeric array

    Parameters
    ----------
    arg : numeric array expression
    buckets : list
    closed : {'left', 'right'}, default 'left'
      Which side of each interval is closed. For example:

      buckets = [0, 100, 200]
      closed = 'left': 100 falls in 2nd bucket
      closed = 'right': 100 falls in 1st bucket
    close_extreme : boolean, default True

    Returns
    -------
    bucketed : coded value expression
    """
    op = Bucket(
        arg,
        buckets,
        closed=closed,
        close_extreme=close_extreme,
        include_under=include_under,
        include_over=include_over,
    )
    return op.to_expr()
python
{ "resource": "" }
q25020
histogram
train
def histogram(
    arg, nbins=None, binwidth=None, base=None, closed='left', aux_hash=None
):
    """
    Compute a histogram with fixed width bins

    Parameters
    ----------
    arg : numeric array expression
    nbins : int, default None
      If supplied, will be used to compute the binwidth
    binwidth : number, default None
      If not supplied, computed from the data (actual max and min values)
    base : number, default None
    closed : {'left', 'right'}, default 'left'
      Which side of each interval is closed

    Returns
    -------
    histogrammed : coded value expression
    """
    op = Histogram(
        arg, nbins, binwidth, base, closed=closed, aux_hash=aux_hash
    )
    return op.to_expr()
python
{ "resource": "" }
q25021
category_label
train
def category_label(arg, labels, nulls=None):
    """
    Format a known number of categories as strings

    Parameters
    ----------
    labels : list of string
    nulls : string, optional
      How to label any null values among the categories

    Returns
    -------
    string_categories : string value expression
    """
    op = CategoryLabel(arg, labels, nulls)
    return op.to_expr()
python
{ "resource": "" }
q25022
isolated
train
def isolated():
  """Returns a chroot for third_party isolated from the ``sys.path``.

  PEX will typically be installed in site-packages flat alongside many other distributions; as
  such, adding the location of the pex distribution to the ``sys.path`` will typically expose many
  other distributions. An isolated chroot can be used as a ``sys.path`` entry to effect only the
  exposure of pex.

  :return: The path of the chroot.
  :rtype: str
  """
  global _ISOLATED
  if _ISOLATED is None:
    from pex import vendor
    from pex.common import safe_mkdtemp, Chroot

    chroot = Chroot(safe_mkdtemp())
    with _tracer().timed('Isolating pex in {}'.format(chroot)):
      pex_path = os.path.join(vendor.VendorSpec.ROOT, 'pex')
      for root, _, files in os.walk(pex_path):
        for f in files:
          if not f.endswith('.pyc'):
            abs_file_path = os.path.join(root, f)
            relpath = os.path.relpath(abs_file_path, pex_path)
            chroot.copy(abs_file_path, os.path.join('pex', relpath), label='pex')

    _ISOLATED = chroot
  return _ISOLATED.path()
python
{ "resource": "" }
q25023
expose
train
def expose(dists):
  """Exposes vendored code in isolated chroots.

  Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for
  addition to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these vendored
  distributions and yield the two chroot paths they were unpacked to.

  :param dists: A list of vendored distribution names to expose.
  :type dists: list of str
  :raise: :class:`ValueError` if any distributions to expose cannot be found.
  :returns: An iterator of exposed vendored distribution chroot paths.
  """
  from pex.common import safe_delete

  for path in VendorImporter.expose(dists, root=isolated()):
    safe_delete(os.path.join(path, '__init__.py'))
    yield path
python
{ "resource": "" }
q25024
VendorImporter.install_vendored
train
def install_vendored(cls, prefix, root=None, expose=None):
  """Install an importer for all vendored code with the given import prefix.

  All distributions listed in ``expose`` will also be made available for import in direct,
  un-prefixed form.

  :param str prefix: The import prefix the installed importer will be responsible for.
  :param str root: The root path of the distribution containing the vendored code. NB: This is
                   the path to the pex code, which serves as the root under which code is
                   vendored at ``pex/vendor/_vendored``.
  :param expose: Optional names of distributions to expose for direct, un-prefixed import.
  :type expose: list of str
  :raise: :class:`ValueError` if any distributions to expose cannot be found.
  """
  from pex import vendor

  root = cls._abs_root(root)
  vendored_path_items = [spec.relpath for spec in vendor.iter_vendor_specs()]

  installed = list(cls._iter_installed_vendor_importers(prefix, root, vendored_path_items))
  assert len(installed) <= 1, (
    'Unexpected extra importers installed for vendored code:\n\t{}'
    .format('\n\t'.join(map(str, installed)))
  )
  if installed:
    vendor_importer = installed[0]
  else:
    # Install all vendored code for pex internal access to it through the vendor import `prefix`.
    vendor_importer = cls.install(uninstallable=True,
                                  prefix=prefix,
                                  path_items=vendored_path_items,
                                  root=root)

  if expose:
    # But only expose the bits needed.
    exposed_paths = []
    for path in cls.expose(expose, root):
      sys.path.insert(0, path)
      exposed_paths.append(os.path.relpath(path, root))
    vendor_importer._expose(exposed_paths)
python
{ "resource": "" }
q25025
VendorImporter.install
train
def install(cls, uninstallable, prefix, path_items, root=None, warning=None):
  """Install an importer for modules found under ``path_items`` at the given import ``prefix``.

  :param bool uninstallable: ``True`` if the installed importer should be uninstalled and any
                             imports it performed be un-imported when ``uninstall`` is called.
  :param str prefix: The import prefix the installed importer will be responsible for.
  :param path_items: The paths relative to ``root`` containing modules to expose for import under
                     ``prefix``.
  :param str root: The root path of the distribution containing the vendored code. NB: This is
                   the path to the pex code, which serves as the root under which code is
                   vendored at ``pex/vendor/_vendored``.
  :param str warning: An optional warning to emit if any imports are made through the installed
                      importer.
  :return: The installed importer.
  """
  root = cls._abs_root(root)
  importables = tuple(cls._iter_importables(root=root, path_items=path_items, prefix=prefix))
  vendor_importer = cls(root=root,
                        importables=importables,
                        uninstallable=uninstallable,
                        warning=warning)
  sys.meta_path.insert(0, vendor_importer)
  _tracer().log('Installed {}'.format(vendor_importer), V=3)
  return vendor_importer
python
{ "resource": "" }
q25026
VendorImporter.uninstall
train
def uninstall(self):
  """Uninstall this importer if possible and un-import any modules imported by it."""
  if not self._uninstallable:
    _tracer().log('Not uninstalling {}'.format(self), V=9)
    return

  if self in sys.meta_path:
    sys.meta_path.remove(self)
    maybe_exposed = frozenset(os.path.join(self._root, importable.path)
                              for importable in self._importables)
    sys.path[:] = [path_item for path_item in sys.path if path_item not in maybe_exposed]
    for loader in self._loaders:
      loader.unload()
    _tracer().log('Uninstalled {}'.format(self), V=3)
python
{ "resource": "" }
q25027
bdist_wheel.wheel_dist_name
train
def wheel_dist_name(self):
    """Return distribution full name with - replaced with _"""
    components = (safer_name(self.distribution.get_name()),
                  safer_version(self.distribution.get_version()))
    if self.build_number:
        components += (self.build_number,)
    return '-'.join(components)
python
{ "resource": "" }
q25028
bdist_wheel.egg2dist
train
def egg2dist(self, egginfo_path, distinfo_path):
    """Convert an .egg-info directory into a .dist-info directory"""
    def adios(p):
        """Appropriately delete directory, file or link."""
        if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
            shutil.rmtree(p)
        elif os.path.exists(p):
            os.unlink(p)

    adios(distinfo_path)

    if not os.path.exists(egginfo_path):
        # There is no egg-info. This is probably because the egg-info
        # file/directory is not named matching the distribution name used
        # to name the archive file. Check for this case and report
        # accordingly.
        import glob
        pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
        possible = glob.glob(pat)
        err = "Egg metadata expected at %s but not found" % (egginfo_path,)
        if possible:
            alt = os.path.basename(possible[0])
            err += " (%s found - possible misnamed archive file?)" % (alt,)

        raise ValueError(err)

    if os.path.isfile(egginfo_path):
        # .egg-info is a single file
        pkginfo_path = egginfo_path
        pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
        os.mkdir(distinfo_path)
    else:
        # .egg-info is a directory
        pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
        pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)

        # ignore common egg metadata that is useless to wheel
        shutil.copytree(egginfo_path, distinfo_path,
                        ignore=lambda x, y: {'PKG-INFO', 'requires.txt',
                                             'SOURCES.txt', 'not-zip-safe'})

        # delete dependency_links if it is only whitespace
        dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
        with open(dependency_links_path, 'r') as dependency_links_file:
            dependency_links = dependency_links_file.read().strip()
        if not dependency_links:
            adios(dependency_links_path)

    write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)

    # XXX heuristically copy any LICENSE/LICENSE.txt?
    license = self.license_file()
    if license:
        license_filename = 'LICENSE.txt'
        shutil.copy(license, os.path.join(distinfo_path, license_filename))

    adios(egginfo_path)
python
{ "resource": "" }
q25029
sign
train
def sign(wheelfile, replace=False, get_keyring=get_keyring):
    """Sign a wheel"""
    warn_signatures()
    WheelKeys, keyring = get_keyring()

    ed25519ll = signatures.get_ed25519ll()

    wf = WheelFile(wheelfile, append=True)
    wk = WheelKeys().load()

    name = wf.parsed_filename.group('name')
    sign_with = wk.signers(name)[0]
    print("Signing {} with {}".format(name, sign_with[1]))

    vk = sign_with[1]
    kr = keyring.get_keyring()
    sk = kr.get_password('wheel', vk)
    keypair = ed25519ll.Keypair(urlsafe_b64decode(binary(vk)),
                                urlsafe_b64decode(binary(sk)))

    record_name = wf.distinfo_name + '/RECORD'
    sig_name = wf.distinfo_name + '/RECORD.jws'
    if sig_name in wf.zipfile.namelist():
        raise WheelError("Wheel is already signed.")
    record_data = wf.zipfile.read(record_name)
    payload = {"hash": "sha256=" + native(urlsafe_b64encode(hashlib.sha256(record_data).digest()))}
    sig = signatures.sign(payload, keypair)
    wf.zipfile.writestr(sig_name, json.dumps(sig, sort_keys=True))
    wf.zipfile.close()
python
{ "resource": "" }
q25030
verify
train
def verify(wheelfile):
    """Verify a wheel.

    The signature will be verified for internal consistency ONLY and printed.
    Wheel's own unpack/install commands verify the manifest against the
    signature and file contents.
    """
    warn_signatures()
    wf = WheelFile(wheelfile)
    sig_name = wf.distinfo_name + '/RECORD.jws'
    try:
        sig = json.loads(native(wf.zipfile.open(sig_name).read()))
    except KeyError:
        raise WheelError('The wheel is not signed (RECORD.jws not found at end of the archive).')

    verified = signatures.verify(sig)
    print("Signatures are internally consistent.", file=sys.stderr)
    print(json.dumps(verified, indent=2))
python
{ "resource": "" }
q25031
install_scripts
train
def install_scripts(distributions):
    """
    Regenerate the entry_points console_scripts for the named distribution.
    """
    try:
        if "__PEX_UNVENDORED__" in __import__("os").environ:
            from setuptools.command import easy_install  # vendor:skip
        else:
            from pex.third_party.setuptools.command import easy_install

        if "__PEX_UNVENDORED__" in __import__("os").environ:
            import pkg_resources  # vendor:skip
        else:
            import pex.third_party.pkg_resources as pkg_resources
    except ImportError:
        raise RuntimeError("'wheel install_scripts' needs setuptools.")

    for dist in distributions:
        pkg_resources_dist = pkg_resources.get_distribution(dist)
        install = get_install_command(dist)
        command = easy_install.easy_install(install.distribution)
        command.args = ['wheel']  # dummy argument
        command.finalize_options()
        command.install_egg_scripts(pkg_resources_dist)
python
{ "resource": "" }
q25032
Variables.from_rc
train
def from_rc(cls, rc=None):
  """Read pex runtime configuration variables from a pexrc file.

  :param rc: an absolute path to a pexrc file.
  :return: A dict of key value pairs found in processed pexrc files.
  :rtype: dict
  """
  ret_vars = {}
  rc_locations = ['/etc/pexrc',
                  '~/.pexrc',
                  os.path.join(os.path.dirname(sys.argv[0]), '.pexrc')]
  if rc:
    rc_locations.append(rc)
  for filename in rc_locations:
    try:
      with open(os.path.expanduser(filename)) as fh:
        rc_items = map(cls._get_kv, fh)
        ret_vars.update(dict(filter(None, rc_items)))
    except IOError:
      continue
  return ret_vars
python
{ "resource": "" }
q25033
Variables.patch
train
def patch(self, **kw):
  """Update the environment for the duration of a context."""
  old_environ = self._environ
  self._environ = self._environ.copy()
  self._environ.update(kw)
  yield
  self._environ = old_environ
python
{ "resource": "" }
q25034
iter_pth_paths
train
def iter_pth_paths(filename):
  """Given a .pth file, extract and yield all inner paths without honoring imports.

  This shadows python's site.py behavior, which is invoked at interpreter startup.
  """
  try:
    f = open(filename, 'rU')  # noqa
  except IOError:
    return

  dirname = os.path.dirname(filename)
  known_paths = set()

  with f:
    for line in f:
      line = line.rstrip()
      if not line or line.startswith('#'):
        continue
      elif line.startswith(('import ', 'import\t')):
        try:
          exec_function(line, globals_map={})
          continue
        except Exception:
          # NB: import lines are routinely abused with extra code appended using `;` so the class
          # of exceptions that might be raised is broader than ImportError. As such we catch
          # broadly here.
          # Defer error handling to the higher level site.py logic invoked at startup.
          return
      else:
        extras_dir, extras_dir_case_insensitive = makepath(dirname, line)
        if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):
          yield extras_dir
          known_paths.add(extras_dir_case_insensitive)
python
{ "resource": "" }
q25035
merge_split
train
def merge_split(*paths):
  """Merge paths into a single path delimited by colons and split on colons to
  return a list of paths.

  :param paths: a variable length list of path strings
  :return: a list of paths from the merged path list split by colons
  """
  filtered_paths = filter(None, paths)
  return [p for p in ':'.join(filtered_paths).split(':') if p]
python
{ "resource": "" }
q25036
DistributionHelper.walk_data
train
def walk_data(cls, dist, path='/'):
  """Yields filename, stream for files identified as data in the distribution"""
  for rel_fn in filter(None, dist.resource_listdir(path)):
    full_fn = os.path.join(path, rel_fn)
    if dist.resource_isdir(full_fn):
      for fn, stream in cls.walk_data(dist, full_fn):
        yield fn, stream
    else:
      yield full_fn[1:], dist.get_resource_stream(dist._provider, full_fn)
python
{ "resource": "" }
q25037
DistributionHelper.zipsafe
train
def zipsafe(dist):
  """Returns whether or not we determine a distribution is zip-safe."""
  # zip-safety is only an attribute of eggs. wheels are considered never
  # zip safe per implications of PEP 427.
  if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'):
    egg_metadata = dist.metadata_listdir('')
    return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata
  else:
    return False
python
{ "resource": "" }
q25038
DistributionHelper.access_zipped_assets
train
def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):
  """
  Create a copy of static resource files as we can't serve them from within the pex file.

  :param static_module_name: Module name containing module to cache in a tempdir
  :type static_module_name: string, for example 'twitter.common.zookeeper' or similar
  :param static_path: Module name, for example 'serverset'
  :param dir_location: create a new temporary directory inside, or None to have one created
  :returns temp_dir: Temporary directory with the zipped assets inside
  :rtype: str
  """
  # asset_path is initially a module name that's the same as the static_path, but will be
  # changed to walk the directory tree
  def walk_zipped_assets(static_module_name, static_path, asset_path, temp_dir):
    for asset in resource_listdir(static_module_name, asset_path):
      asset_target = os.path.normpath(
          os.path.join(os.path.relpath(asset_path, static_path), asset))
      if resource_isdir(static_module_name, os.path.join(asset_path, asset)):
        safe_mkdir(os.path.join(temp_dir, asset_target))
        walk_zipped_assets(static_module_name, static_path,
                           os.path.join(asset_path, asset), temp_dir)
      else:
        with open(os.path.join(temp_dir, asset_target), 'wb') as fp:
          path = os.path.join(static_path, asset_target)
          file_data = resource_string(static_module_name, path)
          fp.write(file_data)

  if dir_location is None:
    temp_dir = safe_mkdtemp()
  else:
    temp_dir = dir_location

  walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)

  return temp_dir
python
{ "resource": "" }
q25039
DistributionHelper.distribution_from_path
train
def distribution_from_path(cls, path, name=None):
  """Return a distribution from a path.

  If name is provided, find the distribution. If none is found matching the name, return None.
  If name is not provided and there is unambiguously a single distribution, return that
  distribution otherwise None.
  """
  # Monkeypatch pkg_resources finders should it not already be so.
  register_finders()
  if name is None:
    distributions = set(find_distributions(path))
    if len(distributions) == 1:
      return distributions.pop()
  else:
    for dist in find_distributions(path):
      if dist.project_name == name:
        return dist
python
{ "resource": "" }
q25040
CacheHelper.update_hash
train
def update_hash(cls, filelike, digest):
  """Update the digest of a single file in a memory-efficient manner."""
  block_size = digest.block_size * 1024
  for chunk in iter(lambda: filelike.read(block_size), b''):
    digest.update(chunk)
python
{ "resource": "" }
q25041
CacheHelper.hash
train
def hash(cls, path, digest=None, hasher=sha1):
  """Return the digest of a single file in a memory-efficient manner."""
  if digest is None:
    digest = hasher()
  with open(path, 'rb') as fh:
    cls.update_hash(fh, digest)
  return digest.hexdigest()
python
{ "resource": "" }
q25042
CacheHelper.zip_hash
train
def zip_hash(cls, zf, prefix=''):
  """Return the hash of the contents of a zipfile, comparable with a cls.dir_hash."""
  prefix_length = len(prefix)
  names = sorted(name[prefix_length:] for name in zf.namelist()
                 if name.startswith(prefix)
                 and not name.endswith('.pyc')
                 and not name.endswith('/'))

  def stream_factory(name):
    return zf.open(prefix + name)

  return cls._compute_hash(names, stream_factory)
python
{ "resource": "" }
q25043
CacheHelper.pex_hash
train
def pex_hash(cls, d):
  """Return a reproducible hash of the contents of a directory."""
  names = sorted(f for f in cls._iter_files(d)
                 if not (f.endswith('.pyc') or f.startswith('.')))

  def stream_factory(name):
    return open(os.path.join(d, name), 'rb')  # noqa: T802

  return cls._compute_hash(names, stream_factory)
python
{ "resource": "" }
q25044
CacheHelper.cache_distribution
train
def cache_distribution(cls, zf, source, target_dir):
  """Possibly cache an egg from within a zipfile into target_cache.

  Given a zipfile handle and a filename corresponding to an egg distribution within
  that zip, maybe write to the target cache and return a Distribution."""
  dependency_basename = os.path.basename(source)
  if not os.path.exists(target_dir):
    target_dir_tmp = target_dir + '.' + uuid.uuid4().hex
    for name in zf.namelist():
      if name.startswith(source) and not name.endswith('/'):
        zf.extract(name, target_dir_tmp)
    os.rename(os.path.join(target_dir_tmp, source),
              os.path.join(target_dir_tmp, dependency_basename))
    rename_if_empty(target_dir_tmp, target_dir)

  dist = DistributionHelper.distribution_from_path(target_dir)
  assert dist is not None, 'Failed to cache distribution %s' % source
  return dist
python
{ "resource": "" }
q25045
Package.register
train
def register(cls, package_type):
  """Register a concrete implementation of a Package to be recognized by pex."""
  if not issubclass(package_type, cls):
    raise TypeError('package_type must be a subclass of Package.')
  cls._REGISTRY.add(package_type)
python
{ "resource": "" }
q25046
Package.from_href
train
def from_href(cls, href, **kw):
  """Convert from a url to Package.

  :param href: The url to parse
  :type href: string
  :returns: A Package object if a valid concrete implementation exists, otherwise None.
  """
  package = cls._HREF_TO_PACKAGE_CACHE.get(href)
  if package is not None:
    return package
  link_href = Link.wrap(href)
  for package_type in cls._REGISTRY:
    try:
      package = package_type(link_href.url, **kw)
      break
    except package_type.InvalidPackage:
      continue
  if package is not None:
    cls._HREF_TO_PACKAGE_CACHE.store(href, package)
  return package
python
{ "resource": "" }
q25047
Package.satisfies
train
def satisfies(self, requirement, allow_prereleases=None):
  """Determine whether this package matches the requirement.

  :param requirement: The requirement to compare this Package against
  :type requirement: string or :class:`pkg_resources.Requirement`
  :param Optional[bool] allow_prereleases: Whether to allow prereleases to satisfy the
                                           `requirement`.
  :returns: True if the package matches the requirement, otherwise False
  """
  requirement = maybe_requirement(requirement)
  link_name = safe_name(self.name).lower()
  if link_name != requirement.key:
    return False

  # NB: If we upgrade to setuptools>=34 the SpecifierSet used here (requirement.specifier) will
  # come from a non-vendored `packaging` package and pex's bootstrap code in `PEXBuilder` will
  # need an update.
  return requirement.specifier.contains(self.raw_version, prereleases=allow_prereleases)
python
{ "resource": "" }
q25048
_add_finder
train
def _add_finder(importer, finder):
  """Register a new pkg_resources path finder that does not replace the existing finder."""
  existing_finder = _get_finder(importer)
  if not existing_finder:
    pkg_resources.register_finder(importer, finder)
  else:
    pkg_resources.register_finder(importer, ChainedFinder.of(existing_finder, finder))
python
{ "resource": "" }
q25049
_remove_finder
train
def _remove_finder(importer, finder):
  """Remove an existing finder from pkg_resources."""
  existing_finder = _get_finder(importer)
  if not existing_finder:
    return
  if isinstance(existing_finder, ChainedFinder):
    try:
      existing_finder.finders.remove(finder)
    except ValueError:
      return
    if len(existing_finder.finders) == 1:
      pkg_resources.register_finder(importer, existing_finder.finders[0])
    elif len(existing_finder.finders) == 0:
      pkg_resources.register_finder(importer, pkg_resources.find_nothing)
  else:
    pkg_resources.register_finder(importer, pkg_resources.find_nothing)
python
{ "resource": "" }
q25050
register_finders
train
def register_finders():
  """Register finders necessary for PEX to function properly."""

  # If the previous finder is set, then we've already monkeypatched, so skip.
  global __PREVIOUS_FINDER
  if __PREVIOUS_FINDER:
    return

  # save previous finder so that it can be restored
  previous_finder = _get_finder(zipimport.zipimporter)
  assert previous_finder, 'This appears to be using an incompatible setuptools.'

  # Enable finding zipped wheels.
  pkg_resources.register_finder(
      zipimport.zipimporter,
      ChainedFinder.of(pkg_resources.find_eggs_in_zip, find_wheels_in_zip))

  # append the wheel finder
  _add_finder(pkgutil.ImpImporter, find_wheels_on_path)

  if importlib_machinery is not None:
    _add_finder(importlib_machinery.FileFinder, find_wheels_on_path)

  __PREVIOUS_FINDER = previous_finder
python
{ "resource": "" }
q25051
unregister_finders
train
def unregister_finders():
  """Unregister finders necessary for PEX to function properly."""
  global __PREVIOUS_FINDER
  if not __PREVIOUS_FINDER:
    return

  pkg_resources.register_finder(zipimport.zipimporter, __PREVIOUS_FINDER)
  _remove_finder(pkgutil.ImpImporter, find_wheels_on_path)

  if importlib_machinery is not None:
    _remove_finder(importlib_machinery.FileFinder, find_wheels_on_path)

  __PREVIOUS_FINDER = None
python
{ "resource": "" }
q25052
patched_packing_env
train
def patched_packing_env(env):
  """Monkey patch packaging.markers.default_environment"""
  old_env = pkg_resources.packaging.markers.default_environment
  new_env = lambda: env
  pkg_resources._vendor.packaging.markers.default_environment = new_env
  try:
    yield
  finally:
    pkg_resources._vendor.packaging.markers.default_environment = old_env
python
{ "resource": "" }
q25053
platform_to_tags
train
def platform_to_tags(platform, interpreter):
  """Splits a "platform" like linux_x86_64-36-cp-cp36m into its components.

  If a simple platform without hyphens is specified, we will fall back to using the current
  interpreter's tags.
  """
  if platform.count('-') >= 3:
    tags = platform.rsplit('-', 3)
  else:
    tags = [platform,
            interpreter.identity.impl_ver,
            interpreter.identity.abbr_impl,
            interpreter.identity.abi_tag]
  tags[0] = tags[0].replace('.', '_').replace('-', '_')
  return tags
python
{ "resource": "" }
q25054
_ResolvableSet.merge
train
def merge(self, resolvable, packages, parent=None):
  """Add a resolvable and its resolved packages."""
  self.__tuples.append(_ResolvedPackages(resolvable, OrderedSet(packages),
                                         parent, resolvable.is_constraint))
  self._check()
python
{ "resource": "" }
q25055
_ResolvableSet.get
train
def get(self, name):
  """Get the set of compatible packages given a resolvable name."""
  resolvable, packages, parent, constraint_only = self._collapse().get(
      self.normalize(name), _ResolvedPackages.empty())
  return packages
python
{ "resource": "" }
q25056
_ResolvableSet.replace_built
train
def replace_built(self, built_packages):
  """Return a copy of this resolvable set but with built packages.

  :param dict built_packages: A mapping from a resolved package to its locally built package.
  :returns: A new resolvable set with built package replacements made.
  """
  def map_packages(resolved_packages):
    packages = OrderedSet(built_packages.get(p, p) for p in resolved_packages.packages)
    return _ResolvedPackages(resolved_packages.resolvable, packages,
                             resolved_packages.parent, resolved_packages.constraint_only)

  return _ResolvableSet([map_packages(rp) for rp in self.__tuples])
python
{ "resource": "" }
q25057
get_ed25519ll
train
def get_ed25519ll():
    """Lazy import-and-test of ed25519 module"""
    global ed25519ll

    if not ed25519ll:
        try:
            import ed25519ll  # fast (thousands / s)
        except (ImportError, OSError):  # pragma nocover
            from . import ed25519py as ed25519ll  # pure Python (hundreds / s)
        test()

    return ed25519ll
python
{ "resource": "" }
q25058
sign
train
def sign(payload, keypair):
    """Return a JWS-JS format signature given a JSON-serializable payload and
    an Ed25519 keypair."""
    get_ed25519ll()

    header = {
        "alg": ALG,
        "jwk": {
            "kty": ALG,  # alg -> kty in jwk-08.
            "vk": native(urlsafe_b64encode(keypair.vk))
        }
    }

    encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
    encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
    secured_input = b".".join((encoded_header, encoded_payload))
    sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
    signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
    encoded_signature = urlsafe_b64encode(signature)

    return {"recipients":
            [{"header": native(encoded_header),
              "signature": native(encoded_signature)}],
            "payload": native(encoded_payload)}
python
{ "resource": "" }
q25059
PEXEnvironment.load_internal_cache
train
def load_internal_cache(cls, pex, pex_info):
  """Possibly cache out the internal cache."""
  internal_cache = os.path.join(pex, pex_info.internal_cache)
  with TRACER.timed('Searching dependency cache: %s' % internal_cache, V=2):
    if os.path.isdir(pex):
      for dist in find_distributions(internal_cache):
        yield dist
    else:
      for dist in itertools.chain(*cls.write_zipped_internal_cache(pex, pex_info)):
        yield dist
python
{ "resource": "" }
q25060
Context.read
train
def read(self, link):
  """Return the binary content associated with the link.

  :param link: The :class:`Link` to read.
  """
  with contextlib.closing(self.open(link)) as fp:
    return fp.read()
python
{ "resource": "" }
q25061
Context.fetch
train
def fetch(self, link, into=None):
  """Fetch the binary content associated with the link and write to a file.

  :param link: The :class:`Link` to fetch.
  :keyword into: If specified, write into the directory ``into``. If ``None``, creates a new
     temporary directory that persists for the duration of the interpreter.
  """
  target = os.path.join(into or safe_mkdtemp(), link.filename)

  if os.path.exists(target):
    # Assume that if the local file already exists, it is safe to use.
    return target

  with TRACER.timed('Fetching %s' % link.url, V=2):
    target_tmp = '%s.%s' % (target, uuid.uuid4())
    with contextlib.closing(self.open(link)) as in_fp:
      with safe_open(target_tmp, 'wb') as out_fp:
        shutil.copyfileobj(in_fp, out_fp)

  os.rename(target_tmp, target)
  return target
python
{ "resource": "" }
q25062
StreamFilelike.detect_algorithm
train
def detect_algorithm(cls, link):
  """Detect the hashing algorithm from the fragment in the link, if any."""
  if any(link.fragment.startswith('%s=' % algorithm) for algorithm in HASHLIB_ALGORITHMS):
    algorithm, value = link.fragment.split('=', 2)
    try:
      return hashlib.new(algorithm), value
    except ValueError:  # unsupported algorithm
      return None, None
  return None, None
python
{ "resource": "" }
q25063
pkginfo_to_metadata
train
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.1')
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        with open(requires_path) as requires_file:
            requires = requires_file.read()
        for extra, reqs in sorted(pkg_resources.split_sections(requires),
                                  key=lambda x: x[0] or ''):
            for item in generate_requirements({extra: reqs}):
                pkg_info[item[0]] = item[1]

    description = pkg_info['Description']
    if description:
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
python
{ "resource": "" }
q25064
WheelKeys.trust
train
def trust(self, scope, vk):
    """Start trusting a particular key for given scope."""
    self.data['verifiers'].append({'scope': scope, 'vk': vk})
    return self
python
{ "resource": "" }
q25065
WheelKeys.untrust
train
def untrust(self, scope, vk):
    """Stop trusting a particular key for given scope."""
    self.data['verifiers'].remove({'scope': scope, 'vk': vk})
    return self
python
{ "resource": "" }
q25066
PEXBuilder.clone
train
def clone(self, into=None):
  """Clone this PEX environment into a new PEXBuilder.

  :keyword into: (optional) An optional destination directory to clone this PEXBuilder into. If
    not specified, a temporary directory will be created.

  Clones PEXBuilder into a new location. This is useful if the PEXBuilder has been frozen and
  rendered immutable.

  .. versionchanged:: 0.8
    The temporary directory created when ``into`` is not specified is now garbage collected on
    interpreter exit.
  """
  chroot_clone = self._chroot.clone(into=into)
  clone = self.__class__(
      chroot=chroot_clone,
      interpreter=self._interpreter,
      pex_info=self._pex_info.copy(),
      preamble=self._preamble,
      copy=self._copy)
  clone.set_shebang(self._shebang)
  clone._distributions = self._distributions.copy()
  return clone
python
{ "resource": "" }
q25067
PEXBuilder.add_source
train
def add_source(self, filename, env_filename):
  """Add a source to the PEX environment.

  :param filename: The source filename to add to the PEX; None to create an empty file at
    `env_filename`.
  :param env_filename: The destination filename in the PEX. This path must be a relative path.
  """
  self._ensure_unfrozen('Adding source')
  self._copy_or_link(filename, env_filename, "source")
python
{ "resource": "" }
q25068
PEXBuilder.add_resource
train
def add_resource(self, filename, env_filename):
  """Add a resource to the PEX environment.

  :param filename: The source filename to add to the PEX; None to create an empty file at
    `env_filename`.
  :param env_filename: The destination filename in the PEX. This path must be a relative path.
  """
  self._ensure_unfrozen('Adding a resource')
  self._copy_or_link(filename, env_filename, "resource")
python
{ "resource": "" }
q25069
PEXBuilder.set_executable
train
def set_executable(self, filename, env_filename=None):
  """Set the executable for this environment.

  :param filename: The file that should be executed within the PEX environment when the PEX is
    invoked.
  :keyword env_filename: (optional) The name that the executable file should be stored as within
    the PEX. By default this will be the base name of the given filename.

  The entry point of the PEX may also be specified via ``PEXBuilder.set_entry_point``.
  """
  self._ensure_unfrozen('Setting the executable')
  if self._pex_info.script:
    raise self.InvalidExecutableSpecification('Cannot set both entry point and script of PEX!')
  if env_filename is None:
    env_filename = os.path.basename(filename)
  if self._chroot.get("executable"):
    raise self.InvalidExecutableSpecification(
        "Setting executable on a PEXBuilder that already has one!")
  self._copy_or_link(filename, env_filename, "executable")
  entry_point = env_filename
  entry_point = entry_point.replace(os.path.sep, '.')
  self._pex_info.entry_point = entry_point.rpartition('.')[0]
python
{ "resource": "" }
q25070
PEXBuilder.set_script
train
def set_script(self, script):
  """Set the entry point of this PEX environment based upon a distribution script.

  :param script: The script name as defined either by a console script or ordinary
    script within the setup.py of one of the distributions added to the PEX.
  :raises: :class:`PEXBuilder.InvalidExecutableSpecification` if the script is not found
    in any distribution added to the PEX.
  """
  # check if 'script' is a console_script
  dist, entry_point = get_entry_point_from_console_script(script, self._distributions)
  if entry_point:
    self.set_entry_point(entry_point)
    TRACER.log('Set entrypoint to console_script %r in %r' % (entry_point, dist))
    return

  # check if 'script' is an ordinary script
  dist, _, _ = get_script_from_distributions(script, self._distributions)
  if dist:
    if self._pex_info.entry_point:
      raise self.InvalidExecutableSpecification('Cannot set both entry point and script of PEX!')
    self._pex_info.script = script
    TRACER.log('Set entrypoint to script %r in %r' % (script, dist))
    return

  raise self.InvalidExecutableSpecification(
      'Could not find script %r in any distribution %s within PEX!' % (
          script, ', '.join(str(d) for d in self._distributions)))
python
{ "resource": "" }
q25071
PEXBuilder._get_installer_paths
train
def _get_installer_paths(self, base):
  """Set up an overrides dict for WheelFile.install that installs the contents
  of a wheel into its own base in the pex dependencies cache.
  """
  return {
      'purelib': base,
      'headers': os.path.join(base, 'headers'),
      'scripts': os.path.join(base, 'bin'),
      'platlib': base,
      'data': base
  }
python
{ "resource": "" }
q25072
PEXBuilder.add_dist_location
train
def add_dist_location(self, dist, name=None):
  """Add a distribution by its location on disk.

  :param dist: The path to the distribution to add.
  :keyword name: (optional) The name of the distribution, should the dist directory alone be
    ambiguous. Packages contained within site-packages directories may require specifying
    ``name``.
  :raises PEXBuilder.InvalidDistribution: When the path does not contain a matching distribution.

  PEX supports packed and unpacked .whl and .egg distributions, as well as any distribution
  supported by setuptools/pkg_resources.
  """
  self._ensure_unfrozen('Adding a distribution')
  bdist = DistributionHelper.distribution_from_path(dist)
  if bdist is None:
    raise self.InvalidDistribution('Could not find distribution at %s' % dist)
  self.add_distribution(bdist)
  self.add_requirement(bdist.as_requirement())
python
{ "resource": "" }
q25073
PEXBuilder.freeze
train
def freeze(self, bytecode_compile=True):
  """Freeze the PEX.

  :param bytecode_compile: If True, precompile .py files into .pyc files when freezing code.

  Freezing the PEX writes all the necessary metadata and environment bootstrapping code. It may
  only be called once and renders the PEXBuilder immutable.
  """
  self._ensure_unfrozen('Freezing the environment')
  self._prepare_code_hash()
  self._prepare_manifest()
  self._prepare_bootstrap()
  self._prepare_main()
  if bytecode_compile:
    self._precompile_source()
  self._frozen = True
python
{ "resource": "" }
q25074
PEXBuilder.build
train
def build(self, filename, bytecode_compile=True):
  """Package the PEX into a zipfile.

  :param filename: The filename where the PEX should be stored.
  :param bytecode_compile: If True, precompile .py files into .pyc files.

  If the PEXBuilder is not yet frozen, it will be frozen by ``build``. This renders the
  PEXBuilder immutable.
  """
  if not self._frozen:
    self.freeze(bytecode_compile=bytecode_compile)
  try:
    os.unlink(filename + '~')
    self._logger.warn('Previous binary unexpectedly exists, cleaning: %s' % (filename + '~'))
  except OSError:
    # The expectation is that the file does not exist, so continue
    pass
  if os.path.dirname(filename):
    safe_mkdir(os.path.dirname(filename))
  with open(filename + '~', 'ab') as pexfile:
    assert os.path.getsize(pexfile.name) == 0
    pexfile.write(to_bytes('%s\n' % self._shebang))
  self._chroot.zip(filename + '~', mode='a')
  if os.path.exists(filename):
    os.unlink(filename)
  os.rename(filename + '~', filename)
  chmod_plus_x(filename)
python
{ "resource": "" }
q25075
resolvables_from_iterable
train
def resolvables_from_iterable(iterable, builder, interpreter=None):
  """Given an iterable of resolvable-like objects, return list of Resolvable objects.

  :param iterable: An iterable of :class:`Resolvable`, :class:`Requirement`, :class:`Package`,
    or `str` to map into an iterable of :class:`Resolvable` objects.
  :returns: A list of :class:`Resolvable` objects.
  """
  def translate(obj):
    if isinstance(obj, Resolvable):
      return obj
    elif isinstance(obj, Requirement):
      return ResolvableRequirement(obj, builder.build(obj.key))
    elif isinstance(obj, Package):
      return ResolvablePackage(obj, builder.build(obj.name))
    elif isinstance(obj, compatibility_string):
      return Resolvable.get(obj, builder, interpreter=interpreter)
    else:
      raise ValueError('Do not know how to resolve %s' % type(obj))

  return list(map(translate, iterable))
python
{ "resource": "" }
q25076
matched_interpreters
train
def matched_interpreters(interpreters, constraints):
  """Given some filters, yield any interpreter that matches at least one of them.

  :param interpreters: a list of PythonInterpreter objects for filtering
  :param constraints: A sequence of strings that constrain the interpreter compatibility for
    this pex. Each string uses the Requirement-style format, e.g. 'CPython>=3' or '>=2.7,<3'
    for requirements agnostic to interpreter class. Multiple requirement strings may be
    combined into a list to OR the constraints, such as ['CPython>=2.7,<3', 'CPython>=3.4'].
  :return interpreter: returns a generator that yields compatible interpreters
  """
  for interpreter in interpreters:
    if any(interpreter.identity.matches(filt) for filt in constraints):
      TRACER.log("Constraints on interpreters: %s, Matching Interpreter: %s"
                 % (constraints, interpreter.binary), V=3)
      yield interpreter
python
{ "resource": "" }
q25077
parse_version
train
def parse_version(version):
    """Use parse_version from pkg_resources or distutils as available."""
    global parse_version
    try:
        if "__PEX_UNVENDORED__" in __import__("os").environ:
            from pkg_resources import parse_version  # vendor:skip
        else:
            from pex.third_party.pkg_resources import parse_version
    except ImportError:
        from distutils.version import LooseVersion as parse_version
    return parse_version(version)
python
{ "resource": "" }
q25078
WheelFile.compatibility_rank
train
def compatibility_rank(self, supported):
    """Rank the wheel against the supported tags. Smaller ranks are more
    compatible!

    :param supported: A list of compatibility tags that the current
        Python implementation can run.
    """
    preferences = []
    for tag in self.compatibility_tags:
        try:
            preferences.append(supported.index(tag))
        # Tag not present
        except ValueError:
            pass
    if len(preferences):
        return (min(preferences), self.arity)
    return (_big_number, 0)
python
{ "resource": "" }
q25079
PythonIdentity.matches
train
def matches(self, requirement):
  """Given a Requirement, check if this interpreter matches."""
  try:
    requirement = self.parse_requirement(requirement, self._interpreter)
  except ValueError as e:
    raise self.UnknownRequirement(str(e))
  return self.distribution in requirement
python
{ "resource": "" }
q25080
PythonIdentity.pkg_resources_env
train
def pkg_resources_env(self, platform_str):
  """Returns a dict that can be used in place of packaging.default_environment."""
  os_name = ''
  platform_machine = ''
  platform_release = ''
  platform_system = ''
  platform_version = ''
  sys_platform = ''
  if 'win' in platform_str:
    os_name = 'nt'
    platform_machine = 'AMD64' if '64' in platform_str else 'x86'
    platform_system = 'Windows'
    sys_platform = 'win32'
  elif 'linux' in platform_str:
    os_name = 'posix'
    platform_machine = 'x86_64' if '64' in platform_str else 'i686'
    platform_system = 'Linux'
    sys_platform = 'linux2' if self._version[0] == 2 else 'linux'
  elif 'macosx' in platform_str:
    os_name = 'posix'
    platform_str = platform_str.replace('.', '_')
    platform_machine = platform_str.split('_', 3)[-1]
    # Darwin versions are macOS version + 4
    platform_release = '{}.0.0'.format(int(platform_str.split('_')[2]) + 4)
    platform_system = 'Darwin'
    platform_version = 'Darwin Kernel Version {}'.format(platform_release)
    sys_platform = 'darwin'
  return {
      'implementation_name': self.interpreter.lower(),
      'implementation_version': self.version_str,
      'os_name': os_name,
      'platform_machine': platform_machine,
      'platform_release': platform_release,
      'platform_system': platform_system,
      'platform_version': platform_version,
      'python_full_version': self.version_str,
      'platform_python_implementation': self.interpreter,
      'python_version': self.version_str[:3],
      'sys_platform': sys_platform,
  }
python
{ "resource": "" }
q25081
PythonInterpreter.from_binary
train
def from_binary(cls, binary):
  """Create an interpreter from the given `binary`.

  :param str binary: The path to the python interpreter binary.
  :return: an interpreter created from the given `binary` with only the specified extras.
  :rtype: :class:`PythonInterpreter`
  """
  if binary not in cls.CACHE:
    if binary == sys.executable:
      cls.CACHE[binary] = cls._from_binary_internal()
    else:
      cls.CACHE[binary] = cls._from_binary_external(binary)
  return cls.CACHE[binary]
python
{ "resource": "" }
q25082
PythonInterpreter.find
train
def find(cls, paths):
  """
  Given a list of files or directories, try to detect python interpreters amongst them.
  Returns a list of PythonInterpreter objects.
  """
  pythons = []
  for path in paths:
    for fn in cls.expand_path(path):
      basefile = os.path.basename(fn)
      if cls._matches_binary_name(basefile):
        try:
          pythons.append(cls.from_binary(fn))
        except Exception as e:
          TRACER.log('Could not identify %s: %s' % (fn, e))
          continue
  return pythons
python
{ "resource": "" }
q25083
Executor.execute
train
def execute(cls, cmd, stdin_payload=None, **kwargs):
  """Execute a command via subprocess.Popen and returns the stdio.

  :param string|list cmd: A list or string representing the command to run.
  :param string stdin_payload: A string representing the stdin payload, if any, to send.
  :param **kwargs: Additional kwargs to pass through to subprocess.Popen.
  :return: A tuple of strings representing (stdout, stderr), pre-decoded for utf-8.
  :raises: `Executor.ExecutableNotFound` when the executable requested to run does not exist.
           `Executor.NonZeroExit` when the execution fails with a non-zero exit code.
  """
  process = cls.open_process(cmd=cmd,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             **kwargs)
  stdout_raw, stderr_raw = process.communicate(input=stdin_payload)
  # N.B. In cases where `stdout` or `stderr` is passed as parameters, these can be None.
  stdout = stdout_raw.decode('utf-8') if stdout_raw is not None else stdout_raw
  stderr = stderr_raw.decode('utf-8') if stderr_raw is not None else stderr_raw

  if process.returncode != 0:
    raise cls.NonZeroExit(cmd, process.returncode, stdout, stderr)

  return stdout, stderr
python
{ "resource": "" }
q25084
ConfigHandler.parse
train
def parse(self):
    """Parses configuration file items from one or more related sections.
    """
    for section_name, section_options in self.sections.items():

        method_postfix = ''
        if section_name:  # [section.option] variant
            method_postfix = '_%s' % section_name

        section_parser_method = getattr(
            self,
            # Dots in section names are translated into dunderscores.
            ('parse_section%s' % method_postfix).replace('.', '__'),
            None)

        if section_parser_method is None:
            raise DistutilsOptionError(
                'Unsupported distribution option section: [%s.%s]' % (
                    self.section_prefix, section_name))

        section_parser_method(section_options)
python
{ "resource": "" }
q25085
_get_supported_for_any_abi
train
def _get_supported_for_any_abi(version=None, platform=None, impl=None, force_manylinux=False):
  """Generates supported tags for unspecified ABI types to support more intuitive cross-platform
  resolution."""
  unique_tags = {
    tag for abi in _gen_all_abis(impl, version)
    for tag in _get_supported(version=version,
                              platform=platform,
                              impl=impl,
                              abi=abi,
                              force_manylinux=force_manylinux)
  }
  return list(unique_tags)
python
{ "resource": "" }
q25086
Platform.supported_tags
train
def supported_tags(self, interpreter=None, force_manylinux=True):
  """Returns a list of supported PEP425 tags for the current platform."""
  if interpreter and not self.is_extended:
    # N.B. If we don't get an extended platform specifier, we generate
    # all possible ABI permutations to mimic earlier pex version
    # behavior and make cross-platform resolution more intuitive.
    return _get_supported_for_any_abi(
      platform=self.platform,
      impl=interpreter.identity.abbr_impl,
      version=interpreter.identity.impl_ver,
      force_manylinux=force_manylinux
    )
  else:
    return _get_supported(
      platform=self.platform,
      impl=self.impl,
      version=self.version,
      abi=self.abi,
      force_manylinux=force_manylinux
    )
python
{ "resource": "" }
q25087
SourceTranslator.translate
train
def translate(self, package, into=None):
  """From a SourcePackage, translate to a binary distribution."""
  if not isinstance(package, SourcePackage):
    return None
  if not package.local:
    raise ValueError('SourceTranslator cannot translate remote packages.')

  installer = None
  version = self._interpreter.version
  unpack_path = Archiver.unpack(package.local_path)
  into = into or safe_mkdtemp()

  try:
    if self._use_2to3 and version >= (3,):
      with TRACER.timed('Translating 2->3 %s' % package.name):
        self.run_2to3(unpack_path)
    installer = self._installer_impl(unpack_path, interpreter=self._interpreter)
    with TRACER.timed('Packaging %s' % package.name):
      try:
        dist_path = installer.bdist()
      except self._installer_impl.InstallFailure as e:
        TRACER.log('Failed to install package at %s: %s' % (unpack_path, e))
        return None
      target_path = os.path.join(into, os.path.basename(dist_path))
      safe_copy(dist_path, target_path)
      target_package = Package.from_href(target_path)
      if not target_package:
        TRACER.log('Target path %s does not look like a Package.' % target_path)
        return None
      if not target_package.compatible(self._supported_tags):
        TRACER.log('Target package %s is not compatible with %s' % (
          target_package, self._supported_tags))
        return None
      return DistributionHelper.distribution_from_path(target_path)
  except Exception as e:
    TRACER.log('Failed to translate %s' % package)
    TRACER.log(traceback.format_exc())
  finally:
    if installer:
      installer.cleanup()
    if unpack_path:
      safe_rmtree(unpack_path)
python
{ "resource": "" }
q25088
BinaryTranslator.translate
train
def translate(self, package, into=None): """From a binary package, translate to a local binary distribution.""" if not package.local: raise ValueError('BinaryTranslator cannot translate remote packages.') if not isinstance(package, self._package_type): return None if not package.compatible(self._supported_tags): TRACER.log('Target package %s is not compatible with %s' % ( package, self._supported_tags)) return None into = into or safe_mkdtemp() target_path = os.path.join(into, package.filename) safe_copy(package.local_path, target_path) return DistributionHelper.distribution_from_path(target_path)
python
{ "resource": "" }
q25089
setup_interpreter
train
def setup_interpreter(distributions, interpreter=None):
    """Return an interpreter configured with vendored distributions as extras.

    Any distributions that are present in the vendored set will be added to the interpreter as
    extras.

    :param distributions: The names of distributions to setup the interpreter with.
    :type distributions: list of str
    :param interpreter: An optional interpreter to configure. If ``None``, the current
                        interpreter is used.
    :type interpreter: :class:`pex.interpreter.PythonInterpreter`
    :return: A bare interpreter configured with vendored extras.
    :rtype: :class:`pex.interpreter.PythonInterpreter`
    """
    from pex.interpreter import PythonInterpreter

    interpreter = interpreter or PythonInterpreter.get()
    for dist in _vendored_dists(OrderedSet(distributions)):
        interpreter = interpreter.with_extra(dist.key, dist.version, dist.location)
    return interpreter
python
{ "resource": "" }
q25090
vendor_runtime
train
def vendor_runtime(chroot, dest_basedir, label, root_module_names):
    """Includes portions of vendored distributions in a chroot.

    The portion to include is selected by root module name. If the module is a file, just that
    file is included. If the module represents a package, the package and all its sub-packages
    are added recursively.

    :param chroot: The chroot to add vendored code to.
    :type chroot: :class:`pex.common.Chroot`
    :param str dest_basedir: The prefix to store the vendored code under in the ``chroot``.
    :param str label: The chroot label for the vendored code fileset.
    :param root_module_names: The names of the root vendored modules to include in the chroot.
    :type root_module_names: :class:`collections.Iterable` of str
    :raise: :class:`ValueError` if any of the given ``root_module_names`` could not be found
            amongst the vendored code and added to the chroot.
    """
    vendor_module_names = {root_module_name: False for root_module_name in root_module_names}

    for spec in iter_vendor_specs():
        for root, dirs, files in os.walk(spec.target_dir):
            if root == spec.target_dir:
                dirs[:] = [pkg_name for pkg_name in dirs if pkg_name in vendor_module_names]
                files[:] = [mod_name for mod_name in files if mod_name[:-3] in vendor_module_names]
                vendored_names = dirs + files
                if vendored_names:
                    pkg_path = ''
                    for pkg in spec.relpath.split(os.sep):
                        pkg_path = os.path.join(pkg_path, pkg)
                        pkg_file = os.path.join(pkg_path, '__init__.py')
                        src = os.path.join(VendorSpec.ROOT, pkg_file)
                        dest = os.path.join(dest_basedir, pkg_file)
                        if os.path.exists(src):
                            chroot.copy(src, dest, label)
                        else:
                            # We delete `pex/vendor/_vendored/<dist>/__init__.py` when isolating
                            # third_party.
                            chroot.touch(dest, label)
                    for name in vendored_names:
                        vendor_module_names[name] = True
                        TRACER.log('Vendoring {} from {} @ {}'.format(name, spec, spec.target_dir),
                                   V=3)

            for filename in files:
                if not filename.endswith('.pyc'):  # Sources and data only.
                    src = os.path.join(root, filename)
                    dest = os.path.join(dest_basedir, spec.relpath,
                                        os.path.relpath(src, spec.target_dir))
                    chroot.copy(src, dest, label)

    if not all(vendor_module_names.values()):
        raise ValueError('Failed to extract {module_names} from:\n\t{specs}'.format(
            module_names=', '.join(module for module, written in vendor_module_names.items()
                                   if not written),
            specs='\n\t'.join('{} @ {}'.format(spec, spec.target_dir)
                              for spec in iter_vendor_specs())))
python
{ "resource": "" }
q25091
VendorSpec.create_packages
train
def create_packages(self): """Create missing packages joining the vendor root to the base of the vendored distribution. For example, given a root at ``/home/jake/dev/pantsbuild/pex`` and a vendored distribution at ``pex/vendor/_vendored/requests`` this method would create the following package files:: pex/vendor/_vendored/__init__.py pex/vendor/_vendored/requests/__init__.py These package files allow for standard python importers to find vendored code via re-directs from a `PEP-302 <https://www.python.org/dev/peps/pep-0302/>`_ importer like :class:`pex.third_party.VendorImporter`. """ for index, _ in enumerate(self._subpath_components): relpath = _PACKAGE_COMPONENTS + self._subpath_components[:index + 1] + ['__init__.py'] touch(os.path.join(self.ROOT, *relpath))
python
{ "resource": "" }
q25092
PageParser.rel_links
train
def rel_links(cls, page):
    """Return rel= links that should be scraped, skipping obvious data links."""
    for match in cls.REL_RE.finditer(page):
        href, rel = match.group(0), match.group(1)
        if rel not in cls.REL_TYPES:
            continue
        href_match = cls.HREF_RE.search(href)
        if href_match:
            href = cls.href_match_to_url(href_match)
            parsed_href = urlparse(href)
            if any(parsed_href.path.endswith(ext) for ext in cls.REL_SKIP_EXTENSIONS):
                continue
            yield href
python
{ "resource": "" }
q25093
PageParser.links
train
def links(cls, page):
    """Return all links on a page, including potential rel= links."""
    for match in cls.HREF_RE.finditer(page):
        yield cls.href_match_to_url(match)
python
{ "resource": "" }
q25094
archive_wheelfile
train
def archive_wheelfile(base_name, base_dir): """Archive all files under `base_dir` in a whl file and name it like `base_name`. """ olddir = os.path.abspath(os.curdir) base_name = os.path.abspath(base_name) try: os.chdir(base_dir) return make_wheelfile_inner(base_name) finally: os.chdir(olddir)
python
{ "resource": "" }
q25095
make_wheelfile_inner
train
def make_wheelfile_inner(base_name, base_dir='.'):
    """Create a whl file from all the files under 'base_dir'.

    Places .dist-info at the end of the archive."""
    zip_filename = base_name + ".whl"
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    # Some applications need reproducible .whl files, but they can't do this
    # without forcing the timestamp of the individual ZipInfo objects. See
    # issue #143.
    timestamp = os.environ.get('SOURCE_DATE_EPOCH')
    if timestamp is None:
        date_time = None
    else:
        date_time = time.gmtime(int(timestamp))[0:6]

    score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}

    def writefile(path, date_time):
        st = os.stat(path)
        if date_time is None:
            mtime = time.gmtime(st.st_mtime)
            date_time = mtime[0:6]
        zinfo = zipfile.ZipInfo(path, date_time)
        zinfo.external_attr = st.st_mode << 16
        zinfo.compress_type = zipfile.ZIP_DEFLATED
        with open(path, 'rb') as fp:
            zip.writestr(zinfo, fp.read())
        log.info("adding '%s'" % path)

    with zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED,
                         allowZip64=True) as zip:
        deferred = []
        for dirpath, dirnames, filenames in os.walk(base_dir):
            # Sort the directory names so that `os.walk` will walk them in a
            # defined order on the next iteration.
            dirnames.sort()
            for name in sorted(filenames):
                path = os.path.normpath(os.path.join(dirpath, name))
                if os.path.isfile(path):
                    if dirpath.endswith('.dist-info'):
                        deferred.append((score.get(name, 0), path))
                    else:
                        writefile(path, date_time)

        deferred.sort()
        for _, path in deferred:  # `_` avoids shadowing the `score` ranking dict.
            writefile(path, date_time)

    return zip_filename
python
{ "resource": "" }
q25096
open_with_auth
train
def open_with_auth(url, opener=urllib.request.urlopen):
    """Open a urllib2 request, handling HTTP authentication"""
    scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)

    # Double scheme does not raise on Mac OS X as revealed by a
    # failing test. We would expect "nonnumeric port". Refs #20.
    if netloc.endswith(':'):
        raise http_client.InvalidURL("nonnumeric port: ''")

    if scheme in ('http', 'https'):
        auth, host = urllib.parse.splituser(netloc)
    else:
        auth, host = None, netloc  # Guard against `host` being unbound below.

    if not auth:
        cred = PyPIConfig().find_credential(url)
        if cred:
            auth = str(cred)
            info = cred.username, url
            log.info('Authenticating as %s for %s (from .pypirc)', *info)

    if auth:
        auth = "Basic " + _encode_auth(auth)
        parts = scheme, host, path, params, query, frag
        new_url = urllib.parse.urlunparse(parts)
        request = urllib.request.Request(new_url)
        request.add_header("Authorization", auth)
    else:
        request = urllib.request.Request(url)

    request.add_header('User-Agent', user_agent)
    fp = opener(request)

    if auth:
        # Put authentication info back into request URL if same host,
        # so that links found on the page will work
        s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
        if s2 == scheme and h2 == host:
            parts = s2, netloc, path2, param2, query2, frag2
            fp.url = urllib.parse.urlunparse(parts)

    return fp
python
{ "resource": "" }
q25097
open_zip
train
def open_zip(path, *args, **kwargs):
    """A contextmanager for zip files.

    Passes through positional args and kwargs to zipfile.ZipFile.
    """
    with contextlib.closing(PermPreservingZipFile(path, *args, **kwargs)) as zip:
        yield zip
python
{ "resource": "" }
q25098
safe_mkdir
train
def safe_mkdir(directory, clean=False): """Safely create a directory. Ensures a directory is present. If it's not there, it is created. If it is, it's a no-op. If clean is True, ensures the directory is empty. """ if clean: safe_rmtree(directory) try: os.makedirs(directory) except OSError as e: if e.errno != errno.EEXIST: raise
python
{ "resource": "" }
q25099
safe_open
train
def safe_open(filename, *args, **kwargs):
    """Safely open a file.

    ``safe_open`` ensures that the directory components leading up to the specified file have
    been created first.
    """
    safe_mkdir(os.path.dirname(filename))
    return open(filename, *args, **kwargs)
python
{ "resource": "" }