code                       string
signature                  string
docstring                  string
loss_without_docstring     float64
loss_with_docstring        float64
factor                     float64
try:
    os.makedirs(path, mode)
    return True
except OSError as e:
    if e.errno != errno.EEXIST:
        # We don't want to swallow errors other than EEXIST,
        # because we could be obscuring a real problem.
        raise
    return False
def makedirs(path, mode=0o777)
Create a directory if it doesn't already exist (keeping concurrency in mind). :param path: The pathname of the directory to create (a string). :param mode: The mode to apply to newly created directories (an integer, defaults to the octal number ``0777``). :returns: :data:`True` when the directory was created, :data:`False` if it already existed. :raises: Any exceptions raised by :func:`os.makedirs()` except for :data:`errno.EEXIST` (this error is swallowed and :data:`False` is returned instead).
3.266283
3.556978
0.918275
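A minimal usage sketch for the makedirs() helper above, assuming the function (and its os/errno imports) is in scope; the exact module it would be imported from is not shown here:

    import os
    import tempfile

    target = os.path.join(tempfile.mkdtemp(), 'nested', 'directory')
    print(makedirs(target))  # True: the directory tree was created
    print(makedirs(target))  # False: it already existed, and no exception is raised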
if all(os.path.isdir(p) for p in (path1, path2)):
    try:
        return os.path.samefile(path1, path2)
    except AttributeError:
        # On Windows and Python 2 os.path.samefile() is unavailable.
        return os.path.realpath(path1) == os.path.realpath(path2)
else:
    return False
def same_directories(path1, path2)
Check if two pathnames refer to the same directory. :param path1: The first pathname (a string). :param path2: The second pathname (a string). :returns: :data:`True` if both pathnames refer to the same directory, :data:`False` otherwise.
2.507443
2.596193
0.965816
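A small sketch of same_directories() above, assuming the function is in scope; the directories are created on the fly:

    import os
    import tempfile

    base = tempfile.mkdtemp()
    alias = os.path.join(base, '.')                     # different spelling, same directory
    print(same_directories(base, alias))                # True
    print(same_directories(base, tempfile.mkdtemp()))   # False: two distinct directories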
context = hashlib.new(method)
for filename in files:
    with open(filename, 'rb') as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            context.update(chunk)
return context.hexdigest()
def hash_files(method, *files)
Calculate the hexadecimal digest of one or more local files. :param method: The hash method (a string, given to :func:`hashlib.new()`). :param files: The pathname(s) of file(s) to hash (zero or more strings). :returns: The calculated hex digest (a string).
2.009362
2.273447
0.883839
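A short sketch of hash_files() above (assuming the function is in scope), cross-checked against hashlib; because all files feed one digest context, the result equals the hash of the concatenated contents:

    import hashlib
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as a:
        a.write(b'hello ')
    with tempfile.NamedTemporaryFile(delete=False) as b:
        b.write(b'world')
    digest = hash_files('sha1', a.name, b.name)
    print(digest == hashlib.sha1(b'hello world').hexdigest())  # True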
# Try os.replace() which was introduced in Python 3.3
# (this should work on POSIX as well as Windows systems).
try:
    os.replace(src, dst)
    return
except AttributeError:
    pass
# Try os.rename() which is atomic on UNIX but refuses to overwrite existing
# files on Windows.
try:
    os.rename(src, dst)
    return
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
# Finally we fall back to the dumb approach required only on Windows.
# See https://bugs.python.org/issue8828 for a long winded discussion.
os.remove(dst)
os.rename(src, dst)
def replace_file(src, dst)
Overwrite a file (in an atomic fashion when possible). :param src: The pathname of the source file (a string). :param dst: The pathname of the destination file (a string).
4.166975
4.24978
0.980516
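A sketch of replace_file() above (assuming the function is in scope); the destination is overwritten, atomically where the platform allows it:

    import os
    import tempfile

    directory = tempfile.mkdtemp()
    src = os.path.join(directory, 'new.txt')
    dst = os.path.join(directory, 'old.txt')
    with open(src, 'w') as handle:
        handle.write('new contents')
    with open(dst, 'w') as handle:
        handle.write('old contents')
    replace_file(src, dst)
    print(open(dst).read())      # 'new contents'
    print(os.path.exists(src))   # False: the source was moved over the destination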
required_dist = next(parse_requirements(expr))
try:
    installed_dist = get_distribution(required_dist.key)
    return installed_dist in required_dist
except DistributionNotFound:
    return False
def requirement_is_installed(expr)
Check whether a requirement is installed. :param expr: A requirement specification similar to those used in pip requirement files (a string). :returns: :data:`True` if the requirement is available (installed), :data:`False` otherwise.
3.906519
5.150713
0.758442
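A usage sketch of requirement_is_installed() above, assuming the function and its pkg_resources helpers are in scope; the specifiers follow pip requirement syntax:

    print(requirement_is_installed('setuptools >= 0.8'))       # True in most environments
    print(requirement_is_installed('no-such-package == 1.0'))  # False: the distribution isn't found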
command = UninstallCommand()
opts, args = command.parse_args(['--yes'] + list(package_names))
command.run(opts, args)
def uninstall(*package_names)
Uninstall one or more packages using the Python equivalent of ``pip uninstall --yes``. The package(s) to uninstall must be installed, otherwise pip will raise an ``UninstallationError``. You can check for installed packages using :func:`is_installed()`. :param package_names: The names of one or more Python packages (strings).
3.640535
5.662596
0.642909
return short_option[1] in argument[1:] if is_short_option(argument) else argument == long_option
def match_option(argument, short_option, long_option)
Match a command line argument against a short and long option. :param argument: The command line argument (a string). :param short_option: The short option (a string). :param long_option: The long option (a string). :returns: :data:`True` if the argument matches, :data:`False` otherwise.
5.188576
10.549869
0.491814
return ('%s=%s' % (option, value) in arguments or contains_sublist(arguments, [option, value]))
def match_option_with_value(arguments, option, value)
Check if a list of command line options contains an option with a value. :param arguments: The command line arguments (a list of strings). :param option: The long option (a string). :param value: The expected value (a string). :returns: :data:`True` if the command line contains the option/value pair, :data:`False` otherwise.
6.039196
9.924401
0.60852
n = len(sublst)
return any((sublst == lst[i:i + n]) for i in range(len(lst) - n + 1))
def contains_sublist(lst, sublst)
Check if one list contains the items from another list (in the same order). :param lst: The main list. :param sublst: The sublist to check for. :returns: :data:`True` if the main list contains the items from the sublist in the same order, :data:`False` otherwise. Based on `this StackOverflow answer <http://stackoverflow.com/a/3314913>`_.
2.549692
3.457653
0.737405
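The three helpers above (match_option(), match_option_with_value() and contains_sublist()) compose as sketched below; this assumes they are in scope, along with the is_short_option() helper that match_option() relies on:

    arguments = ['install', '--user', '-v', '--cache-dir', '/tmp/cache']

    print(match_option('-v', '-v', '--verbose'))      # True: the short option matches
    print(match_option('--user', '-U', '--upgrade'))  # False: neither form matches
    print(match_option_with_value(arguments, '--cache-dir', '/tmp/cache'))                   # True: separate tokens
    print(match_option_with_value(['--cache-dir=/tmp/cache'], '--cache-dir', '/tmp/cache'))  # True: '=' form
    print(contains_sublist([1, 2, 3, 4], [2, 3]))     # True: the items appear consecutively and in order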
try:
    return numpy.fromfile(file, dtype=dtype, count=count, *args, **kwargs)
except (TypeError, IOError):
    return numpy.frombuffer(file.read(count * numpy.dtype(dtype).itemsize),
                            dtype=dtype, count=count, *args, **kwargs)
def fromfile(file, dtype, count, *args, **kwargs)
Wrapper around np.fromfile to support any file-like object.
2.174092
2.258636
0.962568
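A small sketch of the fromfile() wrapper above, assuming it and numpy are in scope: numpy.fromfile() rejects in-memory streams, so the wrapper falls back to reading the bytes and using numpy.frombuffer():

    import io
    import numpy

    stream = io.BytesIO(numpy.arange(6, dtype='<f4').tobytes())
    values = fromfile(stream, '<f4', 6)
    print(values)  # [0. 1. 2. 3. 4. 5.]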
if compensate:
    raise ParserFeatureNotImplementedError(u'Compensation has not been implemented yet.')
read_data = not meta_data_only
fcs_parser = FCSParser(path, read_data=read_data, channel_naming=channel_naming,
                       data_set=data_set, encoding=encoding)
if reformat_meta:
    fcs_parser.reformat_meta()
meta = fcs_parser.annotation
if meta_data_only:
    return meta
else:
    # Then include both meta and dataframe.
    df = fcs_parser.dataframe
    df = df.astype(dtype) if dtype else df
    return meta, df
def parse(path, meta_data_only=False, compensate=False, channel_naming='$PnS', reformat_meta=False, data_set=0, dtype='float32', encoding="utf-8")
Parse an fcs file at the location specified by the path.

Parameters
----------
path: str
    Path of .fcs file
meta_data_only: bool
    If True, the parse_fcs only returns the meta_data (the TEXT segment of the FCS file)
output_format: 'DataFrame' | 'ndarray'
    If set to 'DataFrame' the returned data is wrapped in a pandas DataFrame,
    otherwise it is returned as a numpy ndarray.
channel_naming: '$PnS' | '$PnN'
    Determines which meta data field is used for naming the channels.
    The default should be $PnS (even though it is not guaranteed to be unique)

    $PnN stands for the short name (guaranteed to be unique). Will look like 'FL1-H'
    $PnS stands for the actual name (not guaranteed to be unique). Will look like 'FSC-H' (Forward scatter)

    The chosen field will be used to populate self.channels

    Note: These names are not flipped in the implementation.
    It looks like they were swapped for some reason in the official FCS specification.
reformat_meta: bool
    If true, the meta data is reformatted with the channel information organized
    into a DataFrame and moved into the '_channels_' key
data_set: int
    Index of retrieved data set in the fcs file.
    This value specifies the data set being retrieved from an fcs file with multiple data sets.
dtype: str | None
    If provided, will force convert all data into this dtype.
    This is set by default to auto-convert to float32 to deal with cases in which the
    original data has been stored using a smaller data type (e.g., uint8). This modifies
    the original data, but should make follow up analysis safer in basically all cases.
encoding: str
    Provide encoding type of the text section.

Returns
-------
if meta_data_only is True:
    meta_data: dict
        Contains a dictionary with the meta data information
Otherwise:
    a 2-tuple with
        the first element the meta_data (dictionary)
        the second element the data (in either DataFrame or numpy format)

Examples
--------
fname = '../tests/data/EY_2013-05-03_EID_214_PID_1120_Piperacillin_Well_B7.001.fcs'
meta = parse_fcs(fname, meta_data_only=True)
meta, data_pandas = parse_fcs(fname, meta_data_only=False)
3.73905
4.228105
0.884332
file_handle.seek(0, 2)
self._file_size = file_handle.tell()
file_handle.seek(0)
data_segments = 0
# seek the correct data set in fcs
nextdata_offset = 0
while data_segments <= data_set:
    self.read_header(file_handle, nextdata_offset)
    self.read_text(file_handle)
    if '$NEXTDATA' in self.annotation:
        data_segments += 1
        nextdata_offset = self.annotation['$NEXTDATA']
        file_handle.seek(nextdata_offset)
        if nextdata_offset == 0 and data_segments < data_set:
            warnings.warn("File does not contain the number of data sets.")
            break
    else:
        if data_segments != 0:
            warnings.warn('File does not contain $NEXTDATA information.')
        break
if read_data:
    self.read_data(file_handle)
def load_file(self, file_handle, data_set=0, read_data=True)
Load the requested parts of the file into memory.
3.179104
3.125593
1.01712
obj = cls()
with contextlib.closing(BytesIO(data)) as file_handle:
    obj.load_file(file_handle)
return obj
def from_data(cls, data)
Load an FCS file from a bytes-like object.

Args:
    data: buffer containing contents of an FCS file.

Returns:
    FCSParser instance with data loaded
4.339969
5.827878
0.744691
header = {'FCS format': file_handle.read(6)}
file_handle.read(4)  # 4 space characters after the FCS format
for field in ('text start', 'text end', 'data start', 'data end',
              'analysis start', 'analysis end'):
    s = file_handle.read(8)
    try:
        field_value = int(s)
    except ValueError:
        field_value = 0
    header[field] = field_value + nextdata_offset
# Checking that the location of the TEXT segment is specified
for k in ('text start', 'text end'):
    if header[k] == 0:
        raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate '
                         u'information about the "{}" segment.)'.format(self.path, k))
    elif header[k] > self._file_size:
        raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment '
                         u'is larger than file size'.format(self.path, k))
    else:
        # All OK
        pass
self._data_start = header['data start']
self._data_end = header['data end']
if header['analysis end'] - header['analysis start'] != 0:
    warnings.warn(u'There appears to be some information in the ANALYSIS segment of file '
                  u'{0}. However, it might not be read correctly.'.format(self.path))
self.annotation['__header__'] = header
def read_header(self, file_handle, nextdata_offset=0)
Read the header of the FCS file.

The header specifies where the annotation, data and analysis are located inside the binary file.

Args:
    file_handle: buffer containing FCS file.
    nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
4.120269
3.983016
1.03446
delimiter = raw_text[0]
if raw_text[-1] != delimiter:
    raw_text = raw_text.strip()
    if raw_text[-1] != delimiter:
        msg = (u'The first two characters were:\n {}. The last two characters were: {}\n'
               u'Parser expects the same delimiter character in beginning '
               u'and end of TEXT segment'.format(raw_text[:2], raw_text[-2:]))
        raise ParserFeatureNotImplementedError(msg)
# The delimiter is escaped by being repeated (two consecutive delimiters). This code splits
# on the escaped delimiter first, so there is no need for extra logic to distinguish
# actual delimiters from escaped delimiters.
nested_split_list = [x.split(delimiter) for x in raw_text[1:-1].split(delimiter * 2)]
# 1:-1 above removes the first and last characters which are reserved for the delimiter.
# Flatten the nested list to a list of elements (alternating keys and values)
raw_text_elements = nested_split_list[0]
for partial_element_list in nested_split_list[1:]:
    # Rejoin two parts of an element that was split by an escaped delimiter (the end and
    # start of two successive sub-lists in nested_split_list)
    raw_text_elements[-1] += (delimiter + partial_element_list[0])
    raw_text_elements.extend(partial_element_list[1:])
keys, values = raw_text_elements[0::2], raw_text_elements[1::2]
return dict(zip(keys, values))
def _extract_text_dict(self, raw_text)
Parse the TEXT segment of the FCS file into a python dictionary.
4.883101
4.638112
1.052821
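Because _extract_text_dict() above does not touch self, its delimiter handling can be sketched in isolation; the TEXT segment below is synthetic, and calling the method directly through the class (here assumed to be FCSParser, with None standing in for self) is only for illustration:

    raw_text = '/key1/value1/key2/value with an escaped // slash/'
    print(FCSParser._extract_text_dict(None, raw_text))
    # {'key1': 'value1', 'key2': 'value with an escaped / slash'}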
header = self.annotation['__header__']  # For convenience

#####
# Read in the TEXT segment of the FCS file
# There are some differences in how the
file_handle.seek(header['text start'], 0)
raw_text = file_handle.read(header['text end'] - header['text start'] + 1)
try:
    raw_text = raw_text.decode(self._encoding)
except UnicodeDecodeError as e:
    # Catching the exception and logging it in this way kills the traceback, but
    # we can worry about this later.
    logger.warning(u'Encountered an illegal utf-8 byte in the header.\n Illegal utf-8 '
                   u'characters will be ignored.\n{}'.format(e))
    raw_text = raw_text.decode(self._encoding, errors='ignore')
text = self._extract_text_dict(raw_text)

##
# Extract channel names and convert some of the channel properties
# and other fields into numeric data types (from string)
# Note: do not use regular expressions for manipulations here.
# Regular expressions are too heavy in terms of computation time.
pars = int(text['$PAR'])
if '$P0B' in text.keys():  # Checking whether channel number count starts from 0 or from 1
    self.channel_numbers = range(0, pars)  # Channel number count starts from 0
else:
    self.channel_numbers = range(1, pars + 1)  # Channel numbers start from 1

# Extract parameter names
try:
    names_n = tuple([text['$P{0}N'.format(i)] for i in self.channel_numbers])
except KeyError:
    names_n = []
try:
    names_s = tuple([text['$P{0}S'.format(i)] for i in self.channel_numbers])
except KeyError:
    names_s = []
self.channel_names_s = names_s
self.channel_names_n = names_n

# Convert some of the fields into integer values
keys_encoding_bits = ['$P{0}B'.format(i) for i in self.channel_numbers]
add_keys_to_convert_to_int = ['$NEXTDATA', '$PAR', '$TOT']
keys_to_convert_to_int = keys_encoding_bits + add_keys_to_convert_to_int
for key in keys_to_convert_to_int:
    value = text[key]
    text[key] = int(value)
self.annotation.update(text)

# Update data start segments if needed
if self._data_start == 0:
    self._data_start = int(text['$BEGINDATA'])
if self._data_end == 0:
    self._data_end = int(text['$ENDDATA'])
def read_text(self, file_handle)
Parse the TEXT segment of the FCS file. The TEXT segment contains meta data associated with the FCS file. Converting all meta keywords to lower case.
4.156642
3.980895
1.044148
start = self.annotation['__header__']['analysis start']
end = self.annotation['__header__']['analysis end']
if start != 0 and end != 0:
    file_handle.seek(start, 0)
    self._analysis = file_handle.read(end - start)
else:
    self._analysis = None
def read_analysis(self, file_handle)
Read the ANALYSIS segment of the FCS file and store it in self.analysis.

Warning: This has never been tested with an actual fcs file that contains an analysis segment.

Args:
    file_handle: buffer containing FCS data
3.166427
3.726477
0.84971
text = self.annotation
keys = text.keys()
if '$MODE' not in text or text['$MODE'] != 'L':
    raise ParserFeatureNotImplementedError(u'Mode not implemented')
if '$P0B' in keys:
    raise ParserFeatureNotImplementedError(u'Not expecting a parameter starting at 0')
if text['$BYTEORD'] not in ['1,2,3,4', '4,3,2,1', '1,2', '2,1']:
    raise ParserFeatureNotImplementedError(u'$BYTEORD {} '
                                           u'not implemented'.format(text['$BYTEORD']))
def _verify_assumptions(self)
Verify that all assumptions made by the parser hold.
6.307889
5.780038
1.091323
names_s, names_n = self.channel_names_s, self.channel_names_n

# Figure out which channel names to use
if self._channel_naming == '$PnS':
    channel_names, channel_names_alternate = names_s, names_n
else:
    channel_names, channel_names_alternate = names_n, names_s

if len(channel_names) == 0:
    channel_names = channel_names_alternate

if len(set(channel_names)) != len(channel_names):
    msg = (u'The default channel names (defined by the {} '
           u'parameter in the FCS file) were not unique. To avoid '
           u'problems in downstream analysis, the channel names '
           u'have been switched to the alternate channel names '
           u'defined in the FCS file. To avoid '
           u'seeing this warning message, explicitly instruct '
           u'the FCS parser to use the alternate channel names by '
           u'specifying the channel_naming parameter.')
    msg = msg.format(self._channel_naming)
    warnings.warn(msg)
    channel_names = channel_names_alternate

return channel_names
def get_channel_names(self)
Get list of channel names. Raises a warning if the names are not unique.
3.497808
3.37851
1.035311
self._verify_assumptions()
text = self.annotation

if (self._data_start > self._file_size) or (self._data_end > self._file_size):
    raise ValueError(u'The FCS file "{}" is corrupted. Part of the data segment '
                     u'is missing.'.format(self.path))

num_events = text['$TOT']  # Number of events recorded
num_pars = text['$PAR']  # Number of parameters recorded

if text['$BYTEORD'].strip() == '1,2,3,4' or text['$BYTEORD'].strip() == '1,2':
    endian = '<'
elif text['$BYTEORD'].strip() == '4,3,2,1' or text['$BYTEORD'].strip() == '2,1':
    endian = '>'
else:
    msg = 'Unrecognized byte order ({})'.format(text['$BYTEORD'])
    raise ParserFeatureNotImplementedError(msg)

# dictionary to convert from FCS format to numpy convention
conversion_dict = {'F': 'f', 'D': 'f', 'I': 'u'}

if text['$DATATYPE'] not in conversion_dict.keys():
    raise ParserFeatureNotImplementedError('$DATATYPE = {0} is not yet '
                                           'supported.'.format(text['$DATATYPE']))

# Calculations to figure out data types of each of parameters
# $PnB specifies the number of bits reserved for a measurement of parameter n
bytes_per_par_list = [int(text['$P{0}B'.format(i)] / 8) for i in self.channel_numbers]
par_numeric_type_list = [
    '{endian}{type}{size}'.format(endian=endian,
                                  type=conversion_dict[text['$DATATYPE']],
                                  size=bytes_per_par)
    for bytes_per_par in bytes_per_par_list
]

# Parser for list mode. Here, the order is a list of tuples.
# Each tuple stores event related information
file_handle.seek(self._data_start, 0)  # Go to the part of the file where data starts

##
# Read in the data
if len(set(par_numeric_type_list)) > 1:
    # This branch deals with files in which the different columns (channels)
    # were encoded with different types; i.e., a mixed data format.
    dtype = ','.join(par_numeric_type_list)
    data = fromfile(file_handle, dtype, num_events)

    # The dtypes in the numpy array `data` above are associated with both a name
    # and a type; i.e.,
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.recarray.html
    # The names are assigned automatically.
    # In order for this code to work correctly with the pandas DataFrame constructor,
    # we convert the *names* of the dtypes to the channel names we want to use.
    names = self.get_channel_names()
    if six.PY2:
        encoded_names = [name.encode('ascii', errors='replace') for name in names]
    else:
        # Assume Python 3 (or newer).
        encoded_names = [name for name in names]
    data.dtype.names = tuple(encoded_names)
else:
    # values saved in a single data format
    dtype = par_numeric_type_list[0]
    data = fromfile(file_handle, dtype, num_events * num_pars)
    data = data.reshape((num_events, num_pars))

##
# Convert to native byte order
# This is needed for working with pandas data structures
native_code = '<' if (sys.byteorder == 'little') else '>'
if endian != native_code:
    # swaps the actual bytes and also the endianness
    data = data.byteswap().newbyteorder()

self._data = data
def read_data(self, file_handle)
Read the DATA segment of the FCS file.
4.588406
4.507584
1.01793
if self._data is None:
    with open(self.path, 'rb') as f:
        self.read_data(f)
return self._data
def data(self)
Get parsed DATA segment of the FCS file.
3.481134
3.134141
1.110714
if self._analysis is None:
    with open(self.path, 'rb') as f:
        self.read_analysis(f)
return self._analysis
def analysis(self)
Get ANALYSIS segment of the FCS file.
4.498753
4.226617
1.064386
meta = self.annotation  # For shorthand (passed by reference)
channel_properties = []

for key, value in meta.items():
    if key[:3] == '$P1':
        if key[3] not in string.digits:
            channel_properties.append(key[3:])

# Capture all the channel information in a list of lists -- used to create a data frame
channel_matrix = [
    [meta.get('$P{0}{1}'.format(ch, p)) for p in channel_properties]
    for ch in self.channel_numbers
]

# Remove this information from the dictionary
for ch in self.channel_numbers:
    for p in channel_properties:
        key = '$P{0}{1}'.format(ch, p)
        if key in meta:
            meta.pop(key)

num_channels = meta['$PAR']
column_names = ['$Pn{0}'.format(p) for p in channel_properties]

df = pd.DataFrame(channel_matrix, columns=column_names,
                  index=(1 + numpy.arange(num_channels)))

if '$PnE' in column_names:
    df['$PnE'] = df['$PnE'].apply(lambda x: x.split(','))

df.index.name = 'Channel Number'
meta['_channels_'] = df
meta['_channel_names_'] = self.get_channel_names()
def reformat_meta(self)
Collect the meta data information in a more user friendly format. Function looks through the meta data, collecting the channel related information into a dataframe and moving it into the _channels_ key.
4.165298
3.933127
1.05903
data = self.data
channel_names = self.get_channel_names()
return pd.DataFrame(data, columns=channel_names)
def dataframe(self)
Construct Pandas dataframe.
4.422435
3.597589
1.229277
cache_file = self.cache.get(requirement)
if cache_file:
    if self.needs_invalidation(requirement, cache_file):
        logger.info("Invalidating old %s binary (source has changed) ..", requirement)
        cache_file = None
else:
    logger.debug("%s hasn't been cached yet, doing so now.", requirement)
if not cache_file:
    # Build the binary distribution.
    try:
        raw_file = self.build_binary_dist(requirement)
    except BuildFailed:
        logger.warning("Build of %s failed, checking for missing dependencies ..", requirement)
        if self.system_package_manager.install_dependencies(requirement):
            raw_file = self.build_binary_dist(requirement)
        else:
            raise
    # Transform the binary distribution archive into a form that we can re-use.
    fd, transformed_file = tempfile.mkstemp(prefix='pip-accel-bdist-', suffix='.tar.gz')
    try:
        archive = tarfile.open(transformed_file, 'w:gz')
        try:
            for member, from_handle in self.transform_binary_dist(raw_file):
                archive.addfile(member, from_handle)
        finally:
            archive.close()
        # Push the binary distribution archive to all available backends.
        with open(transformed_file, 'rb') as handle:
            self.cache.put(requirement, handle)
    finally:
        # Close file descriptor before removing the temporary file.
        # Without closing Windows is complaining that the file cannot
        # be removed because it is used by another process.
        os.close(fd)
        # Cleanup the temporary file.
        os.remove(transformed_file)
    # Get the absolute pathname of the file in the local cache.
    cache_file = self.cache.get(requirement)
    # Enable checksum based cache invalidation.
    self.persist_checksum(requirement, cache_file)
archive = tarfile.open(cache_file, 'r:gz')
try:
    for member in archive.getmembers():
        yield member, archive.extractfile(member.name)
finally:
    archive.close()
def get_binary_dist(self, requirement)
Get or create a cached binary distribution archive.

:param requirement: A :class:`.Requirement` object.
:returns: An iterable of tuples with two values each: A :class:`tarfile.TarInfo` object and a
          file-like object.

Gets the cached binary distribution that was previously built for the given requirement. If no
binary distribution has been cached yet, a new binary distribution is built and added to the cache.

Uses :func:`build_binary_dist()` to build binary distribution archives. If this fails with a build
error :func:`get_binary_dist()` will use :class:`.SystemPackageManager` to check for and install
missing system packages and retry the build when missing system packages were installed.
3.514962
3.353827
1.048045
if self.config.trust_mod_times:
    return requirement.last_modified > os.path.getmtime(cache_file)
else:
    checksum = self.recall_checksum(cache_file)
    return checksum and checksum != requirement.checksum
def needs_invalidation(self, requirement, cache_file)
Check whether a cached binary distribution needs to be invalidated. :param requirement: A :class:`.Requirement` object. :param cache_file: The pathname of a cached binary distribution (a string). :returns: :data:`True` if the cached binary distribution needs to be invalidated, :data:`False` otherwise.
5.512504
5.934484
0.928894
# EAFP instead of LBYL because of concurrency between pip-accel
# processes (https://docs.python.org/2/glossary.html#term-lbyl).
checksum_file = '%s.txt' % cache_file
try:
    with open(checksum_file) as handle:
        contents = handle.read()
    return contents.strip()
except IOError as e:
    if e.errno == errno.ENOENT:
        # Gracefully handle missing checksum files.
        return None
    else:
        # Don't swallow exceptions we don't expect!
        raise
def recall_checksum(self, cache_file)
Get the checksum of the input used to generate a binary distribution archive. :param cache_file: The pathname of the binary distribution archive (a string). :returns: The checksum (a string) or :data:`None` (when no checksum is available).
5.503837
5.409207
1.017494
if not self.config.trust_mod_times:
    checksum_file = '%s.txt' % cache_file
    with AtomicReplace(checksum_file) as temporary_file:
        with open(temporary_file, 'w') as handle:
            handle.write('%s\n' % requirement.checksum)
def persist_checksum(self, requirement, cache_file)
Persist the checksum of the input used to generate a binary distribution. :param requirement: A :class:`.Requirement` object. :param cache_file: The pathname of a cached binary distribution (a string). .. note:: The checksum is only calculated and persisted when :attr:`~.Config.trust_mod_times` is :data:`False`.
4.688095
3.550522
1.320396
try:
    return self.build_binary_dist_helper(requirement, ['bdist_dumb', '--format=tar'])
except (BuildFailed, NoBuildOutput):
    logger.warning("Build of %s failed, falling back to alternative method ..", requirement)
    return self.build_binary_dist_helper(requirement, ['bdist', '--formats=gztar'])
def build_binary_dist(self, requirement)
Build a binary distribution archive from an unpacked source distribution.

:param requirement: A :class:`.Requirement` object.
:returns: The pathname of a binary distribution archive (a string).
:raises: :exc:`.BinaryDistributionError` when the original command and the fall back both fail to
         produce a binary distribution archive.

This method uses the following command to build binary distributions:

.. code-block:: sh

   $ python setup.py bdist_dumb --format=tar

This command can fail for two main reasons:

1. The package is missing binary dependencies.
2. The ``setup.py`` script doesn't (properly) implement ``bdist_dumb`` binary distribution format support.

The first case is dealt with in :func:`get_binary_dist()`. To deal with the second case this method
falls back to the following command:

.. code-block:: sh

   $ python setup.py bdist

This fall back is almost never needed, but there are Python packages out there which require this fall
back (this method was added because the installation of ``Paver==1.2.3`` failed, see `issue 37`_ for
details about that).

.. _issue 37: https://github.com/paylogic/pip-accel/issues/37
5.098399
4.281759
1.190725
# Copy the tar archive file by file so we can rewrite the pathnames.
logger.debug("Transforming binary distribution: %s.", archive_path)
archive = tarfile.open(archive_path, 'r')
for member in archive.getmembers():
    # Some source distribution archives on PyPI that are distributed as ZIP
    # archives contain really weird permissions: the world readable bit is
    # missing. I've encountered this with the httplib2 (0.9) and
    # google-api-python-client (1.2) packages. I assume this is a bug of
    # some kind in the packaging process on "their" side.
    if member.mode & stat.S_IXUSR:
        # If the owner has execute permissions we'll give everyone read and
        # execute permissions (only the owner gets write permissions).
        member.mode = 0o755
    else:
        # If the owner doesn't have execute permissions we'll give everyone
        # read permissions (only the owner gets write permissions).
        member.mode = 0o644
    # In my testing the `dumb' tar files created with the `python
    # setup.py bdist' and `python setup.py bdist_dumb' commands contain
    # pathnames that are relative to `/' in one way or another:
    #
    # - In almost all cases the pathnames look like this:
    #
    #     ./home/peter/.virtualenvs/pip-accel/lib/python2.7/site-packages/pip_accel/__init__.py
    #
    # - After working on pip-accel for several years I encountered
    #   a pathname like this (Python 2.6 on Mac OS X 10.10.5):
    #
    #     Users/peter/.virtualenvs/pip-accel/lib/python2.6/site-packages/pip_accel/__init__.py
    #
    # Both of the above pathnames are relative to `/' but in different
    # ways :-). The following normpath(join('/', ...))) pathname
    # manipulation logic is intended to handle both cases.
    original_pathname = member.name
    absolute_pathname = os.path.normpath(os.path.join('/', original_pathname))
    if member.isdev():
        logger.warn("Ignoring device file: %s.", absolute_pathname)
    elif not member.isdir():
        modified_pathname = os.path.relpath(absolute_pathname, self.config.install_prefix)
        if os.path.isabs(modified_pathname):
            logger.warn("Failed to transform pathname in binary distribution"
                        " to relative path! (original: %r, modified: %r)",
                        original_pathname, modified_pathname)
        else:
            # Rewrite /usr/local to /usr (same goes for all prefixes of course).
            modified_pathname = re.sub('^local/', '', modified_pathname)
            # Rewrite /dist-packages/ to /site-packages/. For details see
            # https://wiki.debian.org/Python#Deviations_from_upstream.
            if self.config.on_debian:
                modified_pathname = modified_pathname.replace('/dist-packages/', '/site-packages/')
            # Enable operators to debug the transformation process.
            logger.debug("Transformed %r -> %r.", original_pathname, modified_pathname)
            # Get the file data from the input archive.
            handle = archive.extractfile(original_pathname)
            # Yield the modified metadata and a handle to the data.
            member.name = modified_pathname
            yield member, handle
archive.close()
def transform_binary_dist(self, archive_path)
Transform binary distributions into a form that can be cached for future use.

:param archive_path: The pathname of the original binary distribution archive.
:returns: An iterable of tuples with two values each:

          1. A :class:`tarfile.TarInfo` object.
          2. A file-like object.

This method transforms a binary distribution archive created by :func:`build_binary_dist()` into a
form that can be cached for future use. This comes down to making the pathnames inside the archive
relative to the `prefix` that the binary distribution was built for.
5.166022
5.054017
1.022162
# TODO This is quite slow for modules like Django. Speed it up! Two choices:
#  1. Run the external tar program to unpack the archive. This will
#     slightly complicate the fixing up of hashbangs.
#  2. Using links? The plan: We can maintain a "seed" environment under
#     $PIP_ACCEL_CACHE and use symbolic and/or hard links to populate other
#     places based on the "seed" environment.
module_search_path = set(map(os.path.normpath, sys.path))
prefix = os.path.normpath(prefix or self.config.install_prefix)
python = os.path.normpath(python or self.config.python_executable)
installed_files = []
for member, from_handle in members:
    pathname = member.name
    if virtualenv_compatible:
        # Some binary distributions include C header files (see for example
        # the greenlet package) however the subdirectory of include/ in a
        # virtual environment is a symbolic link to a subdirectory of
        # /usr/include/ so we should never try to install C header files
        # inside the directory pointed to by the symbolic link. Instead we
        # implement the same workaround that pip uses to avoid this
        # problem.
        pathname = re.sub('^include/', 'include/site/', pathname)
    if self.config.on_debian and '/site-packages/' in pathname:
        # On Debian based system wide Python installs the /site-packages/
        # directory is not in Python's module search path while
        # /dist-packages/ is. We try to be compatible with this.
        match = re.match('^(.+?)/site-packages', pathname)
        if match:
            site_packages = os.path.normpath(os.path.join(prefix, match.group(0)))
            dist_packages = os.path.normpath(os.path.join(prefix, match.group(1), 'dist-packages'))
            if dist_packages in module_search_path and site_packages not in module_search_path:
                pathname = pathname.replace('/site-packages/', '/dist-packages/')
    pathname = os.path.join(prefix, pathname)
    if track_installed_files:
        # Track the installed file's absolute pathname.
        installed_files.append(pathname)
    directory = os.path.dirname(pathname)
    if not os.path.isdir(directory):
        logger.debug("Creating directory: %s ..", directory)
        makedirs(directory)
    logger.debug("Creating file: %s ..", pathname)
    with open(pathname, 'wb') as to_handle:
        contents = from_handle.read()
        if contents.startswith(b'#!/'):
            contents = self.fix_hashbang(contents, python)
        to_handle.write(contents)
    os.chmod(pathname, member.mode)
if track_installed_files:
    self.update_installed_files(installed_files)
def install_binary_dist(self, members, virtualenv_compatible=True, prefix=None, python=None, track_installed_files=False)
Install a binary distribution into the given prefix.

:param members: An iterable of tuples with two values each:

                1. A :class:`tarfile.TarInfo` object.
                2. A file-like object.

:param prefix: The "prefix" under which the requirements should be installed. This will be a pathname
               like ``/usr``, ``/usr/local`` or the pathname of a virtual environment. Defaults to
               :attr:`.Config.install_prefix`.
:param python: The pathname of the Python executable to use in the shebang line of all executable
               Python scripts inside the binary distribution. Defaults to
               :attr:`.Config.python_executable`.
:param virtualenv_compatible: Whether to enable workarounds to make the resulting filenames compatible
                              with virtual environments (defaults to :data:`True`).
:param track_installed_files: If this is :data:`True` (not the default for this method because of
                              backwards compatibility) pip-accel will create ``installed-files.txt``
                              as required by pip to properly uninstall packages.

This method installs a binary distribution created by :class:`build_binary_dist()` into the given
prefix (a directory like ``/usr``, ``/usr/local`` or a virtual environment).
4.967408
4.738803
1.048241
lines = contents.splitlines()
if lines:
    hashbang = lines[0]
    # Get the base name of the command in the hashbang.
    executable = os.path.basename(hashbang)
    # Deal with hashbangs like `#!/usr/bin/env python'.
    executable = re.sub(b'^env ', b'', executable)
    # Only rewrite hashbangs that actually involve Python.
    if re.match(b'^python(\\d+(\\.\\d+)*)?$', executable):
        lines[0] = b'#!' + python.encode('ascii')
        logger.debug("Rewriting hashbang %r to %r!", hashbang, lines[0])
        contents = b'\n'.join(lines)
return contents
def fix_hashbang(self, contents, python)
Rewrite hashbangs_ to use the correct Python executable. :param contents: The contents of the script whose hashbang should be fixed (a string). :param python: The absolute pathname of the Python executable (a string). :returns: The modified contents of the script (a string). .. _hashbangs: http://en.wikipedia.org/wiki/Shebang_(Unix)
3.871368
3.902416
0.992044
# Find the *.egg-info directory where installed-files.txt should be created.
pkg_info_files = [fn for fn in installed_files if fnmatch.fnmatch(fn, '*.egg-info/PKG-INFO')]
# I'm not (yet) sure how reliable the above logic is, so for now
# I'll err on the side of caution and only act when the results
# seem to be reliable.
if len(pkg_info_files) != 1:
    logger.warning("Not tracking installed files (couldn't reliably determine *.egg-info directory)")
else:
    egg_info_directory = os.path.dirname(pkg_info_files[0])
    installed_files_path = os.path.join(egg_info_directory, 'installed-files.txt')
    logger.debug("Tracking installed files in %s ..", installed_files_path)
    with open(installed_files_path, 'w') as handle:
        for pathname in installed_files:
            handle.write('%s\n' % os.path.relpath(pathname, egg_info_directory))
def update_installed_files(self, installed_files)
Track the files installed by a package so pip knows how to remove the package. This method is used by :func:`install_binary_dist()` (which collects the list of installed files for :func:`update_installed_files()`). :param installed_files: A list of absolute pathnames (strings) with the files that were just installed.
3.510929
3.475338
1.010241
known_files = [GLOBAL_CONFIG, LOCAL_CONFIG, self.environment.get('PIP_ACCEL_CONFIG')]
absolute_paths = [parse_path(pathname) for pathname in known_files if pathname]
return [pathname for pathname in absolute_paths if os.path.isfile(pathname)]
def available_configuration_files(self)
A list of strings with the absolute pathnames of the available configuration files.
7.000463
6.224244
1.124709
configuration_file = parse_path(configuration_file)
logger.debug("Loading configuration file: %s", configuration_file)
parser = configparser.RawConfigParser()
files_loaded = parser.read(configuration_file)
if len(files_loaded) != 1:
    msg = "Failed to load configuration file! (%s)"
    raise Exception(msg % configuration_file)
elif not parser.has_section('pip-accel'):
    msg = "Missing 'pip-accel' section in configuration file! (%s)"
    raise Exception(msg % configuration_file)
else:
    self.configuration.update(parser.items('pip-accel'))
def load_configuration_file(self, configuration_file)
Load configuration defaults from a configuration file. :param configuration_file: The pathname of a configuration file (a string). :raises: :exc:`Exception` when the configuration file cannot be loaded.
2.519744
2.67486
0.94201
if self.overrides.get(property_name) is not None:
    return self.overrides[property_name]
elif environment_variable and self.environment.get(environment_variable):
    return self.environment[environment_variable]
elif self.configuration.get(configuration_option) is not None:
    return self.configuration[configuration_option]
else:
    return default
def get(self, property_name=None, environment_variable=None, configuration_option=None, default=None)
Internal shortcut to get a configuration option's value. :param property_name: The name of the property that users can set on the :class:`Config` class (a string). :param environment_variable: The name of the environment variable (a string). :param configuration_option: The name of the option in the configuration file (a string). :param default: The default value. :returns: The value of the environment variable or configuration file option or the default value.
1.90266
2.092638
0.909216
return self.get(property_name='source_index', default=os.path.join(self.data_directory, 'sources'))
def source_index(self)
The absolute pathname of pip-accel's source index directory (a string). This is the ``sources`` subdirectory of :data:`data_directory`.
8.319409
5.025047
1.655588
return expand_path(self.get(property_name='data_directory', environment_variable='PIP_ACCEL_CACHE', configuration_option='data-directory', default='/var/cache/pip-accel' if is_root() else '~/.pip-accel'))
def data_directory(self)
The absolute pathname of the directory where pip-accel's data files are stored (a string).

- Environment variable: ``$PIP_ACCEL_CACHE``
- Configuration option: ``data-directory``
- Default: ``/var/cache/pip-accel`` if running as ``root``, ``~/.pip-accel`` otherwise
7.473978
3.422629
2.183695
return self.get(property_name='install_prefix', default='/usr/local' if sys.prefix == '/usr' and self.on_debian else sys.prefix)
def install_prefix(self)
The absolute pathname of the installation prefix to use (a string). This property is based on :data:`sys.prefix` except that when :data:`sys.prefix` is ``/usr`` and we're running on a Debian derived system ``/usr/local`` is used instead. The reason for this is that on Debian derived systems only apt (dpkg) should be allowed to touch files in ``/usr/lib/pythonX.Y/dist-packages`` and ``python setup.py install`` knows this (see the ``posix_local`` installation scheme in ``/usr/lib/pythonX.Y/sysconfig.py`` on Debian derived systems). Because pip-accel replaces ``python setup.py install`` it has to replicate this logic. Inferring all of this from the :mod:`sysconfig` module would be nice but that module wasn't available in Python 2.6.
8.295375
5.836298
1.421342
return self.get(property_name='python_executable', default=sys.executable or os.path.join(self.install_prefix, 'bin', 'python'))
def python_executable(self)
The absolute pathname of the Python executable (a string).
5.598079
5.87207
0.95334
value = self.get(property_name='auto_install',
                 environment_variable='PIP_ACCEL_AUTO_INSTALL',
                 configuration_option='auto-install')
if value is not None:
    return coerce_boolean(value)
def auto_install(self)
Whether automatic installation of missing system packages is enabled.

:data:`True` if automatic installation of missing system packages is enabled, :data:`False` if it is
disabled, :data:`None` otherwise (in this case the user will be prompted at the appropriate time).

- Environment variable: ``$PIP_ACCEL_AUTO_INSTALL`` (refer to :func:`~humanfriendly.coerce_boolean()`
  for details on how the value of the environment variable is interpreted)
- Configuration option: ``auto-install`` (also parsed using :func:`~humanfriendly.coerce_boolean()`)
- Default: :data:`None`
7.628026
4.128444
1.847676
on_appveyor = coerce_boolean(os.environ.get('APPVEYOR', 'False'))
return coerce_boolean(self.get(property_name='trust_mod_times',
                               environment_variable='PIP_ACCEL_TRUST_MOD_TIMES',
                               configuration_option='trust-mod-times',
                               default=(not on_appveyor)))
def trust_mod_times(self)
Whether to trust file modification times for cache invalidation.

- Environment variable: ``$PIP_ACCEL_TRUST_MOD_TIMES``
- Configuration option: ``trust-mod-times``
- Default: :data:`True` unless the AppVeyor_ continuous integration environment is detected
  (see `issue 62`_).

.. _AppVeyor: http://www.appveyor.com
.. _issue 62: https://github.com/paylogic/pip-accel/issues/62
5.839927
3.986008
1.465107
return coerce_boolean(self.get(property_name='s3_cache_readonly', environment_variable='PIP_ACCEL_S3_READONLY', configuration_option='s3-readonly', default=False))
def s3_cache_readonly(self)
Whether the Amazon S3 bucket is considered read only.

If this is :data:`True` then the Amazon S3 bucket will only be used for
:class:`~pip_accel.caches.s3.S3CacheBackend.get()` operations (all
:class:`~pip_accel.caches.s3.S3CacheBackend.put()` operations will be disabled).

- Environment variable: ``$PIP_ACCEL_S3_READONLY`` (refer to :func:`~humanfriendly.coerce_boolean()`
  for details on how the value of the environment variable is interpreted)
- Configuration option: ``s3-readonly`` (also parsed using :func:`~humanfriendly.coerce_boolean()`)
- Default: :data:`False`

For details please refer to the :mod:`pip_accel.caches.s3` module.
6.686495
3.126032
2.138972
value = self.get(property_name='s3_cache_timeout',
                 environment_variable='PIP_ACCEL_S3_TIMEOUT',
                 configuration_option='s3-timeout')
try:
    n = int(value)
    if n >= 0:
        return n
except (TypeError, ValueError):
    # The bare `except` in the original snippet is narrowed here to the
    # exceptions int() can raise for missing or malformed values.
    pass
# Fall back to Boto's default of 60 seconds (see the docstring).
return 60
def s3_cache_timeout(self)
The socket timeout in seconds for connections to Amazon S3 (an integer).

This value is injected into Boto's configuration to override the default socket timeout used for
connections to Amazon S3.

- Environment variable: ``$PIP_ACCEL_S3_TIMEOUT``
- Configuration option: ``s3-timeout``
- Default: ``60`` (`Boto's default`_)

.. _Boto's default: http://boto.readthedocs.org/en/latest/boto_config_tut.html
5.777425
3.490401
1.655232
timer = Timer()
self.check_prerequisites()
with PatchedBotoConfig():
    # Check if the distribution archive is available.
    raw_key = self.get_cache_key(filename)
    logger.info("Checking if distribution archive is available in S3 bucket: %s", raw_key)
    key = self.s3_bucket.get_key(raw_key)
    if key is None:
        logger.debug("Distribution archive is not available in S3 bucket.")
    else:
        # Download the distribution archive to the local binary index.
        # TODO Shouldn't this use LocalCacheBackend.put() instead of
        #      implementing the same steps manually?!
        logger.info("Downloading distribution archive from S3 bucket ..")
        file_in_cache = os.path.join(self.config.binary_cache, filename)
        makedirs(os.path.dirname(file_in_cache))
        with AtomicReplace(file_in_cache) as temporary_file:
            key.get_contents_to_filename(temporary_file)
        logger.debug("Finished downloading distribution archive from S3 bucket in %s.", timer)
        return file_in_cache
def get(self, filename)
Download a distribution archive from the configured Amazon S3 bucket. :param filename: The filename of the distribution archive (a string). :returns: The pathname of a distribution archive on the local file system or :data:`None`. :raises: :exc:`.CacheBackendError` when any underlying method fails.
4.535517
4.098672
1.106582
if self.config.s3_cache_readonly:
    logger.info('Skipping upload to S3 bucket (using S3 in read only mode).')
else:
    timer = Timer()
    self.check_prerequisites()
    with PatchedBotoConfig():
        from boto.s3.key import Key
        raw_key = self.get_cache_key(filename)
        logger.info("Uploading distribution archive to S3 bucket: %s", raw_key)
        key = Key(self.s3_bucket)
        key.key = raw_key
        try:
            key.set_contents_from_file(handle)
        except Exception as e:
            logger.info("Encountered error writing to S3 bucket, "
                        "falling back to read only mode (exception: %s)", e)
            self.config.s3_cache_readonly = True
        else:
            logger.info("Finished uploading distribution archive to S3 bucket in %s.", timer)
def put(self, filename, handle)
Upload a distribution archive to the configured Amazon S3 bucket. If the :attr:`~.Config.s3_cache_readonly` configuration option is enabled this method does nothing. :param filename: The filename of the distribution archive (a string). :param handle: A file-like object that provides access to the distribution archive. :raises: :exc:`.CacheBackendError` when any underlying method fails.
3.788227
3.257897
1.162783
if not hasattr(self, 'cached_bucket'):
    self.check_prerequisites()
    with PatchedBotoConfig():
        from boto.exception import BotoClientError, BotoServerError, S3ResponseError
        # The following try/except block translates unexpected exceptions
        # raised by Boto into a CacheBackendError exception.
        try:
            # The following try/except block handles the expected exception
            # raised by Boto when an Amazon S3 bucket does not exist.
            try:
                logger.debug("Connecting to Amazon S3 bucket: %s", self.config.s3_cache_bucket)
                self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
            except S3ResponseError as e:
                if e.status == 404 and self.config.s3_cache_create_bucket:
                    logger.info("Amazon S3 bucket doesn't exist yet, creating it now: %s",
                                self.config.s3_cache_bucket)
                    self.s3_connection.create_bucket(self.config.s3_cache_bucket)
                    self.cached_bucket = self.s3_connection.get_bucket(self.config.s3_cache_bucket)
                else:
                    # Don't swallow exceptions we can't handle.
                    raise
        except (BotoClientError, BotoServerError):
            # NOTE: The original error message (a string literal passed as the first
            # argument) appears to have been stripped from this snippet.
            raise CacheBackendError(bucket=repr(self.config.s3_cache_bucket))
return self.cached_bucket
def s3_bucket(self)
Connect to the user defined Amazon S3 bucket. Called on demand by :func:`get()` and :func:`put()`. Caches its return value so that only a single connection is created. :returns: A :class:`boto.s3.bucket.Bucket` object. :raises: :exc:`.CacheBackendDisabledError` when the user hasn't defined :attr:`.Config.s3_cache_bucket`. :raises: :exc:`.CacheBackendError` when the connection to the Amazon S3 bucket fails.
2.875436
2.594875
1.108121
if not hasattr(self, 'cached_connection'):
    self.check_prerequisites()
    with PatchedBotoConfig():
        import boto
        from boto.exception import BotoClientError, BotoServerError, NoAuthHandlerFound
        from boto.s3.connection import S3Connection, SubdomainCallingFormat, OrdinaryCallingFormat
        try:
            # Configure the number of retries and the socket timeout used
            # by Boto. Based on the snippet given in the following email:
            # https://groups.google.com/d/msg/boto-users/0osmP0cUl5Y/X4NdlMGWKiEJ
            if not boto.config.has_section(BOTO_CONFIG_SECTION):
                boto.config.add_section(BOTO_CONFIG_SECTION)
            boto.config.set(BOTO_CONFIG_SECTION,
                            BOTO_CONFIG_NUM_RETRIES_OPTION,
                            str(self.config.s3_cache_retries))
            boto.config.set(BOTO_CONFIG_SECTION,
                            BOTO_CONFIG_SOCKET_TIMEOUT_OPTION,
                            str(self.config.s3_cache_timeout))
            logger.debug("Connecting to Amazon S3 API ..")
            endpoint = urlparse(self.config.s3_cache_url)
            host, _, port = endpoint.netloc.partition(':')
            kw = dict(
                host=host,
                port=int(port) if port else None,
                is_secure=(endpoint.scheme == 'https'),
                calling_format=(SubdomainCallingFormat() if host == S3Connection.DefaultHost
                                else OrdinaryCallingFormat()),
            )
            try:
                self.cached_connection = S3Connection(**kw)
            except NoAuthHandlerFound:
                logger.debug("Amazon S3 API credentials missing, retrying with anonymous connection ..")
                self.cached_connection = S3Connection(anon=True, **kw)
        except (BotoClientError, BotoServerError):
            raise CacheBackendError()
return self.cached_connection
def s3_connection(self)
Connect to the Amazon S3 API. If the connection attempt fails because Boto can't find credentials the attempt is retried once with an anonymous connection. Called on demand by :attr:`s3_bucket`. :returns: A :class:`boto.s3.connection.S3Connection` object. :raises: :exc:`.CacheBackendError` when the connection to the Amazon S3 API fails.
3.279479
3.13325
1.04667
try:
    return self.unbound_method(self.instance, section, name, **kw)
except Exception:
    return default
def get(self, section, name, default=None, **kw)
Replacement for :func:`boto.pyami.config.Config.get()`.
6.604995
6.874976
0.96073
# Escape the requirement's name for use in a regular expression.
name_pattern = escape_name(self.name)
# Escape the requirement's version for use in a regular expression.
version_pattern = re.escape(self.version)
# Create a regular expression that matches any of the known source
# distribution archive extensions.
extension_pattern = '|'.join(re.escape(ext) for ext in ARCHIVE_EXTENSIONS if ext != '.whl')
# Compose the regular expression pattern to match filenames of source
# distribution archives in the local source index directory.
pattern = '^%s-%s(%s)$' % (name_pattern, version_pattern, extension_pattern)
# Compile the regular expression for case insensitive matching.
compiled_pattern = re.compile(pattern, re.IGNORECASE)
# Find the matching source distribution archives.
return [os.path.join(self.config.source_index, fn)
        for fn in os.listdir(self.config.source_index)
        if compiled_pattern.match(fn)]
def related_archives(self)
The pathnames of the source distribution(s) for this requirement (a list of strings). .. note:: This property is very new in pip-accel and its logic may need some time to mature. For now any misbehavior by this property shouldn't be too much of a problem because the pathnames reported by this property are only used for cache invalidation (see the :attr:`last_modified` and :attr:`checksum` properties).
3.200819
3.047514
1.050305
mtimes = list(map(os.path.getmtime, self.related_archives))
return max(mtimes) if mtimes else time.time()
def last_modified(self)
The last modified time of the requirement's source distribution archive(s) (a number). The value of this property is based on the :attr:`related_archives` property. If no related archives are found the current time is reported. In the balance between not invalidating cached binary distributions enough and invalidating them too frequently, this property causes the latter to happen.
5.983157
3.379438
1.770459
probably_sdist = os.path.isfile(os.path.join(self.source_directory, 'setup.py'))
probably_wheel = len(glob.glob(os.path.join(self.source_directory, '*.dist-info', 'WHEEL'))) > 0
if probably_wheel and not probably_sdist:
    return True
elif probably_sdist and not probably_wheel:
    return False
elif probably_sdist and probably_wheel:
    variables = dict(requirement=self.setuptools_requirement,
                     directory=self.source_directory)
    # NOTE: The original error message (a string literal passed as the first
    # argument) appears to have been stripped from this snippet.
    raise UnknownDistributionFormat(**variables)
else:
    variables = dict(requirement=self.setuptools_requirement,
                     directory=self.source_directory)
    # NOTE: The original error message appears to have been stripped here as well.
    raise UnknownDistributionFormat(**variables)
def is_wheel(self)
:data:`True` when the requirement is a wheel, :data:`False` otherwise. .. note:: To my surprise it seems to be non-trivial to determine whether a given :class:`pip.req.InstallRequirement` object produced by pip's internal Python API concerns a source distribution or a wheel distribution. There's a :class:`pip.req.InstallRequirement.is_wheel` property but I'm currently looking at a wheel distribution whose ``is_wheel`` property returns :data:`None`, apparently because the requirement's ``url`` property is also :data:`None`. Whether this is an obscure implementation detail of pip or caused by the way pip-accel invokes pip, I really can't tell (yet).
2.901427
2.687514
1.079595
if not self.is_wheel:
    raise TypeError("Requirement is not a wheel distribution!")
for distribution in find_distributions(self.source_directory):
    return distribution
msg = "pkg_resources didn't find a wheel distribution in %s!"
raise Exception(msg % self.source_directory)
def wheel_metadata(self)
Get the distribution metadata of an unpacked wheel distribution.
6.432342
5.313571
1.21055
environment = os.environ.get('VIRTUAL_ENV')
if environment:
    if not same_directories(sys.prefix, environment):
        # NOTE: The original error message (a string literal passed as the first
        # argument) appears to have been stripped from this snippet.
        raise EnvironmentMismatchError(environment=environment, prefix=sys.prefix)
def validate_environment(self)
Make sure :data:`sys.prefix` matches ``$VIRTUAL_ENV`` (if defined). This may seem like a strange requirement to dictate but it avoids hairy issues like `documented here <https://github.com/paylogic/pip-accel/issues/5>`_. The most sneaky thing is that ``pip`` doesn't have this problem (de-facto) because ``virtualenv`` copies ``pip`` wherever it goes... (``pip-accel`` on the other hand has to be installed by the user).
7.286853
5.42936
1.34212
makedirs(self.config.source_index)
makedirs(self.config.eggs_cache)
def initialize_directories(self)
Automatically create local directories required by pip-accel.
10.661097
8.2684
1.289378
cleanup_timer = Timer()
cleanup_counter = 0
for entry in os.listdir(self.config.source_index):
    pathname = os.path.join(self.config.source_index, entry)
    if os.path.islink(pathname) and not os.path.exists(pathname):
        logger.warn("Cleaning up broken symbolic link: %s", pathname)
        os.unlink(pathname)
        cleanup_counter += 1
logger.debug("Cleaned up %i broken symbolic links from source index in %s.",
             cleanup_counter, cleanup_timer)
def clean_source_index(self)
Cleanup broken symbolic links in the local source distribution index.

The purpose of this method requires some context to understand. Let me preface this by stating that I
realize I'm probably overcomplicating things, but I like to preserve forward / backward compatibility
when possible and I don't feel like dropping everyone's locally cached source distribution archives
without a good reason to do so. With that out of the way:

- Versions of pip-accel based on pip 1.4.x maintained a local source distribution index based on a
  directory containing symbolic links pointing directly into pip's download cache. When files were
  removed from pip's download cache, broken symbolic links remained in pip-accel's local source
  distribution index directory. This resulted in very confusing error messages. To avoid this
  :func:`clean_source_index()` cleaned up broken symbolic links whenever pip-accel was about to
  invoke pip.

- More recent versions of pip (6.x) no longer support the same style of download cache that contains
  source distribution archives that can be re-used directly by pip-accel. To cope with the changes in
  pip 6.x new versions of pip-accel tell pip to download source distribution archives directly into
  the local source distribution index directory maintained by pip-accel.

- It is very reasonable for users of pip-accel to have multiple versions of pip-accel installed on
  their system (imagine a dozen Python virtual environments that won't all be updated at the same
  time; this is the situation I always find myself in :-). These versions of pip-accel will be
  sharing the same local source distribution index directory.

- All of this leads up to the local source distribution index directory containing a mixture of
  symbolic links and regular files with no obvious way to atomically and gracefully upgrade the local
  source distribution index directory while avoiding fights between old and new versions of
  pip-accel :-).

- I could of course switch to storing the new local source distribution index in a differently named
  directory (avoiding potential conflicts between multiple versions of pip-accel) but then I would
  have to introduce a new configuration option, otherwise everyone who has configured pip-accel to
  store its source index in a non-default location could still be bitten by compatibility issues.

For now I've decided to keep using the same directory for the local source distribution index and to
keep cleaning up broken symbolic links. This enables cooperation between old and new versions of
pip-accel and avoids trashing users' local source distribution indexes. The main disadvantage is that
pip-accel is still required to clean up broken symbolic links...
2.922097
2.650668
1.1024
try: requirements = self.get_requirements(arguments, use_wheels=self.arguments_allow_wheels(arguments)) have_wheels = any(req.is_wheel for req in requirements) if have_wheels and not self.setuptools_supports_wheels(): logger.info("Preparing to upgrade to setuptools >= 0.8 to enable wheel support ..") requirements.extend(self.get_requirements(['setuptools >= 0.8'])) if requirements: if '--user' in arguments: from site import USER_BASE kw.setdefault('prefix', USER_BASE) return self.install_requirements(requirements, **kw) else: logger.info("Nothing to do! (requirements already installed)") return 0 finally: self.cleanup_temporary_directories()
def install_from_arguments(self, arguments, **kw)
Download, unpack, build and install the specified requirements. This function is a simple wrapper for :func:`get_requirements()`, :func:`install_requirements()` and :func:`cleanup_temporary_directories()` that implements the default behavior of the pip accelerator. If you're extending or embedding pip-accel you may want to call the underlying methods instead. If the requirement set includes wheels and ``setuptools >= 0.8`` is not yet installed, it will be added to the requirement set and installed together with the other requirement(s) in order to enable the usage of distributions installed from wheels (their metadata is different). :param arguments: The command line arguments to ``pip install ..`` (a list of strings). :param kw: Any keyword arguments are passed on to :func:`install_requirements()`. :returns: The result of :func:`install_requirements()`.
4.525355
3.544575
1.276699
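For callers embedding pip-accel, a minimal sketch of driving this method from Python, assuming `PipAccelerator` and `Config` are importable from the `pip_accel` package as the surrounding entries suggest:
```
from pip_accel import PipAccelerator  # assumed import locations
from pip_accel.config import Config

accelerator = PipAccelerator(Config())
# Roughly equivalent to `pip-accel install --requirement requirements.txt`.
num_installed = accelerator.install_from_arguments(['--requirement', 'requirements.txt'])
print("Installed %i requirement(s)." % num_installed)
```
Any keyword arguments passed here are forwarded to `install_requirements()`, per the docstring above.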
arguments = self.decorate_arguments(arguments) # Demote hash sum mismatch log messages from CRITICAL to DEBUG (hiding # implementation details from users unless they want to see them). with DownloadLogFilter(): with SetupRequiresPatch(self.config, self.eggs_links): # Use a new build directory for each run of get_requirements(). self.create_build_directory() # Check whether -U or --upgrade was given. if any(match_option(a, '-U', '--upgrade') for a in arguments): logger.info("Checking index(es) for new version (-U or --upgrade was given) ..") else: # If -U or --upgrade wasn't given and all requirements can be # satisfied using the archives in pip-accel's local source # index we don't need pip to connect to PyPI looking for new # versions (that will just slow us down). try: return self.unpack_source_dists(arguments, use_wheels=use_wheels) except DistributionNotFound: logger.info("We don't have all distribution archives yet!") # Get the maximum number of retries from the configuration if the # caller didn't specify a preference. if max_retries is None: max_retries = self.config.max_retries # If not all requirements are available locally we use pip to # download the missing source distribution archives from PyPI (we # retry a couple of times in case pip reports recoverable # errors). for i in range(max_retries): try: return self.download_source_dists(arguments, use_wheels=use_wheels) except Exception as e: if i + 1 < max_retries: # On all but the last iteration we swallow exceptions # during downloading. logger.warning("pip raised exception while downloading distributions: %s", e) else: # On the last iteration we don't swallow exceptions # during downloading because the error reported by pip # is the most sensible error for us to report. raise logger.info("Retrying after pip failed (%i/%i) ..", i + 1, max_retries)
def get_requirements(self, arguments, max_retries=None, use_wheels=False)
Use pip to download and unpack the requested source distribution archives. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param max_retries: The maximum number of times that pip will be asked to download distribution archives (this helps to deal with intermittent failures). If this is :data:`None` then :attr:`~.Config.max_retries` is used. :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). .. warning:: Requirements which are already installed are not included in the result. If this breaks your use case consider using pip's ``--ignore-installed`` option.
6.425458
6.19822
1.036662
arguments = list(arguments) for i, value in enumerate(arguments): is_constraint_file = (i >= 1 and match_option(arguments[i - 1], '-c', '--constraint')) is_requirement_file = (i >= 1 and match_option(arguments[i - 1], '-r', '--requirement')) if not is_constraint_file and not is_requirement_file and os.path.isfile(value): arguments[i] = '%s#md5=%s' % (create_file_url(value), hash_files('md5', value)) return arguments
def decorate_arguments(self, arguments)
Change pathnames of local files into ``file://`` URLs with ``#md5=...`` fragments. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :returns: A copy of the command line arguments with pathnames of local files rewritten to ``file://`` URLs. When pip-accel calls pip to download missing distribution archives and the user specified the pathname of a local distribution archive on the command line, pip will (by default) *not* copy the archive into the download directory if an archive for the same package name and version is already present. This can lead to the confusing situation where the user specifies a local distribution archive to install, a different (older) archive for the same package and version is present in the download directory and `pip-accel` installs the older archive instead of the newer archive. To avoid this confusing behavior, the :func:`decorate_arguments()` method rewrites the command line arguments given to ``pip install`` so that pathnames of local archives are changed into ``file://`` URLs that include a fragment with the hash of the file's contents. Here's an example: - Local pathname: ``/tmp/pep8-1.6.3a0.tar.gz`` - File URL: ``file:///tmp/pep8-1.6.3a0.tar.gz#md5=19cbf0b633498ead63fb3c66e5f1caf6`` When pip fills the download directory and encounters a previously cached distribution archive it will check the hash, realize the contents have changed and replace the archive in the download directory.
3.375492
3.212589
1.050708
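To make the described rewrite concrete, here is a small, self-contained approximation of the same transformation for a single local archive; the helper name is hypothetical and only the standard library is used:
```
import hashlib

try:
    from urllib.request import pathname2url  # Python 3
except ImportError:
    from urllib import pathname2url  # Python 2

def decorate_local_archive(pathname):
    # Hypothetical helper mirroring the rewrite for one local archive:
    # turn a pathname into a file:// URL carrying an #md5=... fragment.
    with open(pathname, 'rb') as handle:
        digest = hashlib.md5(handle.read()).hexdigest()
    return 'file://%s#md5=%s' % (pathname2url(pathname), digest)

# decorate_local_archive('/tmp/pep8-1.6.3a0.tar.gz')
# -> 'file:///tmp/pep8-1.6.3a0.tar.gz#md5=...'
```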
unpack_timer = Timer() logger.info("Unpacking distribution(s) ..") with PatchedAttribute(pip_install_module, 'PackageFinder', CustomPackageFinder): requirements = self.get_pip_requirement_set(arguments, use_remote_index=False, use_wheels=use_wheels) logger.info("Finished unpacking %s in %s.", pluralize(len(requirements), "distribution"), unpack_timer) return requirements
def unpack_source_dists(self, arguments, use_wheels=False)
Find and unpack local source distributions and discover their metadata. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). :returns: A list of :class:`pip_accel.req.Requirement` objects. :raises: Any exceptions raised by pip, for example :exc:`pip.exceptions.DistributionNotFound` when not all requirements can be satisfied. This function checks whether there are local source distributions available for all requirements, unpacks the source distribution archives and finds the names and versions of the requirements. By using the ``pip install --download`` command we avoid reimplementing the following pip features: - Parsing of ``requirements.txt`` (including recursive parsing). - Resolution of possibly conflicting pinned requirements. - Unpacking source distributions in multiple formats. - Finding the name & version of a given source distribution.
6.223573
6.598703
0.943151
download_timer = Timer() logger.info("Downloading missing distribution(s) ..") requirements = self.get_pip_requirement_set(arguments, use_remote_index=True, use_wheels=use_wheels) logger.info("Finished downloading distribution(s) in %s.", download_timer) return requirements
def download_source_dists(self, arguments, use_wheels=False)
Download missing source distributions. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). :raises: Any exceptions raised by pip.
5.5335
5.532075
1.000258
filtered_requirements = [] for requirement in requirement_set.requirements.values(): # The `satisfied_by' property is set by pip when a requirement is # already satisfied (i.e. a version of the package that satisfies # the requirement is already installed) and -I, --ignore-installed # is not used. We filter out these requirements because pip never # unpacks distributions for these requirements, so pip-accel can't # do anything useful with such requirements. if requirement.satisfied_by: continue # The `constraint' property marks requirement objects that # constrain the acceptable version(s) of another requirement but # don't define a requirement themselves, so we filter them out. if requirement.constraint: continue # All other requirements are reported to callers. filtered_requirements.append(requirement) self.reported_requirements.append(requirement) return sorted([Requirement(self.config, r) for r in filtered_requirements], key=lambda r: r.name.lower())
def transform_pip_requirement_set(self, requirement_set)
Transform pip's requirement set into one that `pip-accel` can work with. :param requirement_set: The :class:`pip.req.RequirementSet` object reported by pip. :returns: A list of :class:`pip_accel.req.Requirement` objects. This function converts the :class:`pip.req.RequirementSet` object reported by pip into a list of :class:`pip_accel.req.Requirement` objects.
5.800132
5.696985
1.018105
install_timer = Timer() install_types = [] if any(not req.is_wheel for req in requirements): install_types.append('binary') if any(req.is_wheel for req in requirements): install_types.append('wheel') logger.info("Installing from %s distributions ..", concatenate(install_types)) # Track installed files by default (unless the caller specifically opted out). kw.setdefault('track_installed_files', True) num_installed = 0 for requirement in requirements: # When installing setuptools we need to uninstall distribute, # otherwise distribute will shadow setuptools and all sorts of # strange issues can occur (e.g. upgrading to the latest # setuptools to gain wheel support and then having everything # blow up because distribute doesn't know about wheels). if requirement.name == 'setuptools' and is_installed('distribute'): uninstall('distribute') if requirement.is_editable: logger.debug("Installing %s in editable form using pip.", requirement) with TransactionalUpdate(requirement): command = InstallCommand() opts, args = command.parse_args(['--no-deps', '--editable', requirement.source_directory]) command.run(opts, args) elif requirement.is_wheel: logger.info("Installing %s wheel distribution using pip ..", requirement) with TransactionalUpdate(requirement): wheel_version = pip_wheel_module.wheel_version(requirement.source_directory) pip_wheel_module.check_compatibility(wheel_version, requirement.name) requirement.pip_requirement.move_wheel_files(requirement.source_directory) else: logger.info("Installing %s binary distribution using pip-accel ..", requirement) with TransactionalUpdate(requirement): binary_distribution = self.bdists.get_binary_dist(requirement) self.bdists.install_binary_dist(binary_distribution, **kw) num_installed += 1 logger.info("Finished installing %s in %s.", pluralize(num_installed, "requirement"), install_timer) return num_installed
def install_requirements(self, requirements, **kw)
Manually install a requirement set from binary and/or wheel distributions. :param requirements: A list of :class:`pip_accel.req.Requirement` objects. :param kw: Any keyword arguments are passed on to :func:`~pip_accel.bdist.BinaryDistributionManager.install_binary_dist()`. :returns: The number of packages that were just installed (an integer).
4.718241
4.284459
1.101245
stat = os.stat(self.build_directory) shutil.rmtree(self.build_directory) os.makedirs(self.build_directory, stat.st_mode)
def clear_build_directory(self)
Clear the build directory where pip unpacks the source distribution archives.
2.647906
2.642046
1.002218
while self.build_directories: shutil.rmtree(self.build_directories.pop()) for requirement in self.reported_requirements: requirement.remove_temporary_source() while self.eggs_links: symbolic_link = self.eggs_links.pop() if os.path.islink(symbolic_link): os.unlink(symbolic_link)
def cleanup_temporary_directories(self)
Delete the build directories and any temporary directories created by pip.
3.930565
3.567502
1.101769
if isinstance(record.msg, basestring): message = record.msg.lower() if all(kw in message for kw in self.KEYWORDS): record.levelname = 'DEBUG' record.levelno = logging.DEBUG return 1
def filter(self, record)
Change the severity of selected log records.
3.608731
3.181656
1.13423
arguments = sys.argv[1:] # If no arguments are given, the help text of pip-accel is printed. if not arguments: usage() sys.exit(0) # If no install subcommand is given we pass the command line straight # to pip without any changes and exit immediately afterwards. if 'install' not in arguments: # This will not return. os.execvp('pip', ['pip'] + arguments) else: arguments = [arg for arg in arguments if arg != 'install'] config = Config() # Initialize logging output. coloredlogs.install( fmt=config.log_format, level=config.log_verbosity, ) # Adjust verbosity based on -v, -q, --verbose, --quiet options. for argument in list(arguments): if match_option(argument, '-v', '--verbose'): coloredlogs.increase_verbosity() elif match_option(argument, '-q', '--quiet'): coloredlogs.decrease_verbosity() # Perform the requested action(s). try: accelerator = PipAccelerator(config) accelerator.install_from_arguments(arguments) except NothingToDoError as e: # Don't print a traceback for this (it's not very user friendly) and # exit with status zero to stay compatible with pip. For more details # please refer to https://github.com/paylogic/pip-accel/issues/47. logger.warning("%s", e) sys.exit(0) except Exception: logger.exception("Caught unhandled exception!") sys.exit(1)
def main()
The command line interface for the ``pip-accel`` program.
4.21976
4.006878
1.053129
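A sketch of driving this entry point programmatically (for instance from a test); the module path in the import is an assumption:
```
import sys
from pip_accel.cli import main  # assumed module path for the entry point

# Simulate `pip-accel install --upgrade requests` from Python code.
sys.argv = ['pip-accel', 'install', '--upgrade', 'requests']
try:
    main()
except SystemExit as e:
    print("pip-accel exited with status %r" % e.code)
```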
pathname = os.path.join(self.config.binary_cache, filename) if os.path.isfile(pathname): logger.debug("Distribution archive exists in local cache (%s).", pathname) return pathname else: logger.debug("Distribution archive doesn't exist in local cache (%s).", pathname) return None
def get(self, filename)
Check if a distribution archive exists in the local cache. :param filename: The filename of the distribution archive (a string). :returns: The pathname of a distribution archive on the local file system or :data:`None`.
3.620987
2.858989
1.266527
file_in_cache = os.path.join(self.config.binary_cache, filename) logger.debug("Storing distribution archive in local cache: %s", file_in_cache) makedirs(os.path.dirname(file_in_cache)) # Stream the contents of the distribution archive to a temporary file # to avoid race conditions (e.g. partial reads) between multiple # processes that are using the local cache at the same time. with AtomicReplace(file_in_cache) as temporary_file: with open(temporary_file, 'wb') as temporary_file_handle: shutil.copyfileobj(handle, temporary_file_handle) logger.debug("Finished caching distribution archive in local cache.")
def put(self, filename, handle)
Store a distribution archive in the local cache. :param filename: The filename of the distribution archive (a string). :param handle: A file-like object that provides access to the distribution archive.
3.654816
3.316538
1.101997
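A hedged usage sketch for the cache `get()`/`put()` pair shown above; the backend class name, import path and constructor are assumptions based on the configuration attributes referenced in the code:
```
from pip_accel.caches.local import LocalCacheBackend  # assumed class & path
from pip_accel.config import Config

cache = LocalCacheBackend(Config())  # assumed constructor
filename = 'requests-2.7.0.tar.gz'   # hypothetical archive name

if cache.get(filename) is None:
    # Not cached yet: store a freshly built distribution archive
    # (the source path below is purely illustrative).
    with open('/tmp/requests-2.7.0.tar.gz', 'rb') as handle:
        cache.put(filename, handle)

print("Cached archive lives at %s" % cache.get(filename))
```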
"Normalize data to a list of strings." # Return None if no input was given. if not value: return [] return [v.strip() for v in value.splitlines() if v != ""]
def to_python(self, value)
Normalize data to a list of strings.
7.578749
5.253165
1.442702
"Check if value consists only of valid emails." # Use the parent's handling of required fields, etc. super(MultiEmailField, self).validate(value) try: for email in value: validate_email(email) except ValidationError: raise ValidationError(self.message, code=self.code)
def validate(self, value)
Check if value consists only of valid emails.
5.50346
4.106556
1.340164
if value in MULTI_EMAIL_FIELD_EMPTY_VALUES: return "" elif isinstance(value, six.string_types): return value elif isinstance(value, list): return "\n".join(value) raise ValidationError('Invalid format.')
def prep_value(self, value)
Prepare the value before the widget is effectively rendered.
5.410858
5.329253
1.015313
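The three field methods above follow the `MultiEmailField` pattern from Django's forms documentation; a sketch of wiring such a field into a form (the form and field names are illustrative, and `MultiEmailField` is assumed to be the class these methods belong to):
```
from django import forms

class ContactForm(forms.Form):
    subject = forms.CharField(max_length=100)
    # MultiEmailField is assumed to be the field class whose
    # to_python()/validate()/prep_value() methods are shown above.
    recipients = MultiEmailField()

form = ContactForm(data={
    'subject': 'Hello',
    'recipients': 'alice@example.com\nbob@example.com',
})
if form.is_valid():
    # cleaned_data['recipients'] is now a list of validated addresses.
    print(form.cleaned_data['recipients'])
```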
if _logger is not None: _log(INFO, "Process", local.name, "pause") Process.current().rsim()._gr.switch()
def pause() -> None
Pauses the current process indefinitely -- it will require another process to `resume()` it. When this resumption happens, the process returns from this function.
48.381775
35.339565
1.369054
if _logger is not None: _log(INFO, "Process", local.name, "advance", delay=delay) curr = Process.current() rsim = curr.rsim id_wakeup = rsim()._schedule(delay, curr.switch) # type: ignore try: rsim()._gr.switch() # type: ignore except Interrupt: rsim()._cancel(id_wakeup) # type: ignore raise
def advance(delay: float) -> None
Pauses the current process for the given delay (in simulated time). The process will be resumed when the simulation has advanced to the moment corresponding to `now() + delay`.
9.944774
8.961218
1.109757
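A small usage sketch for `advance()`; the import names are assumed to be exported by the `greensim` module these entries come from:
```
from greensim import Simulator, advance, now  # assumed exports

def worker(log):
    advance(3.0)       # sleep for 3 simulated time units
    log.append(now())  # expect 3.0
    advance(2.0)
    log.append(now())  # expect 5.0

sim = Simulator()
log = []
sim.add(worker, log)
sim.run()
print(log)  # expect [3.0, 5.0]
```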
def hook(event: Callable): def make_happen(*args_event: Any, **kwargs_event: Any) -> None: if name is not None: local.name = cast(str, name) for interval in intervals: advance(interval) add(event, *args_event, **kwargs_event) return make_happen return hook
def happens(intervals: Iterable[float], name: Optional[str] = None) -> Callable
Decorator used to set up a process that adds a new instance of another process at intervals dictated by the given sequence (which may be infinite). Example: the following program runs a process named `my_process` 5 times, each time spaced by 2.0 time units. ``` from itertools import repeat sim = Simulator() log = [] @happens(repeat(2.0, 5)) def my_process(the_log): the_log.append(now()) sim.add(my_process, log) sim.run() print(str(log)) # Expect: [2.0, 4.0, 6.0, 8.0, 10.0] ```
4.401642
5.238499
0.840249
global GREENSIM_TAG_ATTRIBUTE def hook(event: Callable): def wrapper(*args, **kwargs): event(*args, **kwargs) setattr(wrapper, GREENSIM_TAG_ATTRIBUTE, tags) return wrapper return hook
def tagged(*tags: Tags) -> Callable
Decorator for adding labels to a process. These labels are applied to any child Processes produced by the decorated event.
10.764654
10.39826
1.035236
class CleanUp(Interrupt): pass timeout = kwargs.get("timeout", None) if not isinstance(timeout, (float, int, type(None))): raise ValueError("The timeout keyword parameter can be either None or a number.") def wait_one(signal: Signal, common: Signal) -> None: try: signal.wait() common.turn_on() except CleanUp: pass # We simply set up multiple sub-processes respectively waiting for one of the signals. Once one of them has fired, # the others will all run no-op eventually, so no need for any explicit clean-up. common = Signal(name=local.name + "-selector").turn_off() if _logger is not None: _log(INFO, "select", "select", "select", signals=[sig.name for sig in signals]) procs = [] for signal in signals: procs.append(add(wait_one, signal, common)) try: common.wait(timeout) finally: for proc in procs: # Clean up the support processes. proc.interrupt(CleanUp()) return [signal for signal in signals if signal.is_on]
def select(*signals: Signal, **kwargs) -> List[Signal]
Allows the current process to wait for multiple concurrent signals. Waits until one of the signals turns on, at which point this signal is returned. :param timeout: If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and stops waiting on the set of :py:class:`Signal`s. In such a situation, a :py:class:`Timeout` exception is raised on the process.
6.569901
6.487915
1.012637
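A hedged sketch of waiting on several signals at once with `select()`, using the same assumed `greensim` exports:
```
from greensim import Simulator, Signal, advance, select  # assumed exports

def producer(signal, delay):
    advance(delay)
    signal.turn_on()

def consumer(first, second):
    fired = select(first, second, timeout=10.0)
    print("signal(s) on after select():", [s.name for s in fired])

sim = Simulator()
a = Signal(name="a").turn_off()
b = Signal(name="b").turn_off()
sim.add(producer, a, 2.0)
sim.add(producer, b, 5.0)
sim.add(consumer, a, b)
sim.run()
```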
return ( (event.timestamp, event.fn, event.args, event.kwargs) for event in self._events if not event.is_cancelled )
def events(self) -> Iterable[Tuple[Optional[float], Callable, Sequence[Any], Mapping[str, Any]]]
Iterates over scheduled events. Each event is a 4-tuple composed of the moment (on the simulated clock) the event should execute, the function corresponding to the event, its positional parameters (as a tuple of arbitrary length), and its keyword parameters (as a dictionary).
3.098402
3.740116
0.828424
if _logger is not None: self._log( DEBUG, "schedule", delay=delay, fn=event, args=args, kwargs=kwargs, counter=self._counter, __now=self.now() ) delay = float(delay) if delay < 0.0: raise ValueError("Delay must be positive.") # Use counter to strictly order events happening at the same simulated time. This gives a total order on events, # working around the heap queue not yielding a stable ordering. id_event = self._counter heappush(self._events, _Event(self._ts_now + delay, id_event, event, *args, **kwargs)) self._counter += 1 return id_event
def _schedule(self, delay: float, event: Callable, *args: Any, **kwargs: Any) -> int
Schedules a one-time event to be run along the simulation. The event is scheduled relative to current simulator time, so delay is expected to be a positive simulation time interval. The `event' parameter corresponds to a callable object (e.g. a function): it will be called so as to "execute" the event, with the positional and keyword parameters that follow `event` in the call to `_schedule()` (note that the value of these arguments are evaluated when `_schedule()` is called, not when the event is executed). Once this event function returns, the simulation carries on to the next event, or stops if none remain. Remark that this method is private, and is meant for internal usage by the :py:class:`Simulator` and :py:class:`Process` classes, and helper functions of this module. :return: Unique identifier for the scheduled event.
5.751576
5.834318
0.985818
if _logger is not None: self._log(DEBUG, "cancel", id=id_cancel) for event in self._events: if event.identifier == id_cancel: event.cancel() break
def _cancel(self, id_cancel) -> None
Cancels a previously scheduled event. This method is private, and is meant for internal usage by the :py:class:`Simulator` and :py:class:`Process` classes, and helper functions of this module.
5.862509
4.944449
1.185675
return self.add_in(0.0, fn_process, *args, **kwargs)
def add(self, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process'
Adds a process to the simulation. The process is embodied by a function, which will be called with the given positional and keyword parameters when the simulation runs. As a process, this function runs on a special green thread, and thus will be able to call functions `now()`, `advance()`, `pause()` and `stop()` to articulate its events across the simulated timeline and control the simulation's flow.
4.981164
5.431179
0.917142
process = Process(self, fn_process, self._gr) if _logger is not None: self._log(INFO, "add", __now=self.now(), fn=fn_process, args=args, kwargs=kwargs) self._schedule(delay, process.switch, *args, **kwargs) return process
def add_in(self, delay: float, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process'
Adds a process to the simulation, which is made to start after the given delay in simulated time. See method add() for more details.
6.82341
7.116077
0.958872
delay = moment - self.now() if delay < 0.0: raise ValueError( f"The given moment to start the process ({moment:f}) is in the past (now is {self.now():f})." ) return self.add_in(delay, fn_process, *args, **kwargs)
def add_at(self, moment: float, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process'
Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note that times in the past when compared to the current moment on the simulated clock are forbidden. See method add() for more details.
3.581806
3.351792
1.068624
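A short sketch contrasting `add()`, `add_in()` and `add_at()` (assumed `greensim` exports):
```
from greensim import Simulator, now  # assumed exports

def hello(label):
    print("%s at t=%.1f" % (label, now()))

sim = Simulator()
sim.add(hello, "immediately")       # starts at t=0.0
sim.add_in(2.5, hello, "in 2.5")    # starts 2.5 time units from now
sim.add_at(10.0, hello, "at 10.0")  # starts at the absolute moment 10.0
sim.run()
```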
if _logger is not None: self._log(INFO, "run", __now=self.now(), duration=duration) counter_stop_event = None if duration != inf: counter_stop_event = self._counter self._schedule(duration, self.stop) self._is_running = True while self.is_running and len(self._events) > 0: event = heappop(self._events) self._ts_now = event.timestamp or self._ts_now event.execute(self) if len(self._events) == 0: if _logger is not None: self._log(DEBUG, "out-of-events", __now=self.now()) self.stop() if counter_stop_event is not None: # Change the planned stop to a no-op. We would rather eliminate it, but this would force a re-sort of the # event queue. for (i, event) in enumerate(self._events): if event.identifier == counter_stop_event: if _logger is not None: self._log(DEBUG, "cancel-stop", counter=counter_stop_event) event.cancel() break
def run(self, duration: float = inf) -> None
Runs the simulation until a stopping condition is met (no more events, or an event invokes method stop()), or until the simulated clock hits the given duration.
4.436845
4.17611
1.062435
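A sketch of the duration limit described above; without it, the endless process below would keep the simulation running:
```
from greensim import Simulator, advance  # assumed exports

def ticker():
    while True:
        advance(1.0)
        print("tick")

sim = Simulator()
sim.add(ticker)
sim.run(duration=5.0)  # the scheduled stop event halts the endless ticker
```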
event = heappop(self._events) self._ts_now = event.timestamp or self._ts_now event.execute(self)
def step(self) -> None
Runs a single event of the simulation.
9.08449
7.514197
1.208977
if self.is_running: if _logger is not None: self._log(INFO, "stop", __now=self.now()) self._is_running = False
def stop(self) -> None
Stops the running simulation once the current event is done executing.
10.272399
8.47585
1.211961
for _, event, _, _ in self.events(): if hasattr(event, "__self__") and isinstance(event.__self__, Process): # type: ignore event.__self__.throw() # type: ignore self._events.clear() self._ts_now = 0.0
def _clear(self) -> None
Resets the internal state of the simulator, and sets the simulated clock back to 0.0. This discards all outstanding events and tears down hanging process instances.
6.691518
5.390939
1.241253
try: self._body(*args, **kwargs) if _logger is not None: _log(INFO, "Process", self.local.name, "die-finish") except Interrupt: if _logger is not None: _log(INFO, "Process", self.local.name, "die-interrupt")
def _run(self, *args: Any, **kwargs: Any) -> None
Wraps around the process body (the function that implements a process within the simulation) so as to catch the Interrupt that may eventually terminate the process.
5.90727
4.246343
1.391143
t.__init__.__get__(self)(*args)
def _bind_and_call_constructor(self, t: type, *args) -> None
Accesses the __init__ method of a type directly and calls it with *args. This allows the constructors of both superclasses to be called, as described in get_binding.md. This could be done using two calls to super() with a hack based on how Python searches __mro__: ``` super().__init__(run, parent) # calls greenlet.greenlet.__init__ super(greenlet.greenlet, self).__init__() # calls TaggedObject.__init__ ``` Python will always find greenlet.greenlet first since it is specified first, but will ignore it if it is the first argument to super, which is meant to indicate the subclass and thus is not meant to be called on. See: https://docs.python.org/3.7/library/functions.html#super This is indirect, confusing, and not in keeping with the purpose of super(), so the direct method was used instead.
21.04595
17.691027
1.18964
curr = greenlet.getcurrent() if not isinstance(curr, Process): raise TypeError("Current greenlet does not correspond to a Process instance.") return cast(Process, greenlet.getcurrent())
def current() -> 'Process'
Returns the instance of the process that is executing at the current moment.
4.629602
4.613133
1.00357
if _logger is not None: _log(INFO, "Process", self.local.name, "resume") self.rsim()._schedule(0.0, self.switch)
def resume(self) -> None
Resumes a process that has been previously paused by invoking function `pause()`. This does not interrupt the current process or event: it merely schedules again the target process, so that its execution carries on at the return of the `pause()` function, when this new wake-up event fires.
24.782295
18.665783
1.327686
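A pause/resume sketch combining the module-level `pause()` with the `resume()` method above (assumed `greensim` exports):
```
from greensim import Simulator, advance, now, pause  # assumed exports

def sleeper(log):
    pause()            # wait until another process resumes us
    log.append(now())

def waker(target):
    advance(4.0)
    target.resume()    # schedules the sleeper to carry on immediately

sim = Simulator()
log = []
proc = sim.add(sleeper, log)
sim.add(waker, proc)
sim.run()
print(log)  # expect [4.0]
```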
if inter is None: inter = Interrupt() if _logger is not None: _log(INFO, "Process", self.local.name, "interrupt", type=type(inter).__name__) self.rsim()._schedule(0.0, self.throw, inter)
def interrupt(self, inter: Optional[Interrupt] = None) -> None
Interrupts a process that has been previously :py:meth:`pause`d or made to :py:meth:`advance`, by resuming it immediately and raising an :py:class:`Interrupt` exception on it. This exception can be captured by the interrupted process and leveraged for various purposes, such as timing out on a wait or generating activity prompting immediate reaction. :param inter: Exception to raise on the :py:class:`Process`; if ``None`` is given, an instance of :py:class:`Interrupt` is raised. This allows one to use specialized :py:class:`Interrupt` subclasses so as to implement non-interfering mixed interruption stacks. For instance, a process may advance towards a certain timeout as it waits for multiple resources concurrently. Should it hit the timeout, it would :py:meth:`interrupt` the waiting processes so as to clean up after itself. If these processes have a timeout mechanism of their own, also based on interrupts, using a subclass can help them distinguish between these and the clean-up interrupts.
12.696233
11.580562
1.09634
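A sketch of using `interrupt()` as a watchdog; the names are the assumed `greensim` exports, and the interrupted process simply catches the exception:
```
from greensim import Simulator, Interrupt, advance, pause  # assumed exports

def waiter():
    try:
        pause()  # wait indefinitely ...
    except Interrupt:
        print("interrupted, cleaning up")  # ... unless interrupted

def watchdog(target):
    advance(3.0)
    target.interrupt()  # raises Interrupt inside the waiter at t=3.0

sim = Simulator()
proc = sim.add(waiter)
sim.add(watchdog, proc)
sim.run()
```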
class CancelBalk(Interrupt): pass self._counter += 1 if _logger is not None: self._log(INFO, "join") heappush(self._waiting, (self._get_order_token(self._counter), Process.current())) proc_balk = None if timeout is not None: def balk(proc): nonlocal proc_balk try: advance(cast(float, timeout)) proc.interrupt(Timeout()) except CancelBalk: pass finally: proc_balk = None # The balking process is started here. proc_balk = add(balk, Process.current()) try: pause() except Interrupt: current = Process.current() for index in reversed([i for i, (_, proc) in enumerate(self._waiting) if proc is current]): del self._waiting[index] heapify(self._waiting) raise finally: # Three situations can prompt a process to exit a queue: # # 1. The process is pop()ped out of the queue by a peer. # 2. The process balk()s out after a timeout. # 3. The process leaves the queue because of a distinct interrupt (besides CancelBalk). # # In cases 1 and 3, the balking process has never exited and is still in the advance() call. In both these # cases, the balking process should itself be interrupted, otherwise it may prompt the balking of a future # queue traversal. However, if we exit the queue because of case no. 2, the balking process is finished. # Interrupting it would do no harm (it has been tested by accident), but we mean to be deliberate about when # this interruption is necessary. So we perform the interrupt of the balking process only in cases 1 and 3; # in case 2, the balk() function exits, thereby clearing the reference we have here to it. Do remark that # whenever a timeout is not set, proc_balk remains None all the way, reducing the situation to case 1. if proc_balk is not None: proc_balk.interrupt(CancelBalk())
def join(self, timeout: Optional[float] = None)
Can be invoked only by a process: makes it join the queue. The order token is computed once for the process, before it is enqueued. Another process or event, or control code of some sort, must invoke method `pop()` of the queue so that the process can eventually leave the queue and carry on with its execution. :param timeout: If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and leaves the queue forcibly. In such a situation, a :py:class:`Timeout` exception is raised on the process.
8.233397
7.742668
1.06338
if not self.is_empty(): _, process = heappop(self._waiting) if _logger is not None: self._log(INFO, "pop", process=process.local.name) process.resume()
def pop(self)
Removes the top process from the queue, and resumes its execution. For an empty queue, this method is a no-op. This method may be invoked from anywhere (its use is not confined to processes, as method `join()` is).
12.202663
7.921525
1.540444
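A hedged sketch of `join()` and `pop()`; the queue class name and its no-argument constructor are assumptions based on the surrounding code:
```
from greensim import Simulator, Queue, advance, now  # assumed exports

def customer(queue, log):
    queue.join()   # wait until a server pops us off the queue
    log.append(now())

def server(queue):
    while True:
        advance(2.0)
        queue.pop()  # resume the next waiting customer (no-op if empty)

sim = Simulator()
queue = Queue()  # assumed no-argument constructor
log = []
for _ in range(3):
    sim.add(customer, queue, log)
sim.add(server, queue)
sim.run(duration=10.0)
print(log)  # expect customers resumed around 2.0, 4.0 and 6.0
```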
if _logger is not None: self._log(INFO, "turn-on") self._is_on = True while not self._queue.is_empty(): self._queue.pop() return self
def turn_on(self) -> "Signal"
Turns on the signal. If processes are waiting, they are all resumed. This may be invoked from any code. Remark that while processes are simultaneously resumed in simulated time, they are effectively resumed in the sequence corresponding to the queue discipline. Therefore, if one of the resumed processes turns the signal back off, remaining resumed processes join back the queue. If the queue discipline is not monotonic (for instance, if it bears a random component), then this toggling of the signal may reorder the processes.
5.402
5.66847
0.952991
if _logger is not None: self._log(INFO, "turn-off") self._is_on = False return self
def turn_off(self) -> "Signal"
Turns off the signal. This may be invoked from any code.
6.821876
6.76339
1.008647
if _logger is not None: self._log(INFO, "wait") while not self.is_on: self._queue.join(timeout)
def wait(self, timeout: Optional[float] = None) -> None
Makes the current process wait for the signal. If the signal is turned off, the process joins the signal's queue. :param timeout: If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and stops waiting for the :py:class:`Signal`. In such a situation, a :py:class:`Timeout` exception is raised on the process.
9.953361
12.053802
0.825744
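A sketch tying together `turn_on()`, `turn_off()` and `wait()` on a signal (assumed `greensim` exports; the no-argument `Signal()` constructor is an assumption):
```
from greensim import Simulator, Signal, advance, now  # assumed exports

def gatekeeper(gate):
    advance(5.0)
    gate.turn_on()   # releases every process waiting on the signal

def visitor(gate, log):
    gate.wait()      # joins the signal's queue while the gate is off
    log.append(now())

sim = Simulator()
gate = Signal().turn_off()  # assumed no-argument constructor
log = []
sim.add(gatekeeper, gate)
for _ in range(2):
    sim.add(visitor, gate, log)
sim.run()
print(log)  # expect [5.0, 5.0]
```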
if num_instances < 1: raise ValueError(f"Process must request at least 1 instance; here requested {num_instances}.") if num_instances > self.num_instances_total: raise ValueError( f"Process must request at most {self.num_instances_total} instances; here requested {num_instances}." ) if _logger is not None: self._log(INFO, "take", num_instances=num_instances, free=self.num_instances_free) proc = Process.current() if self._num_instances_free < num_instances: proc.local.__num_instances_required = num_instances try: self._waiting.join(timeout) finally: del proc.local.__num_instances_required self._num_instances_free -= num_instances if _logger is not None and proc in self._usage: self._log(WARNING, "take-again", already=self._usage[proc], more=num_instances) self._usage.setdefault(proc, 0) self._usage[proc] += num_instances
def take(self, num_instances: int = 1, timeout: Optional[float] = None) -> None
The current process reserves a certain number of instances. If there are not enough instances available, the process is made to join a queue. When this method returns, the process holds the instances it has requested to take. :param num_instances: Number of resource instances to take. :param timeout: If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and leaves the queue forcibly. In such a situation, a :py:class:`Timeout` exception is raised on the process.
3.61754
3.466196
1.043663
proc = Process.current() error_format = "Process %s holds %s instances, but requests to release more (%s)" if self._usage.get(proc, 0) > 0: if num_instances > self._usage[proc]: raise ValueError( error_format % (proc.local.name, self._usage[proc], num_instances) ) self._usage[proc] -= num_instances self._num_instances_free += num_instances if _logger is not None: self._log( INFO, "release", num_instances=num_instances, keeping=self._usage[proc], free=self.num_instances_free ) if self._usage[proc] <= 0: del self._usage[proc] if not self._waiting.is_empty(): num_instances_next = cast(int, self._waiting.peek().local.__num_instances_required) if num_instances_next <= self.num_instances_free: self._waiting.pop() elif _logger is not None: self._log(DEBUG, "release-nopop", next_requires=num_instances_next, free=self.num_instances_free) elif _logger is not None: self._log(DEBUG, "release-queueempty") else: raise RuntimeError( f"Process {proc.local.name} tries to release {num_instances} instances, but is holding none." )
def release(self, num_instances: int = 1) -> None
The current process releases instances it has previously taken. It may thus release fewer than it has taken. These released instances become free. If the total number of free instances then satisfies the request of the top process of the waiting queue, that process is popped off the queue and resumed.
3.530949
3.276914
1.077523
self.take(num_instances, timeout) yield self self.release(num_instances)
def using(self, num_instances: int = 1, timeout: Optional[float] = None)
Context manager around resource reservation: when the code block under the with statement is entered, the current process holds the instances it requested. When it exits, all these instances are released. Do not explicitly `release()` instances within the context block, at the risk of breaking instance management. If one needs to `release()` instances piecemeal, it should instead reserve the instances using `take()`. :param num_instances: Number of resource instances to take. :param timeout: If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and leaves the queue forcibly. In such a situation, a :py:class:`Timeout` exception is raised on the process.
9.135584
6.799055
1.343655
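A sketch of the `using()` context manager above; the `Resource` class name and its constructor argument are assumptions:
```
from greensim import Simulator, Resource, advance, now  # assumed exports

def job(machines, log):
    with machines.using(1):  # take one instance, release it on exit
        advance(3.0)         # hold the machine for 3 simulated time units
    log.append(now())

sim = Simulator()
machines = Resource(2)  # assumed: a pool of 2 instances in total
log = []
for _ in range(4):
    sim.add(job, machines, log)
sim.run()
print(log)  # expect [3.0, 3.0, 6.0, 6.0]
```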