Dataset schema (one record per extracted source file):

| Column | Dtype | Range / cardinality |
| --- | --- | --- |
| `id` | int64 | 0 to 458k |
| `file_name` | string | lengths 4 to 119 |
| `file_path` | string | lengths 14 to 227 |
| `content` | string | lengths 24 to 9.96M |
| `size` | int64 | 24 to 9.96M |
| `language` | string (categorical) | 1 value |
| `extension` | string (categorical) | 14 values |
| `total_lines` | int64 | 1 to 219k |
| `avg_line_length` | float64 | 2.52 to 4.63M |
| `max_line_length` | int64 | 5 to 9.91M |
| `alphanum_fraction` | float64 | 0 to 1 |
| `repo_name` | string | lengths 7 to 101 |
| `repo_stars` | int64 | 100 to 139k |
| `repo_forks` | int64 | 0 to 26.4k |
| `repo_open_issues` | int64 | 0 to 2.27k |
| `repo_license` | string (categorical) | 12 values |
| `repo_extraction_date` | string (categorical) | 433 values |
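Records with this schema are easier to work with programmatically than to read inline. Below is a minimal, hedged sketch of consuming such a dataset with the Hugging Face `datasets` library; the dataset identifier `user/python-github-code` is a hypothetical placeholder, since the dump does not name the actual dataset.

```python
# Hedged sketch: stream records with the schema above and filter them.
# "user/python-github-code" is a hypothetical placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("user/python-github-code", split="train", streaming=True)

for row in ds:
    # each row carries one source file plus repo-level metadata
    if row["repo_license"] == "GPL-3.0" and row["total_lines"] < 500:
        print(row["file_path"], row["size"], row["repo_stars"])
```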
**Record 28,200: `__init__.py`** (`DamnWidget_anaconda/anaconda_lib/__init__.py`)

```python
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
```

128 bytes · Python (`.py`) · 2 total lines · avg line length 62.5 · max line length 65 · alphanum fraction 0.776
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
**Record 28,201: `aenum.py`** (`DamnWidget_anaconda/anaconda_lib/aenum.py`)
"""Python Enumerations""" import sys as _sys __all__ = ['Enum', 'IntEnum', 'unique'] version = 1, 1, 6 pyver = float('%s.%s' % _sys.version_info[:2]) try: any except NameError: def any(iterable): for element in iterable: if element: return True return False try: from collections import OrderedDict except ImportError: OrderedDict = None try: basestring except NameError: # In Python 2 basestring is the ancestor of both str and unicode # in Python 3 it's just str, but was missing in 3.1 basestring = str try: unicode except NameError: # In Python 3 unicode no longer exists (it's just str) unicode = str class _RouteClassAttributeToGetattr(object): """Route attribute access on a class to __getattr__. This is a descriptor, used to define attributes that act differently when accessed through an instance and through a class. Instance access remains normal, but access to an attribute through a class will be routed to the class's __getattr__ method; this is done by raising AttributeError. """ def __init__(self, fget=None): self.fget = fget def __get__(self, instance, ownerclass=None): if instance is None: raise AttributeError() return self.fget(instance) def __set__(self, instance, value): raise AttributeError("can't set attribute") def __delete__(self, instance): raise AttributeError("can't delete attribute") def _is_descriptor(obj): """Returns True if obj is a descriptor, False otherwise.""" return ( hasattr(obj, '__get__') or hasattr(obj, '__set__') or hasattr(obj, '__delete__')) def _is_dunder(name): """Returns True if a __dunder__ name, False otherwise.""" return (name[:2] == name[-2:] == '__' and name[2:3] != '_' and name[-3:-2] != '_' and len(name) > 4) def _is_sunder(name): """Returns True if a _sunder_ name, False otherwise.""" return (name[0] == name[-1] == '_' and name[1:2] != '_' and name[-2:-1] != '_' and len(name) > 2) def _make_class_unpicklable(cls): """Make the given class un-picklable.""" def _break_on_call_reduce(self, protocol=None): raise TypeError('%r cannot be pickled' % self) cls.__reduce_ex__ = _break_on_call_reduce cls.__module__ = '<unknown>' class _EnumDict(dict): """Track enum member order and ensure member names are not reused. EnumMeta will use the names found in self._member_names as the enumeration member names. """ def __init__(self): super(_EnumDict, self).__init__() self._member_names = [] def __setitem__(self, key, value): """Changes anything not dundered or not a descriptor. If a descriptor is added with the same name as an enum member, the name is removed from _member_names (this may leave a hole in the numerical sequence of values). If an enum member name is used twice, an error is raised; duplicate values are not checked for. Single underscore (sunder) names are reserved. Note: in 3.x __order__ is simply discarded as a not necessary piece leftover from 2.x """ if pyver >= 3.0 and key in ('_order_', '__order__'): return elif key == '__order__': key = '_order_' if _is_sunder(key): if key != '_order_': raise ValueError('_names_ are reserved for future Enum use') elif _is_dunder(key): pass elif key in self._member_names: # descriptor overwriting an enum? raise TypeError('Attempted to reuse key: %r' % key) elif not _is_descriptor(value): if key in self: # enum overwriting a descriptor? 
raise TypeError('Key already defined as: %r' % self[key]) self._member_names.append(key) super(_EnumDict, self).__setitem__(key, value) # Dummy value for Enum as EnumMeta explicity checks for it, but of course until # EnumMeta finishes running the first time the Enum class doesn't exist. This # is also why there are checks in EnumMeta like `if Enum is not None` Enum = None class EnumMeta(type): """Metaclass for Enum""" @classmethod def __prepare__(metacls, cls, bases): return _EnumDict() def __new__(metacls, cls, bases, classdict): # an Enum class is final once enumeration items have been defined; it # cannot be mixed with other types (int, float, etc.) if it has an # inherited __new__ unless a new __new__ is defined (or the resulting # class will fail). if type(classdict) is dict: original_dict = classdict classdict = _EnumDict() for k, v in original_dict.items(): classdict[k] = v member_type, first_enum = metacls._get_mixins_(bases) __new__, save_new, use_args = metacls._find_new_(classdict, member_type, first_enum) # save enum items into separate mapping so they don't get baked into # the new class members = dict((k, classdict[k]) for k in classdict._member_names) for name in classdict._member_names: del classdict[name] # py2 support for definition order _order_ = classdict.get('_order_') if _order_ is None: if pyver < 3.0: try: _order_ = [name for (name, value) in sorted(members.items(), key=lambda item: item[1])] except TypeError: _order_ = [name for name in sorted(members.keys())] else: _order_ = classdict._member_names else: del classdict['_order_'] if pyver < 3.0: _order_ = _order_.replace(',', ' ').split() aliases = [name for name in members if name not in _order_] _order_ += aliases # check for illegal enum names (any others?) invalid_names = set(members) & set(['mro']) if invalid_names: raise ValueError('Invalid enum member name(s): %s' % ( ', '.join(invalid_names), )) # save attributes from super classes so we know if we can take # the shortcut of storing members in the class dict base_attributes = set([a for b in bases for a in b.__dict__]) # create our new Enum type enum_class = super(EnumMeta, metacls).__new__(metacls, cls, bases, classdict) enum_class._member_names_ = [] # names in random order if OrderedDict is not None: enum_class._member_map_ = OrderedDict() else: enum_class._member_map_ = {} # name->value map enum_class._member_type_ = member_type # Reverse value->name map for hashable values. enum_class._value2member_map_ = {} # instantiate them, checking for duplicates as we go # we instantiate first instead of checking for duplicates first in case # a custom __new__ is doing something funky with the values -- such as # auto-numbering ;) if __new__ is None: __new__ = enum_class.__new__ for member_name in _order_: value = members[member_name] if not isinstance(value, tuple): args = (value, ) else: args = value if member_type is tuple: # special case for tuple enums args = (args, ) # wrap it one more time if not use_args or not args: enum_member = __new__(enum_class) if not hasattr(enum_member, '_value_'): enum_member._value_ = value else: enum_member = __new__(enum_class, *args) if not hasattr(enum_member, '_value_'): enum_member._value_ = member_type(*args) value = enum_member._value_ enum_member._name_ = member_name enum_member.__objclass__ = enum_class enum_member.__init__(*args) # If another member with the same value was already defined, the # new member becomes an alias to the existing one. 
for name, canonical_member in enum_class._member_map_.items(): if canonical_member.value == enum_member._value_: enum_member = canonical_member break else: # Aliases don't appear in member names (only in __members__). enum_class._member_names_.append(member_name) # performance boost for any member that would not shadow # a DynamicClassAttribute (aka _RouteClassAttributeToGetattr) if member_name not in base_attributes: setattr(enum_class, member_name, enum_member) # now add to _member_map_ enum_class._member_map_[member_name] = enum_member try: # This may fail if value is not hashable. We can't add the value # to the map, and by-value lookups for this value will be # linear. enum_class._value2member_map_[value] = enum_member except TypeError: pass # If a custom type is mixed into the Enum, and it does not know how # to pickle itself, pickle.dumps will succeed but pickle.loads will # fail. Rather than have the error show up later and possibly far # from the source, sabotage the pickle protocol for this class so # that pickle.dumps also fails. # # However, if the new class implements its own __reduce_ex__, do not # sabotage -- it's on them to make sure it works correctly. We use # __reduce_ex__ instead of any of the others as it is preferred by # pickle over __reduce__, and it handles all pickle protocols. unpicklable = False if '__reduce_ex__' not in classdict: if member_type is not object: methods = ('__getnewargs_ex__', '__getnewargs__', '__reduce_ex__', '__reduce__') if not any(m in member_type.__dict__ for m in methods): _make_class_unpicklable(enum_class) unpicklable = True # double check that repr and friends are not the mixin's or various # things break (such as pickle) for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): class_method = getattr(enum_class, name) obj_method = getattr(member_type, name, None) enum_method = getattr(first_enum, name, None) if name not in classdict and class_method is not enum_method: if name == '__reduce_ex__' and unpicklable: continue setattr(enum_class, name, enum_method) # method resolution and int's are not playing nice # Python's less than 2.6 use __cmp__ if pyver < 2.6: if issubclass(enum_class, int): setattr(enum_class, '__cmp__', getattr(int, '__cmp__')) elif pyver < 3.0: if issubclass(enum_class, int): for method in ( '__le__', '__lt__', '__gt__', '__ge__', '__eq__', '__ne__', '__hash__', ): setattr(enum_class, method, getattr(int, method)) # replace any other __new__ with our own (as long as Enum is not None, # anyway) -- again, this is to support pickle if Enum is not None: # if the user defined their own __new__, save it before it gets # clobbered in case they subclass later if save_new: setattr(enum_class, '__member_new__', enum_class.__dict__['__new__']) setattr(enum_class, '__new__', Enum.__dict__['__new__']) return enum_class def __bool__(cls): """ classes/types should always be True. """ return True def __call__(cls, value, names=None, module=None, type=None, start=1): """Either returns an existing member, or creates a new enum class. This method is used both when an enum class is given a value to match to an enumeration member (i.e. Color(3)) and for the functional API (i.e. Color = Enum('Color', names='red green blue')). When used for the functional API: `module`, if set, will be stored in the new class' __module__ attribute; `type`, if set, will be mixed in as the first base class. 
Note: if `module` is not set this routine will attempt to discover the calling module by walking the frame stack; if this is unsuccessful the resulting class will not be pickleable. """ if names is None: # simple value lookup return cls.__new__(cls, value) # otherwise, functional API: we're creating a new Enum type return cls._create_(value, names, module=module, type=type, start=start) def __contains__(cls, member): return isinstance(member, cls) and member.name in cls._member_map_ def __delattr__(cls, attr): # nicer error message when someone tries to delete an attribute # (see issue19025). if attr in cls._member_map_: raise AttributeError( "%s: cannot delete Enum member." % cls.__name__) super(EnumMeta, cls).__delattr__(attr) def __dir__(self): return (['__class__', '__doc__', '__members__', '__module__'] + self._member_names_) @property def __members__(cls): """Returns a mapping of member name->value. This mapping lists all enum members, including aliases. Note that this is a copy of the internal mapping. """ return cls._member_map_.copy() def __getattr__(cls, name): """Return the enum member matching `name` We use __getattr__ instead of descriptors or inserting into the enum class' __dict__ in order to support `name` and `value` being both properties for enum members (which live in the class' __dict__) and enum members themselves. """ if _is_dunder(name): raise AttributeError(name) try: return cls._member_map_[name] except KeyError: raise AttributeError(name) def __getitem__(cls, name): return cls._member_map_[name] def __iter__(cls): return (cls._member_map_[name] for name in cls._member_names_) def __reversed__(cls): return (cls._member_map_[name] for name in reversed(cls._member_names_)) def __len__(cls): return len(cls._member_names_) __nonzero__ = __bool__ def __repr__(cls): return "<enum %r>" % cls.__name__ def __setattr__(cls, name, value): """Block attempts to reassign Enum members. A simple assignment to the class namespace only changes one of the several possible ways to get an Enum member from the Enum class, resulting in an inconsistent Enumeration. """ member_map = cls.__dict__.get('_member_map_', {}) if name in member_map: raise AttributeError('Cannot reassign members.') super(EnumMeta, cls).__setattr__(name, value) def _create_(cls, class_name, names=None, module=None, type=None, start=1): """Convenience method to create a new Enum class. `names` can be: * A string containing member names, separated either with spaces or commas. Values are auto-numbered from 1. * An iterable of member names. Values are auto-numbered from 1. * An iterable of (member name, value) pairs. * A mapping of member name -> value. """ if pyver < 3.0: # if class_name is unicode, attempt a conversion to ASCII if isinstance(class_name, unicode): try: class_name = class_name.encode('ascii') except UnicodeEncodeError: raise TypeError('%r is not representable in ASCII' % class_name) metacls = cls.__class__ if type is None: bases = (cls, ) else: bases = (type, cls) classdict = metacls.__prepare__(class_name, bases) _order_ = [] # special processing needed for names? if isinstance(names, basestring): names = names.replace(',', ' ').split() if isinstance(names, (tuple, list)) and isinstance(names[0], basestring): names = [(e, i+start) for (i, e) in enumerate(names)] # Here, names is either an iterable of (name, value) or a mapping. 
item = None # in case names is empty for item in names: if isinstance(item, basestring): member_name, member_value = item, names[item] else: member_name, member_value = item classdict[member_name] = member_value _order_.append(member_name) # only set _order_ in classdict if name/value was not from a mapping if not isinstance(item, basestring): classdict['_order_'] = ' '.join(_order_) enum_class = metacls.__new__(metacls, class_name, bases, classdict) # TODO: replace the frame hack if a blessed way to know the calling # module is ever developed if module is None: try: module = _sys._getframe(2).f_globals['__name__'] except (AttributeError, ValueError): pass if module is None: _make_class_unpicklable(enum_class) else: enum_class.__module__ = module return enum_class @staticmethod def _get_mixins_(bases): """Returns the type for creating enum members, and the first inherited enum class. bases: the tuple of bases that was given to __new__ """ if not bases or Enum is None: return object, Enum # double check that we are not subclassing a class with existing # enumeration members; while we're at it, see if any other data # type has been mixed in so we can use the correct __new__ member_type = first_enum = None for base in bases: if (base is not Enum and issubclass(base, Enum) and base._member_names_): raise TypeError("Cannot extend enumerations") # base is now the last base in bases if not issubclass(base, Enum): raise TypeError("new enumerations must be created as " "`ClassName([mixin_type,] enum_type)`") # get correct mix-in type (either mix-in type of Enum subclass, or # first base if last base is Enum) if not issubclass(bases[0], Enum): member_type = bases[0] # first data type first_enum = bases[-1] # enum type else: for base in bases[0].__mro__: # most common: (IntEnum, int, Enum, object) # possible: (<Enum 'AutoIntEnum'>, <Enum 'IntEnum'>, # <class 'int'>, <Enum 'Enum'>, # <class 'object'>) if issubclass(base, Enum): if first_enum is None: first_enum = base else: if member_type is None: member_type = base return member_type, first_enum if pyver < 3.0: @staticmethod def _find_new_(classdict, member_type, first_enum): """Returns the __new__ to be used for creating the enum members. 
classdict: the class dictionary given to __new__ member_type: the data type whose __new__ will be used by default first_enum: enumeration to check for an overriding __new__ """ # now find the correct __new__, checking to see of one was defined # by the user; also check earlier enum classes in case a __new__ was # saved as __member_new__ __new__ = classdict.get('__new__', None) if __new__: return None, True, True # __new__, save_new, use_args N__new__ = getattr(None, '__new__') O__new__ = getattr(object, '__new__') if Enum is None: E__new__ = N__new__ else: E__new__ = Enum.__dict__['__new__'] # check all possibles for __member_new__ before falling back to # __new__ for method in ('__member_new__', '__new__'): for possible in (member_type, first_enum): try: target = possible.__dict__[method] except (AttributeError, KeyError): target = getattr(possible, method, None) if target not in [ None, N__new__, O__new__, E__new__, ]: if method == '__member_new__': classdict['__new__'] = target return None, False, True if isinstance(target, staticmethod): target = target.__get__(member_type) __new__ = target break if __new__ is not None: break else: __new__ = object.__new__ # if a non-object.__new__ is used then whatever value/tuple was # assigned to the enum member name will be passed to __new__ and to the # new enum member's __init__ if __new__ is object.__new__: use_args = False else: use_args = True return __new__, False, use_args else: @staticmethod def _find_new_(classdict, member_type, first_enum): """Returns the __new__ to be used for creating the enum members. classdict: the class dictionary given to __new__ member_type: the data type whose __new__ will be used by default first_enum: enumeration to check for an overriding __new__ """ # now find the correct __new__, checking to see of one was defined # by the user; also check earlier enum classes in case a __new__ was # saved as __member_new__ __new__ = classdict.get('__new__', None) # should __new__ be saved as __member_new__ later? save_new = __new__ is not None if __new__ is None: # check all possibles for __member_new__ before falling back to # __new__ for method in ('__member_new__', '__new__'): for possible in (member_type, first_enum): target = getattr(possible, method, None) if target not in ( None, None.__new__, object.__new__, Enum.__new__, ): __new__ = target break if __new__ is not None: break else: __new__ = object.__new__ # if a non-object.__new__ is used then whatever value/tuple was # assigned to the enum member name will be passed to __new__ and to the # new enum member's __init__ if __new__ is object.__new__: use_args = False else: use_args = True return __new__, save_new, use_args ######################################################## # In order to support Python 2 and 3 with a single # codebase we have to create the Enum methods separately # and then use the `type(name, bases, dict)` method to # create the class. ######################################################## temp_enum_dict = {} temp_enum_dict['__doc__'] = "Generic enumeration.\n\n Derive from this class to define new enumerations.\n\n" def __new__(cls, value): # all enum instances are actually created during class construction # without calling this method; this method is called by the metaclass' # __call__ (i.e. 
Color(3) ), and by pickle if type(value) is cls: # For lookups like Color(Color.red) value = value.value #return value # by-value search for a matching enum member # see if it's in the reverse mapping (for hashable values) try: if value in cls._value2member_map_: return cls._value2member_map_[value] except TypeError: # not there, now do long search -- O(n) behavior for member in cls._member_map_.values(): if member.value == value: return member raise ValueError("%s is not a valid %s" % (value, cls.__name__)) temp_enum_dict['__new__'] = __new__ del __new__ def __repr__(self): return "<%s.%s: %r>" % ( self.__class__.__name__, self._name_, self._value_) temp_enum_dict['__repr__'] = __repr__ del __repr__ def __str__(self): return "%s.%s" % (self.__class__.__name__, self._name_) temp_enum_dict['__str__'] = __str__ del __str__ if pyver >= 3.0: def __dir__(self): added_behavior = [ m for cls in self.__class__.mro() for m in cls.__dict__ if m[0] != '_' and m not in self._member_map_ ] return (['__class__', '__doc__', '__module__', ] + added_behavior) temp_enum_dict['__dir__'] = __dir__ del __dir__ def __format__(self, format_spec): # mixed-in Enums should use the mixed-in type's __format__, otherwise # we can get strange results with the Enum name showing up instead of # the value # pure Enum branch if self._member_type_ is object: cls = str val = str(self) # mix-in branch else: cls = self._member_type_ val = self.value return cls.__format__(val, format_spec) temp_enum_dict['__format__'] = __format__ del __format__ #################################### # Python's less than 2.6 use __cmp__ if pyver < 2.6: def __cmp__(self, other): if type(other) is self.__class__: if self is other: return 0 return -1 return NotImplemented raise TypeError("unorderable types: %s() and %s()" % (self.__class__.__name__, other.__class__.__name__)) temp_enum_dict['__cmp__'] = __cmp__ del __cmp__ else: def __le__(self, other): raise TypeError("unorderable types: %s() <= %s()" % (self.__class__.__name__, other.__class__.__name__)) temp_enum_dict['__le__'] = __le__ del __le__ def __lt__(self, other): raise TypeError("unorderable types: %s() < %s()" % (self.__class__.__name__, other.__class__.__name__)) temp_enum_dict['__lt__'] = __lt__ del __lt__ def __ge__(self, other): raise TypeError("unorderable types: %s() >= %s()" % (self.__class__.__name__, other.__class__.__name__)) temp_enum_dict['__ge__'] = __ge__ del __ge__ def __gt__(self, other): raise TypeError("unorderable types: %s() > %s()" % (self.__class__.__name__, other.__class__.__name__)) temp_enum_dict['__gt__'] = __gt__ del __gt__ def __eq__(self, other): if type(other) is self.__class__: return self is other return NotImplemented temp_enum_dict['__eq__'] = __eq__ del __eq__ def __ne__(self, other): if type(other) is self.__class__: return self is not other return NotImplemented temp_enum_dict['__ne__'] = __ne__ del __ne__ def __hash__(self): return hash(self._name_) temp_enum_dict['__hash__'] = __hash__ del __hash__ def __reduce_ex__(self, proto): return self.__class__, (self._value_, ) temp_enum_dict['__reduce_ex__'] = __reduce_ex__ del __reduce_ex__ # _RouteClassAttributeToGetattr is used to provide access to the `name` # and `value` properties of enum members while keeping some measure of # protection from modification, while still allowing for an enumeration # to have members named `name` and `value`. This works because enumeration # members are not set directly on the enum class -- __getattr__ is # used to look them up. 
@_RouteClassAttributeToGetattr def name(self): return self._name_ temp_enum_dict['name'] = name del name @_RouteClassAttributeToGetattr def value(self): return self._value_ temp_enum_dict['value'] = value del value @classmethod def _convert(cls, name, module, filter, source=None): """ Create a new Enum subclass that replaces a collection of global constants """ # convert all constants from source (or module) that pass filter() to # a new Enum called name, and export the enum and its members back to # module; # also, replace the __reduce_ex__ method so unpickling works in # previous Python versions module_globals = vars(_sys.modules[module]) if source: source = vars(source) else: source = module_globals members = dict((name, value) for name, value in source.items() if filter(name)) cls = cls(name, members, module=module) cls.__reduce_ex__ = _reduce_ex_by_name module_globals.update(cls.__members__) module_globals[name] = cls return cls temp_enum_dict['_convert'] = _convert del _convert Enum = EnumMeta('Enum', (object, ), temp_enum_dict) del temp_enum_dict # Enum has now been created ########################### class IntEnum(int, Enum): """Enum where members are also (and must be) ints""" def _reduce_ex_by_name(self, proto): return self.name def unique(enumeration): """Class decorator that ensures only unique members exist in an enumeration.""" duplicates = [] for name, member in enumeration.__members__.items(): if name != member.name: duplicates.append((name, member.name)) if duplicates: duplicate_names = ', '.join( ["%s -> %s" % (alias, name) for (alias, name) in duplicates] ) raise ValueError('duplicate names found in %r: %s' % (enumeration, duplicate_names) ) return enumeration
31,054 bytes · Python (`.py`) · 702 total lines · avg line length 33.713675 · max line length 113 · alphanum fraction 0.55525
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
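This vendored `aenum.py` is a Python 2/3 backport of the stdlib `enum` module (version 1.1.6, exporting `Enum`, `IntEnum`, and `unique`). A short usage sketch of that API follows; it assumes the module is importable as `aenum` (inside the plugin it lives under `anaconda_lib`), and the same calls work against the stdlib `enum` module.

```python
# Usage sketch for the Enum backport above; the API mirrors stdlib `enum`.
from aenum import Enum, IntEnum, unique


@unique                      # the `unique` decorator rejects value aliases
class Color(Enum):
    red = 1
    green = 2
    blue = 3


class HttpStatus(IntEnum):   # members are also real ints
    ok = 200
    not_found = 404


print(Color.red)             # Color.red
print(Color(2))              # by-value lookup -> Color.green
print(Color['blue'].value)   # by-name lookup -> 3
print(HttpStatus.ok == 200)  # IntEnum compares equal to plain ints -> True
```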
**Record 28,202: `vagrant.py`** (`DamnWidget_anaconda/anaconda_lib/vagrant.py`)
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details import os import threading import subprocess from .logger import Log from .contexts import vagrant_root from .helpers import create_subprocess PIPE = subprocess.PIPE class VagrantBase(threading.Thread): """Spawn vagrant shell to execute commands (base class) """ def __init__(self, callback, vagrant_root, machine): super(VagrantBase, self).__init__() self.machine = machine if machine is not None else 'default' self.callback = callback self.vagrant_root = vagrant_root def wait_answer(self, args): """Wait for an answer from the subprocess """ with vagrant_root(self.vagrant_root): proc = create_subprocess( args, stdout=PIPE, stderr=PIPE, cwd=os.getcwd() ) output, error = proc.communicate() self.callback((proc.poll() == 0, output, error)) class VagrantInit(VagrantBase): """Init a new vagrant environment with the given box """ def __init__(self, callback, vagrant_root, box): super(VagrantInit, self).__init__(callback, vagrant_root) self.box = box self.start() def run(self): """Init the vagrant machine """ self.wait_answer(['vagrant', 'init', self.box]) class VagrantUp(VagrantBase): """Start a vagrant box """ def __init__(self, callback, vagrant_root, machine=None): super(VagrantUp, self).__init__(callback, vagrant_root, machine) self.start() def run(self): """Start the vagrant box machine """ self.wait_answer(['vagrant', 'up', self.machine]) class VagrantReload(VagrantBase): """Reload a vagrant box """ def __init__(self, callback, vagrant_root, machine=None): super(VagrantReload, self).__init__(callback, vagrant_root, machine) self.start() def run(self): """Reload the vagrant box machine """ self.wait_answer(['vagrant', 'reload', self.machine]) class VagrantStatus(VagrantBase): """Check vagrant box status """ def __init__(self, callback, vagrant_root, machine=None, full=False): super(VagrantStatus, self).__init__(callback, vagrant_root, machine) self.full = full self.start() def run(self): """Check the vagrant box machine status """ args = ['vagrant', 'status', self.machine] with vagrant_root(self.vagrant_root): proc = create_subprocess( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.getcwd() ) output, error = proc.communicate() if proc.poll() != 0: self.callback((False, error)) else: running = b'running' in output self.callback((True, running if not self.full else output)) class VagrantSSH(VagrantBase): """Execute a remote SSH command in a vagrant box """ def __init__(self, callback, vagrant_root, cmd, machine=None): super(VagrantSSH, self).__init__(callback, vagrant_root, machine) self.cmd = cmd self.start() def run(self): """Execute a command through SSH in a vagrant box machine """ self.wait_answer(['vagrant', 'ssh', self.machine, '-c', self.cmd]) class VagrantIPAddress(object): """Get back the remote guest IP address in synchronous way """ def __init__(self, root, machine=None, iface='eth1'): with vagrant_root(root): cmd = ( 'python -c "import socket, fcntl, struct;' 's = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);' 'print(socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, ' 'struct.pack(b\'256s\', b\'{}\'))[20:24]))"\n' ).format(iface) proc = create_subprocess( ['vagrant', 'ssh', machine, '-c', cmd], stdout=PIPE, stderr=PIPE, cwd=os.getcwd() ) output, error = proc.communicate() if proc.poll() != 0: self.ip_address = None else: self.ip_address = output class VagrantIPAddressGlobal(object): """Get back the remote guest IP address in synchronous way 
from global """ def __init__(self, machine, iface='eth1'): cmd = ( 'python -c "import socket, fcntl, struct;' 's = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);' 'print(socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, ' 'struct.pack(b\'256s\', b\'{}\'))[20:24]))"\n' ).format(iface) proc = create_subprocess( ['vagrant', 'ssh', machine, '-c', cmd], stdout=PIPE, stderr=PIPE, cwd=os.getcwd() ) output, error = proc.communicate() if proc.poll() != 0: self.ip_address = None else: self.ip_address = output class VagrantMachineGlobalInfo(object): """Get vagrant machine information looking up in global stats """ def __init__(self, machine): self.machine = machine self.status = '' self.machine_id = '' args = ('vagrant', 'global-status') p = create_subprocess(args, stdout=PIPE, stderr=PIPE) if p is None: raise RuntimeError('vagrant command not found') output, err = p.communicate() if err: raise RuntimeError(err) for line in output.splitlines()[2:]: if not line: continue if line.startswith(b'The'): break data = line.split() if not data: continue if data[1].decode('utf8') == machine: self.machine_id = data[0].decode('utf8') self.status = data[3].decode('utf8') self.directory = data[4].decode('utf8') break class VagrantStartMachine(object): """Start a vagrant machine using it's global ID """ def __init__(self, machine, directory): with vagrant_root(directory): args = ('vagrant', 'up', machine) p = create_subprocess( args, stdout=PIPE, stderr=PIPE, cwd=os.getcwd()) if p is None: raise RuntimeError('vagrant command not found') output, err = p.communicate() Log.info(output.decode('utf8')) if err: # check if the machine is running using global stats info = VagrantMachineGlobalInfo(machine) if info.status != 'running': raise RuntimeError(err) Log.error(err.decode('utf8'))
6,824 bytes · Python (`.py`) · 174 total lines · avg line length 29.689655 · max line length 76 · alphanum fraction 0.581942
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
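A hedged sketch of how these threaded wrappers appear to be driven: each class calls `self.start()` in its own `__init__` and reports back through the callback, so constructing an instance is enough to kick off the work. This assumes a Sublime Text plugin environment where `anaconda_lib` is importable and the `vagrant` binary is on `PATH`; the `~/vagrant/project` path is illustrative only.

```python
# Hedged usage sketch: assumes anaconda_lib is importable and `vagrant`
# is installed. The vagrant root path below is a made-up example.
from anaconda_lib.vagrant import VagrantStatus, VagrantUp


def on_status(result):
    # VagrantStatus reports (success, payload); with full=False the
    # payload is a bool telling whether the box is running
    success, payload = result
    if success and payload:
        print('box is already running')
    else:
        VagrantUp(on_up, '~/vagrant/project')  # constructing it starts it


def on_up(result):
    # wait_answer-based workers report (success, stdout, stderr)
    success, output, error = result
    print('vagrant up ok' if success else error)


VagrantStatus(on_status, '~/vagrant/project', machine='default')
```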
**Record 28,203: `logger.py`** (`DamnWidget_anaconda/anaconda_lib/logger.py`)

```python
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

import sys
import logging
import functools

import sublime

from .helpers import get_settings, active_view


def prevent_spam(func):
    """Prevent spamming in the logger
    """

    _last_messages = {}

    def _remove_from_cache(args):
        m = _last_messages.pop(args)
        if m == 1:
            return
        method = getattr(Log._logger, args[0])
        method(
            '{}\n ...last message repeated {} in the last 10s'.format(
                args[1], 'one more time' if m == 2 else '{} times'.format(m)
            )
        )

    @functools.wraps(func)
    def wrapper(cls, *args, **kwargs):
        if args in _last_messages:
            _last_messages[args] += 1
            return
        _last_messages[args] = 1
        sublime.set_timeout_async(lambda: _remove_from_cache(args), 10000)
        func(cls, args[0], *args[1:], **kwargs)

    return wrapper


class MetaLog(type):

    def __new__(cls, name, bases, attrs, **kwargs):
        log_level = get_settings(active_view(), 'log_level', 'info')
        if log_level not in ['debug', 'info', 'warning', 'error', 'fatal']:
            log_level = 'warning'

        cls._logger = logging.getLogger('anacondaST3')
        cls._logger.setLevel(logging.__getattribute__(log_level.upper()))
        log_handler = logging.StreamHandler(sys.stdout)
        log_handler.setFormatter(logging.Formatter(
            '%(name)s: %(levelname)s - %(message)s'
        ))
        cls._logger.addHandler(log_handler)
        cls._logger.propagate = False

        obj = super().__new__(cls, name, bases, attrs)
        for method in ['debug', 'info', 'warning', 'error', 'fatal']:
            setattr(obj, method, functools.partial(obj.write, method))

        return obj


class Log(metaclass=MetaLog):
    """The class is responsible to log errors
    """

    @classmethod
    @prevent_spam
    def write(cls, method, *args, **kwargs):
        """Info wrapper
        """

        f = getattr(cls._logger, method)
        f(*args, **kwargs)
```

2,150 bytes · Python (`.py`) · 57 total lines · avg line length 29.929825 · max line length 76 · alphanum fraction 0.6
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
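A short sketch of how `Log` appears intended to be used: `MetaLog` wires `Log.debug/info/warning/error/fatal` to `Log.write` via `functools.partial`, and `prevent_spam` collapses identical messages within a 10-second window into one summary line. This is only meaningful inside Sublime Text, since both the metaclass (settings lookup) and the spam guard (`sublime.set_timeout_async`) call the `sublime` API.

```python
# Hedged usage sketch: Sublime Text only.
from anaconda_lib.logger import Log

Log.info('anaconda worker started')
Log.error('could not reach the JsonServer')

# identical messages inside a 10 s window are swallowed and summarized
# later as "...last message repeated N times in the last 10s"
for _ in range(5):
    Log.warning('linting is taking longer than expected')
```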
**Record 28,204: `jsonclient.py`** (`DamnWidget_anaconda/anaconda_lib/jsonclient.py`)

```python
# -*- coding: utf8 -*-

# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""Minimalist standard library Asynchronous JSON Client
"""

import sys
import uuid
import socket
import logging
import traceback

try:
    import sublime
except ImportError:
    pass

try:
    import ujson as json
except ImportError:
    import json

from .callback import Callback
from .ioloop import EventHandler
from ._typing import Callable, Any

logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)


class AsynClient(EventHandler):
    """Asynchronous JSON connection to anaconda server
    """

    def __init__(self, port: int, host: str='localhost') -> None:
        if port == 0:
            # use an Unix Socket Domain
            EventHandler.__init__(
                self, host, socket.socket(socket.AF_UNIX, socket.SOCK_STREAM))
        else:
            EventHandler.__init__(self, (host, port))

        self.callbacks = {}
        self.rbuffer = []

    def ready_to_write(self) -> bool:
        """I am ready to send some data?
        """

        return True if self.outbuffer else False

    def handle_read(self, data: bytes) -> None:
        """Called when data is ready to be read
        """

        self.rbuffer.append(data)

    def add_callback(self, callback: Callable) -> str:
        """Add a new callback to the callbacks dictionary

        The hex representation of the callback's uuid4 is used as index. In
        case that the callback is a regular callable and not a Callback
        class instance, a new uuid4 code is created on the fly.
        """

        if not isinstance(callback, Callback):
            hexid = uuid.uuid4().hex
        else:
            hexid = callback.hexid

        self.callbacks[hexid] = callback
        return hexid

    def pop_callback(self, hexid: str) -> Callable:
        """Remove and return a callback callable from the callback dictionary
        """

        return self.callbacks.pop(hexid)

    def process_message(self) -> None:
        """Called when a full line has been read from the socket
        """

        message = b''.join(self.rbuffer)
        self.rbuffer = []

        try:
            data = sublime.decode_value(message.decode('utf8'))
        except (NameError, ValueError):
            data = json.loads(message.replace(b'\t', b' ' * 8).decode('utf8'))

        callback = self.pop_callback(data.pop('uid'))
        if callback is None:
            logger.error(
                'Received {} from the JSONServer but there is not callback '
                'to handle it. Aborting....'.format(message)
            )

        try:
            callback(data)
        except Exception as error:
            logging.error(error)
            for traceback_line in traceback.format_exc().splitlines():
                logging.error(traceback_line)

    def send_command(self, callback: Callable, **data: Any) -> None:
        """Send the given command that should be handled bu the given callback
        """

        data['uid'] = self.add_callback(callback)

        try:
            self.push(
                bytes('{}\r\n'.format(sublime.encode_value(data)), 'utf8')
            )
        except NameError:
            self.push(bytes('{}\r\n'.format(json.dumps(data)), 'utf8'))

    def __repr__(self):
        """String representation of the client
        """

        return '{}:{} ({})'.format(
            self.address[0], self.address[1],
            'connected' if self.connected else 'disconnected'
        )
```

3,640 bytes · Python (`.py`) · 98 total lines · avg line length 29.010204 · max line length 78 · alphanum fraction 0.616064
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
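A hedged sketch of the `AsynClient` request flow: register a callback, send a command tagged with the callback's `uid`, and let `process_message` route the server's JSON reply back to it. It assumes an anaconda JsonServer already listening on the given port and the `anaconda_lib` event loop driving the socket; the port and command payload below are illustrative.

```python
# Hedged usage sketch: assumes a running anaconda JsonServer on port 9999
# and the anaconda_lib ioloop pumping this client.
from anaconda_lib.jsonclient import AsynClient


def on_autocomplete(data):
    # `data` is the decoded JSON reply, its 'uid' already popped
    print(data.get('completions', []))


client = AsynClient(9999)  # port 0 would select a UNIX domain socket instead
client.send_command(
    on_autocomplete,       # stored under a fresh uuid4 hex as data['uid']
    method='autocomplete',
    source='import os\nos.',
    line=2,
    offset=3,
)
```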
**Record 28,205: `contexts.py`** (`DamnWidget_anaconda/anaconda_lib/contexts.py`)

```python
# -*- coding: utf8 -*-

# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""
Anaconda contexts
"""

import os
from contextlib import contextmanager


@contextmanager
def vagrant_root(directory):
    current_dir = os.getcwd()
    os.chdir(os.path.expanduser(directory))
    yield
    os.chdir(current_dir)
```

384 bytes · Python (`.py`) · 14 total lines · avg line length 24.928571 · max line length 65 · alphanum fraction 0.750685
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
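`vagrant_root` is a plain chdir-and-restore context manager, so it can be exercised outside Sublime Text. A runnable demonstration follows (the function is copied locally so the snippet stands alone); note that because the `yield` is not wrapped in `try/finally`, the original directory is not restored if the `with` body raises.

```python
# Runnable sketch: vagrant_root temporarily switches the working directory.
# Local copy of the context manager above, so this runs standalone.
import os
from contextlib import contextmanager


@contextmanager
def vagrant_root(directory):
    current_dir = os.getcwd()
    os.chdir(os.path.expanduser(directory))
    yield
    os.chdir(current_dir)  # not reached if the body raises (no try/finally)


print(os.getcwd())
with vagrant_root('~'):
    print(os.getcwd())   # now the expanded home directory
print(os.getcwd())       # back where we started
```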
**Record 28,206: `helpers.py`** (`DamnWidget_anaconda/anaconda_lib/helpers.py`)
# -*- coding: utf8 -*- # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details """ Anaconda helpers """ import os import json import logging import functools import traceback import subprocess from collections import defaultdict import sublime from .kite import Integration # define if we are in a git installation git_installation = False try: import Anaconda assert Anaconda except ImportError: git_installation = True NONE = 0x00 ONLY_CODE = 0x01 NOT_SCRATCH = 0x02 LINTING_ENABLED = 0x04 ENVIRON_HOOK_INVALID = defaultdict(lambda: False) AUTO_COMPLETION_DOT_VIEWS = [] def dot_completion(view): """Determine if autocompletion on dot is enabled for the view """ if view is None: return False if (view.window().id(), view.id()) in AUTO_COMPLETION_DOT_VIEWS: return True for trigger in view.settings().get('auto_complete_triggers', []): if trigger.get('characters', '') == '.': if 'source.python' in trigger.get('selector'): return True return False def enable_dot_completion(view): """Enable dot completion for the given view """ global AUTO_COMPLETION_DOT_VIEWS if view is None: return triggers = view.settings().get('auto_complete_triggers', []) triggers.append({ 'characters': '.', 'selector': 'source.python - string - comment - constant.numeric, ' 'meta.interpolated.format.fstring' }) view.settings().set('auto_complete_triggers', triggers) AUTO_COMPLETION_DOT_VIEWS.append((view.window().id(), view.id())) def completion_is_disabled(view): """Determine if the anaconda completion is disabled or not """ if view is None: return False # check if Kite integration is enabled if Integration.enabled(): return True return get_settings(view, "disable_anaconda_completion", False) def is_code(view, lang='python', ignore_comments=False, ignore_repl=False): """Determine if the given view location is `lang` code """ if view is None: return False # diable in SublimeREPL if view.settings().get('repl', False): if not ignore_repl: return False try: location = view.sel()[0].begin() except IndexError: return False if ignore_comments is True: matcher = 'source.{}'.format(lang) else: matcher = 'source.{} - string - comment, meta.interpolated.format.fstring'.format(lang) return view.match_selector(location, matcher) def is_python(view, ignore_comments=False, autocomplete_ignore_repl=False): """Determine if the given view location is python code """ if view is None: return False # disable in SublimeREPL if view.settings().get('repl', False): if not autocomplete_ignore_repl: return False try: location = view.sel()[0].begin() except IndexError: return False if ignore_comments is True: matcher = 'source.python' else: matcher = 'source.python - string - comment, meta.interpolated.format.fstring' return view.match_selector(location, matcher) def check_linting(view, mask, code='python'): """Check common linting constraints """ if mask & ONLY_CODE and not is_code(view, lang=code, ignore_comments=True): return False if mask & NOT_SCRATCH and view.is_scratch(): return False if (mask & LINTING_ENABLED and not get_settings(view, 'anaconda_linting', False)): return False return True def check_linting_behaviour(view, behaviours): """Make sure the correct behaviours are applied """ b = get_settings(view, 'anaconda_linting_behaviour', 'always') return b in behaviours def create_subprocess(args, **kwargs): """Create a subprocess and return it back """ if 'cwd' not in kwargs: kwargs['cwd'] = os.path.dirname(os.path.abspath(__file__)) kwargs['bufsize'] = -1 if os.name == 'nt': 
startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW kwargs['startupinfo'] = startupinfo if sublime.platform() == 'osx': env = kwargs['env'] if 'env' in kwargs else os.environ.copy() if 'env' in kwargs: env = env env['PYTHONIOENCODING'] = 'utf8' kwargs['env'] = env try: return subprocess.Popen(args, **kwargs) except Exception as e: logging.error( 'Your operating system denied the spawn of {}' ' process. Make sure your configured interpreter is a valid python' ' binary executable and is in the PATH\n' 'The OS did return {}'.format(args[0], e) ) def get_settings(view, name, default=None): """Get settings """ global ENVIRON_HOOK_INVALID if view is None: return default plugin_settings = sublime.load_settings('Anaconda.sublime-settings') if (name in ('python_interpreter', 'extra_paths') and not ENVIRON_HOOK_INVALID[view.id()]): if view.window() is not None and view.window().folders(): allow_multiple_env_hooks = get_settings( view, 'anaconda_allow_project_environment_hooks', False ) if allow_multiple_env_hooks: dirname = os.path.dirname(view.file_name()) else: dirname = view.window().folders()[0] while True: environfile = os.path.join(dirname, '.anaconda') if os.path.exists(environfile) and os.path.isfile(environfile): # print("Environ found on %s" % environfile) with open(environfile, 'r') as jsonfile: try: data = json.loads(jsonfile.read()) except Exception as error: sublime.error_message( "Anaconda Message:\n" "I found an .anaconda environment file in {} " "path but it doesn't seems to be a valid JSON " "file.\n\nThat means that your .anaconda " "hook file is being ignored.".format( environfile ) ) logging.error(error) ENVIRON_HOOK_INVALID[view.id()] = True break # stop loop else: r = data.get( name, view.settings().get( name, plugin_settings.get(name, default) ) ) w = view.window() if w is not None: return sublime.expand_variables( r, w.extract_variables() ) return r else: parts = os.path.split(dirname) if len(parts[1]) > 0: dirname = os.path.dirname(dirname) else: break # stop loop r = view.settings().get(name, plugin_settings.get(name, default)) if name == 'python_interpreter': r = expand(view, r) elif name == 'extra_paths': if isinstance(r, (list, tuple)): r = [expand(view, e) for e in r] else: r = expand(view, r) return r def expand(view, path): """Expand the given path """ window = view.window() if window is not None: tmp = os.path.expanduser(os.path.expandvars(path)) tmp = sublime.expand_variables(tmp, window.extract_variables()) else: return path return tmp def active_view(): """Return the active view """ return sublime.active_window().active_view() def is_remote_session(view): """Returns True if we are in a remote session """ if '://' in get_interpreter(view): return True return False def prepare_send_data(location, method, handler): """Prepare dict that has to be sended trough the socket """ view = active_view() return { 'source': view.substr(sublime.Region(0, view.size())), 'line': location[0] + 1, 'offset': location[1], 'filename': view.file_name() or '', 'method': method, 'handler': handler } def project_name(): """ Generates and returns back a valid project name for the window If there is not worker yet for this window, we create it and set a name for it. 
If we don't have a project file we just use the first folder name in the window's folders as name, if we don't have any folders in the window we just use the window.window_id """ window = sublime.active_window() project_name = window.project_file_name() if project_name is None: folders = window.folders() if len(folders) > 0: try: project_name = window.folders()[0].rsplit(os.sep, 1)[1] except IndexError: # ST3 on Windows behave weird and sometimes doesn't # return back a valid folders path for the active # window, this is a workaround to fix #253 v = active_view() if v is not None and v.file_name() is not None: project_name = v.file_name().rsplit(os.sep, 1)[1] else: project_name = 'anaconda-{id}'.format(id=window.window_id) else: project_name = 'anaconda-{id}'.format(id=window.window_id) else: project_name = project_name.rsplit(os.sep, 1)[1].split('.')[0] return project_name def get_traceback(): """Get traceback log """ traceback_log = [] for traceback_line in traceback.format_exc().splitlines(): traceback_log.append(traceback_line) return '\n'.join(traceback_log) def get_view(window, vid): """ Look for the given view id in the window opened views and return it back """ for view in window.views(): if view.id() == vid: return view def get_window_view(vid): """Look for the given vid in all the opened windows """ for window in sublime.windows(): view = get_view(window, vid) if view is not None: return view def get_socket_timeout(default): """ Some systems with weird enterprise security stuff can take a while to return a socket - permit overrides to default timeouts. Same thing occurs with certain other sublime plugins added. This lets the user set a timeout appropriate to their system without swallowing all startup errors """ return get_settings(active_view(), 'socket_timeout', default) def cache(func): """ Stupid and simplistic cache system that caches results from functions decorated with it unless the invalidate flag is passed in its args. note:: this is not intend to be used as a general cache solution """ cache = {} @functools.wraps(func) def wrapper(*args, **kwargs): if 'invalidate' in kwargs: cache.pop(func.__name__) result = cache.get( func.__name__, cache.setdefault(func.__name__, func(*args, **kwargs)) ) return result return wrapper @cache def valid_languages(**kwargs): """Return back valid languages for anaconda plugins """ path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir) languages = [ f.rsplit('_', 1)[1].lower() for f in os.listdir(path) if f.startswith('anaconda_') and 'vagrant' not in f ] return ['python'] + languages def get_interpreter(view): """Return back the python interpreter configured for the given view """ return get_settings(view, 'python_interpreter', 'python') def debug_enabled(view): """Returns True if the debug is enable """ return get_settings(active_view(), 'jsonserver_debug', False) is True
12,480 bytes · Python (`.py`) · 333 total lines · avg line length 28.165165 · max line length 95 · alphanum fraction 0.59302
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
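A hedged sketch of the settings resolution this module implements: for `python_interpreter` and `extra_paths`, `get_settings` walks up from the project folder looking for an `.anaconda` JSON hook file, then falls back to view settings, then `Anaconda.sublime-settings`, then the passed default. The snippet assumes a Sublime Text plugin environment.

```python
# Hedged usage sketch: assumes a Sublime Text plugin environment.
from anaconda_lib.helpers import (
    get_settings, active_view, is_python, create_subprocess
)

view = active_view()

# resolution order for python_interpreter / extra_paths:
#   .anaconda hook file -> view settings -> Anaconda.sublime-settings -> default
interpreter = get_settings(view, 'python_interpreter', 'python')

if is_python(view):
    # create_subprocess hides the console window on Windows and forces
    # PYTHONIOENCODING=utf8 on macOS before spawning; returns None on failure
    proc = create_subprocess([interpreter, '--version'])
    if proc is not None:
        out, err = proc.communicate()
        print(out or err)
```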
**Record 28,207: `progress_bar.py`** (`DamnWidget_anaconda/anaconda_lib/progress_bar.py`)

```python
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""Package Control progress bar like
"""

import threading

import sublime


class ProgressBar(threading.Thread):
    """A progress bar animation that runs in other thread
    """

    class Status(object):
        NONE = None
        SUCCESS = 'end'
        FAILURE = 'fail'
        TIMEOUT = 'timeout'

    def __init__(self, messages):
        threading.Thread.__init__(self)
        self.messages = messages
        self.addition = 1
        self.die = False

    def run(self):
        """Just run the thread
        """

        sublime.set_timeout(lambda: self.update(0), 100)

    def update(self, i):
        """Update the progress bar
        """

        if self.die:
            return

        size = 8
        pos = i % size
        status = '{}={}'.format(' ' * pos, ' ' * ((size - 1) - pos))
        sublime.status_message('{} [{}]'.format(
            self.messages['start'], status)
        )

        if not (size - 1) - pos:
            self.addition = -1
        if not pos:
            self.addition = 1
        i += self.addition

        sublime.set_timeout_async(lambda: self.update(i), 100)

    def terminate(self, status=None):
        """Terminate this thread
        """

        status = status or self.Status.SUCCESS
        message = self.messages.get(status) or self.messages[self.Status.SUCCESS]  # noqa
        sublime.status_message(message)
        self.die = True
```

1,534 bytes · Python (`.py`) · 47 total lines · avg line length 24.680851 · max line length 89 · alphanum fraction 0.579019
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
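A hedged sketch of driving `ProgressBar`: the messages dict is keyed by `'start'` plus the `Status` constants, the thread bounces a marker in the status bar via `sublime.set_timeout_async`, and `terminate` stops it with a final message. Sublime Text only.

```python
# Hedged usage sketch: Sublime Text only (status bar animation).
from anaconda_lib.progress_bar import ProgressBar

messages = {
    'start': 'Anaconda: linting',
    ProgressBar.Status.SUCCESS: 'Anaconda: linting done',
    ProgressBar.Status.FAILURE: 'Anaconda: linting failed',
    ProgressBar.Status.TIMEOUT: 'Anaconda: linting timed out',
}

bar = ProgressBar(messages)
bar.start()  # bounces "[ =      ]" across the status bar

# ... do the real work, then stop the animation with a final message:
bar.terminate(status=ProgressBar.Status.SUCCESS)
```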
**Record 28,208: `unix_socket.py`** (`DamnWidget_anaconda/anaconda_lib/unix_socket.py`)

```python
# Copyright (C) 2013 ~ 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software se LICENSE file for details

import os
import platform
import tempfile


class UnixSocketPath(object):
    """Encapsulate logic to handle with paths to UNIX domain sockets
    """

    socketpath = {
        'linux': os.path.join('~', '.local', 'share', 'anaconda', 'run'),
        'darwin': os.path.join(
            '~', 'Library', 'Application Support', 'Anaconda')
    }

    def __init__(self, project):
        self.__project = project
        self.__socket_file = os.path.join(
            os.path.expanduser(
                UnixSocketPath.socketpath.get(platform.system().lower())
            ),
            project or 'anaconda', 'anaconda.sock'
        )

    @property
    def socket(self):
        """Return back a valid socket path always
        """

        if len(self.__socket_file) < 103:
            return self.__socket_file

        socket_path = os.path.join(
            tempfile.gettempdir(), self.__project or 'anaconda',
            'anaconda.sock'
        )
        if len(socket_path) > 103:
            # probably the project name is crazy long
            socket_path = os.path.join(
                tempfile.gettempdir(), self.__project[:10], 'anaconda.sock'
            )

        return socket_path


def get_current_umask():
    'Return the current umask without changing it.'
    current_umask = os.umask(0)
    os.umask(current_umask)
    return current_umask
```

1,527 bytes · Python (`.py`) · 44 total lines · avg line length 26.25 · max line length 75 · alphanum fraction 0.590075
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
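This one can be exercised outside Sublime Text on Linux or macOS (the `socketpath` map has no Windows entry, so `platform.system().lower()` would return a key that yields `None` there). The 103-character cutoff reflects the small `sun_path` size limit UNIX domain socket paths have on some platforms.

```python
# Runnable sketch on Linux/macOS; assumes anaconda_lib is importable.
from anaconda_lib.unix_socket import UnixSocketPath, get_current_umask

path = UnixSocketPath('myproject')
print(path.socket)  # falls back to the temp dir when the per-user path
                    # would exceed the ~103-char sun_path limit

print(oct(get_current_umask()))  # reads the umask without changing it
```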
**Record 28,209: `decorators.py`** (`DamnWidget_anaconda/anaconda_lib/decorators.py`)

```python
# -*- coding: utf8 -*-

# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""
Anaconda decorators
"""

import os
import sys
import time
import pstats
import logging
import functools

try:
    import cProfile
    CPROFILE_AVAILABLE = True
except ImportError:
    CPROFILE_AVAILABLE = False

try:
    import sublime
    from .helpers import get_settings, project_name, is_remote_session
except ImportError:
    # we just imported the file from jsonserver so we don't need get_settings
    pass

from .constants import WorkerStatus


def auto_project_switch(func):
    """Auto kill and start a new jsonserver on project switching
    """

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.status != WorkerStatus.healthy:
            return

        def reconnect(proc):
            proc.kill()
            self.reconnecting = True
            self.start()

        view = sublime.active_window().active_view()
        auto_project_switch = get_settings(view, 'auto_project_switch', False)
        # expand user and shell vars
        python_interpreter = os.path.expandvars(
            os.path.expanduser(get_settings(view, 'python_interpreter')))
        process = self.processer._process

        if auto_project_switch and not is_remote_session(view) and \
                hasattr(self, 'project_name') and \
                (project_name() != self.project_name or
                    process.args[0] != python_interpreter):
            print('Project or iterpreter switch detected...')
            reconnect(process)
        else:
            func(self, *args, **kwargs)

    return wrapper


def auto_project_switch_ng(func):
    """If auto_project_switch is set tries to switch/reconnects workers
    """

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.status != WorkerStatus.healthy:
            return

        view = sublime.active_window().active_view()
        project_switch = get_settings(view, 'auto_project_switch', False)
        if project_switch:
            python_interpreter = get_settings(view, 'python_interpreter')
            if python_interpreter != self.interpreter.raw_interpreter:
                print('anacondaST3: Project or interpreter switch detected...')
                self.on_python_interpreter_switch(python_interpreter)
                return

        func(self, *args, **kwargs)

    return wrapper


def timeit(logger):
    """Decorator for timeit timeit timeit
    """

    def decorator(func):

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            starttime = time.time()
            result = func(*args, **kwargs)
            endtime = time.time()
            total = endtime - starttime
            logger.debug(
                'Func {} took {} secs'.format(func.__name__, total)
            )
            return result

        return wrapper

    return decorator


def profile(func):
    """Run the profiler in the given function
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        view = sublime.active_window().active_view()

        if get_settings(view, 'anaconda_debug', False) == 'profiler':
            if CPROFILE_AVAILABLE:
                pr = cProfile.Profile()
                pr.enable()
                result = func(*args, **kwargs)
                pr.disable()
                ps = pstats.Stats(pr, stream=sys.stdout)
                ps.sort_stats('time')
                ps.print_stats(15)
            else:
                logging.error(
                    'cProfile doesn\'t seems to can be imported on ST3 + {}, '
                    'sorry. You may want to use @timeit instead, so sorry '
                    'really'.format(sys.platform)
                )
                result = func(*args, **kwargs)
        else:
            result = func(*args, **kwargs)

        return result

    return wrapper
```

3,981 bytes · Python (`.py`) · 109 total lines · avg line length 27.293578 · max line length 79 · alphanum fraction 0.601825
Repo: `DamnWidget/anaconda` · 2,213 stars · 260 forks · 184 open issues · GPL-3.0 · extracted 9/5/2024, 5:14:06 PM (Europe/Amsterdam)
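Of these decorators, `@timeit` is the only one that does not need a live Sublime Text worker object, so a hedged sketch of it follows; it still assumes `anaconda_lib` (and its `constants` module) is importable.

```python
# Hedged sketch for @timeit; the other decorators here need a Sublime Text
# worker object, so they are not demonstrated.
import logging

from anaconda_lib.decorators import timeit

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('demo')


@timeit(logger)
def slow_sum(n):
    return sum(range(n))


slow_sum(1_000_000)  # logs: "Func slow_sum took ... secs" at DEBUG level
```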
**Record 28,210: `six.py`** (`DamnWidget_anaconda/anaconda_lib/six.py`)
# Copyright (c) 2010-2017 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Utilities for writing code that runs on Python 2 and 3""" from __future__ import absolute_import import functools import itertools import operator import sys import types __author__ = "Benjamin Peterson <benjamin@python.org>" __version__ = "1.10.0" # Useful for very coarse version differentiation. PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 PY34 = sys.version_info[0:2] >= (3, 4) if PY3: string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str if sys.platform.startswith("java"): # Jython always uses 32 bits. MAXSIZE = int((1 << 31) - 1) else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). class X(object): def __len__(self): return 1 << 31 try: len(X()) except OverflowError: # 32-bit MAXSIZE = int((1 << 31) - 1) else: # 64-bit MAXSIZE = int((1 << 63) - 1) del X def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name] class _LazyDescr(object): def __init__(self, name): self.name = name def __get__(self, obj, tp): result = self._resolve() setattr(obj, self.name, result) # Invokes __set__. try: # This is a bit ugly, but it avoids running this again by # removing this descriptor. 
delattr(obj.__class__, self.name) except AttributeError: pass return result class MovedModule(_LazyDescr): def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: if new is None: new = name self.mod = new else: self.mod = old def _resolve(self): return _import_module(self.mod) def __getattr__(self, attr): _module = self._resolve() value = getattr(_module, attr) setattr(self, attr, value) return value class _LazyModule(types.ModuleType): def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ def __dir__(self): attrs = ["__doc__", "__name__"] attrs += [attr.name for attr in self._moved_attributes] return attrs # Subclasses should override this _moved_attributes = [] class MovedAttribute(_LazyDescr): def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: if new_mod is None: new_mod = name self.mod = new_mod if new_attr is None: if old_attr is None: new_attr = name else: new_attr = old_attr self.attr = new_attr else: self.mod = old_mod if old_attr is None: old_attr = name self.attr = old_attr def _resolve(self): module = _import_module(self.mod) return getattr(module, self.attr) class _SixMetaPathImporter(object): """ A meta path importer to import six.moves and its submodules. This class implements a PEP302 finder and loader. It should be compatible with Python 2.5 and all existing versions of Python3 """ def __init__(self, six_module_name): self.name = six_module_name self.known_modules = {} def _add_module(self, mod, *fullnames): for fullname in fullnames: self.known_modules[self.name + "." + fullname] = mod def _get_module(self, fullname): return self.known_modules[self.name + "." + fullname] def find_module(self, fullname, path=None): if fullname in self.known_modules: return self return None def __get_module(self, fullname): try: return self.known_modules[fullname] except KeyError: raise ImportError("This loader does not know module " + fullname) def load_module(self, fullname): try: # in case of a reload return sys.modules[fullname] except KeyError: pass mod = self.__get_module(fullname) if isinstance(mod, MovedModule): mod = mod._resolve() else: mod.__loader__ = self sys.modules[fullname] = mod return mod def is_package(self, fullname): """ Return true, if the named module is a package. 
We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__") def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None get_source = get_code # same as get_code _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("getstatusoutput", "commands", "subprocess"), MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), MovedAttribute("UserDict", "UserDict", "collections"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), MovedModule("cPickle", "cPickle", "pickle"), MovedModule("queue", "Queue"), MovedModule("reprlib", "repr"), MovedModule("socketserver", "SocketServer"), MovedModule("_thread", "thread", "_thread"), MovedModule("tkinter", "Tkinter"), MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), MovedModule("tkinter_tix", "Tix", "tkinter.tix"), MovedModule("tkinter_ttk", "ttk", 
"tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), ] # Add windows specific modules. if sys.platform == "win32": _moved_attributes += [ MovedModule("winreg", "_winreg"), ] for attr in _moved_attributes: setattr(_MovedItems, attr.name, attr) if isinstance(attr, MovedModule): _importer._add_module(attr, "moves." + attr.name) del attr _MovedItems._moved_attributes = _moved_attributes moves = _MovedItems(__name__ + ".moves") _importer._add_module(moves, "moves") class Module_six_moves_urllib_parse(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_parse""" _urllib_parse_moved_attributes = [ MovedAttribute("ParseResult", "urlparse", "urllib.parse"), MovedAttribute("SplitResult", "urlparse", "urllib.parse"), MovedAttribute("parse_qs", "urlparse", "urllib.parse"), MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), MovedAttribute("urldefrag", "urlparse", "urllib.parse"), MovedAttribute("urljoin", "urlparse", "urllib.parse"), MovedAttribute("urlparse", "urlparse", "urllib.parse"), MovedAttribute("urlsplit", "urlparse", "urllib.parse"), MovedAttribute("urlunparse", "urlparse", "urllib.parse"), MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), MovedAttribute("quote", "urllib", "urllib.parse"), MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), MovedAttribute("splitvalue", "urllib", "urllib.parse"), MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), MovedAttribute("uses_query", "urlparse", "urllib.parse"), MovedAttribute("uses_relative", "urlparse", "urllib.parse"), ] for attr in _urllib_parse_moved_attributes: setattr(Module_six_moves_urllib_parse, attr.name, attr) del attr Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes _importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), "moves.urllib_parse", "moves.urllib.parse") class Module_six_moves_urllib_error(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_error""" _urllib_error_moved_attributes = [ MovedAttribute("URLError", "urllib2", 
"urllib.error"), MovedAttribute("HTTPError", "urllib2", "urllib.error"), MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), ] for attr in _urllib_error_moved_attributes: setattr(Module_six_moves_urllib_error, attr.name, attr) del attr Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes _importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), "moves.urllib_error", "moves.urllib.error") class Module_six_moves_urllib_request(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_request""" _urllib_request_moved_attributes = [ MovedAttribute("urlopen", "urllib2", "urllib.request"), MovedAttribute("install_opener", "urllib2", "urllib.request"), MovedAttribute("build_opener", "urllib2", "urllib.request"), MovedAttribute("pathname2url", "urllib", "urllib.request"), MovedAttribute("url2pathname", "urllib", "urllib.request"), MovedAttribute("getproxies", "urllib", "urllib.request"), MovedAttribute("Request", "urllib2", "urllib.request"), MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), MovedAttribute("BaseHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), MovedAttribute("FileHandler", "urllib2", "urllib.request"), MovedAttribute("FTPHandler", "urllib2", "urllib.request"), MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes _importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), "moves.urllib_request", "moves.urllib.request") class Module_six_moves_urllib_response(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_response""" _urllib_response_moved_attributes = [ MovedAttribute("addbase", "urllib", "urllib.response"), MovedAttribute("addclosehook", "urllib", "urllib.response"), MovedAttribute("addinfo", "urllib", "urllib.response"), MovedAttribute("addinfourl", "urllib", "urllib.response"), ] for attr in _urllib_response_moved_attributes: setattr(Module_six_moves_urllib_response, attr.name, 
attr) del attr Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes _importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), "moves.urllib_response", "moves.urllib.response") class Module_six_moves_urllib_robotparser(_LazyModule): """Lazy loading of moved objects in six.moves.urllib_robotparser""" _urllib_robotparser_moved_attributes = [ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), ] for attr in _urllib_robotparser_moved_attributes: setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes _importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), "moves.urllib_robotparser", "moves.urllib.robotparser") class Module_six_moves_urllib(types.ModuleType): """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") request = _importer._get_module("moves.urllib_request") response = _importer._get_module("moves.urllib_response") robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): return ['parse', 'error', 'request', 'response', 'robotparser'] _importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib") def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move) def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,)) if PY3: _meth_func = "__func__" _meth_self = "__self__" _func_closure = "__closure__" _func_code = "__code__" _func_defaults = "__defaults__" _func_globals = "__globals__" else: _meth_func = "im_func" _meth_self = "im_self" _func_closure = "func_closure" _func_code = "func_code" _func_defaults = "func_defaults" _func_globals = "func_globals" try: advance_iterator = next except NameError: def advance_iterator(it): return it.next() next = advance_iterator try: callable = callable except NameError: def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: def get_unbound_function(unbound): return unbound create_bound_method = types.MethodType def create_unbound_method(func, cls): return func Iterator = object else: def get_unbound_function(unbound): return unbound.im_func def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): def next(self): return type(self).__next__(self) callable = callable _add_doc(get_unbound_function, """Get the function out of a possibly unbound function""") get_method_function = operator.attrgetter(_meth_func) get_method_self = operator.attrgetter(_meth_self) get_function_closure = operator.attrgetter(_func_closure) get_function_code = operator.attrgetter(_func_code) get_function_defaults = operator.attrgetter(_func_defaults) get_function_globals = operator.attrgetter(_func_globals) if PY3: def iterkeys(d, **kw): return iter(d.keys(**kw)) def itervalues(d, **kw): return iter(d.values(**kw)) def iteritems(d, **kw): return iter(d.items(**kw)) def iterlists(d, **kw): return iter(d.lists(**kw)) viewkeys = operator.methodcaller("keys") viewvalues = 
operator.methodcaller("values") viewitems = operator.methodcaller("items") else: def iterkeys(d, **kw): return d.iterkeys(**kw) def itervalues(d, **kw): return d.itervalues(**kw) def iteritems(d, **kw): return d.iteritems(**kw) def iterlists(d, **kw): return d.iterlists(**kw) viewkeys = operator.methodcaller("viewkeys") viewvalues = operator.methodcaller("viewvalues") viewitems = operator.methodcaller("viewitems") _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") _add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") _add_doc(iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary.") if PY3: def b(s): return s.encode("latin-1") def u(s): return s unichr = chr import struct int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: def b(s): return s # Workaround for standalone backslash def u(s): return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") unichr = unichr int2byte = chr def byte2int(bs): return ord(bs[0]) def indexbytes(buf, i): return ord(buf[i]) iterbytes = functools.partial(itertools.imap, ord) import StringIO StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") def assertCountEqual(self, *args, **kwargs): return getattr(self, _assertCountEqual)(*args, **kwargs) def assertRaisesRegex(self, *args, **kwargs): return getattr(self, _assertRaisesRegex)(*args, **kwargs) def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): try: if value is None: value = tp() if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value finally: value = None tb = None else: def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: frame = sys._getframe(1) _globs_ = frame.f_globals if _locs_ is None: _locs_ = frame.f_locals del frame elif _locs_ is None: _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): try: raise tp, value, tb finally: tb = None """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): try: if from_value is None: raise value raise value from from_value finally: value = None """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): try: raise value from from_value finally: value = None """) else: def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) if fp is None: return def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. 
if (isinstance(fp, file) and isinstance(data, unicode) and fp.encoding is not None): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: if isinstance(sep, unicode): want_unicode = True elif not isinstance(sep, str): raise TypeError("sep must be None or a string") end = kwargs.pop("end", None) if end is not None: if isinstance(end, unicode): want_unicode = True elif not isinstance(end, str): raise TypeError("end must be None or a string") if kwargs: raise TypeError("invalid keyword arguments to print()") if not want_unicode: for arg in args: if isinstance(arg, unicode): want_unicode = True break if want_unicode: newline = unicode("\n") space = unicode(" ") else: newline = "\n" space = " " if sep is None: sep = space if end is None: end = newline for i, arg in enumerate(args): if i: write(sep) write(arg) write(end) if sys.version_info[:2] < (3, 3): _print = print_ def print_(*args, **kwargs): fp = kwargs.get("file", sys.stdout) flush = kwargs.pop("flush", False) _print(*args, **kwargs) if flush and fp is not None: fp.flush() _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f return wrapper else: wraps = functools.wraps def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(meta): def __new__(cls, name, this_bases, d): return meta(name, bases, d) return type.__new__(metaclass, 'temporary_class', (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. __path__ = [] # required for PEP 302 and PEP 451 __package__ = __name__ # see PEP 366 @ReservedAssignment if globals().get("__spec__") is not None: __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable # Remove other six meta path importers, since they cause problems. This can # happen if six is removed from sys.modules and then reloaded. (Setuptools does # this for some reason.) 
if sys.meta_path: for i, importer in enumerate(sys.meta_path): # Here's some real nastiness: Another "instance" of the six module might # be floating around. Therefore, we can't use isinstance() to check for # the six meta path importer, since the other six instance will have # inserted an importer with different class. if (type(importer).__name__ == "_SixMetaPathImporter" and importer.name == __name__): del sys.meta_path[i] break del i, importer # Finally, add the importer to the meta path import hook. sys.meta_path.append(_importer)
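The module above registers its lazy `moves` machinery on `sys.meta_path`, so renamed standard-library modules resolve on first import under either major Python version. A minimal usage sketch follows (my addition, not part of the vendored file; it only assumes the module is importable as `six`):

import six
from six.moves import range  # xrange on Python 2, the builtin range on Python 3

# Helpers defined above: b() always yields bytes, u() always yields text.
assert isinstance(six.b('abc'), six.binary_type)
assert isinstance(six.u('abc'), six.text_type)

# with_metaclass() builds a throwaway metaclass so a single class
# statement works under both the Python 2 and Python 3 syntaxes.
class Meta(type):
    pass

class Base(six.with_metaclass(Meta)):
    pass

assert type(Base) is Meta

# print_() resolves to the builtin print on Python 3 and to the shim
# defined above on old Python 2 releases.
for i in range(3):
    six.print_('item', i, sep=': ')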
30,698
Python
.py
717
35.990237
98
0.648497
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,211
info.py
DamnWidget_anaconda/anaconda_lib/info.py
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details


class Repr(type):
    """Metaclass that defines __repr__ for the class itself, not its instances

    Classes using it may provide a `_repr` classmethod whose return value
    is used whenever the class object is repr()'d.
    """

    def __repr__(cls):
        if hasattr(cls, '_repr'):
            return getattr(cls, '_repr')()
        return super(Repr, cls).__repr__()
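A short illustration of how the metaclass above is meant to be consumed (my addition; the `Status` class and its message are hypothetical, and Python 3 class syntax is used for brevity):

class Status(metaclass=Repr):
    """Example consumer of the Repr metaclass."""

    @classmethod
    def _repr(cls):
        return 'Status(ready)'

print(repr(Status))    # -> Status(ready), via Repr.__repr__
print(repr(Status()))  # instances keep the default object repr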
374
Python
.py
9
35.555556
72
0.633333
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,212
import_validator.py
DamnWidget_anaconda/anaconda_lib/import_validator.py
# -*- coding: utf8 -*-

# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""
Anaconda imports validator
"""

from jedi import Script, get_default_project


class Validator:
    """Try to import whatever import that is in the given source
    """

    def __init__(self, source, filename, settings):
        self.source = source
        self.errors = []  # type: List
        self.filename = filename
        self.settings = settings

    def is_valid(self):
        """Determine if the source imports are valid or not
        """

        for line, lineno in self._extract_imports():
            error, valid = self._validate_import(line, lineno)
            if not valid:
                self.errors.append((error, lineno))

        return not self.errors

    def _validate_import(self, module_line, lineno):
        """Try to validate the given import line
        """

        if 'noqa' in module_line:
            # report skipped lines as valid with an empty error message so
            # that callers can always unpack an (error, valid) tuple
            return '', True

        jedi_project = get_default_project(self.filename)
        error = []
        error_string = 'can\'t import {0}'
        valid = True
        for word in module_line.split():
            if word in ('from', 'import', 'as'):
                continue

            offset = int(module_line.find(word) + len(word) / 2)
            s = Script(
                self.source, lineno, offset, self.filename,
                project=jedi_project
            )
            if not self.filename:
                s = Script(module_line, 1, offset, project=jedi_project)
            if not s.goto_assignments():
                if valid is True:
                    valid = False
                error.append(word)

        err = '' if valid else error_string.format(' '.join(error))
        return err, valid

    def _extract_imports(self):
        """Extract imports from the source
        """

        found = []
        lineno = 1
        buffer_found = []  # type: List
        in_docstring = False
        for line in self.source.splitlines():
            if self.__detect_docstring(line):
                # toggle only when the line leaves a triple quote unpaired;
                # a one-line docstring opens and closes on the same line
                if (line.count('"""') + line.count("'''")) % 2:
                    in_docstring = not in_docstring
                lineno += 1
                continue

            if in_docstring:
                # text inside a docstring is never a real import
                lineno += 1
                continue

            line = line.strip()
            if len(buffer_found) > 0:
                if ')' in line:
                    buffer_found.append(line.replace(')', '').strip())
                    found.append((' '.join(buffer_found), lineno))
                    buffer_found = []
                else:
                    buffer_found.append(line)
            else:
                if line.startswith('import ') or line.startswith('from '):
                    if '(' in line:
                        buffer_found.append(line.replace('(', '').strip())
                    else:
                        found.append((line, lineno))
            lineno += 1

        return found

    def __detect_docstring(self, line):
        """Detects if there is a docstring delimiter on this line
        """

        if '"""' in line or "'''" in line:
            return True
        return False
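A hypothetical smoke test for the validator above (my addition). It assumes a jedi version whose `Script()` signature matches the calls in `_validate_import`; the file path and the bogus module name are made up, and since `settings` is never consulted by these methods an empty dict suffices:

source = 'import os\nimport surely_not_a_real_module\n'
validator = Validator(source, '/tmp/example.py', settings={})
if not validator.is_valid():
    for message, lineno in validator.errors:
        print('line {0}: {1}'.format(lineno, message))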
3,247
Python
.py
85
25.8
88
0.513531
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,213
autopep_wrapper.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep_wrapper.py
# -*- coding: utf8 -*-

# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""
This file is a wrapper for autopep8 library.
"""

import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../linting'))

import threading
from .autopep8_lib import autopep8


class AnacondaAutopep8(threading.Thread):
    """Wrapper class around native autopep8 implementation
    """

    def __init__(self, settings, code, callback):
        threading.Thread.__init__(self)
        self.code = code
        self.callback = callback
        self.options, _ = autopep8.parse_args(self.parse_settings(settings))

    def run(self):
        self.callback(autopep8.fix_string(self.code, options=self.options))

    def parse_settings(self, settings):
        """Map anaconda settings to autopep8 settings
        """

        args = []
        args += ['-a'] * settings.get('aggressive', 0)

        if len(settings.get('autoformat_ignore', [])) > 0:
            args += ['--ignore={}'.format(
                ','.join(settings.get('autoformat_ignore')))]

        if len(settings.get('autoformat_select', [])) > 0:
            args += ['--select={}'.format(
                ','.join(settings.get('autoformat_select')))]

        args += ['--max-line-length={}'.format(
            settings.get('pep8_max_line_length', 79))]

        # autopep8's argument parser expects a positional filename; any
        # placeholder works here because the source code is handed over
        # directly through fix_string() rather than read from disk
        args += ['anaconda_rocks']
        return args
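A usage sketch for the wrapper (my addition; the settings keys are the ones `parse_settings()` reads, everything else here is illustrative). The callback fires on the worker thread once autopep8 has rewritten the code:

def on_formatted(text):
    # for the input below autopep8 should produce roughly 'x = 1\ny = 2\n'
    print(text)

worker = AnacondaAutopep8(
    settings={'aggressive': 1, 'pep8_max_line_length': 79},
    code='x=1;y=2\n',
    callback=on_formatted,
)
worker.start()
worker.join()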
1,459
Python
.py
36
33.555556
76
0.620739
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,214
__init__.py
DamnWidget_anaconda/anaconda_lib/autopep/__init__.py
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details from .autopep_wrapper import AnacondaAutopep8 __all__ = ['AnacondaAutopep8']
208
Python
.py
4
50
65
0.78
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,215
autopep8.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/autopep8.py
#!/usr/bin/env python # Copyright (C) 2010-2011 Hideo Hattori # Copyright (C) 2011-2013 Hideo Hattori, Steven Myint # Copyright (C) 2013-2016 Hideo Hattori, Steven Myint, Bill Wendling # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Automatically formats Python code to conform to the PEP 8 style guide. Fixes that only need be done once can be added by adding a function of the form "fix_<code>(source)" to this module. They should return the fixed source code. These fixes are picked up by apply_global_fixes(). Fixes that depend on pycodestyle should be added as methods to FixPEP8. See the class documentation for more information. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import codecs import collections import copy import difflib import fnmatch import inspect import io import keyword import locale import os import re import signal import sys import textwrap import token import tokenize import pycodestyle try: unicode except NameError: unicode = str __version__ = '1.3.2' CR = '\r' LF = '\n' CRLF = '\r\n' PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$') LAMBDA_REGEX = re.compile(r'([\w.]+)\s=\slambda\s*([\(\)\w,\s.]*):') COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+([^][)(}{]+)\s+(in|is)\s') COMPARE_NEGATIVE_REGEX_THROUGH = re.compile(r'\b(not\s+in)\s') BARE_EXCEPT_REGEX = re.compile(r'except\s*:') STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\s.*\):') # For generating line shortening candidates. SHORTEN_OPERATOR_GROUPS = frozenset([ frozenset([',']), frozenset(['%']), frozenset([',', '(', '[', '{']), frozenset(['%', '(', '[', '{']), frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']), frozenset(['%', '+', '-', '*', '/', '//']), ]) DEFAULT_IGNORE = 'E24,W503' DEFAULT_INDENT_SIZE = 4 # W602 is handled separately due to the need to avoid "with_traceback". 
CODE_TO_2TO3 = { 'E231': ['ws_comma'], 'E721': ['idioms'], 'W601': ['has_key'], 'W603': ['ne'], 'W604': ['repr'], 'W690': ['apply', 'except', 'exitfunc', 'numliterals', 'operator', 'paren', 'reduce', 'renames', 'standarderror', 'sys_exc', 'throw', 'tuple_params', 'xreadlines']} if sys.platform == 'win32': # pragma: no cover DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') else: DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), 'pep8') PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8') MAX_PYTHON_FILE_DETECTION_BYTES = 1024 def open_with_encoding(filename, encoding=None, mode='r', limit_byte_check=-1): """Return opened file with a specific encoding.""" if not encoding: encoding = detect_encoding(filename, limit_byte_check=limit_byte_check) return io.open(filename, mode=mode, encoding=encoding, newline='') # Preserve line endings def detect_encoding(filename, limit_byte_check=-1): """Return file encoding.""" try: with open(filename, 'rb') as input_file: from lib2to3.pgen2 import tokenize as lib2to3_tokenize encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0] with open_with_encoding(filename, encoding) as test_file: test_file.read(limit_byte_check) return encoding except (LookupError, SyntaxError, UnicodeDecodeError): return 'latin-1' def readlines_from_file(filename): """Return contents of file.""" with open_with_encoding(filename) as input_file: return input_file.readlines() def extended_blank_lines(logical_line, blank_lines, blank_before, indent_level, previous_logical): """Check for missing blank lines after class declaration.""" if previous_logical.startswith('def '): if blank_lines and pycodestyle.DOCSTRING_REGEX.match(logical_line): yield (0, 'E303 too many blank lines ({0})'.format(blank_lines)) elif pycodestyle.DOCSTRING_REGEX.match(previous_logical): # Missing blank line between class docstring and method declaration. if ( indent_level and not blank_lines and not blank_before and logical_line.startswith(('def ')) and '(self' in logical_line ): yield (0, 'E301 expected 1 blank line, found 0') pycodestyle.register_check(extended_blank_lines) def continued_indentation(logical_line, tokens, indent_level, indent_char, noqa): """Override pycodestyle's function to provide indentation information.""" first_row = tokens[0][2][0] nrows = 1 + tokens[-1][2][0] - first_row if noqa or nrows == 1: return # indent_next tells us whether the next block is indented. Assuming # that it is indented by 4 spaces, then we should not allow 4-space # indents on the final continuation line. In turn, some other # indents are allowed to have an extra 4 spaces. indent_next = logical_line.endswith(':') row = depth = 0 valid_hangs = ( (DEFAULT_INDENT_SIZE,) if indent_char != '\t' else (DEFAULT_INDENT_SIZE, 2 * DEFAULT_INDENT_SIZE) ) # Remember how many brackets were opened on each line. parens = [0] * nrows # Relative indents of physical lines. rel_indent = [0] * nrows # For each depth, collect a list of opening rows. open_rows = [[0]] # For each depth, memorize the hanging indentation. hangs = [None] # Visual indents. 
indent_chances = {} last_indent = tokens[0][2] indent = [last_indent[1]] last_token_multiline = None line = None last_line = '' last_line_begins_with_multiline = False for token_type, text, start, end, line in tokens: newline = row < start[0] - first_row if newline: row = start[0] - first_row newline = (not last_token_multiline and token_type not in (tokenize.NL, tokenize.NEWLINE)) last_line_begins_with_multiline = last_token_multiline if newline: # This is the beginning of a continuation line. last_indent = start # Record the initial indent. rel_indent[row] = pycodestyle.expand_indent(line) - indent_level # Identify closing bracket. close_bracket = (token_type == tokenize.OP and text in ']})') # Is the indent relative to an opening bracket line? for open_row in reversed(open_rows[depth]): hang = rel_indent[row] - rel_indent[open_row] hanging_indent = hang in valid_hangs if hanging_indent: break if hangs[depth]: hanging_indent = (hang == hangs[depth]) visual_indent = (not close_bracket and hang > 0 and indent_chances.get(start[1])) if close_bracket and indent[depth]: # Closing bracket for visual indent. if start[1] != indent[depth]: yield (start, 'E124 {0}'.format(indent[depth])) elif close_bracket and not hang: pass elif indent[depth] and start[1] < indent[depth]: # Visual indent is broken. yield (start, 'E128 {0}'.format(indent[depth])) elif (hanging_indent or (indent_next and rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)): # Hanging indent is verified. if close_bracket: yield (start, 'E123 {0}'.format(indent_level + rel_indent[open_row])) hangs[depth] = hang elif visual_indent is True: # Visual indent is verified. indent[depth] = start[1] elif visual_indent in (text, unicode): # Ignore token lined up with matching one from a previous line. pass else: one_indented = (indent_level + rel_indent[open_row] + DEFAULT_INDENT_SIZE) # Indent is broken. if hang <= 0: error = ('E122', one_indented) elif indent[depth]: error = ('E127', indent[depth]) elif not close_bracket and hangs[depth]: error = ('E131', one_indented) elif hang > DEFAULT_INDENT_SIZE: error = ('E126', one_indented) else: hangs[depth] = hang error = ('E121', one_indented) yield (start, '{0} {1}'.format(*error)) # Look for visual indenting. if ( parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) and not indent[depth] ): indent[depth] = start[1] indent_chances[start[1]] = True # Deal with implicit string concatenation. elif (token_type in (tokenize.STRING, tokenize.COMMENT) or text in ('u', 'ur', 'b', 'br')): indent_chances[start[1]] = unicode # Special case for the "if" statement because len("if (") is equal to # 4. elif not indent_chances and not row and not depth and text == 'if': indent_chances[end[1] + 1] = True elif text == ':' and line[end[1]:].isspace(): open_rows[depth].append(row) # Keep track of bracket depth. if token_type == tokenize.OP: if text in '([{': depth += 1 indent.append(0) hangs.append(None) if len(open_rows) == depth: open_rows.append([]) open_rows[depth].append(row) parens[row] += 1 elif text in ')]}' and depth > 0: # Parent indents should not be more than this one. 
prev_indent = indent.pop() or last_indent[1] hangs.pop() for d in range(depth): if indent[d] > prev_indent: indent[d] = 0 for ind in list(indent_chances): if ind >= prev_indent: del indent_chances[ind] del open_rows[depth + 1:] depth -= 1 if depth: indent_chances[indent[depth]] = True for idx in range(row, -1, -1): if parens[idx]: parens[idx] -= 1 break assert len(indent) == depth + 1 if ( start[1] not in indent_chances and # This is for purposes of speeding up E121 (GitHub #90). not last_line.rstrip().endswith(',') ): # Allow to line up tokens. indent_chances[start[1]] = text last_token_multiline = (start[0] != end[0]) if last_token_multiline: rel_indent[end[0] - first_row] = rel_indent[row] last_line = line if ( indent_next and not last_line_begins_with_multiline and pycodestyle.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE ): pos = (start[0], indent[0] + 4) desired_indent = indent_level + 2 * DEFAULT_INDENT_SIZE if visual_indent: yield (pos, 'E129 {0}'.format(desired_indent)) else: yield (pos, 'E125 {0}'.format(desired_indent)) del pycodestyle._checks['logical_line'][pycodestyle.continued_indentation] pycodestyle.register_check(continued_indentation) class FixPEP8(object): """Fix invalid code. Fixer methods are prefixed "fix_". The _fix_source() method looks for these automatically. The fixer method can take either one or two arguments (in addition to self). The first argument is "result", which is the error information from pycodestyle. The second argument, "logical", is required only for logical-line fixes. The fixer method can return the list of modified lines or None. An empty list would mean that no changes were made. None would mean that only the line reported in the pycodestyle error was modified. Note that the modified line numbers that are returned are indexed at 1. This typically would correspond with the line number reported in the pycodestyle error information. [fixed method list] - e111,e114,e115,e116 - e121,e122,e123,e124,e125,e126,e127,e128,e129 - e201,e202,e203 - e211 - e221,e222,e223,e224,e225 - e231 - e251 - e261,e262 - e271,e272,e273,e274 - e301,e302,e303,e304,e306 - e401 - e502 - e701,e702,e703,e704 - e711,e712,e713,e714 - e722 - e731 - w291 - w503 """ def __init__(self, filename, options, contents=None, long_line_ignore_cache=None): self.filename = filename if contents is None: self.source = readlines_from_file(filename) else: sio = io.StringIO(contents) self.source = sio.readlines() self.options = options self.indent_word = _get_indentword(''.join(self.source)) self.long_line_ignore_cache = ( set() if long_line_ignore_cache is None else long_line_ignore_cache) # Many fixers are the same even though pycodestyle categorizes them # differently. 
self.fix_e115 = self.fix_e112 self.fix_e116 = self.fix_e113 self.fix_e121 = self._fix_reindent self.fix_e122 = self._fix_reindent self.fix_e123 = self._fix_reindent self.fix_e124 = self._fix_reindent self.fix_e126 = self._fix_reindent self.fix_e127 = self._fix_reindent self.fix_e128 = self._fix_reindent self.fix_e129 = self._fix_reindent self.fix_e202 = self.fix_e201 self.fix_e203 = self.fix_e201 self.fix_e211 = self.fix_e201 self.fix_e221 = self.fix_e271 self.fix_e222 = self.fix_e271 self.fix_e223 = self.fix_e271 self.fix_e226 = self.fix_e225 self.fix_e227 = self.fix_e225 self.fix_e228 = self.fix_e225 self.fix_e241 = self.fix_e271 self.fix_e242 = self.fix_e224 self.fix_e261 = self.fix_e262 self.fix_e272 = self.fix_e271 self.fix_e273 = self.fix_e271 self.fix_e274 = self.fix_e271 self.fix_e306 = self.fix_e301 self.fix_e501 = ( self.fix_long_line_logically if options and (options.aggressive >= 2 or options.experimental) else self.fix_long_line_physically) self.fix_e703 = self.fix_e702 self.fix_w293 = self.fix_w291 def _fix_source(self, results): try: (logical_start, logical_end) = _find_logical(self.source) logical_support = True except (SyntaxError, tokenize.TokenError): # pragma: no cover logical_support = False completed_lines = set() for result in sorted(results, key=_priority_key): if result['line'] in completed_lines: continue fixed_methodname = 'fix_' + result['id'].lower() if hasattr(self, fixed_methodname): fix = getattr(self, fixed_methodname) line_index = result['line'] - 1 original_line = self.source[line_index] is_logical_fix = len(_get_parameters(fix)) > 2 if is_logical_fix: logical = None if logical_support: logical = _get_logical(self.source, result, logical_start, logical_end) if logical and set(range( logical[0][0] + 1, logical[1][0] + 1)).intersection( completed_lines): continue modified_lines = fix(result, logical) else: modified_lines = fix(result) if modified_lines is None: # Force logical fixes to report what they modified. assert not is_logical_fix if self.source[line_index] == original_line: modified_lines = [] if modified_lines: completed_lines.update(modified_lines) elif modified_lines == []: # Empty list means no fix if self.options.verbose >= 2: print( '---> Not fixing {error} on line {line}'.format( error=result['id'], line=result['line']), file=sys.stderr) else: # We assume one-line fix when None. completed_lines.add(result['line']) else: if self.options.verbose >= 3: print( "---> '{0}' is not defined.".format(fixed_methodname), file=sys.stderr) info = result['info'].strip() print('---> {0}:{1}:{2}:{3}'.format(self.filename, result['line'], result['column'], info), file=sys.stderr) def fix(self): """Return a version of the source code with PEP 8 violations fixed.""" pep8_options = { 'ignore': self.options.ignore, 'select': self.options.select, 'max_line_length': self.options.max_line_length, } results = _execute_pep8(pep8_options, self.source) if self.options.verbose: progress = {} for r in results: if r['id'] not in progress: progress[r['id']] = set() progress[r['id']].add(r['line']) print('---> {n} issue(s) to fix {progress}'.format( n=len(results), progress=progress), file=sys.stderr) if self.options.line_range: start, end = self.options.line_range results = [r for r in results if start <= r['line'] <= end] self._fix_source(filter_results(source=''.join(self.source), results=results, aggressive=self.options.aggressive)) if self.options.line_range: # If number of lines has changed then change line_range. 
count = sum(sline.count('\n') for sline in self.source[start - 1:end]) self.options.line_range[1] = start + count - 1 return ''.join(self.source) def _fix_reindent(self, result): """Fix a badly indented line. This is done by adding or removing from its initial indent only. """ num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] self.source[line_index] = ' ' * num_indent_spaces + target.lstrip() def fix_e112(self, result): """Fix under-indented comments.""" line_index = result['line'] - 1 target = self.source[line_index] if not target.lstrip().startswith('#'): # Don't screw with invalid syntax. return [] self.source[line_index] = self.indent_word + target def fix_e113(self, result): """Fix over-indented comments.""" line_index = result['line'] - 1 target = self.source[line_index] indent = _get_indentation(target) stripped = target.lstrip() if not stripped.startswith('#'): # Don't screw with invalid syntax. return [] self.source[line_index] = indent[1:] + stripped def fix_e125(self, result): """Fix indentation undistinguish from the next logical line.""" num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] spaces_to_add = num_indent_spaces - len(_get_indentation(target)) indent = len(_get_indentation(target)) modified_lines = [] while len(_get_indentation(self.source[line_index])) >= indent: self.source[line_index] = (' ' * spaces_to_add + self.source[line_index]) modified_lines.append(1 + line_index) # Line indexed at 1. line_index -= 1 return modified_lines def fix_e131(self, result): """Fix indentation undistinguish from the next logical line.""" num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] spaces_to_add = num_indent_spaces - len(_get_indentation(target)) if spaces_to_add >= 0: self.source[line_index] = (' ' * spaces_to_add + self.source[line_index]) else: offset = abs(spaces_to_add) self.source[line_index] = self.source[line_index][offset:] def fix_e201(self, result): """Remove extraneous whitespace.""" line_index = result['line'] - 1 target = self.source[line_index] offset = result['column'] - 1 fixed = fix_whitespace(target, offset=offset, replacement='') self.source[line_index] = fixed def fix_e224(self, result): """Remove extraneous whitespace around operator.""" target = self.source[result['line'] - 1] offset = result['column'] - 1 fixed = target[:offset] + target[offset:].replace('\t', ' ') self.source[result['line'] - 1] = fixed def fix_e225(self, result): """Fix missing whitespace around operator.""" target = self.source[result['line'] - 1] offset = result['column'] - 1 fixed = target[:offset] + ' ' + target[offset:] # Only proceed if non-whitespace characters match. # And make sure we don't break the indentation. 
if ( fixed.replace(' ', '') == target.replace(' ', '') and _get_indentation(fixed) == _get_indentation(target) ): self.source[result['line'] - 1] = fixed error_code = result.get('id', 0) try: ts = generate_tokens(fixed) except tokenize.TokenError: return if not check_syntax(fixed.lstrip()): return errors = list( pycodestyle.missing_whitespace_around_operator(fixed, ts)) for e in reversed(errors): if error_code != e[1].split()[0]: continue offset = e[0][1] fixed = fixed[:offset] + ' ' + fixed[offset:] self.source[result['line'] - 1] = fixed else: return [] def fix_e231(self, result): """Add missing whitespace.""" line_index = result['line'] - 1 target = self.source[line_index] offset = result['column'] fixed = target[:offset].rstrip() + ' ' + target[offset:].lstrip() self.source[line_index] = fixed def fix_e251(self, result): """Remove whitespace around parameter '=' sign.""" line_index = result['line'] - 1 target = self.source[line_index] # This is necessary since pycodestyle sometimes reports columns that # goes past the end of the physical line. This happens in cases like, # foo(bar\n=None) c = min(result['column'] - 1, len(target) - 1) if target[c].strip(): fixed = target else: fixed = target[:c].rstrip() + target[c:].lstrip() # There could be an escaped newline # # def foo(a=\ # 1) if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')): self.source[line_index] = fixed.rstrip('\n\r \t\\') self.source[line_index + 1] = self.source[line_index + 1].lstrip() return [line_index + 1, line_index + 2] # Line indexed at 1 self.source[result['line'] - 1] = fixed def fix_e262(self, result): """Fix spacing after comment hash.""" target = self.source[result['line'] - 1] offset = result['column'] code = target[:offset].rstrip(' \t#') comment = target[offset:].lstrip(' \t#') fixed = code + (' # ' + comment if comment.strip() else '\n') self.source[result['line'] - 1] = fixed def fix_e271(self, result): """Fix extraneous whitespace around keywords.""" line_index = result['line'] - 1 target = self.source[line_index] offset = result['column'] - 1 fixed = fix_whitespace(target, offset=offset, replacement=' ') if fixed == target: return [] else: self.source[line_index] = fixed def fix_e301(self, result): """Add missing blank line.""" cr = '\n' self.source[result['line'] - 1] = cr + self.source[result['line'] - 1] def fix_e302(self, result): """Add missing 2 blank lines.""" add_linenum = 2 - int(result['info'].split()[-1]) cr = '\n' * add_linenum self.source[result['line'] - 1] = cr + self.source[result['line'] - 1] def fix_e303(self, result): """Remove extra blank lines.""" delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2 delete_linenum = max(1, delete_linenum) # We need to count because pycodestyle reports an offset line number if # there are comments. 
cnt = 0 line = result['line'] - 2 modified_lines = [] while cnt < delete_linenum and line >= 0: if not self.source[line].strip(): self.source[line] = '' modified_lines.append(1 + line) # Line indexed at 1 cnt += 1 line -= 1 return modified_lines def fix_e304(self, result): """Remove blank line following function decorator.""" line = result['line'] - 2 if not self.source[line].strip(): self.source[line] = '' def fix_e305(self, result): """Add missing 2 blank lines after end of function or class.""" cr = '\n' # check comment line offset = result['line'] - 2 while True: if offset < 0: break line = self.source[offset].lstrip() if not line: break if line[0] != '#': break offset -= 1 offset += 1 self.source[offset] = cr + self.source[offset] def fix_e401(self, result): """Put imports on separate lines.""" line_index = result['line'] - 1 target = self.source[line_index] offset = result['column'] - 1 if not target.lstrip().startswith('import'): return [] indentation = re.split(pattern=r'\bimport\b', string=target, maxsplit=1)[0] fixed = (target[:offset].rstrip('\t ,') + '\n' + indentation + 'import ' + target[offset:].lstrip('\t ,')) self.source[line_index] = fixed def fix_long_line_logically(self, result, logical): """Try to make lines fit within --max-line-length characters.""" if ( not logical or len(logical[2]) == 1 or self.source[result['line'] - 1].lstrip().startswith('#') ): return self.fix_long_line_physically(result) start_line_index = logical[0][0] end_line_index = logical[1][0] logical_lines = logical[2] previous_line = get_item(self.source, start_line_index - 1, default='') next_line = get_item(self.source, end_line_index + 1, default='') single_line = join_logical_line(''.join(logical_lines)) try: fixed = self.fix_long_line( target=single_line, previous_line=previous_line, next_line=next_line, original=''.join(logical_lines)) except (SyntaxError, tokenize.TokenError): return self.fix_long_line_physically(result) if fixed: for line_index in range(start_line_index, end_line_index + 1): self.source[line_index] = '' self.source[start_line_index] = fixed return range(start_line_index + 1, end_line_index + 1) return [] def fix_long_line_physically(self, result): """Try to make lines fit within --max-line-length characters.""" line_index = result['line'] - 1 target = self.source[line_index] previous_line = get_item(self.source, line_index - 1, default='') next_line = get_item(self.source, line_index + 1, default='') try: fixed = self.fix_long_line( target=target, previous_line=previous_line, next_line=next_line, original=target) except (SyntaxError, tokenize.TokenError): return [] if fixed: self.source[line_index] = fixed return [line_index + 1] return [] def fix_long_line(self, target, previous_line, next_line, original): cache_entry = (target, previous_line, next_line) if cache_entry in self.long_line_ignore_cache: return [] if target.lstrip().startswith('#'): if self.options.aggressive: # Wrap commented lines. 
return shorten_comment( line=target, max_line_length=self.options.max_line_length, last_comment=not next_line.lstrip().startswith('#')) else: return [] fixed = get_fixed_long_line( target=target, previous_line=previous_line, original=original, indent_word=self.indent_word, max_line_length=self.options.max_line_length, aggressive=self.options.aggressive, experimental=self.options.experimental, verbose=self.options.verbose) if fixed and not code_almost_equal(original, fixed): return fixed self.long_line_ignore_cache.add(cache_entry) return None def fix_e502(self, result): """Remove extraneous escape of newline.""" (line_index, _, target) = get_index_offset_contents(result, self.source) self.source[line_index] = target.rstrip('\n\r \t\\') + '\n' def fix_e701(self, result): """Put colon-separated compound statement on separate lines.""" line_index = result['line'] - 1 target = self.source[line_index] c = result['column'] fixed_source = (target[:c] + '\n' + _get_indentation(target) + self.indent_word + target[c:].lstrip('\n\r \t\\')) self.source[result['line'] - 1] = fixed_source return [result['line'], result['line'] + 1] def fix_e702(self, result, logical): """Put semicolon-separated compound statement on separate lines.""" if not logical: return [] # pragma: no cover logical_lines = logical[2] # Avoid applying this when indented. # https://docs.python.org/reference/compound_stmts.html for line in logical_lines: if ':' in line: return [] line_index = result['line'] - 1 target = self.source[line_index] if target.rstrip().endswith('\\'): # Normalize '1; \\\n2' into '1; 2'. self.source[line_index] = target.rstrip('\n \r\t\\') self.source[line_index + 1] = self.source[line_index + 1].lstrip() return [line_index + 1, line_index + 2] if target.rstrip().endswith(';'): self.source[line_index] = target.rstrip('\n \r\t;') + '\n' return [line_index + 1] offset = result['column'] - 1 first = target[:offset].rstrip(';').rstrip() second = (_get_indentation(logical_lines[0]) + target[offset:].lstrip(';').lstrip()) # Find inline comment. inline_comment = None if target[offset:].lstrip(';').lstrip()[:2] == '# ': inline_comment = target[offset:].lstrip(';') if inline_comment: self.source[line_index] = first + inline_comment else: self.source[line_index] = first + '\n' + second return [line_index + 1] def fix_e704(self, result): """Fix multiple statements on one line def""" (line_index, _, target) = get_index_offset_contents(result, self.source) match = STARTSWITH_DEF_REGEX.match(target) if match: self.source[line_index] = '{0}\n{1}{2}'.format( match.group(0), _get_indentation(target) + self.indent_word, target[match.end(0):].lstrip()) def fix_e711(self, result): """Fix comparison with None.""" (line_index, offset, target) = get_index_offset_contents(result, self.source) right_offset = offset + 2 if right_offset >= len(target): return [] left = target[:offset].rstrip() center = target[offset:right_offset] right = target[right_offset:].lstrip() if not right.startswith('None'): return [] if center.strip() == '==': new_center = 'is' elif center.strip() == '!=': new_center = 'is not' else: return [] self.source[line_index] = ' '.join([left, new_center, right]) def fix_e712(self, result): """Fix (trivial case of) comparison with boolean.""" (line_index, offset, target) = get_index_offset_contents(result, self.source) # Handle very easy "not" special cases. 
if re.match(r'^\s*if [\w.]+ == False:$', target): self.source[line_index] = re.sub(r'if ([\w.]+) == False:', r'if not \1:', target, count=1) elif re.match(r'^\s*if [\w.]+ != True:$', target): self.source[line_index] = re.sub(r'if ([\w.]+) != True:', r'if not \1:', target, count=1) else: right_offset = offset + 2 if right_offset >= len(target): return [] left = target[:offset].rstrip() center = target[offset:right_offset] right = target[right_offset:].lstrip() # Handle simple cases only. new_right = None if center.strip() == '==': if re.match(r'\bTrue\b', right): new_right = re.sub(r'\bTrue\b *', '', right, count=1) elif center.strip() == '!=': if re.match(r'\bFalse\b', right): new_right = re.sub(r'\bFalse\b *', '', right, count=1) if new_right is None: return [] if new_right[0].isalnum(): new_right = ' ' + new_right self.source[line_index] = left + new_right def fix_e713(self, result): """Fix (trivial case of) non-membership check.""" (line_index, offset, target) = get_index_offset_contents(result, self.source) # to convert once 'not in' -> 'in' before_target = target[:offset] target = target[offset:] match_notin = COMPARE_NEGATIVE_REGEX_THROUGH.search(target) notin_pos_start, notin_pos_end = 0, 0 if match_notin: notin_pos_start = match_notin.start(1) notin_pos_end = match_notin.end() target = '{0}{1} {2}'.format( target[:notin_pos_start], 'in', target[notin_pos_end:]) # fix 'not in' match = COMPARE_NEGATIVE_REGEX.search(target) if match: if match.group(3) == 'in': pos_start = match.start(1) new_target = '{5}{0}{1} {2} {3} {4}'.format( target[:pos_start], match.group(2), match.group(1), match.group(3), target[match.end():], before_target) if match_notin: # revert 'in' -> 'not in' pos_start = notin_pos_start + offset pos_end = notin_pos_end + offset - 4 # len('not ') new_target = '{0}{1} {2}'.format( new_target[:pos_start], 'not in', new_target[pos_end:]) self.source[line_index] = new_target def fix_e714(self, result): """Fix object identity should be 'is not' case.""" (line_index, _, target) = get_index_offset_contents(result, self.source) match = COMPARE_NEGATIVE_REGEX.search(target) if match: if match.group(3) == 'is': pos_start = match.start(1) self.source[line_index] = '{0}{1} {2} {3} {4}'.format( target[:pos_start], match.group(2), match.group(3), match.group(1), target[match.end():]) def fix_e722(self, result): """fix bare except""" (line_index, _, target) = get_index_offset_contents(result, self.source) match = BARE_EXCEPT_REGEX.search(target) if match: self.source[line_index] = '{0}{1}{2}'.format( target[:result['column'] - 1], "except BaseException:", target[match.end():]) def fix_e731(self, result): """Fix do not assign a lambda expression check.""" (line_index, _, target) = get_index_offset_contents(result, self.source) match = LAMBDA_REGEX.search(target) if match: end = match.end() self.source[line_index] = '{0}def {1}({2}): return {3}'.format( target[:match.start(0)], match.group(1), match.group(2), target[end:].lstrip()) def fix_w291(self, result): """Remove trailing whitespace.""" fixed_line = self.source[result['line'] - 1].rstrip() self.source[result['line'] - 1] = fixed_line + '\n' def fix_w391(self, _): """Remove trailing blank lines.""" blank_count = 0 for line in reversed(self.source): line = line.rstrip() if line: break else: blank_count += 1 original_length = len(self.source) self.source = self.source[:original_length - blank_count] return range(1, 1 + original_length) def fix_w503(self, result): (line_index, _, target) = get_index_offset_contents(result, 
                                                            self.source)
        one_string_token = target.split()[0]
        try:
            ts = generate_tokens(one_string_token)
        except tokenize.TokenError:
            return
        if not _is_binary_operator(ts[0][0], one_string_token):
            return
        # Find the column of any trailing comment on the previous line(s),
        # since the moved operator must be placed before it.
        comment_index = None
        for i in range(5):
            # NOTE: try up to 5 times, widening the window of source lines
            # backwards until the code tokenizes cleanly.
            if (line_index - i) < 0:
                break
            from_index = line_index - i - 1
            to_index = line_index + 1
            try:
                ts = generate_tokens("".join(self.source[from_index:to_index]))
            except Exception:
                continue
            newline_count = 0
            newline_index = []
            for idx, t in enumerate(ts):
                if t[0] in (tokenize.NEWLINE, tokenize.NL):
                    newline_index.append(idx)
                    newline_count += 1
            if newline_count > 2:
                tts = ts[newline_index[-3]:]
            else:
                tts = ts
            old = None
            for t in tts:
                if tokenize.COMMENT == t[0]:
                    if old is None:
                        comment_index = 0
                    else:
                        comment_index = old[3][1]
                    break
                old = t
            break
        i = target.index(one_string_token)
        self.source[line_index] = '{0}{1}'.format(
            target[:i], target[i + len(one_string_token):])
        nl = find_newline(self.source[line_index - 1:line_index])
        before_line = self.source[line_index - 1]
        bl = before_line.index(nl)
        if comment_index:
            self.source[line_index - 1] = '{0} {1} {2}'.format(
                before_line[:comment_index],
                one_string_token,
                before_line[comment_index + 1:])
        else:
            self.source[line_index - 1] = '{0} {1}{2}'.format(
                before_line[:bl],
                one_string_token,
                before_line[bl:])


def get_index_offset_contents(result, source):
    """Return (line_index, column_offset, line_contents)."""
    line_index = result['line'] - 1
    return (line_index, result['column'] - 1, source[line_index])


def get_fixed_long_line(target, previous_line, original,
                        indent_word='    ', max_line_length=79,
                        aggressive=False, experimental=False, verbose=False):
    """Break up long line and return result.

    Do this by generating multiple reformatted candidates and then
    ranking the candidates to heuristically select the best option.

    """
    indent = _get_indentation(target)
    source = target[len(indent):]
    assert source.lstrip() == source
    assert not target.lstrip().startswith('#')

    # Check for partial multiline.
    tokens = list(generate_tokens(source))

    candidates = shorten_line(
        tokens, source, indent,
        indent_word,
        max_line_length,
        aggressive=aggressive,
        experimental=experimental,
        previous_line=previous_line)

    # Also sort alphabetically as a tie breaker (for determinism).
    candidates = sorted(
        sorted(set(candidates).union([target, original])),
        key=lambda x: line_shortening_rank(
            x,
            indent_word,
            max_line_length,
            experimental=experimental))

    if verbose >= 4:
        print(('-' * 79 + '\n').join([''] + candidates + ['']),
              file=wrap_output(sys.stderr, 'utf-8'))

    if candidates:
        best_candidate = candidates[0]

        # Don't allow things to get longer.
if longest_line_length(best_candidate) > longest_line_length(original): return None return best_candidate def longest_line_length(code): """Return length of longest line.""" return max(len(line) for line in code.splitlines()) def join_logical_line(logical_line): """Return single line based on logical line input.""" indentation = _get_indentation(logical_line) return indentation + untokenize_without_newlines( generate_tokens(logical_line.lstrip())) + '\n' def untokenize_without_newlines(tokens): """Return source code based on tokens.""" text = '' last_row = 0 last_column = -1 for t in tokens: token_string = t[1] (start_row, start_column) = t[2] (end_row, end_column) = t[3] if start_row > last_row: last_column = 0 if ( (start_column > last_column or token_string == '\n') and not text.endswith(' ') ): text += ' ' if token_string != '\n': text += token_string last_row = end_row last_column = end_column return text.rstrip() def _find_logical(source_lines): # Make a variable which is the index of all the starts of lines. logical_start = [] logical_end = [] last_newline = True parens = 0 for t in generate_tokens(''.join(source_lines)): if t[0] in [tokenize.COMMENT, tokenize.DEDENT, tokenize.INDENT, tokenize.NL, tokenize.ENDMARKER]: continue if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]: last_newline = True logical_end.append((t[3][0] - 1, t[2][1])) continue if last_newline and not parens: logical_start.append((t[2][0] - 1, t[2][1])) last_newline = False if t[0] == tokenize.OP: if t[1] in '([{': parens += 1 elif t[1] in '}])': parens -= 1 return (logical_start, logical_end) def _get_logical(source_lines, result, logical_start, logical_end): """Return the logical line corresponding to the result. Assumes input is already E702-clean. """ row = result['line'] - 1 col = result['column'] - 1 ls = None le = None for i in range(0, len(logical_start), 1): assert logical_end x = logical_end[i] if x[0] > row or (x[0] == row and x[1] > col): le = x ls = logical_start[i] break if ls is None: return None original = source_lines[ls[0]:le[0] + 1] return ls, le, original def get_item(items, index, default=None): if 0 <= index < len(items): return items[index] return default def reindent(source, indent_size): """Reindent all lines.""" reindenter = Reindenter(source) return reindenter.run(indent_size) def code_almost_equal(a, b): """Return True if code is similar. Ignore whitespace when comparing specific line. """ split_a = split_and_strip_non_empty_lines(a) split_b = split_and_strip_non_empty_lines(b) if len(split_a) != len(split_b): return False for (index, _) in enumerate(split_a): if ''.join(split_a[index].split()) != ''.join(split_b[index].split()): return False return True def split_and_strip_non_empty_lines(text): """Return lines split by newline. Ignore empty lines. """ return [line.strip() for line in text.splitlines() if line.strip()] def fix_e265(source, aggressive=False): # pylint: disable=unused-argument """Format block comments.""" if '#' not in source: # Optimization. return source ignored_line_numbers = multiline_string_lines( source, include_docstrings=True) | set(commented_out_code_lines(source)) fixed_lines = [] sio = io.StringIO(source) for (line_number, line) in enumerate(sio.readlines(), start=1): if ( line.lstrip().startswith('#') and line_number not in ignored_line_numbers and not pycodestyle.noqa(line) ): indentation = _get_indentation(line) line = line.lstrip() # Normalize beginning if not a shebang. 
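            # Illustrative example (hypothetical input): "#comment" becomes
            # "# comment", while a "#!" shebang is left untouched.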
if len(line) > 1: pos = next((index for index, c in enumerate(line) if c != '#')) if ( # Leave multiple spaces like '# ' alone. (line[:pos].count('#') > 1 or line[1].isalnum()) and # Leave stylistic outlined blocks alone. not line.rstrip().endswith('#') ): line = '# ' + line.lstrip('# \t') fixed_lines.append(indentation + line) else: fixed_lines.append(line) return ''.join(fixed_lines) def refactor(source, fixer_names, ignore=None, filename=''): """Return refactored code using lib2to3. Skip if ignore string is produced in the refactored code. """ from lib2to3 import pgen2 try: new_text = refactor_with_2to3(source, fixer_names=fixer_names, filename=filename) except (pgen2.parse.ParseError, SyntaxError, UnicodeDecodeError, UnicodeEncodeError): return source if ignore: if ignore in new_text and ignore not in source: return source return new_text def code_to_2to3(select, ignore): fixes = set() for code, fix in CODE_TO_2TO3.items(): if code_match(code, select=select, ignore=ignore): fixes |= set(fix) return fixes def fix_2to3(source, aggressive=True, select=None, ignore=None, filename=''): """Fix various deprecated code (via lib2to3).""" if not aggressive: return source select = select or [] ignore = ignore or [] return refactor(source, code_to_2to3(select=select, ignore=ignore), filename=filename) def fix_w602(source, aggressive=True): """Fix deprecated form of raising exception.""" if not aggressive: return source return refactor(source, ['raise'], ignore='with_traceback') def find_newline(source): """Return type of newline used in source. Input is a list of lines. """ assert not isinstance(source, unicode) counter = collections.defaultdict(int) for line in source: if line.endswith(CRLF): counter[CRLF] += 1 elif line.endswith(CR): counter[CR] += 1 elif line.endswith(LF): counter[LF] += 1 return (sorted(counter, key=counter.get, reverse=True) or [LF])[0] def _get_indentword(source): """Return indentation type.""" indent_word = ' ' # Default in case source has no indentation try: for t in generate_tokens(source): if t[0] == token.INDENT: indent_word = t[1] break except (SyntaxError, tokenize.TokenError): pass return indent_word def _get_indentation(line): """Return leading whitespace.""" if line.strip(): non_whitespace_index = len(line) - len(line.lstrip()) return line[:non_whitespace_index] return '' def get_diff_text(old, new, filename): """Return text of unified diff between old and new.""" newline = '\n' diff = difflib.unified_diff( old, new, 'original/' + filename, 'fixed/' + filename, lineterm=newline) text = '' for line in diff: text += line # Work around missing newline (http://bugs.python.org/issue2142). if text and not line.endswith(newline): text += newline + r'\ No newline at end of file' + newline return text def _priority_key(pep8_result): """Key for sorting PEP8 results. Global fixes should be done first. This is important for things like indentation. """ priority = [ # Fix multiline colon-based before semicolon based. 'e701', # Break multiline statements early. 'e702', # Things that make lines longer. 'e225', 'e231', # Remove extraneous whitespace before breaking lines. 'e201', # Shorten whitespace in comment before resorting to wrapping. 'e262' ] middle_index = 10000 lowest_priority = [ # We need to shorten lines last since the logical fixer can get in a # loop, which causes us to exit early. 
        'e501', 'w503'
    ]
    key = pep8_result['id'].lower()
    try:
        return priority.index(key)
    except ValueError:
        try:
            return middle_index + lowest_priority.index(key) + 1
        except ValueError:
            return middle_index


def shorten_line(tokens, source, indentation, indent_word, max_line_length,
                 aggressive=False, experimental=False, previous_line=''):
    """Separate line at OPERATOR.

    Multiple candidates will be yielded.

    """
    for candidate in _shorten_line(tokens=tokens,
                                   source=source,
                                   indentation=indentation,
                                   indent_word=indent_word,
                                   aggressive=aggressive,
                                   previous_line=previous_line):
        yield candidate

    if aggressive:
        for key_token_strings in SHORTEN_OPERATOR_GROUPS:
            shortened = _shorten_line_at_tokens(
                tokens=tokens,
                source=source,
                indentation=indentation,
                indent_word=indent_word,
                key_token_strings=key_token_strings,
                aggressive=aggressive)

            if shortened is not None and shortened != source:
                yield shortened

    if experimental:
        for shortened in _shorten_line_at_tokens_new(
                tokens=tokens,
                source=source,
                indentation=indentation,
                max_line_length=max_line_length):
            yield shortened


def _shorten_line(tokens, source, indentation, indent_word,
                  aggressive=False, previous_line=''):
    """Separate line at OPERATOR.

    The input is expected to be free of newlines except for inside
    multiline strings and at the end.

    Multiple candidates will be yielded.

    """
    for (token_type,
         token_string,
         start_offset,
         end_offset) in token_offsets(tokens):
        if (
            token_type == tokenize.COMMENT and
            not is_probably_part_of_multiline(previous_line) and
            not is_probably_part_of_multiline(source) and
            not source[start_offset + 1:].strip().lower().startswith(
                ('noqa', 'pragma:', 'pylint:'))
        ):
            # Move inline comments to previous line.
            first = source[:start_offset]
            second = source[start_offset:]
            yield (indentation + second.strip() + '\n' +
                   indentation + first.strip() + '\n')
        elif token_type == token.OP and token_string != '=':
            # Don't break on '=' after keyword as this violates PEP 8.

            assert token_type != token.INDENT

            first = source[:end_offset]

            second_indent = indentation
            if first.rstrip().endswith('('):
                second_indent += indent_word
            elif '(' in first:
                second_indent += ' ' * (1 + first.find('('))
            else:
                second_indent += indent_word

            second = (second_indent + source[end_offset:].lstrip())
            if (
                not second.strip() or
                second.lstrip().startswith('#')
            ):
                continue

            # Do not begin a line with a comma.
            if second.lstrip().startswith(','):
                continue
            # Do not end a line with a dot.
            if first.rstrip().endswith('.'):
                continue
            if token_string in '+-*/':
                fixed = first + ' \\' + '\n' + second
            else:
                fixed = first + '\n' + second

            # Only fix if syntax is okay.
            if check_syntax(normalize_multiline(fixed)
                            if aggressive else fixed):
                yield indentation + fixed


def _is_binary_operator(token_type, text):
    return ((token_type == tokenize.OP or text in ['and', 'or']) and
            text not in '()[]{},:.;@=%~')


# A convenient way to handle tokens.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
                                         'spos', 'epos', 'line'])


class ReformattedLines(object):

    """The reflowed lines of atoms.

    Each part of the line is represented as an "atom." They can be moved
    around when need be to get the optimal formatting.
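
    For example (an illustrative sketch), "foo(1, 2)" is held as the atoms
    'foo', '(', '1', ',', '2', ')' interleaved with _Space, _Indent, and
    _LineBreak markers that emit() later joins back into text.
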
""" ########################################################################### # Private Classes class _Indent(object): """Represent an indentation in the atom stream.""" def __init__(self, indent_amt): self._indent_amt = indent_amt def emit(self): return ' ' * self._indent_amt @property def size(self): return self._indent_amt class _Space(object): """Represent a space in the atom stream.""" def emit(self): return ' ' @property def size(self): return 1 class _LineBreak(object): """Represent a line break in the atom stream.""" def emit(self): return '\n' @property def size(self): return 0 def __init__(self, max_line_length): self._max_line_length = max_line_length self._lines = [] self._bracket_depth = 0 self._prev_item = None self._prev_prev_item = None def __repr__(self): return self.emit() ########################################################################### # Public Methods def add(self, obj, indent_amt, break_after_open_bracket): if isinstance(obj, Atom): self._add_item(obj, indent_amt) return self._add_container(obj, indent_amt, break_after_open_bracket) def add_comment(self, item): num_spaces = 2 if len(self._lines) > 1: if isinstance(self._lines[-1], self._Space): num_spaces -= 1 if len(self._lines) > 2: if isinstance(self._lines[-2], self._Space): num_spaces -= 1 while num_spaces > 0: self._lines.append(self._Space()) num_spaces -= 1 self._lines.append(item) def add_indent(self, indent_amt): self._lines.append(self._Indent(indent_amt)) def add_line_break(self, indent): self._lines.append(self._LineBreak()) self.add_indent(len(indent)) def add_line_break_at(self, index, indent_amt): self._lines.insert(index, self._LineBreak()) self._lines.insert(index + 1, self._Indent(indent_amt)) def add_space_if_needed(self, curr_text, equal=False): if ( not self._lines or isinstance( self._lines[-1], (self._LineBreak, self._Indent, self._Space)) ): return prev_text = unicode(self._prev_item) prev_prev_text = ( unicode(self._prev_prev_item) if self._prev_prev_item else '') if ( # The previous item was a keyword or identifier and the current # item isn't an operator that doesn't require a space. ((self._prev_item.is_keyword or self._prev_item.is_string or self._prev_item.is_name or self._prev_item.is_number) and (curr_text[0] not in '([{.,:}])' or (curr_text[0] == '=' and equal))) or # Don't place spaces around a '.', unless it's in an 'import' # statement. ((prev_prev_text != 'from' and prev_text[-1] != '.' and curr_text != 'import') and # Don't place a space before a colon. curr_text[0] != ':' and # Don't split up ending brackets by spaces. ((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or # Put a space after a colon or comma. prev_text[-1] in ':,' or # Put space around '=' if asked to. (equal and prev_text == '=') or # Put spaces around non-unary arithmetic operators. 
((self._prev_prev_item and (prev_text not in '+-' and (self._prev_prev_item.is_name or self._prev_prev_item.is_number or self._prev_prev_item.is_string)) and prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in'))))) ): self._lines.append(self._Space()) def previous_item(self): """Return the previous non-whitespace item.""" return self._prev_item def fits_on_current_line(self, item_extent): return self.current_size() + item_extent <= self._max_line_length def current_size(self): """The size of the current line minus the indentation.""" size = 0 for item in reversed(self._lines): size += item.size if isinstance(item, self._LineBreak): break return size def line_empty(self): return (self._lines and isinstance(self._lines[-1], (self._LineBreak, self._Indent))) def emit(self): string = '' for item in self._lines: if isinstance(item, self._LineBreak): string = string.rstrip() string += item.emit() return string.rstrip() + '\n' ########################################################################### # Private Methods def _add_item(self, item, indent_amt): """Add an item to the line. Reflow the line to get the best formatting after the item is inserted. The bracket depth indicates if the item is being inserted inside of a container or not. """ if self._prev_item and self._prev_item.is_string and item.is_string: # Place consecutive string literals on separate lines. self._lines.append(self._LineBreak()) self._lines.append(self._Indent(indent_amt)) item_text = unicode(item) if self._lines and self._bracket_depth: # Adding the item into a container. self._prevent_default_initializer_splitting(item, indent_amt) if item_text in '.,)]}': self._split_after_delimiter(item, indent_amt) elif self._lines and not self.line_empty(): # Adding the item outside of a container. if self.fits_on_current_line(len(item_text)): self._enforce_space(item) else: # Line break for the new item. self._lines.append(self._LineBreak()) self._lines.append(self._Indent(indent_amt)) self._lines.append(item) self._prev_item, self._prev_prev_item = item, self._prev_item if item_text in '([{': self._bracket_depth += 1 elif item_text in '}])': self._bracket_depth -= 1 assert self._bracket_depth >= 0 def _add_container(self, container, indent_amt, break_after_open_bracket): actual_indent = indent_amt + 1 if ( unicode(self._prev_item) != '=' and not self.line_empty() and not self.fits_on_current_line( container.size + self._bracket_depth + 2) ): if unicode(container)[0] == '(' and self._prev_item.is_name: # Don't split before the opening bracket of a call. break_after_open_bracket = True actual_indent = indent_amt + 4 elif ( break_after_open_bracket or unicode(self._prev_item) not in '([{' ): # If the container doesn't fit on the current line and the # current line isn't empty, place the container on the next # line. self._lines.append(self._LineBreak()) self._lines.append(self._Indent(indent_amt)) break_after_open_bracket = False else: actual_indent = self.current_size() + 1 break_after_open_bracket = False if isinstance(container, (ListComprehension, IfExpression)): actual_indent = indent_amt # Increase the continued indentation only if recursing on a # container. container.reflow(self, ' ' * actual_indent, break_after_open_bracket=break_after_open_bracket) def _prevent_default_initializer_splitting(self, item, indent_amt): """Prevent splitting between a default initializer. When there is a default initializer, it's best to keep it all on the same line. 
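        For example (illustrative), "foo(bar=(1, 2))" reads better than a
        split between "bar=" and its value.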
It's nicer and more readable, even if it goes over the maximum allowable line length. This goes back along the current line to determine if we have a default initializer, and, if so, to remove extraneous whitespaces and add a line break/indent before it if needed. """ if unicode(item) == '=': # This is the assignment in the initializer. Just remove spaces for # now. self._delete_whitespace() return if (not self._prev_item or not self._prev_prev_item or unicode(self._prev_item) != '='): return self._delete_whitespace() prev_prev_index = self._lines.index(self._prev_prev_item) if ( isinstance(self._lines[prev_prev_index - 1], self._Indent) or self.fits_on_current_line(item.size + 1) ): # The default initializer is already the only item on this line. # Don't insert a newline here. return # Replace the space with a newline/indent combo. if isinstance(self._lines[prev_prev_index - 1], self._Space): del self._lines[prev_prev_index - 1] self.add_line_break_at(self._lines.index(self._prev_prev_item), indent_amt) def _split_after_delimiter(self, item, indent_amt): """Split the line only after a delimiter.""" self._delete_whitespace() if self.fits_on_current_line(item.size): return last_space = None for current_item in reversed(self._lines): if ( last_space and (not isinstance(current_item, Atom) or not current_item.is_colon) ): break else: last_space = None if isinstance(current_item, self._Space): last_space = current_item if isinstance(current_item, (self._LineBreak, self._Indent)): return if not last_space: return self.add_line_break_at(self._lines.index(last_space), indent_amt) def _enforce_space(self, item): """Enforce a space in certain situations. There are cases where we will want a space where normally we wouldn't put one. This just enforces the addition of a space. """ if isinstance(self._lines[-1], (self._Space, self._LineBreak, self._Indent)): return if not self._prev_item: return item_text = unicode(item) prev_text = unicode(self._prev_item) # Prefer a space around a '.' in an import statement, and between the # 'import' and '('. if ( (item_text == '.' and prev_text == 'from') or (item_text == 'import' and prev_text == '.') or (item_text == '(' and prev_text == 'import') ): self._lines.append(self._Space()) def _delete_whitespace(self): """Delete all whitespace from the end of the line.""" while isinstance(self._lines[-1], (self._Space, self._LineBreak, self._Indent)): del self._lines[-1] class Atom(object): """The smallest unbreakable unit that can be reflowed.""" def __init__(self, atom): self._atom = atom def __repr__(self): return self._atom.token_string def __len__(self): return self.size def reflow( self, reflowed_lines, continued_indent, extent, break_after_open_bracket=False, is_list_comp_or_if_expr=False, next_is_dot=False ): if self._atom.token_type == tokenize.COMMENT: reflowed_lines.add_comment(self) return total_size = extent if extent else self.size if self._atom.token_string not in ',:([{}])': # Some atoms will need an extra 1-sized space token after them. total_size += 1 prev_item = reflowed_lines.previous_item() if ( not is_list_comp_or_if_expr and not reflowed_lines.fits_on_current_line(total_size) and not (next_is_dot and reflowed_lines.fits_on_current_line(self.size + 1)) and not reflowed_lines.line_empty() and not self.is_colon and not (prev_item and prev_item.is_name and unicode(self) == '(') ): # Start a new line if there is already something on the line and # adding this atom would make it go over the max line length. 
reflowed_lines.add_line_break(continued_indent) else: reflowed_lines.add_space_if_needed(unicode(self)) reflowed_lines.add(self, len(continued_indent), break_after_open_bracket) def emit(self): return self.__repr__() @property def is_keyword(self): return keyword.iskeyword(self._atom.token_string) @property def is_string(self): return self._atom.token_type == tokenize.STRING @property def is_name(self): return self._atom.token_type == tokenize.NAME @property def is_number(self): return self._atom.token_type == tokenize.NUMBER @property def is_comma(self): return self._atom.token_string == ',' @property def is_colon(self): return self._atom.token_string == ':' @property def size(self): return len(self._atom.token_string) class Container(object): """Base class for all container types.""" def __init__(self, items): self._items = items def __repr__(self): string = '' last_was_keyword = False for item in self._items: if item.is_comma: string += ', ' elif item.is_colon: string += ': ' else: item_string = unicode(item) if ( string and (last_was_keyword or (not string.endswith(tuple('([{,.:}]) ')) and not item_string.startswith(tuple('([{,.:}])')))) ): string += ' ' string += item_string last_was_keyword = item.is_keyword return string def __iter__(self): for element in self._items: yield element def __getitem__(self, idx): return self._items[idx] def reflow(self, reflowed_lines, continued_indent, break_after_open_bracket=False): last_was_container = False for (index, item) in enumerate(self._items): next_item = get_item(self._items, index + 1) if isinstance(item, Atom): is_list_comp_or_if_expr = ( isinstance(self, (ListComprehension, IfExpression))) item.reflow(reflowed_lines, continued_indent, self._get_extent(index), is_list_comp_or_if_expr=is_list_comp_or_if_expr, next_is_dot=(next_item and unicode(next_item) == '.')) if last_was_container and item.is_comma: reflowed_lines.add_line_break(continued_indent) last_was_container = False else: # isinstance(item, Container) reflowed_lines.add(item, len(continued_indent), break_after_open_bracket) last_was_container = not isinstance(item, (ListComprehension, IfExpression)) if ( break_after_open_bracket and index == 0 and # Prefer to keep empty containers together instead of # separating them. unicode(item) == self.open_bracket and (not next_item or unicode(next_item) != self.close_bracket) and (len(self._items) != 3 or not isinstance(next_item, Atom)) ): reflowed_lines.add_line_break(continued_indent) break_after_open_bracket = False else: next_next_item = get_item(self._items, index + 2) if ( unicode(item) not in ['.', '%', 'in'] and next_item and not isinstance(next_item, Container) and unicode(next_item) != ':' and next_next_item and (not isinstance(next_next_item, Atom) or unicode(next_item) == 'not') and not reflowed_lines.line_empty() and not reflowed_lines.fits_on_current_line( self._get_extent(index + 1) + 2) ): reflowed_lines.add_line_break(continued_indent) def _get_extent(self, index): """The extent of the full element. E.g., the length of a function call or keyword. """ extent = 0 prev_item = get_item(self._items, index - 1) seen_dot = prev_item and unicode(prev_item) == '.' 
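        # Illustrative: measured from 'foo' in "foo.bar(baz)", the extent
        # covers the dotted name plus the call that follows it.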
while index < len(self._items): item = get_item(self._items, index) index += 1 if isinstance(item, (ListComprehension, IfExpression)): break if isinstance(item, Container): if prev_item and prev_item.is_name: if seen_dot: extent += 1 else: extent += item.size prev_item = item continue elif (unicode(item) not in ['.', '=', ':', 'not'] and not item.is_name and not item.is_string): break if unicode(item) == '.': seen_dot = True extent += item.size prev_item = item return extent @property def is_string(self): return False @property def size(self): return len(self.__repr__()) @property def is_keyword(self): return False @property def is_name(self): return False @property def is_comma(self): return False @property def is_colon(self): return False @property def open_bracket(self): return None @property def close_bracket(self): return None class Tuple(Container): """A high-level representation of a tuple.""" @property def open_bracket(self): return '(' @property def close_bracket(self): return ')' class List(Container): """A high-level representation of a list.""" @property def open_bracket(self): return '[' @property def close_bracket(self): return ']' class DictOrSet(Container): """A high-level representation of a dictionary or set.""" @property def open_bracket(self): return '{' @property def close_bracket(self): return '}' class ListComprehension(Container): """A high-level representation of a list comprehension.""" @property def size(self): length = 0 for item in self._items: if isinstance(item, IfExpression): break length += item.size return length class IfExpression(Container): """A high-level representation of an if-expression.""" def _parse_container(tokens, index, for_or_if=None): """Parse a high-level container, such as a list, tuple, etc.""" # Store the opening bracket. items = [Atom(Token(*tokens[index]))] index += 1 num_tokens = len(tokens) while index < num_tokens: tok = Token(*tokens[index]) if tok.token_string in ',)]}': # First check if we're at the end of a list comprehension or # if-expression. Don't add the ending token as part of the list # comprehension or if-expression, because they aren't part of those # constructs. if for_or_if == 'for': return (ListComprehension(items), index - 1) elif for_or_if == 'if': return (IfExpression(items), index - 1) # We've reached the end of a container. items.append(Atom(tok)) # If not, then we are at the end of a container. if tok.token_string == ')': # The end of a tuple. return (Tuple(items), index) elif tok.token_string == ']': # The end of a list. return (List(items), index) elif tok.token_string == '}': # The end of a dictionary or set. return (DictOrSet(items), index) elif tok.token_string in '([{': # A sub-container is being defined. (container, index) = _parse_container(tokens, index) items.append(container) elif tok.token_string == 'for': (container, index) = _parse_container(tokens, index, 'for') items.append(container) elif tok.token_string == 'if': (container, index) = _parse_container(tokens, index, 'if') items.append(container) else: items.append(Atom(tok)) index += 1 return (None, None) def _parse_tokens(tokens): """Parse the tokens. This converts the tokens into a form where we can manipulate them more easily. """ index = 0 parsed_tokens = [] num_tokens = len(tokens) while index < num_tokens: tok = Token(*tokens[index]) assert tok.token_type != token.INDENT if tok.token_type == tokenize.NEWLINE: # There's only one newline and it's at the end. 
break if tok.token_string in '([{': (container, index) = _parse_container(tokens, index) if not container: return None parsed_tokens.append(container) else: parsed_tokens.append(Atom(tok)) index += 1 return parsed_tokens def _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line): """Reflow the lines so that it looks nice.""" if unicode(parsed_tokens[0]) == 'def': # A function definition gets indented a bit more. continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE else: continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE break_after_open_bracket = not start_on_prefix_line lines = ReformattedLines(max_line_length) lines.add_indent(len(indentation.lstrip('\r\n'))) if not start_on_prefix_line: # If splitting after the opening bracket will cause the first element # to be aligned weirdly, don't try it. first_token = get_item(parsed_tokens, 0) second_token = get_item(parsed_tokens, 1) if ( first_token and second_token and unicode(second_token)[0] == '(' and len(indentation) + len(first_token) + 1 == len(continued_indent) ): return None for item in parsed_tokens: lines.add_space_if_needed(unicode(item), equal=True) save_continued_indent = continued_indent if start_on_prefix_line and isinstance(item, Container): start_on_prefix_line = False continued_indent = ' ' * (lines.current_size() + 1) item.reflow(lines, continued_indent, break_after_open_bracket) continued_indent = save_continued_indent return lines.emit() def _shorten_line_at_tokens_new(tokens, source, indentation, max_line_length): """Shorten the line taking its length into account. The input is expected to be free of newlines except for inside multiline strings and at the end. """ # Yield the original source so to see if it's a better choice than the # shortened candidate lines we generate here. yield indentation + source parsed_tokens = _parse_tokens(tokens) if parsed_tokens: # Perform two reflows. The first one starts on the same line as the # prefix. The second starts on the line after the prefix. fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line=True) if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line=False) if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed def _shorten_line_at_tokens(tokens, source, indentation, indent_word, key_token_strings, aggressive): """Separate line by breaking at tokens in key_token_strings. The input is expected to be free of newlines except for inside multiline strings and at the end. """ offsets = [] for (index, _t) in enumerate(token_offsets(tokens)): (token_type, token_string, start_offset, end_offset) = _t assert token_type != token.INDENT if token_string in key_token_strings: # Do not break in containers with zero or one items. unwanted_next_token = { '(': ')', '[': ']', '{': '}'}.get(token_string) if unwanted_next_token: if ( get_item(tokens, index + 1, default=[None, None])[1] == unwanted_next_token or get_item(tokens, index + 2, default=[None, None])[1] == unwanted_next_token ): continue if ( index > 2 and token_string == '(' and tokens[index - 1][1] in ',(%[' ): # Don't split after a tuple start, or before a tuple start if # the tuple is in a list. continue if end_offset < len(source) - 1: # Don't split right before newline. offsets.append(end_offset) else: # Break at adjacent strings. These were probably meant to be on # separate lines in the first place. 
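            # Illustrative: for "'abc' 'def'" an offset is recorded between
            # the two adjacent string literals.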
previous_token = get_item(tokens, index - 1) if ( token_type == tokenize.STRING and previous_token and previous_token[0] == tokenize.STRING ): offsets.append(start_offset) current_indent = None fixed = None for line in split_at_offsets(source, offsets): if fixed: fixed += '\n' + current_indent + line for symbol in '([{': if line.endswith(symbol): current_indent += indent_word else: # First line. fixed = line assert not current_indent current_indent = indent_word assert fixed is not None if check_syntax(normalize_multiline(fixed) if aggressive > 1 else fixed): return indentation + fixed return None def token_offsets(tokens): """Yield tokens and offsets.""" end_offset = 0 previous_end_row = 0 previous_end_column = 0 for t in tokens: token_type = t[0] token_string = t[1] (start_row, start_column) = t[2] (end_row, end_column) = t[3] # Account for the whitespace between tokens. end_offset += start_column if previous_end_row == start_row: end_offset -= previous_end_column # Record the start offset of the token. start_offset = end_offset # Account for the length of the token itself. end_offset += len(token_string) yield (token_type, token_string, start_offset, end_offset) previous_end_row = end_row previous_end_column = end_column def normalize_multiline(line): """Normalize multiline-related code that will cause syntax error. This is for purposes of checking syntax. """ if line.startswith('def ') and line.rstrip().endswith(':'): return line + ' pass' elif line.startswith('return '): return 'def _(): ' + line elif line.startswith('@'): return line + 'def _(): pass' elif line.startswith('class '): return line + ' pass' elif line.startswith(('if ', 'elif ', 'for ', 'while ')): return line + ' pass' return line def fix_whitespace(line, offset, replacement): """Replace whitespace at offset and return fixed line.""" # Replace escaped newlines too left = line[:offset].rstrip('\n\r \t\\') right = line[offset:].lstrip('\n\r \t\\') if right.startswith('#'): return line return left + replacement + right def _execute_pep8(pep8_options, source): """Execute pycodestyle via python method calls.""" class QuietReport(pycodestyle.BaseReport): """Version of checker that does not print.""" def __init__(self, options): super(QuietReport, self).__init__(options) self.__full_error_results = [] def error(self, line_number, offset, text, check): """Collect errors.""" code = super(QuietReport, self).error(line_number, offset, text, check) if code: self.__full_error_results.append( {'id': code, 'line': line_number, 'column': offset + 1, 'info': text}) def full_error_results(self): """Return error results in detail. Results are in the form of a list of dictionaries. Each dictionary contains 'id', 'line', 'column', and 'info'. """ return self.__full_error_results checker = pycodestyle.Checker('', lines=source, reporter=QuietReport, **pep8_options) checker.check_all() return checker.report.full_error_results() def _remove_leading_and_normalize(line): return line.lstrip().rstrip(CR + LF) + '\n' class Reindenter(object): """Reindents badly-indented code to uniformly use four-space indentation. Released to the public domain, by Tim Peters, 03 October 2000. """ def __init__(self, input_text): sio = io.StringIO(input_text) source_lines = sio.readlines() self.string_content_line_numbers = multiline_string_lines(input_text) # File lines, rstripped & tab-expanded. Dummy at start is so # that we can use tokenize's 1-based line numbering easily. # Note that a line is all-blank iff it is a newline. 
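        # Illustrative: after the insert(0, None) below, self.lines[3]
        # corresponds to tokenize's (1-based) line 3.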
self.lines = [] for line_number, line in enumerate(source_lines, start=1): # Do not modify if inside a multiline string. if line_number in self.string_content_line_numbers: self.lines.append(line) else: # Only expand leading tabs. self.lines.append(_get_indentation(line).expandtabs() + _remove_leading_and_normalize(line)) self.lines.insert(0, None) self.index = 1 # index into self.lines of next line self.input_text = input_text def run(self, indent_size=DEFAULT_INDENT_SIZE): """Fix indentation and return modified line numbers. Line numbers are indexed at 1. """ if indent_size < 1: return self.input_text try: stats = _reindent_stats(tokenize.generate_tokens(self.getline)) except (SyntaxError, tokenize.TokenError): return self.input_text # Remove trailing empty lines. lines = self.lines # Sentinel. stats.append((len(lines), 0)) # Map count of leading spaces to # we want. have2want = {} # Program after transformation. after = [] # Copy over initial empty lines -- there's nothing to do until # we see a line with *something* on it. i = stats[0][0] after.extend(lines[1:i]) for i in range(len(stats) - 1): thisstmt, thislevel = stats[i] nextstmt = stats[i + 1][0] have = _leading_space_count(lines[thisstmt]) want = thislevel * indent_size if want < 0: # A comment line. if have: # An indented comment line. If we saw the same # indentation before, reuse what it most recently # mapped to. want = have2want.get(have, -1) if want < 0: # Then it probably belongs to the next real stmt. for j in range(i + 1, len(stats) - 1): jline, jlevel = stats[j] if jlevel >= 0: if have == _leading_space_count(lines[jline]): want = jlevel * indent_size break if want < 0: # Maybe it's a hanging # comment like this one, # in which case we should shift it like its base # line got shifted. for j in range(i - 1, -1, -1): jline, jlevel = stats[j] if jlevel >= 0: want = (have + _leading_space_count( after[jline - 1]) - _leading_space_count(lines[jline])) break if want < 0: # Still no luck -- leave it alone. want = have else: want = 0 assert want >= 0 have2want[have] = want diff = want - have if diff == 0 or have == 0: after.extend(lines[thisstmt:nextstmt]) else: for line_number, line in enumerate(lines[thisstmt:nextstmt], start=thisstmt): if line_number in self.string_content_line_numbers: after.append(line) elif diff > 0: if line == '\n': after.append(line) else: after.append(' ' * diff + line) else: remove = min(_leading_space_count(line), -diff) after.append(line[remove:]) return ''.join(after) def getline(self): """Line-getter for tokenize.""" if self.index >= len(self.lines): line = '' else: line = self.lines[self.index] self.index += 1 return line def _reindent_stats(tokens): """Return list of (lineno, indentlevel) pairs. One for each stmt and comment line. indentlevel is -1 for comment lines, as a signal that tokenize doesn't know what to do about them; indeed, they're our headache! """ find_stmt = 1 # Next token begins a fresh stmt? level = 0 # Current indent level. stats = [] for t in tokens: token_type = t[0] sline = t[2][0] line = t[4] if token_type == tokenize.NEWLINE: # A program statement, or ENDMARKER, will eventually follow, # after some (possibly empty) run of tokens of the form # (NL | COMMENT)* (INDENT | DEDENT+)? find_stmt = 1 elif token_type == tokenize.INDENT: find_stmt = 1 level += 1 elif token_type == tokenize.DEDENT: find_stmt = 1 level -= 1 elif token_type == tokenize.COMMENT: if find_stmt: stats.append((sline, -1)) # But we're still looking for a new stmt, so leave # find_stmt alone. 
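        # Illustrative sketch: for "if x:\n    y = 1\n" the resulting stats
        # come out roughly as [(1, 0), (2, 1)].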
elif token_type == tokenize.NL: pass elif find_stmt: # This is the first "real token" following a NEWLINE, so it # must be the first token of the next program statement, or an # ENDMARKER. find_stmt = 0 if line: # Not endmarker. stats.append((sline, level)) return stats def _leading_space_count(line): """Return number of leading spaces in line.""" i = 0 while i < len(line) and line[i] == ' ': i += 1 return i def refactor_with_2to3(source_text, fixer_names, filename=''): """Use lib2to3 to refactor the source. Return the refactored source code. """ from lib2to3.refactor import RefactoringTool fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names] tool = RefactoringTool(fixer_names=fixers, explicit=fixers) from lib2to3.pgen2 import tokenize as lib2to3_tokenize try: # The name parameter is necessary particularly for the "import" fixer. return unicode(tool.refactor_string(source_text, name=filename)) except lib2to3_tokenize.TokenError: return source_text def check_syntax(code): """Return True if syntax is okay.""" try: return compile(code, '<string>', 'exec') except (SyntaxError, TypeError, UnicodeDecodeError): return False def filter_results(source, results, aggressive): """Filter out spurious reports from pycodestyle. If aggressive is True, we allow possibly unsafe fixes (E711, E712). """ non_docstring_string_line_numbers = multiline_string_lines( source, include_docstrings=False) all_string_line_numbers = multiline_string_lines( source, include_docstrings=True) commented_out_code_line_numbers = commented_out_code_lines(source) has_e901 = any(result['id'].lower() == 'e901' for result in results) for r in results: issue_id = r['id'].lower() if r['line'] in non_docstring_string_line_numbers: if issue_id.startswith(('e1', 'e501', 'w191')): continue if r['line'] in all_string_line_numbers: if issue_id in ['e501']: continue # We must offset by 1 for lines that contain the trailing contents of # multiline strings. if not aggressive and (r['line'] + 1) in all_string_line_numbers: # Do not modify multiline strings in non-aggressive mode. Remove # trailing whitespace could break doctests. if issue_id.startswith(('w29', 'w39')): continue if aggressive <= 0: if issue_id.startswith(('e711', 'e72', 'w6')): continue if aggressive <= 1: if issue_id.startswith(('e712', 'e713', 'e714', 'w5')): continue if aggressive <= 2: if issue_id.startswith(('e704', 'w5')): continue if r['line'] in commented_out_code_line_numbers: if issue_id.startswith(('e26', 'e501')): continue # Do not touch indentation if there is a token error caused by # incomplete multi-line statement. Otherwise, we risk screwing up the # indentation. if has_e901: if issue_id.startswith(('e1', 'e7')): continue yield r def multiline_string_lines(source, include_docstrings=False): """Return line numbers that are within multiline strings. The line numbers are indexed at 1. Docstrings are ignored. """ line_numbers = set() previous_token_type = '' try: for t in generate_tokens(source): token_type = t[0] start_row = t[2][0] end_row = t[3][0] if token_type == tokenize.STRING and start_row != end_row: if ( include_docstrings or previous_token_type != tokenize.INDENT ): # We increment by one since we want the contents of the # string. line_numbers |= set(range(1 + start_row, 1 + end_row)) previous_token_type = token_type except (SyntaxError, tokenize.TokenError): pass return line_numbers def commented_out_code_lines(source): """Return line numbers of comments that are likely code. 
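
    For example (illustrative), "# x = 1" parses as code and is reported,
    while "# TODO: fix" is ordinary prose and is not.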
Commented-out code is bad practice, but modifying it just adds even more clutter. """ line_numbers = [] try: for t in generate_tokens(source): token_type = t[0] token_string = t[1] start_row = t[2][0] line = t[4] # Ignore inline comments. if not line.lstrip().startswith('#'): continue if token_type == tokenize.COMMENT: stripped_line = token_string.lstrip('#').strip() if ( ' ' in stripped_line and '#' not in stripped_line and check_syntax(stripped_line) ): line_numbers.append(start_row) except (SyntaxError, tokenize.TokenError): pass return line_numbers def shorten_comment(line, max_line_length, last_comment=False): """Return trimmed or split long comment line. If there are no comments immediately following it, do a text wrap. Doing this wrapping on all comments in general would lead to jagged comment text. """ assert len(line) > max_line_length line = line.rstrip() # PEP 8 recommends 72 characters for comment text. indentation = _get_indentation(line) + '# ' max_line_length = min(max_line_length, len(indentation) + 72) MIN_CHARACTER_REPEAT = 5 if ( len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and not line[-1].isalnum() ): # Trim comments that end with things like --------- return line[:max_line_length] + '\n' elif last_comment and re.match(r'\s*#+\s*\w+', line): split_lines = textwrap.wrap(line.lstrip(' \t#'), initial_indent=indentation, subsequent_indent=indentation, width=max_line_length, break_long_words=False, break_on_hyphens=False) return '\n'.join(split_lines) + '\n' return line + '\n' def normalize_line_endings(lines, newline): """Return fixed line endings. All lines will be modified to use the most common line ending. """ return [line.rstrip('\n\r') + newline for line in lines] def mutual_startswith(a, b): return b.startswith(a) or a.startswith(b) def code_match(code, select, ignore): if ignore: assert not isinstance(ignore, unicode) for ignored_code in [c.strip() for c in ignore]: if mutual_startswith(code.lower(), ignored_code.lower()): return False if select: assert not isinstance(select, unicode) for selected_code in [c.strip() for c in select]: if mutual_startswith(code.lower(), selected_code.lower()): return True return False return True def fix_code(source, options=None, encoding=None, apply_config=False): """Return fixed source code. "encoding" will be used to decode "source" if it is a byte string. """ options = _get_options(options, apply_config) if not isinstance(source, unicode): source = source.decode(encoding or get_encoding()) sio = io.StringIO(source) return fix_lines(sio.readlines(), options=options) def _get_options(raw_options, apply_config): """Return parsed options.""" if not raw_options: return parse_args([''], apply_config=apply_config) if isinstance(raw_options, dict): options = parse_args([''], apply_config=apply_config) for name, value in raw_options.items(): if not hasattr(options, name): raise ValueError("No such option '{}'".format(name)) # Check for very basic type errors. expected_type = type(getattr(options, name)) if not isinstance(expected_type, (str, unicode)): if isinstance(value, (str, unicode)): raise ValueError( "Option '{}' should not be a string".format(name)) setattr(options, name, value) else: options = raw_options return options def fix_lines(source_lines, options, filename=''): """Return fixed source code.""" # Transform everything to line feed. Then change them back to original # before returning fixed source code. 
original_newline = find_newline(source_lines) tmp_source = ''.join(normalize_line_endings(source_lines, '\n')) # Keep a history to break out of cycles. previous_hashes = set() if options.line_range: # Disable "apply_local_fixes()" for now due to issue #175. fixed_source = tmp_source else: # Apply global fixes only once (for efficiency). fixed_source = apply_global_fixes(tmp_source, options, filename=filename) passes = 0 long_line_ignore_cache = set() while hash(fixed_source) not in previous_hashes: if options.pep8_passes >= 0 and passes > options.pep8_passes: break passes += 1 previous_hashes.add(hash(fixed_source)) tmp_source = copy.copy(fixed_source) fix = FixPEP8( filename, options, contents=tmp_source, long_line_ignore_cache=long_line_ignore_cache) fixed_source = fix.fix() sio = io.StringIO(fixed_source) return ''.join(normalize_line_endings(sio.readlines(), original_newline)) def fix_file(filename, options=None, output=None, apply_config=False): if not options: options = parse_args([filename], apply_config=apply_config) original_source = readlines_from_file(filename) fixed_source = original_source if options.in_place or output: encoding = detect_encoding(filename) if output: output = LineEndingWrapper(wrap_output(output, encoding=encoding)) fixed_source = fix_lines(fixed_source, options, filename=filename) if options.diff: new = io.StringIO(fixed_source) new = new.readlines() diff = get_diff_text(original_source, new, filename) if output: output.write(diff) output.flush() else: return diff elif options.in_place: fp = open_with_encoding(filename, encoding=encoding, mode='w') fp.write(fixed_source) fp.close() else: if output: output.write(fixed_source) output.flush() else: return fixed_source def global_fixes(): """Yield multiple (code, function) tuples.""" for function in list(globals().values()): if inspect.isfunction(function): arguments = _get_parameters(function) if arguments[:1] != ['source']: continue code = extract_code_from_function(function) if code: yield (code, function) def _get_parameters(function): # pylint: disable=deprecated-method if sys.version_info >= (3, 3): # We need to match "getargspec()", which includes "self" as the first # value for methods. # https://bugs.python.org/issue17481#msg209469 if inspect.ismethod(function): function = function.__func__ return list(inspect.signature(function).parameters) else: return inspect.getargspec(function)[0] def apply_global_fixes(source, options, where='global', filename=''): """Run global fixes on source code. These are fixes that only need be done once (unlike those in FixPEP8, which are dependent on pycodestyle). 
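    For example (illustrative), fix_e265() is picked up by global_fixes()
    because its first parameter is named "source".
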
""" if any(code_match(code, select=options.select, ignore=options.ignore) for code in ['E101', 'E111']): source = reindent(source, indent_size=options.indent_size) for (code, function) in global_fixes(): if code_match(code, select=options.select, ignore=options.ignore): if options.verbose: print('---> Applying {0} fix for {1}'.format(where, code.upper()), file=sys.stderr) source = function(source, aggressive=options.aggressive) source = fix_2to3(source, aggressive=options.aggressive, select=options.select, ignore=options.ignore, filename=filename) return source def extract_code_from_function(function): """Return code handled by function.""" if not function.__name__.startswith('fix_'): return None code = re.sub('^fix_', '', function.__name__) if not code: return None try: int(code[1:]) except ValueError: return None return code def _get_package_version(): packages = ["pycodestyle: {0}".format(pycodestyle.__version__)] return ", ".join(packages) def create_parser(): """Return command-line parser.""" # Do import locally to be friendly to those who use autopep8 as a library # and are supporting Python 2.6. import argparse parser = argparse.ArgumentParser(description=docstring_summary(__doc__), prog='autopep8') parser.add_argument('--version', action='version', version='%(prog)s {0} ({1})'.format( __version__, _get_package_version())) parser.add_argument('-v', '--verbose', action='count', default=0, help='print verbose messages; ' 'multiple -v result in more verbose messages') parser.add_argument('-d', '--diff', action='store_true', help='print the diff for the fixed source') parser.add_argument('-i', '--in-place', action='store_true', help='make changes to files in place') parser.add_argument('--global-config', metavar='filename', default=DEFAULT_CONFIG, help='path to a global pep8 config file; if this file ' 'does not exist then this is ignored ' '(default: {0})'.format(DEFAULT_CONFIG)) parser.add_argument('--ignore-local-config', action='store_true', help="don't look for and apply local config files; " 'if not passed, defaults are updated with any ' "config files in the project's root directory") parser.add_argument('-r', '--recursive', action='store_true', help='run recursively over directories; ' 'must be used with --in-place or --diff') parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1, help='number of parallel jobs; ' 'match CPU count if value is less than 1') parser.add_argument('-p', '--pep8-passes', metavar='n', default=-1, type=int, help='maximum number of additional pep8 passes ' '(default: infinite)') parser.add_argument('-a', '--aggressive', action='count', default=0, help='enable non-whitespace changes; ' 'multiple -a result in more aggressive changes') parser.add_argument('--experimental', action='store_true', help='enable experimental fixes') parser.add_argument('--exclude', metavar='globs', help='exclude file/directory names that match these ' 'comma-separated globs') parser.add_argument('--list-fixes', action='store_true', help='list codes for fixes; ' 'used by --ignore and --select') parser.add_argument('--ignore', metavar='errors', default='', help='do not fix these errors/warnings ' '(default: {0})'.format(DEFAULT_IGNORE)) parser.add_argument('--select', metavar='errors', default='', help='fix only these errors/warnings (e.g. 
E4,W)') parser.add_argument('--max-line-length', metavar='n', default=79, type=int, help='set maximum allowed line length ' '(default: %(default)s)') parser.add_argument('--line-range', '--range', metavar='line', default=None, type=int, nargs=2, help='only fix errors found within this inclusive ' 'range of line numbers (e.g. 1 99); ' 'line numbers are indexed at 1') parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE, type=int, help=argparse.SUPPRESS) parser.add_argument('files', nargs='*', help="files to format or '-' for standard in") return parser def parse_args(arguments, apply_config=False): """Parse command-line options.""" parser = create_parser() args = parser.parse_args(arguments) if not args.files and not args.list_fixes: parser.error('incorrect number of arguments') args.files = [decode_filename(name) for name in args.files] if apply_config: parser = read_config(args, parser) args = parser.parse_args(arguments) args.files = [decode_filename(name) for name in args.files] if '-' in args.files: if len(args.files) > 1: parser.error('cannot mix stdin and regular files') if args.diff: parser.error('--diff cannot be used with standard input') if args.in_place: parser.error('--in-place cannot be used with standard input') if args.recursive: parser.error('--recursive cannot be used with standard input') if len(args.files) > 1 and not (args.in_place or args.diff): parser.error('autopep8 only takes one filename as argument ' 'unless the "--in-place" or "--diff" args are ' 'used') if args.recursive and not (args.in_place or args.diff): parser.error('--recursive must be used with --in-place or --diff') if args.in_place and args.diff: parser.error('--in-place and --diff are mutually exclusive') if args.max_line_length <= 0: parser.error('--max-line-length must be greater than 0') if args.select: args.select = _split_comma_separated(args.select) if args.ignore: args.ignore = _split_comma_separated(args.ignore) elif not args.select: if args.aggressive: # Enable everything by default if aggressive. args.select = set(['E', 'W']) else: args.ignore = _split_comma_separated(DEFAULT_IGNORE) if args.exclude: args.exclude = _split_comma_separated(args.exclude) else: args.exclude = set([]) if args.jobs < 1: # Do not import multiprocessing globally in case it is not supported # on the platform. 
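        # Illustrative: "--jobs 0" on a four-core machine ends up as jobs=4
        # via cpu_count() below.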
import multiprocessing args.jobs = multiprocessing.cpu_count() if args.jobs > 1 and not args.in_place: parser.error('parallel jobs requires --in-place') if args.line_range: if args.line_range[0] <= 0: parser.error('--range must be positive numbers') if args.line_range[0] > args.line_range[1]: parser.error('First value of --range should be less than or equal ' 'to the second') return args def read_config(args, parser): """Read both user configuration and local configuration.""" try: from configparser import ConfigParser as SafeConfigParser from configparser import Error except ImportError: from ConfigParser import SafeConfigParser from ConfigParser import Error config = SafeConfigParser() try: config.read(args.global_config) if not args.ignore_local_config: parent = tail = args.files and os.path.abspath( os.path.commonprefix(args.files)) while tail: if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]): break (parent, tail) = os.path.split(parent) defaults = dict() option_list = dict([(o.dest, o.type or type(o.default)) for o in parser._actions]) for section in ['pep8', 'pycodestyle']: if not config.has_section(section): continue for (k, _) in config.items(section): norm_opt = k.lstrip('-').replace('-', '_') opt_type = option_list[norm_opt] if opt_type is int: value = config.getint(section, k) elif opt_type is bool: value = config.getboolean(section, k) else: value = config.get(section, k) defaults[norm_opt] = value parser.set_defaults(**defaults) except Error: # Ignore for now. pass return parser def _split_comma_separated(string): """Return a set of strings.""" return set(text.strip() for text in string.split(',') if text.strip()) def decode_filename(filename): """Return Unicode filename.""" if isinstance(filename, unicode): return filename return filename.decode(sys.getfilesystemencoding()) def supported_fixes(): """Yield pep8 error codes that autopep8 fixes. Each item we yield is a tuple of the code followed by its description. """ yield ('E101', docstring_summary(reindent.__doc__)) instance = FixPEP8(filename=None, options=None, contents='') for attribute in dir(instance): code = re.match('fix_([ew][0-9][0-9][0-9])', attribute) if code: yield ( code.group(1).upper(), re.sub(r'\s+', ' ', docstring_summary(getattr(instance, attribute).__doc__)) ) for (code, function) in sorted(global_fixes()): yield (code.upper() + (4 - len(code)) * ' ', re.sub(r'\s+', ' ', docstring_summary(function.__doc__))) for code in sorted(CODE_TO_2TO3): yield (code.upper() + (4 - len(code)) * ' ', re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__))) def docstring_summary(docstring): """Return summary of docstring.""" return docstring.split('\n')[0] if docstring else '' def line_shortening_rank(candidate, indent_word, max_line_length, experimental=False): """Return rank of candidate. This is for sorting candidates. """ if not candidate.strip(): return 0 rank = 0 lines = candidate.rstrip().split('\n') offset = 0 if ( not lines[0].lstrip().startswith('#') and lines[0].rstrip()[-1] not in '([{' ): for (opening, closing) in ('()', '[]', '{}'): # Don't penalize empty containers that aren't split up. Things like # this "foo(\n )" aren't particularly good. 
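            # Illustrative: a candidate whose first line is "foo(" accrues
            # an offset penalty below, while an unsplit "foo()" does not.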
opening_loc = lines[0].find(opening) closing_loc = lines[0].find(closing) if opening_loc >= 0: if closing_loc < 0 or closing_loc != opening_loc + 1: offset = max(offset, 1 + opening_loc) current_longest = max(offset + len(x.strip()) for x in lines) rank += 4 * max(0, current_longest - max_line_length) rank += len(lines) # Too much variation in line length is ugly. rank += 2 * standard_deviation(len(line) for line in lines) bad_staring_symbol = { '(': ')', '[': ']', '{': '}'}.get(lines[0][-1]) if len(lines) > 1: if ( bad_staring_symbol and lines[1].lstrip().startswith(bad_staring_symbol) ): rank += 20 for lineno, current_line in enumerate(lines): current_line = current_line.strip() if current_line.startswith('#'): continue for bad_start in ['.', '%', '+', '-', '/']: if current_line.startswith(bad_start): rank += 100 # Do not tolerate operators on their own line. if current_line == bad_start: rank += 1000 if ( current_line.endswith(('.', '%', '+', '-', '/')) and "': " in current_line ): rank += 1000 if current_line.endswith(('(', '[', '{', '.')): # Avoid lonely opening. They result in longer lines. if len(current_line) <= len(indent_word): rank += 100 # Avoid the ugliness of ", (\n". if ( current_line.endswith('(') and current_line[:-1].rstrip().endswith(',') ): rank += 100 # Avoid the ugliness of "something[\n" and something[index][\n. if ( current_line.endswith('[') and len(current_line) > 1 and (current_line[-2].isalnum() or current_line[-2] in ']') ): rank += 300 # Also avoid the ugliness of "foo.\nbar" if current_line.endswith('.'): rank += 100 if has_arithmetic_operator(current_line): rank += 100 # Avoid breaking at unary operators. if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')): rank += 1000 if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')): rank += 1000 if current_line.endswith(('%', '(', '[', '{')): rank -= 20 # Try to break list comprehensions at the "for". if current_line.startswith('for '): rank -= 50 if current_line.endswith('\\'): # If a line ends in \-newline, it may be part of a # multiline string. In that case, we would like to know # how long that line is without the \-newline. If it's # longer than the maximum, or has comments, then we assume # that the \-newline is an okay candidate and only # penalize it a bit. total_len = len(current_line) lineno += 1 while lineno < len(lines): total_len += len(lines[lineno]) if lines[lineno].lstrip().startswith('#'): total_len = max_line_length break if not lines[lineno].endswith('\\'): break lineno += 1 if total_len < max_line_length: rank += 10 else: rank += 100 if experimental else 1 # Prefer breaking at commas rather than colon. if ',' in current_line and current_line.endswith(':'): rank += 10 # Avoid splitting dictionaries between key and value. 
if current_line.endswith(':'): rank += 100 rank += 10 * count_unbalanced_brackets(current_line) return max(0, rank) def standard_deviation(numbers): """Return standard devation.""" numbers = list(numbers) if not numbers: return 0 mean = sum(numbers) / len(numbers) return (sum((n - mean) ** 2 for n in numbers) / len(numbers)) ** .5 def has_arithmetic_operator(line): """Return True if line contains any arithmetic operators.""" for operator in pycodestyle.ARITHMETIC_OP: if operator in line: return True return False def count_unbalanced_brackets(line): """Return number of unmatched open/close brackets.""" count = 0 for opening, closing in ['()', '[]', '{}']: count += abs(line.count(opening) - line.count(closing)) return count def split_at_offsets(line, offsets): """Split line at offsets. Return list of strings. """ result = [] previous_offset = 0 current_offset = 0 for current_offset in sorted(offsets): if current_offset < len(line) and previous_offset != current_offset: result.append(line[previous_offset:current_offset].strip()) previous_offset = current_offset result.append(line[current_offset:]) return result class LineEndingWrapper(object): r"""Replace line endings to work with sys.stdout. It seems that sys.stdout expects only '\n' as the line ending, no matter the platform. Otherwise, we get repeated line endings. """ def __init__(self, output): self.__output = output def write(self, s): self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n')) def flush(self): self.__output.flush() def match_file(filename, exclude): """Return True if file is okay for modifying/recursing.""" base_name = os.path.basename(filename) if base_name.startswith('.'): return False for pattern in exclude: if fnmatch.fnmatch(base_name, pattern): return False if fnmatch.fnmatch(filename, pattern): return False if not os.path.isdir(filename) and not is_python_file(filename): return False return True def find_files(filenames, recursive, exclude): """Yield filenames.""" while filenames: name = filenames.pop(0) if recursive and os.path.isdir(name): for root, directories, children in os.walk(name): filenames += [os.path.join(root, f) for f in children if match_file(os.path.join(root, f), exclude)] directories[:] = [d for d in directories if match_file(os.path.join(root, d), exclude)] else: yield name def _fix_file(parameters): """Helper function for optionally running fix_file() in parallel.""" if parameters[1].verbose: print('[file:{0}]'.format(parameters[0]), file=sys.stderr) try: fix_file(*parameters) except IOError as error: print(unicode(error), file=sys.stderr) def fix_multiple_files(filenames, options, output=None): """Fix list of files. Optionally fix files recursively. """ filenames = find_files(filenames, options.recursive, options.exclude) if options.jobs > 1: import multiprocessing pool = multiprocessing.Pool(options.jobs) pool.map(_fix_file, [(name, options) for name in filenames]) else: for name in filenames: _fix_file((name, options, output)) def is_python_file(filename): """Return True if filename is Python file.""" if filename.endswith('.py'): return True try: with open_with_encoding( filename, limit_byte_check=MAX_PYTHON_FILE_DETECTION_BYTES) as f: text = f.read(MAX_PYTHON_FILE_DETECTION_BYTES) if not text: return False first_line = text.splitlines()[0] except (IOError, IndexError): return False if not PYTHON_SHEBANG_REGEX.match(first_line): return False return True def is_probably_part_of_multiline(line): """Return True if line is likely part of a multiline string. 
When multiline strings are involved, pep8 reports the error as being at the start of the multiline string, which doesn't work for us. """ return ( '"""' in line or "'''" in line or line.rstrip().endswith('\\') ) def wrap_output(output, encoding): """Return output with specified encoding.""" return codecs.getwriter(encoding)(output.buffer if hasattr(output, 'buffer') else output) def get_encoding(): """Return preferred encoding.""" return locale.getpreferredencoding() or sys.getdefaultencoding() def main(argv=None, apply_config=True): """Command-line entry.""" if argv is None: argv = sys.argv try: # Exit on broken pipe. signal.signal(signal.SIGPIPE, signal.SIG_DFL) except AttributeError: # pragma: no cover # SIGPIPE is not available on Windows. pass try: args = parse_args(argv[1:], apply_config=apply_config) if args.list_fixes: for code, description in sorted(supported_fixes()): print('{code} - {description}'.format( code=code, description=description)) return 0 if args.files == ['-']: assert not args.in_place encoding = sys.stdin.encoding or get_encoding() # LineEndingWrapper is unnecessary here due to the symmetry between # standard in and standard out. wrap_output(sys.stdout, encoding=encoding).write( fix_code(sys.stdin.read(), args, encoding=encoding)) else: if args.in_place or args.diff: args.files = list(set(args.files)) else: assert len(args.files) == 1 assert not args.recursive fix_multiple_files(args.files, args, sys.stdout) except KeyboardInterrupt: return 1 # pragma: no cover class CachedTokenizer(object): """A one-element cache around tokenize.generate_tokens(). Original code written by Ned Batchelder, in coverage.py. """ def __init__(self): self.last_text = None self.last_tokens = None def generate_tokens(self, text): """A stand-in for tokenize.generate_tokens().""" if text != self.last_text: string_io = io.StringIO(text) self.last_tokens = list( tokenize.generate_tokens(string_io.readline) ) self.last_text = text return self.last_tokens _cached_tokenizer = CachedTokenizer() generate_tokens = _cached_tokenizer.generate_tokens if __name__ == '__main__': sys.exit(main())
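# The ranking helpers above (standard_deviation, count_unbalanced_brackets,
# split_at_offsets) are pure functions, so their behaviour is easy to check
# in isolation. A small doctest-style sketch -- the values follow directly
# from the implementations above; nothing here is part of autopep8's public
# API:
#
#     >>> count_unbalanced_brackets('foo(bar[0]')   # one unmatched '('
#     1
#     >>> standard_deviation([2, 4, 4, 4, 5, 5, 7, 9])
#     2.0
#     >>> split_at_offsets('abcdef', [2, 4])
#     ['ab', 'cd', 'ef']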
130,342
Python
.py
3,083
30.650016
79
0.552821
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,216
btm_utils.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/btm_utils.py
"Utility functions used by the btm_matcher module" from . import pytree from .pgen2 import grammar, token from .pygram import pattern_symbols, python_symbols syms = pattern_symbols pysyms = python_symbols tokens = grammar.opmap token_labels = token TYPE_ANY = -1 TYPE_ALTERNATIVES = -2 TYPE_GROUP = -3 class MinNode(object): """This class serves as an intermediate representation of the pattern tree during the conversion to sets of leaf-to-root subpatterns""" def __init__(self, type=None, name=None): self.type = type self.name = name self.children = [] self.leaf = False self.parent = None self.alternatives = [] self.group = [] def __repr__(self): return str(self.type) + ' ' + str(self.name) def leaf_to_root(self): """Internal method. Returns a characteristic path of the pattern tree. This method must be run for all leaves until the linear subpatterns are merged into a single""" node = self subp = [] while node: if node.type == TYPE_ALTERNATIVES: node.alternatives.append(subp) if len(node.alternatives) == len(node.children): #last alternative subp = [tuple(node.alternatives)] node.alternatives = [] node = node.parent continue else: node = node.parent subp = None break if node.type == TYPE_GROUP: node.group.append(subp) #probably should check the number of leaves if len(node.group) == len(node.children): subp = get_characteristic_subpattern(node.group) node.group = [] node = node.parent continue else: node = node.parent subp = None break if node.type == token_labels.NAME and node.name: #in case of type=name, use the name instead subp.append(node.name) else: subp.append(node.type) node = node.parent return subp def get_linear_subpattern(self): """Drives the leaf_to_root method. The reason that leaf_to_root must be run multiple times is because we need to reject 'group' matches; for example the alternative form (a | b c) creates a group [b c] that needs to be matched. Since matching multiple linear patterns overcomes the automaton's capabilities, leaf_to_root merges each group into a single choice based on 'characteristic'ity, i.e. (a|b c) -> (a|b) if b more characteristic than c Returns: The most 'characteristic'(as defined by get_characteristic_subpattern) path for the compiled pattern tree. """ for l in self.leaves(): subp = l.leaf_to_root() if subp: return subp def leaves(self): "Generator that returns the leaves of the tree" for child in self.children: for x in child.leaves(): yield x if not self.children: yield self def reduce_tree(node, parent=None): """ Internal function. Reduces a compiled pattern tree to an intermediate representation suitable for feeding the automaton. This also trims off any optional pattern elements(like [a], a*). 
""" new_node = None #switch on the node type if node.type == syms.Matcher: #skip node = node.children[0] if node.type == syms.Alternatives : #2 cases if len(node.children) <= 2: #just a single 'Alternative', skip this node new_node = reduce_tree(node.children[0], parent) else: #real alternatives new_node = MinNode(type=TYPE_ALTERNATIVES) #skip odd children('|' tokens) for child in node.children: if node.children.index(child)%2: continue reduced = reduce_tree(child, new_node) if reduced is not None: new_node.children.append(reduced) elif node.type == syms.Alternative: if len(node.children) > 1: new_node = MinNode(type=TYPE_GROUP) for child in node.children: reduced = reduce_tree(child, new_node) if reduced: new_node.children.append(reduced) if not new_node.children: # delete the group if all of the children were reduced to None new_node = None else: new_node = reduce_tree(node.children[0], parent) elif node.type == syms.Unit: if (isinstance(node.children[0], pytree.Leaf) and node.children[0].value == '('): #skip parentheses return reduce_tree(node.children[1], parent) if ((isinstance(node.children[0], pytree.Leaf) and node.children[0].value == '[') or (len(node.children)>1 and hasattr(node.children[1], "value") and node.children[1].value == '[')): #skip whole unit if its optional return None leaf = True details_node = None alternatives_node = None has_repeater = False repeater_node = None has_variable_name = False for child in node.children: if child.type == syms.Details: leaf = False details_node = child elif child.type == syms.Repeater: has_repeater = True repeater_node = child elif child.type == syms.Alternatives: alternatives_node = child if hasattr(child, 'value') and child.value == '=': # variable name has_variable_name = True #skip variable name if has_variable_name: #skip variable name, '=' name_leaf = node.children[2] if hasattr(name_leaf, 'value') and name_leaf.value == '(': # skip parenthesis name_leaf = node.children[3] else: name_leaf = node.children[0] #set node type if name_leaf.type == token_labels.NAME: #(python) non-name or wildcard if name_leaf.value == 'any': new_node = MinNode(type=TYPE_ANY) else: if hasattr(token_labels, name_leaf.value): new_node = MinNode(type=getattr(token_labels, name_leaf.value)) else: new_node = MinNode(type=getattr(pysyms, name_leaf.value)) elif name_leaf.type == token_labels.STRING: #(python) name or character; remove the apostrophes from #the string value name = name_leaf.value.strip("'") if name in tokens: new_node = MinNode(type=tokens[name]) else: new_node = MinNode(type=token_labels.NAME, name=name) elif name_leaf.type == syms.Alternatives: new_node = reduce_tree(alternatives_node, parent) #handle repeaters if has_repeater: if repeater_node.children[0].value == '*': #reduce to None new_node = None elif repeater_node.children[0].value == '+': #reduce to a single occurence i.e. 
do nothing pass else: #TODO: handle {min, max} repeaters raise NotImplementedError pass #add children if details_node and new_node is not None: for child in details_node.children[1:-1]: #skip '<', '>' markers reduced = reduce_tree(child, new_node) if reduced is not None: new_node.children.append(reduced) if new_node: new_node.parent = parent return new_node def get_characteristic_subpattern(subpatterns): """Picks the most characteristic from a list of linear patterns Current order used is: names > common_names > common_chars """ if not isinstance(subpatterns, list): return subpatterns if len(subpatterns)==1: return subpatterns[0] # first pick out the ones containing variable names subpatterns_with_names = [] subpatterns_with_common_names = [] common_names = ['in', 'for', 'if' , 'not', 'None'] subpatterns_with_common_chars = [] common_chars = "[]().,:" for subpattern in subpatterns: if any(rec_test(subpattern, lambda x: type(x) is str)): if any(rec_test(subpattern, lambda x: isinstance(x, str) and x in common_chars)): subpatterns_with_common_chars.append(subpattern) elif any(rec_test(subpattern, lambda x: isinstance(x, str) and x in common_names)): subpatterns_with_common_names.append(subpattern) else: subpatterns_with_names.append(subpattern) if subpatterns_with_names: subpatterns = subpatterns_with_names elif subpatterns_with_common_names: subpatterns = subpatterns_with_common_names elif subpatterns_with_common_chars: subpatterns = subpatterns_with_common_chars # of the remaining subpatterns pick out the longest one return max(subpatterns, key=len) def rec_test(sequence, test_func): """Tests test_func on all items of sequence and items of included sub-iterables""" for x in sequence: if isinstance(x, (list, tuple)): for y in rec_test(x, test_func): yield y else: yield test_func(x)
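# A doctest-style sketch of the selection logic above (the inputs are toy
# linear subpatterns, not real compiled pattern trees):
#
#     >>> get_characteristic_subpattern([['if'], ['power', 'trailer']])
#     ['power', 'trailer']
#
# 'if' only counts as a common name, while 'power' and 'trailer' count as
# variable names, so the second subpattern wins. rec_test() is the recursive
# predicate driving those checks:
#
#     >>> list(rec_test([1, [2, 3]], lambda x: x > 1))
#     [False, True, True]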
10,011
Python
.py
249
28.62249
83
0.567229
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,217
fixer_base.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixer_base.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Base class for fixers (optional, but recommended).""" # Python imports import logging import itertools # Local imports from .patcomp import PatternCompiler from . import pygram from .fixer_util import does_tree_import class BaseFix(object): """Optional base class for fixers. The subclass name must be FixFooBar where FooBar is the result of removing underscores and capitalizing the words of the fix name. For example, the class name for a fixer named 'has_key' should be FixHasKey. """ PATTERN = None # Most subclasses should override with a string literal pattern = None # Compiled pattern, set by compile_pattern() pattern_tree = None # Tree representation of the pattern options = None # Options object passed to initializer filename = None # The filename (set by set_filename) logger = None # A logger (set by set_filename) numbers = itertools.count(1) # For new_name() used_names = set() # A set of all used NAMEs order = "post" # Does the fixer prefer pre- or post-order traversal explicit = False # Is this ignored by refactor.py -f all? run_order = 5 # Fixers will be sorted by run order before execution # Lower numbers will be run first. _accept_type = None # [Advanced and not public] This tells RefactoringTool # which node type to accept when there's not a pattern. keep_line_order = False # For the bottom matcher: match with the # original line order BM_compatible = False # Compatibility with the bottom matching # module; every fixer should set this # manually # Shortcut for access to Python grammar symbols syms = pygram.python_symbols def __init__(self, options, log): """Initializer. Subclass may override. Args: options: an dict containing the options passed to RefactoringTool that could be used to customize the fixer through the command line. log: a list to append warnings and other messages to. """ self.options = options self.log = log self.compile_pattern() def compile_pattern(self): """Compiles self.PATTERN into self.pattern. Subclass may override if it doesn't want to use self.{pattern,PATTERN} in .match(). """ if self.PATTERN is not None: PC = PatternCompiler() self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN, with_tree=True) def set_filename(self, filename): """Set the filename, and a logger derived from it. The main refactoring tool should call this. """ self.filename = filename self.logger = logging.getLogger(filename) def match(self, node): """Returns match for a given parse tree node. Should return a true or false object (not necessarily a bool). It may return a non-empty dict of matching sub-nodes as returned by a matching pattern. Subclass may override. """ results = {"node": node} return self.pattern.match(node, results) and results def transform(self, node, results): """Returns the transformation for a given parse tree node. Args: node: the root of the parse tree that matched the fixer. results: a dict mapping symbolic names to part of the match. Returns: None, or a node that is a modified copy of the argument node. The node argument may also be modified in-place to effect the same change. Subclass *must* override. """ raise NotImplementedError() def new_name(self, template="xxx_todo_changeme"): """Return a string suitable for use as an identifier The new name is guaranteed not to conflict with other identifiers. 
""" name = template while name in self.used_names: name = template + str(next(self.numbers)) self.used_names.add(name) return name def log_message(self, message): if self.first_log: self.first_log = False self.log.append("### In file %s ###" % self.filename) self.log.append(message) def cannot_convert(self, node, reason=None): """Warn the user that a given chunk of code is not valid Python 3, but that it cannot be converted automatically. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. """ lineno = node.get_lineno() for_output = node.clone() for_output.prefix = "" msg = "Line %d: could not convert: %s" self.log_message(msg % (lineno, for_output)) if reason: self.log_message(reason) def warning(self, node, reason): """Used for warning the user about possible uncertainty in the translation. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. """ lineno = node.get_lineno() self.log_message("Line %d: %s" % (lineno, reason)) def start_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ self.used_names = tree.used_names self.set_filename(filename) self.numbers = itertools.count(1) self.first_log = True def finish_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the conclusion of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ pass class ConditionalFix(BaseFix): """ Base class for fixers which not execute if an import is found. """ # This is the name of the import which, if found, will cause the test to be skipped skip_on = None def start_tree(self, *args): super(ConditionalFix, self).start_tree(*args) self._should_skip = None def should_skip(self, node): if self._should_skip is not None: return self._should_skip pkg = self.skip_on.split(".") name = pkg[-1] pkg = ".".join(pkg[:-1]) self._should_skip = does_tree_import(pkg, name, node) return self._should_skip
6,839
Python
.py
151
36.417219
87
0.638647
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,218
btm_matcher.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/btm_matcher.py
"""A bottom-up tree matching algorithm implementation meant to speed up 2to3's matching process. After the tree patterns are reduced to their rarest linear path, a linear Aho-Corasick automaton is created. The linear automaton traverses the linear paths from the leaves to the root of the AST and returns a set of nodes for further matching. This reduces significantly the number of candidate nodes.""" __author__ = "George Boutsioukis <gboutsioukis@gmail.com>" import logging import itertools from collections import defaultdict from . import pytree from .btm_utils import reduce_tree class BMNode(object): """Class for a node of the Aho-Corasick automaton used in matching""" count = itertools.count() def __init__(self): self.transition_table = {} self.fixers = [] self.id = next(BMNode.count) self.content = '' class BottomMatcher(object): """The main matcher class. After instantiating the patterns should be added using the add_fixer method""" def __init__(self): self.match = set() self.root = BMNode() self.nodes = [self.root] self.fixers = [] self.logger = logging.getLogger("RefactoringTool") def add_fixer(self, fixer): """Reduces a fixer's pattern tree to a linear path and adds it to the matcher(a common Aho-Corasick automaton). The fixer is appended on the matching states and called when they are reached""" self.fixers.append(fixer) tree = reduce_tree(fixer.pattern_tree) linear = tree.get_linear_subpattern() match_nodes = self.add(linear, start=self.root) for match_node in match_nodes: match_node.fixers.append(fixer) def add(self, pattern, start): "Recursively adds a linear pattern to the AC automaton" #print("adding pattern", pattern, "to", start) if not pattern: #print("empty pattern") return [start] if isinstance(pattern[0], tuple): #alternatives #print("alternatives") match_nodes = [] for alternative in pattern[0]: #add all alternatives, and add the rest of the pattern #to each end node end_nodes = self.add(alternative, start=start) for end in end_nodes: match_nodes.extend(self.add(pattern[1:], end)) return match_nodes else: #single token #not last if pattern[0] not in start.transition_table: #transition did not exist, create new next_node = BMNode() start.transition_table[pattern[0]] = next_node else: #transition exists already, follow next_node = start.transition_table[pattern[0]] if pattern[1:]: end_nodes = self.add(pattern[1:], start=next_node) else: end_nodes = [next_node] return end_nodes def run(self, leaves): """The main interface with the bottom matcher. The tree is traversed from the bottom using the constructed automaton. Nodes are only checked once as the tree is retraversed. When the automaton fails, we give it one more shot(in case the above tree matches as a whole with the rejected leaf), then we break for the next leaf. 
There is the special case of multiple arguments(see code comments) where we recheck the nodes Args: The leaves of the AST tree to be matched Returns: A dictionary of node matches with fixers as the keys """ current_ac_node = self.root results = defaultdict(list) for leaf in leaves: current_ast_node = leaf while current_ast_node: current_ast_node.was_checked = True for child in current_ast_node.children: # multiple statements, recheck if isinstance(child, pytree.Leaf) and child.value == ";": current_ast_node.was_checked = False break if current_ast_node.type == 1: #name node_token = current_ast_node.value else: node_token = current_ast_node.type if node_token in current_ac_node.transition_table: #token matches current_ac_node = current_ac_node.transition_table[node_token] for fixer in current_ac_node.fixers: if not fixer in results: results[fixer] = [] results[fixer].append(current_ast_node) else: #matching failed, reset automaton current_ac_node = self.root if (current_ast_node.parent is not None and current_ast_node.parent.was_checked): #the rest of the tree upwards has been checked, next leaf break #recheck the rejected node once from the root if node_token in current_ac_node.transition_table: #token matches current_ac_node = current_ac_node.transition_table[node_token] for fixer in current_ac_node.fixers: if not fixer in results.keys(): results[fixer] = [] results[fixer].append(current_ast_node) current_ast_node = current_ast_node.parent return results def print_ac(self): "Prints a graphviz diagram of the BM automaton(for debugging)" print("digraph g{") def print_node(node): for subnode_key in node.transition_table.keys(): subnode = node.transition_table[subnode_key] print("%d -> %d [label=%s] //%s" % (node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers))) if subnode_key == 1: print(subnode.content) print_node(subnode) print_node(self.root) print("}") # taken from pytree.py for debugging; only used by print_ac _type_reprs = {} def type_repr(type_num): global _type_reprs if not _type_reprs: from .pygram import python_symbols # printing tokens is possible but not as useful # from .pgen2 import token // token.__dict__.items(): for name, val in python_symbols.__dict__.items(): if type(val) == int: _type_reprs[val] = name return _type_reprs.setdefault(type_num, type_num)
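# A doctest-style sketch of how add() grows the automaton. The token values
# here are toy integers; in real use the linear patterns come from
# reduce_tree() followed by get_linear_subpattern(), as in add_fixer() above:
#
#     >>> bm = BottomMatcher()
#     >>> ends = bm.add([1, ([2], [3])], start=bm.root)   # "1 then (2 | 3)"
#     >>> len(ends)                    # one accepting state per alternative
#     2
#     >>> sorted(bm.root.transition_table)
#     [1]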
6,833
Python
.py
150
33.046667
89
0.581395
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,219
fixer_util.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixer_util.py
"""Utility functions, node construction macros, etc.""" # Author: Collin Winter from itertools import islice # Local imports from .pgen2 import token from .pytree import Leaf, Node from .pygram import python_symbols as syms from . import patcomp ########################################################### ### Common node-construction "macros" ########################################################### def KeywordArg(keyword, value): return Node(syms.argument, [keyword, Leaf(token.EQUAL, "="), value]) def LParen(): return Leaf(token.LPAR, "(") def RParen(): return Leaf(token.RPAR, ")") def Assign(target, source): """Build an assignment statement""" if not isinstance(target, list): target = [target] if not isinstance(source, list): source.prefix = " " source = [source] return Node(syms.atom, target + [Leaf(token.EQUAL, "=", prefix=" ")] + source) def Name(name, prefix=None): """Return a NAME leaf""" return Leaf(token.NAME, name, prefix=prefix) def Attr(obj, attr): """A node tuple for obj.attr""" return [obj, Node(syms.trailer, [Dot(), attr])] def Comma(): """A comma leaf""" return Leaf(token.COMMA, ",") def Dot(): """A period (.) leaf""" return Leaf(token.DOT, ".") def ArgList(args, lparen=LParen(), rparen=RParen()): """A parenthesised argument list, used by Call()""" node = Node(syms.trailer, [lparen.clone(), rparen.clone()]) if args: node.insert_child(1, Node(syms.arglist, args)) return node def Call(func_name, args=None, prefix=None): """A function call""" node = Node(syms.power, [func_name, ArgList(args)]) if prefix is not None: node.prefix = prefix return node def Newline(): """A newline literal""" return Leaf(token.NEWLINE, "\n") def BlankLine(): """A blank line""" return Leaf(token.NEWLINE, "") def Number(n, prefix=None): return Leaf(token.NUMBER, n, prefix=prefix) def Subscript(index_node): """A numeric or string subscript""" return Node(syms.trailer, [Leaf(token.LBRACE, "["), index_node, Leaf(token.RBRACE, "]")]) def String(string, prefix=None): """A string leaf""" return Leaf(token.STRING, string, prefix=prefix) def ListComp(xp, fp, it, test=None): """A list comprehension of the form [xp for fp in it if test]. If test is None, the "if test" part is omitted. """ xp.prefix = "" fp.prefix = " " it.prefix = " " for_leaf = Leaf(token.NAME, "for") for_leaf.prefix = " " in_leaf = Leaf(token.NAME, "in") in_leaf.prefix = " " inner_args = [for_leaf, fp, in_leaf, it] if test: test.prefix = " " if_leaf = Leaf(token.NAME, "if") if_leaf.prefix = " " inner_args.append(Node(syms.comp_if, [if_leaf, test])) inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)]) return Node(syms.atom, [Leaf(token.LBRACE, "["), inner, Leaf(token.RBRACE, "]")]) def FromImport(package_name, name_leafs): """ Return an import statement in the form: from package import name_leafs""" # XXX: May not handle dotted imports properly (eg, package_name='foo.bar') #assert package_name == '.' or '.' not in package_name, "FromImport has "\ # "not been tested with dotted package names -- use at your own "\ # "peril!" 
for leaf in name_leafs: # Pull the leaves out of their old tree leaf.remove() children = [Leaf(token.NAME, "from"), Leaf(token.NAME, package_name, prefix=" "), Leaf(token.NAME, "import", prefix=" "), Node(syms.import_as_names, name_leafs)] imp = Node(syms.import_from, children) return imp ########################################################### ### Determine whether a node represents a given literal ########################################################### def is_tuple(node): """Does the node represent a tuple literal?""" if isinstance(node, Node) and node.children == [LParen(), RParen()]: return True return (isinstance(node, Node) and len(node.children) == 3 and isinstance(node.children[0], Leaf) and isinstance(node.children[1], Node) and isinstance(node.children[2], Leaf) and node.children[0].value == "(" and node.children[2].value == ")") def is_list(node): """Does the node represent a list literal?""" return (isinstance(node, Node) and len(node.children) > 1 and isinstance(node.children[0], Leaf) and isinstance(node.children[-1], Leaf) and node.children[0].value == "[" and node.children[-1].value == "]") ########################################################### ### Misc ########################################################### def parenthesize(node): return Node(syms.atom, [LParen(), node, RParen()]) consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum", "min", "max", "enumerate"]) def attr_chain(obj, attr): """Follow an attribute chain. If you have a chain of objects where a.foo -> b, b.foo-> c, etc, use this to iterate over all objects in the chain. Iteration is terminated by getattr(x, attr) is None. Args: obj: the starting object attr: the name of the chaining attribute Yields: Each successive object in the chain. """ next = getattr(obj, attr) while next: yield next next = getattr(next, attr) p0 = """for_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > """ p1 = """ power< ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) ) trailer< '(' node=any ')' > any* > """ p2 = """ power< ( 'sorted' | 'enumerate' ) trailer< '(' arglist<node=any any*> ')' > any* > """ pats_built = False def in_special_context(node): """ Returns true if node is in an environment where all that is required of it is being iterable (ie, it doesn't matter if it returns a list or an iterator). See test_map_nochange in test_fixers.py for some examples and tests. """ global p0, p1, p2, pats_built if not pats_built: p0 = patcomp.compile_pattern(p0) p1 = patcomp.compile_pattern(p1) p2 = patcomp.compile_pattern(p2) pats_built = True patterns = [p0, p1, p2] for pattern, parent in zip(patterns, attr_chain(node, "parent")): results = {} if pattern.match(parent, results) and results["node"] is node: return True return False def is_probably_builtin(node): """ Check that something isn't an attribute or function name etc. """ prev = node.prev_sibling if prev is not None and prev.type == token.DOT: # Attribute lookup. return False parent = node.parent if parent.type in (syms.funcdef, syms.classdef): return False if parent.type == syms.expr_stmt and parent.children[0] is node: # Assignment. return False if parent.type == syms.parameters or \ (parent.type == syms.typedargslist and ( (prev is not None and prev.type == token.COMMA) or parent.children[0] is node )): # The name of an argument. 
return False return True def find_indentation(node): """Find the indentation of *node*.""" while node is not None: if node.type == syms.suite and len(node.children) > 2: indent = node.children[1] if indent.type == token.INDENT: return indent.value node = node.parent return "" ########################################################### ### The following functions are to find bindings in a suite ########################################################### def make_suite(node): if node.type == syms.suite: return node node = node.clone() parent, node.parent = node.parent, None suite = Node(syms.suite, [node]) suite.parent = parent return suite def find_root(node): """Find the top level namespace.""" # Scamper up to the top level namespace while node.type != syms.file_input: node = node.parent if not node: raise ValueError("root found before file_input node was found.") return node def does_tree_import(package, name, node): """ Returns true if name is imported from package at the top level of the tree which node belongs to. To cover the case of an import like 'import foo', use None for the package and 'foo' for the name. """ binding = find_binding(name, find_root(node), package) return bool(binding) def is_import(node): """Returns true if the node is an import statement.""" return node.type in (syms.import_name, syms.import_from) def touch_import(package, name, node): """ Works like `does_tree_import` but adds an import statement if it was not imported. """ def is_import_stmt(node): return (node.type == syms.simple_stmt and node.children and is_import(node.children[0])) root = find_root(node) if does_tree_import(package, name, root): return # figure out where to insert the new import. First try to find # the first import and then skip to the last one. insert_pos = offset = 0 for idx, node in enumerate(root.children): if not is_import_stmt(node): continue for offset, node2 in enumerate(root.children[idx:]): if not is_import_stmt(node2): break insert_pos = idx + offset break # if there are no imports where we can insert, find the docstring. # if that also fails, we stick to the beginning of the file if insert_pos == 0: for idx, node in enumerate(root.children): if (node.type == syms.simple_stmt and node.children and node.children[0].type == token.STRING): insert_pos = idx + 1 break if package is None: import_ = Node(syms.import_name, [ Leaf(token.NAME, "import"), Leaf(token.NAME, name, prefix=" ") ]) else: import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")]) children = [import_, Newline()] root.insert_child(insert_pos, Node(syms.simple_stmt, children)) _def_syms = set([syms.classdef, syms.funcdef]) def find_binding(name, node, package=None): """ Returns the node which binds variable name, otherwise None. If optional argument package is supplied, only imports will be returned. 
    See test cases for examples."""
    for child in node.children:
        ret = None
        if child.type == syms.for_stmt:
            if _find(name, child.children[1]):
                return child
            n = find_binding(name, make_suite(child.children[-1]), package)
            if n: ret = n
        elif child.type in (syms.if_stmt, syms.while_stmt):
            n = find_binding(name, make_suite(child.children[-1]), package)
            if n: ret = n
        elif child.type == syms.try_stmt:
            n = find_binding(name, make_suite(child.children[2]), package)
            if n:
                ret = n
            else:
                for i, kid in enumerate(child.children[3:]):
                    if kid.type == token.COLON and kid.value == ":":
                        # i+3 is the colon, i+4 is the suite
                        n = find_binding(name, make_suite(child.children[i+4]), package)
                        if n: ret = n
        elif child.type in _def_syms and child.children[1].value == name:
            ret = child
        elif _is_import_binding(child, name, package):
            ret = child
        elif child.type == syms.simple_stmt:
            ret = find_binding(name, child, package)
        elif child.type == syms.expr_stmt:
            if _find(name, child.children[0]):
                ret = child

        if ret:
            if not package:
                return ret
            if is_import(ret):
                return ret
    return None

_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
    nodes = [node]
    while nodes:
        node = nodes.pop()
        if node.type > 256 and node.type not in _block_syms:
            nodes.extend(node.children)
        elif node.type == token.NAME and node.value == name:
            return node
    return None

def _is_import_binding(node, name, package=None):
    """ Will return node if node will import name, or node
    will import * from package.  None is returned otherwise.
    See test cases for examples. """

    if node.type == syms.import_name and not package:
        imp = node.children[1]
        if imp.type == syms.dotted_as_names:
            for child in imp.children:
                if child.type == syms.dotted_as_name:
                    if child.children[2].value == name:
                        return node
                elif child.type == token.NAME and child.value == name:
                    return node
        elif imp.type == syms.dotted_as_name:
            last = imp.children[-1]
            if last.type == token.NAME and last.value == name:
                return node
        elif imp.type == token.NAME and imp.value == name:
            return node
    elif node.type == syms.import_from:
        # str(...) is used to make life easier here, because
        # from a.b import parses to ['import', ['a', '.', 'b'], ...]
        if package and str(node.children[1]).strip() != package:
            return None
        n = node.children[3]
        if package and _find("as", n):
            # See test_from_import_as for explanation
            return None
        elif n.type == syms.import_as_names and _find(name, n):
            return node
        elif n.type == syms.import_as_name:
            child = n.children[2]
            if child.type == token.NAME and child.value == name:
                return node
        elif n.type == token.NAME and n.value == name:
            return node
        elif package and n.type == token.STAR:
            return node
    return None
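# The construction macros above compose into ordinary pytree nodes whose
# str() is the exact source text they stand for. A doctest-style sketch
# (Python 3 semantics, where str() renders a node):
#
#     >>> str(Assign(Name("x"), Number("1")))
#     'x = 1'
#     >>> str(Call(Name("list"), [Name("x")]))
#     'list(x)'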
14,552
Python
.py
373
31.147453
88
0.574646
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,220
pytree.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pytree.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """ Python parse tree definitions. This is a very concrete parse tree; we need to keep every token and even the comments and whitespace between tokens. There's also a pattern matching implementation here. """ __author__ = "Guido van Rossum <guido@python.org>" import sys import warnings from io import StringIO HUGE = 0x7FFFFFFF # maximum repeat count, default max _type_reprs = {} def type_repr(type_num): global _type_reprs if not _type_reprs: from .pygram import python_symbols # printing tokens is possible but not as useful # from .pgen2 import token // token.__dict__.items(): for name, val in python_symbols.__dict__.items(): if type(val) == int: _type_reprs[val] = name return _type_reprs.setdefault(type_num, type_num) class Base(object): """ Abstract base class for Node and Leaf. This provides some default functionality and boilerplate using the template pattern. A node may be a subnode of at most one parent. """ # Default values for instance variables type = None # int: token number (< 256) or symbol number (>= 256) parent = None # Parent node pointer, or None children = () # Tuple of subnodes was_changed = False was_checked = False def __new__(cls, *args, **kwds): """Constructor that prevents Base from being instantiated.""" assert cls is not Base, "Cannot instantiate Base" return object.__new__(cls) def __eq__(self, other): """ Compare two nodes for equality. This calls the method _eq(). """ if self.__class__ is not other.__class__: return NotImplemented return self._eq(other) __hash__ = None # For Py3 compatibility. def __ne__(self, other): """ Compare two nodes for inequality. This calls the method _eq(). """ if self.__class__ is not other.__class__: return NotImplemented return not self._eq(other) def _eq(self, other): """ Compare two nodes for equality. This is called by __eq__ and __ne__. It is only called if the two nodes have the same type. This must be implemented by the concrete subclass. Nodes should be considered equal if they have the same structure, ignoring the prefix string and other context information. """ raise NotImplementedError def clone(self): """ Return a cloned (deep) copy of self. This must be implemented by the concrete subclass. """ raise NotImplementedError def post_order(self): """ Return a post-order iterator for the tree. This must be implemented by the concrete subclass. """ raise NotImplementedError def pre_order(self): """ Return a pre-order iterator for the tree. This must be implemented by the concrete subclass. """ raise NotImplementedError def set_prefix(self, prefix): """ Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. """ warnings.warn("set_prefix() is deprecated; use the prefix property", DeprecationWarning, stacklevel=2) self.prefix = prefix def get_prefix(self): """ Return the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. 
""" warnings.warn("get_prefix() is deprecated; use the prefix property", DeprecationWarning, stacklevel=2) return self.prefix def replace(self, new): """Replace this node with a new one in the parent.""" assert self.parent is not None, str(self) assert new is not None if not isinstance(new, list): new = [new] l_children = [] found = False for ch in self.parent.children: if ch is self: assert not found, (self.parent.children, self, new) if new is not None: l_children.extend(new) found = True else: l_children.append(ch) assert found, (self.children, self, new) self.parent.changed() self.parent.children = l_children for x in new: x.parent = self.parent self.parent = None def get_lineno(self): """Return the line number which generated the invocant node.""" node = self while not isinstance(node, Leaf): if not node.children: return node = node.children[0] return node.lineno def changed(self): if self.parent: self.parent.changed() self.was_changed = True def remove(self): """ Remove the node from the tree. Returns the position of the node in its parent's children before it was removed. """ if self.parent: for i, node in enumerate(self.parent.children): if node is self: self.parent.changed() del self.parent.children[i] self.parent = None return i @property def next_sibling(self): """ The node immediately following the invocant in their parent's children list. If the invocant does not have a next sibling, it is None """ if self.parent is None: return None # Can't use index(); we need to test by identity for i, child in enumerate(self.parent.children): if child is self: try: return self.parent.children[i+1] except IndexError: return None @property def prev_sibling(self): """ The node immediately preceding the invocant in their parent's children list. If the invocant does not have a previous sibling, it is None. """ if self.parent is None: return None # Can't use index(); we need to test by identity for i, child in enumerate(self.parent.children): if child is self: if i == 0: return None return self.parent.children[i-1] def leaves(self): for child in self.children: for x in child.leaves(): yield x def depth(self): if self.parent is None: return 0 return 1 + self.parent.depth() def get_suffix(self): """ Return the string immediately following the invocant node. This is effectively equivalent to node.next_sibling.prefix """ next_sib = self.next_sibling if next_sib is None: return "" return next_sib.prefix if sys.version_info < (3, 0): def __str__(self): return str(self).encode("ascii") class Node(Base): """Concrete implementation for interior nodes.""" def __init__(self,type, children, context=None, prefix=None, fixers_applied=None): """ Initializer. Takes a type constant (a symbol number >= 256), a sequence of child nodes, and an optional context keyword argument. As a side effect, the parent pointers of the children are updated. """ assert type >= 256, type self.type = type self.children = list(children) for ch in self.children: assert ch.parent is None, repr(ch) ch.parent = self if prefix is not None: self.prefix = prefix if fixers_applied: self.fixers_applied = fixers_applied[:] else: self.fixers_applied = None def __repr__(self): """Return a canonical string representation.""" return "%s(%s, %r)" % (self.__class__.__name__, type_repr(self.type), self.children) def __unicode__(self): """ Return a pretty string representation. This reproduces the input source exactly. 
""" return "".join(map(str, self.children)) if sys.version_info > (3, 0): __str__ = __unicode__ def _eq(self, other): """Compare two nodes for equality.""" return (self.type, self.children) == (other.type, other.children) def clone(self): """Return a cloned (deep) copy of self.""" return Node(self.type, [ch.clone() for ch in self.children], fixers_applied=self.fixers_applied) def post_order(self): """Return a post-order iterator for the tree.""" for child in self.children: for node in child.post_order(): yield node yield self def pre_order(self): """Return a pre-order iterator for the tree.""" yield self for child in self.children: for node in child.pre_order(): yield node def _prefix_getter(self): """ The whitespace and comments preceding this node in the input. """ if not self.children: return "" return self.children[0].prefix def _prefix_setter(self, prefix): if self.children: self.children[0].prefix = prefix prefix = property(_prefix_getter, _prefix_setter) def set_child(self, i, child): """ Equivalent to 'node.children[i] = child'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children[i].parent = None self.children[i] = child self.changed() def insert_child(self, i, child): """ Equivalent to 'node.children.insert(i, child)'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children.insert(i, child) self.changed() def append_child(self, child): """ Equivalent to 'node.children.append(child)'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children.append(child) self.changed() class Leaf(Base): """Concrete implementation for leaf nodes.""" # Default values for instance variables _prefix = "" # Whitespace and comments preceding this token in the input lineno = 0 # Line where this token starts in the input column = 0 # Column where this token tarts in the input def __init__(self, type, value, context=None, prefix=None, fixers_applied=[]): """ Initializer. Takes a type constant (a token number < 256), a string value, and an optional context keyword argument. """ assert 0 <= type < 256, type if context is not None: self._prefix, (self.lineno, self.column) = context self.type = type self.value = value if prefix is not None: self._prefix = prefix self.fixers_applied = fixers_applied[:] def __repr__(self): """Return a canonical string representation.""" return "%s(%r, %r)" % (self.__class__.__name__, self.type, self.value) def __unicode__(self): """ Return a pretty string representation. This reproduces the input source exactly. """ return self.prefix + str(self.value) if sys.version_info > (3, 0): __str__ = __unicode__ def _eq(self, other): """Compare two nodes for equality.""" return (self.type, self.value) == (other.type, other.value) def clone(self): """Return a cloned (deep) copy of self.""" return Leaf(self.type, self.value, (self.prefix, (self.lineno, self.column)), fixers_applied=self.fixers_applied) def leaves(self): yield self def post_order(self): """Return a post-order iterator for the tree.""" yield self def pre_order(self): """Return a pre-order iterator for the tree.""" yield self def _prefix_getter(self): """ The whitespace and comments preceding this token in the input. """ return self._prefix def _prefix_setter(self, prefix): self.changed() self._prefix = prefix prefix = property(_prefix_getter, _prefix_setter) def convert(gr, raw_node): """ Convert raw node information to a Node or Leaf instance. 
This is passed to the parser driver which calls it whenever a reduction of a grammar rule produces a new complete node, so that the tree is build strictly bottom-up. """ type, value, context, children = raw_node if children or type in gr.number2symbol: # If there's exactly one child, return that child instead of # creating a new node. if len(children) == 1: return children[0] return Node(type, children, context=context) else: return Leaf(type, value, context=context) class BasePattern(object): """ A pattern is a tree matching pattern. It looks for a specific node type (token or symbol), and optionally for a specific content. This is an abstract base class. There are three concrete subclasses: - LeafPattern matches a single leaf node; - NodePattern matches a single node (usually non-leaf); - WildcardPattern matches a sequence of nodes of variable length. """ # Defaults for instance variables type = None # Node type (token if < 256, symbol if >= 256) content = None # Optional content matching pattern name = None # Optional name used to store match in results dict def __new__(cls, *args, **kwds): """Constructor that prevents BasePattern from being instantiated.""" assert cls is not BasePattern, "Cannot instantiate BasePattern" return object.__new__(cls) def __repr__(self): args = [type_repr(self.type), self.content, self.name] while args and args[-1] is None: del args[-1] return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) def optimize(self): """ A subclass can define this as a hook for optimizations. Returns either self or another node with the same effect. """ return self def match(self, node, results=None): """ Does this pattern exactly match a node? Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. Default implementation for non-wildcard patterns. """ if self.type is not None and node.type != self.type: return False if self.content is not None: r = None if results is not None: r = {} if not self._submatch(node, r): return False if r: results.update(r) if results is not None and self.name: results[self.name] = node return True def match_seq(self, nodes, results=None): """ Does this pattern exactly match a sequence of nodes? Default implementation for non-wildcard patterns. """ if len(nodes) != 1: return False return self.match(nodes[0], results) def generate_matches(self, nodes): """ Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns. """ r = {} if nodes and self.match(nodes[0], r): yield 1, r class LeafPattern(BasePattern): def __init__(self, type=None, content=None, name=None): """ Initializer. Takes optional type, content, and name. The type, if given must be a token type (< 256). If not given, this matches any *leaf* node; the content may still be required. The content, if given, must be a string. If a name is given, the matching node is stored in the results dict under that key. """ if type is not None: assert 0 <= type < 256, type if content is not None: assert isinstance(content, str), repr(content) self.type = type self.content = content self.name = name def match(self, node, results=None): """Override match() to insist on a leaf node.""" if not isinstance(node, Leaf): return False return BasePattern.match(self, node, results) def _submatch(self, node, results=None): """ Match the pattern's content to the node's children. This assumes the node type matches and self.content is not None. 
Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. When returning False, the results dict may still be updated. """ return self.content == node.value class NodePattern(BasePattern): wildcards = False def __init__(self, type=None, content=None, name=None): """ Initializer. Takes optional type, content, and name. The type, if given, must be a symbol type (>= 256). If the type is None this matches *any* single node (leaf or not), except if content is not None, in which it only matches non-leaf nodes that also match the content pattern. The content, if not None, must be a sequence of Patterns that must match the node's children exactly. If the content is given, the type must not be None. If a name is given, the matching node is stored in the results dict under that key. """ if type is not None: assert type >= 256, type if content is not None: assert not isinstance(content, str), repr(content) content = list(content) for i, item in enumerate(content): assert isinstance(item, BasePattern), (i, item) if isinstance(item, WildcardPattern): self.wildcards = True self.type = type self.content = content self.name = name def _submatch(self, node, results=None): """ Match the pattern's content to the node's children. This assumes the node type matches and self.content is not None. Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. When returning False, the results dict may still be updated. """ if self.wildcards: for c, r in generate_matches(self.content, node.children): if c == len(node.children): if results is not None: results.update(r) return True return False if len(self.content) != len(node.children): return False for subpattern, child in zip(self.content, node.children): if not subpattern.match(child, results): return False return True class WildcardPattern(BasePattern): """ A wildcard pattern can match zero or more nodes. This has all the flexibility needed to implement patterns like: .* .+ .? .{m,n} (a b c | d e | f) (...)* (...)+ (...)? (...){m,n} except it always uses non-greedy matching. """ def __init__(self, content=None, min=0, max=HUGE, name=None): """ Initializer. Args: content: optional sequence of subsequences of patterns; if absent, matches one node; if present, each subsequence is an alternative [*] min: optional minimum number of times to match, default 0 max: optional maximum number of times to match, default HUGE name: optional name assigned to this match [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is equivalent to (a b c | d e | f g h); if content is None, this is equivalent to '.' in regular expression terms. The min and max parameters work as follows: min=0, max=maxint: .* min=1, max=maxint: .+ min=0, max=1: .? min=1, max=1: . If content is not None, replace the dot with the parenthesized list of alternatives, e.g. 
(a b c | d e | f g h)* """ assert 0 <= min <= max <= HUGE, (min, max) if content is not None: content = tuple(map(tuple, content)) # Protect against alterations # Check sanity of alternatives assert len(content), repr(content) # Can't have zero alternatives for alt in content: assert len(alt), repr(alt) # Can have empty alternatives self.content = content self.min = min self.max = max self.name = name def optimize(self): """Optimize certain stacked wildcard patterns.""" subpattern = None if (self.content is not None and len(self.content) == 1 and len(self.content[0]) == 1): subpattern = self.content[0][0] if self.min == 1 and self.max == 1: if self.content is None: return NodePattern(name=self.name) if subpattern is not None and self.name == subpattern.name: return subpattern.optimize() if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and subpattern.min <= 1 and self.name == subpattern.name): return WildcardPattern(subpattern.content, self.min*subpattern.min, self.max*subpattern.max, subpattern.name) return self def match(self, node, results=None): """Does this pattern exactly match a node?""" return self.match_seq([node], results) def match_seq(self, nodes, results=None): """Does this pattern exactly match a sequence of nodes?""" for c, r in self.generate_matches(nodes): if c == len(nodes): if results is not None: results.update(r) if self.name: results[self.name] = list(nodes) return True return False def generate_matches(self, nodes): """ Generator yielding matches for a sequence of nodes. Args: nodes: sequence of nodes Yields: (count, results) tuples where: count: the match comprises nodes[:count]; results: dict containing named submatches. """ if self.content is None: # Shortcut for special case (see __init__.__doc__) for count in range(self.min, 1 + min(len(nodes), self.max)): r = {} if self.name: r[self.name] = nodes[:count] yield count, r elif self.name == "bare_name": yield self._bare_name_matches(nodes) else: # The reason for this is that hitting the recursion limit usually # results in some ugly messages about how RuntimeErrors are being # ignored. We only have to do this on CPython, though, because other # implementations don't have this nasty bug in the first place. if hasattr(sys, "getrefcount"): save_stderr = sys.stderr sys.stderr = StringIO() try: for count, r in self._recursive_matches(nodes, 0): if self.name: r[self.name] = nodes[:count] yield count, r except RuntimeError: # We fall back to the iterative pattern matching scheme if the recursive # scheme hits the recursion limit. 
for count, r in self._iterative_matches(nodes): if self.name: r[self.name] = nodes[:count] yield count, r finally: if hasattr(sys, "getrefcount"): sys.stderr = save_stderr def _iterative_matches(self, nodes): """Helper to iteratively yield the matches.""" nodelen = len(nodes) if 0 >= self.min: yield 0, {} results = [] # generate matches that use just one alt from self.content for alt in self.content: for c, r in generate_matches(alt, nodes): yield c, r results.append((c, r)) # for each match, iterate down the nodes while results: new_results = [] for c0, r0 in results: # stop if the entire set of nodes has been matched if c0 < nodelen and c0 <= self.max: for alt in self.content: for c1, r1 in generate_matches(alt, nodes[c0:]): if c1 > 0: r = {} r.update(r0) r.update(r1) yield c0 + c1, r new_results.append((c0 + c1, r)) results = new_results def _bare_name_matches(self, nodes): """Special optimized matcher for bare_name.""" count = 0 r = {} done = False max = len(nodes) while not done and count < max: done = True for leaf in self.content: if leaf[0].match(nodes[count], r): count += 1 done = False break r[self.name] = nodes[:count] return count, r def _recursive_matches(self, nodes, count): """Helper to recursively yield the matches.""" assert self.content is not None if count >= self.min: yield 0, {} if count < self.max: for alt in self.content: for c0, r0 in generate_matches(alt, nodes): for c1, r1 in self._recursive_matches(nodes[c0:], count+1): r = {} r.update(r0) r.update(r1) yield c0 + c1, r class NegatedPattern(BasePattern): def __init__(self, content=None): """ Initializer. The argument is either a pattern or None. If it is None, this only matches an empty sequence (effectively '$' in regex lingo). If it is not None, this matches whenever the argument pattern doesn't have any matches. """ if content is not None: assert isinstance(content, BasePattern), repr(content) self.content = content def match(self, node): # We never match a node in its entirety return False def match_seq(self, nodes): # We only match an empty sequence of nodes in its entirety return len(nodes) == 0 def generate_matches(self, nodes): if self.content is None: # Return a match if there is an empty sequence if len(nodes) == 0: yield 0, {} else: # Return a match if the argument pattern has no matches for c, r in self.content.generate_matches(nodes): return yield 0, {} def generate_matches(patterns, nodes): """ Generator yielding matches for a sequence of patterns and nodes. Args: patterns: a sequence of patterns nodes: a sequence of nodes Yields: (count, results) tuples where: count: the entire sequence of patterns matches nodes[:count]; results: dict containing named submatches. """ if not patterns: yield 0, {} else: p, rest = patterns[0], patterns[1:] for c0, r0 in p.generate_matches(nodes): if not rest: yield c0, r0 else: for c1, r1 in generate_matches(rest, nodes[c0:]): r = {} r.update(r0) r.update(r1) yield c0 + c1, r
29,039
Python
.py
732
29.064208
88
0.572535
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
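The wildcard machinery above is easiest to see with a tiny end-to-end match. A minimal sketch, assuming the stdlib lib2to3 package (which this vendored copy mirrors; lib2to3 is deprecated in recent CPython, so treat it as illustrative). The capture name "seq" is just an example:

from lib2to3 import pytree
from lib2to3.pgen2 import token

# Two leaves standing in for parsed NAME tokens.
nodes = [pytree.Leaf(token.NAME, "a"), pytree.Leaf(token.NAME, "b")]

# content=None, min=0, max=HUGE is the ".*" case described in __init__ above.
pat = pytree.WildcardPattern(min=0, max=pytree.HUGE, name="seq")

results = {}
print(pat.match_seq(nodes, results))            # True
print([leaf.value for leaf in results["seq"]])  # ['a', 'b']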
28,221
refactor.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/refactor.py
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Refactoring framework.

Used as a main program, this can refactor any number of files and/or
recursively descend down directories.  Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""

from __future__ import with_statement

__author__ = "Guido van Rossum <guido@python.org>"


# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain

# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm


def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package."""
    pkg = __import__(fixer_pkg, [], [], ["*"])
    fixer_dir = os.path.dirname(pkg.__file__)
    fix_names = []
    for name in sorted(os.listdir(fixer_dir)):
        if name.startswith("fix_") and name.endswith(".py"):
            if remove_prefix:
                name = name[4:]
            fix_names.append(name[:-3])
    return fix_names


class _EveryNode(Exception):
    pass


def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
        of the pattern types which will match first. """

    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        #   or a type and content -- so they don't get any farther
        # Always return leaves
        if pat.type is None:
            raise _EveryNode
        return set([pat.type])

    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return _get_head_types(pat.content)
        raise _EveryNode # Negated Patterns don't have a type

    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content
        r = set()
        for p in pat.content:
            for x in p:
                r.update(_get_head_types(x))
        return r

    raise Exception("Oh no! I don't understand pattern %s" % (pat))


def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
        of head node type --> fixer list.  """
    head_nodes = collections.defaultdict(list)
    every = []
    for fixer in fixer_list:
        if fixer.pattern:
            try:
                heads = _get_head_types(fixer.pattern)
            except _EveryNode:
                every.append(fixer)
            else:
                for node_type in heads:
                    head_nodes[node_type].append(fixer)
        else:
            if fixer._accept_type is not None:
                head_nodes[fixer._accept_type].append(fixer)
            else:
                every.append(fixer)
    for node_type in chain(pygram.python_grammar.symbol2number.values(),
                           pygram.python_grammar.tokens):
        head_nodes[node_type].extend(every)
    return dict(head_nodes)


def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    return [pkg_name + "." + fix_name
            for fix_name in get_all_fix_names(pkg_name, False)]

def _identity(obj):
    return obj

if sys.version_info < (3, 0):
    import codecs
    _open_with_encoding = codecs.open
    # codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input): return input.replace("\r\n", "\n") def _to_system_newlines(input): if os.linesep != "\n": return input.replace("\n", os.linesep) else: return input else: _open_with_encoding = open _from_system_newlines = _identity _to_system_newlines = _identity def _detect_future_features(source): have_docstring = False gen = tokenize.generate_tokens(io.StringIO(source).readline) def advance(): tok = next(gen) return tok[0], tok[1] ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT)) features = set() try: while True: tp, value = advance() if tp in ignore: continue elif tp == token.STRING: if have_docstring: break have_docstring = True elif tp == token.NAME and value == "from": tp, value = advance() if tp != token.NAME or value != "__future__": break tp, value = advance() if tp != token.NAME or value != "import": break tp, value = advance() if tp == token.OP and value == "(": tp, value = advance() while tp == token.NAME: features.add(value) tp, value = advance() if tp != token.OP or value != ",": break tp, value = advance() else: break except StopIteration: pass return frozenset(features) class FixerError(Exception): """A fixer could not be loaded.""" class RefactoringTool(object): _default_options = {"print_function" : False, "write_unchanged_files" : False} CLASS_PREFIX = "Fix" # The prefix for fixer classes FILE_PREFIX = "fix_" # The prefix for modules with a fixer within def __init__(self, fixer_names, options=None, explicit=None): """Initializer. Args: fixer_names: a list of fixers to import options: an dict with configuration. explicit: a list of fixers to run even if they are explicit. """ self.fixers = fixer_names self.explicit = explicit or [] self.options = self._default_options.copy() if options is not None: self.options.update(options) if self.options["print_function"]: self.grammar = pygram.python_grammar_no_print_statement else: self.grammar = pygram.python_grammar # When this is True, the refactor*() methods will call write_file() for # files processed even if they were not changed during refactoring. If # and only if the refactor method's write parameter was True. self.write_unchanged_files = self.options.get("write_unchanged_files") self.errors = [] self.logger = logging.getLogger("RefactoringTool") self.fixer_log = [] self.wrote = False self.driver = driver.Driver(self.grammar, convert=pytree.convert, logger=self.logger) self.pre_order, self.post_order = self.get_fixers() self.files = [] # List of files that were or should be modified self.BM = bm.BottomMatcher() self.bmi_pre_order = [] # Bottom Matcher incompatible fixers self.bmi_post_order = [] for fixer in chain(self.post_order, self.pre_order): if fixer.BM_compatible: self.BM.add_fixer(fixer) # remove fixers that will be handled by the bottom-up # matcher elif fixer in self.pre_order: self.bmi_pre_order.append(fixer) elif fixer in self.post_order: self.bmi_post_order.append(fixer) self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order) self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order) def get_fixers(self): """Inspects the options to load the requested patterns and handlers. Returns: (pre_order, post_order), where pre_order is the list of fixers that want a pre-order AST traversal, and post_order is the list that want post-order traversal. 
""" pre_order_fixers = [] post_order_fixers = [] for fix_mod_path in self.fixers: mod = __import__(fix_mod_path, {}, {}, ["*"]) fix_name = fix_mod_path.rsplit(".", 1)[-1] if fix_name.startswith(self.FILE_PREFIX): fix_name = fix_name[len(self.FILE_PREFIX):] parts = fix_name.split("_") class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts]) try: fix_class = getattr(mod, class_name) except AttributeError: raise FixerError("Can't find %s.%s" % (fix_name, class_name)) fixer = fix_class(self.options, self.fixer_log) if fixer.explicit and self.explicit is not True and \ fix_mod_path not in self.explicit: self.log_message("Skipping implicit fixer: %s", fix_name) continue self.log_debug("Adding transformation: %s", fix_name) if fixer.order == "pre": pre_order_fixers.append(fixer) elif fixer.order == "post": post_order_fixers.append(fixer) else: raise FixerError("Illegal fixer order: %r" % fixer.order) key_func = operator.attrgetter("run_order") pre_order_fixers.sort(key=key_func) post_order_fixers.sort(key=key_func) return (pre_order_fixers, post_order_fixers) def log_error(self, msg, *args, **kwds): """Called when an error occurs.""" raise def log_message(self, msg, *args): """Hook to log a message.""" if args: msg = msg % args self.logger.info(msg) def log_debug(self, msg, *args): if args: msg = msg % args self.logger.debug(msg) def print_output(self, old_text, new_text, filename, equal): """Called with the old version, new version, and filename of a refactored file.""" pass def refactor(self, items, write=False, doctests_only=False): """Refactor a list of files and directories.""" for dir_or_file in items: if os.path.isdir(dir_or_file): self.refactor_dir(dir_or_file, write, doctests_only) else: self.refactor_file(dir_or_file, write, doctests_only) def refactor_dir(self, dir_name, write=False, doctests_only=False): """Descends down a directory and refactor every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with '.' are skipped. """ py_ext = os.extsep + "py" for dirpath, dirnames, filenames in os.walk(dir_name): self.log_debug("Descending into %s", dirpath) dirnames.sort() filenames.sort() for name in filenames: if (not name.startswith(".") and os.path.splitext(name)[1] == py_ext): fullname = os.path.join(dirpath, name) self.refactor_file(fullname, write, doctests_only) # Modify dirnames in-place to remove subdirs with leading dots dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")] def _read_python_source(self, filename): """ Do our best to decode a Python source file correctly. """ try: f = open(filename, "rb") except IOError as err: self.log_error("Can't open %s: %s", filename, err) return None, None try: encoding = tokenize.detect_encoding(f.readline)[0] finally: f.close() with _open_with_encoding(filename, "r", encoding=encoding) as f: return _from_system_newlines(f.read()), encoding def refactor_file(self, filename, write=False, doctests_only=False): """Refactors a file.""" input, encoding = self._read_python_source(filename) if input is None: # Reading the file failed. 
return input += "\n" # Silence certain parse errors if doctests_only: self.log_debug("Refactoring doctests in %s", filename) output = self.refactor_docstring(input, filename) if self.write_unchanged_files or output != input: self.processed_file(output, filename, input, write, encoding) else: self.log_debug("No doctest changes in %s", filename) else: tree = self.refactor_string(input, filename) if self.write_unchanged_files or (tree and tree.was_changed): # The [:-1] is to take off the \n we added earlier self.processed_file(str(tree)[:-1], filename, write=write, encoding=encoding) else: self.log_debug("No changes in %s", filename) def refactor_string(self, data, name): """Refactor a given input string. Args: data: a string holding the code to be refactored. name: a human-readable name for use in error/log messages. Returns: An AST corresponding to the refactored input stream; None if there were errors during the parse. """ features = _detect_future_features(data) if "print_function" in features: self.driver.grammar = pygram.python_grammar_no_print_statement try: tree = self.driver.parse_string(data) except Exception as err: self.log_error("Can't parse %s: %s: %s", name, err.__class__.__name__, err) return finally: self.driver.grammar = self.grammar tree.future_features = features self.log_debug("Refactoring %s", name) self.refactor_tree(tree, name) return tree def refactor_stdin(self, doctests_only=False): input = sys.stdin.read() if doctests_only: self.log_debug("Refactoring doctests in stdin") output = self.refactor_docstring(input, "<stdin>") if self.write_unchanged_files or output != input: self.processed_file(output, "<stdin>", input) else: self.log_debug("No doctest changes in stdin") else: tree = self.refactor_string(input, "<stdin>") if self.write_unchanged_files or (tree and tree.was_changed): self.processed_file(str(tree), "<stdin>", input) else: self.log_debug("No changes in stdin") def refactor_tree(self, tree, name): """Refactors a parse tree (modifying the tree in place). For compatible patterns the bottom matcher module is used. Otherwise the tree is traversed node-to-node for matches. Args: tree: a pytree.Node instance representing the root of the tree to be refactored. name: a human-readable name for this tree. Returns: True if the tree was modified, False otherwise. 
""" for fixer in chain(self.pre_order, self.post_order): fixer.start_tree(tree, name) #use traditional matching for the incompatible fixers self.traverse_by(self.bmi_pre_order_heads, tree.pre_order()) self.traverse_by(self.bmi_post_order_heads, tree.post_order()) # obtain a set of candidate nodes match_set = self.BM.run(tree.leaves()) while any(match_set.values()): for fixer in self.BM.fixers: if fixer in match_set and match_set[fixer]: #sort by depth; apply fixers from bottom(of the AST) to top match_set[fixer].sort(key=pytree.Base.depth, reverse=True) if fixer.keep_line_order: #some fixers(eg fix_imports) must be applied #with the original file's line order match_set[fixer].sort(key=pytree.Base.get_lineno) for node in list(match_set[fixer]): if node in match_set[fixer]: match_set[fixer].remove(node) try: find_root(node) except ValueError: # this node has been cut off from a # previous transformation ; skip continue if node.fixers_applied and fixer in node.fixers_applied: # do not apply the same fixer again continue results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: node.replace(new) #new.fixers_applied.append(fixer) for node in new.post_order(): # do not apply the fixer again to # this or any subnode if not node.fixers_applied: node.fixers_applied = [] node.fixers_applied.append(fixer) # update the original match set for # the added code new_matches = self.BM.run(new.leaves()) for fxr in new_matches: if not fxr in match_set: match_set[fxr]=[] match_set[fxr].extend(new_matches[fxr]) for fixer in chain(self.pre_order, self.post_order): fixer.finish_tree(tree, name) return tree.was_changed def traverse_by(self, fixers, traversal): """Traverse an AST, applying a set of fixers to each node. This is a helper method for refactor_tree(). Args: fixers: a list of fixer instances. traversal: a generator that yields AST nodes. Returns: None """ if not fixers: return for node in traversal: for fixer in fixers[node.type]: results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: node.replace(new) node = new def processed_file(self, new_text, filename, old_text=None, write=False, encoding=None): """ Called when a file has been refactored and there may be changes. """ self.files.append(filename) if old_text is None: old_text = self._read_python_source(filename)[0] if old_text is None: return equal = old_text == new_text self.print_output(old_text, new_text, filename, equal) if equal: self.log_debug("No changes to %s", filename) if not self.write_unchanged_files: return if write: self.write_file(new_text, filename, old_text, encoding) else: self.log_debug("Not writing changes to %s", filename) def write_file(self, new_text, filename, old_text, encoding=None): """Writes a string to a file. It first shows a unified diff between the old text and the new text, and then rewrites the file; the latter is only done if the write option is set. """ try: f = _open_with_encoding(filename, "w", encoding=encoding) except os.error as err: self.log_error("Can't create %s: %s", filename, err) return try: f.write(_to_system_newlines(new_text)) except os.error as err: self.log_error("Can't write %s: %s", filename, err) finally: f.close() self.log_debug("Wrote changes to %s", filename) self.wrote = True PS1 = ">>> " PS2 = "... " def refactor_docstring(self, input, filename): """Refactors a docstring, looking for doctests. This returns a modified version of the input string. 
It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.) """ result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + "\n")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return "".join(result) def refactor_doctest(self, block, lineno, indent, filename): """Refactors one doctest. A doctest is given as a block of lines, the first of which starts with ">>>" (possibly indented), while the remaining lines start with "..." (identically indented). """ try: tree = self.parse_block(block, lineno, indent) except Exception as err: if self.logger.isEnabledFor(logging.DEBUG): for line in block: self.log_debug("Source: %s", line.rstrip("\n")) self.log_error("Can't parse docstring in %s line %s: %s: %s", filename, lineno, err.__class__.__name__, err) return block if self.refactor_tree(tree, filename): new = str(tree).splitlines(True) # Undo the adjustment of the line numbers in wrap_toks() below. clipped, new = new[:lineno-1], new[lineno-1:] assert clipped == ["\n"] * (lineno-1), clipped if not new[-1].endswith("\n"): new[-1] += "\n" block = [indent + self.PS1 + new.pop(0)] if new: block += [indent + self.PS2 + line for line in new] return block def summarize(self): if self.wrote: were = "were" else: were = "need to be" if not self.files: self.log_message("No files %s modified.", were) else: self.log_message("Files that %s modified:", were) for file in self.files: self.log_message(file) if self.fixer_log: self.log_message("Warnings/messages while refactoring:") for message in self.fixer_log: self.log_message(message) if self.errors: if len(self.errors) == 1: self.log_message("There was 1 error:") else: self.log_message("There were %d errors:", len(self.errors)) for msg, args, kwds in self.errors: self.log_message(msg, *args, **kwds) def parse_block(self, block, lineno, indent): """Parses a block into a tree. This is necessary to get correct line number / offset information in the parser diagnostics and embedded into the parse tree. """ tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent)) tree.future_features = frozenset() return tree def wrap_toks(self, block, lineno, indent): """Wraps a tokenize stream to systematically modify start/end.""" tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__) for type, value, (line0, col0), (line1, col1), line_text in tokens: line0 += lineno - 1 line1 += lineno - 1 # Don't bother updating the columns; this is too complicated # since line_text would also have to be updated and it would # still break for tokens spanning lines. Let the user guess # that the column numbers for doctests are relative to the # end of the prompt string (PS1 or PS2). 
yield type, value, (line0, col0), (line1, col1), line_text def gen_lines(self, block, indent): """Generates lines as expected by tokenize from a list of lines. This strips the first len(indent + self.PS1) characters off each line. """ prefix1 = indent + self.PS1 prefix2 = indent + self.PS2 prefix = prefix1 for line in block: if line.startswith(prefix): yield line[len(prefix):] elif line == prefix.rstrip() + "\n": yield "\n" else: raise AssertionError("line=%r, prefix=%r" % (line, prefix)) prefix = prefix2 while True: yield "" class MultiprocessingUnsupported(Exception): pass class MultiprocessRefactoringTool(RefactoringTool): def __init__(self, *args, **kwargs): super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs) self.queue = None self.output_lock = None def refactor(self, items, write=False, doctests_only=False, num_processes=1): if num_processes == 1: return super(MultiprocessRefactoringTool, self).refactor( items, write, doctests_only) try: import multiprocessing except ImportError: raise MultiprocessingUnsupported if self.queue is not None: raise RuntimeError("already doing multiple processes") self.queue = multiprocessing.JoinableQueue() self.output_lock = multiprocessing.Lock() processes = [multiprocessing.Process(target=self._child) for i in range(num_processes)] try: for p in processes: p.start() super(MultiprocessRefactoringTool, self).refactor(items, write, doctests_only) finally: self.queue.join() for i in range(num_processes): self.queue.put(None) for p in processes: if p.is_alive(): p.join() self.queue = None def _child(self): task = self.queue.get() while task is not None: args, kwargs = task try: super(MultiprocessRefactoringTool, self).refactor_file( *args, **kwargs) finally: self.queue.task_done() task = self.queue.get() def refactor_file(self, *args, **kwargs): if self.queue is not None: self.queue.put((args, kwargs)) else: return super(MultiprocessRefactoringTool, self).refactor_file( *args, **kwargs)
28,024
Python
.py
642
31.140187
81
0.558639
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
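A minimal sketch of driving the refactoring engine over a string, the same path refactor_file() takes for file contents. This assumes the stdlib lib2to3 and its bundled fixer module lib2to3.fixes.fix_print:

from lib2to3.refactor import RefactoringTool

rt = RefactoringTool(["lib2to3.fixes.fix_print"])
tree = rt.refactor_string("print 'hello'\n", "<example>")
print(str(tree))  # print('hello')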
28,222
patcomp.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/patcomp.py
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Pattern compiler.

The grammar is taken from PatternGrammar.txt.

The compiler compiles a pattern to a pytree.*Pattern instance.
"""

__author__ = "Guido van Rossum <guido@python.org>"

# Python imports
import io
import os

# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar

# Really local imports
from . import pytree
from . import pygram

# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     "PatternGrammar.txt")


class PatternSyntaxError(Exception):
    pass


def tokenize_wrapper(input):
    """Tokenizes a string suppressing significant whitespace."""
    skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
    tokens = tokenize.generate_tokens(io.StringIO(input).readline)
    for quintuple in tokens:
        type, value, start, end, line_text = quintuple
        if type not in skip:
            yield quintuple


class PatternCompiler(object):

    def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
        """Initializer.

        Takes an optional alternative filename for the pattern grammar.
        """
        self.grammar = driver.load_grammar(grammar_file)
        self.syms = pygram.Symbols(self.grammar)
        self.pygrammar = pygram.python_grammar
        self.pysyms = pygram.python_symbols
        self.driver = driver.Driver(self.grammar, convert=pattern_convert)

    def compile_pattern(self, input, debug=False, with_tree=False):
        """Compiles a pattern string to a nested pytree.*Pattern object."""
        tokens = tokenize_wrapper(input)
        try:
            root = self.driver.parse_tokens(tokens, debug=debug)
        except parse.ParseError as e:
            raise PatternSyntaxError(str(e))
        if with_tree:
            return self.compile_node(root), root
        else:
            return self.compile_node(root)

    def compile_node(self, node):
        """Compiles a node, recursively.

        This is one big switch on the node type.
        """
        # XXX Optimize certain Wildcard-containing-Wildcard patterns
        # that can be merged
        if node.type == self.syms.Matcher:
            node = node.children[0] # Avoid unneeded recursion

        if node.type == self.syms.Alternatives:
            # Skip the odd children since they are just '|' tokens
            alts = [self.compile_node(ch) for ch in node.children[::2]]
            if len(alts) == 1:
                return alts[0]
            p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
            return p.optimize()

        if node.type == self.syms.Alternative:
            units = [self.compile_node(ch) for ch in node.children]
            if len(units) == 1:
                return units[0]
            p = pytree.WildcardPattern([units], min=1, max=1)
            return p.optimize()

        if node.type == self.syms.NegatedUnit:
            pattern = self.compile_basic(node.children[1:])
            p = pytree.NegatedPattern(pattern)
            return p.optimize()

        assert node.type == self.syms.Unit

        name = None
        nodes = node.children
        if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
            name = nodes[0].value
            nodes = nodes[2:]
        repeat = None
        if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
            repeat = nodes[-1]
            nodes = nodes[:-1]

        # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat) if repeat is not None: assert repeat.type == self.syms.Repeater children = repeat.children child = children[0] if child.type == token.STAR: min = 0 max = pytree.HUGE elif child.type == token.PLUS: min = 1 max = pytree.HUGE elif child.type == token.LBRACE: assert children[-1].type == token.RBRACE assert len(children) in (3, 5) min = max = self.get_int(children[1]) if len(children) == 5: max = self.get_int(children[3]) else: assert False if min != 1 or max != 1: pattern = pattern.optimize() pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) if name is not None: pattern.name = name return pattern.optimize() def compile_basic(self, nodes, repeat=None): # Compile STRING | NAME [Details] | (...) | [...] assert len(nodes) >= 1 node = nodes[0] if node.type == token.STRING: value = str(literals.evalString(node.value)) return pytree.LeafPattern(_type_of_literal(value), value) elif node.type == token.NAME: value = node.value if value.isupper(): if value not in TOKEN_MAP: raise PatternSyntaxError("Invalid token: %r" % value) if nodes[1:]: raise PatternSyntaxError("Can't have details for token") return pytree.LeafPattern(TOKEN_MAP[value]) else: if value == "any": type = None elif not value.startswith("_"): type = getattr(self.pysyms, value, None) if type is None: raise PatternSyntaxError("Invalid symbol: %r" % value) if nodes[1:]: # Details present content = [self.compile_node(nodes[1].children[1])] else: content = None return pytree.NodePattern(type, content) elif node.value == "(": return self.compile_node(nodes[1]) elif node.value == "[": assert repeat is None subpattern = self.compile_node(nodes[1]) return pytree.WildcardPattern([[subpattern]], min=0, max=1) assert False, node def get_int(self, node): assert node.type == token.NUMBER return int(node.value) # Map named tokens to the type value for a LeafPattern TOKEN_MAP = {"NAME": token.NAME, "STRING": token.STRING, "NUMBER": token.NUMBER, "TOKEN": None} def _type_of_literal(value): if value[0].isalpha(): return token.NAME elif value in grammar.opmap: return grammar.opmap[value] else: return None def pattern_convert(grammar, raw_node_info): """Converts raw node information to a Node or Leaf instance.""" type, value, context, children = raw_node_info if children or type in grammar.number2symbol: return pytree.Node(type, children, context=context) else: return pytree.Leaf(type, value, context=context) def compile_pattern(pattern): return PatternCompiler().compile_pattern(pattern)
7,075
Python
.py
168
31.630952
79
0.589229
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
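How a compiled pattern behaves is clearest on a single leaf. A minimal sketch, assuming the stdlib lib2to3; the capture name "n" is just an example:

from lib2to3 import patcomp, pytree
from lib2to3.pgen2 import token

pat = patcomp.compile_pattern("n=NAME")  # a LeafPattern for the NAME token
leaf = pytree.Leaf(token.NAME, "spam")

results = {}
print(pat.match(leaf, results))  # True
print(results["n"].value)        # spam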
28,223
pygram.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pygram.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Export the Python grammar and symbols.""" # Python imports import os # Local imports from .pgen2 import token from .pgen2 import driver from . import pytree # The grammar file _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") _PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "PatternGrammar.txt") class Symbols(object): def __init__(self, grammar): """Initializer. Creates an attribute for each grammar symbol (nonterminal), whose value is the symbol's type (an int >= 256). """ for name, symbol in grammar.symbol2number.items(): setattr(self, name, symbol) python_grammar = driver.load_grammar(_GRAMMAR_FILE) python_symbols = Symbols(python_grammar) python_grammar_no_print_statement = python_grammar.copy() del python_grammar_no_print_statement.keywords["print"] pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE) pattern_symbols = Symbols(pattern_grammar)
1,114
Python
.py
27
36.333333
70
0.711359
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
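A minimal sketch of what the Symbols class buys you, assuming the stdlib lib2to3: every grammar nonterminal becomes an attribute whose value is its symbol number (>= 256, so it cannot collide with token codes):

from lib2to3 import pygram

n = pygram.python_symbols.funcdef
print(n >= 256)                                # True
print(pygram.python_grammar.number2symbol[n])  # funcdef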
28,224
main.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/main.py
""" Main program for 2to3. """ from __future__ import with_statement import sys import os import difflib import logging import shutil import optparse from . import refactor def diff_texts(a, b, filename): """Return a unified diff of two strings.""" a = a.splitlines() b = b.splitlines() return difflib.unified_diff(a, b, filename, filename, "(original)", "(refactored)", lineterm="") class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool): """ A refactoring tool that can avoid overwriting its input files. Prints output to stdout. Output files can optionally be written to a different directory and or have an extra file suffix appended to their name for use in situations where you do not want to replace the input files. """ def __init__(self, fixers, options, explicit, nobackups, show_diffs, input_base_dir='', output_dir='', append_suffix=''): """ Args: fixers: A list of fixers to import. options: A dict with RefactoringTool configuration. explicit: A list of fixers to run even if they are explicit. nobackups: If true no backup '.bak' files will be created for those files that are being refactored. show_diffs: Should diffs of the refactoring be printed to stdout? input_base_dir: The base directory for all input files. This class will strip this path prefix off of filenames before substituting it with output_dir. Only meaningful if output_dir is supplied. All files processed by refactor() must start with this path. output_dir: If supplied, all converted files will be written into this directory tree instead of input_base_dir. append_suffix: If supplied, all files output by this tool will have this appended to their filename. Useful for changing .py to .py3 for example by passing append_suffix='3'. """ self.nobackups = nobackups self.show_diffs = show_diffs if input_base_dir and not input_base_dir.endswith(os.sep): input_base_dir += os.sep self._input_base_dir = input_base_dir self._output_dir = output_dir self._append_suffix = append_suffix super(StdoutRefactoringTool, self).__init__(fixers, options, explicit) def log_error(self, msg, *args, **kwargs): self.errors.append((msg, args, kwargs)) self.logger.error(msg, *args, **kwargs) def write_file(self, new_text, filename, old_text, encoding): orig_filename = filename if self._output_dir: if filename.startswith(self._input_base_dir): filename = os.path.join(self._output_dir, filename[len(self._input_base_dir):]) else: raise ValueError('filename %s does not start with the ' 'input_base_dir %s' % ( filename, self._input_base_dir)) if self._append_suffix: filename += self._append_suffix if orig_filename != filename: output_dir = os.path.dirname(filename) if not os.path.isdir(output_dir): os.makedirs(output_dir) self.log_message('Writing converted %s to %s.', orig_filename, filename) if not self.nobackups: # Make backup backup = filename + ".bak" if os.path.lexists(backup): try: os.remove(backup) except os.error as err: self.log_message("Can't remove backup %s", backup) try: os.rename(filename, backup) except os.error as err: self.log_message("Can't rename %s to %s", filename, backup) # Actually write the new file write = super(StdoutRefactoringTool, self).write_file write(new_text, filename, old_text, encoding) if not self.nobackups: shutil.copymode(backup, filename) if orig_filename != filename: # Preserve the file mode in the new output directory. 
shutil.copymode(orig_filename, filename) def print_output(self, old, new, filename, equal): if equal: self.log_message("No changes to %s", filename) else: self.log_message("Refactored %s", filename) if self.show_diffs: diff_lines = diff_texts(old, new, filename) try: if self.output_lock is not None: with self.output_lock: for line in diff_lines: print(line) sys.stdout.flush() else: for line in diff_lines: print(line) except UnicodeEncodeError: warn("couldn't encode %s's diff for your terminal" % (filename,)) return def warn(msg): print("WARNING: %s" % (msg,), file=sys.stderr) def main(fixer_pkg, args=None): """Main program. Args: fixer_pkg: the name of a package where the fixers are located. args: optional; a list of command line arguments. If omitted, sys.argv[1:] is used. Returns a suggested exit status (0, 1, 2). """ # Set up option parser parser = optparse.OptionParser(usage="2to3 [options] file|dir ...") parser.add_option("-d", "--doctests_only", action="store_true", help="Fix up doctests only") parser.add_option("-f", "--fix", action="append", default=[], help="Each FIX specifies a transformation; default: all") parser.add_option("-j", "--processes", action="store", default=1, type="int", help="Run 2to3 concurrently") parser.add_option("-x", "--nofix", action="append", default=[], help="Prevent a transformation from being run") parser.add_option("-l", "--list-fixes", action="store_true", help="List available transformations") parser.add_option("-p", "--print-function", action="store_true", help="Modify the grammar so that print() is a function") parser.add_option("-v", "--verbose", action="store_true", help="More verbose logging") parser.add_option("--no-diffs", action="store_true", help="Don't show diffs of the refactoring") parser.add_option("-w", "--write", action="store_true", help="Write back modified files") parser.add_option("-n", "--nobackups", action="store_true", default=False, help="Don't write backups for modified files") parser.add_option("-o", "--output-dir", action="store", type="str", default="", help="Put output files in this directory " "instead of overwriting the input files. Requires -n.") parser.add_option("-W", "--write-unchanged-files", action="store_true", help="Also write files even if no changes were required" " (useful with --output-dir); implies -w.") parser.add_option("--add-suffix", action="store", type="str", default="", help="Append this string to all output filenames." " Requires -n if non-empty. " "ex: --add-suffix='3' will generate .py3 files.") # Parse command line arguments refactor_stdin = False flags = {} options, args = parser.parse_args(args) if options.write_unchanged_files: flags["write_unchanged_files"] = True if not options.write: warn("--write-unchanged-files/-W implies -w.") options.write = True # If we allowed these, the original files would be renamed to backup names # but not replaced. 
if options.output_dir and not options.nobackups: parser.error("Can't use --output-dir/-o without -n.") if options.add_suffix and not options.nobackups: parser.error("Can't use --add-suffix without -n.") if not options.write and options.no_diffs: warn("not writing files and not printing diffs; that's not very useful") if not options.write and options.nobackups: parser.error("Can't use -n without -w") if options.list_fixes: print("Available transformations for the -f/--fix option:") for fixname in refactor.get_all_fix_names(fixer_pkg): print(fixname) if not args: return 0 if not args: print("At least one file or directory argument required.", file=sys.stderr) print("Use --help to show usage.", file=sys.stderr) return 2 if "-" in args: refactor_stdin = True if options.write: print("Can't write to stdin.", file=sys.stderr) return 2 if options.print_function: flags["print_function"] = True # Set up logging handler level = logging.DEBUG if options.verbose else logging.INFO logging.basicConfig(format='%(name)s: %(message)s', level=level) logger = logging.getLogger('lib2to3.main') # Initialize the refactoring tool avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg)) unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix) explicit = set() if options.fix: all_present = False for fix in options.fix: if fix == "all": all_present = True else: explicit.add(fixer_pkg + ".fix_" + fix) requested = avail_fixes.union(explicit) if all_present else explicit else: requested = avail_fixes.union(explicit) fixer_names = requested.difference(unwanted_fixes) input_base_dir = os.path.commonprefix(args) if (input_base_dir and not input_base_dir.endswith(os.sep) and not os.path.isdir(input_base_dir)): # One or more similar names were passed, their directory is the base. # os.path.commonprefix() is ignorant of path elements, this corrects # for that weird API. input_base_dir = os.path.dirname(input_base_dir) if options.output_dir: input_base_dir = input_base_dir.rstrip(os.sep) logger.info('Output in %r will mirror the input directory %r layout.', options.output_dir, input_base_dir) rt = StdoutRefactoringTool( sorted(fixer_names), flags, sorted(explicit), options.nobackups, not options.no_diffs, input_base_dir=input_base_dir, output_dir=options.output_dir, append_suffix=options.add_suffix) # Refactor all files and directories passed as arguments if not rt.errors: if refactor_stdin: rt.refactor_stdin() else: try: rt.refactor(args, options.write, options.doctests_only, options.processes) except refactor.MultiprocessingUnsupported: assert options.processes > 1 print("Sorry, -j isn't supported on this platform.", file=sys.stderr) return 1 rt.summarize() # Return error status (0 if rt.errors is zero) return int(bool(rt.errors))
11,624
Python
.py
245
35.840816
83
0.591758
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
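main() is the entry point the 2to3 console script calls, and it can also be driven programmatically. A minimal sketch, assuming the stdlib lib2to3 and its real fixer package lib2to3.fixes:

from lib2to3.main import main

# Prints the available transformations and, with no file arguments,
# returns exit status 0.
exit_code = main("lib2to3.fixes", args=["--list-fixes"])
print(exit_code)  # 0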
28,225
conv.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/conv.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Convert graminit.[ch] spit out by pgen to Python code. Pgen is the Python parser generator. It is useful to quickly create a parser from a grammar file in Python's grammar notation. But I don't want my parsers to be written in C (yet), so I'm translating the parsing tables to Python data structures and writing a Python parse engine. Note that the token numbers are constants determined by the standard Python tokenizer. The standard token module defines these numbers and their names (the names are not used much). The token numbers are hardcoded into the Python tokenizer and into pgen. A Python implementation of the Python tokenizer is also available, in the standard tokenize module. On the other hand, symbol numbers (representing the grammar's non-terminals) are assigned by pgen based on the actual grammar input. Note: this module is pretty much obsolete; the pgen module generates equivalent grammar tables directly from the Grammar.txt input file without having to invoke the Python pgen C program. """ # Python imports import re # Local imports from pgen2 import grammar, token class Converter(grammar.Grammar): """Grammar subclass that reads classic pgen output files. The run() method reads the tables as produced by the pgen parser generator, typically contained in two C files, graminit.h and graminit.c. The other methods are for internal use only. See the base class for more documentation. """ def run(self, graminit_h, graminit_c): """Load the grammar tables from the text files written by pgen.""" self.parse_graminit_h(graminit_h) self.parse_graminit_c(graminit_c) self.finish_off() def parse_graminit_h(self, filename): """Parse the .h file written by pgen. (Internal) This file is a sequence of #define statements defining the nonterminals of the grammar as numbers. We build two tables mapping the numbers to names and back. """ try: f = open(filename) except IOError as err: print("Can't open %s: %s" % (filename, err)) return False self.symbol2number = {} self.number2symbol = {} lineno = 0 for line in f: lineno += 1 mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line) if not mo and line.strip(): print("%s(%s): can't parse %s" % (filename, lineno, line.strip())) else: symbol, number = mo.groups() number = int(number) assert symbol not in self.symbol2number assert number not in self.number2symbol self.symbol2number[symbol] = number self.number2symbol[number] = symbol return True def parse_graminit_c(self, filename): """Parse the .c file written by pgen. (Internal) The file looks as follows. The first two lines are always this: #include "pgenheaders.h" #include "grammar.h" After that come four blocks: 1) one or more state definitions 2) a table defining dfas 3) a table defining labels 4) a struct defining the grammar A state definition has the following form: - one or more arc arrays, each of the form: static arc arcs_<n>_<m>[<k>] = { {<i>, <j>}, ... }; - followed by a state array, of the form: static state states_<s>[<t>] = { {<k>, arcs_<n>_<m>}, ... }; """ try: f = open(filename) except IOError as err: print("Can't open %s: %s" % (filename, err)) return False # The code below essentially uses f's iterator-ness! 
lineno = 0 # Expect the two #include lines lineno, line = lineno+1, next(f) assert line == '#include "pgenheaders.h"\n', (lineno, line) lineno, line = lineno+1, next(f) assert line == '#include "grammar.h"\n', (lineno, line) # Parse the state definitions lineno, line = lineno+1, next(f) allarcs = {} states = [] while line.startswith("static arc "): while line.startswith("static arc "): mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", line) assert mo, (lineno, line) n, m, k = list(map(int, mo.groups())) arcs = [] for _ in range(k): lineno, line = lineno+1, next(f) mo = re.match(r"\s+{(\d+), (\d+)},$", line) assert mo, (lineno, line) i, j = list(map(int, mo.groups())) arcs.append((i, j)) lineno, line = lineno+1, next(f) assert line == "};\n", (lineno, line) allarcs[(n, m)] = arcs lineno, line = lineno+1, next(f) mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line) assert mo, (lineno, line) s, t = list(map(int, mo.groups())) assert s == len(states), (lineno, line) state = [] for _ in range(t): lineno, line = lineno+1, next(f) mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line) assert mo, (lineno, line) k, n, m = list(map(int, mo.groups())) arcs = allarcs[n, m] assert k == len(arcs), (lineno, line) state.append(arcs) states.append(state) lineno, line = lineno+1, next(f) assert line == "};\n", (lineno, line) lineno, line = lineno+1, next(f) self.states = states # Parse the dfas dfas = {} mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line) assert mo, (lineno, line) ndfas = int(mo.group(1)) for i in range(ndfas): lineno, line = lineno+1, next(f) mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', line) assert mo, (lineno, line) symbol = mo.group(2) number, x, y, z = list(map(int, mo.group(1, 3, 4, 5))) assert self.symbol2number[symbol] == number, (lineno, line) assert self.number2symbol[number] == symbol, (lineno, line) assert x == 0, (lineno, line) state = states[z] assert y == len(state), (lineno, line) lineno, line = lineno+1, next(f) mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line) assert mo, (lineno, line) first = {} rawbitset = eval(mo.group(1)) for i, c in enumerate(rawbitset): byte = ord(c) for j in range(8): if byte & (1<<j): first[i*8 + j] = 1 dfas[number] = (state, first) lineno, line = lineno+1, next(f) assert line == "};\n", (lineno, line) self.dfas = dfas # Parse the labels labels = [] lineno, line = lineno+1, next(f) mo = re.match(r"static label labels\[(\d+)\] = {$", line) assert mo, (lineno, line) nlabels = int(mo.group(1)) for i in range(nlabels): lineno, line = lineno+1, next(f) mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line) assert mo, (lineno, line) x, y = mo.groups() x = int(x) if y == "0": y = None else: y = eval(y) labels.append((x, y)) lineno, line = lineno+1, next(f) assert line == "};\n", (lineno, line) self.labels = labels # Parse the grammar struct lineno, line = lineno+1, next(f) assert line == "grammar _PyParser_Grammar = {\n", (lineno, line) lineno, line = lineno+1, next(f) mo = re.match(r"\s+(\d+),$", line) assert mo, (lineno, line) ndfas = int(mo.group(1)) assert ndfas == len(self.dfas) lineno, line = lineno+1, next(f) assert line == "\tdfas,\n", (lineno, line) lineno, line = lineno+1, next(f) mo = re.match(r"\s+{(\d+), labels},$", line) assert mo, (lineno, line) nlabels = int(mo.group(1)) assert nlabels == len(self.labels), (lineno, line) lineno, line = lineno+1, next(f) mo = re.match(r"\s+(\d+)$", line) assert mo, (lineno, line) start = int(mo.group(1)) assert start in self.number2symbol, (lineno, line) self.start = 
start lineno, line = lineno+1, next(f) assert line == "};\n", (lineno, line) try: lineno, line = lineno+1, next(f) except StopIteration: pass else: assert 0, (lineno, line) def finish_off(self): """Create additional useful structures. (Internal).""" self.keywords = {} # map from keyword strings to arc labels self.tokens = {} # map from numeric token values to arc labels for ilabel, (type, value) in enumerate(self.labels): if type == token.NAME and value is not None: self.keywords[value] = ilabel elif value is None: self.tokens[type] = ilabel
9,642
Python
.py
227
31.555066
78
0.537453
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
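The densest part of parse_graminit_c() is the first-set decoding: pgen emits each DFA's first set as a C string bitmask, one bit per label, eight per byte. A standalone sketch of that loop, with a made-up bitmask:

rawbitset = "\x05\x01"  # bits 0 and 2 of byte 0, bit 0 of byte 1
first = {}
for i, c in enumerate(rawbitset):
    byte = ord(c)
    for j in range(8):
        if byte & (1 << j):
            first[i * 8 + j] = 1
print(sorted(first))  # [0, 2, 8]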
28,226
token.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/token.py
#! /usr/bin/env python3 """Token constants (from "token.h").""" # Taken from Python (r53757) and modified to include some tokens # originally monkeypatched in by pgen2.tokenize #--start constants-- ENDMARKER = 0 NAME = 1 NUMBER = 2 STRING = 3 NEWLINE = 4 INDENT = 5 DEDENT = 6 LPAR = 7 RPAR = 8 LSQB = 9 RSQB = 10 COLON = 11 COMMA = 12 SEMI = 13 PLUS = 14 MINUS = 15 STAR = 16 SLASH = 17 VBAR = 18 AMPER = 19 LESS = 20 GREATER = 21 EQUAL = 22 DOT = 23 PERCENT = 24 BACKQUOTE = 25 LBRACE = 26 RBRACE = 27 EQEQUAL = 28 NOTEQUAL = 29 LESSEQUAL = 30 GREATEREQUAL = 31 TILDE = 32 CIRCUMFLEX = 33 LEFTSHIFT = 34 RIGHTSHIFT = 35 DOUBLESTAR = 36 PLUSEQUAL = 37 MINEQUAL = 38 STAREQUAL = 39 SLASHEQUAL = 40 PERCENTEQUAL = 41 AMPEREQUAL = 42 VBAREQUAL = 43 CIRCUMFLEXEQUAL = 44 LEFTSHIFTEQUAL = 45 RIGHTSHIFTEQUAL = 46 DOUBLESTAREQUAL = 47 DOUBLESLASH = 48 DOUBLESLASHEQUAL = 49 AT = 50 OP = 51 COMMENT = 52 NL = 53 RARROW = 54 ERRORTOKEN = 55 N_TOKENS = 56 NT_OFFSET = 256 #--end constants-- tok_name = {} for _name, _value in list(globals().items()): if type(_value) is type(0): tok_name[_value] = _name def ISTERMINAL(x): return x < NT_OFFSET def ISNONTERMINAL(x): return x >= NT_OFFSET def ISEOF(x): return x == ENDMARKER
1,251
Python
.py
74
15.472973
65
0.723695
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
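A minimal sketch of the constant table in use, assuming the stdlib lib2to3: tok_name inverts the constants, and NT_OFFSET is the boundary between token codes and grammar symbol numbers:

from lib2to3.pgen2 import token

print(token.tok_name[token.NAME])    # NAME
print(token.ISTERMINAL(token.NAME))  # True
print(token.ISNONTERMINAL(256))      # True
print(token.ISEOF(token.ENDMARKER))  # True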
28,227
__init__.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/__init__.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """The pgen2 package."""
143
Python
.py
3
46.333333
67
0.776978
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,228
pgen.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/pgen.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Pgen imports from . import grammar, token, tokenize class PgenGrammar(grammar.Grammar): pass class ParserGenerator(object): def __init__(self, filename, stream=None): close_stream = None if stream is None: stream = open(filename) close_stream = stream.close self.filename = filename self.stream = stream self.generator = tokenize.generate_tokens(stream.readline) self.gettoken() # Initialize lookahead self.dfas, self.startsymbol = self.parse() if close_stream is not None: close_stream() self.first = {} # map from symbol name to set of tokens self.addfirstsets() def make_grammar(self): c = PgenGrammar() names = list(self.dfas.keys()) names.sort() names.remove(self.startsymbol) names.insert(0, self.startsymbol) for name in names: i = 256 + len(c.symbol2number) c.symbol2number[name] = i c.number2symbol[i] = name for name in names: dfa = self.dfas[name] states = [] for state in dfa: arcs = [] for label, next in state.arcs.items(): arcs.append((self.make_label(c, label), dfa.index(next))) if state.isfinal: arcs.append((0, dfa.index(state))) states.append(arcs) c.states.append(states) c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) c.start = c.symbol2number[self.startsymbol] return c def make_first(self, c, name): rawfirst = self.first[name] first = {} for label in rawfirst: ilabel = self.make_label(c, label) ##assert ilabel not in first # XXX failed on <> ... != first[ilabel] = 1 return first def make_label(self, c, label): # XXX Maybe this should be a method on a subclass of converter? ilabel = len(c.labels) if label[0].isalpha(): # Either a symbol name or a named token if label in c.symbol2number: # A symbol name (a non-terminal) if label in c.symbol2label: return c.symbol2label[label] else: c.labels.append((c.symbol2number[label], None)) c.symbol2label[label] = ilabel return ilabel else: # A named token (NAME, NUMBER, STRING) itoken = getattr(token, label, None) assert isinstance(itoken, int), label assert itoken in token.tok_name, label if itoken in c.tokens: return c.tokens[itoken] else: c.labels.append((itoken, None)) c.tokens[itoken] = ilabel return ilabel else: # Either a keyword or an operator assert label[0] in ('"', "'"), label value = eval(label) if value[0].isalpha(): # A keyword if value in c.keywords: return c.keywords[value] else: c.labels.append((token.NAME, value)) c.keywords[value] = ilabel return ilabel else: # An operator (any non-numeric token) itoken = grammar.opmap[value] # Fails if unknown token if itoken in c.tokens: return c.tokens[itoken] else: c.labels.append((itoken, None)) c.tokens[itoken] = ilabel return ilabel def addfirstsets(self): names = list(self.dfas.keys()) names.sort() for name in names: if name not in self.first: self.calcfirst(name) #print name, self.first[name].keys() def calcfirst(self, name): dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] totalset = {} overlapcheck = {} for label, next in state.arcs.items(): if label in self.dfas: if label in self.first: fset = self.first[label] if fset is None: raise ValueError("recursion for rule %r" % name) else: self.calcfirst(label) fset = self.first[label] totalset.update(fset) overlapcheck[label] = fset else: totalset[label] = 1 overlapcheck[label] = {label: 1} inverse = {} for label, itsfirst in overlapcheck.items(): for symbol in itsfirst: if symbol in inverse: raise ValueError("rule %s is ambiguous; %s is in the" " first sets 
of %s as well as %s" % (name, symbol, label, inverse[symbol])) inverse[symbol] = label self.first[name] = totalset def parse(self): dfas = {} startsymbol = None # MSTART: (NEWLINE | RULE)* ENDMARKER while self.type != token.ENDMARKER: while self.type == token.NEWLINE: self.gettoken() # RULE: NAME ':' RHS NEWLINE name = self.expect(token.NAME) self.expect(token.OP, ":") a, z = self.parse_rhs() self.expect(token.NEWLINE) #self.dump_nfa(name, a, z) dfa = self.make_dfa(a, z) #self.dump_dfa(name, dfa) oldlen = len(dfa) self.simplify_dfa(dfa) newlen = len(dfa) dfas[name] = dfa #print name, oldlen, newlen if startsymbol is None: startsymbol = name return dfas, startsymbol def make_dfa(self, start, finish): # To turn an NFA into a DFA, we define the states of the DFA # to correspond to *sets* of states of the NFA. Then do some # state reduction. Let's represent sets as dicts with 1 for # values. assert isinstance(start, NFAState) assert isinstance(finish, NFAState) def closure(state): base = {} addclosure(state, base) return base def addclosure(state, base): assert isinstance(state, NFAState) if state in base: return base[state] = 1 for label, next in state.arcs: if label is None: addclosure(next, base) states = [DFAState(closure(start), finish)] for state in states: # NB states grows while we're iterating arcs = {} for nfastate in state.nfaset: for label, next in nfastate.arcs: if label is not None: addclosure(next, arcs.setdefault(label, {})) for label, nfaset in arcs.items(): for st in states: if st.nfaset == nfaset: break else: st = DFAState(nfaset, finish) states.append(st) state.addarc(st, label) return states # List of DFAState instances; first one is start def dump_nfa(self, name, start, finish): print("Dump of NFA for", name) todo = [start] for i, state in enumerate(todo): print(" State", i, state is finish and "(final)" or "") for label, next in state.arcs: if next in todo: j = todo.index(next) else: j = len(todo) todo.append(next) if label is None: print(" -> %d" % j) else: print(" %s -> %d" % (label, j)) def dump_dfa(self, name, dfa): print("Dump of DFA for", name) for i, state in enumerate(dfa): print(" State", i, state.isfinal and "(final)" or "") for label, next in state.arcs.items(): print(" %s -> %d" % (label, dfa.index(next))) def simplify_dfa(self, dfa): # This is not theoretically optimal, but works well enough. # Algorithm: repeatedly look for two states that have the same # set of arcs (same labels pointing to the same nodes) and # unify them, until things stop changing. 
# dfa is a list of DFAState instances changes = True while changes: changes = False for i, state_i in enumerate(dfa): for j in range(i+1, len(dfa)): state_j = dfa[j] if state_i == state_j: #print " unify", i, j del dfa[j] for state in dfa: state.unifystate(state_j, state_i) changes = True break def parse_rhs(self): # RHS: ALT ('|' ALT)* a, z = self.parse_alt() if self.value != "|": return a, z else: aa = NFAState() zz = NFAState() aa.addarc(a) z.addarc(zz) while self.value == "|": self.gettoken() a, z = self.parse_alt() aa.addarc(a) z.addarc(zz) return aa, zz def parse_alt(self): # ALT: ITEM+ a, b = self.parse_item() while (self.value in ("(", "[") or self.type in (token.NAME, token.STRING)): c, d = self.parse_item() b.addarc(c) b = d return a, b def parse_item(self): # ITEM: '[' RHS ']' | ATOM ['+' | '*'] if self.value == "[": self.gettoken() a, z = self.parse_rhs() self.expect(token.OP, "]") a.addarc(z) return a, z else: a, z = self.parse_atom() value = self.value if value not in ("+", "*"): return a, z self.gettoken() z.addarc(a) if value == "+": return a, z else: return a, a def parse_atom(self): # ATOM: '(' RHS ')' | NAME | STRING if self.value == "(": self.gettoken() a, z = self.parse_rhs() self.expect(token.OP, ")") return a, z elif self.type in (token.NAME, token.STRING): a = NFAState() z = NFAState() a.addarc(z, self.value) self.gettoken() return a, z else: self.raise_error("expected (...) or NAME or STRING, got %s/%s", self.type, self.value) def expect(self, type, value=None): if self.type != type or (value is not None and self.value != value): self.raise_error("expected %s/%s, got %s/%s", type, value, self.type, self.value) value = self.value self.gettoken() return value def gettoken(self): tup = next(self.generator) while tup[0] in (tokenize.COMMENT, tokenize.NL): tup = next(self.generator) self.type, self.value, self.begin, self.end, self.line = tup #print token.tok_name[self.type], repr(self.value) def raise_error(self, msg, *args): if args: try: msg = msg % args except: msg = " ".join([msg] + list(map(str, args))) raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line)) class NFAState(object): def __init__(self): self.arcs = [] # list of (label, NFAState) pairs def addarc(self, next, label=None): assert label is None or isinstance(label, str) assert isinstance(next, NFAState) self.arcs.append((label, next)) class DFAState(object): def __init__(self, nfaset, final): assert isinstance(nfaset, dict) assert isinstance(next(iter(nfaset)), NFAState) assert isinstance(final, NFAState) self.nfaset = nfaset self.isfinal = final in nfaset self.arcs = {} # map from label to DFAState def addarc(self, next, label): assert isinstance(label, str) assert label not in self.arcs assert isinstance(next, DFAState) self.arcs[label] = next def unifystate(self, old, new): for label, next in self.arcs.items(): if next is old: self.arcs[label] = new def __eq__(self, other): # Equality test -- ignore the nfaset instance variable assert isinstance(other, DFAState) if self.isfinal != other.isfinal: return False # Can't just return self.arcs == other.arcs, because that # would invoke this method recursively, with cycles... if len(self.arcs) != len(other.arcs): return False for label, next in self.arcs.items(): if next is not other.arcs.get(label): return False return True __hash__ = None # For Py3 compatibility. def generate_grammar(filename="Grammar.txt"): p = ParserGenerator(filename) return p.make_grammar()
13,780
Python
.py
354
26.053672
78
0.508885
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
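The NFA-to-DFA conversion in make_dfa() above hinges on the epsilon-closure computed by closure()/addclosure(). A minimal sketch of that step on plain integer state ids (the tiny NFA below is hypothetical, for illustration only):

def closure(state, arcs):
    """Collect every state reachable from `state` via None (epsilon) arcs."""
    base = {}
    def add(s):
        if s in base:
            return
        base[s] = 1
        for label, nxt in arcs.get(s, ()):
            if label is None:  # epsilon arc: follow it without consuming input
                add(nxt)
    add(state)
    return base

# NFA for the optional item 'a?': state 0 may skip to 1 (epsilon) or consume 'a'.
arcs = {0: [(None, 1), ('a', 1)], 1: []}
assert sorted(closure(0, arcs)) == [0, 1]

Each DFA state then corresponds to one such closure set, which is why DFAState wraps an nfaset dict.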
28,229
grammar.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/grammar.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """This module defines the data structures used to represent a grammar. These are a bit arcane because they are derived from the data structures used by Python's 'pgen' parser generator. There's also a table here mapping operators to their names in the token module; the Python tokenize module reports all operators as the fallback token code OP, but the parser needs the actual token code. """ # Python imports import pickle # Local imports from . import token, tokenize class Grammar(object): """Pgen parsing tables conversion class. Once initialized, this class supplies the grammar tables for the parsing engine implemented by parse.py. The parsing engine accesses the instance variables directly. The class here does not provide initialization of the tables; several subclasses exist to do this (see the conv and pgen modules). The load() method reads the tables from a pickle file, which is much faster than the other ways offered by subclasses. The pickle file is written by calling dump() (after loading the grammar tables using a subclass). The report() method prints a readable representation of the tables to stdout, for debugging. The instance variables are as follows: symbol2number -- a dict mapping symbol names to numbers. Symbol numbers are always 256 or higher, to distinguish them from token numbers, which are between 0 and 255 (inclusive). number2symbol -- a dict mapping numbers to symbol names; these two are each other's inverse. states -- a list of DFAs, where each DFA is a list of states, each state is a list of arcs, and each arc is a (i, j) pair where i is a label and j is a state number. The DFA number is the index into this list. (This name is slightly confusing.) Final states are represented by a special arc of the form (0, j) where j is its own state number. dfas -- a dict mapping symbol numbers to (DFA, first) pairs, where DFA is an item from the states list above, and first is a set of tokens that can begin this grammar rule (represented by a dict whose values are always 1). labels -- a list of (x, y) pairs where x is either a token number or a symbol number, and y is either None or a string; the strings are keywords. The label number is the index in this list; label numbers are used to mark state transitions (arcs) in the DFAs. start -- the number of the grammar's start symbol. keywords -- a dict mapping keyword strings to arc labels. tokens -- a dict mapping token numbers to arc labels. """ def __init__(self): self.symbol2number = {} self.number2symbol = {} self.states = [] self.dfas = {} self.labels = [(0, "EMPTY")] self.keywords = {} self.tokens = {} self.symbol2label = {} self.start = 256 def dump(self, filename): """Dump the grammar tables to a pickle file.""" f = open(filename, "wb") pickle.dump(self.__dict__, f, 2) f.close() def load(self, filename): """Load the grammar tables from a pickle file.""" f = open(filename, "rb") d = pickle.load(f) f.close() self.__dict__.update(d) def copy(self): """ Copy the grammar. 
""" new = self.__class__() for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords", "tokens", "symbol2label"): setattr(new, dict_attr, getattr(self, dict_attr).copy()) new.labels = self.labels[:] new.states = self.states[:] new.start = self.start return new def report(self): """Dump the grammar tables to standard output, for debugging.""" from pprint import pprint print("s2n") pprint(self.symbol2number) print("n2s") pprint(self.number2symbol) print("states") pprint(self.states) print("dfas") pprint(self.dfas) print("labels") pprint(self.labels) print("start", self.start) # Map from operator to number (since tokenize doesn't do this) opmap_raw = """ ( LPAR ) RPAR [ LSQB ] RSQB : COLON , COMMA ; SEMI + PLUS - MINUS * STAR / SLASH | VBAR & AMPER < LESS > GREATER = EQUAL . DOT % PERCENT ` BACKQUOTE { LBRACE } RBRACE @ AT == EQEQUAL != NOTEQUAL <> NOTEQUAL <= LESSEQUAL >= GREATEREQUAL ~ TILDE ^ CIRCUMFLEX << LEFTSHIFT >> RIGHTSHIFT ** DOUBLESTAR += PLUSEQUAL -= MINEQUAL *= STAREQUAL /= SLASHEQUAL %= PERCENTEQUAL &= AMPEREQUAL |= VBAREQUAL ^= CIRCUMFLEXEQUAL <<= LEFTSHIFTEQUAL >>= RIGHTSHIFTEQUAL **= DOUBLESTAREQUAL // DOUBLESLASH //= DOUBLESLASHEQUAL -> RARROW """ opmap = {} for line in opmap_raw.splitlines(): if line: op, name = line.split() opmap[op] = getattr(token, name)
5,369
Python
.py
155
27.767742
79
0.627965
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
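A small usage sketch of the dump()/load() round trip described above, assuming the stdlib copy of this module is importable as lib2to3.pgen2.grammar (true up to Python 3.12); the pickle path is arbitrary:

import os
import tempfile

from lib2to3.pgen2 import grammar

g = grammar.Grammar()
path = os.path.join(tempfile.mkdtemp(), "demo.pickle")
g.dump(path)   # pickles the instance __dict__ (protocol 2)
h = grammar.Grammar()
h.load(path)   # restores the tables into a fresh instance
assert h.start == g.start == 256
assert h.labels == [(0, "EMPTY")]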
28,230
literals.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/literals.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Safely evaluate Python string literals without using eval().""" import re simple_escapes = {"a": "\a", "b": "\b", "f": "\f", "n": "\n", "r": "\r", "t": "\t", "v": "\v", "'": "'", '"': '"', "\\": "\\"} def escape(m): all, tail = m.group(0, 1) assert all.startswith("\\") esc = simple_escapes.get(tail) if esc is not None: return esc if tail.startswith("x"): hexes = tail[1:] if len(hexes) < 2: raise ValueError("invalid hex string escape ('\\%s')" % tail) try: i = int(hexes, 16) except ValueError: raise ValueError("invalid hex string escape ('\\%s')" % tail) else: try: i = int(tail, 8) except ValueError: raise ValueError("invalid octal string escape ('\\%s')" % tail) return chr(i) def evalString(s): assert s.startswith("'") or s.startswith('"'), repr(s[:1]) q = s[0] if s[:3] == q*3: q = q*3 assert s.endswith(q), repr(s[-len(q):]) assert len(s) >= 2*len(q) s = s[len(q):-len(q)] return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) def test(): for i in range(256): c = chr(i) s = repr(c) e = evalString(s) if e != c: print(i, c, s, e) if __name__ == "__main__": test()
1,615
Python
.py
52
22.326923
75
0.464309
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
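A quick check of evalString() against literals Python itself would accept, again assuming the stdlib location lib2to3.pgen2.literals:

from lib2to3.pgen2.literals import evalString

assert evalString(r"'a\tb'") == 'a\tb'    # simple escape table
assert evalString(r"'\x41\101'") == 'AA'  # hex and octal branches
assert evalString('"""q"""') == 'q'       # triple-quote stripping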
28,231
parse.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/parse.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Parser engine for the grammar tables generated by pgen. The grammar table must be loaded first. See Parser/parser.c in the Python distribution for additional info on how this parsing engine works. """ # Local imports from . import token class ParseError(Exception): """Exception to signal the parser is stuck.""" def __init__(self, msg, type, value, context): Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)) self.msg = msg self.type = type self.value = value self.context = context class Parser(object): """Parser engine. The proper usage sequence is: p = Parser(grammar, [converter]) # create instance p.setup([start]) # prepare for parsing <for each input token>: if p.addtoken(...): # parse a token; may raise ParseError break root = p.rootnode # root of abstract syntax tree A Parser instance may be reused by calling setup() repeatedly. A Parser instance contains state pertaining to the current token sequence, and should not be used concurrently by different threads to parse separate token sequences. See driver.py for how to get input tokens by tokenizing a file or string. Parsing is complete when addtoken() returns True; the root of the abstract syntax tree can then be retrieved from the rootnode instance variable. When a syntax error occurs, addtoken() raises the ParseError exception. There is no error recovery; the parser cannot be used after a syntax error was reported (but it can be reinitialized by calling setup()). """ def __init__(self, grammar, convert=None): """Constructor. The grammar argument is a grammar.Grammar instance; see the grammar module for more information. The parser is not ready yet for parsing; you must call the setup() method to get it started. The optional convert argument is a function mapping concrete syntax tree nodes to abstract syntax tree nodes. If not given, no conversion is done and the syntax tree produced is the concrete syntax tree. If given, it must be a function of two arguments, the first being the grammar (a grammar.Grammar instance), and the second being the concrete syntax tree node to be converted. The syntax tree is converted from the bottom up. A concrete syntax tree node is a (type, value, context, nodes) tuple, where type is the node type (a token or symbol number), value is None for symbols and a string for tokens, context is None or an opaque value used for error reporting (typically a (lineno, offset) pair), and nodes is a list of children for symbols, and None for tokens. An abstract syntax tree node may be anything; this is entirely up to the converter function. """ self.grammar = grammar self.convert = convert or (lambda grammar, node: node) def setup(self, start=None): """Prepare for parsing. This *must* be called before starting to parse. The optional argument is an alternative start symbol; it defaults to the grammar's start symbol. You can use a Parser instance to parse any number of programs; each time you call setup() the parser is reset to an initial state determined by the (implicit or explicit) start symbol. """ if start is None: start = self.grammar.start # Each stack entry is a tuple: (dfa, state, node). # A node is a tuple: (type, value, context, children), # where children is a list of nodes or None, and context may be None. 
newnode = (start, None, None, []) stackentry = (self.grammar.dfas[start], 0, newnode) self.stack = [stackentry] self.rootnode = None self.used_names = set() # Aliased to self.rootnode.used_names in pop() def addtoken(self, type, value, context): """Add a token; return True iff this is the end of the program.""" # Map from token to label ilabel = self.classify(type, value, context) # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: t, v = self.grammar.labels[i] if ilabel == i: # Look it up in the list of labels assert t < 256 # Shift a token; we're done with it self.shift(type, value, newstate, context) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: self.pop() if not self.stack: # Done parsing! return True dfa, state, node = self.stack[-1] states, first = dfa # Done with this token return False elif t >= 256: # See if it's a symbol and if we're in its first set itsdfa = self.grammar.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol self.push(t, self.grammar.dfas[t], newstate, context) break # To continue the outer while loop else: if (0, state) in arcs: # An accepting state, pop it and try something else self.pop() if not self.stack: # Done parsing, but another token is input raise ParseError("too much input", type, value, context) else: # No success finding a transition raise ParseError("bad input", type, value, context) def classify(self, type, value, context): """Turn a token into a label. (Internal)""" if type == token.NAME: # Keep a listing of all used names self.used_names.add(value) # Check for reserved words ilabel = self.grammar.keywords.get(value) if ilabel is not None: return ilabel ilabel = self.grammar.tokens.get(type) if ilabel is None: raise ParseError("bad token", type, value, context) return ilabel def shift(self, type, value, newstate, context): """Shift a token. (Internal)""" dfa, state, node = self.stack[-1] newnode = (type, value, context, None) newnode = self.convert(self.grammar, newnode) if newnode is not None: node[-1].append(newnode) self.stack[-1] = (dfa, newstate, node) def push(self, type, newdfa, newstate, context): """Push a nonterminal. (Internal)""" dfa, state, node = self.stack[-1] newnode = (type, None, context, []) self.stack[-1] = (dfa, newstate, node) self.stack.append((newdfa, 0, newnode)) def pop(self): """Pop a nonterminal. (Internal)""" popdfa, popstate, popnode = self.stack.pop() newnode = self.convert(self.grammar, popnode) if newnode is not None: if self.stack: dfa, state, node = self.stack[-1] node[-1].append(newnode) else: self.rootnode = newnode self.rootnode.used_names = self.used_names
8,053
Python
.py
169
36.094675
78
0.590168
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
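The usage sequence from the Parser docstring, spelled out against the stock Python grammar. This is roughly what Driver.parse_tokens() (next file) automates; it assumes the stdlib lib2to3 package and passes empty token prefixes, so whitespace is not preserved in the output:

import io

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import grammar as gram
from lib2to3.pgen2 import parse, token, tokenize

p = parse.Parser(pygram.python_grammar, pytree.convert)
p.setup()
for typ, value, start, end, line in tokenize.generate_tokens(
        io.StringIO("x = 1\n").readline):
    if typ in (tokenize.COMMENT, tokenize.NL):
        continue                 # the driver folds these into token prefixes
    if typ == token.OP:
        typ = gram.opmap[value]  # map the generic OP to the exact token code
    if p.addtoken(typ, value, ("", start)):
        break                    # True: ENDMARKER was shifted, parsing is done
assert str(p.rootnode) == "x=1\n"  # prefixes were left empty above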
28,232
driver.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/driver.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Parser driver. This provides a high-level interface to parse a file into a syntax tree. """ __author__ = "Guido van Rossum <guido@python.org>" __all__ = ["Driver", "load_grammar"] # Python imports import codecs import io import os import logging import sys # Pgen imports from . import grammar, parse, token, tokenize, pgen class Driver(object): def __init__(self, grammar, convert=None, logger=None): self.grammar = grammar if logger is None: logger = logging.getLogger() self.logger = logger self.convert = convert def parse_tokens(self, tokens, debug=False): """Parse a series of tokens and return the syntax tree.""" # XXX Move the prefix computation into a wrapper around tokenize. p = parse.Parser(self.grammar, self.convert) p.setup() lineno = 1 column = 0 type = value = start = end = line_text = None prefix = "" for quintuple in tokens: type, value, start, end, line_text = quintuple if start != (lineno, column): assert (lineno, column) <= start, ((lineno, column), start) s_lineno, s_column = start if lineno < s_lineno: prefix += "\n" * (s_lineno - lineno) lineno = s_lineno column = 0 if column < s_column: prefix += line_text[column:s_column] column = s_column if type in (tokenize.COMMENT, tokenize.NL): prefix += value lineno, column = end if value.endswith("\n"): lineno += 1 column = 0 continue if type == token.OP: type = grammar.opmap[value] if debug: self.logger.debug("%s %r (prefix=%r)", token.tok_name[type], value, prefix) if p.addtoken(type, value, (prefix, start)): if debug: self.logger.debug("Stop.") break prefix = "" lineno, column = end if value.endswith("\n"): lineno += 1 column = 0 else: # We never broke out -- EOF is too soon (how can this happen???) 
raise parse.ParseError("incomplete input", type, value, (prefix, start)) return p.rootnode def parse_stream_raw(self, stream, debug=False): """Parse a stream and return the syntax tree.""" tokens = tokenize.generate_tokens(stream.readline) return self.parse_tokens(tokens, debug) def parse_stream(self, stream, debug=False): """Parse a stream and return the syntax tree.""" return self.parse_stream_raw(stream, debug) def parse_file(self, filename, encoding=None, debug=False): """Parse a file and return the syntax tree.""" stream = codecs.open(filename, "r", encoding) try: return self.parse_stream(stream, debug) finally: stream.close() def parse_string(self, text, debug=False): """Parse a string and return the syntax tree.""" tokens = tokenize.generate_tokens(io.StringIO(text).readline) return self.parse_tokens(tokens, debug) def load_grammar(gt="Grammar.txt", gp=None, save=True, force=False, logger=None): """Load the grammar (maybe from a pickle).""" if logger is None: logger = logging.getLogger() if gp is None: head, tail = os.path.splitext(gt) if tail == ".txt": tail = "" gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle" if force or not _newer(gp, gt): logger.info("Generating grammar tables from %s", gt) g = pgen.generate_grammar(gt) if save: logger.info("Writing grammar tables to %s", gp) try: g.dump(gp) except IOError as e: logger.info("Writing failed:"+str(e)) else: g = grammar.Grammar() g.load(gp) return g def _newer(a, b): """Inquire whether file a was written since file b.""" if not os.path.exists(a): return False if not os.path.exists(b): return True return os.path.getmtime(a) >= os.path.getmtime(b) def main(*args): """Main program, when run as a script: produce grammar pickle files. Calls load_grammar for each argument, a path to a grammar text file. """ if not args: args = sys.argv[1:] logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(message)s') for gt in args: load_grammar(gt, save=True, force=True) return True if __name__ == "__main__": sys.exit(int(not main()))
5,153
Python
.py
133
28.857143
76
0.570056
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
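The same parse through the high-level interface; parse_string() does the prefix bookkeeping that a manual token loop would have to reproduce, so the tree round-trips to the exact input text (stdlib lib2to3 assumed):

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string("x = 1  # keep me\n")
assert str(tree) == "x = 1  # keep me\n"  # comments survive as prefixes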
28,233
tokenize.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/pgen2/tokenize.py
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation. # All rights reserved. """Tokenization help for Python programs. generate_tokens(readline) is a generator that breaks a stream of text into Python tokens. It accepts a readline-like method which is called repeatedly to get the next line of input (or "" for EOF). It generates 5-tuples with these members: the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string) It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators Older entry points tokenize_loop(readline, tokeneater) tokenize(readline, tokeneater=printtoken) are the same, except instead of generating tokens, tokeneater is a callback function to which the 5 fields described above are passed as 5 arguments, each time a new token is found.""" __author__ = 'Ka-Ping Yee <ping@lfw.org>' __credits__ = \ 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro' import string, re from codecs import BOM_UTF8, lookup from lib2to3.pgen2.token import * from . import token __all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize", "generate_tokens", "untokenize"] del token try: bytes except NameError: # Support bytes type in Python <= 2.5, so 2to3 turns itself into # valid Python 3 code. bytes = str def group(*choices): return '(' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' Whitespace = r'[ \f\t]*' Comment = r'#[^\r\n]*' Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) Name = r'[a-zA-Z_]\w*' Binnumber = r'0[bB][01]*' Hexnumber = r'0[xX][\da-fA-F]*[lL]?' Octnumber = r'0[oO]?[0-7]*[lL]?' Decnumber = r'[1-9]\d*[lL]?' Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber) Exponent = r'[eE][-+]?\d+' Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) Expfloat = r'\d+' + Exponent Floatnumber = group(Pointfloat, Expfloat) Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') Number = group(Imagnumber, Floatnumber, Intnumber) # Tail end of ' string. Single = r"[^'\\]*(?:\\.[^'\\]*)*'" # Tail end of " string. Double = r'[^"\\]*(?:\\.[^"\\]*)*"' # Tail end of ''' string. Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" # Tail end of """ string. Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""') # Single-line ' or " string. String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') # Because of leftmost-then-longest match semantics, be sure to put the # longest operators first (e.g., if = came before ==, == would get # recognized as two instances of =). Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", r"//=?", r"->", r"[+\-*/%&|^=<>]=?", r"~") Bracket = '[][(){}]' Special = group(r'\r?\n', r'[:;.,`@]') Funny = group(Operator, Bracket, Special) PlainToken = group(Number, Funny, String, Name) Token = Ignore + PlainToken # First (or only) line of ' or " string. 
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r'\\\r?\n'), r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r'\\\r?\n')) PseudoExtras = group(r'\\\r?\n', Comment, Triple) PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) tokenprog, pseudoprog, single3prog, double3prog = list(map( re.compile, (Token, PseudoToken, Single3, Double3))) endprogs = {"'": re.compile(Single), '"': re.compile(Double), "'''": single3prog, '"""': double3prog, "r'''": single3prog, 'r"""': double3prog, "u'''": single3prog, 'u"""': double3prog, "b'''": single3prog, 'b"""': double3prog, "ur'''": single3prog, 'ur"""': double3prog, "br'''": single3prog, 'br"""': double3prog, "R'''": single3prog, 'R"""': double3prog, "U'''": single3prog, 'U"""': double3prog, "B'''": single3prog, 'B"""': double3prog, "uR'''": single3prog, 'uR"""': double3prog, "Ur'''": single3prog, 'Ur"""': double3prog, "UR'''": single3prog, 'UR"""': double3prog, "bR'''": single3prog, 'bR"""': double3prog, "Br'''": single3prog, 'Br"""': double3prog, "BR'''": single3prog, 'BR"""': double3prog, 'r': None, 'R': None, 'u': None, 'U': None, 'b': None, 'B': None} triple_quoted = {} for t in ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "u'''", 'u"""', "U'''", 'U"""', "b'''", 'b"""', "B'''", 'B"""', "ur'''", 'ur"""', "Ur'''", 'Ur"""', "uR'''", 'uR"""', "UR'''", 'UR"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""',): triple_quoted[t] = t single_quoted = {} for t in ("'", '"', "r'", 'r"', "R'", 'R"', "u'", 'u"', "U'", 'U"', "b'", 'b"', "B'", 'B"', "ur'", 'ur"', "Ur'", 'Ur"', "uR'", 'uR"', "UR'", 'UR"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"', ): single_quoted[t] = t tabsize = 8 class TokenError(Exception): pass class StopTokenizing(Exception): pass def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing (srow, scol) = xxx_todo_changeme (erow, ecol) = xxx_todo_changeme1 print("%d,%d-%d,%d:\t%s\t%s" % \ (srow, scol, erow, ecol, tok_name[type], repr(token))) def tokenize(readline, tokeneater=printtoken): """ The tokenize() function accepts two parameters: one representing the input stream, and one providing an output mechanism for tokenize(). The first parameter, readline, must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. The second parameter, tokeneater, must also be a callable object. It is called once for each token, with five arguments, corresponding to the tuples generated by generate_tokens(). 
""" try: tokenize_loop(readline, tokeneater) except StopTokenizing: pass # backwards compatible interface def tokenize_loop(readline, tokeneater): for token_info in generate_tokens(readline): tokeneater(*token_info) class Untokenizer: def __init__(self): self.tokens = [] self.prev_row = 1 self.prev_col = 0 def add_whitespace(self, start): row, col = start assert row <= self.prev_row col_offset = col - self.prev_col if col_offset: self.tokens.append(" " * col_offset) def untokenize(self, iterable): for t in iterable: if len(t) == 2: self.compat(t, iterable) break tok_type, token, start, end, line = t self.add_whitespace(start) self.tokens.append(token) self.prev_row, self.prev_col = end if tok_type in (NEWLINE, NL): self.prev_row += 1 self.prev_col = 0 return "".join(self.tokens) def compat(self, token, iterable): startline = False indents = [] toks_append = self.tokens.append toknum, tokval = token if toknum in (NAME, NUMBER): tokval += ' ' if toknum in (NEWLINE, NL): startline = True for tok in iterable: toknum, tokval = tok[:2] if toknum in (NAME, NUMBER): tokval += ' ' if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) cookie_re = re.compile("coding[:=]\s*([-\w.]+)") def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argment, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. 
""" bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return bytes() def find_cookie(line): try: line_string = line.decode('ascii') except UnicodeDecodeError: return None matches = cookie_re.findall(line_string) if not matches: return None encoding = _get_normal_name(matches[0]) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter raise SyntaxError("unknown encoding: " + encoding) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter raise SyntaxError('encoding problem: utf-8') encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] def untokenize(iterable): """Transform tokens back into Python source code. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited intput: # Output text will tokenize the back to the input t1 = [tok[:2] for tok in generate_tokens(f.readline)] newcode = untokenize(t1) readline = iter(newcode.splitlines(1)).next t2 = [tok[:2] for tokin generate_tokens(readline)] assert t1 == t2 """ ut = Untokenizer() return ut.untokenize(iterable) def generate_tokens(readline): """ The generate_tokens() generator requires one argment, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. 
""" lnum = parenlev = continued = 0 namechars, numchars = string.ascii_letters + '_', '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while 1: # loop over lines in stream try: line = readline() except StopIteration: line = '' lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield (COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("<tokenize>", lnum, pos, line)) indents = indents[:-1] yield (DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars or \ (initial == '.' 
and token != '.'): # ordinary number yield (NUMBER, token, spos, epos, line) elif initial in '\r\n': newline = NEWLINE if parenlev > 0: newline = NL yield (newline, token, spos, epos, line) elif initial == '#': assert not token.endswith("\n") yield (COMMENT, token, spos, epos, line) elif token in triple_quoted: endprog = endprogs[token] endmatch = endprog.match(line, pos) if endmatch: # all on one line pos = endmatch.end(0) token = line[start:pos] yield (STRING, token, spos, (lnum, pos), line) else: strstart = (lnum, start) # multiple lines contstr = line[start:] contline = line break elif initial in single_quoted or \ token[:2] in single_quoted or \ token[:3] in single_quoted: if token[-1] == '\n': # continued string strstart = (lnum, start) endprog = (endprogs[initial] or endprogs[token[1]] or endprogs[token[2]]) contstr, needcont = line[start:], 1 contline = line break else: # ordinary string yield (STRING, token, spos, epos, line) elif initial in namechars: # ordinary name yield (NAME, token, spos, epos, line) elif initial == '\\': # continued stmt # This yield is new; needed for better idempotency: yield (NL, token, spos, (lnum, pos), line) continued = 1 else: if initial in '([{': parenlev = parenlev + 1 elif initial in ')]}': parenlev = parenlev - 1 yield (OP, token, spos, epos, line) else: yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos+1), line) pos = pos + 1 for indent in indents[1:]: # pop remaining indent levels yield (DEDENT, '', (lnum, 0), (lnum, 0), '') yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') if __name__ == '__main__': # testing import sys if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline) else: tokenize(sys.stdin.readline)
19,169
Python
.py
436
33.919725
87
0.536772
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
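A short demonstration of generate_tokens() fed from an in-memory string; note the COMMENT token and the generic OP code, which the parser driver later maps through grammar.opmap:

import io

from lib2to3.pgen2 import token, tokenize

src = "x = 1  # set x\n"
for typ, val, start, end, line in tokenize.generate_tokens(
        io.StringIO(src).readline):
    print(token.tok_name[typ], repr(val), start, end)
# NAME 'x', OP '=', NUMBER '1', COMMENT '# set x', NEWLINE '\n', ENDMARKER ''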
28,234
fix_getcwdu.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_getcwdu.py
""" Fixer that changes os.getcwdu() to os.getcwd(). """ # Author: Victor Stinner # Local imports from .. import fixer_base from ..fixer_util import Name class FixGetcwdu(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'os' trailer< dot='.' name='getcwdu' > any* > """ def transform(self, node, results): name = results["name"] name.replace(Name("getcwd", prefix=name.prefix))
451
Python
.py
15
25.066667
66
0.62963
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
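This fixer, like every fixer that follows, can be exercised in isolation through lib2to3's refactoring engine. A minimal harness, assuming the stdlib lib2to3 package; the helper name run_fixer is ours, not part of the library:

from lib2to3.refactor import RefactoringTool

def run_fixer(fixer, source):
    """Apply a single fixer (dotted module name) to a source string."""
    return str(RefactoringTool([fixer]).refactor_string(source, "<demo>"))

print(run_fixer("lib2to3.fixes.fix_getcwdu", "import os\nos.getcwdu()\n"))
# import os
# os.getcwd()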
28,235
fix_urllib.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_urllib.py
"""Fix changes imports of urllib which are now incompatible. This is rather similar to fix_imports, but because of the more complex nature of the fixing for urllib, it has its own fixer. """ # Author: Nick Edds # Local imports from lib2to3.fixes.fix_imports import alternates, FixImports from lib2to3 import fixer_base from lib2to3.fixer_util import (Name, Comma, FromImport, Newline, find_indentation, Node, syms) MAPPING = {"urllib": [ ("urllib.request", ["URLopener", "FancyURLopener", "urlretrieve", "_urlopener", "urlopen", "urlcleanup", "pathname2url", "url2pathname"]), ("urllib.parse", ["quote", "quote_plus", "unquote", "unquote_plus", "urlencode", "splitattr", "splithost", "splitnport", "splitpasswd", "splitport", "splitquery", "splittag", "splittype", "splituser", "splitvalue", ]), ("urllib.error", ["ContentTooShortError"])], "urllib2" : [ ("urllib.request", ["urlopen", "install_opener", "build_opener", "Request", "OpenerDirector", "BaseHandler", "HTTPDefaultErrorHandler", "HTTPRedirectHandler", "HTTPCookieProcessor", "ProxyHandler", "HTTPPasswordMgr", "HTTPPasswordMgrWithDefaultRealm", "AbstractBasicAuthHandler", "HTTPBasicAuthHandler", "ProxyBasicAuthHandler", "AbstractDigestAuthHandler", "HTTPDigestAuthHandler", "ProxyDigestAuthHandler", "HTTPHandler", "HTTPSHandler", "FileHandler", "FTPHandler", "CacheFTPHandler", "UnknownHandler"]), ("urllib.error", ["URLError", "HTTPError"]), ] } # Duplicate the url parsing functions for urllib2. MAPPING["urllib2"].append(MAPPING["urllib"][1]) def build_pattern(): bare = set() for old_module, changes in MAPPING.items(): for change in changes: new_module, members = change members = alternates(members) yield """import_name< 'import' (module=%r | dotted_as_names< any* module=%r any* >) > """ % (old_module, old_module) yield """import_from< 'from' mod_member=%r 'import' ( member=%s | import_as_name< member=%s 'as' any > | import_as_names< members=any* >) > """ % (old_module, members, members) yield """import_from< 'from' module_star=%r 'import' star='*' > """ % old_module yield """import_name< 'import' dotted_as_name< module_as=%r 'as' any > > """ % old_module # bare_with_attr has a special significance for FixImports.match(). yield """power< bare_with_attr=%r trailer< '.' member=%s > any* > """ % (old_module, members) class FixUrllib(FixImports): def build_pattern(self): return "|".join(build_pattern()) def transform_import(self, node, results): """Transform for the basic import case. Replaces the old import name with a comma separated list of its replacements. """ import_mod = results.get("module") pref = import_mod.prefix names = [] # create a Node list of the replacement modules for name in MAPPING[import_mod.value][:-1]: names.extend([Name(name[0], prefix=pref), Comma()]) names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref)) import_mod.replace(names) def transform_member(self, node, results): """Transform for imports of specific module elements. Replaces the module to be imported from with the appropriate new module. 
""" mod_member = results.get("mod_member") pref = mod_member.prefix member = results.get("member") # Simple case with only a single member being imported if member: # this may be a list of length one, or just a node if isinstance(member, list): member = member[0] new_name = None for change in MAPPING[mod_member.value]: if member.value in change[1]: new_name = change[0] break if new_name: mod_member.replace(Name(new_name, prefix=pref)) else: self.cannot_convert(node, "This is an invalid module element") # Multiple members being imported else: # a dictionary for replacements, order matters modules = [] mod_dict = {} members = results["members"] for member in members: # we only care about the actual members if member.type == syms.import_as_name: as_name = member.children[2].value member_name = member.children[0].value else: member_name = member.value as_name = None if member_name != ",": for change in MAPPING[mod_member.value]: if member_name in change[1]: if change[0] not in mod_dict: modules.append(change[0]) mod_dict.setdefault(change[0], []).append(member) new_nodes = [] indentation = find_indentation(node) first = True def handle_name(name, prefix): if name.type == syms.import_as_name: kids = [Name(name.children[0].value, prefix=prefix), name.children[1].clone(), name.children[2].clone()] return [Node(syms.import_as_name, kids)] return [Name(name.value, prefix=prefix)] for module in modules: elts = mod_dict[module] names = [] for elt in elts[:-1]: names.extend(handle_name(elt, pref)) names.append(Comma()) names.extend(handle_name(elts[-1], pref)) new = FromImport(module, names) if not first or node.parent.prefix.endswith(indentation): new.prefix = indentation new_nodes.append(new) first = False if new_nodes: nodes = [] for new_node in new_nodes[:-1]: nodes.extend([new_node, Newline()]) nodes.append(new_nodes[-1]) node.replace(nodes) else: self.cannot_convert(node, "All module elements are invalid") def transform_dot(self, node, results): """Transform for calls to module members in code.""" module_dot = results.get("bare_with_attr") member = results.get("member") new_name = None if isinstance(member, list): member = member[0] for change in MAPPING[module_dot.value]: if member.value in change[1]: new_name = change[0] break if new_name: module_dot.replace(Name(new_name, prefix=module_dot.prefix)) else: self.cannot_convert(node, "This is an invalid module element") def transform(self, node, results): if results.get("module"): self.transform_import(node, results) elif results.get("mod_member"): self.transform_member(node, results) elif results.get("bare_with_attr"): self.transform_dot(node, results) # Renaming and star imports are not supported for these modules. elif results.get("module_star"): self.cannot_convert(node, "Cannot handle star imports.") elif results.get("module_as"): self.cannot_convert(node, "This module is now multiple modules")
8,384
Python
.py
180
31.911111
79
0.528887
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,236
fix_sys_exc.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_sys_exc.py
"""Fixer for sys.exc_{type, value, traceback} sys.exc_type -> sys.exc_info()[0] sys.exc_value -> sys.exc_info()[1] sys.exc_traceback -> sys.exc_info()[2] """ # By Jeff Balogh and Benjamin Peterson # Local imports from .. import fixer_base from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms class FixSysExc(fixer_base.BaseFix): # This order matches the ordering of sys.exc_info(). exc_info = ["exc_type", "exc_value", "exc_traceback"] BM_compatible = True PATTERN = """ power< 'sys' trailer< dot='.' attribute=(%s) > > """ % '|'.join("'%s'" % e for e in exc_info) def transform(self, node, results): sys_attr = results["attribute"][0] index = Number(self.exc_info.index(sys_attr.value)) call = Call(Name("exc_info"), prefix=sys_attr.prefix) attr = Attr(Name("sys"), call) attr[1].children[0].prefix = results["dot"].prefix attr.append(Subscript(index)) return Node(syms.power, attr, prefix=node.prefix)
1,034
Python
.py
24
37.5
72
0.633466
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
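The runtime identity this fixer relies on: each legacy attribute is just an index into sys.exc_info(), in the order of the exc_info list above.

import sys

try:
    raise KeyError("k")
except KeyError:
    etype, evalue, etb = sys.exc_info()
    assert etype is KeyError             # was sys.exc_type
    assert isinstance(evalue, KeyError)  # was sys.exc_value
    assert etb is not None               # was sys.exc_traceback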
28,237
fix_has_key.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_has_key.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for has_key(). Calls to .has_key() methods are expressed in terms of the 'in' operator: d.has_key(k) -> k in d CAVEATS: 1) While the primary target of this fixer is dict.has_key(), the fixer will change any has_key() method call, regardless of its class. 2) Cases like this will not be converted: m = d.has_key if m(k): ... Only *calls* to has_key() are converted. While it is possible to convert the above to something like m = d.__contains__ if m(k): ... this is currently not done. """ # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, parenthesize class FixHasKey(fixer_base.BaseFix): BM_compatible = True PATTERN = """ anchor=power< before=any+ trailer< '.' 'has_key' > trailer< '(' ( not(arglist | argument<any '=' any>) arg=any | arglist<(not argument<any '=' any>) arg=any ','> ) ')' > after=any* > | negation=not_test< 'not' anchor=power< before=any+ trailer< '.' 'has_key' > trailer< '(' ( not(arglist | argument<any '=' any>) arg=any | arglist<(not argument<any '=' any>) arg=any ','> ) ')' > > > """ def transform(self, node, results): assert results syms = self.syms if (node.parent.type == syms.not_test and self.pattern.match(node.parent)): # Don't transform a node matching the first alternative of the # pattern when its parent matches the second alternative return None negation = results.get("negation") anchor = results["anchor"] prefix = node.prefix before = [n.clone() for n in results["before"]] arg = results["arg"].clone() after = results.get("after") if after: after = [n.clone() for n in after] if arg.type in (syms.comparison, syms.not_test, syms.and_test, syms.or_test, syms.test, syms.lambdef, syms.argument): arg = parenthesize(arg) if len(before) == 1: before = before[0] else: before = pytree.Node(syms.power, before) before.prefix = " " n_op = Name("in", prefix=" ") if negation: n_not = Name("not", prefix=" ") n_op = pytree.Node(syms.comp_op, (n_not, n_op)) new = pytree.Node(syms.comparison, (arg, n_op, before)) if after: new = parenthesize(new) new = pytree.Node(syms.power, (new,) + tuple(after)) if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr, syms.and_expr, syms.shift_expr, syms.arith_expr, syms.term, syms.factor, syms.power): new = parenthesize(new) new.prefix = prefix return new
3,222
Python
.py
96
24.260417
78
0.541774
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
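What the transform produces, plus the docstring's caveat rewritten by hand (__contains__ is the bound-method equivalent it suggests):

d = {"k": 1}
assert "k" in d       # d.has_key("k")  ->  "k" in d
m = d.__contains__    # manual rewrite of 'm = d.has_key'
assert m("k") and not m("x")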
28,238
fix_reduce.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_reduce.py
# Copyright 2008 Armin Ronacher. # Licensed to PSF under a Contributor Agreement. """Fixer for reduce(). Makes sure reduce() is imported from the functools module if reduce is used in that module. """ from lib2to3 import fixer_base from lib2to3.fixer_util import touch_import class FixReduce(fixer_base.BaseFix): BM_compatible = True order = "pre" PATTERN = """ power< 'reduce' trailer< '(' arglist< ( (not(argument<any '=' any>) any ',' not(argument<any '=' any>) any) | (not(argument<any '=' any>) any ',' not(argument<any '=' any>) any ',' not(argument<any '=' any>) any) ) > ')' > > """ def transform(self, node, results): touch_import('functools', 'reduce', node)
837
Python
.py
26
24.730769
70
0.571072
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
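What touch_import() guarantees at the top of the fixed module: reduce() is no longer a builtin in Python 3, and the functools version behaves identically.

from functools import reduce  # the import the fixer inserts

assert reduce(lambda acc, x: acc + x, [1, 2, 3], 0) == 6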
28,239
fix_raise.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_raise.py
"""Fixer for 'raise E, V, T' raise -> raise raise E -> raise E raise E, V -> raise E(V) raise E, V, T -> raise E(V).with_traceback(T) raise E, None, T -> raise E.with_traceback(T) raise (((E, E'), E''), E'''), V -> raise E(V) raise "foo", V, T -> warns about string exceptions CAVEATS: 1) "raise E, V" will be incorrectly translated if V is an exception instance. The correct Python 3 idiom is raise E from V but since we can't detect instance-hood by syntax alone and since any client code would have to be changed as well, we don't automate this. """ # Author: Collin Winter # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, Attr, ArgList, is_tuple class FixRaise(fixer_base.BaseFix): BM_compatible = True PATTERN = """ raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] > """ def transform(self, node, results): syms = self.syms exc = results["exc"].clone() if exc.type == token.STRING: msg = "Python 3 does not support string exceptions" self.cannot_convert(node, msg) return # Python 2 supports # raise ((((E1, E2), E3), E4), E5), V # as a synonym for # raise E1, V # Since Python 3 will not support this, we recurse down any tuple # literals, always taking the first element. if is_tuple(exc): while is_tuple(exc): # exc.children[1:-1] is the unparenthesized tuple # exc.children[1].children[0] is the first element of the tuple exc = exc.children[1].children[0].clone() exc.prefix = " " if "val" not in results: # One-argument raise new = pytree.Node(syms.raise_stmt, [Name("raise"), exc]) new.prefix = node.prefix return new val = results["val"].clone() if is_tuple(val): args = [c.clone() for c in val.children[1:-1]] else: val.prefix = "" args = [val] if "tb" in results: tb = results["tb"].clone() tb.prefix = "" e = exc # If there's a traceback and None is passed as the value, then don't # add a call, since the user probably just wants to add a # traceback. See issue #9661. if val.type != token.NAME or val.value != "None": e = Call(exc, args) with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])] new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb) new.prefix = node.prefix return new else: return pytree.Node(syms.raise_stmt, [Name("raise"), Call(exc, args)], prefix=node.prefix)
2,926
Python
.py
74
30.459459
80
0.55677
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
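A runtime check of the three-argument rewrite, raise E(V).with_traceback(T); the formatted exception still reports the original ZeroDivisionError context:

import sys
import traceback

try:
    try:
        1 / 0
    except ZeroDivisionError:
        tb = sys.exc_info()[2]
        # 'raise ValueError, "wrapped", tb' becomes:
        raise ValueError("wrapped").with_traceback(tb)
except ValueError as exc:
    text = "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
    assert "ZeroDivisionError" in text  # original traceback/context preserved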
28,240
fix_dict.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_dict.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for dict methods. d.keys() -> list(d.keys()) d.items() -> list(d.items()) d.values() -> list(d.values()) d.iterkeys() -> iter(d.keys()) d.iteritems() -> iter(d.items()) d.itervalues() -> iter(d.values()) d.viewkeys() -> d.keys() d.viewitems() -> d.items() d.viewvalues() -> d.values() Except in certain very specific contexts: the iter() can be dropped when the context is list(), sorted(), iter() or for...in; the list() can be dropped when the context is list() or sorted() (but not iter() or for...in!). Special contexts that apply to both: list(), sorted(), tuple() set(), any(), all(), sum(). Note: iter(d.keys()) could be written as iter(d) but since the original d.iterkeys() was also redundant we don't fix this. And there are (rare) contexts where it makes a difference (e.g. when passing it as an argument to a function that introspects the argument). """ # Local imports from .. import pytree from .. import patcomp from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot from .. import fixer_util iter_exempt = fixer_util.consuming_calls | set(["iter"]) class FixDict(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< head=any+ trailer< '.' method=('keys'|'items'|'values'| 'iterkeys'|'iteritems'|'itervalues'| 'viewkeys'|'viewitems'|'viewvalues') > parens=trailer< '(' ')' > tail=any* > """ def transform(self, node, results): head = results["head"] method = results["method"][0] # Extract node for method name tail = results["tail"] syms = self.syms method_name = method.value isiter = method_name.startswith("iter") isview = method_name.startswith("view") if isiter or isview: method_name = method_name[4:] assert method_name in ("keys", "items", "values"), repr(method) head = [n.clone() for n in head] tail = [n.clone() for n in tail] special = not tail and self.in_special_context(node, isiter) args = head + [pytree.Node(syms.trailer, [Dot(), Name(method_name, prefix=method.prefix)]), results["parens"].clone()] new = pytree.Node(syms.power, args) if not (special or isview): new.prefix = "" new = Call(Name("iter" if isiter else "list"), [new]) if tail: new = pytree.Node(syms.power, [new] + tail) new.prefix = node.prefix return new P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" p1 = patcomp.compile_pattern(P1) P2 = """for_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > """ p2 = patcomp.compile_pattern(P2) def in_special_context(self, node, isiter): if node.parent is None: return False results = {} if (node.parent.parent is not None and self.p1.match(node.parent.parent, results) and results["node"] is node): if isiter: # iter(d.iterkeys()) -> iter(d.keys()), etc. return results["func"].value in iter_exempt else: # list(d.keys()) -> list(d.keys()), etc. return results["func"].value in fixer_util.consuming_calls if not isiter: return False # for ... in d.iterkeys() -> for ... in d.keys(), etc. return self.p2.match(node.parent, results) and results["node"] is node
3,816
Python
.py
91
33.406593
78
0.578862
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
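The Python 3 behaviour that motivates the wrapping: keys()/items()/values() return views, so the fixer adds list() or iter() except in the special contexts listed in the docstring, where the view is consumed anyway.

d = {"a": 1, "b": 2}
assert sorted(list(d.keys())) == ["a", "b"]  # d.keys()     -> list(d.keys())
assert next(iter(d.keys())) in d             # d.iterkeys() -> iter(d.keys())
for k in d.keys():                           # iter() dropped inside for ... in
    assert k in d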
28,241
fix_filter.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_filter.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that changes filter(F, X) into list(filter(F, X)). We avoid the transformation if the filter() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. NOTE: This is still not correct if the original code was depending on filter(F, X) to return a string if X is a string and a tuple if X is a tuple. That would require type inference, which we don't do. Let Python 2.6 figure it out. """ # Local imports from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, ListComp, in_special_context class FixFilter(fixer_base.ConditionalFix): BM_compatible = True PATTERN = """ filter_lambda=power< 'filter' trailer< '(' arglist< lambdef< 'lambda' (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any > ',' it=any > ')' > > | power< 'filter' trailer< '(' arglist< none='None' ',' seq=any > ')' > > | power< 'filter' args=trailer< '(' [any] ')' > > """ skip_on = "future_builtins.filter" def transform(self, node, results): if self.should_skip(node): return if "filter_lambda" in results: new = ListComp(results.get("fp").clone(), results.get("fp").clone(), results.get("it").clone(), results.get("xp").clone()) elif "none" in results: new = ListComp(Name("_f"), Name("_f"), results["seq"].clone(), Name("_f")) else: if in_special_context(node): return None new = node.clone() new.prefix = "" new = Call(Name("list"), [new]) new.prefix = node.prefix return new
2,102
Python
.py
65
22.630769
73
0.508391
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
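The two rewrite shapes, checked against each other at runtime: the lambda form becomes a list comprehension, and the filter(None, seq) form becomes the [_f for _f in seq if _f] truth filter.

seq = [0, 1, 2, 3]
# filter(lambda x: x % 2, seq)  ->  [x for x in seq if x % 2]
assert [x for x in seq if x % 2] == list(filter(lambda x: x % 2, seq))
# filter(None, seq)  ->  [_f for _f in seq if _f]
assert [_f for _f in seq if _f] == list(filter(None, seq))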
28,242
fix_methodattrs.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_methodattrs.py
"""Fix bound method attributes (method.im_? -> method.__?__). """ # Author: Christian Heimes # Local imports from .. import fixer_base from ..fixer_util import Name MAP = { "im_func" : "__func__", "im_self" : "__self__", "im_class" : "__self__.__class__" } class FixMethodattrs(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > """ def transform(self, node, results): attr = results["attr"][0] new = MAP[attr.value] attr.replace(Name(new, prefix=attr.prefix))
606
Python
.py
20
26.1
79
0.589347
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
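The three attribute renames in MAP, verified on a bound method:

class C:
    def m(self):
        return 42

bound = C().m
assert bound.__func__ is C.m          # im_func  -> __func__
assert isinstance(bound.__self__, C)  # im_self  -> __self__
assert bound.__self__.__class__ is C  # im_class -> __self__.__class__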
28,243
fix_next.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_next.py
"""Fixer for it.next() -> next(it), per PEP 3114.""" # Author: Collin Winter # Things that currently aren't covered: # - listcomp "next" names aren't warned # - "with" statement targets aren't checked # Local imports from ..pgen2 import token from ..pygram import python_symbols as syms from .. import fixer_base from ..fixer_util import Name, Call, find_binding bind_warning = "Calls to builtin next() possibly shadowed by global binding" class FixNext(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > > | power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > > | classdef< 'class' any+ ':' suite< any* funcdef< 'def' name='next' parameters< '(' NAME ')' > any+ > any* > > | global=global_stmt< 'global' any* 'next' any* > """ order = "pre" # Pre-order tree traversal def start_tree(self, tree, filename): super(FixNext, self).start_tree(tree, filename) n = find_binding('next', tree) if n: self.warning(n, bind_warning) self.shadowed_next = True else: self.shadowed_next = False def transform(self, node, results): assert results base = results.get("base") attr = results.get("attr") name = results.get("name") if base: if self.shadowed_next: attr.replace(Name("__next__", prefix=attr.prefix)) else: base = [n.clone() for n in base] base[0].prefix = "" node.replace(Call(Name("next", prefix=node.prefix), base)) elif name: n = Name("__next__", prefix=name.prefix) name.replace(n) elif attr: # We don't do this transformation if we're assigning to "x.next". # Unfortunately, it doesn't seem possible to do this in PATTERN, # so it's being done here. if is_assign_target(node): head = results["head"] if "".join([str(n) for n in head]).strip() == '__builtin__': self.warning(node, bind_warning) return attr.replace(Name("__next__")) elif "global" in results: self.warning(node, bind_warning) self.shadowed_next = True ### The following functions help test if node is part of an assignment ### target. def is_assign_target(node): assign = find_assign(node) if assign is None: return False for child in assign.children: if child.type == token.EQUAL: return False elif is_subtree(child, node): return True return False def find_assign(node): if node.type == syms.expr_stmt: return node if node.type == syms.simple_stmt or node.parent is None: return None return find_assign(node.parent) def is_subtree(root, node): if root == node: return True return any(is_subtree(c, node) for c in root.children)
3,174
Python
.py
86
27.895349
77
0.566916
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
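Both directions of the rename, exercised directly: call sites move to the next() builtin, and the method definition itself becomes __next__():

it = iter([1, 2])
assert next(it) == 1       # it.next() -> next(it)
assert it.__next__() == 2  # the protocol method behind the builtin

class Counter:
    def __init__(self):
        self.n = 0
    def __iter__(self):
        return self
    def __next__(self):    # was 'def next(self)' in Python 2
        self.n += 1
        return self.n

c = Counter()
assert next(c) == 1 and next(c) == 2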
28,244
fix_xreadlines.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_xreadlines.py
"""Fix "for x in f.xreadlines()" -> "for x in f". This fixer will also convert g(f.xreadlines) into g(f.__iter__).""" # Author: Collin Winter # Local imports from .. import fixer_base from ..fixer_util import Name class FixXreadlines(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > | power< any+ trailer< '.' no_call='xreadlines' > > """ def transform(self, node, results): no_call = results.get("no_call") if no_call: no_call.replace(Name("__iter__", prefix=no_call.prefix)) else: node.replace([x.clone() for x in results["call"]])
689
Python
.py
19
30.947368
69
0.606928
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,245
fix_set_literal.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_set_literal.py
""" Optional fixer to transform set() calls to set literals. """ # Author: Benjamin Peterson from lib2to3 import fixer_base, pytree from lib2to3.fixer_util import token, syms class FixSetLiteral(fixer_base.BaseFix): BM_compatible = True explicit = True PATTERN = """power< 'set' trailer< '(' (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > | single=any) ']' > | atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > ) ')' > > """ def transform(self, node, results): single = results.get("single") if single: # Make a fake listmaker fake = pytree.Node(syms.listmaker, [single.clone()]) single.replace(fake) items = fake else: items = results["items"] # Build the contents of the literal literal = [pytree.Leaf(token.LBRACE, "{")] literal.extend(n.clone() for n in items.children) literal.append(pytree.Leaf(token.RBRACE, "}")) # Set the prefix of the right brace to that of the ')' or ']' literal[-1].prefix = items.next_sibling.prefix maker = pytree.Node(syms.dictsetmaker, literal) maker.prefix = node.prefix # If the original was a one tuple, we need to remove the extra comma. if len(maker.children) == 4: n = maker.children[2] n.remove() maker.children[-1].prefix = n.prefix # Finally, replace the set call with our shiny new literal. return maker
1,697
Python
.py
42
29.261905
82
0.536496
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
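The equivalences behind the literal rewrite, including the one-tuple case where the transform removes the extra comma:

assert set([1, 2, 3]) == {1, 2, 3}  # set([...]) -> {...}
assert set((1, 2)) == {1, 2}        # set((...)) -> {...}
assert set((1,)) == {1}             # one-tuple: the trailing comma is dropped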
28,246
fix_raw_input.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_raw_input.py
"""Fixer that changes raw_input(...) into input(...).""" # Author: Andre Roberge # Local imports from .. import fixer_base from ..fixer_util import Name class FixRawInput(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< name='raw_input' trailer< '(' [any] ')' > any* > """ def transform(self, node, results): name = results["name"] name.replace(Name("input", prefix=name.prefix))
454
Python
.py
13
29.307692
69
0.613272
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
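A minimal sketch of the FixRawInput transformation above via RefactoringTool; the module path assumes the stdlib lib2to3 package rather than the vendored copy shown in file_path:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_raw_input"])
print(str(tool.refactor_string('name = raw_input("? ")\n', "<example>")))
# expected: name = input("? ")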
28,247
fix_execfile.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_execfile.py
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for execfile.

This converts usages of the execfile function into calls to the built-in
exec() function.
"""

from .. import fixer_base
from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
                          ArgList, String, syms)


class FixExecfile(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
    |
    power< 'execfile' trailer< '(' filename=any ')' > >
    """

    def transform(self, node, results):
        assert results
        filename = results["filename"]
        globals = results.get("globals")
        locals = results.get("locals")

        # Copy over the prefix from the right parenthesis at the end of the
        # execfile call.
        execfile_paren = node.children[-1].children[-1].clone()
        # Construct open().read().
        open_args = ArgList([filename.clone()], rparen=execfile_paren)
        open_call = Node(syms.power, [Name("open"), open_args])
        read = [Node(syms.trailer, [Dot(), Name('read')]),
                Node(syms.trailer, [LParen(), RParen()])]
        open_expr = [open_call] + read
        # Wrap the open call in a compile call. This is so the filename will be
        # preserved in the executed code.
        filename_arg = filename.clone()
        filename_arg.prefix = " "
        exec_str = String("'exec'", " ")
        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
        compile_call = Call(Name("compile"), compile_args, "")
        # Finally, replace the execfile call with an exec call.
        args = [compile_call]
        if globals is not None:
            args.extend([Comma(), globals.clone()])
        if locals is not None:
            args.extend([Comma(), locals.clone()])
        return Call(Name("exec"), args, prefix=node.prefix)
1,990
Python
.py
44
37.545455
103
0.612487
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
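The FixExecfile record above builds a compile(open(...).read(), ...) expression; a minimal sketch of the resulting rewrite, assuming the stdlib fixer path and a hypothetical spam.py:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_execfile"])
print(str(tool.refactor_string("execfile('spam.py')\n", "<example>")))
# expected: exec(compile(open('spam.py').read(), 'spam.py', 'exec'))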
28,248
fix_isinstance.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_isinstance.py
# Copyright 2008 Armin Ronacher. # Licensed to PSF under a Contributor Agreement. """Fixer that cleans up a tuple argument to isinstance after the tokens in it were fixed. This is mainly used to remove double occurrences of tokens as a leftover of the long -> int / unicode -> str conversion. eg. isinstance(x, (int, long)) -> isinstance(x, (int, int)) -> isinstance(x, int) """ from .. import fixer_base from ..fixer_util import token class FixIsinstance(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'isinstance' trailer< '(' arglist< any ',' atom< '(' args=testlist_gexp< any+ > ')' > > ')' > > """ run_order = 6 def transform(self, node, results): names_inserted = set() testlist = results["args"] args = testlist.children new_args = [] iterator = enumerate(args) for idx, arg in iterator: if arg.type == token.NAME and arg.value in names_inserted: if idx < len(args) - 1 and args[idx + 1].type == token.COMMA: next(iterator) continue else: new_args.append(arg) if arg.type == token.NAME: names_inserted.add(arg.value) if new_args and new_args[-1].type == token.COMMA: del new_args[-1] if len(new_args) == 1: atom = testlist.parent new_args[0].prefix = atom.prefix atom.replace(new_args[0]) else: args[:] = new_args node.changed()
1,608
Python
.py
45
26.866667
77
0.565553
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,249
fix_exitfunc.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_exitfunc.py
""" Convert use of sys.exitfunc to use the atexit module. """ # Author: Benjamin Peterson from lib2to3 import pytree, fixer_base from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms class FixExitfunc(fixer_base.BaseFix): keep_line_order = True BM_compatible = True PATTERN = """ ( sys_import=import_name<'import' ('sys' | dotted_as_names< (any ',')* 'sys' (',' any)* > ) > | expr_stmt< power< 'sys' trailer< '.' 'exitfunc' > > '=' func=any > ) """ def __init__(self, *args): super(FixExitfunc, self).__init__(*args) def start_tree(self, tree, filename): super(FixExitfunc, self).start_tree(tree, filename) self.sys_import = None def transform(self, node, results): # First, find a the sys import. We'll just hope it's global scope. if "sys_import" in results: if self.sys_import is None: self.sys_import = results["sys_import"] return func = results["func"].clone() func.prefix = "" register = pytree.Node(syms.power, Attr(Name("atexit"), Name("register")) ) call = Call(register, [func], node.prefix) node.replace(call) if self.sys_import is None: # That's interesting. self.warning(node, "Can't find sys import; Please add an atexit " "import at the top of your file.") return # Now add an atexit import after the sys import. names = self.sys_import.children[1] if names.type == syms.dotted_as_names: names.append_child(Comma()) names.append_child(Name("atexit", " ")) else: containing_stmt = self.sys_import.parent position = containing_stmt.children.index(self.sys_import) stmt_container = containing_stmt.parent new_import = pytree.Node(syms.import_name, [Name("import"), Name("atexit", " ")] ) new = pytree.Node(syms.simple_stmt, [new_import]) containing_stmt.insert_child(position + 1, Newline()) containing_stmt.insert_child(position + 2, new)
2,497
Python
.py
61
28
77
0.519175
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,250
fix_exec.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_exec.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for exec. This converts usages of the exec statement into calls to a built-in exec() function. exec code in ns1, ns2 -> exec(code, ns1, ns2) """ # Local imports from .. import pytree from .. import fixer_base from ..fixer_util import Comma, Name, Call class FixExec(fixer_base.BaseFix): BM_compatible = True PATTERN = """ exec_stmt< 'exec' a=any 'in' b=any [',' c=any] > | exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any > """ def transform(self, node, results): assert results syms = self.syms a = results["a"] b = results.get("b") c = results.get("c") args = [a.clone()] args[0].prefix = "" if b is not None: args.extend([Comma(), b.clone()]) if c is not None: args.extend([Comma(), c.clone()]) return Call(Name("exec"), args, prefix=node.prefix)
1,001
Python
.py
31
26.741935
67
0.602497
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
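Because lib2to3 parses the Python 2 grammar, the exec statement handled by FixExec above can be fed in directly; a minimal sketch (stdlib fixer path assumed, code/ns1/ns2 are hypothetical names):

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_exec"])
print(str(tool.refactor_string("exec code in ns1, ns2\n", "<example>")))
# expected: exec(code, ns1, ns2)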
28,251
fix_operator.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_operator.py
"""Fixer for operator functions. operator.isCallable(obj) -> hasattr(obj, '__call__') operator.sequenceIncludes(obj) -> operator.contains(obj) operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence) operator.isMappingType(obj) -> isinstance(obj, collections.Mapping) operator.isNumberType(obj) -> isinstance(obj, numbers.Number) operator.repeat(obj, n) -> operator.mul(obj, n) operator.irepeat(obj, n) -> operator.imul(obj, n) """ import collections # Local imports from lib2to3 import fixer_base from lib2to3.fixer_util import Call, Name, String, touch_import def invocation(s): def dec(f): f.invocation = s return f return dec class FixOperator(fixer_base.BaseFix): BM_compatible = True order = "pre" methods = """ method=('isCallable'|'sequenceIncludes' |'isSequenceType'|'isMappingType'|'isNumberType' |'repeat'|'irepeat') """ obj = "'(' obj=any ')'" PATTERN = """ power< module='operator' trailer< '.' %(methods)s > trailer< %(obj)s > > | power< %(methods)s trailer< %(obj)s > > """ % dict(methods=methods, obj=obj) def transform(self, node, results): method = self._check_method(node, results) if method is not None: return method(node, results) @invocation("operator.contains(%s)") def _sequenceIncludes(self, node, results): return self._handle_rename(node, results, "contains") @invocation("hasattr(%s, '__call__')") def _isCallable(self, node, results): obj = results["obj"] args = [obj.clone(), String(", "), String("'__call__'")] return Call(Name("hasattr"), args, prefix=node.prefix) @invocation("operator.mul(%s)") def _repeat(self, node, results): return self._handle_rename(node, results, "mul") @invocation("operator.imul(%s)") def _irepeat(self, node, results): return self._handle_rename(node, results, "imul") @invocation("isinstance(%s, collections.Sequence)") def _isSequenceType(self, node, results): return self._handle_type2abc(node, results, "collections", "Sequence") @invocation("isinstance(%s, collections.Mapping)") def _isMappingType(self, node, results): return self._handle_type2abc(node, results, "collections", "Mapping") @invocation("isinstance(%s, numbers.Number)") def _isNumberType(self, node, results): return self._handle_type2abc(node, results, "numbers", "Number") def _handle_rename(self, node, results, name): method = results["method"][0] method.value = name method.changed() def _handle_type2abc(self, node, results, module, abc): touch_import(None, module, node) obj = results["obj"] args = [obj.clone(), String(", " + ".".join([module, abc]))] return Call(Name("isinstance"), args, prefix=node.prefix) def _check_method(self, node, results): method = getattr(self, "_" + results["method"][0].value) if isinstance(method, collections.Callable): if "module" in results: return method else: sub = (str(results["obj"]),) invocation_str = method.invocation % sub self.warning(node, "You should use '%s' here." % invocation_str) return None
3,471
Python
.py
79
36.037975
80
0.616958
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
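A minimal sketch of one FixOperator rewrite from the table in the docstring above; note the fixer only warns, rather than rewrites, when the call is not qualified with the operator module name. The fixer path and the name f are illustrative:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_operator"])
print(str(tool.refactor_string("operator.isCallable(f)\n", "<example>")))
# expected: hasattr(f, '__call__')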
28,252
fix_itertools_imports.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_itertools_imports.py
""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """ # Local imports from lib2to3 import fixer_base from lib2to3.fixer_util import BlankLine, syms, token class FixItertoolsImports(fixer_base.BaseFix): BM_compatible = True PATTERN = """ import_from< 'from' 'itertools' 'import' imports=any > """ %(locals()) def transform(self, node, results): imports = results['imports'] if imports.type == syms.import_as_name or not imports.children: children = [imports] else: children = imports.children for child in children[::2]: if child.type == token.NAME: member = child.value name_node = child elif child.type == token.STAR: # Just leave the import as is. return else: assert child.type == syms.import_as_name name_node = child.children[0] member_name = name_node.value if member_name in ('imap', 'izip', 'ifilter'): child.value = None child.remove() elif member_name in ('ifilterfalse', 'izip_longest'): node.changed() name_node.value = ('filterfalse' if member_name[1] == 'f' else 'zip_longest') # Make sure the import statement is still sane children = imports.children[:] or [imports] remove_comma = True for child in children: if remove_comma and child.type == token.COMMA: child.remove() else: remove_comma ^= True while children and children[-1].type == token.COMMA: children.pop().remove() # If there are no imports left, just get rid of the entire statement if (not (imports.children or getattr(imports, 'value', None)) or imports.parent is None): p = node.prefix node = BlankLine() node.prefix = p return node
2,086
Python
.py
50
29.64
76
0.553475
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
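A minimal sketch of FixItertoolsImports above pruning names that became builtins while keeping the rest of the import (stdlib fixer path assumed):

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_itertools_imports"])
print(str(tool.refactor_string("from itertools import imap, izip, chain\n",
                               "<example>")))
# expected: from itertools import chain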
28,253
fix_basestring.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_basestring.py
"""Fixer for basestring -> str.""" # Author: Christian Heimes # Local imports from .. import fixer_base from ..fixer_util import Name class FixBasestring(fixer_base.BaseFix): BM_compatible = True PATTERN = "'basestring'" def transform(self, node, results): return Name("str", prefix=node.prefix)
320
Python
.py
10
28.6
46
0.715686
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,254
fix_tuple_params.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_tuple_params.py
"""Fixer for function definitions with tuple parameters. def func(((a, b), c), d): ... -> def func(x, d): ((a, b), c) = x ... It will also support lambdas: lambda (x, y): x + y -> lambda t: t[0] + t[1] # The parens are a syntax error in Python 3 lambda (x): x + y -> lambda x: x + y """ # Author: Collin Winter # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms def is_docstring(stmt): return isinstance(stmt, pytree.Node) and \ stmt.children[0].type == token.STRING class FixTupleParams(fixer_base.BaseFix): run_order = 4 #use a lower order since lambda is part of other #patterns BM_compatible = True PATTERN = """ funcdef< 'def' any parameters< '(' args=any ')' > ['->' any] ':' suite=any+ > | lambda= lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > ':' body=any > """ def transform(self, node, results): if "lambda" in results: return self.transform_lambda(node, results) new_lines = [] suite = results["suite"] args = results["args"] # This crap is so "def foo(...): x = 5; y = 7" is handled correctly. # TODO(cwinter): suite-cleanup if suite[0].children[1].type == token.INDENT: start = 2 indent = suite[0].children[1].value end = Newline() else: start = 0 indent = "; " end = pytree.Leaf(token.INDENT, "") # We need access to self for new_name(), and making this a method # doesn't feel right. Closing over self and new_lines makes the # code below cleaner. def handle_tuple(tuple_arg, add_prefix=False): n = Name(self.new_name()) arg = tuple_arg.clone() arg.prefix = "" stmt = Assign(arg, n.clone()) if add_prefix: n.prefix = " " tuple_arg.replace(n) new_lines.append(pytree.Node(syms.simple_stmt, [stmt, end.clone()])) if args.type == syms.tfpdef: handle_tuple(args) elif args.type == syms.typedargslist: for i, arg in enumerate(args.children): if arg.type == syms.tfpdef: # Without add_prefix, the emitted code is correct, # just ugly. handle_tuple(arg, add_prefix=(i > 0)) if not new_lines: return # This isn't strictly necessary, but it plays nicely with other fixers. # TODO(cwinter) get rid of this when children becomes a smart list for line in new_lines: line.parent = suite[0] # TODO(cwinter) suite-cleanup after = start if start == 0: new_lines[0].prefix = " " elif is_docstring(suite[0].children[start]): new_lines[0].prefix = indent after = start + 1 for line in new_lines: line.parent = suite[0] suite[0].children[after:after] = new_lines for i in range(after+1, after+len(new_lines)+1): suite[0].children[i].prefix = indent suite[0].changed() def transform_lambda(self, node, results): args = results["args"] body = results["body"] inner = simplify_args(results["inner"]) # Replace lambda ((((x)))): x with lambda x: x if inner.type == token.NAME: inner = inner.clone() inner.prefix = " " args.replace(inner) return params = find_params(args) to_index = map_to_index(params) tup_name = self.new_name(tuple_name(params)) new_param = Name(tup_name, prefix=" ") args.replace(new_param.clone()) for n in body.post_order(): if n.type == token.NAME and n.value in to_index: subscripts = [c.clone() for c in to_index[n.value]] new = pytree.Node(syms.power, [new_param.clone()] + subscripts) new.prefix = n.prefix n.replace(new) ### Helper functions for transform_lambda() def simplify_args(node): if node.type in (syms.vfplist, token.NAME): return node elif node.type == syms.vfpdef: # These look like vfpdef< '(' x ')' > where x is NAME # or another vfpdef instance (leading to recursion). 
while node.type == syms.vfpdef: node = node.children[1] return node raise RuntimeError("Received unexpected node %s" % node) def find_params(node): if node.type == syms.vfpdef: return find_params(node.children[1]) elif node.type == token.NAME: return node.value return [find_params(c) for c in node.children if c.type != token.COMMA] def map_to_index(param_list, prefix=[], d=None): if d is None: d = {} for i, obj in enumerate(param_list): trailer = [Subscript(Number(str(i)))] if isinstance(obj, list): map_to_index(obj, trailer, d=d) else: d[obj] = prefix + trailer return d def tuple_name(param_list): l = [] for obj in param_list: if isinstance(obj, list): l.append(tuple_name(obj)) else: l.append(obj) return "_".join(l)
5,565
Python
.py
147
27.918367
79
0.548237
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
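A minimal sketch of the lambda branch of FixTupleParams above; the generated parameter name x_y comes from tuple_name() via new_name() and holds only if that name is otherwise unused in the source, so treat it as an assumption:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_tuple_params"])
print(str(tool.refactor_string("f = lambda (x, y): x + y\n", "<example>")))
# expected: f = lambda x_y: x_y[0] + x_y[1]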
28,255
fix_renames.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_renames.py
"""Fix incompatible renames Fixes: * sys.maxint -> sys.maxsize """ # Author: Christian Heimes # based on Collin Winter's fix_import # Local imports from .. import fixer_base from ..fixer_util import Name, attr_chain MAPPING = {"sys": {"maxint" : "maxsize"}, } LOOKUP = {} def alternates(members): return "(" + "|".join(map(repr, members)) + ")" def build_pattern(): #bare = set() for module, replace in list(MAPPING.items()): for old_attr, new_attr in list(replace.items()): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield """ # import_name< 'import' (module=%r # | dotted_as_names< any* module=%r any* >) > # """ % (module, module) yield """ import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > """ % (module, old_attr, old_attr) yield """ power< module_name=%r trailer< '.' attr_name=%r > any* > """ % (module, old_attr) #yield """bare_name=%s""" % alternates(bare) class FixRenames(fixer_base.BaseFix): BM_compatible = True PATTERN = "|".join(build_pattern()) order = "pre" # Pre-order tree traversal # Don't match the node if it's within another match def match(self, node): match = super(FixRenames, self).match results = match(node) if results: if any(match(obj) for obj in attr_chain(node, "parent")): return False return results return False #def start_tree(self, tree, filename): # super(FixRenames, self).start_tree(tree, filename) # self.replace = {} def transform(self, node, results): mod_name = results.get("module_name") attr_name = results.get("attr_name") #bare_name = results.get("bare_name") #import_mod = results.get("module") if mod_name and attr_name: new_attr = LOOKUP[(mod_name.value, attr_name.value)] attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
2,221
Python
.py
57
30.54386
81
0.562994
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,256
fix_import.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_import.py
"""Fixer for import statements. If spam is being imported from the local directory, this import: from spam import eggs Becomes: from .spam import eggs And this import: import spam Becomes: from . import spam """ # Local imports from .. import fixer_base from os.path import dirname, join, exists, sep from ..fixer_util import FromImport, syms, token def traverse_imports(names): """ Walks over all the names imported in a dotted_as_names node. """ pending = [names] while pending: node = pending.pop() if node.type == token.NAME: yield node.value elif node.type == syms.dotted_name: yield "".join([ch.value for ch in node.children]) elif node.type == syms.dotted_as_name: pending.append(node.children[0]) elif node.type == syms.dotted_as_names: pending.extend(node.children[::-2]) else: raise AssertionError("unkown node type") class FixImport(fixer_base.BaseFix): BM_compatible = True PATTERN = """ import_from< 'from' imp=any 'import' ['('] any [')'] > | import_name< 'import' imp=any > """ def start_tree(self, tree, name): super(FixImport, self).start_tree(tree, name) self.skip = "absolute_import" in tree.future_features def transform(self, node, results): if self.skip: return imp = results['imp'] if node.type == syms.import_from: # Some imps are top-level (eg: 'import ham') # some are first level (eg: 'import ham.eggs') # some are third level (eg: 'import ham.eggs as spam') # Hence, the loop while not hasattr(imp, 'value'): imp = imp.children[0] if self.probably_a_local_import(imp.value): imp.value = "." + imp.value imp.changed() else: have_local = False have_absolute = False for mod_name in traverse_imports(imp): if self.probably_a_local_import(mod_name): have_local = True else: have_absolute = True if have_absolute: if have_local: # We won't handle both sibling and absolute imports in the # same statement at the moment. self.warning(node, "absolute and local imports together") return new = FromImport(".", [imp]) new.prefix = node.prefix return new def probably_a_local_import(self, imp_name): if imp_name.startswith("."): # Relative imports are certainly not local imports. return False imp_name = imp_name.split(".", 1)[0] base_path = dirname(self.filename) base_path = join(base_path, imp_name) # If there is no __init__.py next to the file its not in a package # so can't be a relative import. if not exists(join(dirname(base_path), "__init__.py")): return False for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]: if exists(base_path + ext): return True return False
3,255
Python
.py
87
27.724138
78
0.568441
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,257
fix_future.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_future.py
"""Remove __future__ imports from __future__ import foo is replaced with an empty line. """ # Author: Christian Heimes # Local imports from .. import fixer_base from ..fixer_util import BlankLine class FixFuture(fixer_base.BaseFix): BM_compatible = True PATTERN = """import_from< 'from' module_name="__future__" 'import' any >""" # This should be run last -- some things check for the import run_order = 10 def transform(self, node, results): new = BlankLine() new.prefix = node.prefix return new
547
Python
.py
16
30.0625
79
0.681905
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,258
fix_nonzero.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_nonzero.py
"""Fixer for __nonzero__ -> __bool__ methods.""" # Author: Collin Winter # Local imports from .. import fixer_base from ..fixer_util import Name, syms class FixNonzero(fixer_base.BaseFix): BM_compatible = True PATTERN = """ classdef< 'class' any+ ':' suite< any* funcdef< 'def' name='__nonzero__' parameters< '(' NAME ')' > any+ > any* > > """ def transform(self, node, results): name = results["name"] new = Name("__bool__", prefix=name.prefix) name.replace(new)
597
Python
.py
18
24.777778
63
0.527778
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,259
fix_except.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_except.py
"""Fixer for except statements with named exceptions. The following cases will be converted: - "except E, T:" where T is a name: except E as T: - "except E, T:" where T is not a name, tuple or list: except E as t: T = t This is done because the target of an "except" clause must be a name. - "except E, T:" where T is a tuple or list literal: except E as t: T = t.args """ # Author: Collin Winter # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms def find_excepts(nodes): for i, n in enumerate(nodes): if n.type == syms.except_clause: if n.children[0].value == 'except': yield (n, nodes[i+2]) class FixExcept(fixer_base.BaseFix): BM_compatible = True PATTERN = """ try_stmt< 'try' ':' (simple_stmt | suite) cleanup=(except_clause ':' (simple_stmt | suite))+ tail=(['except' ':' (simple_stmt | suite)] ['else' ':' (simple_stmt | suite)] ['finally' ':' (simple_stmt | suite)]) > """ def transform(self, node, results): syms = self.syms tail = [n.clone() for n in results["tail"]] try_cleanup = [ch.clone() for ch in results["cleanup"]] for except_clause, e_suite in find_excepts(try_cleanup): if len(except_clause.children) == 4: (E, comma, N) = except_clause.children[1:4] comma.replace(Name("as", prefix=" ")) if N.type != token.NAME: # Generate a new N for the except clause new_N = Name(self.new_name(), prefix=" ") target = N.clone() target.prefix = "" N.replace(new_N) new_N = new_N.clone() # Insert "old_N = new_N" as the first statement in # the except body. This loop skips leading whitespace # and indents #TODO(cwinter) suite-cleanup suite_stmts = e_suite.children for i, stmt in enumerate(suite_stmts): if isinstance(stmt, pytree.Node): break # The assignment is different if old_N is a tuple or list # In that case, the assignment is old_N = new_N.args if is_tuple(N) or is_list(N): assign = Assign(target, Attr(new_N, Name('args'))) else: assign = Assign(target, new_N) #TODO(cwinter) stopgap until children becomes a smart list for child in reversed(suite_stmts[:i]): e_suite.insert_child(0, child) e_suite.insert_child(i, assign) elif N.prefix == "": # No space after a comma is legal; no space after "as", # not so much. N.prefix = " " #TODO(cwinter) fix this when children becomes a smart list children = [c.clone() for c in node.children[:3]] + try_cleanup + tail return pytree.Node(node.type, children)
3,344
Python
.py
73
32.315068
78
0.519225
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
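A minimal sketch of the simple rename case of FixExcept above ("except E, T:" where T is a name); the fixer path and the risky/handle helpers are illustrative:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_except"])
src = ("try:\n"
       "    risky()\n"
       "except ValueError, e:\n"
       "    handle(e)\n")
print(str(tool.refactor_string(src, "<example>")))
# expected: the third line becomes "except ValueError as e:"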
28,260
fix_paren.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_paren.py
"""Fixer that addes parentheses where they are required This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``.""" # By Taek Joo Kim and Benjamin Peterson # Local imports from .. import fixer_base from ..fixer_util import LParen, RParen # XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2] class FixParen(fixer_base.BaseFix): BM_compatible = True PATTERN = """ atom< ('[' | '(') (listmaker< any comp_for< 'for' NAME 'in' target=testlist_safe< any (',' any)+ [','] > [any] > > | testlist_gexp< any comp_for< 'for' NAME 'in' target=testlist_safe< any (',' any)+ [','] > [any] > >) (']' | ')') > """ def transform(self, node, results): target = results["target"] lparen = LParen() lparen.prefix = target.prefix target.prefix = "" # Make it hug the parentheses target.insert_child(0, lparen) target.append_child(RParen())
1,227
Python
.py
37
22.081081
80
0.471682
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,261
fix_zip.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_zip.py
""" Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) unless there exists a 'from future_builtins import zip' statement in the top-level namespace. We avoid the transformation if the zip() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. """ # Local imports from .. import fixer_base from ..fixer_util import Name, Call, in_special_context class FixZip(fixer_base.ConditionalFix): BM_compatible = True PATTERN = """ power< 'zip' args=trailer< '(' [any] ')' > > """ skip_on = "future_builtins.zip" def transform(self, node, results): if self.should_skip(node): return if in_special_context(node): return None new = node.clone() new.prefix = "" new = Call(Name("list"), [new]) new.prefix = node.prefix return new
902
Python
.py
27
28.111111
72
0.620531
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,262
fix_throw.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_throw.py
"""Fixer for generator.throw(E, V, T). g.throw(E) -> g.throw(E) g.throw(E, V) -> g.throw(E(V)) g.throw(E, V, T) -> g.throw(E(V).with_traceback(T)) g.throw("foo"[, V[, T]]) will warn about string exceptions.""" # Author: Collin Winter # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, ArgList, Attr, is_tuple class FixThrow(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< any trailer< '.' 'throw' > trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > > | power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > > """ def transform(self, node, results): syms = self.syms exc = results["exc"].clone() if exc.type is token.STRING: self.cannot_convert(node, "Python 3 does not support string exceptions") return # Leave "g.throw(E)" alone val = results.get("val") if val is None: return val = val.clone() if is_tuple(val): args = [c.clone() for c in val.children[1:-1]] else: val.prefix = "" args = [val] throw_args = results["args"] if "tb" in results: tb = results["tb"].clone() tb.prefix = "" e = Call(exc, args) with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])] throw_args.replace(pytree.Node(syms.power, with_tb)) else: throw_args.replace(Call(exc, args))
1,582
Python
.py
45
27.622222
84
0.549803
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,263
fix_funcattrs.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_funcattrs.py
"""Fix function attribute names (f.func_x -> f.__x__).""" # Author: Collin Winter # Local imports from .. import fixer_base from ..fixer_util import Name class FixFuncattrs(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals' | 'func_name' | 'func_defaults' | 'func_code' | 'func_dict') > any* > """ def transform(self, node, results): attr = results["attr"][0] attr.replace(Name(("__%s__" % attr.value[5:]), prefix=attr.prefix))
644
Python
.py
16
30.8125
79
0.53451
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,264
fix_repr.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_repr.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that transforms `xyzzy` into repr(xyzzy).""" # Local imports from .. import fixer_base from ..fixer_util import Call, Name, parenthesize class FixRepr(fixer_base.BaseFix): BM_compatible = True PATTERN = """ atom < '`' expr=any '`' > """ def transform(self, node, results): expr = results["expr"].clone() if expr.type == self.syms.testlist1: expr = parenthesize(expr) return Call(Name("repr"), [expr], prefix=node.prefix)
613
Python
.py
16
32.125
61
0.642373
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,265
fix_ws_comma.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_ws_comma.py
"""Fixer that changes 'a ,b' into 'a, b'. This also changes '{a :b}' into '{a: b}', but does not touch other uses of colons. It does not touch other uses of whitespace. """ from .. import pytree from ..pgen2 import token from .. import fixer_base class FixWsComma(fixer_base.BaseFix): explicit = True # The user must ask for this fixers PATTERN = """ any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]> """ COMMA = pytree.Leaf(token.COMMA, ",") COLON = pytree.Leaf(token.COLON, ":") SEPS = (COMMA, COLON) def transform(self, node, results): new = node.clone() comma = False for child in new.children: if child in self.SEPS: prefix = child.prefix if prefix.isspace() and "\n" not in prefix: child.prefix = "" comma = True else: if comma: prefix = child.prefix if not prefix: child.prefix = " " comma = False return new
1,090
Python
.py
31
25.774194
66
0.527117
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,266
fix_itertools.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_itertools.py
""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363) imports from itertools are fixed in fix_itertools_import.py If itertools is imported as something else (ie: import itertools as it; it.izip(spam, eggs)) method calls will not get fixed. """ # Local imports from .. import fixer_base from ..fixer_util import Name class FixItertools(fixer_base.BaseFix): BM_compatible = True it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')" PATTERN = """ power< it='itertools' trailer< dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > > | power< func=%(it_funcs)s trailer< '(' [any] ')' > > """ %(locals()) # Needs to be run after fix_(map|zip|filter) run_order = 6 def transform(self, node, results): prefix = None func = results['func'][0] if ('it' in results and func.value not in ('ifilterfalse', 'izip_longest')): dot, it = (results['dot'], results['it']) # Remove the 'itertools' prefix = it.prefix it.remove() # Replace the node wich contains ('.', 'function') with the # function (to be consistant with the second part of the pattern) dot.remove() func.parent.replace(func) prefix = prefix or func.prefix func.replace(Name(func.value[1:], prefix=prefix))
1,547
Python
.py
36
33.805556
77
0.583112
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,267
fix_map.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_map.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that changes map(F, ...) into list(map(F, ...)) unless there exists a 'from future_builtins import map' statement in the top-level namespace. As a special case, map(None, X) is changed into list(X). (This is necessary because the semantics are changed in this case -- the new map(None, X) is equivalent to [(x,) for x in X].) We avoid the transformation (except for the special case mentioned above) if the map() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. NOTE: This is still not correct if the original code was depending on map(F, X, Y, ...) to go on until the longest argument is exhausted, substituting None for missing values -- like zip(), it now stops as soon as the shortest argument is exhausted. """ # Local imports from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, ListComp, in_special_context from ..pygram import python_symbols as syms class FixMap(fixer_base.ConditionalFix): BM_compatible = True PATTERN = """ map_none=power< 'map' trailer< '(' arglist< 'None' ',' arg=any [','] > ')' > > | map_lambda=power< 'map' trailer< '(' arglist< lambdef< 'lambda' (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any > ',' it=any > ')' > > | power< 'map' trailer< '(' [arglist=any] ')' > > """ skip_on = 'future_builtins.map' def transform(self, node, results): if self.should_skip(node): return if node.parent.type == syms.simple_stmt: self.warning(node, "You should use a for loop here") new = node.clone() new.prefix = "" new = Call(Name("list"), [new]) elif "map_lambda" in results: new = ListComp(results["xp"].clone(), results["fp"].clone(), results["it"].clone()) else: if "map_none" in results: new = results["arg"].clone() else: if "arglist" in results: args = results["arglist"] if args.type == syms.arglist and \ args.children[0].type == token.NAME and \ args.children[0].value == "None": self.warning(node, "cannot convert map(None, ...) " "with multiple arguments because map() " "now truncates to the shortest sequence") return if in_special_context(node): return None new = node.clone() new.prefix = "" new = Call(Name("list"), [new]) new.prefix = node.prefix return new
3,058
Python
.py
81
27.358025
78
0.53185
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
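A minimal sketch of FixMap above, showing both the list() wrapping and the special-context exemption described in the docstring (stdlib fixer path assumed; items and use are hypothetical names):

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_map"])
print(str(tool.refactor_string("names = map(str, items)\n", "<a>")))
# expected: names = list(map(str, items))
print(str(tool.refactor_string("for s in map(str, items):\n    use(s)\n", "<b>")))
# expected: unchanged, because "for V in <>:" is a special context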
28,268
fix_metaclass.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_metaclass.py
"""Fixer for __metaclass__ = X -> (metaclass=X) methods. The various forms of classef (inherits nothing, inherits once, inherints many) don't parse the same in the CST so we look at ALL classes for a __metaclass__ and if we find one normalize the inherits to all be an arglist. For one-liner classes ('class X: pass') there is no indent/dedent so we normalize those into having a suite. Moving the __metaclass__ into the classdef can also cause the class body to be empty so there is some special casing for that as well. This fixer also tries very hard to keep original indenting and spacing in all those corner cases. """ # Author: Jack Diederich # Local imports from .. import fixer_base from ..pygram import token from ..fixer_util import Name, syms, Node, Leaf def has_metaclass(parent): """ we have to check the cls_node without changing it. There are two possiblities: 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta') 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta') """ for node in parent.children: if node.type == syms.suite: return has_metaclass(node) elif node.type == syms.simple_stmt and node.children: expr_node = node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: left_side = expr_node.children[0] if isinstance(left_side, Leaf) and \ left_side.value == '__metaclass__': return True return False def fixup_parse_tree(cls_node): """ one-line classes don't get a suite in the parse tree so we add one to normalize the tree """ for node in cls_node.children: if node.type == syms.suite: # already in the preferred format, do nothing return # !%@#! oneliners have no suite node, we have to fake one up for i, node in enumerate(cls_node.children): if node.type == token.COLON: break else: raise ValueError("No class suite and no ':'!") # move everything into a suite node suite = Node(syms.suite, []) while cls_node.children[i+1:]: move_node = cls_node.children[i+1] suite.append_child(move_node.clone()) move_node.remove() cls_node.append_child(suite) node = suite def fixup_simple_stmt(parent, i, stmt_node): """ if there is a semi-colon all the parts count as part of the same simple_stmt. We just want the __metaclass__ part so we move everything efter the semi-colon into its own simple_stmt node """ for semi_ind, node in enumerate(stmt_node.children): if node.type == token.SEMI: # *sigh* break else: return node.remove() # kill the semicolon new_expr = Node(syms.expr_stmt, []) new_stmt = Node(syms.simple_stmt, [new_expr]) while stmt_node.children[semi_ind:]: move_node = stmt_node.children[semi_ind] new_expr.append_child(move_node.clone()) move_node.remove() parent.insert_child(i, new_stmt) new_leaf1 = new_stmt.children[0].children[0] old_leaf1 = stmt_node.children[0].children[0] new_leaf1.prefix = old_leaf1.prefix def remove_trailing_newline(node): if node.children and node.children[-1].type == token.NEWLINE: node.children[-1].remove() def find_metas(cls_node): # find the suite node (Mmm, sweet nodes) for node in cls_node.children: if node.type == syms.suite: break else: raise ValueError("No class suite!") # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ] for i, simple_node in list(enumerate(node.children)): if simple_node.type == syms.simple_stmt and simple_node.children: expr_node = simple_node.children[0] if expr_node.type == syms.expr_stmt and expr_node.children: # Check if the expr_node is a simple assignment. 
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == '__metaclass__':
                    # We found an assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)


def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix.
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break

    # find the first Leaf
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                node.prefix = ''
            return
        else:
            kids.extend(node.children[::-1])


class FixMetaclass(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        if not has_metaclass(node):
            return

        fixup_parse_tree(node)

        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()

        text_type = node.children[0].type  # always Leaf(nnn, 'class')

        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                 0        1       2    3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
            #                 0        1       2     3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                 0        1       2    3
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, ')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, '('))
        else:
            raise ValueError("Unexpected class definition")

        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix

        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, ','))
            meta_txt.prefix = ' '
        else:
            meta_txt.prefix = ''

        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = ''
        expr_stmt.children[2].prefix = ''

        arglist.append_child(last_metaclass)

        fixup_indent(suite)

        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass__
            suite.remove()
            pass_leaf = Leaf(text_type, 'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, '\n'))

        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                 suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, 'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
8,201
Python
.py
190
33.768421
80
0.583971
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
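A minimal sketch of FixMetaclass above moving a __metaclass__ assignment into the class header; Base and Meta are hypothetical names and the stdlib fixer path is assumed:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_metaclass"])
src = ("class C(Base):\n"
       "    __metaclass__ = Meta\n"
       "    attr = 1\n")
print(str(tool.refactor_string(src, "<example>")))
# expected:
# class C(Base, metaclass=Meta):
#     attr = 1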
28,269
fix_xrange.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_xrange.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that changes xrange(...) into range(...).""" # Local imports from .. import fixer_base from ..fixer_util import Name, Call, consuming_calls from .. import patcomp class FixXrange(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< (name='range'|name='xrange') trailer< '(' args=any ')' > rest=any* > """ def start_tree(self, tree, filename): super(FixXrange, self).start_tree(tree, filename) self.transformed_xranges = set() def finish_tree(self, tree, filename): self.transformed_xranges = None def transform(self, node, results): name = results["name"] if name.value == "xrange": return self.transform_xrange(node, results) elif name.value == "range": return self.transform_range(node, results) else: raise ValueError(repr(name)) def transform_xrange(self, node, results): name = results["name"] name.replace(Name("range", prefix=name.prefix)) # This prevents the new range call from being wrapped in a list later. self.transformed_xranges.add(id(node)) def transform_range(self, node, results): if (id(node) not in self.transformed_xranges and not self.in_special_context(node)): range_call = Call(Name("range"), [results["args"].clone()]) # Encase the range call in list(). list_call = Call(Name("list"), [range_call], prefix=node.prefix) # Put things that were after the range() call after the list call. for n in results["rest"]: list_call.append_child(n) return list_call P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" p1 = patcomp.compile_pattern(P1) P2 = """for_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > | comparison< any 'in' node=any any*> """ p2 = patcomp.compile_pattern(P2) def in_special_context(self, node): if node.parent is None: return False results = {} if (node.parent.parent is not None and self.p1.match(node.parent.parent, results) and results["node"] is node): # list(d.keys()) -> list(d.keys()), etc. return results["func"].value in consuming_calls # for ... in d.iterkeys() -> for ... in d.keys(), etc. return self.p2.match(node.parent, results) and results["node"] is node
2,694
Python
.py
61
34.655738
78
0.58718
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
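A minimal sketch of the two FixXrange paths above: xrange() in a for-loop becomes a bare range(), while a range() whose result is kept gets wrapped in list() (stdlib fixer path assumed):

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_xrange"])
print(str(tool.refactor_string("for i in xrange(3):\n    step(i)\n", "<a>")))
# expected: for i in range(3): ... (special context, no list() wrapper)
print(str(tool.refactor_string("ids = range(5)\n", "<b>")))
# expected: ids = list(range(5))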
28,270
fix_imports2.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_imports2.py
"""Fix incompatible imports and module references that must be fixed after fix_imports.""" from . import fix_imports MAPPING = { 'whichdb': 'dbm', 'anydbm': 'dbm', } class FixImports2(fix_imports.FixImports): run_order = 7 mapping = MAPPING
289
Python
.py
10
23.1
74
0.644689
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,271
fix_buffer.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_buffer.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that changes buffer(...) into memoryview(...).""" # Local imports from .. import fixer_base from ..fixer_util import Name class FixBuffer(fixer_base.BaseFix): BM_compatible = True explicit = True # The user must ask for this fixer PATTERN = """ power< name='buffer' trailer< '(' [any] ')' > any* > """ def transform(self, node, results): name = results["name"] name.replace(Name("memoryview", prefix=name.prefix))
590
Python
.py
15
33.866667
66
0.651408
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,272
fix_apply.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_apply.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for apply(). This converts apply(func, v, k) into (func)(*v, **k).""" # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Call, Comma, parenthesize class FixApply(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'apply' trailer< '(' arglist< (not argument<NAME '=' any>) func=any ',' (not argument<NAME '=' any>) args=any [',' (not argument<NAME '=' any>) kwds=any] [','] > ')' > > """ def transform(self, node, results): syms = self.syms assert results func = results["func"] args = results["args"] kwds = results.get("kwds") prefix = node.prefix func = func.clone() if (func.type not in (token.NAME, syms.atom) and (func.type != syms.power or func.children[-2].type == token.DOUBLESTAR)): # Need to parenthesize func = parenthesize(func) func.prefix = "" args = args.clone() args.prefix = "" if kwds is not None: kwds = kwds.clone() kwds.prefix = "" l_newargs = [pytree.Leaf(token.STAR, "*"), args] if kwds is not None: l_newargs.extend([Comma(), pytree.Leaf(token.DOUBLESTAR, "**"), kwds]) l_newargs[-2].prefix = " " # that's the ** token # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t) # can be translated into f(x, y, *t) instead of f(*(x, y) + t) #new = pytree.Node(syms.power, (func, ArgList(l_newargs))) return Call(func, l_newargs, prefix=prefix)
1,901
Python
.py
53
26.509434
71
0.531488
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
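A minimal sketch of FixApply above; f, args and kwargs are hypothetical names and the fixer path assumes the stdlib lib2to3 package:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_apply"])
print(str(tool.refactor_string("apply(f, args, kwargs)\n", "<example>")))
# expected: f(*args, **kwargs)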
28,273
fix_idioms.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_idioms.py
"""Adjust some old Python 2 idioms to their modern counterparts. * Change some type comparisons to isinstance() calls: type(x) == T -> isinstance(x, T) type(x) is T -> isinstance(x, T) type(x) != T -> not isinstance(x, T) type(x) is not T -> not isinstance(x, T) * Change "while 1:" into "while True:". * Change both v = list(EXPR) v.sort() foo(v) and the more general v = EXPR v.sort() foo(v) into v = sorted(EXPR) foo(v) """ # Author: Jacques Frechet, Collin Winter # Local imports from .. import fixer_base from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)" TYPE = "power< 'type' trailer< '(' x=any ')' > >" class FixIdioms(fixer_base.BaseFix): explicit = True # The user must ask for this fixer PATTERN = r""" isinstance=comparison< %s %s T=any > | isinstance=comparison< T=any %s %s > | while_stmt< 'while' while='1' ':' any+ > | sorted=any< any* simple_stmt< expr_stmt< id1=any '=' power< list='list' trailer< '(' (not arglist<any+>) any ')' > > > '\n' > sort= simple_stmt< power< id2=any trailer< '.' 'sort' > trailer< '(' ')' > > '\n' > next=any* > | sorted=any< any* simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' > sort= simple_stmt< power< id2=any trailer< '.' 'sort' > trailer< '(' ')' > > '\n' > next=any* > """ % (TYPE, CMP, CMP, TYPE) def match(self, node): r = super(FixIdioms, self).match(node) # If we've matched one of the sort/sorted subpatterns above, we # want to reject matches where the initial assignment and the # subsequent .sort() call involve different identifiers. if r and "sorted" in r: if r["id1"] == r["id2"]: return r return None return r def transform(self, node, results): if "isinstance" in results: return self.transform_isinstance(node, results) elif "while" in results: return self.transform_while(node, results) elif "sorted" in results: return self.transform_sort(node, results) else: raise RuntimeError("Invalid match") def transform_isinstance(self, node, results): x = results["x"].clone() # The thing inside of type() T = results["T"].clone() # The type being compared against x.prefix = "" T.prefix = " " test = Call(Name("isinstance"), [x, Comma(), T]) if "n" in results: test.prefix = " " test = Node(syms.not_test, [Name("not"), test]) test.prefix = node.prefix return test def transform_while(self, node, results): one = results["while"] one.replace(Name("True", prefix=one.prefix)) def transform_sort(self, node, results): sort_stmt = results["sort"] next_stmt = results["next"] list_call = results.get("list") simple_expr = results.get("expr") if list_call: list_call.replace(Name("sorted", prefix=list_call.prefix)) elif simple_expr: new = simple_expr.clone() new.prefix = "" simple_expr.replace(Call(Name("sorted"), [new], prefix=simple_expr.prefix)) else: raise RuntimeError("should not have reached here") sort_stmt.remove() btwn = sort_stmt.prefix # Keep any prefix lines between the sort_stmt and the list_call and # shove them right after the sorted() call. if "\n" in btwn: if next_stmt: # The new prefix should be everything from the sort_stmt's # prefix up to the last newline, then the old prefix after a new # line. prefix_lines = (btwn.rpartition("\n")[0], next_stmt[0].prefix) next_stmt[0].prefix = "\n".join(prefix_lines) else: assert list_call.parent assert list_call.next_sibling is None # Put a blank line after list_call and set its prefix. 
end_line = BlankLine() list_call.parent.append_child(end_line) assert list_call.next_sibling is end_line # The new prefix should be everything up to the first new line # of sort_stmt's prefix. end_line.prefix = btwn.rpartition("\n")[0]
4,876
Python
.py
133
26.270677
88
0.527731
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
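FixIdioms above is explicit, so it must be opted into; a minimal sketch of the isinstance and while-True rewrites (passing explicit=True enables all optional fixers in the run):

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_idioms"], explicit=True)
print(str(tool.refactor_string("if type(x) == int:\n    handle(x)\n", "<a>")))
# expected: if isinstance(x, int): ...
print(str(tool.refactor_string("while 1:\n    step()\n", "<b>")))
# expected: while True: ...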
28,274
fix_standarderror.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_standarderror.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for StandardError -> Exception.""" # Local imports from .. import fixer_base from ..fixer_util import Name class FixStandarderror(fixer_base.BaseFix): BM_compatible = True PATTERN = """ 'StandardError' """ def transform(self, node, results): return Name("Exception", prefix=node.prefix)
449
Python
.py
13
29.461538
52
0.689095
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,275
fix_callable.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_callable.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for callable(). This converts callable(obj) into isinstance(obj, collections.Callable), adding a collections import if needed.""" # Local imports from lib2to3 import fixer_base from lib2to3.fixer_util import Call, Name, String, Attr, touch_import class FixCallable(fixer_base.BaseFix): BM_compatible = True order = "pre" # Ignore callable(*args) or use of keywords. # Either could be a hint that the builtin callable() is not being used. PATTERN = """ power< 'callable' trailer< lpar='(' ( not(arglist | argument<any '=' any>) func=any | func=arglist<(not argument<any '=' any>) any ','> ) rpar=')' > after=any* > """ def transform(self, node, results): func = results['func'] touch_import(None, 'collections', node=node) args = [func.clone(), String(', ')] args.extend(Attr(Name('collections'), Name('Callable'))) return Call(Name('isinstance'), args, prefix=node.prefix)
1,151
Python
.py
28
34.071429
80
0.633752
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,276
fix_ne.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_ne.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that turns <> into !=.""" # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base class FixNe(fixer_base.BaseFix): # This is so simple that we don't need the pattern compiler. _accept_type = token.NOTEQUAL def match(self, node): # Override return node.value == "<>" def transform(self, node, results): new = pytree.Leaf(token.NOTEQUAL, "!=", prefix=node.prefix) return new
571
Python
.py
16
31.25
67
0.684307
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,277
fix_unicode.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_unicode.py
"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...". """ import re from ..pgen2 import token from .. import fixer_base _mapping = {"unichr" : "chr", "unicode" : "str"} _literal_re = re.compile(r"[uU][rR]?[\'\"]") class FixUnicode(fixer_base.BaseFix): BM_compatible = True PATTERN = "STRING | 'unicode' | 'unichr'" def transform(self, node, results): if node.type == token.NAME: new = node.clone() new.value = _mapping[node.value] return new elif node.type == token.STRING: if _literal_re.match(node.value): new = node.clone() new.value = new.value[1:] return new
715
Python
.py
20
28.3
75
0.569565
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,278
fix_imports.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_imports.py
"""Fix incompatible imports and module references.""" # Authors: Collin Winter, Nick Edds # Local imports from .. import fixer_base from ..fixer_util import Name, attr_chain MAPPING = {'StringIO': 'io', 'cStringIO': 'io', 'cPickle': 'pickle', '__builtin__' : 'builtins', 'copy_reg': 'copyreg', 'Queue': 'queue', 'SocketServer': 'socketserver', 'ConfigParser': 'configparser', 'repr': 'reprlib', 'FileDialog': 'tkinter.filedialog', 'tkFileDialog': 'tkinter.filedialog', 'SimpleDialog': 'tkinter.simpledialog', 'tkSimpleDialog': 'tkinter.simpledialog', 'tkColorChooser': 'tkinter.colorchooser', 'tkCommonDialog': 'tkinter.commondialog', 'Dialog': 'tkinter.dialog', 'Tkdnd': 'tkinter.dnd', 'tkFont': 'tkinter.font', 'tkMessageBox': 'tkinter.messagebox', 'ScrolledText': 'tkinter.scrolledtext', 'Tkconstants': 'tkinter.constants', 'Tix': 'tkinter.tix', 'ttk': 'tkinter.ttk', 'Tkinter': 'tkinter', 'markupbase': '_markupbase', '_winreg': 'winreg', 'thread': '_thread', 'dummy_thread': '_dummy_thread', # anydbm and whichdb are handled by fix_imports2 'dbhash': 'dbm.bsd', 'dumbdbm': 'dbm.dumb', 'dbm': 'dbm.ndbm', 'gdbm': 'dbm.gnu', 'xmlrpclib': 'xmlrpc.client', 'DocXMLRPCServer': 'xmlrpc.server', 'SimpleXMLRPCServer': 'xmlrpc.server', 'httplib': 'http.client', 'htmlentitydefs' : 'html.entities', 'HTMLParser' : 'html.parser', 'Cookie': 'http.cookies', 'cookielib': 'http.cookiejar', 'BaseHTTPServer': 'http.server', 'SimpleHTTPServer': 'http.server', 'CGIHTTPServer': 'http.server', #'test.test_support': 'test.support', 'commands': 'subprocess', 'UserString' : 'collections', 'UserList' : 'collections', 'urlparse' : 'urllib.parse', 'robotparser' : 'urllib.robotparser', } def alternates(members): return "(" + "|".join(map(repr, members)) + ")" def build_pattern(mapping=MAPPING): mod_list = ' | '.join(["module_name='%s'" % key for key in mapping]) bare_names = alternates(mapping.keys()) yield """name_import=import_name< 'import' ((%s) | multiple_imports=dotted_as_names< any* (%s) any* >) > """ % (mod_list, mod_list) yield """import_from< 'from' (%s) 'import' ['('] ( any | import_as_name< any 'as' any > | import_as_names< any* >) [')'] > """ % mod_list yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > | multiple_imports=dotted_as_names< any* dotted_as_name< (%s) 'as' any > any* >) > """ % (mod_list, mod_list) # Find usages of module members in code e.g. thread.foo(bar) yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names class FixImports(fixer_base.BaseFix): BM_compatible = True keep_line_order = True # This is overridden in fix_imports2. mapping = MAPPING # We want to run this fixer late, so fix_import doesn't try to make stdlib # renames into relative imports. run_order = 6 def build_pattern(self): return "|".join(build_pattern(self.mapping)) def compile_pattern(self): # We override this, so MAPPING can be pragmatically altered and the # changes will be reflected in PATTERN. self.PATTERN = self.build_pattern() super(FixImports, self).compile_pattern() # Don't match the node if it's within another match. def match(self, node): match = super(FixImports, self).match results = match(node) if results: # Module usage could be in the trailer of an attribute lookup, so we # might have nested matches when "bare_with_attr" is present. 
if "bare_with_attr" not in results and \ any(match(obj) for obj in attr_chain(node, "parent")): return False return results return False def start_tree(self, tree, filename): super(FixImports, self).start_tree(tree, filename) self.replace = {} def transform(self, node, results): import_mod = results.get("module_name") if import_mod: mod_name = import_mod.value new_name = self.mapping[mod_name] import_mod.replace(Name(new_name, prefix=import_mod.prefix)) if "name_import" in results: # If it's not a "from x import x, y" or "import x as y" import, # marked its usage to be replaced. self.replace[mod_name] = new_name if "multiple_imports" in results: # This is a nasty hack to fix multiple imports on a line (e.g., # "import StringIO, urlparse"). The problem is that I can't # figure out an easy way to make a pattern recognize the keys of # MAPPING randomly sprinkled in an import statement. results = self.match(node) if results: self.transform(node, results) else: # Replace usage of the module. bare_name = results["bare_with_attr"][0] new_name = self.replace.get(bare_name.value) if new_name: bare_name.replace(Name(new_name, prefix=bare_name.prefix))
5,684
Python
.py
128
33.789063
80
0.567431
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
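The two-phase logic in the record above (rewrite the import line, then rewrite later attribute uses of the renamed module) can be seen end to end with the stdlib copy of the fixer; a sketch under the same stdlib-path assumption as before:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_imports"])
src = "import StringIO\nbuf = StringIO.StringIO()\n"
print(str(tool.refactor_string(src, "<example>")), end="")
# Expected:
# import io
# buf = io.StringIO()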
28,279
fix_types.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_types.py
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for removing uses of the types module. These work for only the known names in the types module. The forms above can include types. or not. ie, It is assumed the module is imported either as: import types from types import ... # either * or specific types The import statements are not modified. There should be another fixer that handles at least the following constants: type([]) -> list type(()) -> tuple type('') -> str """ # Local imports from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name _TYPE_MAPPING = { 'BooleanType' : 'bool', 'BufferType' : 'memoryview', 'ClassType' : 'type', 'ComplexType' : 'complex', 'DictType': 'dict', 'DictionaryType' : 'dict', 'EllipsisType' : 'type(Ellipsis)', #'FileType' : 'io.IOBase', 'FloatType': 'float', 'IntType': 'int', 'ListType': 'list', 'LongType': 'int', 'ObjectType' : 'object', 'NoneType': 'type(None)', 'NotImplementedType' : 'type(NotImplemented)', 'SliceType' : 'slice', 'StringType': 'bytes', # XXX ? 'StringTypes' : 'str', # XXX ? 'TupleType': 'tuple', 'TypeType' : 'type', 'UnicodeType': 'str', 'XRangeType' : 'range', } _pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING] class FixTypes(fixer_base.BaseFix): BM_compatible = True PATTERN = '|'.join(_pats) def transform(self, node, results): new_value = _TYPE_MAPPING.get(results["name"].value) if new_value: return Name(new_value, prefix=node.prefix) return None
1,797
Python
.py
50
29.8
80
0.61268
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
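A short sketch of the types-module rewrite above, again via the stdlib fixer path. StringType maps to bytes here, and the XXX comments in the mapping table flag that this choice is debatable for Python 2 str:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_types"])
out = tool.refactor_string("isinstance(x, types.StringType)\n", "<example>")
print(str(out), end="")
# Expected: isinstance(x, bytes)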
28,280
fix_long.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_long.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that turns 'long' into 'int' everywhere. """ # Local imports from lib2to3 import fixer_base from lib2to3.fixer_util import is_probably_builtin class FixLong(fixer_base.BaseFix): BM_compatible = True PATTERN = "'long'" def transform(self, node, results): if is_probably_builtin(node): node.value = "int" node.changed()
476
Python
.py
14
29.5
50
0.704595
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
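The is_probably_builtin() guard in the fixer above is what keeps the rename from touching names that merely happen to be called long. A sketch, with the second case expected to pass through unchanged because it is an attribute lookup:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_long"])
print(str(tool.refactor_string("n = long(42)\n", "<example>")), end="")
# Expected: n = int(42)
print(str(tool.refactor_string("n = obj.long\n", "<example>")), end="")
# Expected (unchanged): n = obj.long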
28,281
fix_print.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_print.py
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for print. Change: 'print' into 'print()' 'print ...' into 'print(...)' 'print ... ,' into 'print(..., end=" ")' 'print >>x, ...' into 'print(..., file=x)' No changes are applied if print_function is imported from __future__ """ # Local imports from .. import patcomp from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, Comma, String, is_tuple parend_expr = patcomp.compile_pattern( """atom< '(' [atom|STRING|NAME] ')' >""" ) class FixPrint(fixer_base.BaseFix): BM_compatible = True PATTERN = """ simple_stmt< any* bare='print' any* > | print_stmt """ def transform(self, node, results): assert results bare_print = results.get("bare") if bare_print: # Special-case print all by itself bare_print.replace(Call(Name("print"), [], prefix=bare_print.prefix)) return assert node.children[0] == Name("print") args = node.children[1:] if len(args) == 1 and parend_expr.match(args[0]): # We don't want to keep sticking parens around an # already-parenthesised expression. return sep = end = file = None if args and args[-1] == Comma(): args = args[:-1] end = " " if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"): assert len(args) >= 2 file = args[1].clone() args = args[3:] # Strip a possible comma after the file expression # Now synthesize a print(args, sep=..., end=..., file=...) node. l_args = [arg.clone() for arg in args] if l_args: l_args[0].prefix = "" if sep is not None or end is not None or file is not None: if sep is not None: self.add_kwarg(l_args, "sep", String(repr(sep))) if end is not None: self.add_kwarg(l_args, "end", String(repr(end))) if file is not None: self.add_kwarg(l_args, "file", file) n_stmt = Call(Name("print"), l_args) n_stmt.prefix = node.prefix return n_stmt def add_kwarg(self, l_nodes, s_kwd, n_expr): # XXX All this prefix-setting may lose comments (though rarely) n_expr.prefix = "" n_argument = pytree.Node(self.syms.argument, (Name(s_kwd), pytree.Leaf(token.EQUAL, "="), n_expr)) if l_nodes: l_nodes.append(Comma()) n_argument.prefix = " " l_nodes.append(n_argument)
2,854
Python
.py
71
29.957746
78
0.536321
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
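A sketch exercising two of the print-statement forms listed in the docstring above: the trailing comma (becomes end=' ') and the >>stream redirect (becomes file=...). This relies on RefactoringTool's default grammar, which still parses print as a statement:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_print"])
out = tool.refactor_string('print >>sys.stderr, "oops",\n', "<example>")
print(str(out), end="")
# Expected: print("oops", end=' ', file=sys.stderr)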
28,282
fix_numliterals.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_numliterals.py
"""Fixer that turns 1L into 1, 0755 into 0o755. """ # Copyright 2007 Georg Brandl. # Licensed to PSF under a Contributor Agreement. # Local imports from ..pgen2 import token from .. import fixer_base from ..fixer_util import Number class FixNumliterals(fixer_base.BaseFix): # This is so simple that we don't need the pattern compiler. _accept_type = token.NUMBER def match(self, node): # Override return (node.value.startswith("0") or node.value[-1] in "Ll") def transform(self, node, results): val = node.value if val[-1] in 'Ll': val = val[:-1] elif val.startswith('0') and val.isdigit() and len(set(val)) > 1: val = "0o" + val[1:] return Number(val, prefix=node.prefix)
768
Python
.py
21
31.047619
73
0.65
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
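Both rewrites from the record above in one sketch; note that the len(set(val)) > 1 guard in transform() leaves a bare 0 (or 00) alone:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_numliterals"])
print(str(tool.refactor_string("x = 0755\n", "<example>")), end="")
# Expected: x = 0o755
print(str(tool.refactor_string("y = 1L\n", "<example>")), end="")
# Expected: y = 1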
28,283
fix_input.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_input.py
"""Fixer that changes input(...) into eval(input(...)).""" # Author: Andre Roberge # Local imports from .. import fixer_base from ..fixer_util import Call, Name from .. import patcomp context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >") class FixInput(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'input' args=trailer< '(' [any] ')' > > """ def transform(self, node, results): # If we're already wrapped in a eval() call, we're done. if context.match(node.parent.parent): return new = node.clone() new.prefix = "" return Call(Name("eval"), [new], prefix=node.prefix)
707
Python
.py
19
31
75
0.602056
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
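The context pattern in the fixer above exists purely to make the transform idempotent. A sketch showing a bare input() being wrapped while an already-wrapped call is left alone:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_input"])
print(str(tool.refactor_string("x = input('n? ')\n", "<example>")), end="")
# Expected: x = eval(input('n? '))
print(str(tool.refactor_string("x = eval(input('n? '))\n", "<example>")), end="")
# Expected (unchanged): x = eval(input('n? '))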
28,284
fix_intern.py
DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_intern.py
# Copyright 2006 Georg Brandl. # Licensed to PSF under a Contributor Agreement. """Fixer for intern(). intern(s) -> sys.intern(s)""" # Local imports from .. import pytree from .. import fixer_base from ..fixer_util import Name, Attr, touch_import class FixIntern(fixer_base.BaseFix): BM_compatible = True order = "pre" PATTERN = """ power< 'intern' trailer< lpar='(' ( not(arglist | argument<any '=' any>) obj=any | obj=arglist<(not argument<any '=' any>) any ','> ) rpar=')' > after=any* > """ def transform(self, node, results): syms = self.syms obj = results["obj"].clone() if obj.type == syms.arglist: newarglist = obj.clone() else: newarglist = pytree.Node(syms.arglist, [obj.clone()]) after = results["after"] if after: after = [n.clone() for n in after] new = pytree.Node(syms.power, Attr(Name("sys"), Name("intern")) + [pytree.Node(syms.trailer, [results["lpar"].clone(), newarglist, results["rpar"].clone()])] + after) new.prefix = node.prefix touch_import(None, 'sys', node) return new
1,402
Python
.py
39
24.538462
75
0.500737
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
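Besides rewriting the call, the fixer above invokes touch_import() so that sys gets imported when it is not already. A sketch of both effects:

from lib2to3.refactor import RefactoringTool

tool = RefactoringTool(["lib2to3.fixes.fix_intern"])
print(str(tool.refactor_string("name = intern(s)\n", "<example>")), end="")
# Expected:
# import sys
# name = sys.intern(s)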
28,285
anaconda_pep257.py
DamnWidget_anaconda/anaconda_lib/linting/anaconda_pep257.py
# -*- coding: utf8 -*- # Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details """ Anaconda PyDocStyle wrapper """ try: import pydocstyle class PEP257(object): """PEP-257 class for Anaconda """ def __init__(self, code, filename, ignore): self.code = code self.filename = filename self.ignore = [] if ignore is None else ignore def execute(self): """Check the code with pep257 to find errors """ errors = [] try: for error in pydocstyle.ConventionChecker().check_source( self.code, self.filename ): error_code = getattr(error, 'code', None) if error_code is not None and error_code not in self.ignore: # noqa errors.append(self._convert(error)) except Exception as error: print(error) return errors def _convert(self, error): """ Convert the error text returned back from pep257 into something that anaconda can understand. """ return { 'level': 'V', 'lineno': error.line, 'offset': 0, 'code': error.code, 'raw_error': '[V] PEP 257 ({0}): {1}'.format( error.code, error.message.split(': ', 1)[1] ), 'message': '[V] PEP 257 (%s): %s', 'underline_range': True } except SyntaxError: # PyDocStyle dropped support for Python 2.6 class PEP257(object): """Dummy PEP257 class for Python 2.6 """ def __init__(self, code, filename, ignore): pass def execute(self): """Return an empty list """ return []
1,974
Python
.py
56
23.357143
88
0.503151
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
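A sketch of the underlying pydocstyle call that PEP257.execute() above wraps, assuming a pydocstyle version whose ConventionChecker.check_source() accepts source text plus a display filename (the same two arguments the wrapper passes):

import pydocstyle

source = "def f():\n    return 1\n"
for error in pydocstyle.ConventionChecker().check_source(source, "buffer.py"):
    # Each error exposes the attributes the wrapper reads: line, code, message.
    print(error.line, error.code, error.message)
# Expected output along the lines of:
# 1 D103 D103: Missing docstring in public function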
28,286
anaconda_mccabe.py
DamnWidget_anaconda/anaconda_lib/linting/anaconda_mccabe.py
# -*- coding: utf8 -*- # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details """ Anaconda McCabe """ import ast from .mccabe import McCabeChecker class AnacondaMcCabe(object): """Wrapper object around McCabe python script """ checker = McCabeChecker def __init__(self, code, filename): self.code = code self.filename = filename @property def tree(self): """Compile and send back an AST if buffer is able to be parsed """ try: code = self.code.encode('utf8') + b'\n' return compile(code, self.filename, 'exec', ast.PyCF_ONLY_AST) except SyntaxError: return None def get_code_complexity(self, threshold=7): """Get the code complexity for the current buffer and return it """ if self.tree is not None: self.checker.max_complexity = threshold return self.parse(self.checker(self.tree, self.filename).run()) return None def parse(self, complexities): """ Parse the given list of complexities to something that anaconda understand and is able to handle """ errors = [] for complexity in complexities: errors.append({ 'line': int(complexity[0]), 'offset': int(complexity[1] + 1), 'code': complexity[2].split(' ', 1)[0], 'message': complexity[2].split(' ', 1)[1] }) return errors
1,577
Python
.py
45
26.666667
75
0.597625
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
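A usage sketch for the wrapper above. The import path is an assumption about how anaconda_lib sits on sys.path (inside Sublime Text the plugin arranges this itself), and threshold=1 is deliberately low so that a two-branch function gets reported:

from anaconda_lib.linting.anaconda_mccabe import AnacondaMcCabe

code = "def f(x):\n    if x:\n        return 1\n    return 2\n"
checker = AnacondaMcCabe(code, "example.py")
for err in checker.get_code_complexity(threshold=1) or []:
    print(err["line"], err["offset"], err["code"], err["message"])
# Expected output along the lines of:
# 1 1 C901 'f' is too complex (2)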
28,287
pycodestyle.py
DamnWidget_anaconda/anaconda_lib/linting/pycodestyle.py
#!/usr/bin/env python # pycodestyle.py - Check Python source code formatting, according to # PEP 8 # # Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net> # Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com> # Copyright (C) 2014-2016 Ian Lee <ianlee1521@gmail.com> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. r""" Check Python source code formatting, according to PEP 8. For usage and a list of options, try this: $ python pycodestyle.py -h This program and its regression test suite live here: https://github.com/pycqa/pycodestyle Groups of errors and warnings: E errors W warnings 100 indentation 200 whitespace 300 blank lines 400 imports 500 line length 600 deprecation 700 statements 900 syntax error """ import bisect import inspect import keyword import os import re import sys import time import tokenize import warnings from fnmatch import fnmatch from functools import lru_cache from optparse import OptionParser try: from configparser import RawConfigParser from io import TextIOWrapper except ImportError: from ConfigParser import RawConfigParser # this is a performance hack. see https://bugs.python.org/issue43014 if ( sys.version_info < (3, 10) and callable(getattr(tokenize, '_compile', None)) ): # pragma: no cover (<py310) tokenize._compile = lru_cache()(tokenize._compile) # type: ignore __version__ = '2.8.0' DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503,W504' try: if sys.platform == 'win32': USER_CONFIG = os.path.expanduser(r'~\.pycodestyle') else: USER_CONFIG = os.path.join( os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), 'pycodestyle' ) except ImportError: USER_CONFIG = None PROJECT_CONFIG = ('setup.cfg', 'tox.ini') TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') MAX_LINE_LENGTH = 79 # Number of blank lines between various code parts. BLANK_LINES_CONFIG = { # Top level class and function. 'top_level': 2, # Methods and nested class and function. 
'method': 1, } MAX_DOC_LENGTH = 72 INDENT_SIZE = 4 REPORT_FORMAT = { 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', } PyCF_ONLY_AST = 1024 SINGLETONS = frozenset(['False', 'None', 'True']) KEYWORDS = frozenset(keyword.kwlist + ['print', 'async']) - SINGLETONS UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-', '@']) WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%']) ASSIGNMENT_EXPRESSION_OP = [':='] if sys.version_info >= (3, 8) else [] WS_NEEDED_OPERATORS = frozenset([ '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>', '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '=', 'and', 'in', 'is', 'or', '->'] + ASSIGNMENT_EXPRESSION_OP) WHITESPACE = frozenset(' \t\xa0') NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE]) SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT]) # ERRORTOKEN is triggered by backticks in Python 3 SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN]) BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines'] INDENT_REGEX = re.compile(r'([ \t]*)') RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,') RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$') ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b') DOCSTRING_REGEX = re.compile(r'u?r?["\']') EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[\[({][ \t]|[ \t][\]}),;:](?!=)') WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') COMPARE_SINGLETON_REGEX = re.compile(r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)' r'\s*(?(1)|(None|False|True))\b') COMPARE_NEGATIVE_REGEX = re.compile(r'\b(?<!is\s)(not)\s+[^][)(}{ ]+\s+' r'(in|is)\s') COMPARE_TYPE_REGEX = re.compile( r'(?:[=!]=|is(?:\s+not)?)\s+type(?:\s*\(\s*([^)]*[^ )])\s*\))' + r'|\btype(?:\s*\(\s*([^)]*[^ )])\s*\))\s+(?:[=!]=|is(?:\s+not)?)' ) KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS)) OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+|:=)(\s*)') LAMBDA_REGEX = re.compile(r'\blambda\b') HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$') STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\b') STARTSWITH_TOP_LEVEL_REGEX = re.compile(r'^(async\s+def\s+|def\s+|class\s+|@)') STARTSWITH_INDENT_STATEMENT_REGEX = re.compile( r'^\s*({})\b'.format('|'.join(s.replace(' ', r'\s+') for s in ( 'def', 'async def', 'for', 'async for', 'if', 'elif', 'else', 'try', 'except', 'finally', 'with', 'async with', 'class', 'while', ))) ) DUNDER_REGEX = re.compile(r"^__([^\s]+)__(?::\s*[a-zA-Z.0-9_\[\]\"]+)? 
= ") BLANK_EXCEPT_REGEX = re.compile(r"except\s*:") _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} def _get_parameters(function): return [parameter.name for parameter in inspect.signature(function).parameters.values() if parameter.kind == parameter.POSITIONAL_OR_KEYWORD] def register_check(check, codes=None): """Register a new check object.""" def _add_check(check, kind, codes, args): if check in _checks[kind]: _checks[kind][check][0].extend(codes or []) else: _checks[kind][check] = (codes or [''], args) if inspect.isfunction(check): args = _get_parameters(check) if args and args[0] in ('physical_line', 'logical_line'): if codes is None: codes = ERRORCODE_REGEX.findall(check.__doc__ or '') _add_check(check, args[0], codes, args) elif inspect.isclass(check): if _get_parameters(check.__init__)[:2] == ['self', 'tree']: _add_check(check, 'tree', codes, None) return check ######################################################################## # Plugins (check functions) for physical lines ######################################################################## @register_check def tabs_or_spaces(physical_line, indent_char): r"""Never mix tabs and spaces. The most popular way of indenting Python is with spaces only. The second-most popular way is with tabs only. Code indented with a mixture of tabs and spaces should be converted to using spaces exclusively. When invoking the Python command line interpreter with the -t option, it issues warnings about code that illegally mixes tabs and spaces. When using -tt these warnings become errors. These options are highly recommended! Okay: if a == 0:\n a = 1\n b = 1 E101: if a == 0:\n a = 1\n\tb = 1 """ indent = INDENT_REGEX.match(physical_line).group(1) for offset, char in enumerate(indent): if char != indent_char: return offset, "E101 indentation contains mixed spaces and tabs" @register_check def tabs_obsolete(physical_line): r"""On new projects, spaces-only are strongly recommended over tabs. Okay: if True:\n return W191: if True:\n\treturn """ indent = INDENT_REGEX.match(physical_line).group(1) if '\t' in indent: return indent.index('\t'), "W191 indentation contains tabs" @register_check def trailing_whitespace(physical_line): r"""Trailing whitespace is superfluous. The warning returned varies on whether the line itself is blank, for easier filtering for those who want to indent their blank lines. Okay: spam(1)\n# W291: spam(1) \n# W293: class Foo(object):\n \n bang = 12 """ physical_line = physical_line.rstrip('\n') # chr(10), newline physical_line = physical_line.rstrip('\r') # chr(13), carriage return physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L stripped = physical_line.rstrip(' \t\v') if physical_line != stripped: if stripped: return len(stripped), "W291 trailing whitespace" else: return 0, "W293 blank line contains whitespace" @register_check def trailing_blank_lines(physical_line, lines, line_number, total_lines): r"""Trailing blank lines are superfluous. Okay: spam(1) W391: spam(1)\n However the last line should end with a new line (warning W292). """ if line_number == total_lines: stripped_last_line = physical_line.rstrip('\r\n') if physical_line and not stripped_last_line: return 0, "W391 blank line at end of file" if stripped_last_line == physical_line: return len(lines[-1]), "W292 no newline at end of file" @register_check def maximum_line_length(physical_line, max_line_length, multiline, line_number, noqa): r"""Limit all lines to a maximum of 79 characters. 
There are still many devices around that are limited to 80 character lines; plus, limiting windows to 80 characters makes it possible to have several windows side-by-side. The default wrapping on such devices looks ugly. Therefore, please limit all lines to a maximum of 79 characters. For flowing long blocks of text (docstrings or comments), limiting the length to 72 characters is recommended. Reports error E501. """ line = physical_line.rstrip() length = len(line) if length > max_line_length and not noqa: # Special case: ignore long shebang lines. if line_number == 1 and line.startswith('#!'): return # Special case for long URLs in multi-line docstrings or # comments, but still report the error when the 72 first chars # are whitespaces. chunks = line.split() if ((len(chunks) == 1 and multiline) or (len(chunks) == 2 and chunks[0] == '#')) and \ len(line) - len(chunks[-1]) < max_line_length - 7: return if length > max_line_length: return (max_line_length, "E501 line too long " "(%d > %d characters)" % (length, max_line_length)) ######################################################################## # Plugins (check functions) for logical lines ######################################################################## def _is_one_liner(logical_line, indent_level, lines, line_number): if not STARTSWITH_TOP_LEVEL_REGEX.match(logical_line): return False line_idx = line_number - 1 if line_idx < 1: prev_indent = 0 else: prev_indent = expand_indent(lines[line_idx - 1]) if prev_indent > indent_level: return False while line_idx < len(lines): line = lines[line_idx].strip() if not line.startswith('@') and STARTSWITH_TOP_LEVEL_REGEX.match(line): break else: line_idx += 1 else: return False # invalid syntax: EOF while searching for def/class next_idx = line_idx + 1 while next_idx < len(lines): if lines[next_idx].strip(): break else: next_idx += 1 else: return True # line is last in the file return expand_indent(lines[next_idx]) <= indent_level @register_check def blank_lines(logical_line, blank_lines, indent_level, line_number, blank_before, previous_logical, previous_unindented_logical_line, previous_indent_level, lines): r"""Separate top-level function and class definitions with two blank lines. Method definitions inside a class are separated by a single blank line. Extra blank lines may be used (sparingly) to separate groups of related functions. Blank lines may be omitted between a bunch of related one-liners (e.g. a set of dummy implementations). Use blank lines in functions, sparingly, to indicate logical sections. 
Okay: def a():\n pass\n\n\ndef b():\n pass Okay: def a():\n pass\n\n\nasync def b():\n pass Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass Okay: default = 1\nfoo = 1 Okay: classify = 1\nfoo = 1 E301: class Foo:\n b = 0\n def bar():\n pass E302: def a():\n pass\n\ndef b(n):\n pass E302: def a():\n pass\n\nasync def b(n):\n pass E303: def a():\n pass\n\n\n\ndef b(n):\n pass E303: def a():\n\n\n\n pass E304: @decorator\n\ndef a():\n pass E305: def a():\n pass\na() E306: def a():\n def b():\n pass\n def c():\n pass """ # noqa top_level_lines = BLANK_LINES_CONFIG['top_level'] method_lines = BLANK_LINES_CONFIG['method'] if not previous_logical and blank_before < top_level_lines: return # Don't expect blank lines before the first line if previous_logical.startswith('@'): if blank_lines: yield 0, "E304 blank lines found after function decorator" elif (blank_lines > top_level_lines or (indent_level and blank_lines == method_lines + 1) ): yield 0, "E303 too many blank lines (%d)" % blank_lines elif STARTSWITH_TOP_LEVEL_REGEX.match(logical_line): # allow a group of one-liners if ( _is_one_liner(logical_line, indent_level, lines, line_number) and blank_before == 0 ): return if indent_level: if not (blank_before == method_lines or previous_indent_level < indent_level or DOCSTRING_REGEX.match(previous_logical) ): ancestor_level = indent_level nested = False # Search backwards for a def ancestor or tree root # (top level). for line in lines[line_number - top_level_lines::-1]: if line.strip() and expand_indent(line) < ancestor_level: ancestor_level = expand_indent(line) nested = STARTSWITH_DEF_REGEX.match(line.lstrip()) if nested or ancestor_level == 0: break if nested: yield 0, "E306 expected %s blank line before a " \ "nested definition, found 0" % (method_lines,) else: yield 0, "E301 expected {} blank line, found 0".format( method_lines) elif blank_before != top_level_lines: yield 0, "E302 expected %s blank lines, found %d" % ( top_level_lines, blank_before) elif (logical_line and not indent_level and blank_before != top_level_lines and previous_unindented_logical_line.startswith(('def ', 'class ')) ): yield 0, "E305 expected %s blank lines after " \ "class or function definition, found %d" % ( top_level_lines, blank_before) @register_check def extraneous_whitespace(logical_line): r"""Avoid extraneous whitespace. Avoid extraneous whitespace in these situations: - Immediately inside parentheses, brackets or braces. - Immediately before a comma, semicolon, or colon. Okay: spam(ham[1], {eggs: 2}) E201: spam( ham[1], {eggs: 2}) E201: spam(ham[ 1], {eggs: 2}) E201: spam(ham[1], { eggs: 2}) E202: spam(ham[1], {eggs: 2} ) E202: spam(ham[1 ], {eggs: 2}) E202: spam(ham[1], {eggs: 2 }) E203: if x == 4: print x, y; x, y = y , x E203: if x == 4: print x, y ; x, y = y, x E203: if x == 4 : print x, y; x, y = y, x """ line = logical_line for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): text = match.group() char = text.strip() found = match.start() if text[-1].isspace(): # assert char in '([{' yield found + 1, "E201 whitespace after '%s'" % char elif line[found - 1] != ',': code = ('E202' if char in '}])' else 'E203') # if char in ',;:' yield found, f"{code} whitespace before '{char}'" @register_check def whitespace_around_keywords(logical_line): r"""Avoid extraneous whitespace around keywords. 
Okay: True and False E271: True and False E272: True and False E273: True and\tFalse E274: True\tand False """ for match in KEYWORD_REGEX.finditer(logical_line): before, after = match.groups() if '\t' in before: yield match.start(1), "E274 tab before keyword" elif len(before) > 1: yield match.start(1), "E272 multiple spaces before keyword" if '\t' in after: yield match.start(2), "E273 tab after keyword" elif len(after) > 1: yield match.start(2), "E271 multiple spaces after keyword" @register_check def missing_whitespace_after_import_keyword(logical_line): r"""Multiple imports in form from x import (a, b, c) should have space between import statement and parenthesised name list. Okay: from foo import (bar, baz) E275: from foo import(bar, baz) E275: from importable.module import(bar, baz) """ line = logical_line indicator = ' import(' if line.startswith('from '): found = line.find(indicator) if -1 < found: pos = found + len(indicator) - 1 yield pos, "E275 missing whitespace after keyword" @register_check def missing_whitespace(logical_line): r"""Each comma, semicolon or colon should be followed by whitespace. Okay: [a, b] Okay: (3,) Okay: a[1:4] Okay: a[:4] Okay: a[1:] Okay: a[1:4:2] E231: ['a','b'] E231: foo(bar,baz) E231: [{'a':'b'}] """ line = logical_line for index in range(len(line) - 1): char = line[index] next_char = line[index + 1] if char in ',;:' and next_char not in WHITESPACE: before = line[:index] if char == ':' and before.count('[') > before.count(']') and \ before.rfind('{') < before.rfind('['): continue # Slice syntax, no space required if char == ',' and next_char == ')': continue # Allow tuple with only one element: (3,) if char == ':' and next_char == '=' and sys.version_info >= (3, 8): continue # Allow assignment expression yield index, "E231 missing whitespace after '%s'" % char @register_check def indentation(logical_line, previous_logical, indent_char, indent_level, previous_indent_level, indent_size): r"""Use indent_size (PEP8 says 4) spaces per indentation level. For really old code that you don't want to mess up, you can continue to use 8-space tabs. Okay: a = 1 Okay: if a == 0:\n a = 1 E111: a = 1 E114: # a = 1 Okay: for item in items:\n pass E112: for item in items:\npass E115: for item in items:\n# Hi\n pass Okay: a = 1\nb = 2 E113: a = 1\n b = 2 E116: a = 1\n # b = 2 """ c = 0 if logical_line else 3 tmpl = "E11%d %s" if logical_line else "E11%d %s (comment)" if indent_level % indent_size: yield 0, tmpl % ( 1 + c, "indentation is not a multiple of " + str(indent_size), ) indent_expect = previous_logical.endswith(':') if indent_expect and indent_level <= previous_indent_level: yield 0, tmpl % (2 + c, "expected an indented block") elif not indent_expect and indent_level > previous_indent_level: yield 0, tmpl % (3 + c, "unexpected indentation") if indent_expect: expected_indent_amount = 8 if indent_char == '\t' else 4 expected_indent_level = previous_indent_level + expected_indent_amount if indent_level > expected_indent_level: yield 0, tmpl % (7, 'over-indented') @register_check def continued_indentation(logical_line, tokens, indent_level, hang_closing, indent_char, indent_size, noqa, verbose): r"""Continuation lines indentation. Continuation lines should align wrapped elements either vertically using Python's implicit line joining inside parentheses, brackets and braces, or using a hanging indent. 
When using a hanging indent these considerations should be applied: - there should be no arguments on the first line, and - further indentation should be used to clearly distinguish itself as a continuation line. Okay: a = (\n) E123: a = (\n ) Okay: a = (\n 42) E121: a = (\n 42) E122: a = (\n42) E123: a = (\n 42\n ) E124: a = (24,\n 42\n) E125: if (\n b):\n pass E126: a = (\n 42) E127: a = (24,\n 42) E128: a = (24,\n 42) E129: if (a or\n b):\n pass E131: a = (\n 42\n 24) """ first_row = tokens[0][2][0] nrows = 1 + tokens[-1][2][0] - first_row if noqa or nrows == 1: return # indent_next tells us whether the next block is indented; assuming # that it is indented by 4 spaces, then we should not allow 4-space # indents on the final continuation line; in turn, some other # indents are allowed to have an extra 4 spaces. indent_next = logical_line.endswith(':') row = depth = 0 valid_hangs = (indent_size,) if indent_char != '\t' \ else (indent_size, indent_size * 2) # remember how many brackets were opened on each line parens = [0] * nrows # relative indents of physical lines rel_indent = [0] * nrows # for each depth, collect a list of opening rows open_rows = [[0]] # for each depth, memorize the hanging indentation hangs = [None] # visual indents indent_chances = {} last_indent = tokens[0][2] visual_indent = None last_token_multiline = False # for each depth, memorize the visual indent column indent = [last_indent[1]] if verbose >= 3: print(">>> " + tokens[0][4].rstrip()) for token_type, text, start, end, line in tokens: newline = row < start[0] - first_row if newline: row = start[0] - first_row newline = not last_token_multiline and token_type not in NEWLINE if newline: # this is the beginning of a continuation line. last_indent = start if verbose >= 3: print("... " + line.rstrip()) # record the initial indent. rel_indent[row] = expand_indent(line) - indent_level # identify closing bracket close_bracket = (token_type == tokenize.OP and text in ']})') # is the indent relative to an opening bracket line? for open_row in reversed(open_rows[depth]): hang = rel_indent[row] - rel_indent[open_row] hanging_indent = hang in valid_hangs if hanging_indent: break if hangs[depth]: hanging_indent = (hang == hangs[depth]) # is there any chance of visual indent? 
visual_indent = (not close_bracket and hang > 0 and indent_chances.get(start[1])) if close_bracket and indent[depth]: # closing bracket for visual indent if start[1] != indent[depth]: yield (start, "E124 closing bracket does not match " "visual indentation") elif close_bracket and not hang: # closing bracket matches indentation of opening # bracket's line if hang_closing: yield start, "E133 closing bracket is missing indentation" elif indent[depth] and start[1] < indent[depth]: if visual_indent is not True: # visual indent is broken yield (start, "E128 continuation line " "under-indented for visual indent") elif hanging_indent or (indent_next and rel_indent[row] == 2 * indent_size): # hanging indent is verified if close_bracket and not hang_closing: yield (start, "E123 closing bracket does not match " "indentation of opening bracket's line") hangs[depth] = hang elif visual_indent is True: # visual indent is verified indent[depth] = start[1] elif visual_indent in (text, str): # ignore token lined up with matching one from a # previous line pass else: # indent is broken if hang <= 0: error = "E122", "missing indentation or outdented" elif indent[depth]: error = "E127", "over-indented for visual indent" elif not close_bracket and hangs[depth]: error = "E131", "unaligned for hanging indent" else: hangs[depth] = hang if hang > indent_size: error = "E126", "over-indented for hanging indent" else: error = "E121", "under-indented for hanging indent" yield start, "%s continuation line %s" % error # look for visual indenting if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) and not indent[depth]): indent[depth] = start[1] indent_chances[start[1]] = True if verbose >= 4: print(f"bracket depth {depth} indent to {start[1]}") # deal with implicit string concatenation elif (token_type in (tokenize.STRING, tokenize.COMMENT) or text in ('u', 'ur', 'b', 'br')): indent_chances[start[1]] = str # visual indent after assert/raise/with elif not row and not depth and text in ["assert", "raise", "with"]: indent_chances[end[1] + 1] = True # special case for the "if" statement because len("if (") == 4 elif not indent_chances and not row and not depth and text == 'if': indent_chances[end[1] + 1] = True elif text == ':' and line[end[1]:].isspace(): open_rows[depth].append(row) # keep track of bracket depth if token_type == tokenize.OP: if text in '([{': depth += 1 indent.append(0) hangs.append(None) if len(open_rows) == depth: open_rows.append([]) open_rows[depth].append(row) parens[row] += 1 if verbose >= 4: print("bracket depth %s seen, col %s, visual min = %s" % (depth, start[1], indent[depth])) elif text in ')]}' and depth > 0: # parent indents should not be more than this one prev_indent = indent.pop() or last_indent[1] hangs.pop() for d in range(depth): if indent[d] > prev_indent: indent[d] = 0 for ind in list(indent_chances): if ind >= prev_indent: del indent_chances[ind] del open_rows[depth + 1:] depth -= 1 if depth: indent_chances[indent[depth]] = True for idx in range(row, -1, -1): if parens[idx]: parens[idx] -= 1 break assert len(indent) == depth + 1 if start[1] not in indent_chances: # allow lining up tokens indent_chances[start[1]] = text last_token_multiline = (start[0] != end[0]) if last_token_multiline: rel_indent[end[0] - first_row] = rel_indent[row] if indent_next and expand_indent(line) == indent_level + indent_size: pos = (start[0], indent[0] + indent_size) if visual_indent: code = "E129 visually indented line" else: code = "E125 continuation line" yield pos, "%s with same 
indent as next logical line" % code @register_check def whitespace_before_parameters(logical_line, tokens): r"""Avoid extraneous whitespace. Avoid extraneous whitespace in the following situations: - before the open parenthesis that starts the argument list of a function call. - before the open parenthesis that starts an indexing or slicing. Okay: spam(1) E211: spam (1) Okay: dict['key'] = list[index] E211: dict ['key'] = list[index] E211: dict['key'] = list [index] """ prev_type, prev_text, __, prev_end, __ = tokens[0] for index in range(1, len(tokens)): token_type, text, start, end, __ = tokens[index] if ( token_type == tokenize.OP and text in '([' and start != prev_end and (prev_type == tokenize.NAME or prev_text in '}])') and # Syntax "class A (B):" is allowed, but avoid it (index < 2 or tokens[index - 2][1] != 'class') and # Allow "return (a.foo for a in range(5))" not keyword.iskeyword(prev_text) and # 'match' and 'case' are only soft keywords ( sys.version_info < (3, 9) or not keyword.issoftkeyword(prev_text) ) ): yield prev_end, "E211 whitespace before '%s'" % text prev_type = token_type prev_text = text prev_end = end @register_check def whitespace_around_operator(logical_line): r"""Avoid extraneous whitespace around an operator. Okay: a = 12 + 3 E221: a = 4 + 5 E222: a = 4 + 5 E223: a = 4\t+ 5 E224: a = 4 +\t5 """ for match in OPERATOR_REGEX.finditer(logical_line): before, after = match.groups() if '\t' in before: yield match.start(1), "E223 tab before operator" elif len(before) > 1: yield match.start(1), "E221 multiple spaces before operator" if '\t' in after: yield match.start(2), "E224 tab after operator" elif len(after) > 1: yield match.start(2), "E222 multiple spaces after operator" @register_check def missing_whitespace_around_operator(logical_line, tokens): r"""Surround operators with a single space on either side. - Always surround these binary operators with a single space on either side: assignment (=), augmented assignment (+=, -= etc.), comparisons (==, <, >, !=, <=, >=, in, not in, is, is not), Booleans (and, or, not). - If operators with different priorities are used, consider adding whitespace around the operators with the lowest priorities. 
Okay: i = i + 1 Okay: submitted += 1 Okay: x = x * 2 - 1 Okay: hypot2 = x * x + y * y Okay: c = (a + b) * (a - b) Okay: foo(bar, key='word', *args, **kwargs) Okay: alpha[:-i] E225: i=i+1 E225: submitted +=1 E225: x = x /2 - 1 E225: z = x **y E225: z = 1and 1 E226: c = (a+b) * (a-b) E226: hypot2 = x*x + y*y E227: c = a|b E228: msg = fmt%(errno, errmsg) """ parens = 0 need_space = False prev_type = tokenize.OP prev_text = prev_end = None operator_types = (tokenize.OP, tokenize.NAME) for token_type, text, start, end, line in tokens: if token_type in SKIP_COMMENTS: continue if text in ('(', 'lambda'): parens += 1 elif text == ')': parens -= 1 if need_space: if start != prev_end: # Found a (probably) needed space if need_space is not True and not need_space[1]: yield (need_space[0], "E225 missing whitespace around operator") need_space = False elif text == '>' and prev_text in ('<', '-'): # Tolerate the "<>" operator, even if running Python 3 # Deal with Python 3's annotated return value "->" pass elif ( # def f(a, /, b): # ^ # def f(a, b, /): # ^ # f = lambda a, /: # ^ prev_text == '/' and text in {',', ')', ':'} or # def f(a, b, /): # ^ prev_text == ')' and text == ':' ): # Tolerate the "/" operator in function definition # For more info see PEP570 pass else: if need_space is True or need_space[1]: # A needed trailing space was not found yield prev_end, "E225 missing whitespace around operator" elif prev_text != '**': code, optype = 'E226', 'arithmetic' if prev_text == '%': code, optype = 'E228', 'modulo' elif prev_text not in ARITHMETIC_OP: code, optype = 'E227', 'bitwise or shift' yield (need_space[0], "%s missing whitespace " "around %s operator" % (code, optype)) need_space = False elif token_type in operator_types and prev_end is not None: if text == '=' and parens: # Allow keyword args or defaults: foo(bar=None). pass elif text in WS_NEEDED_OPERATORS: need_space = True elif text in UNARY_OPERATORS: # Check if the operator is used as a binary operator # Allow unary operators: -123, -x, +1. # Allow argument unpacking: foo(*args, **kwargs). if prev_type == tokenize.OP and prev_text in '}])' or ( prev_type != tokenize.OP and prev_text not in KEYWORDS and ( sys.version_info < (3, 9) or not keyword.issoftkeyword(prev_text) ) ): need_space = None elif text in WS_OPTIONAL_OPERATORS: need_space = None if need_space is None: # Surrounding space is optional, but ensure that # trailing space matches opening space need_space = (prev_end, start != prev_end) elif need_space and start == prev_end: # A needed opening space was not found yield prev_end, "E225 missing whitespace around operator" need_space = False prev_type = token_type prev_text = text prev_end = end @register_check def whitespace_around_comma(logical_line): r"""Avoid extraneous whitespace after a comma or a colon. Note: these checks are disabled by default Okay: a = (1, 2) E241: a = (1, 2) E242: a = (1,\t2) """ line = logical_line for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): found = m.start() + 1 if '\t' in m.group(): yield found, "E242 tab after '%s'" % m.group()[0] else: yield found, "E241 multiple spaces after '%s'" % m.group()[0] @register_check def whitespace_around_named_parameter_equals(logical_line, tokens): r"""Don't use spaces around the '=' sign in function arguments. Don't use spaces around the '=' sign when used to indicate a keyword argument or a default parameter value, except when using a type annotation. 
Okay: def complex(real, imag=0.0): Okay: return magic(r=real, i=imag) Okay: boolean(a == b) Okay: boolean(a != b) Okay: boolean(a <= b) Okay: boolean(a >= b) Okay: def foo(arg: int = 42): Okay: async def foo(arg: int = 42): E251: def complex(real, imag = 0.0): E251: return magic(r = real, i = imag) E252: def complex(real, image: float=0.0): """ parens = 0 no_space = False require_space = False prev_end = None annotated_func_arg = False in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line)) message = "E251 unexpected spaces around keyword / parameter equals" missing_message = "E252 missing whitespace around parameter equals" for token_type, text, start, end, line in tokens: if token_type == tokenize.NL: continue if no_space: no_space = False if start != prev_end: yield (prev_end, message) if require_space: require_space = False if start == prev_end: yield (prev_end, missing_message) if token_type == tokenize.OP: if text in '([': parens += 1 elif text in ')]': parens -= 1 elif in_def and text == ':' and parens == 1: annotated_func_arg = True elif parens == 1 and text == ',': annotated_func_arg = False elif parens and text == '=': if annotated_func_arg and parens == 1: require_space = True if start == prev_end: yield (prev_end, missing_message) else: no_space = True if start != prev_end: yield (prev_end, message) if not parens: annotated_func_arg = False prev_end = end @register_check def whitespace_before_comment(logical_line, tokens): """Separate inline comments by at least two spaces. An inline comment is a comment on the same line as a statement. Inline comments should be separated by at least two spaces from the statement. They should start with a # and a single space. Each line of a block comment starts with a # and one or multiple spaces as there can be indented text inside the comment. Okay: x = x + 1 # Increment x Okay: x = x + 1 # Increment x Okay: # Block comments: Okay: # - Block comment list Okay: # \xa0- Block comment list E261: x = x + 1 # Increment x E262: x = x + 1 #Increment x E262: x = x + 1 # Increment x E262: x = x + 1 # \xa0Increment x E265: #Block comment E266: ### Block comment """ prev_end = (0, 0) for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: inline_comment = line[:start[1]].strip() if inline_comment: if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: yield (prev_end, "E261 at least two spaces before inline comment") symbol, sp, comment = text.partition(' ') bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#') if inline_comment: if bad_prefix or comment[:1] in WHITESPACE: yield start, "E262 inline comment should start with '# '" elif bad_prefix and (bad_prefix != '!' or start[0] > 1): if bad_prefix != '#': yield start, "E265 block comment should start with '# '" elif comment: yield start, "E266 too many leading '#' for block comment" elif token_type != tokenize.NL: prev_end = end @register_check def imports_on_separate_lines(logical_line): r"""Place imports on separate lines. Okay: import os\nimport sys E401: import sys, os Okay: from subprocess import Popen, PIPE Okay: from myclas import MyClass Okay: from foo.bar.yourclass import YourClass Okay: import myclass Okay: import foo.bar.yourclass """ line = logical_line if line.startswith('import '): found = line.find(',') if -1 < found and ';' not in line[:found]: yield found, "E401 multiple imports on one line" @register_check def module_imports_on_top_of_file( logical_line, indent_level, checker_state, noqa): r"""Place imports at the top of the file. 
Always put imports at the top of the file, just after any module comments and docstrings, and before module globals and constants. Okay: import os Okay: # this is a comment\nimport os Okay: '''this is a module docstring'''\nimport os Okay: r'''this is a module docstring'''\nimport os Okay: try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y Okay: try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y E402: a=1\nimport os E402: 'One string'\n"Two string"\nimport os E402: a=1\nfrom sys import x Okay: if x:\n import os """ # noqa def is_string_literal(line): if line[0] in 'uUbB': line = line[1:] if line and line[0] in 'rR': line = line[1:] return line and (line[0] == '"' or line[0] == "'") allowed_keywords = ( 'try', 'except', 'else', 'finally', 'with', 'if', 'elif') if indent_level: # Allow imports in conditional statement/function return if not logical_line: # Allow empty lines or comments return if noqa: return line = logical_line if line.startswith('import ') or line.startswith('from '): if checker_state.get('seen_non_imports', False): yield 0, "E402 module level import not at top of file" elif re.match(DUNDER_REGEX, line): return elif any(line.startswith(kw) for kw in allowed_keywords): # Allow certain keywords intermixed with imports in order to # support conditional or filtered importing return elif is_string_literal(line): # The first literal is a docstring, allow it. Otherwise, report # error. if checker_state.get('seen_docstring', False): checker_state['seen_non_imports'] = True else: checker_state['seen_docstring'] = True else: checker_state['seen_non_imports'] = True @register_check def compound_statements(logical_line): r"""Compound statements (on the same line) are generally discouraged. While sometimes it's okay to put an if/for/while with a small body on the same line, never do this for multi-clause statements. Also avoid folding such long lines! Always use a def statement instead of an assignment statement that binds a lambda expression directly to a name. 
Okay: if foo == 'blah':\n do_blah_thing() Okay: do_one() Okay: do_two() Okay: do_three() E701: if foo == 'blah': do_blah_thing() E701: for x in lst: total += x E701: while t < 10: t = delay() E701: if foo == 'blah': do_blah_thing() E701: else: do_non_blah_thing() E701: try: something() E701: finally: cleanup() E701: if foo == 'blah': one(); two(); three() E702: do_one(); do_two(); do_three() E703: do_four(); # useless semicolon E704: def f(x): return 2*x E731: f = lambda x: 2*x """ line = logical_line last_char = len(line) - 1 found = line.find(':') prev_found = 0 counts = {char: 0 for char in '{}[]()'} while -1 < found < last_char: update_counts(line[prev_found:found], counts) if ((counts['{'] <= counts['}'] and # {'a': 1} (dict) counts['['] <= counts[']'] and # [1:2] (slice) counts['('] <= counts[')']) and # (annotation) not (sys.version_info >= (3, 8) and line[found + 1] == '=')): # assignment expression lambda_kw = LAMBDA_REGEX.search(line, 0, found) if lambda_kw: before = line[:lambda_kw.start()].rstrip() if before[-1:] == '=' and before[:-1].strip().isidentifier(): yield 0, ("E731 do not assign a lambda expression, use a " "def") break if STARTSWITH_DEF_REGEX.match(line): yield 0, "E704 multiple statements on one line (def)" elif STARTSWITH_INDENT_STATEMENT_REGEX.match(line): yield found, "E701 multiple statements on one line (colon)" prev_found = found found = line.find(':', found + 1) found = line.find(';') while -1 < found: if found < last_char: yield found, "E702 multiple statements on one line (semicolon)" else: yield found, "E703 statement ends with a semicolon" found = line.find(';', found + 1) @register_check def explicit_line_join(logical_line, tokens): r"""Avoid explicit line join between brackets. The preferred way of wrapping long lines is by using Python's implied line continuation inside parentheses, brackets and braces. Long lines can be broken over multiple lines by wrapping expressions in parentheses. These should be used in preference to using a backslash for line continuation. E502: aaa = [123, \\n 123] E502: aaa = ("bbb " \\n "ccc") Okay: aaa = [123,\n 123] Okay: aaa = ("bbb "\n "ccc") Okay: aaa = "bbb " \\n "ccc" Okay: aaa = 123 # \\ """ prev_start = prev_end = parens = 0 comment = False backslash = None for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: comment = True if start[0] != prev_start and parens and backslash and not comment: yield backslash, "E502 the backslash is redundant between brackets" if end[0] != prev_end: if line.rstrip('\r\n').endswith('\\'): backslash = (end[0], len(line.splitlines()[-1]) - 1) else: backslash = None prev_start = prev_end = end[0] else: prev_start = start[0] if token_type == tokenize.OP: if text in '([{': parens += 1 elif text in ')]}': parens -= 1 # The % character is strictly speaking a binary operator, but the # common usage seems to be to put it next to the format parameters, # after a line break. _SYMBOLIC_OPS = frozenset("()[]{},:.;@=%~") | frozenset(("...",)) def _is_binary_operator(token_type, text): return ( token_type == tokenize.OP or text in {'and', 'or'} ) and ( text not in _SYMBOLIC_OPS ) def _break_around_binary_operators(tokens): """Private function to reduce duplication. This factors out the shared details between :func:`break_before_binary_operator` and :func:`break_after_binary_operator`. 
""" line_break = False unary_context = True # Previous non-newline token types and text previous_token_type = None previous_text = None for token_type, text, start, end, line in tokens: if token_type == tokenize.COMMENT: continue if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: line_break = True else: yield (token_type, text, previous_token_type, previous_text, line_break, unary_context, start) unary_context = text in '([{,;' line_break = False previous_token_type = token_type previous_text = text @register_check def break_before_binary_operator(logical_line, tokens): r""" Avoid breaks before binary operators. The preferred place to break around a binary operator is after the operator, not before it. W503: (width == 0\n + height == 0) W503: (width == 0\n and height == 0) W503: var = (1\n & ~2) W503: var = (1\n / -2) W503: var = (1\n + -1\n + -2) Okay: foo(\n -x) Okay: foo(x\n []) Okay: x = '''\n''' + '' Okay: foo(x,\n -y) Okay: foo(x, # comment\n -y) """ for context in _break_around_binary_operators(tokens): (token_type, text, previous_token_type, previous_text, line_break, unary_context, start) = context if (_is_binary_operator(token_type, text) and line_break and not unary_context and not _is_binary_operator(previous_token_type, previous_text)): yield start, "W503 line break before binary operator" @register_check def break_after_binary_operator(logical_line, tokens): r""" Avoid breaks after binary operators. The preferred place to break around a binary operator is before the operator, not after it. W504: (width == 0 +\n height == 0) W504: (width == 0 and\n height == 0) W504: var = (1 &\n ~2) Okay: foo(\n -x) Okay: foo(x\n []) Okay: x = '''\n''' + '' Okay: x = '' + '''\n''' Okay: foo(x,\n -y) Okay: foo(x, # comment\n -y) The following should be W504 but unary_context is tricky with these Okay: var = (1 /\n -2) Okay: var = (1 +\n -1 +\n -2) """ prev_start = None for context in _break_around_binary_operators(tokens): (token_type, text, previous_token_type, previous_text, line_break, unary_context, start) = context if (_is_binary_operator(previous_token_type, previous_text) and line_break and not unary_context and not _is_binary_operator(token_type, text)): yield prev_start, "W504 line break after binary operator" prev_start = start @register_check def comparison_to_singleton(logical_line, noqa): r"""Comparison to singletons should use "is" or "is not". Comparisons to singletons like None should always be done with "is" or "is not", never the equality operators. Okay: if arg is not None: E711: if arg != None: E711: if None == arg: E712: if arg == True: E712: if False == arg: Also, beware of writing if x when you really mean if x is not None -- e.g. when testing whether a variable or argument that defaults to None was set to some other value. The other value might have a type (such as a container) that could be false in a boolean context! """ if noqa: return for match in COMPARE_SINGLETON_REGEX.finditer(logical_line): singleton = match.group(1) or match.group(3) same = (match.group(2) == '==') msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) if singleton in ('None',): code = 'E711' else: code = 'E712' nonzero = ((singleton == 'True' and same) or (singleton == 'False' and not same)) msg += " or 'if %scond:'" % ('' if nonzero else 'not ') yield match.start(2), ("%s comparison to %s should be %s" % (code, singleton, msg)) @register_check def comparison_negative(logical_line): r"""Negative comparison should be done using "not in" and "is not". 
Okay: if x not in y:\n pass Okay: assert (X in Y or X is Z) Okay: if not (X in Y):\n pass Okay: zz = x is not y E713: Z = not X in Y E713: if not X.B in Y:\n pass E714: if not X is Y:\n pass E714: Z = not X.B is Y """ match = COMPARE_NEGATIVE_REGEX.search(logical_line) if match: pos = match.start(1) if match.group(2) == 'in': yield pos, "E713 test for membership should be 'not in'" else: yield pos, "E714 test for object identity should be 'is not'" @register_check def comparison_type(logical_line, noqa): r"""Object type comparisons should always use isinstance(). Do not compare types directly. Okay: if isinstance(obj, int): E721: if type(obj) is type(1): """ match = COMPARE_TYPE_REGEX.search(logical_line) if match and not noqa: inst = match.group(1) if inst and inst.isidentifier() and inst not in SINGLETONS: return # Allow comparison for types which are not obvious yield match.start(), "E721 do not compare types, use 'isinstance()'" @register_check def bare_except(logical_line, noqa): r"""When catching exceptions, mention specific exceptions when possible. Okay: except Exception: Okay: except BaseException: E722: except: """ if noqa: return match = BLANK_EXCEPT_REGEX.match(logical_line) if match: yield match.start(), "E722 do not use bare 'except'" @register_check def ambiguous_identifier(logical_line, tokens): r"""Never use the characters 'l', 'O', or 'I' as variable names. In some fonts, these characters are indistinguishable from the numerals one and zero. When tempted to use 'l', use 'L' instead. Okay: L = 0 Okay: o = 123 Okay: i = 42 E741: l = 0 E741: O = 123 E741: I = 42 Variables can be bound in several other contexts, including class and function definitions, 'global' and 'nonlocal' statements, exception handlers, and 'with' and 'for' statements. In addition, we have a special handling for function parameters. 
Okay: except AttributeError as o: Okay: with lock as L: Okay: foo(l=12) Okay: for a in foo(l=12): E741: except AttributeError as O: E741: with lock as l: E741: global I E741: nonlocal l E741: def foo(l): E741: def foo(l=12): E741: l = foo(l=12) E741: for l in range(10): E742: class I(object): E743: def l(x): """ is_func_def = False # Set to true if 'def' is found parameter_parentheses_level = 0 idents_to_avoid = ('l', 'O', 'I') prev_type, prev_text, prev_start, prev_end, __ = tokens[0] for token_type, text, start, end, line in tokens[1:]: ident = pos = None # find function definitions if prev_text == 'def': is_func_def = True # update parameter parentheses level if parameter_parentheses_level == 0 and \ prev_type == tokenize.NAME and \ token_type == tokenize.OP and text == '(': parameter_parentheses_level = 1 elif parameter_parentheses_level > 0 and \ token_type == tokenize.OP: if text == '(': parameter_parentheses_level += 1 elif text == ')': parameter_parentheses_level -= 1 # identifiers on the lhs of an assignment operator if token_type == tokenize.OP and '=' in text and \ parameter_parentheses_level == 0: if prev_text in idents_to_avoid: ident = prev_text pos = prev_start # identifiers bound to values with 'as', 'for', # 'global', or 'nonlocal' if prev_text in ('as', 'for', 'global', 'nonlocal'): if text in idents_to_avoid: ident = text pos = start # function parameter definitions if is_func_def: if text in idents_to_avoid: ident = text pos = start if prev_text == 'class': if text in idents_to_avoid: yield start, "E742 ambiguous class definition '%s'" % text if prev_text == 'def': if text in idents_to_avoid: yield start, "E743 ambiguous function definition '%s'" % text if ident: yield pos, "E741 ambiguous variable name '%s'" % ident prev_type = token_type prev_text = text prev_start = start @register_check def python_3000_has_key(logical_line, noqa): r"""The {}.has_key() method is removed in Python 3: use the 'in' operator. Okay: if "alph" in d:\n print d["alph"] W601: assert d.has_key('alph') """ pos = logical_line.find('.has_key(') if pos > -1 and not noqa: yield pos, "W601 .has_key() is deprecated, use 'in'" @register_check def python_3000_raise_comma(logical_line): r"""When raising an exception, use "raise ValueError('message')". The older form is removed in Python 3. Okay: raise DummyError("Message") W602: raise DummyError, "Message" """ match = RAISE_COMMA_REGEX.match(logical_line) if match and not RERAISE_COMMA_REGEX.match(logical_line): yield match.end() - 1, "W602 deprecated form of raising exception" @register_check def python_3000_not_equal(logical_line): r"""New code should always use != instead of <>. The older syntax is removed in Python 3. Okay: if a != 'no': W603: if a <> 'no': """ pos = logical_line.find('<>') if pos > -1: yield pos, "W603 '<>' is deprecated, use '!='" @register_check def python_3000_backticks(logical_line): r"""Use repr() instead of backticks in Python 3. Okay: val = repr(1 + 2) W604: val = `1 + 2` """ pos = logical_line.find('`') if pos > -1: yield pos, "W604 backticks are deprecated, use 'repr()'" @register_check def python_3000_invalid_escape_sequence(logical_line, tokens, noqa): r"""Invalid escape sequences are deprecated in Python 3.6. 
Okay: regex = r'\.png$' W605: regex = '\.png$' """ if noqa: return # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals valid = [ '\n', '\\', '\'', '"', 'a', 'b', 'f', 'n', 'r', 't', 'v', '0', '1', '2', '3', '4', '5', '6', '7', 'x', # Escape sequences only recognized in string literals 'N', 'u', 'U', ] for token_type, text, start, end, line in tokens: if token_type == tokenize.STRING: start_line, start_col = start quote = text[-3:] if text[-3:] in ('"""', "'''") else text[-1] # Extract string modifiers (e.g. u or r) quote_pos = text.index(quote) prefix = text[:quote_pos].lower() start = quote_pos + len(quote) string = text[start:-len(quote)] if 'r' not in prefix: pos = string.find('\\') while pos >= 0: pos += 1 if string[pos] not in valid: line = start_line + string.count('\n', 0, pos) if line == start_line: col = start_col + len(prefix) + len(quote) + pos else: col = pos - string.rfind('\n', 0, pos) - 1 yield ( (line, col - 1), "W605 invalid escape sequence '\\%s'" % string[pos], ) pos = string.find('\\', pos + 1) @register_check def python_3000_async_await_keywords(logical_line, tokens): """'async' and 'await' are reserved keywords starting at Python 3.7. W606: async = 42 W606: await = 42 Okay: async def read(db):\n data = await db.fetch('SELECT ...') """ # The Python tokenize library before Python 3.5 recognizes # async/await as a NAME token. Therefore, use a state machine to # look for the possible async/await constructs as defined by the # Python grammar: # https://docs.python.org/3/reference/grammar.html state = None for token_type, text, start, end, line in tokens: error = False if token_type == tokenize.NL: continue if state is None: if token_type == tokenize.NAME: if text == 'async': state = ('async_stmt', start) elif text == 'await': state = ('await', start) elif (token_type == tokenize.NAME and text in ('def', 'for')): state = ('define', start) elif state[0] == 'async_stmt': if token_type == tokenize.NAME and text in ('def', 'with', 'for'): # One of funcdef, with_stmt, or for_stmt. Return to # looking for async/await names. state = None else: error = True elif state[0] == 'await': if token_type == tokenize.NAME: # An await expression. Return to looking for async/await # names. state = None elif token_type == tokenize.OP and text == '(': state = None else: error = True elif state[0] == 'define': if token_type == tokenize.NAME and text in ('async', 'await'): error = True else: state = None if error: yield ( state[1], "W606 'async' and 'await' are reserved keywords starting with " "Python 3.7", ) state = None # Last token if state is not None: yield ( state[1], "W606 'async' and 'await' are reserved keywords starting with " "Python 3.7", ) ######################################################################## @register_check def maximum_doc_length(logical_line, max_doc_length, noqa, tokens): r"""Limit all doc lines to a maximum of 72 characters. For flowing long blocks of text (docstrings or comments), limiting the length to 72 characters is recommended. 
Reports warning W505 """ if max_doc_length is None or noqa: return prev_token = None skip_lines = set() # Skip lines that for token_type, text, start, end, line in tokens: if token_type not in SKIP_COMMENTS.union([tokenize.STRING]): skip_lines.add(line) for token_type, text, start, end, line in tokens: # Skip lines that aren't pure strings if token_type == tokenize.STRING and skip_lines: continue if token_type in (tokenize.STRING, tokenize.COMMENT): # Only check comment-only lines if prev_token is None or prev_token in SKIP_TOKENS: lines = line.splitlines() for line_num, physical_line in enumerate(lines): if start[0] + line_num == 1 and line.startswith('#!'): return length = len(physical_line) chunks = physical_line.split() if token_type == tokenize.COMMENT: if (len(chunks) == 2 and length - len(chunks[-1]) < MAX_DOC_LENGTH): continue if len(chunks) == 1 and line_num + 1 < len(lines): if (len(chunks) == 1 and length - len(chunks[-1]) < MAX_DOC_LENGTH): continue if length > max_doc_length: doc_error = (start[0] + line_num, max_doc_length) yield (doc_error, "W505 doc line too long " "(%d > %d characters)" % (length, max_doc_length)) prev_token = token_type ######################################################################## # Helper functions ######################################################################## def readlines(filename): """Read the source code.""" try: with tokenize.open(filename) as f: return f.readlines() except (LookupError, SyntaxError, UnicodeError): # Fall back if file encoding is improperly declared with open(filename, encoding='latin-1') as f: return f.readlines() def stdin_get_value(): """Read the value from stdin.""" return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() noqa = lru_cache(512)(re.compile(r'# no(?:qa|pep8)\b', re.I).search) def expand_indent(line): r"""Return the amount of indentation. Tabs are expanded to the next multiple of 8. >>> expand_indent(' ') 4 >>> expand_indent('\t') 8 >>> expand_indent(' \t') 8 >>> expand_indent(' \t') 16 """ line = line.rstrip('\n\r') if '\t' not in line: return len(line) - len(line.lstrip()) result = 0 for char in line: if char == '\t': result = result // 8 * 8 + 8 elif char == ' ': result += 1 else: break return result def mute_string(text): """Replace contents with 'xxx' to prevent syntax matching. >>> mute_string('"abc"') '"xxx"' >>> mute_string("'''abc'''") "'''xxx'''" >>> mute_string("r'abc'") "r'xxx'" """ # String modifiers (e.g. u or r) start = text.index(text[-1]) + 1 end = len(text) - 1 # Triple quotes if text[-3:] in ('"""', "'''"): start += 2 end -= 2 return text[:start] + 'x' * (end - start) + text[end:] def parse_udiff(diff, patterns=None, parent='.'): """Return a dictionary of matching lines.""" # For each file of the diff, the entry key is the filename, # and the value is a set of row numbers to consider. 
rv = {} path = nrows = None for line in diff.splitlines(): if nrows: if line[:1] != '-': nrows -= 1 continue if line[:3] == '@@ ': hunk_match = HUNK_REGEX.match(line) (row, nrows) = (int(g or '1') for g in hunk_match.groups()) rv[path].update(range(row, row + nrows)) elif line[:3] == '+++': path = line[4:].split('\t', 1)[0] # Git diff will use (i)ndex, (w)ork tree, (c)ommit and # (o)bject instead of a/b/c/d as prefixes for patches if path[:2] in ('b/', 'w/', 'i/'): path = path[2:] rv[path] = set() return { os.path.join(parent, filepath): rows for (filepath, rows) in rv.items() if rows and filename_match(filepath, patterns) } def normalize_paths(value, parent=os.curdir): """Parse a comma-separated list of paths. Return a list of absolute paths. """ if not value: return [] if isinstance(value, list): return value paths = [] for path in value.split(','): path = path.strip() if '/' in path: path = os.path.abspath(os.path.join(parent, path)) paths.append(path.rstrip('/')) return paths def filename_match(filename, patterns, default=True): """Check if patterns contains a pattern that matches filename. If patterns is unspecified, this always returns True. """ if not patterns: return default return any(fnmatch(filename, pattern) for pattern in patterns) def update_counts(s, counts): r"""Adds one to the counts of each appearance of characters in s, for characters in counts""" for char in s: if char in counts: counts[char] += 1 def _is_eol_token(token): return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' ######################################################################## # Framework to run all checks ######################################################################## class Checker: """Load a Python source file, tokenize it, check coding style.""" def __init__(self, filename=None, lines=None, options=None, report=None, **kwargs): if options is None: options = StyleGuide(kwargs).options else: assert not kwargs self._io_error = None self._physical_checks = options.physical_checks self._logical_checks = options.logical_checks self._ast_checks = options.ast_checks self.max_line_length = options.max_line_length self.max_doc_length = options.max_doc_length self.indent_size = options.indent_size self.multiline = False # in a multiline string? self.hang_closing = options.hang_closing self.indent_size = options.indent_size self.verbose = options.verbose self.filename = filename # Dictionary where a checker can store its custom state. 
self._checker_states = {} if filename is None: self.filename = 'stdin' self.lines = lines or [] elif filename == '-': self.filename = 'stdin' self.lines = stdin_get_value().splitlines(True) elif lines is None: try: self.lines = readlines(filename) except OSError: (exc_type, exc) = sys.exc_info()[:2] self._io_error = f'{exc_type.__name__}: {exc}' self.lines = [] else: self.lines = lines if self.lines: ord0 = ord(self.lines[0][0]) if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM if ord0 == 0xfeff: self.lines[0] = self.lines[0][1:] elif self.lines[0][:3] == '\xef\xbb\xbf': self.lines[0] = self.lines[0][3:] self.report = report or options.report self.report_error = self.report.error self.noqa = False def report_invalid_syntax(self): """Check if the syntax is valid.""" (exc_type, exc) = sys.exc_info()[:2] if len(exc.args) > 1: offset = exc.args[1] if len(offset) > 2: offset = offset[1:3] else: offset = (1, 0) self.report_error(offset[0], offset[1] or 0, f'E901 {exc_type.__name__}: {exc.args[0]}', self.report_invalid_syntax) def readline(self): """Get the next line from the input buffer.""" if self.line_number >= self.total_lines: return '' line = self.lines[self.line_number] self.line_number += 1 if self.indent_char is None and line[:1] in WHITESPACE: self.indent_char = line[0] return line def run_check(self, check, argument_names): """Run a check plugin.""" arguments = [] for name in argument_names: arguments.append(getattr(self, name)) return check(*arguments) def init_checker_state(self, name, argument_names): """Prepare custom state for the specific checker plugin.""" if 'checker_state' in argument_names: self.checker_state = self._checker_states.setdefault(name, {}) def check_physical(self, line): """Run all physical checks on a raw input line.""" self.physical_line = line for name, check, argument_names in self._physical_checks: self.init_checker_state(name, argument_names) result = self.run_check(check, argument_names) if result is not None: (offset, text) = result self.report_error(self.line_number, offset, text, check) if text[:4] == 'E101': self.indent_char = line[0] def build_tokens_line(self): """Build a logical line from tokens.""" logical = [] comments = [] length = 0 prev_row = prev_col = mapping = None for token_type, text, start, end, line in self.tokens: if token_type in SKIP_TOKENS: continue if not mapping: mapping = [(0, start)] if token_type == tokenize.COMMENT: comments.append(text) continue if token_type == tokenize.STRING: text = mute_string(text) if prev_row: (start_row, start_col) = start if prev_row != start_row: # different row prev_text = self.lines[prev_row - 1][prev_col - 1] if prev_text == ',' or (prev_text not in '{[(' and text not in '}])'): text = ' ' + text elif prev_col != start_col: # different column text = line[prev_col:start_col] + text logical.append(text) length += len(text) mapping.append((length, end)) (prev_row, prev_col) = end self.logical_line = ''.join(logical) self.noqa = comments and noqa(''.join(comments)) return mapping def check_logical(self): """Build a line from tokens and run all logical checks on it.""" self.report.increment_logical_line() mapping = self.build_tokens_line() if not mapping: return mapping_offsets = [offset for offset, _ in mapping] (start_row, start_col) = mapping[0][1] start_line = self.lines[start_row - 1] self.indent_level = expand_indent(start_line[:start_col]) if self.blank_before < self.blank_lines: self.blank_before = self.blank_lines if self.verbose >= 2: print(self.logical_line[:80].rstrip()) for name, check, 
argument_names in self._logical_checks: if self.verbose >= 4: print(' ' + name) self.init_checker_state(name, argument_names) for offset, text in self.run_check(check, argument_names) or (): if not isinstance(offset, tuple): # As mappings are ordered, bisecting is a fast way # to find a given offset in them. token_offset, pos = mapping[bisect.bisect_left( mapping_offsets, offset)] offset = (pos[0], pos[1] + offset - token_offset) self.report_error(offset[0], offset[1], text, check) if self.logical_line: self.previous_indent_level = self.indent_level self.previous_logical = self.logical_line if not self.indent_level: self.previous_unindented_logical_line = self.logical_line self.blank_lines = 0 self.tokens = [] def check_ast(self): """Build the file's AST and run all AST checks.""" try: tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST) except (ValueError, SyntaxError, TypeError): return self.report_invalid_syntax() for name, cls, __ in self._ast_checks: checker = cls(tree, self.filename) for lineno, offset, text, check in checker.run(): if not self.lines or not noqa(self.lines[lineno - 1]): self.report_error(lineno, offset, text, check) def generate_tokens(self): """Tokenize file, run physical line checks and yield tokens.""" if self._io_error: self.report_error(1, 0, 'E902 %s' % self._io_error, readlines) tokengen = tokenize.generate_tokens(self.readline) try: prev_physical = '' for token in tokengen: if token[2][0] > self.total_lines: return self.noqa = token[4] and noqa(token[4]) self.maybe_check_physical(token, prev_physical) yield token prev_physical = token[4] except (SyntaxError, tokenize.TokenError): self.report_invalid_syntax() def maybe_check_physical(self, token, prev_physical): """If appropriate for token, check current physical line(s).""" # Called after every token, but act only on end of line. # a newline token ends a single physical line. if _is_eol_token(token): # if the file does not end with a newline, the NEWLINE # token is inserted by the parser, but it does not contain # the previous physical line in `token[4]` if token[4] == '': self.check_physical(prev_physical) else: self.check_physical(token[4]) elif token[0] == tokenize.STRING and '\n' in token[1]: # Less obviously, a string that contains newlines is a # multiline string, either triple-quoted or with internal # newlines backslash-escaped. Check every physical line in # the string *except* for the last one: its newline is # outside of the multiline string, so we consider it a # regular physical line, and will check it like any other # physical line. 
# # Subtleties: # - we don't *completely* ignore the last line; if it # contains the magical "# noqa" comment, we disable all # physical checks for the entire multiline string # - have to wind self.line_number back because initially it # points to the last line of the string, and we want # check_physical() to give accurate feedback if noqa(token[4]): return self.multiline = True self.line_number = token[2][0] _, src, (_, offset), _, _ = token src = self.lines[self.line_number - 1][:offset] + src for line in src.split('\n')[:-1]: self.check_physical(line + '\n') self.line_number += 1 self.multiline = False def check_all(self, expected=None, line_offset=0): """Run all checks on the input file.""" self.report.init_file(self.filename, self.lines, expected, line_offset) self.total_lines = len(self.lines) if self._ast_checks: self.check_ast() self.line_number = 0 self.indent_char = None self.indent_level = self.previous_indent_level = 0 self.previous_logical = '' self.previous_unindented_logical_line = '' self.tokens = [] self.blank_lines = self.blank_before = 0 parens = 0 for token in self.generate_tokens(): self.tokens.append(token) token_type, text = token[0:2] if self.verbose >= 3: if token[2][0] == token[3][0]: pos = '[{}:{}]'.format(token[2][1] or '', token[3][1]) else: pos = 'l.%s' % token[3][0] print('l.%s\t%s\t%s\t%r' % (token[2][0], pos, tokenize.tok_name[token[0]], text)) if token_type == tokenize.OP: if text in '([{': parens += 1 elif text in '}])': parens -= 1 elif not parens: if token_type in NEWLINE: if token_type == tokenize.NEWLINE: self.check_logical() self.blank_before = 0 elif len(self.tokens) == 1: # The physical line contains only this token. self.blank_lines += 1 del self.tokens[0] else: self.check_logical() if self.tokens: self.check_physical(self.lines[-1]) self.check_logical() return self.report.get_file_results() class BaseReport: """Collect the results of the checks.""" print_filename = False def __init__(self, options): self._benchmark_keys = options.benchmark_keys self._ignore_code = options.ignore_code # Results self.elapsed = 0 self.total_errors = 0 self.counters = dict.fromkeys(self._benchmark_keys, 0) self.messages = {} def start(self): """Start the timer.""" self._start_time = time.time() def stop(self): """Stop the timer.""" self.elapsed = time.time() - self._start_time def init_file(self, filename, lines, expected, line_offset): """Signal a new file.""" self.filename = filename self.lines = lines self.expected = expected or () self.line_offset = line_offset self.file_errors = 0 self.counters['files'] += 1 self.counters['physical lines'] += len(lines) def increment_logical_line(self): """Signal a new logical line.""" self.counters['logical lines'] += 1 def error(self, line_number, offset, text, check): """Report an error, according to options.""" code = text[:4] if self._ignore_code(code): return if code in self.counters: self.counters[code] += 1 else: self.counters[code] = 1 self.messages[code] = text[5:] # Don't care about expected errors or warnings if code in self.expected: return if self.print_filename and not self.file_errors: print(self.filename) self.file_errors += 1 self.total_errors += 1 return code def get_file_results(self): """Return the count of errors and warnings for this file.""" return self.file_errors def get_count(self, prefix=''): """Return the total count of errors and warnings.""" return sum(self.counters[key] for key in self.messages if key.startswith(prefix)) def get_statistics(self, prefix=''): """Get statistics for message codes that 
start with the prefix. prefix='' matches all errors and warnings prefix='E' matches all errors prefix='W' matches all warnings prefix='E4' matches all errors that have to do with imports """ return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) for key in sorted(self.messages) if key.startswith(prefix)] def print_statistics(self, prefix=''): """Print overall statistics (number of errors and warnings).""" for line in self.get_statistics(prefix): print(line) def print_benchmark(self): """Print benchmark numbers.""" print('{:<7.2f} {}'.format(self.elapsed, 'seconds elapsed')) if self.elapsed: for key in self._benchmark_keys: print('%-7d %s per second (%d total)' % (self.counters[key] / self.elapsed, key, self.counters[key])) class FileReport(BaseReport): """Collect the results of the checks and print the filenames.""" print_filename = True class StandardReport(BaseReport): """Collect and print the results of the checks.""" def __init__(self, options): super().__init__(options) self._fmt = REPORT_FORMAT.get(options.format.lower(), options.format) self._repeat = options.repeat self._show_source = options.show_source self._show_pep8 = options.show_pep8 def init_file(self, filename, lines, expected, line_offset): """Signal a new file.""" self._deferred_print = [] return super().init_file( filename, lines, expected, line_offset) def error(self, line_number, offset, text, check): """Report an error, according to options.""" code = super().error(line_number, offset, text, check) if code and (self.counters[code] == 1 or self._repeat): self._deferred_print.append( (line_number, offset, code, text[5:], check.__doc__)) return code def get_file_results(self): """Print results and return the overall count for this file.""" self._deferred_print.sort() for line_number, offset, code, text, doc in self._deferred_print: print(self._fmt % { 'path': self.filename, 'row': self.line_offset + line_number, 'col': offset + 1, 'code': code, 'text': text, }) if self._show_source: if line_number > len(self.lines): line = '' else: line = self.lines[line_number - 1] print(line.rstrip()) print(re.sub(r'\S', ' ', line[:offset]) + '^') if self._show_pep8 and doc: print(' ' + doc.strip()) # stdout is block buffered when not stdout.isatty(). # line can be broken where buffer boundary since other # processes write to same file. # flush() after print() to avoid buffer boundary. # Typical buffer size is 8192. line written safely when # len(line) < 8192. 
sys.stdout.flush() return self.file_errors class DiffReport(StandardReport): """Collect and print the results for the changed lines only.""" def __init__(self, options): super().__init__(options) self._selected = options.selected_lines def error(self, line_number, offset, text, check): if line_number not in self._selected[self.filename]: return return super().error(line_number, offset, text, check) class StyleGuide: """Initialize a PEP-8 instance with few options.""" def __init__(self, *args, **kwargs): # build options from the command line self.checker_class = kwargs.pop('checker_class', Checker) parse_argv = kwargs.pop('parse_argv', False) config_file = kwargs.pop('config_file', False) parser = kwargs.pop('parser', None) # build options from dict options_dict = dict(*args, **kwargs) arglist = None if parse_argv else options_dict.get('paths', None) verbose = options_dict.get('verbose', None) options, self.paths = process_options( arglist, parse_argv, config_file, parser, verbose) if options_dict: options.__dict__.update(options_dict) if 'paths' in options_dict: self.paths = options_dict['paths'] self.runner = self.input_file self.options = options if not options.reporter: options.reporter = BaseReport if options.quiet else StandardReport options.select = tuple(options.select or ()) if not (options.select or options.ignore or options.testsuite or options.doctest) and DEFAULT_IGNORE: # The default choice: ignore controversial checks options.ignore = tuple(DEFAULT_IGNORE.split(',')) else: # Ignore all checks which are not explicitly selected options.ignore = ('',) if options.select else tuple(options.ignore) options.benchmark_keys = BENCHMARK_KEYS[:] options.ignore_code = self.ignore_code options.physical_checks = self.get_checks('physical_line') options.logical_checks = self.get_checks('logical_line') options.ast_checks = self.get_checks('tree') self.init_report() def init_report(self, reporter=None): """Initialize the report instance.""" self.options.report = (reporter or self.options.reporter)(self.options) return self.options.report def check_files(self, paths=None): """Run all checks on the paths.""" if paths is None: paths = self.paths report = self.options.report runner = self.runner report.start() try: for path in paths: if os.path.isdir(path): self.input_dir(path) elif not self.excluded(path): runner(path) except KeyboardInterrupt: print('... stopped') report.stop() return report def input_file(self, filename, lines=None, expected=None, line_offset=0): """Run all checks on a Python source file.""" if self.options.verbose: print('checking %s' % filename) fchecker = self.checker_class( filename, lines=lines, options=self.options) return fchecker.check_all(expected=expected, line_offset=line_offset) def input_dir(self, dirname): """Check all files in this directory and all subdirectories.""" dirname = dirname.rstrip('/') if self.excluded(dirname): return 0 counters = self.options.report.counters verbose = self.options.verbose filepatterns = self.options.filename runner = self.runner for root, dirs, files in os.walk(dirname): if verbose: print('directory ' + root) counters['directories'] += 1 for subdir in sorted(dirs): if self.excluded(subdir, root): dirs.remove(subdir) for filename in sorted(files): # contain a pattern that matches? if ( filename_match(filename, filepatterns) and not self.excluded(filename, root) ): runner(os.path.join(root, filename)) def excluded(self, filename, parent=None): """Check if the file should be excluded. 
Check if 'options.exclude' contains a pattern matching filename. """ if not self.options.exclude: return False basename = os.path.basename(filename) if filename_match(basename, self.options.exclude): return True if parent: filename = os.path.join(parent, filename) filename = os.path.abspath(filename) return filename_match(filename, self.options.exclude) def ignore_code(self, code): """Check if the error code should be ignored. If 'options.select' contains a prefix of the error code, return False. Else, if 'options.ignore' contains a prefix of the error code, return True. """ if len(code) < 4 and any(s.startswith(code) for s in self.options.select): return False return (code.startswith(self.options.ignore) and not code.startswith(self.options.select)) def get_checks(self, argument_name): """Get all the checks for this category. Find all globally visible functions where the first argument name starts with argument_name and which contain selected tests. """ checks = [] for check, attrs in _checks[argument_name].items(): (codes, args) = attrs if any(not (code and self.ignore_code(code)) for code in codes): checks.append((check.__name__, check, args)) return sorted(checks) def get_parser(prog='pycodestyle', version=__version__): """Create the parser for the program.""" parser = OptionParser(prog=prog, version=version, usage="%prog [options] input ...") parser.config_options = [ 'exclude', 'filename', 'select', 'ignore', 'max-line-length', 'max-doc-length', 'indent-size', 'hang-closing', 'count', 'format', 'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose'] parser.add_option('-v', '--verbose', default=0, action='count', help="print status messages, or debug with -vv") parser.add_option('-q', '--quiet', default=0, action='count', help="report only file names, or nothing with -qq") parser.add_option('-r', '--repeat', default=True, action='store_true', help="(obsolete) show all occurrences of the same error") parser.add_option('--first', action='store_false', dest='repeat', help="show first occurrence of each error") parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, help="exclude files or directories which match these " "comma separated patterns (default: %default)") parser.add_option('--filename', metavar='patterns', default='*.py', help="when parsing directories, only check filenames " "matching these comma separated patterns " "(default: %default)") parser.add_option('--select', metavar='errors', default='', help="select errors and warnings (e.g. E,W6)") parser.add_option('--ignore', metavar='errors', default='', help="skip errors and warnings (e.g. 
E4,W) " "(default: %s)" % DEFAULT_IGNORE) parser.add_option('--show-source', action='store_true', help="show source code for each error") parser.add_option('--show-pep8', action='store_true', help="show text of PEP 8 for each error " "(implies --first)") parser.add_option('--statistics', action='store_true', help="count errors and warnings") parser.add_option('--count', action='store_true', help="print total number of errors and warnings " "to standard error and set exit code to 1 if " "total is not null") parser.add_option('--max-line-length', type='int', metavar='n', default=MAX_LINE_LENGTH, help="set maximum allowed line length " "(default: %default)") parser.add_option('--max-doc-length', type='int', metavar='n', default=None, help="set maximum allowed doc line length and perform " "these checks (unchecked if not set)") parser.add_option('--indent-size', type='int', metavar='n', default=INDENT_SIZE, help="set how many spaces make up an indent " "(default: %default)") parser.add_option('--hang-closing', action='store_true', help="hang closing bracket instead of matching " "indentation of opening bracket's line") parser.add_option('--format', metavar='format', default='default', help="set the error format [default|pylint|<custom>]") parser.add_option('--diff', action='store_true', help="report changes only within line number ranges in " "the unified diff received on STDIN") group = parser.add_option_group("Testing Options") if os.path.exists(TESTSUITE_PATH): group.add_option('--testsuite', metavar='dir', help="run regression tests from dir") group.add_option('--doctest', action='store_true', help="run doctest on myself") group.add_option('--benchmark', action='store_true', help="measure processing speed") return parser def read_config(options, args, arglist, parser): """Read and parse configurations. If a config file is specified on the command line with the "--config" option, then only it is used for configuration. Otherwise, the user configuration (~/.config/pycodestyle) and any local configurations in the current directory or above will be merged together (in that order) using the read method of ConfigParser. """ config = RawConfigParser() cli_conf = options.config local_dir = os.curdir if USER_CONFIG and os.path.isfile(USER_CONFIG): if options.verbose: print('user configuration: %s' % USER_CONFIG) config.read(USER_CONFIG) parent = tail = args and os.path.abspath(os.path.commonprefix(args)) while tail: if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG): local_dir = parent if options.verbose: print('local configuration: in %s' % parent) break (parent, tail) = os.path.split(parent) if cli_conf and os.path.isfile(cli_conf): if options.verbose: print('cli configuration: %s' % cli_conf) config.read(cli_conf) pycodestyle_section = None if config.has_section(parser.prog): pycodestyle_section = parser.prog elif config.has_section('pep8'): pycodestyle_section = 'pep8' # Deprecated warnings.warn('[pep8] section is deprecated. 
Use [pycodestyle].') if pycodestyle_section: option_list = {o.dest: o.type or o.action for o in parser.option_list} # First, read the default values (new_options, __) = parser.parse_args([]) # Second, parse the configuration for opt in config.options(pycodestyle_section): if opt.replace('_', '-') not in parser.config_options: print(" unknown option '%s' ignored" % opt) continue if options.verbose > 1: print(" {} = {}".format(opt, config.get(pycodestyle_section, opt))) normalized_opt = opt.replace('-', '_') opt_type = option_list[normalized_opt] if opt_type in ('int', 'count'): value = config.getint(pycodestyle_section, opt) elif opt_type in ('store_true', 'store_false'): value = config.getboolean(pycodestyle_section, opt) else: value = config.get(pycodestyle_section, opt) if normalized_opt == 'exclude': value = normalize_paths(value, local_dir) setattr(new_options, normalized_opt, value) # Third, overwrite with the command-line options (options, __) = parser.parse_args(arglist, values=new_options) options.doctest = options.testsuite = False return options def process_options(arglist=None, parse_argv=False, config_file=None, parser=None, verbose=None): """Process options passed either via arglist or command line args. Passing in the ``config_file`` parameter allows other tools, such as flake8 to specify their own options to be processed in pycodestyle. """ if not parser: parser = get_parser() if not parser.has_option('--config'): group = parser.add_option_group("Configuration", description=( "The project options are read from the [%s] section of the " "tox.ini file or the setup.cfg file located in any parent folder " "of the path(s) being processed. Allowed options are: %s." % (parser.prog, ', '.join(parser.config_options)))) group.add_option('--config', metavar='path', default=config_file, help="user config file location") # Don't read the command line if the module is used as a library. if not arglist and not parse_argv: arglist = [] # If parse_argv is True and arglist is None, arguments are # parsed from the command line (sys.argv) (options, args) = parser.parse_args(arglist) options.reporter = None # If explicitly specified verbosity, override any `-v` CLI flag if verbose is not None: options.verbose = verbose if options.ensure_value('testsuite', False): args.append(options.testsuite) elif not options.ensure_value('doctest', False): if parse_argv and not args: if options.diff or any(os.path.exists(name) for name in PROJECT_CONFIG): args = ['.'] else: parser.error('input not specified') options = read_config(options, args, arglist, parser) options.reporter = parse_argv and options.quiet == 1 and FileReport options.filename = _parse_multi_options(options.filename) options.exclude = normalize_paths(options.exclude) options.select = _parse_multi_options(options.select) options.ignore = _parse_multi_options(options.ignore) if options.diff: options.reporter = DiffReport stdin = stdin_get_value() options.selected_lines = parse_udiff(stdin, options.filename, args[0]) args = sorted(options.selected_lines) return options, args def _parse_multi_options(options, split_token=','): r"""Split and strip and discard empties. 
Turns the following: A, B, into ["A", "B"] """ if options: return [o.strip() for o in options.split(split_token) if o.strip()] else: return options def _main(): """Parse options and run checks on Python source.""" import signal # Handle "Broken pipe" gracefully try: signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1)) except AttributeError: pass # not supported on Windows style_guide = StyleGuide(parse_argv=True) options = style_guide.options if options.doctest or options.testsuite: from testsuite.support import run_tests report = run_tests(style_guide) else: report = style_guide.check_files() if options.statistics: report.print_statistics() if options.benchmark: report.print_benchmark() if options.testsuite and not options.quiet: report.print_results() if report.total_errors: if options.count: sys.stderr.write(str(report.total_errors) + '\n') sys.exit(1) if __name__ == '__main__': _main()
102,878
Python
.py
2,405
32.980873
89
0.57025
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
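The StyleGuide, Checker and report machinery defined above can be driven directly as a library rather than through _main(). A minimal sketch, assuming pycodestyle is importable; 'example.py' is a hypothetical path standing in for any readable source file.

# Minimal sketch of driving the StyleGuide/BaseReport machinery above as
# a library instead of via the command line. 'example.py' is hypothetical;
# quiet=True selects the silent BaseReport instead of StandardReport.
import pycodestyle

style = pycodestyle.StyleGuide(quiet=True, max_line_length=100)
report = style.check_files(['example.py'])
print('total problems:', report.total_errors)
for stat in report.get_statistics('E'):  # e.g. "1       E501 line too long ..."
    print(stat)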
28,288
anaconda_pep8.py
DamnWidget_anaconda/anaconda_lib/linting/anaconda_pep8.py
# -*- coding: utf8 -*-

# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

import os

import pycodestyle as pep8

from linting import linter


class Pep8Error(linter.LintError):
    """PEP-8 linting error class
    """

    def __init__(self, filename, loc, offset, code, text, level='E'):
        ct_tuple = (code, text)
        err_str = '[{0}] PEP 8 (%s): %s'.format(level)
        super(Pep8Error, self).__init__(
            filename, loc, level, err_str, ct_tuple, offset=offset, text=text
        )


class Pep8Warning(linter.LintError):
    """PEP-8 linting warning class
    """

    def __init__(self, filename, loc, offset, code, text, level='W'):
        ct_tuple = (code, text)
        err_str = '[{0}] PEP 8 (%s): %s'.format(level)
        super(Pep8Warning, self).__init__(
            filename, loc, level, err_str, ct_tuple, offset=offset, text=text
        )


class Pep8Linter(linter.Linter):
    """Linter for pep8
    """

    def lint(self, settings, code, filename):
        """Run the pep8 code checker with the given options
        """

        errors = []
        check_params = {
            'ignore': settings.get('pep8_ignore', []),
            'max_line_length': settings.get(
                'pep8_max_line_length', pep8.MAX_LINE_LENGTH
            ),
            'levels': settings.get('pep8_error_levels', {
                'E': 'W', 'W': 'V', 'V': 'V'
            })
        }

        errors.extend(self.check(
            code, filename, settings.get('pep8_rcfile'), **check_params
        ))

        return self.parse(errors)

    def check(self, code, filename, rcfile, ignore, max_line_length, levels):
        """Check the code with pycodestyle to find errors
        """

        messages = []
        _lines = code.split('\n')

        if _lines:

            class AnacondaReport(pep8.BaseReport):
                """Helper class to report PEP8 problems
                """

                def error(self, line_number, offset, text, check):
                    """Report an error, according to options
                    """

                    col = line_number
                    code = text[:4]
                    message = text[5:]

                    if self._ignore_code(code):
                        return
                    if code in self.counters:
                        self.counters[code] += 1
                    else:
                        self.counters[code] = 1
                        self.messages[code] = message

                    if code in self.expected:
                        return

                    self.file_errors += 1
                    self.total_errors += 1

                    pep8_error = code.startswith('E')
                    klass = Pep8Error if pep8_error else Pep8Warning
                    messages.append(klass(
                        filename, col, offset, code, message, levels[code[0]]
                    ))

                    return code

            params = {'reporter': AnacondaReport}
            if not rcfile:
                _ignore = ignore
                params['ignore'] = _ignore
            else:
                params['config_file'] = os.path.expanduser(rcfile)

            options = pep8.StyleGuide(**params).options
            if not rcfile:
                options.max_line_length = max_line_length

            good_lines = [l + '\n' for l in _lines]
            good_lines[-1] = good_lines[-1].rstrip('\n')

            if not good_lines[-1]:
                good_lines = good_lines[:-1]

            pep8.Checker(filename, good_lines, options=options).check_all()

        return messages

    def parse(self, errors):
        errors_list = []
        if errors is None:
            return errors_list

        self.sort_errors(errors)
        for error in errors:
            error_level = self.prepare_error_level(error)
            message = error.message.capitalize()
            offset = error.offset
            error_data = {
                'underline_range': True,
                'level': error_level,
                'lineno': error.lineno,
                'offset': offset,
                'message': message,
                'raw_error': str(error)
            }
            errors_list.append(error_data)

        return errors_list
4,366
Python
.py
110
26.727273
77
0.509479
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
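Pep8Linter.check works by handing pycodestyle a BaseReport subclass whose error() hook collects results instead of printing them. A stripped-down sketch of the same pattern; the class name and the sample source line are illustrative, not part of the plugin.

# Report-subclass pattern used by Pep8Linter.check above: override
# BaseReport.error() so results land in a list instead of stdout.
import pycodestyle as pep8


class CollectingReport(pep8.BaseReport):

    def __init__(self, options):
        super(CollectingReport, self).__init__(options)
        self.results = []

    def error(self, line_number, offset, text, check):
        code = super(CollectingReport, self).error(
            line_number, offset, text, check)
        if code:  # None when the code was ignored or expected
            self.results.append((line_number, offset, code, text[5:]))
        return code


style = pep8.StyleGuide(reporter=CollectingReport)
checker = pep8.Checker('stdin', ["import os,sys\n"], options=style.options)
checker.check_all()
# e.g. [(1, 9, 'E401', 'multiple imports on one line')]
print(checker.report.results)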
28,289
anaconda_pylint.py
DamnWidget_anaconda/anaconda_lib/linting/anaconda_pylint.py
# -*- coding: utf8 -*-

# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""
Anaconda PyLint wrapper
"""

import os
import sys
import logging
import subprocess

if sys.version_info >= (3, 0):
    from io import StringIO
else:
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    assert StringIO

from pylint.__pkginfo__ import numversion

from process import spawn

PIPE = subprocess.PIPE


class PyLinter(object):
    """PyLinter class for Anaconda
    """

    def __init__(self, filename, rcfile):
        self.filename = filename
        self.rcfile = rcfile
        self.output = None

        self.execute()

    def execute(self):
        """Execute the linting process
        """

        if numversion < (1, 0, 0):
            args = '--include-ids=y -r n'.split(' ')
        else:
            args = '--msg-template={msg_id}:{line}:{column}:{msg} -r n'.split(
                ' '
            )

        if self.rcfile:
            args.append('--rcfile={0}'.format(os.path.expanduser(self.rcfile)))
        args.append(self.filename)

        pylint_exec = 'pylint.lint' if numversion > (2, 4, 4) else 'pylint'
        args = [sys.executable, '-m', pylint_exec] + args
        proc = spawn(args, stdout=PIPE, stderr=PIPE, cwd=os.getcwd())
        if proc is None:
            return {'E': [], 'W': [], 'V': []}

        self.output, err = proc.communicate()
        if sys.version_info >= (3, 0):
            self.output = self.output.decode('utf8')
            err = err.decode('utf8')

        if err:
            print('got an error from pylint: {0}'.format(err))
            return {'E': [], 'W': [], 'V': []}

    def parse_errors(self):
        """Parse the output given by PyLint
        """

        errors = {'E': [], 'W': [], 'V': []}
        data = self.output

        for error in data.splitlines():
            if (
                numversion > (2, 4, 4) and not error.startswith('-') or
                error.startswith('Your code has been rated at')
            ):
                continue
            if '************* Module ' in error:
                _, module = error.split('************* Module ')
                if module not in self.filename:
                    continue
            else:
                offset = None
                try:
                    if numversion >= (1, 0, 0):
                        code, line, offset, message = error.split(':', 3)
                    else:
                        code, line, message = error.split(':', 2)
                except ValueError as exception:
                    logging.debug(
                        'unhandled exception in PyLinter parse_errors '
                        'this is a non fatal error: {0}'.format(exception)
                    )
                    logging.debug(
                        'the error string that raised this exception was: '
                        '{0}, please, report this in the GitHub site'.format(
                            error
                        )
                    )
                    continue

                if numversion < (1, 0, 0):
                    try:
                        line, offset = line.split(',')
                    except ValueError:
                        # it seems that some versions (or packagers) of
                        # pylint prior to 1.0.0 add the offset to the
                        # output but others don't
                        pass

                errors[self._map_code(code)[0]].append(
                    {
                        'line': int(line),
                        'offset': offset,
                        'code': self._map_code(code)[1],
                        'message': '[{0}] {1}'.format(
                            self._map_code(code)[1], message
                        ),
                    }
                )

        return errors

    def _map_code(self, code):
        """Map the given code to fit Anaconda codes
        """

        mapping = {'C': 'V', 'E': 'E', 'F': 'E', 'I': 'V', 'R': 'W', 'W': 'W'}
        return (mapping[code[0]], code[1:])
4,202
Python
.py
108
25.453704
79
0.468143
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
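PyLinter's contract with pylint is the --msg-template flag: it forces machine-readable 'id:line:column:message' lines that parse_errors can split on ':'. A standalone sketch of the same round trip, using subprocess.run instead of the plugin's spawn() helper; 'target.py' is hypothetical and pylint must be installed.

# Invoke pylint with the same machine-readable template PyLinter uses,
# then split each line the way parse_errors does above.
import subprocess
import sys

args = [
    sys.executable, '-m', 'pylint',
    '--msg-template={msg_id}:{line}:{column}:{msg}', '-r', 'n', 'target.py'
]
out = subprocess.run(args, capture_output=True, text=True).stdout
for line in out.splitlines():
    try:
        code, lineno, col, message = line.split(':', 3)
    except ValueError:
        continue  # "************* Module" headers and rating lines
    print(code, lineno, col, message)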
28,290
__init__.py
DamnWidget_anaconda/anaconda_lib/linting/__init__.py
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details
128
Python
.py
2
62.5
65
0.776
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,291
anaconda_mypy.py
DamnWidget_anaconda/anaconda_lib/linting/anaconda_mypy.py
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

"""
Anaconda MyPy wrapper
"""

import os
import sys
import shlex
import logging
import subprocess
from subprocess import PIPE, Popen


def parse_mypy_version():
    try:
        from mypy import main as mypy
        version = mypy.__version__
        if "dev" in version:
            # Handle when Mypy is installed directly from github:
            # eg: 0.730+dev.ddec163790d107f1fd9982f19cbfa0b6966c2eea
            version = version.split("+dev.")[0]

        # Handle old style Mypy version: 0.480-dev
        version = version.replace('-dev', '')
        tuple_version = tuple(int(i) for i in version.split('.'))
        while len(tuple_version) < 3:
            tuple_version += (0,)

        return tuple_version
    except ImportError:
        print('MyPy is enabled but we could not import it')
        logging.info('MyPy is enabled but we could not import it')
        return None


class MyPy(object):
    """MyPy class for Anaconda
    """

    VERSION = parse_mypy_version()

    def __init__(self, code, filename, mypypath, settings):
        self.code = code
        self.filename = filename
        self.mypypath = mypypath
        self.settings = settings

    @property
    def silent(self):
        """Returns True if the --silent-imports setting is present
        """

        return '--silent-imports' in self.settings

    def execute(self):
        """Check the code with MyPy check types
        """

        if MyPy.VERSION is None:
            raise RuntimeError("MyPy was not found")

        errors = []
        try:
            errors = self.check_source()
        except Exception as error:
            print(error)
            logging.error(error)

        return errors

    def check_source(self):
        """Run MyPy in a subprocess and collect its output
        """

        err_sum = '--no-error-summary'
        if MyPy.VERSION < (0, 761, 0):
            err_sum = ''

        err_ctx = '--hide-error-context'
        if MyPy.VERSION < (0, 4, 5):
            err_ctx = '--suppress-error-context'

        dont_follow_imports = "--follow-imports silent"

        args = shlex.split('\'{0}\' -O -m mypy {1} {2} {3} {4} \'{5}\''.format(
            sys.executable, err_sum, err_ctx, dont_follow_imports,
            ' '.join(self.settings[:-1]), self.filename)
        )
        env = os.environ.copy()
        if self.mypypath is not None and self.mypypath != "":
            env['MYPYPATH'] = self.mypypath

        kwargs = {
            'cwd': os.path.dirname(os.path.abspath(__file__)),
            'bufsize': -1,
            'env': env
        }
        if os.name == 'nt':
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            kwargs['startupinfo'] = startupinfo

        proc = Popen(args, stdout=PIPE, stderr=PIPE, **kwargs)
        out, err = proc.communicate()
        if err is not None and len(err) > 0:
            if sys.version_info >= (3,):
                err = err.decode('utf8')
            raise RuntimeError(err)

        if sys.version_info >= (3,):
            out = out.decode('utf8')

        errors = []
        for line in out.splitlines():
            if (self.settings[-1] and not self.silent and
                    'stub' in line.lower()):
                continue

            data = line.split(
                ':', maxsplit=3
            ) if os.name != 'nt' else line[2:].split(':', maxsplit=3)
            errors.append({
                'level': 'W',
                'lineno': int(data[1]),
                'offset': 0,
                'code': ' ',
                'raw_error': '[W] MyPy {0}: {1}'.format(
                    data[2].strip(), data[3].strip()
                ),
                'message': '[W] MyPy%s: %s',
                'underline_range': True
            })

        return errors
3,974
Python
.py
109
26.440367
79
0.54247
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
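check_source above shells out to 'python -m mypy' with version-gated flags and an optional MYPYPATH override. A sketch of the equivalent invocation with the modern flag spellings fixed in place; the target path and stub directory are hypothetical and mypy must be installed.

# Reproduce the subprocess call MyPy.check_source assembles above.
import os
import subprocess
import sys

env = os.environ.copy()
env['MYPYPATH'] = '/path/to/stubs'  # hypothetical stub search path

args = [
    sys.executable, '-m', 'mypy', '--no-error-summary',
    '--hide-error-context', '--follow-imports', 'silent', 'pkg/mod.py'
]
out = subprocess.run(args, capture_output=True, text=True, env=env).stdout
for line in out.splitlines():
    data = line.split(':', maxsplit=3)  # "file:line: severity: message"
    if len(data) == 4:
        print('line', data[1], '->', data[3].strip())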
28,292
sublime.py
DamnWidget_anaconda/anaconda_lib/linting/sublime.py
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details import os import re import time from functools import partial import sublime from . import pycodestyle as pep8 from ..worker import Worker from ..callback import Callback from ..persistent_list import PersistentList from ..helpers import ( get_settings, is_code, get_view, check_linting, LINTING_ENABLED ) from ..phantoms import Phantom sublime_api = sublime.sublime_api ANACONDA = { 'ERRORS': {}, 'WARNINGS': {}, 'VIOLATIONS': {}, 'UNDERLINES': {}, 'LAST_PULSE': time.time(), 'ALREADY_LINTED': False, 'DISABLED': PersistentList(), 'DISABLED_BUFFERS': [] } marks = { 'warning': 'dot', 'violation': 'dot', 'illegal': 'circle' } ############################################################################### # Classes ############################################################################### class Linter: """Linter class that can interacts with Sublime Linter GUI """ def __init__(self, view): self.view = view def add_message(self, lineno, lines, message, messages): # assume lineno is one-based, ST3 wants zero-based line numbers lineno -= 1 lines.add(lineno) message = message[0].upper() + message[1:] # Remove trailing period from error message unless the message is "can't import ." if message.endswith('.') and not message.endswith("import ."): message = message[:-1] if lineno in messages: messages[lineno].append(message) else: messages[lineno] = [message] def underline_range(self, lineno, position, underlines, length=1): # assume lineno is one-based, ST3 wants zero-based line numbers lineno -= 1 line = self.view.full_line(self.view.text_point(lineno, 0)) position += line.begin() for i in range(length): region = sublime.Region(position + i) if self.is_that_code(region.begin()): underlines.append(sublime.Region(position + i)) def underline_regex(self, **kwargs): # assume lineno is one-based, ST3 wants zero-based line numbers offset = 0 lineno = kwargs.get('lineno', 1) - 1 kwargs.get('lines', set()).add(lineno) line = self.view.full_line(self.view.text_point(lineno, 0)) line_text = self.view.substr(line) if kwargs.get('linematch') is not None: match = re.match(kwargs['linematch'], line_text) if match is not None: line_text = match.group('match') offset = match.start('match') else: return iters = re.finditer(kwargs.get('regex'), line_text) results = [ (r.start('underline'), r.end('underline')) for r in iters if ( kwargs.get('wordmatch') is None or r.group('underline') == kwargs.get('wordmatch') ) ] # make the lineno one-based again for underline_range lineno += 1 for start, end in results: self.underline_range( lineno, start + offset, kwargs['underlines'], end - start ) def is_that_code(self, point): """Determines if the given region is valid Python code """ matcher = 'source.python - string - comment' return self.view.match_selector(point, matcher) def parse_errors(self, errors): """Parse errors returned from the PyFlakes and pep8 libraries """ vid = self.view.id() errors_level = { 'E': {'messages': ANACONDA.get('ERRORS')[vid], 'underlines': []}, 'W': {'messages': ANACONDA.get('WARNINGS')[vid], 'underlines': []}, 'V': { 'messages': ANACONDA.get('VIOLATIONS')[vid], 'underlines': [] } } lines = set() if errors is None: return {'lines': lines, 'results': errors_level} ignore_star = get_settings(self.view, 'pyflakes_ignore_import_*', True) for error in errors: try: line_text = self.view.substr(self.view.full_line( self.view.text_point(error['lineno']-1, 0) )) if '# noqa' in 
line_text: continue except Exception as e: print(e) error_level = error.get('level', 'W') messages = errors_level[error_level]['messages'] underlines = errors_level[error_level]['underlines'] if 'raw_error' not in error: error['raw_error'] = error['message'] if 'import *' in error['raw_error'] and ignore_star: continue self.add_message( error['lineno'], lines, error['raw_error'], messages ) if error['underline_range'] is True: self.underline_range( error['lineno'], error['offset'], underlines ) elif error.get('len') is not None and error.get('regex') is None: self.underline_range( error['lineno'], error['offset'], underlines, error['len'] ) else: self.underline_regex( lines=lines, underlines=underlines, **error ) return {'lines': lines, 'results': errors_level} ############################################################################### # Global functions ############################################################################### def erase_lint_marks(view): """Erase all the lint marks """ if get_settings(view, 'anaconda_linter_phantoms', False): Phantom().clear_phantoms(view) types = ['illegal', 'warning', 'violation'] for t in types: view.erase_regions('anaconda-lint-underline-{}'.format(t)) view.erase_regions('anaconda-lint-outlines-{}'.format(t)) def add_lint_marks(view, lines, **errors): """Adds lint marks to view on the given lines. """ erase_lint_marks(view) types = { 'warning': errors['warning_underlines'], 'illegal': errors['error_underlines'], 'violation': errors['violation_underlines'], } style = get_settings(view, 'anaconda_linter_mark_style', 'outline') show_underlines = get_settings(view, 'anaconda_linter_underlines', True) if show_underlines: for type_name, underlines in types.items(): if len(underlines) > 0: view.add_regions( 'anaconda-lint-underline-{}'.format(type_name), underlines, 'anaconda.underline.{}'.format(type_name), flags=sublime.DRAW_EMPTY_AS_OVERWRITE ) if len(lines) > 0: outline_style = { 'solid_underline': sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE, # noqa 'stippled_underline': sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_STIPPLED_UNDERLINE, # noqa 'squiggly_underline': sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE, # noqa 'outline': sublime.DRAW_OUTLINED, 'none': sublime.HIDDEN, 'fill': None } gutter_theme = get_settings( view, 'anaconda_gutter_theme', 'basic').lower() package_name = os.path.dirname(__file__).rsplit(os.path.sep, 3)[1] ico_path = ( 'Packages/' + package_name + '/anaconda_lib/linting/' 'gutter_mark_themes/{theme}-{type}.png' ) if get_settings(view, 'anaconda_linter_phantoms', False): phantom = Phantom() vid = view.id() phantoms = [] for level in ['ERRORS', 'WARNINGS', 'VIOLATIONS']: for line, messages in ANACONDA.get(level)[vid].items(): for message in messages: phantoms.append({ "line": line, "level": level.lower(), "messages": message }) phantom.update_phantoms(view, phantoms) for lint_type, lints in get_outlines(view).items(): if len(lints) > 0: if get_settings(view, 'anaconda_gutter_marks', False): if gutter_theme == 'basic': gutter_marks = marks[lint_type] else: gutter_marks = ico_path.format(theme=gutter_theme, type=lint_type) else: gutter_marks = '' args = [ 'anaconda-lint-outlines-{}'.format(lint_type), lints, 'anaconda.outline.{}'.format(lint_type), gutter_marks ] draw_style = outline_style.get(style, sublime.DRAW_OUTLINED) if draw_style is not None: args.append(draw_style) view.add_regions(*args) def get_outlines(view): """Return outlines for 
the given view """ ERRORS = ANACONDA.get('ERRORS') WARNINGS = ANACONDA.get('WARNINGS') VIOLATIONS = ANACONDA.get('VIOLATIONS') vid = view.id() return { 'warning': [ view.full_line(view.text_point(l, 0)) for l in WARNINGS[vid] ], 'illegal': [ view.full_line(view.text_point(l, 0)) for l in ERRORS[vid] ], 'violation': [ view.full_line(view.text_point(l, 0)) for l in VIOLATIONS[vid] ] } def last_selected_lineno(view): """Return back the last selected line number """ sel = view.sel() return None if sel is None else view.rowcol(sel[0].end())[0] def update_statusbar(view): """Updates the status bar """ errors = get_lineno_msgs(view, last_selected_lineno(view)) if len(errors) > 0: view.set_status('Linter', '; '.join(errors)) else: view.erase_status('Linter') def get_lineno_msgs(view, lineno): """Get lineno error messages and return it back """ ERRORS = ANACONDA.get('ERRORS') WARNINGS = ANACONDA.get('WARNINGS') VIOLATIONS = ANACONDA.get('VIOLATIONS') errors_msg = [] if lineno is not None: vid = view.id() if vid in ERRORS: errors_msg.extend(ERRORS[vid].get(lineno, [])) if vid in WARNINGS: errors_msg.extend(WARNINGS[vid].get(lineno, [])) if vid in VIOLATIONS: errors_msg.extend(VIOLATIONS[vid].get(lineno, [])) return errors_msg def run_linter(view=None, hook=None): """Run the linter for the given view """ if view is None: view = sublime.active_window().active_view() window_view = (sublime.active_window().id(), view.id()) if (view.file_name() in ANACONDA['DISABLED'] or window_view in ANACONDA['DISABLED_BUFFERS']): erase_lint_marks(view) return settings = { 'pep8': get_settings(view, 'pep8', True), 'pep8_ignore': get_settings(view, 'pep8_ignore', []), 'pep8_max_line_length': get_settings( view, 'pep8_max_line_length', pep8.MAX_LINE_LENGTH), 'pep8_error_levels': get_settings(view, 'pep8_error_levels', None), 'pyflakes_ignore': get_settings(view, 'pyflakes_ignore', []), 'pyflakes_disabled': get_settings( view, 'pyflakes_disabled', False), 'use_pylint': get_settings(view, 'use_pylint', False), 'use_pep257': get_settings(view, 'pep257', False), 'validate_imports': get_settings(view, 'validate_imports', False), 'pep257_ignore': get_settings(view, 'pep257_ignore', []), 'pep8_rcfile': get_settings(view, 'pep8_rcfile'), 'pylint_rcfile': get_settings(view, 'pylint_rcfile'), 'pylint_ignores': get_settings(view, 'pylint_ignore'), 'pyflakes_explicit_ignore': get_settings( view, 'pyflakes_explicit_ignore', []), 'use_mypy': get_settings(view, 'mypy', False), 'mypy_settings': get_mypy_settings(view), 'mypypath': get_settings(view, 'mypy_mypypath', ''), 'python_interpreter': get_settings(view, 'python_interpreter', ''), } text = view.substr(sublime.Region(0, view.size())) data = { 'vid': view.id(), 'code': text, 'settings': settings, 'filename': view.file_name(), 'method': 'lint', 'handler': 'python_linter' } if hook is None: Worker().execute(Callback(on_success=parse_results), **data) else: Worker().execute(Callback(partial(hook, parse_results)), **data) def get_mypy_settings(view): """Get MyPy related settings """ mypy_settings = [] if get_settings(view, 'mypy_silent_imports', False): mypy_settings.append('--ignore-missing-imports') mypy_settings.append('--follow-imports=skip') if get_settings(view, 'mypy_almost_silent', False): mypy_settings.append('--follow-imports=error') if get_settings(view, 'mypy_py2', False): mypy_settings.append('--py2') if get_settings(view, 'mypy_disallow_untyped_calls', False): mypy_settings.append('--disallow-untyped-calls') if get_settings(view, 'mypy_disallow_untyped_defs', False): 
mypy_settings.append('--disallow-untyped-defs') if get_settings(view, 'mypy_check_untyped_defs', False): mypy_settings.append('--check-untyped-defs') if get_settings(view, 'mypy_fast_parser', False): mypy_settings.append('--fast-parser') custom_typing = get_settings(view, 'mypy_custom_typing', None) if custom_typing is not None: mypy_settings.append('--custom-typing') mypy_settings.append(custom_typing) mypy_settings.append('--incremental') # use cache always mypy_settings.append( get_settings(view, 'mypy_suppress_stub_warnings', False) ) return mypy_settings def parse_results(data, code='python'): """Parse the results from the server """ view = get_view(sublime.active_window(), data['vid']) if data and data['success'] is False or not is_code(view, code, True): if get_settings(view, 'use_pylint', False) is True: for p in data['errors']: print(p) return # Check if linting was disabled between now and when the request was sent # to the server. window_view = (sublime.active_window().id(), view.id()) if (not check_linting(view, LINTING_ENABLED) or view.file_name() in ANACONDA['DISABLED'] or window_view in ANACONDA['DISABLED_BUFFERS']): return vid = view.id() ANACONDA['ERRORS'][vid] = {} ANACONDA['WARNINGS'][vid] = {} ANACONDA['VIOLATIONS'][vid] = {} results = Linter(view).parse_errors(data['errors']) errors = results['results'] lines = results['lines'] ANACONDA['UNDERLINES'][vid] = errors['E']['underlines'][:] ANACONDA['UNDERLINES'][vid].extend(errors['V']['underlines']) ANACONDA['UNDERLINES'][vid].extend(errors['W']['underlines']) errors = { 'error_underlines': errors['E']['underlines'], 'warning_underlines': errors['W']['underlines'], 'violation_underlines': errors['V']['underlines'] } add_lint_marks(view, lines, **errors) update_statusbar(view)
15,673
Python
.py
374
32.259358
123
0.570124
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
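Linter.parse_errors in sublime.py consumes plain dicts from the worker process, keyed by level/lineno/offset/message/raw_error/underline_range (plus the optional len/regex/linematch fields). A Sublime-free sketch of that payload shape and the per-level bucketing that errors_level performs above; both entries are made up.

# Shape of the error payload parse_errors expects, grouped per level.
errors = [
    {'level': 'E', 'lineno': 3, 'offset': 0, 'underline_range': True,
     'message': "Undefined name 'foo'",
     'raw_error': "[E] Undefined name 'foo'"},
    {'level': 'V', 'lineno': 9, 'offset': 79, 'underline_range': True,
     'message': 'PEP 8 (E501): line too long',
     'raw_error': '[V] PEP 8 (E501): line too long'},
]

by_level = {'E': [], 'W': [], 'V': []}
for error in errors:
    by_level[error.get('level', 'W')].append(error)
print({level: len(errs) for level, errs in by_level.items()})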
28,293
linter.py
DamnWidget_anaconda/anaconda_lib/linting/linter.py
# -*- coding: utf8 -*- # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org> # This program is Free Software see LICENSE file for details """ The majority of this code is based/inspired or directly taken from SublimeLinter plugin. Because that, SublimeLinter license file has been added to this package. This doesn't meant that anaconda is a fork of SublimeLinter in any way or that anaconda is going to be updated with latest SublimeLinter updates. Anaconda and SublimeLinter are two completely separated projects but they did a great job with SublimeLinter so we are reusing part of their plugin to lint our Python scripts. The main difference between SublimeLinter linting and anaconda one is that the former lints always for Python3.3 even if we are coding in a Python 2 project. Anaconda lints for the configured Python environment """ import os import re import sys sys.path.insert(0, os.path.dirname(__file__)) import _ast # noqa import pycodestyle as pep8 # noqa import pyflakes.checker as pyflakes # noqa if sys.version_info < (2, 7): def cmp_to_key(mycmp): """Convert a cmp= function into a key= function """ class K(object): __slots__ = ['obj'] def __init__(self, obj, *args): self.obj = obj def __lt__(self, other): return mycmp(self.obj, other.obj) < 0 def __gt__(self, other): return mycmp(self.obj, other.obj) > 0 def __eq__(self, other): return mycmp(self.obj, other.obj) == 0 def __le__(self, other): return mycmp(self.obj, other.obj) <= 0 def __ge__(self, other): return mycmp(self.obj, other.obj) >= 0 def __ne__(self, other): return mycmp(self.obj, other.obj) != 0 def __hash__(self): raise TypeError('hash not implemented') return K else: from functools import cmp_to_key pyflakes.messages.Message.__str__ = ( lambda self: self.message % self.message_args ) class LintError(object): """Lint error base class """ def __init__(self, filename, loc, level, message, message_args, **kwargs): self.lineno = loc self.level = level self.message = message self.message_args = message_args for k, v in kwargs.items(): setattr(self, k, v) def __str__(self): """String represetation of the error """ return self.message % self.message_args class Pep8Error(LintError): """ Lint error clss for PEP-8 errors PEP-8 errors are treated as Warnings """ def __init__(self, filename, loc, offset, code, text): super(Pep8Error, self).__init__( filename, loc, 'W', '[W] PEP 8 (%s): %s', (code, text), offset=offset, text=text ) class Pep8Warning(LintError): """ Lint error clss for PEP-8 warnings PEP-8 warnings are treated as violations """ def __init__(self, filename, loc, offset, code, text): super(Pep8Warning, self).__init__( filename, loc, 'V', '[V] PEP 8 (%s): %s', (code, text), offset=offset, text=text ) class PythonError(LintError): """Python errors class """ def __init__(self, filename, loc, text): super(PythonError, self).__init__( filename, loc, 'E', '[E] %r', (text,), text=text ) class OffsetError(LintError): def __init__(self, filename, loc, text, offset): super(OffsetError, self).__init__( filename, loc, 'E', '[E] %s', (text,), offset=offset + 1, text=text ) class Linter(object): """Linter class for Anaconda's Python linter """ def __init__(self): self.enabled = False def pyflakes_check(self, code, filename, ignore=None): """Check the code with pyflakes to find errors """ class FakeLoc: lineno = 0 try: code = code.encode('utf8') + b'\n' tree = compile(code, filename or '', 'exec', _ast.PyCF_ONLY_AST) except (SyntaxError, IndentationError): return self._handle_syntactic_error(code, filename) except ValueError as 
            return [PythonError(filename, FakeLoc(), error.args[0])]
        else:
            # the file is syntactically valid, check it now
            w = pyflakes.Checker(tree, filename, ignore)

            return w.messages

    def pep8_check(self, code, filename, rcfile, ignore, max_line_length):
        """Check the code with pep8 to find PEP 8 errors
        """

        messages = []
        _lines = code.split('\n')

        if _lines:
            class FakeCol:
                """Fake class to represent a col object for PyFlakes
                """

                def __init__(self, line_number):
                    self.lineno = line_number

            class SublimeLinterReport(pep8.BaseReport):
                """Helper class to report PEP 8 problems
                """

                def error(self, line_number, offset, text, check):
                    """Report an error, according to options
                    """

                    col = FakeCol(line_number)
                    code = text[:4]
                    message = text[5:]

                    if self._ignore_code(code):
                        return
                    if code in self.counters:
                        self.counters[code] += 1
                    else:
                        self.counters[code] = 1
                        self.messages[code] = message

                    if code in self.expected:
                        return

                    self.file_errors += 1
                    self.total_errors += 1

                    pep8_error = code.startswith('E')
                    klass = Pep8Error if pep8_error else Pep8Warning
                    messages.append(klass(
                        filename, col, offset, code, message
                    ))

                    return code

            params = {'reporter': SublimeLinterReport}
            if not rcfile:
                _ignore = ignore + pep8.DEFAULT_IGNORE.split(',')
                params['ignore'] = _ignore
            else:
                params['config_file'] = os.path.expanduser(rcfile)

            options = pep8.StyleGuide(**params).options
            if not rcfile:
                options.max_line_length = max_line_length

            good_lines = [l + '\n' for l in _lines]
            good_lines[-1] = good_lines[-1].rstrip('\n')

            if not good_lines[-1]:
                good_lines = good_lines[:-1]

            pep8.Checker(filename, good_lines, options=options).check_all()

        return messages

    def run_linter(self, settings, code, filename):
        """Check the code to find errors
        """

        errors = []

        if settings.get("pep8", True):
            check_params = {
                'ignore': settings.get('pep8_ignore', []),
                'max_line_length': settings.get(
                    'pep8_max_line_length', pep8.MAX_LINE_LENGTH
                )
            }
            errors.extend(self.pep8_check(
                code, filename, settings.get('pep8_rcfile'), **check_params
            ))

        pyflakes_ignore = settings.get('pyflakes_ignore', None)
        pyflakes_disabled = settings.get('pyflakes_disabled', False)
        explicit_ignore = settings.get('pyflakes_explicit_ignore', [])

        if not pyflakes_disabled and not settings.get('use_pylint'):
            errors.extend(self.pyflakes_check(code, filename, pyflakes_ignore))

        return self.parse_errors(errors, explicit_ignore)

    def sort_errors(self, errors):
        """Sort errors by line number
        """

        errors.sort(key=cmp_to_key(lambda a, b: a.lineno - b.lineno))

    def prepare_error_level(self, error):
        """Prepare a common error level in case the error doesn't define one
        """

        return 'W' if not hasattr(error, 'level') else error.level

    def parse_errors(self, errors, explicit_ignore):
        """Parse errors returned from the PyFlakes and pep8 libraries
        """

        errors_list = []
        if errors is None:
            return errors_list

        errors.sort(key=cmp_to_key(lambda a, b: a.lineno - b.lineno))
        for error in errors:
            error_level = 'W' if not hasattr(error, 'level') else error.level
            message = error.message.capitalize()

            offset = None
            if hasattr(error, 'offset'):
                offset = error.offset
            elif hasattr(error, 'col'):
                offset = error.col

            error_data = {
                'pep8': False,
                'level': error_level,
                'lineno': error.lineno,
                'offset': offset,
                'message': message,
                'raw_error': str(error)
            }

            if isinstance(error, (Pep8Error, Pep8Warning, OffsetError)):
                error_data['pep8'] = True
                errors_list.append(error_data)
            elif (isinstance(
                error, (
                    pyflakes.messages.RedefinedWhileUnused,
                    pyflakes.messages.RedefinedInListComp,
                    pyflakes.messages.UndefinedName,
                    pyflakes.messages.UndefinedExport,
                    pyflakes.messages.UndefinedLocal,
                    pyflakes.messages.Redefined,
                    pyflakes.messages.UnusedVariable)) and
                    error.__class__.__name__ not in explicit_ignore):
                regex = (
                    r'((and|or|not|if|elif|while|in)\s+|[+\-*^%%<>=\(\{{])*\s'
                    '*(?P<underline>[\w\.]*{0}[\w]*)'.format(re.escape(
                        error.message_args[0]
                    ))
                )
                error_data['len'] = len(error.message_args[0])
                error_data['regex'] = regex
                errors_list.append(error_data)
            elif isinstance(error, pyflakes.messages.ImportShadowedByLoopVar):
                regex = 'for\s+(?P<underline>[\w]*{0}[\w]*)'.format(
                    re.escape(error.message_args[0])
                )
                error_data['regex'] = regex
                errors_list.append(error_data)
            elif (isinstance(
                error, (
                    pyflakes.messages.UnusedImport,
                    pyflakes.messages.ImportStarUsed)) and
                    error.__class__.__name__ not in explicit_ignore):
                if isinstance(error, pyflakes.messages.ImportStarUsed):
                    word = '*'
                else:
                    word = error.message_args[0]

                linematch = '(from\s+[\w_\.]+\s+)?import\s+(?P<match>[^#;]+)'
                r = '(^|\s+|,\s*|as\s+)(?P<underline>[\w]*{0}[\w]*)'.format(
                    re.escape(word)
                )
                error_data['regex'] = r
                error_data['linematch'] = linematch
                errors_list.append(error_data)
            elif (isinstance(error, pyflakes.messages.DuplicateArgument) and
                    error.__class__.__name__ not in explicit_ignore):
                regex = 'def [\w_]+\(.*?(?P<underline>[\w]*{0}[\w]*)'.format(
                    re.escape(error.message_args[0])
                )
                error_data['regex'] = regex
                errors_list.append(error_data)
            elif isinstance(error, pyflakes.messages.LateFutureImport):
                pass
            elif isinstance(error, PythonError):
                print(error)
            else:
                print('Oops, we missed an error type!', type(error))

        return errors_list

    def _handle_syntactic_error(self, code, filename):
        """Handle PythonError and OffsetError
        """

        value = sys.exc_info()[1]
        msg = value.args[0]

        lineno, offset, text = value.lineno, value.offset, value.text

        if text is None:  # encoding problems
            if msg.startswith('duplicate argument'):
                arg = msg.split(
                    'duplicate argument ', 1)[1].split(' ', 1)[0].strip('\'"')
                error = pyflakes.messages.DuplicateArgument(
                    filename, lineno, arg
                )
            else:
                error = PythonError(filename, value, msg)
        else:
            line = text.splitlines()[-1]
            if offset is not None:
                offset = offset - (len(text) - len(line))

            if offset is not None:
                error = OffsetError(filename, value, msg, offset)
            else:
                error = PythonError(filename, value, msg)

        error.lineno = lineno

        return [error]
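A minimal usage sketch for the Linter class above (an illustration, not part of the repository): it assumes this module is importable as `linting.linter` and that pycodestyle and pyflakes are installed; the settings dict, source string and file name are made up for the example, with keys mirroring the ones `run_linter` reads.

from linting.linter import Linter  # assumed import path

settings = {
    'pep8': True,               # enable the pep8/pycodestyle pass
    'pep8_ignore': [],
    'pep8_rcfile': None,
    'pyflakes_disabled': False,
    'pyflakes_explicit_ignore': [],
}
source = 'import os\nx=1\n'  # unused import, plus E225 (missing spaces)
for problem in Linter().run_linter(settings, source, 'example.py'):
    # each problem is one of the dicts built by parse_errors above
    print(problem['lineno'], problem['level'], problem['raw_error'])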
12,920
Python
.py
305
29.829508
79
0.537761
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,294
mccabe.py
DamnWidget_anaconda/anaconda_lib/linting/mccabe.py
""" Meager code path measurement tool. Ned Batchelder http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html MIT License. """ from __future__ import with_statement import optparse import sys import tokenize from collections import defaultdict try: import ast from ast import iter_child_nodes except ImportError: # Python 2.5 from flake8.util import ast, iter_child_nodes __version__ = '0.7.0' class ASTVisitor(object): """Performs a depth-first walk of the AST.""" def __init__(self): self.node = None self._cache = {} def default(self, node, *args): for child in iter_child_nodes(node): self.dispatch(child, *args) def dispatch(self, node, *args): self.node = node klass = node.__class__ meth = self._cache.get(klass) if meth is None: className = klass.__name__ meth = getattr(self.visitor, 'visit' + className, self.default) self._cache[klass] = meth return meth(node, *args) def preorder(self, tree, visitor, *args): """Do preorder walk of tree using visitor""" self.visitor = visitor visitor.visit = self.dispatch self.dispatch(tree, *args) # XXX *args make sense? class PathNode(object): def __init__(self, name, look="circle"): self.name = name self.look = look def to_dot(self): print('node [shape=%s,label="%s"] %d;' % ( self.look, self.name, self.dot_id())) def dot_id(self): return id(self) class PathGraph(object): def __init__(self, name, entity, lineno, column=0): self.name = name self.entity = entity self.lineno = lineno self.column = column self.nodes = defaultdict(list) def connect(self, n1, n2): self.nodes[n1].append(n2) # Ensure that the destination node is always counted. self.nodes[n2] = [] def to_dot(self): print('subgraph {') for node in self.nodes: node.to_dot() for node, nexts in self.nodes.items(): for next in nexts: print('%s -- %s;' % (node.dot_id(), next.dot_id())) print('}') def complexity(self): """ Return the McCabe complexity for the graph. V-E+2 """ num_edges = sum([len(n) for n in self.nodes.values()]) num_nodes = len(self.nodes) return num_edges - num_nodes + 2 class PathGraphingAstVisitor(ASTVisitor): """ A visitor for a parsed Abstract Syntax Tree which finds executable statements. """ def __init__(self): super(PathGraphingAstVisitor, self).__init__() self.classname = "" self.graphs = {} self.reset() def reset(self): self.graph = None self.tail = None def dispatch_list(self, node_list): for node in node_list: self.dispatch(node) def visitFunctionDef(self, node): if self.classname: entity = '%s%s' % (self.classname, node.name) else: entity = node.name name = '%d:%d: %r' % (node.lineno, node.col_offset, entity) if self.graph is not None: # closure pathnode = self.appendPathNode(name) self.tail = pathnode self.dispatch_list(node.body) bottom = PathNode("", look='point') self.graph.connect(self.tail, bottom) self.graph.connect(pathnode, bottom) self.tail = bottom else: self.graph = PathGraph(name, entity, node.lineno, node.col_offset) pathnode = PathNode(name) self.tail = pathnode self.dispatch_list(node.body) self.graphs["%s%s" % (self.classname, node.name)] = self.graph self.reset() visitAsyncFunctionDef = visitFunctionDef def visitClassDef(self, node): old_classname = self.classname self.classname += node.name + "." 
self.dispatch_list(node.body) self.classname = old_classname def appendPathNode(self, name): if not self.tail: return pathnode = PathNode(name) self.graph.connect(self.tail, pathnode) self.tail = pathnode return pathnode def visitSimpleStatement(self, node): if node.lineno is None: lineno = 0 else: lineno = node.lineno name = "Stmt %d" % lineno self.appendPathNode(name) def default(self, node, *args): if isinstance(node, ast.stmt): self.visitSimpleStatement(node) else: super(PathGraphingAstVisitor, self).default(node, *args) def visitLoop(self, node): name = "Loop %d" % node.lineno self._subgraph(node, name) visitAsyncFor = visitFor = visitWhile = visitLoop def visitIf(self, node): name = "If %d" % node.lineno self._subgraph(node, name) def _subgraph(self, node, name, extra_blocks=()): """create the subgraphs representing any `if` and `for` statements""" if self.graph is None: # global loop self.graph = PathGraph(name, name, node.lineno, node.col_offset) pathnode = PathNode(name) self._subgraph_parse(node, pathnode, extra_blocks) self.graphs["%s%s" % (self.classname, name)] = self.graph self.reset() else: pathnode = self.appendPathNode(name) self._subgraph_parse(node, pathnode, extra_blocks) def _subgraph_parse(self, node, pathnode, extra_blocks): """parse the body and any `else` block of `if` and `for` statements""" loose_ends = [] self.tail = pathnode self.dispatch_list(node.body) loose_ends.append(self.tail) for extra in extra_blocks: self.tail = pathnode self.dispatch_list(extra.body) loose_ends.append(self.tail) if node.orelse: self.tail = pathnode self.dispatch_list(node.orelse) loose_ends.append(self.tail) else: loose_ends.append(pathnode) if pathnode: bottom = PathNode("", look='point') for le in loose_ends: self.graph.connect(le, bottom) self.tail = bottom def visitTryExcept(self, node): name = "TryExcept %d" % node.lineno self._subgraph(node, name, extra_blocks=node.handlers) visitTry = visitTryExcept def visitWith(self, node): name = "With %d" % node.lineno self.appendPathNode(name) self.dispatch_list(node.body) visitAsyncWith = visitWith class McCabeChecker(object): """McCabe cyclomatic complexity checker.""" name = 'mccabe' version = __version__ _code = 'C901' _error_tmpl = "C901 %r is too complex (%d)" max_complexity = -1 def __init__(self, tree, filename): self.tree = tree @classmethod def add_options(cls, parser): flag = '--max-complexity' kwargs = { 'default': -1, 'action': 'store', 'type': int, 'help': 'McCabe complexity threshold', 'parse_from_config': 'True', } config_opts = getattr(parser, 'config_options', None) if isinstance(config_opts, list): # Flake8 2.x kwargs.pop('parse_from_config') parser.add_option(flag, **kwargs) parser.config_options.append('max-complexity') else: parser.add_option(flag, **kwargs) @classmethod def parse_options(cls, options): cls.max_complexity = int(options.max_complexity) def run(self): if self.max_complexity < 0: return visitor = PathGraphingAstVisitor() visitor.preorder(self.tree, visitor) for graph in visitor.graphs.values(): if graph.complexity() > self.max_complexity: text = self._error_tmpl % (graph.entity, graph.complexity()) yield graph.lineno, graph.column, text, type(self) def get_code_complexity(code, threshold=7, filename='stdin'): try: tree = compile(code, filename, "exec", ast.PyCF_ONLY_AST) except SyntaxError: e = sys.exc_info()[1] sys.stderr.write("Unable to parse %s: %s\n" % (filename, e)) return 0 complx = [] McCabeChecker.max_complexity = threshold for lineno, offset, text, check in McCabeChecker(tree, filename).run(): 
complx.append('%s:%d:1: %s' % (filename, lineno, text)) if len(complx) == 0: return 0 print('\n'.join(complx)) return len(complx) def get_module_complexity(module_path, threshold=7): """Returns the complexity of a module""" code = _read(module_path) return get_code_complexity(code, threshold, filename=module_path) def _read(filename): if (2, 5) < sys.version_info < (3, 0): with open(filename, 'rU') as f: return f.read() elif (3, 0) <= sys.version_info < (4, 0): """Read the source code.""" try: with open(filename, 'rb') as f: (encoding, _) = tokenize.detect_encoding(f.readline) except (LookupError, SyntaxError, UnicodeError): # Fall back if file encoding is improperly declared with open(filename, encoding='latin-1') as f: return f.read() with open(filename, 'r', encoding=encoding) as f: return f.read() def main(argv=None): if argv is None: argv = sys.argv[1:] opar = optparse.OptionParser() opar.add_option("-d", "--dot", dest="dot", help="output a graphviz dot file", action="store_true") opar.add_option("-m", "--min", dest="threshold", help="minimum complexity for output", type="int", default=1) options, args = opar.parse_args(argv) code = _read(args[0]) tree = compile(code, args[0], "exec", ast.PyCF_ONLY_AST) visitor = PathGraphingAstVisitor() visitor.preorder(tree, visitor) if options.dot: print('graph {') for graph in visitor.graphs.values(): if (not options.threshold or graph.complexity() >= options.threshold): graph.to_dot() print('}') else: for graph in visitor.graphs.values(): if graph.complexity() >= options.threshold: print(graph.name, graph.complexity()) if __name__ == '__main__': main(sys.argv[1:])
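A hedged example of driving the checker above programmatically, assuming mccabe.py is importable as `mccabe`; the snippet and names are illustrative.

from mccabe import get_code_complexity  # assumed import path

SNIPPET = (
    'def f(x):\n'
    '    if x > 0:\n'
    '        return 1\n'
    '    for i in range(x):\n'
    '        print(i)\n'
    '    return 0\n'
)

# f scores 3 (base 1, plus one `if` and one `for`), so a threshold of 2
# prints "stdin:1:1: C901 'f' is too complex (3)" and returns the number
# of offending functions.
violations = get_code_complexity(SNIPPET, threshold=2)
print(violations)  # -> 1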
10,654
Python
.py
284
28.577465
78
0.590027
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,295
anaconda_pyflakes.py
DamnWidget_anaconda/anaconda_lib/linting/anaconda_pyflakes.py
# -*- coding: utf8 -*-

# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details

import re
import _ast

from linting import linter
import pyflakes.checker as pyflakes

pyflakes.messages.Message.__str__ = (
    lambda self: self.message % self.message_args
)


class PyFlakesError(pyflakes.messages.Message):
    """Lint error base class
    """

    def __init__(self, filename, loc, level, message, message_args, **kwargs):
        super(PyFlakesError, self).__init__(filename, loc)
        self.level = level
        self.message = message
        self.message_args = message_args


class PyFlakesLinter(linter.Linter):
    """Linter for PyFlakes Linter
    """

    def lint(self, settings, code, filename):
        """Run the pyflakes code checker with the given options
        """

        errors = []
        pyflakes_ignore = settings.get('pyflakes_ignore', None)
        pyflakes_disabled = settings.get('pyflakes_disabled', False)
        explicit_ignore = settings.get('pyflakes_explicit_ignore', [])

        if not pyflakes_disabled and not settings.get('use_pylint'):
            errors.extend(self.check(code, filename, pyflakes_ignore))

        return self.parse(errors, explicit_ignore)

    def check(self, code, filename, ignore=None):
        """Check the code with pyflakes to find errors
        """

        class FakeLoc:
            lineno = 0

        try:
            fname = ''
            if filename is not None:
                fname = filename.encode('utf8') or ''
            code = code.encode('utf8') + b'\n'
            tree = compile(code, fname, 'exec', _ast.PyCF_ONLY_AST)
        except (SyntaxError, IndentationError):
            return self._handle_syntactic_error(code, filename)
        except ValueError as error:
            return [
                PyFlakesError(filename, FakeLoc(), 'E', '%s', (error.args[0],))
            ]
        else:
            # the file is syntactically valid, check it now
            w = pyflakes.Checker(tree, filename, ignore)

            return w.messages

    def parse(self, errors, explicit_ignore):
        """Parse the errors returned from the PyFlakes library
        """

        error_list = []
        if errors is None:
            return error_list

        errors.sort(key=linter.cmp_to_key(lambda a, b: a.lineno - b.lineno))
        for error in errors:
            error_level = 'W' if not hasattr(error, 'level') else error.level
            message = error.message.capitalize()

            error_data = {
                'underline_range': False,
                'level': error_level,
                'lineno': error.lineno,
                'message': message,
                'raw_error': str(error)
            }

            if hasattr(error, 'offset'):
                error_data['offset'] = error.offset
            elif hasattr(error, 'col'):
                error_data['offset'] = error.col

            if (isinstance(error, (linter.OffsetError))):
                error_data['underline_range'] = True
                error_list.append(error_data)
            elif (isinstance(
                error, (
                    pyflakes.messages.RedefinedWhileUnused,
                    pyflakes.messages.RedefinedInListComp,
                    pyflakes.messages.UndefinedName,
                    pyflakes.messages.UndefinedExport,
                    pyflakes.messages.UndefinedLocal,
                    pyflakes.messages.UnusedVariable)) and
                    error.__class__.__name__ not in explicit_ignore):
                error_data['len'] = len(error.message_args[0])
                error_data['regex'] = (
                    r'((and|or|not|if|elif|while|in)\s+|[+\-*^%%<>=\(\{{])*\s'
                    '*(?P<underline>[\w\.]*{0}[\w]*)'.format(re.escape(
                        error.message_args[0]
                    ))
                )
                error_list.append(error_data)
            elif isinstance(error, pyflakes.messages.ImportShadowedByLoopVar):
                regex = 'for\s+(?P<underline>[\w]*{0}[\w]*)'.format(
                    re.escape(error.message_args[0])
                )
                error_data['regex'] = regex
                error_list.append(error_data)
            elif (isinstance(
                error, (
                    pyflakes.messages.UnusedImport,
                    pyflakes.messages.ImportStarUsed)) and
                    error.__class__.__name__ not in explicit_ignore):
                if isinstance(error, pyflakes.messages.ImportStarUsed):
                    word = '*'
                else:
                    word = error.message_args[0]

                linematch = '(from\s+[\w_\.]+\s+)?import\s+(?P<match>[^#;]+)'
                r = '(^|\s+|,\s*|as\s+)(?P<underline>[\w]*{0}[\w]*)'.format(
                    re.escape(word)
                )
                error_data['regex'] = r
                error_data['linematch'] = linematch
                error_list.append(error_data)
            elif (isinstance(error, pyflakes.messages.DuplicateArgument) and
                    error.__class__.__name__ not in explicit_ignore):
                regex = 'def [\w_]+\(.*?(?P<underline>[\w]*{0}[\w]*)'.format(
                    re.escape(error.message_args[0])
                )
                error_data['regex'] = regex
                error_list.append(error_data)
            elif isinstance(error, pyflakes.messages.LateFutureImport):
                pass
            elif isinstance(error, linter.PythonError):
                print(error)
            else:
                print(
                    'Oops, we missed an error type for pyflakes',
                    type(error)
                )

        return error_list
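A short, hedged sketch of calling the pyflakes-backed linter above directly, assuming the anaconda_lib directory is on sys.path so that `linting.anaconda_pyflakes` is importable; the settings dict, code string and file name are illustrative.

from linting.anaconda_pyflakes import PyFlakesLinter

settings = {
    'pyflakes_ignore': None,
    'pyflakes_disabled': False,
    'pyflakes_explicit_ignore': [],
}
code = 'import os\n'  # triggers an "imported but unused" warning
for err in PyFlakesLinter().lint(settings, code, 'example.py'):
    # each err is one of the dicts built by parse() above
    print(err['lineno'], err['level'], err['raw_error'])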
5,741
Python
.py
129
31.232558
79
0.538765
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,296
__main__.py
DamnWidget_anaconda/anaconda_lib/linting/pydocstyle/__main__.py
#! /usr/bin/env python """Static analysis tool for checking docstring conventions and style. The repository is located at: http://github.com/PyCQA/pydocstyle """ __all__ = () def main(): from pydocstyle import cli cli.main() if __name__ == '__main__': main()
279
Python
.py
11
22.545455
69
0.684615
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,297
checker.py
DamnWidget_anaconda/anaconda_lib/linting/pydocstyle/checker.py
"""Parsed source code checkers for docstring violations.""" import ast import string import sys import tokenize as tk from itertools import takewhile from re import compile as re from collections import namedtuple from . import violations from .config import IllegalConfiguration from .parser import (Package, Module, Class, NestedClass, Definition, AllError, Method, Function, NestedFunction, Parser, StringIO, ParseError) from .utils import log, is_blank, pairwise from .wordlists import IMPERATIVE_VERBS, IMPERATIVE_BLACKLIST, stem __all__ = ('check', ) # If possible (python >= 3.2) use tokenize.open to open files, so PEP 263 # encoding markers are interpreted. try: tokenize_open = tk.open except AttributeError: tokenize_open = open def check_for(kind, terminal=False): def decorator(f): f._check_for = kind f._terminal = terminal return f return decorator class ConventionChecker(object): """Checker for PEP 257 and numpy conventions. D10x: Missing docstrings D20x: Whitespace issues D30x: Docstring formatting D40x: Docstring content issues """ SECTION_NAMES = ['Short Summary', 'Extended Summary', 'Parameters', 'Returns', 'Yields', 'Other Parameters', 'Raises', 'See Also', 'Notes', 'References', 'Examples', 'Attributes', 'Methods'] def check_source(self, source, filename, ignore_decorators=None): module = parse(StringIO(source), filename) for definition in module: for this_check in self.checks: terminate = False if isinstance(definition, this_check._check_for): skipping_all = (definition.skipped_error_codes == 'all') decorator_skip = ignore_decorators is not None and any( len(ignore_decorators.findall(dec.name)) > 0 for dec in definition.decorators) if not skipping_all and not decorator_skip: error = this_check(self, definition, definition.docstring) else: error = None errors = error if hasattr(error, '__iter__') else [error] for error in errors: if error is not None and error.code not in \ definition.skipped_error_codes: partition = this_check.__doc__.partition('.\n') message, _, explanation = partition error.set_context(explanation=explanation, definition=definition) yield error if this_check._terminal: terminate = True break if terminate: break @property def checks(self): all = [this_check for this_check in vars(type(self)).values() if hasattr(this_check, '_check_for')] return sorted(all, key=lambda this_check: not this_check._terminal) @check_for(Definition, terminal=True) def check_docstring_missing(self, definition, docstring): """D10{0,1,2,3}: Public definitions should have docstrings. All modules should normally have docstrings. [...] all functions and classes exported by a module should also have docstrings. Public methods (including the __init__ constructor) should also have docstrings. Note: Public (exported) definitions are either those with names listed in __all__ variable (if present), or those that do not start with a single underscore. """ if (not docstring and definition.is_public or docstring and is_blank(ast.literal_eval(docstring))): codes = {Module: violations.D100, Class: violations.D101, NestedClass: violations.D106, Method: (lambda: violations.D105() if definition.is_magic else violations.D102()), Function: violations.D103, NestedFunction: violations.D103, Package: violations.D104} return codes[type(definition)]() @check_for(Definition) def check_one_liners(self, definition, docstring): """D200: One-liner docstrings should fit on one line with quotes. The closing quotes are on the same line as the opening quotes. This looks better for one-liners. 
""" if docstring: lines = ast.literal_eval(docstring).split('\n') if len(lines) > 1: non_empty_lines = sum(1 for l in lines if not is_blank(l)) if non_empty_lines == 1: return violations.D200(len(lines)) @check_for(Function) def check_no_blank_before(self, function, docstring): # def """D20{1,2}: No blank lines allowed around function/method docstring. There's no blank line either before or after the docstring. """ if docstring: before, _, after = function.source.partition(docstring) blanks_before = list(map(is_blank, before.split('\n')[:-1])) blanks_after = list(map(is_blank, after.split('\n')[1:])) blanks_before_count = sum(takewhile(bool, reversed(blanks_before))) blanks_after_count = sum(takewhile(bool, blanks_after)) if blanks_before_count != 0: yield violations.D201(blanks_before_count) if not all(blanks_after) and blanks_after_count != 0: yield violations.D202(blanks_after_count) @check_for(Class) def check_blank_before_after_class(self, class_, docstring): """D20{3,4}: Class docstring should have 1 blank line around them. Insert a blank line before and after all docstrings (one-line or multi-line) that document a class -- generally speaking, the class's methods are separated from each other by a single blank line, and the docstring needs to be offset from the first method by a blank line; for symmetry, put a blank line between the class header and the docstring. """ # NOTE: this gives false-positive in this case # class Foo: # # """Docstring.""" # # # # comment here # def foo(): pass if docstring: before, _, after = class_.source.partition(docstring) blanks_before = list(map(is_blank, before.split('\n')[:-1])) blanks_after = list(map(is_blank, after.split('\n')[1:])) blanks_before_count = sum(takewhile(bool, reversed(blanks_before))) blanks_after_count = sum(takewhile(bool, blanks_after)) if blanks_before_count != 0: yield violations.D211(blanks_before_count) if blanks_before_count != 1: yield violations.D203(blanks_before_count) if not all(blanks_after) and blanks_after_count != 1: yield violations.D204(blanks_after_count) @check_for(Definition) def check_blank_after_summary(self, definition, docstring): """D205: Put one blank line between summary line and description. Multi-line docstrings consist of a summary line just like a one-line docstring, followed by a blank line, followed by a more elaborate description. The summary line may be used by automatic indexing tools; it is important that it fits on one line and is separated from the rest of the docstring by a blank line. """ if docstring: lines = ast.literal_eval(docstring).strip().split('\n') if len(lines) > 1: post_summary_blanks = list(map(is_blank, lines[1:])) blanks_count = sum(takewhile(bool, post_summary_blanks)) if blanks_count != 1: return violations.D205(blanks_count) @staticmethod def _get_docstring_indent(definition, docstring): """Return the indentation of the docstring's opening quotes.""" before_docstring, _, _ = definition.source.partition(docstring) _, _, indent = before_docstring.rpartition('\n') return indent @check_for(Definition) def check_indent(self, definition, docstring): """D20{6,7,8}: The entire docstring should be indented same as code. The entire docstring is indented the same as the quotes at its first line. """ if docstring: indent = self._get_docstring_indent(definition, docstring) lines = docstring.split('\n') if len(lines) > 1: lines = lines[1:] # First line does not need indent. 
indents = [leading_space(l) for l in lines if not is_blank(l)] if set(' \t') == set(''.join(indents) + indent): yield violations.D206() if (len(indents) > 1 and min(indents[:-1]) > indent or indents[-1] > indent): yield violations.D208() if min(indents) < indent: yield violations.D207() @check_for(Definition) def check_newline_after_last_paragraph(self, definition, docstring): """D209: Put multi-line docstring closing quotes on separate line. Unless the entire docstring fits on a line, place the closing quotes on a line by themselves. """ if docstring: lines = [l for l in ast.literal_eval(docstring).split('\n') if not is_blank(l)] if len(lines) > 1: if docstring.split("\n")[-1].strip() not in ['"""', "'''"]: return violations.D209() @check_for(Definition) def check_surrounding_whitespaces(self, definition, docstring): """D210: No whitespaces allowed surrounding docstring text.""" if docstring: lines = ast.literal_eval(docstring).split('\n') if lines[0].startswith(' ') or \ len(lines) == 1 and lines[0].endswith(' '): return violations.D210() @check_for(Definition) def check_multi_line_summary_start(self, definition, docstring): """D21{2,3}: Multi-line docstring summary style check. A multi-line docstring summary should start either at the first, or separately at the second line of a docstring. """ if docstring: start_triple = [ '"""', "'''", 'u"""', "u'''", 'r"""', "r'''", 'ur"""', "ur'''" ] lines = ast.literal_eval(docstring).split('\n') if len(lines) > 1: first = docstring.split("\n")[0].strip().lower() if first in start_triple: return violations.D212() else: return violations.D213() @check_for(Definition) def check_triple_double_quotes(self, definition, docstring): r'''D300: Use """triple double quotes""". For consistency, always use """triple double quotes""" around docstrings. Use r"""raw triple double quotes""" if you use any backslashes in your docstrings. For Unicode docstrings, use u"""Unicode triple-quoted strings""". Note: Exception to this is made if the docstring contains """ quotes in its body. ''' if docstring: if '"""' in ast.literal_eval(docstring): # Allow ''' quotes if docstring contains """, because # otherwise """ quotes could not be expressed inside # docstring. Not in PEP 257. regex = re(r"[uU]?[rR]?'''[^'].*") else: regex = re(r'[uU]?[rR]?"""[^"].*') if not regex.match(docstring): illegal_matcher = re(r"""[uU]?[rR]?("+|'+).*""") illegal_quotes = illegal_matcher.match(docstring).group(1) return violations.D300(illegal_quotes) @check_for(Definition) def check_backslashes(self, definition, docstring): r'''D301: Use r""" if any backslashes in a docstring. Use r"""raw triple double quotes""" if you use any backslashes (\) in your docstrings. ''' # Just check that docstring is raw, check_triple_double_quotes # ensures the correct quotes. if docstring and '\\' in docstring and not docstring.startswith( ('r', 'ur')): return violations.D301() @check_for(Definition) def check_unicode_docstring(self, definition, docstring): r'''D302: Use u""" for docstrings with Unicode. For Unicode docstrings, use u"""Unicode triple-quoted strings""". ''' if 'unicode_literals' in definition.module.future_imports: return # Just check that docstring is unicode, check_triple_double_quotes # ensures the correct quotes. if docstring and sys.version_info[0] <= 2: if not is_ascii(docstring) and not docstring.startswith( ('u', 'ur')): return violations.D302() @check_for(Definition) def check_ends_with_period(self, definition, docstring): """D400: First line should end with a period. 
The [first line of a] docstring is a phrase ending in a period. """ if docstring: summary_line = ast.literal_eval(docstring).strip().split('\n')[0] if not summary_line.endswith('.'): return violations.D400(summary_line[-1]) @check_for(Function) def check_imperative_mood(self, function, docstring): # def context """D401: First line should be in imperative mood: 'Do', not 'Does'. [Docstring] prescribes the function or method's effect as a command: ("Do this", "Return that"), not as a description; e.g. don't write "Returns the pathname ...". """ if docstring and not function.is_test: stripped = ast.literal_eval(docstring).strip() if stripped: first_word = stripped.split()[0] check_word = first_word.lower() if check_word in IMPERATIVE_BLACKLIST: return violations.D401b(first_word) try: correct_form = IMPERATIVE_VERBS.get(stem(check_word)) except UnicodeDecodeError: # This is raised when the docstring contains unicode # characters in the first word, but is not a unicode # string. In which case D302 will be reported. Ignoring. return if correct_form and correct_form != check_word: return violations.D401( correct_form.capitalize(), first_word ) @check_for(Function) def check_no_signature(self, function, docstring): # def context """D402: First line should not be function's or method's "signature". The one-line docstring should NOT be a "signature" reiterating the function/method parameters (which can be obtained by introspection). """ if docstring: first_line = ast.literal_eval(docstring).strip().split('\n')[0] if function.name + '(' in first_line.replace(' ', ''): return violations.D402() @check_for(Function) def check_capitalized(self, function, docstring): """D403: First word of the first line should be properly capitalized. The [first line of a] docstring is a phrase ending in a period. """ if docstring: first_word = ast.literal_eval(docstring).split()[0] if first_word == first_word.upper(): return for char in first_word: if char not in string.ascii_letters and char != "'": return if first_word != first_word.capitalize(): return violations.D403(first_word.capitalize(), first_word) @check_for(Definition) def check_starts_with_this(self, function, docstring): """D404: First word of the docstring should not be `This`. Docstrings should use short, simple language. They should not begin with "This class is [..]" or "This module contains [..]". """ if docstring: first_word = ast.literal_eval(docstring).split()[0] if first_word.lower() == 'this': return violations.D404() @staticmethod def _get_leading_words(line): """Return any leading set of words from `line`. For example, if `line` is " Hello world!!!", returns "Hello world". """ result = re("[A-Za-z ]+").match(line.strip()) if result is not None: return result.group() @staticmethod def _is_a_docstring_section(context): """Check if the suspected context is really a section header. Lets have a look at the following example docstring: '''Title. Some part of the docstring that specifies what the function returns. <----- Not a real section name. It has a suffix and the previous line is not empty and does not end with a punctuation sign. This is another line in the docstring. It describes stuff, but we forgot to add a blank line between it and the section name. Returns <----- A real section name. The previous line ends with ------- a period, therefore it is in a new grammatical context. Bla. ''' To make sure this is really a section we check these conditions: * There's no suffix to the section name. * The previous line ends with punctuation. 
* The previous line is empty. If one of the conditions is true, we will consider the line as a section name. """ section_name_suffix = context.line.lstrip(context.section_name).strip() punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')'] prev_line_ends_with_punctuation = \ any(context.previous_line.strip().endswith(x) for x in punctuation) return (is_blank(section_name_suffix) or prev_line_ends_with_punctuation or is_blank(context.previous_line)) @classmethod def _check_section_underline(cls, section_name, context, indentation): """D4{07,08,09,12}, D215: Section underline checks. Check for correct formatting for docstring sections. Checks that: * The line that follows the section name contains dashes (D40{7,8}). * The amount of dashes is equal to the length of the section name (D409). * The section's content does not begin in the line that follows the section header (D412). * The indentation of the dashed line is equal to the docstring's indentation (D215). """ blank_lines_after_header = 0 for line in context.following_lines: if not is_blank(line): break blank_lines_after_header += 1 else: # There are only blank lines after the header. yield violations.D407(section_name) return non_empty_line = context.following_lines[blank_lines_after_header] dash_line_found = ''.join(set(non_empty_line.strip())) == '-' if not dash_line_found: yield violations.D407(section_name) if blank_lines_after_header > 0: yield violations.D412(section_name) else: if blank_lines_after_header > 0: yield violations.D408(section_name) if non_empty_line.strip() != "-" * len(section_name): yield violations.D409(len(section_name), section_name, len(non_empty_line.strip())) if leading_space(non_empty_line) > indentation: yield violations.D215(section_name) line_after_dashes_index = blank_lines_after_header + 1 # If the line index after the dashes is in range (perhaps we have # a header + underline followed by another section header). if line_after_dashes_index < len(context.following_lines): line_after_dashes = \ context.following_lines[line_after_dashes_index] if is_blank(line_after_dashes): rest_of_lines = \ context.following_lines[line_after_dashes_index:] if not is_blank(''.join(rest_of_lines)): yield violations.D412(section_name) else: yield violations.D414(section_name) else: yield violations.D414(section_name) @classmethod def _check_section(cls, docstring, definition, context): """D4{05,06,10,11,13}, D214: Section name checks. Check for valid section names. Checks that: * The section name is properly capitalized (D405). * The section is not over-indented (D214). * The section name has no superfluous suffix to it (D406). * There's a blank line after the section (D410, D413). * There's a blank line before the section (D411). Also yields all the errors from `_check_section_underline`. 
""" capitalized_section = context.section_name.title() indentation = cls._get_docstring_indent(definition, docstring) if (context.section_name not in cls.SECTION_NAMES and capitalized_section in cls.SECTION_NAMES): yield violations.D405(capitalized_section, context.section_name) if leading_space(context.line) > indentation: yield violations.D214(capitalized_section) suffix = context.line.strip().lstrip(context.section_name) if suffix: yield violations.D406(capitalized_section, context.line.strip()) if (not context.following_lines or not is_blank(context.following_lines[-1])): if context.is_last_section: yield violations.D413(capitalized_section) else: yield violations.D410(capitalized_section) if not is_blank(context.previous_line): yield violations.D411(capitalized_section) for err in cls._check_section_underline(capitalized_section, context, indentation): yield err @check_for(Definition) def check_docstring_sections(self, definition, docstring): """D21{4,5}, D4{05,06,07,08,09,10}: Docstring sections checks. Check the general format of a sectioned docstring: '''This is my one-liner. Short Summary ------------- This is my summary. Returns ------- None. ''' Section names appear in `SECTION_NAMES`. """ if not docstring: return lines = docstring.split("\n") if len(lines) < 2: return lower_section_names = [s.lower() for s in self.SECTION_NAMES] def _suspected_as_section(_line): result = self._get_leading_words(_line.lower()) return result in lower_section_names # Finding our suspects. suspected_section_indices = [i for i, line in enumerate(lines) if _suspected_as_section(line)] SectionContext = namedtuple('SectionContext', ('section_name', 'previous_line', 'line', 'following_lines', 'original_index', 'is_last_section')) # First - create a list of possible contexts. Note that the # `following_linex` member is until the end of the docstring. contexts = (SectionContext(self._get_leading_words(lines[i].strip()), lines[i - 1], lines[i], lines[i + 1:], i, False) for i in suspected_section_indices) # Now that we have manageable objects - rule out false positives. contexts = (c for c in contexts if self._is_a_docstring_section(c)) # Now we shall trim the `following lines` field to only reach the # next section name. for a, b in pairwise(contexts, None): end = -1 if b is None else b.original_index new_ctx = SectionContext(a.section_name, a.previous_line, a.line, lines[a.original_index + 1:end], a.original_index, b is None) for err in self._check_section(docstring, definition, new_ctx): yield err parse = Parser() def check(filenames, select=None, ignore=None, ignore_decorators=None): """Generate docstring errors that exist in `filenames` iterable. By default, the PEP-257 convention is checked. To specifically define the set of error codes to check for, supply either `select` or `ignore` (but not both). In either case, the parameter should be a collection of error code strings, e.g., {'D100', 'D404'}. When supplying `select`, only specified error codes will be reported. When supplying `ignore`, all error codes which were not specified will be reported. Note that ignored error code refer to the entire set of possible error codes, which is larger than just the PEP-257 convention. To your convenience, you may use `pydocstyle.violations.conventions.pep257` as a base set to add or remove errors from. 
    Examples
    --------
    >>> check(['pydocstyle.py'])
    <generator object check at 0x...>
    >>> check(['pydocstyle.py'], select=['D100'])
    <generator object check at 0x...>
    >>> check(['pydocstyle.py'], ignore=conventions.pep257 - {'D100'})
    <generator object check at 0x...>

    """
    if select is not None and ignore is not None:
        raise IllegalConfiguration('Cannot pass both select and ignore. '
                                   'They are mutually exclusive.')
    elif select is not None:
        checked_codes = select
    elif ignore is not None:
        checked_codes = list(set(violations.ErrorRegistry.get_error_codes()) -
                             set(ignore))
    else:
        checked_codes = violations.conventions.pep257

    for filename in filenames:
        log.info('Checking file %s.', filename)
        try:
            with tokenize_open(filename) as file:
                source = file.read()
            for error in ConventionChecker().check_source(source, filename,
                                                          ignore_decorators):
                code = getattr(error, 'code', None)
                if code in checked_codes:
                    yield error
        except (EnvironmentError, AllError, ParseError) as error:
            log.warning('Error in file %s: %s', filename, error)
            yield error
        except tk.TokenError:
            yield SyntaxError('invalid syntax in file %s' % filename)


def is_ascii(string):
    return all(ord(char) < 128 for char in string)


def leading_space(string):
    return re('\s*').match(string).group()
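A hedged usage sketch for the `check` generator defined above, assuming the package is importable as `pydocstyle` and that an `example.py` file exists; the selected codes and file name are illustrative.

from pydocstyle.checker import check  # assumed import path

# `select` and `ignore` are mutually exclusive, as enforced above; here
# only the missing-docstring codes D100/D101/D103 are reported.
for error in check(['example.py'], select={'D100', 'D101', 'D103'}):
    print(error)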
28,751
Python
.py
587
35.574106
79
0.570042
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,298
config.py
DamnWidget_anaconda/anaconda_lib/linting/pydocstyle/config.py
"""Configuration file parsing and utilities.""" import copy import itertools import os from re import compile as re try: from collections import Set, namedtuple except ImportError: # this will fail in Pyton >= 3.10 from collections.abc import Set from collections import namedtuple try: # Python 3.x from ConfigParser import RawConfigParser except ImportError: # Python 2.x from configparser import RawConfigParser from .utils import __version__, log from .violations import ErrorRegistry, conventions def check_initialized(method): """Check that the configuration object was initialized.""" def _decorator(self, *args, **kwargs): if self._arguments is None or self._options is None: raise RuntimeError('using an uninitialized configuration') return method(self, *args, **kwargs) return _decorator class ConfigurationParser(object): """Responsible for parsing configuration from files and CLI. There are 2 types of configurations: Run configurations and Check configurations. Run Configurations: ------------------ Responsible for deciding things that are related to the user interface and configuration discovery, e.g. verbosity, debug options, etc. All run configurations default to `False` or `None` and are decided only by CLI. Check Configurations: -------------------- Configurations that are related to which files and errors will be checked. These are configurable in 2 ways: using the CLI, and using configuration files. Configuration files are nested within the file system, meaning that the closer a configuration file is to a checked file, the more relevant it will be. For instance, imagine this directory structure: A +-- tox.ini: sets `select=D100` +-- B +-- foo.py +-- tox.ini: sets `add-ignore=D100` Then `foo.py` will not be checked for `D100`. The configuration build algorithm is described in `self._get_config`. Note: If any of `BASE_ERROR_SELECTION_OPTIONS` was selected in the CLI, all configuration files will be ignored and each file will be checked for the error codes supplied in the CLI. """ CONFIG_FILE_OPTIONS = ('convention', 'select', 'ignore', 'add-select', 'add-ignore', 'match', 'match-dir', 'ignore-decorators') BASE_ERROR_SELECTION_OPTIONS = ('ignore', 'select', 'convention') DEFAULT_MATCH_RE = '(?!test_).*\.py' DEFAULT_MATCH_DIR_RE = '[^\.].*' DEFAULT_IGNORE_DECORATORS_RE = '' DEFAULT_CONVENTION = conventions.pep257 PROJECT_CONFIG_FILES = ( 'setup.cfg', 'tox.ini', '.pydocstyle', '.pydocstyle.ini', '.pydocstylerc', '.pydocstylerc.ini', # The following is deprecated, but remains for backwards compatibility. '.pep257', ) POSSIBLE_SECTION_NAMES = ('pydocstyle', 'pep257') def __init__(self): """Create a configuration parser.""" self._cache = {} self._override_by_cli = None self._options = self._arguments = self._run_conf = None self._parser = self._create_option_parser() # ---------------------------- Public Methods ----------------------------- def get_default_run_configuration(self): """Return a `RunConfiguration` object set with default values.""" options, _ = self._parse_args([]) return self._create_run_config(options) def parse(self): """Parse the configuration. If one of `BASE_ERROR_SELECTION_OPTIONS` was selected, overrides all error codes to check and disregards any error code related configurations from the configuration files. 
""" self._options, self._arguments = self._parse_args() self._arguments = self._arguments or ['.'] if not self._validate_options(self._options): raise IllegalConfiguration() self._run_conf = self._create_run_config(self._options) config = self._create_check_config(self._options, use_defaults=False) self._override_by_cli = config @check_initialized def get_user_run_configuration(self): """Return the run configuration for the script.""" return self._run_conf @check_initialized def get_files_to_check(self): """Generate files and error codes to check on each one. Walk dir trees under `self._arguments` and generate yield filnames that `match` under each directory that `match_dir`. The method locates the configuration for each file name and yields a tuple of (filename, [error_codes]). With every discovery of a new configuration file `IllegalConfiguration` might be raised. """ def _get_matches(config): """Return the `match` and `match_dir` functions for `config`.""" match_func = re(config.match + '$').match match_dir_func = re(config.match_dir + '$').match return match_func, match_dir_func def _get_ignore_decorators(config): """Return the `ignore_decorators` as None or regex.""" if config.ignore_decorators: # not None and not '' ignore_decorators = re(config.ignore_decorators) else: ignore_decorators = None return ignore_decorators for name in self._arguments: if os.path.isdir(name): for root, dirs, filenames in os.walk(name): config = self._get_config(root) match, match_dir = _get_matches(config) ignore_decorators = _get_ignore_decorators(config) # Skip any dirs that do not match match_dir dirs[:] = [dir for dir in dirs if match_dir(dir)] for filename in filenames: if match(filename): full_path = os.path.join(root, filename) yield (full_path, list(config.checked_codes), ignore_decorators) else: config = self._get_config(name) match, _ = _get_matches(config) ignore_decorators = _get_ignore_decorators(config) if match(name): yield (name, list(config.checked_codes), ignore_decorators) # --------------------------- Private Methods ----------------------------- def _get_config_by_discovery(self, node): """Get a configuration for checking `node` by config discovery. Config discovery happens when no explicit config file is specified. The file system is searched for config files starting from the directory containing the file being checked, and up until the root directory of the project. See `_get_config` for further details. """ path = self._get_node_dir(node) if path in self._cache: return self._cache[path] config_file = self._get_config_file_in_folder(path) if config_file is None: parent_dir, tail = os.path.split(path) if tail: # No configuration file, simply take the parent's. config = self._get_config(parent_dir) else: # There's no configuration file and no parent directory. # Use the default configuration or the one given in the CLI. config = self._create_check_config(self._options) else: # There's a config file! Read it and merge if necessary. options, inherit = self._read_configuration_file(config_file) parent_dir, tail = os.path.split(path) if tail and inherit: # There is a parent dir and we should try to merge. parent_config = self._get_config(parent_dir) config = self._merge_configuration(parent_config, options) else: # No need to merge or parent dir does not exist. config = self._create_check_config(options) return config def _get_config(self, node): """Get and cache the run configuration for `node`. 
If no configuration exists (not local and not for the parent node), returns and caches a default configuration. The algorithm: ------------- * If the current directory's configuration exists in `self._cache` - return it. * If a configuration file does not exist in this directory: * If the directory is not a root directory: * Cache its configuration as this directory's and return it. * Else: * Cache a default configuration and return it. * Else: * Read the configuration file. * If a parent directory exists AND the configuration file allows inheritance: * Read the parent configuration by calling this function with the parent directory as `node`. * Merge the parent configuration with the current one and cache it. * If the user has specified one of `BASE_ERROR_SELECTION_OPTIONS` in the CLI - return the CLI configuration with the configuration match clauses * Set the `--add-select` and `--add-ignore` CLI configurations. """ if self._run_conf.config is None: log.debug('No config file specified, discovering.') config = self._get_config_by_discovery(node) else: log.debug('Using config file %r', self._run_conf.config) if not os.path.exists(self._run_conf.config): raise IllegalConfiguration('Configuration file {!r} specified ' 'via --config was not found.' .format(self._run_conf.config)) if None in self._cache: return self._cache[None] options, _ = self._read_configuration_file(self._run_conf.config) config = self._create_check_config(options) # Make the CLI always win final_config = {} for attr in CheckConfiguration._fields: cli_val = getattr(self._override_by_cli, attr) conf_val = getattr(config, attr) final_config[attr] = cli_val if cli_val is not None else conf_val config = CheckConfiguration(**final_config) self._set_add_options(config.checked_codes, self._options) # Handle caching if self._run_conf.config is not None: self._cache[None] = config else: self._cache[self._get_node_dir(node)] = config return config @staticmethod def _get_node_dir(node): """Return the absolute path of the directory of a filesystem node.""" path = os.path.abspath(node) return path if os.path.isdir(path) else os.path.dirname(path) def _read_configuration_file(self, path): """Try to read and parse `path` as a configuration file. If the configurations were illegal (checked with `self._validate_options`), raises `IllegalConfiguration`. Returns (options, should_inherit). 
""" parser = RawConfigParser() options = None should_inherit = True if parser.read(path) and self._get_section_name(parser): option_list = dict([(o.dest, o.type or o.action) for o in self._parser.option_list]) # First, read the default values new_options, _ = self._parse_args([]) # Second, parse the configuration section_name = self._get_section_name(parser) for opt in parser.options(section_name): if opt == 'inherit': should_inherit = parser.getboolean(section_name, opt) continue if opt.replace('_', '-') not in self.CONFIG_FILE_OPTIONS: log.warning("Unknown option '{}' ignored".format(opt)) continue normalized_opt = opt.replace('-', '_') opt_type = option_list[normalized_opt] if opt_type in ('int', 'count'): value = parser.getint(section_name, opt) elif opt_type == 'string': value = parser.get(section_name, opt) else: assert opt_type in ('store_true', 'store_false') value = parser.getboolean(section_name, opt) setattr(new_options, normalized_opt, value) # Third, fix the set-options options = self._fix_set_options(new_options) if options is not None: if not self._validate_options(options): raise IllegalConfiguration('in file: {}'.format(path)) return options, should_inherit def _merge_configuration(self, parent_config, child_options): """Merge parent config into the child options. The migration process requires an `options` object for the child in order to distinguish between mutually exclusive codes, add-select and add-ignore error codes. """ # Copy the parent error codes so we won't override them error_codes = copy.deepcopy(parent_config.checked_codes) if self._has_exclusive_option(child_options): error_codes = self._get_exclusive_error_codes(child_options) self._set_add_options(error_codes, child_options) kwargs = dict(checked_codes=error_codes) for key in ('match', 'match_dir', 'ignore_decorators'): kwargs[key] = \ getattr(child_options, key) or getattr(parent_config, key) return CheckConfiguration(**kwargs) def _parse_args(self, args=None, values=None): """Parse the options using `self._parser` and reformat the options.""" options, arguments = self._parser.parse_args(args, values) return self._fix_set_options(options), arguments @staticmethod def _create_run_config(options): """Create a `RunConfiguration` object from `options`.""" values = dict([(opt, getattr(options, opt)) for opt in RunConfiguration._fields]) return RunConfiguration(**values) @classmethod def _create_check_config(cls, options, use_defaults=True): """Create a `CheckConfiguration` object from `options`. If `use_defaults`, any of the match options that are `None` will be replaced with their default value and the default convention will be set for the checked codes. """ checked_codes = None if cls._has_exclusive_option(options) or use_defaults: checked_codes = cls._get_checked_errors(options) kwargs = dict(checked_codes=checked_codes) for key in ('match', 'match_dir', 'ignore_decorators'): kwargs[key] = getattr(cls, 'DEFAULT_{0}_RE'.format(key.upper())) \ if getattr(options, key) is None and use_defaults \ else getattr(options, key) return CheckConfiguration(**kwargs) @classmethod def _get_section_name(cls, parser): """Parse options from relevant section.""" for section_name in cls.POSSIBLE_SECTION_NAMES: if parser.has_section(section_name): return section_name return None @classmethod def _get_config_file_in_folder(cls, path): """Look for a configuration file in `path`. If exists return its full path, otherwise None. 
""" if os.path.isfile(path): path = os.path.dirname(path) for fn in cls.PROJECT_CONFIG_FILES: config = RawConfigParser() full_path = os.path.join(path, fn) if config.read(full_path) and cls._get_section_name(config): return full_path @classmethod def _get_exclusive_error_codes(cls, options): """Extract the error codes from the selected exclusive option.""" codes = set(ErrorRegistry.get_error_codes()) checked_codes = None if options.ignore is not None: ignored = cls._expand_error_codes(options.ignore) checked_codes = codes - ignored elif options.select is not None: checked_codes = cls._expand_error_codes(options.select) elif options.convention is not None: checked_codes = getattr(conventions, options.convention) # To not override the conventions nor the options - copy them. return copy.deepcopy(checked_codes) @classmethod def _set_add_options(cls, checked_codes, options): """Set `checked_codes` by the `add_ignore` or `add_select` options.""" checked_codes |= cls._expand_error_codes(options.add_select) checked_codes -= cls._expand_error_codes(options.add_ignore) @staticmethod def _expand_error_codes(code_parts): """Return an expanded set of error codes to ignore.""" codes = set(ErrorRegistry.get_error_codes()) expanded_codes = set() try: for part in code_parts: if len(part) < 4: for code in codes: if code.startswith(part): expanded_codes.add(code) else: expanded_codes.add(part) except TypeError as e: raise IllegalConfiguration(e) return expanded_codes @classmethod def _get_checked_errors(cls, options): """Extract the codes needed to be checked from `options`.""" checked_codes = cls._get_exclusive_error_codes(options) if checked_codes is None: checked_codes = cls.DEFAULT_CONVENTION cls._set_add_options(checked_codes, options) return checked_codes @classmethod def _validate_options(cls, options): """Validate the mutually exclusive options. Return `True` iff only zero or one of `BASE_ERROR_SELECTION_OPTIONS` was selected. """ for opt1, opt2 in \ itertools.permutations(cls.BASE_ERROR_SELECTION_OPTIONS, 2): if getattr(options, opt1) and getattr(options, opt2): log.error('Cannot pass both {} and {}. They are ' 'mutually exclusive.'.format(opt1, opt2)) return False if options.convention and options.convention not in conventions: log.error("Illegal convention '{}'. Possible conventions: {}" .format(options.convention, ', '.join(conventions.keys()))) return False return True @classmethod def _has_exclusive_option(cls, options): """Return `True` iff one or more exclusive options were selected.""" return any([getattr(options, opt) is not None for opt in cls.BASE_ERROR_SELECTION_OPTIONS]) @staticmethod def _fix_set_options(options): """Alter the set options from None/strings to sets in place.""" optional_set_options = ('ignore', 'select') mandatory_set_options = ('add_ignore', 'add_select') def _get_set(value_str): """Split `value_str` by the delimiter `,` and return a set. Removes any occurrences of '' in the set. 
""" return set(value_str.split(',')) - {''} for opt in optional_set_options: value = getattr(options, opt) if value is not None: setattr(options, opt, _get_set(value)) for opt in mandatory_set_options: value = getattr(options, opt) if value is None: value = '' if not isinstance(value, Set): value = _get_set(value) setattr(options, opt, value) return options @classmethod def _create_option_parser(cls): """Return an option parser to parse the command line arguments.""" from optparse import OptionParser parser = OptionParser( version=__version__, usage='Usage: pydocstyle [options] [<file|dir>...]') option = parser.add_option # Run configuration options option('-e', '--explain', action='store_true', default=False, help='show explanation of each error') option('-s', '--source', action='store_true', default=False, help='show source for each error') option('-d', '--debug', action='store_true', default=False, help='print debug information') option('-v', '--verbose', action='store_true', default=False, help='print status information') option('--count', action='store_true', default=False, help='print total number of errors to stdout') option('--config', metavar='<path>', default=None, help='use given config file and disable config discovery') # Error check options option('--select', metavar='<codes>', default=None, help='choose the basic list of checked errors by ' 'specifying which errors to check for (with a list of ' 'comma-separated error codes or prefixes). ' 'for example: --select=D101,D2') option('--ignore', metavar='<codes>', default=None, help='choose the basic list of checked errors by ' 'specifying which errors to ignore (with a list of ' 'comma-separated error codes or prefixes). ' 'for example: --ignore=D101,D2') option('--convention', metavar='<name>', default=None, help='choose the basic list of checked errors by specifying an ' 'existing convention. Possible conventions: {}' .format(', '.join(conventions))) option('--add-select', metavar='<codes>', default=None, help='amend the list of errors to check for by specifying ' 'more error codes to check.') option('--add-ignore', metavar='<codes>', default=None, help='amend the list of errors to check for by specifying ' 'more error codes to ignore.') # Match clauses option('--match', metavar='<pattern>', default=None, help=("check only files that exactly match <pattern> regular " "expression; default is --match='{}' which matches " "files that don't start with 'test_' but end with " "'.py'").format(cls.DEFAULT_MATCH_RE)) option('--match-dir', metavar='<pattern>', default=None, help=("search only dirs that exactly match <pattern> regular " "expression; default is --match-dir='{}', which " "matches all dirs that don't start with " "a dot").format(cls.DEFAULT_MATCH_DIR_RE)) # Decorators option('--ignore-decorators', metavar='<decorators>', default=None, help=("ignore any functions or methods that are decorated " "by a function with a name fitting the <decorators> " "regular expression; default is --ignore-decorators='{0}'" " which does not ignore any decorated functions." .format(cls.DEFAULT_IGNORE_DECORATORS_RE))) return parser # Check configuration - used by the ConfigurationParser class. CheckConfiguration = namedtuple('CheckConfiguration', ('checked_codes', 'match', 'match_dir', 'ignore_decorators')) class IllegalConfiguration(Exception): """An exception for illegal configurations.""" pass # General configurations for pydocstyle run. 
RunConfiguration = namedtuple('RunConfiguration', ('explain', 'source', 'debug', 'verbose', 'count', 'config'))
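
# --------------------------------------------------------------------------
# Editor's illustration (not part of pydocstyle's config.py): a minimal,
# self-contained sketch of the prefix expansion performed by
# `_expand_error_codes` above. The `known_codes` set below is a hypothetical
# stand-in for the registry returned by `ErrorRegistry.get_error_codes()`;
# any part shorter than a full four-character code is treated as a prefix.

def expand_error_codes(code_parts, known_codes):
    """Expand short prefixes in `code_parts` against `known_codes`."""
    expanded = set()
    for part in code_parts:
        if len(part) < 4:
            # Prefix such as 'D2': collect every known code starting with it.
            expanded |= {code for code in known_codes
                         if code.startswith(part)}
        else:
            # Full code: keep it verbatim, just as the original does.
            expanded.add(part)
    return expanded


if __name__ == '__main__':
    demo_codes = {'D200', 'D201', 'D301', 'D400'}
    # 'D2' expands to D200 and D201; the full code 'D301' passes through.
    assert expand_error_codes({'D2', 'D301'}, demo_codes) == \
        {'D200', 'D201', 'D301'}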
24,331
Python
.py
493
37.750507
79
0.599232
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)
28,299
violations.py
DamnWidget_anaconda/anaconda_lib/linting/pydocstyle/violations.py
"""Docstring violation definition.""" from itertools import dropwhile from functools import partial from collections import namedtuple from .utils import is_blank __all__ = ('Error', 'ErrorRegistry') ErrorParams = namedtuple('ErrorParams', ['code', 'short_desc', 'context']) class Error(object): """Error in docstring style.""" # Options that define how errors are printed: explain = False source = False def __init__(self, code, short_desc, context, *parameters): """Initialize the object. `parameters` are specific to the created error. """ self.code = code self.short_desc = short_desc self.context = context self.parameters = parameters self.definition = None self.explanation = None def set_context(self, definition, explanation): """Set the source code context for this error.""" self.definition = definition self.explanation = explanation filename = property(lambda self: self.definition.module.name) line = property(lambda self: self.definition.start) @property def message(self): """Return the message to print to the user.""" ret = '{}: {}'.format(self.code, self.short_desc) if self.context is not None: specific_error_msg = self.context.format(*self.parameters) ret += ' ({})'.format(specific_error_msg) return ret @property def lines(self): """Return the source code lines for this error.""" source = '' lines = self.definition.source offset = self.definition.start lines_stripped = list(reversed(list(dropwhile(is_blank, reversed(lines))))) numbers_width = len(str(offset + len(lines_stripped))) line_format = '{{:{}}}:{{}}'.format(numbers_width) for n, line in enumerate(lines_stripped): if line: line = ' ' + line source += line_format.format(n + offset, line) if n > 5: source += ' ...\n' break return source def __str__(self): self.explanation = '\n'.join(l for l in self.explanation.split('\n') if not is_blank(l)) template = '{filename}:{line} {definition}:\n {message}' if self.source and self.explain: template += '\n\n{explanation}\n\n{lines}\n' elif self.source and not self.explain: template += '\n\n{lines}\n' elif self.explain and not self.source: template += '\n\n{explanation}\n\n' return template.format(**dict((name, getattr(self, name)) for name in ['filename', 'line', 'definition', 'message', 'explanation', 'lines'])) def __repr__(self): return str(self) def __lt__(self, other): return (self.filename, self.line) < (other.filename, other.line) class ErrorRegistry(object): """A registry of all error codes, divided to groups.""" groups = [] class ErrorGroup(object): """A group of similarly themed errors.""" def __init__(self, prefix, name): """Initialize the object. `Prefix` should be the common prefix for errors in this group, e.g., "D1". `name` is the name of the group (its subject). 
""" self.prefix = prefix self.name = name self.errors = [] def create_error(self, error_code, error_desc, error_context=None): """Create an error, register it to this group and return it.""" # TODO: check prefix error_params = ErrorParams(error_code, error_desc, error_context) factory = partial(Error, *error_params) self.errors.append(error_params) return factory @classmethod def create_group(cls, prefix, name): """Create a new error group and return it.""" group = cls.ErrorGroup(prefix, name) cls.groups.append(group) return group @classmethod def get_error_codes(cls): """Yield all registered codes.""" for group in cls.groups: for error in group.errors: yield error.code @classmethod def to_rst(cls): """Output the registry as reStructuredText, for documentation.""" sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n' blank_line = '|' + 78 * ' ' + '|\n' table = '' for group in cls.groups: table += sep_line table += blank_line table += '|' + '**{}**'.format(group.name).center(78) + '|\n' table += blank_line for error in group.errors: table += sep_line table += ('|' + error.code.center(6) + '| ' + error.short_desc.ljust(70) + '|\n') table += sep_line return table D1xx = ErrorRegistry.create_group('D1', 'Missing Docstrings') D100 = D1xx.create_error('D100', 'Missing docstring in public module') D101 = D1xx.create_error('D101', 'Missing docstring in public class') D102 = D1xx.create_error('D102', 'Missing docstring in public method') D103 = D1xx.create_error('D103', 'Missing docstring in public function') D104 = D1xx.create_error('D104', 'Missing docstring in public package') D105 = D1xx.create_error('D105', 'Missing docstring in magic method') D106 = D1xx.create_error('D106', 'Missing docstring in public nested class') D2xx = ErrorRegistry.create_group('D2', 'Whitespace Issues') D200 = D2xx.create_error('D200', 'One-line docstring should fit on one line ' 'with quotes', 'found {0}') D201 = D2xx.create_error('D201', 'No blank lines allowed before function ' 'docstring', 'found {0}') D202 = D2xx.create_error('D202', 'No blank lines allowed after function ' 'docstring', 'found {0}') D203 = D2xx.create_error('D203', '1 blank line required before class ' 'docstring', 'found {0}') D204 = D2xx.create_error('D204', '1 blank line required after class ' 'docstring', 'found {0}') D205 = D2xx.create_error('D205', '1 blank line required between summary line ' 'and description', 'found {0}') D206 = D2xx.create_error('D206', 'Docstring should be indented with spaces, ' 'not tabs') D207 = D2xx.create_error('D207', 'Docstring is under-indented') D208 = D2xx.create_error('D208', 'Docstring is over-indented') D209 = D2xx.create_error('D209', 'Multi-line docstring closing quotes should ' 'be on a separate line') D210 = D2xx.create_error('D210', 'No whitespaces allowed surrounding ' 'docstring text') D211 = D2xx.create_error('D211', 'No blank lines allowed before class ' 'docstring', 'found {0}') D212 = D2xx.create_error('D212', 'Multi-line docstring summary should start ' 'at the first line') D213 = D2xx.create_error('D213', 'Multi-line docstring summary should start ' 'at the second line') D214 = D2xx.create_error('D214', 'Section is over-indented', '{0!r}') D215 = D2xx.create_error('D215', 'Section underline is over-indented', 'in section {0!r}') D3xx = ErrorRegistry.create_group('D3', 'Quotes Issues') D300 = D3xx.create_error('D300', 'Use """triple double quotes"""', 'found {0}-quotes') D301 = D3xx.create_error('D301', 'Use r""" if any backslashes in a docstring') D302 = D3xx.create_error('D302', 'Use u""" for 

D4xx = ErrorRegistry.create_group('D4', 'Docstring Content Issues')
D400 = D4xx.create_error('D400', 'First line should end with a period',
                         'not {0!r}')
D401 = D4xx.create_error('D401', 'First line should be in imperative mood',
                         "'{0}', not '{1}'")
D401b = D4xx.create_error('D401', 'First line should be in imperative mood; '
                                  'try rephrasing', "found '{0}'")
D402 = D4xx.create_error('D402', 'First line should not be the function\'s '
                                 '"signature"')
D403 = D4xx.create_error('D403', 'First word of the first line should be '
                                 'properly capitalized', '{0!r}, not {1!r}')
D404 = D4xx.create_error('D404', 'First word of the docstring should not '
                                 'be `This`')
D405 = D4xx.create_error('D405', 'Section name should be properly capitalized',
                         '{0!r}, not {1!r}')
D406 = D4xx.create_error('D406', 'Section name should end with a newline',
                         '{0!r}, not {1!r}')
D407 = D4xx.create_error('D407', 'Missing dashed underline after section',
                         '{0!r}')
D408 = D4xx.create_error('D408', 'Section underline should be in the line '
                                 'following the section\'s name', '{0!r}')
D409 = D4xx.create_error('D409', 'Section underline should match the length '
                                 'of its name',
                         'Expected {0!r} dashes in section {1!r}, got {2!r}')
D410 = D4xx.create_error('D410', 'Missing blank line after section', '{0!r}')
D411 = D4xx.create_error('D411', 'Missing blank line before section', '{0!r}')
D412 = D4xx.create_error('D412', 'No blank lines allowed between a section '
                                 'header and its content', '{0!r}')
D413 = D4xx.create_error('D413', 'Missing blank line after last section',
                         '{0!r}')
D414 = D4xx.create_error('D414', 'Section has no content', '{0!r}')


class AttrDict(dict):
    """A dict whose keys can also be read as attributes."""

    def __getattr__(self, item):
        return self[item]


all_errors = set(ErrorRegistry.get_error_codes())


conventions = AttrDict({
    'pep257': all_errors - {'D203', 'D212', 'D213', 'D214', 'D215', 'D404',
                            'D405', 'D406', 'D407', 'D408', 'D409', 'D410',
                            'D411'},
    'numpy': all_errors - {'D203', 'D212', 'D213', 'D402', 'D413'}
})
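
# --------------------------------------------------------------------------
# Editor's illustration (not part of pydocstyle's violations.py): a minimal,
# self-contained sketch of the factory pattern used by `create_error` above.
# `create_error` stores an `ErrorParams` tuple and returns a
# `functools.partial` over `Error`, so a module-level name such as D400 is a
# factory that, when called with its context parameters, yields an error
# whose `message` interpolates them. `DemoError`/`DemoParams` here are
# simplified stand-ins, not the real classes.
from collections import namedtuple
from functools import partial

DemoParams = namedtuple('DemoParams', ['code', 'short_desc', 'context'])


class DemoError(object):
    """Simplified stand-in for `Error`, keeping only message formatting."""

    def __init__(self, code, short_desc, context, *parameters):
        self.code = code
        self.short_desc = short_desc
        self.context = context
        self.parameters = parameters

    @property
    def message(self):
        ret = '{}: {}'.format(self.code, self.short_desc)
        if self.context is not None:
            ret += ' ({})'.format(self.context.format(*self.parameters))
        return ret


# Mirrors: D400 = D4xx.create_error('D400', '...', 'not {0!r}')
demo_D400 = partial(DemoError, *DemoParams(
    'D400', 'First line should end with a period', 'not {0!r}'))

if __name__ == '__main__':
    err = demo_D400('First line')
    # Prints: D400: First line should end with a period (not 'First line')
    print(err.message)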
10,359
Python
.py
206
39.165049
79
0.572177
DamnWidget/anaconda
2,213
260
184
GPL-3.0
9/5/2024, 5:14:06 PM (Europe/Amsterdam)