# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/utils.py
""" A universal module with functions / classes without dependencies. """
import functools
import re
import os
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def to_tuple(func):
def wrapper(*args, **kwargs):
return tuple(func(*args, **kwargs))
return wrapper
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
class UncaughtAttributeError(Exception):
"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
Therefore this class originally had to be derived from `BaseException`
    instead of `Exception`. But because I removed the relevant `hasattr`
    calls from the code base, we can now switch back to `Exception`.
"""
def safe_property(func):
return property(reraise_uncaught(func))
def reraise_uncaught(func):
"""
Re-throw uncaught `AttributeError`.
    Usage: Put ``@reraise_uncaught`` in front of a function
    which is **not** supposed to raise `AttributeError`.
    `AttributeError` is easily caught by `hasattr` and by plain
    ``except AttributeError`` clauses. This becomes a problem when you use
    a lot of "dynamic" attributes (e.g., using ``@property``), because you
    can't distinguish whether the attribute really does not exist or whether
    some code inside the "dynamic" attribute raised that error. In well
    written code such errors should not exist, but getting there is very
    difficult. This decorator helps us get there by changing
    `AttributeError` to `UncaughtAttributeError`, avoiding the unexpected
    catch. This helps us notice bugs earlier and facilitates debugging.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except AttributeError as e:
raise UncaughtAttributeError(e) from e
return wrapper
class PushBackIterator:
def __init__(self, iterator):
self.pushes = []
self.iterator = iterator
self.current = None
def push_back(self, value):
self.pushes.append(value)
def __iter__(self):
return self
def __next__(self):
if self.pushes:
self.current = self.pushes.pop()
else:
self.current = next(self.iterator)
return self.current
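# Illustrative usage sketch (not part of the original module), assuming only
# the helpers defined above:
if __name__ == '__main__':
    it = PushBackIterator(iter([1, 2, 3]))
    first = next(it)
    it.push_back(first)
    # Pushed-back values are yielded before the wrapped iterator continues.
    assert next(it) == first
    assert list(it) == [2, 3]
    # `unite` flattens an iterable of iterables into a set.
    assert unite([[1, 2], [2, 3]]) == {1, 2, 3}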

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/cache.py
"""
- the popular ``_memoize_default`` works like a typical memoize and returns the
default otherwise.
- ``CachedMetaClass`` uses ``_memoize_default`` to do the same with classes.
"""
from functools import wraps
from jedi import debug
_NO_DEFAULT = object()
_RECURSION_SENTINEL = object()
def _memoize_default(default=_NO_DEFAULT, inference_state_is_first_arg=False,
second_arg_is_inference_state=False):
""" This is a typical memoization decorator, BUT there is one difference:
To prevent recursion it sets defaults.
    Preventing recursion is in this case a much bigger win than speed. I
    don't think there is a big speed difference, but there are many cases
    where recursion could happen (think about ``a = b; b = a``).
"""
def func(function):
def wrapper(obj, *args, **kwargs):
# TODO These checks are kind of ugly and slow.
if inference_state_is_first_arg:
cache = obj.memoize_cache
elif second_arg_is_inference_state:
cache = args[0].memoize_cache # needed for meta classes
else:
cache = obj.inference_state.memoize_cache
try:
memo = cache[function]
except KeyError:
cache[function] = memo = {}
key = (obj, args, frozenset(kwargs.items()))
if key in memo:
return memo[key]
else:
if default is not _NO_DEFAULT:
memo[key] = default
rv = function(obj, *args, **kwargs)
memo[key] = rv
return rv
return wrapper
return func
def inference_state_function_cache(default=_NO_DEFAULT):
def decorator(func):
return _memoize_default(default=default, inference_state_is_first_arg=True)(func)
return decorator
def inference_state_method_cache(default=_NO_DEFAULT):
def decorator(func):
return _memoize_default(default=default)(func)
return decorator
def inference_state_as_method_param_cache():
def decorator(call):
return _memoize_default(second_arg_is_inference_state=True)(call)
return decorator
class CachedMetaClass(type):
"""
    This is basically almost the same as the decorator above; it just caches
    class initializations. Either you do it this way or with decorators, but
    with decorators you lose class access (``isinstance``, etc.).
"""
@inference_state_as_method_param_cache()
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
def inference_state_method_generator_cache():
"""
    This is a special memoizer. It memoizes generators and also checks for
    recursion errors and returns no further iterator elements in that case.
"""
def func(function):
@wraps(function)
def wrapper(obj, *args, **kwargs):
cache = obj.inference_state.memoize_cache
try:
memo = cache[function]
except KeyError:
cache[function] = memo = {}
key = (obj, args, frozenset(kwargs.items()))
if key in memo:
actual_generator, cached_lst = memo[key]
else:
actual_generator = function(obj, *args, **kwargs)
cached_lst = []
memo[key] = actual_generator, cached_lst
i = 0
while True:
try:
next_element = cached_lst[i]
if next_element is _RECURSION_SENTINEL:
debug.warning('Found a generator recursion for %s' % obj)
# This means we have hit a recursion.
return
except IndexError:
cached_lst.append(_RECURSION_SENTINEL)
next_element = next(actual_generator, None)
if next_element is None:
cached_lst.pop()
return
cached_lst[-1] = next_element
yield next_element
i += 1
return wrapper
return func
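# Illustrative sketch (not part of the original module): the recursion-default
# idea used by `_memoize_default`, reduced to a plain function cache. Seeding
# the memo with a default *before* calling the function means a recursive
# re-entry with the same key sees the default instead of looping forever.
def _demo_memoize_default(default):
    def decorator(function):
        memo = {}
        def wrapper(*args):
            if args in memo:
                return memo[args]
            memo[args] = default  # placeholder guards against recursion
            memo[args] = result = function(*args)
            return result
        return wrapper
    return decorator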

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/recursion.py
"""
Recursions are the recipe of |jedi| to conquer Python code. However, someone
must stop recursions going mad. Some settings are here to make |jedi| stop at
the right time. You can read more about them :ref:`here <settings-recursion>`.
Next to the internal ``jedi.inference.cache`` this module also makes |jedi| not
thread-safe, because ``execution_recursion_decorator`` uses class variables to
count the function calls.
.. _settings-recursion:
Settings
~~~~~~~~~~
Recursion settings are important if you don't want extremely
recursive python code to go absolutely crazy.
The default values are based on experiments while completing the |jedi| library
itself (inception!). But I don't think there's any other Python library that
uses recursion in a similarly extreme way. Completion should also be fast and
therefore the quality might not always be maximal.
.. autodata:: recursion_limit
.. autodata:: total_function_execution_limit
.. autodata:: per_function_execution_limit
.. autodata:: per_function_recursion_limit
"""
from contextlib import contextmanager
from jedi import debug
from jedi.inference.base_value import NO_VALUES
recursion_limit = 15
"""
Like :func:`sys.getrecursionlimit()`, just for |jedi|.
"""
total_function_execution_limit = 200
"""
This is a hard limit of how many non-builtin functions can be executed.
"""
per_function_execution_limit = 6
"""
The maximal amount of times a specific function may be executed.
"""
per_function_recursion_limit = 2
"""
A function may not be executed more than this number of times recursively.
"""
class RecursionDetector:
def __init__(self):
self.pushed_nodes = []
@contextmanager
def execution_allowed(inference_state, node):
"""
    A context manager to detect recursions in statements. In a recursion, a
    statement at the same place in the same module may not be executed two
    times.
"""
pushed_nodes = inference_state.recursion_detector.pushed_nodes
if node in pushed_nodes:
        debug.warning('caught stmt recursion: %s @%s', node,
getattr(node, 'start_pos', None))
yield False
else:
try:
pushed_nodes.append(node)
yield True
finally:
pushed_nodes.pop()
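# Illustrative usage sketch (an assumption, not part of the original module):
# callers consume the yielded bool to decide whether recursing into `node`
# is safe, e.g.:
#
#     with execution_allowed(inference_state, node) as allowed:
#         result = infer(node) if allowed else NO_VALUES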
def execution_recursion_decorator(default=NO_VALUES):
def decorator(func):
def wrapper(self, **kwargs):
detector = self.inference_state.execution_recursion_detector
limit_reached = detector.push_execution(self)
try:
if limit_reached:
result = default
else:
result = func(self, **kwargs)
finally:
detector.pop_execution()
return result
return wrapper
return decorator
class ExecutionRecursionDetector:
"""
Catches recursions of executions.
"""
def __init__(self, inference_state):
self._inference_state = inference_state
self._recursion_level = 0
self._parent_execution_funcs = []
self._funcdef_execution_counts = {}
self._execution_count = 0
def pop_execution(self):
self._parent_execution_funcs.pop()
self._recursion_level -= 1
def push_execution(self, execution):
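        # Returns True when a recursion/execution limit has been reached and
        # the caller should abort this execution (falling back to a default).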
funcdef = execution.tree_node
# These two will be undone in pop_execution.
self._recursion_level += 1
self._parent_execution_funcs.append(funcdef)
module_context = execution.get_root_context()
if module_context.is_builtins_module():
# We have control over builtins so we know they are not recursing
# like crazy. Therefore we just let them execute always, because
# they usually just help a lot with getting good results.
return False
if self._recursion_level > recursion_limit:
debug.warning('Recursion limit (%s) reached', recursion_limit)
return True
if self._execution_count >= total_function_execution_limit:
debug.warning('Function execution limit (%s) reached', total_function_execution_limit)
return True
self._execution_count += 1
if self._funcdef_execution_counts.setdefault(funcdef, 0) >= per_function_execution_limit:
if module_context.py__name__() == 'typing':
return False
debug.warning(
'Per function execution limit (%s) reached: %s',
per_function_execution_limit,
funcdef
)
return True
self._funcdef_execution_counts[funcdef] += 1
if self._parent_execution_funcs.count(funcdef) > per_function_recursion_limit:
debug.warning(
'Per function recursion limit (%s) reached: %s',
per_function_recursion_limit,
funcdef
)
return True
return False

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/__init__.py
"""
Type inference of Python code in |jedi| is based on three assumptions:
* The code uses as few side effects as possible. Jedi understands certain
list/tuple/set modifications, but there's no guarantee that Jedi detects
everything (list.append in different modules for example).
* No magic is being used:
- metaclasses
- ``setattr()`` / ``__import__()``
- writing to ``globals()``, ``locals()``, ``object.__dict__``
* The programmer is not a total dick, e.g. like `this
<https://github.com/davidhalter/jedi/issues/24>`_ :-)
The actual algorithm is based on a principle I call lazy type inference. That
said, the typical entry point for static analysis is calling
``infer_expr_stmt``. There's separate logic for autocompletion in the API; the
inference_state is all about inferring an expression.
TODO this paragraph is not what jedi does anymore, it's similar, but not the
same.
Now you need to understand what follows after ``infer_expr_stmt``. Let's
make an example::
import datetime
datetime.date.toda# <-- cursor here
First of all, this module doesn't care about completion. It really just cares
about ``datetime.date``. At the end of the procedure ``infer_expr_stmt`` will
return the ``date`` class.
To *visualize* this (simplified):
- ``InferenceState.infer_expr_stmt`` doesn't do much, because there's no assignment.
- ``Context.infer_node`` cares for resolving the dotted path
- ``InferenceState.find_types`` searches for global definitions of datetime, which
it finds in the definition of an import, by scanning the syntax tree.
- Using the import logic, the datetime module is found.
- Now ``find_types`` is called again by ``infer_node`` to find ``date``
inside the datetime module.
Now what would happen if we wanted ``datetime.date.foo.bar``? Two more
calls to ``find_types``. However the second call would be ignored, because the
first one would return nothing (there's no foo attribute in ``date``).
What if the import would contain another ``ExprStmt`` like this::
from foo import bar
Date = bar.baz
Well... You get it. Just another ``infer_expr_stmt`` recursion. It's really
easy. Python can obviously get way more complicated than this. To understand
tuple assignments, list comprehensions and everything else, a lot more code had
to be written.
Jedi has been tested very well, so you can just start modifying code. It's best
to write your own test first for your "new" feature. Don't be scared of
breaking stuff. As long as the tests pass, you're most likely to be fine.
I need to mention now that lazy type inference is really good because it
only *infers* what needs to be *inferred*. All the statements and modules
that are not used are just being ignored.
"""
import parso
from jedi.file_io import FileIO
from jedi import debug
from jedi import settings
from jedi.inference import imports
from jedi.inference import recursion
from jedi.inference.cache import inference_state_function_cache
from jedi.inference import helpers
from jedi.inference.names import TreeNameDefinition
from jedi.inference.base_value import ContextualizedNode, \
ValueSet, iterate_values
from jedi.inference.value import ClassValue, FunctionValue
from jedi.inference.syntax_tree import infer_expr_stmt, \
check_tuple_assignments, tree_name_to_values
from jedi.inference.imports import follow_error_node_imports_if_possible
from jedi.plugins import plugin_manager
class InferenceState:
def __init__(self, project, environment=None, script_path=None):
if environment is None:
environment = project.get_environment()
self.environment = environment
self.script_path = script_path
self.compiled_subprocess = environment.get_inference_state_subprocess(self)
self.grammar = environment.get_grammar()
self.latest_grammar = parso.load_grammar(version='3.7')
self.memoize_cache = {} # for memoize decorators
self.module_cache = imports.ModuleCache() # does the job of `sys.modules`.
self.stub_module_cache = {} # Dict[Tuple[str, ...], Optional[ModuleValue]]
self.compiled_cache = {} # see `inference.compiled.create()`
self.inferred_element_counts = {}
self.mixed_cache = {} # see `inference.compiled.mixed._create()`
self.analysis = []
self.dynamic_params_depth = 0
self.is_analysis = False
self.project = project
self.access_cache = {}
self.allow_descriptor_getattr = False
self.flow_analysis_enabled = True
self.reset_recursion_limitations()
def import_module(self, import_names, sys_path=None, prefer_stubs=True):
return imports.import_module_by_names(
self, import_names, sys_path, prefer_stubs=prefer_stubs)
@staticmethod
@plugin_manager.decorate()
def execute(value, arguments):
debug.dbg('execute: %s %s', value, arguments)
with debug.increase_indent_cm():
value_set = value.py__call__(arguments=arguments)
debug.dbg('execute result: %s in %s', value_set, value)
return value_set
    # mypy doesn't support decorated properties (https://github.com/python/mypy/issues/1362)
@property # type: ignore[misc]
@inference_state_function_cache()
def builtins_module(self):
module_name = 'builtins'
builtins_module, = self.import_module((module_name,), sys_path=())
return builtins_module
@property # type: ignore[misc]
@inference_state_function_cache()
def typing_module(self):
typing_module, = self.import_module(('typing',))
return typing_module
def reset_recursion_limitations(self):
self.recursion_detector = recursion.RecursionDetector()
self.execution_recursion_detector = recursion.ExecutionRecursionDetector(self)
def get_sys_path(self, **kwargs):
"""Convenience function"""
return self.project._get_sys_path(self, **kwargs)
def infer(self, context, name):
def_ = name.get_definition(import_name_always=True)
if def_ is not None:
type_ = def_.type
is_classdef = type_ == 'classdef'
if is_classdef or type_ == 'funcdef':
if is_classdef:
c = ClassValue(self, context, name.parent)
else:
c = FunctionValue.from_context(context, name.parent)
return ValueSet([c])
if type_ == 'expr_stmt':
is_simple_name = name.parent.type not in ('power', 'trailer')
if is_simple_name:
return infer_expr_stmt(context, def_, name)
if type_ == 'for_stmt':
container_types = context.infer_node(def_.children[3])
cn = ContextualizedNode(context, def_.children[3])
for_types = iterate_values(container_types, cn)
n = TreeNameDefinition(context, name)
return check_tuple_assignments(n, for_types)
if type_ in ('import_from', 'import_name'):
return imports.infer_import(context, name)
if type_ == 'with_stmt':
return tree_name_to_values(self, context, name)
elif type_ == 'param':
return context.py__getattribute__(name.value, position=name.end_pos)
elif type_ == 'namedexpr_test':
return context.infer_node(def_)
else:
result = follow_error_node_imports_if_possible(context, name)
if result is not None:
return result
return helpers.infer_call_of_leaf(context, name)
def parse_and_get_code(self, code=None, path=None,
use_latest_grammar=False, file_io=None, **kwargs):
if code is None:
if file_io is None:
file_io = FileIO(path)
code = file_io.read()
# We cannot just use parso, because it doesn't use errors='replace'.
code = parso.python_bytes_to_unicode(code, encoding='utf-8', errors='replace')
if len(code) > settings._cropped_file_size:
code = code[:settings._cropped_file_size]
grammar = self.latest_grammar if use_latest_grammar else self.grammar
return grammar.parse(code=code, path=path, file_io=file_io, **kwargs), code
def parse(self, *args, **kwargs):
return self.parse_and_get_code(*args, **kwargs)[0]
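# Illustrative usage sketch (an assumption, not part of the original module):
# building an InferenceState by hand, roughly as jedi's API layer does.
#
#     from jedi.api.project import Project
#     state = InferenceState(Project('.'))
#     module_node, code = state.parse_and_get_code(code='import os\n')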

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/parser_cache.py
from jedi.inference.cache import inference_state_function_cache
@inference_state_function_cache()
def get_yield_exprs(inference_state, funcdef):
return list(funcdef.iter_yield_exprs())

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/docstrings.py
"""
Docstrings are another source of information for functions and classes.
:mod:`jedi.inference.dynamic_params` tries to find all executions of functions,
while the docstring parsing is much easier. There are three different types of
docstrings that |jedi| understands:
- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_
- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_
- `Numpydoc <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
For example, the sphinx annotation ``:type foo: str`` clearly states that the
type of ``foo`` is ``str``.
As an addition to parameter searching, this module also provides return
annotations.
"""
import re
import warnings
from parso import parse, ParserSyntaxError
from jedi import debug
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.base_value import iterator_to_value_set, ValueSet, \
NO_VALUES
from jedi.inference.lazy_value import LazyKnownValues
DOCSTRING_PARAM_PATTERNS = [
r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type
r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc
]
DOCSTRING_RETURN_PATTERNS = [
re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc
]
REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
_numpy_doc_string_cache = None
def _get_numpy_doc_string_cls():
global _numpy_doc_string_cache
if isinstance(_numpy_doc_string_cache, (ImportError, SyntaxError)):
raise _numpy_doc_string_cache
from numpydoc.docscrape import NumpyDocString # type: ignore[import]
_numpy_doc_string_cache = NumpyDocString
return _numpy_doc_string_cache
def _search_param_in_numpydocstr(docstr, param_str):
"""Search `docstr` (in numpydoc format) for type(-s) of `param_str`."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
# This is a non-public API. If it ever changes we should be
# prepared and return gracefully.
params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
except Exception:
return []
for p_name, p_type, p_descr in params:
if p_name == param_str:
m = re.match(r'([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
if m:
p_type = m.group(1)
return list(_expand_typestr(p_type))
return []
def _search_return_in_numpydocstr(docstr):
"""
Search `docstr` (in numpydoc format) for type(-s) of function returns.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
doc = _get_numpy_doc_string_cls()(docstr)
except Exception:
return
try:
# This is a non-public API. If it ever changes we should be
# prepared and return gracefully.
returns = doc._parsed_data['Returns']
returns += doc._parsed_data['Yields']
except Exception:
return
for r_name, r_type, r_descr in returns:
        # Return names are optional; if the name is omitted, numpydoc stores
        # the type in the name field.
if not r_type:
r_type = r_name
yield from _expand_typestr(r_type)
def _expand_typestr(type_str):
"""
Attempts to interpret the possible types in `type_str`
"""
# Check if alternative types are specified with 'or'
if re.search(r'\bor\b', type_str):
for t in type_str.split('or'):
yield t.split('of')[0].strip()
    # Check if it looks like "list of `type`" and reduce it to the container
    # type.
elif re.search(r'\bof\b', type_str):
yield type_str.split('of')[0]
    # Check if the type is a set of valid literal values, e.g. {'C', 'F', 'A'}
elif type_str.startswith('{'):
node = parse(type_str, version='3.7').children[0]
if node.type == 'atom':
for leaf in getattr(node.children[1], "children", []):
if leaf.type == 'number':
if '.' in leaf.value:
yield 'float'
else:
yield 'int'
elif leaf.type == 'string':
if 'b' in leaf.string_prefix.lower():
yield 'bytes'
else:
yield 'str'
# Ignore everything else.
# Otherwise just work with what we have.
else:
yield type_str
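# Illustrative examples (assumptions, not part of the original module):
#
#     list(_expand_typestr('int or str'))  # -> ['int', 'str']
#     list(_expand_typestr('tuple'))       # -> ['tuple']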
def _search_param_in_docstr(docstr, param_str):
"""
Search `docstr` for type(-s) of `param_str`.
>>> _search_param_in_docstr(':type param: int', 'param')
['int']
>>> _search_param_in_docstr('@type param: int', 'param')
['int']
>>> _search_param_in_docstr(
... ':type param: :class:`threading.Thread`', 'param')
['threading.Thread']
>>> bool(_search_param_in_docstr('no document', 'param'))
False
>>> _search_param_in_docstr(':param int param: some description', 'param')
['int']
"""
# look at #40 to see definitions of those params
patterns = [re.compile(p % re.escape(param_str))
for p in DOCSTRING_PARAM_PATTERNS]
for pattern in patterns:
match = pattern.search(docstr)
if match:
return [_strip_rst_role(match.group(1))]
return _search_param_in_numpydocstr(docstr, param_str)
def _strip_rst_role(type_str):
"""
    Strip off the part that looks like a ReST role in `type_str`.
>>> _strip_rst_role(':class:`ClassName`') # strip off :class:
'ClassName'
>>> _strip_rst_role(':py:obj:`module.Object`') # works with domain
'module.Object'
>>> _strip_rst_role('ClassName') # do nothing when not ReST role
'ClassName'
See also:
http://sphinx-doc.org/domains.html#cross-referencing-python-objects
"""
match = REST_ROLE_PATTERN.match(type_str)
if match:
return match.group(1)
else:
return type_str
def _infer_for_statement_string(module_context, string):
if string is None:
return []
potential_imports = re.findall(r'((?:\w+\.)*\w+)\.', string)
# Try to import module part in dotted name.
# (e.g., 'threading' in 'threading.Thread').
imports = "\n".join(f"import {p}" for p in potential_imports)
string = f'{imports}\n{string}'
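    # e.g. for the docstring type 'threading.Thread' this builds:
    #     import threading
    #     threading.Thread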
debug.dbg('Parse docstring code %s', string, color='BLUE')
grammar = module_context.inference_state.grammar
try:
module = grammar.parse(string, error_recovery=False)
except ParserSyntaxError:
return []
try:
# It's not the last item, because that's an end marker.
stmt = module.children[-2]
except (AttributeError, IndexError):
return []
if stmt.type not in ('name', 'atom', 'atom_expr'):
return []
# Here we basically use a fake module that also uses the filters in
# the actual module.
from jedi.inference.docstring_utils import DocstringModule
m = DocstringModule(
in_module_context=module_context,
inference_state=module_context.inference_state,
module_node=module,
code_lines=[],
)
return list(_execute_types_in_stmt(m.as_context(), stmt))
def _execute_types_in_stmt(module_context, stmt):
"""
Executing all types or general elements that we find in a statement. This
doesn't include tuple, list and dict literals, because the stuff they
contain is executed. (Used as type information).
"""
definitions = module_context.infer_node(stmt)
return ValueSet.from_sets(
_execute_array_values(module_context.inference_state, d)
for d in definitions
)
def _execute_array_values(inference_state, array):
"""
Tuples indicate that there's not just one return value, but the listed
ones. `(str, int)` means that it returns a tuple with both types.
"""
from jedi.inference.value.iterable import SequenceLiteralValue, FakeTuple, FakeList
if isinstance(array, SequenceLiteralValue) and array.array_type in ('tuple', 'list'):
values = []
for lazy_value in array.py__iter__():
objects = ValueSet.from_sets(
_execute_array_values(inference_state, typ)
for typ in lazy_value.infer()
)
values.append(LazyKnownValues(objects))
cls = FakeTuple if array.array_type == 'tuple' else FakeList
return {cls(inference_state, values)}
else:
return array.execute_annotation()
@inference_state_method_cache()
def infer_param(function_value, param):
def infer_docstring(docstring):
return ValueSet(
p
for param_str in _search_param_in_docstr(docstring, param.name.value)
for p in _infer_for_statement_string(module_context, param_str)
)
module_context = function_value.get_root_context()
func = param.get_parent_function()
if func.type == 'lambdef':
return NO_VALUES
types = infer_docstring(function_value.py__doc__())
if function_value.is_bound_method() \
and function_value.py__name__() == '__init__':
types |= infer_docstring(function_value.class_context.py__doc__())
debug.dbg('Found param types for docstring: %s', types, color='BLUE')
return types
@inference_state_method_cache()
@iterator_to_value_set
def infer_return_types(function_value):
def search_return_in_docstr(code):
for p in DOCSTRING_RETURN_PATTERNS:
match = p.search(code)
if match:
yield _strip_rst_role(match.group(1))
# Check for numpy style return hint
yield from _search_return_in_numpydocstr(code)
for type_str in search_return_in_docstr(function_value.py__doc__()):
yield from _infer_for_statement_string(function_value.get_root_context(), type_str)

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/sys_path.py
import os
import re
from pathlib import Path
from importlib.machinery import all_suffixes
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.base_value import ContextualizedNode
from jedi.inference.helpers import is_string, get_str_or_none
from jedi.parser_utils import get_cached_code_lines
from jedi.file_io import FileIO
from jedi import settings
from jedi import debug
_BUILDOUT_PATH_INSERTION_LIMIT = 10
def _abs_path(module_context, str_path: str):
path = Path(str_path)
if path.is_absolute():
return path
module_path = module_context.py__file__()
if module_path is None:
# In this case we have no idea where we actually are in the file
# system.
return None
base_dir = module_path.parent
return base_dir.joinpath(path).absolute()
def _paths_from_assignment(module_context, expr_stmt):
"""
Extracts the assigned strings from an assignment that looks as follows::
sys.path[0:0] = ['module/path', 'another/module/path']
This function is in general pretty tolerant (and therefore 'buggy').
However, it's not a big issue usually to add more paths to Jedi's sys_path,
because it will only affect Jedi in very random situations and by adding
more paths than necessary, it usually benefits the general user.
"""
for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]):
try:
assert operator in ['=', '+=']
assert assignee.type in ('power', 'atom_expr') and \
len(assignee.children) > 1
c = assignee.children
assert c[0].type == 'name' and c[0].value == 'sys'
trailer = c[1]
assert trailer.children[0] == '.' and trailer.children[1].value == 'path'
            # TODO Essentially we're not checking details of the sys.path
            # manipulation. Both assignment to sys.path and changing/adding
            # parts of sys.path are treated the same: the values get added
            # to the end of the current sys.path.
"""
execution = c[2]
assert execution.children[0] == '['
subscript = execution.children[1]
assert subscript.type == 'subscript'
assert ':' in subscript.children
"""
except AssertionError:
continue
cn = ContextualizedNode(module_context.create_context(expr_stmt), expr_stmt)
for lazy_value in cn.infer().iterate(cn):
for value in lazy_value.infer():
if is_string(value):
abs_path = _abs_path(module_context, value.get_safe_value())
if abs_path is not None:
yield abs_path
def _paths_from_list_modifications(module_context, trailer1, trailer2):
""" extract the path from either "sys.path.append" or "sys.path.insert" """
# Guarantee that both are trailers, the first one a name and the second one
# a function execution with at least one param.
if not (trailer1.type == 'trailer' and trailer1.children[0] == '.'
and trailer2.type == 'trailer' and trailer2.children[0] == '('
and len(trailer2.children) == 3):
return
name = trailer1.children[1].value
if name not in ['insert', 'append']:
return
arg = trailer2.children[1]
if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma.
arg = arg.children[2]
for value in module_context.create_context(arg).infer_node(arg):
p = get_str_or_none(value)
if p is None:
continue
abs_path = _abs_path(module_context, p)
if abs_path is not None:
yield abs_path
@inference_state_method_cache(default=[])
def check_sys_path_modifications(module_context):
"""
Detect sys.path modifications within module.
"""
def get_sys_path_powers(names):
for name in names:
power = name.parent.parent
if power is not None and power.type in ('power', 'atom_expr'):
c = power.children
if c[0].type == 'name' and c[0].value == 'sys' \
and c[1].type == 'trailer':
n = c[1].children[1]
if n.type == 'name' and n.value == 'path':
yield name, power
if module_context.tree_node is None:
return []
added = []
try:
possible_names = module_context.tree_node.get_used_names()['path']
except KeyError:
pass
else:
for name, power in get_sys_path_powers(possible_names):
expr_stmt = power.parent
if len(power.children) >= 4:
added.extend(
_paths_from_list_modifications(
module_context, *power.children[2:4]
)
)
elif expr_stmt is not None and expr_stmt.type == 'expr_stmt':
added.extend(_paths_from_assignment(module_context, expr_stmt))
return added
def discover_buildout_paths(inference_state, script_path):
buildout_script_paths = set()
for buildout_script_path in _get_buildout_script_paths(script_path):
for path in _get_paths_from_buildout_script(inference_state, buildout_script_path):
buildout_script_paths.add(path)
if len(buildout_script_paths) >= _BUILDOUT_PATH_INSERTION_LIMIT:
break
return buildout_script_paths
def _get_paths_from_buildout_script(inference_state, buildout_script_path):
file_io = FileIO(str(buildout_script_path))
try:
module_node = inference_state.parse(
file_io=file_io,
cache=True,
cache_path=settings.cache_directory
)
except IOError:
debug.warning('Error trying to read buildout_script: %s', buildout_script_path)
return
from jedi.inference.value import ModuleValue
module_context = ModuleValue(
inference_state, module_node,
file_io=file_io,
string_names=None,
code_lines=get_cached_code_lines(inference_state.grammar, buildout_script_path),
).as_context()
yield from check_sys_path_modifications(module_context)
def _get_parent_dir_with_file(path: Path, filename):
for parent in path.parents:
try:
if parent.joinpath(filename).is_file():
return parent
except OSError:
continue
return None
def _get_buildout_script_paths(search_path: Path):
"""
    If there is a 'buildout.cfg' file in one of the parent directories of the
    given module, this yields the paths of all files in the buildout ``bin``
    directory that look like Python scripts.
:param search_path: absolute path to the module.
"""
project_root = _get_parent_dir_with_file(search_path, 'buildout.cfg')
if not project_root:
return
bin_path = project_root.joinpath('bin')
if not bin_path.exists():
return
for filename in os.listdir(bin_path):
try:
filepath = bin_path.joinpath(filename)
with open(filepath, 'r') as f:
firstline = f.readline()
if firstline.startswith('#!') and 'python' in firstline:
yield filepath
except (UnicodeDecodeError, IOError) as e:
# Probably a binary file; permission error or race cond. because
# file got deleted. Ignore it.
debug.warning(str(e))
continue
def remove_python_path_suffix(path):
for suffix in all_suffixes() + ['.pyi']:
if path.suffix == suffix:
path = path.with_name(path.stem)
break
return path
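# Illustrative examples (assumptions, not part of the original module):
#
#     remove_python_path_suffix(Path('pkg/mod.py'))   # -> Path('pkg/mod')
#     remove_python_path_suffix(Path('pkg/mod.pyi'))  # -> Path('pkg/mod')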
def transform_path_to_dotted(sys_path, module_path):
"""
Returns the dotted path inside a sys.path as a list of names. e.g.
>>> transform_path_to_dotted([str(Path("/foo").absolute())], Path('/foo/bar/baz.py').absolute())
(('bar', 'baz'), False)
Returns (None, False) if the path doesn't really resolve to anything.
The second return part is if it is a package.
"""
# First remove the suffix.
module_path = remove_python_path_suffix(module_path)
if module_path.name.startswith('.'):
return None, False
# Once the suffix was removed we are using the files as we know them. This
# means that if someone uses an ending like .vim for a Python file, .vim
# will be part of the returned dotted part.
is_package = module_path.name == '__init__'
if is_package:
module_path = module_path.parent
def iter_potential_solutions():
for p in sys_path:
if str(module_path).startswith(p):
# Strip the trailing slash/backslash
rest = str(module_path)[len(p):]
# On Windows a path can also use a slash.
if rest.startswith(os.path.sep) or rest.startswith('/'):
# Remove a slash in cases it's still there.
rest = rest[1:]
if rest:
split = rest.split(os.path.sep)
if not all(split):
# This means that part of the file path was empty, this
# is very strange and is probably a file that is called
# `.py`.
return
# Stub folders for foo can end with foo-stubs. Just remove
# it.
yield tuple(re.sub(r'-stubs$', '', s) for s in split)
potential_solutions = tuple(iter_potential_solutions())
if not potential_solutions:
return None, False
    # Try to find the shortest path; this usually makes more sense, because
    # users tend to have venvs somewhere. A path like
    # .tox/py37/lib/python3.7/os.py can be normal for a file, but in that
    # case we definitely want to return ['os'] as a path and not a crazy
    # ['.tox', 'py37', 'lib', 'python3.7', 'os']. Keep in mind that this is a
    # heuristic and there's no way to "always" do it right.
return sorted(potential_solutions, key=lambda p: len(p))[0], is_package

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/docstring_utils.py
from jedi.inference.value import ModuleValue
from jedi.inference.context import ModuleContext
class DocstringModule(ModuleValue):
def __init__(self, in_module_context, **kwargs):
super().__init__(**kwargs)
self._in_module_context = in_module_context
def _as_context(self):
return DocstringModuleContext(self, self._in_module_context)
class DocstringModuleContext(ModuleContext):
def __init__(self, module_value, in_module_context):
super().__init__(module_value)
self._in_module_context = in_module_context
def get_filters(self, origin_scope=None, until_position=None):
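        # Expose names from both the docstring's synthetic module and the
        # module the docstring actually lives in.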
yield from super().get_filters(until_position=until_position)
yield from self._in_module_context.get_filters()

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/signature.py
from inspect import Parameter
from jedi.cache import memoize_method
from jedi import debug
from jedi import parser_utils
class _SignatureMixin:
def to_string(self):
def param_strings():
is_positional = False
is_kw_only = False
for n in self.get_param_names(resolve_stars=True):
kind = n.get_kind()
is_positional |= kind == Parameter.POSITIONAL_ONLY
if is_positional and kind != Parameter.POSITIONAL_ONLY:
yield '/'
is_positional = False
if kind == Parameter.VAR_POSITIONAL:
is_kw_only = True
elif kind == Parameter.KEYWORD_ONLY and not is_kw_only:
yield '*'
is_kw_only = True
yield n.to_string()
if is_positional:
yield '/'
s = self.name.string_name + '(' + ', '.join(param_strings()) + ')'
annotation = self.annotation_string
if annotation:
s += ' -> ' + annotation
return s
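# Illustrative rendering sketch (an assumption, not part of the original
# module): for ``def f(a, /, b, *, c: int) -> str``, ``to_string()`` on its
# signature would render roughly as ``f(a, /, b, *, c: int) -> str``.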
class AbstractSignature(_SignatureMixin):
def __init__(self, value, is_bound=False):
self.value = value
self.is_bound = is_bound
@property
def name(self):
return self.value.name
@property
def annotation_string(self):
return ''
def get_param_names(self, resolve_stars=False):
param_names = self._function_value.get_param_names()
if self.is_bound:
return param_names[1:]
return param_names
def bind(self, value):
raise NotImplementedError
def matches_signature(self, arguments):
return True
def __repr__(self):
if self.value is self._function_value:
return '<%s: %s>' % (self.__class__.__name__, self.value)
return '<%s: %s, %s>' % (self.__class__.__name__, self.value, self._function_value)
class TreeSignature(AbstractSignature):
def __init__(self, value, function_value=None, is_bound=False):
super().__init__(value, is_bound)
self._function_value = function_value or value
def bind(self, value):
return TreeSignature(value, self._function_value, is_bound=True)
@property
def _annotation(self):
# Classes don't need annotations, even if __init__ has one. They always
# return themselves.
if self.value.is_class():
return None
return self._function_value.tree_node.annotation
@property
def annotation_string(self):
a = self._annotation
if a is None:
return ''
return a.get_code(include_prefix=False)
@memoize_method
def get_param_names(self, resolve_stars=False):
params = self._function_value.get_param_names()
if resolve_stars:
from jedi.inference.star_args import process_params
params = process_params(params)
if self.is_bound:
return params[1:]
return params
def matches_signature(self, arguments):
from jedi.inference.param import get_executed_param_names_and_issues
executed_param_names, issues = \
get_executed_param_names_and_issues(self._function_value, arguments)
if issues:
return False
matches = all(executed_param_name.matches_signature()
for executed_param_name in executed_param_names)
if debug.enable_notice:
tree_node = self._function_value.tree_node
signature = parser_utils.get_signature(tree_node)
if matches:
debug.dbg("Overloading match: %s@%s (%s)",
signature, tree_node.start_pos[0], arguments, color='BLUE')
else:
debug.dbg("Overloading no match: %s@%s (%s)",
signature, tree_node.start_pos[0], arguments, color='BLUE')
return matches
class BuiltinSignature(AbstractSignature):
def __init__(self, value, return_string, function_value=None, is_bound=False):
super().__init__(value, is_bound)
self._return_string = return_string
self.__function_value = function_value
@property
def annotation_string(self):
return self._return_string
@property
def _function_value(self):
if self.__function_value is None:
return self.value
return self.__function_value
def bind(self, value):
return BuiltinSignature(
value, self._return_string,
function_value=self.value,
is_bound=True
)
class SignatureWrapper(_SignatureMixin):
def __init__(self, wrapped_signature):
self._wrapped_signature = wrapped_signature
def __getattr__(self, name):
return getattr(self._wrapped_signature, name)

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/star_args.py
"""
This module is responsible for inferring *args and **kwargs for signatures.
This means for example in this case::
def foo(a, b, c): ...
def bar(*args):
return foo(1, *args)
The signature here for ``bar`` should be `bar(b, c)` instead of `bar(*args)`.
"""
from inspect import Parameter
from parso import tree
from jedi.inference.utils import to_list
from jedi.inference.names import ParamNameWrapper
from jedi.inference.helpers import is_big_annoying_library
def _iter_nodes_for_param(param_name):
from parso.python.tree import search_ancestor
from jedi.inference.arguments import TreeArguments
execution_context = param_name.parent_context
# Walk up the parso tree to get the FunctionNode we want. We use the parso
# tree rather than going via the execution context so that we're agnostic of
    # the specific scope we're evaluating within (e.g. module, function,
    # etc.).
function_node = tree.search_ancestor(param_name.tree_name, 'funcdef', 'lambdef')
module_node = function_node.get_root_node()
start = function_node.children[-1].start_pos
end = function_node.children[-1].end_pos
for name in module_node.get_used_names().get(param_name.string_name):
if start <= name.start_pos < end:
# Is used in the function
argument = name.parent
if argument.type == 'argument' \
and argument.children[0] == '*' * param_name.star_count:
trailer = search_ancestor(argument, 'trailer')
if trailer is not None: # Make sure we're in a function
context = execution_context.create_context(trailer)
if _goes_to_param_name(param_name, context, name):
values = _to_callables(context, trailer)
args = TreeArguments.create_cached(
execution_context.inference_state,
context=context,
argument_node=trailer.children[1],
trailer=trailer,
)
for c in values:
yield c, args
def _goes_to_param_name(param_name, context, potential_name):
if potential_name.type != 'name':
return False
from jedi.inference.names import TreeNameDefinition
found = TreeNameDefinition(context, potential_name).goto()
return any(param_name.parent_context == p.parent_context
and param_name.start_pos == p.start_pos
for p in found)
def _to_callables(context, trailer):
from jedi.inference.syntax_tree import infer_trailer
atom_expr = trailer.parent
index = atom_expr.children[0] == 'await'
# Infer atom first
values = context.infer_node(atom_expr.children[index])
for trailer2 in atom_expr.children[index + 1:]:
if trailer == trailer2:
break
values = infer_trailer(context, values, trailer2)
return values
def _remove_given_params(arguments, param_names):
count = 0
used_keys = set()
for key, _ in arguments.unpack():
if key is None:
count += 1
else:
used_keys.add(key)
for p in param_names:
if count and p.maybe_positional_argument():
count -= 1
continue
if p.string_name in used_keys and p.maybe_keyword_argument():
continue
yield p
@to_list
def process_params(param_names, star_count=3): # default means both * and **
if param_names:
if is_big_annoying_library(param_names[0].parent_context):
# At first this feature can look innocent, but it does a lot of
# type inference in some cases, so we just ditch it.
yield from param_names
return
used_names = set()
arg_callables = []
kwarg_callables = []
kw_only_names = []
kwarg_names = []
arg_names = []
original_arg_name = None
original_kwarg_name = None
for p in param_names:
kind = p.get_kind()
if kind == Parameter.VAR_POSITIONAL:
if star_count & 1:
arg_callables = _iter_nodes_for_param(p)
original_arg_name = p
elif p.get_kind() == Parameter.VAR_KEYWORD:
if star_count & 2:
kwarg_callables = list(_iter_nodes_for_param(p))
original_kwarg_name = p
elif kind == Parameter.KEYWORD_ONLY:
if star_count & 2:
kw_only_names.append(p)
elif kind == Parameter.POSITIONAL_ONLY:
if star_count & 1:
yield p
else:
if star_count == 1:
yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY)
elif star_count == 2:
kw_only_names.append(ParamNameFixedKind(p, Parameter.KEYWORD_ONLY))
else:
used_names.add(p.string_name)
yield p
# First process *args
longest_param_names = ()
found_arg_signature = False
found_kwarg_signature = False
for func_and_argument in arg_callables:
func, arguments = func_and_argument
new_star_count = star_count
if func_and_argument in kwarg_callables:
kwarg_callables.remove(func_and_argument)
else:
new_star_count = 1
for signature in func.get_signatures():
found_arg_signature = True
if new_star_count == 3:
found_kwarg_signature = True
args_for_this_func = []
for p in process_params(
list(_remove_given_params(
arguments,
signature.get_param_names(resolve_stars=False)
)), new_star_count):
if p.get_kind() == Parameter.VAR_KEYWORD:
kwarg_names.append(p)
elif p.get_kind() == Parameter.VAR_POSITIONAL:
arg_names.append(p)
elif p.get_kind() == Parameter.KEYWORD_ONLY:
kw_only_names.append(p)
else:
args_for_this_func.append(p)
if len(args_for_this_func) > len(longest_param_names):
longest_param_names = args_for_this_func
for p in longest_param_names:
if star_count == 1 and p.get_kind() != Parameter.VAR_POSITIONAL:
yield ParamNameFixedKind(p, Parameter.POSITIONAL_ONLY)
else:
if p.get_kind() == Parameter.POSITIONAL_OR_KEYWORD:
used_names.add(p.string_name)
yield p
if not found_arg_signature and original_arg_name is not None:
yield original_arg_name
elif arg_names:
yield arg_names[0]
# Then process **kwargs
for func, arguments in kwarg_callables:
for signature in func.get_signatures():
found_kwarg_signature = True
for p in process_params(
list(_remove_given_params(
arguments,
signature.get_param_names(resolve_stars=False)
)), star_count=2):
if p.get_kind() == Parameter.VAR_KEYWORD:
kwarg_names.append(p)
elif p.get_kind() == Parameter.KEYWORD_ONLY:
kw_only_names.append(p)
for p in kw_only_names:
if p.string_name in used_names:
continue
yield p
used_names.add(p.string_name)
if not found_kwarg_signature and original_kwarg_name is not None:
yield original_kwarg_name
elif kwarg_names:
yield kwarg_names[0]
class ParamNameFixedKind(ParamNameWrapper):
def __init__(self, param_name, new_kind):
super().__init__(param_name)
self._new_kind = new_kind
def get_kind(self):
return self._new_kind
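# Illustrative sketch (an assumption, not part of the original module): for
#
#     def foo(a, b, c): ...
#     def bar(*args):
#         return foo(1, *args)
#
# process_params() on bar's param names resolves ``*args`` through the call
# to ``foo`` and yields names roughly equivalent to ``(b, c)``.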

# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/base_value.py
"""
Values are the "values" that Python would return. However Values are at the
same time also the "values" that a user is currently sitting in.
A ValueSet is typically used to specify the return of a function or any other
static analysis operation. In jedi there are always multiple returns and not
just one.
"""
from functools import reduce
from operator import add
from itertools import zip_longest
from parso.python.tree import Name
from jedi import debug
from jedi.parser_utils import clean_scope_docstring
from jedi.inference.helpers import SimpleGetItemNotFound
from jedi.inference.utils import safe_property
from jedi.inference.cache import inference_state_as_method_param_cache
from jedi.cache import memoize_method
sentinel = object()
class HasNoContext(Exception):
pass
class HelperValueMixin:
def get_root_context(self):
value = self
if value.parent_context is None:
return value.as_context()
while True:
if value.parent_context is None:
return value
value = value.parent_context
def execute(self, arguments):
return self.inference_state.execute(self, arguments=arguments)
def execute_with_values(self, *value_list):
from jedi.inference.arguments import ValuesArguments
arguments = ValuesArguments([ValueSet([value]) for value in value_list])
return self.inference_state.execute(self, arguments)
def execute_annotation(self):
return self.execute_with_values()
def gather_annotation_classes(self):
return ValueSet([self])
def merge_types_of_iterate(self, contextualized_node=None, is_async=False):
return ValueSet.from_sets(
lazy_value.infer()
for lazy_value in self.iterate(contextualized_node, is_async)
)
def _get_value_filters(self, name_or_str):
origin_scope = name_or_str if isinstance(name_or_str, Name) else None
yield from self.get_filters(origin_scope=origin_scope)
        # This covers the case where stub files are incomplete.
if self.is_stub():
from jedi.inference.gradual.conversion import convert_values
for c in convert_values(ValueSet({self})):
yield from c.get_filters()
def goto(self, name_or_str, name_context=None, analysis_errors=True):
from jedi.inference import finder
filters = self._get_value_filters(name_or_str)
names = finder.filter_name(filters, name_or_str)
debug.dbg('context.goto %s in (%s): %s', name_or_str, self, names)
return names
def py__getattribute__(self, name_or_str, name_context=None, position=None,
analysis_errors=True):
"""
:param position: Position of the last statement -> tuple of line, column
"""
if name_context is None:
name_context = self
names = self.goto(name_or_str, name_context, analysis_errors)
values = ValueSet.from_sets(name.infer() for name in names)
if not values:
n = name_or_str.value if isinstance(name_or_str, Name) else name_or_str
values = self.py__getattribute__alternatives(n)
if not names and not values and analysis_errors:
if isinstance(name_or_str, Name):
from jedi.inference import analysis
analysis.add_attribute_error(
name_context, self, name_or_str)
debug.dbg('context.names_to_types: %s -> %s', names, values)
return values
def py__await__(self):
await_value_set = self.py__getattribute__("__await__")
if not await_value_set:
debug.warning('Tried to run __await__ on value %s', self)
return await_value_set.execute_with_values()
def py__name__(self):
return self.name.string_name
def iterate(self, contextualized_node=None, is_async=False):
debug.dbg('iterate %s', self)
if is_async:
from jedi.inference.lazy_value import LazyKnownValues
# TODO if no __aiter__ values are there, error should be:
# TypeError: 'async for' requires an object with __aiter__ method, got int
return iter([
LazyKnownValues(
self.py__getattribute__('__aiter__').execute_with_values()
.py__getattribute__('__anext__').execute_with_values()
.py__getattribute__('__await__').execute_with_values()
.py__stop_iteration_returns()
) # noqa: E124
])
return self.py__iter__(contextualized_node)
def is_sub_class_of(self, class_value):
with debug.increase_indent_cm('subclass matching of %s <=> %s' % (self, class_value),
color='BLUE'):
for cls in self.py__mro__():
if cls.is_same_class(class_value):
debug.dbg('matched subclass True', color='BLUE')
return True
debug.dbg('matched subclass False', color='BLUE')
return False
def is_same_class(self, class2):
# Class matching should prefer comparisons that are not this function.
if type(class2).is_same_class != HelperValueMixin.is_same_class:
return class2.is_same_class(self)
return self == class2
@memoize_method
def as_context(self, *args, **kwargs):
return self._as_context(*args, **kwargs)
class Value(HelperValueMixin):
"""
To be implemented by subclasses.
"""
tree_node = None
# Possible values: None, tuple, list, dict and set. Here to deal with these
# very important containers.
array_type = None
api_type = 'not_defined_please_report_bug'
def __init__(self, inference_state, parent_context=None):
self.inference_state = inference_state
self.parent_context = parent_context
def py__getitem__(self, index_value_set, contextualized_node):
from jedi.inference import analysis
# TODO this value is probably not right.
analysis.add(
contextualized_node.context,
'type-error-not-subscriptable',
contextualized_node.node,
message="TypeError: '%s' object is not subscriptable" % self
)
return NO_VALUES
def py__simple_getitem__(self, index):
raise SimpleGetItemNotFound
def py__iter__(self, contextualized_node=None):
if contextualized_node is not None:
from jedi.inference import analysis
analysis.add(
contextualized_node.context,
'type-error-not-iterable',
contextualized_node.node,
message="TypeError: '%s' object is not iterable" % self)
return iter([])
def py__next__(self, contextualized_node=None):
return self.py__iter__(contextualized_node)
def get_signatures(self):
return []
def is_class(self):
return False
def is_class_mixin(self):
return False
def is_instance(self):
return False
def is_function(self):
return False
def is_module(self):
return False
def is_namespace(self):
return False
def is_compiled(self):
return False
def is_bound_method(self):
return False
def is_builtins_module(self):
return False
def py__bool__(self):
"""
Since Wrapper is a super class for classes, functions and modules,
the return value will always be true.
"""
return True
def py__doc__(self):
try:
self.tree_node.get_doc_node
except AttributeError:
return ''
else:
return clean_scope_docstring(self.tree_node)
def get_safe_value(self, default=sentinel):
if default is sentinel:
raise ValueError("There exists no safe value for value %s" % self)
return default
def execute_operation(self, other, operator):
debug.warning("%s not possible between %s and %s", operator, self, other)
return NO_VALUES
def py__call__(self, arguments):
debug.warning("no execution possible %s", self)
return NO_VALUES
def py__stop_iteration_returns(self):
debug.warning("Not possible to return the stop iterations of %s", self)
return NO_VALUES
def py__getattribute__alternatives(self, name_or_str):
"""
For now a way to add values in cases like __getattr__.
"""
return NO_VALUES
def py__get__(self, instance, class_value):
debug.warning("No __get__ defined on %s", self)
return ValueSet([self])
def py__get__on_class(self, calling_instance, instance, class_value):
return NotImplemented
def get_qualified_names(self):
# Returns Optional[Tuple[str, ...]]
return None
def is_stub(self):
# The root value knows if it's a stub or not.
return self.parent_context.is_stub()
def _as_context(self):
raise HasNoContext
@property
def name(self):
raise NotImplementedError
def get_type_hint(self, add_class_info=True):
return None
def infer_type_vars(self, value_set):
"""
When the current instance represents a type annotation, this method
tries to find information about undefined type vars and returns a dict
from type var name to value set.
This is for example important to understand what `iter([1])` returns.
According to typeshed, `iter` returns an `Iterator[_T]`:
def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
        This function would generate `int` for `_T` in this case, because it
unpacks the `Iterable`.
Parameters
----------
`self`: represents the annotation of the current parameter to infer the
value for. In the above example, this would initially be the
`Iterable[_T]` of the `iterable` parameter and then, when recursing,
just the `_T` generic parameter.
`value_set`: represents the actual argument passed to the parameter
            we're inferring for, or (for recursive calls) their types. In the
above example this would first be the representation of the list
`[1]` and then, when recursing, just of `1`.
"""
return {}
def iterate_values(values, contextualized_node=None, is_async=False):
"""
    Calls `iterate` on all values, ignores the ordering, and just returns
    all values that the iterate functions yield.
"""
return ValueSet.from_sets(
lazy_value.infer()
for lazy_value in values.iterate(contextualized_node, is_async=is_async)
)
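# Illustrative sketch (an assumption, not part of the original module):
# ValueSet composition as used throughout this file.
#
#     vs = ValueSet([value_a]) | ValueSet([value_b])
#     merged = ValueSet.from_sets([vs, ValueSet([value_c])])
#     classes = vs.py__class__()  # maps py__class__ over every member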
class _ValueWrapperBase(HelperValueMixin):
@safe_property
def name(self):
from jedi.inference.names import ValueName
wrapped_name = self._wrapped_value.name
if wrapped_name.tree_name is not None:
return ValueName(self, wrapped_name.tree_name)
else:
from jedi.inference.compiled import CompiledValueName
return CompiledValueName(self, wrapped_name.string_name)
@classmethod
@inference_state_as_method_param_cache()
def create_cached(cls, inference_state, *args, **kwargs):
return cls(*args, **kwargs)
def __getattr__(self, name):
assert name != '_wrapped_value', 'Problem with _get_wrapped_value'
return getattr(self._wrapped_value, name)
class LazyValueWrapper(_ValueWrapperBase):
@safe_property
@memoize_method
def _wrapped_value(self):
with debug.increase_indent_cm('Resolve lazy value wrapper'):
return self._get_wrapped_value()
def __repr__(self):
return '<%s>' % (self.__class__.__name__)
def _get_wrapped_value(self):
raise NotImplementedError
class ValueWrapper(_ValueWrapperBase):
def __init__(self, wrapped_value):
self._wrapped_value = wrapped_value
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._wrapped_value)
class TreeValue(Value):
def __init__(self, inference_state, parent_context, tree_node):
super().__init__(inference_state, parent_context)
self.tree_node = tree_node
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
class ContextualizedNode:
def __init__(self, context, node):
self.context = context
self.node = node
def get_root_context(self):
return self.context.get_root_context()
def infer(self):
return self.context.infer_node(self.node)
def __repr__(self):
return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)
def _getitem(value, index_values, contextualized_node):
# The actual getitem call.
result = NO_VALUES
unused_values = set()
for index_value in index_values:
index = index_value.get_safe_value(default=None)
if type(index) in (float, int, str, slice, bytes):
try:
result |= value.py__simple_getitem__(index)
continue
except SimpleGetItemNotFound:
pass
unused_values.add(index_value)
# The index was somehow not good enough or simply a wrong type.
# Therefore we now iterate through all the values and just take
# all results.
if unused_values or not index_values:
result |= value.py__getitem__(
ValueSet(unused_values),
contextualized_node
)
debug.dbg('py__getitem__ result: %s', result)
return result
class ValueSet:
def __init__(self, iterable):
self._set = frozenset(iterable)
for value in iterable:
assert not isinstance(value, ValueSet)
@classmethod
def _from_frozen_set(cls, frozenset_):
self = cls.__new__(cls)
self._set = frozenset_
return self
@classmethod
def from_sets(cls, sets):
"""
        Used to work with an iterable of sets.
"""
aggregated = set()
for set_ in sets:
if isinstance(set_, ValueSet):
aggregated |= set_._set
else:
aggregated |= frozenset(set_)
return cls._from_frozen_set(frozenset(aggregated))
def __or__(self, other):
return self._from_frozen_set(self._set | other._set)
def __and__(self, other):
return self._from_frozen_set(self._set & other._set)
def __iter__(self):
return iter(self._set)
def __bool__(self):
return bool(self._set)
def __len__(self):
return len(self._set)
def __repr__(self):
return 'S{%s}' % (', '.join(str(s) for s in self._set))
def filter(self, filter_func):
return self.__class__(filter(filter_func, self._set))
def __getattr__(self, name):
def mapper(*args, **kwargs):
return self.from_sets(
getattr(value, name)(*args, **kwargs)
for value in self._set
)
return mapper
def __eq__(self, other):
return self._set == other._set
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._set)
def py__class__(self):
return ValueSet(c.py__class__() for c in self._set)
def iterate(self, contextualized_node=None, is_async=False):
from jedi.inference.lazy_value import get_merged_lazy_value
type_iters = [c.iterate(contextualized_node, is_async=is_async) for c in self._set]
for lazy_values in zip_longest(*type_iters):
yield get_merged_lazy_value(
[l for l in lazy_values if l is not None]
)
def execute(self, arguments):
return ValueSet.from_sets(c.inference_state.execute(c, arguments) for c in self._set)
def execute_with_values(self, *args, **kwargs):
return ValueSet.from_sets(c.execute_with_values(*args, **kwargs) for c in self._set)
def goto(self, *args, **kwargs):
return reduce(add, [c.goto(*args, **kwargs) for c in self._set], [])
def py__getattribute__(self, *args, **kwargs):
return ValueSet.from_sets(c.py__getattribute__(*args, **kwargs) for c in self._set)
def get_item(self, *args, **kwargs):
return ValueSet.from_sets(_getitem(c, *args, **kwargs) for c in self._set)
def try_merge(self, function_name):
value_set = self.__class__([])
for c in self._set:
try:
method = getattr(c, function_name)
except AttributeError:
pass
else:
value_set |= method()
return value_set
def gather_annotation_classes(self):
return ValueSet.from_sets([c.gather_annotation_classes() for c in self._set])
def get_signatures(self):
return [sig for c in self._set for sig in c.get_signatures()]
def get_type_hint(self, add_class_info=True):
t = [v.get_type_hint(add_class_info=add_class_info) for v in self._set]
type_hints = sorted(filter(None, t))
if len(type_hints) == 1:
return type_hints[0]
optional = 'None' in type_hints
if optional:
type_hints.remove('None')
if len(type_hints) == 0:
return None
elif len(type_hints) == 1:
s = type_hints[0]
else:
s = 'Union[%s]' % ', '.join(type_hints)
if optional:
s = 'Optional[%s]' % s
return s
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_type_var_dicts
type_var_dict = {}
for value in self._set:
merge_type_var_dicts(
type_var_dict,
value.infer_type_vars(value_set),
)
return type_var_dict
NO_VALUES = ValueSet([])
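# Editor's sketch, not part of upstream jedi: a minimal illustration of how
# ValueSet behaves as an immutable set with set-algebra helpers. Plain ints
# stand in for inference values, and `_demo_value_set` is a hypothetical
# helper that jedi itself never calls.
def _demo_value_set():
    a = ValueSet([1, 2])
    b = ValueSet([2, 3])
    assert set(a | b) == {1, 2, 3}   # union via __or__
    assert set(a & b) == {2}         # intersection via __and__
    assert set(a.filter(lambda v: v > 1)) == {2}
    # from_sets flattens a mix of ValueSets and plain iterables.
    assert set(ValueSet.from_sets([a, {4}])) == {1, 2, 4}
    assert not NO_VALUES             # the empty set is falsy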
def iterator_to_value_set(func):
def wrapper(*args, **kwargs):
return ValueSet(func(*args, **kwargs))
return wrapper
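# Editor's sketch, not part of upstream jedi: a generator function wrapped
# with iterator_to_value_set returns a ValueSet instead of a lazy iterator.
# `_demo_iterator_to_value_set` is illustrative only and never called.
def _demo_iterator_to_value_set():
    @iterator_to_value_set
    def _yield_values():
        yield 1
        yield 2
    result = _yield_values()
    assert isinstance(result, ValueSet) and set(result) == {1, 2}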
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/imports.py
"""
:mod:`jedi.inference.imports` is here to resolve import statements and return
the modules/classes/functions/whatever, which they stand for. However there's
not any actual importing done. This module is about finding modules in the
filesystem. This can be quite tricky sometimes, because Python imports are not
always that simple.
This module also supports import autocompletion, which means to complete
statements like ``from datetim`` (cursor at the end would return ``datetime``).
"""
import os
from pathlib import Path
from parso.python import tree
from parso.tree import search_ancestor
from jedi import debug
from jedi import settings
from jedi.file_io import FolderIO
from jedi.parser_utils import get_cached_code_lines
from jedi.inference import sys_path
from jedi.inference import helpers
from jedi.inference import compiled
from jedi.inference import analysis
from jedi.inference.utils import unite
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.names import ImportName, SubModuleName
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.gradual.typeshed import import_module_decorator, \
create_stub_module, parse_stub_module
from jedi.inference.compiled.subprocess.functions import ImplicitNSInfo
from jedi.plugins import plugin_manager
class ModuleCache:
def __init__(self):
self._name_cache = {}
def add(self, string_names, value_set):
if string_names is not None:
self._name_cache[string_names] = value_set
def get(self, string_names):
return self._name_cache.get(string_names)
# This memoization is needed, because otherwise we will infinitely loop on
# certain imports.
@inference_state_method_cache(default=NO_VALUES)
def infer_import(context, tree_name):
module_context = context.get_root_context()
from_import_name, import_path, level, values = \
_prepare_infer_import(module_context, tree_name)
if values:
if from_import_name is not None:
values = values.py__getattribute__(
from_import_name,
name_context=context,
analysis_errors=False
)
if not values:
path = import_path + (from_import_name,)
importer = Importer(context.inference_state, path, module_context, level)
values = importer.follow()
debug.dbg('after import: %s', values)
return values
@inference_state_method_cache(default=[])
def goto_import(context, tree_name):
module_context = context.get_root_context()
from_import_name, import_path, level, values = \
_prepare_infer_import(module_context, tree_name)
if not values:
return []
if from_import_name is not None:
names = unite([
c.goto(
from_import_name,
name_context=context,
analysis_errors=False
) for c in values
])
# Avoid recursion on the same names.
if names and not any(n.tree_name is tree_name for n in names):
return names
path = import_path + (from_import_name,)
importer = Importer(context.inference_state, path, module_context, level)
values = importer.follow()
return set(s.name for s in values)
def _prepare_infer_import(module_context, tree_name):
import_node = search_ancestor(tree_name, 'import_name', 'import_from')
import_path = import_node.get_path_for_name(tree_name)
from_import_name = None
try:
from_names = import_node.get_from_names()
except AttributeError:
# Is an import_name
pass
else:
if len(from_names) + 1 == len(import_path):
# We have to fetch the from_names part first and then check
# if from_names exists in the modules.
from_import_name = import_path[-1]
import_path = from_names
importer = Importer(module_context.inference_state, tuple(import_path),
module_context, import_node.level)
return from_import_name, tuple(import_path), import_node.level, importer.follow()
def _add_error(value, name, message):
if hasattr(name, 'parent') and value is not None:
analysis.add(value, 'import-error', name, message)
else:
debug.warning('ImportError without origin: ' + message)
def _level_to_base_import_path(project_path, directory, level):
"""
In case the level is outside of the currently known package (something like
    import .....foo), we can still try our best to help the user with
completions.
"""
for i in range(level - 1):
old = directory
directory = os.path.dirname(directory)
if old == directory:
return None, None
d = directory
level_import_paths = []
# Now that we are on the level that the user wants to be, calculate the
# import path for it.
while True:
if d == project_path:
return level_import_paths, d
dir_name = os.path.basename(d)
if dir_name:
level_import_paths.insert(0, dir_name)
d = os.path.dirname(d)
else:
return None, directory
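# Editor's sketch, not part of upstream jedi: how the helper above maps an
# over-deep relative import back onto the project. The paths are hypothetical
# POSIX-style examples and never need to exist on disk.
def _demo_level_to_base_import_path():
    # `from .. import x` (level=2) inside /proj/pkg/sub strips one directory;
    # the chain below the project root becomes the base import path.
    paths, base = _level_to_base_import_path('/proj', '/proj/pkg/sub', 2)
    assert (paths, base) == (['pkg'], '/proj')
    # With level=3 we land exactly on the project root, so nothing remains.
    paths, base = _level_to_base_import_path('/proj', '/proj/pkg/sub', 3)
    assert (paths, base) == ([], '/proj')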
class Importer:
def __init__(self, inference_state, import_path, module_context, level=0):
"""
An implementation similar to ``__import__``. Use `follow`
to actually follow the imports.
*level* specifies whether to use absolute or relative imports. 0 (the
default) means only perform absolute imports. Positive values for level
indicate the number of parent directories to search relative to the
directory of the module calling ``__import__()`` (see PEP 328 for the
details).
:param import_path: List of namespaces (strings or Names).
"""
debug.speed('import %s %s' % (import_path, module_context))
self._inference_state = inference_state
self.level = level
self._module_context = module_context
self._fixed_sys_path = None
self._infer_possible = True
if level:
base = module_context.get_value().py__package__()
# We need to care for two cases, the first one is if it's a valid
# Python import. This import has a properly defined module name
# chain like `foo.bar.baz` and an import in baz is made for
# `..lala.` It can then resolve to `foo.bar.lala`.
# The else here is a heuristic for all other cases, if for example
# in `foo` you search for `...bar`, it's obviously out of scope.
            # However, since Jedi tries to do its best, we help the user
            # here, because they might have specified something wrong in
            # their project.
if level <= len(base):
# Here we basically rewrite the level to 0.
base = tuple(base)
if level > 1:
base = base[:-level + 1]
import_path = base + tuple(import_path)
else:
path = module_context.py__file__()
project_path = self._inference_state.project.path
import_path = list(import_path)
if path is None:
# If no path is defined, our best guess is that the current
# file is edited by a user on the current working
# directory. We need to add an initial path, because it
# will get removed as the name of the current file.
directory = project_path
else:
directory = os.path.dirname(path)
base_import_path, base_directory = _level_to_base_import_path(
project_path, directory, level,
)
if base_directory is None:
# Everything is lost, the relative import does point
# somewhere out of the filesystem.
self._infer_possible = False
else:
self._fixed_sys_path = [base_directory]
if base_import_path is None:
if import_path:
_add_error(
module_context, import_path[0],
message='Attempted relative import beyond top-level package.'
)
else:
import_path = base_import_path + import_path
self.import_path = import_path
@property
def _str_import_path(self):
"""Returns the import path as pure strings instead of `Name`."""
return tuple(
name.value if isinstance(name, tree.Name) else name
for name in self.import_path
)
def _sys_path_with_modifications(self, is_completion):
if self._fixed_sys_path is not None:
return self._fixed_sys_path
return (
# For import completions we don't want to see init paths, but for
# inference we want to show the user as much as possible.
# See GH #1446.
self._inference_state.get_sys_path(add_init_paths=not is_completion)
+ [
str(p) for p
in sys_path.check_sys_path_modifications(self._module_context)
]
)
def follow(self):
if not self.import_path:
if self._fixed_sys_path:
                # This is a bit of a special case that should maybe be
                # revisited. If the project path is wrong or the user uses
                # relative imports the wrong way, we might end up here, where
                # `fixed_sys_path == project.path`. In that case we kind of
                # use the project.path.parent directory as our path. This is
# usually not a problem, except if imports in other places are
# using the same names. Example:
#
# foo/ < #1
# - setup.py
# - foo/ < #2
# - __init__.py
# - foo.py < #3
#
# If the top foo is our project folder and somebody uses
# `from . import foo` in `setup.py`, it will resolve to foo #2,
# which means that the import for foo.foo is cached as
# `__init__.py` (#2) and not as `foo.py` (#3). This is usually
# not an issue, because this case is probably pretty rare, but
# might be an issue for some people.
#
# However for most normal cases where we work with different
# file names, this code path hits where we basically change the
# project path to an ancestor of project path.
from jedi.inference.value.namespace import ImplicitNamespaceValue
import_path = (os.path.basename(self._fixed_sys_path[0]),)
ns = ImplicitNamespaceValue(
self._inference_state,
string_names=import_path,
paths=self._fixed_sys_path,
)
return ValueSet({ns})
return NO_VALUES
if not self._infer_possible:
return NO_VALUES
# Check caches first
from_cache = self._inference_state.stub_module_cache.get(self._str_import_path)
if from_cache is not None:
return ValueSet({from_cache})
from_cache = self._inference_state.module_cache.get(self._str_import_path)
if from_cache is not None:
return from_cache
sys_path = self._sys_path_with_modifications(is_completion=False)
return import_module_by_names(
self._inference_state, self.import_path, sys_path, self._module_context
)
def _get_module_names(self, search_path=None, in_module=None):
"""
Get the names of all modules in the search_path. This means file names
and not names defined in the files.
"""
if search_path is None:
sys_path = self._sys_path_with_modifications(is_completion=True)
else:
sys_path = search_path
return list(iter_module_names(
self._inference_state, self._module_context, sys_path,
module_cls=ImportName if in_module is None else SubModuleName,
add_builtin_modules=search_path is None and in_module is None,
))
def completion_names(self, inference_state, only_modules=False):
"""
        :param only_modules: Indicates whether it's possible to import a
            definition that is not defined in a module.
"""
if not self._infer_possible:
return []
names = []
if self.import_path:
# flask
if self._str_import_path == ('flask', 'ext'):
# List Flask extensions like ``flask_foo``
for mod in self._get_module_names():
modname = mod.string_name
if modname.startswith('flask_'):
extname = modname[len('flask_'):]
names.append(ImportName(self._module_context, extname))
# Now the old style: ``flaskext.foo``
for dir in self._sys_path_with_modifications(is_completion=True):
flaskext = os.path.join(dir, 'flaskext')
if os.path.isdir(flaskext):
names += self._get_module_names([flaskext])
values = self.follow()
for value in values:
# Non-modules are not completable.
if value.api_type not in ('module', 'namespace'): # not a module
continue
if not value.is_compiled():
# sub_modules_dict is not implemented for compiled modules.
names += value.sub_modules_dict().values()
if not only_modules:
from jedi.inference.gradual.conversion import convert_values
both_values = values | convert_values(values)
for c in both_values:
for filter in c.get_filters():
names += filter.values()
else:
if self.level:
# We only get here if the level cannot be properly calculated.
names += self._get_module_names(self._fixed_sys_path)
else:
# This is just the list of global imports.
names += self._get_module_names()
return names
def import_module_by_names(inference_state, import_names, sys_path=None,
module_context=None, prefer_stubs=True):
if sys_path is None:
sys_path = inference_state.get_sys_path()
str_import_names = tuple(
i.value if isinstance(i, tree.Name) else i
for i in import_names
)
value_set = [None]
for i, name in enumerate(import_names):
value_set = ValueSet.from_sets([
import_module(
inference_state,
str_import_names[:i+1],
parent_module_value,
sys_path,
prefer_stubs=prefer_stubs,
) for parent_module_value in value_set
])
if not value_set:
message = 'No module named ' + '.'.join(str_import_names)
if module_context is not None:
_add_error(module_context, name, message)
else:
debug.warning(message)
return NO_VALUES
return value_set
@plugin_manager.decorate()
@import_module_decorator
def import_module(inference_state, import_names, parent_module_value, sys_path):
"""
This method is very similar to importlib's `_gcd_import`.
"""
if import_names[0] in settings.auto_import_modules:
module = _load_builtin_module(inference_state, import_names, sys_path)
if module is None:
return NO_VALUES
return ValueSet([module])
module_name = '.'.join(import_names)
if parent_module_value is None:
        # Override sys.path. It only works well that way.
# Injecting the path directly into `find_module` did not work.
file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
string=import_names[-1],
full_name=module_name,
sys_path=sys_path,
is_global_search=True,
)
if is_pkg is None:
return NO_VALUES
else:
paths = parent_module_value.py__path__()
if paths is None:
# The module might not be a package.
return NO_VALUES
file_io_or_ns, is_pkg = inference_state.compiled_subprocess.get_module_info(
string=import_names[-1],
path=paths,
full_name=module_name,
is_global_search=False,
)
if is_pkg is None:
return NO_VALUES
if isinstance(file_io_or_ns, ImplicitNSInfo):
from jedi.inference.value.namespace import ImplicitNamespaceValue
module = ImplicitNamespaceValue(
inference_state,
string_names=tuple(file_io_or_ns.name.split('.')),
paths=file_io_or_ns.paths,
)
elif file_io_or_ns is None:
module = _load_builtin_module(inference_state, import_names, sys_path)
if module is None:
return NO_VALUES
else:
module = _load_python_module(
inference_state, file_io_or_ns,
import_names=import_names,
is_package=is_pkg,
)
if parent_module_value is None:
debug.dbg('global search_module %s: %s', import_names[-1], module)
else:
debug.dbg('search_module %s in paths %s: %s', module_name, paths, module)
return ValueSet([module])
def _load_python_module(inference_state, file_io,
import_names=None, is_package=False):
module_node = inference_state.parse(
file_io=file_io,
cache=True,
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
)
from jedi.inference.value import ModuleValue
return ModuleValue(
inference_state, module_node,
file_io=file_io,
string_names=import_names,
code_lines=get_cached_code_lines(inference_state.grammar, file_io.path),
is_package=is_package,
)
def _load_builtin_module(inference_state, import_names=None, sys_path=None):
project = inference_state.project
if sys_path is None:
sys_path = inference_state.get_sys_path()
if not project._load_unsafe_extensions:
safe_paths = project._get_base_sys_path(inference_state)
sys_path = [p for p in sys_path if p in safe_paths]
dotted_name = '.'.join(import_names)
assert dotted_name is not None
module = compiled.load_module(inference_state, dotted_name=dotted_name, sys_path=sys_path)
if module is None:
# The file might raise an ImportError e.g. and therefore not be
# importable.
return None
return module
def load_module_from_path(inference_state, file_io, import_names=None, is_package=None):
"""
This should pretty much only be used for get_modules_containing_name. It's
here to ensure that a random path is still properly loaded into the Jedi
module structure.
"""
path = Path(file_io.path)
if import_names is None:
e_sys_path = inference_state.get_sys_path()
import_names, is_package = sys_path.transform_path_to_dotted(e_sys_path, path)
else:
assert isinstance(is_package, bool)
is_stub = path.suffix == '.pyi'
if is_stub:
folder_io = file_io.get_parent_folder()
if folder_io.path.endswith('-stubs'):
folder_io = FolderIO(folder_io.path[:-6])
if path.name == '__init__.pyi':
python_file_io = folder_io.get_file_io('__init__.py')
else:
python_file_io = folder_io.get_file_io(import_names[-1] + '.py')
try:
v = load_module_from_path(
inference_state, python_file_io,
import_names, is_package=is_package
)
values = ValueSet([v])
except FileNotFoundError:
values = NO_VALUES
return create_stub_module(
inference_state, inference_state.latest_grammar, values,
parse_stub_module(inference_state, file_io), file_io, import_names
)
else:
module = _load_python_module(
inference_state, file_io,
import_names=import_names,
is_package=is_package,
)
inference_state.module_cache.add(import_names, ValueSet([module]))
return module
def load_namespace_from_path(inference_state, folder_io):
import_names, is_package = sys_path.transform_path_to_dotted(
inference_state.get_sys_path(),
Path(folder_io.path)
)
from jedi.inference.value.namespace import ImplicitNamespaceValue
return ImplicitNamespaceValue(inference_state, import_names, [folder_io.path])
def follow_error_node_imports_if_possible(context, name):
error_node = tree.search_ancestor(name, 'error_node')
if error_node is not None:
        # Find the start of the first statement within the error node. The
        # error node is sometimes a small_stmt and sometimes a simple_stmt.
        # Check for `;` leaves that start a new statement.
start_index = 0
for index, n in enumerate(error_node.children):
if n.start_pos > name.start_pos:
break
if n == ';':
start_index = index + 1
nodes = error_node.children[start_index:]
first_name = nodes[0].get_first_leaf().value
# Make it possible to infer stuff like `import foo.` or
# `from foo.bar`.
if first_name in ('from', 'import'):
is_import_from = first_name == 'from'
level, names = helpers.parse_dotted_names(
nodes,
is_import_from=is_import_from,
until_node=name,
)
return Importer(
context.inference_state, names, context.get_root_context(), level).follow()
return None
def iter_module_names(inference_state, module_context, search_path,
module_cls=ImportName, add_builtin_modules=True):
"""
Get the names of all modules in the search_path. This means file names
and not names defined in the files.
"""
# add builtin module names
if add_builtin_modules:
for name in inference_state.compiled_subprocess.get_builtin_module_names():
yield module_cls(module_context, name)
for name in inference_state.compiled_subprocess.iter_module_names(search_path):
yield module_cls(module_context, name)
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/helpers.py
import copy
import sys
import re
import os
from itertools import chain
from contextlib import contextmanager
from parso.python import tree
def is_stdlib_path(path):
# Python standard library paths look like this:
# /usr/lib/python3.9/...
# TODO The implementation below is probably incorrect and not complete.
parts = path.parts
if 'dist-packages' in parts or 'site-packages' in parts:
return False
base_path = os.path.join(sys.prefix, 'lib', 'python')
    return bool(re.match(re.escape(base_path) + r'\d\.\d', str(path)))
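# Editor's sketch, not part of upstream jedi: `is_stdlib_path` expects a
# pathlib.Path (it reads `.parts`) and rejects anything under site-packages
# or dist-packages outright. The path below is hypothetical and does not
# need to exist.
def _demo_is_stdlib_path():
    from pathlib import Path
    assert not is_stdlib_path(Path('/venv/lib/site-packages/requests/api.py'))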
def deep_ast_copy(obj):
"""
Much, much faster than copy.deepcopy, but just for parser tree nodes.
"""
# If it's already in the cache, just return it.
new_obj = copy.copy(obj)
# Copy children
new_children = []
for child in obj.children:
if isinstance(child, tree.Leaf):
new_child = copy.copy(child)
new_child.parent = new_obj
else:
new_child = deep_ast_copy(child)
new_child.parent = new_obj
new_children.append(new_child)
new_obj.children = new_children
return new_obj
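# Editor's sketch, not part of upstream jedi: deep_ast_copy clones a parso
# tree and re-parents every child, so the copy renders the same code while
# sharing no nodes with the original. Assumes parso is importable (it is a
# hard dependency of jedi).
def _demo_deep_ast_copy():
    import parso
    module = parso.parse('x = f(1)\n')
    clone = deep_ast_copy(module)
    assert clone is not module
    assert clone.children[0] is not module.children[0]
    assert clone.get_code() == module.get_code()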
def infer_call_of_leaf(context, leaf, cut_own_trailer=False):
"""
    Creates a "call" node that consists of all ``trailer`` and ``power``
objects. E.g. if you call it with ``append``::
list([]).append(3) or None
You would get a node with the content ``list([]).append`` back.
This generates a copy of the original ast node.
If you're using the leaf, e.g. the bracket `)` it will return ``list([])``.
We use this function for two purposes. Given an expression ``bar.foo``,
we may want to
- infer the type of ``foo`` to offer completions after foo
- infer the type of ``bar`` to be able to jump to the definition of foo
The option ``cut_own_trailer`` must be set to true for the second purpose.
"""
trailer = leaf.parent
if trailer.type == 'fstring':
from jedi.inference import compiled
return compiled.get_string_value_set(context.inference_state)
# The leaf may not be the last or first child, because there exist three
# different trailers: `( x )`, `[ x ]` and `.x`. In the first two examples
# we should not match anything more than x.
if trailer.type != 'trailer' or leaf not in (trailer.children[0], trailer.children[-1]):
if leaf == ':':
# Basically happens with foo[:] when the cursor is on the colon
from jedi.inference.base_value import NO_VALUES
return NO_VALUES
if trailer.type == 'atom':
return context.infer_node(trailer)
return context.infer_node(leaf)
power = trailer.parent
index = power.children.index(trailer)
if cut_own_trailer:
cut = index
else:
cut = index + 1
if power.type == 'error_node':
start = index
while True:
start -= 1
base = power.children[start]
if base.type != 'trailer':
break
trailers = power.children[start + 1:cut]
else:
base = power.children[0]
trailers = power.children[1:cut]
if base == 'await':
base = trailers[0]
trailers = trailers[1:]
values = context.infer_node(base)
from jedi.inference.syntax_tree import infer_trailer
for trailer in trailers:
values = infer_trailer(context, values, trailer)
return values
def get_names_of_node(node):
try:
children = node.children
except AttributeError:
if node.type == 'name':
return [node]
else:
return []
else:
return list(chain.from_iterable(get_names_of_node(c) for c in children))
def is_string(value):
return value.is_compiled() and isinstance(value.get_safe_value(default=None), str)
def is_literal(value):
return is_number(value) or is_string(value)
def _get_safe_value_or_none(value, accept):
value = value.get_safe_value(default=None)
if isinstance(value, accept):
return value
def get_int_or_none(value):
return _get_safe_value_or_none(value, int)
def get_str_or_none(value):
return _get_safe_value_or_none(value, str)
def is_number(value):
return _get_safe_value_or_none(value, (int, float)) is not None
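# Editor's sketch, not part of upstream jedi: the helpers above only rely on
# the `get_safe_value` / `is_compiled` protocol, so a tiny hypothetical
# stand-in object is enough to show their behaviour.
def _demo_safe_value_helpers():
    class _FakeValue:
        def __init__(self, value):
            self._value = value
        def get_safe_value(self, default=None):
            return self._value
        def is_compiled(self):
            return True
    assert get_int_or_none(_FakeValue(3)) == 3
    assert get_str_or_none(_FakeValue(3)) is None
    assert is_number(_FakeValue(1.5))
    assert is_string(_FakeValue('s')) and is_literal(_FakeValue('s'))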
class SimpleGetItemNotFound(Exception):
pass
@contextmanager
def reraise_getitem_errors(*exception_classes):
try:
yield
except exception_classes as e:
raise SimpleGetItemNotFound(e)
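# Editor's sketch, not part of upstream jedi: the context manager above
# funnels the listed exception types into SimpleGetItemNotFound, so callers
# of py__simple_getitem__ only need a single except clause.
def _demo_reraise_getitem_errors():
    try:
        with reraise_getitem_errors(KeyError, IndexError):
            {}['missing']
    except SimpleGetItemNotFound:
        pass  # the KeyError arrived here as SimpleGetItemNotFound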
def parse_dotted_names(nodes, is_import_from, until_node=None):
level = 0
names = []
for node in nodes[1:]:
if node in ('.', '...'):
if not names:
level += len(node.value)
elif node.type == 'dotted_name':
for n in node.children[::2]:
names.append(n)
if n is until_node:
break
else:
continue
break
elif node.type == 'name':
names.append(node)
if node is until_node:
break
elif node == ',':
if not is_import_from:
names = []
else:
# Here if the keyword `import` comes along it stops checking
# for names.
break
return level, names
def values_from_qualified_names(inference_state, *names):
return inference_state.import_module(names[:-1]).py__getattribute__(names[-1])
def is_big_annoying_library(context):
string_names = context.get_root_context().string_names
if string_names is None:
return False
# Especially pandas and tensorflow are huge complicated Python libraries
    # that get even slower than they already are when Jedi tries to understand
# dynamic features like decorators, ifs and other stuff.
return string_names[0] in ('pandas', 'numpy', 'tensorflow', 'matplotlib')
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/syntax_tree.py
"""
Functions inferring the syntax tree.
"""
import copy
from parso.python import tree
from jedi import debug
from jedi import parser_utils
from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \
iterator_to_value_set, iterate_values
from jedi.inference.lazy_value import LazyTreeValue
from jedi.inference import compiled
from jedi.inference import recursion
from jedi.inference import analysis
from jedi.inference import imports
from jedi.inference import arguments
from jedi.inference.value import ClassValue, FunctionValue
from jedi.inference.value import iterable
from jedi.inference.value.dynamic_arrays import ListModification, DictModification
from jedi.inference.value import TreeInstance
from jedi.inference.helpers import is_string, is_literal, is_number, \
get_names_of_node, is_big_annoying_library
from jedi.inference.compiled.access import COMPARISON_OPERATORS
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.gradual.stub_value import VersionInfo
from jedi.inference.gradual import annotation
from jedi.inference.names import TreeNameDefinition
from jedi.inference.context import CompForContext
from jedi.inference.value.decorator import Decoratee
from jedi.plugins import plugin_manager
operator_to_magic_method = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'@': '__matmul__',
'/': '__truediv__',
'//': '__floordiv__',
'%': '__mod__',
'**': '__pow__',
'<<': '__lshift__',
'>>': '__rshift__',
'&': '__and__',
'|': '__or__',
'^': '__xor__',
}
reverse_operator_to_magic_method = {
k: '__r' + v[2:] for k, v in operator_to_magic_method.items()
}
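# Editor's sketch, not part of upstream jedi: the two tables above pair each
# binary operator with its magic method and its reflected ("__r...__") form.
def _demo_operator_tables():
    assert operator_to_magic_method['+'] == '__add__'
    assert reverse_operator_to_magic_method['+'] == '__radd__'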
def _limit_value_infers(func):
"""
    This is, for now, the way we limit type inference from going wild. There
    are other ways to ensure recursion limits as well. This is mostly
    necessary because of instance (self) access that can be quite tricky to
    limit.
    I'm still not sure this is the way to go, but it looks okay for now and we
    can still go another way in the future. Tests are there. ~ dave
"""
def wrapper(context, *args, **kwargs):
n = context.tree_node
inference_state = context.inference_state
try:
inference_state.inferred_element_counts[n] += 1
maximum = 300
if context.parent_context is None \
and context.get_value() is inference_state.builtins_module:
# Builtins should have a more generous inference limit.
# It is important that builtins can be executed, otherwise some
# functions that depend on certain builtins features would be
# broken, see e.g. GH #1432
maximum *= 100
if inference_state.inferred_element_counts[n] > maximum:
debug.warning('In value %s there were too many inferences.', n)
return NO_VALUES
except KeyError:
inference_state.inferred_element_counts[n] = 1
return func(context, *args, **kwargs)
return wrapper
def infer_node(context, element):
if isinstance(context, CompForContext):
return _infer_node(context, element)
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
if if_stmt.type in ('if_stmt', 'for_stmt'):
break
if parser_utils.is_scope(if_stmt):
if_stmt = None
break
predefined_if_name_dict = context.predefined_names.get(if_stmt)
# TODO there's a lot of issues with this one. We actually should do
# this in a different way. Caching should only be active in certain
# cases and this all sucks.
if predefined_if_name_dict is None and if_stmt \
and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
if_stmt_test = if_stmt.children[1]
name_dicts = [{}]
# If we already did a check, we don't want to do it again -> If
# value.predefined_names is filled, we stop.
# We don't want to check the if stmt itself, it's just about
# the content.
if element.start_pos > if_stmt_test.end_pos:
# Now we need to check if the names in the if_stmt match the
# names in the suite.
if_names = get_names_of_node(if_stmt_test)
element_names = get_names_of_node(element)
str_element_names = [e.value for e in element_names]
if any(i.value in str_element_names for i in if_names):
for if_name in if_names:
definitions = context.inference_state.infer(context, if_name)
# Every name that has multiple different definitions
# causes the complexity to rise. The complexity should
# never fall below 1.
if len(definitions) > 1:
if len(name_dicts) * len(definitions) > 16:
debug.dbg('Too many options for if branch inference %s.', if_stmt)
                                # There's only a certain number of branches
                                # Jedi can infer, otherwise it would take too
                                # long.
name_dicts = [{}]
break
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][if_name.value] = ValueSet([definition])
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[if_name.value] = definitions
if len(name_dicts) > 1:
result = NO_VALUES
for name_dict in name_dicts:
with context.predefine_names(if_stmt, name_dict):
result |= _infer_node(context, element)
return result
else:
return _infer_node_if_inferred(context, element)
else:
if predefined_if_name_dict:
return _infer_node(context, element)
else:
return _infer_node_if_inferred(context, element)
def _infer_node_if_inferred(context, element):
"""
TODO This function is temporary: Merge with infer_node.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return _infer_node(context, element)
return _infer_node_cached(context, element)
@inference_state_method_cache(default=NO_VALUES)
def _infer_node_cached(context, element):
return _infer_node(context, element)
@debug.increase_indent
@_limit_value_infers
def _infer_node(context, element):
debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
inference_state = context.inference_state
typ = element.type
if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
return infer_atom(context, element)
elif typ == 'lambdef':
return ValueSet([FunctionValue.from_context(context, element)])
elif typ == 'expr_stmt':
return infer_expr_stmt(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
children = element.children[1:]
had_await = False
if first_child.type == 'keyword' and first_child.value == 'await':
had_await = True
first_child = children.pop(0)
value_set = context.infer_node(first_child)
for (i, trailer) in enumerate(children):
if trailer == '**': # has a power operation.
right = context.infer_node(children[i + 1])
value_set = _infer_comparison(
context,
value_set,
trailer,
right
)
break
value_set = infer_trailer(context, value_set, trailer)
if had_await:
return value_set.py__await__().py__stop_iteration_returns()
return value_set
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
elif typ in ('not_test', 'factor'):
value_set = context.infer_node(element.children[-1])
for operator in element.children[:-1]:
value_set = infer_factor(value_set, operator)
return value_set
elif typ == 'test':
# `x if foo else y` case.
return (context.infer_node(element.children[0])
| context.infer_node(element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not inferred.
if element.value != '...':
origin = element.parent
raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
return ValueSet([compiled.builtin_from_name(inference_state, 'Ellipsis')])
elif typ == 'dotted_name':
value_set = infer_atom(context, element.children[0])
for next_name in element.children[2::2]:
value_set = value_set.py__getattribute__(next_name, name_context=context)
return value_set
elif typ == 'eval_input':
return context.infer_node(element.children[0])
elif typ == 'annassign':
return annotation.infer_annotation(context, element.children[1]) \
.execute_annotation()
elif typ == 'yield_expr':
if len(element.children) and element.children[1].type == 'yield_arg':
# Implies that it's a yield from.
element = element.children[1].children[1]
generators = context.infer_node(element) \
.py__getattribute__('__iter__').execute_with_values()
return generators.py__stop_iteration_returns()
# Generator.send() is not implemented.
return NO_VALUES
elif typ == 'namedexpr_test':
return context.infer_node(element.children[2])
else:
return infer_or_test(context, element)
def infer_trailer(context, atom_values, trailer):
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = None
if trailer_op == '[':
trailer_op, node, _ = trailer.children
return atom_values.get_item(
_infer_subscript_list(context, node),
ContextualizedNode(context, trailer)
)
else:
debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
if trailer_op == '.':
return atom_values.py__getattribute__(
name_context=context,
name_or_str=node
)
else:
assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
args = arguments.TreeArguments(context.inference_state, context, node, trailer)
return atom_values.execute(args)
def infer_atom(context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
state = context.inference_state
if atom.type == 'name':
# This is the first global lookup.
stmt = tree.search_ancestor(atom, 'expr_stmt', 'lambdef', 'if_stmt') or atom
if stmt.type == 'if_stmt':
if not any(n.start_pos <= atom.start_pos < n.end_pos for n in stmt.get_test_nodes()):
stmt = atom
elif stmt.type == 'lambdef':
stmt = atom
position = stmt.start_pos
if _is_annotation_name(atom):
# Since Python 3.7 (with from __future__ import annotations),
# annotations are essentially strings and can reference objects
# that are defined further down in code. Therefore just set the
# position to None, so the finder will not try to stop at a certain
# position in the module.
position = None
return context.py__getattribute__(atom, position=position)
elif atom.type == 'keyword':
# For False/True/None
if atom.value in ('False', 'True', 'None'):
return ValueSet([compiled.builtin_from_name(state, atom.value)])
elif atom.value == 'yield':
# Contrary to yield from, yield can just appear alone to return a
# value when used with `.send()`.
return NO_VALUES
assert False, 'Cannot infer the keyword %s' % atom
elif isinstance(atom, tree.Literal):
string = state.compiled_subprocess.safe_literal_eval(atom.value)
return ValueSet([compiled.create_simple_object(state, string)])
elif atom.type == 'strings':
# Will be multiple string.
value_set = infer_atom(context, atom.children[0])
for string in atom.children[1:]:
right = infer_atom(context, string)
value_set = _infer_comparison(context, value_set, '+', right)
return value_set
elif atom.type == 'fstring':
return compiled.get_string_value_set(state)
else:
c = atom.children
# Parentheses without commas are not tuples.
if c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp'
and len(c[1].children) > 1):
return context.infer_node(c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if comp_for == ':':
# Dict comprehensions have a colon at the 3rd index.
try:
comp_for = c[1].children[3]
except IndexError:
pass
if comp_for.type in ('comp_for', 'sync_comp_for'):
return ValueSet([iterable.comprehension_from_atom(
state, context, atom
)])
# It's a dict/list/tuple literal.
array_node = c[1]
try:
array_node_c = array_node.children
except AttributeError:
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c
or '**' in array_node_c):
new_value = iterable.DictLiteralValue(state, context, atom)
else:
new_value = iterable.SequenceLiteralValue(state, context, atom)
return ValueSet([new_value])
@_limit_value_infers
def infer_expr_stmt(context, stmt, seek_name=None):
with recursion.execution_allowed(context.inference_state, stmt) as allowed:
if allowed:
if seek_name is not None:
pep0484_values = \
annotation.find_type_from_comment_hint_assign(context, stmt, seek_name)
if pep0484_values:
return pep0484_values
return _infer_expr_stmt(context, stmt, seek_name)
return NO_VALUES
@debug.increase_indent
def _infer_expr_stmt(context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
list, which are the calls, that a statement does. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' test]
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
:param stmt: A `tree.ExprStmt`.
"""
def check_setitem(stmt):
atom_expr = stmt.children[0]
if atom_expr.type not in ('atom_expr', 'power'):
return False, None
name = atom_expr.children[0]
if name.type != 'name' or len(atom_expr.children) != 2:
return False, None
trailer = atom_expr.children[-1]
return trailer.children[0] == '[', trailer.children[1]
debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
value_set = context.infer_node(rhs)
if seek_name:
n = TreeNameDefinition(context, seek_name)
value_set = check_tuple_assignments(n, value_set)
first_operator = next(stmt.yield_operators(), None)
is_setitem, subscriptlist = check_setitem(stmt)
is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
if is_annassign or is_setitem:
# `=` is always the last character in aug assignments -> -1
name = stmt.get_defined_names(include_setitem=True)[0].value
left_values = context.py__getattribute__(name, position=stmt.start_pos)
if is_setitem:
def to_mod(v):
c = ContextualizedSubscriptListNode(context, subscriptlist)
if v.array_type == 'dict':
return DictModification(v, value_set, c)
elif v.array_type == 'list':
return ListModification(v, value_set, c)
return v
value_set = ValueSet(to_mod(v) for v in left_values)
else:
operator = copy.copy(first_operator)
operator.value = operator.value[:-1]
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
and parser_utils.for_stmt_defines_one_name(for_stmt):
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable. Also only do it, if the variable is not a tuple.
node = for_stmt.get_testlist()
cn = ContextualizedNode(context, node)
ordered = list(cn.infer().iterate(cn))
for lazy_value in ordered:
dct = {for_stmt.children[1].value: lazy_value.infer()}
with context.predefine_names(for_stmt, dct):
t = context.infer_node(rhs)
left_values = _infer_comparison(context, left_values, operator, t)
value_set = left_values
else:
value_set = _infer_comparison(context, left_values, operator, value_set)
debug.dbg('infer_expr_stmt result %s', value_set)
return value_set
def infer_or_test(context, or_test):
iterator = iter(or_test.children)
types = context.infer_node(next(iterator))
for operator in iterator:
right = next(iterator)
if operator.type == 'comp_op': # not in / is not
operator = ' '.join(c.value for c in operator.children)
# handle type inference of and/or here.
if operator in ('and', 'or'):
left_bools = set(left.py__bool__() for left in types)
if left_bools == {True}:
if operator == 'and':
types = context.infer_node(right)
elif left_bools == {False}:
if operator != 'and':
types = context.infer_node(right)
# Otherwise continue, because of uncertainty.
else:
types = _infer_comparison(context, types, operator,
context.infer_node(right))
debug.dbg('infer_or_test types %s', types)
return types
@iterator_to_value_set
def infer_factor(value_set, operator):
"""
Calculates `+`, `-`, `~` and `not` prefixes.
"""
for value in value_set:
if operator == '-':
if is_number(value):
yield value.negate()
elif operator == 'not':
b = value.py__bool__()
if b is None: # Uncertainty.
return
yield compiled.create_simple_object(value.inference_state, not b)
else:
yield value
def _literals_to_types(inference_state, result):
# Changes literals ('a', 1, 1.0, etc) to its type instances (str(),
# int(), float(), etc).
new_result = NO_VALUES
for typ in result:
if is_literal(typ):
# Literals are only valid as long as the operations are
# correct. Otherwise add a value-free instance.
cls = compiled.builtin_from_name(inference_state, typ.name.string_name)
new_result |= cls.execute_with_values()
else:
new_result |= ValueSet([typ])
return new_result
def _infer_comparison(context, left_values, operator, right_values):
state = context.inference_state
if not left_values or not right_values:
# illegal slices e.g. cause left/right_result to be None
result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
return _literals_to_types(state, result)
else:
# I don't think there's a reasonable chance that a string
# operation is still correct, once we pass something like six
# objects.
if len(left_values) * len(right_values) > 6:
return _literals_to_types(state, left_values | right_values)
else:
return ValueSet.from_sets(
_infer_comparison_part(state, context, left, operator, right)
for left in left_values
for right in right_values
)
def _is_annotation_name(name):
ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
if ancestor is None:
return False
if ancestor.type in ('param', 'funcdef'):
ann = ancestor.annotation
if ann is not None:
return ann.start_pos <= name.start_pos < ann.end_pos
elif ancestor.type == 'expr_stmt':
c = ancestor.children
if len(c) > 1 and c[1].type == 'annassign':
return c[1].start_pos <= name.start_pos < c[1].end_pos
return False
def _is_list(value):
return value.array_type == 'list'
def _is_tuple(value):
return value.array_type == 'tuple'
def _bool_to_value(inference_state, bool_):
return compiled.builtin_from_name(inference_state, str(bool_))
def _get_tuple_ints(value):
if not isinstance(value, iterable.SequenceLiteralValue):
return None
numbers = []
for lazy_value in value.py__iter__():
if not isinstance(lazy_value, LazyTreeValue):
return None
node = lazy_value.data
if node.type != 'number':
return None
try:
numbers.append(int(node.value))
except ValueError:
return None
return numbers
def _infer_comparison_part(inference_state, context, left, operator, right):
l_is_num = is_number(left)
r_is_num = is_number(right)
if isinstance(operator, str):
str_operator = operator
else:
str_operator = str(operator.value)
if str_operator == '*':
# for iterables, ignore * operations
if isinstance(left, iterable.Sequence) or is_string(left):
return ValueSet([left])
elif isinstance(right, iterable.Sequence) or is_string(right):
return ValueSet([right])
elif str_operator == '+':
if l_is_num and r_is_num or is_string(left) and is_string(right):
return left.execute_operation(right, str_operator)
elif _is_list(left) and _is_list(right) or _is_tuple(left) and _is_tuple(right):
return ValueSet([iterable.MergedArray(inference_state, (left, right))])
elif str_operator == '-':
if l_is_num and r_is_num:
return left.execute_operation(right, str_operator)
elif str_operator == '%':
# With strings and numbers the left type typically remains. Except for
# `int() % float()`.
return ValueSet([left])
elif str_operator in COMPARISON_OPERATORS:
if left.is_compiled() and right.is_compiled():
# Possible, because the return is not an option. Just compare.
result = left.execute_operation(right, str_operator)
if result:
return result
else:
if str_operator in ('is', '!=', '==', 'is not'):
operation = COMPARISON_OPERATORS[str_operator]
bool_ = operation(left, right)
# Only if == returns True or != returns False, we can continue.
# There's no guarantee that they are not equal. This can help
# in some cases, but does not cover everything.
if (str_operator in ('is', '==')) == bool_:
return ValueSet([_bool_to_value(inference_state, bool_)])
if isinstance(left, VersionInfo):
version_info = _get_tuple_ints(right)
if version_info is not None:
                bool_result = COMPARISON_OPERATORS[str_operator](
inference_state.environment.version_info,
tuple(version_info)
)
return ValueSet([_bool_to_value(inference_state, bool_result)])
return ValueSet([
_bool_to_value(inference_state, True),
_bool_to_value(inference_state, False)
])
elif str_operator in ('in', 'not in'):
return NO_VALUES
def check(obj):
"""Checks if a Jedi object is either a float or an int."""
return isinstance(obj, TreeInstance) and \
obj.name.string_name in ('int', 'float')
# Static analysis, one is a number, the other one is not.
if str_operator in ('+', '-') and l_is_num != r_is_num \
and not (check(left) or check(right)):
message = "TypeError: unsupported operand type(s) for +: %s and %s"
analysis.add(context, 'type-error-operation', operator,
message % (left, right))
if left.is_class() or right.is_class():
return NO_VALUES
method_name = operator_to_magic_method[str_operator]
magic_methods = left.py__getattribute__(method_name)
if magic_methods:
result = magic_methods.execute_with_values(right)
if result:
return result
if not magic_methods:
reverse_method_name = reverse_operator_to_magic_method[str_operator]
magic_methods = right.py__getattribute__(reverse_method_name)
result = magic_methods.execute_with_values(left)
if result:
return result
result = ValueSet([left, right])
debug.dbg('Used operator %s resulting in %s', operator, result)
return result
@plugin_manager.decorate()
def tree_name_to_values(inference_state, context, tree_name):
value_set = NO_VALUES
module_node = context.get_root_context().tree_node
# First check for annotations, like: `foo: int = 3`
if module_node is not None:
names = module_node.get_used_names().get(tree_name.value, [])
found_annotation = False
for name in names:
expr_stmt = name.parent
if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
if correct_scope:
found_annotation = True
value_set |= annotation.infer_annotation(
context, expr_stmt.children[1].children[1]
).execute_annotation()
if found_annotation:
return value_set
types = []
node = tree_name.get_definition(import_name_always=True, include_setitem=True)
if node is None:
node = tree_name.parent
if node.type == 'global_stmt':
c = context.create_context(tree_name)
if c.is_module():
# In case we are already part of the module, there is no point
# in looking up the global statement anymore, because it's not
# valid at that point anyway.
return NO_VALUES
# For global_stmt lookups, we only need the first possible scope,
# which means the function itself.
filter = next(c.get_filters())
names = filter.get(tree_name.value)
return ValueSet.from_sets(name.infer() for name in names)
elif node.type not in ('import_from', 'import_name'):
c = context.create_context(tree_name)
return infer_atom(c, tree_name)
typ = node.type
if typ == 'for_stmt':
types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
if types:
return types
if typ == 'with_stmt':
types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
if types:
return types
if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
try:
types = context.predefined_names[node][tree_name.value]
except KeyError:
cn = ContextualizedNode(context, node.children[3])
for_types = iterate_values(
cn.infer(),
contextualized_node=cn,
is_async=node.parent.type == 'async_stmt',
)
n = TreeNameDefinition(context, tree_name)
types = check_tuple_assignments(n, for_types)
elif typ == 'expr_stmt':
types = infer_expr_stmt(context, node, tree_name)
elif typ == 'with_stmt':
value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
if node.parent.type == 'async_stmt':
# In the case of `async with` statements, we need to
# first get the coroutine from the `__aenter__` method,
# then "unwrap" via the `__await__` method
enter_methods = value_managers.py__getattribute__('__aenter__')
coro = enter_methods.execute_with_values()
return coro.py__await__().py__stop_iteration_returns()
enter_methods = value_managers.py__getattribute__('__enter__')
return enter_methods.execute_with_values()
elif typ in ('import_from', 'import_name'):
types = imports.infer_import(context, tree_name)
elif typ in ('funcdef', 'classdef'):
types = _apply_decorators(context, node)
elif typ == 'try_stmt':
# TODO an exception can also be a tuple. Check for those.
# TODO check for types that are not classes and add it to
# the static analysis report.
exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
types = exceptions.execute_with_values()
elif typ == 'param':
types = NO_VALUES
elif typ == 'del_stmt':
types = NO_VALUES
elif typ == 'namedexpr_test':
types = infer_node(context, node)
else:
raise ValueError("Should not happen. type: %s" % typ)
return types
# We don't want to have functions/classes that are created by the same
# tree_node.
@inference_state_method_cache()
def _apply_decorators(context, node):
"""
    Returns the function that should be executed in the end.
    This is also the place where the decorators are processed.
"""
if node.type == 'classdef':
decoratee_value = ClassValue(
context.inference_state,
parent_context=context,
tree_node=node
)
else:
decoratee_value = FunctionValue.from_context(context, node)
initial = values = ValueSet([decoratee_value])
if is_big_annoying_library(context):
return values
for dec in reversed(node.get_decorators()):
debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
with debug.increase_indent_cm():
dec_values = context.infer_node(dec.children[1])
trailer_nodes = dec.children[2:-1]
if trailer_nodes:
# Create a trailer and infer it.
trailer = tree.PythonNode('trailer', trailer_nodes)
trailer.parent = dec
dec_values = infer_trailer(context, dec_values, trailer)
if not len(dec_values):
code = dec.get_code(include_prefix=False)
                # For the near future, we don't want to hear about the runtime
# decorator in typing that was intentionally omitted. This is not
# "correct", but helps with debugging.
if code != '@runtime\n':
debug.warning('decorator not found: %s on %s', dec, node)
return initial
values = dec_values.execute(arguments.ValuesArguments([values]))
if not len(values):
debug.warning('not possible to resolve wrappers found %s', node)
return initial
debug.dbg('decorator end %s', values, color="MAGENTA")
if values != initial:
return ValueSet([Decoratee(c, decoratee_value) for c in values])
return values
def check_tuple_assignments(name, value_set):
"""
Checks if tuples are assigned.
"""
lazy_value = None
for index, node in name.assignment_indexes():
cn = ContextualizedNode(name.parent_context, node)
iterated = value_set.iterate(cn)
if isinstance(index, slice):
            # For now, star unpacking is not possible.
return NO_VALUES
i = 0
while i <= index:
try:
lazy_value = next(iterated)
except StopIteration:
# We could do this with the default param in next. But this
# would allow this loop to run for a very long time if the
# index number is high. Therefore break if the loop is
# finished.
return NO_VALUES
else:
i += lazy_value.max
value_set = lazy_value.infer()
return value_set
class ContextualizedSubscriptListNode(ContextualizedNode):
def infer(self):
return _infer_subscript_list(self.context, self.node)
def _infer_subscript_list(context, index):
"""
Handles slices in subscript nodes.
"""
if index == ':':
# Like array[:]
return ValueSet([iterable.Slice(context, None, None, None)])
elif index.type == 'subscript' and not index.children[0] == '.':
# subscript basically implies a slice operation
# e.g. array[:3]
result = []
for el in index.children:
if el == ':':
if not result:
result.append(None)
elif el.type == 'sliceop':
if len(el.children) == 2:
result.append(el.children[1])
else:
result.append(el)
result += [None] * (3 - len(result))
return ValueSet([iterable.Slice(context, *result)])
elif index.type == 'subscriptlist':
return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])
# No slices
return context.infer_node(index)
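# Editor's note (illustrative sketch, not part of jedi): the subscript shapes
# the function above distinguishes, shown conceptually for a sequence ``arr``
# (the collected entries are really tree nodes, inferred later):
#
#     arr[:]       -> Slice(context, None, None, None)
#     arr[1:3]     -> Slice(context, 1, 3, None)
#     arr[1:3:2]   -> Slice(context, 1, 3, 2)    # 'sliceop' supplies the 2
#     arr[a, b]    -> 'subscriptlist', handled as a SequenceLiteralValue
#     arr[0]       -> plain index, inferred with context.infer_node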
# ----------------------------------------------------------------------------
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/value/module.py
# ----------------------------------------------------------------------------
import os
from pathlib import Path
from typing import Optional
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.names import AbstractNameDefinition, ModuleName
from jedi.inference.filters import GlobalNameFilter, ParserTreeFilter, DictFilter, MergedFilter
from jedi.inference import compiled
from jedi.inference.base_value import TreeValue
from jedi.inference.names import SubModuleName
from jedi.inference.helpers import values_from_qualified_names
from jedi.inference.compiled import create_simple_object
from jedi.inference.base_value import ValueSet
from jedi.inference.context import ModuleContext
class _ModuleAttributeName(AbstractNameDefinition):
"""
For module attributes like __file__, __str__ and so on.
"""
api_type = 'instance'
def __init__(self, parent_module, string_name, string_value=None):
self.parent_context = parent_module
self.string_name = string_name
self._string_value = string_value
def infer(self):
if self._string_value is not None:
s = self._string_value
return ValueSet([
create_simple_object(self.parent_context.inference_state, s)
])
return compiled.get_string_value_set(self.parent_context.inference_state)
class SubModuleDictMixin:
@inference_state_method_cache()
def sub_modules_dict(self):
"""
Lists modules in the directory of this module (if this module is a
package).
"""
names = {}
if self.is_package():
mods = self.inference_state.compiled_subprocess.iter_module_names(
self.py__path__()
)
for name in mods:
# It's obviously a relative import to the current module.
names[name] = SubModuleName(self.as_context(), name)
# In the case of an import like `from x.` we don't need to
# add all the variables, this is only about submodules.
return names
class ModuleMixin(SubModuleDictMixin):
_module_name_class = ModuleName
def get_filters(self, origin_scope=None):
yield MergedFilter(
ParserTreeFilter(
parent_context=self.as_context(),
origin_scope=origin_scope
),
GlobalNameFilter(self.as_context()),
)
yield DictFilter(self.sub_modules_dict())
yield DictFilter(self._module_attributes_dict())
yield from self.iter_star_filters()
def py__class__(self):
c, = values_from_qualified_names(self.inference_state, 'types', 'ModuleType')
return c
def is_module(self):
return True
def is_stub(self):
return False
@property # type: ignore[misc]
@inference_state_method_cache()
def name(self):
return self._module_name_class(self, self.string_names[-1])
@inference_state_method_cache()
def _module_attributes_dict(self):
names = ['__package__', '__doc__', '__name__']
# All the additional module attributes are strings.
dct = dict((n, _ModuleAttributeName(self, n)) for n in names)
path = self.py__file__()
if path is not None:
dct['__file__'] = _ModuleAttributeName(self, '__file__', str(path))
return dct
def iter_star_filters(self):
for star_module in self.star_imports():
f = next(star_module.get_filters(), None)
assert f is not None
yield f
# I'm not sure if the star import cache is really that effective anymore
# with all the other really fast import caches. Recheck. Also we would need
# to push the star imports into InferenceState.module_cache, if we reenable this.
@inference_state_method_cache([])
def star_imports(self):
from jedi.inference.imports import Importer
modules = []
module_context = self.as_context()
for i in self.tree_node.iter_imports():
if i.is_star_import():
new = Importer(
self.inference_state,
import_path=i.get_paths()[-1],
module_context=module_context,
level=i.level
).follow()
for module in new:
if isinstance(module, ModuleValue):
modules += module.star_imports()
modules += new
return modules
def get_qualified_names(self):
"""
A module doesn't have a qualified name, but it's important to note that
it's reachable and not `None`. With this information we can add
qualified names on top for all value children.
"""
return ()
class ModuleValue(ModuleMixin, TreeValue):
api_type = 'module'
def __init__(self, inference_state, module_node, code_lines, file_io=None,
string_names=None, is_package=False):
super().__init__(
inference_state,
parent_context=None,
tree_node=module_node
)
self.file_io = file_io
if file_io is None:
self._path: Optional[Path] = None
else:
self._path = file_io.path
self.string_names = string_names # Optional[Tuple[str, ...]]
self.code_lines = code_lines
self._is_package = is_package
def is_stub(self):
if self._path is not None and self._path.suffix == '.pyi':
# Currently this is the way how we identify stubs when e.g. goto is
# used in them. This could be changed if stubs would be identified
# sooner and used as StubModuleValue.
return True
return super().is_stub()
def py__name__(self):
if self.string_names is None:
return None
return '.'.join(self.string_names)
def py__file__(self) -> Optional[Path]:
"""
        In contrast to Python's ``__file__``, this can be None.
"""
if self._path is None:
return None
return self._path.absolute()
def is_package(self):
return self._is_package
def py__package__(self):
if self.string_names is None:
return []
if self._is_package:
return self.string_names
return self.string_names[:-1]
def py__path__(self):
"""
In case of a package, this returns Python's __path__ attribute, which
is a list of paths (strings).
Returns None if the module is not a package.
"""
if not self._is_package:
return None
# A namespace package is typically auto generated and ~10 lines long.
first_few_lines = ''.join(self.code_lines[:50])
# these are strings that need to be used for namespace packages,
# the first one is ``pkgutil``, the second ``pkg_resources``.
options = ('declare_namespace(__name__)', 'extend_path(__path__')
if options[0] in first_few_lines or options[1] in first_few_lines:
# It is a namespace, now try to find the rest of the
# modules on sys_path or whatever the search_path is.
paths = set()
for s in self.inference_state.get_sys_path():
other = os.path.join(s, self.name.string_name)
if os.path.isdir(other):
paths.add(other)
if paths:
return list(paths)
# Nested namespace packages will not be supported. Nobody ever
# asked for it and in Python 3 they are there without using all the
# crap above.
        # Default to the directory of this file.
file = self.py__file__()
assert file is not None # Shouldn't be a package in the first place.
return [os.path.dirname(file)]
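    # Editor's note (illustrative sketch, not part of jedi): the two marker
    # strings above correspond to the classic namespace-package idioms found
    # in an ``__init__.py``:
    #
    #     __import__('pkg_resources').declare_namespace(__name__)
    #     __path__ = __import__('pkgutil').extend_path(__path__, __name__)
    #
    # Only if one of them appears in the first ~50 lines does jedi widen
    # ``__path__`` to the matching directories on the sys path.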
def _as_context(self):
return ModuleContext(self)
def __repr__(self):
return "<%s: %s@%s-%s is_stub=%s>" % (
self.__class__.__name__, self.py__name__(),
self.tree_node.start_pos[0], self.tree_node.end_pos[0],
self.is_stub()
)
# ----------------------------------------------------------------------------
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/value/iterable.py
# ----------------------------------------------------------------------------
"""
Contains all classes and functions to deal with lists, dicts, generators and
iterators in general.
"""
from jedi.inference import compiled
from jedi.inference import analysis
from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues, \
LazyTreeValue
from jedi.inference.helpers import get_int_or_none, is_string, \
reraise_getitem_errors, SimpleGetItemNotFound
from jedi.inference.utils import safe_property, to_list
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.filters import LazyAttributeOverwrite, publish_method
from jedi.inference.base_value import ValueSet, Value, NO_VALUES, \
ContextualizedNode, iterate_values, sentinel, \
LazyValueWrapper
from jedi.parser_utils import get_sync_comp_fors
from jedi.inference.context import CompForContext
from jedi.inference.value.dynamic_arrays import check_array_additions
class IterableMixin:
def py__next__(self, contextualized_node=None):
return self.py__iter__(contextualized_node)
def py__stop_iteration_returns(self):
return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')])
# At the moment, safe values are simple values like "foo", 1 and not
# lists/dicts. Therefore as a small speed optimization we can just do the
    # default instead of resolving the lazy wrapped values, which just do
    # this in the end as well.
# This mostly speeds up patterns like `sys.version_info >= (3, 0)` in
# typeshed.
get_safe_value = Value.get_safe_value
class GeneratorBase(LazyAttributeOverwrite, IterableMixin):
array_type = None
def _get_wrapped_value(self):
instance, = self._get_cls().execute_annotation()
return instance
def _get_cls(self):
generator, = self.inference_state.typing_module.py__getattribute__('Generator')
return generator
def py__bool__(self):
return True
@publish_method('__iter__')
def _iter(self, arguments):
return ValueSet([self])
@publish_method('send')
@publish_method('__next__')
def _next(self, arguments):
return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())
def py__stop_iteration_returns(self):
return ValueSet([compiled.builtin_from_name(self.inference_state, 'None')])
@property
def name(self):
return compiled.CompiledValueName(self, 'Generator')
def get_annotated_class_object(self):
from jedi.inference.gradual.generics import TupleGenericManager
gen_values = self.merge_types_of_iterate().py__class__()
gm = TupleGenericManager((gen_values, NO_VALUES, NO_VALUES))
return self._get_cls().with_generics(gm)
class Generator(GeneratorBase):
"""Handling of `yield` functions."""
def __init__(self, inference_state, func_execution_context):
super().__init__(inference_state)
self._func_execution_context = func_execution_context
def py__iter__(self, contextualized_node=None):
iterators = self._func_execution_context.infer_annotations()
if iterators:
return iterators.iterate(contextualized_node)
return self._func_execution_context.get_yield_lazy_values()
def py__stop_iteration_returns(self):
return self._func_execution_context.get_return_values()
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
def comprehension_from_atom(inference_state, value, atom):
bracket = atom.children[0]
test_list_comp = atom.children[1]
if bracket == '{':
if atom.children[1].children[1] == ':':
sync_comp_for = test_list_comp.children[3]
if sync_comp_for.type == 'comp_for':
sync_comp_for = sync_comp_for.children[1]
return DictComprehension(
inference_state,
value,
sync_comp_for_node=sync_comp_for,
key_node=test_list_comp.children[0],
value_node=test_list_comp.children[2],
)
else:
cls = SetComprehension
elif bracket == '(':
cls = GeneratorComprehension
elif bracket == '[':
cls = ListComprehension
sync_comp_for = test_list_comp.children[1]
if sync_comp_for.type == 'comp_for':
sync_comp_for = sync_comp_for.children[1]
return cls(
inference_state,
defining_context=value,
sync_comp_for_node=sync_comp_for,
entry_node=test_list_comp.children[0],
)
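# Editor's note (illustrative sketch, not part of jedi): the opening bracket
# of the atom decides which comprehension value is created above:
#
#     [x for x in y]      -> ListComprehension
#     {x for x in y}      -> SetComprehension
#     {k: v for k in y}   -> DictComprehension
#     (x for x in y)      -> GeneratorComprehension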
class ComprehensionMixin:
@inference_state_method_cache()
def _get_comp_for_context(self, parent_context, comp_for):
return CompForContext(parent_context, comp_for)
def _nested(self, comp_fors, parent_context=None):
comp_for = comp_fors[0]
is_async = comp_for.parent.type == 'comp_for'
input_node = comp_for.children[3]
parent_context = parent_context or self._defining_context
input_types = parent_context.infer_node(input_node)
cn = ContextualizedNode(parent_context, input_node)
iterated = input_types.iterate(cn, is_async=is_async)
exprlist = comp_for.children[1]
for i, lazy_value in enumerate(iterated):
types = lazy_value.infer()
dct = unpack_tuple_to_dict(parent_context, types, exprlist)
context = self._get_comp_for_context(
parent_context,
comp_for,
)
with context.predefine_names(comp_for, dct):
try:
yield from self._nested(comp_fors[1:], context)
except IndexError:
iterated = context.infer_node(self._entry_node)
if self.array_type == 'dict':
yield iterated, context.infer_node(self._value_node)
else:
yield iterated
@inference_state_method_cache(default=[])
@to_list
def _iterate(self):
comp_fors = tuple(get_sync_comp_fors(self._sync_comp_for_node))
yield from self._nested(comp_fors)
def py__iter__(self, contextualized_node=None):
for set_ in self._iterate():
yield LazyKnownValues(set_)
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._sync_comp_for_node)
class _DictMixin:
def _get_generics(self):
return tuple(c_set.py__class__() for c_set in self.get_mapping_item_values())
class Sequence(LazyAttributeOverwrite, IterableMixin):
api_type = 'instance'
@property
def name(self):
return compiled.CompiledValueName(self, self.array_type)
def _get_generics(self):
return (self.merge_types_of_iterate().py__class__(),)
@inference_state_method_cache(default=())
def _cached_generics(self):
return self._get_generics()
def _get_wrapped_value(self):
from jedi.inference.gradual.base import GenericClass
from jedi.inference.gradual.generics import TupleGenericManager
klass = compiled.builtin_from_name(self.inference_state, self.array_type)
c, = GenericClass(
klass,
TupleGenericManager(self._cached_generics())
).execute_annotation()
return c
def py__bool__(self):
return None # We don't know the length, because of appends.
@safe_property
def parent(self):
return self.inference_state.builtins_module
def py__getitem__(self, index_value_set, contextualized_node):
if self.array_type == 'dict':
return self._dict_values()
return iterate_values(ValueSet([self]))
class _BaseComprehension(ComprehensionMixin):
def __init__(self, inference_state, defining_context, sync_comp_for_node, entry_node):
assert sync_comp_for_node.type == 'sync_comp_for'
super().__init__(inference_state)
self._defining_context = defining_context
self._sync_comp_for_node = sync_comp_for_node
self._entry_node = entry_node
class ListComprehension(_BaseComprehension, Sequence):
array_type = 'list'
def py__simple_getitem__(self, index):
if isinstance(index, slice):
return ValueSet([self])
all_types = list(self.py__iter__())
with reraise_getitem_errors(IndexError, TypeError):
lazy_value = all_types[index]
return lazy_value.infer()
class SetComprehension(_BaseComprehension, Sequence):
array_type = 'set'
class GeneratorComprehension(_BaseComprehension, GeneratorBase):
pass
class _DictKeyMixin:
# TODO merge with _DictMixin?
def get_mapping_item_values(self):
return self._dict_keys(), self._dict_values()
def get_key_values(self):
# TODO merge with _dict_keys?
return self._dict_keys()
class DictComprehension(ComprehensionMixin, Sequence, _DictKeyMixin):
array_type = 'dict'
def __init__(self, inference_state, defining_context, sync_comp_for_node, key_node, value_node):
assert sync_comp_for_node.type == 'sync_comp_for'
super().__init__(inference_state)
self._defining_context = defining_context
self._sync_comp_for_node = sync_comp_for_node
self._entry_node = key_node
self._value_node = value_node
def py__iter__(self, contextualized_node=None):
for keys, values in self._iterate():
yield LazyKnownValues(keys)
def py__simple_getitem__(self, index):
for keys, values in self._iterate():
for k in keys:
# Be careful in the future if refactoring, index could be a
# slice object.
if k.get_safe_value(default=object()) == index:
return values
raise SimpleGetItemNotFound()
def _dict_keys(self):
return ValueSet.from_sets(keys for keys, values in self._iterate())
def _dict_values(self):
return ValueSet.from_sets(values for keys, values in self._iterate())
@publish_method('values')
def _imitate_values(self, arguments):
lazy_value = LazyKnownValues(self._dict_values())
return ValueSet([FakeList(self.inference_state, [lazy_value])])
@publish_method('items')
def _imitate_items(self, arguments):
lazy_values = [
LazyKnownValue(
FakeTuple(
self.inference_state,
[LazyKnownValues(key),
LazyKnownValues(value)]
)
)
for key, value in self._iterate()
]
return ValueSet([FakeList(self.inference_state, lazy_values)])
def exact_key_items(self):
        # NOTE: A smarter thing can probably be done here to achieve better
        # completions, but at least like this jedi doesn't crash.
return []
class SequenceLiteralValue(Sequence):
_TUPLE_LIKE = 'testlist_star_expr', 'testlist', 'subscriptlist'
mapping = {'(': 'tuple',
'[': 'list',
'{': 'set'}
def __init__(self, inference_state, defining_context, atom):
super().__init__(inference_state)
self.atom = atom
self._defining_context = defining_context
if self.atom.type in self._TUPLE_LIKE:
self.array_type = 'tuple'
else:
self.array_type = SequenceLiteralValue.mapping[atom.children[0]]
"""The builtin name of the array (list, set, tuple or dict)."""
def _get_generics(self):
if self.array_type == 'tuple':
return tuple(x.infer().py__class__() for x in self.py__iter__())
return super()._get_generics()
def py__simple_getitem__(self, index):
"""Here the index is an int/str. Raises IndexError/KeyError."""
if isinstance(index, slice):
return ValueSet([self])
else:
with reraise_getitem_errors(TypeError, KeyError, IndexError):
node = self.get_tree_entries()[index]
if node == ':' or node.type == 'subscript':
return NO_VALUES
return self._defining_context.infer_node(node)
def py__iter__(self, contextualized_node=None):
"""
While values returns the possible values for any array field, this
function returns the value for a certain index.
"""
for node in self.get_tree_entries():
if node == ':' or node.type == 'subscript':
# TODO this should probably use at least part of the code
# of infer_subscript_list.
yield LazyKnownValue(Slice(self._defining_context, None, None, None))
else:
yield LazyTreeValue(self._defining_context, node)
yield from check_array_additions(self._defining_context, self)
def py__len__(self):
# This function is not really used often. It's more of a try.
return len(self.get_tree_entries())
def get_tree_entries(self):
c = self.atom.children
if self.atom.type in self._TUPLE_LIKE:
return c[::2]
array_node = c[1]
if array_node in (']', '}', ')'):
return [] # Direct closing bracket, doesn't contain items.
if array_node.type == 'testlist_comp':
# filter out (for now) pep 448 single-star unpacking
return [value for value in array_node.children[::2]
if value.type != "star_expr"]
elif array_node.type == 'dictorsetmaker':
kv = []
iterator = iter(array_node.children)
for key in iterator:
if key == "**":
# dict with pep 448 double-star unpacking
# for now ignoring the values imported by **
next(iterator)
next(iterator, None) # Possible comma.
else:
op = next(iterator, None)
if op is None or op == ',':
if key.type == "star_expr":
# pep 448 single-star unpacking
# for now ignoring values imported by *
pass
else:
kv.append(key) # A set.
else:
assert op == ':' # A dict.
kv.append((key, next(iterator)))
next(iterator, None) # Possible comma.
return kv
else:
if array_node.type == "star_expr":
# pep 448 single-star unpacking
# for now ignoring values imported by *
return []
else:
return [array_node]
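    # Editor's note (illustrative sketch, not part of jedi): entries the
    # method above extracts in the presence of PEP 448 unpacking:
    #
    #     [1, *a, 2]         -> [1, 2]       (star_expr is filtered out)
    #     {'k': 1, **other}  -> [('k', 1)]   (double-star pairs are skipped)
    #     {*a}               -> []           (single-star set entry skipped)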
def __repr__(self):
return "<%s of %s>" % (self.__class__.__name__, self.atom)
class DictLiteralValue(_DictMixin, SequenceLiteralValue, _DictKeyMixin):
array_type = 'dict'
def __init__(self, inference_state, defining_context, atom):
# Intentionally don't call the super class. This is definitely a sign
# that the architecture is bad and we should refactor.
Sequence.__init__(self, inference_state)
self._defining_context = defining_context
self.atom = atom
def py__simple_getitem__(self, index):
"""Here the index is an int/str. Raises IndexError/KeyError."""
compiled_value_index = compiled.create_simple_object(self.inference_state, index)
for key, value in self.get_tree_entries():
for k in self._defining_context.infer_node(key):
for key_v in k.execute_operation(compiled_value_index, '=='):
if key_v.get_safe_value():
return self._defining_context.infer_node(value)
raise SimpleGetItemNotFound('No key found in dictionary %s.' % self)
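    # Editor's note (illustrative sketch, not part of jedi): for
    # ``{'a': 1}['a']`` the method above compares the index against each
    # inferred key with an ``==`` operation and returns the value types of
    # the matching pair; ``{'a': 1}['b']`` raises SimpleGetItemNotFound.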
def py__iter__(self, contextualized_node=None):
"""
While values returns the possible values for any array field, this
function returns the value for a certain index.
"""
# Get keys.
types = NO_VALUES
for k, _ in self.get_tree_entries():
types |= self._defining_context.infer_node(k)
# We don't know which dict index comes first, therefore always
# yield all the types.
for _ in types:
yield LazyKnownValues(types)
@publish_method('values')
def _imitate_values(self, arguments):
lazy_value = LazyKnownValues(self._dict_values())
return ValueSet([FakeList(self.inference_state, [lazy_value])])
@publish_method('items')
def _imitate_items(self, arguments):
lazy_values = [
LazyKnownValue(FakeTuple(
self.inference_state,
(LazyTreeValue(self._defining_context, key_node),
LazyTreeValue(self._defining_context, value_node))
)) for key_node, value_node in self.get_tree_entries()
]
return ValueSet([FakeList(self.inference_state, lazy_values)])
def exact_key_items(self):
"""
Returns a generator of tuples like dict.items(), where the key is
resolved (as a string) and the values are still lazy values.
"""
for key_node, value in self.get_tree_entries():
for key in self._defining_context.infer_node(key_node):
if is_string(key):
yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)
def _dict_values(self):
return ValueSet.from_sets(
self._defining_context.infer_node(v)
for k, v in self.get_tree_entries()
)
def _dict_keys(self):
return ValueSet.from_sets(
self._defining_context.infer_node(k)
for k, v in self.get_tree_entries()
)
class _FakeSequence(Sequence):
def __init__(self, inference_state, lazy_value_list):
"""
type should be one of "tuple", "list"
"""
super().__init__(inference_state)
self._lazy_value_list = lazy_value_list
def py__simple_getitem__(self, index):
if isinstance(index, slice):
return ValueSet([self])
with reraise_getitem_errors(IndexError, TypeError):
lazy_value = self._lazy_value_list[index]
return lazy_value.infer()
def py__iter__(self, contextualized_node=None):
return self._lazy_value_list
def py__bool__(self):
return bool(len(self._lazy_value_list))
def __repr__(self):
return "<%s of %s>" % (type(self).__name__, self._lazy_value_list)
class FakeTuple(_FakeSequence):
array_type = 'tuple'
class FakeList(_FakeSequence):
    array_type = 'list'
class FakeDict(_DictMixin, Sequence, _DictKeyMixin):
array_type = 'dict'
def __init__(self, inference_state, dct):
super().__init__(inference_state)
self._dct = dct
def py__iter__(self, contextualized_node=None):
for key in self._dct:
yield LazyKnownValue(compiled.create_simple_object(self.inference_state, key))
def py__simple_getitem__(self, index):
with reraise_getitem_errors(KeyError, TypeError):
lazy_value = self._dct[index]
return lazy_value.infer()
@publish_method('values')
def _values(self, arguments):
return ValueSet([FakeTuple(
self.inference_state,
[LazyKnownValues(self._dict_values())]
)])
def _dict_values(self):
return ValueSet.from_sets(lazy_value.infer() for lazy_value in self._dct.values())
def _dict_keys(self):
return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())
def exact_key_items(self):
return self._dct.items()
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._dct)
class MergedArray(Sequence):
def __init__(self, inference_state, arrays):
super().__init__(inference_state)
self.array_type = arrays[-1].array_type
self._arrays = arrays
def py__iter__(self, contextualized_node=None):
for array in self._arrays:
yield from array.py__iter__()
def py__simple_getitem__(self, index):
return ValueSet.from_sets(lazy_value.infer() for lazy_value in self.py__iter__())
def unpack_tuple_to_dict(context, types, exprlist):
"""
Unpacking tuple assignments in for statements and expr_stmts.
"""
if exprlist.type == 'name':
return {exprlist.value: types}
elif exprlist.type == 'atom' and exprlist.children[0] in ('(', '['):
return unpack_tuple_to_dict(context, types, exprlist.children[1])
elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
'testlist_star_expr'):
dct = {}
parts = iter(exprlist.children[::2])
n = 0
for lazy_value in types.iterate(ContextualizedNode(context, exprlist)):
n += 1
try:
part = next(parts)
except StopIteration:
analysis.add(context, 'value-error-too-many-values', part,
message="ValueError: too many values to unpack (expected %s)" % n)
else:
dct.update(unpack_tuple_to_dict(context, lazy_value.infer(), part))
has_parts = next(parts, None)
if types and has_parts is not None:
analysis.add(context, 'value-error-too-few-values', has_parts,
message="ValueError: need more than %s values to unpack" % n)
return dct
elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
# Something like ``arr[x], var = ...``.
# This is something that is not yet supported, would also be difficult
# to write into a dict.
return {}
elif exprlist.type == 'star_expr': # `a, *b, c = x` type unpackings
# Currently we're not supporting them.
return {}
raise NotImplementedError
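# Editor's note (illustrative sketch, not part of jedi): target shapes the
# function above accepts:
#
#     a = x           -> {'a': types}
#     a, b = x        -> one entry per target, via the iterated lazy values
#     (a, b) = x      -> recurses into the parenthesized atom
#     arr[i]          -> {} ('power'/'atom_expr' target, unsupported)
#     *b              -> {} ('star_expr' target, unsupported)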
class Slice(LazyValueWrapper):
def __init__(self, python_context, start, stop, step):
self.inference_state = python_context.inference_state
self._context = python_context
# All of them are either a Precedence or None.
self._start = start
self._stop = stop
self._step = step
def _get_wrapped_value(self):
value = compiled.builtin_from_name(self._context.inference_state, 'slice')
slice_value, = value.execute_with_values()
return slice_value
def get_safe_value(self, default=sentinel):
"""
Imitate CompiledValue.obj behavior and return a ``builtin.slice()``
object.
"""
def get(element):
if element is None:
return None
result = self._context.infer_node(element)
if len(result) != 1:
                # For simplicity, we want slices to be clearly defined with
                # just one type. Otherwise we will return an empty slice object.
raise IndexError
value, = result
return get_int_or_none(value)
try:
return slice(get(self._start), get(self._stop), get(self._step))
except IndexError:
return slice(None, None, None)
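# Editor's note (illustrative sketch, not part of jedi): ``get_safe_value``
# above mirrors a runtime slice object where possible; for something like
# ``x[1:n]`` with an ambiguously inferred ``n``, the IndexError branch
# degrades to ``slice(None, None, None)`` instead of guessing.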
# ----------------------------------------------------------------------------
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/value/decorator.py
# ----------------------------------------------------------------------------
'''
Decorators are not really values, however we need some wrappers to improve
docstrings and other things around decorators.
'''
from jedi.inference.base_value import ValueWrapper, ValueSet
class Decoratee(ValueWrapper):
def __init__(self, wrapped_value, original_value):
super().__init__(wrapped_value)
self._original_value = original_value
def py__doc__(self):
return self._original_value.py__doc__()
def py__get__(self, instance, class_value):
return ValueSet(
Decoratee(v, self._original_value)
for v in self._wrapped_value.py__get__(instance, class_value)
)
def get_signatures(self):
signatures = self._wrapped_value.get_signatures()
if signatures:
return signatures
# Fallback to signatures of the original function/class if the
# decorator has no signature or it is not inferrable.
#
# __get__ means that it's a descriptor. In that case we don't return
# signatures, because they are usually properties.
if not self._wrapped_value.py__getattribute__('__get__'):
return self._original_value.get_signatures()
return []
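# Editor's note (illustrative sketch, not part of jedi): ``Decoratee`` matters
# for wrappers that erase metadata. Assuming a decorator such as
#
#     def deco(f):
#         def wrapper(*args, **kwargs):
#             return f(*args, **kwargs)
#         return wrapper
#
# ``py__doc__`` surfaces the docstring of the undecorated function, and
# ``get_signatures`` falls back to it when the wrapper yields no signatures.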
# ----------------------------------------------------------------------------
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/value/function.py
# ----------------------------------------------------------------------------
from parso.python import tree
from jedi import debug
from jedi.inference.cache import inference_state_method_cache, CachedMetaClass
from jedi.inference import compiled
from jedi.inference import recursion
from jedi.inference import docstrings
from jedi.inference import flow_analysis
from jedi.inference.signature import TreeSignature
from jedi.inference.filters import ParserTreeFilter, FunctionExecutionFilter, \
AnonymousFunctionExecutionFilter
from jedi.inference.names import ValueName, AbstractNameDefinition, \
AnonymousParamName, ParamName, NameWrapper
from jedi.inference.base_value import ContextualizedNode, NO_VALUES, \
ValueSet, TreeValue, ValueWrapper
from jedi.inference.lazy_value import LazyKnownValues, LazyKnownValue, \
LazyTreeValue
from jedi.inference.context import ValueContext, TreeContextMixin
from jedi.inference.value import iterable
from jedi import parser_utils
from jedi.inference.parser_cache import get_yield_exprs
from jedi.inference.helpers import values_from_qualified_names
from jedi.inference.gradual.generics import TupleGenericManager
class LambdaName(AbstractNameDefinition):
string_name = '<lambda>'
api_type = 'function'
def __init__(self, lambda_value):
self._lambda_value = lambda_value
self.parent_context = lambda_value.parent_context
@property
def start_pos(self):
return self._lambda_value.tree_node.start_pos
def infer(self):
return ValueSet([self._lambda_value])
class FunctionAndClassBase(TreeValue):
def get_qualified_names(self):
if self.parent_context.is_class():
n = self.parent_context.get_qualified_names()
if n is None:
# This means that the parent class lives within a function.
return None
return n + (self.py__name__(),)
elif self.parent_context.is_module():
return (self.py__name__(),)
else:
return None
class FunctionMixin:
api_type = 'function'
def get_filters(self, origin_scope=None):
cls = self.py__class__()
for instance in cls.execute_with_values():
yield from instance.get_filters(origin_scope=origin_scope)
def py__get__(self, instance, class_value):
from jedi.inference.value.instance import BoundMethod
if instance is None:
            # Calling Foo.bar results in the original bar function.
return ValueSet([self])
return ValueSet([BoundMethod(instance, class_value.as_context(), self)])
def get_param_names(self):
return [AnonymousParamName(self, param.name)
for param in self.tree_node.get_params()]
@property
def name(self):
if self.tree_node.type == 'lambdef':
return LambdaName(self)
return ValueName(self, self.tree_node.name)
def is_function(self):
return True
def py__name__(self):
return self.name.string_name
def get_type_hint(self, add_class_info=True):
return_annotation = self.tree_node.annotation
if return_annotation is None:
def param_name_to_str(n):
s = n.string_name
annotation = n.infer().get_type_hint()
if annotation is not None:
s += ': ' + annotation
if n.default_node is not None:
s += '=' + n.default_node.get_code(include_prefix=False)
return s
function_execution = self.as_context()
result = function_execution.infer()
return_hint = result.get_type_hint()
body = self.py__name__() + '(%s)' % ', '.join([
param_name_to_str(n)
for n in function_execution.get_param_names()
])
if return_hint is None:
return body
else:
return_hint = return_annotation.get_code(include_prefix=False)
body = self.py__name__() + self.tree_node.children[2].get_code(include_prefix=False)
return body + ' -> ' + return_hint
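    # Editor's note (illustrative sketch, not part of jedi): for an
    # unannotated definition such as
    #
    #     def add(a, b=3): return a + b
    #
    # the branch above assembles a hint from inferred values, yielding
    # something like ``add(a, b: int=3) -> int`` (the exact output depends
    # on what inference finds).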
def py__call__(self, arguments):
function_execution = self.as_context(arguments)
return function_execution.infer()
def _as_context(self, arguments=None):
if arguments is None:
return AnonymousFunctionExecution(self)
return FunctionExecutionContext(self, arguments)
def get_signatures(self):
return [TreeSignature(f) for f in self.get_signature_functions()]
class FunctionValue(FunctionMixin, FunctionAndClassBase, metaclass=CachedMetaClass):
@classmethod
def from_context(cls, context, tree_node):
def create(tree_node):
if context.is_class():
return MethodValue(
context.inference_state,
context,
parent_context=parent_context,
tree_node=tree_node
)
else:
return cls(
context.inference_state,
parent_context=parent_context,
tree_node=tree_node
)
overloaded_funcs = list(_find_overload_functions(context, tree_node))
parent_context = context
while parent_context.is_class() or parent_context.is_instance():
parent_context = parent_context.parent_context
function = create(tree_node)
if overloaded_funcs:
return OverloadedFunctionValue(
function,
# Get them into the correct order: lower line first.
list(reversed([create(f) for f in overloaded_funcs]))
)
return function
def py__class__(self):
c, = values_from_qualified_names(self.inference_state, 'types', 'FunctionType')
return c
def get_default_param_context(self):
return self.parent_context
def get_signature_functions(self):
return [self]
class FunctionNameInClass(NameWrapper):
def __init__(self, class_context, name):
super().__init__(name)
self._class_context = class_context
def get_defining_qualified_value(self):
return self._class_context.get_value() # Might be None.
class MethodValue(FunctionValue):
def __init__(self, inference_state, class_context, *args, **kwargs):
super().__init__(inference_state, *args, **kwargs)
self.class_context = class_context
def get_default_param_context(self):
return self.class_context
def get_qualified_names(self):
# Need to implement this, because the parent value of a method
# value is not the class value but the module.
names = self.class_context.get_qualified_names()
if names is None:
return None
return names + (self.py__name__(),)
@property
def name(self):
return FunctionNameInClass(self.class_context, super().name)
class BaseFunctionExecutionContext(ValueContext, TreeContextMixin):
def infer_annotations(self):
raise NotImplementedError
@inference_state_method_cache(default=NO_VALUES)
@recursion.execution_recursion_decorator()
def get_return_values(self, check_yields=False):
funcdef = self.tree_node
if funcdef.type == 'lambdef':
return self.infer_node(funcdef.children[-1])
if check_yields:
value_set = NO_VALUES
returns = get_yield_exprs(self.inference_state, funcdef)
else:
value_set = self.infer_annotations()
if value_set:
# If there are annotations, prefer them over anything else.
# This will make it faster.
return value_set
value_set |= docstrings.infer_return_types(self._value)
returns = funcdef.iter_return_stmts()
for r in returns:
if check_yields:
value_set |= ValueSet.from_sets(
lazy_value.infer()
for lazy_value in self._get_yield_lazy_value(r)
)
else:
check = flow_analysis.reachability_check(self, funcdef, r)
if check is flow_analysis.UNREACHABLE:
debug.dbg('Return unreachable: %s', r)
else:
try:
children = r.children
except AttributeError:
ctx = compiled.builtin_from_name(self.inference_state, 'None')
value_set |= ValueSet([ctx])
else:
value_set |= self.infer_node(children[1])
if check is flow_analysis.REACHABLE:
debug.dbg('Return reachable: %s', r)
break
return value_set
def _get_yield_lazy_value(self, yield_expr):
if yield_expr.type == 'keyword':
# `yield` just yields None.
ctx = compiled.builtin_from_name(self.inference_state, 'None')
yield LazyKnownValue(ctx)
return
node = yield_expr.children[1]
if node.type == 'yield_arg': # It must be a yield from.
cn = ContextualizedNode(self, node.children[1])
yield from cn.infer().iterate(cn)
else:
yield LazyTreeValue(self, node)
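    # Editor's note (illustrative sketch, not part of jedi): the three yield
    # shapes handled above:
    #
    #     yield           -> bare keyword, lazily yields None
    #     yield x         -> LazyTreeValue for the expression node
    #     yield from xs   -> 'yield_arg' node, iterates the inferred values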
@recursion.execution_recursion_decorator(default=iter([]))
def get_yield_lazy_values(self, is_async=False):
# TODO: if is_async, wrap yield statements in Awaitable/async_generator_asend
for_parents = [(y, tree.search_ancestor(y, 'for_stmt', 'funcdef',
'while_stmt', 'if_stmt'))
for y in get_yield_exprs(self.inference_state, self.tree_node)]
# Calculate if the yields are placed within the same for loop.
yields_order = []
last_for_stmt = None
for yield_, for_stmt in for_parents:
# For really simple for loops we can predict the order. Otherwise
# we just ignore it.
parent = for_stmt.parent
if parent.type == 'suite':
parent = parent.parent
if for_stmt.type == 'for_stmt' and parent == self.tree_node \
and parser_utils.for_stmt_defines_one_name(for_stmt): # Simplicity for now.
if for_stmt == last_for_stmt:
yields_order[-1][1].append(yield_)
else:
yields_order.append((for_stmt, [yield_]))
elif for_stmt == self.tree_node:
yields_order.append((None, [yield_]))
else:
types = self.get_return_values(check_yields=True)
if types:
yield LazyKnownValues(types, min=0, max=float('inf'))
return
last_for_stmt = for_stmt
for for_stmt, yields in yields_order:
if for_stmt is None:
# No for_stmt, just normal yields.
for yield_ in yields:
yield from self._get_yield_lazy_value(yield_)
else:
input_node = for_stmt.get_testlist()
cn = ContextualizedNode(self, input_node)
ordered = cn.infer().iterate(cn)
ordered = list(ordered)
for lazy_value in ordered:
dct = {str(for_stmt.children[1].value): lazy_value.infer()}
with self.predefine_names(for_stmt, dct):
for yield_in_same_for_stmt in yields:
yield from self._get_yield_lazy_value(yield_in_same_for_stmt)
def merge_yield_values(self, is_async=False):
return ValueSet.from_sets(
lazy_value.infer()
for lazy_value in self.get_yield_lazy_values()
)
def is_generator(self):
return bool(get_yield_exprs(self.inference_state, self.tree_node))
def infer(self):
"""
Created to be used by inheritance.
"""
inference_state = self.inference_state
is_coroutine = self.tree_node.parent.type in ('async_stmt', 'async_funcdef')
from jedi.inference.gradual.base import GenericClass
if is_coroutine:
if self.is_generator():
async_generator_classes = inference_state.typing_module \
.py__getattribute__('AsyncGenerator')
yield_values = self.merge_yield_values(is_async=True)
# The contravariant doesn't seem to be defined.
generics = (yield_values.py__class__(), NO_VALUES)
return ValueSet(
GenericClass(c, TupleGenericManager(generics))
for c in async_generator_classes
).execute_annotation()
else:
async_classes = inference_state.typing_module.py__getattribute__('Coroutine')
return_values = self.get_return_values()
# Only the first generic is relevant.
generics = (return_values.py__class__(), NO_VALUES, NO_VALUES)
return ValueSet(
GenericClass(c, TupleGenericManager(generics)) for c in async_classes
).execute_annotation()
else:
# If there are annotations, prefer them over anything else.
if self.is_generator() and not self.infer_annotations():
return ValueSet([iterable.Generator(inference_state, self)])
else:
return self.get_return_values()
class FunctionExecutionContext(BaseFunctionExecutionContext):
def __init__(self, function_value, arguments):
super().__init__(function_value)
self._arguments = arguments
def get_filters(self, until_position=None, origin_scope=None):
yield FunctionExecutionFilter(
self, self._value,
until_position=until_position,
origin_scope=origin_scope,
arguments=self._arguments
)
def infer_annotations(self):
from jedi.inference.gradual.annotation import infer_return_types
return infer_return_types(self._value, self._arguments)
def get_param_names(self):
return [
ParamName(self._value, param.name, self._arguments)
for param in self._value.tree_node.get_params()
]
class AnonymousFunctionExecution(BaseFunctionExecutionContext):
def infer_annotations(self):
# I don't think inferring anonymous executions is a big thing.
# Anonymous contexts are mostly there for the user to work in. ~ dave
return NO_VALUES
def get_filters(self, until_position=None, origin_scope=None):
yield AnonymousFunctionExecutionFilter(
self, self._value,
until_position=until_position,
origin_scope=origin_scope,
)
def get_param_names(self):
return self._value.get_param_names()
class OverloadedFunctionValue(FunctionMixin, ValueWrapper):
def __init__(self, function, overloaded_functions):
super().__init__(function)
self._overloaded_functions = overloaded_functions
def py__call__(self, arguments):
debug.dbg("Execute overloaded function %s", self._wrapped_value, color='BLUE')
function_executions = []
for signature in self.get_signatures():
function_execution = signature.value.as_context(arguments)
function_executions.append(function_execution)
if signature.matches_signature(arguments):
return function_execution.infer()
if self.inference_state.is_analysis:
# In this case we want precision.
return NO_VALUES
return ValueSet.from_sets(fe.infer() for fe in function_executions)
def get_signature_functions(self):
return self._overloaded_functions
def get_type_hint(self, add_class_info=True):
return 'Union[%s]' % ', '.join(f.get_type_hint() for f in self._overloaded_functions)
def _find_overload_functions(context, tree_node):
def _is_overload_decorated(funcdef):
if funcdef.parent.type == 'decorated':
decorators = funcdef.parent.children[0]
if decorators.type == 'decorator':
decorators = [decorators]
else:
decorators = decorators.children
for decorator in decorators:
dotted_name = decorator.children[1]
if dotted_name.type == 'name' and dotted_name.value == 'overload':
# TODO check with values if it's the right overload
return True
return False
if tree_node.type == 'lambdef':
return
if _is_overload_decorated(tree_node):
yield tree_node
while True:
filter = ParserTreeFilter(
context,
until_position=tree_node.start_pos
)
names = filter.get(tree_node.name.value)
assert isinstance(names, list)
if not names:
break
found = False
for name in names:
funcdef = name.tree_name.parent
if funcdef.type == 'funcdef' and _is_overload_decorated(funcdef):
tree_node = funcdef
found = True
yield funcdef
if not found:
break
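# Editor's note (illustrative sketch, not part of jedi): the walk above
# collects ``@overload``-decorated definitions that precede the actual
# implementation, e.g.
#
#     @overload
#     def f(x: int) -> int: ...
#     @overload
#     def f(x: str) -> str: ...
#     def f(x): return x
#
# so that OverloadedFunctionValue can match call arguments against the two
# stub signatures.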
# ----------------------------------------------------------------------------
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/value/dynamic_arrays.py
# ----------------------------------------------------------------------------
"""
A module to deal with stuff like `list.append` and `set.add`.
Array modifications
*******************
If the content of an array (``set``/``list``) is requested somewhere, the
current module will be checked for appearances of ``arr.append``,
``arr.insert``, etc. If the ``arr`` name points to an actual array, the
content will be added
This can be really cpu intensive, as you can imagine. Because |jedi| has to
follow **every** ``append`` and check whether it's the right array. However this
works pretty good, because in *slow* cases, the recursion detector and other
settings will stop this process.
It is important to note that:
1. Array modfications work only in the current module.
2. Jedi only checks Array additions; ``list.pop``, etc are ignored.
"""
from jedi import debug
from jedi import settings
from jedi.inference import recursion
from jedi.inference.base_value import ValueSet, NO_VALUES, HelperValueMixin, \
ValueWrapper
from jedi.inference.lazy_value import LazyKnownValues
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.cache import inference_state_method_cache
_sentinel = object()
def check_array_additions(context, sequence):
""" Just a mapper function for the internal _internal_check_array_additions """
if sequence.array_type not in ('list', 'set'):
# TODO also check for dict updates
return NO_VALUES
return _internal_check_array_additions(context, sequence)
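# Editor's note (illustrative sketch, not part of jedi): the kind of source
# this search makes inferable:
#
#     arr = []
#     arr.append(1.0)
#     x = arr[0]   # float is only discoverable via the tracked ``append``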
@inference_state_method_cache(default=NO_VALUES)
@debug.increase_indent
def _internal_check_array_additions(context, sequence):
"""
    Checks if an `Array` has "add" (append, insert, extend) statements:
>>> a = [""]
>>> a.append(1)
"""
from jedi.inference import arguments
debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')
module_context = context.get_root_context()
if not settings.dynamic_array_additions or module_context.is_compiled():
debug.dbg('Dynamic array search aborted.', color='MAGENTA')
return NO_VALUES
def find_additions(context, arglist, add_name):
params = list(arguments.TreeArguments(context.inference_state, context, arglist).unpack())
result = set()
if add_name in ['insert']:
params = params[1:]
if add_name in ['append', 'add', 'insert']:
for key, lazy_value in params:
result.add(lazy_value)
elif add_name in ['extend', 'update']:
for key, lazy_value in params:
result |= set(lazy_value.infer().iterate())
return result
temp_param_add, settings.dynamic_params_for_other_modules = \
settings.dynamic_params_for_other_modules, False
is_list = sequence.name.string_name == 'list'
search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update'])
added_types = set()
for add_name in search_names:
try:
possible_names = module_context.tree_node.get_used_names()[add_name]
except KeyError:
continue
else:
for name in possible_names:
value_node = context.tree_node
if not (value_node.start_pos < name.start_pos < value_node.end_pos):
continue
trailer = name.parent
power = trailer.parent
trailer_pos = power.children.index(trailer)
try:
execution_trailer = power.children[trailer_pos + 1]
except IndexError:
continue
else:
if execution_trailer.type != 'trailer' \
or execution_trailer.children[0] != '(' \
or execution_trailer.children[1] == ')':
continue
random_context = context.create_context(name)
with recursion.execution_allowed(context.inference_state, power) as allowed:
if allowed:
found = infer_call_of_leaf(
random_context,
name,
cut_own_trailer=True
)
if sequence in found:
# The arrays match. Now add the results
added_types |= find_additions(
random_context,
execution_trailer.children[1],
add_name
)
# reset settings
settings.dynamic_params_for_other_modules = temp_param_add
debug.dbg('Dynamic array result %s', added_types, color='MAGENTA')
return added_types
def get_dynamic_array_instance(instance, arguments):
"""Used for set() and list() instances."""
ai = _DynamicArrayAdditions(instance, arguments)
from jedi.inference import arguments
return arguments.ValuesArguments([ValueSet([ai])])
class _DynamicArrayAdditions(HelperValueMixin):
"""
Used for the usage of set() and list().
This is definitely a hack, but a good one :-)
It makes it possible to use set/list conversions.
This is not a proper context, because it doesn't have to be. It's not used
in the wild, it's just used within typeshed as an argument to `__init__`
for set/list and never used in any other place.
"""
def __init__(self, instance, arguments):
self._instance = instance
self._arguments = arguments
def py__class__(self):
tuple_, = self._instance.inference_state.builtins_module.py__getattribute__('tuple')
return tuple_
def py__iter__(self, contextualized_node=None):
arguments = self._arguments
try:
_, lazy_value = next(arguments.unpack())
except StopIteration:
pass
else:
yield from lazy_value.infer().iterate()
from jedi.inference.arguments import TreeArguments
if isinstance(arguments, TreeArguments):
additions = _internal_check_array_additions(arguments.context, self._instance)
yield from additions
def iterate(self, contextualized_node=None, is_async=False):
return self.py__iter__(contextualized_node)
class _Modification(ValueWrapper):
def __init__(self, wrapped_value, assigned_values, contextualized_key):
super().__init__(wrapped_value)
self._assigned_values = assigned_values
self._contextualized_key = contextualized_key
def py__getitem__(self, *args, **kwargs):
return self._wrapped_value.py__getitem__(*args, **kwargs) | self._assigned_values
def py__simple_getitem__(self, index):
actual = [
v.get_safe_value(_sentinel)
for v in self._contextualized_key.infer()
]
if index in actual:
return self._assigned_values
return self._wrapped_value.py__simple_getitem__(index)
class DictModification(_Modification):
def py__iter__(self, contextualized_node=None):
yield from self._wrapped_value.py__iter__(contextualized_node)
yield self._contextualized_key
def get_key_values(self):
return self._wrapped_value.get_key_values() | self._contextualized_key.infer()
class ListModification(_Modification):
def py__iter__(self, contextualized_node=None):
yield from self._wrapped_value.py__iter__(contextualized_node)
yield LazyKnownValues(self._assigned_values)
# ----------------------------------------------------------------------------
# File: DamnWidget_anaconda/anaconda_lib/jedi/inference/value/klass.py
# ----------------------------------------------------------------------------
"""
Like described in the :mod:`parso.python.tree` module,
there's a need for an ast like module to represent the states of parsed
modules.
But now there are also structures in Python that need a little bit more than
that. An ``Instance`` for example is only a ``Class`` before it is
instantiated. This class represents these cases.
So, why is there also a ``Class`` class here? Well, there are decorators and
they change classes in Python 3.
Representation modules also define "magic methods". Those methods look like
``py__foo__`` and are typically mappable to the Python equivalents ``__call__``
and others. Here's a list:
====================================== ========================================
**Method** **Description**
-------------------------------------- ----------------------------------------
py__call__(arguments: Array) On callable objects, returns types.
py__bool__() Returns True/False/None; None means that
there's no certainty.
py__bases__() Returns a list of base classes.
py__iter__() Returns a generator of a set of types.
py__class__() Returns the class of an instance.
py__simple_getitem__(index: int/str)   Returns a set of types of the index.
                                          Can raise an IndexError/KeyError.
py__getitem__(indexes: ValueSet)       Returns a set of types of the index.
py__file__()                           Only on modules. Returns None if it
                                          does not exist.
py__package__() -> List[str] Only on modules. For the import system.
py__path__() Only on modules. For the import system.
py__get__(call_object) Only on instances. Simulates
descriptors.
py__doc__() Returns the docstring for a value.
====================================== ========================================
"""
from jedi import debug
from jedi.parser_utils import get_cached_parent_scope, expr_is_dotted, \
function_is_property
from jedi.inference.cache import inference_state_method_cache, CachedMetaClass, \
inference_state_method_generator_cache
from jedi.inference import compiled
from jedi.inference.lazy_value import LazyKnownValues, LazyTreeValue
from jedi.inference.filters import ParserTreeFilter
from jedi.inference.names import TreeNameDefinition, ValueName
from jedi.inference.arguments import unpack_arglist, ValuesArguments
from jedi.inference.base_value import ValueSet, iterator_to_value_set, \
NO_VALUES
from jedi.inference.context import ClassContext
from jedi.inference.value.function import FunctionAndClassBase
from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager
from jedi.plugins import plugin_manager
class ClassName(TreeNameDefinition):
def __init__(self, class_value, tree_name, name_context, apply_decorators):
super().__init__(name_context, tree_name)
self._apply_decorators = apply_decorators
self._class_value = class_value
@iterator_to_value_set
def infer(self):
# We're using a different value to infer, so we cannot call super().
from jedi.inference.syntax_tree import tree_name_to_values
inferred = tree_name_to_values(
self.parent_context.inference_state, self.parent_context, self.tree_name)
for result_value in inferred:
if self._apply_decorators:
yield from result_value.py__get__(instance=None, class_value=self._class_value)
else:
yield result_value
@property
def api_type(self):
type_ = super().api_type
if type_ == 'function':
definition = self.tree_name.get_definition()
if function_is_property(definition):
# This essentially checks if there is an @property before
# the function. @property could be something different, but
# any programmer that redefines property as something that
# is not really a property anymore, should be shot. (i.e.
# this is a heuristic).
return 'property'
return type_
class ClassFilter(ParserTreeFilter):
def __init__(self, class_value, node_context=None, until_position=None,
origin_scope=None, is_instance=False):
super().__init__(
class_value.as_context(), node_context,
until_position=until_position,
origin_scope=origin_scope,
)
self._class_value = class_value
self._is_instance = is_instance
def _convert_names(self, names):
return [
ClassName(
class_value=self._class_value,
tree_name=name,
name_context=self._node_context,
apply_decorators=not self._is_instance,
) for name in names
]
def _equals_origin_scope(self):
node = self._origin_scope
while node is not None:
if node == self._parser_scope or node == self.parent_context:
return True
node = get_cached_parent_scope(self._parso_cache_node, node)
return False
def _access_possible(self, name):
# Filter for ClassVar variables
# TODO this is not properly done, yet. It just checks for the string
# ClassVar in the annotation, which can be quite imprecise. If we
# wanted to do this correct, we would have to infer the ClassVar.
if not self._is_instance:
expr_stmt = name.get_definition()
if expr_stmt is not None and expr_stmt.type == 'expr_stmt':
annassign = expr_stmt.children[1]
if annassign.type == 'annassign':
# If there is an =, the variable is obviously also
# defined on the class.
if 'ClassVar' not in annassign.children[1].get_code() \
and '=' not in annassign.children:
return False
# Filter for name mangling of private variables like __foo
return not name.value.startswith('__') or name.value.endswith('__') \
or self._equals_origin_scope()
def _filter(self, names):
names = super()._filter(names)
return [name for name in names if self._access_possible(name)]
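# Editor's note (illustrative sketch, not part of jedi): the name-mangling
# check in ``_access_possible`` means that for
#
#     class C:
#         __hidden = 1
#         visible = 2
#
# ``__hidden`` is only offered when completing from within ``C`` itself,
# while dunder names such as ``__init__`` stay visible everywhere.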
class ClassMixin:
def is_class(self):
return True
def is_class_mixin(self):
return True
def py__call__(self, arguments):
from jedi.inference.value import TreeInstance
from jedi.inference.gradual.typing import TypedDict
if self.is_typeddict():
return ValueSet([TypedDict(self)])
return ValueSet([TreeInstance(self.inference_state, self.parent_context, self, arguments)])
def py__class__(self):
return compiled.builtin_from_name(self.inference_state, 'type')
@property
def name(self):
return ValueName(self, self.tree_node.name)
def py__name__(self):
return self.name.string_name
@inference_state_method_generator_cache()
def py__mro__(self):
mro = [self]
yield self
# TODO Do a proper mro resolution. Currently we are just listing
# classes. However, it's a complicated algorithm.
for lazy_cls in self.py__bases__():
# TODO there's multiple different mro paths possible if this yields
# multiple possibilities. Could be changed to be more correct.
for cls in lazy_cls.infer():
# TODO detect for TypeError: duplicate base class str,
# e.g. `class X(str, str): pass`
try:
mro_method = cls.py__mro__
except AttributeError:
# TODO add a TypeError like:
"""
>>> class Y(lambda: test): pass
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: function() argument 1 must be code, not str
>>> class Y(1): pass
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: int() takes at most 2 arguments (3 given)
"""
debug.warning('Super class of %s is not a class: %s', self, cls)
else:
for cls_new in mro_method():
if cls_new not in mro:
mro.append(cls_new)
yield cls_new
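    # Editor's note (illustrative sketch, not part of jedi): the traversal
    # above is a depth-first walk with duplicate filtering, not a real C3
    # linearization. For ``class A``, ``class B(A)``, ``class C(B, A)`` it
    # yields C, B, A (then object), which happens to match C3 here but can
    # differ on more intricate diamonds.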
def get_filters(self, origin_scope=None, is_instance=False,
include_metaclasses=True, include_type_when_class=True):
if include_metaclasses:
metaclasses = self.get_metaclasses()
if metaclasses:
yield from self.get_metaclass_filters(metaclasses, is_instance)
for cls in self.py__mro__():
if cls.is_compiled():
yield from cls.get_filters(is_instance=is_instance)
else:
yield ClassFilter(
self, node_context=cls.as_context(),
origin_scope=origin_scope,
is_instance=is_instance
)
if not is_instance and include_type_when_class:
from jedi.inference.compiled import builtin_from_name
type_ = builtin_from_name(self.inference_state, 'type')
assert isinstance(type_, ClassValue)
if type_ != self:
# We are not using execute_with_values here, because the
# plugin function for type would get executed instead of an
# instance creation.
args = ValuesArguments([])
for instance in type_.py__call__(args):
instance_filters = instance.get_filters()
# Filter out self filters
next(instance_filters, None)
next(instance_filters, None)
x = next(instance_filters, None)
assert x is not None
yield x
def get_signatures(self):
# Since calling staticmethod without a function is illegal, the Jedi
# plugin doesn't return anything. Therefore call directly and get what
# we want: An instance of staticmethod.
metaclasses = self.get_metaclasses()
if metaclasses:
sigs = self.get_metaclass_signatures(metaclasses)
if sigs:
return sigs
args = ValuesArguments([])
init_funcs = self.py__call__(args).py__getattribute__('__init__')
return [sig.bind(self) for sig in init_funcs.get_signatures()]
def _as_context(self):
return ClassContext(self)
def get_type_hint(self, add_class_info=True):
if add_class_info:
return 'Type[%s]' % self.py__name__()
return self.py__name__()
@inference_state_method_cache(default=False)
def is_typeddict(self):
# TODO Do a proper mro resolution. Currently we are just listing
# classes. However, it's a complicated algorithm.
from jedi.inference.gradual.typing import TypedDictClass
for lazy_cls in self.py__bases__():
if not isinstance(lazy_cls, LazyTreeValue):
return False
tree_node = lazy_cls.data
# Only resolve simple classes, stuff like Iterable[str] are more
# intensive to resolve and if generics are involved, we know it's
# not a TypedDict.
if not expr_is_dotted(tree_node):
return False
for cls in lazy_cls.infer():
if isinstance(cls, TypedDictClass):
return True
try:
method = cls.is_typeddict
except AttributeError:
# We're only dealing with simple classes, so just returning
# here should be fine. This only happens with e.g. compiled
# classes.
return False
else:
if method():
return True
return False
def py__getitem__(self, index_value_set, contextualized_node):
from jedi.inference.gradual.base import GenericClass
if not index_value_set:
debug.warning('Class indexes inferred to nothing. Returning class instead')
return ValueSet([self])
return ValueSet(
GenericClass(
self,
LazyGenericManager(
context_of_index=contextualized_node.context,
index_value=index_value,
)
)
for index_value in index_value_set
)
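    # Hedged usage sketch: inferring a subscription such as `List[int]`
    # reaches py__getitem__ with `self` being the `list` class value and
    # `index_value_set` holding the value for `int`, producing one
    # GenericClass per index value.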
def with_generics(self, generics_tuple):
from jedi.inference.gradual.base import GenericClass
return GenericClass(
self,
TupleGenericManager(generics_tuple)
)
def define_generics(self, type_var_dict):
from jedi.inference.gradual.base import GenericClass
def remap_type_vars():
"""
            The TypeVars in the resulting classes sometimes have different names
and we need to check for that, e.g. a signature can be:
def iter(iterable: Iterable[_T]) -> Iterator[_T]: ...
However, the iterator is defined as Iterator[_T_co], which means it has
a different type var name.
"""
for type_var in self.list_type_vars():
yield type_var_dict.get(type_var.py__name__(), NO_VALUES)
if type_var_dict:
return ValueSet([GenericClass(
self,
TupleGenericManager(tuple(remap_type_vars()))
)])
return ValueSet({self})
class ClassValue(ClassMixin, FunctionAndClassBase, metaclass=CachedMetaClass):
api_type = 'class'
@inference_state_method_cache()
def list_type_vars(self):
found = []
arglist = self.tree_node.get_super_arglist()
if arglist is None:
return []
for stars, node in unpack_arglist(arglist):
if stars:
continue # These are not relevant for this search.
from jedi.inference.gradual.annotation import find_unknown_type_vars
for type_var in find_unknown_type_vars(self.parent_context, node):
if type_var not in found:
# The order matters and it's therefore a list.
found.append(type_var)
return found
def _get_bases_arguments(self):
arglist = self.tree_node.get_super_arglist()
if arglist:
from jedi.inference import arguments
return arguments.TreeArguments(self.inference_state, self.parent_context, arglist)
return None
@inference_state_method_cache(default=())
def py__bases__(self):
args = self._get_bases_arguments()
if args is not None:
lst = [value for key, value in args.unpack() if key is None]
if lst:
return lst
if self.py__name__() == 'object' \
and self.parent_context.is_builtins_module():
return []
return [LazyKnownValues(
self.inference_state.builtins_module.py__getattribute__('object')
)]
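    # Hedged illustration: for `class A: pass` the arglist is None, so
    # py__bases__() falls back to a lazy reference to the builtins `object`
    # class; only `object` itself (defined in the builtins module) reports
    # an empty list of bases.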
@plugin_manager.decorate()
def get_metaclass_filters(self, metaclasses, is_instance):
debug.warning('Unprocessed metaclass %s', metaclasses)
return []
@inference_state_method_cache(default=NO_VALUES)
def get_metaclasses(self):
args = self._get_bases_arguments()
if args is not None:
m = [value for key, value in args.unpack() if key == 'metaclass']
metaclasses = ValueSet.from_sets(lazy_value.infer() for lazy_value in m)
metaclasses = ValueSet(m for m in metaclasses if m.is_class())
if metaclasses:
return metaclasses
for lazy_base in self.py__bases__():
for value in lazy_base.infer():
if value.is_class():
values = value.get_metaclasses()
if values:
return values
return NO_VALUES
@plugin_manager.decorate()
def get_metaclass_signatures(self, metaclasses):
return []
| 16,685 | Python | .py | 351 | 35.900285 | 99 | 0.581992 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,920 | namespace.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/value/namespace.py | from pathlib import Path
from typing import Optional
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.filters import DictFilter
from jedi.inference.names import ValueNameMixin, AbstractNameDefinition
from jedi.inference.base_value import Value
from jedi.inference.value.module import SubModuleDictMixin
from jedi.inference.context import NamespaceContext
class ImplicitNSName(ValueNameMixin, AbstractNameDefinition):
"""
Accessing names for implicit namespace packages should infer to nothing.
    This object will prevent Jedi from raising exceptions.
"""
def __init__(self, implicit_ns_value, string_name):
self._value = implicit_ns_value
self.string_name = string_name
class ImplicitNamespaceValue(Value, SubModuleDictMixin):
"""
Provides support for implicit namespace packages
"""
api_type = 'namespace'
parent_context = None
def __init__(self, inference_state, string_names, paths):
super().__init__(inference_state, parent_context=None)
self.inference_state = inference_state
self.string_names = string_names
self._paths = paths
def get_filters(self, origin_scope=None):
yield DictFilter(self.sub_modules_dict())
def get_qualified_names(self):
return ()
@property # type: ignore[misc]
@inference_state_method_cache()
def name(self):
string_name = self.py__package__()[-1]
return ImplicitNSName(self, string_name)
def py__file__(self) -> Optional[Path]:
return None
def py__package__(self):
"""Return the fullname
"""
return self.string_names
def py__path__(self):
return self._paths
def py__name__(self):
return '.'.join(self.string_names)
def is_namespace(self):
return True
def is_stub(self):
return False
def is_package(self):
return True
def as_context(self):
return NamespaceContext(self)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
| 2,101 | Python | .py | 56 | 31.410714 | 76 | 0.686236 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,921 | __init__.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/value/__init__.py | # Re-export symbols for wider use. We configure mypy and flake8 to be aware that
# this file does this.
from jedi.inference.value.module import ModuleValue
from jedi.inference.value.klass import ClassValue
from jedi.inference.value.function import FunctionValue, \
MethodValue
from jedi.inference.value.instance import AnonymousInstance, BoundMethod, \
CompiledInstance, AbstractInstanceValue, TreeInstance
| 416 | Python | .py | 8 | 49.875 | 80 | 0.835381 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,922 | instance.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/value/instance.py | from abc import abstractproperty
from parso.tree import search_ancestor
from jedi import debug
from jedi import settings
from jedi.inference import compiled
from jedi.inference.compiled.value import CompiledValueFilter
from jedi.inference.helpers import values_from_qualified_names, is_big_annoying_library
from jedi.inference.filters import AbstractFilter, AnonymousFunctionExecutionFilter
from jedi.inference.names import ValueName, TreeNameDefinition, ParamName, \
NameWrapper
from jedi.inference.base_value import Value, NO_VALUES, ValueSet, \
iterator_to_value_set, ValueWrapper
from jedi.inference.lazy_value import LazyKnownValue, LazyKnownValues
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.arguments import ValuesArguments, TreeArgumentsWrapper
from jedi.inference.value.function import \
FunctionValue, FunctionMixin, OverloadedFunctionValue, \
BaseFunctionExecutionContext, FunctionExecutionContext, FunctionNameInClass
from jedi.inference.value.klass import ClassFilter
from jedi.inference.value.dynamic_arrays import get_dynamic_array_instance
from jedi.parser_utils import function_is_staticmethod, function_is_classmethod
class InstanceExecutedParamName(ParamName):
def __init__(self, instance, function_value, tree_name):
super().__init__(
function_value, tree_name, arguments=None)
self._instance = instance
def infer(self):
return ValueSet([self._instance])
def matches_signature(self):
return True
class AnonymousMethodExecutionFilter(AnonymousFunctionExecutionFilter):
def __init__(self, instance, *args, **kwargs):
super().__init__(*args, **kwargs)
self._instance = instance
def _convert_param(self, param, name):
if param.position_index == 0:
if function_is_classmethod(self._function_value.tree_node):
return InstanceExecutedParamName(
self._instance.py__class__(),
self._function_value,
name
)
elif not function_is_staticmethod(self._function_value.tree_node):
return InstanceExecutedParamName(
self._instance,
self._function_value,
name
)
return super()._convert_param(param, name)
class AnonymousMethodExecutionContext(BaseFunctionExecutionContext):
def __init__(self, instance, value):
super().__init__(value)
self.instance = instance
def get_filters(self, until_position=None, origin_scope=None):
yield AnonymousMethodExecutionFilter(
self.instance, self, self._value,
until_position=until_position,
origin_scope=origin_scope,
)
def get_param_names(self):
param_names = list(self._value.get_param_names())
# set the self name
param_names[0] = InstanceExecutedParamName(
self.instance,
self._value,
param_names[0].tree_name
)
return param_names
class MethodExecutionContext(FunctionExecutionContext):
def __init__(self, instance, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instance = instance
class AbstractInstanceValue(Value):
api_type = 'instance'
def __init__(self, inference_state, parent_context, class_value):
super().__init__(inference_state, parent_context)
        # Generated instances are instances that are just created from
        # `self` being used (no arguments are known).
self.class_value = class_value
def is_instance(self):
return True
def get_qualified_names(self):
return self.class_value.get_qualified_names()
def get_annotated_class_object(self):
return self.class_value # This is the default.
def py__class__(self):
return self.class_value
def py__bool__(self):
        # Signal that we don't know about the bool type.
return None
@abstractproperty
def name(self):
raise NotImplementedError
def get_signatures(self):
call_funcs = self.py__getattribute__('__call__').py__get__(self, self.class_value)
return [s.bind(self) for s in call_funcs.get_signatures()]
def get_function_slot_names(self, name):
# Python classes don't look at the dictionary of the instance when
# looking up `__call__`. This is something that has to do with Python's
# internal slot system (note: not __slots__, but C slots).
for filter in self.get_filters(include_self_names=False):
names = filter.get(name)
if names:
return names
return []
def execute_function_slots(self, names, *inferred_args):
return ValueSet.from_sets(
name.infer().execute_with_values(*inferred_args)
for name in names
)
def get_type_hint(self, add_class_info=True):
return self.py__name__()
def py__getitem__(self, index_value_set, contextualized_node):
names = self.get_function_slot_names('__getitem__')
if not names:
return super().py__getitem__(
index_value_set,
contextualized_node,
)
args = ValuesArguments([index_value_set])
return ValueSet.from_sets(name.infer().execute(args) for name in names)
def py__iter__(self, contextualized_node=None):
iter_slot_names = self.get_function_slot_names('__iter__')
if not iter_slot_names:
return super().py__iter__(contextualized_node)
def iterate():
for generator in self.execute_function_slots(iter_slot_names):
yield from generator.py__next__(contextualized_node)
return iterate()
def __repr__(self):
return "<%s of %s>" % (self.__class__.__name__, self.class_value)
class CompiledInstance(AbstractInstanceValue):
    # This is not really a compiled class, it's just an instance of a
    # compiled class.
def __init__(self, inference_state, parent_context, class_value, arguments):
super().__init__(inference_state, parent_context, class_value)
self._arguments = arguments
def get_filters(self, origin_scope=None, include_self_names=True):
class_value = self.get_annotated_class_object()
class_filters = class_value.get_filters(
origin_scope=origin_scope,
is_instance=True,
)
for f in class_filters:
yield CompiledInstanceClassFilter(self, f)
@property
def name(self):
return compiled.CompiledValueName(self, self.class_value.name.string_name)
def is_stub(self):
return False
class _BaseTreeInstance(AbstractInstanceValue):
@property
def array_type(self):
name = self.class_value.py__name__()
if name in ['list', 'set', 'dict'] \
and self.parent_context.get_root_context().is_builtins_module():
return name
return None
@property
def name(self):
return ValueName(self, self.class_value.name.tree_name)
def get_filters(self, origin_scope=None, include_self_names=True):
class_value = self.get_annotated_class_object()
if include_self_names:
for cls in class_value.py__mro__():
if not cls.is_compiled():
# In this case we're excluding compiled objects that are
# not fake objects. It doesn't make sense for normal
# compiled objects to search for self variables.
yield SelfAttributeFilter(self, class_value, cls.as_context(), origin_scope)
class_filters = class_value.get_filters(
origin_scope=origin_scope,
is_instance=True,
)
for f in class_filters:
if isinstance(f, ClassFilter):
yield InstanceClassFilter(self, f)
elif isinstance(f, CompiledValueFilter):
yield CompiledInstanceClassFilter(self, f)
else:
                # Probably from the metaclass.
yield f
@inference_state_method_cache()
def create_instance_context(self, class_context, node):
new = node
while True:
func_node = new
new = search_ancestor(new, 'funcdef', 'classdef')
if class_context.tree_node is new:
func = FunctionValue.from_context(class_context, func_node)
bound_method = BoundMethod(self, class_context, func)
if func_node.name.value == '__init__':
context = bound_method.as_context(self._arguments)
else:
context = bound_method.as_context()
break
return context.create_context(node)
def py__getattribute__alternatives(self, string_name):
'''
Since nothing was inferred, now check the __getattr__ and
__getattribute__ methods. Stubs don't need to be checked, because
they don't contain any logic.
'''
if self.is_stub():
return NO_VALUES
name = compiled.create_simple_object(self.inference_state, string_name)
        # This is a little bit special. `__getattribute__` is in Python
        # executed before `__getattr__`. But: I know of no use case where
        # this could be practical and where Jedi would return wrong types.
        # If you ever find something, let me know!
        # We are inverting this, because a hand-crafted `__getattribute__`
        # could still call another hand-crafted `__getattr__`, but not the
        # other way around.
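        # A minimal sketch of the plain-Python semantics described above
        # (illustrative only, not executed here):
        #
        #   class A:
        #       def __getattribute__(self, name):
        #           raise AttributeError(name)
        #       def __getattr__(self, name):
        #           return 42
        #
        #   A().anything  # -> 42, because __getattr__ is the final fallback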
if is_big_annoying_library(self.parent_context):
return NO_VALUES
names = (self.get_function_slot_names('__getattr__')
or self.get_function_slot_names('__getattribute__'))
return self.execute_function_slots(names, name)
def py__next__(self, contextualized_node=None):
        name = '__next__'
next_slot_names = self.get_function_slot_names(name)
if next_slot_names:
yield LazyKnownValues(
self.execute_function_slots(next_slot_names)
)
else:
debug.warning('Instance has no __next__ function in %s.', self)
def py__call__(self, arguments):
names = self.get_function_slot_names('__call__')
if not names:
# Means the Instance is not callable.
return super().py__call__(arguments)
return ValueSet.from_sets(name.infer().execute(arguments) for name in names)
def py__get__(self, instance, class_value):
"""
obj may be None.
"""
# Arguments in __get__ descriptors are obj, class.
# `method` is the new parent of the array, don't know if that's good.
for cls in self.class_value.py__mro__():
result = cls.py__get__on_class(self, instance, class_value)
if result is not NotImplemented:
return result
names = self.get_function_slot_names('__get__')
if names:
if instance is None:
instance = compiled.builtin_from_name(self.inference_state, 'None')
return self.execute_function_slots(names, instance, class_value)
else:
return ValueSet([self])
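    # Hedged illustration of the descriptor dispatch modeled by py__get__
    # (plain Python semantics, illustrative only):
    #
    #   class D:
    #       def __get__(self, obj, objtype=None):
    #           return 42
    #   class C:
    #       d = D()
    #
    #   C().d  # -> 42 via D.__get__; without a __get__ slot the value is
    #   # returned unchanged, mirroring the fallback above.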
class TreeInstance(_BaseTreeInstance):
def __init__(self, inference_state, parent_context, class_value, arguments):
# I don't think that dynamic append lookups should happen here. That
# sounds more like something that should go to py__iter__.
if class_value.py__name__() in ['list', 'set'] \
and parent_context.get_root_context().is_builtins_module():
# compare the module path with the builtin name.
if settings.dynamic_array_additions:
arguments = get_dynamic_array_instance(self, arguments)
super().__init__(inference_state, parent_context, class_value)
self._arguments = arguments
self.tree_node = class_value.tree_node
# This can recurse, if the initialization of the class includes a reference
# to itself.
@inference_state_method_cache(default=None)
def _get_annotated_class_object(self):
from jedi.inference.gradual.annotation import py__annotations__, \
infer_type_vars_for_execution
args = InstanceArguments(self, self._arguments)
for signature in self.class_value.py__getattribute__('__init__').get_signatures():
# Just take the first result, it should always be one, because we
# control the typeshed code.
funcdef = signature.value.tree_node
if funcdef is None or funcdef.type != 'funcdef' \
or not signature.matches_signature(args):
# First check if the signature even matches, if not we don't
# need to infer anything.
continue
bound_method = BoundMethod(self, self.class_value.as_context(), signature.value)
all_annotations = py__annotations__(funcdef)
type_var_dict = infer_type_vars_for_execution(bound_method, args, all_annotations)
if type_var_dict:
defined, = self.class_value.define_generics(
infer_type_vars_for_execution(signature.value, args, all_annotations),
)
debug.dbg('Inferred instance value as %s', defined, color='BLUE')
return defined
return None
def get_annotated_class_object(self):
return self._get_annotated_class_object() or self.class_value
def get_key_values(self):
values = NO_VALUES
if self.array_type == 'dict':
for i, (key, instance) in enumerate(self._arguments.unpack()):
if key is None and i == 0:
values |= ValueSet.from_sets(
v.get_key_values()
for v in instance.infer()
if v.array_type == 'dict'
)
if key:
values |= ValueSet([compiled.create_simple_object(
self.inference_state,
key,
)])
return values
def py__simple_getitem__(self, index):
if self.array_type == 'dict':
# Logic for dict({'foo': bar}) and dict(foo=bar)
# reversed, because:
# >>> dict({'a': 1}, a=3)
# {'a': 3}
# TODO tuple initializations
# >>> dict([('a', 4)])
# {'a': 4}
for key, lazy_context in reversed(list(self._arguments.unpack())):
if key is None:
values = ValueSet.from_sets(
dct_value.py__simple_getitem__(index)
for dct_value in lazy_context.infer()
if dct_value.array_type == 'dict'
)
if values:
return values
else:
if key == index:
return lazy_context.infer()
return super().py__simple_getitem__(index)
def __repr__(self):
return "<%s of %s(%s)>" % (self.__class__.__name__, self.class_value,
self._arguments)
class AnonymousInstance(_BaseTreeInstance):
_arguments = None
class CompiledInstanceName(NameWrapper):
@iterator_to_value_set
def infer(self):
for result_value in self._wrapped_name.infer():
if result_value.api_type == 'function':
yield CompiledBoundMethod(result_value)
else:
yield result_value
class CompiledInstanceClassFilter(AbstractFilter):
def __init__(self, instance, f):
self._instance = instance
self._class_filter = f
def get(self, name):
return self._convert(self._class_filter.get(name))
def values(self):
return self._convert(self._class_filter.values())
def _convert(self, names):
return [CompiledInstanceName(n) for n in names]
class BoundMethod(FunctionMixin, ValueWrapper):
def __init__(self, instance, class_context, function):
super().__init__(function)
self.instance = instance
self._class_context = class_context
def is_bound_method(self):
return True
@property
def name(self):
return FunctionNameInClass(
self._class_context,
super().name
)
def py__class__(self):
c, = values_from_qualified_names(self.inference_state, 'types', 'MethodType')
return c
def _get_arguments(self, arguments):
assert arguments is not None
return InstanceArguments(self.instance, arguments)
def _as_context(self, arguments=None):
if arguments is None:
return AnonymousMethodExecutionContext(self.instance, self)
arguments = self._get_arguments(arguments)
return MethodExecutionContext(self.instance, self, arguments)
def py__call__(self, arguments):
if isinstance(self._wrapped_value, OverloadedFunctionValue):
return self._wrapped_value.py__call__(self._get_arguments(arguments))
function_execution = self.as_context(arguments)
return function_execution.infer()
def get_signature_functions(self):
return [
BoundMethod(self.instance, self._class_context, f)
for f in self._wrapped_value.get_signature_functions()
]
def get_signatures(self):
return [sig.bind(self) for sig in super().get_signatures()]
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._wrapped_value)
class CompiledBoundMethod(ValueWrapper):
def is_bound_method(self):
return True
def get_signatures(self):
return [sig.bind(self) for sig in self._wrapped_value.get_signatures()]
class SelfName(TreeNameDefinition):
"""
This name calculates the parent_context lazily.
"""
def __init__(self, instance, class_context, tree_name):
self._instance = instance
self.class_context = class_context
self.tree_name = tree_name
@property
def parent_context(self):
return self._instance.create_instance_context(self.class_context, self.tree_name)
def get_defining_qualified_value(self):
return self._instance
def infer(self):
stmt = search_ancestor(self.tree_name, 'expr_stmt')
if stmt is not None:
if stmt.children[1].type == "annassign":
from jedi.inference.gradual.annotation import infer_annotation
values = infer_annotation(
self.parent_context, stmt.children[1].children[1]
).execute_annotation()
if values:
return values
return super().infer()
class LazyInstanceClassName(NameWrapper):
def __init__(self, instance, class_member_name):
super().__init__(class_member_name)
self._instance = instance
@iterator_to_value_set
def infer(self):
for result_value in self._wrapped_name.infer():
yield from result_value.py__get__(self._instance, self._instance.py__class__())
def get_signatures(self):
return self.infer().get_signatures()
def get_defining_qualified_value(self):
return self._instance
class InstanceClassFilter(AbstractFilter):
"""
This filter is special in that it uses the class filter and wraps the
resulting names in LazyInstanceClassName. The idea is that the class name
filtering can be very flexible and always be reflected in instances.
"""
def __init__(self, instance, class_filter):
self._instance = instance
self._class_filter = class_filter
def get(self, name):
return self._convert(self._class_filter.get(name))
def values(self):
return self._convert(self._class_filter.values())
def _convert(self, names):
return [
LazyInstanceClassName(self._instance, n)
for n in names
]
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self._class_filter)
class SelfAttributeFilter(ClassFilter):
"""
This class basically filters all the use cases where `self.*` was assigned.
"""
def __init__(self, instance, instance_class, node_context, origin_scope):
super().__init__(
class_value=instance_class,
node_context=node_context,
origin_scope=origin_scope,
is_instance=True,
)
self._instance = instance
def _filter(self, names):
start, end = self._parser_scope.start_pos, self._parser_scope.end_pos
names = [n for n in names if start < n.start_pos < end]
return self._filter_self_names(names)
def _filter_self_names(self, names):
for name in names:
trailer = name.parent
if trailer.type == 'trailer' \
and len(trailer.parent.children) == 2 \
and trailer.children[0] == '.':
if name.is_definition() and self._access_possible(name):
# TODO filter non-self assignments instead of this bad
# filter.
if self._is_in_right_scope(trailer.parent.children[0], name):
yield name
def _is_in_right_scope(self, self_name, name):
self_context = self._node_context.create_context(self_name)
names = self_context.goto(self_name, position=self_name.start_pos)
return any(
n.api_type == 'param'
and n.tree_name.get_definition().position_index == 0
and n.parent_context.tree_node is self._parser_scope
for n in names
)
def _convert_names(self, names):
return [SelfName(self._instance, self._node_context, name) for name in names]
def _check_flows(self, names):
return names
class InstanceArguments(TreeArgumentsWrapper):
def __init__(self, instance, arguments):
super().__init__(arguments)
self.instance = instance
def unpack(self, func=None):
yield None, LazyKnownValue(self.instance)
yield from self._wrapped_arguments.unpack(func)
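    # Hedged illustration: for a call like `inst.method(1)` the wrapped
    # arguments hold only `1`; unpack() prepends the instance itself,
    # matching the implicit `self` parameter of the underlying function.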
| 22,511 | Python | .py | 497 | 35.036217 | 96 | 0.61883 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,923 | access.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/access.py | import inspect
import types
import traceback
import sys
import operator as op
from collections import namedtuple
import warnings
import re
import builtins
import typing
from pathlib import Path
from typing import Optional
from jedi.inference.compiled.getattr_static import getattr_static
ALLOWED_GETITEM_TYPES = (str, list, tuple, bytes, bytearray, dict)
MethodDescriptorType = type(str.replace)
# These are not considered classes and access is granted even though they have
# a __class__ attribute.
NOT_CLASS_TYPES = (
types.BuiltinFunctionType,
types.CodeType,
types.FrameType,
types.FunctionType,
types.GeneratorType,
types.GetSetDescriptorType,
types.LambdaType,
types.MemberDescriptorType,
types.MethodType,
types.ModuleType,
types.TracebackType,
MethodDescriptorType,
types.MappingProxyType,
types.SimpleNamespace,
types.DynamicClassAttribute,
)
# Those types don't exist in typing.
WrapperDescriptorType = type(set.__iter__)
# `object.__subclasshook__` is an already executed descriptor.
object_class_dict = type.__dict__["__dict__"].__get__(object)
ClassMethodDescriptorType = type(object_class_dict['__subclasshook__'])
_sentinel = object()
# Maps Python syntax to the operator module.
COMPARISON_OPERATORS = {
'==': op.eq,
'!=': op.ne,
'is': op.is_,
'is not': op.is_not,
'<': op.lt,
'<=': op.le,
'>': op.gt,
'>=': op.ge,
}
_OPERATORS = {
'+': op.add,
'-': op.sub,
}
_OPERATORS.update(COMPARISON_OPERATORS)
ALLOWED_DESCRIPTOR_ACCESS = (
types.FunctionType,
types.GetSetDescriptorType,
types.MemberDescriptorType,
MethodDescriptorType,
WrapperDescriptorType,
ClassMethodDescriptorType,
staticmethod,
classmethod,
)
def safe_getattr(obj, name, default=_sentinel):
try:
attr, is_get_descriptor = getattr_static(obj, name)
except AttributeError:
if default is _sentinel:
raise
return default
else:
if isinstance(attr, ALLOWED_DESCRIPTOR_ACCESS):
            # In case of descriptors that have get methods we cannot return
            # their value, because that would mean code execution.
            # Since it's an isinstance call, code execution is still possible,
            # but this is not really a security feature; it is much more of a
            # safety feature. Code execution is basically always possible when
            # a module is imported. This is here so people don't shoot
            # themselves in the foot.
return getattr(obj, name)
return attr
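# Hedged sketch of the two branches above: a plain method is reported by
# getattr_static as a function (an allowed descriptor type), so the real
# getattr() runs and a bound method is returned; a user-defined property is
# *not* in ALLOWED_DESCRIPTOR_ACCESS, so the property object itself is
# returned and its getter is never executed.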
SignatureParam = namedtuple(
'SignatureParam',
'name has_default default default_string has_annotation annotation annotation_string kind_name'
)
def shorten_repr(func):
def wrapper(self):
r = func(self)
if len(r) > 50:
r = r[:50] + '..'
return r
return wrapper
def create_access(inference_state, obj):
return inference_state.compiled_subprocess.get_or_create_access_handle(obj)
def load_module(inference_state, dotted_name, sys_path):
temp, sys.path = sys.path, sys_path
try:
__import__(dotted_name)
except ImportError:
# If a module is "corrupt" or not really a Python module or whatever.
warnings.warn(
"Module %s not importable in path %s." % (dotted_name, sys_path),
UserWarning,
stacklevel=2,
)
return None
except Exception:
# Since __import__ pretty much makes code execution possible, just
        # catch any error here and report it as a warning.
warnings.warn(
"Cannot import:\n%s" % traceback.format_exc(), UserWarning, stacklevel=2
)
return None
finally:
sys.path = temp
# Just access the cache after import, because of #59 as well as the very
# complicated import structure of Python.
module = sys.modules[dotted_name]
return create_access_path(inference_state, module)
class AccessPath:
def __init__(self, accesses):
self.accesses = accesses
def create_access_path(inference_state, obj):
access = create_access(inference_state, obj)
return AccessPath(access.get_access_path_tuples())
def get_api_type(obj):
if inspect.isclass(obj):
return 'class'
elif inspect.ismodule(obj):
return 'module'
elif inspect.isbuiltin(obj) or inspect.ismethod(obj) \
or inspect.ismethoddescriptor(obj) or inspect.isfunction(obj):
return 'function'
# Everything else...
return 'instance'
class DirectObjectAccess:
def __init__(self, inference_state, obj):
self._inference_state = inference_state
self._obj = obj
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.get_repr())
def _create_access(self, obj):
return create_access(self._inference_state, obj)
def _create_access_path(self, obj):
return create_access_path(self._inference_state, obj)
def py__bool__(self):
return bool(self._obj)
def py__file__(self) -> Optional[Path]:
try:
return Path(self._obj.__file__)
except AttributeError:
return None
def py__doc__(self):
return inspect.getdoc(self._obj) or ''
def py__name__(self):
if not _is_class_instance(self._obj) or \
inspect.ismethoddescriptor(self._obj): # slots
cls = self._obj
else:
try:
cls = self._obj.__class__
except AttributeError:
                # happens with numpy.core.umath._UFUNC_API (you get it
                # automatically by doing `import numpy`).
return None
try:
return cls.__name__
except AttributeError:
return None
def py__mro__accesses(self):
return tuple(self._create_access_path(cls) for cls in self._obj.__mro__[1:])
def py__getitem__all_values(self):
if isinstance(self._obj, dict):
return [self._create_access_path(v) for v in self._obj.values()]
if isinstance(self._obj, (list, tuple)):
return [self._create_access_path(v) for v in self._obj]
if self.is_instance():
cls = DirectObjectAccess(self._inference_state, self._obj.__class__)
return cls.py__getitem__all_values()
try:
getitem = self._obj.__getitem__
except AttributeError:
pass
else:
annotation = DirectObjectAccess(self._inference_state, getitem).get_return_annotation()
if annotation is not None:
return [annotation]
return None
def py__simple_getitem__(self, index):
if type(self._obj) not in ALLOWED_GETITEM_TYPES:
# Get rid of side effects, we won't call custom `__getitem__`s.
return None
return self._create_access_path(self._obj[index])
def py__iter__list(self):
try:
iter_method = self._obj.__iter__
except AttributeError:
return None
else:
p = DirectObjectAccess(self._inference_state, iter_method).get_return_annotation()
if p is not None:
return [p]
if type(self._obj) not in ALLOWED_GETITEM_TYPES:
# Get rid of side effects, we won't call custom `__getitem__`s.
return []
lst = []
for i, part in enumerate(self._obj):
if i > 20:
# Should not go crazy with large iterators
break
lst.append(self._create_access_path(part))
return lst
def py__class__(self):
return self._create_access_path(self._obj.__class__)
def py__bases__(self):
return [self._create_access_path(base) for base in self._obj.__bases__]
def py__path__(self):
paths = getattr(self._obj, '__path__', None)
# Avoid some weird hacks that would just fail, because they cannot be
# used by pickle.
if not isinstance(paths, list) \
or not all(isinstance(p, str) for p in paths):
return None
return paths
@shorten_repr
def get_repr(self):
if inspect.ismodule(self._obj):
return repr(self._obj)
# Try to avoid execution of the property.
if safe_getattr(self._obj, '__module__', default='') == 'builtins':
return repr(self._obj)
type_ = type(self._obj)
if type_ == type:
return type.__repr__(self._obj)
if safe_getattr(type_, '__module__', default='') == 'builtins':
# Allow direct execution of repr for builtins.
return repr(self._obj)
return object.__repr__(self._obj)
def is_class(self):
return inspect.isclass(self._obj)
def is_function(self):
return inspect.isfunction(self._obj) or inspect.ismethod(self._obj)
def is_module(self):
return inspect.ismodule(self._obj)
def is_instance(self):
return _is_class_instance(self._obj)
def ismethoddescriptor(self):
return inspect.ismethoddescriptor(self._obj)
def get_qualified_names(self):
def try_to_get_name(obj):
return getattr(obj, '__qualname__', getattr(obj, '__name__', None))
if self.is_module():
return ()
name = try_to_get_name(self._obj)
if name is None:
name = try_to_get_name(type(self._obj))
if name is None:
return ()
return tuple(name.split('.'))
def dir(self):
return dir(self._obj)
def has_iter(self):
try:
iter(self._obj)
return True
except TypeError:
return False
def is_allowed_getattr(self, name, safe=True):
# TODO this API is ugly.
if not safe:
# Unsafe is mostly used to check for __getattr__/__getattribute__.
# getattr_static works for properties, but the underscore methods
# are just ignored (because it's safer and avoids more code
# execution). See also GH #1378.
# Avoid warnings, see comment in the next function.
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
try:
return hasattr(self._obj, name), False
except Exception:
                    # Obviously has an attribute (probably a property) that
# gets executed, so just avoid all exceptions here.
return False, False
try:
attr, is_get_descriptor = getattr_static(self._obj, name)
except AttributeError:
return False, False
else:
if is_get_descriptor and type(attr) not in ALLOWED_DESCRIPTOR_ACCESS:
# In case of descriptors that have get methods we cannot return
                # their value, because that would mean code execution.
return True, True
return True, False
def getattr_paths(self, name, default=_sentinel):
try:
# Make sure no warnings are printed here, this is autocompletion,
# warnings should not be shown. See also GH #1383.
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
return_obj = getattr(self._obj, name)
except Exception as e:
if default is _sentinel:
if isinstance(e, AttributeError):
# Happens e.g. in properties of
# PyQt4.QtGui.QStyleOptionComboBox.currentText
# -> just set it to None
raise
                # Just in case anything happens, raise an AttributeError. It
# should not crash.
raise AttributeError
return_obj = default
access = self._create_access(return_obj)
if inspect.ismodule(return_obj):
return [access]
try:
module = return_obj.__module__
except AttributeError:
pass
else:
if module is not None and isinstance(module, str):
try:
__import__(module)
# For some modules like _sqlite3, the __module__ for classes is
# different, in this case it's sqlite3. So we have to try to
# load that "original" module, because it's not loaded yet. If
# we don't do that, we don't really have a "parent" module and
# we would fall back to builtins.
except ImportError:
pass
module = inspect.getmodule(return_obj)
if module is None:
module = inspect.getmodule(type(return_obj))
if module is None:
module = builtins
return [self._create_access(module), access]
def get_safe_value(self):
if type(self._obj) in (bool, bytes, float, int, str, slice) or self._obj is None:
return self._obj
raise ValueError("Object is type %s and not simple" % type(self._obj))
def get_api_type(self):
return get_api_type(self._obj)
def get_array_type(self):
if isinstance(self._obj, dict):
return 'dict'
return None
def get_key_paths(self):
def iter_partial_keys():
# We could use list(keys()), but that might take a lot more memory.
for (i, k) in enumerate(self._obj.keys()):
# Limit key listing at some point. This is artificial, but this
# way we don't get stalled because of slow completions
if i > 50:
break
yield k
return [self._create_access_path(k) for k in iter_partial_keys()]
def get_access_path_tuples(self):
accesses = [create_access(self._inference_state, o) for o in self._get_objects_path()]
return [(access.py__name__(), access) for access in accesses]
def _get_objects_path(self):
def get():
obj = self._obj
yield obj
try:
obj = obj.__objclass__
except AttributeError:
pass
else:
yield obj
try:
# Returns a dotted string path.
imp_plz = obj.__module__
except AttributeError:
# Unfortunately in some cases like `int` there's no __module__
if not inspect.ismodule(obj):
yield builtins
else:
if imp_plz is None:
# Happens for example in `(_ for _ in []).send.__module__`.
yield builtins
else:
try:
yield sys.modules[imp_plz]
except KeyError:
# __module__ can be something arbitrary that doesn't exist.
yield builtins
return list(reversed(list(get())))
def execute_operation(self, other_access_handle, operator):
other_access = other_access_handle.access
op = _OPERATORS[operator]
return self._create_access_path(op(self._obj, other_access._obj))
def get_annotation_name_and_args(self):
"""
Returns Tuple[Optional[str], Tuple[AccessPath, ...]]
"""
name = None
args = ()
if safe_getattr(self._obj, '__module__', default='') == 'typing':
m = re.match(r'typing.(\w+)\[', repr(self._obj))
if m is not None:
name = m.group(1)
                # `typing` is already imported at module level.
                if sys.version_info >= (3, 8):
                    args = typing.get_args(self._obj)
                else:
                    # Default to an empty tuple so a missing __args__ cannot
                    # break the tuple() construction below.
                    args = safe_getattr(self._obj, '__args__', default=())
return name, tuple(self._create_access_path(arg) for arg in args)
def needs_type_completions(self):
return inspect.isclass(self._obj) and self._obj != type
def _annotation_to_str(self, annotation):
return inspect.formatannotation(annotation)
def get_signature_params(self):
return [
SignatureParam(
name=p.name,
has_default=p.default is not p.empty,
default=self._create_access_path(p.default),
default_string=repr(p.default),
has_annotation=p.annotation is not p.empty,
annotation=self._create_access_path(p.annotation),
annotation_string=self._annotation_to_str(p.annotation),
kind_name=str(p.kind)
) for p in self._get_signature().parameters.values()
]
def _get_signature(self):
obj = self._obj
try:
return inspect.signature(obj)
except (RuntimeError, TypeError):
# Reading the code of the function in Python 3.6 implies there are
# at least these errors that might occur if something is wrong with
# the signature. In that case we just want a simple escape for now.
raise ValueError
def get_return_annotation(self):
try:
o = self._obj.__annotations__.get('return')
except AttributeError:
return None
if o is None:
return None
try:
o = typing.get_type_hints(self._obj).get('return')
except Exception:
pass
return self._create_access_path(o)
def negate(self):
return self._create_access_path(-self._obj)
def get_dir_infos(self):
"""
        Used to return a couple of infos that are needed when accessing the sub
        objects of an object.
"""
tuples = dict(
(name, self.is_allowed_getattr(name))
for name in self.dir()
)
return self.needs_type_completions(), tuples
def _is_class_instance(obj):
"""Like inspect.* methods."""
try:
cls = obj.__class__
except AttributeError:
return False
else:
# The isinstance check for cls is just there so issubclass doesn't
# raise an exception.
return cls != type and isinstance(cls, type) and not issubclass(cls, NOT_CLASS_TYPES)
| 18,442 | Python | .py | 470 | 29.293617 | 99 | 0.590696 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,924 | getattr_static.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/getattr_static.py | """
A static version of getattr.
This is a backport of the Python 3 code with a little bit of additional
information returned to enable Jedi to make decisions.
"""
import types
from jedi import debug
_sentinel = object()
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
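# Note (assumption): `dict.get` is deliberately called unbound above, so a
# `__dict__` that is a dict subclass with an overridden `get` cannot run
# user code during this static lookup.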
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType
and class_dict.__name__ == "__dict__"
and class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def _static_getmro(klass):
mro = type.__dict__['__mro__'].__get__(klass)
if not isinstance(mro, (tuple, list)):
# There are unfortunately no tests for this, I was not able to
# reproduce this in pure Python. However should still solve the issue
# raised in GH #1517.
debug.warning('mro of %s returned %s, should be a tuple' % (klass, mro))
return ()
return mro
def _safe_hasattr(obj, name):
return _check_class(type(obj), name) is not _sentinel
def _safe_is_data_descriptor(obj):
return _safe_hasattr(obj, '__set__') or _safe_hasattr(obj, '__delete__')
def getattr_static(obj, attr, default=_sentinel):
"""Retrieve attributes without triggering dynamic lookup via the
descriptor protocol, __getattr__ or __getattribute__.
Note: this function may not be able to retrieve all attributes
that getattr can fetch (like dynamically created attributes)
and may find attributes that getattr can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases. See the
documentation for details.
    Returns a tuple `(attr, is_get_descriptor)`. is_get_descriptor means that
the attribute is a descriptor that has a `__get__` attribute.
"""
instance_result = _sentinel
if not _is_type(obj):
klass = type(obj)
dict_attr = _shadowed_dict(klass)
if (dict_attr is _sentinel or type(dict_attr) is types.MemberDescriptorType):
instance_result = _check_instance(obj, attr)
else:
klass = obj
klass_result = _check_class(klass, attr)
if instance_result is not _sentinel and klass_result is not _sentinel:
if _safe_hasattr(klass_result, '__get__') \
and _safe_is_data_descriptor(klass_result):
# A get/set descriptor has priority over everything.
return klass_result, True
if instance_result is not _sentinel:
return instance_result, False
if klass_result is not _sentinel:
return klass_result, _safe_hasattr(klass_result, '__get__')
if obj is klass:
# for types we check the metaclass too
for entry in _static_getmro(type(klass)):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr], False
except KeyError:
pass
if default is not _sentinel:
return default, False
raise AttributeError(attr)
| 3,862 | Python | .py | 96 | 32.229167 | 85 | 0.631382 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,925 | __init__.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/__init__.py | # This file also re-exports symbols for wider use. We configure mypy and flake8
# to be aware that this file does this.
from jedi.inference.compiled.value import CompiledValue, CompiledName, \
CompiledValueFilter, CompiledValueName, create_from_access_path
from jedi.inference.base_value import LazyValueWrapper
def builtin_from_name(inference_state, string):
typing_builtins_module = inference_state.builtins_module
if string in ('None', 'True', 'False'):
builtins, = typing_builtins_module.non_stub_value_set
filter_ = next(builtins.get_filters())
else:
filter_ = next(typing_builtins_module.get_filters())
name, = filter_.get(string)
value, = name.infer()
return value
class ExactValue(LazyValueWrapper):
"""
    This class represents exact values, which make operations like additions
    and exact boolean values possible, while still being a "normal" stub.
"""
def __init__(self, compiled_value):
self.inference_state = compiled_value.inference_state
self._compiled_value = compiled_value
def __getattribute__(self, name):
if name in ('get_safe_value', 'execute_operation', 'access_handle',
'negate', 'py__bool__', 'is_compiled'):
return getattr(self._compiled_value, name)
return super().__getattribute__(name)
def _get_wrapped_value(self):
instance, = builtin_from_name(
self.inference_state, self._compiled_value.name.string_name).execute_with_values()
return instance
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._compiled_value)
def create_simple_object(inference_state, obj):
"""
    Only allows creation of objects that are easily picklable across Python
versions.
"""
assert type(obj) in (int, float, str, bytes, slice, complex, bool), repr(obj)
compiled_value = create_from_access_path(
inference_state,
inference_state.compiled_subprocess.create_simple_object(obj)
)
return ExactValue(compiled_value)
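# Hedged usage sketch: create_simple_object(inference_state, 1) wraps the
# access path of the literal `1` in an ExactValue, which behaves like a
# normal `int` instance while still allowing exact evaluation through
# get_safe_value() and execute_operation().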
def get_string_value_set(inference_state):
return builtin_from_name(inference_state, 'str').execute_with_values()
def load_module(inference_state, dotted_name, **kwargs):
# Temporary, some tensorflow builtins cannot be loaded, so it's tried again
# and again and it's really slow.
if dotted_name.startswith('tensorflow.'):
return None
access_path = inference_state.compiled_subprocess.load_module(dotted_name=dotted_name, **kwargs)
if access_path is None:
return None
return create_from_access_path(inference_state, access_path)
| 2,651 | Python | .py | 56 | 41.160714 | 100 | 0.699341 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,926 | value.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/value.py | """
Imitate the parser representation.
"""
import re
from functools import partial
from inspect import Parameter
from pathlib import Path
from typing import Optional
from jedi import debug
from jedi.inference.utils import to_list
from jedi.cache import memoize_method
from jedi.inference.filters import AbstractFilter
from jedi.inference.names import AbstractNameDefinition, ValueNameMixin, \
ParamNameInterface
from jedi.inference.base_value import Value, ValueSet, NO_VALUES
from jedi.inference.lazy_value import LazyKnownValue
from jedi.inference.compiled.access import _sentinel
from jedi.inference.cache import inference_state_function_cache
from jedi.inference.helpers import reraise_getitem_errors
from jedi.inference.signature import BuiltinSignature
from jedi.inference.context import CompiledContext, CompiledModuleContext
class CheckAttribute:
"""Raises :exc:`AttributeError` if the attribute X is not available."""
def __init__(self, check_name=None):
# Remove the py in front of e.g. py__call__.
self.check_name = check_name
def __call__(self, func):
self.func = func
if self.check_name is None:
self.check_name = func.__name__[2:]
return self
def __get__(self, instance, owner):
if instance is None:
return self
# This might raise an AttributeError. That's wanted.
instance.access_handle.getattr_paths(self.check_name)
return partial(self.func, instance)
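# Hedged usage note: methods below decorated with @CheckAttribute() (e.g.
# py__mro__) raise AttributeError already at attribute lookup on the wrapper
# when the wrapped compiled object lacks the corresponding dunder, so
# callers can dispatch with a plain try/except AttributeError.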
class CompiledValue(Value):
def __init__(self, inference_state, access_handle, parent_context=None):
super().__init__(inference_state, parent_context)
self.access_handle = access_handle
def py__call__(self, arguments):
return_annotation = self.access_handle.get_return_annotation()
if return_annotation is not None:
# TODO the return annotation may also be a string.
return create_from_access_path(
self.inference_state,
return_annotation
).execute_annotation()
try:
self.access_handle.getattr_paths('__call__')
except AttributeError:
return super().py__call__(arguments)
else:
if self.access_handle.is_class():
from jedi.inference.value import CompiledInstance
return ValueSet([
CompiledInstance(self.inference_state, self.parent_context, self, arguments)
])
else:
return ValueSet(self._execute_function(arguments))
@CheckAttribute()
def py__class__(self):
return create_from_access_path(self.inference_state, self.access_handle.py__class__())
@CheckAttribute()
def py__mro__(self):
return (self,) + tuple(
create_from_access_path(self.inference_state, access)
for access in self.access_handle.py__mro__accesses()
)
@CheckAttribute()
def py__bases__(self):
return tuple(
create_from_access_path(self.inference_state, access)
for access in self.access_handle.py__bases__()
)
def get_qualified_names(self):
return self.access_handle.get_qualified_names()
def py__bool__(self):
return self.access_handle.py__bool__()
def is_class(self):
return self.access_handle.is_class()
def is_function(self):
return self.access_handle.is_function()
def is_module(self):
return self.access_handle.is_module()
def is_compiled(self):
return True
def is_stub(self):
return False
def is_instance(self):
return self.access_handle.is_instance()
def py__doc__(self):
return self.access_handle.py__doc__()
@to_list
def get_param_names(self):
try:
signature_params = self.access_handle.get_signature_params()
except ValueError: # Has no signature
params_str, ret = self._parse_function_doc()
if not params_str:
tokens = []
else:
tokens = params_str.split(',')
if self.access_handle.ismethoddescriptor():
tokens.insert(0, 'self')
for p in tokens:
name, _, default = p.strip().partition('=')
yield UnresolvableParamName(self, name, default)
else:
for signature_param in signature_params:
yield SignatureParamName(self, signature_param)
def get_signatures(self):
_, return_string = self._parse_function_doc()
return [BuiltinSignature(self, return_string)]
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.access_handle.get_repr())
@memoize_method
def _parse_function_doc(self):
doc = self.py__doc__()
if doc is None:
return '', ''
return _parse_function_doc(doc)
@property
def api_type(self):
return self.access_handle.get_api_type()
def get_filters(self, is_instance=False, origin_scope=None):
yield self._ensure_one_filter(is_instance)
@memoize_method
def _ensure_one_filter(self, is_instance):
return CompiledValueFilter(self.inference_state, self, is_instance)
def py__simple_getitem__(self, index):
with reraise_getitem_errors(IndexError, KeyError, TypeError):
try:
access = self.access_handle.py__simple_getitem__(index)
except AttributeError:
return super().py__simple_getitem__(index)
if access is None:
return super().py__simple_getitem__(index)
return ValueSet([create_from_access_path(self.inference_state, access)])
def py__getitem__(self, index_value_set, contextualized_node):
all_access_paths = self.access_handle.py__getitem__all_values()
if all_access_paths is None:
# This means basically that no __getitem__ has been defined on this
# object.
return super().py__getitem__(index_value_set, contextualized_node)
return ValueSet(
create_from_access_path(self.inference_state, access)
for access in all_access_paths
)
def py__iter__(self, contextualized_node=None):
if not self.access_handle.has_iter():
yield from super().py__iter__(contextualized_node)
access_path_list = self.access_handle.py__iter__list()
if access_path_list is None:
# There is no __iter__ method on this object.
return
for access in access_path_list:
yield LazyKnownValue(create_from_access_path(self.inference_state, access))
def py__name__(self):
return self.access_handle.py__name__()
@property
def name(self):
name = self.py__name__()
if name is None:
name = self.access_handle.get_repr()
return CompiledValueName(self, name)
def _execute_function(self, params):
from jedi.inference import docstrings
from jedi.inference.compiled import builtin_from_name
if self.api_type != 'function':
return
for name in self._parse_function_doc()[1].split():
try:
# TODO wtf is this? this is exactly the same as the thing
# below. It uses getattr as well.
self.inference_state.builtins_module.access_handle.getattr_paths(name)
except AttributeError:
continue
else:
bltn_obj = builtin_from_name(self.inference_state, name)
yield from self.inference_state.execute(bltn_obj, params)
yield from docstrings.infer_return_types(self)
def get_safe_value(self, default=_sentinel):
try:
return self.access_handle.get_safe_value()
except ValueError:
if default == _sentinel:
raise
return default
def execute_operation(self, other, operator):
try:
return ValueSet([create_from_access_path(
self.inference_state,
self.access_handle.execute_operation(other.access_handle, operator)
)])
except TypeError:
return NO_VALUES
def execute_annotation(self):
if self.access_handle.get_repr() == 'None':
# None as an annotation doesn't need to be executed.
return ValueSet([self])
name, args = self.access_handle.get_annotation_name_and_args()
arguments = [
ValueSet([create_from_access_path(self.inference_state, path)])
for path in args
]
if name == 'Union':
return ValueSet.from_sets(arg.execute_annotation() for arg in arguments)
elif name:
# While with_generics only exists on very specific objects, we
# should probably be fine, because we control all the typing
# objects.
return ValueSet([
v.with_generics(arguments)
for v in self.inference_state.typing_module.py__getattribute__(name)
]).execute_annotation()
return super().execute_annotation()
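    # Hedged illustration: an annotation `typing.Union[int, str]` takes the
    # 'Union' branch above and infers the union of executing `int` and
    # `str`; something like `typing.List[int]` takes the named branch and is
    # rebuilt with generics through the typing module.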
def negate(self):
return create_from_access_path(self.inference_state, self.access_handle.negate())
def get_metaclasses(self):
return NO_VALUES
def _as_context(self):
return CompiledContext(self)
@property
def array_type(self):
return self.access_handle.get_array_type()
def get_key_values(self):
return [
create_from_access_path(self.inference_state, k)
for k in self.access_handle.get_key_paths()
]
def get_type_hint(self, add_class_info=True):
if self.access_handle.get_repr() in ('None', "<class 'NoneType'>"):
return 'None'
return None
class CompiledModule(CompiledValue):
file_io = None # For modules
def _as_context(self):
return CompiledModuleContext(self)
def py__path__(self):
return self.access_handle.py__path__()
def is_package(self):
return self.py__path__() is not None
@property
def string_names(self):
# For modules
name = self.py__name__()
if name is None:
return ()
return tuple(name.split('.'))
def py__file__(self) -> Optional[Path]:
return self.access_handle.py__file__() # type: ignore[no-any-return]
class CompiledName(AbstractNameDefinition):
def __init__(self, inference_state, parent_value, name):
self._inference_state = inference_state
self.parent_context = parent_value.as_context()
self._parent_value = parent_value
self.string_name = name
def py__doc__(self):
return self.infer_compiled_value().py__doc__()
def _get_qualified_names(self):
parent_qualified_names = self.parent_context.get_qualified_names()
if parent_qualified_names is None:
return None
return parent_qualified_names + (self.string_name,)
def get_defining_qualified_value(self):
context = self.parent_context
if context.is_module() or context.is_class():
return self.parent_context.get_value() # Might be None
return None
def __repr__(self):
try:
name = self.parent_context.name # __name__ is not defined all the time
except AttributeError:
name = None
return '<%s: (%s).%s>' % (self.__class__.__name__, name, self.string_name)
@property
def api_type(self):
return self.infer_compiled_value().api_type
def infer(self):
return ValueSet([self.infer_compiled_value()])
@memoize_method
def infer_compiled_value(self):
return create_from_name(self._inference_state, self._parent_value, self.string_name)
class SignatureParamName(ParamNameInterface, AbstractNameDefinition):
def __init__(self, compiled_value, signature_param):
self.parent_context = compiled_value.parent_context
self._signature_param = signature_param
@property
def string_name(self):
return self._signature_param.name
def to_string(self):
s = self._kind_string() + self.string_name
if self._signature_param.has_annotation:
s += ': ' + self._signature_param.annotation_string
if self._signature_param.has_default:
s += '=' + self._signature_param.default_string
return s
def get_kind(self):
return getattr(Parameter, self._signature_param.kind_name)
def infer(self):
p = self._signature_param
inference_state = self.parent_context.inference_state
values = NO_VALUES
if p.has_default:
values = ValueSet([create_from_access_path(inference_state, p.default)])
if p.has_annotation:
annotation = create_from_access_path(inference_state, p.annotation)
values |= annotation.execute_with_values()
return values
class UnresolvableParamName(ParamNameInterface, AbstractNameDefinition):
def __init__(self, compiled_value, name, default):
self.parent_context = compiled_value.parent_context
self.string_name = name
self._default = default
def get_kind(self):
return Parameter.POSITIONAL_ONLY
def to_string(self):
string = self.string_name
if self._default:
string += '=' + self._default
return string
def infer(self):
return NO_VALUES
class CompiledValueName(ValueNameMixin, AbstractNameDefinition):
def __init__(self, value, name):
self.string_name = name
self._value = value
self.parent_context = value.parent_context
class EmptyCompiledName(AbstractNameDefinition):
"""
Accessing some names will raise an exception. To avoid not having any
completions, just give Jedi the option to return this object. It infers to
nothing.
"""
def __init__(self, inference_state, name):
self.parent_context = inference_state.builtins_module
self.string_name = name
def infer(self):
return NO_VALUES
class CompiledValueFilter(AbstractFilter):
def __init__(self, inference_state, compiled_value, is_instance=False):
self._inference_state = inference_state
self.compiled_value = compiled_value
self.is_instance = is_instance
def get(self, name):
access_handle = self.compiled_value.access_handle
return self._get(
name,
lambda name, safe: access_handle.is_allowed_getattr(name, safe=safe),
lambda name: name in access_handle.dir(),
check_has_attribute=True
)
def _get(self, name, allowed_getattr_callback, in_dir_callback, check_has_attribute=False):
"""
To remove quite a few access calls we introduced the callback here.
"""
has_attribute, is_descriptor = allowed_getattr_callback(
name,
safe=not self._inference_state.allow_descriptor_getattr
)
if check_has_attribute and not has_attribute:
return []
if (is_descriptor or not has_attribute) \
and not self._inference_state.allow_descriptor_getattr:
return [self._get_cached_name(name, is_empty=True)]
if self.is_instance and not in_dir_callback(name):
return []
return [self._get_cached_name(name)]
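    # Decision sketch for _get() (derived from the branches above): with
    # allow_descriptor_getattr disabled, a descriptor-backed name such as a
    # property yields an EmptyCompiledName so the property is never executed,
    # while ordinary attributes fall through to a real CompiledName.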
@memoize_method
def _get_cached_name(self, name, is_empty=False):
if is_empty:
return EmptyCompiledName(self._inference_state, name)
else:
return self._create_name(name)
def values(self):
from jedi.inference.compiled import builtin_from_name
names = []
needs_type_completions, dir_infos = self.compiled_value.access_handle.get_dir_infos()
# We could use `safe=False` here as well, especially as a parameter to
# get_dir_infos. But this would lead to a lot of property executions
        # that are probably not wanted. The drawback is that we then behave
        # differently in `get` and `values`: for `get` we always
        # execute.
for name in dir_infos:
names += self._get(
name,
lambda name, safe: dir_infos[name],
lambda name: name in dir_infos,
)
# ``dir`` doesn't include the type names.
if not self.is_instance and needs_type_completions:
for filter in builtin_from_name(self._inference_state, 'type').get_filters():
names += filter.values()
return names
def _create_name(self, name):
return CompiledName(
self._inference_state,
self.compiled_value,
name
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.compiled_value)
docstr_defaults = {
'floating point number': 'float',
'character': 'str',
'integer': 'int',
'dictionary': 'dict',
'string': 'str',
}
def _parse_function_doc(doc):
"""
    Takes a function's docstring and returns params and return value as a tuple.
This is nothing more than a docstring parser.
TODO docstrings like utime(path, (atime, mtime)) and a(b [, b]) -> None
TODO docstrings like 'tuple of integers'
"""
# parse round parentheses: def func(a, (b,c))
try:
count = 0
start = doc.index('(')
for i, s in enumerate(doc[start:]):
if s == '(':
count += 1
elif s == ')':
count -= 1
if count == 0:
end = start + i
break
param_str = doc[start + 1:end]
except (ValueError, UnboundLocalError):
# ValueError for doc.index
# UnboundLocalError for undefined end in last line
debug.dbg('no brackets found - no param')
end = 0
param_str = ''
else:
        # Remove square brackets, which mark an optional param (default None).
def change_options(m):
args = m.group(1).split(',')
for i, a in enumerate(args):
if a and '=' not in a:
args[i] += '=None'
return ','.join(args)
while True:
param_str, changes = re.subn(r' ?\[([^\[\]]+)\]',
change_options, param_str)
if changes == 0:
break
param_str = param_str.replace('-', '_') # see: isinstance.__doc__
# parse return value
r = re.search('-[>-]* ', doc[end:end + 7])
if r is None:
ret = ''
else:
index = end + r.end()
# get result type, which can contain newlines
pattern = re.compile(r'(,\n|[^\n-])+')
ret_str = pattern.match(doc, index).group(0).strip()
# New object -> object()
ret_str = re.sub(r'[nN]ew (.*)', r'\1()', ret_str)
ret = docstr_defaults.get(ret_str, ret_str)
return param_str, ret
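# Hedged examples of what _parse_function_doc returns; the inputs are
# illustrative C-style docstrings, not taken from any real module:
#
#     _parse_function_doc('isinstance(object, class_or_tuple) -> bool')
#     # -> ('object, class_or_tuple', 'bool')
#     _parse_function_doc('pow(x, y[, z]) -> number')
#     # -> ('x, y, z=None', 'number')   # bracketed params gain '=None'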
def create_from_name(inference_state, compiled_value, name):
access_paths = compiled_value.access_handle.getattr_paths(name, default=None)
value = None
for access_path in access_paths:
value = create_cached_compiled_value(
inference_state,
access_path,
parent_context=None if value is None else value.as_context(),
)
return value
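# Sketch of the loop above (hedged): getattr_paths yields one access per
# attribute hop, and each resulting compiled value is threaded in as the
# parent context of the next one, so nested attributes end up with the
# correct parent chain.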
def _normalize_create_args(func):
"""The cache doesn't care about keyword vs. normal args."""
def wrapper(inference_state, obj, parent_context=None):
return func(inference_state, obj, parent_context)
return wrapper
def create_from_access_path(inference_state, access_path):
value = None
for name, access in access_path.accesses:
value = create_cached_compiled_value(
inference_state,
access,
parent_context=None if value is None else value.as_context()
)
return value
@_normalize_create_args
@inference_state_function_cache()
def create_cached_compiled_value(inference_state, access_handle, parent_context):
assert not isinstance(parent_context, CompiledValue)
if parent_context is None:
cls = CompiledModule
else:
cls = CompiledValue
return cls(inference_state, access_handle, parent_context)
| 20,526 | Python | .py | 499 | 32.0501 | 96 | 0.622477 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,927 | mixed.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/mixed.py | """
Used only for REPL Completion.
"""
import inspect
from pathlib import Path
from jedi.parser_utils import get_cached_code_lines
from jedi import settings
from jedi.cache import memoize_method
from jedi.inference import compiled
from jedi.file_io import FileIO
from jedi.inference.names import NameWrapper
from jedi.inference.base_value import ValueSet, ValueWrapper, NO_VALUES
from jedi.inference.value import ModuleValue
from jedi.inference.cache import inference_state_function_cache, \
inference_state_method_cache
from jedi.inference.compiled.access import ALLOWED_GETITEM_TYPES, get_api_type
from jedi.inference.gradual.conversion import to_stub
from jedi.inference.context import CompiledContext, CompiledModuleContext, \
TreeContextMixin
_sentinel = object()
class MixedObject(ValueWrapper):
"""
A ``MixedObject`` is used in two ways:
1. It uses the default logic of ``parser.python.tree`` objects,
2. except for getattr calls and signatures. The names dicts are generated
in a fashion like ``CompiledValue``.
This combined logic makes it possible to provide more powerful REPL
    completion. It allows side effects that are not noticeable with the
    default parser structure to still be completable.
    The biggest difference between CompiledValue and MixedObject is that we
    are generally dealing with Python code and not with C code. This
    generates fewer special cases, because in Python you don't have the same
    freedom to modify the runtime.
"""
def __init__(self, compiled_value, tree_value):
super().__init__(tree_value)
self.compiled_value = compiled_value
self.access_handle = compiled_value.access_handle
def get_filters(self, *args, **kwargs):
yield MixedObjectFilter(
self.inference_state, self.compiled_value, self._wrapped_value)
def get_signatures(self):
# Prefer `inspect.signature` over somehow analyzing Python code. It
# should be very precise, especially for stuff like `partial`.
return self.compiled_value.get_signatures()
@inference_state_method_cache(default=NO_VALUES)
def py__call__(self, arguments):
# Fallback to the wrapped value if to stub returns no values.
values = to_stub(self._wrapped_value)
if not values:
values = self._wrapped_value
return values.py__call__(arguments)
def get_safe_value(self, default=_sentinel):
if default is _sentinel:
return self.compiled_value.get_safe_value()
else:
return self.compiled_value.get_safe_value(default)
@property
def array_type(self):
return self.compiled_value.array_type
def get_key_values(self):
return self.compiled_value.get_key_values()
def py__simple_getitem__(self, index):
python_object = self.compiled_value.access_handle.access._obj
if type(python_object) in ALLOWED_GETITEM_TYPES:
return self.compiled_value.py__simple_getitem__(index)
return self._wrapped_value.py__simple_getitem__(index)
def negate(self):
return self.compiled_value.negate()
def _as_context(self):
if self.parent_context is None:
return MixedModuleContext(self)
return MixedContext(self)
def __repr__(self):
return '<%s: %s; %s>' % (
type(self).__name__,
self.access_handle.get_repr(),
self._wrapped_value,
)
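# Hypothetical REPL session (not from the original source): after
# ``import email; email.x = 3`` the parsed tree of ``email`` knows nothing
# about ``x``, but the compiled side does; a MixedObject combines both so
# such runtime side effects still show up in completions.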
class MixedContext(CompiledContext, TreeContextMixin):
@property
def compiled_value(self):
return self._value.compiled_value
class MixedModuleContext(CompiledModuleContext, MixedContext):
pass
class MixedName(NameWrapper):
"""
The ``CompiledName._compiled_value`` is our MixedObject.
"""
def __init__(self, wrapped_name, parent_tree_value):
super().__init__(wrapped_name)
self._parent_tree_value = parent_tree_value
@property
def start_pos(self):
values = list(self.infer())
if not values:
# This means a start_pos that doesn't exist (compiled objects).
return 0, 0
return values[0].name.start_pos
@memoize_method
def infer(self):
compiled_value = self._wrapped_name.infer_compiled_value()
tree_value = self._parent_tree_value
if tree_value.is_instance() or tree_value.is_class():
tree_values = tree_value.py__getattribute__(self.string_name)
if compiled_value.is_function():
return ValueSet({MixedObject(compiled_value, v) for v in tree_values})
module_context = tree_value.get_root_context()
return _create(self._inference_state, compiled_value, module_context)
class MixedObjectFilter(compiled.CompiledValueFilter):
def __init__(self, inference_state, compiled_value, tree_value):
super().__init__(inference_state, compiled_value)
self._tree_value = tree_value
def _create_name(self, name):
return MixedName(
super()._create_name(name),
self._tree_value,
)
@inference_state_function_cache()
def _load_module(inference_state, path):
return inference_state.parse(
path=path,
cache=True,
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory
).get_root_node()
def _get_object_to_check(python_object):
"""Check if inspect.getfile has a chance to find the source."""
try:
python_object = inspect.unwrap(python_object)
except ValueError:
# Can return a ValueError when it wraps around
pass
if (inspect.ismodule(python_object)
or inspect.isclass(python_object)
or inspect.ismethod(python_object)
or inspect.isfunction(python_object)
or inspect.istraceback(python_object)
or inspect.isframe(python_object)
or inspect.iscode(python_object)):
return python_object
try:
return python_object.__class__
except AttributeError:
raise TypeError # Prevents computation of `repr` within inspect.
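# Hedged example: for an int *instance*, none of the inspect checks above
# match, so its class (``int``) is returned; inspect.getsourcefile is then
# tried on the class and raises TypeError for builtins, which the caller
# treats as "no source available".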
def _find_syntax_node_name(inference_state, python_object):
original_object = python_object
try:
python_object = _get_object_to_check(python_object)
path = inspect.getsourcefile(python_object)
except (OSError, TypeError):
# The type might not be known (e.g. class_with_dict.__weakref__)
return None
path = None if path is None else Path(path)
try:
if path is None or not path.exists():
# The path might not exist or be e.g. <stdin>.
return None
except OSError:
# Might raise an OSError on Windows:
#
# [WinError 123] The filename, directory name, or volume label
# syntax is incorrect: '<string>'
return None
file_io = FileIO(path)
module_node = _load_module(inference_state, path)
if inspect.ismodule(python_object):
# We don't need to check names for modules, because there's not really
# a way to write a module in a module in Python (and also __name__ can
# be something like ``email.utils``).
code_lines = get_cached_code_lines(inference_state.grammar, path)
return module_node, module_node, file_io, code_lines
try:
name_str = python_object.__name__
except AttributeError:
# Stuff like python_function.__code__.
return None
if name_str == '<lambda>':
return None # It's too hard to find lambdas.
# Doesn't always work (e.g. os.stat_result)
names = module_node.get_used_names().get(name_str, [])
# Only functions and classes are relevant. If a name e.g. points to an
# import, it's probably a builtin (like collections.deque) and needs to be
# ignored.
names = [
n for n in names
if n.parent.type in ('funcdef', 'classdef') and n.parent.name == n
]
if not names:
return None
try:
code = python_object.__code__
# By using the line number of a code object we make the lookup in a
# file pretty easy. There's still a possibility of people defining
# stuff like ``a = 3; foo(a); a = 4`` on the same line, but if people
# do so we just don't care.
line_nr = code.co_firstlineno
except AttributeError:
pass
else:
line_names = [name for name in names if name.start_pos[0] == line_nr]
# There's a chance that the object is not available anymore, because
# the code has changed in the background.
if line_names:
names = line_names
code_lines = get_cached_code_lines(inference_state.grammar, path)
    # It's really hard to actually get the right definition, so as a last
    # resort we just return the last one. This choice might lead to odd
    # completions at some points but will lead to mostly correct type
# inference, because people tend to define a public name in a module only
# once.
tree_node = names[-1].parent
if tree_node.type == 'funcdef' and get_api_type(original_object) == 'instance':
# If an instance is given and we're landing on a function (e.g.
# partial in 3.5), something is completely wrong and we should not
# return that.
return None
return module_node, tree_node, file_io, code_lines
@inference_state_function_cache()
def _create(inference_state, compiled_value, module_context):
# TODO accessing this is bad, but it probably doesn't matter that much,
    # because we're working with interpreters only here.
python_object = compiled_value.access_handle.access._obj
result = _find_syntax_node_name(inference_state, python_object)
if result is None:
# TODO Care about generics from stuff like `[1]` and don't return like this.
if type(python_object) in (dict, list, tuple):
return ValueSet({compiled_value})
tree_values = to_stub(compiled_value)
if not tree_values:
return ValueSet({compiled_value})
else:
module_node, tree_node, file_io, code_lines = result
if module_context is None or module_context.tree_node != module_node:
root_compiled_value = compiled_value.get_root_context().get_value()
# TODO this __name__ might be wrong.
name = root_compiled_value.py__name__()
string_names = tuple(name.split('.'))
module_value = ModuleValue(
inference_state, module_node,
file_io=file_io,
string_names=string_names,
code_lines=code_lines,
is_package=root_compiled_value.is_package(),
)
if name is not None:
inference_state.module_cache.add(string_names, ValueSet([module_value]))
module_context = module_value.as_context()
tree_values = ValueSet({module_context.create_value(tree_node)})
if tree_node.type == 'classdef':
if not compiled_value.is_class():
# Is an instance, not a class.
tree_values = tree_values.execute_with_values()
return ValueSet(
MixedObject(compiled_value, tree_value=tree_value)
for tree_value in tree_values
)
| 11,355 | Python | .py | 257 | 36.431907 | 88 | 0.664222 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,928 | __main__.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/subprocess/__main__.py | import os
import sys
from importlib.abc import MetaPathFinder
from importlib.machinery import PathFinder
# Remove the first entry, because it's simply a directory entry that equals
# this directory.
del sys.path[0]
def _get_paths():
# Get the path to jedi.
_d = os.path.dirname
_jedi_path = _d(_d(_d(_d(_d(__file__)))))
_parso_path = sys.argv[1]
# The paths are the directory that jedi and parso lie in.
return {'jedi': _jedi_path, 'parso': _parso_path}
class _ExactImporter(MetaPathFinder):
def __init__(self, path_dct):
self._path_dct = path_dct
def find_module(self, fullname, path=None):
if path is None and fullname in self._path_dct:
p = self._path_dct[fullname]
loader = PathFinder.find_module(fullname, path=[p])
return loader
return None
# Try to import jedi/parso.
sys.meta_path.insert(0, _ExactImporter(_get_paths()))
from jedi.inference.compiled import subprocess # noqa: E402
sys.meta_path.pop(0)
# Parse the host interpreter's version (passed as a dotted string in argv[2]).
host_sys_version = [int(x) for x in sys.argv[2].split('.')]
# And finally start the client.
subprocess.Listener().listen()
| 1,167 | Python | .py | 31 | 33.387097 | 75 | 0.689441 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,929 | __init__.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/subprocess/__init__.py | """
Makes it possible to do the compiled analysis in a subprocess. This has two
goals:
1. Making it safer - Segfaults and RuntimeErrors as well as stdout/stderr can
be ignored and dealt with.
2. Make it possible to handle different Python versions as well as virtualenvs.
"""
import collections
import os
import sys
import queue
import subprocess
import traceback
import weakref
from functools import partial
from threading import Thread
from jedi._compatibility import pickle_dump, pickle_load
from jedi import debug
from jedi.cache import memoize_method
from jedi.inference.compiled.subprocess import functions
from jedi.inference.compiled.access import DirectObjectAccess, AccessPath, \
SignatureParam
from jedi.api.exceptions import InternalError
_MAIN_PATH = os.path.join(os.path.dirname(__file__), '__main__.py')
PICKLE_PROTOCOL = 4
def _GeneralizedPopen(*args, **kwargs):
if os.name == 'nt':
try:
# Was introduced in Python 3.7.
CREATE_NO_WINDOW = subprocess.CREATE_NO_WINDOW
except AttributeError:
CREATE_NO_WINDOW = 0x08000000
kwargs['creationflags'] = CREATE_NO_WINDOW
# The child process doesn't need file descriptors except 0, 1, 2.
# This is unix only.
kwargs['close_fds'] = 'posix' in sys.builtin_module_names
return subprocess.Popen(*args, **kwargs)
def _enqueue_output(out, queue_):
for line in iter(out.readline, b''):
queue_.put(line)
def _add_stderr_to_debug(stderr_queue):
while True:
# Try to do some error reporting from the subprocess and print its
# stderr contents.
try:
line = stderr_queue.get_nowait()
line = line.decode('utf-8', 'replace')
debug.warning('stderr output: %s' % line.rstrip('\n'))
except queue.Empty:
break
def _get_function(name):
return getattr(functions, name)
def _cleanup_process(process, thread):
try:
process.kill()
process.wait()
except OSError:
# Raised if the process is already killed.
pass
thread.join()
for stream in [process.stdin, process.stdout, process.stderr]:
try:
stream.close()
except OSError:
# Raised if the stream is broken.
pass
class _InferenceStateProcess:
def __init__(self, inference_state):
self._inference_state_weakref = weakref.ref(inference_state)
self._inference_state_id = id(inference_state)
self._handles = {}
def get_or_create_access_handle(self, obj):
id_ = id(obj)
try:
return self.get_access_handle(id_)
except KeyError:
access = DirectObjectAccess(self._inference_state_weakref(), obj)
handle = AccessHandle(self, access, id_)
self.set_access_handle(handle)
return handle
def get_access_handle(self, id_):
return self._handles[id_]
def set_access_handle(self, handle):
self._handles[handle.id] = handle
class InferenceStateSameProcess(_InferenceStateProcess):
"""
Basically just an easy access to functions.py. It has the same API
as InferenceStateSubprocess and does the same thing without using a subprocess.
This is necessary for the Interpreter process.
"""
def __getattr__(self, name):
return partial(_get_function(name), self._inference_state_weakref())
class InferenceStateSubprocess(_InferenceStateProcess):
def __init__(self, inference_state, compiled_subprocess):
super().__init__(inference_state)
self._used = False
self._compiled_subprocess = compiled_subprocess
def __getattr__(self, name):
func = _get_function(name)
def wrapper(*args, **kwargs):
self._used = True
result = self._compiled_subprocess.run(
self._inference_state_weakref(),
func,
args=args,
kwargs=kwargs,
)
# IMO it should be possible to create a hook in pickle.load to
# mess with the loaded objects. However it's extremely complicated
# to work around this so just do it with this call. ~ dave
return self._convert_access_handles(result)
return wrapper
def _convert_access_handles(self, obj):
if isinstance(obj, SignatureParam):
return SignatureParam(*self._convert_access_handles(tuple(obj)))
elif isinstance(obj, tuple):
return tuple(self._convert_access_handles(o) for o in obj)
elif isinstance(obj, list):
return [self._convert_access_handles(o) for o in obj]
elif isinstance(obj, AccessHandle):
try:
# Rewrite the access handle to one we're already having.
obj = self.get_access_handle(obj.id)
except KeyError:
obj.add_subprocess(self)
self.set_access_handle(obj)
elif isinstance(obj, AccessPath):
return AccessPath(self._convert_access_handles(obj.accesses))
return obj
def __del__(self):
if self._used and not self._compiled_subprocess.is_crashed:
self._compiled_subprocess.delete_inference_state(self._inference_state_id)
class CompiledSubprocess:
is_crashed = False
def __init__(self, executable, env_vars=None):
self._executable = executable
self._env_vars = env_vars
self._inference_state_deletion_queue = collections.deque()
self._cleanup_callable = lambda: None
def __repr__(self):
pid = os.getpid()
return '<%s _executable=%r, is_crashed=%r, pid=%r>' % (
self.__class__.__name__,
self._executable,
self.is_crashed,
pid,
)
@memoize_method
def _get_process(self):
debug.dbg('Start environment subprocess %s', self._executable)
parso_path = sys.modules['parso'].__file__
args = (
self._executable,
_MAIN_PATH,
os.path.dirname(os.path.dirname(parso_path)),
'.'.join(str(x) for x in sys.version_info[:3]),
)
process = _GeneralizedPopen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self._env_vars
)
self._stderr_queue = queue.Queue()
self._stderr_thread = t = Thread(
target=_enqueue_output,
args=(process.stderr, self._stderr_queue)
)
t.daemon = True
t.start()
# Ensure the subprocess is properly cleaned up when the object
# is garbage collected.
self._cleanup_callable = weakref.finalize(self,
_cleanup_process,
process,
t)
return process
def run(self, inference_state, function, args=(), kwargs={}):
# Delete old inference_states.
while True:
try:
inference_state_id = self._inference_state_deletion_queue.pop()
except IndexError:
break
else:
self._send(inference_state_id, None)
assert callable(function)
return self._send(id(inference_state), function, args, kwargs)
def get_sys_path(self):
return self._send(None, functions.get_sys_path, (), {})
def _kill(self):
self.is_crashed = True
self._cleanup_callable()
def _send(self, inference_state_id, function, args=(), kwargs={}):
if self.is_crashed:
raise InternalError("The subprocess %s has crashed." % self._executable)
data = inference_state_id, function, args, kwargs
try:
pickle_dump(data, self._get_process().stdin, PICKLE_PROTOCOL)
except BrokenPipeError:
self._kill()
raise InternalError("The subprocess %s was killed. Maybe out of memory?"
% self._executable)
try:
is_exception, traceback, result = pickle_load(self._get_process().stdout)
except EOFError as eof_error:
try:
stderr = self._get_process().stderr.read().decode('utf-8', 'replace')
except Exception as exc:
stderr = '<empty/not available (%r)>' % exc
self._kill()
_add_stderr_to_debug(self._stderr_queue)
raise InternalError(
"The subprocess %s has crashed (%r, stderr=%s)." % (
self._executable,
eof_error,
stderr,
))
_add_stderr_to_debug(self._stderr_queue)
if is_exception:
            # Replace the attribute error message with the traceback. It's
# way more informative.
result.args = (traceback,)
raise result
return result
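    # Wire-format recap (derived from _send above): each request is a pickled
    # 4-tuple ``(inference_state_id, function, args, kwargs)`` written to the
    # child's stdin; each response is a pickled 3-tuple
    # ``(is_exception, traceback, result)`` read back from its stdout.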
def delete_inference_state(self, inference_state_id):
"""
        Currently we are not deleting inference_states instantly. They only
        get deleted once the subprocess is used again. It would probably be
        a better solution to move all of this into a thread. However, the
        memory usage of a single inference_state shouldn't be that high.
"""
# With an argument - the inference_state gets deleted.
self._inference_state_deletion_queue.append(inference_state_id)
class Listener:
def __init__(self):
self._inference_states = {}
        # TODO refactor so we don't need the process anymore, just handle
        # the controlling.
self._process = _InferenceStateProcess(Listener)
def _get_inference_state(self, function, inference_state_id):
from jedi.inference import InferenceState
try:
inference_state = self._inference_states[inference_state_id]
except KeyError:
from jedi import InterpreterEnvironment
inference_state = InferenceState(
# The project is not actually needed. Nothing should need to
# access it.
project=None,
environment=InterpreterEnvironment()
)
self._inference_states[inference_state_id] = inference_state
return inference_state
def _run(self, inference_state_id, function, args, kwargs):
if inference_state_id is None:
return function(*args, **kwargs)
elif function is None:
del self._inference_states[inference_state_id]
else:
inference_state = self._get_inference_state(function, inference_state_id)
# Exchange all handles
args = list(args)
for i, arg in enumerate(args):
if isinstance(arg, AccessHandle):
args[i] = inference_state.compiled_subprocess.get_access_handle(arg.id)
for key, value in kwargs.items():
if isinstance(value, AccessHandle):
kwargs[key] = inference_state.compiled_subprocess.get_access_handle(value.id)
return function(inference_state, *args, **kwargs)
def listen(self):
stdout = sys.stdout
# Mute stdout. Nobody should actually be able to write to it,
# because stdout is used for IPC.
sys.stdout = open(os.devnull, 'w')
stdin = sys.stdin
stdout = stdout.buffer
stdin = stdin.buffer
while True:
try:
payload = pickle_load(stdin)
except EOFError:
# It looks like the parent process closed.
# Don't make a big fuss here and just exit.
exit(0)
try:
result = False, None, self._run(*payload)
except Exception as e:
result = True, traceback.format_exc(), e
pickle_dump(result, stdout, PICKLE_PROTOCOL)
class AccessHandle:
def __init__(self, subprocess, access, id_):
self.access = access
self._subprocess = subprocess
self.id = id_
def add_subprocess(self, subprocess):
self._subprocess = subprocess
def __repr__(self):
try:
detail = self.access
except AttributeError:
detail = '#' + str(self.id)
return '<%s of %s>' % (self.__class__.__name__, detail)
def __getstate__(self):
return self.id
def __setstate__(self, state):
self.id = state
def __getattr__(self, name):
if name in ('id', 'access') or name.startswith('_'):
raise AttributeError("Something went wrong with unpickling")
# print('getattr', name, file=sys.stderr)
return partial(self._workaround, name)
def _workaround(self, name, *args, **kwargs):
"""
TODO Currently we're passing slice objects around. This should not
happen. They are also the only unhashable objects that we're passing
around.
"""
if args and isinstance(args[0], slice):
return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
return self._cached_results(name, *args, **kwargs)
@memoize_method
def _cached_results(self, name, *args, **kwargs):
return self._subprocess.get_compiled_method_return(self.id, name, *args, **kwargs)
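# Proxy sketch (hedged): ``handle.py__bool__()`` hits __getattr__ above,
# which returns ``partial(self._workaround, 'py__bool__')``; the call is then
# forwarded to the subprocess as
# ``get_compiled_method_return(handle.id, 'py__bool__')`` and memoized,
# unless a slice argument makes the arguments unhashable.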
| 13,490 | Python | .py | 325 | 31.44 | 97 | 0.607069 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,930 | functions.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/compiled/subprocess/functions.py | import sys
import os
import inspect
import importlib
import warnings
from pathlib import Path
from zipfile import ZipFile
from zipimport import zipimporter, ZipImportError
from importlib.machinery import all_suffixes
from jedi.inference.compiled import access
from jedi import debug
from jedi import parser_utils
from jedi.file_io import KnownContentFileIO, ZipFileIO
def get_sys_path():
return sys.path
def load_module(inference_state, **kwargs):
return access.load_module(inference_state, **kwargs)
def get_compiled_method_return(inference_state, id, attribute, *args, **kwargs):
handle = inference_state.compiled_subprocess.get_access_handle(id)
return getattr(handle.access, attribute)(*args, **kwargs)
def create_simple_object(inference_state, obj):
return access.create_access_path(inference_state, obj)
def get_module_info(inference_state, sys_path=None, full_name=None, **kwargs):
"""
    Returns Tuple[Union[ImplicitNSInfo, FileIO, None], Optional[bool]]
"""
if sys_path is not None:
sys.path, temp = sys_path, sys.path
try:
return _find_module(full_name=full_name, **kwargs)
except ImportError:
return None, None
finally:
if sys_path is not None:
sys.path = temp
def get_builtin_module_names(inference_state):
return sys.builtin_module_names
def _test_raise_error(inference_state, exception_type):
"""
Raise an error to simulate certain problems for unit tests.
"""
raise exception_type
def _test_print(inference_state, stderr=None, stdout=None):
"""
Force some prints in the subprocesses. This exists for unit tests.
"""
if stderr is not None:
print(stderr, file=sys.stderr)
sys.stderr.flush()
if stdout is not None:
print(stdout)
sys.stdout.flush()
def _get_init_path(directory_path):
"""
    Search a directory for an ``__init__`` file. If found, return its path;
    otherwise return None.
"""
for suffix in all_suffixes():
path = os.path.join(directory_path, '__init__' + suffix)
if os.path.exists(path):
return path
return None
def safe_literal_eval(inference_state, value):
return parser_utils.safe_literal_eval(value)
def iter_module_names(*args, **kwargs):
return list(_iter_module_names(*args, **kwargs))
def _iter_module_names(inference_state, paths):
# Python modules/packages
for path in paths:
try:
dir_entries = ((entry.name, entry.is_dir()) for entry in os.scandir(path))
except OSError:
try:
zip_import_info = zipimporter(path)
# Unfortunately, there is no public way to access zipimporter's
# private _files member. We therefore have to use a
# custom function to iterate over the files.
dir_entries = _zip_list_subdirectory(
zip_import_info.archive, zip_import_info.prefix)
except ZipImportError:
# The file might not exist or reading it might lead to an error.
debug.warning("Not possible to list directory: %s", path)
continue
for name, is_dir in dir_entries:
# First Namespaces then modules/stubs
if is_dir:
# pycache is obviously not an interesting namespace. Also the
# name must be a valid identifier.
if name != '__pycache__' and name.isidentifier():
yield name
else:
if name.endswith('.pyi'): # Stub files
modname = name[:-4]
else:
modname = inspect.getmodulename(name)
if modname and '.' not in modname:
if modname != '__init__':
yield modname
def _find_module(string, path=None, full_name=None, is_global_search=True):
"""
Provides information about a module.
This function isolates the differences in importing libraries introduced with
    Python 3.3 on; it gets a module name and optionally a path. It will
    return a tuple containing an open file for the module (if not builtin),
    the filename or the name of the module if it is a builtin one, and a
    boolean indicating whether the module is contained in a package.
"""
spec = None
loader = None
for finder in sys.meta_path:
if is_global_search and finder != importlib.machinery.PathFinder:
p = None
else:
p = path
try:
find_spec = finder.find_spec
except AttributeError:
            # These are old-school classes that still have a different API, just
# ignore those.
continue
spec = find_spec(string, p)
if spec is not None:
loader = spec.loader
if loader is None and not spec.has_location:
# This is a namespace package.
full_name = string if not path else full_name
implicit_ns_info = ImplicitNSInfo(full_name, spec.submodule_search_locations._path)
return implicit_ns_info, True
break
return _find_module_py33(string, path, loader)
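# Illustrative results (hedged; exact values depend on the interpreter):
# ``_find_module('json')`` typically yields a
# ``(KnownContentFileIO('.../json/__init__.py', <bytes>), True)`` pair, while
# a builtin like ``_find_module('sys')`` comes back as ``(None, False)``
# because its loader has no get_filename().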
def _find_module_py33(string, path=None, loader=None, full_name=None, is_global_search=True):
loader = loader or importlib.machinery.PathFinder.find_module(string, path)
if loader is None and path is None: # Fallback to find builtins
try:
with warnings.catch_warnings(record=True):
# Mute "DeprecationWarning: Use importlib.util.find_spec()
# instead." While we should replace that in the future, it's
# probably good to wait until we deprecate Python 3.3, since
# it was added in Python 3.4 and find_loader hasn't been
# removed in 3.6.
loader = importlib.find_loader(string)
except ValueError as e:
# See #491. Importlib might raise a ValueError, to avoid this, we
# just raise an ImportError to fix the issue.
raise ImportError("Originally " + repr(e))
if loader is None:
raise ImportError("Couldn't find a loader for {}".format(string))
return _from_loader(loader, string)
def _from_loader(loader, string):
try:
is_package_method = loader.is_package
except AttributeError:
is_package = False
else:
is_package = is_package_method(string)
try:
get_filename = loader.get_filename
except AttributeError:
return None, is_package
else:
module_path = get_filename(string)
    # To read bytes instead of unicode, "overwrite" loader.get_source if
# possible.
try:
f = type(loader).get_source
except AttributeError:
raise ImportError("get_source was not defined on loader")
if f is not importlib.machinery.SourceFileLoader.get_source:
# Unfortunately we are reading unicode here, not bytes.
# It seems hard to get bytes, because the zip importer
# logic just unpacks the zip file and returns a file descriptor
# that we cannot as easily access. Therefore we just read it as
# a string in the cases where get_source was overwritten.
code = loader.get_source(string)
else:
code = _get_source(loader, string)
if code is None:
return None, is_package
if isinstance(loader, zipimporter):
return ZipFileIO(module_path, code, Path(loader.archive)), is_package
return KnownContentFileIO(module_path, code), is_package
def _get_source(loader, fullname):
"""
This method is here as a replacement for SourceLoader.get_source. That
method returns unicode, but we prefer bytes.
"""
path = loader.get_filename(fullname)
try:
return loader.get_data(path)
except OSError:
raise ImportError('source not available through get_data()',
name=fullname)
def _zip_list_subdirectory(zip_path, zip_subdir_path):
zip_file = ZipFile(zip_path)
zip_subdir_path = Path(zip_subdir_path)
zip_content_file_paths = zip_file.namelist()
for raw_file_name in zip_content_file_paths:
file_path = Path(raw_file_name)
if file_path.parent == zip_subdir_path:
file_path = file_path.relative_to(zip_subdir_path)
yield file_path.name, raw_file_name.endswith("/")
class ImplicitNSInfo:
"""Stores information returned from an implicit namespace spec"""
def __init__(self, name, paths):
self.name = name
self.paths = paths
| 8,666 | Python | .py | 206 | 33.674757 | 99 | 0.649269 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,931 | annotation.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/annotation.py | """
PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
through function annotations. There is a strong suggestion in this document
that only the type of type hinting defined in PEP0484 should be allowed
as annotations in future python versions.
"""
import re
from inspect import Parameter
from parso import ParserSyntaxError, parse
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.gradual.base import DefineGenericBaseClass, GenericClass
from jedi.inference.gradual.generics import TupleGenericManager
from jedi.inference.gradual.type_var import TypeVar
from jedi.inference.helpers import is_string
from jedi.inference.compiled import builtin_from_name
from jedi.inference.param import get_executed_param_names
from jedi import debug
from jedi import parser_utils
def infer_annotation(context, annotation):
"""
    Infers an annotation node. This means that it infers the `int` part
    here:
foo: int = 3
Also checks for forward references (strings)
"""
value_set = context.infer_node(annotation)
if len(value_set) != 1:
debug.warning("Inferred typing index %s should lead to 1 object, "
" not %s" % (annotation, value_set))
return value_set
inferred_value = list(value_set)[0]
if is_string(inferred_value):
result = _get_forward_reference_node(context, inferred_value.get_safe_value())
if result is not None:
return context.infer_node(result)
return value_set
def _infer_annotation_string(context, string, index=None):
node = _get_forward_reference_node(context, string)
if node is None:
return NO_VALUES
value_set = context.infer_node(node)
if index is not None:
value_set = value_set.filter(
lambda value: (
value.array_type == 'tuple'
and len(list(value.py__iter__())) >= index
)
).py__simple_getitem__(index)
return value_set
def _get_forward_reference_node(context, string):
try:
new_node = context.inference_state.grammar.parse(
string,
start_symbol='eval_input',
error_recovery=False
)
except ParserSyntaxError:
debug.warning('Annotation not parsed: %s' % string)
return None
else:
module = context.tree_node.get_root_node()
parser_utils.move(new_node, module.end_pos[0])
new_node.parent = context.tree_node
return new_node
def _split_comment_param_declaration(decl_text):
"""
Split decl_text on commas, but group generic expressions
together.
For example, given "foo, Bar[baz, biz]" we return
['foo', 'Bar[baz, biz]'].
"""
try:
node = parse(decl_text, error_recovery=False).children[0]
except ParserSyntaxError:
debug.warning('Comment annotation is not valid Python: %s' % decl_text)
return []
if node.type in ['name', 'atom_expr', 'power']:
return [node.get_code().strip()]
params = []
try:
children = node.children
except AttributeError:
return []
else:
for child in children:
if child.type in ['name', 'atom_expr', 'power']:
params.append(child.get_code().strip())
return params
@inference_state_method_cache()
def infer_param(function_value, param, ignore_stars=False):
values = _infer_param(function_value, param)
if ignore_stars or not values:
return values
inference_state = function_value.inference_state
if param.star_count == 1:
tuple_ = builtin_from_name(inference_state, 'tuple')
return ValueSet([GenericClass(
tuple_,
TupleGenericManager((values,)),
)])
elif param.star_count == 2:
dct = builtin_from_name(inference_state, 'dict')
generics = (
ValueSet([builtin_from_name(inference_state, 'str')]),
values
)
return ValueSet([GenericClass(
dct,
TupleGenericManager(generics),
)])
return values
def _infer_param(function_value, param):
"""
Infers the type of a function parameter, using type annotations.
"""
annotation = param.annotation
if annotation is None:
# If no Python 3-style annotation, look for a comment annotation.
# Identify parameters to function in the same sequence as they would
# appear in a type comment.
all_params = [child for child in param.parent.children
if child.type == 'param']
node = param.parent.parent
comment = parser_utils.get_following_comment_same_line(node)
if comment is None:
return NO_VALUES
match = re.match(r"^#\s*type:\s*\(([^#]*)\)\s*->", comment)
if not match:
return NO_VALUES
params_comments = _split_comment_param_declaration(match.group(1))
# Find the specific param being investigated
index = all_params.index(param)
# If the number of parameters doesn't match length of type comment,
# ignore first parameter (assume it's self).
if len(params_comments) != len(all_params):
debug.warning(
"Comments length != Params length %s %s",
params_comments, all_params
)
if function_value.is_bound_method():
if index == 0:
# Assume it's self, which is already handled
return NO_VALUES
index -= 1
if index >= len(params_comments):
return NO_VALUES
param_comment = params_comments[index]
return _infer_annotation_string(
function_value.get_default_param_context(),
param_comment
)
# Annotations are like default params and resolve in the same way.
context = function_value.get_default_param_context()
return infer_annotation(context, annotation)
def py__annotations__(funcdef):
dct = {}
for function_param in funcdef.get_params():
param_annotation = function_param.annotation
if param_annotation is not None:
dct[function_param.name.value] = param_annotation
return_annotation = funcdef.annotation
if return_annotation:
dct['return'] = return_annotation
return dct
def resolve_forward_references(context, all_annotations):
def resolve(node):
if node is None or node.type != 'string':
return node
node = _get_forward_reference_node(
context,
context.inference_state.compiled_subprocess.safe_literal_eval(
node.value,
),
)
if node is None:
# There was a string, but it's not a valid annotation
return None
# The forward reference tree has an additional root node ('eval_input')
# that we don't want. Extract the node we do want, that is equivalent to
# the nodes returned by `py__annotations__` for a non-quoted node.
node = node.children[0]
return node
return {name: resolve(node) for name, node in all_annotations.items()}
@inference_state_method_cache()
def infer_return_types(function, arguments):
"""
Infers the type of a function's return value,
according to type annotations.
"""
context = function.get_default_param_context()
all_annotations = resolve_forward_references(
context,
py__annotations__(function.tree_node),
)
annotation = all_annotations.get("return", None)
if annotation is None:
# If there is no Python 3-type annotation, look for an annotation
# comment.
node = function.tree_node
comment = parser_utils.get_following_comment_same_line(node)
if comment is None:
return NO_VALUES
match = re.match(r"^#\s*type:\s*\([^#]*\)\s*->\s*([^#]*)", comment)
if not match:
return NO_VALUES
return _infer_annotation_string(
context,
match.group(1).strip()
).execute_annotation()
unknown_type_vars = find_unknown_type_vars(context, annotation)
annotation_values = infer_annotation(context, annotation)
if not unknown_type_vars:
return annotation_values.execute_annotation()
type_var_dict = infer_type_vars_for_execution(function, arguments, all_annotations)
return ValueSet.from_sets(
ann.define_generics(type_var_dict)
if isinstance(ann, (DefineGenericBaseClass, TypeVar)) else ValueSet({ann})
for ann in annotation_values
).execute_annotation()
def infer_type_vars_for_execution(function, arguments, annotation_dict):
"""
Some functions use type vars that are not defined by the class, but rather
only defined in the function. See for example `iter`. In those cases we
want to:
1. Search for undefined type vars.
2. Infer type vars with the execution state we have.
3. Return the union of all type vars that have been found.
"""
context = function.get_default_param_context()
annotation_variable_results = {}
executed_param_names = get_executed_param_names(function, arguments)
for executed_param_name in executed_param_names:
try:
annotation_node = annotation_dict[executed_param_name.string_name]
except KeyError:
continue
annotation_variables = find_unknown_type_vars(context, annotation_node)
if annotation_variables:
# Infer unknown type var
annotation_value_set = context.infer_node(annotation_node)
kind = executed_param_name.get_kind()
actual_value_set = executed_param_name.infer()
if kind is Parameter.VAR_POSITIONAL:
actual_value_set = actual_value_set.merge_types_of_iterate()
elif kind is Parameter.VAR_KEYWORD:
# TODO _dict_values is not public.
actual_value_set = actual_value_set.try_merge('_dict_values')
merge_type_var_dicts(
annotation_variable_results,
annotation_value_set.infer_type_vars(actual_value_set),
)
return annotation_variable_results
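# Worked sketch of the three steps above (hypothetical function, not from
# the original source): for ``def first(it: Iterable[_T]) -> _T`` called as
# ``first([1, 2])``, step 1 finds the otherwise undefined ``_T`` in the
# annotation, step 2 matches it against the inferred argument type ``int``,
# and step 3 returns roughly ``{'_T': ValueSet([int])}``.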
def infer_return_for_callable(arguments, param_values, result_values):
all_type_vars = {}
for pv in param_values:
if pv.array_type == 'list':
type_var_dict = _infer_type_vars_for_callable(arguments, pv.py__iter__())
all_type_vars.update(type_var_dict)
return ValueSet.from_sets(
v.define_generics(all_type_vars)
if isinstance(v, (DefineGenericBaseClass, TypeVar))
else ValueSet({v})
for v in result_values
).execute_annotation()
def _infer_type_vars_for_callable(arguments, lazy_params):
"""
    Infers type vars for the Callable class:
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
annotation_variable_results = {}
for (_, lazy_value), lazy_callable_param in zip(arguments.unpack(), lazy_params):
callable_param_values = lazy_callable_param.infer()
# Infer unknown type var
actual_value_set = lazy_value.infer()
merge_type_var_dicts(
annotation_variable_results,
callable_param_values.infer_type_vars(actual_value_set),
)
return annotation_variable_results
def merge_type_var_dicts(base_dict, new_dict):
for type_var_name, values in new_dict.items():
if values:
try:
base_dict[type_var_name] |= values
except KeyError:
base_dict[type_var_name] = values
def merge_pairwise_generics(annotation_value, annotated_argument_class):
"""
Match up the generic parameters from the given argument class to the
target annotation.
This walks the generic parameters immediately within the annotation and
argument's type, in order to determine the concrete values of the
annotation's parameters for the current case.
For example, given the following code:
def values(mapping: Mapping[K, V]) -> List[V]: ...
for val in values({1: 'a'}):
val
Then this function should be given representations of `Mapping[K, V]`
and `Mapping[int, str]`, so that it can determine that `K` is `int and
`V` is `str`.
Note that it is responsibility of the caller to traverse the MRO of the
argument type as needed in order to find the type matching the
annotation (in this case finding `Mapping[int, str]` as a parent of
`Dict[int, str]`).
Parameters
----------
`annotation_value`: represents the annotation to infer the concrete
parameter types of.
`annotated_argument_class`: represents the annotated class of the
argument being passed to the object annotated by `annotation_value`.
"""
type_var_dict = {}
if not isinstance(annotated_argument_class, DefineGenericBaseClass):
return type_var_dict
annotation_generics = annotation_value.get_generics()
actual_generics = annotated_argument_class.get_generics()
for annotation_generics_set, actual_generic_set in zip(annotation_generics, actual_generics):
merge_type_var_dicts(
type_var_dict,
annotation_generics_set.infer_type_vars(actual_generic_set.execute_annotation()),
)
return type_var_dict
def find_type_from_comment_hint_for(context, node, name):
return _find_type_from_comment_hint(context, node, node.children[1], name)
def find_type_from_comment_hint_with(context, node, name):
assert len(node.children[1].children) == 3, \
"Can only be here when children[1] is 'foo() as f'"
varlist = node.children[1].children[2]
return _find_type_from_comment_hint(context, node, varlist, name)
def find_type_from_comment_hint_assign(context, node, name):
return _find_type_from_comment_hint(context, node, node.children[0], name)
def _find_type_from_comment_hint(context, node, varlist, name):
index = None
if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
# something like "a, b = 1, 2"
index = 0
for child in varlist.children:
if child == name:
break
if child.type == "operator":
continue
index += 1
else:
return []
comment = parser_utils.get_following_comment_same_line(node)
if comment is None:
return []
match = re.match(r"^#\s*type:\s*([^#]*)", comment)
if match is None:
return []
return _infer_annotation_string(
context, match.group(1).strip(), index
).execute_annotation()
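# Example of the comment hints handled above (hypothetical code):
#
#     a, b = foo()  # type: int, str
#
# For ``b`` the varlist walk computes index 1, so the annotation string
# "int, str" is parsed as a tuple and indexed to yield ``str``.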
def find_unknown_type_vars(context, node):
def check_node(node):
if node.type in ('atom_expr', 'power'):
trailer = node.children[-1]
if trailer.type == 'trailer' and trailer.children[0] == '[':
for subscript_node in _unpack_subscriptlist(trailer.children[1]):
check_node(subscript_node)
else:
found[:] = _filter_type_vars(context.infer_node(node), found)
found = [] # We're not using a set, because the order matters.
check_node(node)
return found
def _filter_type_vars(value_set, found=()):
new_found = list(found)
for type_var in value_set:
if isinstance(type_var, TypeVar) and type_var not in found:
new_found.append(type_var)
return new_found
def _unpack_subscriptlist(subscriptlist):
if subscriptlist.type == 'subscriptlist':
for subscript in subscriptlist.children[::2]:
if subscript.type != 'subscript':
yield subscript
else:
if subscriptlist.type != 'subscript':
yield subscriptlist
| 15,932 | Python | .py | 380 | 33.878947 | 97 | 0.650498 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,932 | generics.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/generics.py | """
This module is about generics, like the `int` in `List[int]`. It's not about
the Generic class.
"""
from jedi import debug
from jedi.cache import memoize_method
from jedi.inference.utils import to_tuple
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.value.iterable import SequenceLiteralValue
from jedi.inference.helpers import is_string
def _resolve_forward_references(context, value_set):
for value in value_set:
if is_string(value):
from jedi.inference.gradual.annotation import _get_forward_reference_node
node = _get_forward_reference_node(context, value.get_safe_value())
if node is not None:
for c in context.infer_node(node):
yield c
else:
yield value
class _AbstractGenericManager:
def get_index_and_execute(self, index):
try:
return self[index].execute_annotation()
except IndexError:
debug.warning('No param #%s found for annotation %s', index, self)
return NO_VALUES
def get_type_hint(self):
return '[%s]' % ', '.join(t.get_type_hint(add_class_info=False) for t in self.to_tuple())
class LazyGenericManager(_AbstractGenericManager):
def __init__(self, context_of_index, index_value):
self._context_of_index = context_of_index
self._index_value = index_value
@memoize_method
def __getitem__(self, index):
return self._tuple()[index]()
def __len__(self):
return len(self._tuple())
@memoize_method
@to_tuple
def _tuple(self):
def lambda_scoping_in_for_loop_sucks(lazy_value):
return lambda: ValueSet(_resolve_forward_references(
self._context_of_index,
lazy_value.infer()
))
if isinstance(self._index_value, SequenceLiteralValue):
for lazy_value in self._index_value.py__iter__(contextualized_node=None):
yield lambda_scoping_in_for_loop_sucks(lazy_value)
else:
yield lambda: ValueSet(_resolve_forward_references(
self._context_of_index,
ValueSet([self._index_value])
))
@to_tuple
def to_tuple(self):
for callable_ in self._tuple():
yield callable_()
def is_homogenous_tuple(self):
if isinstance(self._index_value, SequenceLiteralValue):
entries = self._index_value.get_tree_entries()
if len(entries) == 2 and entries[1] == '...':
return True
return False
def __repr__(self):
return '<LazyG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))
class TupleGenericManager(_AbstractGenericManager):
def __init__(self, tup):
self._tuple = tup
def __getitem__(self, index):
return self._tuple[index]
def __len__(self):
return len(self._tuple)
def to_tuple(self):
return self._tuple
def is_homogenous_tuple(self):
return False
def __repr__(self):
return '<TupG>[%s]' % (', '.join(repr(x) for x in self.to_tuple()))
| 3,144 | Python | .py | 79 | 31.329114 | 97 | 0.621755 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,933 | typeshed.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/typeshed.py | import os
import re
from functools import wraps
from collections import namedtuple
from typing import Dict, Mapping, Tuple
from pathlib import Path
from jedi import settings
from jedi.file_io import FileIO
from jedi.parser_utils import get_cached_code_lines
from jedi.inference.base_value import ValueSet, NO_VALUES
from jedi.inference.gradual.stub_value import TypingModuleWrapper, StubModuleValue
from jedi.inference.value import ModuleValue
_jedi_path = Path(__file__).parent.parent.parent
TYPESHED_PATH = _jedi_path.joinpath('third_party', 'typeshed')
DJANGO_INIT_PATH = _jedi_path.joinpath('third_party', 'django-stubs',
'django-stubs', '__init__.pyi')
_IMPORT_MAP = dict(
_collections='collections',
_socket='socket',
)
PathInfo = namedtuple('PathInfo', 'path is_third_party')
def _merge_create_stub_map(path_infos):
map_ = {}
for directory_path_info in path_infos:
map_.update(_create_stub_map(directory_path_info))
return map_
def _create_stub_map(directory_path_info):
"""
Create a mapping of an importable name in Python to a stub file.
"""
def generate():
try:
listed = os.listdir(directory_path_info.path)
except (FileNotFoundError, NotADirectoryError):
return
for entry in listed:
path = os.path.join(directory_path_info.path, entry)
if os.path.isdir(path):
init = os.path.join(path, '__init__.pyi')
if os.path.isfile(init):
yield entry, PathInfo(init, directory_path_info.is_third_party)
elif entry.endswith('.pyi') and os.path.isfile(path):
name = entry[:-4]
if name != '__init__':
yield name, PathInfo(path, directory_path_info.is_third_party)
# Create a dictionary from the tuple generator.
return dict(generate())
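# Illustrative result (hedged): for a stub directory containing
# ``os/__init__.pyi`` and ``sys.pyi``, the returned map is roughly
# ``{'os': PathInfo('.../os/__init__.pyi', False),
#    'sys': PathInfo('.../sys.pyi', False)}``
# with is_third_party inherited from the directory's PathInfo.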
def _get_typeshed_directories(version_info):
check_version_list = ['2and3', '3']
for base in ['stdlib', 'third_party']:
base_path = TYPESHED_PATH.joinpath(base)
base_list = os.listdir(base_path)
for base_list_entry in base_list:
match = re.match(r'(\d+)\.(\d+)$', base_list_entry)
if match is not None:
if match.group(1) == '3' and int(match.group(2)) <= version_info.minor:
check_version_list.append(base_list_entry)
for check_version in check_version_list:
is_third_party = base != 'stdlib'
yield PathInfo(str(base_path.joinpath(check_version)), is_third_party)
_version_cache: Dict[Tuple[int, int], Mapping[str, PathInfo]] = {}
def _cache_stub_file_map(version_info):
"""
Returns a map of an importable name in Python to a stub file.
"""
# TODO this caches the stub files indefinitely, maybe use a time cache
# for that?
version = version_info[:2]
try:
return _version_cache[version]
except KeyError:
pass
_version_cache[version] = file_set = \
_merge_create_stub_map(_get_typeshed_directories(version_info))
return file_set
def import_module_decorator(func):
@wraps(func)
def wrapper(inference_state, import_names, parent_module_value, sys_path, prefer_stubs):
python_value_set = inference_state.module_cache.get(import_names)
if python_value_set is None:
if parent_module_value is not None and parent_module_value.is_stub():
parent_module_values = parent_module_value.non_stub_value_set
else:
parent_module_values = [parent_module_value]
if import_names == ('os', 'path'):
# This is a huge exception, we follow a nested import
# ``os.path``, because it's a very important one in Python
# that is being achieved by messing with ``sys.modules`` in
# ``os``.
python_value_set = ValueSet.from_sets(
func(inference_state, (n,), None, sys_path,)
for n in ['posixpath', 'ntpath', 'macpath', 'os2emxpath']
)
else:
python_value_set = ValueSet.from_sets(
func(inference_state, import_names, p, sys_path,)
for p in parent_module_values
)
inference_state.module_cache.add(import_names, python_value_set)
if not prefer_stubs or import_names[0] in settings.auto_import_modules:
return python_value_set
stub = try_to_load_stub_cached(inference_state, import_names, python_value_set,
parent_module_value, sys_path)
if stub is not None:
return ValueSet([stub])
return python_value_set
return wrapper
def try_to_load_stub_cached(inference_state, import_names, *args, **kwargs):
if import_names is None:
return None
try:
return inference_state.stub_module_cache[import_names]
except KeyError:
pass
# TODO is this needed? where are the exceptions coming from that make this
# necessary? Just remove this line.
inference_state.stub_module_cache[import_names] = None
inference_state.stub_module_cache[import_names] = result = \
_try_to_load_stub(inference_state, import_names, *args, **kwargs)
return result
def _try_to_load_stub(inference_state, import_names, python_value_set,
parent_module_value, sys_path):
"""
Trying to load a stub for a set of import_names.
This is modelled to work like "PEP 561 -- Distributing and Packaging Type
Information", see https://www.python.org/dev/peps/pep-0561.
"""
if parent_module_value is None and len(import_names) > 1:
try:
parent_module_value = try_to_load_stub_cached(
inference_state, import_names[:-1], NO_VALUES,
parent_module_value=None, sys_path=sys_path)
except KeyError:
pass
# 1. Try to load foo-stubs folders on path for import name foo.
if len(import_names) == 1:
# foo-stubs
for p in sys_path:
init = os.path.join(p, *import_names) + '-stubs' + os.path.sep + '__init__.pyi'
m = _try_to_load_stub_from_file(
inference_state,
python_value_set,
file_io=FileIO(init),
import_names=import_names,
)
if m is not None:
return m
if import_names[0] == 'django' and python_value_set:
return _try_to_load_stub_from_file(
inference_state,
python_value_set,
file_io=FileIO(str(DJANGO_INIT_PATH)),
import_names=import_names,
)
# 2. Try to load pyi files next to py files.
for c in python_value_set:
try:
method = c.py__file__
except AttributeError:
pass
else:
file_path = method()
file_paths = []
if c.is_namespace():
file_paths = [os.path.join(p, '__init__.pyi') for p in c.py__path__()]
elif file_path is not None and file_path.suffix == '.py':
file_paths = [str(file_path) + 'i']
for file_path in file_paths:
m = _try_to_load_stub_from_file(
inference_state,
python_value_set,
# The file path should end with .pyi
file_io=FileIO(file_path),
import_names=import_names,
)
if m is not None:
return m
# 3. Try to load typeshed
m = _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names)
if m is not None:
return m
# 4. Try to load pyi file somewhere if python_value_set was not defined.
if not python_value_set:
if parent_module_value is not None:
check_path = parent_module_value.py__path__() or []
            # With a parent module, only the last name has to be looked up.
names_for_path = (import_names[-1],)
else:
check_path = sys_path
names_for_path = import_names
for p in check_path:
m = _try_to_load_stub_from_file(
inference_state,
python_value_set,
file_io=FileIO(os.path.join(p, *names_for_path) + '.pyi'),
import_names=import_names,
)
if m is not None:
return m
# If no stub is found, that's fine, the calling function has to deal with
# it.
return None
def _load_from_typeshed(inference_state, python_value_set, parent_module_value, import_names):
import_name = import_names[-1]
map_ = None
if len(import_names) == 1:
map_ = _cache_stub_file_map(inference_state.grammar.version_info)
import_name = _IMPORT_MAP.get(import_name, import_name)
elif isinstance(parent_module_value, ModuleValue):
if not parent_module_value.is_package():
# Only if it's a package (= a folder) something can be
# imported.
return None
paths = parent_module_value.py__path__()
# Once the initial package has been loaded, the sub packages will
# always be loaded, regardless if they are there or not. This makes
# sense, IMO, because stubs take preference, even if the original
# library doesn't provide a module (it could be dynamic). ~dave
map_ = _merge_create_stub_map([PathInfo(p, is_third_party=False) for p in paths])
if map_ is not None:
path_info = map_.get(import_name)
if path_info is not None and (not path_info.is_third_party or python_value_set):
return _try_to_load_stub_from_file(
inference_state,
python_value_set,
file_io=FileIO(path_info.path),
import_names=import_names,
)
def _try_to_load_stub_from_file(inference_state, python_value_set, file_io, import_names):
try:
stub_module_node = parse_stub_module(inference_state, file_io)
except OSError:
# The file that you're looking for doesn't exist (anymore).
return None
else:
return create_stub_module(
inference_state, inference_state.latest_grammar, python_value_set,
stub_module_node, file_io, import_names
)
def parse_stub_module(inference_state, file_io):
return inference_state.parse(
file_io=file_io,
cache=True,
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
use_latest_grammar=True
)
def create_stub_module(inference_state, grammar, python_value_set,
stub_module_node, file_io, import_names):
if import_names == ('typing',):
module_cls = TypingModuleWrapper
else:
module_cls = StubModuleValue
file_name = os.path.basename(file_io.path)
stub_module_value = module_cls(
python_value_set, inference_state, stub_module_node,
file_io=file_io,
string_names=import_names,
# The code was loaded with latest_grammar, so use
# that.
code_lines=get_cached_code_lines(grammar, file_io.path),
is_package=file_name == '__init__.pyi',
)
return stub_module_value
| 11,467 | Python | .py | 264 | 33.356061 | 97 | 0.603926 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,934 | type_var.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/type_var.py | from jedi import debug
from jedi.inference.base_value import ValueSet, NO_VALUES, ValueWrapper
from jedi.inference.gradual.base import BaseTypingValue
class TypeVarClass(ValueWrapper):
def py__call__(self, arguments):
unpacked = arguments.unpack()
key, lazy_value = next(unpacked, (None, None))
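        # Hedged example: for ``T = TypeVar('T', bound=int)`` this first
        # unpacked argument is the lazy value for the name string 'T'; the
        # remaining ``bound=int`` pair stays in ``unpacked`` and is consumed
        # later by ``TypeVar.__init__``.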
var_name = self._find_string_name(lazy_value)
# The name must be given, otherwise it's useless.
if var_name is None or key is not None:
debug.warning('Found a variable without a name %s', arguments)
return NO_VALUES
return ValueSet([TypeVar.create_cached(
self.inference_state,
self.parent_context,
tree_name=self.tree_node.name,
var_name=var_name,
unpacked_args=unpacked,
)])
def _find_string_name(self, lazy_value):
if lazy_value is None:
return None
value_set = lazy_value.infer()
if not value_set:
return None
if len(value_set) > 1:
debug.warning('Found multiple values for a type variable: %s', value_set)
name_value = next(iter(value_set))
try:
method = name_value.get_safe_value
except AttributeError:
return None
else:
safe_value = method(default=None)
if isinstance(safe_value, str):
return safe_value
return None
class TypeVar(BaseTypingValue):
def __init__(self, parent_context, tree_name, var_name, unpacked_args):
super().__init__(parent_context, tree_name)
self._var_name = var_name
self._constraints_lazy_values = []
self._bound_lazy_value = None
self._covariant_lazy_value = None
self._contravariant_lazy_value = None
for key, lazy_value in unpacked_args:
if key is None:
self._constraints_lazy_values.append(lazy_value)
else:
if key == 'bound':
self._bound_lazy_value = lazy_value
elif key == 'covariant':
self._covariant_lazy_value = lazy_value
elif key == 'contravariant':
                    self._contravariant_lazy_value = lazy_value
else:
debug.warning('Invalid TypeVar param name %s', key)
def py__name__(self):
return self._var_name
def get_filters(self, *args, **kwargs):
return iter([])
def _get_classes(self):
if self._bound_lazy_value is not None:
return self._bound_lazy_value.infer()
if self._constraints_lazy_values:
return self.constraints
debug.warning('Tried to infer the TypeVar %s without a given type', self._var_name)
return NO_VALUES
def is_same_class(self, other):
# Everything can match an undefined type var.
return True
@property
def constraints(self):
return ValueSet.from_sets(
lazy.infer() for lazy in self._constraints_lazy_values
)
def define_generics(self, type_var_dict):
try:
found = type_var_dict[self.py__name__()]
except KeyError:
pass
else:
if found:
return found
return ValueSet({self})
def execute_annotation(self):
return self._get_classes().execute_annotation()
def infer_type_vars(self, value_set):
def iterate():
for v in value_set:
cls = v.py__class__()
if v.is_function() or v.is_class():
cls = TypeWrapper(cls, v)
yield cls
annotation_name = self.py__name__()
return {annotation_name: ValueSet(iterate())}
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.py__name__())
class TypeWrapper(ValueWrapper):
def __init__(self, wrapped_value, original_value):
super().__init__(wrapped_value)
self._original_value = original_value
def execute_annotation(self):
return ValueSet({self._original_value})
| 4,139 | Python | .py | 104 | 29.346154 | 91 | 0.583998 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,935 | utils.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/utils.py | from pathlib import Path
from jedi.inference.gradual.typeshed import TYPESHED_PATH, create_stub_module
def load_proper_stub_module(inference_state, grammar, file_io, import_names, module_node):
"""
This function is given a random .pyi file and should return the proper
module.
"""
path = file_io.path
path = Path(path)
assert path.suffix == '.pyi'
try:
relative_path = path.relative_to(TYPESHED_PATH)
except ValueError:
pass
else:
# /[...]/stdlib/3/os/__init__.pyi -> stdlib/3/os/__init__
rest = relative_path.with_suffix('')
# Remove the stdlib/3 or third_party/3.6 part
import_names = rest.parts[2:]
if rest.name == '__init__':
import_names = import_names[:-1]
if import_names is not None:
actual_value_set = inference_state.import_module(import_names, prefer_stubs=False)
stub = create_stub_module(
inference_state, grammar, actual_value_set,
module_node, file_io, import_names
)
inference_state.stub_module_cache[import_names] = stub
return stub
return None
| 1,147 | Python | .py | 30 | 31.066667 | 90 | 0.641187 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,936 | stub_value.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/stub_value.py | from jedi.inference.base_value import ValueWrapper
from jedi.inference.value.module import ModuleValue
from jedi.inference.filters import ParserTreeFilter
from jedi.inference.names import StubName, StubModuleName
from jedi.inference.gradual.typing import TypingModuleFilterWrapper
from jedi.inference.context import ModuleContext
class StubModuleValue(ModuleValue):
_module_name_class = StubModuleName
def __init__(self, non_stub_value_set, *args, **kwargs):
super().__init__(*args, **kwargs)
self.non_stub_value_set = non_stub_value_set
def is_stub(self):
return True
def sub_modules_dict(self):
"""
We have to overwrite this, because it's possible to have stubs that
        don't have code for all the child modules. At the time of writing,
        there are for example no stubs for `json.tool`.
"""
names = {}
for value in self.non_stub_value_set:
try:
method = value.sub_modules_dict
except AttributeError:
pass
else:
names.update(method())
names.update(super().sub_modules_dict())
return names
def _get_stub_filters(self, origin_scope):
return [StubFilter(
parent_context=self.as_context(),
origin_scope=origin_scope
)] + list(self.iter_star_filters())
def get_filters(self, origin_scope=None):
filters = super().get_filters(origin_scope)
next(filters, None) # Ignore the first filter and replace it with our own
stub_filters = self._get_stub_filters(origin_scope=origin_scope)
yield from stub_filters
yield from filters
def _as_context(self):
return StubModuleContext(self)
class StubModuleContext(ModuleContext):
def get_filters(self, until_position=None, origin_scope=None):
# Make sure to ignore the position, because positions are not relevant
# for stubs.
return super().get_filters(origin_scope=origin_scope)
class TypingModuleWrapper(StubModuleValue):
def get_filters(self, *args, **kwargs):
filters = super().get_filters(*args, **kwargs)
f = next(filters, None)
assert f is not None
yield TypingModuleFilterWrapper(f)
yield from filters
def _as_context(self):
return TypingModuleContext(self)
class TypingModuleContext(ModuleContext):
def get_filters(self, *args, **kwargs):
filters = super().get_filters(*args, **kwargs)
yield TypingModuleFilterWrapper(next(filters, None))
yield from filters
class StubFilter(ParserTreeFilter):
name_class = StubName
def _is_name_reachable(self, name):
if not super()._is_name_reachable(name):
return False
# Imports in stub files are only public if they have an "as"
# export.
definition = name.get_definition()
if definition.type in ('import_from', 'import_name'):
if name.parent.type not in ('import_as_name', 'dotted_as_name'):
return False
n = name.value
# TODO rewrite direct return
if n.startswith('_') and not (n.startswith('__') and n.endswith('__')):
return False
return True
class VersionInfo(ValueWrapper):
pass
| 3,329 | Python | .py | 79 | 33.987342 | 82 | 0.659957 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,937 | __init__.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/__init__.py | """
It is unfortunately not well documented how stubs and annotations work in Jedi.
If somebody needs an introduction, please let me know.
"""
| 143 | Python | .py | 4 | 34.75 | 79 | 0.791367 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,938 | base.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/base.py | from jedi.inference.cache import inference_state_method_cache
from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \
iterator_to_value_set, LazyValueWrapper, ValueWrapper
from jedi.inference.compiled import builtin_from_name
from jedi.inference.value.klass import ClassFilter
from jedi.inference.value.klass import ClassMixin
from jedi.inference.utils import to_list
from jedi.inference.names import AbstractNameDefinition, ValueName
from jedi.inference.context import ClassContext
from jedi.inference.gradual.generics import TupleGenericManager
class _BoundTypeVarName(AbstractNameDefinition):
"""
This type var was bound to a certain type, e.g. int.
"""
def __init__(self, type_var, value_set):
self._type_var = type_var
self.parent_context = type_var.parent_context
self._value_set = value_set
def infer(self):
def iter_():
for value in self._value_set:
# Replace any with the constraints if they are there.
from jedi.inference.gradual.typing import AnyClass
if isinstance(value, AnyClass):
yield from self._type_var.constraints
else:
yield value
return ValueSet(iter_())
def py__name__(self):
return self._type_var.py__name__()
def __repr__(self):
return '<%s %s -> %s>' % (self.__class__.__name__, self.py__name__(), self._value_set)
class _TypeVarFilter:
"""
A filter for all given variables in a class.
A = TypeVar('A')
B = TypeVar('B')
class Foo(Mapping[A, B]):
...
In this example we would have two type vars given: A and B
"""
def __init__(self, generics, type_vars):
self._generics = generics
self._type_vars = type_vars
def get(self, name):
for i, type_var in enumerate(self._type_vars):
if type_var.py__name__() == name:
try:
return [_BoundTypeVarName(type_var, self._generics[i])]
except IndexError:
return [type_var.name]
return []
def values(self):
# The values are not relevant. If it's not searched exactly, the type
# vars are just global and should be looked up as that.
return []
class _AnnotatedClassContext(ClassContext):
def get_filters(self, *args, **kwargs):
filters = super().get_filters(
*args, **kwargs
)
yield from filters
# The type vars can only be looked up if it's a global search and
# not a direct lookup on the class.
yield self._value.get_type_var_filter()
class DefineGenericBaseClass(LazyValueWrapper):
def __init__(self, generics_manager):
self._generics_manager = generics_manager
def _create_instance_with_generics(self, generics_manager):
raise NotImplementedError
@inference_state_method_cache()
def get_generics(self):
return self._generics_manager.to_tuple()
def define_generics(self, type_var_dict):
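        # Hedged example: for a value wrapping a hypothetical ``Foo[T]`` and
        # a ``type_var_dict`` mapping 'T' to {int}, this yields a new
        # ``Foo[int]``; generics without a matching type var pass through
        # unchanged.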
from jedi.inference.gradual.type_var import TypeVar
changed = False
new_generics = []
for generic_set in self.get_generics():
values = NO_VALUES
for generic in generic_set:
if isinstance(generic, (DefineGenericBaseClass, TypeVar)):
result = generic.define_generics(type_var_dict)
values |= result
if result != ValueSet({generic}):
changed = True
else:
values |= ValueSet([generic])
new_generics.append(values)
if not changed:
# There might not be any type vars that change. In that case just
# return itself, because it does not make sense to potentially lose
# cached results.
return ValueSet([self])
return ValueSet([self._create_instance_with_generics(
TupleGenericManager(tuple(new_generics))
)])
def is_same_class(self, other):
if not isinstance(other, DefineGenericBaseClass):
return False
if self.tree_node != other.tree_node:
# TODO not sure if this is nice.
return False
given_params1 = self.get_generics()
given_params2 = other.get_generics()
if len(given_params1) != len(given_params2):
# If the amount of type vars doesn't match, the class doesn't
# match.
return False
# Now compare generics
return all(
any(
# TODO why is this ordering the correct one?
cls2.is_same_class(cls1)
# TODO I'm still not sure gather_annotation_classes is a good
# idea. They are essentially here to avoid comparing Tuple <=>
# tuple and instead compare tuple <=> tuple, but at the moment
# the whole `is_same_class` and `is_sub_class` matching is just
# not in the best shape.
for cls1 in class_set1.gather_annotation_classes()
for cls2 in class_set2.gather_annotation_classes()
) for class_set1, class_set2 in zip(given_params1, given_params2)
)
def get_signatures(self):
return []
def __repr__(self):
return '<%s: %s%s>' % (
self.__class__.__name__,
self._wrapped_value,
list(self.get_generics()),
)
class GenericClass(DefineGenericBaseClass, ClassMixin):
"""
A class that is defined with generics, might be something simple like:
class Foo(Generic[T]): ...
my_foo_int_cls = Foo[int]
"""
def __init__(self, class_value, generics_manager):
super().__init__(generics_manager)
self._class_value = class_value
def _get_wrapped_value(self):
return self._class_value
def get_type_hint(self, add_class_info=True):
n = self.py__name__()
# Not sure if this is the best way to do this, but all of these types
# are a bit special in that they have type aliases and other ways to
# become lower case. It's probably better to make them upper case,
# because that's what you can use in annotations.
n = dict(list="List", dict="Dict", set="Set", tuple="Tuple").get(n, n)
s = n + self._generics_manager.get_type_hint()
if add_class_info:
return 'Type[%s]' % s
return s
def get_type_var_filter(self):
return _TypeVarFilter(self.get_generics(), self.list_type_vars())
def py__call__(self, arguments):
instance, = super().py__call__(arguments)
return ValueSet([_GenericInstanceWrapper(instance)])
def _as_context(self):
return _AnnotatedClassContext(self)
@to_list
def py__bases__(self):
for base in self._wrapped_value.py__bases__():
yield _LazyGenericBaseClass(self, base, self._generics_manager)
def _create_instance_with_generics(self, generics_manager):
return GenericClass(self._class_value, generics_manager)
def is_sub_class_of(self, class_value):
if super().is_sub_class_of(class_value):
return True
return self._class_value.is_sub_class_of(class_value)
def with_generics(self, generics_tuple):
return self._class_value.with_generics(generics_tuple)
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
annotation_name = self.py__name__()
type_var_dict = {}
if annotation_name == 'Iterable':
annotation_generics = self.get_generics()
if annotation_generics:
return annotation_generics[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# Note: we need to handle the MRO _in order_, so we need to extract
# the elements from the set first, then handle them, even if we put
# them back in a set afterwards.
for py_class in value_set:
if py_class.is_instance() and not py_class.is_compiled():
py_class = py_class.get_annotated_class_object()
else:
continue
if py_class.api_type != 'class':
# Functions & modules don't have an MRO and we're not
# expecting a Callable (those are handled separately within
# TypingClassValueWithIndex).
continue
for parent_class in py_class.py__mro__():
class_name = parent_class.py__name__()
if annotation_name == class_name:
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self, parent_class),
)
break
return type_var_dict
class _LazyGenericBaseClass:
def __init__(self, class_value, lazy_base_class, generics_manager):
self._class_value = class_value
self._lazy_base_class = lazy_base_class
self._generics_manager = generics_manager
@iterator_to_value_set
def infer(self):
for base in self._lazy_base_class.infer():
if isinstance(base, GenericClass):
# Here we have to recalculate the given types.
yield GenericClass.create_cached(
base.inference_state,
base._wrapped_value,
TupleGenericManager(tuple(self._remap_type_vars(base))),
)
else:
if base.is_class_mixin():
# This case basically allows classes like `class Foo(List)`
# to be used like `Foo[int]`. The generics are not
# necessary and can be used later.
yield GenericClass.create_cached(
base.inference_state,
base,
self._generics_manager,
)
else:
yield base
def _remap_type_vars(self, base):
from jedi.inference.gradual.type_var import TypeVar
filter = self._class_value.get_type_var_filter()
for type_var_set in base.get_generics():
new = NO_VALUES
for type_var in type_var_set:
if isinstance(type_var, TypeVar):
names = filter.get(type_var.py__name__())
new |= ValueSet.from_sets(
name.infer() for name in names
)
else:
# Mostly will be type vars, except if in some cases
# a concrete type will already be there. In that
# case just add it to the value set.
new |= ValueSet([type_var])
yield new
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._lazy_base_class)
class _GenericInstanceWrapper(ValueWrapper):
def py__stop_iteration_returns(self):
for cls in self._wrapped_value.class_value.py__mro__():
if cls.py__name__() == 'Generator':
generics = cls.get_generics()
try:
return generics[2].execute_annotation()
except IndexError:
pass
elif cls.py__name__() == 'Iterator':
return ValueSet([builtin_from_name(self.inference_state, 'None')])
return self._wrapped_value.py__stop_iteration_returns()
def get_type_hint(self, add_class_info=True):
return self._wrapped_value.class_value.get_type_hint(add_class_info=False)
class _PseudoTreeNameClass(Value):
"""
In typeshed, some classes are defined like this:
Tuple: _SpecialForm = ...
Now this is not a real class, therefore we have to do some workarounds like
this class. Essentially this class makes it possible to goto that `Tuple`
name, without affecting anything else negatively.
"""
api_type = 'class'
def __init__(self, parent_context, tree_name):
super().__init__(
parent_context.inference_state,
parent_context
)
self._tree_name = tree_name
@property
def tree_node(self):
return self._tree_name
def get_filters(self, *args, **kwargs):
# TODO this is obviously wrong. Is it though?
class EmptyFilter(ClassFilter):
def __init__(self):
pass
def get(self, name, **kwargs):
return []
def values(self, **kwargs):
return []
yield EmptyFilter()
def py__class__(self):
# This might not be 100% correct, but it is good enough. The details of
# the typing library are not really an issue for Jedi.
return builtin_from_name(self.inference_state, 'type')
@property
def name(self):
return ValueName(self, self._tree_name)
def get_qualified_names(self):
return (self._tree_name.value,)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)
class BaseTypingValue(LazyValueWrapper):
def __init__(self, parent_context, tree_name):
self.inference_state = parent_context.inference_state
self.parent_context = parent_context
self._tree_name = tree_name
@property
def name(self):
return ValueName(self, self._tree_name)
def _get_wrapped_value(self):
return _PseudoTreeNameClass(self.parent_context, self._tree_name)
def get_signatures(self):
return self._wrapped_value.get_signatures()
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._tree_name.value)
class BaseTypingClassWithGenerics(DefineGenericBaseClass):
def __init__(self, parent_context, tree_name, generics_manager):
super().__init__(generics_manager)
self.inference_state = parent_context.inference_state
self.parent_context = parent_context
self._tree_name = tree_name
def _get_wrapped_value(self):
return _PseudoTreeNameClass(self.parent_context, self._tree_name)
def __repr__(self):
return '%s(%s%s)' % (self.__class__.__name__, self._tree_name.value,
self._generics_manager)
class BaseTypingInstance(LazyValueWrapper):
def __init__(self, parent_context, class_value, tree_name, generics_manager):
self.inference_state = class_value.inference_state
self.parent_context = parent_context
self._class_value = class_value
self._tree_name = tree_name
self._generics_manager = generics_manager
def py__class__(self):
return self._class_value
def get_annotated_class_object(self):
return self._class_value
def get_qualified_names(self):
return (self.py__name__(),)
@property
def name(self):
return ValueName(self, self._tree_name)
def _get_wrapped_value(self):
object_, = builtin_from_name(self.inference_state, 'object').execute_annotation()
return object_
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._generics_manager)
| 15,554 | Python | .py | 350 | 33.471429 | 99 | 0.593254 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,939 | conversion.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/conversion.py | from jedi import debug
from jedi.inference.base_value import ValueSet, \
NO_VALUES
from jedi.inference.utils import to_list
from jedi.inference.gradual.stub_value import StubModuleValue
from jedi.inference.gradual.typeshed import try_to_load_stub_cached
from jedi.inference.value.decorator import Decoratee
def _stub_to_python_value_set(stub_value, ignore_compiled=False):
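    # Hedged sketch: for the stub value of a hypothetical class ``foo.Bar``,
    # the qualified names ('Bar',) are resolved against the non-stub values
    # of module ``foo``; instances and bound methods are re-created on the
    # runtime side afterwards.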
stub_module_context = stub_value.get_root_context()
if not stub_module_context.is_stub():
return ValueSet([stub_value])
decorates = None
if isinstance(stub_value, Decoratee):
decorates = stub_value._original_value
was_instance = stub_value.is_instance()
if was_instance:
arguments = getattr(stub_value, '_arguments', None)
stub_value = stub_value.py__class__()
qualified_names = stub_value.get_qualified_names()
if qualified_names is None:
return NO_VALUES
was_bound_method = stub_value.is_bound_method()
if was_bound_method:
# Infer the object first. We can infer the method later.
method_name = qualified_names[-1]
qualified_names = qualified_names[:-1]
was_instance = True
arguments = None
values = _infer_from_stub(stub_module_context, qualified_names, ignore_compiled)
if was_instance:
values = ValueSet.from_sets(
c.execute_with_values() if arguments is None else c.execute(arguments)
for c in values
if c.is_class()
)
if was_bound_method:
# Now that the instance has been properly created, we can simply get
# the method.
values = values.py__getattribute__(method_name)
if decorates is not None:
values = ValueSet(Decoratee(v, decorates) for v in values)
return values
def _infer_from_stub(stub_module_context, qualified_names, ignore_compiled):
from jedi.inference.compiled.mixed import MixedObject
stub_module = stub_module_context.get_value()
assert isinstance(stub_module, (StubModuleValue, MixedObject)), stub_module_context
non_stubs = stub_module.non_stub_value_set
if ignore_compiled:
non_stubs = non_stubs.filter(lambda c: not c.is_compiled())
for name in qualified_names:
non_stubs = non_stubs.py__getattribute__(name)
return non_stubs
@to_list
def _try_stub_to_python_names(names, prefer_stub_to_compiled=False):
for name in names:
module_context = name.get_root_context()
if not module_context.is_stub():
yield name
continue
if name.api_type == 'module':
values = convert_values(name.infer(), ignore_compiled=prefer_stub_to_compiled)
if values:
for v in values:
yield v.name
continue
else:
v = name.get_defining_qualified_value()
if v is not None:
converted = _stub_to_python_value_set(v, ignore_compiled=prefer_stub_to_compiled)
if converted:
converted_names = converted.goto(name.get_public_name())
if converted_names:
for n in converted_names:
if n.get_root_context().is_stub():
# If it's a stub again, it means we're going in
# a circle. Probably some imports make it a
# stub again.
yield name
else:
yield n
continue
yield name
def _load_stub_module(module):
if module.is_stub():
return module
return try_to_load_stub_cached(
module.inference_state,
import_names=module.string_names,
python_value_set=ValueSet([module]),
parent_module_value=None,
sys_path=module.inference_state.get_sys_path(),
)
@to_list
def _python_to_stub_names(names, fallback_to_python=False):
for name in names:
module_context = name.get_root_context()
if module_context.is_stub():
yield name
continue
if name.api_type == 'module':
found_name = False
for n in name.goto():
if n.api_type == 'module':
values = convert_values(n.infer(), only_stubs=True)
for v in values:
yield v.name
found_name = True
else:
for x in _python_to_stub_names([n], fallback_to_python=fallback_to_python):
yield x
found_name = True
if found_name:
continue
else:
v = name.get_defining_qualified_value()
if v is not None:
converted = to_stub(v)
if converted:
converted_names = converted.goto(name.get_public_name())
if converted_names:
yield from converted_names
continue
if fallback_to_python:
# This is the part where if we haven't found anything, just return
# the stub name.
yield name
def convert_names(names, only_stubs=False, prefer_stubs=False, prefer_stub_to_compiled=True):
if only_stubs and prefer_stubs:
raise ValueError("You cannot use both of only_stubs and prefer_stubs.")
with debug.increase_indent_cm('convert names'):
if only_stubs or prefer_stubs:
return _python_to_stub_names(names, fallback_to_python=prefer_stubs)
else:
return _try_stub_to_python_names(
names, prefer_stub_to_compiled=prefer_stub_to_compiled)
def convert_values(values, only_stubs=False, prefer_stubs=False, ignore_compiled=True):
assert not (only_stubs and prefer_stubs)
with debug.increase_indent_cm('convert values'):
if only_stubs or prefer_stubs:
return ValueSet.from_sets(
to_stub(value)
or (ValueSet({value}) if prefer_stubs else NO_VALUES)
for value in values
)
else:
return ValueSet.from_sets(
_stub_to_python_value_set(stub_value, ignore_compiled=ignore_compiled)
or ValueSet({stub_value})
for stub_value in values
)
def to_stub(value):
if value.is_stub():
return ValueSet([value])
was_instance = value.is_instance()
if was_instance:
value = value.py__class__()
qualified_names = value.get_qualified_names()
stub_module = _load_stub_module(value.get_root_context().get_value())
if stub_module is None or qualified_names is None:
return NO_VALUES
was_bound_method = value.is_bound_method()
if was_bound_method:
# Infer the object first. We can infer the method later.
method_name = qualified_names[-1]
qualified_names = qualified_names[:-1]
was_instance = True
stub_values = ValueSet([stub_module])
for name in qualified_names:
stub_values = stub_values.py__getattribute__(name)
if was_instance:
stub_values = ValueSet.from_sets(
c.execute_with_values()
for c in stub_values
if c.is_class()
)
if was_bound_method:
# Now that the instance has been properly created, we can simply get
# the method.
stub_values = stub_values.py__getattribute__(method_name)
return stub_values
| 7,601 | Python | .py | 180 | 31.311111 | 97 | 0.600785 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,940 | typing.py | DamnWidget_anaconda/anaconda_lib/jedi/inference/gradual/typing.py | """
We need to somehow work with the typing objects. Since the typing objects are
pretty bare, we need to add all the Jedi customizations to make them work as
values.
This file deals with all the typing.py cases.
"""
import itertools
from jedi import debug
from jedi.inference.compiled import builtin_from_name, create_simple_object
from jedi.inference.base_value import ValueSet, NO_VALUES, Value, \
LazyValueWrapper, ValueWrapper
from jedi.inference.lazy_value import LazyKnownValues
from jedi.inference.arguments import repack_with_argument_clinic
from jedi.inference.filters import FilterWrapper
from jedi.inference.names import NameWrapper, ValueName
from jedi.inference.value.klass import ClassMixin
from jedi.inference.gradual.base import BaseTypingValue, \
BaseTypingClassWithGenerics, BaseTypingInstance
from jedi.inference.gradual.type_var import TypeVarClass
from jedi.inference.gradual.generics import LazyGenericManager, TupleGenericManager
_PROXY_CLASS_TYPES = 'Tuple Generic Protocol Callable Type'.split()
_TYPE_ALIAS_TYPES = {
'List': 'builtins.list',
'Dict': 'builtins.dict',
'Set': 'builtins.set',
'FrozenSet': 'builtins.frozenset',
'ChainMap': 'collections.ChainMap',
'Counter': 'collections.Counter',
'DefaultDict': 'collections.defaultdict',
'Deque': 'collections.deque',
}
_PROXY_TYPES = 'Optional Union ClassVar'.split()
class TypingModuleName(NameWrapper):
def infer(self):
return ValueSet(self._remap())
def _remap(self):
name = self.string_name
inference_state = self.parent_context.inference_state
try:
actual = _TYPE_ALIAS_TYPES[name]
except KeyError:
pass
else:
yield TypeAlias.create_cached(
inference_state, self.parent_context, self.tree_name, actual)
return
if name in _PROXY_CLASS_TYPES:
yield ProxyTypingClassValue.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name in _PROXY_TYPES:
yield ProxyTypingValue.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'runtime':
# We don't want anything here, not sure what this function is
# supposed to do, since it just appears in the stubs and shouldn't
# have any effects there (because it's never executed).
return
elif name == 'TypeVar':
cls, = self._wrapped_name.infer()
yield TypeVarClass.create_cached(inference_state, cls)
elif name == 'Any':
yield AnyClass.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'TYPE_CHECKING':
# This is needed for e.g. imports that are only available for type
# checking or are in cycles. The user can then check this variable.
yield builtin_from_name(inference_state, 'True')
elif name == 'overload':
yield OverloadFunction.create_cached(
inference_state, self.parent_context, self.tree_name)
elif name == 'NewType':
v, = self._wrapped_name.infer()
yield NewTypeFunction.create_cached(inference_state, v)
elif name == 'cast':
cast_fn, = self._wrapped_name.infer()
yield CastFunction.create_cached(inference_state, cast_fn)
elif name == 'TypedDict':
# TODO doesn't even exist in typeshed/typing.py, yet. But will be
# added soon.
yield TypedDictClass.create_cached(
inference_state, self.parent_context, self.tree_name)
else:
# Not necessary, as long as we are not doing type checking:
# no_type_check & no_type_check_decorator
# Everything else shouldn't be relevant...
yield from self._wrapped_name.infer()
class TypingModuleFilterWrapper(FilterWrapper):
name_wrapper_class = TypingModuleName
class ProxyWithGenerics(BaseTypingClassWithGenerics):
def execute_annotation(self):
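        # Hedged examples: ``Union[int, str]`` executes to the union of both
        # annotation classes, ``Optional[int]`` adds ``None`` to that union,
        # and ``Type[int]`` yields the class itself instead of an instance.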
string_name = self._tree_name.value
if string_name == 'Union':
# This is kind of a special case, because we have Unions (in Jedi
# ValueSets).
return self.gather_annotation_classes().execute_annotation()
elif string_name == 'Optional':
# Optional is basically just saying it's either None or the actual
# type.
return self.gather_annotation_classes().execute_annotation() \
| ValueSet([builtin_from_name(self.inference_state, 'None')])
elif string_name == 'Type':
# The type is actually already given in the index_value
return self._generics_manager[0]
elif string_name == 'ClassVar':
# For now don't do anything here, ClassVars are always used.
return self._generics_manager[0].execute_annotation()
mapped = {
'Tuple': Tuple,
'Generic': Generic,
'Protocol': Protocol,
'Callable': Callable,
}
cls = mapped[string_name]
return ValueSet([cls(
self.parent_context,
self,
self._tree_name,
generics_manager=self._generics_manager,
)])
def gather_annotation_classes(self):
return ValueSet.from_sets(self._generics_manager.to_tuple())
def _create_instance_with_generics(self, generics_manager):
return ProxyWithGenerics(
self.parent_context,
self._tree_name,
generics_manager
)
def infer_type_vars(self, value_set):
annotation_generics = self.get_generics()
if not annotation_generics:
return {}
annotation_name = self.py__name__()
if annotation_name == 'Optional':
# Optional[T] is equivalent to Union[T, None]. In Jedi unions
# are represented by members within a ValueSet, so we extract
# the T from the Optional[T] by removing the None value.
none = builtin_from_name(self.inference_state, 'None')
return annotation_generics[0].infer_type_vars(
value_set.filter(lambda x: x != none),
)
return {}
class ProxyTypingValue(BaseTypingValue):
index_class = ProxyWithGenerics
def with_generics(self, generics_tuple):
return self.index_class.create_cached(
self.inference_state,
self.parent_context,
self._tree_name,
generics_manager=TupleGenericManager(generics_tuple)
)
def py__getitem__(self, index_value_set, contextualized_node):
return ValueSet(
self.index_class.create_cached(
self.inference_state,
self.parent_context,
self._tree_name,
generics_manager=LazyGenericManager(
context_of_index=contextualized_node.context,
index_value=index_value,
)
) for index_value in index_value_set
)
class _TypingClassMixin(ClassMixin):
def py__bases__(self):
return [LazyKnownValues(
self.inference_state.builtins_module.py__getattribute__('object')
)]
def get_metaclasses(self):
return []
@property
def name(self):
return ValueName(self, self._tree_name)
class TypingClassWithGenerics(ProxyWithGenerics, _TypingClassMixin):
def infer_type_vars(self, value_set):
type_var_dict = {}
annotation_generics = self.get_generics()
if not annotation_generics:
return type_var_dict
annotation_name = self.py__name__()
if annotation_name == 'Type':
return annotation_generics[0].infer_type_vars(
# This is basically a trick to avoid extra code: We execute the
# incoming classes to be able to use the normal code for type
# var inference.
value_set.execute_annotation(),
)
elif annotation_name == 'Callable':
if len(annotation_generics) == 2:
return annotation_generics[1].infer_type_vars(
value_set.execute_annotation(),
)
elif annotation_name == 'Tuple':
tuple_annotation, = self.execute_annotation()
return tuple_annotation.infer_type_vars(value_set)
return type_var_dict
def _create_instance_with_generics(self, generics_manager):
return TypingClassWithGenerics(
self.parent_context,
self._tree_name,
generics_manager
)
class ProxyTypingClassValue(ProxyTypingValue, _TypingClassMixin):
index_class = TypingClassWithGenerics
class TypeAlias(LazyValueWrapper):
def __init__(self, parent_context, origin_tree_name, actual):
self.inference_state = parent_context.inference_state
self.parent_context = parent_context
self._origin_tree_name = origin_tree_name
self._actual = actual # e.g. builtins.list
@property
def name(self):
return ValueName(self, self._origin_tree_name)
def py__name__(self):
return self.name.string_name
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._actual)
def _get_wrapped_value(self):
module_name, class_name = self._actual.split('.')
# TODO use inference_state.import_module?
from jedi.inference.imports import Importer
module, = Importer(
self.inference_state, [module_name], self.inference_state.builtins_module
).follow()
classes = module.py__getattribute__(class_name)
# There should only be one, because it's code that we control.
assert len(classes) == 1, classes
cls = next(iter(classes))
return cls
def gather_annotation_classes(self):
return ValueSet([self._get_wrapped_value()])
def get_signatures(self):
return []
class Callable(BaseTypingInstance):
def py__call__(self, arguments):
"""
def x() -> Callable[[Callable[..., _T]], _T]: ...
"""
# The 0th index are the arguments.
try:
param_values = self._generics_manager[0]
result_values = self._generics_manager[1]
except IndexError:
debug.warning('Callable[...] defined without two arguments')
return NO_VALUES
else:
from jedi.inference.gradual.annotation import infer_return_for_callable
return infer_return_for_callable(arguments, param_values, result_values)
def py__get__(self, instance, class_value):
return ValueSet([self])
class Tuple(BaseTypingInstance):
def _is_homogenous(self):
# To specify a variable-length tuple of homogeneous type, Tuple[T, ...]
# is used.
return self._generics_manager.is_homogenous_tuple()
def py__simple_getitem__(self, index):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
else:
if isinstance(index, int):
return self._generics_manager.get_index_and_execute(index)
debug.dbg('The getitem type on Tuple was %s' % index)
return NO_VALUES
def py__iter__(self, contextualized_node=None):
if self._is_homogenous():
yield LazyKnownValues(self._generics_manager.get_index_and_execute(0))
else:
for v in self._generics_manager.to_tuple():
yield LazyKnownValues(v.execute_annotation())
def py__getitem__(self, index_value_set, contextualized_node):
if self._is_homogenous():
return self._generics_manager.get_index_and_execute(0)
return ValueSet.from_sets(
self._generics_manager.to_tuple()
).execute_annotation()
def _get_wrapped_value(self):
tuple_, = self.inference_state.builtins_module \
.py__getattribute__('tuple').execute_annotation()
return tuple_
@property
def name(self):
return self._wrapped_value.name
def infer_type_vars(self, value_set):
# Circular
from jedi.inference.gradual.annotation import merge_pairwise_generics, merge_type_var_dicts
value_set = value_set.filter(
lambda x: x.py__name__().lower() == 'tuple',
)
if self._is_homogenous():
# The parameter annotation is of the form `Tuple[T, ...]`,
# so we treat the incoming tuple like a iterable sequence
# rather than a positional container of elements.
return self._class_value.get_generics()[0].infer_type_vars(
value_set.merge_types_of_iterate(),
)
else:
# The parameter annotation has only explicit type parameters
# (e.g: `Tuple[T]`, `Tuple[T, U]`, `Tuple[T, U, V]`, etc.) so we
# treat the incoming values as needing to match the annotation
# exactly, just as we would for non-tuple annotations.
type_var_dict = {}
for element in value_set:
try:
method = element.get_annotated_class_object
except AttributeError:
# This might still happen, because the tuple name matching
# above is not 100% correct, so just catch the remaining
# cases here.
continue
py_class = method()
merge_type_var_dicts(
type_var_dict,
merge_pairwise_generics(self._class_value, py_class),
)
return type_var_dict
class Generic(BaseTypingInstance):
pass
class Protocol(BaseTypingInstance):
pass
class AnyClass(BaseTypingValue):
def execute_annotation(self):
debug.warning('Used Any - returned no results')
return NO_VALUES
class OverloadFunction(BaseTypingValue):
@repack_with_argument_clinic('func, /')
def py__call__(self, func_value_set):
# Just pass arguments through.
return func_value_set
class NewTypeFunction(ValueWrapper):
def py__call__(self, arguments):
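        # Hedged example: for ``UserId = NewType('UserId', int)`` the first
        # argument (the name string) is skipped; the second argument's
        # inferred values (here ``int``) become the wrapped type set.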
ordered_args = arguments.unpack()
next(ordered_args, (None, None))
_, second_arg = next(ordered_args, (None, None))
if second_arg is None:
return NO_VALUES
return ValueSet(
NewType(
self.inference_state,
contextualized_node.context,
contextualized_node.node,
second_arg.infer(),
) for contextualized_node in arguments.get_calling_nodes())
class NewType(Value):
def __init__(self, inference_state, parent_context, tree_node, type_value_set):
super().__init__(inference_state, parent_context)
self._type_value_set = type_value_set
self.tree_node = tree_node
def py__class__(self):
c, = self._type_value_set.py__class__()
return c
def py__call__(self, arguments):
return self._type_value_set.execute_annotation()
@property
def name(self):
from jedi.inference.compiled.value import CompiledValueName
return CompiledValueName(self, 'NewType')
def __repr__(self) -> str:
return '<NewType: %s>%s' % (self.tree_node, self._type_value_set)
class CastFunction(ValueWrapper):
@repack_with_argument_clinic('type, object, /')
def py__call__(self, type_value_set, object_value_set):
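        # Hedged example: ``cast(List[int], x)`` infers to instances of
        # ``List[int]`` regardless of what ``x`` actually is.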
return type_value_set.execute_annotation()
class TypedDictClass(BaseTypingValue):
"""
This class has no responsibilities and is just here to make sure that typed
dicts can be identified.
"""
class TypedDict(LazyValueWrapper):
"""Represents the instance version of ``TypedDictClass``."""
def __init__(self, definition_class):
self.inference_state = definition_class.inference_state
self.parent_context = definition_class.parent_context
self.tree_node = definition_class.tree_node
self._definition_class = definition_class
@property
def name(self):
return ValueName(self, self.tree_node.name)
def py__simple_getitem__(self, index):
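        # Hedged example: for a hypothetical ``class Movie(TypedDict):``
        # with ``title: str``, indexing an instance with 'title' infers
        # ``str`` from the declared annotation.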
if isinstance(index, str):
return ValueSet.from_sets(
name.infer()
for filter in self._definition_class.get_filters(is_instance=True)
for name in filter.get(index)
)
return NO_VALUES
def get_key_values(self):
filtered_values = itertools.chain.from_iterable((
f.values()
for f in self._definition_class.get_filters(is_instance=True)
))
return ValueSet({
create_simple_object(self.inference_state, v.string_name)
for v in filtered_values
})
def _get_wrapped_value(self):
d, = self.inference_state.builtins_module.py__getattribute__('dict')
result, = d.execute_with_values()
return result
| 17,230 | Python | .py | 397 | 33.596977 | 99 | 0.627464 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,941 | completion_cache.py | DamnWidget_anaconda/anaconda_lib/jedi/api/completion_cache.py | from typing import Dict, Tuple, Callable
CacheValues = Tuple[str, str, str]
CacheValuesCallback = Callable[[], CacheValues]
_cache: Dict[str, Dict[str, CacheValues]] = {}
def save_entry(module_name: str, name: str, cache: CacheValues) -> None:
try:
module_cache = _cache[module_name]
except KeyError:
module_cache = _cache[module_name] = {}
module_cache[name] = cache
def _create_get_from_cache(number: int) -> Callable[[str, str, CacheValuesCallback], str]:
def _get_from_cache(module_name: str, name: str, get_cache_values: CacheValuesCallback) -> str:
try:
return _cache[module_name][name][number]
except KeyError:
v = get_cache_values()
save_entry(module_name, name, v)
return v[number]
return _get_from_cache
get_type = _create_get_from_cache(0)
get_docstring_signature = _create_get_from_cache(1)
get_docstring = _create_get_from_cache(2)
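# A hedged usage sketch (module and name strings are hypothetical). Each
# accessor reads one slot of the cached (type, docstring_signature,
# docstring) tuple and computes and stores all three slots on a miss:
#
#   get_type('os.path', 'join',
#            lambda: ('function', 'join(*paths)', 'Join path segments.'))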
| 954 | Python | .py | 22 | 37.409091 | 99 | 0.670639 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,942 | project.py | DamnWidget_anaconda/anaconda_lib/jedi/api/project.py | """
Projects are a way to handle Python projects within Jedi. For simpler plugins
you might not want to deal with projects, but if you want to give the user more
flexibility to define sys paths and Python interpreters for a project,
:class:`.Project` is the perfect way to allow for that.
Projects can be saved to disk and loaded again, to allow project definitions to
be used across repositories.
"""
import json
from pathlib import Path
from itertools import chain
from jedi import debug
from jedi.api.environment import get_cached_default_environment, create_environment
from jedi.api.exceptions import WrongVersion
from jedi.api.completion import search_in_module
from jedi.api.helpers import split_search_string, get_module_names
from jedi.inference.imports import load_module_from_path, \
load_namespace_from_path, iter_module_names
from jedi.inference.sys_path import discover_buildout_paths
from jedi.inference.cache import inference_state_as_method_param_cache
from jedi.inference.references import recurse_find_python_folders_and_files, search_in_file_ios
from jedi.file_io import FolderIO
_CONFIG_FOLDER = '.jedi'
_CONTAINS_POTENTIAL_PROJECT = \
'setup.py', '.git', '.hg', 'requirements.txt', 'MANIFEST.in', 'pyproject.toml'
_SERIALIZER_VERSION = 1
def _try_to_skip_duplicates(func):
def wrapper(*args, **kwargs):
found_tree_nodes = []
found_modules = []
for definition in func(*args, **kwargs):
tree_node = definition._name.tree_name
if tree_node is not None and tree_node in found_tree_nodes:
continue
if definition.type == 'module' and definition.module_path is not None:
if definition.module_path in found_modules:
continue
found_modules.append(definition.module_path)
yield definition
found_tree_nodes.append(tree_node)
return wrapper
def _remove_duplicates_from_path(path):
used = set()
for p in path:
if p in used:
continue
used.add(p)
yield p
class Project:
"""
Projects are a simple way to manage Python folders and define how Jedi does
import resolution. It is mostly used as a parameter to :class:`.Script`.
Additionally there are functions to search a whole project.
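
    A minimal usage sketch (paths hypothetical)::

        project = Project('/repos/myproj', added_sys_path=['/repos/libs'])
        project.save()
        same_project = Project.load('/repos/myproj')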
"""
_environment = None
@staticmethod
def _get_config_folder_path(base_path):
return base_path.joinpath(_CONFIG_FOLDER)
@staticmethod
def _get_json_path(base_path):
return Project._get_config_folder_path(base_path).joinpath('project.json')
@classmethod
def load(cls, path):
"""
Loads a project from a specific path. You should not provide the path
to ``.jedi/project.json``, but rather the path to the project folder.
:param path: The path of the directory you want to use as a project.
"""
if isinstance(path, str):
path = Path(path)
with open(cls._get_json_path(path)) as f:
version, data = json.load(f)
if version == 1:
return cls(**data)
else:
raise WrongVersion(
"The Jedi version of this project seems newer than what we can handle."
)
def save(self):
"""
        Saves the project configuration in the project directory, in
        ``.jedi/project.json``.
"""
data = dict(self.__dict__)
data.pop('_environment', None)
data.pop('_django', None) # TODO make django setting public?
data = {k.lstrip('_'): v for k, v in data.items()}
data['path'] = str(data['path'])
self._get_config_folder_path(self._path).mkdir(parents=True, exist_ok=True)
with open(self._get_json_path(self._path), 'w') as f:
return json.dump((_SERIALIZER_VERSION, data), f)
def __init__(
self,
path,
*,
environment_path=None,
load_unsafe_extensions=False,
sys_path=None,
added_sys_path=(),
smart_sys_path=True,
) -> None:
"""
:param path: The base path for this project.
:param environment_path: The Python executable path, typically the path
of a virtual environment.
:param load_unsafe_extensions: Default False, Loads extensions that are not in the
sys path and in the local directories. With this option enabled,
this is potentially unsafe if you clone a git repository and
            analyze its code, because those compiled extensions will be
            imported and therefore have execution privileges.
:param sys_path: list of str. You can override the sys path if you
want. By default the ``sys.path.`` is generated by the
environment (virtualenvs, etc).
:param added_sys_path: list of str. Adds these paths at the end of the
sys path.
:param smart_sys_path: If this is enabled (default), adds paths from
local directories. Otherwise you will have to rely on your packages
being properly configured on the ``sys.path``.
"""
if isinstance(path, str):
path = Path(path).absolute()
self._path = path
self._environment_path = environment_path
if sys_path is not None:
# Remap potential pathlib.Path entries
sys_path = list(map(str, sys_path))
self._sys_path = sys_path
self._smart_sys_path = smart_sys_path
self._load_unsafe_extensions = load_unsafe_extensions
self._django = False
# Remap potential pathlib.Path entries
self.added_sys_path = list(map(str, added_sys_path))
"""The sys path that is going to be added at the end of the """
@property
def path(self):
"""
The base path for this project.
"""
return self._path
@property
def sys_path(self):
"""
The sys path provided to this project. This can be None and in that
case will be auto generated.
"""
return self._sys_path
@property
def smart_sys_path(self):
"""
        Whether the sys path is calculated in a smart way, where
        additional paths from local directories are added.
"""
return self._smart_sys_path
@property
def load_unsafe_extensions(self):
"""
        Whether the project loads unsafe extensions.
"""
return self._load_unsafe_extensions
@inference_state_as_method_param_cache()
def _get_base_sys_path(self, inference_state):
# The sys path has not been set explicitly.
sys_path = list(inference_state.environment.get_sys_path())
try:
sys_path.remove('')
except ValueError:
pass
return sys_path
@inference_state_as_method_param_cache()
def _get_sys_path(self, inference_state, add_parent_paths=True, add_init_paths=False):
"""
Keep this method private for all users of jedi. However internally this
one is used like a public method.
"""
suffixed = list(self.added_sys_path)
prefixed = []
if self._sys_path is None:
sys_path = list(self._get_base_sys_path(inference_state))
else:
sys_path = list(self._sys_path)
if self._smart_sys_path:
prefixed.append(str(self._path))
if inference_state.script_path is not None:
suffixed += map(str, discover_buildout_paths(
inference_state,
inference_state.script_path
))
if add_parent_paths:
# Collect directories in upward search by:
# 1. Skipping directories with __init__.py
# 2. Stopping immediately when above self._path
traversed = []
for parent_path in inference_state.script_path.parents:
if parent_path == self._path \
or self._path not in parent_path.parents:
break
if not add_init_paths \
and parent_path.joinpath("__init__.py").is_file():
continue
traversed.append(str(parent_path))
                    # AFAIK some libraries have imports like `foo.foo.bar`,
                    # which leads to the conclusion that longer paths should
                    # be preferred over shorter ones by default.
suffixed += reversed(traversed)
if self._django:
prefixed.append(str(self._path))
path = prefixed + sys_path + suffixed
return list(_remove_duplicates_from_path(path))
def get_environment(self):
if self._environment is None:
if self._environment_path is not None:
self._environment = create_environment(self._environment_path, safe=False)
else:
self._environment = get_cached_default_environment()
return self._environment
def search(self, string, *, all_scopes=False):
"""
Searches a name in the whole project. If the project is very big,
at some point Jedi will stop searching. However it's also very much
recommended to not exhaust the generator. Just display the first ten
results to the user.
There are currently three different search patterns:
- ``foo`` to search for a definition foo in any file or a file called
``foo.py`` or ``foo.pyi``.
- ``foo.bar`` to search for the ``foo`` and then an attribute ``bar``
in it.
- ``class foo.bar.Bar`` or ``def foo.bar.baz`` to search for a specific
API type.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:yields: :class:`.Name`
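
        A hedged usage sketch (search string hypothetical)::

            for name in project.search('class Foo'):
                print(name.module_path, name.line)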
"""
return self._search_func(string, all_scopes=all_scopes)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. An empty string
lists all definitions in a project, so be careful with that.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
@_try_to_skip_duplicates
def _search_func(self, string, complete=False, all_scopes=False):
        # Using a Script is the easiest way to get an empty module context.
from jedi import Script
s = Script('', project=self)
inference_state = s._inference_state
empty_module_context = s._get_module_context()
debug.dbg('Search for string %s, complete=%s', string, complete)
wanted_type, wanted_names = split_search_string(string)
name = wanted_names[0]
stub_folder_name = name + '-stubs'
ios = recurse_find_python_folders_and_files(FolderIO(str(self._path)))
file_ios = []
# 1. Search for modules in the current project
for folder_io, file_io in ios:
if file_io is None:
file_name = folder_io.get_base_name()
if file_name == name or file_name == stub_folder_name:
f = folder_io.get_file_io('__init__.py')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
f = folder_io.get_file_io('__init__.pyi')
try:
m = load_module_from_path(inference_state, f).as_context()
except FileNotFoundError:
m = load_namespace_from_path(inference_state, folder_io).as_context()
else:
continue
else:
file_ios.append(file_io)
if Path(file_io.path).name in (name + '.py', name + '.pyi'):
m = load_module_from_path(inference_state, file_io).as_context()
else:
continue
debug.dbg('Search of a specific module %s', m)
yield from search_in_module(
inference_state,
m,
names=[m.name],
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
ignore_imports=True,
)
# 2. Search for identifiers in the project.
for module_context in search_in_file_ios(inference_state, file_ios,
name, complete=complete):
names = get_module_names(module_context.tree_node, all_scopes=all_scopes)
names = [module_context.create_name(n) for n in names]
names = _remove_imports(names)
yield from search_in_module(
inference_state,
module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
ignore_imports=True,
)
# 3. Search for modules on sys.path
sys_path = [
p for p in self._get_sys_path(inference_state)
# Exclude folders that are handled by recursing of the Python
# folders.
if not p.startswith(str(self._path))
]
names = list(iter_module_names(inference_state, empty_module_context, sys_path))
yield from search_in_module(
inference_state,
empty_module_context,
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
convert=True,
)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._path)
def _is_potential_project(path):
for name in _CONTAINS_POTENTIAL_PROJECT:
try:
if path.joinpath(name).exists():
return True
except OSError:
continue
return False
def _is_django_path(directory):
""" Detects the path of the very well known Django library (if used) """
try:
with open(directory.joinpath('manage.py'), 'rb') as f:
return b"DJANGO_SETTINGS_MODULE" in f.read()
except (FileNotFoundError, IsADirectoryError, PermissionError):
return False
def get_default_project(path=None):
"""
If a project is not defined by the user, Jedi tries to define a project by
itself as well as possible. Jedi traverses folders until it finds one of
the following:
    1. A ``.jedi/project.json``
    2. One of the following files: ``setup.py``, ``.git``, ``.hg``,
       ``requirements.txt``, ``MANIFEST.in`` and ``pyproject.toml``.
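
    For example (paths hypothetical), calling this for
    ``/repos/myproj/pkg/mod.py`` returns a project rooted at
    ``/repos/myproj`` when that folder contains e.g. a ``setup.py``.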
"""
if path is None:
path = Path.cwd()
elif isinstance(path, str):
path = Path(path)
check = path.absolute()
probable_path = None
first_no_init_file = None
for dir in chain([check], check.parents):
try:
return Project.load(dir)
except (FileNotFoundError, IsADirectoryError, PermissionError):
pass
except NotADirectoryError:
continue
if first_no_init_file is None:
if dir.joinpath('__init__.py').exists():
# In the case that a __init__.py exists, it's in 99% just a
# Python package and the project sits at least one level above.
continue
elif not dir.is_file():
first_no_init_file = dir
if _is_django_path(dir):
project = Project(dir)
project._django = True
return project
if probable_path is None and _is_potential_project(dir):
probable_path = dir
if probable_path is not None:
# TODO search for setup.py etc
return Project(probable_path)
if first_no_init_file is not None:
return Project(first_no_init_file)
curdir = path if path.is_dir() else path.parent
return Project(curdir)
def _remove_imports(names):
return [
n for n in names
if n.tree_name is None or n.api_type not in ('module', 'namespace')
]
| 16,613 | Python | .py | 386 | 32.406736 | 97 | 0.59729 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,943 | errors.py | DamnWidget_anaconda/anaconda_lib/jedi/api/errors.py | """
This file is about errors in Python files and not about exception handling in
Jedi.
"""
def parso_to_jedi_errors(grammar, module_node):
return [SyntaxError(e) for e in grammar.iter_errors(module_node)]
class SyntaxError:
"""
Syntax errors are generated by :meth:`.Script.get_syntax_errors`.
"""
def __init__(self, parso_error):
self._parso_error = parso_error
@property
def line(self):
"""The line where the error starts (starting with 1)."""
return self._parso_error.start_pos[0]
@property
def column(self):
"""The column where the error starts (starting with 0)."""
return self._parso_error.start_pos[1]
@property
def until_line(self):
"""The line where the error ends (starting with 1)."""
return self._parso_error.end_pos[0]
@property
def until_column(self):
"""The column where the error ends (starting with 0)."""
return self._parso_error.end_pos[1]
def get_message(self):
return self._parso_error.message
def __repr__(self):
return '<%s from=%s to=%s>' % (
self.__class__.__name__,
self._parso_error.start_pos,
self._parso_error.end_pos,
)
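# Usage sketch (illustrative comment; assumes jedi is importable):
#
#     import jedi
#     errs = jedi.Script("def f(:\n    pass").get_syntax_errors()
#     e = errs[0]
#     print(e.line, e.column, e.get_message())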
| 1,253 | Python | .py | 36 | 28.194444 | 77 | 0.622204 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,944 | keywords.py | DamnWidget_anaconda/anaconda_lib/jedi/api/keywords.py | import pydoc
from contextlib import suppress
from typing import Dict, Optional
from jedi.inference.names import AbstractArbitraryName
try:
# https://github.com/python/typeshed/pull/4351 adds pydoc_data
from pydoc_data import topics # type: ignore[import]
pydoc_topics: Optional[Dict[str, str]] = topics.topics
except ImportError:
# Python 3.6.8 embeddable does not have pydoc_data.
pydoc_topics = None
class KeywordName(AbstractArbitraryName):
api_type = 'keyword'
def py__doc__(self):
return imitate_pydoc(self.string_name)
def imitate_pydoc(string):
"""
It's not possible to get pydoc's help texts without starting the annoying
pager, so the topic lookup is imitated here.
"""
if pydoc_topics is None:
return ''
h = pydoc.help
with suppress(KeyError):
# try to access symbols
string = h.symbols[string]
string, _, related = string.partition(' ')
def get_target(s):
return h.topics.get(s, h.keywords.get(s))
while isinstance(string, str):
string = get_target(string)
try:
# is a tuple now
label, related = string
except TypeError:
return ''
try:
return pydoc_topics[label].strip() if pydoc_topics else ''
except KeyError:
return ''
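# Usage sketch (illustrative comment): keyword help surfaces through
# Script.help(), which builds a KeywordName whose docstring comes from
# imitate_pydoc().
#
#     import jedi
#     defs = jedi.Script("pass").help(1, 0)
#     print(defs[0].docstring()[:60])  # pydoc topic text for `pass`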
| 1,283 | Python | .py | 40 | 26.375 | 76 | 0.674249 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,945 | file_name.py | DamnWidget_anaconda/anaconda_lib/jedi/api/file_name.py | import os
from jedi.api import classes
from jedi.api.strings import StringName, get_quote_ending
from jedi.api.helpers import match
from jedi.inference.helpers import get_str_or_none
class PathName(StringName):
api_type = 'path'
def complete_file_name(inference_state, module_context, start_leaf, quote, string,
like_name, signatures_callback, code_lines, position, fuzzy):
# First we want to find out what can actually be changed as a name.
like_name_length = len(os.path.basename(string))
addition = _get_string_additions(module_context, start_leaf)
if string.startswith('~'):
string = os.path.expanduser(string)
if addition is None:
return
string = addition + string
# Here we use basename again, because if strings are added like
# `'foo' + 'bar`, it should complete to `foobar/`.
must_start_with = os.path.basename(string)
string = os.path.dirname(string)
sigs = signatures_callback(*position)
is_in_os_path_join = sigs and all(s.full_name == 'os.path.join' for s in sigs)
if is_in_os_path_join:
to_be_added = _add_os_path_join(module_context, start_leaf, sigs[0].bracket_start)
if to_be_added is None:
is_in_os_path_join = False
else:
string = to_be_added + string
base_path = os.path.join(inference_state.project.path, string)
try:
listed = sorted(os.scandir(base_path), key=lambda e: e.name)
# OSError: [Errno 36] File name too long: '...'
except (FileNotFoundError, OSError):
return
quote_ending = get_quote_ending(quote, code_lines, position)
for entry in listed:
name = entry.name
if match(name, must_start_with, fuzzy=fuzzy):
if is_in_os_path_join or not entry.is_dir():
name += quote_ending
else:
name += os.path.sep
yield classes.Completion(
inference_state,
PathName(inference_state, name[len(must_start_with) - like_name_length:]),
stack=None,
like_name_length=like_name_length,
is_fuzzy=fuzzy,
)
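# Usage sketch (illustrative comment; completions depend on the files that
# actually exist in the project directory):
#
#     import jedi
#     code = "open('./"
#     for c in jedi.Script(code).complete(1, len(code)):
#         print(c.name, c.type)  # directory entries, type == 'path'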
def _get_string_additions(module_context, start_leaf):
def iterate_nodes():
node = addition.parent
was_addition = True
for child_node in reversed(node.children[:node.children.index(addition)]):
if was_addition:
was_addition = False
yield child_node
continue
if child_node != '+':
break
was_addition = True
addition = start_leaf.get_previous_leaf()
if addition != '+':
return ''
context = module_context.create_context(start_leaf)
return _add_strings(context, reversed(list(iterate_nodes())))
def _add_strings(context, nodes, add_slash=False):
string = ''
first = True
for child_node in nodes:
values = context.infer_node(child_node)
if len(values) != 1:
return None
c, = values
s = get_str_or_none(c)
if s is None:
return None
if not first and add_slash:
string += os.path.sep
string += s
first = False
return string
def _add_os_path_join(module_context, start_leaf, bracket_start):
def check(maybe_bracket, nodes):
if maybe_bracket.start_pos != bracket_start:
return None
if not nodes:
return ''
context = module_context.create_context(nodes[0])
return _add_strings(context, nodes, add_slash=True) or ''
if start_leaf.type == 'error_leaf':
# Unfinished string literal, like `join('`
value_node = start_leaf.parent
index = value_node.children.index(start_leaf)
if index > 0:
error_node = value_node.children[index - 1]
if error_node.type == 'error_node' and len(error_node.children) >= 2:
index = -2
if error_node.children[-1].type == 'arglist':
arglist_nodes = error_node.children[-1].children
index -= 1
else:
arglist_nodes = []
return check(error_node.children[index + 1], arglist_nodes[::2])
return None
# Maybe an arglist or some weird error case. Therefore checked below.
searched_node_child = start_leaf
while searched_node_child.parent is not None \
and searched_node_child.parent.type not in ('arglist', 'trailer', 'error_node'):
searched_node_child = searched_node_child.parent
if searched_node_child.get_first_leaf() is not start_leaf:
return None
searched_node = searched_node_child.parent
if searched_node is None:
return None
index = searched_node.children.index(searched_node_child)
arglist_nodes = searched_node.children[:index]
if searched_node.type == 'arglist':
trailer = searched_node.parent
if trailer.type == 'error_node':
trailer_index = trailer.children.index(searched_node)
assert trailer_index >= 2
assert trailer.children[trailer_index - 1] == '('
return check(trailer.children[trailer_index - 1], arglist_nodes[::2])
elif trailer.type == 'trailer':
return check(trailer.children[0], arglist_nodes[::2])
elif searched_node.type == 'trailer':
return check(searched_node.children[0], [])
elif searched_node.type == 'error_node':
# Stuff like `join(""`
return check(arglist_nodes[-1], [])
| 5,620 | Python | .py | 132 | 33.287879 | 92 | 0.614273 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,946 | strings.py | DamnWidget_anaconda/anaconda_lib/jedi/api/strings.py | """
This module is here for string completions. This means mostly stuff where
strings are returned, like `foo = dict(bar=3); foo["ba` would complete to
`"bar"]`.
It however does the same for numbers. The difference between string completions
and other completions is mostly that this module doesn't return defined
names in a module, but pretty much an arbitrary string.
"""
import re
from jedi.inference.names import AbstractArbitraryName
from jedi.inference.helpers import infer_call_of_leaf
from jedi.api.classes import Completion
from jedi.parser_utils import cut_value_at_position
_sentinel = object()
class StringName(AbstractArbitraryName):
api_type = 'string'
is_value_name = False
def complete_dict(module_context, code_lines, leaf, position, string, fuzzy):
bracket_leaf = leaf
if bracket_leaf != '[':
bracket_leaf = leaf.get_previous_leaf()
cut_end_quote = ''
if string:
cut_end_quote = get_quote_ending(string, code_lines, position, invert_result=True)
if bracket_leaf == '[':
if string is None and leaf is not bracket_leaf:
string = cut_value_at_position(leaf, position)
context = module_context.create_context(bracket_leaf)
before_bracket_leaf = bracket_leaf.get_previous_leaf()
if before_bracket_leaf.type in ('atom', 'trailer', 'name'):
values = infer_call_of_leaf(context, before_bracket_leaf)
return list(_completions_for_dicts(
module_context.inference_state,
values,
'' if string is None else string,
cut_end_quote,
fuzzy=fuzzy,
))
return []
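# Usage sketch (illustrative comment; the exact completion text is an
# assumption based on the quote handling below):
#
#     import jedi
#     code = "d = {'foo': 1, 'bar': 2}\nd['"
#     print([c.complete for c in jedi.Script(code).complete(2, 3)])
#     # -> ["bar'", "foo'"]  (the closing quote is part of the completion)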
def _completions_for_dicts(inference_state, dicts, literal_string, cut_end_quote, fuzzy):
for dict_key in sorted(_get_python_keys(dicts), key=lambda x: repr(x)):
dict_key_str = _create_repr_string(literal_string, dict_key)
if dict_key_str.startswith(literal_string):
name = StringName(inference_state, dict_key_str[:-len(cut_end_quote) or None])
yield Completion(
inference_state,
name,
stack=None,
like_name_length=len(literal_string),
is_fuzzy=fuzzy
)
def _create_repr_string(literal_string, dict_key):
if not isinstance(dict_key, (str, bytes)) or not literal_string:
return repr(dict_key)
r = repr(dict_key)
prefix, quote = _get_string_prefix_and_quote(literal_string)
if quote is None:
return r
if quote == r[0]:
return prefix + r
return prefix + quote + r[1:-1] + quote
def _get_python_keys(dicts):
for dct in dicts:
if dct.array_type == 'dict':
for key in dct.get_key_values():
dict_key = key.get_safe_value(default=_sentinel)
if dict_key is not _sentinel:
yield dict_key
def _get_string_prefix_and_quote(string):
match = re.match(r'(\w*)("""|\'{3}|"|\')', string)
if match is None:
return None, None
return match.group(1), match.group(2)
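# For example (illustrative): _get_string_prefix_and_quote('rb"foo') returns
# ('rb', '"'), while _get_string_prefix_and_quote('foo') returns (None, None).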
def _matches_quote_at_position(code_lines, quote, position):
string = code_lines[position[0] - 1][position[1]:position[1] + len(quote)]
return string == quote
def get_quote_ending(string, code_lines, position, invert_result=False):
_, quote = _get_string_prefix_and_quote(string)
if quote is None:
return ''
# Add a quote only if it's not already there.
if _matches_quote_at_position(code_lines, quote, position) != invert_result:
return ''
return quote
| 3,616 | Python | .py | 84 | 35.428571 | 90 | 0.649658 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,947 | interpreter.py | DamnWidget_anaconda/anaconda_lib/jedi/api/interpreter.py | """
TODO Some parts of this module are still not well documented.
"""
from jedi.inference import compiled
from jedi.inference.base_value import ValueSet
from jedi.inference.filters import ParserTreeFilter, MergedFilter
from jedi.inference.names import TreeNameDefinition
from jedi.inference.compiled import mixed
from jedi.inference.compiled.access import create_access_path
from jedi.inference.context import ModuleContext
def _create(inference_state, obj):
return compiled.create_from_access_path(
inference_state, create_access_path(inference_state, obj)
)
class NamespaceObject:
def __init__(self, dct):
self.__dict__ = dct
class MixedTreeName(TreeNameDefinition):
def infer(self):
"""
In an IPython notebook it is typical that some parts of the provided
code were already executed. In that case, if something is not properly
inferred, it should still infer from the variables it already knows.
"""
inferred = super().infer()
if not inferred:
for compiled_value in self.parent_context.mixed_values:
for f in compiled_value.get_filters():
values = ValueSet.from_sets(
n.infer() for n in f.get(self.string_name)
)
if values:
return values
return inferred
class MixedParserTreeFilter(ParserTreeFilter):
name_class = MixedTreeName
class MixedModuleContext(ModuleContext):
def __init__(self, tree_module_value, namespaces):
super().__init__(tree_module_value)
self.mixed_values = [
self._get_mixed_object(
_create(self.inference_state, NamespaceObject(n))
) for n in namespaces
]
def _get_mixed_object(self, compiled_value):
return mixed.MixedObject(
compiled_value=compiled_value,
tree_value=self._value
)
def get_filters(self, until_position=None, origin_scope=None):
yield MergedFilter(
MixedParserTreeFilter(
parent_context=self,
until_position=until_position,
origin_scope=origin_scope
),
self.get_global_filter(),
)
for mixed_object in self.mixed_values:
yield from mixed_object.get_filters(until_position, origin_scope)
| 2,415 | Python | .py | 60 | 31.15 | 80 | 0.651858 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,948 | environment.py | DamnWidget_anaconda/anaconda_lib/jedi/api/environment.py | """
Environments are a way to activate different Python versions or Virtualenvs for
static analysis. The Python binary in that environment is going to be executed.
"""
import os
import sys
import hashlib
import filecmp
from collections import namedtuple
from shutil import which
from jedi.cache import memoize_method, time_cache
from jedi.inference.compiled.subprocess import CompiledSubprocess, \
InferenceStateSameProcess, InferenceStateSubprocess
import parso
_VersionInfo = namedtuple('VersionInfo', 'major minor micro')
_SUPPORTED_PYTHONS = ['3.10', '3.9', '3.8', '3.7', '3.6']
_SAFE_PATHS = ['/usr/bin', '/usr/local/bin']
_CONDA_VAR = 'CONDA_PREFIX'
_CURRENT_VERSION = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
class InvalidPythonEnvironment(Exception):
"""
If you see this exception, the Python executable or Virtualenv you have
been trying to use is probably not a correct Python version.
"""
class _BaseEnvironment:
@memoize_method
def get_grammar(self):
version_string = '%s.%s' % (self.version_info.major, self.version_info.minor)
return parso.load_grammar(version=version_string)
@property
def _sha256(self):
try:
return self._hash
except AttributeError:
self._hash = _calculate_sha256_for_file(self.executable)
return self._hash
def _get_info():
return (
sys.executable,
sys.prefix,
sys.version_info[:3],
)
class Environment(_BaseEnvironment):
"""
This class is supposed to be created by internal Jedi architecture. You
should not create it directly. Please use create_environment or the other
functions instead. It is then returned by that function.
"""
_subprocess = None
def __init__(self, executable, env_vars=None):
self._start_executable = executable
self._env_vars = env_vars
# Initialize the environment
self._get_subprocess()
def _get_subprocess(self):
if self._subprocess is not None and not self._subprocess.is_crashed:
return self._subprocess
try:
self._subprocess = CompiledSubprocess(self._start_executable,
env_vars=self._env_vars)
info = self._subprocess._send(None, _get_info)
except Exception as exc:
raise InvalidPythonEnvironment(
"Could not get version information for %r: %r" % (
self._start_executable,
exc))
# Since it could change and might not be the same(?) as the one given,
# set it here.
self.executable = info[0]
"""
The Python executable, matches ``sys.executable``.
"""
self.path = info[1]
"""
The path to an environment, matches ``sys.prefix``.
"""
self.version_info = _VersionInfo(*info[2])
"""
Like :data:`sys.version_info`: a tuple to show the current
Environment's Python version.
"""
return self._subprocess
def __repr__(self):
version = '.'.join(str(i) for i in self.version_info)
return '<%s: %s in %s>' % (self.__class__.__name__, version, self.path)
def get_inference_state_subprocess(self, inference_state):
return InferenceStateSubprocess(inference_state, self._get_subprocess())
@memoize_method
def get_sys_path(self):
"""
The sys path for this environment. Does not include potential
modifications from e.g. appending to :data:`sys.path`.
:returns: list of str
"""
# It's pretty much impossible to generate the sys path without actually
# executing Python. The sys path (when starting with -S) itself depends
# on how the Python version was compiled (ENV variables).
# If you omit -S when starting Python (normal case), additionally
# site.py gets executed.
return self._get_subprocess().get_sys_path()
class _SameEnvironmentMixin:
def __init__(self):
self._start_executable = self.executable = sys.executable
self.path = sys.prefix
self.version_info = _VersionInfo(*sys.version_info[:3])
self._env_vars = None
class SameEnvironment(_SameEnvironmentMixin, Environment):
pass
class InterpreterEnvironment(_SameEnvironmentMixin, _BaseEnvironment):
def get_inference_state_subprocess(self, inference_state):
return InferenceStateSameProcess(inference_state)
def get_sys_path(self):
return sys.path
def _get_virtual_env_from_var(env_var='VIRTUAL_ENV'):
"""Get virtualenv environment from VIRTUAL_ENV environment variable.
It uses `safe=False` with ``create_environment``, because the environment
variable is considered to be safe / controlled by the user solely.
"""
var = os.environ.get(env_var)
if var:
# Under macOS in some cases - notably when using Pipenv - the
# sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
# /path/to/env so we need to fully resolve the paths in order to
# compare them.
if os.path.realpath(var) == os.path.realpath(sys.prefix):
return _try_get_same_env()
try:
return create_environment(var, safe=False)
except InvalidPythonEnvironment:
pass
def _calculate_sha256_for_file(path):
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(filecmp.BUFSIZE), b''):
sha256.update(block)
return sha256.hexdigest()
def get_default_environment():
"""
Tries to return an active Virtualenv or conda environment.
If there is no VIRTUAL_ENV variable and no CONDA_PREFIX variable set,
it will return the latest Python version installed on the system. This
makes it possible to use as many new Python features as possible when using
autocompletion and other functionality.
:returns: :class:`.Environment`
"""
virtual_env = _get_virtual_env_from_var()
if virtual_env is not None:
return virtual_env
conda_env = _get_virtual_env_from_var(_CONDA_VAR)
if conda_env is not None:
return conda_env
return _try_get_same_env()
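# Usage sketch (illustrative comment; output depends on the local setup):
#
#     from jedi.api.environment import get_default_environment
#     env = get_default_environment()
#     print(env.executable, env.version_info)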
def _try_get_same_env():
env = SameEnvironment()
if not os.path.basename(env.executable).lower().startswith('python'):
# This tries to counter issues with embedding. In some cases (e.g.
# VIM's Python Mac/Windows, sys.executable is /foo/bar/vim. This
# happens, because for Mac a function called `_NSGetExecutablePath` is
# used and for Windows `GetModuleFileNameW`. These are both platform
# specific functions. For all other systems sys.executable should be
# alright. However here we try to generalize:
#
# 1. Check if the executable looks like python (heuristic)
# 2. In case it's not try to find the executable
# 3. In case we don't find it use an interpreter environment.
#
# The last option will always work, but leads to potential crashes of
# Jedi - which is ok, because it happens very rarely and even less,
# because the code below should work for most cases.
if os.name == 'nt':
# The first case would be a virtualenv and the second a normal
# Python installation.
checks = (r'Scripts\python.exe', 'python.exe')
else:
# For unix it looks like Python is always in a bin folder.
checks = (
'bin/python%s.%s' % (sys.version_info[0], sys.version_info[1]),
'bin/python%s' % (sys.version_info[0]),
'bin/python',
)
for check in checks:
guess = os.path.join(sys.exec_prefix, check)
if os.path.isfile(guess):
# Bingo - We think we have our Python.
return Environment(guess)
# It looks like there is no reasonable Python to be found.
return InterpreterEnvironment()
# If no virtualenv is found, use the environment we're already
# using.
return env
def get_cached_default_environment():
var = os.environ.get('VIRTUAL_ENV') or os.environ.get(_CONDA_VAR)
environment = _get_cached_default_environment()
# Under macOS in some cases - notably when using Pipenv - the
# sys.prefix of the virtualenv is /path/to/env/bin/.. instead of
# /path/to/env so we need to fully resolve the paths in order to
# compare them.
if var and os.path.realpath(var) != os.path.realpath(environment.path):
_get_cached_default_environment.clear_cache()
return _get_cached_default_environment()
return environment
@time_cache(seconds=10 * 60) # 10 Minutes
def _get_cached_default_environment():
try:
return get_default_environment()
except InvalidPythonEnvironment:
# It's possible that `sys.executable` is wrong. Typically happens
# when Jedi is used in an executable that embeds Python. For further
# information, have a look at:
# https://github.com/davidhalter/jedi/issues/1531
return InterpreterEnvironment()
def find_virtualenvs(paths=None, *, safe=True, use_environment_vars=True):
"""
:param paths: A list of paths in your file system to be scanned for
Virtualenvs. It will search in these paths and potentially execute the
Python binaries.
:param safe: Default True. In case this is False, it will allow this
function to execute potential `python` environments. An attacker might
be able to drop an executable in a path this function is searching by
default. If the executable has not been installed by root, it will not
be executed.
:param use_environment_vars: Default True. If True, the VIRTUAL_ENV
variable will be checked if it contains a valid VirtualEnv.
CONDA_PREFIX will be checked to see if it contains a valid conda
environment.
:yields: :class:`.Environment`
"""
if paths is None:
paths = []
_used_paths = set()
if use_environment_vars:
# Using this variable should be safe, because attackers might be
# able to drop files (via git) but not environment variables.
virtual_env = _get_virtual_env_from_var()
if virtual_env is not None:
yield virtual_env
_used_paths.add(virtual_env.path)
conda_env = _get_virtual_env_from_var(_CONDA_VAR)
if conda_env is not None:
yield conda_env
_used_paths.add(conda_env.path)
for directory in paths:
if not os.path.isdir(directory):
continue
directory = os.path.abspath(directory)
for path in os.listdir(directory):
path = os.path.join(directory, path)
if path in _used_paths:
# A path shouldn't be inferred twice.
continue
_used_paths.add(path)
try:
executable = _get_executable_path(path, safe=safe)
yield Environment(executable)
except InvalidPythonEnvironment:
pass
def find_system_environments(*, env_vars=None):
"""
Ignores virtualenvs and returns the Python versions that were installed on
your system. This might return nothing, if you're running Python e.g. from
a portable version.
The environments are sorted from latest to oldest Python version.
:yields: :class:`.Environment`
"""
for version_string in _SUPPORTED_PYTHONS:
try:
yield get_system_environment(version_string, env_vars=env_vars)
except InvalidPythonEnvironment:
pass
# TODO: this function should probably return a list of environments since
# multiple Python installations can be found on a system for the same version.
def get_system_environment(version, *, env_vars=None):
"""
Return the first Python environment found for a string of the form 'X.Y'
where X and Y are the major and minor versions of Python.
:raises: :exc:`.InvalidPythonEnvironment`
:returns: :class:`.Environment`
"""
exe = which('python' + version)
if exe:
if exe == sys.executable:
return SameEnvironment()
return Environment(exe)
if os.name == 'nt':
for exe in _get_executables_from_windows_registry(version):
try:
return Environment(exe, env_vars=env_vars)
except InvalidPythonEnvironment:
pass
raise InvalidPythonEnvironment("Cannot find executable python%s." % version)
def create_environment(path, *, safe=True, env_vars=None):
"""
Make it possible to manually create an Environment object by specifying a
Virtualenv path or an executable path and optional environment variables.
:raises: :exc:`.InvalidPythonEnvironment`
:returns: :class:`.Environment`
"""
if os.path.isfile(path):
_assert_safe(path, safe)
return Environment(path, env_vars=env_vars)
return Environment(_get_executable_path(path, safe=safe), env_vars=env_vars)
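# Usage sketch (illustrative comment; the venv path is hypothetical):
#
#     from jedi.api.environment import create_environment
#     env = create_environment('/path/to/venv')
#     print(env.get_sys_path()[:3])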
def _get_executable_path(path, safe=True):
"""
Returns None if it's not actually a virtual env.
"""
if os.name == 'nt':
python = os.path.join(path, 'Scripts', 'python.exe')
else:
python = os.path.join(path, 'bin', 'python')
if not os.path.exists(python):
raise InvalidPythonEnvironment("%s seems to be missing." % python)
_assert_safe(python, safe)
return python
def _get_executables_from_windows_registry(version):
# https://github.com/python/typeshed/pull/3794 adds winreg
import winreg # type: ignore[import]
# TODO: support Python Anaconda.
sub_keys = [
r'SOFTWARE\Python\PythonCore\{version}\InstallPath',
r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}\InstallPath',
r'SOFTWARE\Python\PythonCore\{version}-32\InstallPath',
r'SOFTWARE\Wow6432Node\Python\PythonCore\{version}-32\InstallPath'
]
for root_key in [winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE]:
for sub_key in sub_keys:
sub_key = sub_key.format(version=version)
try:
with winreg.OpenKey(root_key, sub_key) as key:
prefix = winreg.QueryValueEx(key, '')[0]
exe = os.path.join(prefix, 'python.exe')
if os.path.isfile(exe):
yield exe
except WindowsError:
pass
def _assert_safe(executable_path, safe):
if safe and not _is_safe(executable_path):
raise InvalidPythonEnvironment(
"The python binary is potentially unsafe.")
def _is_safe(executable_path):
# Resolve sym links. A venv typically is a symlink to a known Python
# binary. Only virtualenvs copy symlinks around.
real_path = os.path.realpath(executable_path)
if _is_unix_safe_simple(real_path):
return True
# Just check the list of known Python versions. If it's not in there,
# it's likely an attacker or some Python that was not properly
# installed in the system.
for environment in find_system_environments():
if environment.executable == real_path:
return True
# If the versions don't match, just compare the binary files. If we
# don't do that, only venvs will be working and not virtualenvs.
# venvs are symlinks while virtualenvs are actual copies of the
# Python files.
# This still means that if the system Python is updated and the
# virtualenv's Python is not (which is probably never going to get
# upgraded), it will not work with Jedi. IMO that's fine, because
# people should just be using venv. ~ dave
if environment._sha256 == _calculate_sha256_for_file(real_path):
return True
return False
def _is_unix_safe_simple(real_path):
if _is_unix_admin():
# In case we are root, just be conservative and
# only execute known paths.
return any(real_path.startswith(p) for p in _SAFE_PATHS)
uid = os.stat(real_path).st_uid
# The interpreter needs to be owned by root. This means that it wasn't
# written by a user and therefore attacking Jedi is not as simple.
# The attack could look like the following:
# 1. A user clones a repository.
# 2. The repository has an innocent looking folder called foobar. jedi
# searches for the folder and executes foobar/bin/python --version if
# there's also a foobar/bin/activate.
# 3. The attacker has gained code execution, since he controls
# foobar/bin/python.
return uid == 0
def _is_unix_admin():
try:
return os.getuid() == 0
except AttributeError:
return False # Windows
| 16,956 | Python | .py | 382 | 36.489529 | 85 | 0.659773 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,949 | __init__.py | DamnWidget_anaconda/anaconda_lib/jedi/api/__init__.py | """
The API basically only provides one class. You can create a :class:`Script` and
use its methods.
Additionally you can add a debug function with :func:`set_debug_function`.
Alternatively, if you don't need a custom function and are happy with printing
debug messages to stdout, simply call :func:`set_debug_function` without
arguments.
"""
import sys
from pathlib import Path
import parso
from parso.python import tree
from jedi.parser_utils import get_executable_nodes
from jedi import debug
from jedi import settings
from jedi import cache
from jedi.file_io import KnownContentFileIO
from jedi.api import classes
from jedi.api import interpreter
from jedi.api import helpers
from jedi.api.helpers import validate_line_column
from jedi.api.completion import Completion, search_in_module
from jedi.api.keywords import KeywordName
from jedi.api.environment import InterpreterEnvironment
from jedi.api.project import get_default_project, Project
from jedi.api.errors import parso_to_jedi_errors
from jedi.api import refactoring
from jedi.api.refactoring.extract import extract_function, extract_variable
from jedi.inference import InferenceState
from jedi.inference import imports
from jedi.inference.references import find_references
from jedi.inference.arguments import try_iter_content
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.sys_path import transform_path_to_dotted
from jedi.inference.syntax_tree import tree_name_to_values
from jedi.inference.value import ModuleValue
from jedi.inference.base_value import ValueSet
from jedi.inference.value.iterable import unpack_tuple_to_dict
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.gradual.utils import load_proper_stub_module
from jedi.inference.utils import to_list
# Jedi uses lots and lots of recursion. By setting this a little bit higher, we
# can remove some "maximum recursion depth" errors.
sys.setrecursionlimit(3000)
class Script:
"""
A Script is the base for completions, goto or whatever you want to do with
Jedi. The counterpart of this class is :class:`Interpreter`, which works
with actual dictionaries and can work with a REPL. This class
should be used when a user edits code in an editor.
You can either use the ``code`` parameter or ``path`` to read a file.
Usually you're going to want to use both of them (in an editor).
The Script's ``sys.path`` is very customizable:
- If `project` is provided with a ``sys_path``, that is going to be used.
- If `environment` is provided, its ``sys.path`` will be used
(see :func:`Environment.get_sys_path <jedi.api.environment.Environment.get_sys_path>`);
- Otherwise ``sys.path`` will match that of the default environment of
Jedi, which typically matches the sys path that was used at the time
when Jedi was imported.
Most methods have a ``line`` and a ``column`` parameter. Lines in Jedi are
always 1-based and columns are always zero-based. To avoid repetition they
are not always documented. You can omit both line and column. Jedi will
then just do whatever action you are calling at the end of the file. If you
provide only the line, Jedi will complete at the end of that line.
.. warning:: By default :attr:`jedi.settings.fast_parser` is enabled, which means
that parso reuses modules (i.e. they are not immutable). With this setting
Jedi is **not thread safe** and it is also not safe to use multiple
:class:`.Script` instances and its definitions at the same time.
If you are a normal plugin developer this should not be an issue. It is
an issue for people that do more complex stuff with Jedi.
This is purely a performance optimization and works pretty well for all
typical usages, however consider to turn the setting off if it causes
you problems. See also
`this discussion <https://github.com/davidhalter/jedi/issues/1240>`_.
:param code: The source code of the current file, separated by newlines.
:type code: str
:param path: The path of the file in the file system, or ``''`` if
it hasn't been saved yet.
:type path: str or pathlib.Path or None
:param Environment environment: Provide a predefined :ref:`Environment <environments>`
to work with a specific Python version or virtualenv.
:param Project project: Provide a :class:`.Project` to make sure finding
references works well, because the right folder is searched. There are
also ways to modify the sys path and other things.
"""
def __init__(self, code=None, *, path=None, environment=None, project=None):
self._orig_path = path
if isinstance(path, str):
path = Path(path)
self.path = path.absolute() if path else None
if code is None:
if path is None:
raise ValueError("Must provide at least one of code or path")
# TODO add a better warning than the traceback!
with open(path, 'rb') as f:
code = f.read()
if project is None:
# Load the Python grammar of the current interpreter.
project = get_default_project(None if self.path is None else self.path.parent)
self._inference_state = InferenceState(
project, environment=environment, script_path=self.path
)
debug.speed('init')
self._module_node, code = self._inference_state.parse_and_get_code(
code=code,
path=self.path,
use_latest_grammar=path and path.suffix == '.pyi',
cache=False, # No disk cache, because the current script often changes.
diff_cache=settings.fast_parser,
cache_path=settings.cache_directory,
)
debug.speed('parsed')
self._code_lines = parso.split_lines(code, keepends=True)
self._code = code
cache.clear_time_caches()
debug.reset_time()
# Cache the module, this is mostly useful for testing, since this shouldn't
# be called multiple times.
@cache.memoize_method
def _get_module(self):
names = None
is_package = False
if self.path is not None:
import_names, is_p = transform_path_to_dotted(
self._inference_state.get_sys_path(add_parent_paths=False),
self.path
)
if import_names is not None:
names = import_names
is_package = is_p
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(self.path, self._code)
if self.path is not None and self.path.suffix == '.pyi':
# We are in a stub file. Try to load the stub properly.
stub_module = load_proper_stub_module(
self._inference_state,
self._inference_state.latest_grammar,
file_io,
names,
self._module_node
)
if stub_module is not None:
return stub_module
if names is None:
names = ('__main__',)
module = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=names,
code_lines=self._code_lines,
is_package=is_package,
)
if names[0] not in ('builtins', 'typing'):
# These modules are essential for Jedi, so don't overwrite them.
self._inference_state.module_cache.add(names, ValueSet([module]))
return module
def _get_module_context(self):
return self._get_module().as_context()
def __repr__(self):
return '<%s: %s %r>' % (
self.__class__.__name__,
repr(self._orig_path),
self._inference_state.environment,
)
@validate_line_column
def complete(self, line=None, column=None, *, fuzzy=False):
"""
Completes objects under the cursor.
Those objects contain information about the completions, more than just
names.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:return: Completion objects, sorted by name. Normal names appear
before "private" names that start with ``_`` and those appear
before magic methods and name mangled names that start with ``__``.
:rtype: list of :class:`.Completion`
"""
with debug.increase_indent_cm('complete'):
completion = Completion(
self._inference_state, self._get_module_context(), self._code_lines,
(line, column), self.get_signatures, fuzzy=fuzzy,
)
return completion.complete()
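# Usage sketch (illustrative comment; assumes the standard library `json`
# module is resolvable):
#
#     import jedi
#     script = jedi.Script("import json\njson.lo")
#     for c in script.complete(2, 7):
#         print(c.name, c.type)  # e.g. load, loads (functions)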
@validate_line_column
def infer(self, line=None, column=None, *, only_stubs=False, prefer_stubs=False):
"""
Return the definitions under the cursor. It is basically a wrapper
around Jedi's type inference.
This method follows complicated paths and returns the end, not the
first definition. The big difference between :meth:`goto` and
:meth:`infer` is that :meth:`goto` doesn't
follow imports and statements. Multiple objects may be returned
because, depending on a condition, there can be two different versions
of a function.
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
pos = line, column
leaf = self._module_node.get_name_of_position(pos)
if leaf is None:
leaf = self._module_node.get_leaf_for_position(pos)
if leaf is None or leaf.type == 'string':
return []
if leaf.end_pos == (line, column) and leaf.type == 'operator':
next_ = leaf.get_next_leaf()
if next_.start_pos == leaf.end_pos \
and next_.type in ('number', 'string', 'keyword'):
leaf = next_
context = self._get_module_context().create_context(leaf)
values = helpers.infer(self._inference_state, context, leaf)
values = convert_values(
values,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, c.name) for c in values]
# The additional set here allows the definitions to become unique in an
# API sense. In the internals we want to separate more things than in
# the API.
return helpers.sorted_definitions(set(defs))
@validate_line_column
def goto(self, line=None, column=None, *, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
"""
Goes to the name that defined the object under the cursor. Optionally
you can follow imports.
Multiple objects may be returned because, depending on a condition,
there can be two different versions of a function.
:param follow_imports: The method will follow imports.
:param follow_builtin_imports: If ``follow_imports`` is True will try
to look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this method.
:param prefer_stubs: Prefer stubs to Python objects for this method.
:rtype: list of :class:`.Name`
"""
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Without a name we really just want to jump to the result e.g.
# executed by `foo()`, if the cursor is after `)`.
return self.infer(line, column, only_stubs=only_stubs, prefer_stubs=prefer_stubs)
name = self._get_module_context().create_name(tree_name)
# Make it possible to goto the super class function/attribute
# definitions, when they are overwritten.
names = []
if name.tree_name.is_definition() and name.parent_context.is_class():
class_node = name.parent_context.tree_node
class_value = self._get_module_context().create_value(class_node)
mro = class_value.py__mro__()
next(mro) # Ignore the first entry, because it's the class itself.
for cls in mro:
names = cls.goto(tree_name.value)
if names:
break
if not names:
names = list(name.goto())
if follow_imports:
names = helpers.filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
defs = [classes.Name(self._inference_state, d) for d in set(names)]
# Avoid duplicates
return list(set(helpers.sorted_definitions(defs)))
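# Usage sketch (illustrative comment):
#
#     import jedi
#     names = jedi.Script("def f():\n    pass\nf()").goto(3, 0)
#     print(names[0].line)  # 1, the line where f is defined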
def search(self, string, *, all_scopes=False):
"""
Searches for a name in the current file. For a description of what the
search string should look like, please have a look at
:meth:`.Project.search`.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:yields: :class:`.Name`
"""
return self._search_func(string, all_scopes=all_scopes)
@to_list
def _search_func(self, string, all_scopes=False, complete=False, fuzzy=False):
names = self._names(all_scopes=all_scopes)
wanted_type, wanted_names = helpers.split_search_string(string)
return search_in_module(
self._inference_state,
self._get_module_context(),
names=names,
wanted_type=wanted_type,
wanted_names=wanted_names,
complete=complete,
fuzzy=fuzzy,
)
def complete_search(self, string, **kwargs):
"""
Like :meth:`.Script.search`, but completes that string. If you want to
have all possible definitions in a file you can also provide an empty
string.
:param bool all_scopes: Default False; searches not only for
definitions on the top level of a module level, but also in
functions and classes.
:param fuzzy: Default False. Will return fuzzy completions, which means
that e.g. ``ooa`` will match ``foobar``.
:yields: :class:`.Completion`
"""
return self._search_func(string, complete=True, **kwargs)
@validate_line_column
def help(self, line=None, column=None):
"""
Used to display a help window to users. Uses :meth:`.Script.goto` and
returns additional definitions for keywords and operators.
Typically you will want to display :meth:`.BaseName.docstring` to the
user for all the returned definitions.
The additional definitions are ``Name(...).type == 'keyword'``.
These definitions do not have a lot of value apart from their docstring
attribute, which contains the output of Python's :func:`help` function.
:rtype: list of :class:`.Name`
"""
definitions = self.goto(line, column, follow_imports=True)
if definitions:
return definitions
leaf = self._module_node.get_leaf_for_position((line, column))
if leaf is not None and leaf.type in ('keyword', 'operator', 'error_leaf'):
def need_pydoc():
if leaf.value in ('(', ')', '[', ']'):
if leaf.parent.type == 'trailer':
return False
if leaf.parent.type == 'atom':
return False
grammar = self._inference_state.grammar
# This parso stuff is not public, but since I control it, this
# is fine :-) ~dave
reserved = grammar._pgen_grammar.reserved_syntax_strings.keys()
return leaf.value in reserved
if need_pydoc():
name = KeywordName(self._inference_state, leaf.value)
return [classes.Name(self._inference_state, name)]
return []
@validate_line_column
def get_references(self, line=None, column=None, **kwargs):
"""
Lists all references of a variable in a project. Since this can be
quite hard for Jedi to do, it will stop searching if the search gets
too complicated.
:param include_builtins: Default ``True``. If ``False``, checks if a definition
is a builtin (e.g. ``sys``) and in that case does not return it.
:param scope: Default ``'project'``. If ``'file'``, include references in
the current module only.
:rtype: list of :class:`.Name`
"""
def _references(include_builtins=True, scope='project'):
if scope not in ('project', 'file'):
raise ValueError('Only the scopes "file" and "project" are allowed')
tree_name = self._module_node.get_name_of_position((line, column))
if tree_name is None:
# Must be syntax
return []
names = find_references(self._get_module_context(), tree_name, scope == 'file')
definitions = [classes.Name(self._inference_state, n) for n in names]
if not include_builtins or scope == 'file':
definitions = [d for d in definitions if not d.in_builtin_module()]
return helpers.sorted_definitions(definitions)
return _references(**kwargs)
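# Usage sketch (illustrative comment):
#
#     import jedi
#     refs = jedi.Script("x = 1\ny = x + x").get_references(1, 0)
#     print([(r.line, r.column) for r in refs])  # [(1, 0), (2, 4), (2, 8)]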
@validate_line_column
def get_signatures(self, line=None, column=None):
"""
Return the function object of the call under the cursor.
E.g. if the cursor is here::
abs(# <-- cursor is here
This would return the ``abs`` function. On the other hand::
abs()# <-- cursor is here
This would return an empty list.
:rtype: list of :class:`.Signature`
"""
pos = line, column
call_details = helpers.get_signature_details(self._module_node, pos)
if call_details is None:
return []
context = self._get_module_context().create_context(call_details.bracket_leaf)
definitions = helpers.cache_signatures(
self._inference_state,
context,
call_details.bracket_leaf,
self._code_lines,
pos
)
debug.speed('func_call followed')
# TODO here we use stubs instead of the actual values. We should use
# the signatures from stubs, but the actual values, probably?!
return [classes.Signature(self._inference_state, signature, call_details)
for signature in definitions.get_signatures()]
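# Usage sketch (illustrative comment):
#
#     import jedi
#     code = "def add(a, b):\n    return a + b\nadd(1, "
#     sig = jedi.Script(code).get_signatures(3, 7)[0]
#     print(sig.name, sig.index)  # add 1  (the cursor is on parameter b)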
@validate_line_column
def get_context(self, line=None, column=None):
"""
Returns the scope context under the cursor. This basically means the
function, class or module where the cursor is at.
:rtype: :class:`.Name`
"""
pos = (line, column)
leaf = self._module_node.get_leaf_for_position(pos, include_prefixes=True)
if leaf.start_pos > pos or leaf.type == 'endmarker':
previous_leaf = leaf.get_previous_leaf()
if previous_leaf is not None:
leaf = previous_leaf
module_context = self._get_module_context()
n = tree.search_ancestor(leaf, 'funcdef', 'classdef')
if n is not None and n.start_pos < pos <= n.children[-1].start_pos:
# This is a bit of a special case. The context of a function/class
# name/param/keyword is always it's parent context, not the
# function itself. Catch all the cases here where we are before the
# suite object, but still in the function.
context = module_context.create_value(n).as_context()
else:
context = module_context.create_context(leaf)
while context.name is None:
context = context.parent_context # comprehensions
definition = classes.Name(self._inference_state, context.name)
while definition.type != 'module':
name = definition._name # TODO private access
tree_name = name.tree_name
if tree_name is not None: # Happens with lambdas.
scope = tree_name.get_definition()
if scope.start_pos[1] < column:
break
definition = definition.parent()
return definition
def _analysis(self):
self._inference_state.is_analysis = True
self._inference_state.analysis_modules = [self._module_node]
module = self._get_module_context()
try:
for node in get_executable_nodes(self._module_node):
context = module.create_context(node)
if node.type in ('funcdef', 'classdef'):
# Resolve the decorators.
tree_name_to_values(self._inference_state, context, node.children[1])
elif isinstance(node, tree.Import):
import_names = set(node.get_defined_names())
if node.is_nested():
import_names |= set(path[-1] for path in node.get_paths())
for n in import_names:
imports.infer_import(context, n)
elif node.type == 'expr_stmt':
types = context.infer_node(node)
for testlist in node.children[:-1:2]:
# Iterate tuples.
unpack_tuple_to_dict(context, types, testlist)
else:
if node.type == 'name':
defs = self._inference_state.infer(context, node)
else:
defs = infer_call_of_leaf(context, node)
try_iter_content(defs)
self._inference_state.reset_recursion_limitations()
ana = [a for a in self._inference_state.analysis if self.path == a.path]
return sorted(set(ana), key=lambda x: x.line)
finally:
self._inference_state.is_analysis = False
def get_names(self, **kwargs):
"""
Returns names defined in the current file.
:param all_scopes: If True lists the names of all scopes instead of
only the module namespace.
:param definitions: If True lists the names that have been defined by a
class, function or a statement (``a = b`` returns ``a``).
:param references: If True lists all the names that are not listed by
``definitions=True``. E.g. ``a = b`` returns ``b``.
:rtype: list of :class:`.Name`
"""
names = self._names(**kwargs)
return [classes.Name(self._inference_state, n) for n in names]
def get_syntax_errors(self):
"""
Lists all syntax errors in the current file.
:rtype: list of :class:`.SyntaxError`
"""
return parso_to_jedi_errors(self._inference_state.grammar, self._module_node)
def _names(self, all_scopes=False, definitions=True, references=False):
# Set line/column to a random position, because they don't matter.
module_context = self._get_module_context()
defs = [
module_context.create_name(name)
for name in helpers.get_module_names(
self._module_node,
all_scopes=all_scopes,
definitions=definitions,
references=references,
)
]
return sorted(defs, key=lambda x: x.start_pos)
def rename(self, line=None, column=None, *, new_name):
"""
Renames all references of the variable under the cursor.
:param new_name: The variable under the cursor will be renamed to this
string.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
definitions = self.get_references(line, column, include_builtins=False)
return refactoring.rename(self._inference_state, definitions, new_name)
@validate_line_column
def extract_variable(self, line, column, *, new_name, until_line=None, until_column=None):
"""
Moves an expression to a new statement.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
foo = 3.1
x = int(foo + 1)
the code above will become::
foo = 3.1
bar = foo + 1
x = int(bar)
:param new_name: The expression under the cursor will be renamed to
this string.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will try to define the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_variable(
self._inference_state, self.path, self._module_node,
new_name, (line, column), until_pos
)
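# Usage sketch (illustrative comment; column values are approximate and the
# file path is hypothetical):
#
#     import jedi
#     script = jedi.Script("x = 1 + 2", path="example.py")
#     ref = script.extract_variable(1, 4, new_name="total", until_column=9)
#     print(ref.get_diff())  # `total = 1 + 2` extracted above `x = total`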
@validate_line_column
def extract_function(self, line, column, *, new_name, until_line=None, until_column=None):
"""
Moves an expression to a new function.
For example if you have the cursor on ``foo`` and provide a
``new_name`` called ``bar``::
global_var = 3
def x():
foo = 3.1
x = int(foo + 1 + global_var)
the code above will become::
global_var = 3
def bar(foo):
return int(foo + 1 + global_var)
def x():
foo = 3.1
x = bar(foo)
:param new_name: The expression under the cursor will be replaced with
a function with this name.
:param int until_line: The selection range ends at this line; when
omitted, Jedi will try to define the range itself.
:param int until_column: The selection range ends at this column; when
omitted, Jedi will try to define the range itself.
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
if until_line is None and until_column is None:
until_pos = None
else:
if until_line is None:
until_line = line
if until_column is None:
until_column = len(self._code_lines[until_line - 1])
until_pos = until_line, until_column
return extract_function(
self._inference_state, self.path, self._get_module_context(),
new_name, (line, column), until_pos
)
def inline(self, line=None, column=None):
"""
Inlines a variable under the cursor. This is basically the opposite of
extracting a variable. For example with the cursor on bar::
foo = 3.1
bar = foo + 1
x = int(bar)
the code above will become::
foo = 3.1
x = int(foo + 1)
:raises: :exc:`.RefactoringError`
:rtype: :class:`.Refactoring`
"""
names = [d._name for d in self.get_references(line, column, include_builtins=True)]
return refactoring.inline(self._inference_state, names)
class Interpreter(Script):
"""
Jedi's API for Python REPLs.
Implements all of the methods that are present in :class:`.Script` as well.
In addition to completions that normal REPL completion does like
``str.upper``, Jedi also supports code completion based on static code
analysis. For example Jedi will complete ``str().upper``.
>>> from os.path import join
>>> namespace = locals()
>>> script = Interpreter('join("").up', [namespace])
>>> print(script.complete()[0].name)
upper
All keyword arguments are same as the arguments for :class:`.Script`.
:param str code: Code to parse.
:type namespaces: typing.List[dict]
:param namespaces: A list of namespace dictionaries such as the one
returned by :func:`globals` and :func:`locals`.
"""
_allow_descriptor_getattr_default = True
def __init__(self, code, namespaces, *, project=None, **kwds):
try:
namespaces = [dict(n) for n in namespaces]
except Exception:
raise TypeError("namespaces must be a non-empty list of dicts.")
environment = kwds.get('environment', None)
if environment is None:
environment = InterpreterEnvironment()
else:
if not isinstance(environment, InterpreterEnvironment):
raise TypeError("The environment needs to be an InterpreterEnvironment subclass.")
if project is None:
project = Project(Path.cwd())
super().__init__(code, environment=environment, project=project, **kwds)
self.namespaces = namespaces
self._inference_state.allow_descriptor_getattr = self._allow_descriptor_getattr_default
@cache.memoize_method
def _get_module_context(self):
if self.path is None:
file_io = None
else:
file_io = KnownContentFileIO(self.path, self._code)
tree_module_value = ModuleValue(
self._inference_state, self._module_node,
file_io=file_io,
string_names=('__main__',),
code_lines=self._code_lines,
)
return interpreter.MixedModuleContext(
tree_module_value,
self.namespaces,
)
def preload_module(*modules):
"""
Preloading modules tells Jedi to load a module now, instead of lazy parsing
of modules. This can be useful for IDEs, to control which modules to load
on startup.
:param modules: different module names, list of string.
"""
for m in modules:
s = "import %s as x; x." % m
Script(s).complete(1, len(s))
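# Usage sketch (illustrative comment):
#
#     import jedi
#     jedi.preload_module('json', 'collections')  # parse these up front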
def set_debug_function(func_cb=debug.print_to_stdout, warnings=True,
notices=True, speed=True):
"""
Define a callback debug function to get all the debug messages.
If you don't specify any arguments, debug messages will be printed to stdout.
:param func_cb: The callback function for debug messages.
"""
debug.debug_function = func_cb
debug.enable_warning = warnings
debug.enable_notice = notices
debug.enable_speed = speed
| 31,270 | Python | .py | 657 | 37.410959 | 98 | 0.621979 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,950 | classes.py | DamnWidget_anaconda/anaconda_lib/jedi/api/classes.py | """
There are a couple of classes documented in here:
- :class:`.BaseName` as an abstract base class for almost everything.
- :class:`.Name` used in a lot of places
- :class:`.Completion` for completions
- :class:`.BaseSignature` as a base class for signatures
- :class:`.Signature` for :meth:`.Script.get_signatures` only
- :class:`.ParamName` used for parameters of signatures
- :class:`.Refactoring` for refactorings
- :class:`.SyntaxError` for :meth:`.Script.get_syntax_errors` only
These classes make up the biggest part of the API, because they contain
the interesting information about all operations.
"""
import re
from pathlib import Path
from typing import Optional
from parso.tree import search_ancestor
from jedi import settings
from jedi import debug
from jedi.inference.utils import unite
from jedi.cache import memoize_method
from jedi.inference.compiled.mixed import MixedName
from jedi.inference.names import ImportName, SubModuleName
from jedi.inference.gradual.stub_value import StubModuleValue
from jedi.inference.gradual.conversion import convert_names, convert_values
from jedi.inference.base_value import ValueSet, HasNoContext
from jedi.api.keywords import KeywordName
from jedi.api import completion_cache
from jedi.api.helpers import filter_follow_imports
def _sort_names_by_start_pos(names):
return sorted(names, key=lambda s: s.start_pos or (0, 0))
def defined_names(inference_state, value):
"""
List sub-definitions (e.g., methods in a class).
:type value: Value
:rtype: list of Name
"""
try:
context = value.as_context()
except HasNoContext:
return []
filter = next(context.get_filters())
names = [name for name in filter.values()]
return [Name(inference_state, n) for n in _sort_names_by_start_pos(names)]
def _values_to_definitions(values):
return [Name(c.inference_state, c.name) for c in values]
class BaseName:
"""
The base class for all definitions, completions and signatures.
"""
_mapping = {
'posixpath': 'os.path',
'riscospath': 'os.path',
'ntpath': 'os.path',
'os2emxpath': 'os.path',
'macpath': 'os.path',
'genericpath': 'os.path',
'posix': 'os',
'_io': 'io',
'_functools': 'functools',
'_collections': 'collections',
'_socket': 'socket',
'_sqlite3': 'sqlite3',
}
_tuple_mapping = dict((tuple(k.split('.')), v) for (k, v) in {
'argparse._ActionsContainer': 'argparse.ArgumentParser',
}.items())
def __init__(self, inference_state, name):
self._inference_state = inference_state
self._name = name
"""
An instance of :class:`parso.python.tree.Name` subclass.
"""
self.is_keyword = isinstance(self._name, KeywordName)
@memoize_method
def _get_module_context(self):
# This can take a while to complete, because in the worst case of
# imports (consider `import a` completions), we need to load all
# modules starting with a first.
return self._name.get_root_context()
@property
def module_path(self) -> Optional[Path]:
"""
Shows the file path of a module, e.g. ``/usr/lib/python3.9/os.py``.
"""
module = self._get_module_context()
if module.is_stub() or not module.is_compiled():
# Compiled modules should not return a module path even if they
# have one.
path: Optional[Path] = self._get_module_context().py__file__()
if path is not None:
return path
return None
@property
def name(self):
"""
Name of variable/function/class/module.
For example, for ``x = None`` it returns ``'x'``.
:rtype: str or None
"""
return self._name.get_public_name()
@property
def type(self):
"""
The type of the definition.
Here is an example of the value of this attribute. Let's consider
the following source. As what is in ``variable`` is unambiguous
to Jedi, :meth:`jedi.Script.infer` should return a list of
definition for ``sys``, ``f``, ``C`` and ``x``.
>>> from jedi import Script
>>> source = '''
... import keyword
...
... class C:
... pass
...
... class D:
... pass
...
... x = D()
...
... def f():
... pass
...
... for variable in [keyword, f, C, x]:
... variable'''
>>> script = Script(source)
>>> defs = script.infer()
Before showing what is in ``defs``, let's sort it by :attr:`line`
so that it is easy to relate the result to the source code.
>>> defs = sorted(defs, key=lambda d: d.line)
>>> print(defs) # doctest: +NORMALIZE_WHITESPACE
[<Name full_name='keyword', description='module keyword'>,
<Name full_name='__main__.C', description='class C'>,
<Name full_name='__main__.D', description='instance D'>,
<Name full_name='__main__.f', description='def f'>]
Finally, here is what you can get from :attr:`type`:
>>> defs = [d.type for d in defs]
>>> defs[0]
'module'
>>> defs[1]
'class'
>>> defs[2]
'instance'
>>> defs[3]
'function'
Valid values for type are ``module``, ``class``, ``instance``, ``function``,
``param``, ``path``, ``keyword``, ``property`` and ``statement``.
"""
tree_name = self._name.tree_name
resolve = False
if tree_name is not None:
# TODO move this to their respective names.
definition = tree_name.get_definition()
if definition is not None and definition.type == 'import_from' and \
tree_name.is_definition():
resolve = True
if isinstance(self._name, SubModuleName) or resolve:
for value in self._name.infer():
return value.api_type
return self._name.api_type
@property
def module_name(self):
"""
The module name, a bit similar to what ``__name__`` is in a random
Python module.
>>> from jedi import Script
>>> source = 'import json'
>>> script = Script(source, path='example.py')
>>> d = script.infer()[0]
>>> print(d.module_name) # doctest: +ELLIPSIS
json
"""
return self._get_module_context().py__name__()
def in_builtin_module(self):
"""
Returns True, if this is a builtin module.
"""
value = self._get_module_context().get_value()
if isinstance(value, StubModuleValue):
return any(v.is_compiled() for v in value.non_stub_value_set)
return value.is_compiled()
@property
def line(self):
"""The line where the definition occurs (starting with 1)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[0]
@property
def column(self):
"""The column where the definition occurs (starting with 0)."""
start_pos = self._name.start_pos
if start_pos is None:
return None
return start_pos[1]
def get_definition_start_position(self):
"""
The (row, column) of the start of the definition range. Rows start with
1, columns start with 0.
:rtype: Optional[Tuple[int, int]]
"""
if self._name.tree_name is None:
return None
definition = self._name.tree_name.get_definition()
if definition is None:
return self._name.start_pos
return definition.start_pos
def get_definition_end_position(self):
"""
The (row, column) of the end of the definition range. Rows start with
1, columns start with 0.
:rtype: Optional[Tuple[int, int]]
"""
if self._name.tree_name is None:
return None
definition = self._name.tree_name.get_definition()
if definition is None:
return self._name.tree_name.end_pos
if self.type in ("function", "class"):
last_leaf = definition.get_last_leaf()
if last_leaf.type == "newline":
return last_leaf.get_previous_leaf().end_pos
return last_leaf.end_pos
return definition.end_pos
def docstring(self, raw=False, fast=True):
r"""
Return a document string for this completion object.
Example:
>>> from jedi import Script
>>> source = '''\
... def f(a, b=1):
... "Document for function f."
... '''
>>> script = Script(source, path='example.py')
>>> doc = script.infer(1, len('def f'))[0].docstring()
>>> print(doc)
f(a, b=1)
<BLANKLINE>
Document for function f.
Notice that useful extra information is added to the actual
docstring, e.g. function signatures are prepended to their docstrings.
If you need the actual docstring, use ``raw=True`` instead.
>>> print(script.infer(1, len('def f'))[0].docstring(raw=True))
Document for function f.
:param fast: Don't follow imports that are only one level deep like
``import foo``, but follow ``from foo import bar``. This makes
            sense for speed reasons. Completing ``import a`` is slow if you
            call ``foo.docstring(fast=False)`` on every object, because it
parses all libraries starting with ``a``.
"""
if isinstance(self._name, ImportName) and fast:
return ''
doc = self._get_docstring()
if raw:
return doc
signature_text = self._get_docstring_signature()
if signature_text and doc:
return signature_text + '\n\n' + doc
else:
return signature_text + doc
def _get_docstring(self):
return self._name.py__doc__()
def _get_docstring_signature(self):
return '\n'.join(
signature.to_string()
for signature in self._get_signatures(for_docstring=True)
)
@property
def description(self):
"""
A description of the :class:`.Name` object, which is heavily used
        in testing, e.g. for ``isinstance`` it returns ``def isinstance``.
Example:
>>> from jedi import Script
>>> source = '''
... def f():
... pass
...
... class C:
... pass
...
... variable = f if random.choice([0,1]) else C'''
>>> script = Script(source) # line is maximum by default
>>> defs = script.infer(column=3)
>>> defs = sorted(defs, key=lambda d: d.line)
>>> print(defs) # doctest: +NORMALIZE_WHITESPACE
[<Name full_name='__main__.f', description='def f'>,
<Name full_name='__main__.C', description='class C'>]
>>> str(defs[0].description)
'def f'
>>> str(defs[1].description)
'class C'
"""
typ = self.type
tree_name = self._name.tree_name
if typ == 'param':
return typ + ' ' + self._name.to_string()
if typ in ('function', 'class', 'module', 'instance') or tree_name is None:
if typ == 'function':
# For the description we want a short and a pythonic way.
typ = 'def'
return typ + ' ' + self._name.get_public_name()
definition = tree_name.get_definition(include_setitem=True) or tree_name
# Remove the prefix, because that's not what we want for get_code
# here.
txt = definition.get_code(include_prefix=False)
# Delete comments:
txt = re.sub(r'#[^\n]+\n', ' ', txt)
# Delete multi spaces/newlines
txt = re.sub(r'\s+', ' ', txt).strip()
return txt
@property
def full_name(self):
"""
Dot-separated path of this object.
It is in the form of ``<module>[.<submodule>[...]][.<object>]``.
It is useful when you want to look up Python manual of the
object at hand.
Example:
>>> from jedi import Script
>>> source = '''
... import os
... os.path.join'''
>>> script = Script(source, path='example.py')
>>> print(script.infer(3, len('os.path.join'))[0].full_name)
os.path.join
Notice that it returns ``'os.path.join'`` instead of (for example)
        ``'posixpath.join'``. This is not correct, since the module's name would
        be ``<module 'posixpath' ...>``. However most users find ``os.path.join``
        more practical.
"""
if not self._name.is_value_name:
return None
names = self._name.get_qualified_names(include_module_names=True)
if names is None:
return None
names = list(names)
try:
names[0] = self._mapping[names[0]]
except KeyError:
pass
return '.'.join(names)
def is_stub(self):
"""
Returns True if the current name is defined in a stub file.
"""
if not self._name.is_value_name:
return False
return self._name.get_root_context().is_stub()
def is_side_effect(self):
"""
        Checks if a name is defined as ``self.foo = 3``. For ``self`` this
        function would return False, for ``foo`` it would return True.
"""
tree_name = self._name.tree_name
if tree_name is None:
return False
return tree_name.is_definition() and tree_name.parent.type == 'trailer'
@debug.increase_indent_cm('goto on name')
def goto(self, *, follow_imports=False, follow_builtin_imports=False,
only_stubs=False, prefer_stubs=False):
"""
Like :meth:`.Script.goto` (also supports the same params), but does it
for the current name. This is typically useful if you are using
something like :meth:`.Script.get_names()`.
:param follow_imports: The goto call will follow imports.
:param follow_builtin_imports: If follow_imports is True will try to
look up names in builtins (i.e. compiled or extension modules).
:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this goto call.
:rtype: list of :class:`Name`
"""
if not self._name.is_value_name:
return []
names = self._name.goto()
if follow_imports:
names = filter_follow_imports(names, follow_builtin_imports)
names = convert_names(
names,
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
return [self if n == self._name else Name(self._inference_state, n)
for n in names]
@debug.increase_indent_cm('infer on name')
def infer(self, *, only_stubs=False, prefer_stubs=False):
"""
Like :meth:`.Script.infer`, it can be useful to understand which type
the current name has.
Return the actual definitions. I strongly recommend not using it for
your completions, because it might slow down |jedi|. If you want to
read only a few objects (<=20), it might be useful, especially to get
the original docstrings. The basic problem of this function is that it
follows all results. This means with 1000 completions (e.g. numpy),
it's just very, very slow.
:param only_stubs: Only return stubs for this goto call.
:param prefer_stubs: Prefer stubs to Python objects for this type
inference call.
:rtype: list of :class:`Name`
"""
assert not (only_stubs and prefer_stubs)
if not self._name.is_value_name:
return []
# First we need to make sure that we have stub names (if possible) that
# we can follow. If we don't do that, we can end up with the inferred
# results of Python objects instead of stubs.
names = convert_names([self._name], prefer_stubs=True)
values = convert_values(
ValueSet.from_sets(n.infer() for n in names),
only_stubs=only_stubs,
prefer_stubs=prefer_stubs,
)
resulting_names = [c.name for c in values]
return [self if n == self._name else Name(self._inference_state, n)
for n in resulting_names]
def parent(self):
"""
Returns the parent scope of this identifier.
:rtype: Name
"""
if not self._name.is_value_name:
return None
if self.type in ('function', 'class', 'param') and self._name.tree_name is not None:
# Since the parent_context doesn't really match what the user
            # thinks the parent is here, we handle these cases separately.
# The reason for this is the following:
# - class: Nested classes parent_context is always the
# parent_context of the most outer one.
# - function: Functions in classes have the module as
# parent_context.
# - param: The parent_context of a param is not its function but
# e.g. the outer class or module.
cls_or_func_node = self._name.tree_name.get_definition()
parent = search_ancestor(cls_or_func_node, 'funcdef', 'classdef', 'file_input')
context = self._get_module_context().create_value(parent).as_context()
else:
context = self._name.parent_context
if context is None:
return None
while context.name is None:
# Happens for comprehension contexts
context = context.parent_context
return Name(self._inference_state, context.name)
def __repr__(self):
return "<%s %sname=%r, description=%r>" % (
self.__class__.__name__,
'full_' if self.full_name else '',
self.full_name or self.name,
self.description,
)
def get_line_code(self, before=0, after=0):
"""
Returns the line of code where this object was defined.
:param before: Add n lines before the current line to the output.
:param after: Add n lines after the current line to the output.
:return str: Returns the line(s) of code or an empty string if it's a
builtin.
"""
if not self._name.is_value_name:
return ''
lines = self._name.get_root_context().code_lines
if lines is None:
# Probably a builtin module, just ignore in that case.
return ''
index = self._name.start_pos[0] - 1
start_index = max(index - before, 0)
return ''.join(lines[start_index:index + after + 1])
def _get_signatures(self, for_docstring=False):
if self._name.api_type == 'property':
return []
if for_docstring and self._name.api_type == 'statement' and not self.is_stub():
# For docstrings we don't resolve signatures if they are simple
# statements and not stubs. This is a speed optimization.
return []
if isinstance(self._name, MixedName):
# While this would eventually happen anyway, it's basically just a
# shortcut to not infer anything tree related, because it's really
# not necessary.
return self._name.infer_compiled_value().get_signatures()
names = convert_names([self._name], prefer_stubs=True)
return [sig for name in names for sig in name.infer().get_signatures()]
def get_signatures(self):
"""
Returns all potential signatures for a function or a class. Multiple
signatures are typical if you use Python stubs with ``@overload``.
:rtype: list of :class:`BaseSignature`
"""
return [
BaseSignature(self._inference_state, s)
for s in self._get_signatures()
]
def execute(self):
"""
Uses type inference to "execute" this identifier and returns the
executed objects.
:rtype: list of :class:`Name`
"""
return _values_to_definitions(self._name.infer().execute_with_values())
def get_type_hint(self):
"""
Returns type hints like ``Iterable[int]`` or ``Union[int, str]``.
This method might be quite slow, especially for functions. The problem
is finding executions for those functions to return something like
``Callable[[int, str], str]``.
:rtype: str
"""
return self._name.infer().get_type_hint()
class Completion(BaseName):
"""
``Completion`` objects are returned from :meth:`.Script.complete`. They
provide additional information about a completion.
"""
def __init__(self, inference_state, name, stack, like_name_length,
is_fuzzy, cached_name=None):
super().__init__(inference_state, name)
self._like_name_length = like_name_length
self._stack = stack
self._is_fuzzy = is_fuzzy
self._cached_name = cached_name
        # Completion objects with the same name (which means duplicate
        # items in the completion list)
self._same_name_completions = []
def _complete(self, like_name):
append = ''
if settings.add_bracket_after_function \
and self.type == 'function':
append = '('
name = self._name.get_public_name()
if like_name:
name = name[self._like_name_length:]
return name + append
@property
def complete(self):
"""
Only works with non-fuzzy completions. Returns None if fuzzy
completions are used.
Return the rest of the word, e.g. completing ``isinstance``::
isinstan# <-- Cursor is here
would return the string 'ce'. It also adds additional stuff, depending
on your ``settings.py``.
Assuming the following function definition::
def foo(param=0):
pass
completing ``foo(par`` would give a ``Completion`` which ``complete``
would be ``am=``.
"""
if self._is_fuzzy:
return None
return self._complete(True)
@property
def name_with_symbols(self):
"""
        Similar to :attr:`.name`, but unlike :attr:`.name` it also returns the
symbols, for example assuming the following function definition::
def foo(param=0):
pass
completing ``foo(`` would give a ``Completion`` which
``name_with_symbols`` would be "param=".
"""
return self._complete(False)
def docstring(self, raw=False, fast=True):
"""
Documented under :meth:`BaseName.docstring`.
"""
if self._like_name_length >= 3:
# In this case we can just resolve the like name, because we
# wouldn't load like > 100 Python modules anymore.
fast = False
return super().docstring(raw=raw, fast=fast)
def _get_docstring(self):
if self._cached_name is not None:
return completion_cache.get_docstring(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super()._get_docstring()
def _get_docstring_signature(self):
if self._cached_name is not None:
return completion_cache.get_docstring_signature(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super()._get_docstring_signature()
def _get_cache(self):
return (
super().type,
super()._get_docstring_signature(),
super()._get_docstring(),
)
@property
def type(self):
"""
Documented under :meth:`BaseName.type`.
"""
# Purely a speed optimization.
if self._cached_name is not None:
return completion_cache.get_type(
self._cached_name,
self._name.get_public_name(),
lambda: self._get_cache()
)
return super().type
def get_completion_prefix_length(self):
"""
Returns the length of the prefix being completed.
For example, completing ``isinstance``::
isinstan# <-- Cursor is here
would return 8, because len('isinstan') == 8.
Assuming the following function definition::
def foo(param=0):
pass
completing ``foo(par`` would return 3.
"""
return self._like_name_length
def __repr__(self):
return '<%s: %s>' % (type(self).__name__, self._name.get_public_name())
class Name(BaseName):
"""
*Name* objects are returned from many different APIs including
:meth:`.Script.goto` or :meth:`.Script.infer`.
"""
def __init__(self, inference_state, definition):
super().__init__(inference_state, definition)
@memoize_method
def defined_names(self):
"""
List sub-definitions (e.g., methods in class).
:rtype: list of :class:`Name`
"""
defs = self._name.infer()
return sorted(
unite(defined_names(self._inference_state, d) for d in defs),
key=lambda s: s._name.start_pos or (0, 0)
)
def is_definition(self):
"""
Returns True, if defined as a name in a statement, function or class.
Returns False, if it's a reference to such a definition.
"""
if self._name.tree_name is None:
return True
else:
return self._name.tree_name.is_definition()
def __eq__(self, other):
return self._name.start_pos == other._name.start_pos \
and self.module_path == other.module_path \
and self.name == other.name \
and self._inference_state == other._inference_state
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self._name.start_pos, self.module_path, self.name, self._inference_state))
class BaseSignature(Name):
"""
These signatures are returned by :meth:`BaseName.get_signatures`
calls.
"""
def __init__(self, inference_state, signature):
super().__init__(inference_state, signature.name)
self._signature = signature
@property
def params(self):
"""
Returns definitions for all parameters that a signature defines.
This includes stuff like ``*args`` and ``**kwargs``.
:rtype: list of :class:`.ParamName`
"""
return [ParamName(self._inference_state, n)
for n in self._signature.get_param_names(resolve_stars=True)]
def to_string(self):
"""
Returns a text representation of the signature. This could for example
look like ``foo(bar, baz: int, **kwargs)``.
:rtype: str
"""
return self._signature.to_string()
class Signature(BaseSignature):
"""
A full signature object is the return value of
:meth:`.Script.get_signatures`.
"""
def __init__(self, inference_state, signature, call_details):
super().__init__(inference_state, signature)
self._call_details = call_details
self._signature = signature
@property
def index(self):
"""
Returns the param index of the current cursor position.
        Returns None if the index cannot be found in the current call.
:rtype: int
"""
return self._call_details.calculate_index(
self._signature.get_param_names(resolve_stars=True)
)
@property
def bracket_start(self):
"""
Returns a line/column tuple of the bracket that is responsible for the
last function call. The first line is 1 and the first column 0.
:rtype: int, int
"""
return self._call_details.bracket_leaf.start_pos
def __repr__(self):
return '<%s: index=%r %s>' % (
type(self).__name__,
self.index,
self._signature.to_string(),
)
class ParamName(Name):
def infer_default(self):
"""
Returns default values like the ``1`` of ``def foo(x=1):``.
:rtype: list of :class:`.Name`
"""
return _values_to_definitions(self._name.infer_default())
def infer_annotation(self, **kwargs):
"""
:param execute_annotation: Default True; If False, values are not
executed and classes are returned instead of instances.
:rtype: list of :class:`.Name`
"""
return _values_to_definitions(self._name.infer_annotation(ignore_stars=True, **kwargs))
def to_string(self):
"""
Returns a simple representation of a param, like
``f: Callable[..., Any]``.
:rtype: str
"""
return self._name.to_string()
@property
def kind(self):
"""
        Returns a member of :mod:`inspect`'s ``Parameter.kind`` enum.
:rtype: :py:attr:`inspect.Parameter.kind`
"""
return self._name.get_kind()
| 29,637 | Python | .py | 740 | 30.927027 | 95 | 0.589193 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,951 | exceptions.py | DamnWidget_anaconda/anaconda_lib/jedi/api/exceptions.py | class _JediError(Exception):
pass
class InternalError(_JediError):
"""
    This error might happen when a subprocess is crashing. The reason for
    this is usually broken C code in third party libraries. This is not a
    very common thing and it is safe to use Jedi again. However, using the
    same calls might result in the same error again.
"""
class WrongVersion(_JediError):
"""
    This error is reserved for the future; it shouldn't really happen at the
    moment.
"""
class RefactoringError(_JediError):
"""
Refactorings can fail for various reasons. So if you work with refactorings
like :meth:`.Script.rename`, :meth:`.Script.inline`,
:meth:`.Script.extract_variable` and :meth:`.Script.extract_function`, make
    sure to catch these. The descriptions in the errors are usually valuable
for end users.
A typical ``RefactoringError`` would tell the user that inlining is not
possible if no name is under the cursor.
"""
| 991 | Python | .py | 24 | 36.666667 | 79 | 0.725 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,952 | completion.py | DamnWidget_anaconda/anaconda_lib/jedi/api/completion.py | import re
from textwrap import dedent
from inspect import Parameter
from parso.python.token import PythonTokenTypes
from parso.python import tree
from parso.tree import search_ancestor, Leaf
from parso import split_lines
from jedi import debug
from jedi import settings
from jedi.api import classes
from jedi.api import helpers
from jedi.api import keywords
from jedi.api.strings import complete_dict
from jedi.api.file_name import complete_file_name
from jedi.inference import imports
from jedi.inference.base_value import ValueSet
from jedi.inference.helpers import infer_call_of_leaf, parse_dotted_names
from jedi.inference.context import get_global_filters
from jedi.inference.value import TreeInstance
from jedi.inference.docstring_utils import DocstringModule
from jedi.inference.names import ParamNameWrapper, SubModuleName
from jedi.inference.gradual.conversion import convert_values, convert_names
from jedi.parser_utils import cut_value_at_position
from jedi.plugins import plugin_manager
class ParamNameWithEquals(ParamNameWrapper):
def get_public_name(self):
return self.string_name + '='
def _get_signature_param_names(signatures, positional_count, used_kwargs):
# Add named params
for call_sig in signatures:
for i, p in enumerate(call_sig.params):
kind = p.kind
if i < positional_count and kind == Parameter.POSITIONAL_OR_KEYWORD:
continue
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) \
and p.name not in used_kwargs:
yield ParamNameWithEquals(p._name)
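# A rough sketch of the filtering above, for a hypothetical signature
# `def f(a, b=1, *, c)` completed at `f(1, `: positional_count is 1 and
# used_kwargs is empty, so `a` is skipped (slot 0 is already filled) and
# the yielded names would be `b=` and `c=`.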
def _must_be_kwarg(signatures, positional_count, used_kwargs):
if used_kwargs:
return True
must_be_kwarg = True
for signature in signatures:
for i, p in enumerate(signature.params):
kind = p.kind
if kind is Parameter.VAR_POSITIONAL:
# In case there were not already kwargs, the next param can
# always be a normal argument.
return False
if i >= positional_count and kind in (Parameter.POSITIONAL_OR_KEYWORD,
Parameter.POSITIONAL_ONLY):
must_be_kwarg = False
break
if not must_be_kwarg:
break
return must_be_kwarg
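# Continuing the hypothetical example: for `def f(a, b, *, c)` completed at
# `f(1, 2, `, every positional slot is filled and there is no `*args`, so
# only keyword arguments like `c=` can follow and other name completions
# are suppressed.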
def filter_names(inference_state, completion_names, stack, like_name, fuzzy, cached_name):
comp_dct = set()
if settings.case_insensitive_completion:
like_name = like_name.lower()
for name in completion_names:
string = name.string_name
if settings.case_insensitive_completion:
string = string.lower()
if helpers.match(string, like_name, fuzzy=fuzzy):
new = classes.Completion(
inference_state,
name,
stack,
len(like_name),
is_fuzzy=fuzzy,
cached_name=cached_name,
)
k = (new.name, new.complete) # key
if k not in comp_dct:
comp_dct.add(k)
tree_name = name.tree_name
if tree_name is not None:
definition = tree_name.get_definition()
if definition is not None and definition.type == 'del_stmt':
continue
yield new
def _remove_duplicates(completions, other_completions):
names = {d.name for d in other_completions}
return [c for c in completions if c.name not in names]
def get_user_context(module_context, position):
"""
Returns the scope in which the user resides. This includes flows.
"""
leaf = module_context.tree_node.get_leaf_for_position(position, include_prefixes=True)
return module_context.create_context(leaf)
def get_flow_scope_node(module_node, position):
node = module_node.get_leaf_for_position(position, include_prefixes=True)
while not isinstance(node, (tree.Scope, tree.Flow)):
node = node.parent
return node
@plugin_manager.decorate()
def complete_param_names(context, function_name, decorator_nodes):
# Basically there's no way to do param completion. The plugins are
# responsible for this.
return []
class Completion:
def __init__(self, inference_state, module_context, code_lines, position,
signatures_callback, fuzzy=False):
self._inference_state = inference_state
self._module_context = module_context
self._module_node = module_context.tree_node
self._code_lines = code_lines
# The first step of completions is to get the name
self._like_name = helpers.get_on_completion_name(self._module_node, code_lines, position)
# The actual cursor position is not what we need to calculate
# everything. We want the start of the name we're on.
self._original_position = position
self._signatures_callback = signatures_callback
self._fuzzy = fuzzy
def complete(self):
leaf = self._module_node.get_leaf_for_position(
self._original_position,
include_prefixes=True
)
string, start_leaf, quote = _extract_string_while_in_string(leaf, self._original_position)
prefixed_completions = complete_dict(
self._module_context,
self._code_lines,
start_leaf or leaf,
self._original_position,
None if string is None else quote + string,
fuzzy=self._fuzzy,
)
if string is not None and not prefixed_completions:
prefixed_completions = list(complete_file_name(
self._inference_state, self._module_context, start_leaf, quote, string,
self._like_name, self._signatures_callback,
self._code_lines, self._original_position,
self._fuzzy
))
if string is not None:
if not prefixed_completions and '\n' in string:
# Complete only multi line strings
prefixed_completions = self._complete_in_string(start_leaf, string)
return prefixed_completions
cached_name, completion_names = self._complete_python(leaf)
completions = list(filter_names(self._inference_state, completion_names,
self.stack, self._like_name,
self._fuzzy, cached_name=cached_name))
return (
# Removing duplicates mostly to remove False/True/None duplicates.
_remove_duplicates(prefixed_completions, completions)
+ sorted(completions, key=lambda x: (x.name.startswith('__'),
x.name.startswith('_'),
x.name.lower()))
)
def _complete_python(self, leaf):
"""
Analyzes the current context of a completion and decides what to
return.
Technically this works by generating a parser stack and analysing the
current stack for possible grammar nodes.
Possible enhancements:
- global/nonlocal search global
- yield from / raise from <- could be only exceptions/generators
- In args: */**: no completion
- In params (also lambda): no completion before =
"""
grammar = self._inference_state.grammar
self.stack = stack = None
self._position = (
self._original_position[0],
self._original_position[1] - len(self._like_name)
)
cached_name = None
try:
self.stack = stack = helpers.get_stack_at_position(
grammar, self._code_lines, leaf, self._position
)
except helpers.OnErrorLeaf as e:
value = e.error_leaf.value
if value == '.':
# After ErrorLeaf's that are dots, we will not do any
# completions since this probably just confuses the user.
return cached_name, []
# If we don't have a value, just use global completion.
return cached_name, self._complete_global_scope()
allowed_transitions = \
list(stack._allowed_transition_names_and_token_types())
if 'if' in allowed_transitions:
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
previous_leaf = leaf.get_previous_leaf()
indent = self._position[1]
if not (leaf.start_pos <= self._position <= leaf.end_pos):
indent = leaf.start_pos[1]
if previous_leaf is not None:
stmt = previous_leaf
while True:
stmt = search_ancestor(
stmt, 'if_stmt', 'for_stmt', 'while_stmt', 'try_stmt',
'error_node',
)
if stmt is None:
break
type_ = stmt.type
if type_ == 'error_node':
first = stmt.children[0]
if isinstance(first, Leaf):
type_ = first.value + '_stmt'
# Compare indents
if stmt.start_pos[1] == indent:
if type_ == 'if_stmt':
allowed_transitions += ['elif', 'else']
elif type_ == 'try_stmt':
allowed_transitions += ['except', 'finally', 'else']
elif type_ == 'for_stmt':
allowed_transitions.append('else')
completion_names = []
kwargs_only = False
if any(t in allowed_transitions for t in (PythonTokenTypes.NAME,
PythonTokenTypes.INDENT)):
# This means that we actually have to do type inference.
nonterminals = [stack_node.nonterminal for stack_node in stack]
nodes = _gather_nodes(stack)
if nodes and nodes[-1] in ('as', 'def', 'class'):
# No completions for ``with x as foo`` and ``import x as foo``.
# Also true for defining names as a class or function.
return cached_name, list(self._complete_inherited(is_function=True))
elif "import_stmt" in nonterminals:
level, names = parse_dotted_names(nodes, "import_from" in nonterminals)
only_modules = not ("import_from" in nonterminals and 'import' in nodes)
completion_names += self._get_importer_names(
names,
level,
only_modules=only_modules,
)
elif nonterminals[-1] in ('trailer', 'dotted_name') and nodes[-1] == '.':
dot = self._module_node.get_leaf_for_position(self._position)
if dot.type == "endmarker":
# This is a bit of a weird edge case, maybe we can somehow
# generalize this.
dot = leaf.get_previous_leaf()
cached_name, n = self._complete_trailer(dot.get_previous_leaf())
completion_names += n
elif self._is_parameter_completion():
completion_names += self._complete_params(leaf)
else:
# Apparently this looks like it's good enough to filter most cases
# so that signature completions don't randomly appear.
# To understand why this works, three things are important:
# 1. trailer with a `,` in it is either a subscript or an arglist.
# 2. If there's no `,`, it's at the start and only signatures start
# with `(`. Other trailers could start with `.` or `[`.
# 3. Decorators are very primitive and have an optional `(` with
# optional arglist in them.
if nodes[-1] in ['(', ','] \
and nonterminals[-1] in ('trailer', 'arglist', 'decorator'):
signatures = self._signatures_callback(*self._position)
if signatures:
call_details = signatures[0]._call_details
used_kwargs = list(call_details.iter_used_keyword_arguments())
positional_count = call_details.count_positional_arguments()
completion_names += _get_signature_param_names(
signatures,
positional_count,
used_kwargs,
)
kwargs_only = _must_be_kwarg(signatures, positional_count, used_kwargs)
if not kwargs_only:
completion_names += self._complete_global_scope()
completion_names += self._complete_inherited(is_function=False)
if not kwargs_only:
current_line = self._code_lines[self._position[0] - 1][:self._position[1]]
completion_names += self._complete_keywords(
allowed_transitions,
only_values=not (not current_line or current_line[-1] in ' \t.;'
and current_line[-3:] != '...')
)
return cached_name, completion_names
def _is_parameter_completion(self):
tos = self.stack[-1]
if tos.nonterminal == 'lambdef' and len(tos.nodes) == 1:
# We are at the position `lambda `, where basically the next node
# is a param.
return True
        if tos.nonterminal == 'parameters':
# Basically we are at the position `foo(`, there's nothing there
# yet, so we have no `typedargslist`.
return True
# var args is for lambdas and typed args for normal functions
return tos.nonterminal in ('typedargslist', 'varargslist') and tos.nodes[-1] == ','
def _complete_params(self, leaf):
stack_node = self.stack[-2]
if stack_node.nonterminal == 'parameters':
stack_node = self.stack[-3]
if stack_node.nonterminal == 'funcdef':
context = get_user_context(self._module_context, self._position)
node = search_ancestor(leaf, 'error_node', 'funcdef')
if node is not None:
if node.type == 'error_node':
n = node.children[0]
if n.type == 'decorators':
decorators = n.children
elif n.type == 'decorator':
decorators = [n]
else:
decorators = []
else:
decorators = node.get_decorators()
function_name = stack_node.nodes[1]
return complete_param_names(context, function_name.value, decorators)
return []
def _complete_keywords(self, allowed_transitions, only_values):
for k in allowed_transitions:
if isinstance(k, str) and k.isalpha():
if not only_values or k in ('True', 'False', 'None'):
yield keywords.KeywordName(self._inference_state, k)
def _complete_global_scope(self):
context = get_user_context(self._module_context, self._position)
debug.dbg('global completion scope: %s', context)
flow_scope_node = get_flow_scope_node(self._module_node, self._position)
filters = get_global_filters(
context,
self._position,
flow_scope_node
)
completion_names = []
for filter in filters:
completion_names += filter.values()
return completion_names
def _complete_trailer(self, previous_leaf):
inferred_context = self._module_context.create_context(previous_leaf)
values = infer_call_of_leaf(inferred_context, previous_leaf)
debug.dbg('trailer completion values: %s', values, color='MAGENTA')
# The cached name simply exists to make speed optimizations for certain
# modules.
cached_name = None
if len(values) == 1:
v, = values
if v.is_module():
if len(v.string_names) == 1:
module_name = v.string_names[0]
if module_name in ('numpy', 'tensorflow', 'matplotlib', 'pandas'):
cached_name = module_name
return cached_name, self._complete_trailer_for_values(values)
def _complete_trailer_for_values(self, values):
user_context = get_user_context(self._module_context, self._position)
return complete_trailer(user_context, values)
def _get_importer_names(self, names, level=0, only_modules=True):
names = [n.value for n in names]
i = imports.Importer(self._inference_state, names, self._module_context, level)
return i.completion_names(self._inference_state, only_modules=only_modules)
def _complete_inherited(self, is_function=True):
"""
Autocomplete inherited methods when overriding in child class.
"""
leaf = self._module_node.get_leaf_for_position(self._position, include_prefixes=True)
cls = tree.search_ancestor(leaf, 'classdef')
if cls is None:
return
# Complete the methods that are defined in the super classes.
class_value = self._module_context.create_value(cls)
if cls.start_pos[1] >= leaf.start_pos[1]:
return
filters = class_value.get_filters(is_instance=True)
# The first dict is the dictionary of class itself.
next(filters)
for filter in filters:
for name in filter.values():
# TODO we should probably check here for properties
if (name.api_type == 'function') == is_function:
yield name
def _complete_in_string(self, start_leaf, string):
"""
To make it possible for people to have completions in doctests or
generally in "Python" code in docstrings, we use the following
heuristic:
- Having an indented block of code
- Having some doctest code that starts with `>>>`
        - Having backticks that don't have whitespace inside them
"""
def iter_relevant_lines(lines):
include_next_line = False
for l in code_lines:
if include_next_line or l.startswith('>>>') or l.startswith(' '):
yield re.sub(r'^( *>>> ?| +)', '', l)
else:
yield None
include_next_line = bool(re.match(' *>>>', l))
string = dedent(string)
code_lines = split_lines(string, keepends=True)
relevant_code_lines = list(iter_relevant_lines(code_lines))
if relevant_code_lines[-1] is not None:
# Some code lines might be None, therefore get rid of that.
relevant_code_lines = ['\n' if c is None else c for c in relevant_code_lines]
return self._complete_code_lines(relevant_code_lines)
match = re.search(r'`([^`\s]+)', code_lines[-1])
if match:
return self._complete_code_lines([match.group(1)])
return []
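    # For instance, the heuristic above makes completion work on the last
    # line of a hypothetical docstring like:
    #
    #     def f():
    #         """
    #         >>> import os
    #         >>> os.pa
    #         """
    #
    # because the `>>> `-prefixed lines are extracted, dedented and then
    # parsed as a little module of their own.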
def _complete_code_lines(self, code_lines):
module_node = self._inference_state.grammar.parse(''.join(code_lines))
module_value = DocstringModule(
in_module_context=self._module_context,
inference_state=self._inference_state,
module_node=module_node,
code_lines=code_lines,
)
return Completion(
self._inference_state,
module_value.as_context(),
code_lines=code_lines,
position=module_node.end_pos,
signatures_callback=lambda *args, **kwargs: [],
fuzzy=self._fuzzy
).complete()
def _gather_nodes(stack):
nodes = []
for stack_node in stack:
if stack_node.dfa.from_rule == 'small_stmt':
nodes = []
else:
nodes += stack_node.nodes
return nodes
_string_start = re.compile(r'^\w*(\'{3}|"{3}|\'|")')
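# A minimal check of what the regex captures (optional prefix letters plus
# the opening quote):
#
#     >>> _string_start.match('rb"""foo').group(0)
#     'rb"""'
#     >>> _string_start.match('"bar').group(0)
#     '"'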
def _extract_string_while_in_string(leaf, position):
def return_part_of_leaf(leaf):
kwargs = {}
if leaf.line == position[0]:
kwargs['endpos'] = position[1] - leaf.column
match = _string_start.match(leaf.value, **kwargs)
if not match:
return None, None, None
start = match.group(0)
if leaf.line == position[0] and position[1] < leaf.column + match.end():
return None, None, None
return cut_value_at_position(leaf, position)[match.end():], leaf, start
if position < leaf.start_pos:
return None, None, None
if leaf.type == 'string':
return return_part_of_leaf(leaf)
leaves = []
while leaf is not None:
if leaf.type == 'error_leaf' and ('"' in leaf.value or "'" in leaf.value):
if len(leaf.value) > 1:
return return_part_of_leaf(leaf)
prefix_leaf = None
if not leaf.prefix:
prefix_leaf = leaf.get_previous_leaf()
if prefix_leaf is None or prefix_leaf.type != 'name' \
or not all(c in 'rubf' for c in prefix_leaf.value.lower()):
prefix_leaf = None
return (
''.join(cut_value_at_position(l, position) for l in leaves),
prefix_leaf or leaf,
('' if prefix_leaf is None else prefix_leaf.value)
+ cut_value_at_position(leaf, position),
)
if leaf.line != position[0]:
# Multi line strings are always simple error leaves and contain the
            # whole string; single line error leaves are therefore important
            # now, and since the line is different, it's not really a single
# line string anymore.
break
leaves.insert(0, leaf)
leaf = leaf.get_previous_leaf()
return None, None, None
def complete_trailer(user_context, values):
completion_names = []
for value in values:
for filter in value.get_filters(origin_scope=user_context.tree_node):
completion_names += filter.values()
if not value.is_stub() and isinstance(value, TreeInstance):
completion_names += _complete_getattr(user_context, value)
python_values = convert_values(values)
for c in python_values:
if c not in values:
for filter in c.get_filters(origin_scope=user_context.tree_node):
completion_names += filter.values()
return completion_names
def _complete_getattr(user_context, instance):
"""
A heuristic to make completion for proxy objects work. This is not
intended to work in all cases. It works exactly in this case:
def __getattr__(self, name):
...
return getattr(any_object, name)
It is important that the return contains getattr directly, otherwise it
won't work anymore. It's really just a stupid heuristic. It will not
    work if you write e.g. `return (getattr(o, name))`, because of the
additional parentheses. It will also not work if you move the getattr
to some other place that is not the return statement itself.
It is intentional that it doesn't work in all cases. Generally it's
really hard to do even this case (as you can see below). Most people
will write it like this anyway and the other ones, well they are just
out of luck I guess :) ~dave.
"""
names = (instance.get_function_slot_names('__getattr__')
or instance.get_function_slot_names('__getattribute__'))
functions = ValueSet.from_sets(
name.infer()
for name in names
)
for func in functions:
tree_node = func.tree_node
if tree_node is None or tree_node.type != 'funcdef':
continue
for return_stmt in tree_node.iter_return_stmts():
# Basically until the next comment we just try to find out if a
# return statement looks exactly like `return getattr(x, name)`.
if return_stmt.type != 'return_stmt':
continue
atom_expr = return_stmt.children[1]
if atom_expr.type != 'atom_expr':
continue
atom = atom_expr.children[0]
trailer = atom_expr.children[1]
if len(atom_expr.children) != 2 or atom.type != 'name' \
or atom.value != 'getattr':
continue
arglist = trailer.children[1]
if arglist.type != 'arglist' or len(arglist.children) < 3:
continue
context = func.as_context()
object_node = arglist.children[0]
# Make sure it's a param: foo in __getattr__(self, foo)
name_node = arglist.children[2]
name_list = context.goto(name_node, name_node.start_pos)
if not any(n.api_type == 'param' for n in name_list):
continue
# Now that we know that these are most probably completion
# objects, we just infer the object and return them as
# completions.
objects = context.infer_node(object_node)
return complete_trailer(user_context, objects)
return []
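# A hypothetical proxy class for which the heuristic above kicks in:
#
#     class Proxy:
#         def __init__(self, wrapped):
#             self._wrapped = wrapped
#         def __getattr__(self, name):
#             return getattr(self._wrapped, name)
#
# Completing `Proxy('foo').up` would then offer `upper`, because the object
# passed to `getattr` (here `self._wrapped`, inferred as a `str`) provides
# the completions.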
def search_in_module(inference_state, module_context, names, wanted_names,
wanted_type, complete=False, fuzzy=False,
ignore_imports=False, convert=False):
for s in wanted_names[:-1]:
new_names = []
for n in names:
if s == n.string_name:
if n.tree_name is not None and n.api_type in ('module', 'namespace') \
and ignore_imports:
continue
new_names += complete_trailer(
module_context,
n.infer()
)
debug.dbg('dot lookup on search %s from %s', new_names, names[:10])
names = new_names
last_name = wanted_names[-1].lower()
for n in names:
string = n.string_name.lower()
if complete and helpers.match(string, last_name, fuzzy=fuzzy) \
or not complete and string == last_name:
if isinstance(n, SubModuleName):
names = [v.name for v in n.infer()]
else:
names = [n]
if convert:
names = convert_names(names)
for n2 in names:
if complete:
def_ = classes.Completion(
inference_state, n2,
stack=None,
like_name_length=len(last_name),
is_fuzzy=fuzzy,
)
else:
def_ = classes.Name(inference_state, n2)
if not wanted_type or wanted_type == def_.type:
yield def_
| 27,191 | Python | .py | 570 | 35.077193 | 98 | 0.578888 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,953 | helpers.py | DamnWidget_anaconda/anaconda_lib/jedi/api/helpers.py | """
Helpers for the API
"""
import re
from collections import namedtuple
from textwrap import dedent
from itertools import chain
from functools import wraps
from inspect import Parameter
from parso.python.parser import Parser
from parso.python import tree
from jedi.inference.base_value import NO_VALUES
from jedi.inference.syntax_tree import infer_atom
from jedi.inference.helpers import infer_call_of_leaf
from jedi.inference.compiled import get_string_value_set
from jedi.cache import signature_time_cache, memoize_method
from jedi.parser_utils import get_parent_scope
CompletionParts = namedtuple('CompletionParts', ['path', 'has_dot', 'name'])
def _start_match(string, like_name):
return string.startswith(like_name)
def _fuzzy_match(string, like_name):
if len(like_name) <= 1:
return like_name in string
pos = string.find(like_name[0])
if pos >= 0:
return _fuzzy_match(string[pos + 1:], like_name[1:])
return False
def match(string, like_name, fuzzy=False):
if fuzzy:
return _fuzzy_match(string, like_name)
else:
return _start_match(string, like_name)
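# A quick illustration of the two matching modes:
#
#     >>> match('isinstance', 'isi')
#     True
#     >>> match('isinstance', 'sn')  # not a prefix
#     False
#     >>> match('isinstance', 'sn', fuzzy=True)  # but a subsequence
#     True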
def sorted_definitions(defs):
    # Note: `or ''` below is required because `module_path` could be None.
return sorted(defs, key=lambda x: (str(x.module_path or ''),
x.line or 0,
x.column or 0,
x.name))
def get_on_completion_name(module_node, lines, position):
leaf = module_node.get_leaf_for_position(position)
if leaf is None or leaf.type in ('string', 'error_leaf'):
# Completions inside strings are a bit special, we need to parse the
# string. The same is true for comments and error_leafs.
line = lines[position[0] - 1]
# The first step of completions is to get the name
return re.search(r'(?!\d)\w+$|$', line[:position[1]]).group(0)
elif leaf.type not in ('name', 'keyword'):
return ''
return leaf.value[:position[1] - leaf.start_pos[1]]
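# E.g. with the cursor at the end of `foo.ba`, the returned like-name is
# 'ba'; the regex fallback extracts the same trailing word when the cursor
# sits inside a string or comment.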
def _get_code(code_lines, start_pos, end_pos):
# Get relevant lines.
lines = code_lines[start_pos[0] - 1:end_pos[0]]
# Remove the parts at the end of the line.
lines[-1] = lines[-1][:end_pos[1]]
# Remove first line indentation.
lines[0] = lines[0][start_pos[1]:]
return ''.join(lines)
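# For example (positions are 1-based lines and 0-based columns):
#
#     >>> _get_code(['def f():\n', '    return 1\n'], (1, 4), (2, 12))
#     'f():\n    return 1'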
class OnErrorLeaf(Exception):
@property
def error_leaf(self):
return self.args[0]
def _get_code_for_stack(code_lines, leaf, position):
# It might happen that we're on whitespace or on a comment. This means
# that we would not get the right leaf.
if leaf.start_pos >= position:
# If we're not on a comment simply get the previous leaf and proceed.
leaf = leaf.get_previous_leaf()
if leaf is None:
return '' # At the beginning of the file.
is_after_newline = leaf.type == 'newline'
while leaf.type == 'newline':
leaf = leaf.get_previous_leaf()
if leaf is None:
return ''
if leaf.type == 'error_leaf' or leaf.type == 'string':
if leaf.start_pos[0] < position[0]:
# On a different line, we just begin anew.
return ''
# Error leafs cannot be parsed, completion in strings is also
# impossible.
raise OnErrorLeaf(leaf)
else:
user_stmt = leaf
while True:
if user_stmt.parent.type in ('file_input', 'suite', 'simple_stmt'):
break
user_stmt = user_stmt.parent
if is_after_newline:
if user_stmt.start_pos[1] > position[1]:
# This means that it's actually a dedent and that means that we
# start without value (part of a suite).
return ''
# This is basically getting the relevant lines.
return _get_code(code_lines, user_stmt.get_start_pos_of_prefix(), position)
def get_stack_at_position(grammar, code_lines, leaf, pos):
"""
Returns the possible node names (e.g. import_from, xor_test or yield_stmt).
"""
class EndMarkerReached(Exception):
pass
def tokenize_without_endmarker(code):
# TODO This is for now not an official parso API that exists purely
# for Jedi.
tokens = grammar._tokenize(code)
for token in tokens:
if token.string == safeword:
raise EndMarkerReached()
elif token.prefix.endswith(safeword):
# This happens with comments.
raise EndMarkerReached()
elif token.string.endswith(safeword):
yield token # Probably an f-string literal that was not finished.
raise EndMarkerReached()
else:
yield token
    # The code might be indented, just remove the indentation.
code = dedent(_get_code_for_stack(code_lines, leaf, pos))
# We use a word to tell Jedi when we have reached the start of the
# completion.
# Use Z as a prefix because it's not part of a number suffix.
safeword = 'ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI'
code = code + ' ' + safeword
p = Parser(grammar._pgen_grammar, error_recovery=True)
try:
p.parse(tokens=tokenize_without_endmarker(code))
except EndMarkerReached:
return p.stack
raise SystemError(
"This really shouldn't happen. There's a bug in Jedi:\n%s"
% list(tokenize_without_endmarker(code))
)
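# As a sketch of the trick above: completing after `if foo.` makes the
# parser consume roughly
#
#     if foo. ZZZ_USER_WANTS_TO_COMPLETE_HERE_WITH_JEDI
#
# and the pgen stack at the moment the safeword token shows up reveals
# which nonterminals (e.g. `if_stmt`) are open at the cursor.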
def infer(inference_state, context, leaf):
if leaf.type == 'name':
return inference_state.infer(context, leaf)
parent = leaf.parent
definitions = NO_VALUES
if parent.type == 'atom':
# e.g. `(a + b)`
definitions = context.infer_node(leaf.parent)
elif parent.type == 'trailer':
# e.g. `a()`
definitions = infer_call_of_leaf(context, leaf)
elif isinstance(leaf, tree.Literal):
# e.g. `"foo"` or `1.0`
return infer_atom(context, leaf)
elif leaf.type in ('fstring_string', 'fstring_start', 'fstring_end'):
return get_string_value_set(inference_state)
return definitions
def filter_follow_imports(names, follow_builtin_imports=False):
for name in names:
if name.is_import():
new_names = list(filter_follow_imports(
name.goto(),
follow_builtin_imports=follow_builtin_imports,
))
found_builtin = False
if follow_builtin_imports:
for new_name in new_names:
if new_name.start_pos is None:
found_builtin = True
if found_builtin:
yield name
else:
yield from new_names
else:
yield name
class CallDetails:
def __init__(self, bracket_leaf, children, position):
self.bracket_leaf = bracket_leaf
self._children = children
self._position = position
@property
def index(self):
return _get_index_and_key(self._children, self._position)[0]
@property
def keyword_name_str(self):
return _get_index_and_key(self._children, self._position)[1]
@memoize_method
def _list_arguments(self):
return list(_iter_arguments(self._children, self._position))
def calculate_index(self, param_names):
positional_count = 0
used_names = set()
star_count = -1
args = self._list_arguments()
if not args:
if param_names:
return 0
else:
return None
is_kwarg = False
for i, (star_count, key_start, had_equal) in enumerate(args):
is_kwarg |= had_equal | (star_count == 2)
if star_count:
                pass  # For now do nothing, we don't know what's in there.
else:
if i + 1 != len(args): # Not last
if had_equal:
used_names.add(key_start)
else:
positional_count += 1
for i, param_name in enumerate(param_names):
kind = param_name.get_kind()
if not is_kwarg:
if kind == Parameter.VAR_POSITIONAL:
return i
if kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY):
if i == positional_count:
return i
if key_start is not None and not star_count == 1 or star_count == 2:
if param_name.string_name not in used_names \
and (kind == Parameter.KEYWORD_ONLY
or kind == Parameter.POSITIONAL_OR_KEYWORD
and positional_count <= i):
if star_count:
return i
if had_equal:
if param_name.string_name == key_start:
return i
else:
if param_name.string_name.startswith(key_start):
return i
if kind == Parameter.VAR_KEYWORD:
return i
return None
def iter_used_keyword_arguments(self):
for star_count, key_start, had_equal in list(self._list_arguments()):
if had_equal and key_start:
yield key_start
def count_positional_arguments(self):
count = 0
for star_count, key_start, had_equal in self._list_arguments()[:-1]:
if star_count:
break
count += 1
return count
def _iter_arguments(nodes, position):
def remove_after_pos(name):
if name.type != 'name':
return None
return name.value[:position[1] - name.start_pos[1]]
# Returns Generator[Tuple[star_count, Optional[key_start: str], had_equal]]
nodes_before = [c for c in nodes if c.start_pos < position]
if nodes_before[-1].type == 'arglist':
yield from _iter_arguments(nodes_before[-1].children, position)
return
previous_node_yielded = False
stars_seen = 0
for i, node in enumerate(nodes_before):
if node.type == 'argument':
previous_node_yielded = True
first = node.children[0]
second = node.children[1]
if second == '=':
if second.start_pos < position:
yield 0, first.value, True
else:
yield 0, remove_after_pos(first), False
elif first in ('*', '**'):
yield len(first.value), remove_after_pos(second), False
else:
# Must be a Comprehension
first_leaf = node.get_first_leaf()
if first_leaf.type == 'name' and first_leaf.start_pos >= position:
yield 0, remove_after_pos(first_leaf), False
else:
yield 0, None, False
stars_seen = 0
elif node.type == 'testlist_star_expr':
for n in node.children[::2]:
if n.type == 'star_expr':
stars_seen = 1
n = n.children[1]
yield stars_seen, remove_after_pos(n), False
stars_seen = 0
# The count of children is even if there's a comma at the end.
previous_node_yielded = bool(len(node.children) % 2)
elif isinstance(node, tree.PythonLeaf) and node.value == ',':
if not previous_node_yielded:
yield stars_seen, '', False
stars_seen = 0
previous_node_yielded = False
elif isinstance(node, tree.PythonLeaf) and node.value in ('*', '**'):
stars_seen = len(node.value)
elif node == '=' and nodes_before[-1]:
previous_node_yielded = True
before = nodes_before[i - 1]
if before.type == 'name':
yield 0, before.value, True
else:
yield 0, None, False
# Just ignore the star that is probably a syntax error.
stars_seen = 0
if not previous_node_yielded:
if nodes_before[-1].type == 'name':
yield stars_seen, remove_after_pos(nodes_before[-1]), False
else:
yield stars_seen, '', False
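# A rough sketch of what `_iter_arguments` yields for `foo(a, *b, c=1` with
# the cursor at the very end (assuming a regularly parsed arglist):
#
#     (0, '', False)   # plain positional `a`
#     (1, 'b', False)  # starred argument `*b`
#     (0, 'c', True)   # keyword argument `c=` (the `=` was already typed)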
def _get_index_and_key(nodes, position):
"""
Returns the amount of commas and the keyword argument string.
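    For a call like ``foo(a, b=`` with the cursor at the end, this would
    return roughly ``(1, 'b')``: one comma so far and ``b`` as the started
    keyword argument.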
"""
nodes_before = [c for c in nodes if c.start_pos < position]
if nodes_before[-1].type == 'arglist':
return _get_index_and_key(nodes_before[-1].children, position)
key_str = None
last = nodes_before[-1]
if last.type == 'argument' and last.children[1] == '=' \
and last.children[1].end_pos <= position:
        # The cursor is already behind the `=` of this keyword argument.
key_str = last.children[0].value
elif last == '=':
key_str = nodes_before[-2].value
return nodes_before.count(','), key_str
def _get_signature_details_from_error_node(node, additional_children, position):
for index, element in reversed(list(enumerate(node.children))):
# `index > 0` means that it's a trailer and not an atom.
if element == '(' and element.end_pos <= position and index > 0:
            # It's an error node; we don't want to match too much, just
            # matching up to the parenthesis is enough.
children = node.children[index:]
name = element.get_previous_leaf()
if name is None:
continue
if name.type == 'name' or name.parent.type in ('trailer', 'atom'):
return CallDetails(element, children + additional_children, position)
def get_signature_details(module, position):
leaf = module.get_leaf_for_position(position, include_prefixes=True)
# It's easier to deal with the previous token than the next one in this
# case.
if leaf.start_pos >= position:
# Whitespace / comments after the leaf count towards the previous leaf.
leaf = leaf.get_previous_leaf()
if leaf is None:
return None
# Now that we know where we are in the syntax tree, we start to look at
# parents for possible function definitions.
node = leaf.parent
while node is not None:
if node.type in ('funcdef', 'classdef', 'decorated', 'async_stmt'):
# Don't show signatures if there's stuff before it that just
# makes it feel strange to have a signature.
return None
additional_children = []
for n in reversed(node.children):
if n.start_pos < position:
if n.type == 'error_node':
result = _get_signature_details_from_error_node(
n, additional_children, position
)
if result is not None:
return result
additional_children[0:0] = n.children
continue
additional_children.insert(0, n)
# Find a valid trailer
if node.type == 'trailer' and node.children[0] == '(' \
or node.type == 'decorator' and node.children[2] == '(':
# Additionally we have to check that an ending parenthesis isn't
# interpreted wrong. There are two cases:
# 1. Cursor before paren -> The current signature is good
# 2. Cursor after paren -> We need to skip the current signature
if not (leaf is node.children[-1] and position >= leaf.end_pos):
leaf = node.get_previous_leaf()
if leaf is None:
return None
return CallDetails(
node.children[0] if node.type == 'trailer' else node.children[2],
node.children,
position
)
node = node.parent
return None
@signature_time_cache("call_signatures_validity")
def cache_signatures(inference_state, context, bracket_leaf, code_lines, user_pos):
"""This function calculates the cache key."""
line_index = user_pos[0] - 1
before_cursor = code_lines[line_index][:user_pos[1]]
other_lines = code_lines[bracket_leaf.start_pos[0]:line_index]
whole = ''.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = context.get_root_context().py__file__()
if module_path is None:
yield None # Don't cache!
else:
yield (module_path, before_bracket, bracket_leaf.start_pos)
yield infer(
inference_state,
context,
bracket_leaf.get_previous_leaf(),
)
def validate_line_column(func):
@wraps(func)
def wrapper(self, line=None, column=None, *args, **kwargs):
line = max(len(self._code_lines), 1) if line is None else line
if not (0 < line <= len(self._code_lines)):
raise ValueError('`line` parameter is not in a valid range.')
line_string = self._code_lines[line - 1]
line_len = len(line_string)
if line_string.endswith('\r\n'):
line_len -= 2
elif line_string.endswith('\n'):
line_len -= 1
column = line_len if column is None else column
if not (0 <= column <= line_len):
raise ValueError('`column` parameter (%d) is not in a valid range '
'(0-%d) for line %d (%r).' % (
column, line_len, line, line_string))
return func(self, line, column, *args, **kwargs)
return wrapper
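# A sketch of what the decorator normalizes, assuming a Script-like object
# with `_code_lines`:
#
#     @validate_line_column
#     def complete(self, line, column): ...
#
#     obj.complete()      # line defaults to the last line, column to its length
#     obj.complete(0, 0)  # raises ValueError, because lines are 1-based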
def get_module_names(module, all_scopes, definitions=True, references=False):
"""
    Returns the name leaves of a module, filtered to definitions and/or
    references.
"""
def def_ref_filter(name):
is_def = name.is_definition()
return definitions and is_def or references and not is_def
names = list(chain.from_iterable(module.get_used_names().values()))
if not all_scopes:
# We have to filter all the names that don't have the module as a
# parent_scope. There's None as a parent, because nodes in the module
# node have the parent module and not suite as all the others.
# Therefore it's important to catch that case.
def is_module_scope_name(name):
parent_scope = get_parent_scope(name)
# async functions have an extra wrapper. Strip it.
if parent_scope and parent_scope.type == 'async_stmt':
parent_scope = parent_scope.parent
return parent_scope in (module, None)
names = [n for n in names if is_module_scope_name(n)]
return filter(def_ref_filter, names)
def split_search_string(name):
type, _, dotted_names = name.rpartition(' ')
if type == 'def':
type = 'function'
return type, dotted_names.split('.')
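# For example:
#
#     >>> split_search_string('def foo.bar')
#     ('function', ['foo', 'bar'])
#     >>> split_search_string('os.path')
#     ('', ['os', 'path'])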
| 18,944 | Python | .py | 437 | 32.826087 | 88 | 0.586364 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,954 | replstartup.py | DamnWidget_anaconda/anaconda_lib/jedi/api/replstartup.py | """
To use Jedi completion in the Python interpreter, add the following to your shell
setup (e.g., ``.bashrc``). This works only on Linux/Mac, because readline is
not available on Windows. If you still want Jedi autocompletion in your REPL,
just use IPython instead::
export PYTHONSTARTUP="$(python -m jedi repl)"
Then you will be able to use the Jedi completer in your Python interpreter::
$ python
Python 3.9.2+ (default, Jul 20 2020, 22:15:08)
[GCC 4.6.1] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import os
>>> os.path.join('a', 'b').split().in<TAB> # doctest: +SKIP
..dex ..sert
"""
import jedi.utils
from jedi import __version__ as __jedi_version__
print('REPL completion using Jedi %s' % __jedi_version__)
jedi.utils.setup_readline(fuzzy=False)
del jedi
# Note: try not to do many things here, as it will contaminate the global
# namespace of the interpreter.
__init__.py | DamnWidget_anaconda/anaconda_lib/jedi/api/refactoring/__init__.py
import difflib
from pathlib import Path
from typing import Dict, Iterable, Tuple
from parso import split_lines
from jedi.api.exceptions import RefactoringError
EXPRESSION_PARTS = (
'or_test and_test not_test comparison '
'expr xor_expr and_expr shift_expr arith_expr term factor power atom_expr'
).split()
class ChangedFile:
def __init__(self, inference_state, from_path, to_path,
module_node, node_to_str_map):
self._inference_state = inference_state
self._from_path = from_path
self._to_path = to_path
self._module_node = module_node
self._node_to_str_map = node_to_str_map
def get_diff(self):
old_lines = split_lines(self._module_node.get_code(), keepends=True)
new_lines = split_lines(self.get_new_code(), keepends=True)
# Add a newline at the end if it's missing. Otherwise the diff will be
# very weird. A `diff -u file1 file2` would show the string:
#
# \ No newline at end of file
#
# This is not necessary IMO, because Jedi does not really play with
# newlines and the ending newline does not really matter in Python
# files. ~dave
if old_lines[-1] != '':
old_lines[-1] += '\n'
if new_lines[-1] != '':
new_lines[-1] += '\n'
project_path = self._inference_state.project.path
if self._from_path is None:
from_p = ''
else:
from_p = self._from_path.relative_to(project_path)
if self._to_path is None:
to_p = ''
else:
to_p = self._to_path.relative_to(project_path)
diff = difflib.unified_diff(
old_lines, new_lines,
fromfile=str(from_p),
tofile=str(to_p),
)
# Apparently there's a space at the end of the diff - for whatever
# reason.
return ''.join(diff).rstrip(' ')
def get_new_code(self):
return self._inference_state.grammar.refactor(self._module_node, self._node_to_str_map)
def apply(self):
if self._from_path is None:
raise RefactoringError(
'Cannot apply a refactoring on a Script with path=None'
)
with open(self._from_path, 'w', newline='') as f:
f.write(self.get_new_code())
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._from_path)
class Refactoring:
def __init__(self, inference_state, file_to_node_changes, renames=()):
self._inference_state = inference_state
self._renames = renames
self._file_to_node_changes = file_to_node_changes
def get_changed_files(self) -> Dict[Path, ChangedFile]:
def calculate_to_path(p):
if p is None:
return p
p = str(p)
for from_, to in renames:
if p.startswith(str(from_)):
p = str(to) + p[len(str(from_)):]
return Path(p)
renames = self.get_renames()
return {
path: ChangedFile(
self._inference_state,
from_path=path,
to_path=calculate_to_path(path),
module_node=next(iter(map_)).get_root_node(),
node_to_str_map=map_
) for path, map_ in sorted(self._file_to_node_changes.items())
}
def get_renames(self) -> Iterable[Tuple[Path, Path]]:
"""
Files can be renamed in a refactoring.
"""
return sorted(self._renames)
def get_diff(self):
text = ''
project_path = self._inference_state.project.path
for from_, to in self.get_renames():
text += 'rename from %s\nrename to %s\n' \
% (from_.relative_to(project_path), to.relative_to(project_path))
return text + ''.join(f.get_diff() for f in self.get_changed_files().values())
def apply(self):
"""
Applies the whole refactoring to the files, which includes renames.
"""
for f in self.get_changed_files().values():
f.apply()
for old, new in self.get_renames():
old.rename(new)
def _calculate_rename(path, new_name):
dir_ = path.parent
if path.name in ('__init__.py', '__init__.pyi'):
return dir_, dir_.parent.joinpath(new_name)
return path, dir_.joinpath(new_name + path.suffix)
def rename(inference_state, definitions, new_name):
file_renames = set()
file_tree_name_map = {}
if not definitions:
raise RefactoringError("There is no name under the cursor")
for d in definitions:
tree_name = d._name.tree_name
if d.type == 'module' and tree_name is None:
p = None if d.module_path is None else Path(d.module_path)
file_renames.add(_calculate_rename(p, new_name))
else:
# This private access is ok in a way. It's not public to
# protect Jedi users from seeing it.
if tree_name is not None:
fmap = file_tree_name_map.setdefault(d.module_path, {})
fmap[tree_name] = tree_name.prefix + new_name
return Refactoring(inference_state, file_tree_name_map, file_renames)
def inline(inference_state, names):
if not names:
raise RefactoringError("There is no name under the cursor")
if any(n.api_type in ('module', 'namespace') for n in names):
raise RefactoringError("Cannot inline imports, modules or namespaces")
if any(n.tree_name is None for n in names):
raise RefactoringError("Cannot inline builtins/extensions")
definitions = [n for n in names if n.tree_name.is_definition()]
if len(definitions) == 0:
raise RefactoringError("No definition found to inline")
if len(definitions) > 1:
raise RefactoringError("Cannot inline a name with multiple definitions")
if len(names) == 1:
raise RefactoringError("There are no references to this name")
tree_name = definitions[0].tree_name
expr_stmt = tree_name.get_definition()
if expr_stmt.type != 'expr_stmt':
type_ = dict(
funcdef='function',
classdef='class',
).get(expr_stmt.type, expr_stmt.type)
raise RefactoringError("Cannot inline a %s" % type_)
if len(expr_stmt.get_defined_names(include_setitem=True)) > 1:
raise RefactoringError("Cannot inline a statement with multiple definitions")
first_child = expr_stmt.children[1]
if first_child.type == 'annassign' and len(first_child.children) == 4:
first_child = first_child.children[2]
if first_child != '=':
if first_child.type == 'annassign':
raise RefactoringError(
'Cannot inline a statement that is defined by an annotation'
)
else:
raise RefactoringError(
'Cannot inline a statement with "%s"'
% first_child.get_code(include_prefix=False)
)
rhs = expr_stmt.get_rhs()
replace_code = rhs.get_code(include_prefix=False)
references = [n for n in names if not n.tree_name.is_definition()]
file_to_node_changes = {}
for name in references:
tree_name = name.tree_name
path = name.get_root_context().py__file__()
s = replace_code
if rhs.type == 'testlist_star_expr' \
or tree_name.parent.type in EXPRESSION_PARTS \
or tree_name.parent.type == 'trailer' \
and tree_name.parent.get_next_sibling() is not None:
s = '(' + replace_code + ')'
of_path = file_to_node_changes.setdefault(path, {})
n = tree_name
prefix = n.prefix
par = n.parent
if par.type == 'trailer' and par.children[0] == '.':
prefix = par.parent.children[0].prefix
n = par
for some_node in par.parent.children[:par.parent.children.index(par)]:
of_path[some_node] = ''
of_path[n] = prefix + s
path = definitions[0].get_root_context().py__file__()
changes = file_to_node_changes.setdefault(path, {})
changes[expr_stmt] = _remove_indent_of_prefix(expr_stmt.get_first_leaf().prefix)
next_leaf = expr_stmt.get_next_leaf()
# Most of the time we have to remove the newline at the end of the
# statement, but if there's a comment we might not need to.
if next_leaf.prefix.strip(' \t') == '' \
and (next_leaf.type == 'newline' or next_leaf == ';'):
changes[next_leaf] = ''
return Refactoring(inference_state, file_to_node_changes)
def _remove_indent_of_prefix(prefix):
r"""
Removes the last indentation of a prefix, e.g. " \n \n " becomes " \n \n".
"""
return ''.join(split_lines(prefix, keepends=True)[:-1])
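if __name__ == '__main__':
    # A minimal sketch (not part of the original module): Refactoring and
    # ChangedFile objects are normally obtained through Jedi's public API.
    # Assumes a recent `jedi` is importable and the file lives below the
    # project root.
    import jedi

    source = 'alias = 42\nprint(alias)\n'
    script = jedi.Script(source, path='example.py')
    refactoring = script.rename(line=1, column=0, new_name='answer')
    print(refactoring.get_diff())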
extract.py | DamnWidget_anaconda/anaconda_lib/jedi/api/refactoring/extract.py
from textwrap import dedent
from parso import split_lines
from jedi import debug
from jedi.api.exceptions import RefactoringError
from jedi.api.refactoring import Refactoring, EXPRESSION_PARTS
from jedi.common import indent_block
from jedi.parser_utils import function_is_classmethod, function_is_staticmethod
_DEFINITION_SCOPES = ('suite', 'file_input')
_VARIABLE_EXTRACTABLE = EXPRESSION_PARTS + \
('atom testlist_star_expr testlist test lambdef lambdef_nocond '
'keyword name number string fstring').split()
def extract_variable(inference_state, path, module_node, name, pos, until_pos):
nodes = _find_nodes(module_node, pos, until_pos)
debug.dbg('Extracting nodes: %s', nodes)
is_expression, message = _is_expression_with_error(nodes)
if not is_expression:
raise RefactoringError(message)
generated_code = name + ' = ' + _expression_nodes_to_string(nodes)
file_to_node_changes = {path: _replace(nodes, name, generated_code, pos)}
return Refactoring(inference_state, file_to_node_changes)
def _is_expression_with_error(nodes):
"""
Returns a tuple (is_expression, error_string).
"""
if any(node.type == 'name' and node.is_definition() for node in nodes):
return False, 'Cannot extract a name that defines something'
    if nodes[0].type not in _VARIABLE_EXTRACTABLE:
return False, 'Cannot extract a "%s"' % nodes[0].type
return True, ''
def _find_nodes(module_node, pos, until_pos):
"""
Looks up a module and tries to find the appropriate amount of nodes that
are in there.
"""
start_node = module_node.get_leaf_for_position(pos, include_prefixes=True)
if until_pos is None:
if start_node.type == 'operator':
next_leaf = start_node.get_next_leaf()
if next_leaf is not None and next_leaf.start_pos == pos:
start_node = next_leaf
if _is_not_extractable_syntax(start_node):
start_node = start_node.parent
if start_node.parent.type == 'trailer':
start_node = start_node.parent.parent
while start_node.parent.type in EXPRESSION_PARTS:
start_node = start_node.parent
nodes = [start_node]
else:
# Get the next leaf if we are at the end of a leaf
if start_node.end_pos == pos:
next_leaf = start_node.get_next_leaf()
if next_leaf is not None:
start_node = next_leaf
        # Some syntax is not extractable; just use its parent
if _is_not_extractable_syntax(start_node):
start_node = start_node.parent
# Find the end
end_leaf = module_node.get_leaf_for_position(until_pos, include_prefixes=True)
if end_leaf.start_pos > until_pos:
end_leaf = end_leaf.get_previous_leaf()
if end_leaf is None:
raise RefactoringError('Cannot extract anything from that')
parent_node = start_node
while parent_node.end_pos < end_leaf.end_pos:
parent_node = parent_node.parent
nodes = _remove_unwanted_expression_nodes(parent_node, pos, until_pos)
# If the user marks just a return statement, we return the expression
# instead of the whole statement, because the user obviously wants to
# extract that part.
if len(nodes) == 1 and start_node.type in ('return_stmt', 'yield_expr'):
return [nodes[0].children[1]]
return nodes
def _replace(nodes, expression_replacement, extracted, pos,
insert_before_leaf=None, remaining_prefix=None):
# Now try to replace the nodes found with a variable and move the code
# before the current statement.
definition = _get_parent_definition(nodes[0])
if insert_before_leaf is None:
insert_before_leaf = definition.get_first_leaf()
first_node_leaf = nodes[0].get_first_leaf()
lines = split_lines(insert_before_leaf.prefix, keepends=True)
if first_node_leaf is insert_before_leaf:
if remaining_prefix is not None:
# The remaining prefix has already been calculated.
lines[:-1] = remaining_prefix
lines[-1:-1] = [indent_block(extracted, lines[-1]) + '\n']
extracted_prefix = ''.join(lines)
replacement_dct = {}
if first_node_leaf is insert_before_leaf:
replacement_dct[nodes[0]] = extracted_prefix + expression_replacement
else:
if remaining_prefix is None:
p = first_node_leaf.prefix
else:
p = remaining_prefix + _get_indentation(nodes[0])
replacement_dct[nodes[0]] = p + expression_replacement
replacement_dct[insert_before_leaf] = extracted_prefix + insert_before_leaf.value
for node in nodes[1:]:
replacement_dct[node] = ''
return replacement_dct
def _expression_nodes_to_string(nodes):
return ''.join(n.get_code(include_prefix=i != 0) for i, n in enumerate(nodes))
def _suite_nodes_to_string(nodes, pos):
n = nodes[0]
prefix, part_of_code = _split_prefix_at(n.get_first_leaf(), pos[0] - 1)
code = part_of_code + n.get_code(include_prefix=False) \
+ ''.join(n.get_code() for n in nodes[1:])
return prefix, code
def _split_prefix_at(leaf, until_line):
"""
Returns a tuple of the leaf's prefix, split at the until_line
position.
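    For example (illustrative): for a leaf starting on line 3 whose prefix
    spans the comment lines ``# a`` and ``# b`` plus a final partial line of
    spaces, splitting at until_line=2 returns the two comment lines joined
    as the first element and the spaces as the second.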
"""
    # "second" refers to the second element of the returned tuple
second_line_count = leaf.start_pos[0] - until_line
lines = split_lines(leaf.prefix, keepends=True)
return ''.join(lines[:-second_line_count]), ''.join(lines[-second_line_count:])
def _get_indentation(node):
return split_lines(node.get_first_leaf().prefix)[-1]
def _get_parent_definition(node):
"""
Returns the statement where a node is defined.
"""
while node is not None:
if node.parent.type in _DEFINITION_SCOPES:
return node
node = node.parent
raise NotImplementedError('We should never even get here')
def _remove_unwanted_expression_nodes(parent_node, pos, until_pos):
"""
    This function makes it possible, for `1 * 2 + 3`, to extract `2 + 3`,
    even though `2 + 3` is not an expression node of its own in the tree.
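    E.g. (illustrative): selecting `2 + 3` inside the arith_expr node for
    `1 * 2 + 3` returns the child nodes `[2, '+', 3]` rather than the whole
    expression.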
"""
typ = parent_node.type
is_suite_part = typ in ('suite', 'file_input')
if typ in EXPRESSION_PARTS or is_suite_part:
nodes = parent_node.children
for i, n in enumerate(nodes):
if n.end_pos > pos:
start_index = i
if n.type == 'operator':
start_index -= 1
break
for i, n in reversed(list(enumerate(nodes))):
if n.start_pos < until_pos:
end_index = i
if n.type == 'operator':
end_index += 1
# Something like `not foo or bar` should not be cut after not
for n2 in nodes[i:]:
if _is_not_extractable_syntax(n2):
end_index += 1
else:
break
break
nodes = nodes[start_index:end_index + 1]
if not is_suite_part:
nodes[0:1] = _remove_unwanted_expression_nodes(nodes[0], pos, until_pos)
nodes[-1:] = _remove_unwanted_expression_nodes(nodes[-1], pos, until_pos)
return nodes
return [parent_node]
def _is_not_extractable_syntax(node):
return node.type == 'operator' \
or node.type == 'keyword' and node.value not in ('None', 'True', 'False')
def extract_function(inference_state, path, module_context, name, pos, until_pos):
nodes = _find_nodes(module_context.tree_node, pos, until_pos)
assert len(nodes)
is_expression, _ = _is_expression_with_error(nodes)
context = module_context.create_context(nodes[0])
is_bound_method = context.is_bound_method()
params, return_variables = list(_find_inputs_and_outputs(module_context, context, nodes))
# Find variables
# Is a class method / method
if context.is_module():
insert_before_leaf = None # Leaf will be determined later
else:
node = _get_code_insertion_node(context.tree_node, is_bound_method)
insert_before_leaf = node.get_first_leaf()
if is_expression:
code_block = 'return ' + _expression_nodes_to_string(nodes) + '\n'
remaining_prefix = None
has_ending_return_stmt = False
else:
has_ending_return_stmt = _is_node_ending_return_stmt(nodes[-1])
if not has_ending_return_stmt:
# Find the actually used variables (of the defined ones). If none are
# used (e.g. if the range covers the whole function), return the last
# defined variable.
return_variables = list(_find_needed_output_variables(
context,
nodes[0].parent,
nodes[-1].end_pos,
return_variables
)) or [return_variables[-1]] if return_variables else []
remaining_prefix, code_block = _suite_nodes_to_string(nodes, pos)
after_leaf = nodes[-1].get_next_leaf()
first, second = _split_prefix_at(after_leaf, until_pos[0])
code_block += first
code_block = dedent(code_block)
if not has_ending_return_stmt:
output_var_str = ', '.join(return_variables)
code_block += 'return ' + output_var_str + '\n'
# Check if we have to raise RefactoringError
_check_for_non_extractables(nodes[:-1] if has_ending_return_stmt else nodes)
decorator = ''
self_param = None
if is_bound_method:
if not function_is_staticmethod(context.tree_node):
function_param_names = context.get_value().get_param_names()
if len(function_param_names):
self_param = function_param_names[0].string_name
params = [p for p in params if p != self_param]
if function_is_classmethod(context.tree_node):
decorator = '@classmethod\n'
else:
code_block += '\n'
function_code = '%sdef %s(%s):\n%s' % (
decorator,
name,
', '.join(params if self_param is None else [self_param] + params),
indent_block(code_block)
)
function_call = '%s(%s)' % (
('' if self_param is None else self_param + '.') + name,
', '.join(params)
)
if is_expression:
replacement = function_call
else:
if has_ending_return_stmt:
replacement = 'return ' + function_call + '\n'
else:
replacement = output_var_str + ' = ' + function_call + '\n'
replacement_dct = _replace(nodes, replacement, function_code, pos,
insert_before_leaf, remaining_prefix)
if not is_expression:
replacement_dct[after_leaf] = second + after_leaf.value
file_to_node_changes = {path: replacement_dct}
return Refactoring(inference_state, file_to_node_changes)
def _check_for_non_extractables(nodes):
for n in nodes:
try:
children = n.children
except AttributeError:
if n.value == 'return':
raise RefactoringError(
'Can only extract return statements if they are at the end.')
if n.value == 'yield':
raise RefactoringError('Cannot extract yield statements.')
else:
_check_for_non_extractables(children)
def _is_name_input(module_context, names, first, last):
for name in names:
if name.api_type == 'param' or not name.parent_context.is_module():
if name.get_root_context() is not module_context:
return True
if name.start_pos is None or not (first <= name.start_pos < last):
return True
return False
def _find_inputs_and_outputs(module_context, context, nodes):
first = nodes[0].start_pos
last = nodes[-1].end_pos
inputs = []
outputs = []
for name in _find_non_global_names(nodes):
if name.is_definition():
            if name.value not in outputs:
outputs.append(name.value)
else:
if name.value not in inputs:
name_definitions = context.goto(name, name.start_pos)
if not name_definitions \
or _is_name_input(module_context, name_definitions, first, last):
inputs.append(name.value)
# Check if outputs are really needed:
return inputs, outputs
def _find_non_global_names(nodes):
for node in nodes:
try:
children = node.children
except AttributeError:
if node.type == 'name':
yield node
else:
# We only want to check foo in foo.bar
if node.type == 'trailer' and node.children[0] == '.':
continue
yield from _find_non_global_names(children)
def _get_code_insertion_node(node, is_bound_method):
if not is_bound_method or function_is_staticmethod(node):
while node.parent.type != 'file_input':
node = node.parent
while node.parent.type in ('async_funcdef', 'decorated', 'async_stmt'):
node = node.parent
return node
def _find_needed_output_variables(context, search_node, at_least_pos, return_variables):
"""
Searches everything after at_least_pos in a node and checks if any of the
return_variables are used in there and returns those.
"""
for node in search_node.children:
if node.start_pos < at_least_pos:
continue
return_variables = set(return_variables)
for name in _find_non_global_names([node]):
if not name.is_definition() and name.value in return_variables:
return_variables.remove(name.value)
yield name.value
def _is_node_ending_return_stmt(node):
t = node.type
if t == 'simple_stmt':
return _is_node_ending_return_stmt(node.children[0])
return t == 'return_stmt'
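if __name__ == '__main__':
    # A minimal sketch (not part of the original module): extraction is
    # normally driven through Jedi's public API. Assumes a recent `jedi`
    # is importable.
    import jedi

    source = 'x = 1 * 2 + 3\n'
    script = jedi.Script(source, path='example.py')
    # Extract `2 + 3` (columns 8-13 on line 1) into a new variable.
    refactoring = script.extract_variable(
        line=1, column=8, until_line=1, until_column=13, new_name='summed')
    print(refactoring.get_diff())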
pytest.py | DamnWidget_anaconda/anaconda_lib/jedi/plugins/pytest.py
from pathlib import Path
from parso.tree import search_ancestor
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.imports import load_module_from_path
from jedi.inference.filters import ParserTreeFilter
from jedi.inference.base_value import NO_VALUES, ValueSet
from jedi.inference.helpers import infer_call_of_leaf
_PYTEST_FIXTURE_MODULES = [
('_pytest', 'monkeypatch'),
('_pytest', 'capture'),
('_pytest', 'logging'),
('_pytest', 'tmpdir'),
('_pytest', 'pytester'),
]
def execute(callback):
def wrapper(value, arguments):
# This might not be necessary anymore in pytest 4/5, definitely needed
# for pytest 3.
if value.py__name__() == 'fixture' \
and value.parent_context.py__name__() == '_pytest.fixtures':
return NO_VALUES
return callback(value, arguments)
return wrapper
def infer_anonymous_param(func):
def get_returns(value):
if value.tree_node.annotation is not None:
result = value.execute_with_values()
if any(v.name.get_qualified_names(include_module_names=True)
== ('typing', 'Generator')
for v in result):
return ValueSet.from_sets(
v.py__getattribute__('__next__').execute_annotation()
for v in result
)
return result
# In pytest we need to differentiate between generators and normal
# returns.
# Parameters still need to be anonymous, .as_context() ensures that.
function_context = value.as_context()
if function_context.is_generator():
return function_context.merge_yield_values()
else:
return function_context.get_return_values()
def wrapper(param_name):
# parameters with an annotation do not need special handling
if param_name.annotation_node:
return func(param_name)
is_pytest_param, param_name_is_function_name = \
_is_a_pytest_param_and_inherited(param_name)
if is_pytest_param:
module = param_name.get_root_context()
fixtures = _goto_pytest_fixture(
module,
param_name.string_name,
# This skips the current module, because we are basically
# inheriting a fixture from somewhere else.
skip_own_module=param_name_is_function_name,
)
if fixtures:
return ValueSet.from_sets(
get_returns(value)
for fixture in fixtures
for value in fixture.infer()
)
return func(param_name)
return wrapper
def goto_anonymous_param(func):
def wrapper(param_name):
is_pytest_param, param_name_is_function_name = \
_is_a_pytest_param_and_inherited(param_name)
if is_pytest_param:
names = _goto_pytest_fixture(
param_name.get_root_context(),
param_name.string_name,
skip_own_module=param_name_is_function_name,
)
if names:
return names
return func(param_name)
return wrapper
def complete_param_names(func):
def wrapper(context, func_name, decorator_nodes):
module_context = context.get_root_context()
if _is_pytest_func(func_name, decorator_nodes):
names = []
for module_context in _iter_pytest_modules(module_context):
names += FixtureFilter(module_context).values()
if names:
return names
return func(context, func_name, decorator_nodes)
return wrapper
def _goto_pytest_fixture(module_context, name, skip_own_module):
for module_context in _iter_pytest_modules(module_context, skip_own_module=skip_own_module):
names = FixtureFilter(module_context).get(name)
if names:
return names
def _is_a_pytest_param_and_inherited(param_name):
"""
    Pytest params either belong to a `test_*` function or to a fixture
    decorated with `@pytest.fixture`.
This is a heuristic and will work in most cases.
"""
funcdef = search_ancestor(param_name.tree_name, 'funcdef')
if funcdef is None: # A lambda
return False, False
decorators = funcdef.get_decorators()
return _is_pytest_func(funcdef.name.value, decorators), \
funcdef.name.value == param_name.string_name
def _is_pytest_func(func_name, decorator_nodes):
return func_name.startswith('test') \
or any('fixture' in n.get_code() for n in decorator_nodes)
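# Illustrative behaviour of the heuristic above (decorator nodes elided):
#
#     >>> _is_pytest_func('test_login', [])
#     True
#     >>> _is_pytest_func('helper', [])
#     False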
@inference_state_method_cache()
def _iter_pytest_modules(module_context, skip_own_module=False):
if not skip_own_module:
yield module_context
file_io = module_context.get_value().file_io
if file_io is not None:
folder = file_io.get_parent_folder()
sys_path = module_context.inference_state.get_sys_path()
# prevent an infinite loop when reaching the root of the current drive
last_folder = None
while any(folder.path.startswith(p) for p in sys_path):
file_io = folder.get_file_io('conftest.py')
if Path(file_io.path) != module_context.py__file__():
try:
m = load_module_from_path(module_context.inference_state, file_io)
yield m.as_context()
except FileNotFoundError:
pass
folder = folder.get_parent_folder()
            # prevent an infinite loop if the same parent folder is returned twice
if last_folder is not None and folder.path == last_folder.path:
break
            last_folder = folder  # keep track of the last visited parent folder
for names in _PYTEST_FIXTURE_MODULES:
for module_value in module_context.inference_state.import_module(names):
yield module_value.as_context()
class FixtureFilter(ParserTreeFilter):
def _filter(self, names):
for name in super()._filter(names):
funcdef = name.parent
# Class fixtures are not supported
if funcdef.type == 'funcdef':
decorated = funcdef.parent
if decorated.type == 'decorated' and self._is_fixture(decorated):
yield name
def _is_fixture(self, decorated):
decorators = decorated.children[0]
if decorators.type == 'decorators':
decorators = decorators.children
else:
decorators = [decorators]
for decorator in decorators:
dotted_name = decorator.children[1]
# A heuristic, this makes it faster.
if 'fixture' in dotted_name.get_code():
if dotted_name.type == 'atom_expr':
                    # Since Python 3.9 a decorator can be an arbitrary
                    # expression, so it is no longer parsed as dotted names.
last_trailer = dotted_name.children[-1]
last_leaf = last_trailer.get_last_leaf()
if last_leaf == ')':
values = infer_call_of_leaf(
self.parent_context, last_leaf, cut_own_trailer=True)
else:
values = self.parent_context.infer_node(dotted_name)
else:
values = self.parent_context.infer_node(dotted_name)
for value in values:
if value.name.get_qualified_names(include_module_names=True) \
== ('_pytest', 'fixtures', 'fixture'):
return True
return False
flask.py | DamnWidget_anaconda/anaconda_lib/jedi/plugins/flask.py
def import_module(callback):
"""
Handle "magic" Flask extension imports:
``flask.ext.foo`` is really ``flask_foo`` or ``flaskext.foo``.
"""
def wrapper(inference_state, import_names, module_context, *args, **kwargs):
if len(import_names) == 3 and import_names[:2] == ('flask', 'ext'):
# New style.
ipath = ('flask_' + import_names[2]),
value_set = callback(inference_state, ipath, None, *args, **kwargs)
if value_set:
return value_set
value_set = callback(inference_state, ('flaskext',), None, *args, **kwargs)
return callback(
inference_state,
('flaskext', import_names[2]),
next(iter(value_set)),
*args, **kwargs
)
return callback(inference_state, import_names, module_context, *args, **kwargs)
return wrapper
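if __name__ == '__main__':
    # A minimal sketch (not part of the plugin): exercise the wrapper with a
    # stub callback to show how `flask.ext.sqlalchemy` is rewritten. The stub
    # only records the import names it receives instead of doing inference.
    calls = []

    def stub(inference_state, import_names, module_context, *args, **kwargs):
        calls.append(import_names)
        # Pretend only the old-style `flaskext` package can be found.
        return ['<module>'] if import_names[0] == 'flaskext' else []

    wrapped = import_module(stub)
    wrapped(None, ('flask', 'ext', 'sqlalchemy'), None)
    # -> [('flask_sqlalchemy',), ('flaskext',), ('flaskext', 'sqlalchemy')]
    print(calls)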
stdlib.py | DamnWidget_anaconda/anaconda_lib/jedi/plugins/stdlib.py
"""
Implementations of standard library functions, because it's not possible to
understand them with Jedi.
To add a new implementation, create a function and add it to the
``_implemented`` dict at the bottom of this module.
Note that this module exists only to implement very specific functionality in
the standard library. The usual way to understand the standard library is the
compiled module that returns the types for C-builtins.
"""
import parso
import os
from inspect import Parameter
from jedi import debug
from jedi.inference.utils import safe_property
from jedi.inference.helpers import get_str_or_none
from jedi.inference.arguments import iterate_argument_clinic, ParamIssue, \
repack_with_argument_clinic, AbstractArguments, TreeArgumentsWrapper
from jedi.inference import analysis
from jedi.inference import compiled
from jedi.inference.value.instance import \
AnonymousMethodExecutionContext, MethodExecutionContext
from jedi.inference.base_value import ContextualizedNode, \
NO_VALUES, ValueSet, ValueWrapper, LazyValueWrapper
from jedi.inference.value import ClassValue, ModuleValue
from jedi.inference.value.klass import ClassMixin
from jedi.inference.value.function import FunctionMixin
from jedi.inference.value import iterable
from jedi.inference.lazy_value import LazyTreeValue, LazyKnownValue, \
LazyKnownValues
from jedi.inference.names import ValueName, BaseTreeParamName
from jedi.inference.filters import AttributeOverwrite, publish_method, \
ParserTreeFilter, DictFilter
from jedi.inference.signature import AbstractSignature, SignatureWrapper
# Copied from Python 3.6's stdlib.
_NAMEDTUPLE_CLASS_TEMPLATE = """\
_property = property
_tuple = tuple
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return _tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return OrderedDict(zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
# These methods were added by Jedi.
    # __new__ doesn't really work with Jedi. So adding this to namedtuples seems
# like the easiest way.
def __init__(self, {arg_list}):
'A helper function for namedtuple.'
self.__iterable = ({arg_list})
def __iter__(self):
for i in self.__iterable:
yield i
def __getitem__(self, y):
return self.__iterable[y]
{field_defs}
"""
_NAMEDTUPLE_FIELD_TEMPLATE = '''\
{name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
def execute(callback):
def wrapper(value, arguments):
def call():
return callback(value, arguments=arguments)
try:
obj_name = value.name.string_name
except AttributeError:
pass
else:
p = value.parent_context
if p is not None and p.is_builtins_module():
module_name = 'builtins'
elif p is not None and p.is_module():
module_name = p.py__name__()
else:
return call()
if value.is_bound_method() or value.is_instance():
# value can be an instance for example if it is a partial
# object.
return call()
# for now we just support builtin functions.
try:
func = _implemented[module_name][obj_name]
except KeyError:
pass
else:
return func(value, arguments=arguments, callback=call)
return call()
return wrapper
def _follow_param(inference_state, arguments, index):
try:
key, lazy_value = list(arguments.unpack())[index]
except IndexError:
return NO_VALUES
else:
return lazy_value.infer()
def argument_clinic(clinic_string, want_value=False, want_context=False,
want_arguments=False, want_inference_state=False,
want_callback=False):
"""
Works like Argument Clinic (PEP 436), to validate function params.
"""
def f(func):
def wrapper(value, arguments, callback):
try:
args = tuple(iterate_argument_clinic(
value.inference_state, arguments, clinic_string))
except ParamIssue:
return NO_VALUES
debug.dbg('builtin start %s' % value, color='MAGENTA')
kwargs = {}
if want_context:
kwargs['context'] = arguments.context
if want_value:
kwargs['value'] = value
if want_inference_state:
kwargs['inference_state'] = value.inference_state
if want_arguments:
kwargs['arguments'] = arguments
if want_callback:
kwargs['callback'] = callback
result = func(*args, **kwargs)
debug.dbg('builtin end: %s', result, color='MAGENTA')
return result
return wrapper
return f
@argument_clinic('iterator[, default], /', want_inference_state=True)
def builtins_next(iterators, defaults, inference_state):
# TODO theoretically we have to check here if something is an iterator.
# That is probably done by checking if it's not a class.
return defaults | iterators.py__getattribute__('__next__').execute_with_values()
@argument_clinic('iterator[, default], /')
def builtins_iter(iterators_or_callables, defaults):
# TODO implement this if it's a callable.
return iterators_or_callables.py__getattribute__('__iter__').execute_with_values()
@argument_clinic('object, name[, default], /')
def builtins_getattr(objects, names, defaults=None):
# follow the first param
for value in objects:
for name in names:
string = get_str_or_none(name)
if string is None:
debug.warning('getattr called without str')
continue
else:
return value.py__getattribute__(string)
return NO_VALUES
@argument_clinic('object[, bases, dict], /')
def builtins_type(objects, bases, dicts):
if bases or dicts:
# It's a type creation... maybe someday...
return NO_VALUES
else:
return objects.py__class__()
class SuperInstance(LazyValueWrapper):
"""To be used like the object ``super`` returns."""
def __init__(self, inference_state, instance):
self.inference_state = inference_state
self._instance = instance # Corresponds to super().__self__
def _get_bases(self):
return self._instance.py__class__().py__bases__()
def _get_wrapped_value(self):
objs = self._get_bases()[0].infer().execute_with_values()
if not objs:
            # This is just a fallback and will only be used if it's not
            # possible to find a class
return self._instance
return next(iter(objs))
def get_filters(self, origin_scope=None):
for b in self._get_bases():
for value in b.infer().execute_with_values():
for f in value.get_filters():
yield f
@argument_clinic('[type[, value]], /', want_context=True)
def builtins_super(types, objects, context):
instance = None
if isinstance(context, AnonymousMethodExecutionContext):
instance = context.instance
elif isinstance(context, MethodExecutionContext):
instance = context.instance
if instance is None:
return NO_VALUES
return ValueSet({SuperInstance(instance.inference_state, instance)})
class ReversedObject(AttributeOverwrite):
def __init__(self, reversed_obj, iter_list):
super().__init__(reversed_obj)
self._iter_list = iter_list
def py__iter__(self, contextualized_node=None):
return self._iter_list
@publish_method('__next__')
def _next(self, arguments):
return ValueSet.from_sets(
lazy_value.infer() for lazy_value in self._iter_list
)
@argument_clinic('sequence, /', want_value=True, want_arguments=True)
def builtins_reversed(sequences, value, arguments):
# While we could do without this variable (just by using sequences), we
    # want static analysis to work well. Therefore we need to generate the
# values again.
key, lazy_value = next(arguments.unpack())
cn = None
if isinstance(lazy_value, LazyTreeValue):
cn = ContextualizedNode(lazy_value.context, lazy_value.data)
ordered = list(sequences.iterate(cn))
# Repack iterator values and then run it the normal way. This is
# necessary, because `reversed` is a function and autocompletion
# would fail in certain cases like `reversed(x).__iter__` if we
# just returned the result directly.
seq, = value.inference_state.typing_module.py__getattribute__('Iterator').execute_with_values()
return ValueSet([ReversedObject(seq, list(reversed(ordered)))])
@argument_clinic('value, type, /', want_arguments=True, want_inference_state=True)
def builtins_isinstance(objects, types, arguments, inference_state):
bool_results = set()
for o in objects:
cls = o.py__class__()
try:
cls.py__bases__
except AttributeError:
# This is temporary. Everything should have a class attribute in
# Python?! Maybe we'll leave it here, because some numpy objects or
# whatever might not.
bool_results = set([True, False])
break
mro = list(cls.py__mro__())
for cls_or_tup in types:
if cls_or_tup.is_class():
bool_results.add(cls_or_tup in mro)
elif cls_or_tup.name.string_name == 'tuple' \
and cls_or_tup.get_root_context().is_builtins_module():
# Check for tuples.
classes = ValueSet.from_sets(
lazy_value.infer()
for lazy_value in cls_or_tup.iterate()
)
bool_results.add(any(cls in mro for cls in classes))
else:
_, lazy_value = list(arguments.unpack())[1]
if isinstance(lazy_value, LazyTreeValue):
node = lazy_value.data
message = 'TypeError: isinstance() arg 2 must be a ' \
'class, type, or tuple of classes and types, ' \
'not %s.' % cls_or_tup
analysis.add(lazy_value.context, 'type-error-isinstance', node, message)
return ValueSet(
compiled.builtin_from_name(inference_state, str(b))
for b in bool_results
)
class StaticMethodObject(ValueWrapper):
def py__get__(self, instance, class_value):
return ValueSet([self._wrapped_value])
@argument_clinic('sequence, /')
def builtins_staticmethod(functions):
return ValueSet(StaticMethodObject(f) for f in functions)
class ClassMethodObject(ValueWrapper):
def __init__(self, class_method_obj, function):
super().__init__(class_method_obj)
self._function = function
def py__get__(self, instance, class_value):
return ValueSet([
ClassMethodGet(__get__, class_value, self._function)
for __get__ in self._wrapped_value.py__getattribute__('__get__')
])
class ClassMethodGet(ValueWrapper):
def __init__(self, get_method, klass, function):
super().__init__(get_method)
self._class = klass
self._function = function
def get_signatures(self):
return [sig.bind(self._function) for sig in self._function.get_signatures()]
def py__call__(self, arguments):
return self._function.execute(ClassMethodArguments(self._class, arguments))
class ClassMethodArguments(TreeArgumentsWrapper):
def __init__(self, klass, arguments):
super().__init__(arguments)
self._class = klass
def unpack(self, func=None):
yield None, LazyKnownValue(self._class)
for values in self._wrapped_arguments.unpack(func):
yield values
@argument_clinic('sequence, /', want_value=True, want_arguments=True)
def builtins_classmethod(functions, value, arguments):
return ValueSet(
ClassMethodObject(class_method_object, function)
for class_method_object in value.py__call__(arguments=arguments)
for function in functions
)
class PropertyObject(AttributeOverwrite, ValueWrapper):
api_type = 'property'
def __init__(self, property_obj, function):
super().__init__(property_obj)
self._function = function
def py__get__(self, instance, class_value):
if instance is None:
return ValueSet([self])
return self._function.execute_with_values(instance)
@publish_method('deleter')
@publish_method('getter')
@publish_method('setter')
def _return_self(self, arguments):
return ValueSet({self})
@argument_clinic('func, /', want_callback=True)
def builtins_property(functions, callback):
return ValueSet(
PropertyObject(property_value, function)
for property_value in callback()
for function in functions
)
def collections_namedtuple(value, arguments, callback):
"""
Implementation of the namedtuple function.
This has to be done by processing the namedtuple class template and
inferring the result.
"""
inference_state = value.inference_state
# Process arguments
name = 'jedi_unknown_namedtuple'
for c in _follow_param(inference_state, arguments, 0):
x = get_str_or_none(c)
if x is not None:
name = x
break
# TODO here we only use one of the types, we should use all.
param_values = _follow_param(inference_state, arguments, 1)
if not param_values:
return NO_VALUES
_fields = list(param_values)[0]
string = get_str_or_none(_fields)
if string is not None:
fields = string.replace(',', ' ').split()
elif isinstance(_fields, iterable.Sequence):
fields = [
get_str_or_none(v)
for lazy_value in _fields.py__iter__()
for v in lazy_value.infer()
]
fields = [f for f in fields if f is not None]
else:
return NO_VALUES
# Build source code
code = _NAMEDTUPLE_CLASS_TEMPLATE.format(
typename=name,
field_names=tuple(fields),
num_fields=len(fields),
arg_list=repr(tuple(fields)).replace("'", "")[1:-1],
repr_fmt='',
field_defs='\n'.join(_NAMEDTUPLE_FIELD_TEMPLATE.format(index=index, name=name)
for index, name in enumerate(fields))
)
# Parse source code
module = inference_state.grammar.parse(code)
generated_class = next(module.iter_classdefs())
parent_context = ModuleValue(
inference_state, module,
code_lines=parso.split_lines(code, keepends=True),
).as_context()
return ValueSet([ClassValue(inference_state, parent_context, generated_class)])
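# For instance (illustrative), `collections.namedtuple('Point', 'x y')` is
# expanded with the templates above roughly into:
#
#     class Point(tuple):
#         __slots__ = ()
#         _fields = ('x', 'y')
#         def __new__(_cls, x, y): ...
#         x = _property(_itemgetter(0), doc='Alias for field number 0')
#         y = _property(_itemgetter(1), doc='Alias for field number 1')
#
# which is then parsed and inferred like any other module.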
class PartialObject(ValueWrapper):
def __init__(self, actual_value, arguments, instance=None):
super().__init__(actual_value)
self._arguments = arguments
self._instance = instance
def _get_functions(self, unpacked_arguments):
key, lazy_value = next(unpacked_arguments, (None, None))
if key is not None or lazy_value is None:
debug.warning("Partial should have a proper function %s", self._arguments)
return None
return lazy_value.infer()
def get_signatures(self):
unpacked_arguments = self._arguments.unpack()
funcs = self._get_functions(unpacked_arguments)
if funcs is None:
return []
arg_count = 0
if self._instance is not None:
arg_count = 1
keys = set()
for key, _ in unpacked_arguments:
if key is None:
arg_count += 1
else:
keys.add(key)
return [PartialSignature(s, arg_count, keys) for s in funcs.get_signatures()]
def py__call__(self, arguments):
funcs = self._get_functions(self._arguments.unpack())
if funcs is None:
return NO_VALUES
return funcs.execute(
MergedPartialArguments(self._arguments, arguments, self._instance)
)
def py__doc__(self):
"""
        In CPython, partial does not adopt the wrapped callable's docstring.
        We still forward it here, because we want this docstring to be worth
        something for the user.
"""
callables = self._get_functions(self._arguments.unpack())
if callables is None:
return ''
for callable_ in callables:
return callable_.py__doc__()
return ''
def py__get__(self, instance, class_value):
return ValueSet([self])
class PartialMethodObject(PartialObject):
def py__get__(self, instance, class_value):
if instance is None:
return ValueSet([self])
return ValueSet([PartialObject(self._wrapped_value, self._arguments, instance)])
class PartialSignature(SignatureWrapper):
def __init__(self, wrapped_signature, skipped_arg_count, skipped_arg_set):
super().__init__(wrapped_signature)
self._skipped_arg_count = skipped_arg_count
self._skipped_arg_set = skipped_arg_set
def get_param_names(self, resolve_stars=False):
names = self._wrapped_signature.get_param_names()[self._skipped_arg_count:]
return [n for n in names if n.string_name not in self._skipped_arg_set]
class MergedPartialArguments(AbstractArguments):
def __init__(self, partial_arguments, call_arguments, instance=None):
self._partial_arguments = partial_arguments
self._call_arguments = call_arguments
self._instance = instance
def unpack(self, funcdef=None):
unpacked = self._partial_arguments.unpack(funcdef)
# Ignore this one, it's the function. It was checked before that it's
# there.
next(unpacked, None)
if self._instance is not None:
yield None, LazyKnownValue(self._instance)
for key_lazy_value in unpacked:
yield key_lazy_value
for key_lazy_value in self._call_arguments.unpack(funcdef):
yield key_lazy_value
def functools_partial(value, arguments, callback):
return ValueSet(
PartialObject(instance, arguments)
for instance in value.py__call__(arguments)
)
def functools_partialmethod(value, arguments, callback):
return ValueSet(
PartialMethodObject(instance, arguments)
for instance in value.py__call__(arguments)
)
@argument_clinic('first, /')
def _return_first_param(firsts):
return firsts
@argument_clinic('seq')
def _random_choice(sequences):
return ValueSet.from_sets(
lazy_value.infer()
for sequence in sequences
for lazy_value in sequence.py__iter__()
)
def _dataclass(value, arguments, callback):
for c in _follow_param(value.inference_state, arguments, 0):
if c.is_class():
return ValueSet([DataclassWrapper(c)])
else:
return ValueSet([value])
return NO_VALUES
class DataclassWrapper(ValueWrapper, ClassMixin):
def get_signatures(self):
param_names = []
for cls in reversed(list(self.py__mro__())):
if isinstance(cls, DataclassWrapper):
filter_ = cls.as_context().get_global_filter()
# .values ordering is not guaranteed, at least not in
            # Python < 3.6, when dicts were not ordered, which is an
# implementation detail anyway.
for name in sorted(filter_.values(), key=lambda name: name.start_pos):
d = name.tree_name.get_definition()
annassign = d.children[1]
if d.type == 'expr_stmt' and annassign.type == 'annassign':
if len(annassign.children) < 4:
default = None
else:
default = annassign.children[3]
param_names.append(DataclassParamName(
parent_context=cls.parent_context,
tree_name=name.tree_name,
annotation_node=annassign.children[1],
default_node=default,
))
return [DataclassSignature(cls, param_names)]
class DataclassSignature(AbstractSignature):
def __init__(self, value, param_names):
super().__init__(value)
self._param_names = param_names
def get_param_names(self, resolve_stars=False):
return self._param_names
class DataclassParamName(BaseTreeParamName):
def __init__(self, parent_context, tree_name, annotation_node, default_node):
super().__init__(parent_context, tree_name)
self.annotation_node = annotation_node
self.default_node = default_node
def get_kind(self):
return Parameter.POSITIONAL_OR_KEYWORD
def infer(self):
if self.annotation_node is None:
return NO_VALUES
else:
return self.parent_context.infer_node(self.annotation_node)
class ItemGetterCallable(ValueWrapper):
def __init__(self, instance, args_value_set):
super().__init__(instance)
self._args_value_set = args_value_set
@repack_with_argument_clinic('item, /')
def py__call__(self, item_value_set):
value_set = NO_VALUES
for args_value in self._args_value_set:
lazy_values = list(args_value.py__iter__())
if len(lazy_values) == 1:
# TODO we need to add the contextualized value.
value_set |= item_value_set.get_item(lazy_values[0].infer(), None)
else:
value_set |= ValueSet([iterable.FakeList(
self._wrapped_value.inference_state,
[
LazyKnownValues(item_value_set.get_item(lazy_value.infer(), None))
for lazy_value in lazy_values
],
)])
return value_set
@argument_clinic('func, /')
def _functools_wraps(funcs):
return ValueSet(WrapsCallable(func) for func in funcs)
class WrapsCallable(ValueWrapper):
# XXX this is not the correct wrapped value, it should be a weird
# partials object, but it doesn't matter, because it's always used as a
# decorator anyway.
@repack_with_argument_clinic('func, /')
def py__call__(self, funcs):
return ValueSet({Wrapped(func, self._wrapped_value) for func in funcs})
class Wrapped(ValueWrapper, FunctionMixin):
def __init__(self, func, original_function):
super().__init__(func)
self._original_function = original_function
@property
def name(self):
return self._original_function.name
def get_signature_functions(self):
return [self]
@argument_clinic('*args, /', want_value=True, want_arguments=True)
def _operator_itemgetter(args_value_set, value, arguments):
return ValueSet([
ItemGetterCallable(instance, args_value_set)
for instance in value.py__call__(arguments)
])
def _create_string_input_function(func):
@argument_clinic('string, /', want_value=True, want_arguments=True)
def wrapper(strings, value, arguments):
def iterate():
for value in strings:
s = get_str_or_none(value)
if s is not None:
s = func(s)
yield compiled.create_simple_object(value.inference_state, s)
values = ValueSet(iterate())
if values:
return values
return value.py__call__(arguments)
return wrapper
@argument_clinic('*args, /', want_callback=True)
def _os_path_join(args_set, callback):
if len(args_set) == 1:
string = ''
sequence, = args_set
is_first = True
for lazy_value in sequence.py__iter__():
string_values = lazy_value.infer()
if len(string_values) != 1:
break
s = get_str_or_none(next(iter(string_values)))
if s is None:
break
if not is_first:
string += os.path.sep
string += s
is_first = False
else:
return ValueSet([compiled.create_simple_object(sequence.inference_state, string)])
return callback()
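# Illustrative: for `os.path.join('a', 'b')` the loop above folds the two
# literal strings into 'a' + os.path.sep + 'b'; as soon as an element is not
# a single literal string, the fold is abandoned and `callback()` falls back
# to the regular stub-based inference.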
_implemented = {
'builtins': {
'getattr': builtins_getattr,
'type': builtins_type,
'super': builtins_super,
'reversed': builtins_reversed,
'isinstance': builtins_isinstance,
'next': builtins_next,
'iter': builtins_iter,
'staticmethod': builtins_staticmethod,
'classmethod': builtins_classmethod,
'property': builtins_property,
},
'copy': {
'copy': _return_first_param,
'deepcopy': _return_first_param,
},
'json': {
'load': lambda value, arguments, callback: NO_VALUES,
'loads': lambda value, arguments, callback: NO_VALUES,
},
'collections': {
'namedtuple': collections_namedtuple,
},
'functools': {
'partial': functools_partial,
'partialmethod': functools_partialmethod,
'wraps': _functools_wraps,
},
'_weakref': {
'proxy': _return_first_param,
},
'random': {
'choice': _random_choice,
},
'operator': {
'itemgetter': _operator_itemgetter,
},
'abc': {
# Not sure if this is necessary, but it's used a lot in typeshed and
# it's for now easier to just pass the function.
'abstractmethod': _return_first_param,
},
'typing': {
# The _alias function just leads to some annoying type inference.
# Therefore, just make it return nothing, which leads to the stubs
# being used instead. This only matters for 3.7+.
'_alias': lambda value, arguments, callback: NO_VALUES,
# runtime_checkable doesn't really change anything and is just
        # adding logs when inferring stuff, so we can safely ignore it.
'runtime_checkable': lambda value, arguments, callback: NO_VALUES,
},
'dataclasses': {
# For now this works at least better than Jedi trying to understand it.
'dataclass': _dataclass
},
'os.path': {
'dirname': _create_string_input_function(os.path.dirname),
'abspath': _create_string_input_function(os.path.abspath),
'relpath': _create_string_input_function(os.path.relpath),
'join': _os_path_join,
}
}
def get_metaclass_filters(func):
def wrapper(cls, metaclasses, is_instance):
for metaclass in metaclasses:
if metaclass.py__name__() == 'EnumMeta' \
and metaclass.get_root_context().py__name__() == 'enum':
filter_ = ParserTreeFilter(parent_context=cls.as_context())
return [DictFilter({
name.string_name: EnumInstance(cls, name).name
for name in filter_.values()
})]
return func(cls, metaclasses, is_instance)
return wrapper
class EnumInstance(LazyValueWrapper):
def __init__(self, cls, name):
self.inference_state = cls.inference_state
        self._cls = cls  # The enum class this pseudo-instance belongs to
self._name = name
self.tree_node = self._name.tree_name
@safe_property
def name(self):
return ValueName(self, self._name.tree_name)
def _get_wrapped_value(self):
n = self._name.string_name
if n.startswith('__') and n.endswith('__') or self._name.api_type == 'function':
inferred = self._name.infer()
if inferred:
return next(iter(inferred))
o, = self.inference_state.builtins_module.py__getattribute__('object')
return o
value, = self._cls.execute_with_values()
return value
def get_filters(self, origin_scope=None):
yield DictFilter(dict(
name=compiled.create_simple_object(self.inference_state, self._name.string_name).name,
value=self._name,
))
for f in self._get_wrapped_value().get_filters():
yield f
def tree_name_to_values(func):
def wrapper(inference_state, context, tree_name):
if tree_name.value == 'sep' and context.is_module() and context.py__name__() == 'os.path':
return ValueSet({
compiled.create_simple_object(inference_state, os.path.sep),
})
return func(inference_state, context, tree_name)
return wrapper
__init__.py | DamnWidget_anaconda/anaconda_lib/jedi/plugins/__init__.py
from functools import wraps
class _PluginManager:
def __init__(self):
self._registered_plugins = []
self._cached_base_callbacks = {}
self._built_functions = {}
def register(self, *plugins):
"""
Makes it possible to register your plugin.
"""
self._registered_plugins.extend(plugins)
self._build_functions()
def decorate(self, name=None):
def decorator(callback):
@wraps(callback)
def wrapper(*args, **kwargs):
return built_functions[public_name](*args, **kwargs)
public_name = name or callback.__name__
assert public_name not in self._built_functions
built_functions = self._built_functions
built_functions[public_name] = callback
self._cached_base_callbacks[public_name] = callback
return wrapper
return decorator
def _build_functions(self):
for name, callback in self._cached_base_callbacks.items():
for plugin in reversed(self._registered_plugins):
# Need to reverse so the first plugin is run first.
try:
func = getattr(plugin, name)
except AttributeError:
pass
else:
callback = func(callback)
self._built_functions[name] = callback
plugin_manager = _PluginManager()
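if __name__ == '__main__':
    # A minimal sketch (not part of the original module) of the decorate /
    # register flow, using a toy plugin that wraps the base callback.
    manager = _PluginManager()

    @manager.decorate()
    def greet(name):
        return 'hello %s' % name

    class ShoutPlugin:
        def greet(self, callback):
            def wrapper(name):
                return callback(name).upper()
            return wrapper

    print(greet('jedi'))    # -> hello jedi
    manager.register(ShoutPlugin())
    print(greet('jedi'))    # -> HELLO JEDI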
django.py | DamnWidget_anaconda/anaconda_lib/jedi/plugins/django.py
"""
This module is used to infer Django model fields.
"""
from inspect import Parameter
from jedi import debug
from jedi.inference.cache import inference_state_function_cache
from jedi.inference.base_value import ValueSet, iterator_to_value_set, ValueWrapper
from jedi.inference.filters import DictFilter, AttributeOverwrite
from jedi.inference.names import NameWrapper, BaseTreeParamName
from jedi.inference.compiled.value import EmptyCompiledName
from jedi.inference.value.instance import TreeInstance
from jedi.inference.value.klass import ClassMixin
from jedi.inference.gradual.base import GenericClass
from jedi.inference.gradual.generics import TupleGenericManager
from jedi.inference.signature import AbstractSignature
mapping = {
'IntegerField': (None, 'int'),
'BigIntegerField': (None, 'int'),
'PositiveIntegerField': (None, 'int'),
'SmallIntegerField': (None, 'int'),
'CharField': (None, 'str'),
'TextField': (None, 'str'),
'EmailField': (None, 'str'),
'GenericIPAddressField': (None, 'str'),
'URLField': (None, 'str'),
'FloatField': (None, 'float'),
'BinaryField': (None, 'bytes'),
'BooleanField': (None, 'bool'),
'DecimalField': ('decimal', 'Decimal'),
'TimeField': ('datetime', 'time'),
'DurationField': ('datetime', 'timedelta'),
'DateField': ('datetime', 'date'),
'DateTimeField': ('datetime', 'datetime'),
'UUIDField': ('uuid', 'UUID'),
}
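# E.g. (illustrative): an attribute declared as `models.CharField()` on a
# model class is inferred as `str` on instances via this table, while
# `models.DecimalField()` resolves to `decimal.Decimal`.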
_FILTER_LIKE_METHODS = ('create', 'filter', 'exclude', 'update', 'get',
'get_or_create', 'update_or_create')
@inference_state_function_cache()
def _get_deferred_attributes(inference_state):
return inference_state.import_module(
('django', 'db', 'models', 'query_utils')
).py__getattribute__('DeferredAttribute').execute_annotation()
def _infer_scalar_field(inference_state, field_name, field_tree_instance, is_instance):
try:
module_name, attribute_name = mapping[field_tree_instance.py__name__()]
except KeyError:
return None
if not is_instance:
return _get_deferred_attributes(inference_state)
if module_name is None:
module = inference_state.builtins_module
else:
module = inference_state.import_module((module_name,))
for attribute in module.py__getattribute__(attribute_name):
return attribute.execute_with_values()
@iterator_to_value_set
def _get_foreign_key_values(cls, field_tree_instance):
if isinstance(field_tree_instance, TreeInstance):
# TODO private access..
argument_iterator = field_tree_instance._arguments.unpack()
key, lazy_values = next(argument_iterator, (None, None))
if key is None and lazy_values is not None:
for value in lazy_values.infer():
if value.py__name__() == 'str':
foreign_key_class_name = value.get_safe_value()
module = cls.get_root_context()
for v in module.py__getattribute__(foreign_key_class_name):
if v.is_class():
yield v
elif value.is_class():
yield value
def _infer_field(cls, field_name, is_instance):
inference_state = cls.inference_state
result = field_name.infer()
for field_tree_instance in result:
scalar_field = _infer_scalar_field(
inference_state, field_name, field_tree_instance, is_instance)
if scalar_field is not None:
return scalar_field
name = field_tree_instance.py__name__()
is_many_to_many = name == 'ManyToManyField'
if name in ('ForeignKey', 'OneToOneField') or is_many_to_many:
if not is_instance:
return _get_deferred_attributes(inference_state)
values = _get_foreign_key_values(cls, field_tree_instance)
if is_many_to_many:
return ValueSet(filter(None, [
_create_manager_for(v, 'RelatedManager') for v in values
]))
else:
return values.execute_with_values()
    debug.dbg('django plugin: failed to infer `%s` from class `%s`',
field_name.string_name, cls.py__name__())
return result
class DjangoModelName(NameWrapper):
def __init__(self, cls, name, is_instance):
super().__init__(name)
self._cls = cls
self._is_instance = is_instance
def infer(self):
return _infer_field(self._cls, self._wrapped_name, self._is_instance)
def _create_manager_for(cls, manager_cls='BaseManager'):
managers = cls.inference_state.import_module(
('django', 'db', 'models', 'manager')
).py__getattribute__(manager_cls)
for m in managers:
if m.is_class_mixin():
generics_manager = TupleGenericManager((ValueSet([cls]),))
for c in GenericClass(m, generics_manager).execute_annotation():
return c
return None
def _new_dict_filter(cls, is_instance):
filters = list(cls.get_filters(
is_instance=is_instance,
include_metaclasses=False,
include_type_when_class=False)
)
dct = {
name.string_name: DjangoModelName(cls, name, is_instance)
for filter_ in reversed(filters)
for name in filter_.values()
}
if is_instance:
        # Replace `objects` with a name that amounts to nothing when accessed
        # on an instance. This is not perfect and still completes "objects" in
        # that case, but at least it does not infer stuff like
        # `.objects.filter`. It would be nicer to hide it from completions as
        # well, but that is probably not worth the extra amount of work.
dct['objects'] = EmptyCompiledName(cls.inference_state, 'objects')
return DictFilter(dct)
def is_django_model_base(value):
return value.py__name__() == 'ModelBase' \
and value.get_root_context().py__name__() == 'django.db.models.base'
def get_metaclass_filters(func):
def wrapper(cls, metaclasses, is_instance):
for metaclass in metaclasses:
if is_django_model_base(metaclass):
return [_new_dict_filter(cls, is_instance)]
return func(cls, metaclasses, is_instance)
return wrapper
def tree_name_to_values(func):
def wrapper(inference_state, context, tree_name):
result = func(inference_state, context, tree_name)
if tree_name.value in _FILTER_LIKE_METHODS:
# Here we try to overwrite stuff like User.objects.filter. We need
# this to make sure that keyword param completion works on these
# kind of methods.
for v in result:
if v.get_qualified_names() == ('_BaseQuerySet', tree_name.value) \
and v.parent_context.is_module() \
and v.parent_context.py__name__() == 'django.db.models.query':
qs = context.get_value()
generics = qs.get_generics()
if len(generics) >= 1:
return ValueSet(QuerySetMethodWrapper(v, model)
for model in generics[0])
elif tree_name.value == 'BaseManager' and context.is_module() \
and context.py__name__() == 'django.db.models.manager':
return ValueSet(ManagerWrapper(r) for r in result)
elif tree_name.value == 'Field' and context.is_module() \
and context.py__name__() == 'django.db.models.fields':
return ValueSet(FieldWrapper(r) for r in result)
return result
return wrapper
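# Illustrative completion scenario (an assumption, for documentation only):
# given `class User(models.Model): name = models.CharField(max_length=50)`,
# the wrapper above lets `User.objects.filter(na<cursor>)` complete the
# keyword argument `name=`, because the queryset method gets bound to the
# `User` generic.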
def _find_fields(cls):
for name in _new_dict_filter(cls, is_instance=False).values():
for value in name.infer():
if value.name.get_qualified_names(include_module_names=True) \
== ('django', 'db', 'models', 'query_utils', 'DeferredAttribute'):
yield name
def _get_signatures(cls):
return [DjangoModelSignature(cls, field_names=list(_find_fields(cls)))]
def get_metaclass_signatures(func):
def wrapper(cls, metaclasses):
for metaclass in metaclasses:
if is_django_model_base(metaclass):
return _get_signatures(cls)
        return func(cls, metaclasses)
return wrapper
class ManagerWrapper(ValueWrapper):
def py__getitem__(self, index_value_set, contextualized_node):
return ValueSet(
GenericManagerWrapper(generic)
for generic in self._wrapped_value.py__getitem__(
index_value_set, contextualized_node)
)
class GenericManagerWrapper(AttributeOverwrite, ClassMixin):
def py__get__on_class(self, calling_instance, instance, class_value):
return calling_instance.class_value.with_generics(
(ValueSet({class_value}),)
).py__call__(calling_instance._arguments)
def with_generics(self, generics_tuple):
return self._wrapped_value.with_generics(generics_tuple)
class FieldWrapper(ValueWrapper):
def py__getitem__(self, index_value_set, contextualized_node):
return ValueSet(
GenericFieldWrapper(generic)
for generic in self._wrapped_value.py__getitem__(
index_value_set, contextualized_node)
)
class GenericFieldWrapper(AttributeOverwrite, ClassMixin):
def py__get__on_class(self, calling_instance, instance, class_value):
# This is mostly an optimization to avoid Jedi aborting inference,
# because of too many function executions of Field.__get__.
return ValueSet({calling_instance})
class DjangoModelSignature(AbstractSignature):
def __init__(self, value, field_names):
super().__init__(value)
self._field_names = field_names
def get_param_names(self, resolve_stars=False):
return [DjangoParamName(name) for name in self._field_names]
class DjangoParamName(BaseTreeParamName):
def __init__(self, field_name):
super().__init__(field_name.parent_context, field_name.tree_name)
self._field_name = field_name
def get_kind(self):
return Parameter.KEYWORD_ONLY
def infer(self):
return self._field_name.infer()
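# Illustrative synthesised signature (an assumption): for a model with the
# fields `name` and `email`, DjangoModelSignature amounts to roughly
#
#     User(*, name=..., email=...)
#
# i.e. every inferred field becomes a keyword-only parameter.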
class QuerySetMethodWrapper(ValueWrapper):
def __init__(self, method, model_cls):
super().__init__(method)
self._model_cls = model_cls
def py__get__(self, instance, class_value):
return ValueSet({QuerySetBoundMethodWrapper(v, self._model_cls)
for v in self._wrapped_value.py__get__(instance, class_value)})
class QuerySetBoundMethodWrapper(ValueWrapper):
def __init__(self, method, model_cls):
super().__init__(method)
self._model_cls = model_cls
def get_signatures(self):
return _get_signatures(self._model_cls)
| 10,895 | Python | .py | 233 | 38.008584 | 88 | 0.6444 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,962 | registry.py | DamnWidget_anaconda/anaconda_lib/jedi/plugins/registry.py | """
This is not a plugin; it is just the place where plugins are registered.
"""
from jedi.plugins import stdlib
from jedi.plugins import flask
from jedi.plugins import pytest
from jedi.plugins import django
from jedi.plugins import plugin_manager
plugin_manager.register(stdlib, flask, pytest, django)
| 307 | Python | .py | 9 | 32.777778 | 73 | 0.823729 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,963 | basestemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/basestemmer.py | class BaseStemmer(object):
def __init__(self):
self.set_current("")
self.maxCacheSize = 10000
self._cache = {}
self._counter = 0
def set_current(self, value):
'''
Set the self.current string.
'''
self.current = value
self.cursor = 0
self.limit = len(self.current)
self.limit_backward = 0
self.bra = self.cursor
self.ket = self.limit
def get_current(self):
'''
Get the self.current string.
'''
return self.current
def copy_from(self, other):
self.current = other.current
self.cursor = other.cursor
self.limit = other.limit
self.limit_backward = other.limit_backward
self.bra = other.bra
self.ket = other.ket
def in_grouping(self, s, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
return False
ch -= min
if (s[ch >> 3] & (0x1 << (ch & 0x7))) == 0:
return False
self.cursor += 1
return True
def in_grouping_b(self, s, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
return False
ch -= min
if (s[ch >> 3] & (0x1 << (ch & 0x7))) == 0:
return False
self.cursor -= 1
return True
def out_grouping(self, s, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
self.cursor += 1
return True
ch -= min
if (s[ch >> 3] & (0X1 << (ch & 0x7))) == 0:
self.cursor += 1
return True
return False
def out_grouping_b(self, s, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
self.cursor -= 1
return True
ch -= min
if (s[ch >> 3] & (0X1 << (ch & 0x7))) == 0:
self.cursor -= 1
return True
return False
def in_range(self, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
return False
self.cursor += 1
return True
def in_range_b(self, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
return False
self.cursor -= 1
return True
def out_range(self, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if not (ch > max or ch < min):
return False
self.cursor += 1
return True
def out_range_b(self, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if not (ch > max or ch < min):
return False
self.cursor -= 1
return True
def eq_s(self, s_size, s):
if self.limit - self.cursor < s_size:
return False
if self.current[self.cursor:self.cursor + s_size] != s:
return False
self.cursor += s_size
return True
def eq_s_b(self, s_size, s):
if self.cursor - self.limit_backward < s_size:
return False
if self.current[self.cursor - s_size:self.cursor] != s:
return False
self.cursor -= s_size
return True
def eq_v(self, s):
return self.eq_s(len(s), s)
def eq_v_b(self, s):
return self.eq_s_b(len(s), s)
def find_among(self, v, v_size):
i = 0
j = v_size
c = self.cursor
l = self.limit
common_i = 0
common_j = 0
first_key_inspected = False
while True:
k = i + ((j - i) >> 1)
diff = 0
            common = min(common_i, common_j)  # smaller of the two
w = v[k]
for i2 in range(common, w.s_size):
if c + common == l:
diff = -1
break
diff = ord(self.current[c + common]) - ord(w.s[i2])
if diff != 0:
break
common += 1
if diff < 0:
j = k
common_j = common
else:
i = k
common_i = common
if j - i <= 1:
if i > 0:
break # v->s has been inspected
if j == i:
break # only one item in v
# - but now we need to go round once more to get
                    # v->s inspected. This looks messy, but is actually
# the optimal approach.
if first_key_inspected:
break
first_key_inspected = True
while True:
w = v[i]
if common_i >= w.s_size:
self.cursor = c + w.s_size
if w.method is None:
return w.result
method = getattr(self, w.method)
res = method()
self.cursor = c + w.s_size
if res:
return w.result
i = w.substring_i
if i < 0:
return 0
return -1 # not reachable
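    # How the Among table is consulted (comment sketch): `v` is sorted by
    # string and binary-searched against self.current starting at the
    # cursor; the longest matching entry wins. For example, matching "as"
    # against [Among("a", -1, 1), Among("as", 0, 2)] returns 2.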
def find_among_b(self, v, v_size):
'''
        find_among_b is for backwards processing; the comments in find_among apply.
'''
i = 0
j = v_size
c = self.cursor
        lb = self.limit_backward
common_i = 0
common_j = 0
first_key_inspected = False
while True:
k = i + ((j - i) >> 1)
diff = 0
common = min(common_i, common_j)
w = v[k]
for i2 in range(w.s_size - 1 - common, -1, -1):
if c - common == lb:
diff = -1
break
diff = ord(self.current[c - 1 - common]) - ord(w.s[i2])
if diff != 0:
break
common += 1
if diff < 0:
j = k
common_j = common
else:
i = k
common_i = common
if j - i <= 1:
if i > 0:
break
if j == i:
break
if first_key_inspected:
break
first_key_inspected = True
while True:
w = v[i]
if common_i >= w.s_size:
self.cursor = c - w.s_size
if w.method is None:
return w.result
method = getattr(self, w.method)
res = method()
self.cursor = c - w.s_size
if res:
return w.result
i = w.substring_i
if i < 0:
return 0
return -1 # not reachable
def replace_s(self, c_bra, c_ket, s):
'''
to replace chars between c_bra and c_ket in self.current by the
chars in s.
@type c_bra int
@type c_ket int
@type s: string
'''
adjustment = len(s) - (c_ket - c_bra)
self.current = self.current[0:c_bra] + s + self.current[c_ket:]
self.limit += adjustment
if self.cursor >= c_ket:
self.cursor += adjustment
elif self.cursor > c_bra:
self.cursor = c_bra
return adjustment
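    # Worked example (comment only): with self.current == "running",
    # replace_s(4, 7, "") removes "ing", leaves current == "runn", shrinks
    # self.limit by 3 and returns the adjustment -3.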
def slice_check(self):
if self.bra < 0 or self.bra > self.ket or self.ket > self.limit or self.limit > len(self.current):
return False
return True
def slice_from(self, s):
'''
@type s string
'''
result = False
if self.slice_check():
self.replace_s(self.bra, self.ket, s)
result = True
return result
def slice_del(self):
return self.slice_from("")
def insert(self, c_bra, c_ket, s):
'''
@type c_bra int
@type c_ket int
@type s: string
'''
adjustment = self.replace_s(c_bra, c_ket, s)
if c_bra <= self.bra:
self.bra += adjustment
if c_bra <= self.ket:
self.ket += adjustment
def slice_to(self, s):
'''
        Return the current slice (self.bra:self.ket), or '' if the slice
        is invalid; `s` is unused (kept for parity with the generated Java API).
        @type s: string
'''
result = ''
if self.slice_check():
result = self.current[self.bra:self.ket]
return result
def assign_to(self, s):
'''
        Return self.current up to self.limit; `s` is unused (Java API parity).
        @type s: string
'''
return self.current[0:self.limit]
def _stem_word(self, word):
cache = self._cache.get(word)
if cache is None:
self.set_current(word)
self._stem()
result = self.get_current()
self._cache[word] = [result, self._counter]
else:
cache[1] = self._counter
result = cache[0]
self._counter += 1
return result
def _clear_cache(self):
removecount = int(len(self._cache) - self.maxCacheSize * 8 / 10)
oldcaches = sorted(self._cache.items(), key=lambda cache: cache[1][1])[0:removecount]
for key, value in oldcaches:
del self._cache[key]
def stemWord(self, word):
result = self._stem_word(word)
if len(self._cache) > self.maxCacheSize:
self._clear_cache()
return result
def stemWords(self, words):
result = [self._stem_word(word) for word in words]
if len(self._cache) > self.maxCacheSize:
self._clear_cache()
return result
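    # Illustrative usage of the caching front end (comment only; the
    # subclass comes from elsewhere in this package):
    #
    #     from snowballstemmer.norwegian_stemmer import NorwegianStemmer
    #     stemmer = NorwegianStemmer()
    #     stemmer.stemWord("bilene")              # expected: "bil"
    #     stemmer.stemWords(["bilene", "biler"])  # results cached per word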
| 10,107 | Python | .py | 313 | 21.022364 | 106 | 0.473145 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,964 | norwegian_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/norwegian_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class NorwegianStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"a", -1, 1),
Among(u"e", -1, 1),
Among(u"ede", 1, 1),
Among(u"ande", 1, 1),
Among(u"ende", 1, 1),
Among(u"ane", 1, 1),
Among(u"ene", 1, 1),
Among(u"hetene", 6, 1),
Among(u"erte", 1, 3),
Among(u"en", -1, 1),
Among(u"heten", 9, 1),
Among(u"ar", -1, 1),
Among(u"er", -1, 1),
Among(u"heter", 12, 1),
Among(u"s", -1, 2),
Among(u"as", 14, 1),
Among(u"es", 14, 1),
Among(u"edes", 16, 1),
Among(u"endes", 16, 1),
Among(u"enes", 16, 1),
Among(u"hetenes", 19, 1),
Among(u"ens", 14, 1),
Among(u"hetens", 21, 1),
Among(u"ers", 14, 1),
Among(u"ets", 14, 1),
Among(u"et", -1, 1),
Among(u"het", 25, 1),
Among(u"ert", -1, 3),
Among(u"ast", -1, 1)
]
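    # Among(s, substring_i, result): `s` is a candidate suffix,
    # `substring_i` is the index of the entry that `s` extends (-1 if none;
    # e.g. "hetene" at index 7 extends "ene" at index 6), and `result`
    # selects the action taken in r_main_suffix (1: delete, 2: s-ending
    # check, 3: replace with "er").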
a_1 = [
Among(u"dt", -1, -1),
Among(u"vt", -1, -1)
]
a_2 = [
Among(u"leg", -1, 1),
Among(u"eleg", 0, 1),
Among(u"ig", -1, 1),
Among(u"eig", 2, 1),
Among(u"lig", 2, 1),
Among(u"elig", 4, 1),
Among(u"els", -1, 1),
Among(u"lov", -1, 1),
Among(u"elov", 7, 1),
Among(u"slov", 7, 1),
Among(u"hetslov", 9, 1)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 128]
g_s_ending = [119, 125, 149, 1]
I_x = 0
I_p1 = 0
def copy_from(self, other):
self.I_x = other.I_x
self.I_p1 = other.I_p1
        super().copy_from(other)
def r_mark_regions(self):
# (, line 26
        self.I_p1 = self.limit
# test, line 30
v_1 = self.cursor
# (, line 30
# hop, line 30
c = self.cursor + 3
if 0 > c or c > self.limit:
return False
self.cursor = c
# setmark x, line 30
self.I_x = self.cursor
self.cursor = v_1
# goto, line 31
try:
while True:
v_2 = self.cursor
try:
if not self.in_grouping(NorwegianStemmer.g_v, 97, 248):
raise lab1()
self.cursor = v_2
raise lab0()
except lab1: pass
self.cursor = v_2
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab0: pass
# gopast, line 31
try:
while True:
try:
if not self.out_grouping(NorwegianStemmer.g_v, 97, 248):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab2: pass
# setmark p1, line 31
self.I_p1 = self.cursor
# try, line 32
try:
# (, line 32
if not (self.I_p1 < self.I_x):
raise lab4()
            self.I_p1 = self.I_x
except lab4: pass
return True
def r_main_suffix(self):
# (, line 37
# setlimit, line 38
v_1 = self.limit - self.cursor
# tomark, line 38
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 38
# [, line 38
self.ket = self.cursor
# substring, line 38
among_var = self.find_among_b(NorwegianStemmer.a_0, 29)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 38
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 44
# delete, line 44
if not self.slice_del():
return False
elif among_var == 2:
# (, line 46
# or, line 46
try:
v_3 = self.limit - self.cursor
try:
if not self.in_grouping_b(NorwegianStemmer.g_s_ending, 98, 122):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_3
# (, line 46
# literal, line 46
if not self.eq_s_b(1, u"k"):
return False
if not self.out_grouping_b(NorwegianStemmer.g_v, 97, 248):
return False
except lab0: pass
# delete, line 46
if not self.slice_del():
return False
elif among_var == 3:
# (, line 48
# <-, line 48
if not self.slice_from(u"er"):
return False
return True
def r_consonant_pair(self):
# (, line 52
# test, line 53
v_1 = self.limit - self.cursor
# (, line 53
# setlimit, line 54
v_2 = self.limit - self.cursor
# tomark, line 54
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_3 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_2
# (, line 54
# [, line 54
self.ket = self.cursor
# substring, line 54
if self.find_among_b(NorwegianStemmer.a_1, 2) == 0:
self.limit_backward = v_3
return False
# ], line 54
self.bra = self.cursor
self.limit_backward = v_3
self.cursor = self.limit - v_1
# next, line 59
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 59
self.bra = self.cursor
# delete, line 59
if not self.slice_del():
return False
return True
def r_other_suffix(self):
# (, line 62
# setlimit, line 63
v_1 = self.limit - self.cursor
# tomark, line 63
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 63
# [, line 63
self.ket = self.cursor
# substring, line 63
among_var = self.find_among_b(NorwegianStemmer.a_2, 11)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 63
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 67
# delete, line 67
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 72
# do, line 74
v_1 = self.cursor
try:
# call mark_regions, line 74
if not self.r_mark_regions():
raise lab0()
except lab0: pass
self.cursor = v_1
# backwards, line 75
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 75
# do, line 76
v_2 = self.limit - self.cursor
try:
# call main_suffix, line 76
if not self.r_main_suffix():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 77
v_3 = self.limit - self.cursor
try:
# call consonant_pair, line 77
if not self.r_consonant_pair():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 78
v_4 = self.limit - self.cursor
try:
# call other_suffix, line 78
if not self.r_other_suffix():
raise lab3()
except lab3: pass
self.cursor = self.limit - v_4
self.cursor = self.limit_backward
return True
def equals(self, o):
return isinstance(o, NorwegianStemmer)
def hashCode(self):
return hash("NorwegianStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
| 8,854 | Python | .py | 286 | 20.468531 | 84 | 0.482206 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,965 | romanian_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/romanian_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class RomanianStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"", -1, 3),
Among(u"I", 0, 1),
Among(u"U", 0, 2)
]
a_1 = [
Among(u"ea", -1, 3),
Among(u"a\u0163ia", -1, 7),
Among(u"aua", -1, 2),
Among(u"iua", -1, 4),
Among(u"a\u0163ie", -1, 7),
Among(u"ele", -1, 3),
Among(u"ile", -1, 5),
Among(u"iile", 6, 4),
Among(u"iei", -1, 4),
Among(u"atei", -1, 6),
Among(u"ii", -1, 4),
Among(u"ului", -1, 1),
Among(u"ul", -1, 1),
Among(u"elor", -1, 3),
Among(u"ilor", -1, 4),
Among(u"iilor", 14, 4)
]
a_2 = [
Among(u"icala", -1, 4),
Among(u"iciva", -1, 4),
Among(u"ativa", -1, 5),
Among(u"itiva", -1, 6),
Among(u"icale", -1, 4),
Among(u"a\u0163iune", -1, 5),
Among(u"i\u0163iune", -1, 6),
Among(u"atoare", -1, 5),
Among(u"itoare", -1, 6),
Among(u"\u0103toare", -1, 5),
Among(u"icitate", -1, 4),
Among(u"abilitate", -1, 1),
Among(u"ibilitate", -1, 2),
Among(u"ivitate", -1, 3),
Among(u"icive", -1, 4),
Among(u"ative", -1, 5),
Among(u"itive", -1, 6),
Among(u"icali", -1, 4),
Among(u"atori", -1, 5),
Among(u"icatori", 18, 4),
Among(u"itori", -1, 6),
Among(u"\u0103tori", -1, 5),
Among(u"icitati", -1, 4),
Among(u"abilitati", -1, 1),
Among(u"ivitati", -1, 3),
Among(u"icivi", -1, 4),
Among(u"ativi", -1, 5),
Among(u"itivi", -1, 6),
Among(u"icit\u0103i", -1, 4),
Among(u"abilit\u0103i", -1, 1),
Among(u"ivit\u0103i", -1, 3),
Among(u"icit\u0103\u0163i", -1, 4),
Among(u"abilit\u0103\u0163i", -1, 1),
Among(u"ivit\u0103\u0163i", -1, 3),
Among(u"ical", -1, 4),
Among(u"ator", -1, 5),
Among(u"icator", 35, 4),
Among(u"itor", -1, 6),
Among(u"\u0103tor", -1, 5),
Among(u"iciv", -1, 4),
Among(u"ativ", -1, 5),
Among(u"itiv", -1, 6),
Among(u"ical\u0103", -1, 4),
Among(u"iciv\u0103", -1, 4),
Among(u"ativ\u0103", -1, 5),
Among(u"itiv\u0103", -1, 6)
]
a_3 = [
Among(u"ica", -1, 1),
Among(u"abila", -1, 1),
Among(u"ibila", -1, 1),
Among(u"oasa", -1, 1),
Among(u"ata", -1, 1),
Among(u"ita", -1, 1),
Among(u"anta", -1, 1),
Among(u"ista", -1, 3),
Among(u"uta", -1, 1),
Among(u"iva", -1, 1),
Among(u"ic", -1, 1),
Among(u"ice", -1, 1),
Among(u"abile", -1, 1),
Among(u"ibile", -1, 1),
Among(u"isme", -1, 3),
Among(u"iune", -1, 2),
Among(u"oase", -1, 1),
Among(u"ate", -1, 1),
Among(u"itate", 17, 1),
Among(u"ite", -1, 1),
Among(u"ante", -1, 1),
Among(u"iste", -1, 3),
Among(u"ute", -1, 1),
Among(u"ive", -1, 1),
Among(u"ici", -1, 1),
Among(u"abili", -1, 1),
Among(u"ibili", -1, 1),
Among(u"iuni", -1, 2),
Among(u"atori", -1, 1),
Among(u"osi", -1, 1),
Among(u"ati", -1, 1),
Among(u"itati", 30, 1),
Among(u"iti", -1, 1),
Among(u"anti", -1, 1),
Among(u"isti", -1, 3),
Among(u"uti", -1, 1),
Among(u"i\u015Fti", -1, 3),
Among(u"ivi", -1, 1),
Among(u"it\u0103i", -1, 1),
Among(u"o\u015Fi", -1, 1),
Among(u"it\u0103\u0163i", -1, 1),
Among(u"abil", -1, 1),
Among(u"ibil", -1, 1),
Among(u"ism", -1, 3),
Among(u"ator", -1, 1),
Among(u"os", -1, 1),
Among(u"at", -1, 1),
Among(u"it", -1, 1),
Among(u"ant", -1, 1),
Among(u"ist", -1, 3),
Among(u"ut", -1, 1),
Among(u"iv", -1, 1),
Among(u"ic\u0103", -1, 1),
Among(u"abil\u0103", -1, 1),
Among(u"ibil\u0103", -1, 1),
Among(u"oas\u0103", -1, 1),
Among(u"at\u0103", -1, 1),
Among(u"it\u0103", -1, 1),
Among(u"ant\u0103", -1, 1),
Among(u"ist\u0103", -1, 3),
Among(u"ut\u0103", -1, 1),
Among(u"iv\u0103", -1, 1)
]
a_4 = [
Among(u"ea", -1, 1),
Among(u"ia", -1, 1),
Among(u"esc", -1, 1),
Among(u"\u0103sc", -1, 1),
Among(u"ind", -1, 1),
Among(u"\u00E2nd", -1, 1),
Among(u"are", -1, 1),
Among(u"ere", -1, 1),
Among(u"ire", -1, 1),
Among(u"\u00E2re", -1, 1),
Among(u"se", -1, 2),
Among(u"ase", 10, 1),
Among(u"sese", 10, 2),
Among(u"ise", 10, 1),
Among(u"use", 10, 1),
Among(u"\u00E2se", 10, 1),
Among(u"e\u015Fte", -1, 1),
Among(u"\u0103\u015Fte", -1, 1),
Among(u"eze", -1, 1),
Among(u"ai", -1, 1),
Among(u"eai", 19, 1),
Among(u"iai", 19, 1),
Among(u"sei", -1, 2),
Among(u"e\u015Fti", -1, 1),
Among(u"\u0103\u015Fti", -1, 1),
Among(u"ui", -1, 1),
Among(u"ezi", -1, 1),
Among(u"\u00E2i", -1, 1),
Among(u"a\u015Fi", -1, 1),
Among(u"se\u015Fi", -1, 2),
Among(u"ase\u015Fi", 29, 1),
Among(u"sese\u015Fi", 29, 2),
Among(u"ise\u015Fi", 29, 1),
Among(u"use\u015Fi", 29, 1),
Among(u"\u00E2se\u015Fi", 29, 1),
Among(u"i\u015Fi", -1, 1),
Among(u"u\u015Fi", -1, 1),
Among(u"\u00E2\u015Fi", -1, 1),
Among(u"a\u0163i", -1, 2),
Among(u"ea\u0163i", 38, 1),
Among(u"ia\u0163i", 38, 1),
Among(u"e\u0163i", -1, 2),
Among(u"i\u0163i", -1, 2),
Among(u"\u00E2\u0163i", -1, 2),
Among(u"ar\u0103\u0163i", -1, 1),
Among(u"ser\u0103\u0163i", -1, 2),
Among(u"aser\u0103\u0163i", 45, 1),
Among(u"seser\u0103\u0163i", 45, 2),
Among(u"iser\u0103\u0163i", 45, 1),
Among(u"user\u0103\u0163i", 45, 1),
Among(u"\u00E2ser\u0103\u0163i", 45, 1),
Among(u"ir\u0103\u0163i", -1, 1),
Among(u"ur\u0103\u0163i", -1, 1),
Among(u"\u00E2r\u0103\u0163i", -1, 1),
Among(u"am", -1, 1),
Among(u"eam", 54, 1),
Among(u"iam", 54, 1),
Among(u"em", -1, 2),
Among(u"asem", 57, 1),
Among(u"sesem", 57, 2),
Among(u"isem", 57, 1),
Among(u"usem", 57, 1),
Among(u"\u00E2sem", 57, 1),
Among(u"im", -1, 2),
Among(u"\u00E2m", -1, 2),
Among(u"\u0103m", -1, 2),
Among(u"ar\u0103m", 65, 1),
Among(u"ser\u0103m", 65, 2),
Among(u"aser\u0103m", 67, 1),
Among(u"seser\u0103m", 67, 2),
Among(u"iser\u0103m", 67, 1),
Among(u"user\u0103m", 67, 1),
Among(u"\u00E2ser\u0103m", 67, 1),
Among(u"ir\u0103m", 65, 1),
Among(u"ur\u0103m", 65, 1),
Among(u"\u00E2r\u0103m", 65, 1),
Among(u"au", -1, 1),
Among(u"eau", 76, 1),
Among(u"iau", 76, 1),
Among(u"indu", -1, 1),
Among(u"\u00E2ndu", -1, 1),
Among(u"ez", -1, 1),
Among(u"easc\u0103", -1, 1),
Among(u"ar\u0103", -1, 1),
Among(u"ser\u0103", -1, 2),
Among(u"aser\u0103", 84, 1),
Among(u"seser\u0103", 84, 2),
Among(u"iser\u0103", 84, 1),
Among(u"user\u0103", 84, 1),
Among(u"\u00E2ser\u0103", 84, 1),
Among(u"ir\u0103", -1, 1),
Among(u"ur\u0103", -1, 1),
Among(u"\u00E2r\u0103", -1, 1),
Among(u"eaz\u0103", -1, 1)
]
a_5 = [
Among(u"a", -1, 1),
Among(u"e", -1, 1),
Among(u"ie", 1, 1),
Among(u"i", -1, 1),
Among(u"\u0103", -1, 1)
]
g_v = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 32, 0, 0, 4]
B_standard_suffix_removed = False
I_p2 = 0
I_p1 = 0
I_pV = 0
def copy_from(self, other):
self.B_standard_suffix_removed = other.B_standard_suffix_removed
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.I_pV = other.I_pV
        super().copy_from(other)
def r_prelude(self):
# (, line 31
# repeat, line 32
try:
while True:
try:
v_1 = self.cursor
try:
# goto, line 32
try:
while True:
v_2 = self.cursor
try:
# (, line 32
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab4()
# [, line 33
self.bra = self.cursor
# or, line 33
try:
v_3 = self.cursor
try:
# (, line 33
# literal, line 33
if not self.eq_s(1, u"u"):
raise lab6()
# ], line 33
self.ket = self.cursor
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab6()
# <-, line 33
if not self.slice_from(u"U"):
return False
raise lab5()
except lab6: pass
self.cursor = v_3
# (, line 34
# literal, line 34
if not self.eq_s(1, u"i"):
raise lab4()
# ], line 34
self.ket = self.cursor
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab4()
# <-, line 34
if not self.slice_from(u"I"):
return False
except lab5: pass
self.cursor = v_2
raise lab3()
except lab4: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
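    # Illustrative effect of r_prelude (comment only): in "ploua" the "u"
    # standing between vowels becomes "U" (-> "ploUa"), so the suffix rules
    # do not treat it as a vowel; r_postlude maps it back afterwards.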
def r_mark_regions(self):
# (, line 38
        self.I_pV = self.limit
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 44
v_1 = self.cursor
try:
# (, line 44
# or, line 46
try:
v_2 = self.cursor
try:
# (, line 45
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab2()
# or, line 45
try:
v_3 = self.cursor
try:
# (, line 45
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab4()
# gopast, line 45
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
raise lab4()
self.cursor += 1
except lab5: pass
raise lab3()
except lab4: pass
self.cursor = v_3
# (, line 45
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab2()
# gopast, line 45
try:
while True:
try:
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab7: pass
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_2
# (, line 47
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab0()
# or, line 47
try:
v_6 = self.cursor
try:
# (, line 47
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab10()
# gopast, line 47
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab12()
raise lab11()
except lab12: pass
if self.cursor >= self.limit:
raise lab10()
self.cursor += 1
except lab11: pass
raise lab9()
except lab10: pass
self.cursor = v_6
# (, line 47
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab0()
# next, line 47
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab9: pass
except lab1: pass
# setmark pV, line 48
self.I_pV = self.cursor
except lab0: pass
self.cursor = v_1
# do, line 50
v_8 = self.cursor
try:
# (, line 50
# gopast, line 51
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab15()
raise lab14()
except lab15: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab14: pass
# gopast, line 51
try:
while True:
try:
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab17()
raise lab16()
except lab17: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab16: pass
# setmark p1, line 51
self.I_p1 = self.cursor
# gopast, line 52
try:
while True:
try:
if not self.in_grouping(RomanianStemmer.g_v, 97, 259):
raise lab19()
raise lab18()
except lab19: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab18: pass
# gopast, line 52
try:
while True:
try:
if not self.out_grouping(RomanianStemmer.g_v, 97, 259):
raise lab21()
raise lab20()
except lab21: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab20: pass
# setmark p2, line 52
self.I_p2 = self.cursor
except lab13: pass
self.cursor = v_8
return True
def r_postlude(self):
# repeat, line 56
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 56
# [, line 58
self.bra = self.cursor
# substring, line 58
among_var = self.find_among(RomanianStemmer.a_0, 3)
if among_var == 0:
raise lab2()
# ], line 58
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 59
# <-, line 59
if not self.slice_from(u"i"):
return False
elif among_var == 2:
# (, line 60
# <-, line 60
if not self.slice_from(u"u"):
return False
elif among_var == 3:
# (, line 61
# next, line 61
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_RV(self):
if not self.I_pV <= self.cursor:
return False
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_step_0(self):
# (, line 72
# [, line 73
self.ket = self.cursor
# substring, line 73
among_var = self.find_among_b(RomanianStemmer.a_1, 16)
if among_var == 0:
return False
# ], line 73
self.bra = self.cursor
# call R1, line 73
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 75
# delete, line 75
if not self.slice_del():
return False
elif among_var == 2:
# (, line 77
# <-, line 77
if not self.slice_from(u"a"):
return False
elif among_var == 3:
# (, line 79
# <-, line 79
if not self.slice_from(u"e"):
return False
elif among_var == 4:
# (, line 81
# <-, line 81
if not self.slice_from(u"i"):
return False
elif among_var == 5:
# (, line 83
# not, line 83
v_1 = self.limit - self.cursor
try:
# literal, line 83
if not self.eq_s_b(2, u"ab"):
raise lab0()
return False
except lab0: pass
self.cursor = self.limit - v_1
# <-, line 83
if not self.slice_from(u"i"):
return False
elif among_var == 6:
# (, line 85
# <-, line 85
if not self.slice_from(u"at"):
return False
elif among_var == 7:
# (, line 87
# <-, line 87
if not self.slice_from(u"a\u0163i"):
return False
return True
def r_combo_suffix(self):
# test, line 91
v_1 = self.limit - self.cursor
# (, line 91
# [, line 92
self.ket = self.cursor
# substring, line 92
among_var = self.find_among_b(RomanianStemmer.a_2, 46)
if among_var == 0:
return False
# ], line 92
self.bra = self.cursor
# call R1, line 92
if not self.r_R1():
return False
# (, line 92
if among_var == 0:
return False
elif among_var == 1:
# (, line 100
# <-, line 101
if not self.slice_from(u"abil"):
return False
elif among_var == 2:
# (, line 103
# <-, line 104
if not self.slice_from(u"ibil"):
return False
elif among_var == 3:
# (, line 106
# <-, line 107
if not self.slice_from(u"iv"):
return False
elif among_var == 4:
# (, line 112
# <-, line 113
if not self.slice_from(u"ic"):
return False
elif among_var == 5:
# (, line 117
# <-, line 118
if not self.slice_from(u"at"):
return False
elif among_var == 6:
# (, line 121
# <-, line 122
if not self.slice_from(u"it"):
return False
# set standard_suffix_removed, line 125
self.B_standard_suffix_removed = True
self.cursor = self.limit - v_1
return True
def r_standard_suffix(self):
# (, line 129
# unset standard_suffix_removed, line 130
self.B_standard_suffix_removed = False
# repeat, line 131
try:
while True:
try:
v_1 = self.limit - self.cursor
try:
# call combo_suffix, line 131
if not self.r_combo_suffix():
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_1
raise lab0()
except lab1: pass
except lab0: pass
# [, line 132
self.ket = self.cursor
# substring, line 132
among_var = self.find_among_b(RomanianStemmer.a_3, 62)
if among_var == 0:
return False
# ], line 132
self.bra = self.cursor
# call R2, line 132
if not self.r_R2():
return False
# (, line 132
if among_var == 0:
return False
elif among_var == 1:
# (, line 148
# delete, line 149
if not self.slice_del():
return False
elif among_var == 2:
# (, line 151
# literal, line 152
if not self.eq_s_b(1, u"\u0163"):
return False
# ], line 152
self.bra = self.cursor
# <-, line 152
if not self.slice_from(u"t"):
return False
elif among_var == 3:
# (, line 155
# <-, line 156
if not self.slice_from(u"ist"):
return False
# set standard_suffix_removed, line 160
self.B_standard_suffix_removed = True
return True
def r_verb_suffix(self):
# setlimit, line 164
v_1 = self.limit - self.cursor
# tomark, line 164
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 164
# [, line 165
self.ket = self.cursor
# substring, line 165
among_var = self.find_among_b(RomanianStemmer.a_4, 94)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 165
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 200
# or, line 200
try:
v_3 = self.limit - self.cursor
try:
if not self.out_grouping_b(RomanianStemmer.g_v, 97, 259):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_3
# literal, line 200
if not self.eq_s_b(1, u"u"):
self.limit_backward = v_2
return False
except lab0: pass
# delete, line 200
if not self.slice_del():
return False
elif among_var == 2:
# (, line 214
# delete, line 214
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def r_vowel_suffix(self):
# (, line 218
# [, line 219
self.ket = self.cursor
# substring, line 219
among_var = self.find_among_b(RomanianStemmer.a_5, 5)
if among_var == 0:
return False
# ], line 219
self.bra = self.cursor
# call RV, line 219
if not self.r_RV():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 220
# delete, line 220
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 225
# do, line 226
v_1 = self.cursor
try:
# call prelude, line 226
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 227
v_2 = self.cursor
try:
# call mark_regions, line 227
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 228
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 228
# do, line 229
v_3 = self.limit - self.cursor
try:
# call step_0, line 229
if not self.r_step_0():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 230
v_4 = self.limit - self.cursor
try:
# call standard_suffix, line 230
if not self.r_standard_suffix():
raise lab3()
except lab3: pass
self.cursor = self.limit - v_4
# do, line 231
v_5 = self.limit - self.cursor
try:
# (, line 231
# or, line 231
try:
v_6 = self.limit - self.cursor
try:
# Boolean test standard_suffix_removed, line 231
if not self.B_standard_suffix_removed:
raise lab6()
raise lab5()
except lab6: pass
self.cursor = self.limit - v_6
# call verb_suffix, line 231
if not self.r_verb_suffix():
raise lab4()
except lab5: pass
except lab4: pass
self.cursor = self.limit - v_5
# do, line 232
v_7 = self.limit - self.cursor
try:
# call vowel_suffix, line 232
if not self.r_vowel_suffix():
raise lab7()
except lab7: pass
self.cursor = self.limit - v_7
self.cursor = self.limit_backward
# do, line 234
v_8 = self.cursor
try:
# call postlude, line 234
if not self.r_postlude():
raise lab8()
except lab8: pass
self.cursor = v_8
return True
def equals(self, o):
return isinstance(o, RomanianStemmer)
def hashCode(self):
return hash("RomanianStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
class lab16(BaseException): pass
class lab17(BaseException): pass
class lab18(BaseException): pass
class lab19(BaseException): pass
class lab20(BaseException): pass
class lab21(BaseException): pass
| 30,431 | Python | .py | 868 | 20.381336 | 98 | 0.409625 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,966 | turkish_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/turkish_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class TurkishStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"m", -1, -1),
Among(u"n", -1, -1),
Among(u"miz", -1, -1),
Among(u"niz", -1, -1),
Among(u"muz", -1, -1),
Among(u"nuz", -1, -1),
Among(u"m\u00FCz", -1, -1),
Among(u"n\u00FCz", -1, -1),
Among(u"m\u0131z", -1, -1),
Among(u"n\u0131z", -1, -1)
]
a_1 = [
Among(u"leri", -1, -1),
Among(u"lar\u0131", -1, -1)
]
a_2 = [
Among(u"ni", -1, -1),
Among(u"nu", -1, -1),
Among(u"n\u00FC", -1, -1),
Among(u"n\u0131", -1, -1)
]
a_3 = [
Among(u"in", -1, -1),
Among(u"un", -1, -1),
Among(u"\u00FCn", -1, -1),
Among(u"\u0131n", -1, -1)
]
a_4 = [
Among(u"a", -1, -1),
Among(u"e", -1, -1)
]
a_5 = [
Among(u"na", -1, -1),
Among(u"ne", -1, -1)
]
a_6 = [
Among(u"da", -1, -1),
Among(u"ta", -1, -1),
Among(u"de", -1, -1),
Among(u"te", -1, -1)
]
a_7 = [
Among(u"nda", -1, -1),
Among(u"nde", -1, -1)
]
a_8 = [
Among(u"dan", -1, -1),
Among(u"tan", -1, -1),
Among(u"den", -1, -1),
Among(u"ten", -1, -1)
]
a_9 = [
Among(u"ndan", -1, -1),
Among(u"nden", -1, -1)
]
a_10 = [
Among(u"la", -1, -1),
Among(u"le", -1, -1)
]
a_11 = [
Among(u"ca", -1, -1),
Among(u"ce", -1, -1)
]
a_12 = [
Among(u"im", -1, -1),
Among(u"um", -1, -1),
Among(u"\u00FCm", -1, -1),
Among(u"\u0131m", -1, -1)
]
a_13 = [
Among(u"sin", -1, -1),
Among(u"sun", -1, -1),
Among(u"s\u00FCn", -1, -1),
Among(u"s\u0131n", -1, -1)
]
a_14 = [
Among(u"iz", -1, -1),
Among(u"uz", -1, -1),
Among(u"\u00FCz", -1, -1),
Among(u"\u0131z", -1, -1)
]
a_15 = [
Among(u"siniz", -1, -1),
Among(u"sunuz", -1, -1),
Among(u"s\u00FCn\u00FCz", -1, -1),
Among(u"s\u0131n\u0131z", -1, -1)
]
a_16 = [
Among(u"lar", -1, -1),
Among(u"ler", -1, -1)
]
a_17 = [
Among(u"niz", -1, -1),
Among(u"nuz", -1, -1),
Among(u"n\u00FCz", -1, -1),
Among(u"n\u0131z", -1, -1)
]
a_18 = [
Among(u"dir", -1, -1),
Among(u"tir", -1, -1),
Among(u"dur", -1, -1),
Among(u"tur", -1, -1),
Among(u"d\u00FCr", -1, -1),
Among(u"t\u00FCr", -1, -1),
Among(u"d\u0131r", -1, -1),
Among(u"t\u0131r", -1, -1)
]
a_19 = [
Among(u"cas\u0131na", -1, -1),
Among(u"cesine", -1, -1)
]
a_20 = [
Among(u"di", -1, -1),
Among(u"ti", -1, -1),
Among(u"dik", -1, -1),
Among(u"tik", -1, -1),
Among(u"duk", -1, -1),
Among(u"tuk", -1, -1),
Among(u"d\u00FCk", -1, -1),
Among(u"t\u00FCk", -1, -1),
Among(u"d\u0131k", -1, -1),
Among(u"t\u0131k", -1, -1),
Among(u"dim", -1, -1),
Among(u"tim", -1, -1),
Among(u"dum", -1, -1),
Among(u"tum", -1, -1),
Among(u"d\u00FCm", -1, -1),
Among(u"t\u00FCm", -1, -1),
Among(u"d\u0131m", -1, -1),
Among(u"t\u0131m", -1, -1),
Among(u"din", -1, -1),
Among(u"tin", -1, -1),
Among(u"dun", -1, -1),
Among(u"tun", -1, -1),
Among(u"d\u00FCn", -1, -1),
Among(u"t\u00FCn", -1, -1),
Among(u"d\u0131n", -1, -1),
Among(u"t\u0131n", -1, -1),
Among(u"du", -1, -1),
Among(u"tu", -1, -1),
Among(u"d\u00FC", -1, -1),
Among(u"t\u00FC", -1, -1),
Among(u"d\u0131", -1, -1),
Among(u"t\u0131", -1, -1)
]
a_21 = [
Among(u"sa", -1, -1),
Among(u"se", -1, -1),
Among(u"sak", -1, -1),
Among(u"sek", -1, -1),
Among(u"sam", -1, -1),
Among(u"sem", -1, -1),
Among(u"san", -1, -1),
Among(u"sen", -1, -1)
]
a_22 = [
Among(u"mi\u015F", -1, -1),
Among(u"mu\u015F", -1, -1),
Among(u"m\u00FC\u015F", -1, -1),
Among(u"m\u0131\u015F", -1, -1)
]
a_23 = [
Among(u"b", -1, 1),
Among(u"c", -1, 2),
Among(u"d", -1, 3),
Among(u"\u011F", -1, 4)
]
g_vowel = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 8, 0, 0, 0, 0, 0, 0, 1]
g_U = [1, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 1]
g_vowel1 = [1, 64, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
g_vowel2 = [17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 130]
g_vowel3 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
g_vowel4 = [17]
g_vowel5 = [65]
g_vowel6 = [65]
B_continue_stemming_noun_suffixes = False
I_strlen = 0
def copy_from(self, other):
self.B_continue_stemming_noun_suffixes = other.B_continue_stemming_noun_suffixes
self.I_strlen = other.I_strlen
        super().copy_from(other)
def r_check_vowel_harmony(self):
# (, line 111
# test, line 112
v_1 = self.limit - self.cursor
# (, line 113
# (, line 114
# goto, line 114
try:
while True:
v_2 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab1()
self.cursor = self.limit - v_2
raise lab0()
except lab1: pass
self.cursor = self.limit - v_2
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab0: pass
# (, line 115
# or, line 116
try:
v_3 = self.limit - self.cursor
try:
# (, line 116
# literal, line 116
if not self.eq_s_b(1, u"a"):
raise lab3()
# goto, line 116
try:
while True:
v_4 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel1, 97, 305):
raise lab5()
self.cursor = self.limit - v_4
raise lab4()
except lab5: pass
self.cursor = self.limit - v_4
if self.cursor <= self.limit_backward:
raise lab3()
self.cursor -= 1
except lab4: pass
raise lab2()
except lab3: pass
self.cursor = self.limit - v_3
try:
# (, line 117
# literal, line 117
if not self.eq_s_b(1, u"e"):
raise lab6()
# goto, line 117
try:
while True:
v_5 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel2, 101, 252):
raise lab8()
self.cursor = self.limit - v_5
raise lab7()
except lab8: pass
self.cursor = self.limit - v_5
if self.cursor <= self.limit_backward:
raise lab6()
self.cursor -= 1
except lab7: pass
raise lab2()
except lab6: pass
self.cursor = self.limit - v_3
try:
# (, line 118
# literal, line 118
if not self.eq_s_b(1, u"\u0131"):
raise lab9()
# goto, line 118
try:
while True:
v_6 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel3, 97, 305):
raise lab11()
self.cursor = self.limit - v_6
raise lab10()
except lab11: pass
self.cursor = self.limit - v_6
if self.cursor <= self.limit_backward:
raise lab9()
self.cursor -= 1
except lab10: pass
raise lab2()
except lab9: pass
self.cursor = self.limit - v_3
try:
# (, line 119
# literal, line 119
if not self.eq_s_b(1, u"i"):
raise lab12()
# goto, line 119
try:
while True:
v_7 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel4, 101, 105):
raise lab14()
self.cursor = self.limit - v_7
raise lab13()
except lab14: pass
self.cursor = self.limit - v_7
if self.cursor <= self.limit_backward:
raise lab12()
self.cursor -= 1
except lab13: pass
raise lab2()
except lab12: pass
self.cursor = self.limit - v_3
try:
# (, line 120
# literal, line 120
if not self.eq_s_b(1, u"o"):
raise lab15()
# goto, line 120
try:
while True:
v_8 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel5, 111, 117):
raise lab17()
self.cursor = self.limit - v_8
raise lab16()
except lab17: pass
self.cursor = self.limit - v_8
if self.cursor <= self.limit_backward:
raise lab15()
self.cursor -= 1
except lab16: pass
raise lab2()
except lab15: pass
self.cursor = self.limit - v_3
try:
# (, line 121
# literal, line 121
if not self.eq_s_b(1, u"\u00F6"):
raise lab18()
# goto, line 121
try:
while True:
v_9 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel6, 246, 252):
raise lab20()
self.cursor = self.limit - v_9
raise lab19()
except lab20: pass
self.cursor = self.limit - v_9
if self.cursor <= self.limit_backward:
raise lab18()
self.cursor -= 1
except lab19: pass
raise lab2()
except lab18: pass
self.cursor = self.limit - v_3
try:
# (, line 122
# literal, line 122
if not self.eq_s_b(1, u"u"):
raise lab21()
# goto, line 122
try:
while True:
v_10 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel5, 111, 117):
raise lab23()
self.cursor = self.limit - v_10
raise lab22()
except lab23: pass
self.cursor = self.limit - v_10
if self.cursor <= self.limit_backward:
raise lab21()
self.cursor -= 1
except lab22: pass
raise lab2()
except lab21: pass
self.cursor = self.limit - v_3
# (, line 123
# literal, line 123
if not self.eq_s_b(1, u"\u00FC"):
return False
# goto, line 123
try:
while True:
v_11 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel6, 246, 252):
raise lab25()
self.cursor = self.limit - v_11
raise lab24()
except lab25: pass
self.cursor = self.limit - v_11
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab24: pass
except lab2: pass
self.cursor = self.limit - v_1
return True
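    # Vowel-harmony illustration (comment only): a Turkish suffix vowel must
    # agree in frontness/backness with the last stem vowel, e.g. "evler"
    # pairs e..e and passes the check above, while a mismatched form like
    # "evlar" fails the grouping tests.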
def r_mark_suffix_with_optional_n_consonant(self):
# (, line 132
# or, line 134
try:
v_1 = self.limit - self.cursor
try:
# (, line 133
# (, line 133
# test, line 133
v_2 = self.limit - self.cursor
# literal, line 133
if not self.eq_s_b(1, u"n"):
raise lab1()
self.cursor = self.limit - v_2
# next, line 133
if self.cursor <= self.limit_backward:
raise lab1()
self.cursor -= 1
# (, line 133
# test, line 133
v_3 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab1()
self.cursor = self.limit - v_3
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 135
# (, line 135
# not, line 135
v_4 = self.limit - self.cursor
try:
# (, line 135
# test, line 135
v_5 = self.limit - self.cursor
# literal, line 135
if not self.eq_s_b(1, u"n"):
raise lab2()
self.cursor = self.limit - v_5
return False
except lab2: pass
self.cursor = self.limit - v_4
# test, line 135
v_6 = self.limit - self.cursor
# (, line 135
# next, line 135
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# (, line 135
# test, line 135
v_7 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
return False
self.cursor = self.limit - v_7
self.cursor = self.limit - v_6
except lab0: pass
return True
def r_mark_suffix_with_optional_s_consonant(self):
# (, line 143
# or, line 145
try:
v_1 = self.limit - self.cursor
try:
# (, line 144
# (, line 144
# test, line 144
v_2 = self.limit - self.cursor
# literal, line 144
if not self.eq_s_b(1, u"s"):
raise lab1()
self.cursor = self.limit - v_2
# next, line 144
if self.cursor <= self.limit_backward:
raise lab1()
self.cursor -= 1
# (, line 144
# test, line 144
v_3 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab1()
self.cursor = self.limit - v_3
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 146
# (, line 146
# not, line 146
v_4 = self.limit - self.cursor
try:
# (, line 146
# test, line 146
v_5 = self.limit - self.cursor
# literal, line 146
if not self.eq_s_b(1, u"s"):
raise lab2()
self.cursor = self.limit - v_5
return False
except lab2: pass
self.cursor = self.limit - v_4
# test, line 146
v_6 = self.limit - self.cursor
# (, line 146
# next, line 146
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# (, line 146
# test, line 146
v_7 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
return False
self.cursor = self.limit - v_7
self.cursor = self.limit - v_6
except lab0: pass
return True
def r_mark_suffix_with_optional_y_consonant(self):
# (, line 153
# or, line 155
try:
v_1 = self.limit - self.cursor
try:
# (, line 154
# (, line 154
# test, line 154
v_2 = self.limit - self.cursor
# literal, line 154
if not self.eq_s_b(1, u"y"):
raise lab1()
self.cursor = self.limit - v_2
# next, line 154
if self.cursor <= self.limit_backward:
raise lab1()
self.cursor -= 1
# (, line 154
# test, line 154
v_3 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab1()
self.cursor = self.limit - v_3
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 156
# (, line 156
# not, line 156
v_4 = self.limit - self.cursor
try:
# (, line 156
# test, line 156
v_5 = self.limit - self.cursor
# literal, line 156
if not self.eq_s_b(1, u"y"):
raise lab2()
self.cursor = self.limit - v_5
return False
except lab2: pass
self.cursor = self.limit - v_4
# test, line 156
v_6 = self.limit - self.cursor
# (, line 156
# next, line 156
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# (, line 156
# test, line 156
v_7 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
return False
self.cursor = self.limit - v_7
self.cursor = self.limit - v_6
except lab0: pass
return True
def r_mark_suffix_with_optional_U_vowel(self):
# (, line 159
# or, line 161
try:
v_1 = self.limit - self.cursor
try:
# (, line 160
# (, line 160
# test, line 160
v_2 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_U, 105, 305):
raise lab1()
self.cursor = self.limit - v_2
# next, line 160
if self.cursor <= self.limit_backward:
raise lab1()
self.cursor -= 1
# (, line 160
# test, line 160
v_3 = self.limit - self.cursor
if not self.out_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab1()
self.cursor = self.limit - v_3
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 162
# (, line 162
# not, line 162
v_4 = self.limit - self.cursor
try:
# (, line 162
# test, line 162
v_5 = self.limit - self.cursor
if not self.in_grouping_b(TurkishStemmer.g_U, 105, 305):
raise lab2()
self.cursor = self.limit - v_5
return False
except lab2: pass
self.cursor = self.limit - v_4
# test, line 162
v_6 = self.limit - self.cursor
# (, line 162
# next, line 162
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# (, line 162
# test, line 162
v_7 = self.limit - self.cursor
if not self.out_grouping_b(TurkishStemmer.g_vowel, 97, 305):
return False
self.cursor = self.limit - v_7
self.cursor = self.limit - v_6
except lab0: pass
return True
def r_mark_possessives(self):
# (, line 166
# among, line 167
if self.find_among_b(TurkishStemmer.a_0, 10) == 0:
return False
# (, line 169
# call mark_suffix_with_optional_U_vowel, line 169
if not self.r_mark_suffix_with_optional_U_vowel():
return False
return True
def r_mark_sU(self):
# (, line 172
# call check_vowel_harmony, line 173
if not self.r_check_vowel_harmony():
return False
if not self.in_grouping_b(TurkishStemmer.g_U, 105, 305):
return False
# (, line 175
# call mark_suffix_with_optional_s_consonant, line 175
if not self.r_mark_suffix_with_optional_s_consonant():
return False
return True
def r_mark_lArI(self):
# (, line 178
# among, line 179
if self.find_among_b(TurkishStemmer.a_1, 2) == 0:
return False
return True
def r_mark_yU(self):
# (, line 182
# call check_vowel_harmony, line 183
if not self.r_check_vowel_harmony():
return False
if not self.in_grouping_b(TurkishStemmer.g_U, 105, 305):
return False
# (, line 185
# call mark_suffix_with_optional_y_consonant, line 185
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_nU(self):
# (, line 188
# call check_vowel_harmony, line 189
if not self.r_check_vowel_harmony():
return False
# among, line 190
if self.find_among_b(TurkishStemmer.a_2, 4) == 0:
return False
return True
def r_mark_nUn(self):
# (, line 193
# call check_vowel_harmony, line 194
if not self.r_check_vowel_harmony():
return False
# among, line 195
if self.find_among_b(TurkishStemmer.a_3, 4) == 0:
return False
# (, line 196
# call mark_suffix_with_optional_n_consonant, line 196
if not self.r_mark_suffix_with_optional_n_consonant():
return False
return True
def r_mark_yA(self):
# (, line 199
# call check_vowel_harmony, line 200
if not self.r_check_vowel_harmony():
return False
# among, line 201
if self.find_among_b(TurkishStemmer.a_4, 2) == 0:
return False
# (, line 202
# call mark_suffix_with_optional_y_consonant, line 202
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_nA(self):
# (, line 205
# call check_vowel_harmony, line 206
if not self.r_check_vowel_harmony():
return False
# among, line 207
if self.find_among_b(TurkishStemmer.a_5, 2) == 0:
return False
return True
def r_mark_DA(self):
# (, line 210
# call check_vowel_harmony, line 211
if not self.r_check_vowel_harmony():
return False
# among, line 212
if self.find_among_b(TurkishStemmer.a_6, 4) == 0:
return False
return True
def r_mark_ndA(self):
# (, line 215
# call check_vowel_harmony, line 216
if not self.r_check_vowel_harmony():
return False
# among, line 217
if self.find_among_b(TurkishStemmer.a_7, 2) == 0:
return False
return True
def r_mark_DAn(self):
# (, line 220
# call check_vowel_harmony, line 221
if not self.r_check_vowel_harmony():
return False
# among, line 222
if self.find_among_b(TurkishStemmer.a_8, 4) == 0:
return False
return True
def r_mark_ndAn(self):
# (, line 225
# call check_vowel_harmony, line 226
if not self.r_check_vowel_harmony():
return False
# among, line 227
if self.find_among_b(TurkishStemmer.a_9, 2) == 0:
return False
return True
def r_mark_ylA(self):
# (, line 230
# call check_vowel_harmony, line 231
if not self.r_check_vowel_harmony():
return False
# among, line 232
if self.find_among_b(TurkishStemmer.a_10, 2) == 0:
return False
# (, line 233
# call mark_suffix_with_optional_y_consonant, line 233
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_ki(self):
# (, line 236
# literal, line 237
if not self.eq_s_b(2, u"ki"):
return False
return True
def r_mark_ncA(self):
# (, line 240
# call check_vowel_harmony, line 241
if not self.r_check_vowel_harmony():
return False
# among, line 242
if self.find_among_b(TurkishStemmer.a_11, 2) == 0:
return False
# (, line 243
# call mark_suffix_with_optional_n_consonant, line 243
if not self.r_mark_suffix_with_optional_n_consonant():
return False
return True
def r_mark_yUm(self):
# (, line 246
# call check_vowel_harmony, line 247
if not self.r_check_vowel_harmony():
return False
# among, line 248
if self.find_among_b(TurkishStemmer.a_12, 4) == 0:
return False
# (, line 249
# call mark_suffix_with_optional_y_consonant, line 249
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_sUn(self):
# (, line 252
# call check_vowel_harmony, line 253
if not self.r_check_vowel_harmony():
return False
# among, line 254
if self.find_among_b(TurkishStemmer.a_13, 4) == 0:
return False
return True
def r_mark_yUz(self):
# (, line 257
# call check_vowel_harmony, line 258
if not self.r_check_vowel_harmony():
return False
# among, line 259
if self.find_among_b(TurkishStemmer.a_14, 4) == 0:
return False
# (, line 260
# call mark_suffix_with_optional_y_consonant, line 260
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_sUnUz(self):
# (, line 263
# among, line 264
if self.find_among_b(TurkishStemmer.a_15, 4) == 0:
return False
return True
def r_mark_lAr(self):
# (, line 267
# call check_vowel_harmony, line 268
if not self.r_check_vowel_harmony():
return False
# among, line 269
if self.find_among_b(TurkishStemmer.a_16, 2) == 0:
return False
return True
def r_mark_nUz(self):
# (, line 272
# call check_vowel_harmony, line 273
if not self.r_check_vowel_harmony():
return False
# among, line 274
if self.find_among_b(TurkishStemmer.a_17, 4) == 0:
return False
return True
def r_mark_DUr(self):
# (, line 277
# call check_vowel_harmony, line 278
if not self.r_check_vowel_harmony():
return False
# among, line 279
if self.find_among_b(TurkishStemmer.a_18, 8) == 0:
return False
return True
def r_mark_cAsInA(self):
# (, line 282
# among, line 283
if self.find_among_b(TurkishStemmer.a_19, 2) == 0:
return False
return True
def r_mark_yDU(self):
# (, line 286
# call check_vowel_harmony, line 287
if not self.r_check_vowel_harmony():
return False
# among, line 288
if self.find_among_b(TurkishStemmer.a_20, 32) == 0:
return False
# (, line 292
# call mark_suffix_with_optional_y_consonant, line 292
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_ysA(self):
# (, line 296
# among, line 297
if self.find_among_b(TurkishStemmer.a_21, 8) == 0:
return False
# (, line 298
# call mark_suffix_with_optional_y_consonant, line 298
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_ymUs_(self):
# (, line 301
# call check_vowel_harmony, line 302
if not self.r_check_vowel_harmony():
return False
# among, line 303
if self.find_among_b(TurkishStemmer.a_22, 4) == 0:
return False
# (, line 304
# call mark_suffix_with_optional_y_consonant, line 304
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
def r_mark_yken(self):
# (, line 307
# literal, line 308
if not self.eq_s_b(3, u"ken"):
return False
# (, line 308
# call mark_suffix_with_optional_y_consonant, line 308
if not self.r_mark_suffix_with_optional_y_consonant():
return False
return True
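    # r_stem_nominal_verb_suffixes strips copular/verbal endings attached to
    # nominals (-ymUs, -yDU, -ysA, -yken, -cAsInA, -DUr, person markers) and
    # records in B_continue_stemming_noun_suffixes whether noun-suffix
    # stripping should still run afterwards.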
def r_stem_nominal_verb_suffixes(self):
# (, line 311
# [, line 312
self.ket = self.cursor
# set continue_stemming_noun_suffixes, line 313
self.B_continue_stemming_noun_suffixes = True
# or, line 315
try:
v_1 = self.limit - self.cursor
try:
# (, line 314
# or, line 314
try:
v_2 = self.limit - self.cursor
try:
# call mark_ymUs_, line 314
if not self.r_mark_ymUs_():
raise lab3()
raise lab2()
except lab3: pass
self.cursor = self.limit - v_2
try:
# call mark_yDU, line 314
if not self.r_mark_yDU():
raise lab4()
raise lab2()
except lab4: pass
self.cursor = self.limit - v_2
try:
# call mark_ysA, line 314
if not self.r_mark_ysA():
raise lab5()
raise lab2()
except lab5: pass
self.cursor = self.limit - v_2
# call mark_yken, line 314
if not self.r_mark_yken():
raise lab1()
except lab2: pass
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
try:
# (, line 316
# call mark_cAsInA, line 316
if not self.r_mark_cAsInA():
raise lab6()
# (, line 316
# or, line 316
try:
v_3 = self.limit - self.cursor
try:
# call mark_sUnUz, line 316
if not self.r_mark_sUnUz():
raise lab8()
raise lab7()
except lab8: pass
self.cursor = self.limit - v_3
try:
# call mark_lAr, line 316
if not self.r_mark_lAr():
raise lab9()
raise lab7()
except lab9: pass
self.cursor = self.limit - v_3
try:
# call mark_yUm, line 316
if not self.r_mark_yUm():
raise lab10()
raise lab7()
except lab10: pass
self.cursor = self.limit - v_3
try:
# call mark_sUn, line 316
if not self.r_mark_sUn():
raise lab11()
raise lab7()
except lab11: pass
self.cursor = self.limit - v_3
try:
# call mark_yUz, line 316
if not self.r_mark_yUz():
raise lab12()
raise lab7()
except lab12: pass
self.cursor = self.limit - v_3
except lab7: pass
# call mark_ymUs_, line 316
if not self.r_mark_ymUs_():
raise lab6()
raise lab0()
except lab6: pass
self.cursor = self.limit - v_1
try:
# (, line 318
# call mark_lAr, line 319
if not self.r_mark_lAr():
raise lab13()
# ], line 319
self.bra = self.cursor
# delete, line 319
if not self.slice_del():
return False
# try, line 319
v_4 = self.limit - self.cursor
try:
# (, line 319
# [, line 319
self.ket = self.cursor
# (, line 319
# or, line 319
try:
v_5 = self.limit - self.cursor
try:
# call mark_DUr, line 319
if not self.r_mark_DUr():
raise lab16()
raise lab15()
except lab16: pass
self.cursor = self.limit - v_5
try:
# call mark_yDU, line 319
if not self.r_mark_yDU():
raise lab17()
raise lab15()
except lab17: pass
self.cursor = self.limit - v_5
try:
# call mark_ysA, line 319
if not self.r_mark_ysA():
raise lab18()
raise lab15()
except lab18: pass
self.cursor = self.limit - v_5
# call mark_ymUs_, line 319
if not self.r_mark_ymUs_():
self.cursor = self.limit - v_4
raise lab14()
except lab15: pass
except lab14: pass
# unset continue_stemming_noun_suffixes, line 320
self.B_continue_stemming_noun_suffixes = False
raise lab0()
except lab13: pass
self.cursor = self.limit - v_1
try:
# (, line 323
# call mark_nUz, line 323
if not self.r_mark_nUz():
raise lab19()
# (, line 323
# or, line 323
try:
v_6 = self.limit - self.cursor
try:
# call mark_yDU, line 323
if not self.r_mark_yDU():
raise lab21()
raise lab20()
except lab21: pass
self.cursor = self.limit - v_6
# call mark_ysA, line 323
if not self.r_mark_ysA():
raise lab19()
except lab20: pass
raise lab0()
except lab19: pass
self.cursor = self.limit - v_1
try:
# (, line 325
# (, line 325
# or, line 325
try:
v_7 = self.limit - self.cursor
try:
# call mark_sUnUz, line 325
if not self.r_mark_sUnUz():
raise lab24()
raise lab23()
except lab24: pass
self.cursor = self.limit - v_7
try:
# call mark_yUz, line 325
if not self.r_mark_yUz():
raise lab25()
raise lab23()
except lab25: pass
self.cursor = self.limit - v_7
try:
# call mark_sUn, line 325
if not self.r_mark_sUn():
raise lab26()
raise lab23()
except lab26: pass
self.cursor = self.limit - v_7
# call mark_yUm, line 325
if not self.r_mark_yUm():
raise lab22()
except lab23: pass
# ], line 325
self.bra = self.cursor
# delete, line 325
if not self.slice_del():
return False
# try, line 325
v_8 = self.limit - self.cursor
try:
# (, line 325
# [, line 325
self.ket = self.cursor
# call mark_ymUs_, line 325
if not self.r_mark_ymUs_():
self.cursor = self.limit - v_8
raise lab27()
except lab27: pass
raise lab0()
except lab22: pass
self.cursor = self.limit - v_1
# (, line 327
# call mark_DUr, line 327
if not self.r_mark_DUr():
return False
# ], line 327
self.bra = self.cursor
# delete, line 327
if not self.slice_del():
return False
# try, line 327
v_9 = self.limit - self.cursor
try:
# (, line 327
# [, line 327
self.ket = self.cursor
# (, line 327
# or, line 327
try:
v_10 = self.limit - self.cursor
try:
# call mark_sUnUz, line 327
if not self.r_mark_sUnUz():
raise lab30()
raise lab29()
except lab30: pass
self.cursor = self.limit - v_10
try:
# call mark_lAr, line 327
if not self.r_mark_lAr():
raise lab31()
raise lab29()
except lab31: pass
self.cursor = self.limit - v_10
try:
# call mark_yUm, line 327
if not self.r_mark_yUm():
raise lab32()
raise lab29()
except lab32: pass
self.cursor = self.limit - v_10
try:
# call mark_sUn, line 327
if not self.r_mark_sUn():
raise lab33()
raise lab29()
except lab33: pass
self.cursor = self.limit - v_10
try:
# call mark_yUz, line 327
if not self.r_mark_yUz():
raise lab34()
raise lab29()
except lab34: pass
self.cursor = self.limit - v_10
except lab29: pass
# call mark_ymUs_, line 327
if not self.r_mark_ymUs_():
self.cursor = self.limit - v_9
raise lab28()
except lab28: pass
except lab0: pass
# ], line 328
self.bra = self.cursor
# delete, line 328
if not self.slice_del():
return False
return True
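    # r_stem_suffix_chain_before_ki peels suffix chains built on the
    # relativizing -ki (illustrative example, not verified here:
    # u"sokaktaki" -> u"sokak"); it recurses because -ki can stack with
    # case, plural and possessive suffixes.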
def r_stem_suffix_chain_before_ki(self):
# (, line 332
# [, line 333
self.ket = self.cursor
# call mark_ki, line 334
if not self.r_mark_ki():
return False
# (, line 335
# or, line 342
try:
v_1 = self.limit - self.cursor
try:
# (, line 336
# call mark_DA, line 336
if not self.r_mark_DA():
raise lab1()
# ], line 336
self.bra = self.cursor
# delete, line 336
if not self.slice_del():
return False
# try, line 336
v_2 = self.limit - self.cursor
try:
# (, line 336
# [, line 336
self.ket = self.cursor
# or, line 338
try:
v_3 = self.limit - self.cursor
try:
# (, line 337
# call mark_lAr, line 337
if not self.r_mark_lAr():
raise lab4()
# ], line 337
self.bra = self.cursor
# delete, line 337
if not self.slice_del():
return False
# try, line 337
v_4 = self.limit - self.cursor
try:
# (, line 337
# call stem_suffix_chain_before_ki, line 337
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_4
raise lab5()
except lab5: pass
raise lab3()
except lab4: pass
self.cursor = self.limit - v_3
# (, line 339
# call mark_possessives, line 339
if not self.r_mark_possessives():
self.cursor = self.limit - v_2
raise lab2()
# ], line 339
self.bra = self.cursor
# delete, line 339
if not self.slice_del():
return False
# try, line 339
v_5 = self.limit - self.cursor
try:
# (, line 339
# [, line 339
self.ket = self.cursor
# call mark_lAr, line 339
if not self.r_mark_lAr():
self.cursor = self.limit - v_5
raise lab6()
# ], line 339
self.bra = self.cursor
# delete, line 339
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 339
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_5
raise lab6()
except lab6: pass
except lab3: pass
except lab2: pass
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
try:
# (, line 343
# call mark_nUn, line 343
if not self.r_mark_nUn():
raise lab7()
# ], line 343
self.bra = self.cursor
# delete, line 343
if not self.slice_del():
return False
# try, line 343
v_6 = self.limit - self.cursor
try:
# (, line 343
# [, line 343
self.ket = self.cursor
# or, line 345
try:
v_7 = self.limit - self.cursor
try:
# (, line 344
# call mark_lArI, line 344
if not self.r_mark_lArI():
raise lab10()
# ], line 344
self.bra = self.cursor
# delete, line 344
if not self.slice_del():
return False
raise lab9()
except lab10: pass
self.cursor = self.limit - v_7
try:
# (, line 346
# [, line 346
self.ket = self.cursor
# or, line 346
try:
v_8 = self.limit - self.cursor
try:
# call mark_possessives, line 346
if not self.r_mark_possessives():
raise lab13()
raise lab12()
except lab13: pass
self.cursor = self.limit - v_8
# call mark_sU, line 346
if not self.r_mark_sU():
raise lab11()
except lab12: pass
# ], line 346
self.bra = self.cursor
# delete, line 346
if not self.slice_del():
return False
# try, line 346
v_9 = self.limit - self.cursor
try:
# (, line 346
# [, line 346
self.ket = self.cursor
# call mark_lAr, line 346
if not self.r_mark_lAr():
self.cursor = self.limit - v_9
raise lab14()
# ], line 346
self.bra = self.cursor
# delete, line 346
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 346
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_9
raise lab14()
except lab14: pass
raise lab9()
except lab11: pass
self.cursor = self.limit - v_7
# (, line 348
# call stem_suffix_chain_before_ki, line 348
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_6
raise lab8()
except lab9: pass
except lab8: pass
raise lab0()
except lab7: pass
self.cursor = self.limit - v_1
# (, line 351
# call mark_ndA, line 351
if not self.r_mark_ndA():
return False
# (, line 351
# or, line 353
try:
v_10 = self.limit - self.cursor
try:
# (, line 352
# call mark_lArI, line 352
if not self.r_mark_lArI():
raise lab16()
# ], line 352
self.bra = self.cursor
# delete, line 352
if not self.slice_del():
return False
raise lab15()
except lab16: pass
self.cursor = self.limit - v_10
try:
# (, line 354
# (, line 354
# call mark_sU, line 354
if not self.r_mark_sU():
raise lab17()
# ], line 354
self.bra = self.cursor
# delete, line 354
if not self.slice_del():
return False
# try, line 354
v_11 = self.limit - self.cursor
try:
# (, line 354
# [, line 354
self.ket = self.cursor
# call mark_lAr, line 354
if not self.r_mark_lAr():
self.cursor = self.limit - v_11
raise lab18()
# ], line 354
self.bra = self.cursor
# delete, line 354
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 354
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_11
raise lab18()
except lab18: pass
raise lab15()
except lab17: pass
self.cursor = self.limit - v_10
# (, line 356
# call stem_suffix_chain_before_ki, line 356
if not self.r_stem_suffix_chain_before_ki():
return False
except lab15: pass
except lab0: pass
return True
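    # r_stem_noun_suffixes removes chained noun inflection: plural -lAr, the
    # case endings (-DA, -DAn, -ndA, -ndAn, -nA, -nU, -nUn, -ylA, -yA, -yU),
    # possessives, -lArI and -sU, re-entering stem_suffix_chain_before_ki
    # wherever a -ki chain may follow.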
def r_stem_noun_suffixes(self):
# (, line 361
# or, line 363
try:
v_1 = self.limit - self.cursor
try:
# (, line 362
# [, line 362
self.ket = self.cursor
# call mark_lAr, line 362
if not self.r_mark_lAr():
raise lab1()
# ], line 362
self.bra = self.cursor
# delete, line 362
if not self.slice_del():
return False
# try, line 362
v_2 = self.limit - self.cursor
try:
# (, line 362
# call stem_suffix_chain_before_ki, line 362
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_2
raise lab2()
except lab2: pass
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
try:
# (, line 364
# [, line 364
self.ket = self.cursor
# call mark_ncA, line 364
if not self.r_mark_ncA():
raise lab3()
# ], line 364
self.bra = self.cursor
# delete, line 364
if not self.slice_del():
return False
# try, line 365
v_3 = self.limit - self.cursor
try:
# (, line 365
# or, line 367
try:
v_4 = self.limit - self.cursor
try:
# (, line 366
# [, line 366
self.ket = self.cursor
# call mark_lArI, line 366
if not self.r_mark_lArI():
raise lab6()
# ], line 366
self.bra = self.cursor
# delete, line 366
if not self.slice_del():
return False
raise lab5()
except lab6: pass
self.cursor = self.limit - v_4
try:
# (, line 368
# [, line 368
self.ket = self.cursor
# or, line 368
try:
v_5 = self.limit - self.cursor
try:
# call mark_possessives, line 368
if not self.r_mark_possessives():
raise lab9()
raise lab8()
except lab9: pass
self.cursor = self.limit - v_5
# call mark_sU, line 368
if not self.r_mark_sU():
raise lab7()
except lab8: pass
# ], line 368
self.bra = self.cursor
# delete, line 368
if not self.slice_del():
return False
# try, line 368
v_6 = self.limit - self.cursor
try:
# (, line 368
# [, line 368
self.ket = self.cursor
# call mark_lAr, line 368
if not self.r_mark_lAr():
self.cursor = self.limit - v_6
raise lab10()
# ], line 368
self.bra = self.cursor
# delete, line 368
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 368
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_6
raise lab10()
except lab10: pass
raise lab5()
except lab7: pass
self.cursor = self.limit - v_4
# (, line 370
# [, line 370
self.ket = self.cursor
# call mark_lAr, line 370
if not self.r_mark_lAr():
self.cursor = self.limit - v_3
raise lab4()
# ], line 370
self.bra = self.cursor
# delete, line 370
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 370
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_3
raise lab4()
except lab5: pass
except lab4: pass
raise lab0()
except lab3: pass
self.cursor = self.limit - v_1
try:
# (, line 374
# [, line 374
self.ket = self.cursor
# (, line 374
# or, line 374
try:
v_7 = self.limit - self.cursor
try:
# call mark_ndA, line 374
if not self.r_mark_ndA():
raise lab13()
raise lab12()
except lab13: pass
self.cursor = self.limit - v_7
# call mark_nA, line 374
if not self.r_mark_nA():
raise lab11()
except lab12: pass
# (, line 375
# or, line 377
try:
v_8 = self.limit - self.cursor
try:
# (, line 376
# call mark_lArI, line 376
if not self.r_mark_lArI():
raise lab15()
# ], line 376
self.bra = self.cursor
# delete, line 376
if not self.slice_del():
return False
raise lab14()
except lab15: pass
self.cursor = self.limit - v_8
try:
# (, line 378
# call mark_sU, line 378
if not self.r_mark_sU():
raise lab16()
# ], line 378
self.bra = self.cursor
# delete, line 378
if not self.slice_del():
return False
# try, line 378
v_9 = self.limit - self.cursor
try:
# (, line 378
# [, line 378
self.ket = self.cursor
# call mark_lAr, line 378
if not self.r_mark_lAr():
self.cursor = self.limit - v_9
raise lab17()
# ], line 378
self.bra = self.cursor
# delete, line 378
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 378
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_9
raise lab17()
except lab17: pass
raise lab14()
except lab16: pass
self.cursor = self.limit - v_8
# (, line 380
# call stem_suffix_chain_before_ki, line 380
if not self.r_stem_suffix_chain_before_ki():
raise lab11()
except lab14: pass
raise lab0()
except lab11: pass
self.cursor = self.limit - v_1
try:
# (, line 384
# [, line 384
self.ket = self.cursor
# (, line 384
# or, line 384
try:
v_10 = self.limit - self.cursor
try:
# call mark_ndAn, line 384
if not self.r_mark_ndAn():
raise lab20()
raise lab19()
except lab20: pass
self.cursor = self.limit - v_10
# call mark_nU, line 384
if not self.r_mark_nU():
raise lab18()
except lab19: pass
# (, line 384
# or, line 384
try:
v_11 = self.limit - self.cursor
try:
# (, line 384
# call mark_sU, line 384
if not self.r_mark_sU():
raise lab22()
# ], line 384
self.bra = self.cursor
# delete, line 384
if not self.slice_del():
return False
# try, line 384
v_12 = self.limit - self.cursor
try:
# (, line 384
# [, line 384
self.ket = self.cursor
# call mark_lAr, line 384
if not self.r_mark_lAr():
self.cursor = self.limit - v_12
raise lab23()
# ], line 384
self.bra = self.cursor
# delete, line 384
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 384
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_12
raise lab23()
except lab23: pass
raise lab21()
except lab22: pass
self.cursor = self.limit - v_11
# (, line 384
# call mark_lArI, line 384
if not self.r_mark_lArI():
raise lab18()
except lab21: pass
raise lab0()
except lab18: pass
self.cursor = self.limit - v_1
try:
# (, line 386
# [, line 386
self.ket = self.cursor
# call mark_DAn, line 386
if not self.r_mark_DAn():
raise lab24()
# ], line 386
self.bra = self.cursor
# delete, line 386
if not self.slice_del():
return False
# try, line 386
v_13 = self.limit - self.cursor
try:
# (, line 386
# [, line 386
self.ket = self.cursor
# (, line 387
# or, line 389
try:
v_14 = self.limit - self.cursor
try:
# (, line 388
# call mark_possessives, line 388
if not self.r_mark_possessives():
raise lab27()
# ], line 388
self.bra = self.cursor
# delete, line 388
if not self.slice_del():
return False
# try, line 388
v_15 = self.limit - self.cursor
try:
# (, line 388
# [, line 388
self.ket = self.cursor
# call mark_lAr, line 388
if not self.r_mark_lAr():
self.cursor = self.limit - v_15
raise lab28()
# ], line 388
self.bra = self.cursor
# delete, line 388
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 388
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_15
raise lab28()
except lab28: pass
raise lab26()
except lab27: pass
self.cursor = self.limit - v_14
try:
# (, line 390
# call mark_lAr, line 390
if not self.r_mark_lAr():
raise lab29()
# ], line 390
self.bra = self.cursor
# delete, line 390
if not self.slice_del():
return False
# try, line 390
v_16 = self.limit - self.cursor
try:
# (, line 390
# call stem_suffix_chain_before_ki, line 390
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_16
raise lab30()
except lab30: pass
raise lab26()
except lab29: pass
self.cursor = self.limit - v_14
# (, line 392
# call stem_suffix_chain_before_ki, line 392
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_13
raise lab25()
except lab26: pass
except lab25: pass
raise lab0()
except lab24: pass
self.cursor = self.limit - v_1
try:
# (, line 396
# [, line 396
self.ket = self.cursor
# or, line 396
try:
v_17 = self.limit - self.cursor
try:
# call mark_nUn, line 396
if not self.r_mark_nUn():
raise lab33()
raise lab32()
except lab33: pass
self.cursor = self.limit - v_17
# call mark_ylA, line 396
if not self.r_mark_ylA():
raise lab31()
except lab32: pass
# ], line 396
self.bra = self.cursor
# delete, line 396
if not self.slice_del():
return False
# try, line 397
v_18 = self.limit - self.cursor
try:
# (, line 397
# or, line 399
try:
v_19 = self.limit - self.cursor
try:
# (, line 398
# [, line 398
self.ket = self.cursor
# call mark_lAr, line 398
if not self.r_mark_lAr():
raise lab36()
# ], line 398
self.bra = self.cursor
# delete, line 398
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 398
if not self.r_stem_suffix_chain_before_ki():
raise lab36()
raise lab35()
except lab36: pass
self.cursor = self.limit - v_19
try:
# (, line 400
# [, line 400
self.ket = self.cursor
# or, line 400
try:
v_20 = self.limit - self.cursor
try:
# call mark_possessives, line 400
if not self.r_mark_possessives():
raise lab39()
raise lab38()
except lab39: pass
self.cursor = self.limit - v_20
# call mark_sU, line 400
if not self.r_mark_sU():
raise lab37()
except lab38: pass
# ], line 400
self.bra = self.cursor
# delete, line 400
if not self.slice_del():
return False
# try, line 400
v_21 = self.limit - self.cursor
try:
# (, line 400
# [, line 400
self.ket = self.cursor
# call mark_lAr, line 400
if not self.r_mark_lAr():
self.cursor = self.limit - v_21
raise lab40()
# ], line 400
self.bra = self.cursor
# delete, line 400
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 400
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_21
raise lab40()
except lab40: pass
raise lab35()
except lab37: pass
self.cursor = self.limit - v_19
# call stem_suffix_chain_before_ki, line 402
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_18
raise lab34()
except lab35: pass
except lab34: pass
raise lab0()
except lab31: pass
self.cursor = self.limit - v_1
try:
# (, line 406
# [, line 406
self.ket = self.cursor
# call mark_lArI, line 406
if not self.r_mark_lArI():
raise lab41()
# ], line 406
self.bra = self.cursor
# delete, line 406
if not self.slice_del():
return False
raise lab0()
except lab41: pass
self.cursor = self.limit - v_1
try:
# (, line 408
# call stem_suffix_chain_before_ki, line 408
if not self.r_stem_suffix_chain_before_ki():
raise lab42()
raise lab0()
except lab42: pass
self.cursor = self.limit - v_1
try:
# (, line 410
# [, line 410
self.ket = self.cursor
# or, line 410
try:
v_22 = self.limit - self.cursor
try:
# call mark_DA, line 410
if not self.r_mark_DA():
raise lab45()
raise lab44()
except lab45: pass
self.cursor = self.limit - v_22
try:
# call mark_yU, line 410
if not self.r_mark_yU():
raise lab46()
raise lab44()
except lab46: pass
self.cursor = self.limit - v_22
# call mark_yA, line 410
if not self.r_mark_yA():
raise lab43()
except lab44: pass
# ], line 410
self.bra = self.cursor
# delete, line 410
if not self.slice_del():
return False
# try, line 410
v_23 = self.limit - self.cursor
try:
# (, line 410
# [, line 410
self.ket = self.cursor
# (, line 410
# or, line 410
try:
v_24 = self.limit - self.cursor
try:
# (, line 410
# call mark_possessives, line 410
if not self.r_mark_possessives():
raise lab49()
# ], line 410
self.bra = self.cursor
# delete, line 410
if not self.slice_del():
return False
# try, line 410
v_25 = self.limit - self.cursor
try:
# (, line 410
# [, line 410
self.ket = self.cursor
# call mark_lAr, line 410
if not self.r_mark_lAr():
self.cursor = self.limit - v_25
raise lab50()
except lab50: pass
raise lab48()
except lab49: pass
self.cursor = self.limit - v_24
# call mark_lAr, line 410
if not self.r_mark_lAr():
self.cursor = self.limit - v_23
raise lab47()
except lab48: pass
# ], line 410
self.bra = self.cursor
# delete, line 410
if not self.slice_del():
return False
# [, line 410
self.ket = self.cursor
# call stem_suffix_chain_before_ki, line 410
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_23
raise lab47()
except lab47: pass
raise lab0()
except lab43: pass
self.cursor = self.limit - v_1
# (, line 412
# [, line 412
self.ket = self.cursor
# or, line 412
try:
v_26 = self.limit - self.cursor
try:
# call mark_possessives, line 412
if not self.r_mark_possessives():
raise lab52()
raise lab51()
except lab52: pass
self.cursor = self.limit - v_26
# call mark_sU, line 412
if not self.r_mark_sU():
return False
except lab51: pass
# ], line 412
self.bra = self.cursor
# delete, line 412
if not self.slice_del():
return False
# try, line 412
v_27 = self.limit - self.cursor
try:
# (, line 412
# [, line 412
self.ket = self.cursor
# call mark_lAr, line 412
if not self.r_mark_lAr():
self.cursor = self.limit - v_27
raise lab53()
# ], line 412
self.bra = self.cursor
# delete, line 412
if not self.slice_del():
return False
# call stem_suffix_chain_before_ki, line 412
if not self.r_stem_suffix_chain_before_ki():
self.cursor = self.limit - v_27
raise lab53()
except lab53: pass
except lab0: pass
return True
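    # r_post_process_last_consonants re-applies Turkish final devoicing once
    # the suffixes are gone: a voiced final consonant matched in a_23
    # (defined earlier in this file) is rewritten to its voiceless
    # counterpart p / ç / t / k, e.g. a stranded kitab- becomes kitap.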
def r_post_process_last_consonants(self):
# (, line 415
# [, line 416
self.ket = self.cursor
# substring, line 416
among_var = self.find_among_b(TurkishStemmer.a_23, 4)
if among_var == 0:
return False
# ], line 416
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 417
# <-, line 417
if not self.slice_from(u"p"):
return False
elif among_var == 2:
# (, line 418
# <-, line 418
if not self.slice_from(u"\u00E7"):
return False
elif among_var == 3:
# (, line 419
# <-, line 419
if not self.slice_from(u"t"):
return False
elif among_var == 4:
# (, line 420
# <-, line 420
if not self.slice_from(u"k"):
return False
return True
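    # r_append_U_to_stems_ending_with_d_or_g restores the high vowel elided
    # before vocalic suffixes: when the stem now ends in d or g, it appends
    # the vowel harmonizing with the last vowel found, per the four branches
    # below (a/ı -> ı, e/i -> i, o/u -> u, ö/ü -> ü).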
def r_append_U_to_stems_ending_with_d_or_g(self):
# (, line 430
# test, line 431
v_1 = self.limit - self.cursor
# (, line 431
# or, line 431
try:
v_2 = self.limit - self.cursor
try:
# literal, line 431
if not self.eq_s_b(1, u"d"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_2
# literal, line 431
if not self.eq_s_b(1, u"g"):
return False
except lab0: pass
self.cursor = self.limit - v_1
# or, line 433
try:
v_3 = self.limit - self.cursor
try:
# (, line 432
# test, line 432
v_4 = self.limit - self.cursor
# (, line 432
# (, line 432
# goto, line 432
try:
while True:
v_5 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab5()
self.cursor = self.limit - v_5
raise lab4()
except lab5: pass
self.cursor = self.limit - v_5
if self.cursor <= self.limit_backward:
raise lab3()
self.cursor -= 1
except lab4: pass
# or, line 432
try:
v_6 = self.limit - self.cursor
try:
# literal, line 432
if not self.eq_s_b(1, u"a"):
raise lab7()
raise lab6()
except lab7: pass
self.cursor = self.limit - v_6
# literal, line 432
if not self.eq_s_b(1, u"\u0131"):
raise lab3()
except lab6: pass
self.cursor = self.limit - v_4
# <+, line 432
c = self.cursor
self.insert(self.cursor, self.cursor, u"\u0131")
self.cursor = c
raise lab2()
except lab3: pass
self.cursor = self.limit - v_3
try:
# (, line 434
# test, line 434
v_7 = self.limit - self.cursor
# (, line 434
# (, line 434
# goto, line 434
try:
while True:
v_8 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab10()
self.cursor = self.limit - v_8
raise lab9()
except lab10: pass
self.cursor = self.limit - v_8
if self.cursor <= self.limit_backward:
raise lab8()
self.cursor -= 1
except lab9: pass
# or, line 434
try:
v_9 = self.limit - self.cursor
try:
# literal, line 434
if not self.eq_s_b(1, u"e"):
raise lab12()
raise lab11()
except lab12: pass
self.cursor = self.limit - v_9
# literal, line 434
if not self.eq_s_b(1, u"i"):
raise lab8()
except lab11: pass
self.cursor = self.limit - v_7
# <+, line 434
c = self.cursor
self.insert(self.cursor, self.cursor, u"i")
self.cursor = c
raise lab2()
except lab8: pass
self.cursor = self.limit - v_3
try:
# (, line 436
# test, line 436
v_10 = self.limit - self.cursor
# (, line 436
# (, line 436
# goto, line 436
try:
while True:
v_11 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab15()
self.cursor = self.limit - v_11
raise lab14()
except lab15: pass
self.cursor = self.limit - v_11
if self.cursor <= self.limit_backward:
raise lab13()
self.cursor -= 1
except lab14: pass
# or, line 436
try:
v_12 = self.limit - self.cursor
try:
# literal, line 436
if not self.eq_s_b(1, u"o"):
raise lab17()
raise lab16()
except lab17: pass
self.cursor = self.limit - v_12
# literal, line 436
if not self.eq_s_b(1, u"u"):
raise lab13()
except lab16: pass
self.cursor = self.limit - v_10
# <+, line 436
c = self.cursor
self.insert(self.cursor, self.cursor, u"u")
self.cursor = c
raise lab2()
except lab13: pass
self.cursor = self.limit - v_3
# (, line 438
# test, line 438
v_13 = self.limit - self.cursor
# (, line 438
# (, line 438
# goto, line 438
try:
while True:
v_14 = self.limit - self.cursor
try:
if not self.in_grouping_b(TurkishStemmer.g_vowel, 97, 305):
raise lab19()
self.cursor = self.limit - v_14
raise lab18()
except lab19: pass
self.cursor = self.limit - v_14
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab18: pass
# or, line 438
try:
v_15 = self.limit - self.cursor
try:
# literal, line 438
if not self.eq_s_b(1, u"\u00F6"):
raise lab21()
raise lab20()
except lab21: pass
self.cursor = self.limit - v_15
# literal, line 438
if not self.eq_s_b(1, u"\u00FC"):
return False
except lab20: pass
self.cursor = self.limit - v_13
# <+, line 438
c = self.cursor
self.insert(self.cursor, self.cursor, u"\u00FC")
self.cursor = c
except lab2: pass
return True
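    # r_more_than_one_syllable_word approximates syllable count by counting
    # vowels: stemming proceeds only when the word contains at least two,
    # leaving short roots untouched.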
def r_more_than_one_syllable_word(self):
# (, line 445
# test, line 446
v_1 = self.cursor
# (, line 446
# atleast, line 446
v_2 = 2
# atleast, line 446
try:
while True:
try:
v_3 = self.cursor
try:
# (, line 446
# gopast, line 446
try:
while True:
try:
if not self.in_grouping(TurkishStemmer.g_vowel, 97, 305):
raise lab4()
raise lab3()
except lab4: pass
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab3: pass
v_2 -= 1
raise lab1()
except lab2: pass
self.cursor = v_3
raise lab0()
except lab1: pass
except lab0: pass
if v_2 > 0:
return False
self.cursor = v_1
return True
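    # r_is_reserved_word recognizes the reserved words u"ad" and u"soyad"
    # (Turkish for "name" and "surname"), checking both the literal match
    # and the exact word length (2 and 5).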
def r_is_reserved_word(self):
# (, line 449
# or, line 451
try:
v_1 = self.cursor
try:
# test, line 450
v_2 = self.cursor
# (, line 450
# gopast, line 450
try:
while True:
try:
# literal, line 450
if not self.eq_s(2, u"ad"):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor >= self.limit:
raise lab1()
self.cursor += 1
except lab2: pass
# (, line 450
                self.I_strlen = 2
# (, line 450
if not self.I_strlen == self.limit:
raise lab1()
self.cursor = v_2
raise lab0()
except lab1: pass
self.cursor = v_1
# test, line 452
v_4 = self.cursor
# (, line 452
# gopast, line 452
try:
while True:
try:
# literal, line 452
if not self.eq_s(5, u"soyad"):
raise lab5()
raise lab4()
except lab5: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab4: pass
# (, line 452
            self.I_strlen = 5
# (, line 452
if not self.I_strlen == self.limit:
return False
self.cursor = v_4
except lab0: pass
return True
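    # r_postlude finishes a stem: it bails out on reserved words, then,
    # scanning right to left, runs the d/g vowel append and the final
    # consonant devoicing defined above.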
def r_postlude(self):
# (, line 455
# not, line 456
v_1 = self.cursor
try:
# (, line 456
# call is_reserved_word, line 456
if not self.r_is_reserved_word():
raise lab0()
return False
except lab0: pass
self.cursor = v_1
# backwards, line 457
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 457
# do, line 458
v_2 = self.limit - self.cursor
try:
# call append_U_to_stems_ending_with_d_or_g, line 458
if not self.r_append_U_to_stems_ending_with_d_or_g():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 459
v_3 = self.limit - self.cursor
try:
# call post_process_last_consonants, line 459
if not self.r_post_process_last_consonants():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
self.cursor = self.limit_backward
return True
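    # _stem is the entry point: require more than one syllable, strip
    # nominal verb suffixes, continue with noun suffixes only while the
    # continue_stemming flag allows it, and finish with the postlude.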
def _stem(self):
# (, line 464
# (, line 465
# call more_than_one_syllable_word, line 465
if not self.r_more_than_one_syllable_word():
return False
# (, line 466
# backwards, line 467
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 467
# do, line 468
v_1 = self.limit - self.cursor
try:
# call stem_nominal_verb_suffixes, line 468
if not self.r_stem_nominal_verb_suffixes():
raise lab0()
except lab0: pass
self.cursor = self.limit - v_1
# Boolean test continue_stemming_noun_suffixes, line 469
if not self.B_continue_stemming_noun_suffixes:
return False
# do, line 470
v_2 = self.limit - self.cursor
try:
# call stem_noun_suffixes, line 470
if not self.r_stem_noun_suffixes():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
self.cursor = self.limit_backward
# call postlude, line 473
if not self.r_postlude():
return False
return True
def equals(self, o):
return isinstance(o, TurkishStemmer)
def hashCode(self):
return hash("TurkishStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
class lab16(BaseException): pass
class lab17(BaseException): pass
class lab18(BaseException): pass
class lab19(BaseException): pass
class lab20(BaseException): pass
class lab21(BaseException): pass
class lab22(BaseException): pass
class lab23(BaseException): pass
class lab24(BaseException): pass
class lab25(BaseException): pass
class lab26(BaseException): pass
class lab27(BaseException): pass
class lab28(BaseException): pass
class lab29(BaseException): pass
class lab30(BaseException): pass
class lab31(BaseException): pass
class lab32(BaseException): pass
class lab33(BaseException): pass
class lab34(BaseException): pass
class lab35(BaseException): pass
class lab36(BaseException): pass
class lab37(BaseException): pass
class lab38(BaseException): pass
class lab39(BaseException): pass
class lab40(BaseException): pass
class lab41(BaseException): pass
class lab42(BaseException): pass
class lab43(BaseException): pass
class lab44(BaseException): pass
class lab45(BaseException): pass
class lab46(BaseException): pass
class lab47(BaseException): pass
class lab48(BaseException): pass
class lab49(BaseException): pass
class lab50(BaseException): pass
class lab51(BaseException): pass
class lab52(BaseException): pass
class lab53(BaseException): pass
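
# Illustrative usage (an assumption: the vendored BaseStemmer exposes the
# same stemWord() wrapper as the upstream snowballstemmer package; the
# example word and result are expectations, not verified here):
#
#     stemmer = TurkishStemmer()
#     stemmer.stemWord(u"kitaplar")   # expected: u"kitap" (plural -lAr removed)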


# ---- anaconda_lib/snowballstemmer/among.py ----

class Among(object):
def __init__(self, s, substring_i, result, method=None):
"""
@ivar s_size search string size
@ivar s search string
        @ivar substring_i index of the longest matching substring
        @ivar result result of the lookup
        @ivar method method to call if the substring matches
"""
self.s_size = len(s)
self.s = s
self.substring_i = substring_i
self.result = result
self.method = method
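
# A minimal illustration of how the stemmers consume this record (the two
# entries are copied from SpanishStemmer.a_1 further down in this dump; the
# comments are ours):
#
#     Among(u"la", -1, -1)     # suffix "la", no parent entry, no action code
#     Among(u"sela", 0, -1)    # "sela"; substring_i == 0 points at "la", its
#                              # longest proper substring in the table
#
# result carries the action code that find_among / find_among_b returns on
# a successful match.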


# ---- anaconda_lib/snowballstemmer/russian_stemmer.py ----

# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class RussianStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"\u0432", -1, 1),
Among(u"\u0438\u0432", 0, 2),
Among(u"\u044B\u0432", 0, 2),
Among(u"\u0432\u0448\u0438", -1, 1),
Among(u"\u0438\u0432\u0448\u0438", 3, 2),
Among(u"\u044B\u0432\u0448\u0438", 3, 2),
Among(u"\u0432\u0448\u0438\u0441\u044C", -1, 1),
Among(u"\u0438\u0432\u0448\u0438\u0441\u044C", 6, 2),
Among(u"\u044B\u0432\u0448\u0438\u0441\u044C", 6, 2)
]
a_1 = [
Among(u"\u0435\u0435", -1, 1),
Among(u"\u0438\u0435", -1, 1),
Among(u"\u043E\u0435", -1, 1),
Among(u"\u044B\u0435", -1, 1),
Among(u"\u0438\u043C\u0438", -1, 1),
Among(u"\u044B\u043C\u0438", -1, 1),
Among(u"\u0435\u0439", -1, 1),
Among(u"\u0438\u0439", -1, 1),
Among(u"\u043E\u0439", -1, 1),
Among(u"\u044B\u0439", -1, 1),
Among(u"\u0435\u043C", -1, 1),
Among(u"\u0438\u043C", -1, 1),
Among(u"\u043E\u043C", -1, 1),
Among(u"\u044B\u043C", -1, 1),
Among(u"\u0435\u0433\u043E", -1, 1),
Among(u"\u043E\u0433\u043E", -1, 1),
Among(u"\u0435\u043C\u0443", -1, 1),
Among(u"\u043E\u043C\u0443", -1, 1),
Among(u"\u0438\u0445", -1, 1),
Among(u"\u044B\u0445", -1, 1),
Among(u"\u0435\u044E", -1, 1),
Among(u"\u043E\u044E", -1, 1),
Among(u"\u0443\u044E", -1, 1),
Among(u"\u044E\u044E", -1, 1),
Among(u"\u0430\u044F", -1, 1),
Among(u"\u044F\u044F", -1, 1)
]
a_2 = [
Among(u"\u0435\u043C", -1, 1),
Among(u"\u043D\u043D", -1, 1),
Among(u"\u0432\u0448", -1, 1),
Among(u"\u0438\u0432\u0448", 2, 2),
Among(u"\u044B\u0432\u0448", 2, 2),
Among(u"\u0449", -1, 1),
Among(u"\u044E\u0449", 5, 1),
Among(u"\u0443\u044E\u0449", 6, 2)
]
a_3 = [
Among(u"\u0441\u044C", -1, 1),
Among(u"\u0441\u044F", -1, 1)
]
a_4 = [
Among(u"\u043B\u0430", -1, 1),
Among(u"\u0438\u043B\u0430", 0, 2),
Among(u"\u044B\u043B\u0430", 0, 2),
Among(u"\u043D\u0430", -1, 1),
Among(u"\u0435\u043D\u0430", 3, 2),
Among(u"\u0435\u0442\u0435", -1, 1),
Among(u"\u0438\u0442\u0435", -1, 2),
Among(u"\u0439\u0442\u0435", -1, 1),
Among(u"\u0435\u0439\u0442\u0435", 7, 2),
Among(u"\u0443\u0439\u0442\u0435", 7, 2),
Among(u"\u043B\u0438", -1, 1),
Among(u"\u0438\u043B\u0438", 10, 2),
Among(u"\u044B\u043B\u0438", 10, 2),
Among(u"\u0439", -1, 1),
Among(u"\u0435\u0439", 13, 2),
Among(u"\u0443\u0439", 13, 2),
Among(u"\u043B", -1, 1),
Among(u"\u0438\u043B", 16, 2),
Among(u"\u044B\u043B", 16, 2),
Among(u"\u0435\u043C", -1, 1),
Among(u"\u0438\u043C", -1, 2),
Among(u"\u044B\u043C", -1, 2),
Among(u"\u043D", -1, 1),
Among(u"\u0435\u043D", 22, 2),
Among(u"\u043B\u043E", -1, 1),
Among(u"\u0438\u043B\u043E", 24, 2),
Among(u"\u044B\u043B\u043E", 24, 2),
Among(u"\u043D\u043E", -1, 1),
Among(u"\u0435\u043D\u043E", 27, 2),
Among(u"\u043D\u043D\u043E", 27, 1),
Among(u"\u0435\u0442", -1, 1),
Among(u"\u0443\u0435\u0442", 30, 2),
Among(u"\u0438\u0442", -1, 2),
Among(u"\u044B\u0442", -1, 2),
Among(u"\u044E\u0442", -1, 1),
Among(u"\u0443\u044E\u0442", 34, 2),
Among(u"\u044F\u0442", -1, 2),
Among(u"\u043D\u044B", -1, 1),
Among(u"\u0435\u043D\u044B", 37, 2),
Among(u"\u0442\u044C", -1, 1),
Among(u"\u0438\u0442\u044C", 39, 2),
Among(u"\u044B\u0442\u044C", 39, 2),
Among(u"\u0435\u0448\u044C", -1, 1),
Among(u"\u0438\u0448\u044C", -1, 2),
Among(u"\u044E", -1, 2),
Among(u"\u0443\u044E", 44, 2)
]
a_5 = [
Among(u"\u0430", -1, 1),
Among(u"\u0435\u0432", -1, 1),
Among(u"\u043E\u0432", -1, 1),
Among(u"\u0435", -1, 1),
Among(u"\u0438\u0435", 3, 1),
Among(u"\u044C\u0435", 3, 1),
Among(u"\u0438", -1, 1),
Among(u"\u0435\u0438", 6, 1),
Among(u"\u0438\u0438", 6, 1),
Among(u"\u0430\u043C\u0438", 6, 1),
Among(u"\u044F\u043C\u0438", 6, 1),
Among(u"\u0438\u044F\u043C\u0438", 10, 1),
Among(u"\u0439", -1, 1),
Among(u"\u0435\u0439", 12, 1),
Among(u"\u0438\u0435\u0439", 13, 1),
Among(u"\u0438\u0439", 12, 1),
Among(u"\u043E\u0439", 12, 1),
Among(u"\u0430\u043C", -1, 1),
Among(u"\u0435\u043C", -1, 1),
Among(u"\u0438\u0435\u043C", 18, 1),
Among(u"\u043E\u043C", -1, 1),
Among(u"\u044F\u043C", -1, 1),
Among(u"\u0438\u044F\u043C", 21, 1),
Among(u"\u043E", -1, 1),
Among(u"\u0443", -1, 1),
Among(u"\u0430\u0445", -1, 1),
Among(u"\u044F\u0445", -1, 1),
Among(u"\u0438\u044F\u0445", 26, 1),
Among(u"\u044B", -1, 1),
Among(u"\u044C", -1, 1),
Among(u"\u044E", -1, 1),
Among(u"\u0438\u044E", 30, 1),
Among(u"\u044C\u044E", 30, 1),
Among(u"\u044F", -1, 1),
Among(u"\u0438\u044F", 33, 1),
Among(u"\u044C\u044F", 33, 1)
]
a_6 = [
Among(u"\u043E\u0441\u0442", -1, 1),
Among(u"\u043E\u0441\u0442\u044C", -1, 1)
]
a_7 = [
Among(u"\u0435\u0439\u0448\u0435", -1, 1),
Among(u"\u043D", -1, 2),
Among(u"\u0435\u0439\u0448", -1, 1),
Among(u"\u044C", -1, 3)
]
g_v = [33, 65, 8, 232]
I_p2 = 0
I_pV = 0
def copy_from(self, other):
self.I_p2 = other.I_p2
self.I_pV = other.I_pV
        super().copy_from(other)
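    # r_mark_regions computes the Snowball regions for Russian: pV is the
    # position after the first vowel, and p2 is the start of R2 (after the
    # first non-vowel that follows a vowel inside R1); the suffix checks
    # below are confined to these regions.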
def r_mark_regions(self):
# (, line 57
        self.I_pV = self.limit
        self.I_p2 = self.limit
# do, line 61
v_1 = self.cursor
try:
# (, line 61
# gopast, line 62
try:
while True:
try:
if not self.in_grouping(RussianStemmer.g_v, 1072, 1103):
raise lab2()
raise lab1()
except lab2: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab1: pass
# setmark pV, line 62
self.I_pV = self.cursor
# gopast, line 62
try:
while True:
try:
if not self.out_grouping(RussianStemmer.g_v, 1072, 1103):
raise lab4()
raise lab3()
except lab4: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab3: pass
# gopast, line 63
try:
while True:
try:
if not self.in_grouping(RussianStemmer.g_v, 1072, 1103):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab5: pass
# gopast, line 63
try:
while True:
try:
if not self.out_grouping(RussianStemmer.g_v, 1072, 1103):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab7: pass
# setmark p2, line 63
self.I_p2 = self.cursor
except lab0: pass
self.cursor = v_1
return True
    def r_R2(self):
        return self.I_p2 <= self.cursor
def r_perfective_gerund(self):
# (, line 71
# [, line 72
self.ket = self.cursor
# substring, line 72
among_var = self.find_among_b(RussianStemmer.a_0, 9)
if among_var == 0:
return False
# ], line 72
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 76
# or, line 76
try:
v_1 = self.limit - self.cursor
try:
# literal, line 76
if not self.eq_s_b(1, u"\u0430"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# literal, line 76
if not self.eq_s_b(1, u"\u044F"):
return False
except lab0: pass
# delete, line 76
if not self.slice_del():
return False
elif among_var == 2:
# (, line 83
# delete, line 83
if not self.slice_del():
return False
return True
def r_adjective(self):
# (, line 87
# [, line 88
self.ket = self.cursor
# substring, line 88
among_var = self.find_among_b(RussianStemmer.a_1, 26)
if among_var == 0:
return False
# ], line 88
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 97
# delete, line 97
if not self.slice_del():
return False
return True
def r_adjectival(self):
# (, line 101
# call adjective, line 102
if not self.r_adjective():
return False
# try, line 109
v_1 = self.limit - self.cursor
try:
# (, line 109
# [, line 110
self.ket = self.cursor
# substring, line 110
among_var = self.find_among_b(RussianStemmer.a_2, 8)
if among_var == 0:
self.cursor = self.limit - v_1
raise lab0()
# ], line 110
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_1
raise lab0()
elif among_var == 1:
# (, line 115
# or, line 115
try:
v_2 = self.limit - self.cursor
try:
# literal, line 115
if not self.eq_s_b(1, u"\u0430"):
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_2
# literal, line 115
if not self.eq_s_b(1, u"\u044F"):
self.cursor = self.limit - v_1
raise lab0()
except lab1: pass
# delete, line 115
if not self.slice_del():
return False
elif among_var == 2:
# (, line 122
# delete, line 122
if not self.slice_del():
return False
except lab0: pass
return True
def r_reflexive(self):
# (, line 128
# [, line 129
self.ket = self.cursor
# substring, line 129
among_var = self.find_among_b(RussianStemmer.a_3, 2)
if among_var == 0:
return False
# ], line 129
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 132
# delete, line 132
if not self.slice_del():
return False
return True
def r_verb(self):
# (, line 136
# [, line 137
self.ket = self.cursor
# substring, line 137
among_var = self.find_among_b(RussianStemmer.a_4, 46)
if among_var == 0:
return False
# ], line 137
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 143
# or, line 143
try:
v_1 = self.limit - self.cursor
try:
# literal, line 143
if not self.eq_s_b(1, u"\u0430"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# literal, line 143
if not self.eq_s_b(1, u"\u044F"):
return False
except lab0: pass
# delete, line 143
if not self.slice_del():
return False
elif among_var == 2:
# (, line 151
# delete, line 151
if not self.slice_del():
return False
return True
def r_noun(self):
# (, line 159
# [, line 160
self.ket = self.cursor
# substring, line 160
among_var = self.find_among_b(RussianStemmer.a_5, 36)
if among_var == 0:
return False
# ], line 160
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 167
# delete, line 167
if not self.slice_del():
return False
return True
def r_derivational(self):
# (, line 175
# [, line 176
self.ket = self.cursor
# substring, line 176
among_var = self.find_among_b(RussianStemmer.a_6, 2)
if among_var == 0:
return False
# ], line 176
self.bra = self.cursor
# call R2, line 176
if not self.r_R2():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 179
# delete, line 179
if not self.slice_del():
return False
return True
def r_tidy_up(self):
# (, line 183
# [, line 184
self.ket = self.cursor
# substring, line 184
among_var = self.find_among_b(RussianStemmer.a_7, 4)
if among_var == 0:
return False
# ], line 184
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 188
# delete, line 188
if not self.slice_del():
return False
# [, line 189
self.ket = self.cursor
# literal, line 189
if not self.eq_s_b(1, u"\u043D"):
return False
# ], line 189
self.bra = self.cursor
# literal, line 189
if not self.eq_s_b(1, u"\u043D"):
return False
# delete, line 189
if not self.slice_del():
return False
elif among_var == 2:
# (, line 192
# literal, line 192
if not self.eq_s_b(1, u"\u043D"):
return False
# delete, line 192
if not self.slice_del():
return False
elif among_var == 3:
# (, line 194
# delete, line 194
if not self.slice_del():
return False
return True
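    # _stem works backwards inside RV: remove a perfective gerund, or else
    # an optional reflexive (-ся/-сь) followed by an adjectival, verb or
    # noun ending; then drop a trailing и, apply the derivational -ост(ь)
    # rule in R2, and tidy double н, superlatives and a soft sign.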
def _stem(self):
# (, line 199
# do, line 201
v_1 = self.cursor
try:
# call mark_regions, line 201
if not self.r_mark_regions():
raise lab0()
except lab0: pass
self.cursor = v_1
# backwards, line 202
self.limit_backward = self.cursor
self.cursor = self.limit
# setlimit, line 202
v_2 = self.limit - self.cursor
# tomark, line 202
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_3 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_2
# (, line 202
# do, line 203
v_4 = self.limit - self.cursor
try:
# (, line 203
# or, line 204
try:
v_5 = self.limit - self.cursor
try:
# call perfective_gerund, line 204
if not self.r_perfective_gerund():
raise lab3()
raise lab2()
except lab3: pass
self.cursor = self.limit - v_5
# (, line 205
# try, line 205
v_6 = self.limit - self.cursor
try:
# call reflexive, line 205
if not self.r_reflexive():
self.cursor = self.limit - v_6
raise lab4()
except lab4: pass
# or, line 206
try:
v_7 = self.limit - self.cursor
try:
# call adjectival, line 206
if not self.r_adjectival():
raise lab6()
raise lab5()
except lab6: pass
self.cursor = self.limit - v_7
try:
# call verb, line 206
if not self.r_verb():
raise lab7()
raise lab5()
except lab7: pass
self.cursor = self.limit - v_7
# call noun, line 206
if not self.r_noun():
raise lab1()
except lab5: pass
except lab2: pass
except lab1: pass
self.cursor = self.limit - v_4
# try, line 209
v_8 = self.limit - self.cursor
try:
# (, line 209
# [, line 209
self.ket = self.cursor
# literal, line 209
if not self.eq_s_b(1, u"\u0438"):
self.cursor = self.limit - v_8
raise lab8()
# ], line 209
self.bra = self.cursor
# delete, line 209
if not self.slice_del():
return False
except lab8: pass
# do, line 212
v_9 = self.limit - self.cursor
try:
# call derivational, line 212
if not self.r_derivational():
raise lab9()
except lab9: pass
self.cursor = self.limit - v_9
# do, line 213
v_10 = self.limit - self.cursor
try:
# call tidy_up, line 213
if not self.r_tidy_up():
raise lab10()
except lab10: pass
self.cursor = self.limit - v_10
self.limit_backward = v_3
self.cursor = self.limit_backward
return True
def equals(self, o):
return isinstance(o, RussianStemmer)
def hashCode(self):
return hash("RussianStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
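
# Illustrative usage (an assumption: the vendored BaseStemmer exposes the
# same stemWord() wrapper as the upstream snowballstemmer package; the
# result is an expectation, not verified here):
#
#     stemmer = RussianStemmer()
#     stemmer.stemWord(u"книги")   # expected: u"книг"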


# ---- anaconda_lib/snowballstemmer/spanish_stemmer.py ----

# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class SpanishStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"", -1, 6),
Among(u"\u00E1", 0, 1),
Among(u"\u00E9", 0, 2),
Among(u"\u00ED", 0, 3),
Among(u"\u00F3", 0, 4),
Among(u"\u00FA", 0, 5)
]
a_1 = [
Among(u"la", -1, -1),
Among(u"sela", 0, -1),
Among(u"le", -1, -1),
Among(u"me", -1, -1),
Among(u"se", -1, -1),
Among(u"lo", -1, -1),
Among(u"selo", 5, -1),
Among(u"las", -1, -1),
Among(u"selas", 7, -1),
Among(u"les", -1, -1),
Among(u"los", -1, -1),
Among(u"selos", 10, -1),
Among(u"nos", -1, -1)
]
a_2 = [
Among(u"ando", -1, 6),
Among(u"iendo", -1, 6),
Among(u"yendo", -1, 7),
Among(u"\u00E1ndo", -1, 2),
Among(u"i\u00E9ndo", -1, 1),
Among(u"ar", -1, 6),
Among(u"er", -1, 6),
Among(u"ir", -1, 6),
Among(u"\u00E1r", -1, 3),
Among(u"\u00E9r", -1, 4),
Among(u"\u00EDr", -1, 5)
]
a_3 = [
Among(u"ic", -1, -1),
Among(u"ad", -1, -1),
Among(u"os", -1, -1),
Among(u"iv", -1, 1)
]
a_4 = [
Among(u"able", -1, 1),
Among(u"ible", -1, 1),
Among(u"ante", -1, 1)
]
a_5 = [
Among(u"ic", -1, 1),
Among(u"abil", -1, 1),
Among(u"iv", -1, 1)
]
a_6 = [
Among(u"ica", -1, 1),
Among(u"ancia", -1, 2),
Among(u"encia", -1, 5),
Among(u"adora", -1, 2),
Among(u"osa", -1, 1),
Among(u"ista", -1, 1),
Among(u"iva", -1, 9),
Among(u"anza", -1, 1),
Among(u"log\u00EDa", -1, 3),
Among(u"idad", -1, 8),
Among(u"able", -1, 1),
Among(u"ible", -1, 1),
Among(u"ante", -1, 2),
Among(u"mente", -1, 7),
Among(u"amente", 13, 6),
Among(u"aci\u00F3n", -1, 2),
Among(u"uci\u00F3n", -1, 4),
Among(u"ico", -1, 1),
Among(u"ismo", -1, 1),
Among(u"oso", -1, 1),
Among(u"amiento", -1, 1),
Among(u"imiento", -1, 1),
Among(u"ivo", -1, 9),
Among(u"ador", -1, 2),
Among(u"icas", -1, 1),
Among(u"ancias", -1, 2),
Among(u"encias", -1, 5),
Among(u"adoras", -1, 2),
Among(u"osas", -1, 1),
Among(u"istas", -1, 1),
Among(u"ivas", -1, 9),
Among(u"anzas", -1, 1),
Among(u"log\u00EDas", -1, 3),
Among(u"idades", -1, 8),
Among(u"ables", -1, 1),
Among(u"ibles", -1, 1),
Among(u"aciones", -1, 2),
Among(u"uciones", -1, 4),
Among(u"adores", -1, 2),
Among(u"antes", -1, 2),
Among(u"icos", -1, 1),
Among(u"ismos", -1, 1),
Among(u"osos", -1, 1),
Among(u"amientos", -1, 1),
Among(u"imientos", -1, 1),
Among(u"ivos", -1, 9)
]
a_7 = [
Among(u"ya", -1, 1),
Among(u"ye", -1, 1),
Among(u"yan", -1, 1),
Among(u"yen", -1, 1),
Among(u"yeron", -1, 1),
Among(u"yendo", -1, 1),
Among(u"yo", -1, 1),
Among(u"yas", -1, 1),
Among(u"yes", -1, 1),
Among(u"yais", -1, 1),
Among(u"yamos", -1, 1),
Among(u"y\u00F3", -1, 1)
]
a_8 = [
Among(u"aba", -1, 2),
Among(u"ada", -1, 2),
Among(u"ida", -1, 2),
Among(u"ara", -1, 2),
Among(u"iera", -1, 2),
Among(u"\u00EDa", -1, 2),
Among(u"ar\u00EDa", 5, 2),
Among(u"er\u00EDa", 5, 2),
Among(u"ir\u00EDa", 5, 2),
Among(u"ad", -1, 2),
Among(u"ed", -1, 2),
Among(u"id", -1, 2),
Among(u"ase", -1, 2),
Among(u"iese", -1, 2),
Among(u"aste", -1, 2),
Among(u"iste", -1, 2),
Among(u"an", -1, 2),
Among(u"aban", 16, 2),
Among(u"aran", 16, 2),
Among(u"ieran", 16, 2),
Among(u"\u00EDan", 16, 2),
Among(u"ar\u00EDan", 20, 2),
Among(u"er\u00EDan", 20, 2),
Among(u"ir\u00EDan", 20, 2),
Among(u"en", -1, 1),
Among(u"asen", 24, 2),
Among(u"iesen", 24, 2),
Among(u"aron", -1, 2),
Among(u"ieron", -1, 2),
Among(u"ar\u00E1n", -1, 2),
Among(u"er\u00E1n", -1, 2),
Among(u"ir\u00E1n", -1, 2),
Among(u"ado", -1, 2),
Among(u"ido", -1, 2),
Among(u"ando", -1, 2),
Among(u"iendo", -1, 2),
Among(u"ar", -1, 2),
Among(u"er", -1, 2),
Among(u"ir", -1, 2),
Among(u"as", -1, 2),
Among(u"abas", 39, 2),
Among(u"adas", 39, 2),
Among(u"idas", 39, 2),
Among(u"aras", 39, 2),
Among(u"ieras", 39, 2),
Among(u"\u00EDas", 39, 2),
Among(u"ar\u00EDas", 45, 2),
Among(u"er\u00EDas", 45, 2),
Among(u"ir\u00EDas", 45, 2),
Among(u"es", -1, 1),
Among(u"ases", 49, 2),
Among(u"ieses", 49, 2),
Among(u"abais", -1, 2),
Among(u"arais", -1, 2),
Among(u"ierais", -1, 2),
Among(u"\u00EDais", -1, 2),
Among(u"ar\u00EDais", 55, 2),
Among(u"er\u00EDais", 55, 2),
Among(u"ir\u00EDais", 55, 2),
Among(u"aseis", -1, 2),
Among(u"ieseis", -1, 2),
Among(u"asteis", -1, 2),
Among(u"isteis", -1, 2),
Among(u"\u00E1is", -1, 2),
Among(u"\u00E9is", -1, 1),
Among(u"ar\u00E9is", 64, 2),
Among(u"er\u00E9is", 64, 2),
Among(u"ir\u00E9is", 64, 2),
Among(u"ados", -1, 2),
Among(u"idos", -1, 2),
Among(u"amos", -1, 2),
Among(u"\u00E1bamos", 70, 2),
Among(u"\u00E1ramos", 70, 2),
Among(u"i\u00E9ramos", 70, 2),
Among(u"\u00EDamos", 70, 2),
Among(u"ar\u00EDamos", 74, 2),
Among(u"er\u00EDamos", 74, 2),
Among(u"ir\u00EDamos", 74, 2),
Among(u"emos", -1, 1),
Among(u"aremos", 78, 2),
Among(u"eremos", 78, 2),
Among(u"iremos", 78, 2),
Among(u"\u00E1semos", 78, 2),
Among(u"i\u00E9semos", 78, 2),
Among(u"imos", -1, 2),
Among(u"ar\u00E1s", -1, 2),
Among(u"er\u00E1s", -1, 2),
Among(u"ir\u00E1s", -1, 2),
Among(u"\u00EDs", -1, 2),
Among(u"ar\u00E1", -1, 2),
Among(u"er\u00E1", -1, 2),
Among(u"ir\u00E1", -1, 2),
Among(u"ar\u00E9", -1, 2),
Among(u"er\u00E9", -1, 2),
Among(u"ir\u00E9", -1, 2),
Among(u"i\u00F3", -1, 2)
]
a_9 = [
Among(u"a", -1, 1),
Among(u"e", -1, 2),
Among(u"o", -1, 1),
Among(u"os", -1, 1),
Among(u"\u00E1", -1, 1),
Among(u"\u00E9", -1, 2),
Among(u"\u00ED", -1, 1),
Among(u"\u00F3", -1, 1)
]
g_v = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 17, 4, 10]
I_p2 = 0
I_p1 = 0
I_pV = 0
def copy_from(self, other):
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.I_pV = other.I_pV
        super().copy_from(other)
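    # r_mark_regions computes the standard Snowball regions for Spanish: RV
    # (the Romance-language variant keyed off the first two letters), R1
    # (after the first non-vowel following a vowel) and R2 (the same rule
    # applied again inside R1).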
def r_mark_regions(self):
# (, line 31
        self.I_pV = self.limit
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 37
v_1 = self.cursor
try:
# (, line 37
# or, line 39
try:
v_2 = self.cursor
try:
# (, line 38
if not self.in_grouping(SpanishStemmer.g_v, 97, 252):
raise lab2()
# or, line 38
try:
v_3 = self.cursor
try:
# (, line 38
if not self.out_grouping(SpanishStemmer.g_v, 97, 252):
raise lab4()
# gopast, line 38
try:
while True:
try:
if not self.in_grouping(SpanishStemmer.g_v, 97, 252):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
raise lab4()
self.cursor += 1
except lab5: pass
raise lab3()
except lab4: pass
self.cursor = v_3
# (, line 38
if not self.in_grouping(SpanishStemmer.g_v, 97, 252):
raise lab2()
# gopast, line 38
try:
while True:
try:
if not self.out_grouping(SpanishStemmer.g_v, 97, 252):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab7: pass
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_2
# (, line 40
if not self.out_grouping(SpanishStemmer.g_v, 97, 252):
raise lab0()
# or, line 40
try:
v_6 = self.cursor
try:
# (, line 40
if not self.out_grouping(SpanishStemmer.g_v, 97, 252):
raise lab10()
# gopast, line 40
try:
while True:
try:
if not self.in_grouping(SpanishStemmer.g_v, 97, 252):
raise lab12()
raise lab11()
except lab12: pass
if self.cursor >= self.limit:
raise lab10()
self.cursor += 1
except lab11: pass
raise lab9()
except lab10: pass
self.cursor = v_6
# (, line 40
if not self.in_grouping(SpanishStemmer.g_v, 97, 252):
raise lab0()
# next, line 40
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab9: pass
except lab1: pass
# setmark pV, line 41
self.I_pV = self.cursor
except lab0: pass
self.cursor = v_1
# do, line 43
v_8 = self.cursor
try:
# (, line 43
# gopast, line 44
try:
while True:
try:
if not self.in_grouping(SpanishStemmer.g_v, 97, 252):
raise lab15()
raise lab14()
except lab15: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab14: pass
# gopast, line 44
try:
while True:
try:
if not self.out_grouping(SpanishStemmer.g_v, 97, 252):
raise lab17()
raise lab16()
except lab17: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab16: pass
# setmark p1, line 44
self.I_p1 = self.cursor
# gopast, line 45
try:
while True:
try:
if not self.in_grouping(SpanishStemmer.g_v, 97, 252):
raise lab19()
raise lab18()
except lab19: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab18: pass
# gopast, line 45
try:
while True:
try:
if not self.out_grouping(SpanishStemmer.g_v, 97, 252):
raise lab21()
raise lab20()
except lab21: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab20: pass
# setmark p2, line 45
self.I_p2 = self.cursor
except lab13: pass
self.cursor = v_8
return True
def r_postlude(self):
# repeat, line 49
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 49
# [, line 50
self.bra = self.cursor
# substring, line 50
among_var = self.find_among(SpanishStemmer.a_0, 6)
if among_var == 0:
raise lab2()
# ], line 50
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 51
# <-, line 51
if not self.slice_from(u"a"):
return False
elif among_var == 2:
# (, line 52
# <-, line 52
if not self.slice_from(u"e"):
return False
elif among_var == 3:
# (, line 53
# <-, line 53
if not self.slice_from(u"i"):
return False
elif among_var == 4:
# (, line 54
# <-, line 54
if not self.slice_from(u"o"):
return False
elif among_var == 5:
# (, line 55
# <-, line 55
if not self.slice_from(u"u"):
return False
elif among_var == 6:
# (, line 57
# next, line 57
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_RV(self):
if not self.I_pV <= self.cursor:
return False
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
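    # Worked example, added for clarity (values per the Snowball definition
    # of RV for Spanish): RV is "ho" in u"macho", "va" in u"oliva", "bajo"
    # in u"trabajo" and "eo" in u"\u00E1ureo". The three predicates above
    # only check that the backward cursor has not moved left of the marker
    # recorded by r_mark_regions().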
def r_attached_pronoun(self):
# (, line 67
# [, line 68
self.ket = self.cursor
# substring, line 68
if self.find_among_b(SpanishStemmer.a_1, 13) == 0:
return False
# ], line 68
self.bra = self.cursor
# substring, line 72
among_var = self.find_among_b(SpanishStemmer.a_2, 11)
if among_var == 0:
return False
# call RV, line 72
if not self.r_RV():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 73
# ], line 73
self.bra = self.cursor
# <-, line 73
if not self.slice_from(u"iendo"):
return False
elif among_var == 2:
# (, line 74
# ], line 74
self.bra = self.cursor
# <-, line 74
if not self.slice_from(u"ando"):
return False
elif among_var == 3:
# (, line 75
# ], line 75
self.bra = self.cursor
# <-, line 75
if not self.slice_from(u"ar"):
return False
elif among_var == 4:
# (, line 76
# ], line 76
self.bra = self.cursor
# <-, line 76
if not self.slice_from(u"er"):
return False
elif among_var == 5:
# (, line 77
# ], line 77
self.bra = self.cursor
# <-, line 77
if not self.slice_from(u"ir"):
return False
elif among_var == 6:
# (, line 81
# delete, line 81
if not self.slice_del():
return False
elif among_var == 7:
# (, line 82
# literal, line 82
if not self.eq_s_b(1, u"u"):
return False
# delete, line 82
if not self.slice_del():
return False
return True
def r_standard_suffix(self):
# (, line 86
# [, line 87
self.ket = self.cursor
# substring, line 87
among_var = self.find_among_b(SpanishStemmer.a_6, 46)
if among_var == 0:
return False
# ], line 87
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 98
# call R2, line 99
if not self.r_R2():
return False
# delete, line 99
if not self.slice_del():
return False
elif among_var == 2:
# (, line 104
# call R2, line 105
if not self.r_R2():
return False
# delete, line 105
if not self.slice_del():
return False
# try, line 106
v_1 = self.limit - self.cursor
try:
# (, line 106
# [, line 106
self.ket = self.cursor
# literal, line 106
if not self.eq_s_b(2, u"ic"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 106
self.bra = self.cursor
# call R2, line 106
if not self.r_R2():
self.cursor = self.limit - v_1
raise lab0()
# delete, line 106
if not self.slice_del():
return False
except lab0: pass
elif among_var == 3:
# (, line 110
# call R2, line 111
if not self.r_R2():
return False
# <-, line 111
if not self.slice_from(u"log"):
return False
elif among_var == 4:
# (, line 114
# call R2, line 115
if not self.r_R2():
return False
# <-, line 115
if not self.slice_from(u"u"):
return False
elif among_var == 5:
# (, line 118
# call R2, line 119
if not self.r_R2():
return False
# <-, line 119
if not self.slice_from(u"ente"):
return False
elif among_var == 6:
# (, line 122
# call R1, line 123
if not self.r_R1():
return False
# delete, line 123
if not self.slice_del():
return False
# try, line 124
v_2 = self.limit - self.cursor
try:
# (, line 124
# [, line 125
self.ket = self.cursor
# substring, line 125
among_var = self.find_among_b(SpanishStemmer.a_3, 4)
if among_var == 0:
self.cursor = self.limit - v_2
raise lab1()
# ], line 125
self.bra = self.cursor
# call R2, line 125
if not self.r_R2():
self.cursor = self.limit - v_2
raise lab1()
# delete, line 125
if not self.slice_del():
return False
if among_var == 0:
self.cursor = self.limit - v_2
raise lab1()
elif among_var == 1:
# (, line 126
# [, line 126
self.ket = self.cursor
# literal, line 126
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_2
raise lab1()
# ], line 126
self.bra = self.cursor
# call R2, line 126
if not self.r_R2():
self.cursor = self.limit - v_2
raise lab1()
# delete, line 126
if not self.slice_del():
return False
except lab1: pass
elif among_var == 7:
# (, line 134
# call R2, line 135
if not self.r_R2():
return False
# delete, line 135
if not self.slice_del():
return False
# try, line 136
v_3 = self.limit - self.cursor
try:
# (, line 136
# [, line 137
self.ket = self.cursor
# substring, line 137
among_var = self.find_among_b(SpanishStemmer.a_4, 3)
if among_var == 0:
self.cursor = self.limit - v_3
raise lab2()
# ], line 137
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_3
raise lab2()
elif among_var == 1:
# (, line 140
# call R2, line 140
if not self.r_R2():
self.cursor = self.limit - v_3
raise lab2()
# delete, line 140
if not self.slice_del():
return False
except lab2: pass
elif among_var == 8:
# (, line 146
# call R2, line 147
if not self.r_R2():
return False
# delete, line 147
if not self.slice_del():
return False
# try, line 148
v_4 = self.limit - self.cursor
try:
# (, line 148
# [, line 149
self.ket = self.cursor
# substring, line 149
among_var = self.find_among_b(SpanishStemmer.a_5, 3)
if among_var == 0:
self.cursor = self.limit - v_4
raise lab3()
# ], line 149
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_4
raise lab3()
elif among_var == 1:
# (, line 152
# call R2, line 152
if not self.r_R2():
self.cursor = self.limit - v_4
raise lab3()
# delete, line 152
if not self.slice_del():
return False
except lab3: pass
elif among_var == 9:
# (, line 158
# call R2, line 159
if not self.r_R2():
return False
# delete, line 159
if not self.slice_del():
return False
# try, line 160
v_5 = self.limit - self.cursor
try:
# (, line 160
# [, line 161
self.ket = self.cursor
# literal, line 161
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_5
raise lab4()
# ], line 161
self.bra = self.cursor
# call R2, line 161
if not self.r_R2():
self.cursor = self.limit - v_5
raise lab4()
# delete, line 161
if not self.slice_del():
return False
except lab4: pass
return True
def r_y_verb_suffix(self):
# (, line 167
# setlimit, line 168
v_1 = self.limit - self.cursor
# tomark, line 168
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 168
# [, line 168
self.ket = self.cursor
# substring, line 168
among_var = self.find_among_b(SpanishStemmer.a_7, 12)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 168
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 171
# literal, line 171
if not self.eq_s_b(1, u"u"):
return False
# delete, line 171
if not self.slice_del():
return False
return True
def r_verb_suffix(self):
# (, line 175
# setlimit, line 176
v_1 = self.limit - self.cursor
# tomark, line 176
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 176
# [, line 176
self.ket = self.cursor
# substring, line 176
among_var = self.find_among_b(SpanishStemmer.a_8, 96)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 176
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 179
# try, line 179
v_3 = self.limit - self.cursor
try:
# (, line 179
# literal, line 179
if not self.eq_s_b(1, u"u"):
self.cursor = self.limit - v_3
raise lab0()
# test, line 179
v_4 = self.limit - self.cursor
# literal, line 179
if not self.eq_s_b(1, u"g"):
self.cursor = self.limit - v_3
raise lab0()
self.cursor = self.limit - v_4
except lab0: pass
# ], line 179
self.bra = self.cursor
# delete, line 179
if not self.slice_del():
return False
elif among_var == 2:
# (, line 200
# delete, line 200
if not self.slice_del():
return False
return True
def r_residual_suffix(self):
# (, line 204
# [, line 205
self.ket = self.cursor
# substring, line 205
among_var = self.find_among_b(SpanishStemmer.a_9, 8)
if among_var == 0:
return False
# ], line 205
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 208
# call RV, line 208
if not self.r_RV():
return False
# delete, line 208
if not self.slice_del():
return False
elif among_var == 2:
# (, line 210
# call RV, line 210
if not self.r_RV():
return False
# delete, line 210
if not self.slice_del():
return False
# try, line 210
v_1 = self.limit - self.cursor
try:
# (, line 210
# [, line 210
self.ket = self.cursor
# literal, line 210
if not self.eq_s_b(1, u"u"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 210
self.bra = self.cursor
# test, line 210
v_2 = self.limit - self.cursor
# literal, line 210
if not self.eq_s_b(1, u"g"):
self.cursor = self.limit - v_1
raise lab0()
self.cursor = self.limit - v_2
# call RV, line 210
if not self.r_RV():
self.cursor = self.limit - v_1
raise lab0()
# delete, line 210
if not self.slice_del():
return False
except lab0: pass
return True
def _stem(self):
# (, line 215
# do, line 216
v_1 = self.cursor
try:
# call mark_regions, line 216
if not self.r_mark_regions():
raise lab0()
except lab0: pass
self.cursor = v_1
# backwards, line 217
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 217
# do, line 218
v_2 = self.limit - self.cursor
try:
# call attached_pronoun, line 218
if not self.r_attached_pronoun():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 219
v_3 = self.limit - self.cursor
try:
# (, line 219
# or, line 219
try:
v_4 = self.limit - self.cursor
try:
# call standard_suffix, line 219
if not self.r_standard_suffix():
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_4
try:
# call y_verb_suffix, line 220
if not self.r_y_verb_suffix():
raise lab5()
raise lab3()
except lab5: pass
self.cursor = self.limit - v_4
# call verb_suffix, line 221
if not self.r_verb_suffix():
raise lab2()
except lab3: pass
except lab2: pass
self.cursor = self.limit - v_3
# do, line 223
v_5 = self.limit - self.cursor
try:
# call residual_suffix, line 223
if not self.r_residual_suffix():
raise lab6()
except lab6: pass
self.cursor = self.limit - v_5
self.cursor = self.limit_backward
# do, line 225
v_6 = self.cursor
try:
# call postlude, line 225
if not self.r_postlude():
raise lab7()
except lab7: pass
self.cursor = v_6
return True
def equals(self, o):
return isinstance(o, SpanishStemmer)
def hashCode(self):
return hash("SpanishStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
class lab16(BaseException): pass
class lab17(BaseException): pass
class lab18(BaseException): pass
class lab19(BaseException): pass
class lab20(BaseException): pass
class lab21(BaseException): pass
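# Minimal usage sketch, added for illustration (not part of the generated
# output). It assumes the vendored BaseStemmer exposes stemWord() the way
# the upstream snowballstemmer package does: load the word, run _stem()
# and return the stemmed result.
if __name__ == "__main__":
    stemmer = SpanishStemmer()
    for word in (u"corriendo", u"canciones", u"librer\u00EDas"):
        # stemWord() resets the stemmer's cursor/limit state per word.
        print(word, "->", stemmer.stemWord(word))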
# ---- DamnWidget_anaconda/anaconda_lib/snowballstemmer/english_stemmer.py ----
# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class EnglishStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"arsen", -1, -1),
Among(u"commun", -1, -1),
Among(u"gener", -1, -1)
]
a_1 = [
Among(u"'", -1, 1),
Among(u"'s'", 0, 1),
Among(u"'s", -1, 1)
]
a_2 = [
Among(u"ied", -1, 2),
Among(u"s", -1, 3),
Among(u"ies", 1, 2),
Among(u"sses", 1, 1),
Among(u"ss", 1, -1),
Among(u"us", 1, -1)
]
a_3 = [
Among(u"", -1, 3),
Among(u"bb", 0, 2),
Among(u"dd", 0, 2),
Among(u"ff", 0, 2),
Among(u"gg", 0, 2),
Among(u"bl", 0, 1),
Among(u"mm", 0, 2),
Among(u"nn", 0, 2),
Among(u"pp", 0, 2),
Among(u"rr", 0, 2),
Among(u"at", 0, 1),
Among(u"tt", 0, 2),
Among(u"iz", 0, 1)
]
a_4 = [
Among(u"ed", -1, 2),
Among(u"eed", 0, 1),
Among(u"ing", -1, 2),
Among(u"edly", -1, 2),
Among(u"eedly", 3, 1),
Among(u"ingly", -1, 2)
]
a_5 = [
Among(u"anci", -1, 3),
Among(u"enci", -1, 2),
Among(u"ogi", -1, 13),
Among(u"li", -1, 16),
Among(u"bli", 3, 12),
Among(u"abli", 4, 4),
Among(u"alli", 3, 8),
Among(u"fulli", 3, 14),
Among(u"lessli", 3, 15),
Among(u"ousli", 3, 10),
Among(u"entli", 3, 5),
Among(u"aliti", -1, 8),
Among(u"biliti", -1, 12),
Among(u"iviti", -1, 11),
Among(u"tional", -1, 1),
Among(u"ational", 14, 7),
Among(u"alism", -1, 8),
Among(u"ation", -1, 7),
Among(u"ization", 17, 6),
Among(u"izer", -1, 6),
Among(u"ator", -1, 7),
Among(u"iveness", -1, 11),
Among(u"fulness", -1, 9),
Among(u"ousness", -1, 10)
]
a_6 = [
Among(u"icate", -1, 4),
Among(u"ative", -1, 6),
Among(u"alize", -1, 3),
Among(u"iciti", -1, 4),
Among(u"ical", -1, 4),
Among(u"tional", -1, 1),
Among(u"ational", 5, 2),
Among(u"ful", -1, 5),
Among(u"ness", -1, 5)
]
a_7 = [
Among(u"ic", -1, 1),
Among(u"ance", -1, 1),
Among(u"ence", -1, 1),
Among(u"able", -1, 1),
Among(u"ible", -1, 1),
Among(u"ate", -1, 1),
Among(u"ive", -1, 1),
Among(u"ize", -1, 1),
Among(u"iti", -1, 1),
Among(u"al", -1, 1),
Among(u"ism", -1, 1),
Among(u"ion", -1, 2),
Among(u"er", -1, 1),
Among(u"ous", -1, 1),
Among(u"ant", -1, 1),
Among(u"ent", -1, 1),
Among(u"ment", 15, 1),
Among(u"ement", 16, 1)
]
a_8 = [
Among(u"e", -1, 1),
Among(u"l", -1, 2)
]
a_9 = [
Among(u"succeed", -1, -1),
Among(u"proceed", -1, -1),
Among(u"exceed", -1, -1),
Among(u"canning", -1, -1),
Among(u"inning", -1, -1),
Among(u"earring", -1, -1),
Among(u"herring", -1, -1),
Among(u"outing", -1, -1)
]
a_10 = [
Among(u"andes", -1, -1),
Among(u"atlas", -1, -1),
Among(u"bias", -1, -1),
Among(u"cosmos", -1, -1),
Among(u"dying", -1, 3),
Among(u"early", -1, 9),
Among(u"gently", -1, 7),
Among(u"howe", -1, -1),
Among(u"idly", -1, 6),
Among(u"lying", -1, 4),
Among(u"news", -1, -1),
Among(u"only", -1, 10),
Among(u"singly", -1, 11),
Among(u"skies", -1, 2),
Among(u"skis", -1, 1),
Among(u"sky", -1, -1),
Among(u"tying", -1, 5),
Among(u"ugly", -1, 8)
]
g_v = [17, 65, 16, 1]
g_v_WXY = [1, 17, 65, 208, 1]
g_valid_LI = [55, 141, 2]
B_Y_found = False
I_p2 = 0
I_p1 = 0
def copy_from(self, other):
self.B_Y_found = other.B_Y_found
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
        super().copy_from(other)
def r_prelude(self):
# (, line 25
# unset Y_found, line 26
self.B_Y_found = False
# do, line 27
v_1 = self.cursor
try:
# (, line 27
# [, line 27
self.bra = self.cursor
# literal, line 27
if not self.eq_s(1, u"'"):
raise lab0()
# ], line 27
self.ket = self.cursor
# delete, line 27
if not self.slice_del():
return False
except lab0: pass
self.cursor = v_1
# do, line 28
v_2 = self.cursor
try:
# (, line 28
# [, line 28
self.bra = self.cursor
# literal, line 28
if not self.eq_s(1, u"y"):
raise lab1()
# ], line 28
self.ket = self.cursor
# <-, line 28
if not self.slice_from(u"Y"):
return False
# set Y_found, line 28
self.B_Y_found = True
except lab1: pass
self.cursor = v_2
# do, line 29
v_3 = self.cursor
try:
# repeat, line 29
try:
while True:
try:
v_4 = self.cursor
try:
# (, line 29
# goto, line 29
try:
while True:
v_5 = self.cursor
try:
# (, line 29
if not self.in_grouping(EnglishStemmer.g_v, 97, 121):
raise lab7()
# [, line 29
self.bra = self.cursor
# literal, line 29
if not self.eq_s(1, u"y"):
raise lab7()
# ], line 29
self.ket = self.cursor
self.cursor = v_5
raise lab6()
except lab7: pass
self.cursor = v_5
if self.cursor >= self.limit:
raise lab5()
self.cursor += 1
except lab6: pass
# <-, line 29
if not self.slice_from(u"Y"):
return False
# set Y_found, line 29
self.B_Y_found = True
raise lab4()
except lab5: pass
self.cursor = v_4
raise lab3()
except lab4: pass
except lab3: pass
except lab2: pass
self.cursor = v_3
return True
def r_mark_regions(self):
# (, line 32
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 35
v_1 = self.cursor
try:
# (, line 35
# or, line 41
try:
v_2 = self.cursor
try:
# among, line 36
if self.find_among(EnglishStemmer.a_0, 3) == 0:
raise lab2()
raise lab1()
except lab2: pass
self.cursor = v_2
# (, line 41
# gopast, line 41
try:
while True:
try:
if not self.in_grouping(EnglishStemmer.g_v, 97, 121):
raise lab4()
raise lab3()
except lab4: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab3: pass
# gopast, line 41
try:
while True:
try:
if not self.out_grouping(EnglishStemmer.g_v, 97, 121):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab5: pass
except lab1: pass
# setmark p1, line 42
self.I_p1 = self.cursor
# gopast, line 43
try:
while True:
try:
if not self.in_grouping(EnglishStemmer.g_v, 97, 121):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab7: pass
# gopast, line 43
try:
while True:
try:
if not self.out_grouping(EnglishStemmer.g_v, 97, 121):
raise lab10()
raise lab9()
except lab10: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab9: pass
# setmark p2, line 43
self.I_p2 = self.cursor
except lab0: pass
self.cursor = v_1
return True
def r_shortv(self):
# (, line 49
# or, line 51
try:
v_1 = self.limit - self.cursor
try:
# (, line 50
if not self.out_grouping_b(EnglishStemmer.g_v_WXY, 89, 121):
raise lab1()
if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
raise lab1()
if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 52
if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
# atlimit, line 52
if self.cursor > self.limit_backward:
return False
except lab0: pass
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
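    # Worked example, added for clarity (the standard Snowball illustration):
    # in u"beautiful" the first non-vowel that follows a vowel is the "t",
    # so R1 is "iful"; applying the same rule inside R1 gives R2 = "ul".
    # r_R1()/r_R2() succeed once the backward cursor lies inside the region
    # marked by r_mark_regions().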
def r_Step_1a(self):
# (, line 58
# try, line 59
v_1 = self.limit - self.cursor
try:
# (, line 59
# [, line 60
self.ket = self.cursor
# substring, line 60
among_var = self.find_among_b(EnglishStemmer.a_1, 3)
if among_var == 0:
self.cursor = self.limit - v_1
raise lab0()
# ], line 60
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_1
raise lab0()
elif among_var == 1:
# (, line 62
# delete, line 62
if not self.slice_del():
return False
except lab0: pass
# [, line 65
self.ket = self.cursor
# substring, line 65
among_var = self.find_among_b(EnglishStemmer.a_2, 6)
if among_var == 0:
return False
# ], line 65
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 66
# <-, line 66
if not self.slice_from(u"ss"):
return False
elif among_var == 2:
# (, line 68
# or, line 68
try:
v_2 = self.limit - self.cursor
try:
# (, line 68
# hop, line 68
c = self.cursor - 2
if self.limit_backward > c or c > self.limit:
raise lab2()
self.cursor = c
# <-, line 68
if not self.slice_from(u"i"):
return False
raise lab1()
except lab2: pass
self.cursor = self.limit - v_2
# <-, line 68
if not self.slice_from(u"ie"):
return False
except lab1: pass
elif among_var == 3:
# (, line 69
# next, line 69
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# gopast, line 69
try:
while True:
try:
if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
raise lab4()
raise lab3()
except lab4: pass
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab3: pass
# delete, line 69
if not self.slice_del():
return False
return True
def r_Step_1b(self):
# (, line 74
# [, line 75
self.ket = self.cursor
# substring, line 75
among_var = self.find_among_b(EnglishStemmer.a_4, 6)
if among_var == 0:
return False
# ], line 75
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 77
# call R1, line 77
if not self.r_R1():
return False
# <-, line 77
if not self.slice_from(u"ee"):
return False
elif among_var == 2:
# (, line 79
# test, line 80
v_1 = self.limit - self.cursor
# gopast, line 80
try:
while True:
try:
if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
raise lab1()
raise lab0()
except lab1: pass
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab0: pass
self.cursor = self.limit - v_1
# delete, line 80
if not self.slice_del():
return False
# test, line 81
v_3 = self.limit - self.cursor
# substring, line 81
among_var = self.find_among_b(EnglishStemmer.a_3, 13)
if among_var == 0:
return False
self.cursor = self.limit - v_3
if among_var == 0:
return False
elif among_var == 1:
# (, line 83
# <+, line 83
c = self.cursor
self.insert(self.cursor, self.cursor, u"e")
self.cursor = c
elif among_var == 2:
# (, line 86
# [, line 86
self.ket = self.cursor
# next, line 86
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 86
self.bra = self.cursor
# delete, line 86
if not self.slice_del():
return False
elif among_var == 3:
# (, line 87
# atmark, line 87
if self.cursor != self.I_p1:
return False
# test, line 87
v_4 = self.limit - self.cursor
# call shortv, line 87
if not self.r_shortv():
return False
self.cursor = self.limit - v_4
# <+, line 87
c = self.cursor
self.insert(self.cursor, self.cursor, u"e")
self.cursor = c
return True
def r_Step_1c(self):
# (, line 93
# [, line 94
self.ket = self.cursor
# or, line 94
try:
v_1 = self.limit - self.cursor
try:
# literal, line 94
if not self.eq_s_b(1, u"y"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# literal, line 94
if not self.eq_s_b(1, u"Y"):
return False
except lab0: pass
# ], line 94
self.bra = self.cursor
if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
# not, line 95
v_2 = self.limit - self.cursor
try:
# atlimit, line 95
if self.cursor > self.limit_backward:
raise lab2()
return False
except lab2: pass
self.cursor = self.limit - v_2
# <-, line 96
if not self.slice_from(u"i"):
return False
return True
def r_Step_2(self):
# (, line 99
# [, line 100
self.ket = self.cursor
# substring, line 100
among_var = self.find_among_b(EnglishStemmer.a_5, 24)
if among_var == 0:
return False
# ], line 100
self.bra = self.cursor
# call R1, line 100
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 101
# <-, line 101
if not self.slice_from(u"tion"):
return False
elif among_var == 2:
# (, line 102
# <-, line 102
if not self.slice_from(u"ence"):
return False
elif among_var == 3:
# (, line 103
# <-, line 103
if not self.slice_from(u"ance"):
return False
elif among_var == 4:
# (, line 104
# <-, line 104
if not self.slice_from(u"able"):
return False
elif among_var == 5:
# (, line 105
# <-, line 105
if not self.slice_from(u"ent"):
return False
elif among_var == 6:
# (, line 107
# <-, line 107
if not self.slice_from(u"ize"):
return False
elif among_var == 7:
# (, line 109
# <-, line 109
if not self.slice_from(u"ate"):
return False
elif among_var == 8:
# (, line 111
# <-, line 111
if not self.slice_from(u"al"):
return False
elif among_var == 9:
# (, line 112
# <-, line 112
if not self.slice_from(u"ful"):
return False
elif among_var == 10:
# (, line 114
# <-, line 114
if not self.slice_from(u"ous"):
return False
elif among_var == 11:
# (, line 116
# <-, line 116
if not self.slice_from(u"ive"):
return False
elif among_var == 12:
# (, line 118
# <-, line 118
if not self.slice_from(u"ble"):
return False
elif among_var == 13:
# (, line 119
# literal, line 119
if not self.eq_s_b(1, u"l"):
return False
# <-, line 119
if not self.slice_from(u"og"):
return False
elif among_var == 14:
# (, line 120
# <-, line 120
if not self.slice_from(u"ful"):
return False
elif among_var == 15:
# (, line 121
# <-, line 121
if not self.slice_from(u"less"):
return False
elif among_var == 16:
# (, line 122
if not self.in_grouping_b(EnglishStemmer.g_valid_LI, 99, 116):
return False
# delete, line 122
if not self.slice_del():
return False
return True
def r_Step_3(self):
# (, line 126
# [, line 127
self.ket = self.cursor
# substring, line 127
among_var = self.find_among_b(EnglishStemmer.a_6, 9)
if among_var == 0:
return False
# ], line 127
self.bra = self.cursor
# call R1, line 127
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 128
# <-, line 128
if not self.slice_from(u"tion"):
return False
elif among_var == 2:
# (, line 129
# <-, line 129
if not self.slice_from(u"ate"):
return False
elif among_var == 3:
# (, line 130
# <-, line 130
if not self.slice_from(u"al"):
return False
elif among_var == 4:
# (, line 132
# <-, line 132
if not self.slice_from(u"ic"):
return False
elif among_var == 5:
# (, line 134
# delete, line 134
if not self.slice_del():
return False
elif among_var == 6:
# (, line 136
# call R2, line 136
if not self.r_R2():
return False
# delete, line 136
if not self.slice_del():
return False
return True
def r_Step_4(self):
# (, line 140
# [, line 141
self.ket = self.cursor
# substring, line 141
among_var = self.find_among_b(EnglishStemmer.a_7, 18)
if among_var == 0:
return False
# ], line 141
self.bra = self.cursor
# call R2, line 141
if not self.r_R2():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 144
# delete, line 144
if not self.slice_del():
return False
elif among_var == 2:
# (, line 145
# or, line 145
try:
v_1 = self.limit - self.cursor
try:
# literal, line 145
if not self.eq_s_b(1, u"s"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# literal, line 145
if not self.eq_s_b(1, u"t"):
return False
except lab0: pass
# delete, line 145
if not self.slice_del():
return False
return True
def r_Step_5(self):
# (, line 149
# [, line 150
self.ket = self.cursor
# substring, line 150
among_var = self.find_among_b(EnglishStemmer.a_8, 2)
if among_var == 0:
return False
# ], line 150
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 151
# or, line 151
try:
v_1 = self.limit - self.cursor
try:
# call R2, line 151
if not self.r_R2():
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 151
# call R1, line 151
if not self.r_R1():
return False
# not, line 151
v_2 = self.limit - self.cursor
try:
# call shortv, line 151
if not self.r_shortv():
raise lab2()
return False
except lab2: pass
self.cursor = self.limit - v_2
except lab0: pass
# delete, line 151
if not self.slice_del():
return False
elif among_var == 2:
# (, line 152
# call R2, line 152
if not self.r_R2():
return False
# literal, line 152
if not self.eq_s_b(1, u"l"):
return False
# delete, line 152
if not self.slice_del():
return False
return True
def r_exception2(self):
# (, line 156
# [, line 158
self.ket = self.cursor
# substring, line 158
if self.find_among_b(EnglishStemmer.a_9, 8) == 0:
return False
# ], line 158
self.bra = self.cursor
# atlimit, line 158
if self.cursor > self.limit_backward:
return False
return True
def r_exception1(self):
# (, line 168
# [, line 170
self.bra = self.cursor
# substring, line 170
among_var = self.find_among(EnglishStemmer.a_10, 18)
if among_var == 0:
return False
# ], line 170
self.ket = self.cursor
# atlimit, line 170
if self.cursor < self.limit:
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 174
# <-, line 174
if not self.slice_from(u"ski"):
return False
elif among_var == 2:
# (, line 175
# <-, line 175
if not self.slice_from(u"sky"):
return False
elif among_var == 3:
# (, line 176
# <-, line 176
if not self.slice_from(u"die"):
return False
elif among_var == 4:
# (, line 177
# <-, line 177
if not self.slice_from(u"lie"):
return False
elif among_var == 5:
# (, line 178
# <-, line 178
if not self.slice_from(u"tie"):
return False
elif among_var == 6:
# (, line 182
# <-, line 182
if not self.slice_from(u"idl"):
return False
elif among_var == 7:
# (, line 183
# <-, line 183
if not self.slice_from(u"gentl"):
return False
elif among_var == 8:
# (, line 184
# <-, line 184
if not self.slice_from(u"ugli"):
return False
elif among_var == 9:
# (, line 185
# <-, line 185
if not self.slice_from(u"earli"):
return False
elif among_var == 10:
# (, line 186
# <-, line 186
if not self.slice_from(u"onli"):
return False
elif among_var == 11:
# (, line 187
# <-, line 187
if not self.slice_from(u"singl"):
return False
return True
def r_postlude(self):
# (, line 203
# Boolean test Y_found, line 203
if not self.B_Y_found:
return False
# repeat, line 203
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 203
# goto, line 203
try:
while True:
v_2 = self.cursor
try:
# (, line 203
# [, line 203
self.bra = self.cursor
# literal, line 203
if not self.eq_s(1, u"Y"):
raise lab4()
# ], line 203
self.ket = self.cursor
self.cursor = v_2
raise lab3()
except lab4: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab3: pass
# <-, line 203
if not self.slice_from(u"y"):
return False
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def _stem(self):
# (, line 205
# or, line 207
try:
v_1 = self.cursor
try:
# call exception1, line 207
if not self.r_exception1():
raise lab1()
raise lab0()
except lab1: pass
self.cursor = v_1
try:
# not, line 208
v_2 = self.cursor
try:
# hop, line 208
c = self.cursor + 3
if 0 > c or c > self.limit:
raise lab3()
self.cursor = c
raise lab2()
except lab3: pass
self.cursor = v_2
raise lab0()
except lab2: pass
self.cursor = v_1
# (, line 208
# do, line 209
v_3 = self.cursor
try:
# call prelude, line 209
if not self.r_prelude():
raise lab4()
except lab4: pass
self.cursor = v_3
# do, line 210
v_4 = self.cursor
try:
# call mark_regions, line 210
if not self.r_mark_regions():
raise lab5()
except lab5: pass
self.cursor = v_4
# backwards, line 211
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 211
# do, line 213
v_5 = self.limit - self.cursor
try:
# call Step_1a, line 213
if not self.r_Step_1a():
raise lab6()
except lab6: pass
self.cursor = self.limit - v_5
# or, line 215
try:
v_6 = self.limit - self.cursor
try:
# call exception2, line 215
if not self.r_exception2():
raise lab8()
raise lab7()
except lab8: pass
self.cursor = self.limit - v_6
# (, line 215
# do, line 217
v_7 = self.limit - self.cursor
try:
# call Step_1b, line 217
if not self.r_Step_1b():
raise lab9()
except lab9: pass
self.cursor = self.limit - v_7
# do, line 218
v_8 = self.limit - self.cursor
try:
# call Step_1c, line 218
if not self.r_Step_1c():
raise lab10()
except lab10: pass
self.cursor = self.limit - v_8
# do, line 220
v_9 = self.limit - self.cursor
try:
# call Step_2, line 220
if not self.r_Step_2():
raise lab11()
except lab11: pass
self.cursor = self.limit - v_9
# do, line 221
v_10 = self.limit - self.cursor
try:
# call Step_3, line 221
if not self.r_Step_3():
raise lab12()
except lab12: pass
self.cursor = self.limit - v_10
# do, line 222
v_11 = self.limit - self.cursor
try:
# call Step_4, line 222
if not self.r_Step_4():
raise lab13()
except lab13: pass
self.cursor = self.limit - v_11
# do, line 224
v_12 = self.limit - self.cursor
try:
# call Step_5, line 224
if not self.r_Step_5():
raise lab14()
except lab14: pass
self.cursor = self.limit - v_12
except lab7: pass
self.cursor = self.limit_backward
# do, line 227
v_13 = self.cursor
try:
# call postlude, line 227
if not self.r_postlude():
raise lab15()
except lab15: pass
self.cursor = v_13
except lab0: pass
return True
def equals(self, o):
return isinstance(o, EnglishStemmer)
def hashCode(self):
return hash("EnglishStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
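# The lab* classes above act as Snowball's backtracking labels: a generated
# block saves the cursor, attempts one alternative inside try/except, and
# raising labN is a structured "goto" to the next branch. A standalone
# sketch of the same pattern (illustrative only; first_prefix and _fail are
# hypothetical names, not part of this library):
def first_prefix(text, prefixes):
    class _fail(BaseException):
        pass
    for prefix in prefixes:
        try:
            # Attempt one alternative; raising _fail abandons it, like a
            # failing command inside a Snowball `or`.
            if not text.startswith(prefix):
                raise _fail()
            return prefix
        except _fail:
            # Backtrack: nothing was consumed, so try the next candidate.
            pass
    return None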
# ---- DamnWidget_anaconda/anaconda_lib/snowballstemmer/porter_stemmer.py ----
# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class PorterStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"s", -1, 3),
Among(u"ies", 0, 2),
Among(u"sses", 0, 1),
Among(u"ss", 0, -1)
]
a_1 = [
Among(u"", -1, 3),
Among(u"bb", 0, 2),
Among(u"dd", 0, 2),
Among(u"ff", 0, 2),
Among(u"gg", 0, 2),
Among(u"bl", 0, 1),
Among(u"mm", 0, 2),
Among(u"nn", 0, 2),
Among(u"pp", 0, 2),
Among(u"rr", 0, 2),
Among(u"at", 0, 1),
Among(u"tt", 0, 2),
Among(u"iz", 0, 1)
]
a_2 = [
Among(u"ed", -1, 2),
Among(u"eed", 0, 1),
Among(u"ing", -1, 2)
]
a_3 = [
Among(u"anci", -1, 3),
Among(u"enci", -1, 2),
Among(u"abli", -1, 4),
Among(u"eli", -1, 6),
Among(u"alli", -1, 9),
Among(u"ousli", -1, 12),
Among(u"entli", -1, 5),
Among(u"aliti", -1, 10),
Among(u"biliti", -1, 14),
Among(u"iviti", -1, 13),
Among(u"tional", -1, 1),
Among(u"ational", 10, 8),
Among(u"alism", -1, 10),
Among(u"ation", -1, 8),
Among(u"ization", 13, 7),
Among(u"izer", -1, 7),
Among(u"ator", -1, 8),
Among(u"iveness", -1, 13),
Among(u"fulness", -1, 11),
Among(u"ousness", -1, 12)
]
a_4 = [
Among(u"icate", -1, 2),
Among(u"ative", -1, 3),
Among(u"alize", -1, 1),
Among(u"iciti", -1, 2),
Among(u"ical", -1, 2),
Among(u"ful", -1, 3),
Among(u"ness", -1, 3)
]
a_5 = [
Among(u"ic", -1, 1),
Among(u"ance", -1, 1),
Among(u"ence", -1, 1),
Among(u"able", -1, 1),
Among(u"ible", -1, 1),
Among(u"ate", -1, 1),
Among(u"ive", -1, 1),
Among(u"ize", -1, 1),
Among(u"iti", -1, 1),
Among(u"al", -1, 1),
Among(u"ism", -1, 1),
Among(u"ion", -1, 2),
Among(u"er", -1, 1),
Among(u"ous", -1, 1),
Among(u"ant", -1, 1),
Among(u"ent", -1, 1),
Among(u"ment", 15, 1),
Among(u"ement", 16, 1),
Among(u"ou", -1, 1)
]
g_v = [17, 65, 16, 1]
g_v_WXY = [1, 17, 65, 208, 1]
B_Y_found = False
I_p2 = 0
I_p1 = 0
def copy_from(self, other):
self.B_Y_found = other.B_Y_found
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
        super().copy_from(other)
def r_shortv(self):
# (, line 19
if not self.out_grouping_b(PorterStemmer.g_v_WXY, 89, 121):
return False
if not self.in_grouping_b(PorterStemmer.g_v, 97, 121):
return False
if not self.out_grouping_b(PorterStemmer.g_v, 97, 121):
return False
return True
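    # Comment added for clarity: reading backwards, r_shortv succeeds when
    # the word ends consonant-vowel-consonant with the final consonant not
    # w, x or Y (e.g. u"hop"), the *o condition from Porter's paper.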
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_Step_1a(self):
# (, line 24
# [, line 25
self.ket = self.cursor
# substring, line 25
among_var = self.find_among_b(PorterStemmer.a_0, 4)
if among_var == 0:
return False
# ], line 25
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 26
# <-, line 26
if not self.slice_from(u"ss"):
return False
elif among_var == 2:
# (, line 27
# <-, line 27
if not self.slice_from(u"i"):
return False
elif among_var == 3:
# (, line 29
# delete, line 29
if not self.slice_del():
return False
return True
def r_Step_1b(self):
# (, line 33
# [, line 34
self.ket = self.cursor
# substring, line 34
among_var = self.find_among_b(PorterStemmer.a_2, 3)
if among_var == 0:
return False
# ], line 34
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 35
# call R1, line 35
if not self.r_R1():
return False
# <-, line 35
if not self.slice_from(u"ee"):
return False
elif among_var == 2:
# (, line 37
# test, line 38
v_1 = self.limit - self.cursor
# gopast, line 38
try:
while True:
try:
if not self.in_grouping_b(PorterStemmer.g_v, 97, 121):
raise lab1()
raise lab0()
except lab1: pass
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab0: pass
self.cursor = self.limit - v_1
# delete, line 38
if not self.slice_del():
return False
# test, line 39
v_3 = self.limit - self.cursor
# substring, line 39
among_var = self.find_among_b(PorterStemmer.a_1, 13)
if among_var == 0:
return False
self.cursor = self.limit - v_3
if among_var == 0:
return False
elif among_var == 1:
# (, line 41
# <+, line 41
c = self.cursor
self.insert(self.cursor, self.cursor, u"e")
self.cursor = c
elif among_var == 2:
# (, line 44
# [, line 44
self.ket = self.cursor
# next, line 44
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 44
self.bra = self.cursor
# delete, line 44
if not self.slice_del():
return False
elif among_var == 3:
# (, line 45
# atmark, line 45
if self.cursor != self.I_p1:
return False
# test, line 45
v_4 = self.limit - self.cursor
# call shortv, line 45
if not self.r_shortv():
return False
self.cursor = self.limit - v_4
# <+, line 45
c = self.cursor
self.insert(self.cursor, self.cursor, u"e")
self.cursor = c
return True
def r_Step_1c(self):
# (, line 51
# [, line 52
self.ket = self.cursor
# or, line 52
try:
v_1 = self.limit - self.cursor
try:
# literal, line 52
if not self.eq_s_b(1, u"y"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# literal, line 52
if not self.eq_s_b(1, u"Y"):
return False
except lab0: pass
# ], line 52
self.bra = self.cursor
# gopast, line 53
try:
while True:
try:
if not self.in_grouping_b(PorterStemmer.g_v, 97, 121):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab2: pass
# <-, line 54
if not self.slice_from(u"i"):
return False
return True
def r_Step_2(self):
# (, line 57
# [, line 58
self.ket = self.cursor
# substring, line 58
among_var = self.find_among_b(PorterStemmer.a_3, 20)
if among_var == 0:
return False
# ], line 58
self.bra = self.cursor
# call R1, line 58
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 59
# <-, line 59
if not self.slice_from(u"tion"):
return False
elif among_var == 2:
# (, line 60
# <-, line 60
if not self.slice_from(u"ence"):
return False
elif among_var == 3:
# (, line 61
# <-, line 61
if not self.slice_from(u"ance"):
return False
elif among_var == 4:
# (, line 62
# <-, line 62
if not self.slice_from(u"able"):
return False
elif among_var == 5:
# (, line 63
# <-, line 63
if not self.slice_from(u"ent"):
return False
elif among_var == 6:
# (, line 64
# <-, line 64
if not self.slice_from(u"e"):
return False
elif among_var == 7:
# (, line 66
# <-, line 66
if not self.slice_from(u"ize"):
return False
elif among_var == 8:
# (, line 68
# <-, line 68
if not self.slice_from(u"ate"):
return False
elif among_var == 9:
# (, line 69
# <-, line 69
if not self.slice_from(u"al"):
return False
elif among_var == 10:
# (, line 71
# <-, line 71
if not self.slice_from(u"al"):
return False
elif among_var == 11:
# (, line 72
# <-, line 72
if not self.slice_from(u"ful"):
return False
elif among_var == 12:
# (, line 74
# <-, line 74
if not self.slice_from(u"ous"):
return False
elif among_var == 13:
# (, line 76
# <-, line 76
if not self.slice_from(u"ive"):
return False
elif among_var == 14:
# (, line 77
# <-, line 77
if not self.slice_from(u"ble"):
return False
return True
def r_Step_3(self):
# (, line 81
# [, line 82
self.ket = self.cursor
# substring, line 82
among_var = self.find_among_b(PorterStemmer.a_4, 7)
if among_var == 0:
return False
# ], line 82
self.bra = self.cursor
# call R1, line 82
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 83
# <-, line 83
if not self.slice_from(u"al"):
return False
elif among_var == 2:
# (, line 85
# <-, line 85
if not self.slice_from(u"ic"):
return False
elif among_var == 3:
# (, line 87
# delete, line 87
if not self.slice_del():
return False
return True
def r_Step_4(self):
# (, line 91
# [, line 92
self.ket = self.cursor
# substring, line 92
among_var = self.find_among_b(PorterStemmer.a_5, 19)
if among_var == 0:
return False
# ], line 92
self.bra = self.cursor
# call R2, line 92
if not self.r_R2():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 95
# delete, line 95
if not self.slice_del():
return False
elif among_var == 2:
# (, line 96
# or, line 96
try:
v_1 = self.limit - self.cursor
try:
# literal, line 96
if not self.eq_s_b(1, u"s"):
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# literal, line 96
if not self.eq_s_b(1, u"t"):
return False
except lab0: pass
# delete, line 96
if not self.slice_del():
return False
return True
def r_Step_5a(self):
# (, line 100
# [, line 101
self.ket = self.cursor
# literal, line 101
if not self.eq_s_b(1, u"e"):
return False
# ], line 101
self.bra = self.cursor
# or, line 102
try:
v_1 = self.limit - self.cursor
try:
# call R2, line 102
if not self.r_R2():
raise lab1()
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 102
# call R1, line 102
if not self.r_R1():
return False
# not, line 102
v_2 = self.limit - self.cursor
try:
# call shortv, line 102
if not self.r_shortv():
raise lab2()
return False
except lab2: pass
self.cursor = self.limit - v_2
except lab0: pass
# delete, line 103
if not self.slice_del():
return False
return True
def r_Step_5b(self):
# (, line 106
# [, line 107
self.ket = self.cursor
# literal, line 107
if not self.eq_s_b(1, u"l"):
return False
# ], line 107
self.bra = self.cursor
# call R2, line 108
if not self.r_R2():
return False
# literal, line 108
if not self.eq_s_b(1, u"l"):
return False
# delete, line 109
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 113
# unset Y_found, line 115
self.B_Y_found = False
# do, line 116
v_1 = self.cursor
try:
# (, line 116
# [, line 116
self.bra = self.cursor
# literal, line 116
if not self.eq_s(1, u"y"):
raise lab0()
# ], line 116
self.ket = self.cursor
# <-, line 116
if not self.slice_from(u"Y"):
return False
# set Y_found, line 116
self.B_Y_found = True
except lab0: pass
self.cursor = v_1
# do, line 117
v_2 = self.cursor
try:
# repeat, line 117
try:
while True:
try:
v_3 = self.cursor
try:
# (, line 117
# goto, line 117
try:
while True:
v_4 = self.cursor
try:
# (, line 117
if not self.in_grouping(PorterStemmer.g_v, 97, 121):
raise lab6()
# [, line 117
self.bra = self.cursor
# literal, line 117
if not self.eq_s(1, u"y"):
raise lab6()
# ], line 117
self.ket = self.cursor
self.cursor = v_4
raise lab5()
except lab6: pass
self.cursor = v_4
if self.cursor >= self.limit:
raise lab4()
self.cursor += 1
except lab5: pass
# <-, line 117
if not self.slice_from(u"Y"):
return False
# set Y_found, line 117
self.B_Y_found = True
raise lab3()
except lab4: pass
self.cursor = v_3
raise lab2()
except lab3: pass
except lab2: pass
except lab1: pass
self.cursor = v_2
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 121
v_5 = self.cursor
try:
# (, line 121
# gopast, line 122
try:
while True:
try:
if not self.in_grouping(PorterStemmer.g_v, 97, 121):
raise lab9()
raise lab8()
except lab9: pass
if self.cursor >= self.limit:
raise lab7()
self.cursor += 1
except lab8: pass
# gopast, line 122
try:
while True:
try:
if not self.out_grouping(PorterStemmer.g_v, 97, 121):
raise lab11()
raise lab10()
except lab11: pass
if self.cursor >= self.limit:
raise lab7()
self.cursor += 1
except lab10: pass
# setmark p1, line 122
self.I_p1 = self.cursor
# gopast, line 123
try:
while True:
try:
if not self.in_grouping(PorterStemmer.g_v, 97, 121):
raise lab13()
raise lab12()
except lab13: pass
if self.cursor >= self.limit:
raise lab7()
self.cursor += 1
except lab12: pass
# gopast, line 123
try:
while True:
try:
if not self.out_grouping(PorterStemmer.g_v, 97, 121):
raise lab15()
raise lab14()
except lab15: pass
if self.cursor >= self.limit:
raise lab7()
self.cursor += 1
except lab14: pass
# setmark p2, line 123
self.I_p2 = self.cursor
except lab7: pass
self.cursor = v_5
# backwards, line 126
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 126
# do, line 127
v_10 = self.limit - self.cursor
try:
# call Step_1a, line 127
if not self.r_Step_1a():
raise lab16()
except lab16: pass
self.cursor = self.limit - v_10
# do, line 128
v_11 = self.limit - self.cursor
try:
# call Step_1b, line 128
if not self.r_Step_1b():
raise lab17()
except lab17: pass
self.cursor = self.limit - v_11
# do, line 129
v_12 = self.limit - self.cursor
try:
# call Step_1c, line 129
if not self.r_Step_1c():
raise lab18()
except lab18: pass
self.cursor = self.limit - v_12
# do, line 130
v_13 = self.limit - self.cursor
try:
# call Step_2, line 130
if not self.r_Step_2():
raise lab19()
except lab19: pass
self.cursor = self.limit - v_13
# do, line 131
v_14 = self.limit - self.cursor
try:
# call Step_3, line 131
if not self.r_Step_3():
raise lab20()
except lab20: pass
self.cursor = self.limit - v_14
# do, line 132
v_15 = self.limit - self.cursor
try:
# call Step_4, line 132
if not self.r_Step_4():
raise lab21()
except lab21: pass
self.cursor = self.limit - v_15
# do, line 133
v_16 = self.limit - self.cursor
try:
# call Step_5a, line 133
if not self.r_Step_5a():
raise lab22()
except lab22: pass
self.cursor = self.limit - v_16
# do, line 134
v_17 = self.limit - self.cursor
try:
# call Step_5b, line 134
if not self.r_Step_5b():
raise lab23()
except lab23: pass
self.cursor = self.limit - v_17
self.cursor = self.limit_backward
# do, line 137
v_18 = self.cursor
try:
# (, line 137
# Boolean test Y_found, line 137
if not self.B_Y_found:
raise lab24()
# repeat, line 137
try:
while True:
try:
v_19 = self.cursor
try:
# (, line 137
# goto, line 137
try:
while True:
v_20 = self.cursor
try:
# (, line 137
# [, line 137
self.bra = self.cursor
# literal, line 137
if not self.eq_s(1, u"Y"):
raise lab29()
# ], line 137
self.ket = self.cursor
self.cursor = v_20
raise lab28()
except lab29: pass
self.cursor = v_20
if self.cursor >= self.limit:
raise lab27()
self.cursor += 1
except lab28: pass
# <-, line 137
if not self.slice_from(u"y"):
return False
raise lab26()
except lab27: pass
self.cursor = v_19
raise lab25()
except lab26: pass
except lab25: pass
except lab24: pass
self.cursor = v_18
return True
def equals(self, o):
return isinstance(o, PorterStemmer)
def hashCode(self):
return hash("PorterStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
class lab16(BaseException): pass
class lab17(BaseException): pass
class lab18(BaseException): pass
class lab19(BaseException): pass
class lab20(BaseException): pass
class lab21(BaseException): pass
class lab22(BaseException): pass
class lab23(BaseException): pass
class lab24(BaseException): pass
class lab25(BaseException): pass
class lab26(BaseException): pass
class lab27(BaseException): pass
class lab28(BaseException): pass
class lab29(BaseException): pass
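# Side-by-side sketch, added for illustration (not part of the generated
# output); it assumes both vendored classes expose stemWord() like the
# upstream snowballstemmer package. Run it as a module, e.g.
# `python -m anaconda_lib.snowballstemmer.porter_stemmer`, so that the
# relative import resolves.
if __name__ == "__main__":
    from .english_stemmer import EnglishStemmer
    porter, english = PorterStemmer(), EnglishStemmer()
    for word in (u"dying", u"skies", u"early"):
        # The classic Porter algorithm and the Porter2 ("english") variant
        # can diverge on words like these, which Porter2 routes through its
        # exception list (a_10 in english_stemmer.py).
        print(word, porter.stemWord(word), english.stemWord(word))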
# ---- DamnWidget_anaconda/anaconda_lib/snowballstemmer/french_stemmer.py ----
# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class FrenchStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"col", -1, -1),
Among(u"par", -1, -1),
Among(u"tap", -1, -1)
]
a_1 = [
Among(u"", -1, 4),
Among(u"I", 0, 1),
Among(u"U", 0, 2),
Among(u"Y", 0, 3)
]
a_2 = [
Among(u"iqU", -1, 3),
Among(u"abl", -1, 3),
Among(u"I\u00E8r", -1, 4),
Among(u"i\u00E8r", -1, 4),
Among(u"eus", -1, 2),
Among(u"iv", -1, 1)
]
a_3 = [
Among(u"ic", -1, 2),
Among(u"abil", -1, 1),
Among(u"iv", -1, 3)
]
a_4 = [
Among(u"iqUe", -1, 1),
Among(u"atrice", -1, 2),
Among(u"ance", -1, 1),
Among(u"ence", -1, 5),
Among(u"logie", -1, 3),
Among(u"able", -1, 1),
Among(u"isme", -1, 1),
Among(u"euse", -1, 11),
Among(u"iste", -1, 1),
Among(u"ive", -1, 8),
Among(u"if", -1, 8),
Among(u"usion", -1, 4),
Among(u"ation", -1, 2),
Among(u"ution", -1, 4),
Among(u"ateur", -1, 2),
Among(u"iqUes", -1, 1),
Among(u"atrices", -1, 2),
Among(u"ances", -1, 1),
Among(u"ences", -1, 5),
Among(u"logies", -1, 3),
Among(u"ables", -1, 1),
Among(u"ismes", -1, 1),
Among(u"euses", -1, 11),
Among(u"istes", -1, 1),
Among(u"ives", -1, 8),
Among(u"ifs", -1, 8),
Among(u"usions", -1, 4),
Among(u"ations", -1, 2),
Among(u"utions", -1, 4),
Among(u"ateurs", -1, 2),
Among(u"ments", -1, 15),
Among(u"ements", 30, 6),
Among(u"issements", 31, 12),
Among(u"it\u00E9s", -1, 7),
Among(u"ment", -1, 15),
Among(u"ement", 34, 6),
Among(u"issement", 35, 12),
Among(u"amment", 34, 13),
Among(u"emment", 34, 14),
Among(u"aux", -1, 10),
Among(u"eaux", 39, 9),
Among(u"eux", -1, 1),
Among(u"it\u00E9", -1, 7)
]
a_5 = [
Among(u"ira", -1, 1),
Among(u"ie", -1, 1),
Among(u"isse", -1, 1),
Among(u"issante", -1, 1),
Among(u"i", -1, 1),
Among(u"irai", 4, 1),
Among(u"ir", -1, 1),
Among(u"iras", -1, 1),
Among(u"ies", -1, 1),
Among(u"\u00EEmes", -1, 1),
Among(u"isses", -1, 1),
Among(u"issantes", -1, 1),
Among(u"\u00EEtes", -1, 1),
Among(u"is", -1, 1),
Among(u"irais", 13, 1),
Among(u"issais", 13, 1),
Among(u"irions", -1, 1),
Among(u"issions", -1, 1),
Among(u"irons", -1, 1),
Among(u"issons", -1, 1),
Among(u"issants", -1, 1),
Among(u"it", -1, 1),
Among(u"irait", 21, 1),
Among(u"issait", 21, 1),
Among(u"issant", -1, 1),
Among(u"iraIent", -1, 1),
Among(u"issaIent", -1, 1),
Among(u"irent", -1, 1),
Among(u"issent", -1, 1),
Among(u"iront", -1, 1),
Among(u"\u00EEt", -1, 1),
Among(u"iriez", -1, 1),
Among(u"issiez", -1, 1),
Among(u"irez", -1, 1),
Among(u"issez", -1, 1)
]
a_6 = [
Among(u"a", -1, 3),
Among(u"era", 0, 2),
Among(u"asse", -1, 3),
Among(u"ante", -1, 3),
Among(u"\u00E9e", -1, 2),
Among(u"ai", -1, 3),
Among(u"erai", 5, 2),
Among(u"er", -1, 2),
Among(u"as", -1, 3),
Among(u"eras", 8, 2),
Among(u"\u00E2mes", -1, 3),
Among(u"asses", -1, 3),
Among(u"antes", -1, 3),
Among(u"\u00E2tes", -1, 3),
Among(u"\u00E9es", -1, 2),
Among(u"ais", -1, 3),
Among(u"erais", 15, 2),
Among(u"ions", -1, 1),
Among(u"erions", 17, 2),
Among(u"assions", 17, 3),
Among(u"erons", -1, 2),
Among(u"ants", -1, 3),
Among(u"\u00E9s", -1, 2),
Among(u"ait", -1, 3),
Among(u"erait", 23, 2),
Among(u"ant", -1, 3),
Among(u"aIent", -1, 3),
Among(u"eraIent", 26, 2),
Among(u"\u00E8rent", -1, 2),
Among(u"assent", -1, 3),
Among(u"eront", -1, 2),
Among(u"\u00E2t", -1, 3),
Among(u"ez", -1, 2),
Among(u"iez", 32, 2),
Among(u"eriez", 33, 2),
Among(u"assiez", 33, 3),
Among(u"erez", 32, 2),
Among(u"\u00E9", -1, 2)
]
a_7 = [
Among(u"e", -1, 3),
Among(u"I\u00E8re", 0, 2),
Among(u"i\u00E8re", 0, 2),
Among(u"ion", -1, 1),
Among(u"Ier", -1, 2),
Among(u"ier", -1, 2),
Among(u"\u00EB", -1, 4)
]
a_8 = [
Among(u"ell", -1, -1),
Among(u"eill", -1, -1),
Among(u"enn", -1, -1),
Among(u"onn", -1, -1),
Among(u"ett", -1, -1)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 130, 103, 8, 5]
g_keep_with_s = [1, 65, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
I_p2 = 0
I_p1 = 0
I_pV = 0
def copy_from(self, other):
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.I_pV = other.I_pV
        super().copy_from(other)
def r_prelude(self):
# repeat, line 38
try:
while True:
try:
v_1 = self.cursor
try:
# goto, line 38
try:
while True:
v_2 = self.cursor
try:
# (, line 38
# or, line 44
try:
v_3 = self.cursor
try:
# (, line 40
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab6()
# [, line 40
self.bra = self.cursor
# or, line 40
try:
v_4 = self.cursor
try:
# (, line 40
# literal, line 40
if not self.eq_s(1, u"u"):
raise lab8()
# ], line 40
self.ket = self.cursor
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab8()
# <-, line 40
if not self.slice_from(u"U"):
return False
raise lab7()
except lab8: pass
self.cursor = v_4
try:
# (, line 41
# literal, line 41
if not self.eq_s(1, u"i"):
raise lab9()
# ], line 41
self.ket = self.cursor
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab9()
# <-, line 41
if not self.slice_from(u"I"):
return False
raise lab7()
except lab9: pass
self.cursor = v_4
# (, line 42
# literal, line 42
if not self.eq_s(1, u"y"):
raise lab6()
# ], line 42
self.ket = self.cursor
# <-, line 42
if not self.slice_from(u"Y"):
return False
except lab7: pass
raise lab5()
except lab6: pass
self.cursor = v_3
try:
# (, line 45
# [, line 45
self.bra = self.cursor
# literal, line 45
if not self.eq_s(1, u"y"):
raise lab10()
# ], line 45
self.ket = self.cursor
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab10()
# <-, line 45
if not self.slice_from(u"Y"):
return False
raise lab5()
except lab10: pass
self.cursor = v_3
# (, line 47
# literal, line 47
if not self.eq_s(1, u"q"):
raise lab4()
# [, line 47
self.bra = self.cursor
# literal, line 47
if not self.eq_s(1, u"u"):
raise lab4()
# ], line 47
self.ket = self.cursor
# <-, line 47
if not self.slice_from(u"U"):
return False
except lab5: pass
self.cursor = v_2
raise lab3()
except lab4: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_mark_regions(self):
# (, line 50
        self.I_pV = self.limit
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 56
v_1 = self.cursor
try:
# (, line 56
# or, line 58
try:
v_2 = self.cursor
try:
# (, line 57
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab2()
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab2()
# next, line 57
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_2
try:
# among, line 59
if self.find_among(FrenchStemmer.a_0, 3) == 0:
raise lab3()
raise lab1()
except lab3: pass
self.cursor = v_2
# (, line 66
# next, line 66
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
# gopast, line 66
try:
while True:
try:
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab5()
raise lab4()
except lab5: pass
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab4: pass
except lab1: pass
# setmark pV, line 67
self.I_pV = self.cursor
except lab0: pass
self.cursor = v_1
# do, line 69
v_4 = self.cursor
try:
# (, line 69
# gopast, line 70
try:
while True:
try:
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab6()
self.cursor += 1
except lab7: pass
# gopast, line 70
try:
while True:
try:
if not self.out_grouping(FrenchStemmer.g_v, 97, 251):
raise lab10()
raise lab9()
except lab10: pass
if self.cursor >= self.limit:
raise lab6()
self.cursor += 1
except lab9: pass
# setmark p1, line 70
self.I_p1 = self.cursor
# gopast, line 71
try:
while True:
try:
if not self.in_grouping(FrenchStemmer.g_v, 97, 251):
raise lab12()
raise lab11()
except lab12: pass
if self.cursor >= self.limit:
raise lab6()
self.cursor += 1
except lab11: pass
# gopast, line 71
try:
while True:
try:
if not self.out_grouping(FrenchStemmer.g_v, 97, 251):
raise lab14()
raise lab13()
except lab14: pass
if self.cursor >= self.limit:
raise lab6()
self.cursor += 1
except lab13: pass
# setmark p2, line 71
self.I_p2 = self.cursor
except lab6: pass
self.cursor = v_4
return True
def r_postlude(self):
# repeat, line 75
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 75
# [, line 77
self.bra = self.cursor
# substring, line 77
among_var = self.find_among(FrenchStemmer.a_1, 4)
if among_var == 0:
raise lab2()
# ], line 77
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 78
# <-, line 78
if not self.slice_from(u"i"):
return False
elif among_var == 2:
# (, line 79
# <-, line 79
if not self.slice_from(u"u"):
return False
elif among_var == 3:
# (, line 80
# <-, line 80
if not self.slice_from(u"y"):
return False
elif among_var == 4:
# (, line 81
# next, line 81
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
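    # The three guards below encode Snowball's region conditions: a candidate
    # suffix may only be touched if its start position (the cursor, counted
    # from the right) falls inside RV, R1 or R2 as computed by
    # r_mark_regions.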
def r_RV(self):
if not self.I_pV <= self.cursor:
return False
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_standard_suffix(self):
# (, line 91
# [, line 92
self.ket = self.cursor
# substring, line 92
among_var = self.find_among_b(FrenchStemmer.a_4, 43)
if among_var == 0:
return False
# ], line 92
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 96
# call R2, line 96
if not self.r_R2():
return False
# delete, line 96
if not self.slice_del():
return False
elif among_var == 2:
# (, line 99
# call R2, line 99
if not self.r_R2():
return False
# delete, line 99
if not self.slice_del():
return False
# try, line 100
v_1 = self.limit - self.cursor
try:
# (, line 100
# [, line 100
self.ket = self.cursor
# literal, line 100
if not self.eq_s_b(2, u"ic"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 100
self.bra = self.cursor
# or, line 100
try:
v_2 = self.limit - self.cursor
try:
# (, line 100
# call R2, line 100
if not self.r_R2():
raise lab2()
# delete, line 100
if not self.slice_del():
return False
raise lab1()
except lab2: pass
self.cursor = self.limit - v_2
# <-, line 100
if not self.slice_from(u"iqU"):
return False
except lab1: pass
except lab0: pass
elif among_var == 3:
# (, line 104
# call R2, line 104
if not self.r_R2():
return False
# <-, line 104
if not self.slice_from(u"log"):
return False
elif among_var == 4:
# (, line 107
# call R2, line 107
if not self.r_R2():
return False
# <-, line 107
if not self.slice_from(u"u"):
return False
elif among_var == 5:
# (, line 110
# call R2, line 110
if not self.r_R2():
return False
# <-, line 110
if not self.slice_from(u"ent"):
return False
elif among_var == 6:
# (, line 113
# call RV, line 114
if not self.r_RV():
return False
# delete, line 114
if not self.slice_del():
return False
# try, line 115
v_3 = self.limit - self.cursor
try:
# (, line 115
# [, line 116
self.ket = self.cursor
# substring, line 116
among_var = self.find_among_b(FrenchStemmer.a_2, 6)
if among_var == 0:
self.cursor = self.limit - v_3
raise lab3()
# ], line 116
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_3
raise lab3()
elif among_var == 1:
# (, line 117
# call R2, line 117
if not self.r_R2():
self.cursor = self.limit - v_3
raise lab3()
# delete, line 117
if not self.slice_del():
return False
# [, line 117
self.ket = self.cursor
# literal, line 117
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_3
raise lab3()
# ], line 117
self.bra = self.cursor
# call R2, line 117
if not self.r_R2():
self.cursor = self.limit - v_3
raise lab3()
# delete, line 117
if not self.slice_del():
return False
elif among_var == 2:
# (, line 118
# or, line 118
try:
v_4 = self.limit - self.cursor
try:
# (, line 118
# call R2, line 118
if not self.r_R2():
raise lab5()
# delete, line 118
if not self.slice_del():
return False
raise lab4()
except lab5: pass
self.cursor = self.limit - v_4
# (, line 118
# call R1, line 118
if not self.r_R1():
self.cursor = self.limit - v_3
raise lab3()
# <-, line 118
if not self.slice_from(u"eux"):
return False
except lab4: pass
elif among_var == 3:
# (, line 120
# call R2, line 120
if not self.r_R2():
self.cursor = self.limit - v_3
raise lab3()
# delete, line 120
if not self.slice_del():
return False
elif among_var == 4:
# (, line 122
# call RV, line 122
if not self.r_RV():
self.cursor = self.limit - v_3
raise lab3()
# <-, line 122
if not self.slice_from(u"i"):
return False
except lab3: pass
elif among_var == 7:
# (, line 128
# call R2, line 129
if not self.r_R2():
return False
# delete, line 129
if not self.slice_del():
return False
# try, line 130
v_5 = self.limit - self.cursor
try:
# (, line 130
# [, line 131
self.ket = self.cursor
# substring, line 131
among_var = self.find_among_b(FrenchStemmer.a_3, 3)
if among_var == 0:
self.cursor = self.limit - v_5
raise lab6()
# ], line 131
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_5
raise lab6()
elif among_var == 1:
# (, line 132
# or, line 132
try:
v_6 = self.limit - self.cursor
try:
# (, line 132
# call R2, line 132
if not self.r_R2():
raise lab8()
# delete, line 132
if not self.slice_del():
return False
raise lab7()
except lab8: pass
self.cursor = self.limit - v_6
# <-, line 132
if not self.slice_from(u"abl"):
return False
except lab7: pass
elif among_var == 2:
# (, line 133
# or, line 133
try:
v_7 = self.limit - self.cursor
try:
# (, line 133
# call R2, line 133
if not self.r_R2():
raise lab10()
# delete, line 133
if not self.slice_del():
return False
raise lab9()
except lab10: pass
self.cursor = self.limit - v_7
# <-, line 133
if not self.slice_from(u"iqU"):
return False
except lab9: pass
elif among_var == 3:
# (, line 134
# call R2, line 134
if not self.r_R2():
self.cursor = self.limit - v_5
raise lab6()
# delete, line 134
if not self.slice_del():
return False
except lab6: pass
elif among_var == 8:
# (, line 140
# call R2, line 141
if not self.r_R2():
return False
# delete, line 141
if not self.slice_del():
return False
# try, line 142
v_8 = self.limit - self.cursor
try:
# (, line 142
# [, line 142
self.ket = self.cursor
# literal, line 142
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_8
raise lab11()
# ], line 142
self.bra = self.cursor
# call R2, line 142
if not self.r_R2():
self.cursor = self.limit - v_8
raise lab11()
# delete, line 142
if not self.slice_del():
return False
# [, line 142
self.ket = self.cursor
# literal, line 142
if not self.eq_s_b(2, u"ic"):
self.cursor = self.limit - v_8
raise lab11()
# ], line 142
self.bra = self.cursor
# or, line 142
try:
v_9 = self.limit - self.cursor
try:
# (, line 142
# call R2, line 142
if not self.r_R2():
raise lab13()
# delete, line 142
if not self.slice_del():
return False
raise lab12()
except lab13: pass
self.cursor = self.limit - v_9
# <-, line 142
if not self.slice_from(u"iqU"):
return False
except lab12: pass
except lab11: pass
elif among_var == 9:
# (, line 144
# <-, line 144
if not self.slice_from(u"eau"):
return False
elif among_var == 10:
# (, line 145
# call R1, line 145
if not self.r_R1():
return False
# <-, line 145
if not self.slice_from(u"al"):
return False
elif among_var == 11:
# (, line 147
# or, line 147
try:
v_10 = self.limit - self.cursor
try:
# (, line 147
# call R2, line 147
if not self.r_R2():
raise lab15()
# delete, line 147
if not self.slice_del():
return False
raise lab14()
except lab15: pass
self.cursor = self.limit - v_10
# (, line 147
# call R1, line 147
if not self.r_R1():
return False
# <-, line 147
if not self.slice_from(u"eux"):
return False
except lab14: pass
elif among_var == 12:
# (, line 150
# call R1, line 150
if not self.r_R1():
return False
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
return False
# delete, line 150
if not self.slice_del():
return False
elif among_var == 13:
# (, line 155
# call RV, line 155
if not self.r_RV():
return False
# fail, line 155
# (, line 155
# <-, line 155
if not self.slice_from(u"ant"):
return False
return False
elif among_var == 14:
# (, line 156
# call RV, line 156
if not self.r_RV():
return False
# fail, line 156
# (, line 156
# <-, line 156
if not self.slice_from(u"ent"):
return False
return False
elif among_var == 15:
# (, line 158
# test, line 158
v_11 = self.limit - self.cursor
# (, line 158
if not self.in_grouping_b(FrenchStemmer.g_v, 97, 251):
return False
# call RV, line 158
if not self.r_RV():
return False
self.cursor = self.limit - v_11
# fail, line 158
# (, line 158
# delete, line 158
if not self.slice_del():
return False
return False
return True
def r_i_verb_suffix(self):
# setlimit, line 163
v_1 = self.limit - self.cursor
# tomark, line 163
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 163
# [, line 164
self.ket = self.cursor
# substring, line 164
among_var = self.find_among_b(FrenchStemmer.a_5, 35)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 164
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 170
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
self.limit_backward = v_2
return False
# delete, line 170
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def r_verb_suffix(self):
# setlimit, line 174
v_1 = self.limit - self.cursor
# tomark, line 174
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 174
# [, line 175
self.ket = self.cursor
# substring, line 175
among_var = self.find_among_b(FrenchStemmer.a_6, 38)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 175
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 177
# call R2, line 177
if not self.r_R2():
self.limit_backward = v_2
return False
# delete, line 177
if not self.slice_del():
return False
elif among_var == 2:
# (, line 185
# delete, line 185
if not self.slice_del():
return False
elif among_var == 3:
# (, line 190
# delete, line 190
if not self.slice_del():
return False
# try, line 191
v_3 = self.limit - self.cursor
try:
# (, line 191
# [, line 191
self.ket = self.cursor
# literal, line 191
if not self.eq_s_b(1, u"e"):
self.cursor = self.limit - v_3
raise lab0()
# ], line 191
self.bra = self.cursor
# delete, line 191
if not self.slice_del():
return False
except lab0: pass
self.limit_backward = v_2
return True
def r_residual_suffix(self):
# (, line 198
# try, line 199
v_1 = self.limit - self.cursor
try:
# (, line 199
# [, line 199
self.ket = self.cursor
# literal, line 199
if not self.eq_s_b(1, u"s"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 199
self.bra = self.cursor
# test, line 199
v_2 = self.limit - self.cursor
if not self.out_grouping_b(FrenchStemmer.g_keep_with_s, 97, 232):
self.cursor = self.limit - v_1
raise lab0()
self.cursor = self.limit - v_2
# delete, line 199
if not self.slice_del():
return False
except lab0: pass
# setlimit, line 200
v_3 = self.limit - self.cursor
# tomark, line 200
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_4 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_3
# (, line 200
# [, line 201
self.ket = self.cursor
# substring, line 201
among_var = self.find_among_b(FrenchStemmer.a_7, 7)
if among_var == 0:
self.limit_backward = v_4
return False
# ], line 201
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_4
return False
elif among_var == 1:
# (, line 202
# call R2, line 202
if not self.r_R2():
self.limit_backward = v_4
return False
# or, line 202
try:
v_5 = self.limit - self.cursor
try:
# literal, line 202
if not self.eq_s_b(1, u"s"):
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_5
# literal, line 202
if not self.eq_s_b(1, u"t"):
self.limit_backward = v_4
return False
except lab1: pass
# delete, line 202
if not self.slice_del():
return False
elif among_var == 2:
# (, line 204
# <-, line 204
if not self.slice_from(u"i"):
return False
elif among_var == 3:
# (, line 205
# delete, line 205
if not self.slice_del():
return False
elif among_var == 4:
# (, line 206
# literal, line 206
if not self.eq_s_b(2, u"gu"):
self.limit_backward = v_4
return False
# delete, line 206
if not self.slice_del():
return False
self.limit_backward = v_4
return True
def r_un_double(self):
# (, line 211
# test, line 212
v_1 = self.limit - self.cursor
# among, line 212
if self.find_among_b(FrenchStemmer.a_8, 5) == 0:
return False
self.cursor = self.limit - v_1
# [, line 212
self.ket = self.cursor
# next, line 212
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 212
self.bra = self.cursor
# delete, line 212
if not self.slice_del():
return False
return True
def r_un_accent(self):
# (, line 215
# atleast, line 216
v_1 = 1
# atleast, line 216
try:
while True:
try:
try:
if not self.out_grouping_b(FrenchStemmer.g_v, 97, 251):
raise lab2()
v_1 -= 1
raise lab1()
except lab2: pass
raise lab0()
except lab1: pass
except lab0: pass
if v_1 > 0:
return False
# [, line 217
self.ket = self.cursor
# or, line 217
try:
v_3 = self.limit - self.cursor
try:
# literal, line 217
if not self.eq_s_b(1, u"\u00E9"):
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_3
# literal, line 217
if not self.eq_s_b(1, u"\u00E8"):
return False
except lab3: pass
# ], line 217
self.bra = self.cursor
# <-, line 217
if not self.slice_from(u"e"):
return False
return True
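    # _stem drives the whole algorithm: prelude (accent and U/I/Y markup),
    # mark_regions, then a backwards pass trying the standard, i-verb, verb
    # and residual suffix rules followed by undoubling and accent removal,
    # and finally postlude, which lowercases the markers again.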
def _stem(self):
# (, line 221
# do, line 223
v_1 = self.cursor
try:
# call prelude, line 223
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 224
v_2 = self.cursor
try:
# call mark_regions, line 224
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 225
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 225
# do, line 227
v_3 = self.limit - self.cursor
try:
# (, line 227
# or, line 237
try:
v_4 = self.limit - self.cursor
try:
# (, line 228
# and, line 233
v_5 = self.limit - self.cursor
# (, line 229
# or, line 229
try:
v_6 = self.limit - self.cursor
try:
# call standard_suffix, line 229
if not self.r_standard_suffix():
raise lab6()
raise lab5()
except lab6: pass
self.cursor = self.limit - v_6
try:
# call i_verb_suffix, line 230
if not self.r_i_verb_suffix():
raise lab7()
raise lab5()
except lab7: pass
self.cursor = self.limit - v_6
# call verb_suffix, line 231
if not self.r_verb_suffix():
raise lab4()
except lab5: pass
self.cursor = self.limit - v_5
# try, line 234
v_7 = self.limit - self.cursor
try:
# (, line 234
# [, line 234
self.ket = self.cursor
# or, line 234
try:
v_8 = self.limit - self.cursor
try:
# (, line 234
# literal, line 234
if not self.eq_s_b(1, u"Y"):
raise lab10()
# ], line 234
self.bra = self.cursor
# <-, line 234
if not self.slice_from(u"i"):
return False
raise lab9()
except lab10: pass
self.cursor = self.limit - v_8
# (, line 235
# literal, line 235
if not self.eq_s_b(1, u"\u00E7"):
self.cursor = self.limit - v_7
raise lab8()
# ], line 235
self.bra = self.cursor
# <-, line 235
if not self.slice_from(u"c"):
return False
except lab9: pass
except lab8: pass
raise lab3()
except lab4: pass
self.cursor = self.limit - v_4
# call residual_suffix, line 238
if not self.r_residual_suffix():
raise lab2()
except lab3: pass
except lab2: pass
self.cursor = self.limit - v_3
# do, line 243
v_9 = self.limit - self.cursor
try:
# call un_double, line 243
if not self.r_un_double():
raise lab11()
except lab11: pass
self.cursor = self.limit - v_9
# do, line 244
v_10 = self.limit - self.cursor
try:
# call un_accent, line 244
if not self.r_un_accent():
raise lab12()
except lab12: pass
self.cursor = self.limit - v_10
self.cursor = self.limit_backward
# do, line 246
v_11 = self.cursor
try:
# call postlude, line 246
if not self.r_postlude():
raise lab13()
except lab13: pass
self.cursor = v_11
return True
def equals(self, o):
return isinstance(o, FrenchStemmer)
def hashCode(self):
return hash("FrenchStemmer")
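# The labN classes below are generated control-flow labels: the interpreter
# emulates Snowball's structured jumps by raising and catching one dedicated
# BaseException subclass per label.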
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass

# File: DamnWidget_anaconda/anaconda_lib/snowballstemmer/dutch_stemmer.py
# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class DutchStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
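    # Each Among entry is (search string, index of a linked shorter entry or
    # -1, result code); find_among/find_among_b return the result code of the
    # longest match, which the calling rule then dispatches on.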
a_0 = [
Among(u"", -1, 6),
Among(u"\u00E1", 0, 1),
Among(u"\u00E4", 0, 1),
Among(u"\u00E9", 0, 2),
Among(u"\u00EB", 0, 2),
Among(u"\u00ED", 0, 3),
Among(u"\u00EF", 0, 3),
Among(u"\u00F3", 0, 4),
Among(u"\u00F6", 0, 4),
Among(u"\u00FA", 0, 5),
Among(u"\u00FC", 0, 5)
]
a_1 = [
Among(u"", -1, 3),
Among(u"I", 0, 2),
Among(u"Y", 0, 1)
]
a_2 = [
Among(u"dd", -1, -1),
Among(u"kk", -1, -1),
Among(u"tt", -1, -1)
]
a_3 = [
Among(u"ene", -1, 2),
Among(u"se", -1, 3),
Among(u"en", -1, 2),
Among(u"heden", 2, 1),
Among(u"s", -1, 3)
]
a_4 = [
Among(u"end", -1, 1),
Among(u"ig", -1, 2),
Among(u"ing", -1, 1),
Among(u"lijk", -1, 3),
Among(u"baar", -1, 4),
Among(u"bar", -1, 5)
]
a_5 = [
Among(u"aa", -1, -1),
Among(u"ee", -1, -1),
Among(u"oo", -1, -1),
Among(u"uu", -1, -1)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
g_v_I = [1, 0, 0, 17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
g_v_j = [17, 67, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128]
I_p2 = 0
I_p1 = 0
B_e_found = False
def copy_from(self, other):
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.B_e_found = other.B_e_found
        super().copy_from(other)
def r_prelude(self):
# (, line 41
# test, line 42
v_1 = self.cursor
# repeat, line 42
try:
while True:
try:
v_2 = self.cursor
try:
# (, line 42
# [, line 43
self.bra = self.cursor
# substring, line 43
among_var = self.find_among(DutchStemmer.a_0, 11)
if among_var == 0:
raise lab2()
# ], line 43
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 45
# <-, line 45
if not self.slice_from(u"a"):
return False
elif among_var == 2:
# (, line 47
# <-, line 47
if not self.slice_from(u"e"):
return False
elif among_var == 3:
# (, line 49
# <-, line 49
if not self.slice_from(u"i"):
return False
elif among_var == 4:
# (, line 51
# <-, line 51
if not self.slice_from(u"o"):
return False
elif among_var == 5:
# (, line 53
# <-, line 53
if not self.slice_from(u"u"):
return False
elif among_var == 6:
# (, line 54
# next, line 54
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_2
raise lab0()
except lab1: pass
except lab0: pass
self.cursor = v_1
# try, line 57
v_3 = self.cursor
try:
# (, line 57
# [, line 57
self.bra = self.cursor
# literal, line 57
if not self.eq_s(1, u"y"):
self.cursor = v_3
raise lab3()
# ], line 57
self.ket = self.cursor
# <-, line 57
if not self.slice_from(u"Y"):
return False
except lab3: pass
# repeat, line 58
try:
while True:
try:
v_4 = self.cursor
try:
# goto, line 58
try:
while True:
v_5 = self.cursor
try:
# (, line 58
if not self.in_grouping(DutchStemmer.g_v, 97, 232):
raise lab8()
# [, line 59
self.bra = self.cursor
# or, line 59
try:
v_6 = self.cursor
try:
# (, line 59
# literal, line 59
if not self.eq_s(1, u"i"):
raise lab10()
# ], line 59
self.ket = self.cursor
if not self.in_grouping(DutchStemmer.g_v, 97, 232):
raise lab10()
# <-, line 59
if not self.slice_from(u"I"):
return False
raise lab9()
except lab10: pass
self.cursor = v_6
# (, line 60
# literal, line 60
if not self.eq_s(1, u"y"):
raise lab8()
# ], line 60
self.ket = self.cursor
# <-, line 60
if not self.slice_from(u"Y"):
return False
except lab9: pass
self.cursor = v_5
raise lab7()
except lab8: pass
self.cursor = v_5
if self.cursor >= self.limit:
raise lab6()
self.cursor += 1
except lab7: pass
raise lab5()
except lab6: pass
self.cursor = v_4
raise lab4()
except lab5: pass
except lab4: pass
return True
def r_mark_regions(self):
# (, line 64
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# gopast, line 69
try:
while True:
try:
if not self.in_grouping(DutchStemmer.g_v, 97, 232):
raise lab1()
raise lab0()
except lab1: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab0: pass
# gopast, line 69
try:
while True:
try:
if not self.out_grouping(DutchStemmer.g_v, 97, 232):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab2: pass
# setmark p1, line 69
self.I_p1 = self.cursor
# try, line 70
try:
# (, line 70
if not self.I_p1 < 3:
raise lab4()
            self.I_p1 = 3
except lab4: pass
# gopast, line 71
try:
while True:
try:
if not self.in_grouping(DutchStemmer.g_v, 97, 232):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab5: pass
# gopast, line 71
try:
while True:
try:
if not self.out_grouping(DutchStemmer.g_v, 97, 232):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab7: pass
# setmark p2, line 71
self.I_p2 = self.cursor
return True
def r_postlude(self):
# repeat, line 75
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 75
# [, line 77
self.bra = self.cursor
# substring, line 77
among_var = self.find_among(DutchStemmer.a_1, 3)
if among_var == 0:
raise lab2()
# ], line 77
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 78
# <-, line 78
if not self.slice_from(u"y"):
return False
elif among_var == 2:
# (, line 79
# <-, line 79
if not self.slice_from(u"i"):
return False
elif among_var == 3:
# (, line 80
# next, line 80
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_undouble(self):
# (, line 90
# test, line 91
v_1 = self.limit - self.cursor
# among, line 91
if self.find_among_b(DutchStemmer.a_2, 3) == 0:
return False
self.cursor = self.limit - v_1
# [, line 91
self.ket = self.cursor
# next, line 91
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 91
self.bra = self.cursor
# delete, line 91
if not self.slice_del():
return False
return True
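    # r_e_ending removes a trailing "e" in R1 when preceded by a non-vowel
    # and records the removal in B_e_found, which r_standard_suffix consults
    # later (the among_var == 5 branch) before stripping a further suffix.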
def r_e_ending(self):
# (, line 94
# unset e_found, line 95
self.B_e_found = False
# [, line 96
self.ket = self.cursor
# literal, line 96
if not self.eq_s_b(1, u"e"):
return False
# ], line 96
self.bra = self.cursor
# call R1, line 96
if not self.r_R1():
return False
# test, line 96
v_1 = self.limit - self.cursor
if not self.out_grouping_b(DutchStemmer.g_v, 97, 232):
return False
self.cursor = self.limit - v_1
# delete, line 96
if not self.slice_del():
return False
# set e_found, line 97
self.B_e_found = True
# call undouble, line 98
if not self.r_undouble():
return False
return True
def r_en_ending(self):
# (, line 101
# call R1, line 102
if not self.r_R1():
return False
# and, line 102
v_1 = self.limit - self.cursor
if not self.out_grouping_b(DutchStemmer.g_v, 97, 232):
return False
self.cursor = self.limit - v_1
# not, line 102
v_2 = self.limit - self.cursor
try:
# literal, line 102
if not self.eq_s_b(3, u"gem"):
raise lab0()
return False
except lab0: pass
self.cursor = self.limit - v_2
# delete, line 102
if not self.slice_del():
return False
# call undouble, line 103
if not self.r_undouble():
return False
return True
def r_standard_suffix(self):
# (, line 106
# do, line 107
v_1 = self.limit - self.cursor
try:
# (, line 107
# [, line 108
self.ket = self.cursor
# substring, line 108
among_var = self.find_among_b(DutchStemmer.a_3, 5)
if among_var == 0:
raise lab0()
# ], line 108
self.bra = self.cursor
if among_var == 0:
raise lab0()
elif among_var == 1:
# (, line 110
# call R1, line 110
if not self.r_R1():
raise lab0()
# <-, line 110
if not self.slice_from(u"heid"):
return False
elif among_var == 2:
# (, line 113
# call en_ending, line 113
if not self.r_en_ending():
raise lab0()
elif among_var == 3:
# (, line 116
# call R1, line 116
if not self.r_R1():
raise lab0()
if not self.out_grouping_b(DutchStemmer.g_v_j, 97, 232):
raise lab0()
# delete, line 116
if not self.slice_del():
return False
except lab0: pass
self.cursor = self.limit - v_1
# do, line 120
v_2 = self.limit - self.cursor
try:
# call e_ending, line 120
if not self.r_e_ending():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 122
v_3 = self.limit - self.cursor
try:
# (, line 122
# [, line 122
self.ket = self.cursor
# literal, line 122
if not self.eq_s_b(4, u"heid"):
raise lab2()
# ], line 122
self.bra = self.cursor
# call R2, line 122
if not self.r_R2():
raise lab2()
# not, line 122
v_4 = self.limit - self.cursor
try:
# literal, line 122
if not self.eq_s_b(1, u"c"):
raise lab3()
raise lab2()
except lab3: pass
self.cursor = self.limit - v_4
# delete, line 122
if not self.slice_del():
return False
# [, line 123
self.ket = self.cursor
# literal, line 123
if not self.eq_s_b(2, u"en"):
raise lab2()
# ], line 123
self.bra = self.cursor
# call en_ending, line 123
if not self.r_en_ending():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 126
v_5 = self.limit - self.cursor
try:
# (, line 126
# [, line 127
self.ket = self.cursor
# substring, line 127
among_var = self.find_among_b(DutchStemmer.a_4, 6)
if among_var == 0:
raise lab4()
# ], line 127
self.bra = self.cursor
if among_var == 0:
raise lab4()
elif among_var == 1:
# (, line 129
# call R2, line 129
if not self.r_R2():
raise lab4()
# delete, line 129
if not self.slice_del():
return False
# or, line 130
try:
v_6 = self.limit - self.cursor
try:
# (, line 130
# [, line 130
self.ket = self.cursor
# literal, line 130
if not self.eq_s_b(2, u"ig"):
raise lab6()
# ], line 130
self.bra = self.cursor
# call R2, line 130
if not self.r_R2():
raise lab6()
# not, line 130
v_7 = self.limit - self.cursor
try:
# literal, line 130
if not self.eq_s_b(1, u"e"):
raise lab7()
raise lab6()
except lab7: pass
self.cursor = self.limit - v_7
# delete, line 130
if not self.slice_del():
return False
raise lab5()
except lab6: pass
self.cursor = self.limit - v_6
# call undouble, line 130
if not self.r_undouble():
raise lab4()
except lab5: pass
elif among_var == 2:
# (, line 133
# call R2, line 133
if not self.r_R2():
raise lab4()
# not, line 133
v_8 = self.limit - self.cursor
try:
# literal, line 133
if not self.eq_s_b(1, u"e"):
raise lab8()
raise lab4()
except lab8: pass
self.cursor = self.limit - v_8
# delete, line 133
if not self.slice_del():
return False
elif among_var == 3:
# (, line 136
# call R2, line 136
if not self.r_R2():
raise lab4()
# delete, line 136
if not self.slice_del():
return False
# call e_ending, line 136
if not self.r_e_ending():
raise lab4()
elif among_var == 4:
# (, line 139
# call R2, line 139
if not self.r_R2():
raise lab4()
# delete, line 139
if not self.slice_del():
return False
elif among_var == 5:
# (, line 142
# call R2, line 142
if not self.r_R2():
raise lab4()
# Boolean test e_found, line 142
if not self.B_e_found:
raise lab4()
# delete, line 142
if not self.slice_del():
return False
except lab4: pass
self.cursor = self.limit - v_5
# do, line 146
v_9 = self.limit - self.cursor
try:
# (, line 146
if not self.out_grouping_b(DutchStemmer.g_v_I, 73, 232):
raise lab9()
# test, line 148
v_10 = self.limit - self.cursor
# (, line 148
# among, line 149
if self.find_among_b(DutchStemmer.a_5, 4) == 0:
raise lab9()
if not self.out_grouping_b(DutchStemmer.g_v, 97, 232):
raise lab9()
self.cursor = self.limit - v_10
# [, line 152
self.ket = self.cursor
# next, line 152
if self.cursor <= self.limit_backward:
raise lab9()
self.cursor -= 1
# ], line 152
self.bra = self.cursor
# delete, line 152
if not self.slice_del():
return False
except lab9: pass
self.cursor = self.limit - v_9
return True
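    # _stem wires the rules together: prelude, mark_regions, one backwards
    # pass through r_standard_suffix, then postlude to lowercase the I/Y
    # markers introduced by the prelude.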
def _stem(self):
# (, line 157
# do, line 159
v_1 = self.cursor
try:
# call prelude, line 159
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 160
v_2 = self.cursor
try:
# call mark_regions, line 160
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 161
self.limit_backward = self.cursor
self.cursor = self.limit
# do, line 162
v_3 = self.limit - self.cursor
try:
# call standard_suffix, line 162
if not self.r_standard_suffix():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
self.cursor = self.limit_backward
# do, line 163
v_4 = self.cursor
try:
# call postlude, line 163
if not self.r_postlude():
raise lab3()
except lab3: pass
self.cursor = v_4
return True
def equals(self, o):
return isinstance(o, DutchStemmer)
def hashCode(self):
return hash("DutchStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass

# File: DamnWidget_anaconda/anaconda_lib/snowballstemmer/hungarian_stemmer.py
# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class HungarianStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"cs", -1, -1),
Among(u"dzs", -1, -1),
Among(u"gy", -1, -1),
Among(u"ly", -1, -1),
Among(u"ny", -1, -1),
Among(u"sz", -1, -1),
Among(u"ty", -1, -1),
Among(u"zs", -1, -1)
]
a_1 = [
Among(u"\u00E1", -1, 1),
Among(u"\u00E9", -1, 2)
]
a_2 = [
Among(u"bb", -1, -1),
Among(u"cc", -1, -1),
Among(u"dd", -1, -1),
Among(u"ff", -1, -1),
Among(u"gg", -1, -1),
Among(u"jj", -1, -1),
Among(u"kk", -1, -1),
Among(u"ll", -1, -1),
Among(u"mm", -1, -1),
Among(u"nn", -1, -1),
Among(u"pp", -1, -1),
Among(u"rr", -1, -1),
Among(u"ccs", -1, -1),
Among(u"ss", -1, -1),
Among(u"zzs", -1, -1),
Among(u"tt", -1, -1),
Among(u"vv", -1, -1),
Among(u"ggy", -1, -1),
Among(u"lly", -1, -1),
Among(u"nny", -1, -1),
Among(u"tty", -1, -1),
Among(u"ssz", -1, -1),
Among(u"zz", -1, -1)
]
a_3 = [
Among(u"al", -1, 1),
Among(u"el", -1, 2)
]
a_4 = [
Among(u"ba", -1, -1),
Among(u"ra", -1, -1),
Among(u"be", -1, -1),
Among(u"re", -1, -1),
Among(u"ig", -1, -1),
Among(u"nak", -1, -1),
Among(u"nek", -1, -1),
Among(u"val", -1, -1),
Among(u"vel", -1, -1),
Among(u"ul", -1, -1),
Among(u"n\u00E1l", -1, -1),
Among(u"n\u00E9l", -1, -1),
Among(u"b\u00F3l", -1, -1),
Among(u"r\u00F3l", -1, -1),
Among(u"t\u00F3l", -1, -1),
Among(u"b\u00F5l", -1, -1),
Among(u"r\u00F5l", -1, -1),
Among(u"t\u00F5l", -1, -1),
Among(u"\u00FCl", -1, -1),
Among(u"n", -1, -1),
Among(u"an", 19, -1),
Among(u"ban", 20, -1),
Among(u"en", 19, -1),
Among(u"ben", 22, -1),
Among(u"k\u00E9ppen", 22, -1),
Among(u"on", 19, -1),
Among(u"\u00F6n", 19, -1),
Among(u"k\u00E9pp", -1, -1),
Among(u"kor", -1, -1),
Among(u"t", -1, -1),
Among(u"at", 29, -1),
Among(u"et", 29, -1),
Among(u"k\u00E9nt", 29, -1),
Among(u"ank\u00E9nt", 32, -1),
Among(u"enk\u00E9nt", 32, -1),
Among(u"onk\u00E9nt", 32, -1),
Among(u"ot", 29, -1),
Among(u"\u00E9rt", 29, -1),
Among(u"\u00F6t", 29, -1),
Among(u"hez", -1, -1),
Among(u"hoz", -1, -1),
Among(u"h\u00F6z", -1, -1),
Among(u"v\u00E1", -1, -1),
Among(u"v\u00E9", -1, -1)
]
a_5 = [
Among(u"\u00E1n", -1, 2),
Among(u"\u00E9n", -1, 1),
Among(u"\u00E1nk\u00E9nt", -1, 3)
]
a_6 = [
Among(u"stul", -1, 2),
Among(u"astul", 0, 1),
Among(u"\u00E1stul", 0, 3),
Among(u"st\u00FCl", -1, 2),
Among(u"est\u00FCl", 3, 1),
Among(u"\u00E9st\u00FCl", 3, 4)
]
a_7 = [
Among(u"\u00E1", -1, 1),
Among(u"\u00E9", -1, 2)
]
a_8 = [
Among(u"k", -1, 7),
Among(u"ak", 0, 4),
Among(u"ek", 0, 6),
Among(u"ok", 0, 5),
Among(u"\u00E1k", 0, 1),
Among(u"\u00E9k", 0, 2),
Among(u"\u00F6k", 0, 3)
]
a_9 = [
Among(u"\u00E9i", -1, 7),
Among(u"\u00E1\u00E9i", 0, 6),
Among(u"\u00E9\u00E9i", 0, 5),
Among(u"\u00E9", -1, 9),
Among(u"k\u00E9", 3, 4),
Among(u"ak\u00E9", 4, 1),
Among(u"ek\u00E9", 4, 1),
Among(u"ok\u00E9", 4, 1),
Among(u"\u00E1k\u00E9", 4, 3),
Among(u"\u00E9k\u00E9", 4, 2),
Among(u"\u00F6k\u00E9", 4, 1),
Among(u"\u00E9\u00E9", 3, 8)
]
a_10 = [
Among(u"a", -1, 18),
Among(u"ja", 0, 17),
Among(u"d", -1, 16),
Among(u"ad", 2, 13),
Among(u"ed", 2, 13),
Among(u"od", 2, 13),
Among(u"\u00E1d", 2, 14),
Among(u"\u00E9d", 2, 15),
Among(u"\u00F6d", 2, 13),
Among(u"e", -1, 18),
Among(u"je", 9, 17),
Among(u"nk", -1, 4),
Among(u"unk", 11, 1),
Among(u"\u00E1nk", 11, 2),
Among(u"\u00E9nk", 11, 3),
Among(u"\u00FCnk", 11, 1),
Among(u"uk", -1, 8),
Among(u"juk", 16, 7),
Among(u"\u00E1juk", 17, 5),
Among(u"\u00FCk", -1, 8),
Among(u"j\u00FCk", 19, 7),
Among(u"\u00E9j\u00FCk", 20, 6),
Among(u"m", -1, 12),
Among(u"am", 22, 9),
Among(u"em", 22, 9),
Among(u"om", 22, 9),
Among(u"\u00E1m", 22, 10),
Among(u"\u00E9m", 22, 11),
Among(u"o", -1, 18),
Among(u"\u00E1", -1, 19),
Among(u"\u00E9", -1, 20)
]
a_11 = [
Among(u"id", -1, 10),
Among(u"aid", 0, 9),
Among(u"jaid", 1, 6),
Among(u"eid", 0, 9),
Among(u"jeid", 3, 6),
Among(u"\u00E1id", 0, 7),
Among(u"\u00E9id", 0, 8),
Among(u"i", -1, 15),
Among(u"ai", 7, 14),
Among(u"jai", 8, 11),
Among(u"ei", 7, 14),
Among(u"jei", 10, 11),
Among(u"\u00E1i", 7, 12),
Among(u"\u00E9i", 7, 13),
Among(u"itek", -1, 24),
Among(u"eitek", 14, 21),
Among(u"jeitek", 15, 20),
Among(u"\u00E9itek", 14, 23),
Among(u"ik", -1, 29),
Among(u"aik", 18, 26),
Among(u"jaik", 19, 25),
Among(u"eik", 18, 26),
Among(u"jeik", 21, 25),
Among(u"\u00E1ik", 18, 27),
Among(u"\u00E9ik", 18, 28),
Among(u"ink", -1, 20),
Among(u"aink", 25, 17),
Among(u"jaink", 26, 16),
Among(u"eink", 25, 17),
Among(u"jeink", 28, 16),
Among(u"\u00E1ink", 25, 18),
Among(u"\u00E9ink", 25, 19),
Among(u"aitok", -1, 21),
Among(u"jaitok", 32, 20),
Among(u"\u00E1itok", -1, 22),
Among(u"im", -1, 5),
Among(u"aim", 35, 4),
Among(u"jaim", 36, 1),
Among(u"eim", 35, 4),
Among(u"jeim", 38, 1),
Among(u"\u00E1im", 35, 2),
Among(u"\u00E9im", 35, 3)
]
g_v = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 17, 52, 14]
I_p1 = 0
def copy_from(self, other):
self.I_p1 = other.I_p1
        super().copy_from(other)
def r_mark_regions(self):
# (, line 44
        self.I_p1 = self.limit
# or, line 51
try:
v_1 = self.cursor
try:
# (, line 48
if not self.in_grouping(HungarianStemmer.g_v, 97, 252):
raise lab1()
# goto, line 48
try:
while True:
v_2 = self.cursor
try:
if not self.out_grouping(HungarianStemmer.g_v, 97, 252):
raise lab3()
self.cursor = v_2
raise lab2()
except lab3: pass
self.cursor = v_2
if self.cursor >= self.limit:
raise lab1()
self.cursor += 1
except lab2: pass
# or, line 49
try:
v_3 = self.cursor
try:
# among, line 49
if self.find_among(HungarianStemmer.a_0, 8) == 0:
raise lab5()
raise lab4()
except lab5: pass
self.cursor = v_3
# next, line 49
if self.cursor >= self.limit:
raise lab1()
self.cursor += 1
except lab4: pass
# setmark p1, line 50
self.I_p1 = self.cursor
raise lab0()
except lab1: pass
self.cursor = v_1
# (, line 53
if not self.out_grouping(HungarianStemmer.g_v, 97, 252):
return False
# gopast, line 53
try:
while True:
try:
if not self.in_grouping(HungarianStemmer.g_v, 97, 252):
raise lab7()
raise lab6()
except lab7: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab6: pass
# setmark p1, line 53
self.I_p1 = self.cursor
except lab0: pass
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_v_ending(self):
# (, line 60
# [, line 61
self.ket = self.cursor
# substring, line 61
among_var = self.find_among_b(HungarianStemmer.a_1, 2)
if among_var == 0:
return False
# ], line 61
self.bra = self.cursor
# call R1, line 61
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 62
# <-, line 62
if not self.slice_from(u"a"):
return False
elif among_var == 2:
# (, line 63
# <-, line 63
if not self.slice_from(u"e"):
return False
return True
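    # r_double succeeds, without moving the cursor, when the word currently
    # ends in one of the doubled consonants of a_2; r_undouble then strips
    # one character of the doubled pair once the suffix itself has been
    # removed.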
def r_double(self):
# (, line 67
# test, line 68
v_1 = self.limit - self.cursor
# among, line 68
if self.find_among_b(HungarianStemmer.a_2, 23) == 0:
return False
self.cursor = self.limit - v_1
return True
def r_undouble(self):
# (, line 72
# next, line 73
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# [, line 73
self.ket = self.cursor
# hop, line 73
c = self.cursor - 1
if self.limit_backward > c or c > self.limit:
return False
self.cursor = c
# ], line 73
self.bra = self.cursor
# delete, line 73
if not self.slice_del():
return False
return True
def r_instrum(self):
# (, line 76
# [, line 77
self.ket = self.cursor
# substring, line 77
among_var = self.find_among_b(HungarianStemmer.a_3, 2)
if among_var == 0:
return False
# ], line 77
self.bra = self.cursor
# call R1, line 77
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 78
# call double, line 78
if not self.r_double():
return False
elif among_var == 2:
# (, line 79
# call double, line 79
if not self.r_double():
return False
# delete, line 81
if not self.slice_del():
return False
# call undouble, line 82
if not self.r_undouble():
return False
return True
def r_case(self):
# (, line 86
# [, line 87
self.ket = self.cursor
# substring, line 87
if self.find_among_b(HungarianStemmer.a_4, 44) == 0:
return False
# ], line 87
self.bra = self.cursor
# call R1, line 87
if not self.r_R1():
return False
# delete, line 111
if not self.slice_del():
return False
# call v_ending, line 112
if not self.r_v_ending():
return False
return True
def r_case_special(self):
# (, line 115
# [, line 116
self.ket = self.cursor
# substring, line 116
among_var = self.find_among_b(HungarianStemmer.a_5, 3)
if among_var == 0:
return False
# ], line 116
self.bra = self.cursor
# call R1, line 116
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 117
# <-, line 117
if not self.slice_from(u"e"):
return False
elif among_var == 2:
# (, line 118
# <-, line 118
if not self.slice_from(u"a"):
return False
elif among_var == 3:
# (, line 119
# <-, line 119
if not self.slice_from(u"a"):
return False
return True
def r_case_other(self):
# (, line 123
# [, line 124
self.ket = self.cursor
# substring, line 124
among_var = self.find_among_b(HungarianStemmer.a_6, 6)
if among_var == 0:
return False
# ], line 124
self.bra = self.cursor
# call R1, line 124
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 125
# delete, line 125
if not self.slice_del():
return False
elif among_var == 2:
# (, line 126
# delete, line 126
if not self.slice_del():
return False
elif among_var == 3:
# (, line 127
# <-, line 127
if not self.slice_from(u"a"):
return False
elif among_var == 4:
# (, line 128
# <-, line 128
if not self.slice_from(u"e"):
return False
return True
def r_factive(self):
# (, line 132
# [, line 133
self.ket = self.cursor
# substring, line 133
among_var = self.find_among_b(HungarianStemmer.a_7, 2)
if among_var == 0:
return False
# ], line 133
self.bra = self.cursor
# call R1, line 133
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 134
# call double, line 134
if not self.r_double():
return False
elif among_var == 2:
# (, line 135
# call double, line 135
if not self.r_double():
return False
# delete, line 137
if not self.slice_del():
return False
# call undouble, line 138
if not self.r_undouble():
return False
return True
def r_plural(self):
# (, line 141
# [, line 142
self.ket = self.cursor
# substring, line 142
among_var = self.find_among_b(HungarianStemmer.a_8, 7)
if among_var == 0:
return False
# ], line 142
self.bra = self.cursor
# call R1, line 142
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 143
# <-, line 143
if not self.slice_from(u"a"):
return False
elif among_var == 2:
# (, line 144
# <-, line 144
if not self.slice_from(u"e"):
return False
elif among_var == 3:
# (, line 145
# delete, line 145
if not self.slice_del():
return False
elif among_var == 4:
# (, line 146
# delete, line 146
if not self.slice_del():
return False
elif among_var == 5:
# (, line 147
# delete, line 147
if not self.slice_del():
return False
elif among_var == 6:
# (, line 148
# delete, line 148
if not self.slice_del():
return False
elif among_var == 7:
# (, line 149
# delete, line 149
if not self.slice_del():
return False
return True
def r_owned(self):
# (, line 153
# [, line 154
self.ket = self.cursor
# substring, line 154
among_var = self.find_among_b(HungarianStemmer.a_9, 12)
if among_var == 0:
return False
# ], line 154
self.bra = self.cursor
# call R1, line 154
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 155
# delete, line 155
if not self.slice_del():
return False
elif among_var == 2:
# (, line 156
# <-, line 156
if not self.slice_from(u"e"):
return False
elif among_var == 3:
# (, line 157
# <-, line 157
if not self.slice_from(u"a"):
return False
elif among_var == 4:
# (, line 158
# delete, line 158
if not self.slice_del():
return False
elif among_var == 5:
# (, line 159
# <-, line 159
if not self.slice_from(u"e"):
return False
elif among_var == 6:
# (, line 160
# <-, line 160
if not self.slice_from(u"a"):
return False
elif among_var == 7:
# (, line 161
# delete, line 161
if not self.slice_del():
return False
elif among_var == 8:
# (, line 162
# <-, line 162
if not self.slice_from(u"e"):
return False
elif among_var == 9:
# (, line 163
# delete, line 163
if not self.slice_del():
return False
return True
def r_sing_owner(self):
# (, line 167
# [, line 168
self.ket = self.cursor
# substring, line 168
among_var = self.find_among_b(HungarianStemmer.a_10, 31)
if among_var == 0:
return False
# ], line 168
self.bra = self.cursor
# call R1, line 168
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 169
# delete, line 169
if not self.slice_del():
return False
elif among_var == 2:
# (, line 170
# <-, line 170
if not self.slice_from(u"a"):
return False
elif among_var == 3:
# (, line 171
# <-, line 171
if not self.slice_from(u"e"):
return False
elif among_var == 4:
# (, line 172
# delete, line 172
if not self.slice_del():
return False
elif among_var == 5:
# (, line 173
# <-, line 173
if not self.slice_from(u"a"):
return False
elif among_var == 6:
# (, line 174
# <-, line 174
if not self.slice_from(u"e"):
return False
elif among_var == 7:
# (, line 175
# delete, line 175
if not self.slice_del():
return False
elif among_var == 8:
# (, line 176
# delete, line 176
if not self.slice_del():
return False
elif among_var == 9:
# (, line 177
# delete, line 177
if not self.slice_del():
return False
elif among_var == 10:
# (, line 178
# <-, line 178
if not self.slice_from(u"a"):
return False
elif among_var == 11:
# (, line 179
# <-, line 179
if not self.slice_from(u"e"):
return False
elif among_var == 12:
# (, line 180
# delete, line 180
if not self.slice_del():
return False
elif among_var == 13:
# (, line 181
# delete, line 181
if not self.slice_del():
return False
elif among_var == 14:
# (, line 182
# <-, line 182
if not self.slice_from(u"a"):
return False
elif among_var == 15:
# (, line 183
# <-, line 183
if not self.slice_from(u"e"):
return False
elif among_var == 16:
# (, line 184
# delete, line 184
if not self.slice_del():
return False
elif among_var == 17:
# (, line 185
# delete, line 185
if not self.slice_del():
return False
elif among_var == 18:
# (, line 186
# delete, line 186
if not self.slice_del():
return False
elif among_var == 19:
# (, line 187
# <-, line 187
if not self.slice_from(u"a"):
return False
elif among_var == 20:
# (, line 188
# <-, line 188
if not self.slice_from(u"e"):
return False
return True
def r_plur_owner(self):
# (, line 192
# [, line 193
self.ket = self.cursor
# substring, line 193
among_var = self.find_among_b(HungarianStemmer.a_11, 42)
if among_var == 0:
return False
# ], line 193
self.bra = self.cursor
# call R1, line 193
if not self.r_R1():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 194
# delete, line 194
if not self.slice_del():
return False
elif among_var == 2:
# (, line 195
# <-, line 195
if not self.slice_from(u"a"):
return False
elif among_var == 3:
# (, line 196
# <-, line 196
if not self.slice_from(u"e"):
return False
elif among_var == 4:
# (, line 197
# delete, line 197
if not self.slice_del():
return False
elif among_var == 5:
# (, line 198
# delete, line 198
if not self.slice_del():
return False
elif among_var == 6:
# (, line 199
# delete, line 199
if not self.slice_del():
return False
elif among_var == 7:
# (, line 200
# <-, line 200
if not self.slice_from(u"a"):
return False
elif among_var == 8:
# (, line 201
# <-, line 201
if not self.slice_from(u"e"):
return False
elif among_var == 9:
# (, line 202
# delete, line 202
if not self.slice_del():
return False
elif among_var == 10:
# (, line 203
# delete, line 203
if not self.slice_del():
return False
elif among_var == 11:
# (, line 204
# delete, line 204
if not self.slice_del():
return False
elif among_var == 12:
# (, line 205
# <-, line 205
if not self.slice_from(u"a"):
return False
elif among_var == 13:
# (, line 206
# <-, line 206
if not self.slice_from(u"e"):
return False
elif among_var == 14:
# (, line 207
# delete, line 207
if not self.slice_del():
return False
elif among_var == 15:
# (, line 208
# delete, line 208
if not self.slice_del():
return False
elif among_var == 16:
# (, line 209
# delete, line 209
if not self.slice_del():
return False
elif among_var == 17:
# (, line 210
# delete, line 210
if not self.slice_del():
return False
elif among_var == 18:
# (, line 211
# <-, line 211
if not self.slice_from(u"a"):
return False
elif among_var == 19:
# (, line 212
# <-, line 212
if not self.slice_from(u"e"):
return False
elif among_var == 20:
# (, line 214
# delete, line 214
if not self.slice_del():
return False
elif among_var == 21:
# (, line 215
# delete, line 215
if not self.slice_del():
return False
elif among_var == 22:
# (, line 216
# <-, line 216
if not self.slice_from(u"a"):
return False
elif among_var == 23:
# (, line 217
# <-, line 217
if not self.slice_from(u"e"):
return False
elif among_var == 24:
# (, line 218
# delete, line 218
if not self.slice_del():
return False
elif among_var == 25:
# (, line 219
# delete, line 219
if not self.slice_del():
return False
elif among_var == 26:
# (, line 220
# delete, line 220
if not self.slice_del():
return False
elif among_var == 27:
# (, line 221
# <-, line 221
if not self.slice_from(u"a"):
return False
elif among_var == 28:
# (, line 222
# <-, line 222
if not self.slice_from(u"e"):
return False
elif among_var == 29:
# (, line 223
# delete, line 223
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 228
# do, line 229
v_1 = self.cursor
try:
# call mark_regions, line 229
if not self.r_mark_regions():
raise lab0()
except lab0: pass
self.cursor = v_1
# backwards, line 230
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 230
# do, line 231
v_2 = self.limit - self.cursor
try:
# call instrum, line 231
if not self.r_instrum():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 232
v_3 = self.limit - self.cursor
try:
# call case, line 232
if not self.r_case():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 233
v_4 = self.limit - self.cursor
try:
# call case_special, line 233
if not self.r_case_special():
raise lab3()
except lab3: pass
self.cursor = self.limit - v_4
# do, line 234
v_5 = self.limit - self.cursor
try:
# call case_other, line 234
if not self.r_case_other():
raise lab4()
except lab4: pass
self.cursor = self.limit - v_5
# do, line 235
v_6 = self.limit - self.cursor
try:
# call factive, line 235
if not self.r_factive():
raise lab5()
except lab5: pass
self.cursor = self.limit - v_6
# do, line 236
v_7 = self.limit - self.cursor
try:
# call owned, line 236
if not self.r_owned():
raise lab6()
except lab6: pass
self.cursor = self.limit - v_7
# do, line 237
v_8 = self.limit - self.cursor
try:
# call sing_owner, line 237
if not self.r_sing_owner():
raise lab7()
except lab7: pass
self.cursor = self.limit - v_8
# do, line 238
v_9 = self.limit - self.cursor
try:
# call plur_owner, line 238
if not self.r_plur_owner():
raise lab8()
except lab8: pass
self.cursor = self.limit - v_9
# do, line 239
v_10 = self.limit - self.cursor
try:
# call plural, line 239
if not self.r_plural():
raise lab9()
except lab9: pass
self.cursor = self.limit - v_10
self.cursor = self.limit_backward
return True
def equals(self, o):
return isinstance(o, HungarianStemmer)
def hashCode(self):
return hash("HungarianStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass

# File: DamnWidget_anaconda/anaconda_lib/snowballstemmer/__init__.py
__all__ = ('algorithms', 'stemmer')
from .danish_stemmer import DanishStemmer
from .dutch_stemmer import DutchStemmer
from .english_stemmer import EnglishStemmer
from .finnish_stemmer import FinnishStemmer
from .french_stemmer import FrenchStemmer
from .german_stemmer import GermanStemmer
from .hungarian_stemmer import HungarianStemmer
from .italian_stemmer import ItalianStemmer
from .norwegian_stemmer import NorwegianStemmer
from .porter_stemmer import PorterStemmer
from .portuguese_stemmer import PortugueseStemmer
from .romanian_stemmer import RomanianStemmer
from .russian_stemmer import RussianStemmer
from .spanish_stemmer import SpanishStemmer
from .swedish_stemmer import SwedishStemmer
from .turkish_stemmer import TurkishStemmer
_languages = {
'danish': DanishStemmer,
'dutch': DutchStemmer,
'english': EnglishStemmer,
'finnish': FinnishStemmer,
'french': FrenchStemmer,
'german': GermanStemmer,
'hungarian': HungarianStemmer,
'italian': ItalianStemmer,
'norwegian': NorwegianStemmer,
'porter': PorterStemmer,
'portuguese': PortugueseStemmer,
'romanian': RomanianStemmer,
'russian': RussianStemmer,
'spanish': SpanishStemmer,
'swedish': SwedishStemmer,
'turkish': TurkishStemmer,
}
try:
import Stemmer
cext_available = True
except ImportError:
cext_available = False
def algorithms():
if cext_available:
        # PyStemmer exposes the list of algorithm names via Stemmer.algorithms().
        return Stemmer.algorithms()
else:
return list(_languages.keys())
def stemmer(lang):
if cext_available:
return Stemmer.Stemmer(lang)
if lang.lower() in _languages:
return _languages[lang.lower()]()
else:
raise KeyError("Stemming algorithm '%s' not found" % lang)
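# Minimal usage sketch (not part of the generated sources).  It assumes the
# BaseStemmer subclasses expose stemWord()/stemWords(), mirroring the
# PyStemmer C extension interface targeted by the upstream snowballstemmer
# package; adjust the calls if the vendored BaseStemmer differs.
if __name__ == '__main__':
    english = stemmer('english')
    # Stem a single word with the English algorithm.
    print(english.stemWord('running'))
    # stemWords() maps over a list, preserving order.
    print(english.stemWords(['cats', 'ponies', 'stemming']))
    # Names accepted by stemmer() come from algorithms().
    print(sorted(algorithms()))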

# File: DamnWidget_anaconda/anaconda_lib/snowballstemmer/italian_stemmer.py
# This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class ItalianStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"", -1, 7),
Among(u"qu", 0, 6),
Among(u"\u00E1", 0, 1),
Among(u"\u00E9", 0, 2),
Among(u"\u00ED", 0, 3),
Among(u"\u00F3", 0, 4),
Among(u"\u00FA", 0, 5)
]
a_1 = [
Among(u"", -1, 3),
Among(u"I", 0, 1),
Among(u"U", 0, 2)
]
a_2 = [
Among(u"la", -1, -1),
Among(u"cela", 0, -1),
Among(u"gliela", 0, -1),
Among(u"mela", 0, -1),
Among(u"tela", 0, -1),
Among(u"vela", 0, -1),
Among(u"le", -1, -1),
Among(u"cele", 6, -1),
Among(u"gliele", 6, -1),
Among(u"mele", 6, -1),
Among(u"tele", 6, -1),
Among(u"vele", 6, -1),
Among(u"ne", -1, -1),
Among(u"cene", 12, -1),
Among(u"gliene", 12, -1),
Among(u"mene", 12, -1),
Among(u"sene", 12, -1),
Among(u"tene", 12, -1),
Among(u"vene", 12, -1),
Among(u"ci", -1, -1),
Among(u"li", -1, -1),
Among(u"celi", 20, -1),
Among(u"glieli", 20, -1),
Among(u"meli", 20, -1),
Among(u"teli", 20, -1),
Among(u"veli", 20, -1),
Among(u"gli", 20, -1),
Among(u"mi", -1, -1),
Among(u"si", -1, -1),
Among(u"ti", -1, -1),
Among(u"vi", -1, -1),
Among(u"lo", -1, -1),
Among(u"celo", 31, -1),
Among(u"glielo", 31, -1),
Among(u"melo", 31, -1),
Among(u"telo", 31, -1),
Among(u"velo", 31, -1)
]
a_3 = [
Among(u"ando", -1, 1),
Among(u"endo", -1, 1),
Among(u"ar", -1, 2),
Among(u"er", -1, 2),
Among(u"ir", -1, 2)
]
a_4 = [
Among(u"ic", -1, -1),
Among(u"abil", -1, -1),
Among(u"os", -1, -1),
Among(u"iv", -1, 1)
]
a_5 = [
Among(u"ic", -1, 1),
Among(u"abil", -1, 1),
Among(u"iv", -1, 1)
]
a_6 = [
Among(u"ica", -1, 1),
Among(u"logia", -1, 3),
Among(u"osa", -1, 1),
Among(u"ista", -1, 1),
Among(u"iva", -1, 9),
Among(u"anza", -1, 1),
Among(u"enza", -1, 5),
Among(u"ice", -1, 1),
Among(u"atrice", 7, 1),
Among(u"iche", -1, 1),
Among(u"logie", -1, 3),
Among(u"abile", -1, 1),
Among(u"ibile", -1, 1),
Among(u"usione", -1, 4),
Among(u"azione", -1, 2),
Among(u"uzione", -1, 4),
Among(u"atore", -1, 2),
Among(u"ose", -1, 1),
Among(u"ante", -1, 1),
Among(u"mente", -1, 1),
Among(u"amente", 19, 7),
Among(u"iste", -1, 1),
Among(u"ive", -1, 9),
Among(u"anze", -1, 1),
Among(u"enze", -1, 5),
Among(u"ici", -1, 1),
Among(u"atrici", 25, 1),
Among(u"ichi", -1, 1),
Among(u"abili", -1, 1),
Among(u"ibili", -1, 1),
Among(u"ismi", -1, 1),
Among(u"usioni", -1, 4),
Among(u"azioni", -1, 2),
Among(u"uzioni", -1, 4),
Among(u"atori", -1, 2),
Among(u"osi", -1, 1),
Among(u"anti", -1, 1),
Among(u"amenti", -1, 6),
Among(u"imenti", -1, 6),
Among(u"isti", -1, 1),
Among(u"ivi", -1, 9),
Among(u"ico", -1, 1),
Among(u"ismo", -1, 1),
Among(u"oso", -1, 1),
Among(u"amento", -1, 6),
Among(u"imento", -1, 6),
Among(u"ivo", -1, 9),
Among(u"it\u00E0", -1, 8),
Among(u"ist\u00E0", -1, 1),
Among(u"ist\u00E8", -1, 1),
Among(u"ist\u00EC", -1, 1)
]
a_7 = [
Among(u"isca", -1, 1),
Among(u"enda", -1, 1),
Among(u"ata", -1, 1),
Among(u"ita", -1, 1),
Among(u"uta", -1, 1),
Among(u"ava", -1, 1),
Among(u"eva", -1, 1),
Among(u"iva", -1, 1),
Among(u"erebbe", -1, 1),
Among(u"irebbe", -1, 1),
Among(u"isce", -1, 1),
Among(u"ende", -1, 1),
Among(u"are", -1, 1),
Among(u"ere", -1, 1),
Among(u"ire", -1, 1),
Among(u"asse", -1, 1),
Among(u"ate", -1, 1),
Among(u"avate", 16, 1),
Among(u"evate", 16, 1),
Among(u"ivate", 16, 1),
Among(u"ete", -1, 1),
Among(u"erete", 20, 1),
Among(u"irete", 20, 1),
Among(u"ite", -1, 1),
Among(u"ereste", -1, 1),
Among(u"ireste", -1, 1),
Among(u"ute", -1, 1),
Among(u"erai", -1, 1),
Among(u"irai", -1, 1),
Among(u"isci", -1, 1),
Among(u"endi", -1, 1),
Among(u"erei", -1, 1),
Among(u"irei", -1, 1),
Among(u"assi", -1, 1),
Among(u"ati", -1, 1),
Among(u"iti", -1, 1),
Among(u"eresti", -1, 1),
Among(u"iresti", -1, 1),
Among(u"uti", -1, 1),
Among(u"avi", -1, 1),
Among(u"evi", -1, 1),
Among(u"ivi", -1, 1),
Among(u"isco", -1, 1),
Among(u"ando", -1, 1),
Among(u"endo", -1, 1),
Among(u"Yamo", -1, 1),
Among(u"iamo", -1, 1),
Among(u"avamo", -1, 1),
Among(u"evamo", -1, 1),
Among(u"ivamo", -1, 1),
Among(u"eremo", -1, 1),
Among(u"iremo", -1, 1),
Among(u"assimo", -1, 1),
Among(u"ammo", -1, 1),
Among(u"emmo", -1, 1),
Among(u"eremmo", 54, 1),
Among(u"iremmo", 54, 1),
Among(u"immo", -1, 1),
Among(u"ano", -1, 1),
Among(u"iscano", 58, 1),
Among(u"avano", 58, 1),
Among(u"evano", 58, 1),
Among(u"ivano", 58, 1),
Among(u"eranno", -1, 1),
Among(u"iranno", -1, 1),
Among(u"ono", -1, 1),
Among(u"iscono", 65, 1),
Among(u"arono", 65, 1),
Among(u"erono", 65, 1),
Among(u"irono", 65, 1),
Among(u"erebbero", -1, 1),
Among(u"irebbero", -1, 1),
Among(u"assero", -1, 1),
Among(u"essero", -1, 1),
Among(u"issero", -1, 1),
Among(u"ato", -1, 1),
Among(u"ito", -1, 1),
Among(u"uto", -1, 1),
Among(u"avo", -1, 1),
Among(u"evo", -1, 1),
Among(u"ivo", -1, 1),
Among(u"ar", -1, 1),
Among(u"ir", -1, 1),
Among(u"er\u00E0", -1, 1),
Among(u"ir\u00E0", -1, 1),
Among(u"er\u00F2", -1, 1),
Among(u"ir\u00F2", -1, 1)
]
g_v = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 8, 2, 1]
g_AEIO = [17, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 8, 2]
g_CG = [17]
I_p2 = 0
I_p1 = 0
I_pV = 0
def copy_from(self, other):
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.I_pV = other.I_pV
        super().copy_from(other)
def r_prelude(self):
# (, line 34
# test, line 35
v_1 = self.cursor
# repeat, line 35
try:
while True:
try:
v_2 = self.cursor
try:
# (, line 35
# [, line 36
self.bra = self.cursor
# substring, line 36
among_var = self.find_among(ItalianStemmer.a_0, 7)
if among_var == 0:
raise lab2()
# ], line 36
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 37
# <-, line 37
if not self.slice_from(u"\u00E0"):
return False
elif among_var == 2:
# (, line 38
# <-, line 38
if not self.slice_from(u"\u00E8"):
return False
elif among_var == 3:
# (, line 39
# <-, line 39
if not self.slice_from(u"\u00EC"):
return False
elif among_var == 4:
# (, line 40
# <-, line 40
if not self.slice_from(u"\u00F2"):
return False
elif among_var == 5:
# (, line 41
# <-, line 41
if not self.slice_from(u"\u00F9"):
return False
elif among_var == 6:
# (, line 42
# <-, line 42
if not self.slice_from(u"qU"):
return False
elif among_var == 7:
# (, line 43
# next, line 43
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_2
raise lab0()
except lab1: pass
except lab0: pass
self.cursor = v_1
# repeat, line 46
try:
while True:
try:
v_3 = self.cursor
try:
# goto, line 46
try:
while True:
v_4 = self.cursor
try:
# (, line 46
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab7()
# [, line 47
self.bra = self.cursor
# or, line 47
try:
v_5 = self.cursor
try:
# (, line 47
# literal, line 47
if not self.eq_s(1, u"u"):
raise lab9()
# ], line 47
self.ket = self.cursor
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab9()
# <-, line 47
if not self.slice_from(u"U"):
return False
raise lab8()
except lab9: pass
self.cursor = v_5
# (, line 48
# literal, line 48
if not self.eq_s(1, u"i"):
raise lab7()
# ], line 48
self.ket = self.cursor
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab7()
# <-, line 48
if not self.slice_from(u"I"):
return False
except lab8: pass
self.cursor = v_4
raise lab6()
except lab7: pass
self.cursor = v_4
if self.cursor >= self.limit:
raise lab5()
self.cursor += 1
except lab6: pass
raise lab4()
except lab5: pass
self.cursor = v_3
raise lab3()
except lab4: pass
except lab3: pass
return True
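    # r_mark_regions computes the standard Snowball regions: R1 starts after
    # the first non-vowel that follows a vowel, R2 applies the same rule
    # again inside R1, and RV is the Romance-language verb region used to
    # guard pronoun and suffix removal (its exact rules are given in the
    # Snowball Italian description; here it falls out of the vowel/consonant
    # pattern of the first few letters).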
def r_mark_regions(self):
# (, line 52
        self.I_pV = self.limit
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 58
v_1 = self.cursor
try:
# (, line 58
# or, line 60
try:
v_2 = self.cursor
try:
# (, line 59
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab2()
# or, line 59
try:
v_3 = self.cursor
try:
# (, line 59
if not self.out_grouping(ItalianStemmer.g_v, 97, 249):
raise lab4()
# gopast, line 59
try:
while True:
try:
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
raise lab4()
self.cursor += 1
except lab5: pass
raise lab3()
except lab4: pass
self.cursor = v_3
# (, line 59
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab2()
# gopast, line 59
try:
while True:
try:
if not self.out_grouping(ItalianStemmer.g_v, 97, 249):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab7: pass
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_2
# (, line 61
if not self.out_grouping(ItalianStemmer.g_v, 97, 249):
raise lab0()
# or, line 61
try:
v_6 = self.cursor
try:
# (, line 61
if not self.out_grouping(ItalianStemmer.g_v, 97, 249):
raise lab10()
# gopast, line 61
try:
while True:
try:
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab12()
raise lab11()
except lab12: pass
if self.cursor >= self.limit:
raise lab10()
self.cursor += 1
except lab11: pass
raise lab9()
except lab10: pass
self.cursor = v_6
# (, line 61
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab0()
# next, line 61
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab9: pass
except lab1: pass
# setmark pV, line 62
self.I_pV = self.cursor
except lab0: pass
self.cursor = v_1
# do, line 64
v_8 = self.cursor
try:
# (, line 64
# gopast, line 65
try:
while True:
try:
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab15()
raise lab14()
except lab15: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab14: pass
# gopast, line 65
try:
while True:
try:
if not self.out_grouping(ItalianStemmer.g_v, 97, 249):
raise lab17()
raise lab16()
except lab17: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab16: pass
# setmark p1, line 65
self.I_p1 = self.cursor
# gopast, line 66
try:
while True:
try:
if not self.in_grouping(ItalianStemmer.g_v, 97, 249):
raise lab19()
raise lab18()
except lab19: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab18: pass
# gopast, line 66
try:
while True:
try:
if not self.out_grouping(ItalianStemmer.g_v, 97, 249):
raise lab21()
raise lab20()
except lab21: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab20: pass
# setmark p2, line 66
self.I_p2 = self.cursor
except lab13: pass
self.cursor = v_8
return True
def r_postlude(self):
# repeat, line 70
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 70
# [, line 72
self.bra = self.cursor
# substring, line 72
among_var = self.find_among(ItalianStemmer.a_1, 3)
if among_var == 0:
raise lab2()
# ], line 72
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 73
# <-, line 73
if not self.slice_from(u"i"):
return False
elif among_var == 2:
# (, line 74
# <-, line 74
if not self.slice_from(u"u"):
return False
elif among_var == 3:
# (, line 75
# next, line 75
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_RV(self):
if not self.I_pV <= self.cursor:
return False
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
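    # r_attached_pronoun strips an enclitic pronoun (a_2: -mi, -ti, -gli,
    # -glielo, ...) when it follows a gerund or infinitive marker (a_3)
    # inside RV: after -ando/-endo the pronoun is deleted, after
    # -ar/-er/-ir it is replaced by "e".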
def r_attached_pronoun(self):
# (, line 86
# [, line 87
self.ket = self.cursor
# substring, line 87
if self.find_among_b(ItalianStemmer.a_2, 37) == 0:
return False
# ], line 87
self.bra = self.cursor
# among, line 97
among_var = self.find_among_b(ItalianStemmer.a_3, 5)
if among_var == 0:
return False
# (, line 97
# call RV, line 97
if not self.r_RV():
return False
if among_var == 0:
return False
elif among_var == 1:
# (, line 98
# delete, line 98
if not self.slice_del():
return False
elif among_var == 2:
# (, line 99
# <-, line 99
if not self.slice_from(u"e"):
return False
return True
def r_standard_suffix(self):
# (, line 103
# [, line 104
self.ket = self.cursor
# substring, line 104
among_var = self.find_among_b(ItalianStemmer.a_6, 51)
if among_var == 0:
return False
# ], line 104
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 111
# call R2, line 111
if not self.r_R2():
return False
# delete, line 111
if not self.slice_del():
return False
elif among_var == 2:
# (, line 113
# call R2, line 113
if not self.r_R2():
return False
# delete, line 113
if not self.slice_del():
return False
# try, line 114
v_1 = self.limit - self.cursor
try:
# (, line 114
# [, line 114
self.ket = self.cursor
# literal, line 114
if not self.eq_s_b(2, u"ic"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 114
self.bra = self.cursor
# call R2, line 114
if not self.r_R2():
self.cursor = self.limit - v_1
raise lab0()
# delete, line 114
if not self.slice_del():
return False
except lab0: pass
elif among_var == 3:
# (, line 117
# call R2, line 117
if not self.r_R2():
return False
# <-, line 117
if not self.slice_from(u"log"):
return False
elif among_var == 4:
# (, line 119
# call R2, line 119
if not self.r_R2():
return False
# <-, line 119
if not self.slice_from(u"u"):
return False
elif among_var == 5:
# (, line 121
# call R2, line 121
if not self.r_R2():
return False
# <-, line 121
if not self.slice_from(u"ente"):
return False
elif among_var == 6:
# (, line 123
# call RV, line 123
if not self.r_RV():
return False
# delete, line 123
if not self.slice_del():
return False
elif among_var == 7:
# (, line 124
# call R1, line 125
if not self.r_R1():
return False
# delete, line 125
if not self.slice_del():
return False
# try, line 126
v_2 = self.limit - self.cursor
try:
# (, line 126
# [, line 127
self.ket = self.cursor
# substring, line 127
among_var = self.find_among_b(ItalianStemmer.a_4, 4)
if among_var == 0:
self.cursor = self.limit - v_2
raise lab1()
# ], line 127
self.bra = self.cursor
# call R2, line 127
if not self.r_R2():
self.cursor = self.limit - v_2
raise lab1()
# delete, line 127
if not self.slice_del():
return False
if among_var == 0:
self.cursor = self.limit - v_2
raise lab1()
elif among_var == 1:
# (, line 128
# [, line 128
self.ket = self.cursor
# literal, line 128
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_2
raise lab1()
# ], line 128
self.bra = self.cursor
# call R2, line 128
if not self.r_R2():
self.cursor = self.limit - v_2
raise lab1()
# delete, line 128
if not self.slice_del():
return False
except lab1: pass
elif among_var == 8:
# (, line 133
# call R2, line 134
if not self.r_R2():
return False
# delete, line 134
if not self.slice_del():
return False
# try, line 135
v_3 = self.limit - self.cursor
try:
# (, line 135
# [, line 136
self.ket = self.cursor
# substring, line 136
among_var = self.find_among_b(ItalianStemmer.a_5, 3)
if among_var == 0:
self.cursor = self.limit - v_3
raise lab2()
# ], line 136
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_3
raise lab2()
elif among_var == 1:
# (, line 137
# call R2, line 137
if not self.r_R2():
self.cursor = self.limit - v_3
raise lab2()
# delete, line 137
if not self.slice_del():
return False
except lab2: pass
elif among_var == 9:
# (, line 141
# call R2, line 142
if not self.r_R2():
return False
# delete, line 142
if not self.slice_del():
return False
# try, line 143
v_4 = self.limit - self.cursor
try:
# (, line 143
# [, line 143
self.ket = self.cursor
# literal, line 143
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_4
raise lab3()
# ], line 143
self.bra = self.cursor
# call R2, line 143
if not self.r_R2():
self.cursor = self.limit - v_4
raise lab3()
# delete, line 143
if not self.slice_del():
return False
# [, line 143
self.ket = self.cursor
# literal, line 143
if not self.eq_s_b(2, u"ic"):
self.cursor = self.limit - v_4
raise lab3()
# ], line 143
self.bra = self.cursor
# call R2, line 143
if not self.r_R2():
self.cursor = self.limit - v_4
raise lab3()
# delete, line 143
if not self.slice_del():
return False
except lab3: pass
return True
def r_verb_suffix(self):
# setlimit, line 148
v_1 = self.limit - self.cursor
# tomark, line 148
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 148
# [, line 149
self.ket = self.cursor
# substring, line 149
among_var = self.find_among_b(ItalianStemmer.a_7, 87)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 149
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 163
# delete, line 163
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def r_vowel_suffix(self):
# (, line 170
# try, line 171
v_1 = self.limit - self.cursor
try:
# (, line 171
# [, line 172
self.ket = self.cursor
if not self.in_grouping_b(ItalianStemmer.g_AEIO, 97, 242):
self.cursor = self.limit - v_1
raise lab0()
# ], line 172
self.bra = self.cursor
# call RV, line 172
if not self.r_RV():
self.cursor = self.limit - v_1
raise lab0()
# delete, line 172
if not self.slice_del():
return False
# [, line 173
self.ket = self.cursor
# literal, line 173
if not self.eq_s_b(1, u"i"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 173
self.bra = self.cursor
# call RV, line 173
if not self.r_RV():
self.cursor = self.limit - v_1
raise lab0()
# delete, line 173
if not self.slice_del():
return False
except lab0: pass
# try, line 175
v_2 = self.limit - self.cursor
try:
# (, line 175
# [, line 176
self.ket = self.cursor
# literal, line 176
if not self.eq_s_b(1, u"h"):
self.cursor = self.limit - v_2
raise lab1()
# ], line 176
self.bra = self.cursor
if not self.in_grouping_b(ItalianStemmer.g_CG, 99, 103):
self.cursor = self.limit - v_2
raise lab1()
# call RV, line 176
if not self.r_RV():
self.cursor = self.limit - v_2
raise lab1()
# delete, line 176
if not self.slice_del():
return False
except lab1: pass
return True
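    # _stem() runs the full Italian pipeline: prelude (accent normalisation
    # plus the temporary qU/U/I markers), mark_regions, then backwards-mode
    # removal of attached pronouns, standard or verb suffixes and final
    # vowels, and finally postlude to turn the U/I markers back into u/i.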
def _stem(self):
# (, line 181
# do, line 182
v_1 = self.cursor
try:
# call prelude, line 182
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 183
v_2 = self.cursor
try:
# call mark_regions, line 183
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 184
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 184
# do, line 185
v_3 = self.limit - self.cursor
try:
# call attached_pronoun, line 185
if not self.r_attached_pronoun():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 186
v_4 = self.limit - self.cursor
try:
# (, line 186
# or, line 186
try:
v_5 = self.limit - self.cursor
try:
# call standard_suffix, line 186
if not self.r_standard_suffix():
raise lab5()
raise lab4()
except lab5: pass
self.cursor = self.limit - v_5
# call verb_suffix, line 186
if not self.r_verb_suffix():
raise lab3()
except lab4: pass
except lab3: pass
self.cursor = self.limit - v_4
# do, line 187
v_6 = self.limit - self.cursor
try:
# call vowel_suffix, line 187
if not self.r_vowel_suffix():
raise lab6()
except lab6: pass
self.cursor = self.limit - v_6
self.cursor = self.limit_backward
# do, line 189
v_7 = self.cursor
try:
# call postlude, line 189
if not self.r_postlude():
raise lab7()
except lab7: pass
self.cursor = v_7
return True
def equals(self, o):
return isinstance(o, ItalianStemmer)
def hashCode(self):
return hash("ItalianStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
class lab16(BaseException): pass
class lab17(BaseException): pass
class lab18(BaseException): pass
class lab19(BaseException): pass
class lab20(BaseException): pass
class lab21(BaseException): pass
| 34,904 | Python | .py | 986 | 19.353955 | 97 | 0.383825 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,977 | finnish_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/finnish_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class FinnishStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
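    # Hedged usage sketch: the generated classes are meant to be driven
    # through the BaseStemmer public API rather than by calling _stem()
    # directly. If this vendored copy matches upstream snowballstemmer,
    # where BaseStemmer exposes stemWord()/stemWords(), usage would be:
    #
    #     stemmer = FinnishStemmer()
    #     print(stemmer.stemWord(u"kaupungeissa"))  # example word
    #
    # stemWord() is an assumption taken from the upstream package; check
    # basestemmer.py in this directory before relying on it.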
serialVersionUID = 1
a_0 = [
Among(u"pa", -1, 1),
Among(u"sti", -1, 2),
Among(u"kaan", -1, 1),
Among(u"han", -1, 1),
Among(u"kin", -1, 1),
Among(u"h\u00E4n", -1, 1),
Among(u"k\u00E4\u00E4n", -1, 1),
Among(u"ko", -1, 1),
Among(u"p\u00E4", -1, 1),
Among(u"k\u00F6", -1, 1)
]
a_1 = [
Among(u"lla", -1, -1),
Among(u"na", -1, -1),
Among(u"ssa", -1, -1),
Among(u"ta", -1, -1),
Among(u"lta", 3, -1),
Among(u"sta", 3, -1)
]
a_2 = [
Among(u"ll\u00E4", -1, -1),
Among(u"n\u00E4", -1, -1),
Among(u"ss\u00E4", -1, -1),
Among(u"t\u00E4", -1, -1),
Among(u"lt\u00E4", 3, -1),
Among(u"st\u00E4", 3, -1)
]
a_3 = [
Among(u"lle", -1, -1),
Among(u"ine", -1, -1)
]
a_4 = [
Among(u"nsa", -1, 3),
Among(u"mme", -1, 3),
Among(u"nne", -1, 3),
Among(u"ni", -1, 2),
Among(u"si", -1, 1),
Among(u"an", -1, 4),
Among(u"en", -1, 6),
Among(u"\u00E4n", -1, 5),
Among(u"ns\u00E4", -1, 3)
]
a_5 = [
Among(u"aa", -1, -1),
Among(u"ee", -1, -1),
Among(u"ii", -1, -1),
Among(u"oo", -1, -1),
Among(u"uu", -1, -1),
Among(u"\u00E4\u00E4", -1, -1),
Among(u"\u00F6\u00F6", -1, -1)
]
a_6 = [
Among(u"a", -1, 8),
Among(u"lla", 0, -1),
Among(u"na", 0, -1),
Among(u"ssa", 0, -1),
Among(u"ta", 0, -1),
Among(u"lta", 4, -1),
Among(u"sta", 4, -1),
Among(u"tta", 4, 9),
Among(u"lle", -1, -1),
Among(u"ine", -1, -1),
Among(u"ksi", -1, -1),
Among(u"n", -1, 7),
Among(u"han", 11, 1),
Among(u"den", 11, -1, "r_VI"),
Among(u"seen", 11, -1, "r_LONG"),
Among(u"hen", 11, 2),
Among(u"tten", 11, -1, "r_VI"),
Among(u"hin", 11, 3),
Among(u"siin", 11, -1, "r_VI"),
Among(u"hon", 11, 4),
Among(u"h\u00E4n", 11, 5),
Among(u"h\u00F6n", 11, 6),
Among(u"\u00E4", -1, 8),
Among(u"ll\u00E4", 22, -1),
Among(u"n\u00E4", 22, -1),
Among(u"ss\u00E4", 22, -1),
Among(u"t\u00E4", 22, -1),
Among(u"lt\u00E4", 26, -1),
Among(u"st\u00E4", 26, -1),
Among(u"tt\u00E4", 26, 9)
]
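    # Reading of the generated tables (see among.py for the authoritative
    # signature): each entry appears to be Among(literal, index of the
    # shorter entry this one extends or -1, result code), and a fourth
    # argument such as "r_VI" or "r_LONG" above names a predicate method
    # that must also succeed for the entry to match.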
a_7 = [
Among(u"eja", -1, -1),
Among(u"mma", -1, 1),
Among(u"imma", 1, -1),
Among(u"mpa", -1, 1),
Among(u"impa", 3, -1),
Among(u"mmi", -1, 1),
Among(u"immi", 5, -1),
Among(u"mpi", -1, 1),
Among(u"impi", 7, -1),
Among(u"ej\u00E4", -1, -1),
Among(u"mm\u00E4", -1, 1),
Among(u"imm\u00E4", 10, -1),
Among(u"mp\u00E4", -1, 1),
Among(u"imp\u00E4", 12, -1)
]
a_8 = [
Among(u"i", -1, -1),
Among(u"j", -1, -1)
]
a_9 = [
Among(u"mma", -1, 1),
Among(u"imma", 0, -1)
]
g_AEI = [17, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8]
g_V1 = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32]
g_V2 = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32]
g_particle_end = [17, 97, 24, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32]
B_ending_removed = False
S_x = ""
I_p2 = 0
I_p1 = 0
def copy_from(self, other):
self.B_ending_removed = other.B_ending_removed
self.S_x = other.S_x
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
        super().copy_from(other)
def r_mark_regions(self):
# (, line 41
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# goto, line 46
try:
while True:
v_1 = self.cursor
try:
if not self.in_grouping(FinnishStemmer.g_V1, 97, 246):
raise lab1()
self.cursor = v_1
raise lab0()
except lab1: pass
self.cursor = v_1
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab0: pass
# gopast, line 46
try:
while True:
try:
if not self.out_grouping(FinnishStemmer.g_V1, 97, 246):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab2: pass
# setmark p1, line 46
self.I_p1 = self.cursor
# goto, line 47
try:
while True:
v_3 = self.cursor
try:
if not self.in_grouping(FinnishStemmer.g_V1, 97, 246):
raise lab5()
self.cursor = v_3
raise lab4()
except lab5: pass
self.cursor = v_3
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab4: pass
# gopast, line 47
try:
while True:
try:
if not self.out_grouping(FinnishStemmer.g_V1, 97, 246):
raise lab7()
raise lab6()
except lab7: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab6: pass
# setmark p2, line 47
self.I_p2 = self.cursor
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_particle_etc(self):
# (, line 54
# setlimit, line 55
v_1 = self.limit - self.cursor
# tomark, line 55
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 55
# [, line 55
self.ket = self.cursor
# substring, line 55
among_var = self.find_among_b(FinnishStemmer.a_0, 10)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 55
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 62
if not self.in_grouping_b(FinnishStemmer.g_particle_end, 97, 246):
return False
elif among_var == 2:
# (, line 64
# call R2, line 64
if not self.r_R2():
return False
# delete, line 66
if not self.slice_del():
return False
return True
def r_possessive(self):
# (, line 68
# setlimit, line 69
v_1 = self.limit - self.cursor
# tomark, line 69
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 69
# [, line 69
self.ket = self.cursor
# substring, line 69
among_var = self.find_among_b(FinnishStemmer.a_4, 9)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 69
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 72
# not, line 72
v_3 = self.limit - self.cursor
try:
# literal, line 72
if not self.eq_s_b(1, u"k"):
raise lab0()
return False
except lab0: pass
self.cursor = self.limit - v_3
# delete, line 72
if not self.slice_del():
return False
elif among_var == 2:
# (, line 74
# delete, line 74
if not self.slice_del():
return False
# [, line 74
self.ket = self.cursor
# literal, line 74
if not self.eq_s_b(3, u"kse"):
return False
# ], line 74
self.bra = self.cursor
# <-, line 74
if not self.slice_from(u"ksi"):
return False
elif among_var == 3:
# (, line 78
# delete, line 78
if not self.slice_del():
return False
elif among_var == 4:
# (, line 81
# among, line 81
if self.find_among_b(FinnishStemmer.a_1, 6) == 0:
return False
# delete, line 81
if not self.slice_del():
return False
elif among_var == 5:
# (, line 83
# among, line 83
if self.find_among_b(FinnishStemmer.a_2, 6) == 0:
return False
# delete, line 84
if not self.slice_del():
return False
elif among_var == 6:
# (, line 86
# among, line 86
if self.find_among_b(FinnishStemmer.a_3, 2) == 0:
return False
# delete, line 86
if not self.slice_del():
return False
return True
def r_LONG(self):
# among, line 91
if self.find_among_b(FinnishStemmer.a_5, 7) == 0:
return False
return True
def r_VI(self):
# (, line 93
# literal, line 93
if not self.eq_s_b(1, u"i"):
return False
if not self.in_grouping_b(FinnishStemmer.g_V2, 97, 246):
return False
return True
def r_case_ending(self):
# (, line 95
# setlimit, line 96
v_1 = self.limit - self.cursor
# tomark, line 96
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 96
# [, line 96
self.ket = self.cursor
# substring, line 96
among_var = self.find_among_b(FinnishStemmer.a_6, 30)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 96
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 98
# literal, line 98
if not self.eq_s_b(1, u"a"):
return False
elif among_var == 2:
# (, line 99
# literal, line 99
if not self.eq_s_b(1, u"e"):
return False
elif among_var == 3:
# (, line 100
# literal, line 100
if not self.eq_s_b(1, u"i"):
return False
elif among_var == 4:
# (, line 101
# literal, line 101
if not self.eq_s_b(1, u"o"):
return False
elif among_var == 5:
# (, line 102
# literal, line 102
if not self.eq_s_b(1, u"\u00E4"):
return False
elif among_var == 6:
# (, line 103
# literal, line 103
if not self.eq_s_b(1, u"\u00F6"):
return False
elif among_var == 7:
# (, line 111
# try, line 111
v_3 = self.limit - self.cursor
try:
# (, line 111
# and, line 113
v_4 = self.limit - self.cursor
# or, line 112
try:
v_5 = self.limit - self.cursor
try:
# call LONG, line 111
if not self.r_LONG():
raise lab2()
raise lab1()
except lab2: pass
self.cursor = self.limit - v_5
# literal, line 112
if not self.eq_s_b(2, u"ie"):
self.cursor = self.limit - v_3
raise lab0()
except lab1: pass
self.cursor = self.limit - v_4
# next, line 113
if self.cursor <= self.limit_backward:
self.cursor = self.limit - v_3
raise lab0()
self.cursor -= 1
# ], line 113
self.bra = self.cursor
except lab0: pass
elif among_var == 8:
# (, line 119
if not self.in_grouping_b(FinnishStemmer.g_V1, 97, 246):
return False
if not self.out_grouping_b(FinnishStemmer.g_V1, 97, 246):
return False
elif among_var == 9:
# (, line 121
# literal, line 121
if not self.eq_s_b(1, u"e"):
return False
# delete, line 138
if not self.slice_del():
return False
# set ending_removed, line 139
self.B_ending_removed = True
return True
def r_other_endings(self):
# (, line 141
# setlimit, line 142
v_1 = self.limit - self.cursor
# tomark, line 142
if self.cursor < self.I_p2:
return False
self.cursor = self.I_p2
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 142
# [, line 142
self.ket = self.cursor
# substring, line 142
among_var = self.find_among_b(FinnishStemmer.a_7, 14)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 142
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 146
# not, line 146
v_3 = self.limit - self.cursor
try:
# literal, line 146
if not self.eq_s_b(2, u"po"):
raise lab0()
return False
except lab0: pass
self.cursor = self.limit - v_3
# delete, line 151
if not self.slice_del():
return False
return True
def r_i_plural(self):
# (, line 153
# setlimit, line 154
v_1 = self.limit - self.cursor
# tomark, line 154
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 154
# [, line 154
self.ket = self.cursor
# substring, line 154
if self.find_among_b(FinnishStemmer.a_8, 2) == 0:
self.limit_backward = v_2
return False
# ], line 154
self.bra = self.cursor
self.limit_backward = v_2
# delete, line 158
if not self.slice_del():
return False
return True
def r_t_plural(self):
# (, line 160
# setlimit, line 161
v_1 = self.limit - self.cursor
# tomark, line 161
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 161
# [, line 162
self.ket = self.cursor
# literal, line 162
if not self.eq_s_b(1, u"t"):
self.limit_backward = v_2
return False
# ], line 162
self.bra = self.cursor
# test, line 162
v_3 = self.limit - self.cursor
if not self.in_grouping_b(FinnishStemmer.g_V1, 97, 246):
self.limit_backward = v_2
return False
self.cursor = self.limit - v_3
# delete, line 163
if not self.slice_del():
return False
self.limit_backward = v_2
# setlimit, line 165
v_4 = self.limit - self.cursor
# tomark, line 165
if self.cursor < self.I_p2:
return False
self.cursor = self.I_p2
v_5 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_4
# (, line 165
# [, line 165
self.ket = self.cursor
# substring, line 165
among_var = self.find_among_b(FinnishStemmer.a_9, 2)
if among_var == 0:
self.limit_backward = v_5
return False
# ], line 165
self.bra = self.cursor
self.limit_backward = v_5
if among_var == 0:
return False
elif among_var == 1:
# (, line 167
# not, line 167
v_6 = self.limit - self.cursor
try:
# literal, line 167
if not self.eq_s_b(2, u"po"):
raise lab0()
return False
except lab0: pass
self.cursor = self.limit - v_6
# delete, line 170
if not self.slice_del():
return False
return True
def r_tidy(self):
# (, line 172
# setlimit, line 173
v_1 = self.limit - self.cursor
# tomark, line 173
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 173
# do, line 174
v_3 = self.limit - self.cursor
try:
# (, line 174
# and, line 174
v_4 = self.limit - self.cursor
# call LONG, line 174
if not self.r_LONG():
raise lab0()
self.cursor = self.limit - v_4
# (, line 174
# [, line 174
self.ket = self.cursor
# next, line 174
if self.cursor <= self.limit_backward:
raise lab0()
self.cursor -= 1
# ], line 174
self.bra = self.cursor
# delete, line 174
if not self.slice_del():
return False
except lab0: pass
self.cursor = self.limit - v_3
# do, line 175
v_5 = self.limit - self.cursor
try:
# (, line 175
# [, line 175
self.ket = self.cursor
if not self.in_grouping_b(FinnishStemmer.g_AEI, 97, 228):
raise lab1()
# ], line 175
self.bra = self.cursor
if not self.out_grouping_b(FinnishStemmer.g_V1, 97, 246):
raise lab1()
# delete, line 175
if not self.slice_del():
return False
except lab1: pass
self.cursor = self.limit - v_5
# do, line 176
v_6 = self.limit - self.cursor
try:
# (, line 176
# [, line 176
self.ket = self.cursor
# literal, line 176
if not self.eq_s_b(1, u"j"):
raise lab2()
# ], line 176
self.bra = self.cursor
# or, line 176
try:
v_7 = self.limit - self.cursor
try:
# literal, line 176
if not self.eq_s_b(1, u"o"):
raise lab4()
raise lab3()
except lab4: pass
self.cursor = self.limit - v_7
# literal, line 176
if not self.eq_s_b(1, u"u"):
raise lab2()
except lab3: pass
# delete, line 176
if not self.slice_del():
return False
except lab2: pass
self.cursor = self.limit - v_6
# do, line 177
v_8 = self.limit - self.cursor
try:
# (, line 177
# [, line 177
self.ket = self.cursor
# literal, line 177
if not self.eq_s_b(1, u"o"):
raise lab5()
# ], line 177
self.bra = self.cursor
# literal, line 177
if not self.eq_s_b(1, u"j"):
raise lab5()
# delete, line 177
if not self.slice_del():
return False
except lab5: pass
self.cursor = self.limit - v_8
self.limit_backward = v_2
# goto, line 179
try:
while True:
v_9 = self.limit - self.cursor
try:
if not self.out_grouping_b(FinnishStemmer.g_V1, 97, 246):
raise lab7()
self.cursor = self.limit - v_9
raise lab6()
except lab7: pass
self.cursor = self.limit - v_9
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
except lab6: pass
# [, line 179
self.ket = self.cursor
# next, line 179
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 179
self.bra = self.cursor
# -> x, line 179
self.S_x = self.slice_to(self.S_x)
if self.S_x == '':
return False
# name x, line 179
if not self.eq_v_b(self.S_x):
return False
# delete, line 179
if not self.slice_del():
return False
return True
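    # _stem() is the full Finnish pipeline: mark_regions, then backwards-mode
    # removal of particles, possessives, case endings and other (comparative)
    # endings; i_plural runs only when a case ending was removed, t_plural
    # otherwise, and tidy() finally cleans up long vowels and doubled
    # consonants at the stem boundary.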
def _stem(self):
# (, line 183
# do, line 185
v_1 = self.cursor
try:
# call mark_regions, line 185
if not self.r_mark_regions():
raise lab0()
except lab0: pass
self.cursor = v_1
# unset ending_removed, line 186
self.B_ending_removed = False
# backwards, line 187
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 187
# do, line 188
v_2 = self.limit - self.cursor
try:
# call particle_etc, line 188
if not self.r_particle_etc():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 189
v_3 = self.limit - self.cursor
try:
# call possessive, line 189
if not self.r_possessive():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 190
v_4 = self.limit - self.cursor
try:
# call case_ending, line 190
if not self.r_case_ending():
raise lab3()
except lab3: pass
self.cursor = self.limit - v_4
# do, line 191
v_5 = self.limit - self.cursor
try:
# call other_endings, line 191
if not self.r_other_endings():
raise lab4()
except lab4: pass
self.cursor = self.limit - v_5
# or, line 192
try:
v_6 = self.limit - self.cursor
try:
# (, line 192
# Boolean test ending_removed, line 192
if not self.B_ending_removed:
raise lab6()
# do, line 192
v_7 = self.limit - self.cursor
try:
# call i_plural, line 192
if not self.r_i_plural():
raise lab7()
except lab7: pass
self.cursor = self.limit - v_7
raise lab5()
except lab6: pass
self.cursor = self.limit - v_6
# do, line 192
v_8 = self.limit - self.cursor
try:
# call t_plural, line 192
if not self.r_t_plural():
raise lab8()
except lab8: pass
self.cursor = self.limit - v_8
except lab5: pass
# do, line 193
v_9 = self.limit - self.cursor
try:
# call tidy, line 193
if not self.r_tidy():
raise lab9()
except lab9: pass
self.cursor = self.limit - v_9
self.cursor = self.limit_backward
return True
def equals(self, o):
return isinstance(o, FinnishStemmer)
def hashCode(self):
return hash("FinnishStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
| 25,600 | Python | .py | 802 | 20.387781 | 82 | 0.460292 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,978 | portuguese_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/portuguese_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class PortugueseStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"", -1, 3),
Among(u"\u00E3", 0, 1),
Among(u"\u00F5", 0, 2)
]
a_1 = [
Among(u"", -1, 3),
Among(u"a~", 0, 1),
Among(u"o~", 0, 2)
]
a_2 = [
Among(u"ic", -1, -1),
Among(u"ad", -1, -1),
Among(u"os", -1, -1),
Among(u"iv", -1, 1)
]
a_3 = [
Among(u"ante", -1, 1),
Among(u"avel", -1, 1),
Among(u"\u00EDvel", -1, 1)
]
a_4 = [
Among(u"ic", -1, 1),
Among(u"abil", -1, 1),
Among(u"iv", -1, 1)
]
a_5 = [
Among(u"ica", -1, 1),
Among(u"\u00E2ncia", -1, 1),
Among(u"\u00EAncia", -1, 4),
Among(u"ira", -1, 9),
Among(u"adora", -1, 1),
Among(u"osa", -1, 1),
Among(u"ista", -1, 1),
Among(u"iva", -1, 8),
Among(u"eza", -1, 1),
Among(u"log\u00EDa", -1, 2),
Among(u"idade", -1, 7),
Among(u"ante", -1, 1),
Among(u"mente", -1, 6),
Among(u"amente", 12, 5),
Among(u"\u00E1vel", -1, 1),
Among(u"\u00EDvel", -1, 1),
Among(u"uci\u00F3n", -1, 3),
Among(u"ico", -1, 1),
Among(u"ismo", -1, 1),
Among(u"oso", -1, 1),
Among(u"amento", -1, 1),
Among(u"imento", -1, 1),
Among(u"ivo", -1, 8),
Among(u"a\u00E7a~o", -1, 1),
Among(u"ador", -1, 1),
Among(u"icas", -1, 1),
Among(u"\u00EAncias", -1, 4),
Among(u"iras", -1, 9),
Among(u"adoras", -1, 1),
Among(u"osas", -1, 1),
Among(u"istas", -1, 1),
Among(u"ivas", -1, 8),
Among(u"ezas", -1, 1),
Among(u"log\u00EDas", -1, 2),
Among(u"idades", -1, 7),
Among(u"uciones", -1, 3),
Among(u"adores", -1, 1),
Among(u"antes", -1, 1),
Among(u"a\u00E7o~es", -1, 1),
Among(u"icos", -1, 1),
Among(u"ismos", -1, 1),
Among(u"osos", -1, 1),
Among(u"amentos", -1, 1),
Among(u"imentos", -1, 1),
Among(u"ivos", -1, 8)
]
a_6 = [
Among(u"ada", -1, 1),
Among(u"ida", -1, 1),
Among(u"ia", -1, 1),
Among(u"aria", 2, 1),
Among(u"eria", 2, 1),
Among(u"iria", 2, 1),
Among(u"ara", -1, 1),
Among(u"era", -1, 1),
Among(u"ira", -1, 1),
Among(u"ava", -1, 1),
Among(u"asse", -1, 1),
Among(u"esse", -1, 1),
Among(u"isse", -1, 1),
Among(u"aste", -1, 1),
Among(u"este", -1, 1),
Among(u"iste", -1, 1),
Among(u"ei", -1, 1),
Among(u"arei", 16, 1),
Among(u"erei", 16, 1),
Among(u"irei", 16, 1),
Among(u"am", -1, 1),
Among(u"iam", 20, 1),
Among(u"ariam", 21, 1),
Among(u"eriam", 21, 1),
Among(u"iriam", 21, 1),
Among(u"aram", 20, 1),
Among(u"eram", 20, 1),
Among(u"iram", 20, 1),
Among(u"avam", 20, 1),
Among(u"em", -1, 1),
Among(u"arem", 29, 1),
Among(u"erem", 29, 1),
Among(u"irem", 29, 1),
Among(u"assem", 29, 1),
Among(u"essem", 29, 1),
Among(u"issem", 29, 1),
Among(u"ado", -1, 1),
Among(u"ido", -1, 1),
Among(u"ando", -1, 1),
Among(u"endo", -1, 1),
Among(u"indo", -1, 1),
Among(u"ara~o", -1, 1),
Among(u"era~o", -1, 1),
Among(u"ira~o", -1, 1),
Among(u"ar", -1, 1),
Among(u"er", -1, 1),
Among(u"ir", -1, 1),
Among(u"as", -1, 1),
Among(u"adas", 47, 1),
Among(u"idas", 47, 1),
Among(u"ias", 47, 1),
Among(u"arias", 50, 1),
Among(u"erias", 50, 1),
Among(u"irias", 50, 1),
Among(u"aras", 47, 1),
Among(u"eras", 47, 1),
Among(u"iras", 47, 1),
Among(u"avas", 47, 1),
Among(u"es", -1, 1),
Among(u"ardes", 58, 1),
Among(u"erdes", 58, 1),
Among(u"irdes", 58, 1),
Among(u"ares", 58, 1),
Among(u"eres", 58, 1),
Among(u"ires", 58, 1),
Among(u"asses", 58, 1),
Among(u"esses", 58, 1),
Among(u"isses", 58, 1),
Among(u"astes", 58, 1),
Among(u"estes", 58, 1),
Among(u"istes", 58, 1),
Among(u"is", -1, 1),
Among(u"ais", 71, 1),
Among(u"eis", 71, 1),
Among(u"areis", 73, 1),
Among(u"ereis", 73, 1),
Among(u"ireis", 73, 1),
Among(u"\u00E1reis", 73, 1),
Among(u"\u00E9reis", 73, 1),
Among(u"\u00EDreis", 73, 1),
Among(u"\u00E1sseis", 73, 1),
Among(u"\u00E9sseis", 73, 1),
Among(u"\u00EDsseis", 73, 1),
Among(u"\u00E1veis", 73, 1),
Among(u"\u00EDeis", 73, 1),
Among(u"ar\u00EDeis", 84, 1),
Among(u"er\u00EDeis", 84, 1),
Among(u"ir\u00EDeis", 84, 1),
Among(u"ados", -1, 1),
Among(u"idos", -1, 1),
Among(u"amos", -1, 1),
Among(u"\u00E1ramos", 90, 1),
Among(u"\u00E9ramos", 90, 1),
Among(u"\u00EDramos", 90, 1),
Among(u"\u00E1vamos", 90, 1),
Among(u"\u00EDamos", 90, 1),
Among(u"ar\u00EDamos", 95, 1),
Among(u"er\u00EDamos", 95, 1),
Among(u"ir\u00EDamos", 95, 1),
Among(u"emos", -1, 1),
Among(u"aremos", 99, 1),
Among(u"eremos", 99, 1),
Among(u"iremos", 99, 1),
Among(u"\u00E1ssemos", 99, 1),
Among(u"\u00EAssemos", 99, 1),
Among(u"\u00EDssemos", 99, 1),
Among(u"imos", -1, 1),
Among(u"armos", -1, 1),
Among(u"ermos", -1, 1),
Among(u"irmos", -1, 1),
Among(u"\u00E1mos", -1, 1),
Among(u"ar\u00E1s", -1, 1),
Among(u"er\u00E1s", -1, 1),
Among(u"ir\u00E1s", -1, 1),
Among(u"eu", -1, 1),
Among(u"iu", -1, 1),
Among(u"ou", -1, 1),
Among(u"ar\u00E1", -1, 1),
Among(u"er\u00E1", -1, 1),
Among(u"ir\u00E1", -1, 1)
]
a_7 = [
Among(u"a", -1, 1),
Among(u"i", -1, 1),
Among(u"o", -1, 1),
Among(u"os", -1, 1),
Among(u"\u00E1", -1, 1),
Among(u"\u00ED", -1, 1),
Among(u"\u00F3", -1, 1)
]
a_8 = [
Among(u"e", -1, 1),
Among(u"\u00E7", -1, 2),
Among(u"\u00E9", -1, 1),
Among(u"\u00EA", -1, 1)
]
g_v = [17, 65, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 19, 12, 2]
I_p2 = 0
I_p1 = 0
I_pV = 0
def copy_from(self, other):
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
self.I_pV = other.I_pV
        super().copy_from(other)
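    # The prelude rewrites the nasal vowels u"\u00E3"/u"\u00F5" to the
    # two-character markers "a~"/"o~" so the suffix tables can match them;
    # r_postlude reverses the substitution once stemming is done.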
def r_prelude(self):
# repeat, line 36
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 36
# [, line 37
self.bra = self.cursor
# substring, line 37
among_var = self.find_among(PortugueseStemmer.a_0, 3)
if among_var == 0:
raise lab2()
# ], line 37
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 38
# <-, line 38
if not self.slice_from(u"a~"):
return False
elif among_var == 2:
# (, line 39
# <-, line 39
if not self.slice_from(u"o~"):
return False
elif among_var == 3:
# (, line 40
# next, line 40
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_mark_regions(self):
# (, line 44
        self.I_pV = self.limit
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# do, line 50
v_1 = self.cursor
try:
# (, line 50
# or, line 52
try:
v_2 = self.cursor
try:
# (, line 51
if not self.in_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab2()
# or, line 51
try:
v_3 = self.cursor
try:
# (, line 51
if not self.out_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab4()
# gopast, line 51
try:
while True:
try:
if not self.in_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
raise lab4()
self.cursor += 1
except lab5: pass
raise lab3()
except lab4: pass
self.cursor = v_3
# (, line 51
if not self.in_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab2()
# gopast, line 51
try:
while True:
try:
if not self.out_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab7: pass
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_2
# (, line 53
if not self.out_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab0()
# or, line 53
try:
v_6 = self.cursor
try:
# (, line 53
if not self.out_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab10()
# gopast, line 53
try:
while True:
try:
if not self.in_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab12()
raise lab11()
except lab12: pass
if self.cursor >= self.limit:
raise lab10()
self.cursor += 1
except lab11: pass
raise lab9()
except lab10: pass
self.cursor = v_6
# (, line 53
if not self.in_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab0()
# next, line 53
if self.cursor >= self.limit:
raise lab0()
self.cursor += 1
except lab9: pass
except lab1: pass
# setmark pV, line 54
self.I_pV = self.cursor
except lab0: pass
self.cursor = v_1
# do, line 56
v_8 = self.cursor
try:
# (, line 56
# gopast, line 57
try:
while True:
try:
if not self.in_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab15()
raise lab14()
except lab15: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab14: pass
# gopast, line 57
try:
while True:
try:
if not self.out_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab17()
raise lab16()
except lab17: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab16: pass
# setmark p1, line 57
self.I_p1 = self.cursor
# gopast, line 58
try:
while True:
try:
if not self.in_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab19()
raise lab18()
except lab19: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab18: pass
# gopast, line 58
try:
while True:
try:
if not self.out_grouping(PortugueseStemmer.g_v, 97, 250):
raise lab21()
raise lab20()
except lab21: pass
if self.cursor >= self.limit:
raise lab13()
self.cursor += 1
except lab20: pass
# setmark p2, line 58
self.I_p2 = self.cursor
except lab13: pass
self.cursor = v_8
return True
def r_postlude(self):
# repeat, line 62
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 62
# [, line 63
self.bra = self.cursor
# substring, line 63
among_var = self.find_among(PortugueseStemmer.a_1, 3)
if among_var == 0:
raise lab2()
# ], line 63
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 64
# <-, line 64
if not self.slice_from(u"\u00E3"):
return False
elif among_var == 2:
# (, line 65
# <-, line 65
if not self.slice_from(u"\u00F5"):
return False
elif among_var == 3:
# (, line 66
# next, line 66
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
def r_RV(self):
if not self.I_pV <= self.cursor:
return False
return True
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_standard_suffix(self):
# (, line 76
# [, line 77
self.ket = self.cursor
# substring, line 77
among_var = self.find_among_b(PortugueseStemmer.a_5, 45)
if among_var == 0:
return False
# ], line 77
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 92
# call R2, line 93
if not self.r_R2():
return False
# delete, line 93
if not self.slice_del():
return False
elif among_var == 2:
# (, line 97
# call R2, line 98
if not self.r_R2():
return False
# <-, line 98
if not self.slice_from(u"log"):
return False
elif among_var == 3:
# (, line 101
# call R2, line 102
if not self.r_R2():
return False
# <-, line 102
if not self.slice_from(u"u"):
return False
elif among_var == 4:
# (, line 105
# call R2, line 106
if not self.r_R2():
return False
# <-, line 106
if not self.slice_from(u"ente"):
return False
elif among_var == 5:
# (, line 109
# call R1, line 110
if not self.r_R1():
return False
# delete, line 110
if not self.slice_del():
return False
# try, line 111
v_1 = self.limit - self.cursor
try:
# (, line 111
# [, line 112
self.ket = self.cursor
# substring, line 112
among_var = self.find_among_b(PortugueseStemmer.a_2, 4)
if among_var == 0:
self.cursor = self.limit - v_1
raise lab0()
# ], line 112
self.bra = self.cursor
# call R2, line 112
if not self.r_R2():
self.cursor = self.limit - v_1
raise lab0()
# delete, line 112
if not self.slice_del():
return False
if among_var == 0:
self.cursor = self.limit - v_1
raise lab0()
elif among_var == 1:
# (, line 113
# [, line 113
self.ket = self.cursor
# literal, line 113
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_1
raise lab0()
# ], line 113
self.bra = self.cursor
# call R2, line 113
if not self.r_R2():
self.cursor = self.limit - v_1
raise lab0()
# delete, line 113
if not self.slice_del():
return False
except lab0: pass
elif among_var == 6:
# (, line 121
# call R2, line 122
if not self.r_R2():
return False
# delete, line 122
if not self.slice_del():
return False
# try, line 123
v_2 = self.limit - self.cursor
try:
# (, line 123
# [, line 124
self.ket = self.cursor
# substring, line 124
among_var = self.find_among_b(PortugueseStemmer.a_3, 3)
if among_var == 0:
self.cursor = self.limit - v_2
raise lab1()
# ], line 124
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_2
raise lab1()
elif among_var == 1:
# (, line 127
# call R2, line 127
if not self.r_R2():
self.cursor = self.limit - v_2
raise lab1()
# delete, line 127
if not self.slice_del():
return False
except lab1: pass
elif among_var == 7:
# (, line 133
# call R2, line 134
if not self.r_R2():
return False
# delete, line 134
if not self.slice_del():
return False
# try, line 135
v_3 = self.limit - self.cursor
try:
# (, line 135
# [, line 136
self.ket = self.cursor
# substring, line 136
among_var = self.find_among_b(PortugueseStemmer.a_4, 3)
if among_var == 0:
self.cursor = self.limit - v_3
raise lab2()
# ], line 136
self.bra = self.cursor
if among_var == 0:
self.cursor = self.limit - v_3
raise lab2()
elif among_var == 1:
# (, line 139
# call R2, line 139
if not self.r_R2():
self.cursor = self.limit - v_3
raise lab2()
# delete, line 139
if not self.slice_del():
return False
except lab2: pass
elif among_var == 8:
# (, line 145
# call R2, line 146
if not self.r_R2():
return False
# delete, line 146
if not self.slice_del():
return False
# try, line 147
v_4 = self.limit - self.cursor
try:
# (, line 147
# [, line 148
self.ket = self.cursor
# literal, line 148
if not self.eq_s_b(2, u"at"):
self.cursor = self.limit - v_4
raise lab3()
# ], line 148
self.bra = self.cursor
# call R2, line 148
if not self.r_R2():
self.cursor = self.limit - v_4
raise lab3()
# delete, line 148
if not self.slice_del():
return False
except lab3: pass
elif among_var == 9:
# (, line 152
# call RV, line 153
if not self.r_RV():
return False
# literal, line 153
if not self.eq_s_b(1, u"e"):
return False
# <-, line 154
if not self.slice_from(u"ir"):
return False
return True
def r_verb_suffix(self):
# setlimit, line 159
v_1 = self.limit - self.cursor
# tomark, line 159
if self.cursor < self.I_pV:
return False
self.cursor = self.I_pV
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 159
# [, line 160
self.ket = self.cursor
# substring, line 160
among_var = self.find_among_b(PortugueseStemmer.a_6, 120)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 160
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 179
# delete, line 179
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def r_residual_suffix(self):
# (, line 183
# [, line 184
self.ket = self.cursor
# substring, line 184
among_var = self.find_among_b(PortugueseStemmer.a_7, 7)
if among_var == 0:
return False
# ], line 184
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 187
# call RV, line 187
if not self.r_RV():
return False
# delete, line 187
if not self.slice_del():
return False
return True
def r_residual_form(self):
# (, line 191
# [, line 192
self.ket = self.cursor
# substring, line 192
among_var = self.find_among_b(PortugueseStemmer.a_8, 4)
if among_var == 0:
return False
# ], line 192
self.bra = self.cursor
if among_var == 0:
return False
elif among_var == 1:
# (, line 194
# call RV, line 194
if not self.r_RV():
return False
# delete, line 194
if not self.slice_del():
return False
# [, line 194
self.ket = self.cursor
# or, line 194
try:
v_1 = self.limit - self.cursor
try:
# (, line 194
# literal, line 194
if not self.eq_s_b(1, u"u"):
raise lab1()
# ], line 194
self.bra = self.cursor
# test, line 194
v_2 = self.limit - self.cursor
# literal, line 194
if not self.eq_s_b(1, u"g"):
raise lab1()
self.cursor = self.limit - v_2
raise lab0()
except lab1: pass
self.cursor = self.limit - v_1
# (, line 195
# literal, line 195
if not self.eq_s_b(1, u"i"):
return False
# ], line 195
self.bra = self.cursor
# test, line 195
v_3 = self.limit - self.cursor
# literal, line 195
if not self.eq_s_b(1, u"c"):
return False
self.cursor = self.limit - v_3
except lab0: pass
# call RV, line 195
if not self.r_RV():
return False
# delete, line 195
if not self.slice_del():
return False
elif among_var == 2:
# (, line 196
# <-, line 196
if not self.slice_from(u"c"):
return False
return True
def _stem(self):
# (, line 201
# do, line 202
v_1 = self.cursor
try:
# call prelude, line 202
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 203
v_2 = self.cursor
try:
# call mark_regions, line 203
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 204
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 204
# do, line 205
v_3 = self.limit - self.cursor
try:
# (, line 205
# or, line 209
try:
v_4 = self.limit - self.cursor
try:
# (, line 206
# and, line 207
v_5 = self.limit - self.cursor
# (, line 206
# or, line 206
try:
v_6 = self.limit - self.cursor
try:
# call standard_suffix, line 206
if not self.r_standard_suffix():
raise lab6()
raise lab5()
except lab6: pass
self.cursor = self.limit - v_6
# call verb_suffix, line 206
if not self.r_verb_suffix():
raise lab4()
except lab5: pass
self.cursor = self.limit - v_5
# do, line 207
v_7 = self.limit - self.cursor
try:
# (, line 207
# [, line 207
self.ket = self.cursor
# literal, line 207
if not self.eq_s_b(1, u"i"):
raise lab7()
# ], line 207
self.bra = self.cursor
# test, line 207
v_8 = self.limit - self.cursor
# literal, line 207
if not self.eq_s_b(1, u"c"):
raise lab7()
self.cursor = self.limit - v_8
# call RV, line 207
if not self.r_RV():
raise lab7()
# delete, line 207
if not self.slice_del():
return False
except lab7: pass
self.cursor = self.limit - v_7
raise lab3()
except lab4: pass
self.cursor = self.limit - v_4
# call residual_suffix, line 209
if not self.r_residual_suffix():
raise lab2()
except lab3: pass
except lab2: pass
self.cursor = self.limit - v_3
# do, line 211
v_9 = self.limit - self.cursor
try:
# call residual_form, line 211
if not self.r_residual_form():
raise lab8()
except lab8: pass
self.cursor = self.limit - v_9
self.cursor = self.limit_backward
# do, line 213
v_10 = self.cursor
try:
# call postlude, line 213
if not self.r_postlude():
raise lab9()
except lab9: pass
self.cursor = v_10
return True
def equals(self, o):
return isinstance(o, PortugueseStemmer)
def hashCode(self):
return hash("PortugueseStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
class lab12(BaseException): pass
class lab13(BaseException): pass
class lab14(BaseException): pass
class lab15(BaseException): pass
class lab16(BaseException): pass
class lab17(BaseException): pass
class lab18(BaseException): pass
class lab19(BaseException): pass
class lab20(BaseException): pass
class lab21(BaseException): pass
| 32,115 | Python | .py | 921 | 19.65038 | 96 | 0.399024 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,979 | danish_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/danish_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class DanishStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
    It implements the stemming algorithm defined by a Snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"hed", -1, 1),
Among(u"ethed", 0, 1),
Among(u"ered", -1, 1),
Among(u"e", -1, 1),
Among(u"erede", 3, 1),
Among(u"ende", 3, 1),
Among(u"erende", 5, 1),
Among(u"ene", 3, 1),
Among(u"erne", 3, 1),
Among(u"ere", 3, 1),
Among(u"en", -1, 1),
Among(u"heden", 10, 1),
Among(u"eren", 10, 1),
Among(u"er", -1, 1),
Among(u"heder", 13, 1),
Among(u"erer", 13, 1),
Among(u"s", -1, 2),
Among(u"heds", 16, 1),
Among(u"es", 16, 1),
Among(u"endes", 18, 1),
Among(u"erendes", 19, 1),
Among(u"enes", 18, 1),
Among(u"ernes", 18, 1),
Among(u"eres", 18, 1),
Among(u"ens", 16, 1),
Among(u"hedens", 24, 1),
Among(u"erens", 24, 1),
Among(u"ers", 16, 1),
Among(u"ets", 16, 1),
Among(u"erets", 28, 1),
Among(u"et", -1, 1),
Among(u"eret", 30, 1)
]
a_1 = [
Among(u"gd", -1, -1),
Among(u"dt", -1, -1),
Among(u"gt", -1, -1),
Among(u"kt", -1, -1)
]
a_2 = [
Among(u"ig", -1, 1),
Among(u"lig", 0, 1),
Among(u"elig", 1, 1),
Among(u"els", -1, 1),
Among(u"l\u00F8st", -1, 2)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 128]
g_s_ending = [239, 254, 42, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16]
I_x = 0
I_p1 = 0
S_ch = ""
def copy_from(self, other):
self.I_x = other.I_x
self.I_p1 = other.I_p1
self.S_ch = other.S_ch
        super().copy_from(other)
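    # r_mark_regions places R1 after the first non-vowel following a vowel,
    # but first hops three characters and records that position in I_x so
    # that R1 never starts before the fourth character of the word.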
def r_mark_regions(self):
# (, line 29
        self.I_p1 = self.limit
# test, line 33
v_1 = self.cursor
# (, line 33
# hop, line 33
c = self.cursor + 3
if 0 > c or c > self.limit:
return False
self.cursor = c
# setmark x, line 33
self.I_x = self.cursor
self.cursor = v_1
# goto, line 34
try:
while True:
v_2 = self.cursor
try:
if not self.in_grouping(DanishStemmer.g_v, 97, 248):
raise lab1()
self.cursor = v_2
raise lab0()
except lab1: pass
self.cursor = v_2
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab0: pass
# gopast, line 34
try:
while True:
try:
if not self.out_grouping(DanishStemmer.g_v, 97, 248):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab2: pass
# setmark p1, line 34
self.I_p1 = self.cursor
# try, line 35
try:
# (, line 35
if not (self.I_p1 < self.I_x):
raise lab4()
            self.I_p1 = self.I_x
except lab4: pass
return True
def r_main_suffix(self):
# (, line 40
# setlimit, line 41
v_1 = self.limit - self.cursor
# tomark, line 41
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 41
# [, line 41
self.ket = self.cursor
# substring, line 41
among_var = self.find_among_b(DanishStemmer.a_0, 32)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 41
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 48
# delete, line 48
if not self.slice_del():
return False
elif among_var == 2:
# (, line 50
if not self.in_grouping_b(DanishStemmer.g_s_ending, 97, 229):
return False
# delete, line 50
if not self.slice_del():
return False
return True
def r_consonant_pair(self):
# (, line 54
# test, line 55
v_1 = self.limit - self.cursor
# (, line 55
# setlimit, line 56
v_2 = self.limit - self.cursor
# tomark, line 56
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_3 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_2
# (, line 56
# [, line 56
self.ket = self.cursor
# substring, line 56
if self.find_among_b(DanishStemmer.a_1, 4) == 0:
self.limit_backward = v_3
return False
# ], line 56
self.bra = self.cursor
self.limit_backward = v_3
self.cursor = self.limit - v_1
# next, line 62
if self.cursor <= self.limit_backward:
return False
self.cursor -= 1
# ], line 62
self.bra = self.cursor
# delete, line 62
if not self.slice_del():
return False
return True
def r_other_suffix(self):
# (, line 65
# do, line 66
v_1 = self.limit - self.cursor
try:
# (, line 66
# [, line 66
self.ket = self.cursor
# literal, line 66
if not self.eq_s_b(2, u"st"):
raise lab0()
# ], line 66
self.bra = self.cursor
# literal, line 66
if not self.eq_s_b(2, u"ig"):
raise lab0()
# delete, line 66
if not self.slice_del():
return False
except lab0: pass
self.cursor = self.limit - v_1
# setlimit, line 67
v_2 = self.limit - self.cursor
# tomark, line 67
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_3 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_2
# (, line 67
# [, line 67
self.ket = self.cursor
# substring, line 67
among_var = self.find_among_b(DanishStemmer.a_2, 5)
if among_var == 0:
self.limit_backward = v_3
return False
# ], line 67
self.bra = self.cursor
self.limit_backward = v_3
if among_var == 0:
return False
elif among_var == 1:
# (, line 70
# delete, line 70
if not self.slice_del():
return False
# do, line 70
v_4 = self.limit - self.cursor
try:
# call consonant_pair, line 70
if not self.r_consonant_pair():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_4
elif among_var == 2:
# (, line 72
# <-, line 72
if not self.slice_from(u"l\u00F8s"):
return False
return True
def r_undouble(self):
# (, line 75
# setlimit, line 76
v_1 = self.limit - self.cursor
# tomark, line 76
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 76
# [, line 76
self.ket = self.cursor
if not self.out_grouping_b(DanishStemmer.g_v, 97, 248):
self.limit_backward = v_2
return False
# ], line 76
self.bra = self.cursor
# -> ch, line 76
self.S_ch = self.slice_to(self.S_ch)
if self.S_ch == '':
return False
self.limit_backward = v_2
# name ch, line 77
if not self.eq_v_b(self.S_ch):
return False
# delete, line 78
if not self.slice_del():
return False
return True
def _stem(self):
# (, line 82
# do, line 84
v_1 = self.cursor
try:
# call mark_regions, line 84
if not self.r_mark_regions():
raise lab0()
except lab0: pass
self.cursor = v_1
# backwards, line 85
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 85
# do, line 86
v_2 = self.limit - self.cursor
try:
# call main_suffix, line 86
if not self.r_main_suffix():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 87
v_3 = self.limit - self.cursor
try:
# call consonant_pair, line 87
if not self.r_consonant_pair():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 88
v_4 = self.limit - self.cursor
try:
# call other_suffix, line 88
if not self.r_other_suffix():
raise lab3()
except lab3: pass
self.cursor = self.limit - v_4
# do, line 89
v_5 = self.limit - self.cursor
try:
# call undouble, line 89
if not self.r_undouble():
raise lab4()
except lab4: pass
self.cursor = self.limit - v_5
self.cursor = self.limit_backward
return True
def equals(self, o):
return isinstance(o, DanishStemmer)
def hashCode(self):
return hash("DanishStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
| 10,457 | Python | .py | 339 | 20.522124 | 78 | 0.48429 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,980 | swedish_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/swedish_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class SwedishStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
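    # Each Among entry reads (string, index of a shorter entry it extends or
    # -1, result code returned by find_among_b); this is an interpretation
    # based on Snowball's generated-table conventions.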
a_0 = [
Among(u"a", -1, 1),
Among(u"arna", 0, 1),
Among(u"erna", 0, 1),
Among(u"heterna", 2, 1),
Among(u"orna", 0, 1),
Among(u"ad", -1, 1),
Among(u"e", -1, 1),
Among(u"ade", 6, 1),
Among(u"ande", 6, 1),
Among(u"arne", 6, 1),
Among(u"are", 6, 1),
Among(u"aste", 6, 1),
Among(u"en", -1, 1),
Among(u"anden", 12, 1),
Among(u"aren", 12, 1),
Among(u"heten", 12, 1),
Among(u"ern", -1, 1),
Among(u"ar", -1, 1),
Among(u"er", -1, 1),
Among(u"heter", 18, 1),
Among(u"or", -1, 1),
Among(u"s", -1, 2),
Among(u"as", 21, 1),
Among(u"arnas", 22, 1),
Among(u"ernas", 22, 1),
Among(u"ornas", 22, 1),
Among(u"es", 21, 1),
Among(u"ades", 26, 1),
Among(u"andes", 26, 1),
Among(u"ens", 21, 1),
Among(u"arens", 29, 1),
Among(u"hetens", 29, 1),
Among(u"erns", 21, 1),
Among(u"at", -1, 1),
Among(u"andet", -1, 1),
Among(u"het", -1, 1),
Among(u"ast", -1, 1)
]
a_1 = [
Among(u"dd", -1, -1),
Among(u"gd", -1, -1),
Among(u"nn", -1, -1),
Among(u"dt", -1, -1),
Among(u"gt", -1, -1),
Among(u"kt", -1, -1),
Among(u"tt", -1, -1)
]
a_2 = [
Among(u"ig", -1, 1),
Among(u"lig", 0, 1),
Among(u"els", -1, 1),
Among(u"fullt", -1, 3),
Among(u"l\u00F6st", -1, 2)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24, 0, 32]
g_s_ending = [119, 127, 149]
I_x = 0
I_p1 = 0
def copy_from(self, other):
self.I_x = other.I_x
self.I_p1 = other.I_p1
        super().copy_from(other)
def r_mark_regions(self):
# (, line 26
        self.I_p1 = self.limit
# test, line 29
v_1 = self.cursor
# (, line 29
# hop, line 29
c = self.cursor + 3
if 0 > c or c > self.limit:
return False
self.cursor = c
# setmark x, line 29
self.I_x = self.cursor
self.cursor = v_1
# goto, line 30
try:
while True:
v_2 = self.cursor
try:
if not self.in_grouping(SwedishStemmer.g_v, 97, 246):
raise lab1()
self.cursor = v_2
raise lab0()
except lab1: pass
self.cursor = v_2
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab0: pass
# gopast, line 30
try:
while True:
try:
if not self.out_grouping(SwedishStemmer.g_v, 97, 246):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab2: pass
# setmark p1, line 30
self.I_p1 = self.cursor
# try, line 31
try:
# (, line 31
if not (self.I_p1 < self.I_x):
raise lab4()
            self.I_p1 = self.I_x
except lab4: pass
return True
def r_main_suffix(self):
# (, line 36
# setlimit, line 37
v_1 = self.limit - self.cursor
# tomark, line 37
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 37
# [, line 37
self.ket = self.cursor
# substring, line 37
among_var = self.find_among_b(SwedishStemmer.a_0, 37)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 37
self.bra = self.cursor
self.limit_backward = v_2
if among_var == 0:
return False
elif among_var == 1:
# (, line 44
# delete, line 44
if not self.slice_del():
return False
elif among_var == 2:
# (, line 46
if not self.in_grouping_b(SwedishStemmer.g_s_ending, 98, 121):
return False
# delete, line 46
if not self.slice_del():
return False
return True
def r_consonant_pair(self):
# setlimit, line 50
v_1 = self.limit - self.cursor
# tomark, line 50
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 50
# and, line 52
v_3 = self.limit - self.cursor
# among, line 51
if self.find_among_b(SwedishStemmer.a_1, 7) == 0:
self.limit_backward = v_2
return False
self.cursor = self.limit - v_3
# (, line 52
# [, line 52
self.ket = self.cursor
# next, line 52
if self.cursor <= self.limit_backward:
self.limit_backward = v_2
return False
self.cursor -= 1
# ], line 52
self.bra = self.cursor
# delete, line 52
if not self.slice_del():
return False
self.limit_backward = v_2
return True
def r_other_suffix(self):
# setlimit, line 55
v_1 = self.limit - self.cursor
# tomark, line 55
if self.cursor < self.I_p1:
return False
self.cursor = self.I_p1
v_2 = self.limit_backward
self.limit_backward = self.cursor
self.cursor = self.limit - v_1
# (, line 55
# [, line 56
self.ket = self.cursor
# substring, line 56
among_var = self.find_among_b(SwedishStemmer.a_2, 5)
if among_var == 0:
self.limit_backward = v_2
return False
# ], line 56
self.bra = self.cursor
if among_var == 0:
self.limit_backward = v_2
return False
elif among_var == 1:
# (, line 57
# delete, line 57
if not self.slice_del():
return False
elif among_var == 2:
# (, line 58
# <-, line 58
if not self.slice_from(u"l\u00F6s"):
return False
elif among_var == 3:
# (, line 59
# <-, line 59
if not self.slice_from(u"full"):
return False
self.limit_backward = v_2
return True
def _stem(self):
# (, line 64
# do, line 66
v_1 = self.cursor
try:
# call mark_regions, line 66
if not self.r_mark_regions():
raise lab0()
except lab0: pass
self.cursor = v_1
# backwards, line 67
self.limit_backward = self.cursor
self.cursor = self.limit
# (, line 67
# do, line 68
v_2 = self.limit - self.cursor
try:
# call main_suffix, line 68
if not self.r_main_suffix():
raise lab1()
except lab1: pass
self.cursor = self.limit - v_2
# do, line 69
v_3 = self.limit - self.cursor
try:
# call consonant_pair, line 69
if not self.r_consonant_pair():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
# do, line 70
v_4 = self.limit - self.cursor
try:
# call other_suffix, line 70
if not self.r_other_suffix():
raise lab3()
except lab3: pass
self.cursor = self.limit - v_4
self.cursor = self.limit_backward
return True
def equals(self, o):
return isinstance(o, SwedishStemmer)
def hashCode(self):
return hash("SwedishStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
| 8,673 | Python | .py | 282 | 20.599291 | 78 | 0.484519 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,981 | german_stemmer.py | DamnWidget_anaconda/anaconda_lib/snowballstemmer/german_stemmer.py | # This file was generated automatically by the Snowball to Python interpreter
from .basestemmer import BaseStemmer
from .among import Among
class GermanStemmer(BaseStemmer):
'''
    This class was automatically generated by a Snowball to Python interpreter.
It implements the stemming algorithm defined by a snowball script.
'''
serialVersionUID = 1
a_0 = [
Among(u"", -1, 6),
Among(u"U", 0, 2),
Among(u"Y", 0, 1),
Among(u"\u00E4", 0, 3),
Among(u"\u00F6", 0, 4),
Among(u"\u00FC", 0, 5)
]
a_1 = [
Among(u"e", -1, 2),
Among(u"em", -1, 1),
Among(u"en", -1, 2),
Among(u"ern", -1, 1),
Among(u"er", -1, 1),
Among(u"s", -1, 3),
Among(u"es", 5, 2)
]
a_2 = [
Among(u"en", -1, 1),
Among(u"er", -1, 1),
Among(u"st", -1, 2),
Among(u"est", 2, 1)
]
a_3 = [
Among(u"ig", -1, 1),
Among(u"lich", -1, 1)
]
a_4 = [
Among(u"end", -1, 1),
Among(u"ig", -1, 2),
Among(u"ung", -1, 1),
Among(u"lich", -1, 3),
Among(u"isch", -1, 2),
Among(u"ik", -1, 2),
Among(u"heit", -1, 3),
Among(u"keit", -1, 4)
]
g_v = [17, 65, 16, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 32, 8]
g_s_ending = [117, 30, 5]
g_st_ending = [117, 30, 4]
I_x = 0
I_p2 = 0
I_p1 = 0
def copy_from(self, other):
self.I_x = other.I_x
self.I_p2 = other.I_p2
self.I_p1 = other.I_p1
        super().copy_from(other)
def r_prelude(self):
# (, line 33
# test, line 35
v_1 = self.cursor
# repeat, line 35
try:
while True:
try:
v_2 = self.cursor
try:
# (, line 35
# or, line 38
try:
v_3 = self.cursor
try:
# (, line 36
# [, line 37
self.bra = self.cursor
# literal, line 37
if not self.eq_s(1, u"\u00DF"):
raise lab4()
# ], line 37
self.ket = self.cursor
# <-, line 37
if not self.slice_from(u"ss"):
return False
raise lab3()
except lab4: pass
self.cursor = v_3
# next, line 38
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
except lab3: pass
raise lab1()
except lab2: pass
self.cursor = v_2
raise lab0()
except lab1: pass
except lab0: pass
self.cursor = v_1
# repeat, line 41
try:
while True:
try:
v_4 = self.cursor
try:
# goto, line 41
try:
while True:
v_5 = self.cursor
try:
# (, line 41
if not self.in_grouping(GermanStemmer.g_v, 97, 252):
raise lab9()
# [, line 42
self.bra = self.cursor
# or, line 42
try:
v_6 = self.cursor
try:
# (, line 42
# literal, line 42
if not self.eq_s(1, u"u"):
raise lab11()
# ], line 42
self.ket = self.cursor
if not self.in_grouping(GermanStemmer.g_v, 97, 252):
raise lab11()
# <-, line 42
if not self.slice_from(u"U"):
return False
raise lab10()
except lab11: pass
self.cursor = v_6
# (, line 43
# literal, line 43
if not self.eq_s(1, u"y"):
raise lab9()
# ], line 43
self.ket = self.cursor
if not self.in_grouping(GermanStemmer.g_v, 97, 252):
raise lab9()
# <-, line 43
if not self.slice_from(u"Y"):
return False
except lab10: pass
self.cursor = v_5
raise lab8()
except lab9: pass
self.cursor = v_5
if self.cursor >= self.limit:
raise lab7()
self.cursor += 1
except lab8: pass
raise lab6()
except lab7: pass
self.cursor = v_4
raise lab5()
except lab6: pass
except lab5: pass
return True
def r_mark_regions(self):
# (, line 47
        self.I_p1 = self.limit
        self.I_p2 = self.limit
# test, line 52
v_1 = self.cursor
# (, line 52
# hop, line 52
c = self.cursor + 3
if 0 > c or c > self.limit:
return False
self.cursor = c
# setmark x, line 52
self.I_x = self.cursor
self.cursor = v_1
# gopast, line 54
try:
while True:
try:
if not self.in_grouping(GermanStemmer.g_v, 97, 252):
raise lab1()
raise lab0()
except lab1: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab0: pass
# gopast, line 54
try:
while True:
try:
if not self.out_grouping(GermanStemmer.g_v, 97, 252):
raise lab3()
raise lab2()
except lab3: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab2: pass
# setmark p1, line 54
self.I_p1 = self.cursor
# try, line 55
try:
# (, line 55
if not (self.I_p1 < self.I_x):
raise lab4()
            self.I_p1 = self.I_x
except lab4: pass
# gopast, line 56
try:
while True:
try:
if not self.in_grouping(GermanStemmer.g_v, 97, 252):
raise lab6()
raise lab5()
except lab6: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab5: pass
# gopast, line 56
try:
while True:
try:
if not self.out_grouping(GermanStemmer.g_v, 97, 252):
raise lab8()
raise lab7()
except lab8: pass
if self.cursor >= self.limit:
return False
self.cursor += 1
except lab7: pass
# setmark p2, line 56
self.I_p2 = self.cursor
return True
def r_postlude(self):
# repeat, line 60
try:
while True:
try:
v_1 = self.cursor
try:
# (, line 60
# [, line 62
self.bra = self.cursor
# substring, line 62
among_var = self.find_among(GermanStemmer.a_0, 6)
if among_var == 0:
raise lab2()
# ], line 62
self.ket = self.cursor
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 63
# <-, line 63
if not self.slice_from(u"y"):
return False
elif among_var == 2:
# (, line 64
# <-, line 64
if not self.slice_from(u"u"):
return False
elif among_var == 3:
# (, line 65
# <-, line 65
if not self.slice_from(u"a"):
return False
elif among_var == 4:
# (, line 66
# <-, line 66
if not self.slice_from(u"o"):
return False
elif among_var == 5:
# (, line 67
# <-, line 67
if not self.slice_from(u"u"):
return False
elif among_var == 6:
# (, line 68
# next, line 68
if self.cursor >= self.limit:
raise lab2()
self.cursor += 1
raise lab1()
except lab2: pass
self.cursor = v_1
raise lab0()
except lab1: pass
except lab0: pass
return True
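    # R1/R2 are the standard Snowball regions: the text after the first and
    # second vowel-consonant sequence, respectively (per Snowball's stemming
    # conventions).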
def r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
def r_standard_suffix(self):
# (, line 78
# do, line 79
v_1 = self.limit - self.cursor
try:
# (, line 79
# [, line 80
self.ket = self.cursor
# substring, line 80
among_var = self.find_among_b(GermanStemmer.a_1, 7)
if among_var == 0:
raise lab0()
# ], line 80
self.bra = self.cursor
# call R1, line 80
if not self.r_R1():
raise lab0()
if among_var == 0:
raise lab0()
elif among_var == 1:
# (, line 82
# delete, line 82
if not self.slice_del():
return False
elif among_var == 2:
# (, line 85
# delete, line 85
if not self.slice_del():
return False
# try, line 86
v_2 = self.limit - self.cursor
try:
# (, line 86
# [, line 86
self.ket = self.cursor
# literal, line 86
if not self.eq_s_b(1, u"s"):
self.cursor = self.limit - v_2
raise lab1()
# ], line 86
self.bra = self.cursor
# literal, line 86
if not self.eq_s_b(3, u"nis"):
self.cursor = self.limit - v_2
raise lab1()
# delete, line 86
if not self.slice_del():
return False
except lab1: pass
elif among_var == 3:
# (, line 89
if not self.in_grouping_b(GermanStemmer.g_s_ending, 98, 116):
raise lab0()
# delete, line 89
if not self.slice_del():
return False
except lab0: pass
self.cursor = self.limit - v_1
# do, line 93
v_3 = self.limit - self.cursor
try:
# (, line 93
# [, line 94
self.ket = self.cursor
# substring, line 94
among_var = self.find_among_b(GermanStemmer.a_2, 4)
if among_var == 0:
raise lab2()
# ], line 94
self.bra = self.cursor
# call R1, line 94
if not self.r_R1():
raise lab2()
if among_var == 0:
raise lab2()
elif among_var == 1:
# (, line 96
# delete, line 96
if not self.slice_del():
return False
elif among_var == 2:
# (, line 99
if not self.in_grouping_b(GermanStemmer.g_st_ending, 98, 116):
raise lab2()
# hop, line 99
c = self.cursor - 3
if self.limit_backward > c or c > self.limit:
raise lab2()
self.cursor = c
# delete, line 99
if not self.slice_del():
return False
except lab2: pass
self.cursor = self.limit - v_3
# do, line 103
v_4 = self.limit - self.cursor
try:
# (, line 103
# [, line 104
self.ket = self.cursor
# substring, line 104
among_var = self.find_among_b(GermanStemmer.a_4, 8)
if among_var == 0:
raise lab3()
# ], line 104
self.bra = self.cursor
# call R2, line 104
if not self.r_R2():
raise lab3()
if among_var == 0:
raise lab3()
elif among_var == 1:
# (, line 106
# delete, line 106
if not self.slice_del():
return False
# try, line 107
v_5 = self.limit - self.cursor
try:
# (, line 107
# [, line 107
self.ket = self.cursor
# literal, line 107
if not self.eq_s_b(2, u"ig"):
self.cursor = self.limit - v_5
raise lab4()
# ], line 107
self.bra = self.cursor
# not, line 107
v_6 = self.limit - self.cursor
try:
# literal, line 107
if not self.eq_s_b(1, u"e"):
raise lab5()
self.cursor = self.limit - v_5
raise lab4()
except lab5: pass
self.cursor = self.limit - v_6
# call R2, line 107
if not self.r_R2():
self.cursor = self.limit - v_5
raise lab4()
# delete, line 107
if not self.slice_del():
return False
except lab4: pass
elif among_var == 2:
# (, line 110
# not, line 110
v_7 = self.limit - self.cursor
try:
# literal, line 110
if not self.eq_s_b(1, u"e"):
raise lab6()
raise lab3()
except lab6: pass
self.cursor = self.limit - v_7
# delete, line 110
if not self.slice_del():
return False
elif among_var == 3:
# (, line 113
# delete, line 113
if not self.slice_del():
return False
# try, line 114
v_8 = self.limit - self.cursor
try:
# (, line 114
# [, line 115
self.ket = self.cursor
# or, line 115
try:
v_9 = self.limit - self.cursor
try:
# literal, line 115
if not self.eq_s_b(2, u"er"):
raise lab9()
raise lab8()
except lab9: pass
self.cursor = self.limit - v_9
# literal, line 115
if not self.eq_s_b(2, u"en"):
self.cursor = self.limit - v_8
raise lab7()
except lab8: pass
# ], line 115
self.bra = self.cursor
# call R1, line 115
if not self.r_R1():
self.cursor = self.limit - v_8
raise lab7()
# delete, line 115
if not self.slice_del():
return False
except lab7: pass
elif among_var == 4:
# (, line 119
# delete, line 119
if not self.slice_del():
return False
# try, line 120
v_10 = self.limit - self.cursor
try:
# (, line 120
# [, line 121
self.ket = self.cursor
# substring, line 121
among_var = self.find_among_b(GermanStemmer.a_3, 2)
if among_var == 0:
self.cursor = self.limit - v_10
raise lab10()
# ], line 121
self.bra = self.cursor
# call R2, line 121
if not self.r_R2():
self.cursor = self.limit - v_10
raise lab10()
if among_var == 0:
self.cursor = self.limit - v_10
raise lab10()
elif among_var == 1:
# (, line 123
# delete, line 123
if not self.slice_del():
return False
except lab10: pass
except lab3: pass
self.cursor = self.limit - v_4
return True
def _stem(self):
# (, line 133
# do, line 134
v_1 = self.cursor
try:
# call prelude, line 134
if not self.r_prelude():
raise lab0()
except lab0: pass
self.cursor = v_1
# do, line 135
v_2 = self.cursor
try:
# call mark_regions, line 135
if not self.r_mark_regions():
raise lab1()
except lab1: pass
self.cursor = v_2
# backwards, line 136
self.limit_backward = self.cursor
self.cursor = self.limit
# do, line 137
v_3 = self.limit - self.cursor
try:
# call standard_suffix, line 137
if not self.r_standard_suffix():
raise lab2()
except lab2: pass
self.cursor = self.limit - v_3
self.cursor = self.limit_backward
# do, line 138
v_4 = self.cursor
try:
# call postlude, line 138
if not self.r_postlude():
raise lab3()
except lab3: pass
self.cursor = v_4
return True
def equals(self, o):
return isinstance(o, GermanStemmer)
def hashCode(self):
return hash("GermanStemmer")
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
| 21,402 | Python | .py | 583 | 18.331046 | 96 | 0.362818 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,982 | local_process.py | DamnWidget_anaconda/anaconda_lib/workers/local_process.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
from ..helpers import create_subprocess
from ..helpers import debug_enabled, active_view
class LocalProcess(object):
"""Starts a new local instance of the JsonServer
"""
def __init__(self, interpreter):
self.interpreter = interpreter
self._process = None
self.error = ''
self.tip = ''
@property
def healthy(self):
"""Checks if the jsonserver process is healthy
"""
if debug_enabled(active_view()):
            # if debug is active, the process is handled manually
return True
if self._process.poll() is not None:
self.error = 'the jsonserver process is terminated'
self.tip = 'check your operating system logs'
return False
return True
def start(self):
"""
        Create the subprocess that starts the anaconda JsonServer process
using the configured Python Interpreter
"""
if debug_enabled(active_view()):
# if we are in debug mode the JsonServer is handled manually
return True
args, kwargs = self.interpreter.arguments
self._process = create_subprocess(args, **kwargs)
if self._process is None:
            # we can't spawn a new process for jsonserver; wrong config?
self._set_wrong_config_error()
return False
return True
def stop(self):
"""Stop the current process
"""
if self._process is not None and self._process.poll() is None:
self._process.kill()
self._process = None
def _set_wrong_config_error(self):
"""Set the local error and tip for bad python interpreter configuration
"""
example = '/usr/bin/python'
if os.name == 'nt':
example = r'C:\\Python27\\python.exe'
self.error = (
'Anaconda can not spawn a new process with your current '
'configured python interpreter ({})'.format(
self.interpreter.raw_interpreter
)
)
self.tip = (
'Make sure your interpreter is a valid binary and is in '
'your PATH or use an absolute path to it, '
'for example: {}'.format(example)
)
| 2,422 | Python | .py | 63 | 29.079365 | 79 | 0.603419 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,983 | worker.py | DamnWidget_anaconda/anaconda_lib/workers/worker.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import errno
import socket
import sublime
from ..logger import Log
from ..helpers import get_settings
from ..jsonclient import AsynClient
from ..constants import WorkerStatus
from ..decorators import auto_project_switch_ng
from ..helpers import debug_enabled, active_view, is_remote_session
from .process import WorkerProcess
from .interpreter import Interpreter
class Worker(object):
"""Base class for workers
"""
def __init__(self, interpreter):
self.status = WorkerStatus.incomplete
self.interpreter = interpreter
self.process = WorkerProcess(interpreter).take()
self.client = None
@property
def unix_socket(self):
"""Determine if we use an Unix Socket
"""
for_local = self.interpreter.for_local
return for_local and sublime.platform() == 'linux'
def start(self):
"""Start the worker
"""
if not debug_enabled(active_view()):
if self.process is None:
Log.fatal('Worker process is None!!')
return
if not self.process.start():
msg = (
'{} process can not start a new anaconda JsonServer '
'in the operating system because:\n\n{}\n\n{}'.format(
self.process, self.process.error, self.process.tip
)
)
Log.error(msg)
if self.status != WorkerStatus.faulty:
if not get_settings(
active_view(), 'swallow_startup_errors', False):
sublime.error_message(msg)
self.status = WorkerStatus.faulty
return
if not self.check():
msg = (
'Anaconda worker could not start because:\n\n{}\n\n{}'.format(
self.error, self.tip
)
)
Log.error(msg)
if self.status != WorkerStatus.faulty:
if not get_settings(
active_view(), 'swallow_startup_errors', False):
sublime.error_message(msg)
self.status = WorkerStatus.faulty
return
host, port = self.interpreter.host, self.interpreter.port
if self.unix_socket:
port = 0
self.client = AsynClient(int(port), host=host)
self.status = WorkerStatus.healthy
if hasattr(self, 'reconnecting') and self.reconnecting:
self.reconnecting = False
def stop(self):
"""Stop the worker
"""
pass
def check(self):
"""This method must be re-implemented in base classes
"""
raise RuntimeError('this method must be re-implemented')
def renew_interpreter(self, raw_interpreter):
"""Renew the interpreter object (as it has changed in the configuration)
"""
self.interpreter = Interpreter(raw_interpreter)
self.process.interpreter = self.interpreter
@auto_project_switch_ng
def _execute(self, callback, **data):
"""Execute the given method in the remote server
"""
self.client.send_command(callback, **data)
def _get_service_socket(self, timeout=0.05):
"""Helper function that returns a socket to the JsonServer process
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect((self.interpreter.host, int(self.interpreter.port)))
return s
def _get_service_unix_socket(self, timeout=0.05):
"""Helper function that returns a unix socket to JsonServer process
"""
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect(self.interpreter.host)
return s
def _append_context_data(self, data):
"""Append contextual data depending on the worker type
"""
view = active_view()
if is_remote_session(view):
directory_map = self.interpreter.pathmap
if directory_map is None:
return
for local_dir, remote_dir in directory_map.items():
                # the directory is mapped on the remote machine
data['filename'] = view.file_name().replace(
local_dir, remote_dir
)
break
def _status(self, timeout=2):
"""Check the socket status, return True if it is operable
"""
service_func = {
True: self._get_service_unix_socket,
False: self._get_service_socket
}
try:
s = service_func[self.unix_socket](timeout)
s.close()
self.error = False
except socket.timeout:
self.error = 'connection to {}:{} timed out after {}s'.format(
self.interpreter.host, self.interpreter.port, timeout
)
return False
except socket.error as error:
if error.errno == errno.ECONNREFUSED:
if self.unix_socket:
self.error = 'can not connect to {}'.format(
self.interpreter.host
)
else:
self.error = 'can not connect to {} in port {}'.format(
self.interpreter.host, self.interpreter.port
)
else:
self.error = 'unexpected exception: {}'.format(error)
return False
return True
| 5,709 | Python | .py | 144 | 28.159722 | 80 | 0.57241 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,984 | remote_worker.py | DamnWidget_anaconda/anaconda_lib/workers/remote_worker.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from ..logger import Log
from .worker import Worker
from ..helpers import project_name
from ..constants import WorkerStatus
class RemoteWorker(Worker):
"""This class implements a remote worker
"""
def __init__(self, interpreter):
self.reconnecting = False
super(RemoteWorker, self).__init__(interpreter)
def stop(self):
"""Stop it now please
"""
self.client.close()
self.status = WorkerStatus.incomplete
def check(self):
"""Perform common checks
"""
if self.interpreter.host is None or self.interpreter.port is None:
self.error = 'Host and port must be configured'
self.tip = 'Fix your `python_interpreter` configuration'
return False
return self._status()
def on_python_interpreter_switch(self, raw_python_interpreter):
"""This method is called when there is a python interpreter change
"""
def _fire_worker():
            # just fire this worker, it is not useful anymore
self.stop()
self.status = WorkerStatus.quiting
Log.info('Firing worker {}...'.format(self))
if self.interpreter.project_name is not None:
if project_name() != self.interpreter.project_name:
self.renew_interpreter(raw_python_interpreter)
            # check if our interpreter is not remote anymore
if not self.interpreter.for_remote:
_fire_worker()
self.reconnecting = True
self.stop()
if self.interpreter.raw_interpreter != raw_python_interpreter:
# check if our interpreter is not remote anymore
self.renew_interpreter(raw_python_interpreter)
if not self.interpreter.for_remote:
return _fire_worker()
self.reconnecting = True
self.stop()
def _status(self, timeout=2):
"""Check the socker status and returnn True if is operable
"""
self.tip = (
'check that your Internet is working, the remote host is '
'available from your network and the minserver.py is '
'running in the remote host on port {}'.format(
self.interpreter.port
)
)
return super(RemoteWorker, self)._status(timeout)
| 2,508 | Python | .py | 59 | 32.322034 | 74 | 0.617441 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,985 | vagrant_worker.py | DamnWidget_anaconda/anaconda_lib/workers/vagrant_worker.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import time
import sublime
from .worker import Worker
from ..helpers import project_name
from ..constants import WorkerStatus
from ..progress_bar import ProgressBar
from ..vagrant import VagrantMachineGlobalInfo, VagrantStartMachine
class VagrantWorker(Worker):
"""
    This class implements a local worker that uses an instance of the anaconda
minserver in a local vagrant guest VM
"""
def __init__(self, interpreter):
super(VagrantWorker, self).__init__(interpreter)
def start(self):
"""Start the vagrant worker
"""
if not self.check_config():
return False
return super(VagrantWorker, self).start()
def check_config(self):
"""Check the configuration looks fine
"""
if self.interpreter.network is None:
self.interpreter.network = 'forwarded'
network = self.interpreter.network
if network == 'public' and self.interpreter.dev is None:
self.error = (
'network is configured as public but no device is specified'
)
self.tip = (
'Specify a network device using `dev=<net_iface> or '
'use a different network topology'
)
return False
if network == 'private' and self.interpreter.address is None:
self.error = (
'vagrant network configured as private but '
'no address has been supplied'
)
self.tip = (
'Add the address parameter to your vagrant URI or change the '
'network parameter to forwarded'
)
return False
if not self._check_status():
self.error = 'vagrant machine {} is not running'.format(
self.interpreter.machine)
self.tip = 'Start the vagrant machine'
start_now = sublime.ok_cancel_dialog(
'{} virtual machine is not running, do you want to start it '
'now (it may take a while)?'.format(
self.interpreter.machine), 'Start Now'
)
if start_now:
sublime.active_window().run_command(
'show_panel', {'panel': 'console', 'toggle': False})
try:
messages = {
'start': 'Starting {} VM, please wait...'.format(
self.interpreter.machine
),
'end': 'Done!',
'fail': 'Machine {} could not be started'.format(
self.interpreter.machine
), 'timeout': ''
}
pbar = ProgressBar(messages)
VagrantStartMachine(
self.interpreter.machine, self.interpreter.vagrant_root
)
except RuntimeError as error:
pbar.terminate(status=pbar.Status.FAILURE)
sublime.error_message(str(error))
return False
else:
pbar.terminate()
sublime.message_dialog('Machine {} started.'.format(
self.interpreter.machine
))
return self.check()
return False
return True
def check(self):
"""Perform required checks to conclude if it's safe to operate
"""
if self.interpreter.manual is None:
if not self.process.healthy:
self.error = self.process.error
self.tip = self.process.tip
return False
start = time.time()
while not self._status():
if time.time() - start >= 2: # 2s
self.error = "can't connect to the minserver on {}:{}".format(
self.interpreter.host, self.interpreter.port
)
self.tip = 'check your vagrant machine is running'
return False
time.sleep(0.1)
return True
def stop(self):
"""Stop it now please
"""
self.process.stop()
self.client.close()
self.status = WorkerStatus.incomplete
def on_python_interpreter_switch(self, raw_python_interpreter):
"""This method is called when there is a python interpreter change
"""
switch = False
if self.interpreter.project_name is not None:
if project_name() != self.interpreter.project_name:
switch = True
if self.interpreter.raw_interpreter != raw_python_interpreter:
switch = True
if switch:
# check if our interpreter is not local anymore
self.renew_interpreter(raw_python_interpreter)
if not self.interpreter.for_vagrant:
# just fire this worker, it's not useful anymore
self.stop()
self.status = WorkerStatus.quiting
return
self.reconnecting = True
self.stop()
def _check_status(self):
"""Check vagrant statsu and translate machine ID
"""
try:
vagrant_info = VagrantMachineGlobalInfo(self.interpreter.machine)
except RuntimeError as error:
            self.error = error
self.tip = 'Install vagrant or add it to your path'
return False
if not vagrant_info.machine_id:
self.error = 'Vagrant machine {} does not exists'.format(
vagrant_info.machine
)
self.tip = 'Create and start your Vagrant machine'
return False
self.interpreter.machine_id = vagrant_info.machine_id
self.interpreter.vagrant_root = vagrant_info.directory
return vagrant_info.status == 'running'
def _status(self, timeout=0.5):
"""Check the socket status, return True e if is operable
"""
self.tip = (
'check that your vagrant machine is running and the minserver'
'is being executed in the guest {} port {}'.format(
self.interpreter.machine, self.interpreter.port
)
)
return super(VagrantWorker, self)._status(timeout)
| 6,485 | Python | .py | 156 | 28.544872 | 79 | 0.554743 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,986 | interpreter.py | DamnWidget_anaconda/anaconda_lib/workers/interpreter.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import socket
from urllib.parse import urlparse, parse_qs
import sublime
from ..logger import Log
from ..unix_socket import UnixSocketPath
from ..helpers import project_name, debug_enabled
from ..helpers import get_settings, active_view, get_interpreter
from ..vagrant import VagrantIPAddressGlobal, VagrantMachineGlobalInfo
class Interpreter(object):
"""Parses a configured Python Interpreter
"""
def __init__(self, interpreter_string):
self.__data = {}
self.__raw_interpreter = interpreter_string
self.__parse_raw_interpreter()
self.__project_name = ''
def __getattr__(self, attr_name):
"""Return data as it was part of the object itself
"""
return self.__data.get(attr_name, None)
@property
def raw_interpreter(self):
return self.__raw_interpreter
@property
def for_local(self):
"""Returns True if this interpreter is configured for local
"""
return self.scheme == 'file'
@property
def for_remote(self):
"""Return True if this interpreter is configured for remote
"""
return self.scheme == 'tcp'
@property
def for_vagrant(self):
"""Return True if this interpreter is configured for vagrant
"""
return self.scheme == 'vagrant'
@property
def project_name(self):
"""Set project name if necessary and return it back
"""
if not self.__project_name:
self.__project_name = project_name()
return self.__project_name
def renew_interpreter(self):
"""Renew the whole intrepreter
"""
if not self.for_local:
return
self.__prepare_local_interpreter()
def __prepare_local_interpreter(self):
"""Prepare data for the local interpreter if scheme is lcoal
"""
view = active_view()
self.__extract_port(view)
self.__extract_paths(view)
self.__extract_python_interpreter(view)
self.__extract_script()
args = [self.python, '-B', self.script_file, '-p', self.project_name]
if self.port is not None:
args.append(str(self.port))
if len(self.paths) > 0:
paths = [p for p in self.paths if os.path.exists(p)]
args.extend(['-e', ','.join(paths)])
args.extend([str(os.getpid())])
kwargs = {}
folders = sublime.active_window().folders()
if len(folders) > 0 and os.path.exists(folders[0]):
kwargs['cwd'] = folders[0]
self.__data['arguments'] = (args, kwargs)
def __extract_port(self, view):
"""Extract the port to connect to
"""
if sublime.platform() != 'linux':
self.__data['host'] = 'localhost'
else:
self.__data['host'] = self.__get_unix_domain_socket()
return
if debug_enabled(view):
port = get_settings(view, 'jsonserver_debug_port', 9999)
self.__data['port'] = port
return
if sublime.platform() != 'linux':
s = socket.socket()
s.bind(('', 0))
self.__data['port'] = s.getsockname()[1]
s.close()
def __extract_paths(self, view):
"""Extract a list of paths to be added to jedi
"""
extra = get_settings(view, 'extra_paths', [])
paths = [os.path.expanduser(os.path.expandvars(p)) for p in extra]
try:
paths.extend(sublime.active_window().folders())
except AttributeError:
Log.warning(
'Your `extra_paths` configuration is a string but we are '
'expecting a list of strings.'
)
            paths = extra.split(',')
            paths.extend(sublime.active_window().folders())
self.__data['paths'] = paths
def __extract_python_interpreter(self, view):
"""Extract the configured python interpreter
"""
try:
urldata = urlparse(
os.path.expanduser(
os.path.expandvars(get_interpreter(view))
)
)
if len(urldata.scheme) == 1:
# Assume this comes from a Windows path if scheme is a single character
python = os.path.join('{}:'.format(urldata.scheme), urldata.path)
else:
python = urldata.path
if '$VIRTUAL_ENV' in python:
Log.warning(
'WARNING: your anaconda configured python interpreter '
'is {} but there is no $VIRTUAL_ENV key in your '
'environment, falling back to `python`'.format(python)
)
except Exception:
python = 'python'
finally:
self.__data['python'] = python
def __extract_script(self):
"""Extrct the jsonserver.py script location
"""
self.__data['script_file'] = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'anaconda_server', 'jsonserver.py'
)
def __get_unix_domain_socket(self):
"""Compound the Unix domain socket path
"""
if sublime.platform() != 'linux':
return 'localhost'
return UnixSocketPath(self.project_name).socket
def __parse_raw_interpreter(self):
"""Parses the raw interpreter string for later simple use
"""
urldata = urlparse(self.__raw_interpreter)
self.__data['scheme'] = urldata.scheme if urldata.scheme else 'file'
if len(self.__data['scheme']) == 1:
self.__data['scheme'] = 'file'
if urldata.query:
options = parse_qs(urldata.query)
for key, value in options.items():
self.__data[key] = (
value if key in ['extra', 'pathmap'] else value[0]
)
if self.for_local:
            # we are set up for local; return now and do our thing
return self.__prepare_local_interpreter()
if urldata.query and 'manual=' in urldata.query:
self.__data['scheme'] = 'tcp'
netloc = urldata.netloc
if '@' in urldata.netloc:
left, netloc = netloc.split('@')
self.__data['username'], self.__data['password'] = left.split(':')
if self.for_remote:
self.__data['host'], self.__data['port'] = netloc.split(':')
if self.for_vagrant:
self.__data['machine'], self.__data['port'] = netloc.split(':')
if self.for_vagrant:
self.__data['network'] = self.__data.get('network', 'forwarded')
self.__data['interpreter'] = (
self.__data.get('interpreter', 'python')
)
_vagrant_hosts = {
'forwarded': 'localhost',
'private': self.address,
'public': VagrantIPAddressGlobal(
VagrantMachineGlobalInfo(self.machine).machine_id, self.dev
).ip_address
}
self.__data['host'] = _vagrant_hosts[self.network]
pathmap = {}
for map_data in self.__data.get('pathmap', []):
split_data = map_data.split(',')
if len(split_data) != 2:
Log.warning('pathmap corruption? -> {}'.format(map_data))
continue
local_path = os.path.expanduser(os.path.expandvars(split_data[0]))
remote_path = os.path.expanduser(os.path.expandvars(split_data[1]))
pathmap[local_path] = remote_path
self.__data['pathmap'] = pathmap
def __repr__(self):
"""String representation
"""
try:
return ' '.join(self.arguments[0])
except TypeError:
rep = ''
for k, v in self.__data.items():
                rep += '{}: {}\n'.format(k, v)
return rep
| 8,127 | Python | .py | 202 | 29.514851 | 87 | 0.558944 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,987 | remote_process.py | DamnWidget_anaconda/anaconda_lib/workers/remote_process.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
class StubProcess(object):
"""Self descriptive class name, right?
"""
def __init__(self, interpreter):
self._process = None
        self._interpreter = interpreter
def start(self):
"""Just returns True and does nothing
"""
return True
| 416 | Python | .py | 12 | 28.5 | 72 | 0.650754 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,988 | process.py | DamnWidget_anaconda/anaconda_lib/workers/process.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .local_process import LocalProcess
from .remote_process import StubProcess
from .vagrant_process import VagrantProcess
class WorkerProcess(object):
"""Return a right processer based in the scheme
"""
_processers = {'tcp': StubProcess, 'vagrant': VagrantProcess}
def __init__(self, interpreter):
self._interpreter = interpreter
def take(self):
scheme = self._interpreter.scheme
return self._processers.get(scheme, LocalProcess)(self._interpreter)
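# Minimal usage sketch (illustrative; mirrors how Worker wires this up):
#   process = WorkerProcess(interpreter).take()
#   if process.start():
#       pass  # the worker can now connect its client to the JsonServer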
| 636 | Python | .py | 14 | 40.785714 | 76 | 0.739837 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,989 | vagrant_process.py | DamnWidget_anaconda/anaconda_lib/workers/vagrant_process.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import time
import shlex
import socket
import subprocess
from ..logger import Log
from ..helpers import create_subprocess
from ..helpers import debug_enabled, active_view, get_settings
class VagrantProcess(object):
"""Starts a new instance of the minserver into a vagrant guest
"""
def __init__(self, interpreter):
self.interpreter = interpreter
self._process = None
        self.error = ''
self.tip = ''
@property
def healthy(self):
"""Checks if the vagrant process is healthy
"""
        if debug_enabled(active_view()):
return True
if self._process.poll() is not None:
self.error = 'the minserver process is terminated in the guest'
self.tip = 'check your vagrant machine and configuration'
return False
return True
def start(self):
"""Create the subprocess for the vagrant minserver process
"""
        # first check if we are operating in manual mode or the server is already up
if self.interpreter.manual is not None or self._up_already():
return True
args, kwargs = self._prepare_arguments()
self._process = create_subprocess(args, **kwargs)
time.sleep(1) # give it some time
if self._process is None or self._process.poll() is not None:
# we can't spawn the vagrant command. Not installed? Running?
output, error = self._process.communicate()
if error == b'Connection to 127.0.0.1 closed.\r\n':
return True # probably the minserver is running already
self.error = (
'Anaconda can not spawn the `vagrant` application to run `{}` '
'\n\nProcess output: {}\nProcess error: {}'.format(
' '.join(args),
output.decode('utf8'),
error.decode('utf8').replace('\n', ' ')
)
)
self.tip = 'Check your vagrant installation/configuration'
return False
return True
def _up_already(self):
"""Return True if the minserver is running already on guest
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
            s.connect((self.interpreter.host, int(self.interpreter.port)))
s.close()
self.interpreter.manual = True
        except OSError:
return False
return True
def _prepare_arguments(self):
"""Prepare subprocess arguments
"""
script_file = self._compose_script_file()
paths = self._compose_extra_paths()
cmd = 'vagrant ssh {} -c "{}"'.format(
self.interpreter.machine_id,
'{} {} -p {}{} {}'.format(
self.interpreter.interpreter,
script_file,
self.interpreter.project_name,
" '{}'".format('-e ' + ','.join(paths) if paths else ' '),
self.interpreter.port
)
)
kwargs = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE}
return shlex.split(cmd, posix=os.name != 'nt'), kwargs
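    # The composed command looks roughly like this (hypothetical values):
    #   vagrant ssh <machine_id> -c "python /anaconda/anaconda_server/minserver.py -p <project> '-e <paths>' <port>"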
def _compose_script_file(self):
"""Compose the script file location using the interpreter context
"""
target_os = self.interpreter.os
target_os = 'posix' if target_os is None else target_os.lower()
sep = '\\' if target_os == 'windows' else '/'
shared = self.interpreter.shared
if shared is None:
shared = '/anaconda' if target_os == 'posix' else 'C:\\anaconda'
return '{0}{1}anaconda_server{1}minserver.py'.format(shared, sep)
def _compose_extra_paths(self):
"""Compose extra paths (if any) using the CV context
"""
extra_paths = []
try:
self.interpreter.extra.extend([])
except AttributeError:
if self.interpreter.extra is not None:
Log.warning(
'Your `extra` query option is a string but a list '
'was expected'
)
extra_paths.extend(self.interpreter.extra.split(','))
else:
extra_paths.extend(self.interpreter.extra)
extra_paths.extend(get_settings(active_view(), 'extra_paths', []))
return extra_paths
| 4,501 | Python | .py | 110 | 30.518182 | 79 | 0.580527 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,990 | local_worker.py | DamnWidget_anaconda/anaconda_lib/workers/local_worker.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import time
import platform
import sublime
from ..logger import Log
from .worker import Worker
from ..helpers import project_name, get_socket_timeout
from ..constants import WorkerStatus
from ..builder.python_builder import AnacondaSetPythonBuilder
class LocalWorker(Worker):
"""This class implements a local worker that uses a local jsonserver
"""
def __init__(self, interpreter):
self.reconnecting = False
super(LocalWorker, self).__init__(interpreter)
def check(self):
"""Perform required checks to conclude if it is safe to operate
"""
if not self.process.healthy:
self.error = self.process.error
self.tip = self.process.tip
return False
timeout = get_socket_timeout(0.2)
start = time.time()
times = 1
interval = timeout * 10
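        # retry the connection for up to `interval` seconds (ten times the
        # per-attempt socket timeout) before giving up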
while not self._status(timeout):
if time.time() - start >= interval: # expressed in seconds
msg = '{}. tried to connect {} times during {} seconds'
self.error = msg.format(self.error, times, interval)
return False
time.sleep(0.1)
times += 1
return True
def start(self):
"""Start the worker
"""
self._update_python_builder()
if self.reconnecting:
self.interpreter.renew_interpreter()
super(LocalWorker, self).start()
def stop(self):
"""Stop it now please
"""
self.process.stop()
self.client.close()
self.status = WorkerStatus.incomplete
def on_python_interpreter_switch(self, raw_python_interpreter):
"""This method is called when there is a python interpreter switch
"""
switch = False
if self.interpreter.project_name is not None:
if project_name() != self.interpreter.project_name:
switch = True
if self.process._process.args[0] != raw_python_interpreter:
switch = True
if switch:
# check if our interpreter is not local anymore
self.renew_interpreter(raw_python_interpreter)
if not self.interpreter.for_local:
# just fire this worker, it's not useful anymore
Log.info('Firing worker {}...'.format(self))
self.stop()
self.status = WorkerStatus.quiting
return
self.reconnecting = True
self.stop()
def _update_python_builder(self):
"""Update the python builder in the config file
"""
p_data = sublime.active_window().project_data()
if p_data is not None:
AnacondaSetPythonBuilder().update_interpreter_build_system(
self.interpreter.python
)
def _status(self, timeout=0.05):
"""Check the socket status, returns True if it is operable
"""
check = 'that you can connect to your localhost'
addr = '("localhost", {})'.format(self.interpreter.port)
if sublime.platform() == 'linux':
check = (
'that the Unix Domain Socket file {} exists and that you can '
'connect to it'
).format(self.interpreter.host)
addr = self.interpreter.host
address_family = (
'AF_INET' if platform.system().lower() != 'linux' else 'AF_UNIX'
)
self.tip = (
'check that there is Python process executing the anaconda '
'jsonserver.py script running in your system. If there is, check '
'{} writing the following script in your Sublime Text 3 console:'
'\n\nimport socket; socket.socket(socket.{}, '
'socket.SOCK_STREAM).connect({})\n\nIf anaconda works just fine '
'after you received this error and the command above worked you '
'can make anaconda to do not show you this error anymore setting '
'the \'swallow_startup_errors\' to \'true\' in your '
'configuration file.'.format(check, address_family, addr)
)
return super(LocalWorker, self)._status(timeout)
| 4,332 | Python | .py | 102 | 32.27451 | 78 | 0.604424 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,991 | market.py | DamnWidget_anaconda/anaconda_lib/workers/market.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import threading
import sublime
from ..info import Repr
from ..logger import Log
from ..constants import WorkerStatus
from .interpreter import Interpreter
from .local_worker import LocalWorker
from .remote_worker import RemoteWorker
from .vagrant_worker import VagrantWorker
from ..helpers import active_view, get_interpreter
class Market(object, metaclass=Repr):
"""When you need a worker you hire one in the market
"""
_worker_pool = {}
_lock = threading.RLock()
_workers_type = {'tcp': RemoteWorker, 'vagrant': VagrantWorker}
def hire(self):
"""Hire the right worker from the market pool
"""
raw_interpreter = get_interpreter(active_view())
itprt = Interpreter(raw_interpreter)
return self._workers_type.get(itprt.scheme, LocalWorker)(itprt)
def add(self, window_id, worker):
"""Add the given worker into the workers pool
"""
with self._lock:
if self._worker_pool.get(window_id) is None:
self._worker_pool[window_id] = worker
else:
Log.warning(
                    'tried to append an existing worker for window {} to '
'the workers market. Skipping...'.format(window_id)
)
def get(self, window_id):
"""Retrieve a worker for the given window_id from the workers market
"""
with self._lock:
worker = self._worker_pool.get(window_id)
return worker
def fire(self, window_id):
"""Remote a worker from the workers market
"""
worker = self._worker_pool.pop(window_id, None)
if worker is None:
Log.error(
'tried to remove a worker that is not part of the workers '
'market for window {}. Skipping'.format(window_id)
)
return
@classmethod
def execute(cls, callback, **data):
"""Execute the given method remotely and call the callback with result
"""
def _start_worker(wk, cb, **d):
wk.start()
if wk.status == WorkerStatus.healthy:
wk._execute(cb, **d)
return
sublime.set_timeout_async(lambda: _start_worker(wk, cb, **d), 5000)
window_id = sublime.active_window().id()
worker = cls.get(cls, window_id)
if worker is None:
# hire a new worker
worker = cls.hire(cls)
cls.add(cls, window_id, worker)
if worker.status == WorkerStatus.faulty:
return
if worker.status == WorkerStatus.quiting:
cls.fire(cls, window_id)
return
if worker.client is not None:
if not worker.client.connected:
worker.reconnecting = True
worker.state = WorkerStatus.incomplete
_start_worker(worker, callback, **data)
else:
worker._append_context_data(data)
worker._execute(callback, **data)
if worker.status == WorkerStatus.quiting:
# that means that we need to let the worker go
cls.fire(cls, window_id)
else:
_start_worker(worker, callback, **data)
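    # Typical call site (illustrative; the callback is invoked with the
    # JsonServer response when it arrives):
    #   Market.execute(callback, method='autocomplete', uid=..., ...)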
@classmethod
def lookup(cls, window_id):
"""Alias for get
"""
return cls.get(cls, window_id)
@classmethod
def _repr(cls):
"""Returns a representation of the Market
"""
workers_data = []
for window_id, worker in cls._worker_pool.items():
workers_data.append('Window ID: {}\n{}'.format(window_id, worker))
return '{} workers in the market\n\n{}'.format(
len(cls._worker_pool), '\n\n'.join(workers_data)
)
| 3,927 | Python | .py | 100 | 29.31 | 79 | 0.590155 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,992 | tree.py | DamnWidget_anaconda/anaconda_lib/parso/tree.py | from abc import abstractmethod, abstractproperty
from typing import List, Optional, Tuple, Union
from parso.utils import split_lines
def search_ancestor(node: 'NodeOrLeaf', *node_types: str) -> 'Optional[BaseNode]':
"""
Recursively looks at the parents of a node and returns the first found node
that matches ``node_types``. Returns ``None`` if no matching node is found.
This function is deprecated, use :meth:`NodeOrLeaf.search_ancestor` instead.
:param node: The ancestors of this node will be checked.
:param node_types: type names that are searched for.
"""
n = node.parent
while n is not None:
if n.type in node_types:
return n
n = n.parent
return None
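# Example (illustrative): find the enclosing function definition of a leaf:
#   funcdef = search_ancestor(name_leaf, 'funcdef', 'lambdef')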
class NodeOrLeaf:
"""
The base class for nodes and leaves.
"""
__slots__ = ('parent',)
type: str
'''
The type is a string that typically matches the types of the grammar file.
'''
parent: 'Optional[BaseNode]'
'''
The parent :class:`BaseNode` of this node or leaf.
None if this is the root node.
'''
def get_root_node(self):
"""
Returns the root node of a parser tree. The returned node doesn't have
a parent node like all the other nodes/leaves.
"""
scope = self
while scope.parent is not None:
scope = scope.parent
return scope
def get_next_sibling(self):
"""
Returns the node immediately following this node in this parent's
        children list. If this node does not have a next sibling, it is None.
"""
parent = self.parent
if parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
def get_previous_sibling(self):
"""
Returns the node immediately preceding this node in this parent's
children list. If this node does not have a previous sibling, it is
None.
"""
parent = self.parent
if parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def get_previous_leaf(self):
"""
Returns the previous leaf in the parser tree.
Returns `None` if this is the first element in the parser tree.
"""
if self.parent is None:
return None
node = self
while True:
c = node.parent.children
i = c.index(node)
if i == 0:
node = node.parent
if node.parent is None:
return None
else:
node = c[i - 1]
break
while True:
try:
node = node.children[-1]
except AttributeError: # A Leaf doesn't have children.
return node
def get_next_leaf(self):
"""
Returns the next leaf in the parser tree.
Returns None if this is the last element in the parser tree.
"""
if self.parent is None:
return None
node = self
while True:
c = node.parent.children
i = c.index(node)
if i == len(c) - 1:
node = node.parent
if node.parent is None:
return None
else:
node = c[i + 1]
break
while True:
try:
node = node.children[0]
except AttributeError: # A Leaf doesn't have children.
return node
@abstractproperty
def start_pos(self) -> Tuple[int, int]:
"""
Returns the starting position of the prefix as a tuple, e.g. `(3, 4)`.
:return tuple of int: (line, column)
"""
@abstractproperty
def end_pos(self) -> Tuple[int, int]:
"""
Returns the end position of the prefix as a tuple, e.g. `(3, 4)`.
:return tuple of int: (line, column)
"""
@abstractmethod
def get_start_pos_of_prefix(self):
"""
Returns the start_pos of the prefix. This means basically it returns
the end_pos of the last prefix. The `get_start_pos_of_prefix()` of the
prefix `+` in `2 + 1` would be `(1, 1)`, while the start_pos is
`(1, 2)`.
:return tuple of int: (line, column)
"""
@abstractmethod
def get_first_leaf(self):
"""
Returns the first leaf of a node or itself if this is a leaf.
"""
@abstractmethod
def get_last_leaf(self):
"""
Returns the last leaf of a node or itself if this is a leaf.
"""
@abstractmethod
def get_code(self, include_prefix=True):
"""
Returns the code that was the input for the parser for this node.
:param include_prefix: Removes the prefix (whitespace and comments) of
e.g. a statement.
"""
def search_ancestor(self, *node_types: str) -> 'Optional[BaseNode]':
"""
Recursively looks at the parents of this node or leaf and returns the
first found node that matches ``node_types``. Returns ``None`` if no
matching node is found.
:param node_types: type names that are searched for.
"""
node = self.parent
while node is not None:
if node.type in node_types:
return node
node = node.parent
return None
def dump(self, *, indent: Optional[Union[int, str]] = 4) -> str:
"""
Returns a formatted dump of the parser tree rooted at this node or leaf. This is
mainly useful for debugging purposes.
The ``indent`` parameter is interpreted in a similar way as :py:func:`ast.dump`.
If ``indent`` is a non-negative integer or string, then the tree will be
pretty-printed with that indent level. An indent level of 0, negative, or ``""``
will only insert newlines. ``None`` selects the single line representation.
Using a positive integer indent indents that many spaces per level. If
``indent`` is a string (such as ``"\\t"``), that string is used to indent each
level.
:param indent: Indentation style as described above. The default indentation is
4 spaces, which yields a pretty-printed dump.
>>> import parso
>>> print(parso.parse("lambda x, y: x + y").dump())
Module([
Lambda([
Keyword('lambda', (1, 0)),
Param([
Name('x', (1, 7), prefix=' '),
Operator(',', (1, 8)),
]),
Param([
Name('y', (1, 10), prefix=' '),
]),
Operator(':', (1, 11)),
PythonNode('arith_expr', [
Name('x', (1, 13), prefix=' '),
Operator('+', (1, 15), prefix=' '),
Name('y', (1, 17), prefix=' '),
]),
]),
EndMarker('', (1, 18)),
])
"""
if indent is None:
newline = False
indent_string = ''
elif isinstance(indent, int):
newline = True
indent_string = ' ' * indent
elif isinstance(indent, str):
newline = True
indent_string = indent
else:
raise TypeError(f"expect 'indent' to be int, str or None, got {indent!r}")
def _format_dump(node: NodeOrLeaf, indent: str = '', top_level: bool = True) -> str:
result = ''
node_type = type(node).__name__
if isinstance(node, Leaf):
result += f'{indent}{node_type}('
if isinstance(node, ErrorLeaf):
result += f'{node.token_type!r}, '
elif isinstance(node, TypedLeaf):
result += f'{node.type!r}, '
result += f'{node.value!r}, {node.start_pos!r}'
if node.prefix:
result += f', prefix={node.prefix!r}'
result += ')'
elif isinstance(node, BaseNode):
result += f'{indent}{node_type}('
if isinstance(node, Node):
result += f'{node.type!r}, '
result += '['
if newline:
result += '\n'
for child in node.children:
result += _format_dump(child, indent=indent + indent_string, top_level=False)
result += f'{indent}])'
else: # pragma: no cover
# We shouldn't ever reach here, unless:
                # - `NodeOrLeaf` is incorrectly subclassed elsewhere
# - or a node's children list contains invalid nodes or leafs
# Both are unexpected internal errors.
raise TypeError(f'unsupported node encountered: {node!r}')
if not top_level:
if newline:
result += ',\n'
else:
result += ', '
return result
return _format_dump(self)
class Leaf(NodeOrLeaf):
'''
    Leafs are basically tokens with a better API. Leafs know exactly where
    they were defined and what text precedes them.
'''
__slots__ = ('value', 'line', 'column', 'prefix')
prefix: str
def __init__(self, value: str, start_pos: Tuple[int, int], prefix: str = '') -> None:
self.value = value
'''
:py:func:`str` The value of the current token.
'''
self.start_pos = start_pos
self.prefix = prefix
'''
:py:func:`str` Typically a mixture of whitespace and comments. Stuff
that is syntactically irrelevant for the syntax tree.
'''
self.parent: Optional[BaseNode] = None
'''
The parent :class:`BaseNode` of this leaf.
'''
@property
def start_pos(self) -> Tuple[int, int]:
return self.line, self.column
@start_pos.setter
def start_pos(self, value: Tuple[int, int]) -> None:
self.line = value[0]
self.column = value[1]
def get_start_pos_of_prefix(self):
previous_leaf = self.get_previous_leaf()
if previous_leaf is None:
lines = split_lines(self.prefix)
# + 1 is needed because split_lines always returns at least [''].
return self.line - len(lines) + 1, 0 # It's the first leaf.
return previous_leaf.end_pos
def get_first_leaf(self):
return self
def get_last_leaf(self):
return self
def get_code(self, include_prefix=True):
if include_prefix:
return self.prefix + self.value
else:
return self.value
@property
def end_pos(self) -> Tuple[int, int]:
lines = split_lines(self.value)
end_pos_line = self.line + len(lines) - 1
# Check for multiline token
if self.line == end_pos_line:
end_pos_column = self.column + len(lines[-1])
else:
end_pos_column = len(lines[-1])
return end_pos_line, end_pos_column
def __repr__(self):
value = self.value
if not value:
value = self.type
return "<%s: %s>" % (type(self).__name__, value)
class TypedLeaf(Leaf):
__slots__ = ('type',)
def __init__(self, type, value, start_pos, prefix=''):
super().__init__(value, start_pos, prefix)
self.type = type
class BaseNode(NodeOrLeaf):
"""
The super class for all nodes.
A node has children, a type and possibly a parent node.
"""
__slots__ = ('children',)
def __init__(self, children: List[NodeOrLeaf]) -> None:
self.children = children
"""
A list of :class:`NodeOrLeaf` child nodes.
"""
self.parent: Optional[BaseNode] = None
'''
The parent :class:`BaseNode` of this node.
None if this is the root node.
'''
for child in children:
child.parent = self
@property
def start_pos(self) -> Tuple[int, int]:
return self.children[0].start_pos
def get_start_pos_of_prefix(self):
return self.children[0].get_start_pos_of_prefix()
@property
def end_pos(self) -> Tuple[int, int]:
return self.children[-1].end_pos
def _get_code_for_children(self, children, include_prefix):
if include_prefix:
return "".join(c.get_code() for c in children)
else:
first = children[0].get_code(include_prefix=False)
return first + "".join(c.get_code() for c in children[1:])
def get_code(self, include_prefix=True):
return self._get_code_for_children(self.children, include_prefix)
def get_leaf_for_position(self, position, include_prefixes=False):
"""
Get the :py:class:`parso.tree.Leaf` at ``position``
:param tuple position: A position tuple, row, column. Rows start from 1
:param bool include_prefixes: If ``False``, ``None`` will be returned if ``position`` falls
on whitespace or comments before a leaf
:return: :py:class:`parso.tree.Leaf` at ``position``, or ``None``
"""
def binary_search(lower, upper):
if lower == upper:
element = self.children[lower]
if not include_prefixes and position < element.start_pos:
# We're on a prefix.
return None
# In case we have prefixes, a leaf always matches
try:
return element.get_leaf_for_position(position, include_prefixes)
except AttributeError:
return element
index = int((lower + upper) / 2)
element = self.children[index]
if position <= element.end_pos:
return binary_search(lower, index)
else:
return binary_search(index + 1, upper)
if not ((1, 0) <= position <= self.children[-1].end_pos):
raise ValueError('Please provide a position that exists within this node.')
return binary_search(0, len(self.children) - 1)
def get_first_leaf(self):
return self.children[0].get_first_leaf()
def get_last_leaf(self):
return self.children[-1].get_last_leaf()
def __repr__(self):
code = self.get_code().replace('\n', ' ').replace('\r', ' ').strip()
return "<%s: %s@%s,%s>" % \
(type(self).__name__, code, self.start_pos[0], self.start_pos[1])
class Node(BaseNode):
"""Concrete implementation for interior nodes."""
__slots__ = ('type',)
def __init__(self, type, children):
super().__init__(children)
self.type = type
def __repr__(self):
return "%s(%s, %r)" % (self.__class__.__name__, self.type, self.children)
class ErrorNode(BaseNode):
"""
    A node that contains valid nodes/leaves that were followed by a token that
was invalid. This basically means that the leaf after this node is where
Python would mark a syntax error.
"""
__slots__ = ()
type = 'error_node'
class ErrorLeaf(Leaf):
"""
A leaf that is either completely invalid in a language (like `$` in Python)
or is invalid at that position. Like the star in `1 +* 1`.
"""
__slots__ = ('token_type',)
type = 'error_leaf'
def __init__(self, token_type, value, start_pos, prefix=''):
super().__init__(value, start_pos, prefix)
self.token_type = token_type
def __repr__(self):
return "<%s: %s:%s, %s>" % \
(type(self).__name__, self.token_type, repr(self.value), self.start_pos)
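# Illustrative usage sketch (an assumption, not part of parso itself): how the
# tree classes above are typically consumed. Assumes the top-level ``parso``
# package is importable; the import is local to avoid a circular import.
def _example_tree_navigation():
    import parso
    module = parso.parse('x = 1\n')
    # Rows are 1-based and columns 0-based, so (1, 4) is the ``1`` literal.
    leaf = module.get_leaf_for_position((1, 4))
    # Walk upwards until a node of one of the given types is found.
    return leaf, leaf.search_ancestor('expr_stmt')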
| 16,153 | Python | .py | 413 | 28.835351 | 99 | 0.553272 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,993 | file_io.py | DamnWidget_anaconda/anaconda_lib/parso/file_io.py | import os
from pathlib import Path
from typing import Union
class FileIO:
def __init__(self, path: Union[os.PathLike, str]):
if isinstance(path, str):
path = Path(path)
self.path = path
def read(self): # Returns bytes/str
# We would like to read unicode here, but we cannot, because we are not
# sure if it is a valid unicode file. Therefore just read whatever is
# here.
with open(self.path, 'rb') as f:
return f.read()
def get_last_modified(self):
"""
Returns float - timestamp or None, if path doesn't exist.
"""
try:
return os.path.getmtime(self.path)
except FileNotFoundError:
return None
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.path)
class KnownContentFileIO(FileIO):
def __init__(self, path, content):
super().__init__(path)
self._content = content
def read(self):
return self._content
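# A minimal usage sketch (not part of the library): ``FileIO`` reads from
# disk, while ``KnownContentFileIO`` serves already-known content (e.g. an
# unsaved editor buffer) under a path. The path below is made up.
def _example_known_content():
    io = KnownContentFileIO('/tmp/example.py', 'x = 1\n')
    return io.read()  # returns the given content without touching the disk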
| 1,023 | Python | .py | 30 | 26.433333 | 79 | 0.595939 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,994 | utils.py | DamnWidget_anaconda/anaconda_lib/parso/utils.py | import re
import sys
from ast import literal_eval
from functools import total_ordering
from typing import NamedTuple, Sequence, Union
# The following is a list of characters that count as line breaks in
# str.splitlines, but not in Python source code. In Python only \r (Carriage
# Return, 0xD) and \n (Line Feed, 0xA) are allowed to split lines.
_NON_LINE_BREAKS = (
'\v', # Vertical Tabulation 0xB
'\f', # Form Feed 0xC
'\x1C', # File Separator
'\x1D', # Group Separator
'\x1E', # Record Separator
'\x85', # Next Line (NEL - Equivalent to CR+LF.
# Used to mark end-of-line on some IBM mainframes.)
'\u2028', # Line Separator
'\u2029', # Paragraph Separator
)
class Version(NamedTuple):
major: int
minor: int
micro: int
def split_lines(string: str, keepends: bool = False) -> Sequence[str]:
r"""
Intended for Python code. In contrast to Python's :py:meth:`str.splitlines`,
looks at form feeds and other special characters as normal text. Just
splits ``\n`` and ``\r\n``.
Also different: Returns ``[""]`` for an empty string input.
    In Python 2.7 form feeds were treated as normal characters by
    str.splitlines. In Python 3, however, str.splitlines was changed to split
    on form feeds as well, which is why this function exists.
"""
if keepends:
lst = string.splitlines(True)
# We have to merge lines that were broken by form feed characters.
merge = []
for i, line in enumerate(lst):
try:
last_chr = line[-1]
except IndexError:
pass
else:
if last_chr in _NON_LINE_BREAKS:
merge.append(i)
for index in reversed(merge):
try:
lst[index] = lst[index] + lst[index + 1]
del lst[index + 1]
except IndexError:
# index + 1 can be empty and therefore there's no need to
# merge.
pass
# The stdlib's implementation of the end is inconsistent when calling
# it with/without keepends. One time there's an empty string in the
# end, one time there's none.
if string.endswith('\n') or string.endswith('\r') or string == '':
lst.append('')
return lst
else:
return re.split(r'\n|\r\n|\r', string)
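# A small sketch (illustrative only) contrasting ``split_lines`` with
# ``str.splitlines``: form feeds start a new line for ``str.splitlines`` but
# stay ordinary characters here, and the empty string yields ``['']``.
def _example_split_lines():
    assert split_lines('a\fb\nc') == ['a\fb', 'c']
    assert 'a\fb\nc'.splitlines() == ['a', 'b', 'c']
    assert split_lines('') == ['']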
def python_bytes_to_unicode(
source: Union[str, bytes], encoding: str = 'utf-8', errors: str = 'strict'
) -> str:
"""
Checks for unicode BOMs and PEP 263 encoding declarations. Then returns a
unicode object like in :py:meth:`bytes.decode`.
:param encoding: See :py:meth:`bytes.decode` documentation.
:param errors: See :py:meth:`bytes.decode` documentation. ``errors`` can be
``'strict'``, ``'replace'`` or ``'ignore'``.
"""
def detect_encoding():
"""
For the implementation of encoding definitions in Python, look at:
- http://www.python.org/dev/peps/pep-0263/
- http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations
"""
byte_mark = literal_eval(r"b'\xef\xbb\xbf'")
if source.startswith(byte_mark):
# UTF-8 byte-order mark
return 'utf-8'
first_two_lines = re.match(br'(?:[^\r\n]*(?:\r\n|\r|\n)){0,2}', source).group(0)
possible_encoding = re.search(br"coding[=:]\s*([-\w.]+)",
first_two_lines)
if possible_encoding:
e = possible_encoding.group(1)
if not isinstance(e, str):
e = str(e, 'ascii', 'replace')
return e
else:
# the default if nothing else has been set -> PEP 263
return encoding
if isinstance(source, str):
# only cast str/bytes
return source
encoding = detect_encoding()
try:
# Cast to unicode
return str(source, encoding, errors)
except LookupError:
if errors == 'replace':
# This is a weird case that can happen if the given encoding is not
# a valid encoding. This usually shouldn't happen with provided
# encodings, but can happen if somebody uses encoding declarations
# like `# coding: foo-8`.
return str(source, 'utf-8', errors)
raise
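# A hedged sketch of the decoding rules above: a PEP 263 coding declaration
# in the first two lines overrides the ``encoding`` argument, and a UTF-8
# BOM would override both.
def _example_decode():
    source = b'# coding: latin-1\nname = 1\n'
    # Decoded with latin-1 (declared in the file), not the default utf-8.
    assert python_bytes_to_unicode(source) == '# coding: latin-1\nname = 1\n'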
def version_info() -> Version:
"""
Returns a namedtuple of parso's version, similar to Python's
``sys.version_info``.
"""
from parso import __version__
tupl = re.findall(r'[a-z]+|\d+', __version__)
return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])
class _PythonVersionInfo(NamedTuple):
major: int
minor: int
@total_ordering
class PythonVersionInfo(_PythonVersionInfo):
def __gt__(self, other):
if isinstance(other, tuple):
if len(other) != 2:
raise ValueError("Can only compare to tuples of length 2.")
return (self.major, self.minor) > other
        return super().__gt__(other)
def __eq__(self, other):
if isinstance(other, tuple):
if len(other) != 2:
raise ValueError("Can only compare to tuples of length 2.")
return (self.major, self.minor) == other
        return super().__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
def _parse_version(version) -> PythonVersionInfo:
match = re.match(r'(\d+)(?:\.(\d{1,2})(?:\.\d+)?)?((a|b|rc)\d)?$', version)
if match is None:
raise ValueError('The given version is not in the right format. '
'Use something like "3.8" or "3".')
major = int(match.group(1))
minor = match.group(2)
if minor is None:
        # Use the latest Python in case it's not exactly defined, because the
        # grammars are typically backwards compatible.
if major == 2:
minor = "7"
elif major == 3:
minor = "6"
else:
raise NotImplementedError("Sorry, no support yet for those fancy new/old versions.")
minor = int(minor)
return PythonVersionInfo(major, minor)
def parse_version_string(version: str = None) -> PythonVersionInfo:
"""
Checks for a valid version number (e.g. `3.8` or `3.10.1` or `3`) and
returns a corresponding version info that is always two characters long in
decimal.
"""
if version is None:
version = '%s.%s' % sys.version_info[:2]
if not isinstance(version, str):
raise TypeError('version must be a string like "3.8"')
return _parse_version(version)
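# Sketch of the version helpers above (illustrative only):
# ``parse_version_string`` normalizes a loose version string into a
# two-field tuple that compares against plain 2-tuples.
def _example_versions():
    info = parse_version_string('3.8')
    assert info == (3, 8)
    assert info > (3, 6)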
| 6,620 | Python | .py | 164 | 32.109756 | 96 | 0.599751 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,995 | cache.py | DamnWidget_anaconda/anaconda_lib/parso/cache.py | import time
import os
import sys
import hashlib
import gc
import shutil
import platform
import logging
import warnings
import pickle
from pathlib import Path
from typing import Dict, Any
LOG = logging.getLogger(__name__)
_CACHED_FILE_MINIMUM_SURVIVAL = 60 * 10 # 10 minutes
"""
Cached files should survive at least a few minutes.
"""
_CACHED_FILE_MAXIMUM_SURVIVAL = 60 * 60 * 24 * 30
"""
Maximum time a cached file may survive without being
accessed.
"""
_CACHED_SIZE_TRIGGER = 600
"""
This setting limits the number of cached files. It's basically a way to start
garbage collection.
The reasoning for this limit being as big as it is, is the following:
Numpy, Pandas, Matplotlib and Tensorflow together use about 500 files. This
makes Jedi use ~500mb of memory. Since we might want a bit more than those few
libraries, we just increase it a bit.
"""
_PICKLE_VERSION = 33
"""
Version number (integer) for file system cache.
Increment this number when there are any incompatible changes in
the parser tree classes. For example, the following changes
are regarded as incompatible.
- A class name is changed.
- A class is moved to another module.
- A __slot__ of a class is changed.
"""
_VERSION_TAG = '%s-%s%s-%s' % (
platform.python_implementation(),
sys.version_info[0],
sys.version_info[1],
_PICKLE_VERSION
)
"""
Short name for distinguish Python implementations and versions.
It's a bit similar to `sys.implementation.cache_tag`.
See: http://docs.python.org/3/library/sys.html#sys.implementation
"""
def _get_default_cache_path():
if platform.system().lower() == 'windows':
dir_ = Path(os.getenv('LOCALAPPDATA') or '~', 'Parso', 'Parso')
elif platform.system().lower() == 'darwin':
dir_ = Path('~', 'Library', 'Caches', 'Parso')
else:
dir_ = Path(os.getenv('XDG_CACHE_HOME') or '~/.cache', 'parso')
return dir_.expanduser()
_default_cache_path = _get_default_cache_path()
"""
The path where the cache is stored.
On Linux, this defaults to ``~/.cache/parso/``, on OS X to
``~/Library/Caches/Parso/`` and on Windows to ``%LOCALAPPDATA%\\Parso\\Parso\\``.
On Linux, if environment variable ``$XDG_CACHE_HOME`` is set,
``$XDG_CACHE_HOME/parso`` is used instead of the default one.
"""
_CACHE_CLEAR_THRESHOLD = 60 * 60 * 24
def _get_cache_clear_lock_path(cache_path=None):
"""
The path where the cache lock is stored.
    The cache lock prevents continuous cache clearing and only allows garbage
    collection once a day (configurable via _CACHE_CLEAR_THRESHOLD).
"""
cache_path = cache_path or _default_cache_path
return cache_path.joinpath("PARSO-CACHE-LOCK")
parser_cache: Dict[str, Any] = {}
class _NodeCacheItem:
def __init__(self, node, lines, change_time=None):
self.node = node
self.lines = lines
if change_time is None:
change_time = time.time()
self.change_time = change_time
self.last_used = change_time
def load_module(hashed_grammar, file_io, cache_path=None):
"""
Returns a module or None, if it fails.
"""
p_time = file_io.get_last_modified()
if p_time is None:
return None
try:
module_cache_item = parser_cache[hashed_grammar][file_io.path]
if p_time <= module_cache_item.change_time:
module_cache_item.last_used = time.time()
return module_cache_item.node
except KeyError:
return _load_from_file_system(
hashed_grammar,
file_io.path,
p_time,
cache_path=cache_path
)
def _load_from_file_system(hashed_grammar, path, p_time, cache_path=None):
cache_path = _get_hashed_path(hashed_grammar, path, cache_path=cache_path)
try:
if p_time > os.path.getmtime(cache_path):
# Cache is outdated
return None
with open(cache_path, 'rb') as f:
gc.disable()
try:
module_cache_item = pickle.load(f)
finally:
gc.enable()
except FileNotFoundError:
return None
else:
_set_cache_item(hashed_grammar, path, module_cache_item)
LOG.debug('pickle loaded: %s', path)
return module_cache_item.node
def _set_cache_item(hashed_grammar, path, module_cache_item):
if sum(len(v) for v in parser_cache.values()) >= _CACHED_SIZE_TRIGGER:
# Garbage collection of old cache files.
# We are basically throwing everything away that hasn't been accessed
# in 10 minutes.
cutoff_time = time.time() - _CACHED_FILE_MINIMUM_SURVIVAL
for key, path_to_item_map in parser_cache.items():
parser_cache[key] = {
path: node_item
for path, node_item in path_to_item_map.items()
if node_item.last_used > cutoff_time
}
parser_cache.setdefault(hashed_grammar, {})[path] = module_cache_item
def try_to_save_module(hashed_grammar, file_io, module, lines, pickling=True, cache_path=None):
path = file_io.path
try:
p_time = None if path is None else file_io.get_last_modified()
except OSError:
p_time = None
pickling = False
item = _NodeCacheItem(module, lines, p_time)
_set_cache_item(hashed_grammar, path, item)
if pickling and path is not None:
try:
_save_to_file_system(hashed_grammar, path, item, cache_path=cache_path)
except PermissionError:
# It's not really a big issue if the cache cannot be saved to the
# file system. It's still in RAM in that case. However we should
# still warn the user that this is happening.
warnings.warn(
'Tried to save a file to %s, but got permission denied.' % path,
Warning
)
else:
_remove_cache_and_update_lock(cache_path=cache_path)
def _save_to_file_system(hashed_grammar, path, item, cache_path=None):
with open(_get_hashed_path(hashed_grammar, path, cache_path=cache_path), 'wb') as f:
pickle.dump(item, f, pickle.HIGHEST_PROTOCOL)
def clear_cache(cache_path=None):
if cache_path is None:
cache_path = _default_cache_path
shutil.rmtree(cache_path)
parser_cache.clear()
def clear_inactive_cache(
cache_path=None,
inactivity_threshold=_CACHED_FILE_MAXIMUM_SURVIVAL,
):
if cache_path is None:
cache_path = _default_cache_path
if not cache_path.exists():
return False
for dirname in os.listdir(cache_path):
version_path = cache_path.joinpath(dirname)
if not version_path.is_dir():
continue
for file in os.scandir(version_path):
            # Use the configurable threshold instead of the module constant.
            if file.stat().st_atime + inactivity_threshold <= time.time():
try:
os.remove(file.path)
except OSError: # silently ignore all failures
continue
else:
return True
def _touch(path):
try:
os.utime(path, None)
except FileNotFoundError:
try:
file = open(path, 'a')
file.close()
except (OSError, IOError): # TODO Maybe log this?
return False
return True
def _remove_cache_and_update_lock(cache_path=None):
lock_path = _get_cache_clear_lock_path(cache_path=cache_path)
try:
clear_lock_time = os.path.getmtime(lock_path)
except FileNotFoundError:
clear_lock_time = None
if (
clear_lock_time is None # first time
or clear_lock_time + _CACHE_CLEAR_THRESHOLD <= time.time()
):
if not _touch(lock_path):
# First make sure that as few as possible other cleanup jobs also
# get started. There is still a race condition but it's probably
# not a big problem.
return False
clear_inactive_cache(cache_path=cache_path)
def _get_hashed_path(hashed_grammar, path, cache_path=None):
directory = _get_cache_directory_path(cache_path=cache_path)
file_hash = hashlib.sha256(str(path).encode("utf-8")).hexdigest()
return os.path.join(directory, '%s-%s.pkl' % (hashed_grammar, file_hash))
def _get_cache_directory_path(cache_path=None):
if cache_path is None:
cache_path = _default_cache_path
directory = cache_path.joinpath(_VERSION_TAG)
if not directory.exists():
os.makedirs(directory)
return directory
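# Hedged sketch of the cache round-trip (not part of the module's API): a
# parsed module is stored under its grammar hash and found again as long as
# the file's mtime is unchanged. ``hashed``, ``file_io``, ``node`` and
# ``lines`` are placeholders supplied by the caller (compare Grammar.parse),
# and ``file_io`` must point at an existing file.
def _example_cache_roundtrip(hashed, file_io, node, lines):
    try_to_save_module(hashed, file_io, node, lines, pickling=False)
    return load_module(hashed, file_io)  # served from the in-memory cache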
| 8,452 | Python | .py | 223 | 31.394619 | 95 | 0.654274 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,996 | __init__.py | DamnWidget_anaconda/anaconda_lib/parso/__init__.py | r"""
Parso is a Python parser that supports error recovery and round-trip parsing
for different Python versions (while running on multiple Python versions).
Parso is also able to list multiple syntax errors in your Python file.
Parso has been battle-tested by jedi_. It was pulled out of jedi to be useful
for other projects as well.
Parso consists of a small API to parse Python and analyse the syntax tree.
.. _jedi: https://github.com/davidhalter/jedi
A simple example:
>>> import parso
>>> module = parso.parse('hello + 1', version="3.9")
>>> expr = module.children[0]
>>> expr
PythonNode(arith_expr, [<Name: hello@1,0>, <Operator: +>, <Number: 1>])
>>> print(expr.get_code())
hello + 1
>>> name = expr.children[0]
>>> name
<Name: hello@1,0>
>>> name.end_pos
(1, 5)
>>> expr.end_pos
(1, 9)
To list multiple issues:
>>> grammar = parso.load_grammar()
>>> module = grammar.parse('foo +\nbar\ncontinue')
>>> error1, error2 = grammar.iter_errors(module)
>>> error1.message
'SyntaxError: invalid syntax'
>>> error2.message
"SyntaxError: 'continue' not properly in loop"
"""
from parso.parser import ParserSyntaxError
from parso.grammar import Grammar, load_grammar
from parso.utils import split_lines, python_bytes_to_unicode
__version__ = '0.8.3'
def parse(code=None, **kwargs):
"""
A utility function to avoid loading grammars.
Params are documented in :py:meth:`parso.Grammar.parse`.
:param str version: The version used by :py:func:`parso.load_grammar`.
"""
version = kwargs.pop('version', None)
grammar = load_grammar(version=version)
return grammar.parse(code, **kwargs)
| 1,607 | Python | .py | 45 | 33.711111 | 79 | 0.728857 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,997 | grammar.py | DamnWidget_anaconda/anaconda_lib/parso/grammar.py | import hashlib
import os
from typing import Generic, TypeVar, Union, Dict, Optional, Any
from pathlib import Path
from parso._compatibility import is_pypy
from parso.pgen2 import generate_grammar
from parso.utils import split_lines, python_bytes_to_unicode, \
PythonVersionInfo, parse_version_string
from parso.python.diff import DiffParser
from parso.python.tokenize import tokenize_lines, tokenize
from parso.python.token import PythonTokenTypes
from parso.cache import parser_cache, load_module, try_to_save_module
from parso.parser import BaseParser
from parso.python.parser import Parser as PythonParser
from parso.python.errors import ErrorFinderConfig
from parso.python import pep8
from parso.file_io import FileIO, KnownContentFileIO
from parso.normalizer import RefactoringNormalizer, NormalizerConfig
_loaded_grammars: Dict[str, 'Grammar'] = {}
_NodeT = TypeVar("_NodeT")
class Grammar(Generic[_NodeT]):
"""
:py:func:`parso.load_grammar` returns instances of this class.
    Creating custom non-Python grammars by calling this is not supported, yet.
:param text: A BNF representation of your grammar.
"""
_start_nonterminal: str
_error_normalizer_config: Optional[ErrorFinderConfig] = None
_token_namespace: Any = None
_default_normalizer_config: NormalizerConfig = pep8.PEP8NormalizerConfig()
def __init__(self, text: str, *, tokenizer, parser=BaseParser, diff_parser=None):
self._pgen_grammar = generate_grammar(
text,
token_namespace=self._get_token_namespace()
)
self._parser = parser
self._tokenizer = tokenizer
self._diff_parser = diff_parser
self._hashed = hashlib.sha256(text.encode("utf-8")).hexdigest()
def parse(self,
code: Union[str, bytes] = None,
*,
error_recovery=True,
path: Union[os.PathLike, str] = None,
start_symbol: str = None,
cache=False,
diff_cache=False,
cache_path: Union[os.PathLike, str] = None,
file_io: FileIO = None) -> _NodeT:
"""
If you want to parse a Python file you want to start here, most likely.
If you need finer grained control over the parsed instance, there will be
other ways to access it.
        :param str code: A unicode or bytes string. When it's not possible to
            decode bytes to a string, a
            :py:class:`UnicodeDecodeError` is raised.
:param bool error_recovery: If enabled, any code will be returned. If
it is invalid, it will be returned as an error node. If disabled,
you will get a ParseError when encountering syntax errors in your
code.
:param str start_symbol: The grammar rule (nonterminal) that you want
to parse. Only allowed to be used when error_recovery is False.
:param str path: The path to the file you want to open. Only needed for caching.
:param bool cache: Keeps a copy of the parser tree in RAM and on disk
if a path is given. Returns the cached trees if the corresponding
files on disk have not changed. Note that this stores pickle files
on your file system (e.g. for Linux in ``~/.cache/parso/``).
:param bool diff_cache: Diffs the cached python module against the new
code and tries to parse only the parts that have changed. Returns
the same (changed) module that is found in cache. Using this option
requires you to not do anything anymore with the cached modules
under that path, because the contents of it might change. This
option is still somewhat experimental. If you want stability,
please don't use it.
        :param str cache_path: If given, saves the parso cache in this
directory. If not given, defaults to the default cache places on
each platform.
:return: A subclass of :py:class:`parso.tree.NodeOrLeaf`. Typically a
:py:class:`parso.python.tree.Module`.
"""
if code is None and path is None and file_io is None:
raise TypeError("Please provide either code or a path.")
if isinstance(path, str):
path = Path(path)
if isinstance(cache_path, str):
cache_path = Path(cache_path)
if start_symbol is None:
start_symbol = self._start_nonterminal
if error_recovery and start_symbol != 'file_input':
raise NotImplementedError("This is currently not implemented.")
if file_io is None:
if code is None:
file_io = FileIO(path) # type: ignore
else:
file_io = KnownContentFileIO(path, code)
if cache and file_io.path is not None:
module_node = load_module(self._hashed, file_io, cache_path=cache_path)
if module_node is not None:
return module_node # type: ignore
if code is None:
code = file_io.read()
code = python_bytes_to_unicode(code)
lines = split_lines(code, keepends=True)
if diff_cache:
if self._diff_parser is None:
raise TypeError("You have to define a diff parser to be able "
"to use this option.")
try:
module_cache_item = parser_cache[self._hashed][file_io.path]
except KeyError:
pass
else:
module_node = module_cache_item.node
old_lines = module_cache_item.lines
if old_lines == lines:
return module_node # type: ignore
new_node = self._diff_parser(
self._pgen_grammar, self._tokenizer, module_node
).update(
old_lines=old_lines,
new_lines=lines
)
try_to_save_module(self._hashed, file_io, new_node, lines,
# Never pickle in pypy, it's slow as hell.
pickling=cache and not is_pypy,
cache_path=cache_path)
return new_node # type: ignore
tokens = self._tokenizer(lines)
p = self._parser(
self._pgen_grammar,
error_recovery=error_recovery,
start_nonterminal=start_symbol
)
root_node = p.parse(tokens=tokens)
if cache or diff_cache:
try_to_save_module(self._hashed, file_io, root_node, lines,
# Never pickle in pypy, it's slow as hell.
pickling=cache and not is_pypy,
cache_path=cache_path)
return root_node # type: ignore
def _get_token_namespace(self):
ns = self._token_namespace
if ns is None:
raise ValueError("The token namespace should be set.")
return ns
def iter_errors(self, node):
"""
Given a :py:class:`parso.tree.NodeOrLeaf` returns a generator of
:py:class:`parso.normalizer.Issue` objects. For Python this is
a list of syntax/indentation errors.
"""
if self._error_normalizer_config is None:
raise ValueError("No error normalizer specified for this grammar.")
return self._get_normalizer_issues(node, self._error_normalizer_config)
def refactor(self, base_node, node_to_str_map):
return RefactoringNormalizer(node_to_str_map).walk(base_node)
def _get_normalizer(self, normalizer_config):
if normalizer_config is None:
normalizer_config = self._default_normalizer_config
if normalizer_config is None:
raise ValueError("You need to specify a normalizer, because "
"there's no default normalizer for this tree.")
return normalizer_config.create_normalizer(self)
def _normalize(self, node, normalizer_config=None):
"""
TODO this is not public, yet.
The returned code will be normalized, e.g. PEP8 for Python.
"""
normalizer = self._get_normalizer(normalizer_config)
return normalizer.walk(node)
def _get_normalizer_issues(self, node, normalizer_config=None):
normalizer = self._get_normalizer(normalizer_config)
normalizer.walk(node)
return normalizer.issues
def __repr__(self):
nonterminals = self._pgen_grammar.nonterminal_to_dfas.keys()
txt = ' '.join(list(nonterminals)[:3]) + ' ...'
return '<%s:%s>' % (self.__class__.__name__, txt)
class PythonGrammar(Grammar):
_error_normalizer_config = ErrorFinderConfig()
_token_namespace = PythonTokenTypes
_start_nonterminal = 'file_input'
def __init__(self, version_info: PythonVersionInfo, bnf_text: str):
super().__init__(
bnf_text,
tokenizer=self._tokenize_lines,
parser=PythonParser,
diff_parser=DiffParser
)
self.version_info = version_info
def _tokenize_lines(self, lines, **kwargs):
return tokenize_lines(lines, version_info=self.version_info, **kwargs)
def _tokenize(self, code):
# Used by Jedi.
return tokenize(code, version_info=self.version_info)
def load_grammar(*, version: str = None, path: str = None):
"""
Loads a :py:class:`parso.Grammar`. The default version is the current Python
version.
:param str version: A python version string, e.g. ``version='3.8'``.
:param str path: A path to a grammar file
"""
version_info = parse_version_string(version)
file = path or os.path.join(
'python',
'grammar%s%s.txt' % (version_info.major, version_info.minor)
)
global _loaded_grammars
path = os.path.join(os.path.dirname(__file__), file)
try:
return _loaded_grammars[path]
except KeyError:
try:
with open(path) as f:
bnf_text = f.read()
grammar = PythonGrammar(version_info, bnf_text)
return _loaded_grammars.setdefault(path, grammar)
except FileNotFoundError:
message = "Python version %s.%s is currently not supported." % (
version_info.major, version_info.minor
)
raise NotImplementedError(message)
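# Illustrative sketch of the two main entry points above. The version string
# is an assumption; any supported "major.minor" works.
def _example_grammar_usage():
    grammar = load_grammar(version='3.8')
    module = grammar.parse('def f(:\n    pass\n')  # intentionally broken
    return [issue.message for issue in grammar.iter_errors(module)]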
| 10,483 | Python | .py | 222 | 36.603604 | 88 | 0.622272 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,998 | parser.py | DamnWidget_anaconda/anaconda_lib/parso/parser.py | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright David Halter and Contributors
# Modifications are dual-licensed: MIT and PSF.
# 99% of the code is different from pgen2, now.
"""
The ``Parser`` tries to convert the available Python code in an easy to read
format, something like an abstract syntax tree. The classes who represent this
tree, are sitting in the :mod:`parso.tree` module.
The Python module ``tokenize`` is a very important part in the ``Parser``,
because it splits the code into different words (tokens). Sometimes it looks a
bit messy. Sorry for that! You might ask now: "Why didn't you use the ``ast``
module for this?" Well, ``ast`` does a very good job understanding proper Python
code, but fails to work as soon as there's a single line of broken code.
There's one important optimization that needs to be known: Statements are not
being parsed completely. ``Statement`` is just a representation of the tokens
within the statement. This lowers memory usage and cpu time and reduces the
complexity of the ``Parser`` (there's another parser sitting inside
``Statement``, which produces ``Array`` and ``Call``).
"""
from typing import Dict, Type
from parso import tree
from parso.pgen2.generator import ReservedString
class ParserSyntaxError(Exception):
"""
Contains error information about the parser tree.
May be raised as an exception.
"""
def __init__(self, message, error_leaf):
self.message = message
self.error_leaf = error_leaf
class InternalParseError(Exception):
"""
Exception to signal the parser is stuck and error recovery didn't help.
Basically this shouldn't happen. It's a sign that something is really
wrong.
"""
def __init__(self, msg, type_, value, start_pos):
Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" %
(msg, type_.name, value, start_pos))
self.msg = msg
        self.type = type_
self.value = value
self.start_pos = start_pos
class Stack(list):
def _allowed_transition_names_and_token_types(self):
def iterate():
# An API just for Jedi.
for stack_node in reversed(self):
for transition in stack_node.dfa.transitions:
if isinstance(transition, ReservedString):
yield transition.value
else:
yield transition # A token type
if not stack_node.dfa.is_final:
break
return list(iterate())
class StackNode:
def __init__(self, dfa):
self.dfa = dfa
self.nodes = []
@property
def nonterminal(self):
return self.dfa.from_rule
def __repr__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.dfa, self.nodes)
def _token_to_transition(grammar, type_, value):
# Map from token to label
if type_.value.contains_syntax:
# Check for reserved words (keywords)
try:
return grammar.reserved_syntax_strings[value]
except KeyError:
pass
return type_
class BaseParser:
"""Parser engine.
A Parser instance contains state pertaining to the current token
sequence, and should not be used concurrently by different threads
to parse separate token sequences.
    See python/tokenize.py for how to get input tokens from a string.
When a syntax error occurs, error_recovery() is called.
"""
node_map: Dict[str, Type[tree.BaseNode]] = {}
default_node = tree.Node
leaf_map: Dict[str, Type[tree.Leaf]] = {}
default_leaf = tree.Leaf
def __init__(self, pgen_grammar, start_nonterminal='file_input', error_recovery=False):
self._pgen_grammar = pgen_grammar
self._start_nonterminal = start_nonterminal
self._error_recovery = error_recovery
def parse(self, tokens):
first_dfa = self._pgen_grammar.nonterminal_to_dfas[self._start_nonterminal][0]
self.stack = Stack([StackNode(first_dfa)])
for token in tokens:
self._add_token(token)
while True:
tos = self.stack[-1]
if not tos.dfa.is_final:
# We never broke out -- EOF is too soon -- Unfinished statement.
# However, the error recovery might have added the token again, if
# the stack is empty, we're fine.
raise InternalParseError(
"incomplete input", token.type, token.string, token.start_pos
)
if len(self.stack) > 1:
self._pop()
else:
return self.convert_node(tos.nonterminal, tos.nodes)
def error_recovery(self, token):
if self._error_recovery:
raise NotImplementedError("Error Recovery is not implemented")
else:
type_, value, start_pos, prefix = token
error_leaf = tree.ErrorLeaf(type_, value, start_pos, prefix)
raise ParserSyntaxError('SyntaxError: invalid syntax', error_leaf)
def convert_node(self, nonterminal, children):
try:
node = self.node_map[nonterminal](children)
except KeyError:
node = self.default_node(nonterminal, children)
return node
def convert_leaf(self, type_, value, prefix, start_pos):
try:
return self.leaf_map[type_](value, start_pos, prefix)
except KeyError:
return self.default_leaf(value, start_pos, prefix)
def _add_token(self, token):
"""
This is the only core function for parsing. Here happens basically
everything. Everything is well prepared by the parser generator and we
only apply the necessary steps here.
"""
grammar = self._pgen_grammar
stack = self.stack
type_, value, start_pos, prefix = token
transition = _token_to_transition(grammar, type_, value)
while True:
try:
plan = stack[-1].dfa.transitions[transition]
break
except KeyError:
if stack[-1].dfa.is_final:
self._pop()
else:
self.error_recovery(token)
return
except IndexError:
raise InternalParseError("too much input", type_, value, start_pos)
stack[-1].dfa = plan.next_dfa
for push in plan.dfa_pushes:
stack.append(StackNode(push))
leaf = self.convert_leaf(type_, value, prefix, start_pos)
stack[-1].nodes.append(leaf)
def _pop(self):
tos = self.stack.pop()
# If there's exactly one child, return that child instead of
# creating a new node. We still create expr_stmt and
# file_input though, because a lot of Jedi depends on its
# logic.
if len(tos.nodes) == 1:
new_node = tos.nodes[0]
else:
new_node = self.convert_node(tos.dfa.from_rule, tos.nodes)
self.stack[-1].nodes.append(new_node)
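# Hedged sketch of how this engine is driven (mirroring Grammar.parse): a
# pgen grammar and a token stream go in, a parser tree comes out.
# ``pgen_grammar`` and ``tokens`` are placeholders supplied by the caller.
def _example_parser_run(pgen_grammar, tokens):
    parser = BaseParser(pgen_grammar, start_nonterminal='file_input',
                        error_recovery=False)
    return parser.parse(tokens=tokens)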
| 7,182 | Python | .py | 166 | 34.295181 | 91 | 0.630952 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
29,999 | normalizer.py | DamnWidget_anaconda/anaconda_lib/parso/normalizer.py | from contextlib import contextmanager
from typing import Dict, List
class _NormalizerMeta(type):
def __new__(cls, name, bases, dct):
new_cls = type.__new__(cls, name, bases, dct)
new_cls.rule_value_classes = {}
new_cls.rule_type_classes = {}
return new_cls
class Normalizer(metaclass=_NormalizerMeta):
_rule_type_instances: Dict[str, List[type]] = {}
_rule_value_instances: Dict[str, List[type]] = {}
def __init__(self, grammar, config):
self.grammar = grammar
self._config = config
self.issues = []
self._rule_type_instances = self._instantiate_rules('rule_type_classes')
self._rule_value_instances = self._instantiate_rules('rule_value_classes')
def _instantiate_rules(self, attr):
dct = {}
for base in type(self).mro():
rules_map = getattr(base, attr, {})
for type_, rule_classes in rules_map.items():
new = [rule_cls(self) for rule_cls in rule_classes]
dct.setdefault(type_, []).extend(new)
return dct
def walk(self, node):
self.initialize(node)
value = self.visit(node)
self.finalize()
return value
def visit(self, node):
try:
children = node.children
except AttributeError:
return self.visit_leaf(node)
else:
with self.visit_node(node):
return ''.join(self.visit(child) for child in children)
@contextmanager
def visit_node(self, node):
self._check_type_rules(node)
yield
def _check_type_rules(self, node):
for rule in self._rule_type_instances.get(node.type, []):
rule.feed_node(node)
def visit_leaf(self, leaf):
self._check_type_rules(leaf)
for rule in self._rule_value_instances.get(leaf.value, []):
rule.feed_node(leaf)
return leaf.prefix + leaf.value
def initialize(self, node):
pass
def finalize(self):
pass
def add_issue(self, node, code, message):
issue = Issue(node, code, message)
if issue not in self.issues:
self.issues.append(issue)
return True
@classmethod
def register_rule(cls, *, value=None, values=(), type=None, types=()):
"""
Use it as a class decorator::
normalizer = Normalizer('grammar', 'config')
@normalizer.register_rule(value='foo')
class MyRule(Rule):
error_code = 42
"""
values = list(values)
types = list(types)
if value is not None:
values.append(value)
if type is not None:
types.append(type)
if not values and not types:
raise ValueError("You must register at least something.")
def decorator(rule_cls):
for v in values:
cls.rule_value_classes.setdefault(v, []).append(rule_cls)
for t in types:
cls.rule_type_classes.setdefault(t, []).append(rule_cls)
return rule_cls
return decorator
class NormalizerConfig:
normalizer_class = Normalizer
def create_normalizer(self, grammar):
if self.normalizer_class is None:
return None
return self.normalizer_class(grammar, self)
class Issue:
def __init__(self, node, code, message):
self.code = code
"""
An integer code that stands for the type of error.
"""
self.message = message
"""
A message (string) for the issue.
"""
self.start_pos = node.start_pos
"""
        The start position of the error as a tuple (line, column). As
always in |parso| the first line is 1 and the first column 0.
"""
self.end_pos = node.end_pos
def __eq__(self, other):
return self.start_pos == other.start_pos and self.code == other.code
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.code, self.start_pos))
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.code)
class Rule:
code: int
message: str
def __init__(self, normalizer):
self._normalizer = normalizer
def is_issue(self, node):
raise NotImplementedError()
def get_node(self, node):
return node
def _get_message(self, message, node):
if message is None:
message = self.message
if message is None:
raise ValueError("The message on the class is not set.")
return message
def add_issue(self, node, code=None, message=None):
if code is None:
code = self.code
if code is None:
raise ValueError("The error code on the class is not set.")
message = self._get_message(message, node)
self._normalizer.add_issue(node, code, message)
def feed_node(self, node):
if self.is_issue(node):
issue_node = self.get_node(node)
self.add_issue(issue_node)
class RefactoringNormalizer(Normalizer):
def __init__(self, node_to_str_map):
self._node_to_str_map = node_to_str_map
def visit(self, node):
try:
return self._node_to_str_map[node]
except KeyError:
return super().visit(node)
def visit_leaf(self, leaf):
try:
return self._node_to_str_map[leaf]
except KeyError:
return super().visit_leaf(leaf)
| 5,597 | Python | .py | 152 | 27.677632 | 82 | 0.588072 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |