id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30,000 | generator.py | DamnWidget_anaconda/anaconda_lib/parso/pgen2/generator.py | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright David Halter and Contributors
# Modifications are dual-licensed: MIT and PSF.
"""
This module defines the data structures used to represent a grammar.
Specifying grammars in pgen is possible with this grammar::
grammar: (NEWLINE | rule)* ENDMARKER
rule: NAME ':' rhs NEWLINE
rhs: items ('|' items)*
items: item+
item: '[' rhs ']' | atom ['+' | '*']
atom: '(' rhs ')' | NAME | STRING
This grammar is self-referencing.
This parser generator (pgen2) was created by Guido Rossum and used for lib2to3.
Most of the code has been refactored to make it more Pythonic. Since this was a
"copy" of the CPython Parser parser "pgen", there was some work needed to make
it more readable. It should also be slightly faster than the original pgen2,
because we made some optimizations.
"""
from ast import literal_eval
from typing import TypeVar, Generic, Mapping, Optional, Sequence, Set, Union

from parso.pgen2.grammar_parser import GrammarParser, NFAState
_TokenTypeT = TypeVar("_TokenTypeT")
class Grammar(Generic[_TokenTypeT]):
    """
    Holds the grammar tables consumed by the parsing engine in parse.py,
    which reads the instance attributes directly.  The only really
    important parts are the per-rule DFAs and the transitions between them.
    """

    def __init__(self,
                 start_nonterminal: str,
                 rule_to_dfas: Mapping[str, Sequence['DFAState[_TokenTypeT]']],
                 reserved_syntax_strings: Mapping[str, 'ReservedString']):
        # Attribute names are part of the parser's interface; keep them.
        self.start_nonterminal = start_nonterminal
        self.nonterminal_to_dfas = rule_to_dfas
        self.reserved_syntax_strings = reserved_syntax_strings
class DFAPlan:
    """
    Plans are used for the parser to create stack nodes and do the proper
    DFA state transitions.

    ``next_dfa`` is the state to move to; ``dfa_pushes`` lists the DFA
    states that must be pushed onto the stack first.
    """

    def __init__(self, next_dfa: 'DFAState',
                 dfa_pushes: Optional[Sequence['DFAState']] = None):
        self.next_dfa = next_dfa
        # BUG FIX: the default used to be a shared mutable ``[]``; use a
        # None sentinel so every instance gets its own list.
        self.dfa_pushes: Sequence['DFAState'] = [] if dfa_pushes is None else dfa_pushes

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__, self.next_dfa, self.dfa_pushes)
class DFAState(Generic[_TokenTypeT]):
    """
    A vertex of the deterministic state machine built for one grammar rule.

    ``arcs`` are the initial edges (label -> successor state); the
    ``transitions`` connecting the state machines of different nonterminals
    are computed in a later pass.
    """

    def __init__(self, from_rule: str, nfa_set: Set[NFAState], final: NFAState):
        assert isinstance(nfa_set, set)
        assert isinstance(next(iter(nfa_set)), NFAState)
        assert isinstance(final, NFAState)
        self.from_rule = from_rule
        self.nfa_set = nfa_set
        # Maps terminal/nonterminal labels to successor DFAState objects.
        self.arcs: Mapping[str, DFAState] = {}
        # Intermediate step: same structure as ``arcs`` but restricted to
        # nonterminals (terminals are removed).
        self.nonterminal_arcs: Mapping[str, DFAState] = {}
        # The parser itself only ever uses ``transitions`` and ``is_final``;
        # everything else exists just to build the parser.
        self.transitions: Mapping[Union[_TokenTypeT, ReservedString], DFAPlan] = {}
        # Final iff the rule's accepting NFA state is in this powerset.
        self.is_final = final in nfa_set

    def add_arc(self, next_, label):
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next_, DFAState)
        self.arcs[label] = next_

    def unifystate(self, old, new):
        # Redirect every arc that pointed at ``old`` to ``new``.
        for label, target in self.arcs.items():
            if target is old:
                self.arcs[label] = new

    def __eq__(self, other):
        # Equality ignores ``nfa_set``.  We cannot simply compare the arc
        # dicts because that would recurse through cycles; instead require
        # the same labels mapping to the *identical* successor objects.
        assert isinstance(other, DFAState)
        if self.is_final != other.is_final:
            return False
        if len(self.arcs) != len(other.arcs):
            return False
        return all(target is other.arcs.get(label)
                   for label, target in self.arcs.items())

    def __repr__(self):
        return '<%s: %s is_final=%s>' % (
            self.__class__.__name__, self.from_rule, self.is_final
        )
class ReservedString:
    """
    Represents a keyword or operator that the grammar mentions as a string
    literal (e.g. ``"if"``) rather than as a token type (e.g. NUMBER).
    """

    def __init__(self, value: str):
        self.value = value

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.value)
def _simplify_dfas(dfas):
"""
This is not theoretically optimal, but works well enough.
Algorithm: repeatedly look for two states that have the same
set of arcs (same labels pointing to the same nodes) and
unify them, until things stop changing.
dfas is a list of DFAState instances
"""
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfas):
for j in range(i + 1, len(dfas)):
state_j = dfas[j]
if state_i == state_j:
del dfas[j]
for state in dfas:
state.unifystate(state_j, state_i)
changes = True
break
def _make_dfas(start, finish):
    """
    Uses the powerset construction algorithm to create DFA states from sets of
    NFA states.

    Also does state reduction if some states are not needed.

    Returns a list of DFAState instances; the first one is the start state.
    """
    # To turn an NFA into a DFA, we define the states of the DFA
    # to correspond to *sets* of states of the NFA. Then do some
    # state reduction.
    assert isinstance(start, NFAState)
    assert isinstance(finish, NFAState)

    def addclosure(nfa_state, base_nfa_set):
        # Add ``nfa_state`` plus everything reachable through epsilon arcs
        # (arcs labeled None) to ``base_nfa_set``.
        assert isinstance(nfa_state, NFAState)
        if nfa_state in base_nfa_set:
            return
        base_nfa_set.add(nfa_state)
        for nfa_arc in nfa_state.arcs:
            if nfa_arc.nonterminal_or_string is None:
                addclosure(nfa_arc.next, base_nfa_set)

    base_nfa_set = set()
    addclosure(start, base_nfa_set)
    states = [DFAState(start.from_rule, base_nfa_set, finish)]
    for state in states:  # NB states grows while we're iterating
        arcs = {}
        # Find state transitions and store them in arcs.
        for nfa_state in state.nfa_set:
            for nfa_arc in nfa_state.arcs:
                if nfa_arc.nonterminal_or_string is not None:
                    nfa_set = arcs.setdefault(nfa_arc.nonterminal_or_string, set())
                    addclosure(nfa_arc.next, nfa_set)

        # Now create the dfa's with no None's in arcs anymore. All Nones have
        # been eliminated and state transitions (arcs) are properly defined, we
        # just need to create the dfa's.
        for nonterminal_or_string, nfa_set in arcs.items():
            for nested_state in states:
                if nested_state.nfa_set == nfa_set:
                    # The DFA state already exists for this rule.
                    break
            else:
                nested_state = DFAState(start.from_rule, nfa_set, finish)
                states.append(nested_state)

            state.add_arc(nested_state, nonterminal_or_string)
    return states  # List of DFAState instances; first one is start
def _dump_nfa(start, finish):
print("Dump of NFA for", start.from_rule)
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for arc in state.arcs:
label, next_ = arc.nonterminal_or_string, arc.next
if next_ in todo:
j = todo.index(next_)
else:
j = len(todo)
todo.append(next_)
if label is None:
print(" -> %d" % j)
else:
print(" %s -> %d" % (label, j))
def _dump_dfas(dfas):
print("Dump of DFA for", dfas[0].from_rule)
for i, state in enumerate(dfas):
print(" State", i, state.is_final and "(final)" or "")
for nonterminal, next_ in state.arcs.items():
print(" %s -> %d" % (nonterminal, dfas.index(next_)))
def generate_grammar(bnf_grammar: str, token_namespace) -> Grammar:
    """
    Build a :class:`Grammar` from a grammar description.

    ``bnf_grammar`` is a grammar in extended BNF (using * for repetition, +
    for at-least-once repetition, [] for optional parts, | for alternatives
    and () for grouping).  It's not EBNF according to ISO/IEC 14977 — it's
    the dialect Python uses for its own parser.
    """
    rule_to_dfas = {}
    start_nonterminal = None
    for nfa_a, nfa_z in GrammarParser(bnf_grammar).parse():
        dfas = _make_dfas(nfa_a, nfa_z)
        _simplify_dfas(dfas)
        rule_to_dfas[nfa_a.from_rule] = dfas

        # The first rule of the grammar file is the start symbol.
        if start_nonterminal is None:
            start_nonterminal = nfa_a.from_rule

    reserved_strings: Mapping[str, ReservedString] = {}
    for nonterminal, dfas in rule_to_dfas.items():
        for dfa_state in dfas:
            for terminal_or_nonterminal, next_dfa in dfa_state.arcs.items():
                if terminal_or_nonterminal in rule_to_dfas:
                    # Arc into another rule's state machine.
                    dfa_state.nonterminal_arcs[terminal_or_nonterminal] = next_dfa
                else:
                    # Arc over a terminal (token type or reserved string).
                    transition = _make_transition(
                        token_namespace,
                        reserved_strings,
                        terminal_or_nonterminal
                    )
                    dfa_state.transitions[transition] = DFAPlan(next_dfa)

    _calculate_tree_traversal(rule_to_dfas)
    return Grammar(start_nonterminal, rule_to_dfas, reserved_strings)  # type: ignore
def _make_transition(token_namespace, reserved_syntax_strings, label):
"""
Creates a reserved string ("if", "for", "*", ...) or returns the token type
(NUMBER, STRING, ...) for a given grammar terminal.
"""
if label[0].isalpha():
# A named token (e.g. NAME, NUMBER, STRING)
return getattr(token_namespace, label)
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
assert not label.startswith('"""') and not label.startswith("'''")
value = literal_eval(label)
try:
return reserved_syntax_strings[value]
except KeyError:
r = reserved_syntax_strings[value] = ReservedString(value)
return r
def _calculate_tree_traversal(nonterminal_to_dfas):
    """
    By this point we know how dfas can move around within a stack node, but we
    don't know how we can add a new stack node (nonterminal transitions).

    Raises ValueError when the grammar is ambiguous (two different rules
    could start with the same token at the same point).
    """
    # Map from grammar rule (nonterminal) name to a set of tokens.
    first_plans = {}

    # Sorted for deterministic iteration/error order.
    nonterminals = list(nonterminal_to_dfas.keys())
    nonterminals.sort()
    for nonterminal in nonterminals:
        if nonterminal not in first_plans:
            _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal)

    # Now that we have calculated the first terminals, we are sure that
    # there is no left recursion.

    for dfas in nonterminal_to_dfas.values():
        for dfa_state in dfas:
            transitions = dfa_state.transitions
            for nonterminal, next_dfa in dfa_state.nonterminal_arcs.items():
                for transition, pushes in first_plans[nonterminal].items():
                    if transition in transitions:
                        # Two rules reachable from here can start with the
                        # same token: the grammar is ambiguous.
                        prev_plan = transitions[transition]
                        # Make sure these are sorted so that error messages are
                        # at least deterministic
                        choices = sorted([
                            (
                                prev_plan.dfa_pushes[0].from_rule
                                if prev_plan.dfa_pushes
                                else prev_plan.next_dfa.from_rule
                            ),
                            (
                                pushes[0].from_rule
                                if pushes else next_dfa.from_rule
                            ),
                        ])
                        raise ValueError(
                            "Rule %s is ambiguous; given a %s token, we "
                            "can't determine if we should evaluate %s or %s."
                            % (
                                (
                                    dfa_state.from_rule,
                                    transition,
                                ) + tuple(choices)
                            )
                        )
                    transitions[transition] = DFAPlan(next_dfa, pushes)
def _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal):
"""
Calculates the first plan in the first_plans dictionary for every given
nonterminal. This is going to be used to know when to create stack nodes.
"""
dfas = nonterminal_to_dfas[nonterminal]
new_first_plans = {}
first_plans[nonterminal] = None # dummy to detect left recursion
# We only need to check the first dfa. All the following ones are not
# interesting to find first terminals.
state = dfas[0]
for transition, next_ in state.transitions.items():
# It's a string. We have finally found a possible first token.
new_first_plans[transition] = [next_.next_dfa]
for nonterminal2, next_ in state.nonterminal_arcs.items():
# It's a nonterminal and we have either a left recursion issue
# in the grammar or we have to recurse.
try:
first_plans2 = first_plans[nonterminal2]
except KeyError:
first_plans2 = _calculate_first_plans(nonterminal_to_dfas, first_plans, nonterminal2)
else:
if first_plans2 is None:
raise ValueError("left recursion for rule %r" % nonterminal)
for t, pushes in first_plans2.items():
new_first_plans[t] = [next_] + pushes
first_plans[nonterminal] = new_first_plans
return new_first_plans
| 14,570 | Python | .py | 322 | 35.108696 | 97 | 0.607133 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,001 | __init__.py | DamnWidget_anaconda/anaconda_lib/parso/pgen2/__init__.py | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Copyright 2014 David Halter and Contributors
# Modifications are dual-licensed: MIT and PSF.
from parso.pgen2.generator import generate_grammar
| 382 | Python | .py | 8 | 46.5 | 67 | 0.817204 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,002 | grammar_parser.py | DamnWidget_anaconda/anaconda_lib/parso/pgen2/grammar_parser.py | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright David Halter and Contributors
# Modifications are dual-licensed: MIT and PSF.
from typing import Optional, Iterator, Tuple, List
from parso.python.tokenize import tokenize
from parso.utils import parse_version_string
from parso.python.token import PythonTokenTypes
class NFAArc:
    """
    A directed edge of a rule's NFA.  ``nonterminal_or_string`` is the edge
    label; None marks an epsilon transition.
    """
    def __init__(self, next_: 'NFAState', nonterminal_or_string: Optional[str]):
        self.next: NFAState = next_
        self.nonterminal_or_string: Optional[str] = nonterminal_or_string

    def __repr__(self):
        return '<{}: {}>'.format(self.__class__.__name__, self.nonterminal_or_string)
class NFAState:
    """A vertex of the nondeterministic automaton built for one rule."""
    def __init__(self, from_rule: str):
        self.from_rule: str = from_rule
        self.arcs: List[NFAArc] = []

    def add_arc(self, next_, nonterminal_or_string=None):
        # None labels an epsilon arc; everything else must be a string.
        assert nonterminal_or_string is None or isinstance(nonterminal_or_string, str)
        assert isinstance(next_, NFAState)
        self.arcs.append(NFAArc(next_, nonterminal_or_string))

    def __repr__(self):
        return '<{}: from {}>'.format(self.__class__.__name__, self.from_rule)
class GrammarParser:
    """
    The parser for Python grammar files.

    A small recursive-descent parser over the token stream of the grammar
    text; ``self.type``/``self.value`` hold the one-token lookahead.
    """
    def __init__(self, bnf_grammar: str):
        self._bnf_grammar = bnf_grammar
        self.generator = tokenize(
            bnf_grammar,
            version_info=parse_version_string('3.9')
        )
        self._gettoken()  # Initialize lookahead

    def parse(self) -> Iterator[Tuple[NFAState, NFAState]]:
        """Yield (start, end) NFA state pairs, one pair per grammar rule."""
        # grammar: (NEWLINE | rule)* ENDMARKER
        while self.type != PythonTokenTypes.ENDMARKER:
            while self.type == PythonTokenTypes.NEWLINE:
                self._gettoken()

            # rule: NAME ':' rhs NEWLINE
            self._current_rule_name = self._expect(PythonTokenTypes.NAME)
            self._expect(PythonTokenTypes.OP, ':')

            a, z = self._parse_rhs()
            self._expect(PythonTokenTypes.NEWLINE)

            yield a, z

    def _parse_rhs(self):
        # rhs: items ('|' items)*
        a, z = self._parse_items()
        if self.value != "|":
            return a, z
        else:
            # Multiple alternatives: join them with a fresh pair of
            # start/end states connected via epsilon arcs.
            aa = NFAState(self._current_rule_name)
            zz = NFAState(self._current_rule_name)
            while True:
                # Add the possibility to go into the state of a and come back
                # to finish.
                aa.add_arc(a)
                z.add_arc(zz)

                if self.value != "|":
                    break

                self._gettoken()
                a, z = self._parse_items()
            return aa, zz

    def _parse_items(self):
        # items: item+
        a, b = self._parse_item()
        while self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING) \
                or self.value in ('(', '['):
            c, d = self._parse_item()
            # Need to end on the next item.
            b.add_arc(c)
            b = d
        return a, b

    def _parse_item(self):
        # item: '[' rhs ']' | atom ['+' | '*']
        if self.value == "[":
            self._gettoken()
            a, z = self._parse_rhs()
            self._expect(PythonTokenTypes.OP, ']')
            # Make it also possible that there is no token and change the
            # state.
            a.add_arc(z)
            return a, z
        else:
            a, z = self._parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self._gettoken()
            # Make it clear that we can go back to the old state and repeat.
            z.add_arc(a)
            if value == "+":
                return a, z
            else:
                # The end state is the same as the beginning, nothing must
                # change.
                return a, a

    def _parse_atom(self):
        # atom: '(' rhs ')' | NAME | STRING
        if self.value == "(":
            self._gettoken()
            a, z = self._parse_rhs()
            self._expect(PythonTokenTypes.OP, ')')
            return a, z
        elif self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING):
            a = NFAState(self._current_rule_name)
            z = NFAState(self._current_rule_name)
            # Make it clear that the state transition requires that value.
            a.add_arc(z, self.value)
            self._gettoken()
            return a, z
        else:
            self._raise_error("expected (...) or NAME or STRING, got %s/%s",
                              self.type, self.value)

    def _expect(self, type_, value=None):
        # Consume and return the current token's value, erroring out when
        # it doesn't match the expected type (and optional value).
        if self.type != type_:
            self._raise_error("expected %s, got %s [%s]",
                              type_, self.type, self.value)
        if value is not None and self.value != value:
            self._raise_error("expected %s, got %s", value, self.value)
        value = self.value
        self._gettoken()
        return value

    def _gettoken(self):
        # Advance the one-token lookahead.
        tup = next(self.generator)
        self.type, self.value, self.begin, prefix = tup

    def _raise_error(self, msg, *args):
        if args:
            try:
                msg = msg % args
            except:
                # Formatting failed (mismatched placeholders); fall back to
                # joining the pieces so the error is still readable.
                msg = " ".join([msg] + list(map(str, args)))
        line = self._bnf_grammar.splitlines()[self.begin[0] - 1]
        raise SyntaxError(msg, ('<grammar>', self.begin[0],
                                self.begin[1], line))
| 5,515 | Python | .py | 137 | 29.364964 | 86 | 0.54267 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,003 | errors.py | DamnWidget_anaconda/anaconda_lib/parso/python/errors.py | # -*- coding: utf-8 -*-
import codecs
import warnings
import re
from contextlib import contextmanager
from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
from parso.python.tokenize import _get_token_collection
# Compound statements counted for the "too many statically nested blocks"
# check.
_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
# Node types whose children hold (star-)expression lists.
_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
# This is the maximal block size given by python.
_MAX_BLOCK_SIZE = 20
# Maximum indentation depth before "too many levels of indentation".
_MAX_INDENT_COUNT = 100
# Feature names allowed in ``from __future__ import ...``.
ALLOWED_FUTURES = (
    'nested_scopes', 'generators', 'division', 'absolute_import',
    'with_statement', 'print_function', 'unicode_literals', 'generator_stop',
)
# Node types of the two comprehension "for" variants.
_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
def _get_rhs_name(node, version):
    """
    Return a human-readable description of the expression ``node`` for use
    in assignment-target error messages ("cannot assign to <name>").
    Wording depends on the Python ``version`` tuple.
    """
    type_ = node.type
    if type_ == "lambdef":
        return "lambda"
    elif type_ == "atom":
        comprehension = _get_comprehension_type(node)
        first, second = node.children[:2]
        if comprehension is not None:
            return comprehension
        elif second.type == "dictorsetmaker":
            if version < (3, 8):
                return "literal"
            else:
                # ``{k: v}`` or ``{**x}`` are dict displays, otherwise sets.
                if second.children[1] == ":" or second.children[0] == "**":
                    return "dict display"
                else:
                    return "set display"
        elif (
            first == "("
            and (second == ")"
                 or (len(node.children) == 3 and node.children[1].type == "testlist_comp"))
        ):
            return "tuple"
        elif first == "(":
            # A plain parenthesized expression: describe what's inside.
            return _get_rhs_name(_remove_parens(node), version=version)
        elif first == "[":
            return "list"
        elif first == "{" and second == "}":
            return "dict display"
        elif first == "{" and len(node.children) > 2:
            return "set display"
    elif type_ == "keyword":
        if "yield" in node.value:
            return "yield expression"
        if version < (3, 8):
            return "keyword"
        else:
            return str(node.value)
    elif type_ == "operator" and node.value == "...":
        return "Ellipsis"
    elif type_ == "comparison":
        return "comparison"
    elif type_ in ("string", "number", "strings"):
        return "literal"
    elif type_ == "yield_expr":
        return "yield expression"
    elif type_ == "test":
        return "conditional expression"
    elif type_ in ("atom_expr", "power"):
        if node.children[0] == "await":
            return "await expression"
        elif node.children[-1].type == "trailer":
            trailer = node.children[-1]
            if trailer.children[0] == "(":
                return "function call"
            elif trailer.children[0] == "[":
                return "subscript"
            elif trailer.children[0] == ".":
                return "attribute"
    elif (
        ("expr" in type_ and "star_expr" not in type_)  # is a substring
        or "_test" in type_
        or type_ in ("term", "factor")
    ):
        return "operator"
    elif type_ == "star_expr":
        return "starred"
    elif type_ == "testlist_star_expr":
        return "tuple"
    elif type_ == "fstring":
        return "f-string expression"
    return type_  # shouldn't reach here
def _iter_stmts(scope):
"""
Iterates over all statements and splits up simple_stmt.
"""
for child in scope.children:
if child.type == 'simple_stmt':
for child2 in child.children:
if child2.type == 'newline' or child2 == ';':
continue
yield child2
else:
yield child
def _get_comprehension_type(atom):
    """
    Classify a bracketed/parenthesized/braced atom that may hold a
    comprehension; returns a description string or None when the atom does
    not contain a comprehension.
    """
    first, second = atom.children[:2]
    if second.type == 'testlist_comp' and second.children[1].type in _COMP_FOR_TYPES:
        return 'list comprehension' if first == '[' else 'generator expression'
    if second.type == 'dictorsetmaker' and second.children[-1].type in _COMP_FOR_TYPES:
        return 'dict comprehension' if second.children[1] == ':' else 'set comprehension'
    return None
def _is_future_import(import_from):
# It looks like a __future__ import that is relative is still a future
# import. That feels kind of odd, but whatever.
# if import_from.level != 0:
# return False
from_names = import_from.get_from_names()
return [n.value for n in from_names] == ['__future__']
def _remove_parens(atom):
"""
Returns the inner part of an expression like `(foo)`. Also removes nested
parens.
"""
try:
children = atom.children
except AttributeError:
pass
else:
if len(children) == 3 and children[0] == '(':
return _remove_parens(atom.children[1])
return atom
def _skip_parens_bottom_up(node):
"""
Returns an ancestor node of an expression, skipping all levels of parens
bottom-up.
"""
while node.parent is not None:
node = node.parent
if node.type != 'atom' or node.children[0] != '(':
return node
return None
def _iter_params(parent_node):
return (n for n in parent_node.children if n.type == 'param' or n.type == 'operator')
def _is_future_import_first(import_from):
    """
    Check whether ``import_from`` is effectively the first statement of the
    file; a docstring and earlier ``__future__`` imports may come before it.
    """
    seen_docstring = False
    for stmt in _iter_stmts(import_from.get_root_node()):
        if stmt.type == 'string' and not seen_docstring:
            continue
        seen_docstring = True

        if stmt == import_from:
            return True
        if stmt.type == 'import_from' and _is_future_import(stmt):
            continue
        return False
def _iter_definition_exprs_from_lists(exprlist):
    """
    Yield the individual definition (assignment-target) expressions inside
    ``exprlist``, looking through nested parentheses and flattening
    star-expression parents.
    """
    def check_expr(child):
        if child.type == 'atom':
            opening = child.children[0]
            if opening == '(':
                testlist_comp = child.children[1]
                if testlist_comp.type == 'testlist_comp':
                    yield from _iter_definition_exprs_from_lists(testlist_comp)
                    return
                else:
                    # It's a paren that doesn't do anything, like 1 + (1)
                    yield from check_expr(testlist_comp)
                    return
            elif opening == '[':
                # BUG FIX: this branch used to do ``yield testlist_comp``,
                # but that local is only bound in the '(' branch above, so
                # it raised UnboundLocalError at runtime.  Yield the bracket
                # interior, mirroring the '(' branch's binding.
                yield child.children[1]
                return
        yield child

    if exprlist.type in _STAR_EXPR_PARENTS:
        # Children alternate expr, ',', expr, ',', ...
        for child in exprlist.children[::2]:
            yield from check_expr(child)
    else:
        yield from check_expr(exprlist)
def _get_expr_stmt_definition_exprs(expr_stmt):
    """
    Collect every assignment target of an ``expr_stmt`` (all children left
    of the final right-hand side), flattening testlists.
    """
    exprs = []
    for target in expr_stmt.children[:-2:2]:
        if target.type in ('testlist_star_expr', 'testlist'):
            exprs.extend(_iter_definition_exprs_from_lists(target))
        else:
            exprs.append(target)
    return exprs
def _get_for_stmt_definition_exprs(for_stmt):
    """Return the loop-target expressions of a ``for`` statement."""
    return list(_iter_definition_exprs_from_lists(for_stmt.children[1]))
def _is_argument_comprehension(argument):
    """True if the argument node wraps a comprehension, e.g. ``f(x for x in y)``."""
    return argument.children[1].type in _COMP_FOR_TYPES
def _any_fstring_error(version, node):
if version < (3, 9) or node is None:
return False
if node.type == "error_node":
return any(child.type == "fstring_start" for child in node.children)
elif node.type == "fstring":
return True
else:
return node.search_ancestor("fstring")
class _Context:
def __init__(self, node, add_syntax_error, parent_context=None):
self.node = node
self.blocks = []
self.parent_context = parent_context
self._used_name_dict = {}
self._global_names = []
self._local_params_names = []
self._nonlocal_names = []
self._nonlocal_names_in_subscopes = []
self._add_syntax_error = add_syntax_error
def is_async_funcdef(self):
# Stupidly enough async funcdefs can have two different forms,
# depending if a decorator is used or not.
return self.is_function() \
and self.node.parent.type in ('async_funcdef', 'async_stmt')
def is_function(self):
return self.node.type == 'funcdef'
def add_name(self, name):
parent_type = name.parent.type
if parent_type == 'trailer':
# We are only interested in first level names.
return
if parent_type == 'global_stmt':
self._global_names.append(name)
elif parent_type == 'nonlocal_stmt':
self._nonlocal_names.append(name)
elif parent_type == 'funcdef':
self._local_params_names.extend(
[param.name.value for param in name.parent.get_params()]
)
else:
self._used_name_dict.setdefault(name.value, []).append(name)
def finalize(self):
"""
Returns a list of nonlocal names that need to be part of that scope.
"""
self._analyze_names(self._global_names, 'global')
self._analyze_names(self._nonlocal_names, 'nonlocal')
global_name_strs = {n.value: n for n in self._global_names}
for nonlocal_name in self._nonlocal_names:
try:
global_name = global_name_strs[nonlocal_name.value]
except KeyError:
continue
message = "name '%s' is nonlocal and global" % global_name.value
if global_name.start_pos < nonlocal_name.start_pos:
error_name = global_name
else:
error_name = nonlocal_name
self._add_syntax_error(error_name, message)
nonlocals_not_handled = []
for nonlocal_name in self._nonlocal_names_in_subscopes:
search = nonlocal_name.value
if search in self._local_params_names:
continue
if search in global_name_strs or self.parent_context is None:
message = "no binding for nonlocal '%s' found" % nonlocal_name.value
self._add_syntax_error(nonlocal_name, message)
elif not self.is_function() or \
nonlocal_name.value not in self._used_name_dict:
nonlocals_not_handled.append(nonlocal_name)
return self._nonlocal_names + nonlocals_not_handled
def _analyze_names(self, globals_or_nonlocals, type_):
def raise_(message):
self._add_syntax_error(base_name, message % (base_name.value, type_))
params = []
if self.node.type == 'funcdef':
params = self.node.get_params()
for base_name in globals_or_nonlocals:
found_global_or_nonlocal = False
# Somehow Python does it the reversed way.
for name in reversed(self._used_name_dict.get(base_name.value, [])):
if name.start_pos > base_name.start_pos:
# All following names don't have to be checked.
found_global_or_nonlocal = True
parent = name.parent
if parent.type == 'param' and parent.name == name:
# Skip those here, these definitions belong to the next
# scope.
continue
if name.is_definition():
if parent.type == 'expr_stmt' \
and parent.children[1].type == 'annassign':
if found_global_or_nonlocal:
# If it's after the global the error seems to be
# placed there.
base_name = name
raise_("annotated name '%s' can't be %s")
break
else:
message = "name '%s' is assigned to before %s declaration"
else:
message = "name '%s' is used prior to %s declaration"
if not found_global_or_nonlocal:
raise_(message)
# Only add an error for the first occurence.
break
for param in params:
if param.name.value == base_name.value:
raise_("name '%s' is parameter and %s"),
@contextmanager
def add_block(self, node):
self.blocks.append(node)
yield
self.blocks.pop()
def add_context(self, node):
return _Context(node, self._add_syntax_error, parent_context=self)
def close_child_context(self, child_context):
self._nonlocal_names_in_subscopes += child_context.finalize()
class ErrorFinder(Normalizer):
    """
    Searches for errors in the syntax tree.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # line number -> (code, message, node): only the first issue per
        # line is kept (see add_issue).
        self._error_dict = {}
        self.version = self.grammar.version_info

    def initialize(self, node):
        # Build the _Context chain from the root down to the scope that
        # contains ``node``.
        def create_context(node):
            if node is None:
                return None

            parent_context = create_context(node.parent)
            if node.type in ('classdef', 'funcdef', 'file_input'):
                return _Context(node, self._add_syntax_error, parent_context)
            return parent_context

        self.context = create_context(node) or _Context(node, self._add_syntax_error)
        self._indentation_count = 0

    def visit(self, node):
        if node.type == 'error_node':
            with self.visit_node(node):
                # Don't need to investigate the inners of an error node. We
                # might find errors in there that should be ignored, because
                # the error node itself already shows that there's an issue.
                return ''
        return super().visit(node)

    @contextmanager
    def visit_node(self, node):
        self._check_type_rules(node)

        if node.type in _BLOCK_STMTS:
            with self.context.add_block(node):
                if len(self.context.blocks) == _MAX_BLOCK_SIZE:
                    self._add_syntax_error(node, "too many statically nested blocks")
                yield
            return
        elif node.type == 'suite':
            self._indentation_count += 1
            if self._indentation_count == _MAX_INDENT_COUNT:
                self._add_indentation_error(node.children[1], "too many levels of indentation")

        yield

        # Cleanup after the subtree has been visited.
        if node.type == 'suite':
            self._indentation_count -= 1
        elif node.type in ('classdef', 'funcdef'):
            context = self.context
            self.context = context.parent_context
            self.context.close_child_context(context)

    def visit_leaf(self, leaf):
        if leaf.type == 'error_leaf':
            if leaf.token_type in ('INDENT', 'ERROR_DEDENT'):
                # Indents/Dedents itself never have a prefix. They are just
                # "pseudo" tokens that get removed by the syntax tree later.
                # Therefore in case of an error we also have to check for this.
                spacing = list(leaf.get_next_leaf()._split_prefix())[-1]
                if leaf.token_type == 'INDENT':
                    message = 'unexpected indent'
                else:
                    message = 'unindent does not match any outer indentation level'
                self._add_indentation_error(spacing, message)
            else:
                if leaf.value.startswith('\\'):
                    message = 'unexpected character after line continuation character'
                else:
                    # Does the error leaf look like the start of a string?
                    match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value)
                    if match is None:
                        message = 'invalid syntax'
                        if (
                            self.version >= (3, 9)
                            and leaf.value in _get_token_collection(
                                self.version
                            ).always_break_tokens
                        ):
                            message = "f-string: " + message
                    else:
                        if len(match.group(1)) == 1:
                            message = 'EOL while scanning string literal'
                        else:
                            message = 'EOF while scanning triple-quoted string literal'
                self._add_syntax_error(leaf, message)
            return ''
        elif leaf.value == ':':
            parent = leaf.parent
            if parent.type in ('classdef', 'funcdef'):
                # Entering a new scope body: push a child context.
                self.context = self.context.add_context(parent)

        # The rest is rule based.
        return super().visit_leaf(leaf)

    def _add_indentation_error(self, spacing, message):
        self.add_issue(spacing, 903, "IndentationError: " + message)

    def _add_syntax_error(self, node, message):
        self.add_issue(node, 901, "SyntaxError: " + message)

    def add_issue(self, node, code, message):
        # Overwrite the default behavior.
        # Check if the issues are on the same line.
        line = node.start_pos[0]
        args = (code, message, node)
        self._error_dict.setdefault(line, args)

    def finalize(self):
        self.context.finalize()

        for code, message, node in self._error_dict.values():
            self.issues.append(Issue(node, code, message))
class IndentationRule(Rule):
    """Base class for indentation-error rules (issue code 903)."""
    code = 903

    def _get_message(self, message, node):
        return "IndentationError: " + super()._get_message(message, node)
@ErrorFinder.register_rule(type='error_node')
class _ExpectIndentedBlock(IndentationRule):
    message = 'expected an indented block'

    def get_node(self, node):
        # Report on the spacing (last prefix part) before the next leaf.
        return list(node.get_next_leaf()._split_prefix())[-1]

    def is_issue(self, node):
        # An error node ending in a newline means the following suite was
        # never indented.
        return node.children[-1].type == 'newline'
class ErrorFinderConfig(NormalizerConfig):
    # Use the syntax-error finder as this config's normalizer.
    normalizer_class = ErrorFinder
class SyntaxRule(Rule):
    """Base class for syntax-error rules (issue code 901)."""
    code = 901

    def _get_message(self, message, node):
        message = super()._get_message(message, node)
        # From 3.9 on, errors inside f-strings carry an extra prefix.
        if "f-string" not in message and _any_fstring_error(self._normalizer.version, node):
            message = "f-string: " + message
        return "SyntaxError: " + message
@ErrorFinder.register_rule(type='error_node')
class _InvalidSyntaxRule(SyntaxRule):
    message = "invalid syntax"
    fstring_message = "f-string: invalid syntax"

    def get_node(self, node):
        return node.get_next_leaf()

    def is_issue(self, node):
        # When the next leaf is an error leaf, it will be reported later on
        # its own, so don't double-report here.
        is_error = node.get_next_leaf().type != 'error_leaf'
        if is_error and _any_fstring_error(self._normalizer.version, node):
            self.add_issue(node, message=self.fstring_message)
        else:
            return is_error
@ErrorFinder.register_rule(value='await')
class _AwaitOutsideAsync(SyntaxRule):
    message = "'await' outside async function"

    def is_issue(self, leaf):
        return not self._normalizer.context.is_async_funcdef()

    def get_error_node(self, node):
        # Report the whole await statement, not just the keyword.
        return node.parent
@ErrorFinder.register_rule(value='break')
class _BreakOutsideLoop(SyntaxRule):
    """``break`` must appear inside a ``for`` or ``while`` block."""
    message = "'break' outside loop"
    def is_issue(self, leaf):
        # any() short-circuits on the first enclosing loop found; the
        # previous loop-and-flag version kept scanning all blocks.
        return not any(
            block.type in ('for_stmt', 'while_stmt')
            for block in self._normalizer.context.blocks
        )
@ErrorFinder.register_rule(value='continue')
class _ContinueChecks(SyntaxRule):
    # ``continue`` must be inside a loop; before Python 3.8 it was also
    # forbidden inside a ``finally`` clause.
    message = "'continue' not properly in loop"
    message_in_finally = "'continue' not supported inside 'finally' clause"
    def is_issue(self, leaf):
        in_loop = False
        for block in self._normalizer.context.blocks:
            if block.type in ('for_stmt', 'while_stmt'):
                in_loop = True
            if block.type == 'try_stmt':
                # children[-3] is the 'finally' keyword iff a finally clause
                # exists (it is followed by ':' and its suite).
                last_block = block.children[-3]
                if (
                    last_block == "finally"
                    and leaf.start_pos > last_block.start_pos
                    and self._normalizer.version < (3, 8)
                ):
                    self.add_issue(leaf, message=self.message_in_finally)
                    return False  # Error already added
        if not in_loop:
            return True
@ErrorFinder.register_rule(value='from')
class _YieldFromCheck(SyntaxRule):
    """``yield from`` is not allowed inside async functions."""
    message = "'yield from' inside async function"
    def get_node(self, leaf):
        # The 'from' leaf sits in yield_arg -> yield_expr; report the whole
        # yield statement.
        return leaf.parent.parent
    def is_issue(self, leaf):
        if leaf.parent.type != 'yield_arg':
            # A plain 'from' (e.g. an import) is never an issue here.
            return False
        return self._normalizer.context.is_async_funcdef()
@ErrorFinder.register_rule(type='name')
class _NameChecks(SyntaxRule):
    # Every name leaf passes through here; the rule also feeds the context's
    # name bookkeeping via add_name().
    message = 'cannot assign to __debug__'
    message_none = 'cannot assign to None'
    def is_issue(self, leaf):
        self._normalizer.context.add_name(leaf)
        if leaf.value == '__debug__' and leaf.is_definition():
            return True
@ErrorFinder.register_rule(type='string')
class _StringChecks(SyntaxRule):
    # Validates string/bytes literal contents: ASCII-only bytes literals and
    # valid escape sequences in non-raw strings.
    message = "bytes can only contain ASCII literal characters."
    def is_issue(self, leaf):
        string_prefix = leaf.string_prefix.lower()
        if 'b' in string_prefix \
                and any(c for c in leaf.value if ord(c) > 127):
            # b'ä'
            return True
        if 'r' not in string_prefix:
            # Raw strings don't need to be checked if they have proper
            # escaping.
            payload = leaf._get_payload()
            if 'b' in string_prefix:
                payload = payload.encode('utf-8')
                func = codecs.escape_decode
            else:
                func = codecs.unicode_escape_decode
            try:
                with warnings.catch_warnings():
                    # The warnings from parsing strings are not relevant.
                    warnings.filterwarnings('ignore')
                    func(payload)
            except UnicodeDecodeError as e:
                self.add_issue(leaf, message='(unicode error) ' + str(e))
            except ValueError as e:
                self.add_issue(leaf, message='(value error) ' + str(e))
@ErrorFinder.register_rule(value='*')
class _StarCheck(SyntaxRule):
    # A bare ``*`` in a parameter list must be followed by at least one
    # keyword-only parameter.
    message = "named arguments must follow bare *"
    def is_issue(self, leaf):
        params = leaf.parent
        if params.type == 'parameters' and params:
            # Everything after the '*' that is a real parameter (not a comma,
            # closing paren, or another star param).
            after = params.children[params.children.index(leaf) + 1:]
            after = [child for child in after
                     if child not in (',', ')') and not child.star_count]
            return len(after) == 0
@ErrorFinder.register_rule(value='**')
class _StarStarCheck(SyntaxRule):
    # e.g. {**{} for a in [1]}
    # TODO this should probably get a better end_pos including
    #      the next sibling of leaf.
    message = "dict unpacking cannot be used in dict comprehension"
    def is_issue(self, leaf):
        if leaf.parent.type == 'dictorsetmaker':
            # Skip over the unpacked value to see whether a comp_for follows.
            comp_for = leaf.get_next_sibling().get_next_sibling()
            return comp_for is not None and comp_for.type in _COMP_FOR_TYPES
@ErrorFinder.register_rule(value='yield')
@ErrorFinder.register_rule(value='return')
class _ReturnAndYieldChecks(SyntaxRule):
    # ``return``/``yield`` outside of a function, and ``return <value>``
    # inside an async generator, are errors.
    message = "'return' with value in async generator"
    message_async_yield = "'yield' inside async function"
    def get_node(self, leaf):
        return leaf.parent
    def is_issue(self, leaf):
        if self._normalizer.context.node.type != 'funcdef':
            self.add_issue(self.get_node(leaf), message="'%s' outside function" % leaf.value)
        elif self._normalizer.context.is_async_funcdef() \
                and any(self._normalizer.context.node.iter_yield_exprs()):
            # An async function containing a yield is an async generator.
            if leaf.value == 'return' and leaf.parent.type == 'return_stmt':
                return True
@ErrorFinder.register_rule(type='strings')
class _BytesAndStringMix(SyntaxRule):
    """Implicit concatenation may not mix bytes and str, e.g. ``'s' b''``."""
    message = "cannot mix bytes and nonbytes literals"
    def _is_bytes_literal(self, string):
        # f-strings are never bytes literals.
        if string.type == 'fstring':
            return False
        return 'b' in string.string_prefix.lower()
    def is_issue(self, node):
        children = iter(node.children)
        first_is_bytes = self._is_bytes_literal(next(children))
        for string in children:
            if self._is_bytes_literal(string) != first_is_bytes:
                return True
@ErrorFinder.register_rule(type='import_as_names')
class _TrailingImportComma(SyntaxRule):
    """e.g. ``from foo import a,`` — a trailing comma needs parentheses."""
    message = "trailing comma not allowed without surrounding parentheses"
    def is_issue(self, node):
        has_trailing_comma = node.children[-1] == ','
        is_parenthesized = node.parent.children[-1] == ')'
        if has_trailing_comma and not is_parenthesized:
            return True
@ErrorFinder.register_rule(type='import_from')
class _ImportStarInFunction(SyntaxRule):
    """``from x import *`` is only allowed at module level."""
    message = "import * only allowed at module level"
    def is_issue(self, node):
        if not node.is_star_import():
            return False
        # A non-None parent context means we are inside a function/class.
        return self._normalizer.context.parent_context is not None
@ErrorFinder.register_rule(type='import_from')
class _FutureImportRule(SyntaxRule):
    # __future__ imports must come first and must name a known feature.
    message = "from __future__ imports must occur at the beginning of the file"
    def is_issue(self, node):
        if _is_future_import(node):
            if not _is_future_import_first(node):
                return True
            for from_name, future_name in node.get_paths():
                name = future_name.value
                allowed_futures = list(ALLOWED_FUTURES)
                # 'annotations' (PEP 563) only exists from Python 3.7 on.
                if self._normalizer.version >= (3, 7):
                    allowed_futures.append('annotations')
                if name == 'braces':
                    # CPython's easter egg messages are reproduced verbatim.
                    self.add_issue(node, message="not a chance")
                elif name == 'barry_as_FLUFL':
                    m = "Seriously I'm not implementing this :) ~ Dave"
                    self.add_issue(node, message=m)
                elif name not in allowed_futures:
                    message = "future feature %s is not defined" % name
                    self.add_issue(node, message=message)
@ErrorFinder.register_rule(type='star_expr')
class _StarExprRule(SyntaxRule):
    # Validates where a starred expression (``*x``) may appear.
    message_iterable_unpacking = "iterable unpacking cannot be used in comprehension"
    def is_issue(self, node):
        def check_delete_starred(node):
            # Walk upwards to see whether this star_expr is (indirectly) the
            # target of a del statement.
            while node.parent is not None:
                node = node.parent
                if node.type == 'del_stmt':
                    return True
                if node.type not in (*_STAR_EXPR_PARENTS, 'atom'):
                    return False
            return False
        if self._normalizer.version >= (3, 9):
            ancestor = node.parent
        else:
            # Before 3.9 redundant parentheses are looked through.
            ancestor = _skip_parens_bottom_up(node)
        # starred expression not in tuple/list/set
        if ancestor.type not in (*_STAR_EXPR_PARENTS, 'dictorsetmaker') \
                and not (ancestor.type == 'atom' and ancestor.children[0] != '('):
            self.add_issue(node, message="can't use starred expression here")
            return
        if check_delete_starred(node):
            if self._normalizer.version >= (3, 9):
                self.add_issue(node, message="cannot delete starred")
            else:
                self.add_issue(node, message="can't use starred expression here")
            return
        if node.parent.type == 'testlist_comp':
            # [*[] for a in [1]]
            if node.parent.children[1].type in _COMP_FOR_TYPES:
                self.add_issue(node, message=self.message_iterable_unpacking)
@ErrorFinder.register_rule(types=_STAR_EXPR_PARENTS)
class _StarExprParentRule(SyntaxRule):
    # Validates starred expressions inside assignment targets: at most one
    # ``*x``, and not too many preceding expressions.
    def is_issue(self, node):
        def is_definition(node, ancestor):
            # True iff `node` is on the left-hand side of an expr_stmt.
            if ancestor is None:
                return False
            type_ = ancestor.type
            if type_ == 'trailer':
                return False
            if type_ == 'expr_stmt':
                return node.start_pos < ancestor.children[-1].start_pos
            return is_definition(node, ancestor.parent)
        if is_definition(node, node.parent):
            args = [c for c in node.children if c != ',']
            starred = [c for c in args if c.type == 'star_expr']
            if len(starred) > 1:
                # CPython changed the wording of this message in 3.9.
                if self._normalizer.version < (3, 9):
                    message = "two starred expressions in assignment"
                else:
                    message = "multiple starred expressions in assignment"
                self.add_issue(starred[1], message=message)
            elif starred:
                count = args.index(starred[0])
                if count >= 256:
                    message = "too many expressions in star-unpacking assignment"
                    self.add_issue(starred[0], message=message)
@ErrorFinder.register_rule(type='annassign')
class _AnnotatorRule(SyntaxRule):
    # Invalid annotation targets, e.g.:
    # True: int
    # {}: float
    message = "illegal target for annotation"
    def get_node(self, node):
        return node.parent
    def is_issue(self, node):
        type_ = None
        lhs = node.parent.children[0]
        lhs = _remove_parens(lhs)
        try:
            children = lhs.children
        except AttributeError:
            # The target is a leaf (e.g. a simple name); handled below.
            pass
        else:
            if ',' in children or lhs.type == 'atom' and children[0] == '(':
                type_ = 'tuple'
            elif lhs.type == 'atom' and children[0] == '[':
                type_ = 'list'
            trailer = children[-1]
        if type_ is None:
            if not (lhs.type == 'name'
                    # subscript/attributes are allowed
                    or lhs.type in ('atom_expr', 'power')
                    and trailer.type == 'trailer'
                    and trailer.children[0] != '('):
                return True
        else:
            # x, y: str
            message = "only single target (not %s) can be annotated"
            self.add_issue(lhs.parent, message=message % type_)
@ErrorFinder.register_rule(type='argument')
class _ArgumentRule(SyntaxRule):
    # Keyword arguments must use plain names on the left of '='.
    def is_issue(self, node):
        first = node.children[0]
        if self._normalizer.version < (3, 8):
            # a((b)=c) is valid in <3.8
            first = _remove_parens(first)
        if node.children[1] == '=' and first.type != 'name':
            if first.type == 'lambdef':
                # f(lambda: 1=1)
                if self._normalizer.version < (3, 8):
                    message = "lambda cannot contain assignment"
                else:
                    message = 'expression cannot contain assignment, perhaps you meant "=="?'
            else:
                # f(+x=1)
                if self._normalizer.version < (3, 8):
                    message = "keyword can't be an expression"
                else:
                    message = 'expression cannot contain assignment, perhaps you meant "=="?'
            self.add_issue(first, message=message)
        if _is_argument_comprehension(node) and node.parent.type == 'classdef':
            # class A(x for x in y): ...
            self.add_issue(node, message='invalid syntax')
@ErrorFinder.register_rule(type='nonlocal_stmt')
class _NonlocalModuleLevelRule(SyntaxRule):
    """``nonlocal`` makes no sense outside of an enclosing function scope."""
    message = "nonlocal declaration not allowed at module level"
    def is_issue(self, node):
        context = self._normalizer.context
        return context.parent_context is None
@ErrorFinder.register_rule(type='arglist')
class _ArglistRule(SyntaxRule):
    # Checks call argument lists: unparenthesized generator expressions,
    # repeated keywords, and positional-after-keyword ordering.
    @property
    def message(self):
        if self._normalizer.version < (3, 7):
            return "Generator expression must be parenthesized if not sole argument"
        else:
            return "Generator expression must be parenthesized"
    def is_issue(self, node):
        arg_set = set()           # keyword names seen so far
        kw_only = False           # a keyword argument has been seen
        kw_unpacking_only = False  # a '**' unpacking has been seen
        for argument in node.children:
            if argument == ',':
                continue
            if argument.type == 'argument':
                first = argument.children[0]
                if _is_argument_comprehension(argument) and len(node.children) >= 2:
                    # a(a, b for b in c)
                    return True
                if first in ('*', '**'):
                    if first == '*':
                        if kw_unpacking_only:
                            # foo(**kwargs, *args)
                            message = "iterable argument unpacking " \
                                      "follows keyword argument unpacking"
                            self.add_issue(argument, message=message)
                    else:
                        kw_unpacking_only = True
                else:  # Is a keyword argument.
                    kw_only = True
                    if first.type == 'name':
                        if first.value in arg_set:
                            # f(x=1, x=2)
                            message = "keyword argument repeated"
                            if self._normalizer.version >= (3, 9):
                                message += ": {}".format(first.value)
                            self.add_issue(first, message=message)
                        else:
                            arg_set.add(first.value)
            else:
                if kw_unpacking_only:
                    # f(**x, y)
                    message = "positional argument follows keyword argument unpacking"
                    self.add_issue(argument, message=message)
                elif kw_only:
                    # f(x=2, y)
                    message = "positional argument follows keyword argument"
                    self.add_issue(argument, message=message)
@ErrorFinder.register_rule(type='parameters')
@ErrorFinder.register_rule(type='lambdef')
class _ParameterRule(SyntaxRule):
    # Rejects e.g.:
    # def f(x=3, y): pass
    message = "non-default argument follows default argument"
    def is_issue(self, node):
        param_names = set()
        default_only = False  # a defaulted parameter was seen
        star_seen = False     # '*' / '*args' was seen (kw-only section)
        for p in _iter_params(node):
            if p.type == 'operator':
                if p.value == '*':
                    star_seen = True
                    default_only = False
                continue
            if p.name.value in param_names:
                message = "duplicate argument '%s' in function definition"
                self.add_issue(p.name, message=message % p.name.value)
            param_names.add(p.name.value)
            if not star_seen:
                # The default-before-non-default rule only applies to
                # positional parameters.
                if p.default is None and not p.star_count:
                    if default_only:
                        return True
                elif p.star_count:
                    star_seen = True
                    default_only = False
                else:
                    default_only = True
@ErrorFinder.register_rule(type='try_stmt')
class _TryStmtRule(SyntaxRule):
    message = "default 'except:' must be last"
    def is_issue(self, try_stmt):
        default_except = None
        # Children repeat every three entries: keyword/except_clause, ':',
        # suite — so [3::3] walks the clause keywords after 'try'.
        for except_clause in try_stmt.children[3::3]:
            if except_clause in ('else', 'finally'):
                break
            if except_clause == 'except':
                # A bare 'except' keyword (no exception class) is the default.
                default_except = except_clause
            elif default_except is not None:
                self.add_issue(default_except, message=self.message)
@ErrorFinder.register_rule(type='fstring')
class _FStringRule(SyntaxRule):
    # Recursively validates f-string expressions, conversions and format
    # specs (nesting depth, backslashes, !s/!r/!a conversions).
    _fstring_grammar = None
    message_expr = "f-string expression part cannot include a backslash"
    message_nested = "f-string: expressions nested too deeply"
    message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"
    def _check_format_spec(self, format_spec, depth):
        self._check_fstring_contents(format_spec.children[1:], depth)
    def _check_fstring_expr(self, fstring_expr, depth):
        if depth >= 2:
            # Only format specs nest; two levels is CPython's limit.
            self.add_issue(fstring_expr, message=self.message_nested)
        expr = fstring_expr.children[1]
        if '\\' in expr.get_code():
            self.add_issue(expr, message=self.message_expr)
        children_2 = fstring_expr.children[2]
        if children_2.type == 'operator' and children_2.value == '=':
            # Skip the '=' of a debug expression like f'{x=}'.
            conversion = fstring_expr.children[3]
        else:
            conversion = children_2
        if conversion.type == 'fstring_conversion':
            name = conversion.children[1]
            if name.value not in ('s', 'r', 'a'):
                self.add_issue(name, message=self.message_conversion)
        format_spec = fstring_expr.children[-2]
        if format_spec.type == 'fstring_format_spec':
            self._check_format_spec(format_spec, depth + 1)
    def is_issue(self, fstring):
        self._check_fstring_contents(fstring.children[1:-1])
    def _check_fstring_contents(self, children, depth=0):
        for fstring_content in children:
            if fstring_content.type == 'fstring_expr':
                self._check_fstring_expr(fstring_content, depth)
class _CheckAssignmentRule(SyntaxRule):
    # Shared helper for every rule that must validate assignment/deletion
    # targets (expr_stmt, for, with, del, comprehensions, walrus).
    def _check_assignment(self, node, is_deletion=False, is_namedexpr=False, is_aug_assign=False):
        # `error` collects a short description of the invalid target; the
        # full message is composed at the very end.
        error = None
        type_ = node.type
        if type_ == 'lambdef':
            error = 'lambda'
        elif type_ == 'atom':
            first, second = node.children[:2]
            error = _get_comprehension_type(node)
            if error is None:
                if second.type == 'dictorsetmaker':
                    # CPython's wording changed in 3.8.
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        if second.children[1] == ':':
                            error = 'dict display'
                        else:
                            error = 'set display'
                elif first == "{" and second == "}":
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        error = "dict display"
                elif first == "{" and len(node.children) > 2:
                    if self._normalizer.version < (3, 8):
                        error = 'literal'
                    else:
                        error = "set display"
                elif first in ('(', '['):
                    if second.type == 'yield_expr':
                        error = 'yield expression'
                    elif second.type == 'testlist_comp':
                        # ([a, b] := [1, 2])
                        # ((a, b) := [1, 2])
                        if is_namedexpr:
                            if first == '(':
                                error = 'tuple'
                            elif first == '[':
                                error = 'list'
                        # This is not a comprehension, they were handled
                        # further above.
                        for child in second.children[::2]:
                            self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
                    else:  # Everything handled, must be useless brackets.
                        self._check_assignment(second, is_deletion, is_namedexpr, is_aug_assign)
        elif type_ == 'keyword':
            if node.value == "yield":
                error = "yield expression"
            elif self._normalizer.version < (3, 8):
                error = 'keyword'
            else:
                error = str(node.value)
        elif type_ == 'operator':
            if node.value == '...':
                error = 'Ellipsis'
        elif type_ == 'comparison':
            error = 'comparison'
        elif type_ in ('string', 'number', 'strings'):
            error = 'literal'
        elif type_ == 'yield_expr':
            # This one seems to be a slightly different warning in Python.
            message = 'assignment to yield expression not possible'
            self.add_issue(node, message=message)
        elif type_ == 'test':
            error = 'conditional expression'
        elif type_ in ('atom_expr', 'power'):
            if node.children[0] == 'await':
                error = 'await expression'
            elif node.children[-2] == '**':
                error = 'operator'
            else:
                # Has a trailer
                trailer = node.children[-1]
                assert trailer.type == 'trailer'
                if trailer.children[0] == '(':
                    error = 'function call'
                elif is_namedexpr and trailer.children[0] == '[':
                    error = 'subscript'
                elif is_namedexpr and trailer.children[0] == '.':
                    error = 'attribute'
        elif type_ == "fstring":
            if self._normalizer.version < (3, 8):
                error = 'literal'
            else:
                error = "f-string expression"
        elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
            # Recurse into every element (children alternate with commas).
            for child in node.children[::2]:
                self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign)
        elif ('expr' in type_ and type_ != 'star_expr'  # is a substring
              or '_test' in type_
              or type_ in ('term', 'factor')):
            error = 'operator'
        elif type_ == "star_expr":
            if is_deletion:
                if self._normalizer.version >= (3, 9):
                    error = "starred"
                else:
                    self.add_issue(node, message="can't use starred expression here")
            else:
                if self._normalizer.version >= (3, 9):
                    ancestor = node.parent
                else:
                    ancestor = _skip_parens_bottom_up(node)
                if ancestor.type not in _STAR_EXPR_PARENTS and not is_aug_assign \
                        and not (ancestor.type == 'atom' and ancestor.children[0] == '['):
                    message = "starred assignment target must be in a list or tuple"
                    self.add_issue(node, message=message)
            # Also check the expression behind the star.
            self._check_assignment(node.children[1])
        if error is not None:
            if is_namedexpr:
                message = 'cannot use assignment expressions with %s' % error
            else:
                cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
                message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
            self.add_issue(node, message=message)
@ErrorFinder.register_rule(type='sync_comp_for')
class _CompForRule(_CheckAssignmentRule):
    """Validates comprehension targets and async comprehensions."""
    message = "asynchronous comprehension outside of an asynchronous function"
    def is_issue(self, node):
        target = node.children[1]
        if target.type != 'expr_list':  # expr_list has its own rule.
            self._check_assignment(target)
        is_async = node.parent.children[0] == 'async'
        return is_async and not self._normalizer.context.is_async_funcdef()
@ErrorFinder.register_rule(type='expr_stmt')
class _ExprStmtRule(_CheckAssignmentRule):
    # Validates assignment and augmented-assignment targets.
    message = "illegal expression for augmented assignment"
    extended_message = "'{target}' is an " + message
    def is_issue(self, node):
        augassign = node.children[1]
        is_aug_assign = augassign != '=' and augassign.type != 'annassign'
        if self._normalizer.version <= (3, 8) or not is_aug_assign:
            # Check every target before an '=' (children alternate with '=').
            for before_equal in node.children[:-2:2]:
                self._check_assignment(before_equal, is_aug_assign=is_aug_assign)
        if is_aug_assign:
            target = _remove_parens(node.children[0])
            # Valid augmented targets: a, a[b], a.b
            if target.type == "name" or (
                target.type in ("atom_expr", "power")
                and target.children[1].type == "trailer"
                and target.children[-1].children[0] != "("
            ):
                return False
            if self._normalizer.version <= (3, 8):
                return True
            else:
                # 3.9+ names the offending target in the message.
                self.add_issue(
                    node,
                    message=self.extended_message.format(
                        target=_get_rhs_name(node.children[0], self._normalizer.version)
                    ),
                )
@ErrorFinder.register_rule(type='with_item')
class _WithItemRule(_CheckAssignmentRule):
    """Validates the target after ``as`` in a ``with`` statement."""
    def is_issue(self, with_item):
        # children: [context_expr, 'as', target]
        target = with_item.children[2]
        self._check_assignment(target)
@ErrorFinder.register_rule(type='del_stmt')
class _DelStmtRule(_CheckAssignmentRule):
    """Validates the target(s) of a ``del`` statement."""
    def is_issue(self, del_stmt):
        target = del_stmt.children[1]
        if target.type != 'expr_list':  # expr_list has its own rule.
            self._check_assignment(target, is_deletion=True)
@ErrorFinder.register_rule(type='expr_list')
class _ExprListRule(_CheckAssignmentRule):
    """Validates every expression in an ``expr_list`` as a target."""
    def is_issue(self, expr_list):
        # Children alternate expression, ',', expression, ...
        for child in expr_list.children[::2]:
            self._check_assignment(child)
@ErrorFinder.register_rule(type='for_stmt')
class _ForStmtRule(_CheckAssignmentRule):
    """Validates the loop target of a ``for`` statement."""
    def is_issue(self, for_stmt):
        # Some of the nodes here are already used, so no else if
        target = for_stmt.children[1]
        if target.type != 'expr_list':  # expr_list has its own rule.
            self._check_assignment(target)
@ErrorFinder.register_rule(type='namedexpr_test')
class _NamedExprRule(_CheckAssignmentRule):
    # namedexpr_test: test [':=' test]
    # Enforces the PEP 572 restrictions on walrus operators inside
    # comprehensions.
    def is_issue(self, namedexpr_test):
        # assigned name
        first = namedexpr_test.children[0]
        def search_namedexpr_in_comp_for(node):
            # Walk upwards; truthy iff this namedexpr is (part of) the
            # iterable expression of a comprehension's `for`.
            while True:
                parent = node.parent
                if parent is None:
                    return parent
                if parent.type == 'sync_comp_for' and parent.children[3] == node:
                    return parent
                node = parent
        if search_namedexpr_in_comp_for(namedexpr_test):
            # [i+1 for i in (i := range(5))]
            # [i+1 for i in (j := range(5))]
            # [i+1 for i in (lambda: (j := range(5)))()]
            message = 'assignment expression cannot be used in a comprehension iterable expression'
            self.add_issue(namedexpr_test, message=message)
        # defined names
        exprlist = list()
        def process_comp_for(comp_for):
            if comp_for.type == 'sync_comp_for':
                comp = comp_for
            elif comp_for.type == 'comp_for':
                # async comprehension: the sync part is the second child.
                comp = comp_for.children[1]
            exprlist.extend(_get_for_stmt_definition_exprs(comp))
        def search_all_comp_ancestors(node):
            # Collect iteration variables of every enclosing comprehension.
            has_ancestors = False
            while True:
                node = node.search_ancestor('testlist_comp', 'dictorsetmaker')
                if node is None:
                    break
                for child in node.children:
                    if child.type in _COMP_FOR_TYPES:
                        process_comp_for(child)
                        has_ancestors = True
                        break
            return has_ancestors
        # check assignment expressions in comprehensions
        search_all = search_all_comp_ancestors(namedexpr_test)
        if search_all:
            if self._normalizer.context.node.type == 'classdef':
                message = 'assignment expression within a comprehension ' \
                          'cannot be used in a class body'
                self.add_issue(namedexpr_test, message=message)
            namelist = [expr.value for expr in exprlist if expr.type == 'name']
            if first.type == 'name' and first.value in namelist:
                # [i := 0 for i, j in range(5)]
                # [[(i := i) for j in range(5)] for i in range(5)]
                # [i for i, j in range(5) if True or (i := 1)]
                # [False and (i := 0) for i, j in range(5)]
                message = 'assignment expression cannot rebind ' \
                          'comprehension iteration variable %r' % first.value
                self.add_issue(namedexpr_test, message=message)
        self._check_assignment(first, is_namedexpr=True)
| 47,955 | Python | .py | 1,083 | 32.210526 | 99 | 0.559859 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,004 | tree.py | DamnWidget_anaconda/anaconda_lib/parso/python/tree.py | """
This is the syntax tree for Python 3 syntaxes. The classes represent
syntax elements like functions and imports.
All of the nodes can be traced back to the `Python grammar file
<https://docs.python.org/3/reference/grammar.html>`_. If you want to know how
a tree is structured, just analyse that file (for each Python version it's a
bit different).
There's a lot of logic here that makes it easier for Jedi (and other libraries)
to deal with a Python syntax tree.
By using :py:meth:`parso.tree.NodeOrLeaf.get_code` on a module, you can get
back the 1-to-1 representation of the input given to the parser. This is
important if you want to refactor a parser tree.
>>> from parso import parse
>>> parser = parse('import os')
>>> module = parser.get_root_node()
>>> module
<Module: @1-1>
Any subclasses of :class:`Scope`, including :class:`Module` has an attribute
:attr:`iter_imports <Scope.iter_imports>`:
>>> list(module.iter_imports())
[<ImportName: import os@1,0>]
Changes to the Python Grammar
-----------------------------
A few things have changed when looking at Python grammar files:
- :class:`Param` does not exist in Python grammar files. It is essentially a
part of a ``parameters`` node. |parso| splits it up to make it easier to
analyse parameters. However this just makes it easier to deal with the syntax
tree, it doesn't actually change the valid syntax.
- A few nodes like `lambdef` and `lambdef_nocond` have been merged in the
  syntax tree to make it easier to deal with them.
Parser Tree Classes
-------------------
"""
import re
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from typing import Tuple
from parso.tree import Node, BaseNode, Leaf, ErrorNode, ErrorLeaf, search_ancestor # noqa
from parso.python.prefix import split_prefix
from parso.utils import split_lines
# Node types that may contain further statements/definitions without opening
# a new scope; used when scanning a scope for nested nodes.
_FLOW_CONTAINERS = set(['if_stmt', 'while_stmt', 'for_stmt', 'try_stmt',
                        'with_stmt', 'async_stmt', 'suite'])
_RETURN_STMT_CONTAINERS = set(['suite', 'simple_stmt']) | _FLOW_CONTAINERS
_FUNC_CONTAINERS = set(
    ['suite', 'simple_stmt', 'decorated', 'async_funcdef']
) | _FLOW_CONTAINERS
# Node types that can bind (define) a name.
_GET_DEFINITION_TYPES = set([
    'expr_stmt', 'sync_comp_for', 'with_stmt', 'for_stmt', 'import_name',
    'import_from', 'param', 'del_stmt', 'namedexpr_test',
])
_IMPORTS = set(['import_name', 'import_from'])
class DocstringMixin:
    """Mixin that finds the docstring of a module, function or class node."""
    __slots__ = ()
    def get_doc_node(self):
        """
        Returns the string leaf of a docstring. e.g. ``r'''foo'''``.
        """
        if self.type == 'file_input':
            node = self.children[0]
        elif self.type in ('funcdef', 'classdef'):
            # The first statement after the ':' of the definition.
            node = self.children[self.children.index(':') + 1]
            if node.type == 'suite':  # Normally a suite
                node = node.children[1]  # -> NEWLINE stmt
        else:  # ExprStmt
            # Called on a statement: the docstring is the statement directly
            # before it, if any.
            simple_stmt = self.parent
            c = simple_stmt.parent.children
            index = c.index(simple_stmt)
            if not index:
                return None
            node = c[index - 1]
        if node.type == 'simple_stmt':
            node = node.children[0]
        if node.type == 'string':
            return node
        return None
class PythonMixin:
    """
    Some Python specific utilities.
    """
    __slots__ = ()
    def get_name_of_position(self, position):
        """
        Given a (line, column) tuple, returns a :py:class:`Name` or ``None`` if
        there is no name at that position.
        """
        for child in self.children:
            if not isinstance(child, Leaf):
                # Inner node: recurse until a matching name leaf is found.
                found = child.get_name_of_position(position)
                if found is not None:
                    return found
            elif child.type == 'name' \
                    and child.start_pos <= position <= child.end_pos:
                return child
        return None
class PythonLeaf(PythonMixin, Leaf):
    """Base class for all Python leaves; adds prefix-handling helpers."""
    __slots__ = ()
    def _split_prefix(self):
        # Split the whitespace/comment prefix into typed prefix-part tokens.
        return split_prefix(self, self.get_start_pos_of_prefix())
    def get_start_pos_of_prefix(self):
        """
        Basically calls :py:meth:`parso.tree.NodeOrLeaf.get_start_pos_of_prefix`.
        """
        # TODO it is really ugly that we have to override it. Maybe change
        # indent error leafs somehow? No idea how, though.
        previous_leaf = self.get_previous_leaf()
        if previous_leaf is not None and previous_leaf.type == 'error_leaf' \
                and previous_leaf.token_type in ('INDENT', 'DEDENT', 'ERROR_DEDENT'):
            previous_leaf = previous_leaf.get_previous_leaf()
        if previous_leaf is None:  # It's the first leaf.
            lines = split_lines(self.prefix)
            # + 1 is needed because split_lines always returns at least [''].
            return self.line - len(lines) + 1, 0  # It's the first leaf.
        return previous_leaf.end_pos
class _LeafWithoutNewlines(PythonLeaf):
    """
    Simply here to optimize performance.
    """
    __slots__ = ()
    @property
    def end_pos(self) -> Tuple[int, int]:
        # The value cannot contain newlines, so the end position is always on
        # the same line, shifted by the value's length.
        return self.line, self.column + len(self.value)
# Python base classes
class PythonBaseNode(PythonMixin, BaseNode):
    """Base class for Python inner nodes (nodes that have children)."""
    __slots__ = ()
class PythonNode(PythonMixin, Node):
    """A generic (grammar-rule) node of the Python parser tree."""
    __slots__ = ()
class PythonErrorNode(PythonMixin, ErrorNode):
    """An error-recovery node containing partially parsed children."""
    __slots__ = ()
class PythonErrorLeaf(ErrorLeaf, PythonLeaf):
    """A leaf that could not be parsed as part of any grammar rule."""
    __slots__ = ()
class EndMarker(_LeafWithoutNewlines):
    """The artificial token terminating every parsed file (empty value)."""
    __slots__ = ()
    type = 'endmarker'
    def __repr__(self):
        return "<%s: prefix=%s end_pos=%s>" % (
            type(self).__name__, repr(self.prefix), self.end_pos
        )
class Newline(PythonLeaf):
    """Contains NEWLINE and ENDMARKER tokens."""
    __slots__ = ()
    type = 'newline'
    def __repr__(self):
        # %r renders the newline value in quoted/escaped form.
        return "<%s: %r>" % (type(self).__name__, self.value)
class Name(_LeafWithoutNewlines):
    """
    A string. Sometimes it is important to know if the string belongs to a name
    or not.
    """
    type = 'name'
    __slots__ = ()
    def __repr__(self):
        return "<%s: %s@%s,%s>" % (type(self).__name__, self.value,
                                   self.line, self.column)
    def is_definition(self, include_setitem=False):
        """
        Returns True if the name is being defined.
        """
        return self.get_definition(include_setitem=include_setitem) is not None
    def get_definition(self, import_name_always=False, include_setitem=False):
        """
        Returns None if there's no definition for a name.

        :param import_name_always: Specifies if an import name is always a
            definition. Normally foo in `from foo import bar` is not a
            definition.
        """
        node = self.parent
        type_ = node.type
        if type_ in ('funcdef', 'classdef'):
            # Only the definition's own name counts, not e.g. base classes.
            if self == node.name:
                return node
            return None
        if type_ == 'except_clause':
            # Only the name after 'as' is a definition.
            if self.get_previous_sibling() == 'as':
                return node.parent  # The try_stmt.
            return None
        while node is not None:
            if node.type == 'suite':
                return None
            if node.type in _GET_DEFINITION_TYPES:
                if self in node.get_defined_names(include_setitem):
                    return node
                if import_name_always and node.type in _IMPORTS:
                    return node
                return None
            node = node.parent
        return None
class Literal(PythonLeaf):
    """Base class for literal leaves (numbers and strings)."""
    __slots__ = ()
class Number(Literal):
    """A number literal of any kind (int/float/complex, any base)."""
    type = 'number'
    __slots__ = ()
class String(Literal):
    """A plain (non-f-string) string literal, prefix and quotes included."""
    type = 'string'
    __slots__ = ()
    @property
    def string_prefix(self):
        # Everything before the first quote, e.g. 'rb' in rb'x' (may be '').
        return re.match(r'\w*(?=[\'"])', self.value).group(0)
    def _get_payload(self):
        # The text between the quotes, with prefix and quotes stripped.
        found = re.search(
            r'''('{3}|"{3}|'|")(.*)$''',
            self.value,
            flags=re.DOTALL
        )
        quote = found.group(1)
        return found.group(2)[:-len(quote)]
class FStringString(PythonLeaf):
    """
    f-strings contain f-string expressions and normal python strings. These are
    the string parts of f-strings.
    """
    type = 'fstring_string'
    __slots__ = ()
class FStringStart(PythonLeaf):
    """
    f-strings contain f-string expressions and normal python strings. These are
    the start tokens of f-strings (the prefix and opening quote).
    """
    type = 'fstring_start'
    __slots__ = ()
class FStringEnd(PythonLeaf):
    """
    f-strings contain f-string expressions and normal python strings. These are
    the end tokens of f-strings (the closing quote).
    """
    type = 'fstring_end'
    __slots__ = ()
class _StringComparisonMixin:
def __eq__(self, other):
"""
Make comparisons with strings easy.
Improves the readability of the parser.
"""
if isinstance(other, str):
return self.value == other
return self is other
def __hash__(self):
return hash(self.value)
class Operator(_LeafWithoutNewlines, _StringComparisonMixin):
    """Operator/punctuation leaf; compares equal to its string value."""
    type = 'operator'
    __slots__ = ()
class Keyword(_LeafWithoutNewlines, _StringComparisonMixin):
    """Keyword leaf (``if``, ``def``, ...); compares equal to its value."""
    type = 'keyword'
    __slots__ = ()
class Scope(PythonBaseNode, DocstringMixin):
    """
    Super class for the parser tree, which represents the state of a python
    text file.
    A Scope is either a function, class or lambda.
    """
    __slots__ = ()
    def __init__(self, children):
        super().__init__(children)
    def iter_funcdefs(self):
        """
        Returns a generator of `funcdef` nodes.
        """
        return self._search_in_scope('funcdef')
    def iter_classdefs(self):
        """
        Returns a generator of `classdef` nodes.
        """
        return self._search_in_scope('classdef')
    def iter_imports(self):
        """
        Returns a generator of `import_name` and `import_from` nodes.
        """
        return self._search_in_scope('import_name', 'import_from')
    def _search_in_scope(self, *names):
        # Descends into flow containers/suites but not into nested
        # function/class scopes (their types are not in _FUNC_CONTAINERS).
        def scan(children):
            for element in children:
                if element.type in names:
                    yield element
                if element.type in _FUNC_CONTAINERS:
                    yield from scan(element.children)
        return scan(self.children)
    def get_suite(self):
        """
        Returns the part that is executed by the function.
        """
        return self.children[-1]
    def __repr__(self):
        try:
            name = self.name.value
        except AttributeError:
            # Modules and lambdas have no name leaf.
            name = ''
        return "<%s: %s@%s-%s>" % (type(self).__name__, name,
                                   self.start_pos[0], self.end_pos[0])
class Module(Scope):
    """
    The top scope, which is always a module.
    Depending on the underlying parser this may be a full module or just a part
    of a module.
    """
    __slots__ = ('_used_names',)
    type = 'file_input'
    def __init__(self, children):
        super().__init__(children)
        # Lazily-built cache for get_used_names().
        self._used_names = None
    def _iter_future_import_names(self):
        """
        :return: A list of future import names.
        :rtype: list of str
        """
        # In Python it's not allowed to use future imports after the first
        # actual (non-future) statement. However this is not a linter here,
        # just return all future imports. If people want to scan for issues
        # they should use the API.
        for imp in self.iter_imports():
            if imp.type == 'import_from' and imp.level == 0:
                for path in imp.get_paths():
                    names = [name.value for name in path]
                    if len(names) == 2 and names[0] == '__future__':
                        yield names[1]
    def get_used_names(self):
        """
        Returns all the :class:`Name` leafs that exist in this module. This
        includes both definitions and references of names.
        """
        if self._used_names is None:
            # Don't directly use self._used_names to eliminate a lookup.
            dct = {}
            def recurse(node):
                try:
                    children = node.children
                except AttributeError:
                    # Leaf: record it if it is a name.
                    if node.type == 'name':
                        arr = dct.setdefault(node.value, [])
                        arr.append(node)
                else:
                    for child in children:
                        recurse(child)
            recurse(self)
            self._used_names = UsedNamesMapping(dct)
        return self._used_names
class Decorator(PythonBaseNode):
    """A single decorator line, e.g. ``@foo.bar(3)``."""
    type = 'decorator'
    __slots__ = ()
class ClassOrFunc(Scope):
    """Common base for :class:`Class` and function definitions."""
    __slots__ = ()
    @property
    def name(self):
        """
        Returns the `Name` leaf that defines the function or class name.
        """
        return self.children[1]
    def get_decorators(self):
        """
        :rtype: list of :class:`Decorator`
        """
        decorated = self.parent
        if decorated.type == 'async_funcdef':
            # async functions are wrapped once more before the decoration.
            decorated = decorated.parent
        if decorated.type == 'decorated':
            if decorated.children[0].type == 'decorators':
                # Multiple decorators.
                return decorated.children[0].children
            else:
                # A single decorator.
                return decorated.children[:1]
        else:
            return []
class Class(ClassOrFunc):
    """
    Used to store the parsed contents of a python class.
    """
    type = 'classdef'
    __slots__ = ()

    # NOTE: the previous ``__init__`` only delegated to ``super().__init__``
    # and was therefore redundant; it has been removed.

    def get_super_arglist(self):
        """
        Returns the `arglist` node that defines the super classes. It returns
        None if there are no arguments.
        """
        if self.children[2] != '(':  # Has no parentheses.
            return None
        if self.children[3] == ')':  # Empty parentheses.
            return None
        return self.children[3]
def _create_params(parent, argslist_list):
    """
    `argslist_list` is a list that can contain an argslist as a first item,
    but doesn't have to. It's basically the items between the parameter
    brackets (which is at most one item).
    This function modifies the parser structure. It generates `Param` objects
    from the normal ast. Those param objects do not exist in a normal ast, but
    make the evaluation of the ast tree so much easier.
    You could also say that this function replaces the argslist node with a
    list of Param objects.
    """
    try:
        first = argslist_list[0]
    except IndexError:
        # No parameters at all.
        return []
    if first.type in ('name', 'fpdef'):
        # A single simple parameter.
        return [Param([first], parent)]
    elif first == '*':
        # A bare ``*`` (keyword-only marker) as the only item.
        return [first]
    else:  # argslist is a `typedargslist` or a `varargslist`.
        if first.type == 'tfpdef':
            children = [first]
        else:
            children = first.children
        new_children = []
        start = 0
        # Start with offset 1, because the end is higher.
        for end, child in enumerate(children + [None], 1):
            # ``None`` acts as a sentinel terminating the last parameter.
            if child is None or child == ',':
                param_children = children[start:end]
                if param_children:  # Could as well be comma and then end.
                    # Bare ``*`` and ``/`` markers are kept as-is (only their
                    # parent is fixed up); everything else becomes a Param.
                    if param_children[0] == '*' \
                            and (len(param_children) == 1
                                 or param_children[1] == ',') \
                            or param_children[0] == '/':
                        for p in param_children:
                            p.parent = parent
                        new_children += param_children
                    else:
                        new_children.append(Param(param_children, parent))
                    start = end
        return new_children
class Function(ClassOrFunc):
    """
    Used to store the parsed contents of a python function.
    Children::
        0. <Keyword: def>
        1. <Name>
        2. parameter list (including open-paren and close-paren <Operator>s)
        3. or 5. <Operator: :>
        4. or 6. Node() representing function body
        3. -> (if annotation is also present)
        4. annotation (if present)
    """
    type = 'funcdef'
    def __init__(self, children):
        super().__init__(children)
        parameters = self.children[2] # After `def foo`
        # Strip the surrounding parentheses before converting to Params.
        parameters_children = parameters.children[1:-1]
        # If input parameters list already has Param objects, keep it as is;
        # otherwise, convert it to a list of Param objects.
        if not any(isinstance(child, Param) for child in parameters_children):
            parameters.children[1:-1] = _create_params(parameters, parameters_children)
    def _get_param_nodes(self):
        # The parameter-list node's children (including parens and markers).
        return self.children[2].children
    def get_params(self):
        """
        Returns a list of `Param()`.
        """
        # Filters out parens, commas and bare `*`/`/` markers.
        return [p for p in self._get_param_nodes() if p.type == 'param']
    @property
    def name(self):
        return self.children[1] # First token after `def`
    def iter_yield_exprs(self):
        """
        Returns a generator of `yield_expr`.
        """
        def scan(children):
            for element in children:
                # Do not descend into nested scopes; their yields belong to
                # them, not to this function.
                if element.type in ('classdef', 'funcdef', 'lambdef'):
                    continue
                try:
                    nested_children = element.children
                except AttributeError:
                    # Leaf: yield the enclosing yield_expr if there is one,
                    # otherwise the bare `yield` keyword itself.
                    if element.value == 'yield':
                        if element.parent.type == 'yield_expr':
                            yield element.parent
                        else:
                            yield element
                else:
                    yield from scan(nested_children)
        return scan(self.children)
    def iter_return_stmts(self):
        """
        Returns a generator of `return_stmt`.
        """
        def scan(children):
            for element in children:
                # A bare `return` is just a keyword leaf, not a return_stmt.
                if element.type == 'return_stmt' \
                        or element.type == 'keyword' and element.value == 'return':
                    yield element
                if element.type in _RETURN_STMT_CONTAINERS:
                    yield from scan(element.children)
        return scan(self.children)
    def iter_raise_stmts(self):
        """
        Returns a generator of `raise_stmt`. Includes raise statements inside try-except blocks
        """
        def scan(children):
            for element in children:
                # A bare `raise` is just a keyword leaf, not a raise_stmt.
                if element.type == 'raise_stmt' \
                        or element.type == 'keyword' and element.value == 'raise':
                    yield element
                if element.type in _RETURN_STMT_CONTAINERS:
                    yield from scan(element.children)
        return scan(self.children)
    def is_generator(self):
        """
        :return bool: Checks if a function is a generator or not.
        """
        # A function is a generator iff it contains at least one yield.
        return next(self.iter_yield_exprs(), None) is not None
    @property
    def annotation(self):
        """
        Returns the test node after `->` or `None` if there is no annotation.
        """
        try:
            if self.children[3] == "->":
                return self.children[4]
            assert self.children[3] == ":"
            return None
        except IndexError:
            return None
class Lambda(Function):
    """
    Lambdas are basically trimmed functions, so give it the same interface.
    Children::
        0. <Keyword: lambda>
        *. <Param x> for each argument x
        -2. <Operator: :>
        -1. Node() representing body
    """
    type = 'lambdef'
    __slots__ = ()
    def __init__(self, children):
        # We don't want to call the Function constructor, call its parent.
        # (Function.__init__ assumes a parenthesized parameter list, which
        # lambdas don't have.)
        super(Function, self).__init__(children)
        # Everything between `lambda` and the `:` operator is a parameter.
        parameters_children = self.children[1:-2]
        # If input children list already has Param objects, keep it as is;
        # otherwise, convert it to a list of Param objects.
        if not any(isinstance(child, Param) for child in parameters_children):
            self.children[1:-2] = _create_params(self, parameters_children)
    @property
    def name(self):
        """
        Raises an AttributeError. Lambdas don't have a defined name.
        """
        raise AttributeError("lambda is not named.")
    def _get_param_nodes(self):
        return self.children[1:-2]
    @property
    def annotation(self):
        """
        Returns `None`, lambdas don't have annotations.
        """
        return None
    def __repr__(self):
        return "<%s@%s>" % (self.__class__.__name__, self.start_pos)
class Flow(PythonBaseNode):
    """Base class for flow-control nodes (if/while/for/try/with statements)."""
    __slots__ = ()
class IfStmt(Flow):
    type = 'if_stmt'
    __slots__ = ()
    def get_test_nodes(self):
        """
        E.g. returns all the `test` nodes that are named as x, below:
            if x:
                pass
            elif x:
                pass
        """
        # The test node directly follows each `if`/`elif` keyword.
        for i, c in enumerate(self.children):
            if c in ('elif', 'if'):
                yield self.children[i + 1]
    def get_corresponding_test_node(self, node):
        """
        Searches for the branch in which the node is and returns the
        corresponding test node (see function above). However if the node is in
        the test node itself and not in the suite return None.

        Also returns None (implicitly) if the node lies before every test,
        e.g. inside the `if` keyword's prefix.
        """
        start_pos = node.start_pos
        # Iterate in reverse so the first test that starts before the node
        # is the branch the node belongs to.
        for check_node in reversed(list(self.get_test_nodes())):
            if check_node.start_pos < start_pos:
                if start_pos < check_node.end_pos:
                    return None
                    # In this case the node is within the check_node itself,
                    # not in the suite
                else:
                    return check_node
    def is_node_after_else(self, node):
        """
        Checks if a node is defined after `else`.
        """
        for c in self.children:
            if c == 'else':
                if node.start_pos > c.start_pos:
                    return True
        else:
            return False
class WhileStmt(Flow):
    """Node for a ``while`` statement."""
    type = 'while_stmt'
    __slots__ = ()
class ForStmt(Flow):
    type = 'for_stmt'
    __slots__ = ()
    def get_testlist(self):
        """
        Returns the input node ``y`` from: ``for x in y:``.
        """
        # Children: [for, exprlist, in, testlist, ':', suite, ...]
        return self.children[3]
    def get_defined_names(self, include_setitem=False):
        # The loop targets (``x`` in ``for x in y:``) are child 1.
        return _defined_names(self.children[1], include_setitem)
class TryStmt(Flow):
    type = 'try_stmt'
    __slots__ = ()

    def get_except_clause_tests(self):
        """
        Yield the ``test`` node of every ``except_clause`` child.

        A bare ``except`` (a plain keyword child, without an exception
        expression) yields ``None`` instead.
        """
        for child in self.children:
            if child.type == 'except_clause':
                yield child.children[1]
            elif child == 'except':
                yield None
class WithStmt(Flow):
    type = 'with_stmt'
    __slots__ = ()
    def get_defined_names(self, include_setitem=False):
        """
        Returns the a list of `Name` that the with statement defines. The
        defined names are set after `as`.
        """
        names = []
        # Children look like: [with, with_item, ',', with_item, ..., ':', suite],
        # so the with_items sit at every second index between the keyword and
        # the colon.
        for with_item in self.children[1:-2:2]:
            # Check with items for 'as' names.
            if with_item.type == 'with_item':
                # with_item children: [test, 'as', target]
                names += _defined_names(with_item.children[2], include_setitem)
        return names
    def get_test_node_from_name(self, name):
        # Map an `as` target name back to the context-manager expression.
        node = name.search_ancestor("with_item")
        if node is None:
            raise ValueError('The name is not actually part of a with statement.')
        return node.children[0]
class Import(PythonBaseNode):
    """Common base for ``import_name`` and ``import_from`` nodes."""
    __slots__ = ()

    def get_path_for_name(self, name):
        """
        The path is the list of names that leads to the searched name.
        :return list of Name:
        """
        # The name may be an alias; if so, map it back to the real name first.
        name = self._aliases().get(name, name)
        for path in self.get_paths():
            if name in path:
                # Truncate the path right after the searched name.
                return path[:path.index(name) + 1]
        raise ValueError('Name should be defined in the import itself')

    def is_nested(self):
        # Subclasses may override this behavior.
        return False

    def is_star_import(self):
        return self.children[-1] == '*'
class ImportFrom(Import):
    type = 'import_from'
    __slots__ = ()
    def get_defined_names(self, include_setitem=False):
        """
        Returns the a list of `Name` that the import defines. The
        defined names are set after `import` or in case an alias - `as` - is
        present that name is returned.
        """
        return [alias or name for name, alias in self._as_name_tuples()]
    def _aliases(self):
        """Mapping from alias to its corresponding name."""
        return dict((alias, name) for name, alias in self._as_name_tuples()
                    if alias is not None)
    def get_from_names(self):
        # Skip the leading relative-import dots, then inspect the first
        # non-dot child.
        for n in self.children[1:]:
            if n not in ('.', '...'):
                break
        if n.type == 'dotted_name':  # from x.y import
            # children[::2] skips the '.' separators.
            return n.children[::2]
        elif n == 'import':  # from . import
            return []
        else:  # from x import
            return [n]
    @property
    def level(self):
        """The level parameter of ``__import__``."""
        level = 0
        # '...' is a single leaf worth three dots, hence len(n.value).
        for n in self.children[1:]:
            if n in ('.', '...'):
                level += len(n.value)
            else:
                break
        return level
    def _as_name_tuples(self):
        # Yields (name, alias) pairs; alias is None when no `as` was used.
        last = self.children[-1]
        if last == ')':
            # Parenthesized import list; the names are one child earlier.
            last = self.children[-2]
        elif last == '*':
            return  # No names defined directly.
        if last.type == 'import_as_names':
            as_names = last.children[::2]
        else:
            as_names = [last]
        for as_name in as_names:
            if as_name.type == 'name':
                yield as_name, None
            else:
                yield as_name.children[::2]  # yields x, y -> ``x as y``
    def get_paths(self):
        """
        The import paths defined in an import statement. Typically an array
        like this: ``[<Name: datetime>, <Name: date>]``.
        :return list of list of Name:
        """
        dotted = self.get_from_names()
        if self.children[-1] == '*':
            return [dotted]
        return [dotted + [name] for name, alias in self._as_name_tuples()]
class ImportName(Import):
    """For ``import_name`` nodes. Covers normal imports without ``from``."""
    type = 'import_name'
    __slots__ = ()
    def get_defined_names(self, include_setitem=False):
        """
        Returns the a list of `Name` that the import defines. The defined names
        is always the first name after `import` or in case an alias - `as` - is
        present that name is returned.
        """
        return [alias or path[0] for path, alias in self._dotted_as_names()]
    @property
    def level(self):
        """The level parameter of ``__import__``."""
        return 0  # Obviously 0 for imports without from.
    def get_paths(self):
        return [path for path, alias in self._dotted_as_names()]
    def _dotted_as_names(self):
        """Generator of (list(path), alias) where alias may be None."""
        dotted_as_names = self.children[1]
        if dotted_as_names.type == 'dotted_as_names':
            # ``import a, b as c`` - children[::2] skips the commas.
            as_names = dotted_as_names.children[::2]
        else:
            as_names = [dotted_as_names]
        for as_name in as_names:
            if as_name.type == 'dotted_as_name':
                # Children: [dotted_name_or_name, 'as', alias]
                alias = as_name.children[2]
                as_name = as_name.children[0]
            else:
                alias = None
            if as_name.type == 'name':
                yield [as_name], alias
            else:
                # dotted_names
                yield as_name.children[::2], alias
    def is_nested(self):
        """
        This checks for the special case of nested imports, without aliases and
        from statement::
            import foo.bar
        """
        return bool([1 for path, alias in self._dotted_as_names()
                     if alias is None and len(path) > 1])
    def _aliases(self):
        """
        :return dict: Maps each alias `Name` to the `Name` it aliases
            (the last component of the dotted path).
        """
        return dict((alias, path[-1]) for path, alias in self._dotted_as_names()
                    if alias is not None)
class KeywordStatement(PythonBaseNode):
    """
    For the following statements: `assert`, `del`, `global`, `nonlocal`,
    `raise`, `return`, `yield`.
    `pass`, `continue` and `break` are not in there, because they are just
    simple keywords and the parser reduces it to a keyword.
    """
    __slots__ = ()
    @property
    def type(self):
        """
        Keyword statements start with the keyword and end with `_stmt`. You can
        crosscheck this with the Python grammar.
        """
        return '%s_stmt' % self.keyword
    @property
    def keyword(self):
        # The first child is always the keyword leaf itself.
        return self.children[0].value
    def get_defined_names(self, include_setitem=False):
        keyword = self.keyword
        if keyword == 'del':
            return _defined_names(self.children[1], include_setitem)
        if keyword in ('global', 'nonlocal'):
            # children[1::2] skips the commas between the names.
            return self.children[1::2]
        return []
class AssertStmt(KeywordStatement):
    __slots__ = ()
    @property
    def assertion(self):
        """The asserted expression (the node right after the keyword)."""
        return self.children[1]
class GlobalStmt(KeywordStatement):
    __slots__ = ()
    def get_global_names(self):
        """Return the `Name` leaves declared global (commas skipped)."""
        return self.children[1::2]
class ReturnStmt(KeywordStatement):
    """Node for a ``return`` statement with a value."""
    __slots__ = ()
class YieldExpr(PythonBaseNode):
    """Node for a ``yield``/``yield from`` expression."""
    type = 'yield_expr'
    __slots__ = ()
def _defined_names(current, include_setitem):
    """
    A helper function to find the defined names in statements, for loops and
    list comprehensions.

    :param include_setitem: If True, also collect names that are assigned to
        via subscript (``x[0] = ...``) or attribute targets.
    """
    names = []
    if current.type in ('testlist_star_expr', 'testlist_comp', 'exprlist', 'testlist'):
        # Tuple-like target lists: recurse into each element (skip commas).
        for child in current.children[::2]:
            names += _defined_names(child, include_setitem)
    elif current.type in ('atom', 'star_expr'):
        # Parenthesized target or ``*target``: unwrap one level.
        names += _defined_names(current.children[1], include_setitem)
    elif current.type in ('power', 'atom_expr'):
        if current.children[-2] != '**':  # Just if there's no operation
            trailer = current.children[-1]
            if trailer.children[0] == '.':
                # Attribute target: ``obj.attr = ...`` defines ``attr``.
                names.append(trailer.children[1])
            elif trailer.children[0] == '[' and include_setitem:
                # Subscript target: walk backwards to the name being indexed.
                for node in current.children[-2::-1]:
                    if node.type == 'trailer':
                        names.append(node.children[1])
                        break
                    if node.type == 'name':
                        names.append(node)
                        break
    else:
        # A plain name leaf.
        names.append(current)
    return names
class ExprStmt(PythonBaseNode, DocstringMixin):
    type = 'expr_stmt'
    __slots__ = ()
    def get_defined_names(self, include_setitem=False):
        """
        Returns a list of `Name` defined before the `=` sign.
        """
        names = []
        if self.children[1].type == 'annassign':
            # Annotated assignment (``x: int = 3``): the target is child 0.
            names = _defined_names(self.children[0], include_setitem)
        # Every even child can be a target if it is followed by an operator
        # containing '=' (covers '=' as well as augmented assignments).
        return [
            name
            for i in range(0, len(self.children) - 2, 2)
            if '=' in self.children[i + 1].value
            for name in _defined_names(self.children[i], include_setitem)
        ] + names
    def get_rhs(self):
        """Returns the right-hand-side of the equals."""
        node = self.children[-1]
        if node.type == 'annassign':
            # annassign children: [':', annotation] or [':', annotation, '=', value]
            if len(node.children) == 4:
                node = node.children[3]
            else:
                node = node.children[1]
        return node
    def yield_operators(self):
        """
        Returns a generator of `+=`, `=`, etc. or None if there is no operation.
        """
        first = self.children[1]
        if first.type == 'annassign':
            if len(first.children) <= 2:
                return  # No operator is available, it's just PEP 484.
            # The '=' inside the annassign node.
            first = first.children[2]
        yield first
        # Remaining operators alternate with the chained targets/values.
        yield from self.children[3::2]
class NamedExpr(PythonBaseNode):
    """Node for a walrus expression: ``x := value``."""
    type = 'namedexpr_test'
    def get_defined_names(self, include_setitem=False):
        # The target of ``:=`` is the first child.
        return _defined_names(self.children[0], include_setitem)
class Param(PythonBaseNode):
    """
    It's a helper class that makes business logic with params much easier. The
    Python grammar defines no ``param`` node. It defines it in a different way
    that is not really suited to working with parameters.
    """
    type = 'param'
    def __init__(self, children, parent=None):
        super().__init__(children)
        self.parent = parent
    @property
    def star_count(self):
        """
        Is `0` in case of `foo`, `1` in case of `*foo` or `2` in case of
        `**foo`.
        """
        first = self.children[0]
        if first in ('*', '**'):
            return len(first.value)
        return 0
    @property
    def default(self):
        """
        The default is the test node that appears after the `=`. Is `None` in
        case no default is present.
        """
        # A trailing comma shifts the positions of '=' and the default by one.
        has_comma = self.children[-1] == ','
        try:
            if self.children[-2 - int(has_comma)] == '=':
                return self.children[-1 - int(has_comma)]
        except IndexError:
            return None
    @property
    def annotation(self):
        """
        The annotation is the test node that appears after `:`. Is `None` in
        case no annotation is present.
        """
        tfpdef = self._tfpdef()
        if tfpdef.type == 'tfpdef':
            # tfpdef children: [name, ':', annotation]
            assert tfpdef.children[1] == ":"
            assert len(tfpdef.children) == 3
            annotation = tfpdef.children[2]
            return annotation
        else:
            return None
    def _tfpdef(self):
        """
        tfpdef: see e.g. grammar36.txt.
        """
        # Skip a leading '*'/'**' star operator if present.
        offset = int(self.children[0] in ('*', '**'))
        return self.children[offset]
    @property
    def name(self):
        """
        The `Name` leaf of the param.
        """
        if self._tfpdef().type == 'tfpdef':
            return self._tfpdef().children[0]
        else:
            return self._tfpdef()
    def get_defined_names(self, include_setitem=False):
        return [self.name]
    @property
    def position_index(self):
        """
        Property for the positional index of a parameter.
        """
        index = self.parent.children.index(self)
        try:
            keyword_only_index = self.parent.children.index('*')
            if index > keyword_only_index:
                # Skip the ` *, `
                index -= 2
        except ValueError:
            pass
        try:
            keyword_only_index = self.parent.children.index('/')
            if index > keyword_only_index:
                # Skip the ` /, `
                index -= 2
        except ValueError:
            pass
        # -1 accounts for the opening parenthesis/`lambda` keyword child.
        return index - 1
    def get_parent_function(self):
        """
        Returns the function/lambda of a parameter.
        """
        return self.search_ancestor('funcdef', 'lambdef')
    def get_code(self, include_prefix=True, include_comma=True):
        """
        Like all the other get_code functions, but includes the param
        `include_comma`.
        :param include_comma bool: If enabled includes the comma in the string output.
        """
        if include_comma:
            return super().get_code(include_prefix)
        # Drop a trailing comma before serializing the children.
        children = self.children
        if children[-1] == ',':
            children = children[:-1]
        return self._get_code_for_children(
            children,
            include_prefix=include_prefix
        )
    def __repr__(self):
        default = '' if self.default is None else '=%s' % self.default.get_code()
        return '<%s: %s>' % (type(self).__name__, str(self._tfpdef()) + default)
class SyncCompFor(PythonBaseNode):
    """Node for the ``for x in y`` part of a comprehension."""
    type = 'sync_comp_for'
    __slots__ = ()
    def get_defined_names(self, include_setitem=False):
        """
        Returns the a list of `Name` that the comprehension defines.
        """
        # allow async for
        return _defined_names(self.children[1], include_setitem)
# Backwards-compatibility alias: this is simply here so an older Jedi version
# can work with this newer parso version. Can be deleted in the next release.
CompFor = SyncCompFor
class UsedNamesMapping(Mapping):
    """
    A read-only mapping wrapper around a plain dict.

    Exists for the sole purpose of handing out an immutable view of the
    used-names cache; equality is identity-based and hashing uses ``id``.
    """
    def __init__(self, dct):
        self._dict = dct

    def __getitem__(self, key):
        return self._dict[key]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        # Comparing the contents of these mappings makes no sense; two
        # instances are equal only if they are the very same object.
        return self is other
| 37,187 | Python | .py | 1,005 | 27.769154 | 95 | 0.569064 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,005 | token.py | DamnWidget_anaconda/anaconda_lib/parso/python/token.py | from __future__ import absolute_import
from enum import Enum
class TokenType:
name: str
contains_syntax: bool
def __init__(self, name: str, contains_syntax: bool = False):
self.name = name
self.contains_syntax = contains_syntax
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.name)
class PythonTokenTypes(Enum):
STRING = TokenType('STRING')
NUMBER = TokenType('NUMBER')
NAME = TokenType('NAME', contains_syntax=True)
ERRORTOKEN = TokenType('ERRORTOKEN')
NEWLINE = TokenType('NEWLINE')
INDENT = TokenType('INDENT')
DEDENT = TokenType('DEDENT')
ERROR_DEDENT = TokenType('ERROR_DEDENT')
FSTRING_STRING = TokenType('FSTRING_STRING')
FSTRING_START = TokenType('FSTRING_START')
FSTRING_END = TokenType('FSTRING_END')
OP = TokenType('OP', contains_syntax=True)
ENDMARKER = TokenType('ENDMARKER')
| 909 | Python | .py | 24 | 32.75 | 65 | 0.676538 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,006 | pep8.py | DamnWidget_anaconda/anaconda_lib/parso/python/pep8.py | import re
from contextlib import contextmanager
from typing import Tuple
from parso.python.errors import ErrorFinder, ErrorFinderConfig
from parso.normalizer import Rule
from parso.python.tree import Flow, Scope
# Node types of import statements.
_IMPORT_TYPES = ('import_name', 'import_from')
# Node types that introduce an indented suite.
_SUITE_INTRODUCERS = ('classdef', 'funcdef', 'if_stmt', 'while_stmt',
                      'for_stmt', 'try_stmt', 'with_stmt')
_NON_STAR_TYPES = ('term', 'import_from', 'power')
_OPENING_BRACKETS = '(', '[', '{'
_CLOSING_BRACKETS = ')', ']', '}'
# Unary prefix operators.
_FACTOR = '+', '-', '~'
# Operators that may appear with or without surrounding spaces.
_ALLOW_SPACE = '*', '+', '-', '**', '/', '//', '@'
_BITWISE_OPERATOR = '<<', '>>', '|', '&', '^'
# Operators that always require surrounding whitespace.
_NEEDS_SPACE: Tuple[str, ...] = (
    '=', '%', '->',
    '<', '>', '==', '>=', '<=', '<>', '!=',
    '+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=', '<<=',
    '>>=', '**=', '//=')
_NEEDS_SPACE += _BITWISE_OPERATOR
# Node types after which an implicit indentation level may open.
_IMPLICIT_INDENTATION_TYPES = ('dictorsetmaker', 'argument')
_POSSIBLE_SLICE_PARENTS = ('subscript', 'subscriptlist', 'sliceop')
class IndentationTypes:
    """Sentinel objects marking why an indentation level was opened."""
    VERTICAL_BRACKET = object()
    HANGING_BRACKET = object()
    BACKSLASH = object()
    SUITE = object()
    IMPLICIT = object()
class IndentationNode:
    """
    One entry of the indentation stack; represents a plain suite level.

    ``bracket_indentation`` and ``indentation`` are identical for suites;
    subclasses (bracket/backslash nodes) differentiate them.
    """
    type = IndentationTypes.SUITE

    def __init__(self, config, indentation, parent=None):
        # ``config`` is unused here but kept for a uniform constructor
        # signature across the IndentationNode subclasses.
        self.bracket_indentation = self.indentation = indentation
        self.parent = parent

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

    def get_latest_suite_node(self):
        """Walk up the stack and return the closest SUITE node (or None)."""
        node = self
        while node is not None:
            if node.type == IndentationTypes.SUITE:
                return node
            node = node.parent
class BracketNode(IndentationNode):
    """Indentation level opened by a bracket: ``(``, ``[`` or ``{``."""
    def __init__(self, config, leaf, parent, in_suite_introducer=False):
        self.leaf = leaf
        # Figure out here what the indentation is. For chained brackets
        # we can basically use the previous indentation.
        previous_leaf = leaf
        n = parent
        if n.type == IndentationTypes.IMPLICIT:
            n = n.parent
        while True:
            if hasattr(n, 'leaf') and previous_leaf.line != n.leaf.line:
                break
            previous_leaf = previous_leaf.get_previous_leaf()
            if not isinstance(n, BracketNode) or previous_leaf != n.leaf:
                break
            n = n.parent
        parent_indentation = n.indentation
        next_leaf = leaf.get_next_leaf()
        if '\n' in next_leaf.prefix or '\r' in next_leaf.prefix:
            # This implies code like:
            # foobarbaz(
            #     a,
            #     b,
            # )
            self.bracket_indentation = parent_indentation \
                + config.closing_bracket_hanging_indentation
            self.indentation = parent_indentation + config.indentation
            self.type = IndentationTypes.HANGING_BRACKET
        else:
            # Implies code like:
            # foobarbaz(
            #           a,
            #           b,
            #           )
            expected_end_indent = leaf.end_pos[1]
            if '\t' in config.indentation:
                # Tab indentation cannot express alignment to a column.
                self.indentation = None
            else:
                self.indentation = ' ' * expected_end_indent
            self.bracket_indentation = self.indentation
            self.type = IndentationTypes.VERTICAL_BRACKET
        # Within a suite introducer (e.g. an `if (`), bump the indentation one
        # level further so the continuation is not confused with the suite.
        if in_suite_introducer and parent.type == IndentationTypes.SUITE \
                and self.indentation == parent_indentation + config.indentation:
            self.indentation += config.indentation
            # The closing bracket should have the same indentation.
            self.bracket_indentation = self.indentation
        self.parent = parent
class ImplicitNode(BracketNode):
    """
    Implicit indentation after keyword arguments, default arguments,
    annotations and dict values.
    """
    def __init__(self, config, leaf, parent):
        super().__init__(config, leaf, parent)
        self.type = IndentationTypes.IMPLICIT

        next_leaf = leaf.get_next_leaf()
        # After a dict key's `:` followed by a value on the same line, the
        # expected continuation indentation shifts by the one space after it.
        if leaf == ':' and '\n' not in next_leaf.prefix and '\r' not in next_leaf.prefix:
            self.indentation += ' '
class BackslashNode(IndentationNode):
    """Indentation level opened by a line-continuation backslash."""
    type = IndentationTypes.BACKSLASH
    def __init__(self, config, parent_indentation, containing_leaf, spacing, parent=None):
        expr_stmt = containing_leaf.search_ancestor('expr_stmt')
        if expr_stmt is not None:
            equals = expr_stmt.children[-2]
            if '\t' in config.indentation:
                # TODO unite with the code of BracketNode
                self.indentation = None
            else:
                # If the backslash follows the equals, use normal indentation
                # otherwise it should align with the equals.
                if equals.end_pos == spacing.start_pos:
                    self.indentation = parent_indentation + config.indentation
                else:
                    # +1 because there is a space.
                    self.indentation = ' ' * (equals.end_pos[1] + 1)
        else:
            self.indentation = parent_indentation + config.indentation
        self.bracket_indentation = self.indentation
        self.parent = parent
def _is_magic_name(name):
    # A "magic" (dunder) name is wrapped in double underscores, e.g. ``__init__``.
    value = name.value
    return value.startswith('__') and value.endswith('__')
class PEP8Normalizer(ErrorFinder):
    def __init__(self, *args, **kwargs):
        """Set up the mutable normalizer state used while walking the tree."""
        super().__init__(*args, **kwargs)
        self._previous_part = None
        self._previous_leaf = None
        self._on_newline = True
        self._newline_count = 0
        # How many blank lines the next definition must be preceded by
        # (None when no requirement is pending).
        self._wanted_newline_count = None
        self._max_new_lines_in_prefix = 0
        self._new_statement = True
        self._implicit_indentation_possible = False
        # The top of stack of the indentation nodes.
        self._indentation_tos = self._last_indentation_tos = \
            IndentationNode(self._config, indentation='')
        self._in_suite_introducer = False

        # Decide whether tabs or spaces are the configured indentation, so
        # the other character can be flagged as wrong (E101).
        if ' ' in self._config.indentation:
            self._indentation_type = 'spaces'
            self._wrong_indentation_char = '\t'
        else:
            self._indentation_type = 'tabs'
            self._wrong_indentation_char = ' '
    @contextmanager
    def visit_node(self, node):
        """Run the parent's error checks, then the PEP8-specific ones."""
        with super().visit_node(node):
            with self._visit_node(node):
                yield
@contextmanager
def _visit_node(self, node):
typ = node.type
if typ in 'import_name':
names = node.get_defined_names()
if len(names) > 1:
for name in names[:1]:
self.add_issue(name, 401, 'Multiple imports on one line')
elif typ == 'lambdef':
expr_stmt = node.parent
# Check if it's simply defining a single name, not something like
# foo.bar or x[1], where using a lambda could make more sense.
if expr_stmt.type == 'expr_stmt' and any(n.type == 'name'
for n in expr_stmt.children[:-2:2]):
self.add_issue(node, 731, 'Do not assign a lambda expression, use a def')
elif typ == 'try_stmt':
for child in node.children:
# Here we can simply check if it's an except, because otherwise
# it would be an except_clause.
if child.type == 'keyword' and child.value == 'except':
self.add_issue(child, 722, 'Do not use bare except, specify exception instead')
elif typ == 'comparison':
for child in node.children:
if child.type not in ('atom_expr', 'power'):
continue
if len(child.children) > 2:
continue
trailer = child.children[1]
atom = child.children[0]
if trailer.type == 'trailer' and atom.type == 'name' \
and atom.value == 'type':
self.add_issue(node, 721, "Do not compare types, use 'isinstance()")
break
elif typ == 'file_input':
endmarker = node.children[-1]
prev = endmarker.get_previous_leaf()
prefix = endmarker.prefix
if (not prefix.endswith('\n') and not prefix.endswith('\r') and (
prefix or prev is None or prev.value not in {'\n', '\r\n', '\r'})):
self.add_issue(endmarker, 292, "No newline at end of file")
if typ in _IMPORT_TYPES:
simple_stmt = node.parent
module = simple_stmt.parent
if module.type == 'file_input':
index = module.children.index(simple_stmt)
for child in module.children[:index]:
children = [child]
if child.type == 'simple_stmt':
# Remove the newline.
children = child.children[:-1]
found_docstring = False
for c in children:
if c.type == 'string' and not found_docstring:
continue
found_docstring = True
if c.type == 'expr_stmt' and \
all(_is_magic_name(n) for n in c.get_defined_names()):
continue
if c.type in _IMPORT_TYPES or isinstance(c, Flow):
continue
self.add_issue(node, 402, 'Module level import not at top of file')
break
else:
continue
break
implicit_indentation_possible = typ in _IMPLICIT_INDENTATION_TYPES
in_introducer = typ in _SUITE_INTRODUCERS
if in_introducer:
self._in_suite_introducer = True
elif typ == 'suite':
if self._indentation_tos.type == IndentationTypes.BACKSLASH:
self._indentation_tos = self._indentation_tos.parent
self._indentation_tos = IndentationNode(
self._config,
self._indentation_tos.indentation + self._config.indentation,
parent=self._indentation_tos
)
elif implicit_indentation_possible:
self._implicit_indentation_possible = True
yield
if typ == 'suite':
assert self._indentation_tos.type == IndentationTypes.SUITE
self._indentation_tos = self._indentation_tos.parent
# If we dedent, no lines are needed anymore.
self._wanted_newline_count = None
elif implicit_indentation_possible:
self._implicit_indentation_possible = False
if self._indentation_tos.type == IndentationTypes.IMPLICIT:
self._indentation_tos = self._indentation_tos.parent
elif in_introducer:
self._in_suite_introducer = False
if typ in ('classdef', 'funcdef'):
self._wanted_newline_count = self._get_wanted_blank_lines_count()
def _check_tabs_spaces(self, spacing):
if self._wrong_indentation_char in spacing.value:
self.add_issue(spacing, 101, 'Indentation contains ' + self._indentation_type)
return True
return False
    def _get_wanted_blank_lines_count(self):
        """2 blank lines at module level, 1 inside a nested suite."""
        suite_node = self._indentation_tos.get_latest_suite_node()
        return int(suite_node.parent is None) + 1
    def _reset_newlines(self, spacing, leaf, is_comment=False):
        """Check blank-line counts (E301-E303 family) and reset the counters."""
        # Keep the maximum across comment-interrupted prefix runs.
        self._max_new_lines_in_prefix = \
            max(self._max_new_lines_in_prefix, self._newline_count)

        wanted = self._wanted_newline_count
        if wanted is not None:
            # Need to subtract one
            blank_lines = self._newline_count - 1
            if wanted > blank_lines and leaf.type != 'endmarker':
                # In case of a comment we don't need to add the issue, yet.
                if not is_comment:
                    # TODO end_pos wrong.
                    code = 302 if wanted == 2 else 301
                    message = "expected %s blank line, found %s" \
                        % (wanted, blank_lines)
                    self.add_issue(spacing, code, message)
                    self._wanted_newline_count = None
            else:
                self._wanted_newline_count = None

        if not is_comment:
            wanted = self._get_wanted_blank_lines_count()
            actual = self._max_new_lines_in_prefix - 1

            val = leaf.value
            # Definitions (decorator, class, def/async def) need the wanted
            # blank lines in front of them; decorated defs are exempt.
            needs_lines = (
                val == '@' and leaf.parent.type == 'decorator'
                or (
                    val == 'class'
                    or val == 'async' and leaf.get_next_leaf() == 'def'
                    or val == 'def' and self._previous_leaf != 'async'
                ) and leaf.parent.parent.type != 'decorated'
            )
            if needs_lines and actual < wanted:
                func_or_cls = leaf.parent
                suite = func_or_cls.parent
                if suite.type == 'decorated':
                    suite = suite.parent

                # The first leaf of a file or a suite should not need blank
                # lines.
                if suite.children[int(suite.type == 'suite')] != func_or_cls:
                    code = 302 if wanted == 2 else 301
                    message = "expected %s blank line, found %s" \
                        % (wanted, actual)
                    self.add_issue(spacing, code, message)

            self._max_new_lines_in_prefix = 0

        self._newline_count = 0
    def visit_leaf(self, leaf):
        """Check a leaf's prefix parts and value, then update parser state."""
        super().visit_leaf(leaf)
        # Walk the prefix (comments, backslashes, newlines) up to the final
        # spacing part, checking each part along the way.
        for part in leaf._split_prefix():
            if part.type == 'spacing':
                # This part is used for the part call after for.
                break
            self._visit_part(part, part.create_spacing_part(), leaf)

        self._analyse_non_prefix(leaf)
        self._visit_part(leaf, part, leaf)

        # Cleanup
        self._last_indentation_tos = self._indentation_tos

        self._new_statement = leaf.type == 'newline'

        # TODO does this work? with brackets and stuff?
        if leaf.type == 'newline' and \
                self._indentation_tos.type == IndentationTypes.BACKSLASH:
            self._indentation_tos = self._indentation_tos.parent

        if leaf.value == ':' and leaf.parent.type in _SUITE_INTRODUCERS:
            self._in_suite_introducer = False
        elif leaf.value == 'elif':
            self._in_suite_introducer = True

        if not self._new_statement:
            self._reset_newlines(part, leaf)
            self._max_blank_lines = 0

        self._previous_leaf = leaf

        return leaf.value
def _visit_part(self, part, spacing, leaf):
    """
    Check one prefix part (or the leaf itself) for PEP8 issues:
    comment formatting, blank-line counting, backslash continuations and
    continuation-line indentation (pycodestyle 1xx/2xx/3xx/5xx families).

    Fix: the error 123 message used to read "Losing bracket ..."; the
    intended pycodestyle wording is "Closing bracket ...".

    :param part: The prefix part or leaf currently being checked.
    :param spacing: The spacing part directly in front of *part*.
    :param leaf: The leaf that owns the prefix *part* belongs to.
    """
    value = part.value
    type_ = part.type
    if type_ == 'error_leaf':
        return

    if value == ',' and part.parent.type == 'dictorsetmaker':
        self._indentation_tos = self._indentation_tos.parent

    node = self._indentation_tos

    if type_ == 'comment':
        if value.startswith('##'):
            # Whole blocks of # should not raise an error.
            if value.lstrip('#'):
                self.add_issue(part, 266, "Too many leading '#' for block comment.")
        elif self._on_newline:
            if not re.match(r'#:? ', value) and not value == '#' \
                    and not (value.startswith('#!') and part.start_pos == (1, 0)):
                self.add_issue(part, 265, "Block comment should start with '# '")
        else:
            if not re.match(r'#:? [^ ]', value):
                self.add_issue(part, 262, "Inline comment should start with '# '")

        self._reset_newlines(spacing, leaf, is_comment=True)
    elif type_ == 'newline':
        if self._newline_count > self._get_wanted_blank_lines_count():
            self.add_issue(part, 303, "Too many blank lines (%s)" % self._newline_count)
        elif leaf in ('def', 'class') \
                and leaf.parent.parent.type == 'decorated':
            self.add_issue(part, 304, "Blank lines found after function decorator")

        self._newline_count += 1

    if type_ == 'backslash':
        # TODO is this enough checking? What about ==?
        if node.type != IndentationTypes.BACKSLASH:
            if node.type != IndentationTypes.SUITE:
                self.add_issue(part, 502, 'The backslash is redundant between brackets')
            else:
                indentation = node.indentation
                if self._in_suite_introducer and node.type == IndentationTypes.SUITE:
                    indentation += self._config.indentation

                self._indentation_tos = BackslashNode(
                    self._config,
                    indentation,
                    part,
                    spacing,
                    parent=self._indentation_tos
                )
    elif self._on_newline:
        indentation = spacing.value
        if node.type == IndentationTypes.BACKSLASH \
                and self._previous_part.type == 'newline':
            self._indentation_tos = self._indentation_tos.parent

        if not self._check_tabs_spaces(spacing):
            should_be_indentation = node.indentation
            if type_ == 'comment':
                # Comments can be dedented. So we have to care for that.
                n = self._last_indentation_tos
                while True:
                    if len(indentation) > len(n.indentation):
                        break

                    should_be_indentation = n.indentation

                    self._last_indentation_tos = n
                    if n == node:
                        break
                    n = n.parent

            if self._new_statement:
                if type_ == 'newline':
                    if indentation:
                        self.add_issue(spacing, 291, 'Trailing whitespace')
                elif indentation != should_be_indentation:
                    s = '%s %s' % (len(self._config.indentation), self._indentation_type)
                    self.add_issue(part, 111, 'Indentation is not a multiple of ' + s)
            else:
                if value in '])}':
                    should_be_indentation = node.bracket_indentation
                else:
                    should_be_indentation = node.indentation
                if self._in_suite_introducer and indentation == \
                        node.get_latest_suite_node().indentation \
                        + self._config.indentation:
                    self.add_issue(part, 129, "Line with same indent as next logical block")
                elif indentation != should_be_indentation:
                    if not self._check_tabs_spaces(spacing) and part.value not in \
                            {'\n', '\r\n', '\r'}:
                        if value in '])}':
                            if node.type == IndentationTypes.VERTICAL_BRACKET:
                                self.add_issue(
                                    part,
                                    124,
                                    "Closing bracket does not match visual indentation"
                                )
                            else:
                                # Was "Losing bracket ..." - typo fixed.
                                self.add_issue(
                                    part,
                                    123,
                                    "Closing bracket does not match "
                                    "indentation of opening bracket's line"
                                )
                        else:
                            if len(indentation) < len(should_be_indentation):
                                if node.type == IndentationTypes.VERTICAL_BRACKET:
                                    self.add_issue(
                                        part,
                                        128,
                                        'Continuation line under-indented for visual indent'
                                    )
                                elif node.type == IndentationTypes.BACKSLASH:
                                    self.add_issue(
                                        part,
                                        122,
                                        'Continuation line missing indentation or outdented'
                                    )
                                elif node.type == IndentationTypes.IMPLICIT:
                                    self.add_issue(part, 135, 'xxx')
                                else:
                                    self.add_issue(
                                        part,
                                        121,
                                        'Continuation line under-indented for hanging indent'
                                    )
                            else:
                                if node.type == IndentationTypes.VERTICAL_BRACKET:
                                    self.add_issue(
                                        part,
                                        127,
                                        'Continuation line over-indented for visual indent'
                                    )
                                elif node.type == IndentationTypes.IMPLICIT:
                                    self.add_issue(part, 136, 'xxx')
                                else:
                                    self.add_issue(
                                        part,
                                        126,
                                        'Continuation line over-indented for hanging indent'
                                    )
    else:
        self._check_spacing(part, spacing)

    self._check_line_length(part, spacing)
    # -------------------------------
    # Finalizing. Updating the state.
    # -------------------------------
    if value and value in '()[]{}' and type_ != 'error_leaf' \
            and part.parent.type != 'error_node':
        if value in _OPENING_BRACKETS:
            self._indentation_tos = BracketNode(
                self._config, part,
                parent=self._indentation_tos,
                in_suite_introducer=self._in_suite_introducer
            )
        else:
            assert node.type != IndentationTypes.IMPLICIT
            self._indentation_tos = self._indentation_tos.parent
    elif value in ('=', ':') and self._implicit_indentation_possible \
            and part.parent.type in _IMPLICIT_INDENTATION_TYPES:
        indentation = node.indentation
        self._indentation_tos = ImplicitNode(
            self._config, part, parent=self._indentation_tos
        )

    self._on_newline = type_ in ('newline', 'backslash', 'bom')

    self._previous_part = part
    self._previous_spacing = spacing
def _check_line_length(self, part, spacing):
if part.type == 'backslash':
last_column = part.start_pos[1] + 1
else:
last_column = part.end_pos[1]
if last_column > self._config.max_characters \
and spacing.start_pos[1] <= self._config.max_characters:
# Special case for long URLs in multi-line docstrings or comments,
# but still report the error when the 72 first chars are whitespaces.
report = True
if part.type == 'comment':
splitted = part.value[1:].split()
if len(splitted) == 1 \
and (part.end_pos[1] - len(splitted[0])) < 72:
report = False
if report:
self.add_issue(
part,
501,
'Line too long (%s > %s characters)' %
(last_column, self._config.max_characters),
)
def _check_spacing(self, part, spacing):
    """
    Check the horizontal whitespace directly in front of *part*
    (pycodestyle's 2xx error family) and report issues via ``add_issue``.

    Fix: the 231 message previously contained a literal ``'%s'``
    placeholder that was never substituted; it now includes the actual
    token (e.g. "missing whitespace after ','").

    :param part: The current leaf/prefix part being visited.
    :param spacing: The spacing part directly in front of *part*.
    """
    def add_if_spaces(*args):
        # Report only when whitespace before ``part`` is present.
        if spaces:
            return self.add_issue(*args)

    def add_not_spaces(*args):
        # Report only when whitespace before ``part`` is missing.
        if not spaces:
            return self.add_issue(*args)

    spaces = spacing.value
    prev = self._previous_part
    if prev is not None and prev.type == 'error_leaf' or part.type == 'error_leaf':
        return

    type_ = part.type
    if '\t' in spaces:
        self.add_issue(spacing, 223, 'Used tab to separate tokens')
    elif type_ == 'comment':
        if len(spaces) < self._config.spaces_before_comment:
            self.add_issue(spacing, 261, 'At least two spaces before inline comment')
    elif type_ == 'newline':
        add_if_spaces(spacing, 291, 'Trailing whitespace')
    elif len(spaces) > 1:
        self.add_issue(spacing, 221, 'Multiple spaces used')
    else:
        if prev in _OPENING_BRACKETS:
            message = "Whitespace after '%s'" % part.value
            add_if_spaces(spacing, 201, message)
        elif part in _CLOSING_BRACKETS:
            message = "Whitespace before '%s'" % part.value
            add_if_spaces(spacing, 202, message)
        elif part in (',', ';') or part == ':' \
                and part.parent.type not in _POSSIBLE_SLICE_PARENTS:
            message = "Whitespace before '%s'" % part.value
            add_if_spaces(spacing, 203, message)
        elif prev == ':' and prev.parent.type in _POSSIBLE_SLICE_PARENTS:
            pass  # TODO
        elif prev in (',', ';', ':'):
            # Include the offending token in the message (pycodestyle E231
            # style); the placeholder was previously left unformatted.
            add_not_spaces(spacing, 231, "missing whitespace after '%s'" % prev.value)
        elif part == ':':  # Is a subscript
            # TODO
            pass
        elif part in ('*', '**') and part.parent.type not in _NON_STAR_TYPES \
                or prev in ('*', '**') \
                and prev.parent.type not in _NON_STAR_TYPES:
            # TODO
            pass
        elif prev in _FACTOR and prev.parent.type == 'factor':
            pass
        elif prev == '@' and prev.parent.type == 'decorator':
            pass  # TODO should probably raise an error if there's a space here
        elif part in _NEEDS_SPACE or prev in _NEEDS_SPACE:
            if part == '=' and part.parent.type in ('argument', 'param') \
                    or prev == '=' and prev.parent.type in ('argument', 'param'):
                if part == '=':
                    param = part.parent
                else:
                    param = prev.parent
                if param.type == 'param' and param.annotation:
                    add_not_spaces(spacing, 252, 'Expected spaces around annotation equals')
                else:
                    add_if_spaces(
                        spacing,
                        251,
                        'Unexpected spaces around keyword / parameter equals'
                    )
            elif part in _BITWISE_OPERATOR or prev in _BITWISE_OPERATOR:
                add_not_spaces(
                    spacing,
                    227,
                    'Missing whitespace around bitwise or shift operator'
                )
            elif part == '%' or prev == '%':
                add_not_spaces(spacing, 228, 'Missing whitespace around modulo operator')
            else:
                message_225 = 'Missing whitespace between tokens'
                add_not_spaces(spacing, 225, message_225)
        elif type_ == 'keyword' or prev.type == 'keyword':
            add_not_spaces(spacing, 275, 'Missing whitespace around keyword')
        else:
            prev_spacing = self._previous_spacing
            if prev in _ALLOW_SPACE and spaces != prev_spacing.value \
                    and '\n' not in self._previous_leaf.prefix \
                    and '\r' not in self._previous_leaf.prefix:
                message = "Whitespace before operator doesn't match with whitespace after"
                self.add_issue(spacing, 229, message)

            if spaces and part not in _ALLOW_SPACE and prev not in _ALLOW_SPACE:
                message_225 = 'Missing whitespace between tokens'
                # self.add_issue(spacing, 225, message_225)
                # TODO why only brackets?
                if part in _OPENING_BRACKETS:
                    message = "Whitespace before '%s'" % part.value
                    add_if_spaces(spacing, 211, message)
def _analyse_non_prefix(self, leaf):
typ = leaf.type
if typ == 'name' and leaf.value in ('l', 'O', 'I'):
if leaf.is_definition():
message = "Do not define %s named 'l', 'O', or 'I' one line"
if leaf.parent.type == 'class' and leaf.parent.name == leaf:
self.add_issue(leaf, 742, message % 'classes')
elif leaf.parent.type == 'function' and leaf.parent.name == leaf:
self.add_issue(leaf, 743, message % 'function')
else:
self.add_issuadd_issue(741, message % 'variables', leaf)
elif leaf.value == ':':
if isinstance(leaf.parent, (Flow, Scope)) and leaf.parent.type != 'lambdef':
next_leaf = leaf.get_next_leaf()
if next_leaf.type != 'newline':
if leaf.parent.type == 'funcdef':
self.add_issue(next_leaf, 704, 'Multiple statements on one line (def)')
else:
self.add_issue(next_leaf, 701, 'Multiple statements on one line (colon)')
elif leaf.value == ';':
if leaf.get_next_leaf().type in ('newline', 'endmarker'):
self.add_issue(leaf, 703, 'Statement ends with a semicolon')
else:
self.add_issue(leaf, 702, 'Multiple statements on one line (semicolon)')
elif leaf.value in ('==', '!='):
comparison = leaf.parent
index = comparison.children.index(leaf)
left = comparison.children[index - 1]
right = comparison.children[index + 1]
for node in left, right:
if node.type == 'keyword' or node.type == 'name':
if node.value == 'None':
message = "comparison to None should be 'if cond is None:'"
self.add_issue(leaf, 711, message)
break
elif node.value in ('True', 'False'):
message = "comparison to False/True should be " \
"'if cond is True:' or 'if cond:'"
self.add_issue(leaf, 712, message)
break
elif leaf.value in ('in', 'is'):
comparison = leaf.parent
if comparison.type == 'comparison' and comparison.parent.type == 'not_test':
if leaf.value == 'in':
self.add_issue(leaf, 713, "test for membership should be 'not in'")
else:
self.add_issue(leaf, 714, "test for object identity should be 'is not'")
elif typ == 'string':
# Checking multiline strings
for i, line in enumerate(leaf.value.splitlines()[1:]):
indentation = re.match(r'[ \t]*', line).group(0)
start_pos = leaf.line + i, len(indentation)
# TODO check multiline indentation.
start_pos
elif typ == 'endmarker':
if self._newline_count >= 2:
self.add_issue(leaf, 391, 'Blank line at end of file')
def add_issue(self, node, code, message):
    """
    Add a PEP8 issue, unless the position is inside or right after a
    syntax error (issues there would be mostly noise).

    Codes 901/903 are delegated to the normal MRO (they originate in the
    ErrorFinder); everything else bypasses ErrorFinder's custom
    ``add_issue`` by starting the super() lookup after it.
    """
    if self._previous_leaf is not None:
        if self._previous_leaf.search_ancestor('error_node') is not None:
            return
        if self._previous_leaf.type == 'error_leaf':
            return
    if node.search_ancestor('error_node') is not None:
        return

    if code in (901, 903):
        # 901 and 903 are raised by the ErrorFinder.
        super().add_issue(node, code, message)
    else:
        # Skip ErrorFinder here, because it has custom behavior.
        super(ErrorFinder, self).add_issue(node, code, message)
class PEP8NormalizerConfig(ErrorFinderConfig):
    """
    Normalizing to PEP8. Not really implemented, yet.
    """
    # Fix: this docstring used to be placed *after* the ``normalizer_class``
    # assignment, which made it a dead string statement instead of the
    # class's ``__doc__``.
    normalizer_class = PEP8Normalizer

    def __init__(self, indentation=' ' * 4, hanging_indentation=None,
                 max_characters=79, spaces_before_comment=2):
        # :param indentation: The string used for one indentation level.
        # :param hanging_indentation: Indentation of hanging continuation
        #     lines; defaults to ``indentation``.
        # :param max_characters: Maximum allowed line length (error 501).
        # :param spaces_before_comment: Minimum spaces before an inline
        #     comment (error 261).
        self.indentation = indentation
        if hanging_indentation is None:
            hanging_indentation = indentation
        self.hanging_indentation = hanging_indentation
        self.closing_bracket_hanging_indentation = ''
        self.break_after_binary = False
        self.max_characters = max_characters
        self.spaces_before_comment = spaces_before_comment
# TODO this is not yet ready.
# @PEP8Normalizer.register_rule(type='endmarker')
class BlankLineAtEnd(Rule):
    """
    Rule 392: report a blank line at the very end of a file.

    NOTE(review): ``_newline_count`` looks like normalizer state, not Rule
    state — confirm before enabling the registration above.
    """
    code = 392
    message = 'Blank line at end of file'

    def is_issue(self, leaf):
        newline_total = self._newline_count
        return newline_total >= 2
| 33,779 | Python | .py | 682 | 33.175953 | 99 | 0.506089 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,007 | prefix.py | DamnWidget_anaconda/anaconda_lib/parso/python/prefix.py | import re
from codecs import BOM_UTF8
from typing import Tuple
from parso.python.tokenize import group
unicode_bom = BOM_UTF8.decode('utf-8')
class PrefixPart:
    """
    One piece of a leaf's prefix: a comment, newline, backslash
    continuation, form feed, BOM or plain spacing.
    """
    def __init__(self, leaf, typ, value, spacing='', start_pos=None):
        assert start_pos is not None
        self.type = typ
        self.value = value
        self.spacing = spacing
        self.parent = leaf
        self.start_pos: Tuple[int, int] = start_pos

    @property
    def end_pos(self) -> Tuple[int, int]:
        line, column = self.start_pos
        if self.value.endswith(('\n', '\r')):
            return line + 1, 0
        if self.value == unicode_bom:
            # The bom doesn't have a length at the start of a Python file.
            return line, column
        return line, column + len(self.value)

    def create_spacing_part(self):
        """Return a 'spacing' part for the whitespace preceding this part."""
        line, column = self.start_pos
        return PrefixPart(
            self.parent, 'spacing', self.spacing,
            start_pos=(line, column - len(self.spacing))
        )

    def __repr__(self):
        return '%s(%s, %s, %s)' % (
            type(self).__name__,
            self.type,
            repr(self.value),
            self.start_pos
        )

    def search_ancestor(self, *node_types):
        """Walk up the parent chain for the first node of a given type."""
        ancestor = self.parent
        while ancestor is not None:
            if ancestor.type in node_types:
                return ancestor
            ancestor = ancestor.parent
        return None
# Patterns for the individual prefix part types.
_comment = r'#[^\n\r\f]*'
_backslash = r'\\\r?\n|\\\r'
_newline = r'\r?\n|\r'
_form_feed = r'\f'
# Matches the end of the prefix (only spacing remains).
_only_spacing = '$'
_spacing = r'[ \t]*'
_bom = unicode_bom

# Group 1 captures the leading spacing, group 2 exactly one prefix part.
_regex = group(
    _comment, _backslash, _newline, _form_feed, _only_spacing, _bom,
    capture=True
)
_regex = re.compile(group(_spacing, capture=True) + _regex)

# Maps the first character of a matched part to its type name.
_types = {
    '#': 'comment',
    '\\': 'backslash',
    '\f': 'formfeed',
    '\n': 'newline',
    '\r': 'newline',
    unicode_bom: 'bom'
}
def split_prefix(leaf, start_pos):
    """
    Yield a ``PrefixPart`` for every piece of *leaf*'s prefix, always
    terminated by a (possibly empty) ``spacing`` part.

    :param leaf: The leaf whose ``prefix`` is split.
    :param start_pos: ``(line, column)`` where the prefix starts.
    """
    line, column = start_pos
    offset = 0
    matched = ws = ''
    saw_bom = False
    prefix = leaf.prefix
    while offset != len(prefix):
        m = _regex.match(prefix, offset)
        ws = m.group(1)
        matched = m.group(2)
        if not matched:
            break
        part_type = _types[matched[0]]
        yield PrefixPart(
            leaf, part_type, matched, ws,
            # The BOM takes up no column on the first line.
            start_pos=(line, column + offset - int(saw_bom) + len(ws))
        )
        if part_type == 'bom':
            saw_bom = True

        offset = m.end(0)
        if matched.endswith(('\n', '\r')):
            line += 1
            column = -offset

    # Trailing spacing part; empty when the loop last consumed a real part.
    final_ws = '' if matched else ws
    yield PrefixPart(
        leaf, 'spacing', final_ws,
        start_pos=(line, column + offset)
    )
| 2,743 | Python | .py | 89 | 23.47191 | 74 | 0.557072 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,008 | parser.py | DamnWidget_anaconda/anaconda_lib/parso/python/parser.py | from parso.python import tree
from parso.python.token import PythonTokenTypes
from parso.parser import BaseParser
NAME = PythonTokenTypes.NAME
INDENT = PythonTokenTypes.INDENT
DEDENT = PythonTokenTypes.DEDENT
class Parser(BaseParser):
    """
    This class is used to parse a Python file, it then divides them into a
    class structure of different scopes.

    :param pgen_grammar: The grammar object of pgen2. Loaded by load_grammar.
    """

    # Maps a grammar nonterminal to the specialized tree class used for it;
    # everything else falls back to ``default_node``.
    node_map = {
        'expr_stmt': tree.ExprStmt,
        'classdef': tree.Class,
        'funcdef': tree.Function,
        'file_input': tree.Module,
        'import_name': tree.ImportName,
        'import_from': tree.ImportFrom,
        'break_stmt': tree.KeywordStatement,
        'continue_stmt': tree.KeywordStatement,
        'return_stmt': tree.ReturnStmt,
        'raise_stmt': tree.KeywordStatement,
        'yield_expr': tree.YieldExpr,
        'del_stmt': tree.KeywordStatement,
        'pass_stmt': tree.KeywordStatement,
        'global_stmt': tree.GlobalStmt,
        'nonlocal_stmt': tree.KeywordStatement,
        'print_stmt': tree.KeywordStatement,
        'assert_stmt': tree.AssertStmt,
        'if_stmt': tree.IfStmt,
        'with_stmt': tree.WithStmt,
        'for_stmt': tree.ForStmt,
        'while_stmt': tree.WhileStmt,
        'try_stmt': tree.TryStmt,
        'sync_comp_for': tree.SyncCompFor,
        # Not sure if this is the best idea, but IMO it's the easiest way to
        # avoid extreme amounts of work around the subtle difference of 2/3
        # grammar in list comprehensions.
        'decorator': tree.Decorator,
        'lambdef': tree.Lambda,
        'lambdef_nocond': tree.Lambda,
        'namedexpr_test': tree.NamedExpr,
    }
    default_node = tree.PythonNode

    # Names/Keywords are handled separately
    _leaf_map = {
        PythonTokenTypes.STRING: tree.String,
        PythonTokenTypes.NUMBER: tree.Number,
        PythonTokenTypes.NEWLINE: tree.Newline,
        PythonTokenTypes.ENDMARKER: tree.EndMarker,
        PythonTokenTypes.FSTRING_STRING: tree.FStringString,
        PythonTokenTypes.FSTRING_START: tree.FStringStart,
        PythonTokenTypes.FSTRING_END: tree.FStringEnd,
    }

    def __init__(self, pgen_grammar, error_recovery=True, start_nonterminal='file_input'):
        super().__init__(pgen_grammar, start_nonterminal,
                         error_recovery=error_recovery)

        self.syntax_errors = []
        # Indices of INDENT tokens that were dropped during error recovery;
        # the matching DEDENTs must be dropped as well.
        self._omit_dedent_list = []
        self._indent_counter = 0

    def parse(self, tokens):
        """Parse the token stream, optionally with error recovery."""
        if self._error_recovery:
            if self._start_nonterminal != 'file_input':
                raise NotImplementedError

            tokens = self._recovery_tokenize(tokens)

        return super().parse(tokens)

    def convert_node(self, nonterminal, children):
        """
        Convert raw node information to a PythonBaseNode instance.

        This is passed to the parser driver which calls it whenever a reduction of a
        grammar rule produces a new complete node, so that the tree is built
        strictly bottom-up.
        """
        try:
            node = self.node_map[nonterminal](children)
        except KeyError:
            if nonterminal == 'suite':
                # We don't want the INDENT/DEDENT in our parser tree. Those
                # leaves are just cancer. They are virtual leaves and not real
                # ones and therefore have pseudo start/end positions and no
                # prefixes. Just ignore them.
                children = [children[0]] + children[2:-1]
            node = self.default_node(nonterminal, children)
        return node

    def convert_leaf(self, type, value, prefix, start_pos):
        """Create the tree leaf for a single token."""
        # print('leaf', repr(value), token.tok_name[type])
        if type == NAME:
            if value in self._pgen_grammar.reserved_syntax_strings:
                return tree.Keyword(value, start_pos, prefix)
            else:
                return tree.Name(value, start_pos, prefix)

        return self._leaf_map.get(type, tree.Operator)(value, start_pos, prefix)

    def error_recovery(self, token):
        """
        Called by the base parser when *token* cannot be shifted; wraps the
        broken region in an error node/leaf so parsing can continue.
        """
        tos_nodes = self.stack[-1].nodes
        if tos_nodes:
            last_leaf = tos_nodes[-1].get_last_leaf()
        else:
            last_leaf = None

        if self._start_nonterminal == 'file_input' and \
                (token.type == PythonTokenTypes.ENDMARKER
                 or token.type == DEDENT and not last_leaf.value.endswith('\n')
                 and not last_leaf.value.endswith('\r')):
            # In Python statements need to end with a newline. But since it's
            # possible (and valid in Python) that there's no newline at the
            # end of a file, we have to recover even if the user doesn't want
            # error recovery.
            if self.stack[-1].dfa.from_rule == 'simple_stmt':
                try:
                    plan = self.stack[-1].dfa.transitions[PythonTokenTypes.NEWLINE]
                except KeyError:
                    pass
                else:
                    if plan.next_dfa.is_final and not plan.dfa_pushes:
                        # We are ignoring here that the newline would be
                        # required for a simple_stmt.
                        self.stack[-1].dfa = plan.next_dfa
                        self._add_token(token)
                        return

        if not self._error_recovery:
            return super().error_recovery(token)

        def current_suite(stack):
            # For now just discard everything that is not a suite or
            # file_input, if we detect an error.
            for until_index, stack_node in reversed(list(enumerate(stack))):
                # `suite` can sometimes be only simple_stmt, not stmt.
                if stack_node.nonterminal == 'file_input':
                    break
                elif stack_node.nonterminal == 'suite':
                    # In the case where we just have a newline we don't want to
                    # do error recovery here. In all other cases, we want to do
                    # error recovery.
                    if len(stack_node.nodes) != 1:
                        break
            return until_index

        until_index = current_suite(self.stack)

        if self._stack_removal(until_index + 1):
            self._add_token(token)
        else:
            typ, value, start_pos, prefix = token
            if typ == INDENT:
                # For every deleted INDENT we have to delete a DEDENT as well.
                # Otherwise the parser will get into trouble and DEDENT too early.
                self._omit_dedent_list.append(self._indent_counter)

            error_leaf = tree.PythonErrorLeaf(typ.name, value, start_pos, prefix)
            self.stack[-1].nodes.append(error_leaf)

        tos = self.stack[-1]
        if tos.nonterminal == 'suite':
            # Need at least one statement in the suite. This happened with the
            # error recovery above.
            try:
                tos.dfa = tos.dfa.arcs['stmt']
            except KeyError:
                # We're already in a final state.
                pass

    def _stack_removal(self, start_index):
        """Collapse all stack nodes above *start_index* into one error node."""
        all_nodes = [node for stack_node in self.stack[start_index:] for node in stack_node.nodes]

        if all_nodes:
            node = tree.PythonErrorNode(all_nodes)
            self.stack[start_index - 1].nodes.append(node)

        self.stack[start_index:] = []
        return bool(all_nodes)

    def _recovery_tokenize(self, tokens):
        """Filter the token stream, dropping DEDENTs for omitted INDENTs."""
        for token in tokens:
            typ = token[0]
            if typ == DEDENT:
                # We need to count indents, because if we just omit any DEDENT,
                # we might omit them in the wrong place.
                o = self._omit_dedent_list
                if o and o[-1] == self._indent_counter:
                    o.pop()
                    self._indent_counter -= 1
                    continue

                self._indent_counter -= 1
            elif typ == INDENT:
                self._indent_counter += 1
            yield token
| 8,108 | Python | .py | 177 | 33.892655 | 98 | 0.588206 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,009 | diff.py | DamnWidget_anaconda/anaconda_lib/parso/python/diff.py | """
The diff parser is trying to be a faster version of the normal parser by trying
to reuse the nodes of a previous pass over the same file. This is also called
incremental parsing in parser literature. The difference is mostly that with
incremental parsing you get a range that needs to be reparsed. Here we
calculate that range ourselves by using difflib. After that it's essentially
incremental parsing.
The biggest issue of this approach is that we reuse nodes in a mutable way. The
initial design and idea are quite problematic for this parser, but it is also
pretty fast. Measurements showed that just copying nodes in Python is simply
quite a bit slower (especially for big files >3 kLOC). Therefore we did not
want to get rid of the mutable nodes, since this is usually not an issue.
This is by far the hardest software I ever wrote, exactly because the initial
design is crappy. When you have to account for a lot of mutable state, it
creates a ton of issues that you would otherwise not have. This file took
probably 3-6 months to write, which is insane for a parser.
There is a fuzzer in ``test/fuzz_diff_parser.py`` that helps test this whole thing. Please use it if you
make changes here. If you run the fuzzer like::
test/fuzz_diff_parser.py random -n 100000
you can be pretty sure that everything is still fine. I sometimes run the
fuzzer up to 24h to make sure everything is still ok.
"""
import re
import difflib
from collections import namedtuple
import logging
from parso.utils import split_lines
from parso.python.parser import Parser
from parso.python.tree import EndMarker
from parso.python.tokenize import PythonToken, BOM_UTF8_STRING
from parso.python.token import PythonTokenTypes
LOG = logging.getLogger(__name__)
# When True, update() re-parses everything and compares against the
# diff-parsed tree (very slow; for debugging only).
DEBUG_DIFF_PARSER = False

# Token types of error leaves that carry no value/prefix.
_INDENTATION_TOKENS = 'INDENT', 'ERROR_DEDENT', 'DEDENT'

# Local aliases for frequently used token types.
NEWLINE = PythonTokenTypes.NEWLINE
DEDENT = PythonTokenTypes.DEDENT
NAME = PythonTokenTypes.NAME
ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT
ENDMARKER = PythonTokenTypes.ENDMARKER
def _is_indentation_error_leaf(node):
    """True for error leaves produced by INDENT/DEDENT problems."""
    if node.type != 'error_leaf':
        return False
    return node.token_type in _INDENTATION_TOKENS
def _get_previous_leaf_if_indentation(leaf):
    """Skip backwards over indentation error leaves."""
    current = leaf
    while current and _is_indentation_error_leaf(current):
        current = current.get_previous_leaf()
    return current
def _get_next_leaf_if_indentation(leaf):
    """Skip forwards over indentation error leaves."""
    current = leaf
    while current and _is_indentation_error_leaf(current):
        current = current.get_next_leaf()
    return current
def _get_suite_indentation(tree_node):
    """Indentation column of the first statement inside a suite node."""
    # children[0] is the NEWLINE of the suite; children[1] is the first
    # real child, whose start column is the suite's indentation.
    return tree_node.children[1].start_pos[1]
def _get_indentation(tree_node):
return tree_node.start_pos[1]
def _assert_valid_graph(node):
    """
    Checks if the parent/children relationship is correct.

    This is a check that only runs during debugging/testing.
    """
    try:
        children = node.children
    except AttributeError:
        # Ignore INDENT is necessary, because indent/dedent tokens don't
        # contain value/prefix and are just around, because of the tokenizer.
        if node.type == 'error_leaf' and node.token_type in _INDENTATION_TOKENS:
            assert not node.value
            assert not node.prefix
            return

        # Calculate the content between two start positions.
        previous_leaf = _get_previous_leaf_if_indentation(node.get_previous_leaf())
        if previous_leaf is None:
            content = node.prefix
            previous_start_pos = 1, 0
        else:
            assert previous_leaf.end_pos <= node.start_pos, \
                (previous_leaf, node)

            content = previous_leaf.value + node.prefix
            previous_start_pos = previous_leaf.start_pos

        # Recompute where the leaf should start from the content before it
        # and compare against its recorded start position.
        if '\n' in content or '\r' in content:
            splitted = split_lines(content)
            line = previous_start_pos[0] + len(splitted) - 1
            actual = line, len(splitted[-1])
        else:
            actual = previous_start_pos[0], previous_start_pos[1] + len(content)
            if content.startswith(BOM_UTF8_STRING) \
                    and node.get_start_pos_of_prefix() == (1, 0):
                # Remove the byte order mark
                actual = actual[0], actual[1] - 1

        assert node.start_pos == actual, (node.start_pos, actual)
    else:
        for child in children:
            assert child.parent == node, (node, child)
            _assert_valid_graph(child)
def _assert_nodes_are_equal(node1, node2):
    """
    Recursively assert that two trees are structurally identical
    (same types, values, prefixes and positions). Debug/test helper.
    """
    try:
        children1 = node1.children
    except AttributeError:
        # node1 is a leaf; node2 must be an equal leaf.
        assert not hasattr(node2, 'children'), (node1, node2)
        assert node1.value == node2.value, (node1, node2)
        assert node1.type == node2.type, (node1, node2)
        assert node1.prefix == node2.prefix, (node1, node2)
        assert node1.start_pos == node2.start_pos, (node1, node2)
        return
    else:
        try:
            children2 = node2.children
        except AttributeError:
            # node1 is an inner node but node2 is a leaf.
            assert False, (node1, node2)

    for n1, n2 in zip(children1, children2):
        _assert_nodes_are_equal(n1, n2)
    assert len(children1) == len(children2), '\n' + repr(children1) + '\n' + repr(children2)
def _get_debug_error_message(module, old_lines, new_lines):
    """Build the assertion message shown when the diff parser misbehaves."""
    import parso

    current_lines = split_lines(module.get_code(), keepends=True)
    actual_diff = ''.join(difflib.unified_diff(new_lines, current_lines))
    expected_diff = ''.join(difflib.unified_diff(old_lines, new_lines))
    return (
        "There's an issue with the diff parser. Please "
        "report (parso v%s) - Old/New:\n%s\nActual Diff (May be empty):\n%s"
        % (parso.__version__, expected_diff, actual_diff)
    )
def _get_last_line(node_or_leaf):
    """Return the last line number that really belongs to this node."""
    last_leaf = node_or_leaf.get_last_leaf()
    if _ends_with_newline(last_leaf):
        return last_leaf.start_pos[0]

    nxt = last_leaf.get_next_leaf()
    if nxt.type == 'endmarker' and '\n' in nxt.prefix:
        # This is a very special case and has to do with error recovery in
        # Parso. The problem is basically that there's no newline leaf at
        # the end sometimes (it's required in the grammar, but not needed
        # actually before endmarker, CPython just adds a newline to make
        # source code pass the parser, to account for that Parso error
        # recovery allows small_stmt instead of simple_stmt).
        return last_leaf.end_pos[0] + 1
    return last_leaf.end_pos[0]
def _skip_dedent_error_leaves(leaf):
while leaf is not None and leaf.type == 'error_leaf' and leaf.token_type == 'DEDENT':
leaf = leaf.get_previous_leaf()
return leaf
def _ends_with_newline(leaf, suffix=''):
    """True if the given leaf (or *suffix*) terminates a line."""
    leaf = _skip_dedent_error_leaves(leaf)

    if leaf.type == 'error_leaf':
        actual_type = leaf.token_type.lower()
    else:
        actual_type = leaf.type

    return actual_type == 'newline' or suffix.endswith(('\n', '\r'))
def _flows_finished(pgen_grammar, stack):
"""
if, while, for and try might not be finished, because another part might
still be parsed.
"""
for stack_node in stack:
if stack_node.nonterminal in ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt'):
return False
return True
def _func_or_class_has_suite(node):
if node.type == 'decorated':
node = node.children[-1]
if node.type in ('async_funcdef', 'async_stmt'):
node = node.children[-1]
return node.type in ('classdef', 'funcdef') and node.children[-1].type == 'suite'
def _suite_or_file_input_is_valid(pgen_grammar, stack):
    """True when parsing could validly stop at the current stack state."""
    if not _flows_finished(pgen_grammar, stack):
        return False

    for stack_node in reversed(stack):
        if stack_node.nonterminal == 'decorator':
            # A decorator is only valid with the upcoming function.
            return False

        if stack_node.nonterminal == 'suite':
            # If only newline is in the suite, the suite is not valid, yet.
            return len(stack_node.nodes) > 1
    # Not reaching a suite means that we're dealing with file_input levels
    # where there's no need for a valid statement in it. It can also be empty.
    return True
def _is_flow_node(node):
if node.type == 'async_stmt':
node = node.children[1]
try:
value = node.children[0].value
except AttributeError:
return False
return value in ('if', 'for', 'while', 'try', 'with')
class _PositionUpdatingFinished(Exception):
    # Internal control-flow exception: raised by _update_positions to abort
    # the recursion once the last relevant leaf has been shifted.
    pass
def _update_positions(nodes, line_offset, last_leaf):
for node in nodes:
try:
children = node.children
except AttributeError:
# Is a leaf
node.line += line_offset
if node is last_leaf:
raise _PositionUpdatingFinished
else:
_update_positions(children, line_offset, last_leaf)
class DiffParser:
"""
An advanced form of parsing a file faster. Unfortunately comes with huge
side effects. It changes the given module.
"""
def __init__(self, pgen_grammar, tokenizer, module):
self._pgen_grammar = pgen_grammar
self._tokenizer = tokenizer
self._module = module
def _reset(self):
self._copy_count = 0
self._parser_count = 0
self._nodes_tree = _NodesTree(self._module)
def update(self, old_lines, new_lines):
'''
The algorithm works as follows:
Equal:
- Assure that the start is a newline, otherwise parse until we get
one.
- Copy from parsed_until_line + 1 to max(i2 + 1)
- Make sure that the indentation is correct (e.g. add DEDENT)
- Add old and change positions
Insert:
- Parse from parsed_until_line + 1 to min(j2 + 1), hopefully not
much more.
Returns the new module node.
'''
LOG.debug('diff parser start')
# Reset the used names cache so they get regenerated.
self._module._used_names = None
self._parser_lines_new = new_lines
self._reset()
line_length = len(new_lines)
sm = difflib.SequenceMatcher(None, old_lines, self._parser_lines_new)
opcodes = sm.get_opcodes()
LOG.debug('line_lengths old: %s; new: %s' % (len(old_lines), line_length))
for operation, i1, i2, j1, j2 in opcodes:
LOG.debug('-> code[%s] old[%s:%s] new[%s:%s]',
operation, i1 + 1, i2, j1 + 1, j2)
if j2 == line_length and new_lines[-1] == '':
# The empty part after the last newline is not relevant.
j2 -= 1
if operation == 'equal':
line_offset = j1 - i1
self._copy_from_old_parser(line_offset, i1 + 1, i2, j2)
elif operation == 'replace':
self._parse(until_line=j2)
elif operation == 'insert':
self._parse(until_line=j2)
else:
assert operation == 'delete'
# With this action all change will finally be applied and we have a
# changed module.
self._nodes_tree.close()
if DEBUG_DIFF_PARSER:
# If there is reasonable suspicion that the diff parser is not
# behaving well, this should be enabled.
try:
code = ''.join(new_lines)
assert self._module.get_code() == code
_assert_valid_graph(self._module)
without_diff_parser_module = Parser(
self._pgen_grammar,
error_recovery=True
).parse(self._tokenizer(new_lines))
_assert_nodes_are_equal(self._module, without_diff_parser_module)
except AssertionError:
print(_get_debug_error_message(self._module, old_lines, new_lines))
raise
last_pos = self._module.end_pos[0]
if last_pos != line_length:
raise Exception(
('(%s != %s) ' % (last_pos, line_length))
+ _get_debug_error_message(self._module, old_lines, new_lines)
)
LOG.debug('diff parser end')
return self._module
def _enabled_debugging(self, old_lines, lines_new):
if self._module.get_code() != ''.join(lines_new):
LOG.warning('parser issue:\n%s\n%s', ''.join(old_lines), ''.join(lines_new))
    def _copy_from_old_parser(self, line_offset, start_line_old, until_line_old, until_line_new):
        """
        Advance ``self._nodes_tree`` to ``until_line_new`` by copying nodes
        from the old tree where possible, falling back to parsing one line
        at a time when no copyable statement starts at the current position.
        """
        last_until_line = -1
        while until_line_new > self._nodes_tree.parsed_until_line:
            parsed_until_line_old = self._nodes_tree.parsed_until_line - line_offset
            line_stmt = self._get_old_line_stmt(parsed_until_line_old + 1)
            if line_stmt is None:
                # Parse 1 line at least. We don't need more, because we just
                # want to get into a state where the old parser has statements
                # again that can be copied (e.g. not lines within parentheses).
                self._parse(self._nodes_tree.parsed_until_line + 1)
            else:
                p_children = line_stmt.parent.children
                index = p_children.index(line_stmt)
                if start_line_old == 1 \
                        and p_children[0].get_first_leaf().prefix.startswith(BOM_UTF8_STRING):
                    # If there's a BOM in the beginning, just reparse. It's too
                    # complicated to account for it otherwise.
                    copied_nodes = []
                else:
                    from_ = self._nodes_tree.parsed_until_line + 1
                    copied_nodes = self._nodes_tree.copy_nodes(
                        p_children[index:],
                        until_line_old,
                        line_offset
                    )
                # Match all the nodes that are in the wanted range.
                if copied_nodes:
                    self._copy_count += 1
                    to = self._nodes_tree.parsed_until_line
                    LOG.debug('copy old[%s:%s] new[%s:%s]',
                              copied_nodes[0].start_pos[0],
                              copied_nodes[-1].end_pos[0] - 1, from_, to)
                else:
                    # We have copied as much as possible (but definitely not too
                    # much). Therefore we just parse a bit more.
                    self._parse(self._nodes_tree.parsed_until_line + 1)
            # Since there are potential bugs that might loop here endlessly, we
            # just stop here.
            assert last_until_line != self._nodes_tree.parsed_until_line, last_until_line
            last_until_line = self._nodes_tree.parsed_until_line
    def _get_old_line_stmt(self, old_line):
        # Return the statement in the OLD tree whose prefix starts exactly at
        # ``old_line``; None if that line begins inside a statement (e.g.
        # within parentheses), in which case it cannot be copied as a unit.
        leaf = self._module.get_leaf_for_position((old_line, 0), include_prefixes=True)
        if _ends_with_newline(leaf):
            leaf = leaf.get_next_leaf()
        if leaf.get_start_pos_of_prefix()[0] == old_line:
            node = leaf
            while node.parent.type not in ('file_input', 'suite'):
                node = node.parent
            # Make sure that if only the `else:` line of an if statement is
            # copied that not the whole thing is going to be copied.
            if node.start_pos[0] >= old_line:
                return node
        # Must be on the same line. Otherwise we need to parse that bit.
        return None
    def _parse(self, until_line):
        """
        Parses at least until the given line, but might just parse more until a
        valid state is reached.
        """
        last_until_line = 0
        while until_line > self._nodes_tree.parsed_until_line:
            node = self._try_parse_part(until_line)
            nodes = node.children
            self._nodes_tree.add_parsed_nodes(nodes, self._keyword_token_indents)
            if self._replace_tos_indent is not None:
                # A forced error dedent was requested by _diff_tokenize.
                self._nodes_tree.indents[-1] = self._replace_tos_indent
            LOG.debug(
                'parse_part from %s to %s (to %s in part parser)',
                nodes[0].get_start_pos_of_prefix()[0],
                self._nodes_tree.parsed_until_line,
                node.end_pos[0] - 1
            )
            # Since the tokenizer sometimes has bugs, we cannot be sure that
            # this loop terminates. Therefore assert that there's always a
            # change.
            assert last_until_line != self._nodes_tree.parsed_until_line, last_until_line
            last_until_line = self._nodes_tree.parsed_until_line
    def _try_parse_part(self, until_line):
        """
        Sets up a normal parser that uses a specialized tokenizer to only parse
        until a certain position (or a bit longer if the statement hasn't
        ended).
        """
        self._parser_count += 1
        # TODO speed up, shouldn't copy the whole list all the time.
        # memoryview?
        parsed_until_line = self._nodes_tree.parsed_until_line
        lines_after = self._parser_lines_new[parsed_until_line:]
        tokens = self._diff_tokenize(
            lines_after,
            until_line,
            line_offset=parsed_until_line
        )
        self._active_parser = Parser(
            self._pgen_grammar,
            error_recovery=True
        )
        return self._active_parser.parse(tokens=tokens)
    def _diff_tokenize(self, lines, until_line, line_offset=0):
        """
        Wrap the tokenizer so that the token stream ends (with an ENDMARKER)
        as soon as a safe stopping point at or after ``until_line`` is
        reached, instead of tokenizing all remaining lines.
        """
        was_newline = False
        indents = self._nodes_tree.indents
        initial_indentation_count = len(indents)
        tokens = self._tokenizer(
            lines,
            start_pos=(line_offset + 1, 0),
            indents=indents,
            is_first_token=line_offset == 0,
        )
        stack = self._active_parser.stack
        self._replace_tos_indent = None
        self._keyword_token_indents = {}
        # print('start', line_offset + 1, indents)
        for token in tokens:
            # print(token, indents)
            typ = token.type
            if typ == DEDENT:
                if len(indents) < initial_indentation_count:
                    # We are done here, only thing that can come now is an
                    # endmarker or another dedented code block.
                    while True:
                        typ, string, start_pos, prefix = token = next(tokens)
                        if typ in (DEDENT, ERROR_DEDENT):
                            if typ == ERROR_DEDENT:
                                # We want to force an error dedent in the next
                                # parser/pass. To make this possible we just
                                # increase the location by one.
                                self._replace_tos_indent = start_pos[1] + 1
                                pass
                        else:
                            break
                    if '\n' in prefix or '\r' in prefix:
                        # Drop everything after the last newline so the next
                        # parser pass re-reads the partial line.
                        prefix = re.sub(r'[^\n\r]+\Z', '', prefix)
                    else:
                        assert start_pos[1] >= len(prefix), repr(prefix)
                        if start_pos[1] - len(prefix) == 0:
                            prefix = ''
                    yield PythonToken(
                        ENDMARKER, '',
                        start_pos,
                        prefix
                    )
                    break
            elif typ == NEWLINE and token.start_pos[0] >= until_line:
                # Target line reached; stop at the next safe opportunity.
                was_newline = True
            elif was_newline:
                was_newline = False
                if len(indents) == initial_indentation_count:
                    # Check if the parser is actually in a valid suite state.
                    if _suite_or_file_input_is_valid(self._pgen_grammar, stack):
                        yield PythonToken(ENDMARKER, '', token.start_pos, '')
                        break
            if typ == NAME and token.string in ('class', 'def'):
                # Remember the indentation stack at class/def keywords; used
                # later to nest copied suites correctly.
                self._keyword_token_indents[token.start_pos] = list(indents)
            yield token
class _NodesTreeNode:
    # One node of the helper tree mirroring a suite (or the module) that is
    # being assembled; children are buffered in groups and only attached to
    # the real parso node in ``finish()``.
    _ChildrenGroup = namedtuple(
        '_ChildrenGroup',
        'prefix children line_offset last_line_offset_leaf')
    def __init__(self, tree_node, parent=None, indentation=0):
        self.tree_node = tree_node
        self._children_groups = []
        self.parent = parent
        self._node_children = []
        self.indentation = indentation
    def finish(self):
        # Flush the buffered child groups into the real tree node, merging
        # prefixes and shifting line numbers of copied nodes if needed.
        children = []
        for prefix, children_part, line_offset, last_line_offset_leaf in self._children_groups:
            first_leaf = _get_next_leaf_if_indentation(
                children_part[0].get_first_leaf()
            )
            first_leaf.prefix = prefix + first_leaf.prefix
            if line_offset != 0:
                try:
                    _update_positions(
                        children_part, line_offset, last_line_offset_leaf)
                except _PositionUpdatingFinished:
                    pass
            children += children_part
        self.tree_node.children = children
        # Reset the parents
        for node in children:
            node.parent = self.tree_node
        for node_child in self._node_children:
            node_child.finish()
    def add_child_node(self, child_node):
        self._node_children.append(child_node)
    def add_tree_nodes(self, prefix, children, line_offset=0,
                       last_line_offset_leaf=None):
        # ``last_line_offset_leaf`` marks the last leaf whose position still
        # needs the line_offset applied in finish().
        if last_line_offset_leaf is None:
            last_line_offset_leaf = children[-1].get_last_leaf()
        group = self._ChildrenGroup(
            prefix, children, line_offset, last_line_offset_leaf
        )
        self._children_groups.append(group)
    def get_last_line(self, suffix):
        # Last line covered by this node (and its children), taking the
        # pending ``suffix`` prefix text into account.
        line = 0
        if self._children_groups:
            children_group = self._children_groups[-1]
            last_leaf = _get_previous_leaf_if_indentation(
                children_group.last_line_offset_leaf
            )
            line = last_leaf.end_pos[0] + children_group.line_offset
            # Newlines end on the next line, which means that they would cover
            # the next line. That line is not fully parsed at this point.
            if _ends_with_newline(last_leaf, suffix):
                line -= 1
        line += len(split_lines(suffix)) - 1
        if suffix and not suffix.endswith('\n') and not suffix.endswith('\r'):
            # This is the end of a file (that doesn't end with a newline).
            line += 1
        if self._node_children:
            return max(line, self._node_children[-1].get_last_line(suffix))
        return line
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.tree_node)
class _NodesTree:
    # Builds the new module tree out of copied old nodes and freshly parsed
    # nodes, keeping a working stack of open suites keyed by indentation.
    def __init__(self, module):
        self._base_node = _NodesTreeNode(module)
        self._working_stack = [self._base_node]
        self._module = module
        self._prefix_remainder = ''
        self.prefix = ''
        self.indents = [0]
    @property
    def parsed_until_line(self):
        # Last line that is fully represented in the assembled tree so far.
        return self._working_stack[-1].get_last_line(self.prefix)
    def _update_insertion_node(self, indentation):
        # Pop suites that are indented deeper than ``indentation`` and return
        # the node that new children should be attached to.
        for node in reversed(list(self._working_stack)):
            if node.indentation < indentation or node is self._working_stack[0]:
                return node
            self._working_stack.pop()
    def add_parsed_nodes(self, tree_nodes, keyword_token_indents):
        old_prefix = self.prefix
        tree_nodes = self._remove_endmarker(tree_nodes)
        if not tree_nodes:
            # Nothing but an endmarker was parsed; keep accumulating prefix.
            self.prefix = old_prefix + self.prefix
            return
        assert tree_nodes[0].type != 'newline'
        node = self._update_insertion_node(tree_nodes[0].start_pos[1])
        assert node.tree_node.type in ('suite', 'file_input')
        node.add_tree_nodes(old_prefix, tree_nodes)
        # tos = Top of stack
        self._update_parsed_node_tos(tree_nodes[-1], keyword_token_indents)
    def _update_parsed_node_tos(self, tree_node, keyword_token_indents):
        # If the last parsed node opens a suite, push it so following nodes
        # can be inserted inside it.
        if tree_node.type == 'suite':
            def_leaf = tree_node.parent.children[0]
            new_tos = _NodesTreeNode(
                tree_node,
                indentation=keyword_token_indents[def_leaf.start_pos][-1],
            )
            new_tos.add_tree_nodes('', list(tree_node.children))
            self._working_stack[-1].add_child_node(new_tos)
            self._working_stack.append(new_tos)
            self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)
        elif _func_or_class_has_suite(tree_node):
            self._update_parsed_node_tos(tree_node.children[-1], keyword_token_indents)
    def _remove_endmarker(self, tree_nodes):
        """
        Helps cleaning up the tree nodes that get inserted.
        """
        last_leaf = tree_nodes[-1].get_last_leaf()
        is_endmarker = last_leaf.type == 'endmarker'
        self._prefix_remainder = ''
        if is_endmarker:
            prefix = last_leaf.prefix
            separation = max(prefix.rfind('\n'), prefix.rfind('\r'))
            if separation > -1:
                # Remove the whitespace part of the prefix after a newline.
                # That is not relevant if parentheses were opened. Always parse
                # until the end of a line.
                last_leaf.prefix, self._prefix_remainder = \
                    last_leaf.prefix[:separation + 1], last_leaf.prefix[separation + 1:]
        self.prefix = ''
        if is_endmarker:
            self.prefix = last_leaf.prefix
            tree_nodes = tree_nodes[:-1]
        return tree_nodes
    def _get_matching_indent_nodes(self, tree_nodes, is_new_suite):
        # There might be a random dedent where we have to stop copying.
        # Invalid indents are ok, because the parser handled that
        # properly before. An invalid dedent can happen, because a few
        # lines above there was an invalid indent.
        node_iterator = iter(tree_nodes)
        if is_new_suite:
            yield next(node_iterator)
        first_node = next(node_iterator)
        indent = _get_indentation(first_node)
        if not is_new_suite and indent not in self.indents:
            return
        yield first_node
        for n in node_iterator:
            if _get_indentation(n) != indent:
                return
            yield n
    def copy_nodes(self, tree_nodes, until_line, line_offset):
        """
        Copies tree nodes from the old parser tree.
        Returns the list of nodes that were actually copied.
        """
        if tree_nodes[0].type in ('error_leaf', 'error_node'):
            # Avoid copying errors in the beginning. Can lead to a lot of
            # issues.
            return []
        indentation = _get_indentation(tree_nodes[0])
        old_working_stack = list(self._working_stack)
        old_prefix = self.prefix
        old_indents = self.indents
        self.indents = [i for i in self.indents if i <= indentation]
        self._update_insertion_node(indentation)
        new_nodes, self._working_stack, self.prefix, added_indents = self._copy_nodes(
            list(self._working_stack),
            tree_nodes,
            until_line,
            line_offset,
            self.prefix,
        )
        if new_nodes:
            self.indents += added_indents
        else:
            # Nothing was copied; restore the state from before the attempt.
            self._working_stack = old_working_stack
            self.prefix = old_prefix
            self.indents = old_indents
        return new_nodes
    def _copy_nodes(self, working_stack, nodes, until_line, line_offset,
                    prefix='', is_nested=False):
        # Recursive worker for copy_nodes; may descend into the last
        # function/class suite to copy it partially.
        new_nodes = []
        added_indents = []
        nodes = list(self._get_matching_indent_nodes(
            nodes,
            is_new_suite=is_nested,
        ))
        new_prefix = ''
        for node in nodes:
            if node.start_pos[0] > until_line:
                break
            if node.type == 'endmarker':
                break
            if node.type == 'error_leaf' and node.token_type in ('DEDENT', 'ERROR_DEDENT'):
                break
            # TODO this check might take a bit of time for large files. We
            # might want to change this to do more intelligent guessing or
            # binary search.
            if _get_last_line(node) > until_line:
                # We can split up functions and classes later.
                if _func_or_class_has_suite(node):
                    new_nodes.append(node)
                break
            try:
                c = node.children
            except AttributeError:
                pass
            else:
                # This case basically appears with error recovery of one line
                # suites like `def foo(): bar.-`. In this case we might not
                # include a newline in the statement and we need to take care
                # of that.
                n = node
                if n.type == 'decorated':
                    n = n.children[-1]
                if n.type in ('async_funcdef', 'async_stmt'):
                    n = n.children[-1]
                if n.type in ('classdef', 'funcdef'):
                    suite_node = n.children[-1]
                else:
                    suite_node = c[-1]
                if suite_node.type in ('error_leaf', 'error_node'):
                    break
            new_nodes.append(node)
        # Pop error nodes at the end from the list
        if new_nodes:
            while new_nodes:
                last_node = new_nodes[-1]
                if (last_node.type in ('error_leaf', 'error_node')
                        or _is_flow_node(new_nodes[-1])):
                    # Error leafs/nodes don't have a defined start/end. Error
                    # nodes might not end with a newline (e.g. if there's an
                    # open `(`). Therefore ignore all of them unless they are
                    # succeeded with valid parser state.
                    # If we copy flows at the end, they might be continued
                    # after the copy limit (in the new parser).
                    # In this while loop we try to remove until we find a newline.
                    new_prefix = ''
                    new_nodes.pop()
                    while new_nodes:
                        last_node = new_nodes[-1]
                        if last_node.get_last_leaf().type == 'newline':
                            break
                        new_nodes.pop()
                    continue
                if len(new_nodes) > 1 and new_nodes[-2].type == 'error_node':
                    # The problem here is that Parso error recovery sometimes
                    # influences nodes before this node.
                    # Since the new last node is an error node this will get
                    # cleaned up in the next while iteration.
                    new_nodes.pop()
                    continue
                break
        if not new_nodes:
            return [], working_stack, prefix, added_indents
        tos = working_stack[-1]
        last_node = new_nodes[-1]
        had_valid_suite_last = False
        # Pop incomplete suites from the list
        if _func_or_class_has_suite(last_node):
            suite = last_node
            while suite.type != 'suite':
                suite = suite.children[-1]
            indent = _get_suite_indentation(suite)
            added_indents.append(indent)
            suite_tos = _NodesTreeNode(suite, indentation=_get_indentation(last_node))
            # Don't need to pass line_offset here, it's already done by the
            # parent.
            suite_nodes, new_working_stack, new_prefix, ai = self._copy_nodes(
                working_stack + [suite_tos], suite.children, until_line, line_offset,
                is_nested=True,
            )
            added_indents += ai
            if len(suite_nodes) < 2:
                # A suite only with newline is not valid.
                new_nodes.pop()
                new_prefix = ''
            else:
                assert new_nodes
                tos.add_child_node(suite_tos)
                working_stack = new_working_stack
                had_valid_suite_last = True
        if new_nodes:
            if not _ends_with_newline(new_nodes[-1].get_last_leaf()) and not had_valid_suite_last:
                p = new_nodes[-1].get_next_leaf().prefix
                # We are not allowed to remove the newline at the end of the
                # line, otherwise it's going to be missing. This happens e.g.
                # if a bracket is around before that moves newlines to
                # prefixes.
                new_prefix = split_lines(p, keepends=True)[0]
            if had_valid_suite_last:
                last = new_nodes[-1]
                if last.type == 'decorated':
                    last = last.children[-1]
                if last.type in ('async_funcdef', 'async_stmt'):
                    last = last.children[-1]
                last_line_offset_leaf = last.children[-2].get_last_leaf()
                assert last_line_offset_leaf == ':'
            else:
                last_line_offset_leaf = new_nodes[-1].get_last_leaf()
            tos.add_tree_nodes(
                prefix, new_nodes, line_offset, last_line_offset_leaf,
            )
            prefix = new_prefix
            self._prefix_remainder = ''
        return new_nodes, working_stack, prefix, added_indents
    def close(self):
        # Finalize the assembled tree and append the trailing endmarker.
        self._base_node.finish()
        # Add an endmarker.
        try:
            last_leaf = self._module.get_last_leaf()
        except IndexError:
            end_pos = [1, 0]
        else:
            last_leaf = _skip_dedent_error_leaves(last_leaf)
            end_pos = list(last_leaf.end_pos)
        lines = split_lines(self.prefix)
        assert len(lines) > 0
        if len(lines) == 1:
            if lines[0].startswith(BOM_UTF8_STRING) and end_pos == [1, 0]:
                # The BOM occupies no visible column.
                end_pos[1] -= 1
            end_pos[1] += len(lines[0])
        else:
            end_pos[0] += len(lines) - 1
            end_pos[1] = len(lines[-1])
        endmarker = EndMarker('', tuple(end_pos), self.prefix + self._prefix_remainder)
        endmarker.parent = self._module
        self._module.children.append(endmarker)
| 34,206 | Python | .py | 747 | 33.904953 | 98 | 0.57473 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,010 | tokenize.py | DamnWidget_anaconda/anaconda_lib/parso/python/tokenize.py | # -*- coding: utf-8 -*-
"""
This tokenizer has been copied from the ``tokenize.py`` standard library
tokenizer. The reason was simple: The standard library tokenizer fails
if the indentation is not right. To make it possible to do error recovery the
tokenizer needed to be rewritten.
Basically this is a stripped down version of the standard library module, so
you can read the documentation there. Additionally we included some speed and
memory optimizations here.
"""
from __future__ import absolute_import
import sys
import re
import itertools as _itertools
from codecs import BOM_UTF8
from typing import NamedTuple, Tuple, Iterator, Iterable, List, Dict, \
Pattern, Set
from parso.python.token import PythonTokenTypes
from parso.utils import split_lines, PythonVersionInfo, parse_version_string
# Maximum code point of Unicode 6.0: 0x10ffff (1,114,111)
MAX_UNICODE = '\U0010ffff'
# Short module-level aliases for the token types used throughout this file.
STRING = PythonTokenTypes.STRING
NAME = PythonTokenTypes.NAME
NUMBER = PythonTokenTypes.NUMBER
OP = PythonTokenTypes.OP
NEWLINE = PythonTokenTypes.NEWLINE
INDENT = PythonTokenTypes.INDENT
DEDENT = PythonTokenTypes.DEDENT
ENDMARKER = PythonTokenTypes.ENDMARKER
ERRORTOKEN = PythonTokenTypes.ERRORTOKEN
ERROR_DEDENT = PythonTokenTypes.ERROR_DEDENT
FSTRING_START = PythonTokenTypes.FSTRING_START
FSTRING_STRING = PythonTokenTypes.FSTRING_STRING
FSTRING_END = PythonTokenTypes.FSTRING_END
class TokenCollection(NamedTuple):
    # Bundle of all per-version tokenizer tables/regexes, built once per
    # Python version and cached in ``_token_collection_cache``.
    pseudo_token: Pattern  # main scanner regex: (whitespace)(token)
    single_quoted: Set[str]  # prefix+quote combos opening one-line strings
    triple_quoted: Set[str]  # prefix+quote combos opening triple strings
    endpats: Dict[str, Pattern]  # opening quote -> regex matching its tail
    whitespace: Pattern  # plain whitespace matcher
    fstring_pattern_map: Dict[str, str]  # f-string start token -> bare quote
    always_break_tokens: Tuple[str]  # keywords that force leaving brackets
BOM_UTF8_STRING = BOM_UTF8.decode('utf-8')  # the single character '\ufeff'
# Cache of per-version token tables, keyed by the version tuple.
_token_collection_cache: Dict[PythonVersionInfo, TokenCollection] = {}
def group(*choices, capture=False, **kwargs):
    """Join regex alternatives into one (by default non-capturing) group."""
    assert not kwargs
    opener = '(' if capture else '(?:'
    return opener + '|'.join(choices) + ')'
def maybe(*choices):
    """Like :func:`group`, but the whole group is optional (``?``)."""
    grouped = group(*choices)
    return grouped + '?'
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes(*, include_fstring=False, only_fstring=False):
def different_case_versions(prefix):
for s in _itertools.product(*[(c, c.upper()) for c in prefix]):
yield ''.join(s)
# The valid string prefixes. Only contain the lower case versions,
# and don't contain any permuations (include 'fr', but not
# 'rf'). The various permutations will be generated.
valid_string_prefixes = ['b', 'r', 'u', 'br']
result = {''}
if include_fstring:
f = ['f', 'fr']
if only_fstring:
valid_string_prefixes = f
result = set()
else:
valid_string_prefixes += f
elif only_fstring:
return set()
# if we add binary f-strings, add: ['fb', 'fbr']
for prefix in valid_string_prefixes:
for t in _itertools.permutations(prefix):
# create a list with upper and lower versions of each
# character
result.update(different_case_versions(t))
return result
def _compile(expr):
return re.compile(expr, re.UNICODE)
def _get_token_collection(version_info):
    # Memoize the (expensive) per-version token tables.
    try:
        return _token_collection_cache[tuple(version_info)]
    except KeyError:
        _token_collection_cache[tuple(version_info)] = result = \
            _create_token_collection(version_info)
        return result
unicode_character_name = r'[A-Za-z0-9\-]+(?: [A-Za-z0-9\-]+)*'
# Literal (non-expression) parts of an f-string. ``{{``/``}}`` are escapes
# and ``\N{...}`` is kept whole so its brace does not open an expression.
fstring_string_single_line = _compile(
    r'(?:\{\{|\}\}|\\N\{' + unicode_character_name
    + r'\}|\\(?:\r\n?|\n)|\\[^\r\nN]|[^{}\r\n\\])+'
)
fstring_string_multi_line = _compile(
    r'(?:\{\{|\}\}|\\N\{' + unicode_character_name + r'\}|\\[^N]|[^{}\\])+'
)
# Format-spec parts (after ``:``) allow fewer escapes.
fstring_format_spec_single_line = _compile(r'(?:\\(?:\r\n?|\n)|[^{}\r\n])+')
fstring_format_spec_multi_line = _compile(r'[^{}]+')
def _create_token_collection(version_info):
    """Build all regexes and lookup tables the tokenizer needs for one
    Python version (cached by ``_get_token_collection``)."""
    # Note: we use unicode matching for names ("\w") but ascii matching for
    # number literals.
    Whitespace = r'[ \f\t]*'
    whitespace = _compile(Whitespace)
    Comment = r'#[^\r\n]*'
    # Capturing group: pseudomatch.group(3) is used to detect names.
    Name = '([A-Za-z_0-9\u0080-' + MAX_UNICODE + ']+)'
    Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
    Binnumber = r'0[bB](?:_?[01])+'
    Octnumber = r'0[oO](?:_?[0-7])+'
    Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
    Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
    Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
    Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                       r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
    Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
    Floatnumber = group(Pointfloat, Expfloat)
    Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
    Number = group(Imagnumber, Floatnumber, Intnumber)
    # Note that since _all_string_prefixes includes the empty string,
    # StringPrefix can be the empty string (making it optional).
    possible_prefixes = _all_string_prefixes()
    StringPrefix = group(*possible_prefixes)
    StringPrefixWithF = group(*_all_string_prefixes(include_fstring=True))
    fstring_prefixes = _all_string_prefixes(include_fstring=True, only_fstring=True)
    FStringStart = group(*fstring_prefixes)
    # Tail end of ' string.
    Single = r"(?:\\.|[^'\\])*'"
    # Tail end of " string.
    Double = r'(?:\\.|[^"\\])*"'
    # Tail end of ''' string.
    Single3 = r"(?:\\.|'(?!'')|[^'\\])*'''"
    # Tail end of """ string.
    Double3 = r'(?:\\.|"(?!"")|[^"\\])*"""'
    Triple = group(StringPrefixWithF + "'''", StringPrefixWithF + '"""')
    # Because of leftmost-then-longest match semantics, be sure to put the
    # longest operators first (e.g., if = came before ==, == would get
    # recognized as two instances of =).
    Operator = group(r"\*\*=?", r">>=?", r"<<=?",
                     r"//=?", r"->",
                     r"[+\-*/%&@`|^!=<>]=?",
                     r"~")
    Bracket = '[][(){}]'
    special_args = [r'\.\.\.', r'\r\n?', r'\n', r'[;.,@]']
    if version_info >= (3, 8):
        # The walrus operator ``:=`` exists from 3.8 on.
        special_args.insert(0, ":=?")
    else:
        special_args.insert(0, ":")
    Special = group(*special_args)
    Funny = group(Operator, Bracket, Special)
    # First (or only) line of ' or " string.
    ContStr = group(StringPrefix + r"'[^\r\n'\\]*(?:\\.[^\r\n'\\]*)*"
                    + group("'", r'\\(?:\r\n?|\n)'),
                    StringPrefix + r'"[^\r\n"\\]*(?:\\.[^\r\n"\\]*)*'
                    + group('"', r'\\(?:\r\n?|\n)'))
    pseudo_extra_pool = [Comment, Triple]
    all_quotes = '"', "'", '"""', "'''"
    if fstring_prefixes:
        pseudo_extra_pool.append(FStringStart + group(*all_quotes))
    PseudoExtras = group(r'\\(?:\r\n?|\n)|\Z', *pseudo_extra_pool)
    PseudoToken = group(Whitespace, capture=True) + \
        group(PseudoExtras, Number, Funny, ContStr, Name, capture=True)
    # For a given string prefix plus quotes, endpats maps it to a regex
    # to match the remainder of that string. _prefix can be empty, for
    # a normal single or triple quoted string (with no prefix).
    endpats = {}
    for _prefix in possible_prefixes:
        endpats[_prefix + "'"] = _compile(Single)
        endpats[_prefix + '"'] = _compile(Double)
        endpats[_prefix + "'''"] = _compile(Single3)
        endpats[_prefix + '"""'] = _compile(Double3)
    # A set of all of the single and triple quoted string prefixes,
    # including the opening quotes.
    single_quoted = set()
    triple_quoted = set()
    fstring_pattern_map = {}
    for t in possible_prefixes:
        for quote in '"', "'":
            single_quoted.add(t + quote)
        for quote in '"""', "'''":
            triple_quoted.add(t + quote)
    for t in fstring_prefixes:
        for quote in all_quotes:
            fstring_pattern_map[t + quote] = quote
    # Keywords after which an open bracket must be a syntax error, so the
    # tokenizer can recover from unclosed parentheses.
    ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except',
                           'finally', 'while', 'with', 'return', 'continue',
                           'break', 'del', 'pass', 'global', 'assert', 'nonlocal')
    pseudo_token_compiled = _compile(PseudoToken)
    return TokenCollection(
        pseudo_token_compiled, single_quoted, triple_quoted, endpats,
        whitespace, fstring_pattern_map, set(ALWAYS_BREAK_TOKENS)
    )
class Token(NamedTuple):
    type: PythonTokenTypes
    string: str
    start_pos: Tuple[int, int]  # (1-based line, 0-based column)
    prefix: str  # whitespace/comments preceding the token
    @property
    def end_pos(self) -> Tuple[int, int]:
        """Position right after the token, accounting for embedded newlines."""
        lines = split_lines(self.string)
        if len(lines) > 1:
            # NOTE(review): column 0 for multi-line tokens ignores the length
            # of the last line; presumably callers only rely on the line
            # number here — confirm before changing.
            return self.start_pos[0] + len(lines) - 1, 0
        else:
            return self.start_pos[0], self.start_pos[1] + len(self.string)
class PythonToken(Token):
    def __repr__(self):
        # Show the token type's name instead of the full enum repr.
        return ('TokenInfo(type=%s, string=%r, start_pos=%r, prefix=%r)' %
                self._replace(type=self.type.name))
class FStringNode:
    """Tracks the tokenizer state of one (possibly nested) f-string."""
    def __init__(self, quote):
        self.quote = quote
        self.parentheses_count = 0
        self.previous_lines = ''
        self.last_string_start_pos = None
        # Format specs can be nested, e.g. {x:{y:3}}, hence a counter.
        self.format_spec_count = 0
    def open_parentheses(self, character):
        self.parentheses_count += 1
    def close_parentheses(self, character):
        self.parentheses_count -= 1
        if not self.parentheses_count:
            # Leaving the outermost parentheses also ends any format spec.
            self.format_spec_count = 0
    def allow_multiline(self):
        # Only triple-quoted f-strings may span multiple lines.
        return len(self.quote) == 3
    def is_in_expr(self):
        # Inside `{...}` but not inside that expression's format spec.
        return self.parentheses_count > self.format_spec_count
    def is_in_format_spec(self):
        return not self.is_in_expr() and self.format_spec_count
def _close_fstring_if_necessary(fstring_stack, string, line_nr, column, additional_prefix):
    # If ``string`` (after leading whitespace) starts with the closing quote
    # of any open f-string, emit its FSTRING_END token and drop that
    # f-string plus everything nested inside it from the stack.
    # Returns (token_or_None, new_additional_prefix, consumed_length).
    for fstring_stack_index, node in enumerate(fstring_stack):
        lstripped_string = string.lstrip()
        len_lstrip = len(string) - len(lstripped_string)
        if lstripped_string.startswith(node.quote):
            token = PythonToken(
                FSTRING_END,
                node.quote,
                (line_nr, column + len_lstrip),
                prefix=additional_prefix+string[:len_lstrip],
            )
            additional_prefix = ''
            assert not node.previous_lines
            del fstring_stack[fstring_stack_index:]
            return token, '', len(node.quote) + len_lstrip
    return None, additional_prefix, 0
def _find_fstring_string(endpats, fstring_stack, line, lnum, pos):
    # Scan the literal (non-expression) part of the innermost f-string.
    # Multi-line pieces are accumulated in ``tos.previous_lines`` until a
    # line without a trailing newline completes the string.
    tos = fstring_stack[-1]
    allow_multiline = tos.allow_multiline()
    if tos.is_in_format_spec():
        if allow_multiline:
            regex = fstring_format_spec_multi_line
        else:
            regex = fstring_format_spec_single_line
    else:
        if allow_multiline:
            regex = fstring_string_multi_line
        else:
            regex = fstring_string_single_line
    match = regex.match(line, pos)
    if match is None:
        return tos.previous_lines, pos
    if not tos.previous_lines:
        tos.last_string_start_pos = (lnum, pos)
    string = match.group(0)
    # Cut the match short at any enclosing f-string's closing quote.
    for fstring_stack_node in fstring_stack:
        end_match = endpats[fstring_stack_node.quote].match(string)
        if end_match is not None:
            string = end_match.group(0)[:-len(fstring_stack_node.quote)]
    new_pos = pos
    new_pos += len(string)
    # even if allow_multiline is False, we still need to check for trailing
    # newlines, because a single-line f-string can contain line continuations
    if string.endswith('\n') or string.endswith('\r'):
        tos.previous_lines += string
        string = ''
    else:
        string = tos.previous_lines + string
    return string, new_pos
def tokenize(
    code: str, *, version_info: PythonVersionInfo, start_pos: Tuple[int, int] = (1, 0)
) -> Iterator[PythonToken]:
    """Generate tokens from the source code (string)."""
    lines = split_lines(code, keepends=True)
    return tokenize_lines(lines, version_info=version_info, start_pos=start_pos)
def _print_tokens(func):
"""
A small helper function to help debug the tokenize_lines function.
"""
def wrapper(*args, **kwargs):
for token in func(*args, **kwargs):
print(token) # This print is intentional for debugging!
yield token
return wrapper
# @_print_tokens
def tokenize_lines(
    lines: Iterable[str],
    *,
    version_info: PythonVersionInfo,
    indents: List[int] = None,
    start_pos: Tuple[int, int] = (1, 0),
    is_first_token=True,
) -> Iterator[PythonToken]:
    """
    A heavily modified Python standard library tokenizer.
    Additionally to the default information, yields also the prefix of each
    token. This idea comes from lib2to3. The prefix contains all information
    that is irrelevant for the parser like newlines in parentheses or comments.
    """
    def dedent_if_necessary(start):
        # Pop indentation levels until ``start`` fits; emit ERROR_DEDENT if
        # it lands between two known levels.
        while start < indents[-1]:
            if start > indents[-2]:
                yield PythonToken(ERROR_DEDENT, '', (lnum, start), '')
                indents[-1] = start
                break
            indents.pop()
            yield PythonToken(DEDENT, '', spos, '')
    pseudo_token, single_quoted, triple_quoted, endpats, whitespace, \
        fstring_pattern_map, always_break_tokens, = \
        _get_token_collection(version_info)
    paren_level = 0  # count parentheses
    if indents is None:
        indents = [0]
    max_ = 0
    numchars = '0123456789'
    contstr = ''  # pending continued (multi-line) string, '' if none
    contline: str
    contstr_start: Tuple[int, int]
    endprog: Pattern
    # We start with a newline. This makes indent at the first position
    # possible. It's not valid Python, but still better than an INDENT in the
    # second line (and not in the first). This makes quite a few things in
    # Jedi's fast parser possible.
    new_line = True
    prefix = ''  # Should never be required, but here for safety
    additional_prefix = ''
    lnum = start_pos[0] - 1
    fstring_stack: List[FStringNode] = []
    for line in lines:  # loop over lines in stream
        lnum += 1
        pos = 0
        max_ = len(line)
        if is_first_token:
            if line.startswith(BOM_UTF8_STRING):
                additional_prefix = BOM_UTF8_STRING
                line = line[1:]
                max_ = len(line)
            # Fake that the part before was already parsed.
            line = '^' * start_pos[1] + line
            pos = start_pos[1]
            max_ += start_pos[1]
            is_first_token = False
        if contstr:  # continued string
            endmatch = endprog.match(line)  # noqa: F821
            if endmatch:
                pos = endmatch.end(0)
                yield PythonToken(
                    STRING, contstr + line[:pos],
                    contstr_start, prefix)  # noqa: F821
                contstr = ''
                contline = ''
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        while pos < max_:
            if fstring_stack:
                tos = fstring_stack[-1]
                if not tos.is_in_expr():
                    # Emit the literal part of the f-string first.
                    string, pos = _find_fstring_string(endpats, fstring_stack, line, lnum, pos)
                    if string:
                        yield PythonToken(
                            FSTRING_STRING, string,
                            tos.last_string_start_pos,
                            # Never has a prefix because it can start anywhere and
                            # include whitespace.
                            prefix=''
                        )
                        tos.previous_lines = ''
                        continue
                    if pos == max_:
                        break
                rest = line[pos:]
                fstring_end_token, additional_prefix, quote_length = _close_fstring_if_necessary(
                    fstring_stack,
                    rest,
                    lnum,
                    pos,
                    additional_prefix,
                )
                pos += quote_length
                if fstring_end_token is not None:
                    yield fstring_end_token
                    continue
            # in an f-string, match until the end of the string
            if fstring_stack:
                string_line = line
                for fstring_stack_node in fstring_stack:
                    quote = fstring_stack_node.quote
                    end_match = endpats[quote].match(line, pos)
                    if end_match is not None:
                        end_match_string = end_match.group(0)
                        if len(end_match_string) - len(quote) + pos < len(string_line):
                            string_line = line[:pos] + end_match_string[:-len(quote)]
                pseudomatch = pseudo_token.match(string_line, pos)
            else:
                pseudomatch = pseudo_token.match(line, pos)
            if pseudomatch:
                prefix = additional_prefix + pseudomatch.group(1)
                additional_prefix = ''
                start, pos = pseudomatch.span(2)
                spos = (lnum, start)
                token = pseudomatch.group(2)
                if token == '':
                    assert prefix
                    additional_prefix = prefix
                    # This means that we have a line with whitespace/comments at
                    # the end, which just results in an endmarker.
                    break
                initial = token[0]
            else:
                match = whitespace.match(line, pos)
                initial = line[match.end()]
                start = match.end()
                spos = (lnum, start)
            if new_line and initial not in '\r\n#' and (initial != '\\' or pseudomatch is None):
                # First real token on a line: handle INDENT/DEDENT.
                new_line = False
                if paren_level == 0 and not fstring_stack:
                    indent_start = start
                    if indent_start > indents[-1]:
                        yield PythonToken(INDENT, '', spos, '')
                        indents.append(indent_start)
                    yield from dedent_if_necessary(indent_start)
            if not pseudomatch:  # scan for tokens
                match = whitespace.match(line, pos)
                if new_line and paren_level == 0 and not fstring_stack:
                    yield from dedent_if_necessary(match.end())
                pos = match.end()
                new_line = False
                yield PythonToken(
                    ERRORTOKEN, line[pos], (lnum, pos),
                    additional_prefix + match.group(0)
                )
                additional_prefix = ''
                pos += 1
                continue
            if (initial in numchars  # ordinary number
                    or (initial == '.' and token != '.' and token != '...')):
                yield PythonToken(NUMBER, token, spos, prefix)
            elif pseudomatch.group(3) is not None:  # ordinary name
                if token in always_break_tokens and (fstring_stack or paren_level):
                    # Error recovery: these keywords cannot legally appear in
                    # brackets/f-strings, so close everything that is open.
                    fstring_stack[:] = []
                    paren_level = 0
                    # We only want to dedent if the token is on a new line.
                    m = re.match(r'[ \f\t]*$', line[:start])
                    if m is not None:
                        yield from dedent_if_necessary(m.end())
                if token.isidentifier():
                    yield PythonToken(NAME, token, spos, prefix)
                else:
                    yield from _split_illegal_unicode_name(token, spos, prefix)
            elif initial in '\r\n':
                if any(not f.allow_multiline() for f in fstring_stack):
                    fstring_stack.clear()
                if not new_line and paren_level == 0 and not fstring_stack:
                    yield PythonToken(NEWLINE, token, spos, prefix)
                else:
                    additional_prefix = prefix + token
                new_line = True
            elif initial == '#':  # Comments
                assert not token.endswith("\n") and not token.endswith("\r")
                if fstring_stack and fstring_stack[-1].is_in_expr():
                    # `#` is not allowed in f-string expressions
                    yield PythonToken(ERRORTOKEN, initial, spos, prefix)
                    pos = start + 1
                else:
                    additional_prefix = prefix + token
            elif token in triple_quoted:
                endprog = endpats[token]
                endmatch = endprog.match(line, pos)
                if endmatch:  # all on one line
                    pos = endmatch.end(0)
                    token = line[start:pos]
                    yield PythonToken(STRING, token, spos, prefix)
                else:
                    contstr_start = spos  # multiple lines
                    contstr = line[start:]
                    contline = line
                    break
            # Check up to the first 3 chars of the token to see if
            # they're in the single_quoted set. If so, they start
            # a string.
            # We're using the first 3, because we're looking for
            # "rb'" (for example) at the start of the token. If
            # we switch to longer prefixes, this needs to be
            # adjusted.
            # Note that initial == token[:1].
            # Also note that single quote checking must come after
            # triple quote checking (above).
            elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                if token[-1] in '\r\n':  # continued string
                    # This means that a single quoted string ends with a
                    # backslash and is continued.
                    contstr_start = lnum, start
                    endprog = (endpats.get(initial) or endpats.get(token[1])
                               or endpats.get(token[2]))
                    contstr = line[start:]
                    contline = line
                    break
                else:  # ordinary string
                    yield PythonToken(STRING, token, spos, prefix)
            elif token in fstring_pattern_map:  # The start of an fstring.
                fstring_stack.append(FStringNode(fstring_pattern_map[token]))
                yield PythonToken(FSTRING_START, token, spos, prefix)
            elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n', '\\\r'):  # continued stmt
                additional_prefix += prefix + line[start:]
                break
            else:
                if token in '([{':
                    if fstring_stack:
                        fstring_stack[-1].open_parentheses(token)
                    else:
                        paren_level += 1
                elif token in ')]}':
                    if fstring_stack:
                        fstring_stack[-1].close_parentheses(token)
                    else:
                        if paren_level:
                            paren_level -= 1
                elif token.startswith(':') and fstring_stack \
                        and fstring_stack[-1].parentheses_count \
                        - fstring_stack[-1].format_spec_count == 1:
                    # `:` and `:=` both count
                    fstring_stack[-1].format_spec_count += 1
                    token = ':'
                    pos = start + 1
                yield PythonToken(OP, token, spos, prefix)
    if contstr:
        # Unterminated string at EOF becomes an error token.
        yield PythonToken(ERRORTOKEN, contstr, contstr_start, prefix)
        if contstr.endswith('\n') or contstr.endswith('\r'):
            new_line = True
    if fstring_stack:
        tos = fstring_stack[-1]
        if tos.previous_lines:
            yield PythonToken(
                FSTRING_STRING, tos.previous_lines,
                tos.last_string_start_pos,
                # Never has a prefix because it can start anywhere and
                # include whitespace.
                prefix=''
            )
    end_pos = lnum, max_
    # As the last position we just take the maximally possible position. We
    # remove -1 for the last new line.
    for indent in indents[1:]:
        indents.pop()
        yield PythonToken(DEDENT, '', end_pos, '')
    yield PythonToken(ENDMARKER, '', end_pos, additional_prefix)
def _split_illegal_unicode_name(token, start_pos, prefix):
    """Split *token* into alternating NAME / ERRORTOKEN pieces.

    Walks the token one character at a time, growing the longest run that
    still forms a valid identifier and, separately, runs of characters
    that do not; every finished run is emitted as a PythonToken.  Only
    the first emitted token keeps *prefix*; later tokens start right
    after the previous one.
    """
    def emit():
        kind = ERRORTOKEN if illegal_run else NAME
        return PythonToken(kind, chunk, pos, prefix)

    chunk = ''
    illegal_run = False
    pos = start_pos
    for offset, ch in enumerate(token):
        if illegal_run:
            if not ch.isidentifier():
                chunk += ch
            else:
                # the illegal run ends here: flush it, start a legal run
                yield emit()
                prefix = ''
                pos = start_pos[0], start_pos[1] + offset
                chunk = ch
                illegal_run = False
        else:
            candidate = chunk + ch
            if candidate.isidentifier():
                chunk = candidate
            else:
                # the legal run ends here: flush it (if any), start illegal
                if chunk:
                    yield emit()
                    prefix = ''
                    pos = start_pos[0], start_pos[1] + offset
                chunk = ch
                illegal_run = True

    if chunk:
        yield emit()
if __name__ == "__main__":
    # Tokenize the file given on the command line and dump every token.
    with open(sys.argv[1]) as source_file:
        source = source_file.read()
    for tok in tokenize(source, version_info=parse_version_string('3.10')):
        print(tok)
| 25,795 | Python | .py | 592 | 32.440878 | 98 | 0.552175 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,011 | python_builder.py | DamnWidget_anaconda/anaconda_lib/builder/python_builder.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
from string import Template
import sublime
from ..helpers import get_settings, active_view, is_remote_session
class AnacondaSetPythonBuilder(object):
    """Sets or modifies the builder of the current project

    Installs (or updates) an 'Anaconda Python Builder' build system in
    the Sublime Text project data, pointing at the configured interpreter.
    """

    def update_interpreter_build_system(self, cmd):
        """Update the project data, adding or replacing the build system.

        :param cmd: the python interpreter command line, or None when the
                    interpreter is not set or invalid
        """
        view = active_view()
        if get_settings(view, 'auto_python_builder_enabled', True) is False:
            return
        if is_remote_session(view):
            return
        if cmd is None:
            sublime.message_dialog(
                'Your python interpreter is not set or is invalid'
            )
            return
        project = self._get_project()
        build_systems = project.get('build_systems', False)
        if build_systems is not False:
            # isinstance instead of `type(...) is list` so list subclasses
            # coming from the project file are handled too
            if isinstance(build_systems, list):
                anaconda_build = self._parse_tpl(cmd)
                for index, build in enumerate(build_systems):
                    # use .get() so a build system lacking a 'name' key
                    # does not abort the whole update with a KeyError
                    if build.get('name') == 'Anaconda Python Builder':
                        build_systems[index] = anaconda_build
                        break
                else:
                    # no existing anaconda entry found: append a new one
                    build_systems.append(anaconda_build)
            else:
                sublime.message_dialog(
                    'Your project build_systems is messed up'
                )
        else:
            project.update({
                'build_systems': [self._parse_tpl(cmd)]
            })
        self._save_project(project)

    def _get_project(self):
        """Return the project data of the active window."""
        return sublime.active_window().project_data()

    def _parse_tpl(self, cmd):
        """Render the build system template for the given interpreter.

        Backslashes in the command are escaped so the substituted JSON
        stays valid for Windows-style paths.
        """
        template_file = os.path.join(
            os.path.dirname(__file__),
            '..', '..', 'templates', 'python_build.tpl'
        )
        with open(template_file, 'r', encoding='utf8') as tplfile:
            template = Template(tplfile.read())
        cmd = cmd.replace('\\', '\\\\')
        return sublime.decode_value(
            template.safe_substitute({'python_interpreter': cmd})
        )

    def _save_project(self, project_data):
        """Persist the project data back to the active window."""
        sublime.active_window().set_project_data(project_data)
| 2,545 | Python | .py | 65 | 27.815385 | 76 | 0.558537 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,012 | linting.py | DamnWidget_anaconda/listeners/linting.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import time
import sublime
import sublime_plugin
from ..anaconda_lib._typing import Callable, Dict, Any
from ..anaconda_lib.helpers import (
check_linting, get_settings, check_linting_behaviour,
ONLY_CODE, NOT_SCRATCH, LINTING_ENABLED, is_code
)
from ..anaconda_lib.linting.sublime import (
ANACONDA, erase_lint_marks, run_linter,
last_selected_lineno, update_statusbar
)
class BackgroundLinter(sublime_plugin.EventListener):
    """Background linter, can be turned off via plugin settings

    Listens to view events (modify/load/save/activate/selection) and
    schedules or erases lint runs through ``self.run_linter`` according
    to the configured ``anaconda_linting_behaviour`` setting.
    """
    # flipped to True (once) by lint() when the behaviour is not 'always';
    # on_modified() then triggers lint() manually on each change
    check_auto_lint = False

    def __init__(self, lang: str='Python', linter: Callable=run_linter, non_auto: bool=False) -> None:  # noqa
        """Store the language/linter to use and schedule the first lint run.

        :param lang: language name as it appears in the view's syntax
        :param linter: callable executed to lint a view
        :param non_auto: when True, lint() does not reschedule itself
        """
        super(BackgroundLinter, self).__init__()
        self.lang = lang
        self._force_non_auto = non_auto
        self.run_linter = linter
        self.last_selected_line = -1
        # kick off the first lint pass one second after construction
        sublime.set_timeout(self.lint, 1000)

    def lint(self) -> None:
        """Periodic lint loop used when behaviour is 'always'.

        Runs the linter once the configured delay has elapsed since the
        last buffer pulse, then reschedules itself (unless non_auto).
        """
        view = sublime.active_window().active_view()
        if get_settings(view, 'anaconda_linting_behaviour') != 'always':
            if not self.check_auto_lint:
                self.check_auto_lint = True
            return
        delay = get_settings(view, 'anaconda_linter_delay', 0.5)
        valid_code = is_code(view, lang=self.lang.lower())
        if not ANACONDA['ALREADY_LINTED'] and valid_code:
            # only lint once the buffer has been quiet for `delay` seconds
            if time.time() - ANACONDA['LAST_PULSE'] >= delay:
                ANACONDA['ALREADY_LINTED'] = True
                self.run_linter(view)
        if not self._force_non_auto:
            sublime.set_timeout(lambda: self.lint(), int(delay * 1000))

    def on_modified(self, view: sublime.View) -> None:
        """
        Called after changes have been made to a view.
        Runs in a separate thread, and does not block the application.
        """
        constraints = ONLY_CODE | NOT_SCRATCH | LINTING_ENABLED
        if check_linting(view, constraints, code=self.lang.lower()):
            # remove previous linting marks if configured to do so
            if not get_settings(view, 'anaconda_linter_persistent', False):
                erase_lint_marks(view)
            # check lint behavior and lint if always and auto lint is set
            if check_linting_behaviour(view, ['always']):
                # update the last selected line number
                self.last_selected_line = -1
                # record the pulse so lint() waits for the delay again
                ANACONDA['LAST_PULSE'] = time.time()
                ANACONDA['ALREADY_LINTED'] = False
                if self.check_auto_lint:
                    self.lint()
        else:
            self._erase_marks_if_no_linting(view)

    def on_load(self, view: sublime.View) -> None:
        """Called after load a file
        """
        if (check_linting(view, ONLY_CODE, code=self.lang.lower()) and
                check_linting_behaviour(view, ['always', 'load-save'])):
            if self.lang in view.settings().get('syntax'):
                self.run_linter(view)
        else:
            self._erase_marks_if_no_linting(view)

    def on_pre_close(self, view: sublime.View) -> None:
        """Called when the view is about to be closed
        """
        self._erase_marks(view)
        # drop any cached lint results for the closing view
        for severity in ['VIOLATIONS', 'WARNINGS', 'ERRORS']:
            ANACONDA[severity][view.id()] = {}

    def on_post_save(self, view: sublime.View) -> None:
        """Called post file save event
        """
        if check_linting(
                view, NOT_SCRATCH | LINTING_ENABLED, code=self.lang.lower()):
            if self.lang in view.settings().get('syntax'):
                # optionally pop the quick panel with the found errors
                if get_settings(
                        view, "anaconda_linter_show_errors_on_save", False):
                    self.run_linter(view, self._show_errors_list)
                else:
                    self.run_linter(view)
        else:
            self._erase_marks_if_no_linting(view)

    def _show_errors_list(self, parse_results: Callable[[Dict[str, Any]], None], data: Dict[str, Any]) -> None:  # noqa
        """Hook the parser_results callback and append some functions
        """
        parse_results(data)
        sublime.active_window().run_command('anaconda_get_lines')

    def on_activated(self, view: sublime.View) -> None:
        """Called when a view gain the focus
        """
        if (check_linting(
                view, ONLY_CODE | LINTING_ENABLED, code=self.lang.lower()) and
                check_linting_behaviour(view, ['always'])):
            if self.lang in view.settings().get('syntax'):
                self.run_linter(view)
        else:
            self._erase_marks_if_no_linting(view)

    def on_selection_modified(self, view: sublime.View) -> None:
        """Called on selection modified
        """
        constraints = ONLY_CODE | NOT_SCRATCH | LINTING_ENABLED
        if (not check_linting(view, constraints, code=self.lang.lower()) or
                self.lang not in view.settings().get('syntax')):
            return
        # refresh the status bar only when the cursor changed line
        last_selected_line = last_selected_lineno(view)
        if last_selected_line != self.last_selected_line:
            self.last_selected_line = last_selected_line
            update_statusbar(view)

    def _erase_marks_if_no_linting(self, view: sublime.View) -> None:
        """Erase the anaconda marks if linting is disabled
        """
        if not check_linting(view, LINTING_ENABLED, code=self.lang.lower()):
            self._erase_marks(view)

    def _erase_marks(self, view: sublime.View) -> None:
        """Just a wrapper for erase_lint_marks
        """
        erase_lint_marks(view)
| 5,657 | Python | .py | 122 | 36.155738 | 119 | 0.605124 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,013 | signatures.py | DamnWidget_anaconda/listeners/signatures.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
from functools import partial
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib.tooltips import Tooltip
from ..anaconda_lib.kite import Integration
from ..anaconda_lib._typing import Dict, Tuple, Any
from ..anaconda_lib.helpers import prepare_send_data, is_python, get_settings
class AnacondaSignaturesEventListener(sublime_plugin.EventListener):
    """Signatures on status bar event listener class

    On every buffer modification, asks the JsonServer for the docstring
    of the symbol under the cursor and shows it either as a tooltip
    (ST >= 3070) or in the status bar.
    """
    doc = None  # type: str
    signature = None
    # signatures whose display would add noise rather than information
    exclude = (
        'None', 'NoneType', 'str', 'int', 'float', 'True',
        'False', 'in', 'or', 'and', 'bool'
    )

    def on_modified(self, view: sublime.View) -> None:
        """Called after changes has been made to a view
        """
        if view.command_history(0)[0] in ("expand_tabs", "unexpand_tabs"):
            return
        if not is_python(view) or not get_settings(view, 'display_signatures'):
            return
        if Integration.enabled():
            # Kite integration handles signatures itself
            return
        try:
            location = view.rowcol(view.sel()[0].begin())
            if view.substr(view.sel()[0].begin()) in ['(', ')']:
                # look up the symbol just before the parenthesis
                location = (location[0], location[1] - 1)
            data = prepare_send_data(location, 'doc', 'jedi')
            use_tooltips = get_settings(
                view, 'enable_signatures_tooltip', True
            )
            st_version = int(sublime.version())
            if st_version >= 3070:
                data['html'] = use_tooltips
            currying = partial(self.prepare_data_status, view)
            if use_tooltips and st_version >= 3070:
                currying = partial(self.prepare_data_tooltip, view)
            data["settings"] = {
                'python_interpreter': get_settings(view, 'python_interpreter', '')
            }
            Worker().execute(currying, **data)
        except Exception as error:
            logging.error(error)

    def prepare_data_tooltip(
            self, view: sublime.View, data: Dict[str, Any]) -> Any:
        """Prepare the returned data for tooltips
        """
        merge_doc = get_settings(view, 'merge_signatures_and_doc')
        if (data['success'] and 'No docstring' not
                in data['doc'] and data['doc'] != 'list\n'):
            try:
                # first empty line separates the signature from the doc
                i = data['doc'].split('<br>').index("")
            except ValueError:
                # no separator: the whole payload is the signature
                self.signature = data['doc']
                self.doc = ''
                if self._signature_excluded(self.signature):
                    return
                return self._show_popup(view)
            if merge_doc:
                self.doc = '<br>'.join(data['doc'].split('<br>')[i:]).replace(
                    " ", "&nbsp;")
            self.signature = '<br>&nbsp;&nbsp;&nbsp;&nbsp;'.join(
                data['doc'].split('<br>')[0:i])
            if self.signature is not None and self.signature != "":
                if not self._signature_excluded(self.signature):
                    return self._show_popup(view)
        if view.is_popup_visible():
            view.hide_popup()
        view.erase_status('anaconda_doc')

    def prepare_data_status(
            self, view: sublime.View, data: Dict[str, Any]) -> Any:
        """Prepare the returned data for status
        """
        if (data['success'] and 'No docstring' not
                in data['doc'] and data['doc'] != 'list\n'):
            self.signature = data['doc']
            if self._signature_excluded(self.signature):
                return
            try:
                self.signature = self.signature.splitlines()[2]
            except (KeyError, IndexError):
                # fix: indexing a list raises IndexError, which the old
                # `except KeyError` never caught; KeyError kept for
                # backwards compatibility
                return
            return self._show_status(view)

    def _show_popup(self, view: sublime.View) -> None:
        """Show message in a popup if sublime text version is >= 3070
        """
        show_doc = get_settings(view, 'merge_signatures_and_doc', True)
        content = {'content': self.signature}
        display_tooltip = 'signature'
        if show_doc:
            content = {'signature': self.signature, 'doc': self.doc}
            display_tooltip = 'signature_doc'
        css = get_settings(view, 'anaconda_tooltip_theme', 'popup')
        # fall back to the status bar when the tooltip cannot be shown
        Tooltip(css).show_tooltip(
            view, display_tooltip, content, partial(self._show_status, view))

    def _show_status(self, view: sublime.View) -> None:
        """Show message in the view status bar
        """
        view.set_status(
            'anaconda_doc', 'Anaconda: {}'.format(self.signature)
        )

    def _signature_excluded(self, signature: str) -> bool:
        """Whether to suppress displaying information for the given signature.
        """
        # Check for the empty string first so the indexing in the next tests
        # can't hit an exception, and we don't want to show an empty signature.
        return ((signature == "") or
                (signature.split('(', 1)[0].strip() in self.exclude) or
                (signature.lstrip().split(None, 1)[0] in self.exclude))
| 5,181 | Python | .py | 115 | 34.191304 | 82 | 0.572677 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,014 | autopep8.py | DamnWidget_anaconda/listeners/autopep8.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime_plugin
from ..anaconda_lib.helpers import get_settings, is_python
class AnacondaAutoformatPEP8EventListener(sublime_plugin.EventListener):
    """Runs the AutoPEP8 formatter automatically right before saving."""

    def on_pre_save(self, view: sublime_plugin.sublime.View) -> None:
        """Trigger the auto formatter on save when enabled for this view."""
        if not is_python(view):
            return
        if get_settings(view, 'auto_formatting'):
            view.run_command('anaconda_auto_format')
| 617 | Python | .py | 12 | 45.833333 | 72 | 0.730769 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,015 | __init__.py | DamnWidget_anaconda/listeners/__init__.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .linting import BackgroundLinter
from .completion import AnacondaCompletionEventListener
from .signatures import AnacondaSignaturesEventListener
from .autopep8 import AnacondaAutoformatPEP8EventListener
__all__ = [
'BackgroundLinter',
'AnacondaCompletionEventListener',
'AnacondaSignaturesEventListener',
'AnacondaAutoformatPEP8EventListener'
]
| 497 | Python | .py | 12 | 38.75 | 65 | 0.83368 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,016 | completion.py | DamnWidget_anaconda/listeners/completion.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib.helpers import (
prepare_send_data, get_settings, active_view, is_python,
completion_is_disabled, dot_completion, enable_dot_completion
)
from ..anaconda_lib.decorators import profile
from ..anaconda_lib._typing import Dict, List, Tuple, Any
JUST_COMPLETED = False
class AnacondaCompletionEventListener(sublime_plugin.EventListener):
    """Anaconda completion events listener class

    Completion is asynchronous: the first on_query_completions call sends
    a request to the JsonServer and returns nothing; when the response
    arrives, _complete() caches the proposals, re-opens the completion
    popup, and the second call returns the cached proposals.
    """
    # proposals cached by _complete() for the deferred second call
    completions = []  # type: List[Tuple[str]]
    # True when `completions` holds fresh proposals ready to be returned
    ready_from_defer = False

    @profile
    def on_query_completions(self, view: sublime.View, prefix: str, locations: List[Tuple[int]]) -> Tuple[List[Tuple[str]], int]:  # noqa
        """Sublime Text autocompletion event handler
        """
        if not is_python(view, autocomplete_ignore_repl=True):
            return
        if completion_is_disabled(view):
            return
        if not dot_completion(view):
            enable_dot_completion(view)
        global JUST_COMPLETED
        if self.ready_from_defer is True:
            # second (deferred) call: return the cached proposals
            completion_flags = 0
            if get_settings(view, 'suppress_word_completions', False):
                completion_flags = sublime.INHIBIT_WORD_COMPLETIONS
            if get_settings(view, 'suppress_explicit_completions', False):
                completion_flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
            cpl = self.completions
            self.completions = []
            self.ready_from_defer = False
            JUST_COMPLETED = True
            return (cpl, completion_flags)
        # first call: fire the async request and return nothing for now
        location = view.rowcol(locations[0])
        data = prepare_send_data(location, 'autocomplete', 'jedi')
        data["settings"] = {
            'python_interpreter': get_settings(view, 'python_interpreter', ''),
        }
        Worker().execute(self._complete, **data)

    def on_modified(self, view: sublime.View) -> None:
        """Called after changes has been made to a view.
        """
        if not is_python(view, autocomplete_ignore_repl=True):
            return
        global JUST_COMPLETED
        if (view.substr(view.sel()[0].begin() - 1) == '(' and
                view.substr(view.sel()[0].begin()) == ')'):
            # the caret sits inside freshly inserted parentheses: offer
            # the function arguments completion right after a completion
            if JUST_COMPLETED:
                view.run_command('anaconda_complete_funcargs')
            JUST_COMPLETED = False
        elif view.substr(sublime.Region(
                view.sel()[0].begin() - 7, view.sel()[0].end())) == 'import ':
            # typing `import ` re-triggers the completion popup
            self._run_auto_complete()

    def _complete(self, data: Dict[str, Any]) -> None:
        # Worker callback: cache the proposals and re-open the popup so
        # on_query_completions can return them on its deferred call.
        view = active_view()
        # Temporary fix for completion bug in ST4
        if int(sublime.version()) >= 4000:
            if view.substr(view.sel()[0].begin() - 1) == ':' or view.substr(view.sel()[0].begin() - 1) == ')':
                return
        proposals = data['completions'] if data['success'] else []
        if proposals:
            if int(sublime.version()) >= 3103 and view.is_auto_complete_visible():  # noqa
                view.run_command("hide_auto_complete")
            else:
                view.run_command("hide_auto_complete")
            self.completions = proposals
            self.ready_from_defer = True
            # if the tab key is used to complete just undo the last insertion
            if view.command_history(0)[0] == 'insert_best_completion':
                if view.substr(sublime.Region(
                        view.sel()[0].begin() - 5,
                        view.sel()[0].end())) == 'self.':
                    view.run_command('undo')
            self._run_auto_complete()

    def _run_auto_complete(self) -> None:
        """Efectively call autocomplete using the ST API
        """
        active_view().run_command("auto_complete", {
            'disable_auto_insert': True,
            'api_completions_only': get_settings(
                active_view(), 'hide_snippets_on_completion', False),
            'next_completion_if_showing': False,
            'auto_complete_commit_on_tab': True,
        })
| 4,194 | Python | .py | 90 | 36.1 | 137 | 0.599558 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,017 | minserver.py | DamnWidget_anaconda/anaconda_server/minserver.py | # -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import sys
import time
import socket
import logging
import asyncore
import asynchat
import traceback
from logging import handlers
from optparse import OptionParser
# we use ujson if it's available on the target intrepreter
try:
import ujson as json
except ImportError:
import json
sys.path.insert(0, os.path.join(
os.path.split(os.path.split(__file__)[0])[0], 'anaconda_lib'))
from jedi import settings as jedi_settings
from lib.path import log_directory
from lib.contexts import json_decode
from handlers import ANACONDA_HANDLERS
from lib.anaconda_handler import AnacondaHandler
DEBUG_MODE = False
logger = logging.getLogger('')
PY3 = True if sys.version_info >= (3,) else False
class JSONHandler(asynchat.async_chat):
    """Handles JSON messages from a client

    Collects bytes until the \\r\\n terminator, decodes the JSON payload
    and dispatches it to the right anaconda handler.
    """

    def __init__(self, sock, server):
        self.server = server
        self.rbuffer = []
        asynchat.async_chat.__init__(self, sock)
        self.set_terminator(b"\r\n" if PY3 else "\r\n")

    def return_back(self, data):
        """Send data back to the client

        :param data: a JSON-serializable dict, or None to send nothing
        """
        if data is not None:
            data = '{0}\r\n'.format(json.dumps(data))
            data = bytes(data, 'utf8') if PY3 else data
            if DEBUG_MODE is True:
                print('About push back to ST3: {0}'.format(data))
                logging.info('About push back to ST3: {0}'.format(data))
            self.push(data)

    def collect_incoming_data(self, data):
        """Called when data is ready to be read
        """
        self.rbuffer.append(data)

    def found_terminator(self):
        """Called when the terminator is found in the buffer
        """
        message = b''.join(self.rbuffer) if PY3 else ''.join(self.rbuffer)
        self.rbuffer = []
        with json_decode(message) as data:
            if not data:
                logging.info('No data received in the handler')
                return
            if data['method'] == 'check':
                # fix: return_back() takes a single dict argument; the old
                # keyword call (message=..., uid=...) raised TypeError on
                # every 'check' request
                self.return_back({'message': 'Ok', 'uid': data['uid']})
                return
            self.server.last_call = time.time()
            if isinstance(data, dict):
                logging.info(
                    'client requests: {0}'.format(data['method'])
                )
                method = data.pop('method')
                uid = data.pop('uid')
                vid = data.pop('vid', None)
                handler_type = data.pop('handler')
                self.handle_command(handler_type, method, uid, vid, data)
            else:
                logging.error(
                    'client sent something that I don\'t understand: {0}'.format(
                        data
                    )
                )

    def handle_command(self, handler_type, method, uid, vid, data):
        """Call the right commands handler
        """
        # lazy initialization of anaconda plugins
        if not AnacondaHandler._registry.initialized:
            AnacondaHandler._registry.initialize()
        handler = ANACONDA_HANDLERS.get(
            handler_type, AnacondaHandler.get_handler(handler_type))
        handler(method, data, uid, vid, self.return_back, DEBUG_MODE).run()
class JSONServer(asyncore.dispatcher):
    """Asynchronous standard library TCP JSON server

    Accepts TCP connections and hands each socket to a JSONHandler.
    """
    allow_reuse_address = False
    request_queue_size = 5
    # NOTE(review): attribute name is misspelled ('familty') but it is
    # used consistently within this class, so behaviour is unaffected
    address_familty = socket.AF_INET
    socket_type = socket.SOCK_STREAM

    def __init__(self, address, handler=JSONHandler):
        self.address = address
        self.handler = handler
        asyncore.dispatcher.__init__(self)
        self.create_socket(self.address_familty, self.socket_type)
        # timestamp of the last client request, updated by JSONHandler
        self.last_call = time.time()
        self.bind(self.address)
        logging.debug('bind: address=%s' % (address,))
        self.listen(self.request_queue_size)
        logging.debug('listen: backlog=%d' % (self.request_queue_size,))

    # NOTE(review): this property shadows asyncore.dispatcher.fileno()
    # (a method) -- confirm nothing calls server.fileno() as a function
    @property
    def fileno(self):
        return self.socket.fileno()

    def serve_forever(self):
        """Enter the asyncore polling loop; blocks until shutdown."""
        asyncore.loop()

    def shutdown(self):
        """Close the listening socket, which ends serve_forever()."""
        self.handle_close()

    def handle_accept(self):
        """Called when we accept and incomming connection
        """
        sock, addr = self.accept()
        # self.logger is attached externally after construction (__main__)
        self.logger.info('Incomming connection from {0}'.format(repr(addr)))
        self.handler(sock, self)

    def handle_close(self):
        """Called when close
        """
        logging.info('Closing the socket, server will be shutdown now...')
        self.close()
def get_logger(path):
    """Configure and return the root logger writing into *path*.

    The directory is created when missing and a size-rotating file
    handler (10 MB per file, five backups) is attached.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    root = logging.getLogger('')
    root.setLevel(logging.DEBUG)
    file_handler = handlers.RotatingFileHandler(
        filename=os.path.join(path, 'anaconda_jsonserver.log'),
        maxBytes=10000000,
        backupCount=5,
        encoding='utf-8'
    )
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s: %(levelname)-8s: %(message)s'))
    root.addHandler(file_handler)
    return root
def log_traceback():
    """Write the most recent exception traceback to the error log."""
    logging.error(get_log_traceback())
def get_log_traceback():
    """Return the current exception traceback as a single string."""
    # splitlines + join yields the same text as format_exc() minus the
    # trailing newline, exactly like the original line-by-line loop
    return '\n'.join(traceback.format_exc().splitlines())
if __name__ == "__main__":
    # Command line: minserver.py [-p project] [-e path1,path2] <port>
    opt_parser = OptionParser(usage=(
        'usage: %prog -p <project> -e <extra_paths> port'
    ))
    opt_parser.add_option(
        '-p', '--project', action='store', dest='project', help='project name'
    )
    opt_parser.add_option(
        '-e', '--extra_paths', action='store', dest='extra_paths',
        help='extra paths (separed by comma) that should be added to sys.paths'
    )
    options, args = opt_parser.parse_args()
    if len(args) != 1:
        opt_parser.error('you have to pass a port number')
    port = int(args[0])
    if options.project is not None:
        # namespace the jedi cache per project
        jedi_settings.cache_directory = os.path.join(
            jedi_settings.cache_directory, options.project
        )
        # rebinds the module-level log_directory imported from lib.path
        log_directory = os.path.join(log_directory, options.project)
    if not os.path.exists(jedi_settings.cache_directory):
        os.makedirs(jedi_settings.cache_directory)
    if options.extra_paths is not None:
        # prepend user-supplied paths so they win over installed packages
        for path in options.extra_paths.split(','):
            if path not in sys.path:
                sys.path.insert(0, path)
    logger = get_logger(log_directory)
    try:
        # NOTE(review): binds to 0.0.0.0, i.e. all interfaces -- confirm
        # this is intended rather than localhost only
        server = JSONServer(('0.0.0.0', port))
        logger.info(
            'Anaconda Server started in port {0} with cache dir {1}{2}'.format(
                port, jedi_settings.cache_directory,
                ' and extra paths {0}'.format(
                    options.extra_paths
                ) if options.extra_paths is not None else ''
            )
        )
    except Exception as error:
        log_traceback()
        logger.error(error)
        sys.exit(-1)
    server.logger = logger

    # start the server
    server.serve_forever()
| 7,096 | Python | .py | 194 | 28.675258 | 79 | 0.619173 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,018 | autoreload.py | DamnWidget_anaconda/anaconda_server/autoreload.py | #!/usr/bin/env python
#
# Autoreloader for jsonserver for development.
# Run with:
# python3 autoreload.py python3 jsonserver.py -p<project_name> 9999 DEBUG
#
import os
import sys
import subprocess
import time
from pathlib import Path
def file_filter(path):
    """Return True for paths worth watching (no dotfiles, no .swp files)."""
    if path.name.startswith("."):
        return False
    return path.suffix != ".swp"
def file_times(path):
    """Yield the ctime of every watched file under *path*, recursively."""
    for entry in filter(file_filter, path.resolve().iterdir()):
        if entry.is_dir():
            yield from file_times(entry)
        else:
            yield os.path.getctime(entry)
def print_stdout(process):
    """Echo the subprocess' stdout/stderr streams when they are attached.

    NOTE(review): this prints the stream *objects*, not their content;
    with shell=True and no pipes both attributes are None and nothing is
    printed -- confirm whether reading the streams was intended.
    """
    stdout = process.stdout
    # fix: identity comparison with None (`is not None`), not `!= None`
    if stdout is not None:
        print(stdout)
    stderr = process.stderr
    if stderr is not None:
        print(stderr)
# We concatenate all of the arguments together, and treat that as the command to run.
# NOTE(review): the command goes through the shell (shell=True); this is a
# development-only helper, never feed it untrusted input.
command = " ".join(sys.argv[1:])
# The path to watch (parent directory of this script)
path = Path("..")
# How often we check the filesystem for changes (in seconds)
wait = 1
# The process to autoreload
print("Started: ", command)
process = subprocess.Popen(command, shell=True)
# The current maximum file change time (ctime) under the watched directory
last_mtime = max(file_times(path))
# Poll forever: whenever any watched file changes, restart the process
while True:
    max_mtime = max(file_times(path))
    print_stdout(process)
    if max_mtime > last_mtime:
        last_mtime = max_mtime
        print("Restarting process.")
        process.kill()
        process = subprocess.Popen(command, shell=True)
    time.sleep(wait)
| 1,496 | Python | .py | 48 | 26.791667 | 84 | 0.691771 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,019 | process.py | DamnWidget_anaconda/anaconda_server/process.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import logging
import subprocess
def spawn(args, **kwargs):
    """Launch *args* as a subprocess and return the Popen handle.

    Defaults the working directory to this file's directory, disables
    buffering, and hides the console window on Windows.  Raises a
    RuntimeError (after logging) when the OS refuses to start it.
    """
    kwargs.setdefault('cwd', os.path.dirname(os.path.abspath(__file__)))
    kwargs['bufsize'] = -1
    if os.name == 'nt':
        # keep the console window hidden on Windows
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        kwargs['startupinfo'] = startupinfo
    try:
        return subprocess.Popen(args, **kwargs)
    except Exception as error:
        msg = (
            'Your operating system denied the spawn of {0} process: {1}'
        ).format(args[0], error)
        logging.error(msg)
        raise RuntimeError(msg)
| 826 | Python | .py | 23 | 29.73913 | 72 | 0.657035 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,020 | __init__.py | DamnWidget_anaconda/anaconda_server/__init__.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
| 128 | Python | .py | 2 | 62.5 | 65 | 0.776 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,021 | jsonserver.py | DamnWidget_anaconda/anaconda_server/jsonserver.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import sys
import time
import socket
import logging
import platform
import asyncore
import asynchat
import threading
import traceback
import subprocess
from logging import handlers
from optparse import OptionParser
from os import chmod
from os.path import dirname, join, abspath
from operator import xor
# we use ujson if it's available on the target interpreter
try:
import ujson as json
except ImportError:
import json
PROJECT_ROOT = dirname(dirname(abspath(__file__)))
sys.path.insert(0, join(PROJECT_ROOT, 'anaconda_lib'))
from lib.path import log_directory
from jedi import set_debug_function
from lib.contexts import json_decode
from unix_socket import UnixSocketPath, get_current_umask
from handlers import ANACONDA_HANDLERS
from jedi import settings as jedi_settings
from lib.anaconda_handler import AnacondaHandler
DEBUG_MODE = False
logger = logging.getLogger('')
PY3 = True if sys.version_info >= (3,) else False
class JSONHandler(asynchat.async_chat):
    """Handles JSON messages from a client

    Collects bytes until the \\r\\n terminator, decodes the JSON payload
    and dispatches it to the right anaconda handler.
    """

    def __init__(self, sock, server):
        self.server = server
        self.rbuffer = []
        asynchat.async_chat.__init__(self, sock)
        self.set_terminator(b"\r\n" if PY3 else "\r\n")

    def return_back(self, data):
        """Send data back to the client

        :param data: a JSON-serializable dict, or None to send nothing
        """
        if data is not None:
            # fix: removed a stray unconditional print(data) debug
            # leftover that leaked every response to stdout; output is
            # gated behind DEBUG_MODE like in the rest of the class
            data = '{0}\r\n'.format(json.dumps(data))
            data = bytes(data, 'utf8') if PY3 else data
            if DEBUG_MODE is True:
                print('About push back to ST3: {0}'.format(data))
                logging.info('About push back to ST3: {0}'.format(data))
            self.push(data)

    def collect_incoming_data(self, data):
        """Called when data is ready to be read
        """
        self.rbuffer.append(data)

    def found_terminator(self):
        """Called when the terminator is found in the buffer
        """
        message = b''.join(self.rbuffer) if PY3 else ''.join(self.rbuffer)
        self.rbuffer = []
        with json_decode(message) as data:
            if not data:
                logging.info('No data received in the handler')
                return
            if data['method'] == 'check':
                logging.info('Check received')
                # fix: return_back() takes a single dict argument; the old
                # keyword call (message=..., uid=...) raised TypeError on
                # every 'check' request
                self.return_back({'message': 'Ok', 'uid': data['uid']})
                return
            self.server.last_call = time.time()
            if isinstance(data, dict):
                logging.info(
                    'client requests: {0}'.format(data['method'])
                )
                method = data.pop('method')
                uid = data.pop('uid')
                vid = data.pop('vid', None)
                settings = data.pop('settings', {})
                handler_type = data.pop('handler')
                if DEBUG_MODE is True:
                    print('Received method: {0}, handler: {1}'.format(
                        method, handler_type)
                    )
                try:
                    self.handle_command(
                        handler_type, method, uid, vid, settings, data,
                    )
                except Exception as error:
                    logging.error(error)
                    log_traceback()
                    # report the failure back instead of dropping it
                    self.return_back({
                        'success': False, 'uid': uid,
                        'vid': vid, 'error': str(error)
                    })
            else:
                # fix: corrected 'somethinf' typo in the error message
                logging.error(
                    'client sent something that I don\'t understand: {0}'.format(
                        data
                    )
                )

    def handle_command(self, handler_type, method, uid, vid, settings, data):
        """Call the right commands handler
        """
        # lazy initialization of anaconda plugins
        if not AnacondaHandler._registry.initialized:
            AnacondaHandler._registry.initialize()
        handler = ANACONDA_HANDLERS.get(
            handler_type, AnacondaHandler.get_handler(handler_type))
        if DEBUG_MODE is True:
            print('{0} handler retrieved from registry'.format(handler))
        handler(
            method, data, uid, vid, settings, self.return_back, DEBUG_MODE,
        ).run()
class JSONServer(asyncore.dispatcher):
    """Asynchronous standard library TCP JSON server

    On Linux it listens on a Unix domain socket; elsewhere on TCP.
    """
    allow_reuse_address = False
    request_queue_size = 5
    # pick the socket family at class-definition time based on the platform
    if platform.system().lower() != 'linux':
        address_family = socket.AF_INET
    else:
        address_family = socket.AF_UNIX
    socket_type = socket.SOCK_STREAM

    def __init__(self, address, handler=JSONHandler):
        self.address = address
        self.handler = handler
        asyncore.dispatcher.__init__(self)
        self.create_socket(self.address_family, self.socket_type)
        # timestamp of the last client request, read by the Checker thread
        self.last_call = time.time()
        self.bind(self.address)
        if hasattr(socket, 'AF_UNIX') and \
                self.address_family == socket.AF_UNIX:
            # WSL 1903 fix: make the unix socket file mode honor the umask
            chmod(self.address, xor(0o777, get_current_umask()))
        logging.debug('bind: address=%s' % (address,))
        self.listen(self.request_queue_size)
        logging.debug('listen: backlog=%d' % (self.request_queue_size,))

    # NOTE(review): this property shadows asyncore.dispatcher.fileno()
    # (a method) -- confirm nothing calls server.fileno() as a function
    @property
    def fileno(self):
        return self.socket.fileno()

    def serve_forever(self):
        """Enter the asyncore polling loop; blocks until shutdown."""
        asyncore.loop()

    def shutdown(self):
        """Close the listening socket, which ends serve_forever()."""
        self.handle_close()

    def handle_accept(self):
        """Called when we accept and incoming connection
        """
        sock, addr = self.accept()
        # self.logger is attached externally after construction (__main__)
        self.logger.info('Incoming connection from {0}'.format(
            repr(addr) or 'unix socket')
        )
        self.handler(sock, self)

    def handle_close(self):
        """Called when close
        """
        logging.info('Closing the socket, server will be shutdown now...')
        self.close()
class Checker(threading.Thread):
    """Check that the ST3 PID already exists every delta seconds

    Daemon thread that shuts the server down when either the Sublime
    Text process dies or no request has arrived for MAX_INACTIVITY.
    """

    MAX_INACTIVITY = 1800  # 30 minutes in seconds

    def __init__(self, server, pid, delta=5):
        threading.Thread.__init__(self)
        self.server = server
        self.delta = delta   # seconds between liveness checks
        self.daemon = True
        self.die = False     # set by _check() when the ST3 pid is gone
        self.pid = int(pid)

    def run(self):
        while not self.die:
            if time.time() - self.server.last_call > self.MAX_INACTIVITY:
                # is now more than 30 minutes of inactivity
                self.server.logger.info(
                    'detected inactivity for more than 30 minutes... '
                    'shuting down...'
                )
                break
            self._check()
            if not self.die:
                time.sleep(self.delta)
        # either ST3 died or we were idle too long: stop serving
        self.server.shutdown()

    if os.name == 'nt':
        def _isprocessrunning(self, timeout=MAX_INACTIVITY * 1000):
            """Blocking until process has exited or timeout is reached.
            """
            import ctypes
            kernel32 = ctypes.windll.kernel32
            SYNCHRONIZE = 0x00100000
            WAIT_TIMEOUT = 0x00000102
            hprocess = kernel32.OpenProcess(SYNCHRONIZE, False, self.pid)
            if hprocess == 0:
                # could not obtain a handle: treat as not running
                return False
            ret = kernel32.WaitForSingleObject(hprocess, timeout)
            kernel32.CloseHandle(hprocess)
            # WAIT_TIMEOUT means the wait expired without the process
            # exiting, i.e. it is still running
            return ret == WAIT_TIMEOUT
    else:
        def _isprocessrunning(self):
            """Returning immediately whether process is running.
            """
            # signal 0 performs only the existence/permission check
            try:
                os.kill(self.pid, 0)
            except OSError:
                return False
            return True

    def _check(self):
        """Check for the ST3 pid
        """
        if not self._isprocessrunning():
            self.server.logger.info(
                'process {0} does not exists stopping server...'.format(
                    self.pid
                )
            )
            self.die = True
def get_logger(path):
    """Build and return the root logger backed by a rotating file handler.

    The directory ``path`` is created when missing; log records rotate at
    10 MB across 5 backup files, UTF-8 encoded.
    """
    if not os.path.exists(path):
        os.makedirs(path)

    file_handler = handlers.RotatingFileHandler(
        filename=join(path, 'anaconda_jsonserver.log'),
        maxBytes=10000000,
        backupCount=5,
        encoding='utf-8'
    )
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s: %(levelname)-8s: %(message)s')
    )

    root = logging.getLogger('')
    root.setLevel(logging.DEBUG)
    root.addHandler(file_handler)
    return root
def log_traceback():
    """Log the currently handled exception's traceback at ERROR level."""
    logging.error(traceback.format_exc())
if __name__ == "__main__":
    WINDOWS = os.name == 'nt'
    LINUX = platform.system().lower() == 'linux'
    # On Linux the server listens on a unix domain socket and only needs the
    # Sublime Text PID; elsewhere it listens on a TCP port, so the usage
    # string asks for a port as well.
    opt_parser = OptionParser(usage=(
        'usage: %prog -p <project> -e <extra_paths> port'
    )) if WINDOWS else OptionParser(usage=(
        "usage: %prog -p <project> -e <extra_paths> ST3_PID")
    )
    opt_parser.add_option(
        '-p', '--project', action='store', dest='project', help='project name'
    )
    opt_parser.add_option(
        '-e', '--extra_paths', action='store', dest='extra_paths',
        help='extra paths (separed by comma) that should be added to sys.paths'
    )
    options, args = opt_parser.parse_args()
    port, PID = None, None
    if not LINUX:
        # non-linux platforms (Windows/macOS): positional args are port + PID
        if len(args) != 2:
            opt_parser.error('you have to pass a port number and PID')
        port = int(args[0])
        PID = args[1]
    else:
        # linux: only the ST3 PID is required, a unix socket is used
        if len(args) != 1:
            opt_parser.error('you have to pass a Sublime Text 3 PID')
        PID = args[0]
    if options.project is not None:
        # isolate the jedi cache and the log directory per project
        jedi_settings.cache_directory = join(
            jedi_settings.cache_directory, options.project
        )
        log_directory = join(log_directory, options.project)
        if not os.path.exists(jedi_settings.cache_directory):
            os.makedirs(jedi_settings.cache_directory)
    if options.extra_paths is not None:
        # prepend user-supplied paths so they win over system packages
        for path in options.extra_paths.split(','):
            if path not in sys.path:
                sys.path.insert(0, path)
    logger = get_logger(log_directory)
    try:
        server = None
        if not LINUX:
            server = JSONServer(('localhost', port))
        else:
            # (re)create the unix socket, removing any stale socket file
            unix_socket_path = UnixSocketPath(options.project)
            if not os.path.exists(dirname(unix_socket_path.socket)):
                os.makedirs(dirname(unix_socket_path.socket))
            if os.path.exists(unix_socket_path.socket):
                os.unlink(unix_socket_path.socket)
            server = JSONServer(unix_socket_path.socket)
        logger.info(
            'Anaconda Server started in {0} for '
            'PID {1} with cache dir {2}{3}'.format(
                port or unix_socket_path.socket, PID,
                jedi_settings.cache_directory,
                ' and extra paths {0}'.format(
                    options.extra_paths
                ) if options.extra_paths is not None else ''
            )
        )
    except Exception as error:
        # startup failed: log, close any half-open server and bail out
        log_traceback()
        logger.error(str(error))
        if server is not None:
            server.shutdown()
        sys.exit(-1)
    server.logger = logger
    # start PID checker thread
    # the sentinel PID value 'DEBUG' disables the watchdog entirely
    if PID != 'DEBUG':
        checker = Checker(server, pid=PID, delta=1)
        checker.start()
    else:
        logger.info('Anaconda Server started in DEBUG mode...')
        print('DEBUG MODE')
        DEBUG_MODE = True
        set_debug_function(notices=True)
    # start the server
    server.serve_forever()
| 11,544 | Python | .py | 316 | 27.183544 | 79 | 0.588847 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,022 | anaconda_handler.py | DamnWidget_anaconda/anaconda_server/lib/anaconda_handler.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import inspect
from .compat import AnacondaHandlerProvider
class AnacondaHandler(AnacondaHandlerProvider):
    """Base class for every anaconda JsonServer handler.

    ``command`` names a method on the subclass; :meth:`run` dispatches to
    it, passing only those entries of ``data`` that match the method's
    parameter names.  Subclasses overriding the constructor must call it
    with ``super(HandlerName, self).__init__(command, data, uid, ...)``.
    """

    def __init__(self, command, data, uid, vid, settings, callback, debug=False):
        self.command = command
        self.data = data
        self.uid = uid
        self.vid = vid
        self.settings = settings
        self.callback = callback
        self.debug = debug

    def run(self):
        """Dispatch to the method named by ``self.command``."""
        method = getattr(self, self.command)
        try:
            code_object = method.func_code
        except AttributeError:
            # renamed to __code__ in Python 3
            code_object = method.__code__
        # keep only the request payload entries the method actually accepts
        accepted = inspect.getargs(code_object).args
        kwargs = {
            argument: value
            for argument, value in self.data.items()
            if argument in accepted
        }
        method(**kwargs)

    @classmethod
    def get_handler(cls, handler_type):
        """Return the registered handler class for ``handler_type`` (or None)."""
        return cls._registry.get(handler_type)
| 1,740 | Python | .py | 42 | 33.642857 | 85 | 0.663501 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,023 | path.py | DamnWidget_anaconda/anaconda_server/lib/path.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import platform
# Per-platform log directory locations, keyed by ``platform.system().lower()``.
logpath = {
    'linux': os.path.join('~', '.local', 'share', 'anaconda', 'logs'),
    'darwin': os.path.join('~', 'Library', 'Logs', 'anaconda'),
    'windows': os.path.join(os.getenv('APPDATA') or '~', 'Anaconda', 'Logs')
}

# ``platform.system()`` yields 'Linux'/'Darwin'/'Windows' on the common
# platforms; on anything else (the BSDs, etc.) ``dict.get`` used to return
# None and ``expanduser(None)`` crashed at import time.  Fall back to the
# linux-style location instead; behaviour on known platforms is unchanged.
log_directory = os.path.expanduser(
    logpath.get(platform.system().lower(), logpath['linux'])
)
| 464 | Python | .py | 12 | 36 | 76 | 0.671875 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,024 | meta_handler.py | DamnWidget_anaconda/anaconda_server/lib/meta_handler.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .registry import HandlerRegistry
class AnacondaHandlerMeta(type):
    """Metaclass that auto-registers anaconda handler classes.

    The first class built with this metaclass attaches a single shared
    HandlerRegistry to the hierarchy; every subsequently defined class
    that declares a ``__handler_type__`` attribute is registered in it
    under that type.
    """
    def __init__(cls, name, bases, attrs):
        if not hasattr(cls, '_registry'):
            cls._registry = HandlerRegistry()
        if hasattr(cls, '__handler_type__'):
            cls._registry.register(cls)
| 466 | Python | .py | 11 | 36.090909 | 65 | 0.674833 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,025 | __init__.py | DamnWidget_anaconda/anaconda_server/lib/__init__.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
| 128 | Python | .py | 2 | 62.5 | 65 | 0.776 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,026 | contexts.py | DamnWidget_anaconda/anaconda_server/lib/contexts.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
"""Anaconda JsonServer contexts
"""
import sys
import json
from contextlib import contextmanager
@contextmanager
def json_decode(data):
    """Context manager that yields ``data`` (bytes) decoded from JSON.

    Falls back to ``eval`` when the payload is not valid JSON, and to the
    plain decoded string when even that fails.

    NOTE(review): because the ``yield`` sits inside the ``try`` blocks,
    exceptions raised in the ``with`` body are delivered here and can be
    swallowed by these handlers — be careful when restructuring.
    """
    PY26 = False
    # escape raw tab characters so they survive json.loads
    data = data.replace(b'\t', b'\\t')
    if sys.version_info < (2, 6, 5):
        PY26 = True
        fixed_keys = {}
    try:
        if PY26:
            # json on python < 2.6.5 returns unicode keys which cannot be
            # used as **kwargs; coerce them to str
            for k, v in json.loads(data.decode('utf8')).iteritems():
                fixed_keys[str(k)] = v
            yield fixed_keys
        else:
            yield json.loads(data.decode('utf8'))
    except ValueError:
        try:
            # SECURITY NOTE(review): ``eval`` on data received over the
            # socket executes arbitrary Python — kept for backwards
            # compatibility with non-JSON payloads, but consider
            # ast.literal_eval instead.
            yield eval(data)
        except Exception:
            yield str(data.decode('utf8'))
| 766 | Python | .py | 26 | 22.730769 | 68 | 0.609524 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,027 | registry.py | DamnWidget_anaconda/anaconda_server/lib/registry.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import sys
import logging
class HandlerRegistry(object):
    """Registry of anaconda JsonServer handler classes, keyed by type."""

    initialized = False

    def __init__(self):
        self._handlers = {}

    def initialize(self):
        """Load handlers from installed anaconda plugins (idempotent)."""
        if not self.initialized:
            self._import_plugin_handlers()
            self.initialized = True

    def get(self, handler_type):
        """Return the handler registered for ``handler_type`` or None."""
        return self._handlers.get(handler_type)

    def register(self, handler):
        """Register ``handler`` under its ``__handler_type__``."""
        self._handlers[handler.__handler_type__] = handler

    def _import_plugin_handlers(self):
        """Import handlers from anaconda plugins.

        Scans sibling ``anaconda_*`` package directories, adds each
        package's ``plugin`` folder to sys.path and imports its
        ``handlers_<lang>`` module (importing triggers registration via
        the metaclass).
        """
        base = os.path.join(os.path.dirname(__file__), '../../../')
        candidates = [
            os.path.join(base, entry) for entry in os.listdir(base)
            if entry.startswith('anaconda_')
        ]
        for package in candidates:
            if 'vagrant' in package or not os.path.isdir(package):
                continue
            lang = package.rsplit('anaconda_', 1)[1]
            sys.path.append('{}/plugin'.format(package))
            mod_name = 'handlers_{}'.format(lang)
            module = __import__(mod_name, globals(), locals())
            logging.info(
                '[anaconda_plugins] imported handlers for {}'.format(module)
            )
| 1,594 | Python | .py | 44 | 27.409091 | 73 | 0.588657 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,028 | __init__.py | DamnWidget_anaconda/anaconda_server/lib/compat/__init__.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sys
if sys.version_info >= (3, 0):
from .python3 import AnacondaHandlerProvider
else:
from python2 import AnacondaHandlerProvider
__all__ = ['AnacondaHandlerProvider']
| 315 | Python | .py | 8 | 36.75 | 65 | 0.774834 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,029 | python2.py | DamnWidget_anaconda/anaconda_server/lib/compat/python2.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from ..meta_handler import AnacondaHandlerMeta
class AnacondaHandlerProvider:
    """Convenience base class wiring in AnacondaHandlerMeta (Python 2 syntax).

    Subclasses are auto-registered by the metaclass.
    """
    __metaclass__ = AnacondaHandlerMeta
| 292 | Python | .py | 7 | 38.285714 | 65 | 0.771429 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,030 | python3.py | DamnWidget_anaconda/anaconda_server/lib/compat/python3.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from ..meta_handler import AnacondaHandlerMeta
class AnacondaHandlerProvider(metaclass=AnacondaHandlerMeta):
    """Convenience base class wiring in AnacondaHandlerMeta (Python 3 syntax).

    Subclasses are auto-registered by the metaclass.
    """
| 282 | Python | .py | 6 | 44 | 65 | 0.794118 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,031 | jedi_handler.py | DamnWidget_anaconda/anaconda_server/handlers/jedi_handler.py | # -*- coding: utf8 -*-
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import jedi
from lib.anaconda_handler import AnacondaHandler
from jedi.api import refactoring as jedi_refactor
from commands import Doc, Goto, GotoAssignment, Rename, FindUsages
from commands import CompleteParameters, AutoComplete
logger = logging.getLogger('')
class JediHandler(AnacondaHandler):
    """Handle requests to execute Jedi related commands in the JsonServer.

    The JsonServer instantiates an object of this class passing in the
    method to execute as it came from the Sublime Text 3 anaconda plugin.
    """

    def run(self):
        """Call the specific method (override base class)."""
        # wrap the configured callback so jedi's in-memory caches are
        # purged right after every command completes
        self.real_callback = self.callback
        self.callback = self.handle_result_and_purge_cache
        super(JediHandler, self).run()

    def handle_result_and_purge_cache(self, result):
        """Forward the result and purge the in-memory jedi caches."""
        try:
            jedi.cache.clear_time_caches()
        except Exception:
            # fix: was a bare ``except`` that also swallowed SystemExit and
            # KeyboardInterrupt; older jedi releases only expose
            # clear_caches(), so fall back to it
            jedi.cache.clear_caches()
        self.real_callback(result)

    @property
    def script(self):
        """Generate a new valid Jedi Script and return it back."""
        return self.jedi_script(**self.data)

    def jedi_script(
        self, source, line, offset, filename='', encoding='utf8', **kw
    ):
        """Generate a usable jedi.Script for ``source``.

        ``line``, ``offset`` and ``encoding`` arrive in the request
        payload and are accepted here, but only ``source`` and
        ``filename`` are used to build the Script.
        """
        jedi_project = jedi.get_default_project(filename)
        return jedi.Script(source, project=jedi_project)

    def rename(self, directories, new_word):
        """Rename the object under the cursor to ``new_word``."""
        Rename(
            self.callback,
            self.uid,
            self.script,
            directories,
            new_word,
            jedi_refactor,
        )

    def autocomplete(self):
        """Run the autocomplete command."""
        AutoComplete(
            self.callback,
            self.data.get("line", 1),
            self.data.get("offset", 0),
            self.uid,
            self.script,
        )

    def parameters(self):
        """Run the complete-parameters command."""
        CompleteParameters(
            self.callback,
            self.data.get("line", 1),
            self.data.get("offset", 0),
            self.uid,
            self.script,
            self.settings,
        )

    def usages(self):
        """Run the find-usages command."""
        FindUsages(
            self.callback,
            self.data.get("line", 1),
            self.data.get("offset", 0),
            self.uid,
            self.script,
        )

    def goto(self):
        """Run the goto command."""
        Goto(
            self.callback,
            self.data.get("line", 1),
            self.data.get("offset", 0),
            self.uid,
            self.script,
        )

    def goto_assignment(self):
        """Run the goto-assignment command."""
        GotoAssignment(
            self.callback,
            self.data.get("line", 1),
            self.data.get("offset", 0),
            self.uid,
            self.script,
        )

    def doc(self, html=False):
        """Run the doc command (plain text or HTML)."""
        Doc(
            self.callback,
            self.data.get("line", 1),
            self.data.get("offset", 0),
            self.uid,
            self.script,
            html,
        )
| 3,381 | Python | .py | 103 | 23.524272 | 76 | 0.575823 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,032 | qa_handler.py | DamnWidget_anaconda/anaconda_server/handlers/qa_handler.py | # -*- coding: utf8 -*-
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from commands import McCabe
from lib.anaconda_handler import AnacondaHandler
from linting.anaconda_mccabe import AnacondaMcCabe
class QAHandler(AnacondaHandler):
    """Handle requests to execute quality assurance commands from the JsonServer."""

    def mccabe(self, code, threshold, filename):
        """Report McCabe code complexity errors for ``code``."""
        complexity_checker = AnacondaMcCabe
        McCabe(
            self.callback, self.uid, self.vid, complexity_checker,
            code, threshold, filename
        )
| 646 | Python | .py | 16 | 34.75 | 79 | 0.716346 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,033 | python_lint_handler.py | DamnWidget_anaconda/anaconda_server/handlers/python_lint_handler.py | # -*- coding: utf8 -*-
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import sys
from functools import partial
sys.path.append(os.path.join(os.path.dirname(__file__), '../../anaconda_lib'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from import_validator import Validator
from linting.anaconda_pep8 import Pep8Linter
from lib.anaconda_handler import AnacondaHandler
from linting.anaconda_pyflakes import PyFlakesLinter
from linting.anaconda_mypy import MyPy as AnacondaMyPy
from linting.anaconda_pep257 import PEP257 as AnacondaPep257
from commands import PyFlakes, PEP257, PEP8, PyLint, ImportValidator, MyPy
try:
from linting.anaconda_pylint import PyLinter
from linting.anaconda_pylint import numversion
PYLINT_AVAILABLE = True
except ImportError:
PYLINT_AVAILABLE = False
class PythonLintHandler(AnacondaHandler):
    """Handle requests to execute Python linting commands from the JsonServer.

    Each enabled linter appends its findings to ``self._errors``; linters
    that could not run at all record a message in ``self._failures``.  The
    aggregated result is sent back through ``self.callback``.
    """
    def __init__(
        self, command, data, uid, vid, settings, callback, debug=False
    ):
        self.uid = uid
        self.vid = vid
        self.data = data
        self.debug = debug
        self.callback = callback
        self.command = command
        self.settings = settings
        # which linters to run; toggled by _configure_linters() from the
        # user settings (the 'mypy' key is added there when enabled)
        self._linters = {
            'pyflakes': False,
            'pylint': False,
            'pep8': False,
            'pep257': False,
            'import_validator': False,
        }
        self._errors = []
        self._failures = []
    def lint(self, code=None, filename=None):
        """Run every enabled linter over ``code``; called from the JsonServer."""
        self._configure_linters()
        for linter_name, expected in self._linters.items():
            if expected is True:
                # linter names double as method names on this class
                func = getattr(self, linter_name)
                func(code, filename)
        # report failure only when no linter produced any result at all
        if len(self._errors) == 0 and len(self._failures) > 0:
            self.callback(
                {
                    'success': False,
                    'errors': '. '.join([str(e) for e in self._failures]),
                    'uid': self.uid,
                    'vid': self.vid,
                }
            )
            return
        self.callback(
            {
                'success': True,
                'errors': self._errors,
                'uid': self.uid,
                'vid': self.vid,
            }
        )
    def pyflakes(self, code=None, filename=None):
        """Run the PyFlakes linter and merge its results."""
        lint = PyFlakesLinter
        PyFlakes(
            self._merge,
            self.uid,
            self.vid,
            lint,
            self.settings,
            code,
            filename,
        )
    def pep8(self, code=None, filename=None):
        """Run the pep8 linter and merge its results."""
        lint = Pep8Linter
        PEP8(
            self._merge,
            self.uid,
            self.vid,
            lint,
            self.settings,
            code,
            filename,
        )
    def pep257(self, code=None, filename=None):
        """Run the pep257 linter and merge its results."""
        lint = AnacondaPep257
        ignore = self.settings.get('pep257_ignore')
        PEP257(self._merge, self.uid, self.vid, lint, ignore, code, filename)
    def pylint(self, code=None, filename=None):
        """Run the pylint linter and merge its (normalized) results."""
        if not PYLINT_AVAILABLE:
            errors = 'Your configured python interpreter can\'t import pylint'
            self._failures.append(errors)
            return
        rcfile = self.settings.get('pylint_rcfile', False)
        # older pylint releases need the settings threaded through to the
        # normalizer via functools.partial
        if numversion < (2, 4, 4):
            PyLint(
                partial(self._normalize, self.settings),
                self.uid,
                self.vid,
                PyLinter,
                rcfile,
                filename,
            )
        else:
            PyLint(
                self._normalize, self.uid, self.vid, PyLinter, rcfile, filename
            )
    def import_validator(self, code, filename=None):
        """Run the import validator linter and merge its results."""
        lint = Validator
        ImportValidator(
            self._merge,
            self.uid,
            self.vid,
            lint,
            code,
            filename,
            self.settings,
        )
    def mypy(self, code=None, filename=None):
        """Run the mypy linter and merge its results."""
        lint = AnacondaMyPy
        MyPy(
            self._merge,
            self.uid,
            self.vid,
            lint,
            code,
            filename,
            self.mypypath,
            self.settings,
        )
    def _normalize(self, data):
        """Normalize pylint output into anaconda's error format before merging."""
        normalized_errors = []
        for error_level, error_data in data.get('errors', {}).items():
            pylint_ignores = self.settings.get('pylint_ignore', [])
            pylint_rcfile = self.settings.get('pylint_rcfile')
            for error in error_data:
                try:
                    # an rcfile takes precedence over the ignore list
                    if error['code'] in pylint_ignores and not pylint_rcfile:
                        continue
                except TypeError:
                    print(
                        'Anaconda: pylint_ignore option must be a list of '
                        'strings but we got a {} '.format(type(pylint_ignores))
                    )
                normalized_error = {
                    'underline_range': True,
                    'level': error_level,
                    'message': error['message'],
                    'offset': int(error.get('offset', 0)),
                    'lineno': int(error['line']),
                }
                normalized_errors.append(normalized_error)
        if data.get('errors') is not None:
            data['errors'] = normalized_errors
        self._merge(data)
    def _configure_linters(self):
        """Enable or disable linters based on the user settings."""
        self._linters['pyflakes'] = self.settings.get('use_pyflakes', True)
        self._linters['pylint'] = self.settings.get('use_pylint', False)
        self._linters['pep257'] = self.settings.get('use_pep257', False)
        self._linters['mypy'] = self.settings.get('use_mypy', False)
        self._linters['pep8'] = self.settings.get('pep8', True)
        self._linters['import_validator'] = self.settings.get(
            'validate_imports', False
        )
        # disable pyflakes if pylint is in use
        if self._linters['pylint'] is True:
            self._linters['pyflakes'] = False
        if self._linters['mypy']:
            self.mypypath = self.settings.get('mypypath')
    def _merge(self, lint_result):
        """Accumulate a single linter's result into errors or failures."""
        if lint_result['success'] is True:
            self._errors += lint_result['errors']
        else:
            self._failures.append(lint_result['error'])
| 6,816 | Python | .py | 188 | 25.12766 | 79 | 0.544144 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,034 | __init__.py | DamnWidget_anaconda/anaconda_server/handlers/__init__.py | # Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../anaconda_lib'))
from .qa_handler import QAHandler
from .jedi_handler import JediHandler
from .autoformat_handler import AutoFormatHandler
from .python_lint_handler import PythonLintHandler
# Map of handler type identifiers (as sent by the Sublime Text plugin)
# to the handler classes that service them.
ANACONDA_HANDLERS = {
    'qa': QAHandler,
    'jedi': JediHandler,
    'autoformat': AutoFormatHandler,
    'python_linter': PythonLintHandler
}
# public API of this package
__all__ = [
    'QAHandler', 'JediHandler', 'AutoFormatHandler', 'PythonLintHandler',
    'ANACONDA_HANDLERS'
]
| 663 | Python | .py | 19 | 32.368421 | 78 | 0.757433 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,035 | autoformat_handler.py | DamnWidget_anaconda/anaconda_server/handlers/autoformat_handler.py | # -*- coding: utf8 -*-
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from commands import AutoPep8
from lib.anaconda_handler import AnacondaHandler
class AutoFormatHandler(AnacondaHandler):
    """Handle requests to execute auto format commands from the JsonServer."""

    def pep8(self, code):
        """Reformat ``code`` with autopep8 and send it back via the callback."""
        formatter_args = (self.callback, self.uid, self.vid, code, self.settings)
        AutoPep8(*formatter_args)
| 510 | Python | .py | 12 | 38 | 73 | 0.729675 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,036 | autocomplete.py | DamnWidget_anaconda/anaconda_server/commands/autocomplete.py | # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
DEBUG_MODE = False
class AutoComplete(Command):
    """Return Jedi completions for the configured (line, column) position."""

    def __init__(self, callback, line, col, uid, script):
        self.script = script
        self.line = line
        self.col = col
        super(AutoComplete, self).__init__(callback, uid)

    def run(self):
        """Run the command."""
        try:
            completions = self.script.complete(line=self.line, column=self.col)
            if DEBUG_MODE is True:
                logging.info(completions)
            # completion entries are (display, insertion) tuples in the
            # shape Sublime Text expects
            data = [
                ('{0}\t{1}'.format(comp.name, comp.type), comp.name)
                for comp in completions
            ]
            self.callback(
                {'success': True, 'completions': data, 'uid': self.uid}
            )
        except Exception as error:
            # fix: error message read "as raised" instead of "has raised"
            msg = 'The underlying Jedi library has raised an exception'
            logging.error(msg)
            logging.error(error)
            print(traceback.format_exc())
            if DEBUG_MODE:
                logging.debug(traceback.format_exc())
            self.callback(
                {'success': False, 'error': str(error), 'uid': self.uid}
            )
| 1,352 | Python | .py | 36 | 27.416667 | 79 | 0.573068 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,037 | autoformat.py | DamnWidget_anaconda/anaconda_server/commands/autoformat.py | # -*- coding: utf8 -*-
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
"""
This file is a wrapper for autopep8 library.
"""
import sys
import logging
import traceback
from .base import Command
from autopep.autopep8_lib import autopep8
class AutoPep8(Command):
    """Run autopep8 over a buffer and send back the reformatted code."""

    def __init__(self, callback, uid, vid, code, settings):
        self.vid = vid
        self.code = code
        self.options = autopep8.parse_args(self.parse_settings(settings))
        super(AutoPep8, self).__init__(callback, uid)

    def run(self):
        """Run the command."""
        if sys.version_info < (3, 0):
            # autopep8 expects unicode input on python 2
            self.code = unicode(self.code)  # noqa
        try:
            self.callback({
                'success': True,
                'buffer': autopep8.fix_lines(
                    self.code.splitlines(), options=self.options),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            logging.error(str(error))
            print(traceback.format_exc().splitlines())
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })

    def parse_settings(self, settings):
        """Map anaconda settings to autopep8 command line arguments."""
        args = ['-a'] * settings.get('aggressive', 0)

        ignored = settings.get('autoformat_ignore', [])
        if len(ignored) > 0:
            args.append('--ignore={0}'.format(','.join(ignored)))

        selected = settings.get('autoformat_select', [])
        if len(selected) > 0:
            args.append('--select={0}'.format(','.join(selected)))

        args.append('--max-line-length={0}'.format(
            settings.get('pep8_max_line_length', 79)))
        args.append('--indent-size={0}'.format(settings.get('tab_size', 4)))
        # autopep8 requires a positional filename argument; it is never read
        args.append('anaconda_rocks')
        return args
| 2,167 | Python | .py | 59 | 27.067797 | 73 | 0.547062 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,038 | lint.py | DamnWidget_anaconda/anaconda_server/commands/lint.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class Lint(Command):
    """Run the PyFlakes and Pep8 linters and send the results back."""

    def __init__(self, callback, uid, vid, linter, settings, code, filename):
        self.vid = vid
        self.code = code
        self.linter = linter
        self.settings = settings
        self.filename = filename
        super(Lint, self).__init__(callback, uid)

    def run(self):
        """Run the command."""
        try:
            result = {
                'success': True,
                'errors': self.linter.Linter().run_linter(
                    self.settings, self.code, self.filename),
                'uid': self.uid,
                'vid': self.vid
            }
            self.callback(result)
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                'error': error,
                'uid': self.uid,
                'vid': self.vid
            })
| 1,192 | Python | .py | 35 | 23.8 | 77 | 0.547433 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,039 | pep257.py | DamnWidget_anaconda/anaconda_server/commands/pep257.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class PEP257(Command):
    """Run the pep257 docstring linter and send the results back."""

    def __init__(self, callback, uid, vid, linter, ignore, code, filename):
        self.vid = vid
        self.code = code
        self.filename = filename
        self.ignore = ignore
        self.linter = linter
        super(PEP257, self).__init__(callback, uid)

    def run(self):
        """Run the command."""
        try:
            result = {
                'success': True,
                'errors': self.linter(
                    self.code, self.filename, self.ignore).execute(),
                'uid': self.uid,
                'vid': self.vid,
            }
            self.callback(result)
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                'error': error,
                'uid': self.uid,
                'vid': self.vid
            })
| 1,167 | Python | .py | 35 | 23.085714 | 75 | 0.540036 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,040 | complete_parameters.py | DamnWidget_anaconda/anaconda_server/commands/complete_parameters.py | # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .base import Command, get_function_parameters
class CompleteParameters(Command):
    """Build a Sublime snippet that completes the parameters of a call."""

    def __init__(self, callback, line, col, uid, script, settings):
        self.script = script
        self.line = line
        self.col = col
        self.settings = settings
        super(CompleteParameters, self).__init__(callback, uid)

    def run(self):
        """Run the command."""
        complete_all = self.settings.get('complete_all_parameters', False)
        try:
            signatures = self.script.get_signatures(
                line=self.line, column=self.col
            )[0]
        except IndexError:
            # no signature available at this position
            signatures = None

        completions = []
        for index, param in enumerate(get_function_parameters(signatures)):
            if len(param) == 2:
                name, value = param
            else:
                # no (usable) default value for this parameter
                name, value = param[0], None
            name = name.replace('param ', '')
            placeholder = index + 1
            if value is None:
                completions.append('${%d:%s}' % (placeholder, name))
            elif complete_all is True:
                # parameters with defaults are only filled in when the
                # user asked for every parameter
                completions.append('%s=${%d:%s}' % (name, placeholder, value))

        self.callback(
            {
                'success': True,
                'template': ', '.join(completions),
                'uid': self.uid,
            }
        )
| 1,537 | Python | .py | 41 | 26.268293 | 76 | 0.53468 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,041 | goto.py | DamnWidget_anaconda/anaconda_server/commands/goto.py | # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .base import Command
class Goto(Command):
    """Get back a python definition where to go."""

    def __init__(self, callback, line, col, uid, script):
        self.script = script
        self.line = line
        self.col = col
        super(Goto, self).__init__(callback, uid)

    def _get_definitions(self):
        """Return the definitions to jump to for the configured position."""
        definitions = self.script.goto(
            line=self.line, column=self.col, follow_imports=True
        )
        if all(d.type == 'import' for d in definitions):
            # goto() only found import statements: resolve the actual
            # definitions instead.  fix: ``Script.goto_definitions()`` no
            # longer exists in the positional jedi API this file already
            # uses (see goto()/infer() calls elsewhere); ``infer`` is its
            # replacement.
            definitions = self.script.infer(line=self.line, column=self.col)
        return definitions

    def run(self):
        """Run the command."""
        try:
            definitions = self._get_definitions()
        except Exception:
            # narrowed from a bare ``except`` which also caught SystemExit
            # and KeyboardInterrupt
            data = []
            success = False
        else:
            # we use a set here to avoid duplication
            data = set(
                [
                    (i.full_name, i.module_path, i.line, i.column + 1)
                    for i in definitions
                    if not i.in_builtin_module()
                ]
            )
            success = True
        self.callback(
            {'success': success, 'result': list(data), 'uid': self.uid}
        )
class GotoAssignment(Goto):
    """Get back a python assignment where to go."""

    def _get_definitions(self):
        """Return the assignments to jump to for the configured position."""
        # fix: ``Script.goto_assignments()`` no longer exists in the
        # positional jedi API used by this file; ``goto`` (without
        # follow_imports) is the equivalent call.
        return self.script.goto(line=self.line, column=self.col)
| 1,479 | Python | .py | 41 | 26.097561 | 71 | 0.561711 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,042 | __init__.py | DamnWidget_anaconda/anaconda_server/commands/__init__.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .doc import Doc
from .mypy import MyPy
from .lint import Lint
from .goto import Goto, GotoAssignment
from .pep8 import PEP8
from .pep257 import PEP257
from .mccabe import McCabe
from .rename import Rename
from .pylint import PyLint
from .pyflakes import PyFlakes
from .autoformat import AutoPep8
from .find_usages import FindUsages
from .autocomplete import AutoComplete
from .import_validator import ImportValidator
from .complete_parameters import CompleteParameters
__all__ = [
'Doc',
'MyPy',
'Lint',
'Goto',
'GotoAssignment',
'PEP8',
'PEP257',
'McCabe',
'Rename',
'PyLint',
'PyFlakes',
'AutoPep8',
'FindUsages',
'AutoComplete',
'ImportValidator',
'CompleteParameters'
]
| 874 | Python | .py | 35 | 22.028571 | 65 | 0.74491 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,043 | base.py | DamnWidget_anaconda/anaconda_server/commands/base.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
class Command(object):
    """Base class for every command that runs on the Json Server.

    NOTE: the constructor executes the command immediately — merely
    instantiating a subclass invokes its ``run()`` method as a side
    effect.
    """
    def __init__(self, callback, uid):
        self.uid = uid
        self.callback = callback
        self.run()
def get_function_parameters(call_def):
    """Return the list of function parameters, ready for sublime completion.

    Each entry is a list of [name] or [name, default_value] pieces taken
    from the parameter description.  ``self`` and star-args are skipped.
    """
    if not call_def:
        return []
    descriptions = (param.description for param in call_def.params)
    return [
        [chunk.strip() for chunk in description.split('=')]
        for description in descriptions
        if '*' not in description and description != 'self'
    ]
| 820 | Python | .py | 23 | 29.304348 | 69 | 0.656489 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,044 | doc.py | DamnWidget_anaconda/anaconda_server/commands/doc.py | # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sys
import logging
from .base import Command
# We are forced to use this not Pythonic import approach as the incomplete
# module `future.moves.html` distributed by https://github.com/PythonCharmers
# breaks the doc.py logic if it is present in the user sysrem as it contains
# just the `escape` method but not the `unescape` one so even if it get
# imported, this command just crashes and forces a JsonServer new instance
if sys.version_info >= (3, 0):
import html
if sys.version_info < (3, 4):
import html as cgi
from html.parser import HTMLParser
else:
# python2 uses cgi
import cgi
from HTMLParser import HTMLParser
class Doc(Command):
    """Get back the documentation for the definition under the cursor."""

    def __init__(self, callback, line, col, uid, script, html):
        self.script = script
        self.html = html
        self.line = line
        self.col = col
        super(Doc, self).__init__(callback, uid)

    def run(self):
        """Run the command."""
        processed = []
        try:
            definitions = self.script.infer(line=self.line, column=self.col)
        except Exception as error:
            logging.debug(error)
            logging.debug(self.script)
            definitions = []
        if not definitions:
            success = False
            docs = []
        else:
            docs = []
            success = True
            for definition in definitions:
                # skip duplicated definitions
                if definition not in processed:
                    docs.append(
                        self._plain(definition)
                        if not self.html
                        else self._html(definition)
                    )
                    processed.append(definition)
        self.callback(
            {
                'success': success,
                'doc': (
                    '<br><br>' if self.html else '\n' + '-' * 79 + '\n'
                ).join(docs),
                'uid': self.uid,
            }
        )

    def _plain(self, definition):
        """Generate a documentation string for use as plain text.

        fix: first parameter was misspelled ``sef`` (worked positionally,
        but broke the convention and any keyword use).
        """
        return 'Docstring for {0}\n{1}\n{2}'.format(
            definition.full_name, '=' * 40, definition.docstring()
        )

    def _html(self, definition):
        """Generate a documentation string in HTML format."""
        if sys.version_info >= (3, 4):
            escaped_doc = html.escape(
                html.unescape(definition.docstring()), quote=False
            )
        else:
            try:
                escaped_doc = cgi.escape(
                    HTMLParser.unescape.__func__(
                        HTMLParser, definition.docstring().encode('utf8')
                    )
                )
            except AttributeError:
                # Python 3.x < 3.4
                escaped_doc = cgi.escape(
                    HTMLParser.unescape(HTMLParser, definition.docstring())
                )
        escaped_doc = escaped_doc.replace('\n', '<br>')
        return '{0}\n{1}'.format(definition.full_name, escaped_doc)
| 3,173 | Python | .py | 84 | 26.714286 | 77 | 0.551432 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,045 | rename.py | DamnWidget_anaconda/anaconda_server/commands/rename.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import logging
import traceback
from .base import Command
class Rename(Command):
    """Rename the symbol under the cursor across the project directories
    """

    def __init__(self, callback, uid, script, directories, new_word, refactor):
        self.script = script
        self.new_word = new_word
        self.jedi_refactor = refactor
        self.directories = directories
        super(Rename, self).__init__(callback, uid)

    def run(self):
        """Run the command
        """
        renames = {}
        try:
            usages = self.script.usages()
            proposals = self.jedi_refactor.rename(self.script, self.new_word)
            for u in usages:
                path = os.path.dirname(u.module_path)
                if self.is_same_path(path):
                    if u.module_path not in renames:
                        renames[u.module_path] = []

                    thefile = proposals.new_files().get(u.module_path)
                    if thefile is None:
                        continue

                    # usage lines are 1-based, the splitlines index is 0-based
                    lineno = u.line - 1
                    line = thefile.splitlines()[lineno]
                    renames[u.module_path].append({
                        'lineno': lineno, 'line': line
                    })
            success = True
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            success = False

        self.callback({
            'success': success, 'renames': renames, 'uid': self.uid
        })

    def is_same_path(self, path):
        """Determine if the given path is one of our directories or below.

        ``os.path.commonprefix`` compares character by character, so a
        directory ``/foo/bar`` would also wrongly match ``/foo/barbaz``;
        compare whole path components instead.
        """
        candidate = os.path.normpath(path)
        for directory in self.directories:
            base = os.path.normpath(directory)
            if candidate == base or candidate.startswith(base + os.sep):
                return True

        return False
| 1,940 | Python | .py | 50 | 27.2 | 79 | 0.554904 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,046 | pep8.py | DamnWidget_anaconda/anaconda_server/commands/pep8.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class PEP8(Command):
    """Run the PEP8 linter and return back results
    """

    def __init__(self, callback, uid, vid, linter, settings, code, filename):
        self.vid = vid
        self.code = code
        self.linter = linter
        self.settings = settings
        self.filename = filename
        super(PEP8, self).__init__(callback, uid)

    def run(self):
        """Run the command
        """
        try:
            self.callback({
                'success': True,
                'errors': self.linter().lint(
                    self.settings, self.code, self.filename),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                # send the string representation: exception instances are
                # not JSON serializable (the McCabe command already does it)
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })
| 1,165 | Python | .py | 35 | 23.028571 | 77 | 0.540107 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,047 | mccabe.py | DamnWidget_anaconda/anaconda_server/commands/mccabe.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class McCabe(Command):
    """Run the McCabe complexity checker and return back results
    """

    def __init__(self, callback, uid, vid, mccabe, code, threshold, filename):
        self.vid = vid
        self.code = code
        # the checker expects a string filename, never None
        self.filename = filename if filename is not None else ''
        self.threshold = threshold
        self.mccabe = mccabe(self.code, self.filename)
        super(McCabe, self).__init__(callback, uid)

    def run(self):
        """Run the command
        """
        try:
            self.callback({
                'success': True,
                'errors': self.mccabe.get_code_complexity(self.threshold),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            # removed stray debug `print(error)`: logging already records it
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })
| 1,241 | Python | .py | 35 | 25.428571 | 78 | 0.564274 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,048 | find_usages.py | DamnWidget_anaconda/anaconda_server/commands/find_usages.py | # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .base import Command
class FindUsages(Command):
    """Get back the python usages for the object under the cursor"""

    def __init__(self, callback, line, col, uid, script):
        self.script = script
        self.line = line
        self.col = col
        super(FindUsages, self).__init__(callback, uid)

    def run(self):
        """Run the command"""
        try:
            usages = self.script.get_references(
                line=self.line, column=self.col
            )
            success = True
        except Exception:
            # narrow from the original bare `except:`; a failed jedi lookup
            # is reported as an unsuccessful result, not silently ignored
            usages = None
            success = False

        try:
            result = self._harvest(usages, full=True)
        except ValueError:
            # fall back to the short name when `full_name` is unavailable
            result = self._harvest(usages, full=False)

        self.callback(
            {
                'success': success,
                'result': result,
                'uid': self.uid,
            }
        )

    def _harvest(self, usages, full):
        """Build (name, path, line, column) tuples for the callback.

        Usages living in builtin modules are skipped; ``full`` selects
        between ``full_name`` and the plain ``name`` attribute.
        """
        if usages is None:
            return []
        return [
            ((i.full_name if full else i.name), i.module_path, i.line, i.column)
            for i in usages
            if not i.in_builtin_module()
        ]
| 1,625 | Python | .py | 48 | 19.041667 | 70 | 0.429299 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,049 | pylint.py | DamnWidget_anaconda/anaconda_server/commands/pylint.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class PyLint(Command):
    """Run PyLint and return back results
    """

    def __init__(self, callback, uid, vid, linter, rcfile, filename):
        self.vid = vid
        self.filename = filename
        self.linter = linter
        self.rcfile = rcfile
        super(PyLint, self).__init__(callback, uid)

    def run(self):
        """Run the command
        """
        try:
            self.callback({
                'success': True,
                'errors': self.linter(
                    self.filename, self.rcfile).parse_errors(),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                # send the string representation: exception instances are
                # not JSON serializable (the McCabe command already does it)
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })
| 1,122 | Python | .py | 34 | 22.705882 | 69 | 0.537963 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,050 | pyflakes.py | DamnWidget_anaconda/anaconda_server/commands/pyflakes.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class PyFlakes(Command):
    """Run the PyFlakes linter and return back results
    """

    def __init__(self, callback, uid, vid, linter, settings, code, filename):
        self.vid = vid
        self.code = code
        self.linter = linter
        self.settings = settings
        self.filename = filename
        super(PyFlakes, self).__init__(callback, uid)

    def run(self):
        """Run the command
        """
        try:
            self.callback({
                'success': True,
                'errors': self.linter().lint(
                    self.settings, self.code, self.filename),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                # send the string representation: exception instances are
                # not JSON serializable (the McCabe command already does it)
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })
| 1,177 | Python | .py | 35 | 23.371429 | 77 | 0.544974 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,051 | mypy.py | DamnWidget_anaconda/anaconda_server/commands/mypy.py |
# Copyright (C) 2013 - 2016 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class MyPy(Command):
    """Run the mypy linter and return back results
    """

    def __init__(
            self, callback, uid, vid, linter,
            code, filename, mypypath, settings):
        self.vid = vid
        self.code = code
        self.filename = filename
        self.mypypath = mypypath
        # only the mypy-specific subsection of the settings is relevant
        self.settings = settings['mypy_settings']
        self.linter = linter
        super(MyPy, self).__init__(callback, uid)

    def run(self):
        """Run the command
        """
        try:
            self.callback({
                'success': True,
                'errors': self.linter(
                    self.code, self.filename, self.mypypath, self.settings
                ).execute(),
                'uid': self.uid,
                'vid': self.vid,
            })
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc())
            self.callback({
                'success': False,
                # send the string representation: exception instances are
                # not JSON serializable (the McCabe command already does it)
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })
| 1,280 | Python | .py | 39 | 22.282051 | 74 | 0.531225 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,052 | import_validator.py | DamnWidget_anaconda/anaconda_server/commands/import_validator.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
from .base import Command
class ImportValidator(Command):
    """Run the ImportValidator to detect invalid imports
    """

    def __init__(self, callback, uid, vid, linter, code, filename, settings):
        self.vid = vid
        self.code = code
        self.linter = linter
        self.filename = filename
        self.settings = settings
        super(ImportValidator, self).__init__(callback, uid)

    def run(self):
        """Run the command
        """
        try:
            v = self.linter(self.code, self.filename, self.settings)
            self.callback({
                'success': True,
                'errors': [] if v.is_valid() else self._convert(v),
                'uid': self.uid,
                'vid': self.vid
            })
        except Exception as error:
            logging.error(error)
            logging.debug(traceback.format_exc().splitlines())
            self.callback({
                'success': False,
                # send the string representation: exception instances are
                # not JSON serializable (the McCabe command already does it)
                'error': str(error),
                'uid': self.uid,
                'vid': self.vid
            })

    def _convert(self, validator):
        """Build the error report for the given validator
        """
        errors = []
        for line, lineno in validator.errors:
            errors.append({
                'level': 'E',
                'lineno': lineno,
                'offset': 0,
                'code': 801,
                'raw_error': '[E] ImportValidator (801): {0}'.format(line),
                # NOTE(review): the placeholders here look like they are
                # filled in by the consumer side — confirm before changing
                'message': '[E] ImportValidator (%s): %s',
                'underline_range': True
            })

        return errors
| 1,744 | Python | .py | 50 | 23.92 | 77 | 0.523753 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,053 | prev_lint_error.py | DamnWidget_anaconda/commands/prev_lint_error.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.helpers import get_settings
from ..anaconda_lib.helpers import valid_languages
from ..anaconda_lib.linting.sublime import ANACONDA, update_statusbar
class AnacondaPrevLintError(sublime_plugin.WindowCommand):
    """Jump to the previous lint error in the active view
    """

    def run(self) -> None:
        self.jump(self._harvest_prev())
        update_statusbar(self.window.active_view())

    def is_enabled(self) -> bool:
        """Determines if the command is enabled
        """
        view = self.window.active_view()
        disabled = view.file_name() in ANACONDA['DISABLED']
        if disabled or not get_settings(view, 'anaconda_linting'):
            return False

        caret = view.sel()[0].begin()
        return any(
            view.match_selector(caret, 'source.{}'.format(lang))
            for lang in valid_languages()
        )

    def jump(self, lineno: int = None) -> None:
        """Jump to the given line in the view buffer
        """
        if lineno is None:
            sublime.status_message('No lint errors')
            return

        view = self.window.active_view()
        point = view.text_point(lineno, 0)
        view.sel().clear()
        view.sel().add(sublime.Region(point))
        view.show_at_center(point)

    def _harvest_prev(self) -> int:
        """Return the closest error line above the cursor (wraps around)
        """
        view = self.window.active_view()
        cur_line, _ = view.rowcol(view.sel()[0].begin())

        error_lines = set()
        vid = view.id()
        for error_type in ('ERRORS', 'WARNINGS', 'VIOLATIONS'):
            for line in ANACONDA[error_type].get(vid, {}):
                error_lines.add(int(line))

        error_lines = sorted(error_lines)
        if not error_lines:
            return None

        if cur_line is not None and error_lines[0] < cur_line:
            error_lines = [line for line in error_lines if line < cur_line]
        return error_lines[-1] if error_lines else None
| 2,264 | Python | .py | 53 | 33.886792 | 69 | 0.612226 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,054 | autoformat.py | DamnWidget_anaconda/commands/autoformat.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import logging
import traceback
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.progress_bar import ProgressBar
from ..anaconda_lib.helpers import get_settings, is_python, get_window_view
from ..anaconda_lib.jsonclient import Callback
class AnacondaAutoFormat(sublime_plugin.TextCommand):
    """Execute autopep8 formating

    Two-phase command: the first `run` sends the buffer to the worker and
    returns; when the result arrives, `get_data` stores it in the class
    attribute `data` and re-triggers this same command, whose second `run`
    sees `data` set and performs the actual buffer replacement with a
    fresh `edit` token.
    """

    # holds the worker response between the two `run` invocations
    data = None

    def run(self, edit: sublime.Edit) -> None:
        """Send the buffer for formatting, or apply a pending result."""
        if self.data is not None:
            self.replace(edit)
            return

        # NOTE(review): `aggresive` is how the setting is actually spelled
        aggresive_level = get_settings(self.view, 'aggressive', 0)
        if aggresive_level > 0:
            if not sublime.ok_cancel_dialog(
                'You have an aggressive level of {} this may cause '
                'anaconda to change things that you don\'t really want to '
                'change.\n\nAre you sure do you want to continue?'.format(
                    aggresive_level
                )
            ):
                return

        # snapshot the whole buffer; compared against the result in replace()
        self.code = self.view.substr(sublime.Region(0, self.view.size()))
        settings = {
            'aggressive': aggresive_level,
            'list-fixes': get_settings(self.view, 'list-fixes', False),
            'autoformat_ignore': get_settings(
                self.view, 'autoformat_ignore', []
            ),
            'autoformat_select': get_settings(
                self.view, 'autoformat_select', []
            ),
            'pep8_max_line_length': get_settings(
                self.view, 'pep8_max_line_length', 79
            ),
            'tab_size': get_settings(self.view, 'tab_size', 4)
        }
        try:
            messages = {
                'start': 'Autoformatting. Please wait... ',
                'end': 'Autoformatting done!',
                'fail': 'Autoformatting failed, buffer not changed.',
                'timeout': 'Autoformatting failed, buffer not changed.',
            }
            self.pbar = ProgressBar(messages)
            self.pbar.start()
            # lock the buffer while the worker is formatting it
            self.view.set_read_only(True)

            data = {
                'vid': self.view.id(),
                'code': self.code,
                'method': 'pep8',
                'settings': settings,
                'handler': 'autoformat'
            }
            timeout = get_settings(self.view, 'auto_formatting_timeout', 1)

            callback = Callback(timeout=timeout)
            callback.on(success=self.get_data)
            callback.on(error=self.on_failure)
            callback.on(timeout=self.on_failure)

            Worker().execute(callback, **data)
        # NOTE(review): bare except kept byte-identical; it also catches
        # SystemExit/KeyboardInterrupt
        except:
            logging.error(traceback.format_exc())

    def on_failure(self, *args: Any, **kwargs: Any) -> None:
        """Stop the progress bar and unlock the buffer on error/timeout."""
        self.pbar.terminate(status=self.pbar.Status.FAILURE)
        self.view.set_read_only(False)

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        return is_python(self.view, True)

    def get_data(self, data: Dict[str, Any]) -> None:
        """Collect the returned data from autopep8

        Stores the result and re-runs the command so `run` gets a valid
        `edit` object to mutate the buffer with.
        """
        self.data = data
        self.pbar.terminate()
        self.view.set_read_only(False)
        self.view.run_command('anaconda_auto_format')

    def replace(self, edit: sublime.Edit) -> None:
        """Replace the old code with what autopep8 gave to us
        """
        view = get_window_view(self.data['vid'])
        # only touch the buffer if autopep8 actually changed something
        if self.code != self.data.get('buffer'):
            region = sublime.Region(0, view.size())
            view.replace(edit, region, self.data.get('buffer'))
            if get_settings(view, 'auto_formatting'):
                sublime.set_timeout(lambda: view.run_command("save"), 0)

        # reset the two-phase state for the next invocation
        self.code = None
        self.data = None
| 3,913 | Python | .py | 94 | 30.829787 | 75 | 0.578462 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,055 | next_lint_error.py | DamnWidget_anaconda/commands/next_lint_error.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.helpers import get_settings
from ..anaconda_lib.helpers import valid_languages
from ..anaconda_lib.linting.sublime import ANACONDA, update_statusbar
class AnacondaNextLintError(sublime_plugin.WindowCommand):
    """Jump to the next lint error in the active view
    """

    def run(self) -> None:
        self.jump(self._harvest_next())
        update_statusbar(self.window.active_view())

    def is_enabled(self) -> bool:
        """Determines if the command is enabled
        """
        view = self.window.active_view()
        disabled = view.file_name() in ANACONDA['DISABLED']
        if disabled or not get_settings(view, 'anaconda_linting'):
            return False

        caret = view.sel()[0].begin()
        return any(
            view.match_selector(caret, 'source.{}'.format(lang))
            for lang in valid_languages()
        )

    def jump(self, lineno: int = None) -> None:
        """Jump to the given line in the view buffer
        """
        if lineno is None:
            sublime.status_message('No lint errors')
            return

        view = self.window.active_view()
        point = view.text_point(lineno, 0)
        view.sel().clear()
        view.sel().add(sublime.Region(point))
        view.show_at_center(point)

    def _harvest_next(self) -> int:
        """Return the closest error line below the cursor (wraps around)
        """
        view = self.window.active_view()
        cur_line, _ = view.rowcol(view.sel()[0].begin())

        error_lines = set()
        vid = view.id()
        for error_type in ('ERRORS', 'WARNINGS', 'VIOLATIONS'):
            for line in ANACONDA[error_type].get(vid, {}):
                error_lines.add(int(line))

        error_lines = sorted(error_lines)
        if not error_lines:
            return None

        if cur_line is not None and error_lines[-1] > cur_line:
            error_lines = [line for line in error_lines if line > cur_line]
        return error_lines[0] if error_lines else None
| 2,260 | Python | .py | 53 | 33.811321 | 69 | 0.611517 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,056 | autoimport.py | DamnWidget_anaconda/commands/autoimport.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import re
import sublime
import sublime_plugin
from ..anaconda_lib.helpers import is_python
from ..anaconda_lib._typing import Tuple, Any # noqa
from ..anaconda_lib.linting.sublime import ANACONDA
class AnacondaAutoImport(sublime_plugin.TextCommand):
    """Insert an ``import`` statement for the undefined name under the cursor
    """

    def run(self, edit: sublime.Edit) -> None:
        self.data = None  # type: List[str]
        location = self.view.rowcol(self.view.sel()[0].begin())
        if not self._detected_undefined_name(location):
            sublime.message_dialog(
                'The word under the cursor is not an undefined name.')
            return

        for name in self.data:
            self.insert_import(edit, name)

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        return is_python(self.view, True)

    def insert_import(self, edit: sublime.Edit, name: str) -> None:
        """Insert ``import <name>`` at the guessed insertion row
        """
        iline = self._guess_insertion_line()
        import_str = 'import {name}\n\n\n'.format(name=name)
        current_lines = self.view.lines(sublime.Region(0, self.view.size()))
        import_point = current_lines[iline].begin()
        self.view.insert(edit, import_point, import_str)

    def _guess_insertion_line(self) -> int:
        """Return the row where a new import should be inserted.

        Prefer the row of the first decorator/def/class; otherwise fall
        back to just after the last existing import.  When neither exists,
        insert at the top of the buffer instead of crashing (the original
        code assumed one of the two regexes always matched and raised
        AttributeError on ``None.start()`` otherwise).
        """
        view_code = self.view.substr(sublime.Region(0, self.view.size()))
        match = re.search(r'^(@.+|def|class)\s+', view_code, re.M)
        if match is not None:
            code = view_code[:match.start()]
        else:
            # no definitions: scan the reversed buffer for a reversed
            # 'import' token to locate the last import statement
            match = re.search(r'\s+.* tropmi', view_code[::-1], re.M)
            if match is None:
                return 0
            code = view_code[:len(view_code) - match.start()]

        return len(code.split('\n')) - 1

    def _detected_undefined_name(self, location: Tuple[int, int]) -> bool:
        """Collect 'Undefined name' lint messages on the cursor's row.

        Fills ``self.data`` with the offending names; returns True when at
        least one was found.
        """
        vid = self.view.id()
        errors_mapping = {0: 'ERRORS', 1: 'WARNINGS', 2: 'VIOLATIONS'}
        for i, error_type in errors_mapping.items():
            for line, strings in ANACONDA[error_type].get(vid, {}).items():
                for string in strings:
                    if (location[0] == line and 'Undefined ' in string):
                        if self.data is None:
                            self.data = []
                        # the name is quoted in the lint message
                        self.data.append(string.split('\'')[1])

        return self.data is not None
| 2,530 | Python | .py | 52 | 39 | 79 | 0.60935 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,057 | set_python_interpreter.py | DamnWidget_anaconda/commands/set_python_interpreter.py | import logging
import traceback
import sublime
import sublime_plugin
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.helpers import is_python
from ..anaconda_lib.builder.python_builder import AnacondaSetPythonBuilder
class AnacondaSetPythonInterpreter(sublime_plugin.TextCommand):
    """Set or modify the python interpreter of the current project"""

    def run(self, edit: sublime.Edit) -> None:
        try:
            sublime.active_window().show_input_panel(
                "Python Path:", self.get_current_interpreter_path(),
                self.update_interpreter_settings, None, None
            )
        except Exception:
            logging.error(traceback.format_exc())

    def update_interpreter_settings(self, venv_path: str) -> None:
        """Update the project settings with the given interpreter path"""
        project_data = self.get_project_data()
        # Check if we have settings set in the project settings
        if project_data.get('settings', False):
            try:
                # probe the settings mapping; raises AttributeError when
                # `settings` is not a dictionary (e.g. a plain string)
                project_data['settings'].get('python_interpreter', False)
            except AttributeError:
                # fixed message typos ("Ops ... missed up")
                sublime.message_dialog(
                    'Oops, your project settings are messed up'
                )
            else:
                # Set the path and save the project
                project_data['settings']['python_interpreter'] = venv_path
                self.save_project_data(project_data)
        else:
            # This executes if the settings key is not in the project yet
            project_data.update(
                {
                    'settings': {'python_interpreter': venv_path}
                }
            )
            self.save_project_data(project_data)

        AnacondaSetPythonBuilder().update_interpreter_build_system(
            venv_path
        )

    def save_project_data(self, data: Dict[str, Any]) -> None:
        """Save the provided data into the project settings"""
        sublime.active_window().set_project_data(data)
        # fixed message typo ("successfuly")
        sublime.status_message("Python path is set successfully")

    def get_project_data(self) -> Dict[str, Any]:
        """Return the project data for the current window"""
        return sublime.active_window().project_data() or {}

    def get_current_interpreter_path(self) -> str:
        """Return the current path from the settings if possible"""
        try:
            return self.get_project_data()['settings']['python_interpreter']
        except Exception:
            return ''

    def is_enabled(self) -> bool:
        """Check whether this plugin is enabled"""
        return is_python(self.view)
| 2,761 | Python | .py | 61 | 34.557377 | 79 | 0.617188 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,058 | complete_func_args.py | DamnWidget_anaconda/commands/complete_func_args.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.callback import Callback
from ..anaconda_lib.helpers import (
get_settings, active_view, prepare_send_data, is_python
)
class AnacondaCompleteFuncargs(sublime_plugin.TextCommand):
    """Autocomplete the parameters of a function call or constructor.

    Directly ported from SublimeJEDI.
    """

    def run(self, edit: sublime.Edit, characters: str = '') -> None:
        if not get_settings(self.view, 'complete_parameters', False):
            return

        self._insert_characters(edit)

        row_col = active_view().rowcol(self.view.sel()[0].begin())
        data = prepare_send_data(row_col, 'parameters', 'jedi')
        data['settings'] = {
            option: get_settings(self.view, option, default)
            for option, default in (
                ('complete_parameters', False),
                ('complete_all_parameters', False),
                ('python_interpreter', ''),
            )
        }
        Worker().execute(Callback(on_success=self.insert_snippet), **data)

    def is_enabled(self) -> bool:
        """Only enabled while editing python code
        """
        return is_python(self.view)

    def _insert_characters(self, edit: sublime.Edit) -> None:
        """Collapse every selection region to a single caret position.

        When auto_match is enabled the caret lands after the region,
        otherwise before it.
        """
        regions = list(self.view.sel())
        auto_match = self.view.settings().get('auto_match_enabled', True)
        self.view.sel().clear()
        for region in reversed(regions):
            caret = region.end() if auto_match else region.begin()
            self.view.sel().add(sublime.Region(caret, caret))

    def insert_snippet(self, data: Dict[str, Any]) -> None:
        """Insert the snippet returned by the worker into the buffer
        """
        active_view().run_command(
            'insert_snippet', {'contents': data['template']}
        )
class AnacondaFillFuncargs(sublime_plugin.TextCommand):
    """Trigger parameters autocompletion with key press."""

    def run(self, edit, all=False):
        par_key = 'complete_parameters'
        all_key = 'complete_all_parameters'
        par_enabled = get_settings(self.view, par_key, False)
        all_enabled = get_settings(self.view, all_key, False)

        if all_enabled:
            # everything is already configured; just trigger the completion
            self.view.run_command("anaconda_complete_funcargs")
            return

        # temporarily flip the settings on, run the completion, restore them
        if not par_enabled:
            self.view.settings().set(par_key, True)
        if all:
            self.view.settings().set(all_key, True)

        self.view.run_command("anaconda_complete_funcargs")

        if not par_enabled:
            self.view.settings().set(par_key, False)
        if all:
            self.view.settings().set(all_key, False)
class AnacondaFuncargsKeyListener(sublime_plugin.EventListener):
    """Allow parameters autocompletion with key press."""

    def on_query_context(self, view, key, operator, operand, match_all):
        if key != 'anaconda_insert_funcargs':
            return None

        caret = view.sel()[0].a
        # only fire when the caret sits inside an empty pair of parens
        inside_empty_call = (
            view.substr(caret - 1) == '(' and view.substr(caret) == ')'
        )
        enabled = get_settings(
            view, 'parameters_completion_on_keypress', True)
        if not (enabled and inside_empty_call):
            return None

        scope = view.scope_name(caret)
        if 'meta.function-call' in scope or 'meta.function_call' in scope:
            return True
        return None
| 4,180 | Python | .py | 97 | 33.247423 | 77 | 0.613485 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,059 | goto.py | DamnWidget_anaconda/commands/goto.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib.workers.market import Market
from ..anaconda_lib.helpers import is_remote_session
from ..anaconda_lib.explore_panel import ExplorerPanel
from ..anaconda_lib.helpers import prepare_send_data, is_python, get_settings
class AnacondaGoto(sublime_plugin.TextCommand):
    """Jedi GoTo a Python definition for Sublime Text
    """

    # jedi handler method dispatched on the JsonServer
    JEDI_COMMAND = 'goto'

    def run(self, edit: sublime.Edit) -> None:
        try:
            location = self.view.rowcol(self.view.sel()[0].begin())
            data = prepare_send_data(location, self.JEDI_COMMAND, 'jedi')
            data['settings'] = {
                'python_interpreter': get_settings(
                    self.view, 'python_interpreter', ''
                ),
            }
            Worker().execute(self.on_success, **data)
        except Exception:
            # best effort: a failed request must never break the editor,
            # but don't swallow SystemExit/KeyboardInterrupt like the
            # previous bare `except:` did
            pass

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        return is_python(self.view)

    def on_success(self, data):
        """Called when a result comes from the query
        """
        if not data.get('result'):
            # fallback to ST3 builtin Goto Definition
            return self.view.window().run_command('goto_definition')

        symbols = []
        for result in data['result']:
            path = self._infere_context_data(result[1])
            symbols.append({
                'title': result[0],
                'location': 'File: {} Line: {} Column: {}'.format(
                    path, result[2], result[3]
                ),
                'position': '{}:{}:{}'.format(path, result[2], result[3])
            })

        ExplorerPanel(self.view, symbols).show([])

    def _infere_context_data(self, path: str) -> str:
        """If this is a remote session, infere context data if any

        Maps remote interpreter paths back to local directories using the
        interpreter's pathmap; returns the path unchanged when no mapping
        applies.
        """
        if is_remote_session(self.view):
            window = self.view.window().id()
            try:
                interpreter = Market().get(window).interpreter
            except Exception as e:
                print('while getting interp for Window ID {}: {}'.format(
                    window, e)
                )
                return path

            directory_map = interpreter.pathmap
            if directory_map is None:
                return path

            for local_dir, remote_dir in directory_map.items():
                if remote_dir in path:
                    return path.replace(remote_dir, local_dir)

        return path
class AnacondaGotoAssignment(AnacondaGoto):
    """Jedi GoTo a Python assignment for Sublime Text

    Same flow as AnacondaGoto but asks jedi for the assignment location
    instead of the definition.
    """

    # jedi handler method dispatched on the JsonServer
    JEDI_COMMAND = 'goto_assignment'
class AnacondaGotoPythonObject(AnacondaGoto):
    """Open a prompt asking for a dotted Python path and goto it
    """

    def input_package(self, package: str) -> None:
        """Turn the dotted path into an import statement and resolve it"""
        splitted = package.strip().split('.')
        if len(splitted) == 1:
            import_command = 'import %s' % splitted[0]
        else:
            import_command = 'from %s import %s' % (
                '.'.join(splitted[:-1]), splitted[-1]
            )
        self.goto_python_object(import_command)

    def goto_python_object(self, import_command: str) -> None:
        """Ask the JsonServer to goto the object the import resolves to"""
        # the previous `try: ... except: raise` wrapper was a no-op
        # (a bare re-raise) and has been removed
        data = {
            'filename': '',
            'method': 'goto',
            'line': 1,
            'offset': len(import_command),
            'source': import_command,
            'handler': 'jedi'
        }
        Worker().execute(self.on_success, **data)

    def run(self, edit: sublime.Edit) -> None:
        sublime.active_window().show_input_panel(
            'Provide object path:', '',
            self.input_package, None, None
        )
| 3,916 | Python | .py | 99 | 28.818182 | 77 | 0.561824 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,060 | __init__.py | DamnWidget_anaconda/commands/__init__.py | # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from .doc import AnacondaDoc
from .rename import AnacondaRename
from .mccabe import AnacondaMcCabe
from .get_lines import AnacondaGetLines
from .autoimport import AnacondaAutoImport
from .autoformat import AnacondaAutoFormat
from .find_usages import AnacondaFindUsages
from .enable_linting import AnacondaEnableLinting
from .next_lint_error import AnacondaNextLintError
from .prev_lint_error import AnacondaPrevLintError
from .disable_linting import AnacondaDisableLinting
from .complete_func_args import AnacondaCompleteFuncargs, AnacondaFillFuncargs
from .complete_func_args import AnacondaFuncargsKeyListener
from .set_python_interpreter import AnacondaSetPythonInterpreter
from .goto import (
AnacondaGoto, AnacondaGotoAssignment, AnacondaGotoPythonObject
)
from .test_runner import (
AnacondaRunCurrentFileTests, AnacondaRunProjectTests,
AnacondaRunCurrentTest, AnacondaRunLastTest
)
from .vagrant import (
AnacondaVagrantEnable, AnacondaVagrantInit, AnacondaVagrantStatus,
AnacondaVagrantUp, AnacondaVagrantReload, AnacondaVagrantSsh
)
# Names exported when doing `from commands import *`; keep this list in
# sync with the imports above.
__all__ = [
    'AnacondaDoc',
    'AnacondaGoto',
    'AnacondaGotoAssignment',
    'AnacondaGotoPythonObject',
    'AnacondaRename',
    'AnacondaMcCabe',
    'AnacondaGetLines',
    'AnacondaVagrantUp',
    'AnacondaVagrantSsh',
    'AnacondaAutoImport',
    'AnacondaAutoFormat',
    'AnacondaFindUsages',
    'AnacondaVagrantInit',
    'AnacondaRunLastTest',
    'AnacondaEnableLinting',
    'AnacondaNextLintError',
    'AnacondaPrevLintError',
    'AnacondaVagrantEnable',
    'AnacondaVagrantStatus',
    'AnacondaVagrantReload',
    'AnacondaRunCurrentTest',
    'AnacondaDisableLinting',
    'AnacondaRunProjectTests',
    'AnacondaCompleteFuncargs',
    'AnacondaFillFuncargs',
    'AnacondaFuncargsKeyListener',
    'AnacondaSetPythonInterpreter',
    'AnacondaRunCurrentFileTests',
]
| 1,988 | Python | .py | 57 | 31.526316 | 78 | 0.812338 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,061 | vagrant.py | DamnWidget_anaconda/commands/vagrant.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from functools import partial
import sublime
import sublime_plugin
from ..anaconda_lib import worker, vagrant
from ..anaconda_lib._typing import Dict, Any
class AnacondaVagrantEnable(sublime_plugin.WindowCommand):
    """Turn on Vagrant support for this window/project
    """

    def run(self) -> None:
        window_id = sublime.active_window().id()
        active_worker = worker.WORKERS.get(window_id)
        if active_worker is None:
            return
        active_worker.support = True
class AnacondaVagrantBase(object):
    """Common behavior shared by the anaconda vagrant commands.

    Subclasses mix this class in with a Sublime Text command class;
    ``self.view`` is provided by the command machinery (or left None) and
    ``data`` buffers the output of the last vagrant call between the two
    invocations of the command.
    """

    # output of the last vagrant command (bytes) or None when idle
    data = None

    def __init__(self):
        super(AnacondaVagrantBase, self).__init__()
        self.view = None  # type: sublime.View

    def print_status(self, edit: 'sublime.Edit') -> None:
        """Print the vagrant command output string into a Sublime Text panel
        """
        vagrant_panel = self.view.window().create_output_panel(
            'anaconda_vagrant'
        )

        vagrant_panel.set_read_only(False)
        region = sublime.Region(0, vagrant_panel.size())
        vagrant_panel.erase(edit, region)
        vagrant_panel.insert(edit, 0, self.data.decode('utf8'))
        # reset the buffer so the next run() starts a fresh command
        self.data = None
        vagrant_panel.set_read_only(True)
        vagrant_panel.show(0)
        self.view.window().run_command(
            'show_panel', {'panel': 'output.anaconda_vagrant'}
        )

    def prepare_data(self, data) -> None:
        """Store the vagrant result and re-trigger the calling command.

        ``data`` is a ``(success, out, error)`` tuple produced by the
        vagrant helpers; on failure the error output is kept instead of
        the regular output.  (The previous ``Dict[str, Any]`` annotation
        was wrong for this 3-tuple and has been dropped.)
        """
        success, out, error = data
        self.data = error if not success else out
        sublime.active_window().run_command(self._class_name_to_command())

    def _class_name_to_command(self):
        """Convert the CamelCase class name to its snake_case command name.

        Sublime Text derives command names this way, e.g.
        'AnacondaVagrantStatus' -> 'anaconda_vagrant_status'.
        """
        command = []
        # enumerate instead of the original `for i in range(len(...))`
        for index, char in enumerate(self.__class__.__name__):
            if index == 0:
                command.append(char.lower())
            elif char.isupper():
                command.append('_')
                command.append(char.lower())
            else:
                command.append(char)

        return ''.join(command)
class AnacondaVagrantStatus(sublime_plugin.TextCommand, AnacondaVagrantBase):
    """Check vagrant status for configured project
    """

    # buffered output of the last status query; the second run() pass
    # (triggered from prepare_data) prints it and resets it to None
    data = None  # type: Dict[str, Any]

    def run(self, edit: sublime.Edit) -> None:
        # nothing to do when the project defines no vagrant environment
        if self.view.settings().get('vagrant_environment') is None:
            return

        cfg = self.view.settings().get('vagrant_environment')
        if self.data is None:
            # first pass: fire the asynchronous status query; prepare_data
            # re-runs this command once the result arrives
            try:
                vagrant.VagrantStatus(
                    self.prepare_data,
                    cfg.get('directory', ''),
                    cfg.get('machine', 'default'), True
                )
            except Exception as error:
                print(error)
        else:
            # second pass: show the buffered result in the output panel
            self.print_status(edit)

    def prepare_data(self, data: Dict[str, Any]) -> None:
        """Prepare the returned data

        NOTE(review): despite the annotation, `data` is unpacked below as
        a (success, output) tuple coming from the vagrant helper —
        confirm and fix the annotation upstream.
        """
        success, output = data
        self.data = output
        sublime.active_window().run_command('anaconda_vagrant_status')
class AnacondaVagrantInit(sublime_plugin.TextCommand, AnacondaVagrantBase):
    """Run `vagrant init` after asking the user for a target directory
    """

    def run(self, edit: sublime.Edit) -> None:
        # second invocation (re-triggered by prepare_data): just print
        if self.data is not None:
            self.print_status(edit)
            return

        cfg = self.view.settings().get('vagrant_environment')
        on_done = partial(self.input_directory, cfg)
        self.view.window().show_input_panel(
            'Directory to init on:', '', on_done, None, None
        )

    def input_directory(self, cfg: Dict[str, Any], directory: str) -> None:
        """Fire the vagrant init helper with the directory typed by the user
        """
        vagrant.VagrantInit(
            self.prepare_data, directory, cfg.get('machine', 'default')
        )
class AnacondaVagrantUp(sublime_plugin.TextCommand, AnacondaVagrantBase):
    """Bring up the configured vagrant machine (`vagrant up`)
    """

    def run(self, edit: sublime.Edit) -> None:
        cfg = self.view.settings().get('vagrant_environment')
        if cfg is None:
            return

        # second invocation (re-triggered by prepare_data): just print
        if self.data is not None:
            self.print_status(edit)
            return

        try:
            vagrant.VagrantUp(
                self.prepare_data, cfg['directory'],
                cfg.get('machine', 'default')
            )
        except Exception as error:
            print(error)
class AnacondaVagrantReload(sublime_plugin.TextCommand, AnacondaVagrantBase):
    """Restart the configured vagrant machine (`vagrant reload`)
    """

    def run(self, edit: sublime.Edit) -> None:
        cfg = self.view.settings().get('vagrant_environment')
        if cfg is None:
            return

        # second invocation (re-triggered by prepare_data): just print
        if self.data is not None:
            self.print_status(edit)
            return

        try:
            vagrant.VagrantReload(
                self.prepare_data, cfg['directory'],
                cfg.get('machine', 'default')
            )
        except Exception as error:
            print(error)
class AnacondaVagrantSsh(sublime_plugin.TextCommand, AnacondaVagrantBase):
    """Run a command on the vagrant machine through ssh
    """

    def run(self, edit: sublime.Edit) -> None:
        cfg = self.view.settings().get('vagrant_environment')
        if cfg is None:
            return

        # second invocation (re-triggered by prepare_data): just print
        if self.data is not None:
            self.print_status(edit)
            return

        on_done = partial(self.input_command, cfg)
        self.view.window().show_input_panel(
            'Command to execute:', '', on_done, None, None
        )

    def input_command(self, cfg: Dict[str, Any], command: str) -> None:
        """Fire the vagrant ssh helper with the command typed by the user
        """
        vagrant.VagrantSSH(
            self.prepare_data, cfg['directory'], command,
            cfg.get('machine', 'default')
        )
| 6,022 | Python | .py | 148 | 30.912162 | 79 | 0.600069 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,062 | python_build.py | DamnWidget_anaconda/commands/python_build.py |
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib import worker, callback
class AnacondaBaseBuild(object):
    """Common state holder that every anaconda builder derives from
    """

    def __init__(self, executable, *params):
        # interpreter binary plus the extra arguments it receives
        self.executable = executable
        self.params = params
        # accumulated process output starts out empty
        self.buffer = ""
class AnacondaPythonBuild(sublime_plugin.TextCommand):
"""Build the current buffer using the configured python interpreter
""" | 571 | Python | .py | 15 | 33.666667 | 71 | 0.735883 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,063 | doc.py | DamnWidget_anaconda/commands/doc.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
from functools import partial
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.tooltips import Tooltip
from ..anaconda_lib.callback import Callback
from ..anaconda_lib.helpers import prepare_send_data, is_python, get_settings
class AnacondaDoc(sublime_plugin.TextCommand):
    """Jedi get documentation string for Sublime Text
    """

    # docstring fetched from the JsonServer; the command runs twice: the
    # first pass requests it, the second pass renders and clears it
    documentation = None

    def run(self, edit: sublime.Edit) -> None:
        if self.documentation is None:
            # first pass: ask the anaconda JsonServer for the docstring
            try:
                location = self.view.rowcol(self.view.sel()[0].begin())
                if self.view.substr(self.view.sel()[0].begin()) in ['(', ')']:
                    # step one column left so jedi sees the callable name
                    # rather than the parenthesis under the cursor
                    location = (location[0], location[1] - 1)

                data = prepare_send_data(location, 'doc', 'jedi')
                if int(sublime.version()) >= 3070:
                    # HTML tooltips exist only since ST build 3070
                    data['html'] = get_settings(
                        self.view, 'enable_docstrings_tooltip', False)
                data["settings"] = {
                    'python_interpreter': get_settings(self.view, 'python_interpreter', ''),
                }
                Worker().execute(
                    Callback(on_success=self.prepare_data), **data
                )
            except Exception as error:
                print(error)
        else:
            # second pass (re-triggered by prepare_data): render the result
            if get_settings(self.view, 'enable_docstrings_tooltip', False) \
                    and int(sublime.version()) >= 3070:
                self.print_popup(edit)
            else:
                self.print_doc(edit)

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        return is_python(self.view)

    def prepare_data(self, data: Dict[str, Any]) -> None:
        """Prepare the returned data
        """
        if data['success']:
            self.documentation = data['doc']
            if self.documentation is None or self.documentation == '':
                self._show_status()
            else:
                # re-run the command; the second pass renders the result
                sublime.active_window().run_command(self.name())
        else:
            self._show_status()

    def print_doc(self, edit: sublime.Edit) -> None:
        """Print the documentation string into a Sublime Text panel
        """
        doc_panel = self.view.window().create_output_panel(
            'anaconda_documentation'
        )

        doc_panel.set_read_only(False)
        region = sublime.Region(0, doc_panel.size())
        doc_panel.erase(edit, region)
        doc_panel.insert(edit, 0, self.documentation)
        # clear the buffer so the next invocation starts a new request
        self.documentation = None
        doc_panel.set_read_only(True)
        doc_panel.show(0)
        self.view.window().run_command(
            'show_panel', {'panel': 'output.anaconda_documentation'}
        )

    def print_popup(self, edit) -> None:
        """Show message in a popup
        """
        # first line is the symbol name, the rest is the docstring body
        dlines = self.documentation.splitlines()
        name = dlines[0]
        # NOTE(review): joining with '' runs the remaining lines together;
        # presumably the tooltip content is HTML where this is harmless —
        # confirm
        docstring = ''.join(dlines[1:])

        content = {'name': name, 'content': docstring}
        self.documentation = None
        css = get_settings(self.view, 'anaconda_tooltip_theme', 'popup')
        Tooltip(css).show_tooltip(
            self.view, 'doc', content, partial(self.print_doc, edit))

    def _show_status(self) -> None:
        """Show message in the view status bar
        """
        self.view.set_status(
            'anaconda_doc', 'Anaconda: No documentation found'
        )
        # remove the status message again after five seconds
        sublime.set_timeout_async(
            lambda: self.view.erase_status('anaconda_doc'), 5000
        )
| 3,698 | Python | .py | 89 | 31.146067 | 92 | 0.582938 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,064 | rename.py | DamnWidget_anaconda/commands/rename.py | # Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import time
import logging
import traceback
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.callback import Callback
from ..anaconda_lib.helpers import prepare_send_data, is_python
class AnacondaRename(sublime_plugin.TextCommand):
    """Rename the word under the cursor to the given one in its total scope
    """

    # refactoring result returned by the JsonServer; run() is re-entered
    # with this set (via store_data) and then applies the changes
    data = None  # type: Dict[str, Any]

    def run(self, edit: sublime.Edit) -> None:
        if self.data is None:
            # first pass: ask the user for the replacement name
            try:
                location = self.view.word(self.view.sel()[0].begin())
                old_name = self.view.substr(location)
                sublime.active_window().show_input_panel(
                    "Replace with:", old_name, self.input_replacement,
                    None, None
                )
            except Exception:
                logging.error(traceback.format_exc())
        else:
            # second pass (after store_data): apply the rename
            self.rename(edit)

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        return is_python(self.view)

    def input_replacement(self, replacement: str) -> None:
        """Send the rename request for the word under the cursor
        """
        location = self.view.rowcol(self.view.sel()[0].begin())
        data = prepare_send_data(location, 'rename', 'jedi')
        data['directories'] = sublime.active_window().folders()
        data['new_word'] = replacement
        Worker().execute(Callback(on_success=self.store_data), **data)

    def store_data(self, data: Dict[str, Any]) -> None:
        """Just store the data and call the command again
        """
        self.data = data
        self.view.run_command('anaconda_rename')

    def rename(self, edit: sublime.Edit) -> None:
        """Rename in the buffer

        Opens every file touched by the refactoring and replaces the
        affected lines with the new text returned by the server.
        """
        data = self.data
        if data['success'] is True:
            for filename, data in data['renames'].items():
                for line in data:
                    view = sublime.active_window().open_file(
                        '{}:{}:0'.format(filename, line['lineno']),
                        sublime.ENCODED_POSITION
                    )
                    # busy-wait until the target file finishes loading.
                    # NOTE(review): this blocks the UI thread; also `edit`
                    # belongs to the originating view, and indexing
                    # `lines[line['lineno']]` assumes 0-based line numbers
                    # from the server — confirm both against the handler
                    while view.is_loading():
                        time.sleep(0.01)

                    lines = view.lines(sublime.Region(0, view.size()))
                    view.replace(edit, lines[line['lineno']], line['line'])

        self.data = None
| 2,518 | Python | .py | 59 | 32.254237 | 75 | 0.588621 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,065 | test_runner.py | DamnWidget_anaconda/commands/test_runner.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import os
import re
import functools
import sublime
import sublime_plugin
from ..anaconda_lib._typing import List, Tuple
from ..anaconda_lib.helpers import get_settings, git_installation, is_python
DEFAULT_TEST_COMMAND = "nosetests"
TEST_INCLUDE_FULL_PATH = True
TEST_DELIMETER = "."
TB_FILE = r'[ ]*File \"(...*?)\", line ([0-9]*)'
COMMAND_SEPARATOR = "&" if os.name == "nt" else ";"
TEST_PARAMS = {
'current_file_tests': '',
'current_test': '',
'project_tests': ''
}
def virtualenv(func):
    """Decorator that wraps ``_prepare_command``.

    When the ``test_virtualenv`` setting is present, the generated test
    command is surrounded by the virtualenv activation command and a
    trailing ``deactivate``; otherwise the command is returned untouched.
    """

    @functools.wraps(func)
    def wrapper(self):
        base_command = func(self)
        venv = get_settings(self.view, 'test_virtualenv')
        if venv is None:
            return base_command

        if os.name == 'posix':
            activate = 'source {}/bin/activate'.format(venv)
        else:
            # Windows virtualenvs ship an activate script instead
            activate = os.path.join(venv, 'Scripts', 'activate')
        return '{0}{2}{1}{2}deactivate'.format(
            activate, base_command, COMMAND_SEPARATOR
        )

    return wrapper
class TestMethodMatcher(object):
    """Locate the test class/method that encloses the given source text.
    """

    def find_test_path(self,
                       test_file_content: str,
                       class_delimeter: str =TEST_DELIMETER,
                       method_delimeter: str =TEST_DELIMETER):
        """Compose '<class_delimeter><Class>[<method_delimeter><method>]'.

        Returns None when no test class can be found in the given text.
        (The previous ``-> str`` annotation was wrong for that case.)
        """
        test_class_pos = self.find_test_class(test_file_content)
        if test_class_pos is None:
            return None

        test_class, pos = test_class_pos
        result = class_delimeter + test_class
        # only look for methods defined after the class header
        test_method = self.find_test_method(test_file_content[pos:])
        if test_method is not None:
            result += method_delimeter + test_method
        return result

    def find_test_method(self, test_file_content: str):
        """Return the name of the last method defined in the text, or None.
        """
        match_methods = re.findall(
            r'\s?def\s+(\w+)\s?\(', test_file_content)
        if match_methods:
            return match_methods[-1]  # the method nearest to the cursor
        return None

    def find_test_class(self, test_file_content: str):
        """Return (class_name, end_position) of the last test class, or None.

        Classes whose name contains 'Test'/'test' are preferred; when
        none match, the last class found is used.  (The previous
        ``List[Tuple[str, int]]`` annotation was wrong: a single tuple —
        or None — is returned.)
        """
        match_classes = [
            (m.group(1), m.end()) for m in
            re.finditer(r'\s?class\s+(\w+)\s?\(', test_file_content)]
        if not match_classes:
            return None

        test_classes = [
            (c, p) for (c, p) in
            match_classes if 'Test' in c or 'test' in c]
        if test_classes:
            return test_classes[-1]
        # no class looks like a test class: fall back to the last one
        return match_classes[-1]
class AnacondaRunTestsBase(sublime_plugin.TextCommand):
    """
    Run test commands based on project configuration

    For example, for a Django project using nose2:

        "settings": {
            "test_before_command":
                "source ~/.virtualenvs/<PROJECT>/bin/activate",
            "test_command":
                "./manage.py test --settings=tests.settings --noinput",
            "test_after_command": "deactivate",
            // This is the delimiter between the module and the class
            "test_delimeter": ":", // "." by default
        }
    """

    @property
    def output_syntax(self) -> str:
        """
        Property that return back the PythonConsole output syntax.

        This is needed because if anaconda has been installed using git
        the path is different
        """
        return 'Packages/{}/PythonConsole.hidden-tmLanguage'.format(
            'anaconda' if git_installation else 'Anaconda'
        )

    @property
    def output_theme(self) -> str:
        """
        Property that return back the PythonConsole output theme.

        This is needed because if anaconda has been installed using git
        the path is different
        """
        theme = get_settings(
            self.view, 'test_runner_theme', 'PythonConsoleDark.hidden-tmTheme')
        return 'Packages/{}/{}'.format(
            'anaconda' if git_installation else 'Anaconda', theme
        )

    @property
    def test_path(self) -> str:
        """Path of the file under test, relative to the test root.

        When `test_filepath_patterns` is off the path is converted to a
        dotted module path with the trailing '.py' stripped.
        (A leftover debug print() and a dead `return ""` were removed.)
        """
        real_path = os.path.relpath(self.view.file_name(), self.test_root)
        if not self.test_filepath_patterns:
            # path/to/test.py -> path.to.test ('.py' is three chars)
            real_path = real_path.replace(os.sep, '.')[:-3]
        return real_path

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        return is_python(self.view, ignore_comments=True)

    def run(self, edit: sublime.Edit) -> None:
        """Run the test or tests using the configured command
        """
        self._load_settings()
        command = self._prepare_command()

        self._configure_output_window(width=160)
        self.view.window().run_command(
            'exec', {
                'shell_cmd': command,
                'working_dir': self.test_root,
                'syntax': self.output_syntax,
                "file_regex": TB_FILE
            }
        )
        self._save_test_run(command)

    def _load_settings(self) -> None:
        """Cache every test-runner related setting on the instance
        """
        sep = COMMAND_SEPARATOR
        gs = get_settings
        self.test_root = gs(
            self.view, 'test_root', self.view.window().folders()[0]
        )
        self.test_command = gs(self.view, 'test_command', DEFAULT_TEST_COMMAND)
        self.test_params_dict = gs(self.view, 'test_params', TEST_PARAMS)
        # before/after hooks may be given as a list of shell commands
        self.before_test = gs(self.view, 'test_before_command')
        if isinstance(self.before_test, list):
            self.before_test = sep.join(self.before_test)
        self.after_test = gs(self.view, 'test_after_command')
        if isinstance(self.after_test, list):
            self.after_test = sep.join(self.after_test)
        self.test_include_full_path = gs(
            self.view, 'test_include_full_path', TEST_INCLUDE_FULL_PATH)
        self.test_delimeter = gs(self.view, 'test_delimeter', TEST_DELIMETER)
        self.test_method_delimeter = gs(
            self.view, 'test_method_delimeter', TEST_DELIMETER
        )
        self.test_filepath_patterns = gs(
            self.view, 'test_filepath_patterns', False
        )
        self.output_show_color = gs(self.view, 'test_output_show_color', True)

    @virtualenv
    def _prepare_command(self) -> str:
        """Prepare the command to run adding pre tests and after tests

        (A leftover debug print(command) was removed.)
        """
        command = [self.test_command, self.test_params, self.test_path]
        if self.before_test is not None:
            command = [self.before_test, COMMAND_SEPARATOR] + command
        if self.after_test is not None:
            command += [COMMAND_SEPARATOR, self.after_test]
        return ' '.join(command)

    def _configure_output_window(self, width: int =80) -> None:
        """Configure the syntax and style of the output window
        """
        panel = self.view.window().get_output_panel('exec')
        panel.settings().set('wrap_width', width,)
        if self.output_show_color:
            panel.settings().set('color_scheme', self.output_theme)

    def _save_test_run(self, command: str) -> None:
        """Remember the command so AnacondaRunLastTest can repeat it
        """
        s = sublime.load_settings('PythonTestRunner.last-run')
        s.set('last_test_run', command)
        sublime.save_settings('PythonTestRunner.last-run')
class AnacondaRunCurrentFileTests(AnacondaRunTestsBase):
    """Run every test present in the active file
    """

    @property
    def test_params(self):
        # extra command line parameters configured for per-file runs
        return self.test_params_dict.get('current_file_tests', '')

    @property
    def test_path(self) -> str:
        full_path = super(AnacondaRunCurrentFileTests, self).test_path
        if not self.test_include_full_path:
            # keep only the last dotted component (the module name)
            return full_path.split(self.test_delimeter)[-1]
        return full_path
class AnacondaRunProjectTests(AnacondaRunTestsBase):
    """Run all tests in a project
    """

    @property
    def test_params(self):
        # extra command line parameters configured for whole-project runs
        return self.test_params_dict.get('project_tests', '')

    @property
    def test_path(self) -> str:
        """
        Empty path should run all tests.
        If the option `test_project_path` is set, return it instead
        """
        return get_settings(self.view, 'test_project_path', '')
class AnacondaRunCurrentTest(AnacondaRunTestsBase):
    """Run test under cursor
    """

    @property
    def test_params(self):
        # extra command line parameters configured for single-test runs
        return self.test_params_dict.get('current_test', '')

    @property
    def test_path(self) -> str:
        """Return the correct path to run the test under the cursor
        """
        test_path = super(AnacondaRunCurrentTest, self).test_path
        region = self.view.sel()[0]
        line_region = self.view.line(region)
        file_character_start = 0
        # everything from the start of the buffer up to the end of the
        # cursor's line; the matcher then picks the last class/def that
        # appears before the cursor
        text_string = self.view.substr(
            sublime.Region(file_character_start, line_region.end())
        )
        test_name = TestMethodMatcher().find_test_path(
            text_string,
            class_delimeter=self.test_delimeter,
            method_delimeter=self.test_method_delimeter
        )
        if test_name is not None:
            path = test_path + test_name
            if self.test_include_full_path:
                return path
            else:
                # keep only the last component (just the method name)
                return path.split(self.test_method_delimeter)[-1]

        return ''
class AnacondaRunLastTest(AnacondaRunTestsBase):
    """Repeat whatever test command ran last
    """

    def _prepare_command(self):
        # the last command is persisted by _save_test_run in the base class
        settings = sublime.load_settings('PythonTestRunner.last-run')
        return settings.get('last_test_run')
| 10,020 | Python | .py | 252 | 30.611111 | 79 | 0.601918 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,066 | mccabe.py | DamnWidget_anaconda/commands/mccabe.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.callback import Callback
from ..anaconda_lib.helpers import get_settings
class AnacondaMcCabe(sublime_plugin.WindowCommand):
    """Execute McCabe complexity checker
    """

    def run(self) -> None:
        """Send the buffer contents to the JsonServer 'qa' handler
        """
        view = self.window.active_view()
        code = view.substr(sublime.Region(0, view.size()))
        data = {
            'code': code,
            'threshold': get_settings(view, 'mccabe_threshold', 7),
            'filename': view.file_name(),
            'method': 'mccabe',
            'handler': 'qa'
        }
        Worker().execute(Callback(on_success=self.prepare_data), **data)

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        view = self.window.active_view()
        location = view.sel()[0].begin()
        matcher = 'source.python'
        return view.match_selector(location, matcher)

    def prepare_data(self, data: Dict[str, Any]) -> None:
        """Prepare the data to present in the quick panel
        """
        if not data['success'] or data['errors'] is None:
            sublime.status_message('Unable to run McCabe checker...')
            return

        if len(data['errors']) == 0:
            view = self.window.active_view()
            threshold = get_settings(view, 'mccabe_threshold', 7)
            sublime.status_message(
                'No code complexity beyond {} was found'.format(threshold)
            )
            # nothing to display: bail out instead of opening an empty
            # quick panel (the missing return was a bug)
            return

        self._show_options(data['errors'])

    def _show_options(self, options: Dict[str, Any]) -> None:
        """Show a dropdown quickpanel with options to jump
        """
        self.options = []  # type: List[List[str]]
        for option in options:
            self.options.append(
                [option['message'], 'line: {}'.format(option['line'])]
            )

        self.window.show_quick_panel(self.options, self._jump)

    def _jump(self, item: int) -> None:
        """Jump to a line in the view buffer
        """
        # the quick panel calls back with -1 when it is cancelled
        if item == -1:
            return

        # NOTE(review): the -1 suggests McCabe line numbers are 1-based
        # while text_point expects 0-based rows — confirm against the
        # qa handler
        lineno = int(self.options[item][1].split(':')[1].strip()) - 1
        pt = self.window.active_view().text_point(lineno, 0)
        self.window.active_view().sel().clear()
        self.window.active_view().sel().add(sublime.Region(pt))
        self.window.active_view().show(pt)
| 2,570 | Python | .py | 61 | 33.262295 | 74 | 0.601045 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,067 | enable_linting.py | DamnWidget_anaconda/commands/enable_linting.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime_plugin
from ..anaconda_lib.helpers import get_settings
from ..anaconda_lib.helpers import valid_languages
from ..anaconda_lib.linting.sublime import ANACONDA, run_linter
class AnacondaEnableLinting(sublime_plugin.WindowCommand):
    """Re-enable the linting for the current buffer
    """

    def run(self) -> None:
        view = self.window.active_view()
        # named files are tracked in DISABLED by file name, unnamed
        # buffers in DISABLED_BUFFERS by their (window id, view id) pair
        window_view = (self.window.id(), view.id())
        filename = view.file_name()
        if filename is not None and filename in ANACONDA['DISABLED']:
            ANACONDA['DISABLED'].remove(filename)
        elif filename is None and window_view in ANACONDA['DISABLED_BUFFERS']:
            ANACONDA['DISABLED_BUFFERS'].remove(window_view)

        # lint right away instead of waiting for the next modification
        run_linter(self.window.active_view())

    def is_enabled(self) -> bool:
        """Determines if the command is enabled
        """
        view = self.window.active_view()
        window_view = (self.window.id(), view.id())
        # only meaningful when linting is globally on and this buffer is
        # currently disabled (either by file name or by buffer id)
        if ((view.file_name() not in ANACONDA['DISABLED']
             and window_view not in ANACONDA['DISABLED_BUFFERS'])
                or not get_settings(view, 'anaconda_linting')):
            return False

        # additionally require the caret to sit in a supported language
        location = view.sel()[0].begin()
        for lang in valid_languages():
            matcher = 'source.{}'.format(lang)
            if view.match_selector(location, matcher) is True:
                return True

        return False
| 1,544 | Python | .py | 33 | 38.30303 | 78 | 0.652 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,068 | get_lines.py | DamnWidget_anaconda/commands/get_lines.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime
import sublime_plugin
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.helpers import get_settings
from ..anaconda_lib.helpers import valid_languages
from ..anaconda_lib.linting.sublime import ANACONDA
class AnacondaGetLines(sublime_plugin.WindowCommand):
    """Get a quickpanel with all the errors and lines ready to jump to them
    """

    def run(self) -> None:
        # line number -> list of lint messages for the active view
        errors = {}  # type: Dict[int, str]
        self._harvest_errors(errors, 'ERRORS')
        self._harvest_errors(errors, 'WARNINGS')
        self._harvest_errors(errors, 'VIOLATIONS')

        if len(errors) > 0:
            self.options = []  # type: List[List[str]]
            for line, error_strings in errors.items():
                for msg in error_strings:
                    self.options.append([msg, 'line: {}'.format(line)])
            self.window.show_quick_panel(self.options, self._jump)

    def is_enabled(self) -> bool:
        """Determines if the command is enabled
        """
        view = self.window.active_view()
        if (view.file_name() in ANACONDA['DISABLED'] or
                not get_settings(view, 'anaconda_linting')):
            return False

        # require the caret to sit in a supported language
        location = view.sel()[0].begin()
        for lang in valid_languages():
            matcher = 'source.{}'.format(lang)
            if view.match_selector(location, matcher) is True:
                return True

        return False

    def _harvest_errors(self, harvester: Dict[str, Any], error_type: str) -> None:  # noqa
        """Collect the `error_type` lint results registered for the active
        view into `harvester`, keyed by line number.
        """
        vid = self.window.active_view().id()
        for line, error_strings in ANACONDA[error_type].get(vid, {}).items():
            if line not in harvester:
                harvester[line] = []

            for error in error_strings:
                harvester[line].append(error)

    def _jump(self, item: int) -> None:
        """Jump to a line in the view buffer
        """
        # the quick panel calls back with -1 when it is cancelled
        if item == -1:
            return

        lineno = int(self.options[item][1].split(':')[1].strip())
        pt = self.window.active_view().text_point(lineno, 0)
        self.window.active_view().sel().clear()
        self.window.active_view().sel().add(sublime.Region(pt))
        self.window.active_view().show(pt)
| 2,366 | Python | .py | 52 | 36.288462 | 90 | 0.612636 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,069 | find_usages.py | DamnWidget_anaconda/commands/find_usages.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime_plugin
from ..anaconda_lib.worker import Worker
from ..anaconda_lib.callback import Callback
from ..anaconda_lib.explore_panel import ExplorerPanel
from ..anaconda_lib.helpers import prepare_send_data, active_view, is_python
class AnacondaFindUsages(sublime_plugin.TextCommand):
    """Jedi find usages for Sublime Text
    """

    def run(self, edit: sublime_plugin.sublime.Edit) -> None:
        """Ask the JsonServer for all usages of the symbol under the cursor
        """
        try:
            location = active_view().rowcol(self.view.sel()[0].begin())
            data = prepare_send_data(location, 'usages', 'jedi')
            Worker().execute(
                Callback(on_success=self.on_success),
                **data
            )
        except Exception as error:
            # a bare `except: pass` used to swallow everything here
            # (including SystemExit/KeyboardInterrupt); log the error
            # instead, matching the style of the sibling commands
            print(error)

    def is_enabled(self) -> bool:
        """Determine if this command is enabled or not
        """
        return is_python(self.view)

    def on_success(self, data):
        """Method called after callback returns with a result
        """
        if not data['result']:
            sublime_plugin.sublime.status_message('Usages not found...')
            return

        # each usage is a (title, filename, line, column) tuple
        usages = []
        for usage in data['result']:
            usages.append({
                'title': usage[0],
                'location': 'File: {} Line: {} Column: {}'.format(
                    usage[1], usage[2], usage[3]
                ),
                'position': '{}:{}:{}'.format(usage[1], usage[2], usage[3])
            })

        ExplorerPanel(self.view, usages).show([], True)
| 1,615 | Python | .py | 40 | 30.8 | 76 | 0.587596 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,070 | disable_linting.py | DamnWidget_anaconda/commands/disable_linting.py |
# Copyright (C) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
import sublime_plugin
from ..anaconda_lib.helpers import get_settings
from ..anaconda_lib.helpers import valid_languages
from ..anaconda_lib.linting.sublime import ANACONDA, erase_lint_marks
class AnacondaDisableLinting(sublime_plugin.WindowCommand):
    """Disable the linting for the current buffer
    """

    def run(self) -> None:
        """Register the buffer as disabled and wipe its lint marks
        """
        view = self.window.active_view()
        if view.file_name() is not None:
            # named files are tracked by file name
            ANACONDA['DISABLED'].append(view.file_name())
        else:
            # unnamed buffers are tracked by their (window id, view id)
            ANACONDA['DISABLED_BUFFERS'].append((self.window.id(), view.id()))

        erase_lint_marks(view)

    def is_enabled(self) -> bool:
        """Determines if the command is enabled
        """
        view = self.window.active_view()
        window_view = (self.window.id(), view.id())
        # the command is pointless when linting is globally off or this
        # buffer is already disabled.
        # Fixed: the previous check tested `view.id() in DISABLED_BUFFERS`
        # — which stores (window id, view id) tuples as appended by run(),
        # so it could never match — and combined the two membership tests
        # with `and` instead of `or` (cf. AnacondaEnableLinting).
        if (view.file_name() in ANACONDA['DISABLED']
                or window_view in ANACONDA['DISABLED_BUFFERS']
                or not get_settings(view, 'anaconda_linting')):
            return False

        # additionally require the caret to sit in a supported language
        location = view.sel()[0].begin()
        for lang in valid_languages():
            matcher = 'source.{}'.format(lang)
            if view.match_selector(location, matcher) is True:
                return True

        return False
| 1,318 | Python | .py | 30 | 35.5 | 78 | 0.643696 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,071 | _pydecimal.pyi | DamnWidget_anaconda/anaconda_lib/jedi/third_party/typeshed/stdlib/3/_pydecimal.pyi | # This is a slight lie, the implementations aren't exactly identical
# However, in all likelihood, the differences are inconsequential
from decimal import *
| 157 | Python | .pyde | 3 | 51.333333 | 68 | 0.818182 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,072 | set_python_interpreter.py | DamnWidget_anaconda/commands/set_python_interpreter.py | import logging
import traceback
import sublime
import sublime_plugin
from ..anaconda_lib._typing import Dict, Any
from ..anaconda_lib.helpers import is_python
from ..anaconda_lib.builder.python_builder import AnacondaSetPythonBuilder
class AnacondaSetPythonInterpreter(sublime_plugin.TextCommand):
    """Sets or modifies the python interpreter of the current project"""

    def run(self, edit: sublime.Edit) -> None:
        """Ask for the interpreter path, pre-filled with the current one"""
        try:
            sublime.active_window().show_input_panel(
                "Python Path:", self.get_current_interpreter_path(),
                self.update_interpreter_settings, None, None
            )
        except Exception:
            logging.error(traceback.format_exc())

    def update_interpreter_settings(self, venv_path: str) -> None:
        """Updates the project and adds/modifies the interpreter path"""
        project_data = self.get_project_data()

        # check if we have settings set in the project settings
        if project_data.get('settings', False):
            try:
                # probe the mapping; raises AttributeError when the
                # 'settings' value is not a dict (e.g. a plain string)
                project_data['settings'].get('python_interpreter', False)
            except AttributeError:
                # typo fix: was 'Ops your project settings is missed up'
                sublime.message_dialog(
                    'Oops, your project settings are messed up'
                )
            else:
                # set the path and save the project
                project_data['settings']['python_interpreter'] = venv_path
                self.save_project_data(project_data)
        else:
            # executed when there is no 'settings' key in the project yet
            project_data.update(
                {
                    'settings': {'python_interpreter': venv_path}
                }
            )
            self.save_project_data(project_data)

        AnacondaSetPythonBuilder().update_interpreter_build_system(
            venv_path
        )

    def save_project_data(self, data: Dict[str, Any]) -> None:
        """Saves the provided data to the project settings"""
        sublime.active_window().set_project_data(data)
        # typo fix: was 'successfuly'
        sublime.status_message("Python path is set successfully")

    def get_project_data(self) -> Dict[str, Any]:
        """Return the project data for the current window"""
        return sublime.active_window().project_data() or {}

    def get_current_interpreter_path(self) -> str:
        """Returns the current path from the settings if possible"""
        try:
            return self.get_project_data()['settings']['python_interpreter']
        except Exception:
            return ''

    def is_enabled(self) -> bool:
        """Check this plug in is enabled"""
        return is_python(self.view)
30,073 | fix_metaclass.py | DamnWidget_anaconda/anaconda_lib/autopep/autopep8_lib/lib2to3/fixes/fix_metaclass.py | """Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classef (inherits nothing, inherits once, inherints
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf
def has_metaclass(parent):
    """Report whether *parent* (a classdef node) assigns ``__metaclass__``.

    The tree is inspected without being modified.  Two shapes can occur:
        1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
        2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for child in parent.children:
        if child.type == syms.suite:
            # descend into the class body; the answer lives in the suite
            return has_metaclass(child)
        if child.type != syms.simple_stmt or not child.children:
            continue
        expression = child.children[0]
        if expression.type != syms.expr_stmt or not expression.children:
            continue
        target = expression.children[0]
        if isinstance(target, Leaf) and target.value == '__metaclass__':
            return True
    return False
def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return
    # !%@#! oneliners have no suite node, we have to fake one up
    # find the colon that ends the class header; its index 'i' is
    # reused below when reparenting the body
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")
    # move everything into a suite node
    suite = Node(syms.suite, [])
    # repeatedly take the first child after the colon, append a clone
    # to the new suite, and detach the original until none remain
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    node = suite
def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
        simple_stmt. We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
    """
    # locate the first semicolon, if any ('node' ends up pointing at it)
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI: # *sigh*
            break
    else:
        return
    node.remove() # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    # reparent everything that followed the semicolon into the new statement
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    # carry the original leading whitespace/comments over to the new statement
    new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
    """Detach a trailing NEWLINE leaf from *node*'s children, if present."""
    children = node.children
    if children and children[-1].type == token.NEWLINE:
        children[-1].remove()
def find_metas(cls_node):
    """Yield (suite, index, simple_stmt) for each ``__metaclass__``
    assignment found directly inside the class suite.

    Each matching statement is normalized first: split away from any
    semicolon-joined siblings and stripped of its trailing NEWLINE.
    """
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")
    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    # list() snapshots the enumeration because fixup_simple_stmt below
    # inserts children while we iterate
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == '__metaclass__':
                    # We found a assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    # traverse children right-to-left via pop() so nodes come off in
    # source order
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break
    # find the first Leaf
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                node.prefix = ''
            return
        else:
            # interior node: descend depth-first, preserving source order
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    """Rewrite ``__metaclass__ = X`` class attributes into the Python 3
    ``class Name(..., metaclass=X):`` keyword-argument form, preserving
    the original indentation and spacing where possible."""
    BM_compatible = True
    PATTERN = """
    classdef<any*>
    """
    def transform(self, node, results):
        """Mutate the classdef *node* in place; returns None."""
        if not has_metaclass(node):
            return
        fixup_parse_tree(node)
        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()
        text_type = node.children[0].type # always Leaf(nnn, 'class')
        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            # 0 1 2 3 4 5 6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                # single base class: wrap it in a fresh arglist
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(', ')', ':', suite])
            # 0 1 2 3 4 5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            # 0 1 2 3
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, ')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, '('))
        else:
            raise ValueError("Unexpected class definition")
        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix
        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, ','))
            meta_txt.prefix = ' '
        else:
            meta_txt.prefix = ''
        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = ''
        expr_stmt.children[2].prefix = ''
        arglist.append_child(last_metaclass)
        fixup_indent(suite)
        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass_
            suite.remove()
            pass_leaf = Leaf(text_type, 'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, '\n'))
        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                 suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, 'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
| 8,201 | Python | .tac | 190 | 33.768421 | 80 | 0.583971 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,074 | dataclasses.pyi | DamnWidget_anaconda/anaconda_lib/jedi/third_party/typeshed/third_party/3/dataclasses.pyi | import sys
from typing import Any, Callable, Dict, Generic, Iterable, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload
if sys.version_info >= (3, 9):
from types import GenericAlias
_T = TypeVar("_T")

# Sentinel type/value used by dataclasses to tell "no default supplied"
# apart from "default is None".
class _MISSING_TYPE: ...

MISSING: _MISSING_TYPE
@overload
def asdict(obj: Any) -> Dict[str, Any]: ...
@overload
def asdict(obj: Any, *, dict_factory: Callable[[List[Tuple[str, Any]]], _T]) -> _T: ...
@overload
def astuple(obj: Any) -> Tuple[Any, ...]: ...
@overload
def astuple(obj: Any, *, tuple_factory: Callable[[List[Any]], _T]) -> _T: ...
# dataclass() works both as a bare decorator and as a decorator factory,
# hence the three overloads below.
@overload
def dataclass(_cls: Type[_T]) -> Type[_T]: ...
@overload
def dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]: ...
@overload
def dataclass(
    *, init: bool = ..., repr: bool = ..., eq: bool = ..., order: bool = ..., unsafe_hash: bool = ..., frozen: bool = ...
) -> Callable[[Type[_T]], Type[_T]]: ...

# Descriptor object returned by fields(); mirrors dataclasses.Field.
class Field(Generic[_T]):
    name: str
    type: Type[_T]
    default: _T
    default_factory: Callable[[], _T]
    repr: bool
    hash: Optional[bool]
    init: bool
    compare: bool
    metadata: Mapping[str, Any]
    if sys.version_info >= (3, 9):
        def __class_getitem__(cls, item: Any) -> GenericAlias: ...

# NOTE: Actual return type is 'Field[_T]', but we want to help type checkers
# to understand the magic that happens at runtime.
@overload # `default` and `default_factory` are optional and mutually exclusive.
def field(
    *,
    default: _T,
    init: bool = ...,
    repr: bool = ...,
    hash: Optional[bool] = ...,
    compare: bool = ...,
    metadata: Optional[Mapping[str, Any]] = ...,
) -> _T: ...
@overload
def field(
    *,
    default_factory: Callable[[], _T],
    init: bool = ...,
    repr: bool = ...,
    hash: Optional[bool] = ...,
    compare: bool = ...,
    metadata: Optional[Mapping[str, Any]] = ...,
) -> _T: ...
@overload
def field(
    *,
    init: bool = ...,
    repr: bool = ...,
    hash: Optional[bool] = ...,
    compare: bool = ...,
    metadata: Optional[Mapping[str, Any]] = ...,
) -> Any: ...
def fields(class_or_instance: Any) -> Tuple[Field[Any], ...]: ...
def is_dataclass(obj: Any) -> bool: ...

class FrozenInstanceError(AttributeError): ...

class InitVar(Generic[_T]):
    if sys.version_info >= (3, 9):
        def __class_getitem__(cls, type: Any) -> GenericAlias: ...

# Programmatic equivalent of writing a decorated class statement.
def make_dataclass(
    cls_name: str,
    fields: Iterable[Union[str, Tuple[str, type], Tuple[str, type, Field[Any]]]],
    *,
    bases: Tuple[type, ...] = ...,
    namespace: Optional[Dict[str, Any]] = ...,
    init: bool = ...,
    repr: bool = ...,
    eq: bool = ...,
    order: bool = ...,
    unsafe_hash: bool = ...,
    frozen: bool = ...,
) -> type: ...
def replace(obj: _T, **changes: Any) -> _T: ...
| 2,737 | Python | .tac | 86 | 28.488372 | 121 | 0.588569 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,075 | dataclasses.pyi | DamnWidget_anaconda/anaconda_lib/jedi/third_party/typeshed/stdlib/3.7/dataclasses.pyi | import sys
from typing import Any, Callable, Dict, Generic, Iterable, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload
if sys.version_info >= (3, 9):
from types import GenericAlias
_T = TypeVar("_T")

# Sentinel type/value used by dataclasses to tell "no default supplied"
# apart from "default is None".
class _MISSING_TYPE: ...

MISSING: _MISSING_TYPE
@overload
def asdict(obj: Any) -> Dict[str, Any]: ...
@overload
def asdict(obj: Any, *, dict_factory: Callable[[List[Tuple[str, Any]]], _T]) -> _T: ...
@overload
def astuple(obj: Any) -> Tuple[Any, ...]: ...
@overload
def astuple(obj: Any, *, tuple_factory: Callable[[List[Any]], _T]) -> _T: ...
# dataclass() works both as a bare decorator and as a decorator factory,
# hence the three overloads below.
@overload
def dataclass(_cls: Type[_T]) -> Type[_T]: ...
@overload
def dataclass(_cls: None) -> Callable[[Type[_T]], Type[_T]]: ...
@overload
def dataclass(
    *, init: bool = ..., repr: bool = ..., eq: bool = ..., order: bool = ..., unsafe_hash: bool = ..., frozen: bool = ...
) -> Callable[[Type[_T]], Type[_T]]: ...

# Descriptor object returned by fields(); mirrors dataclasses.Field.
class Field(Generic[_T]):
    name: str
    type: Type[_T]
    default: _T
    default_factory: Callable[[], _T]
    repr: bool
    hash: Optional[bool]
    init: bool
    compare: bool
    metadata: Mapping[str, Any]
    if sys.version_info >= (3, 9):
        def __class_getitem__(cls, item: Any) -> GenericAlias: ...

# NOTE: Actual return type is 'Field[_T]', but we want to help type checkers
# to understand the magic that happens at runtime.
@overload # `default` and `default_factory` are optional and mutually exclusive.
def field(
    *,
    default: _T,
    init: bool = ...,
    repr: bool = ...,
    hash: Optional[bool] = ...,
    compare: bool = ...,
    metadata: Optional[Mapping[str, Any]] = ...,
) -> _T: ...
@overload
def field(
    *,
    default_factory: Callable[[], _T],
    init: bool = ...,
    repr: bool = ...,
    hash: Optional[bool] = ...,
    compare: bool = ...,
    metadata: Optional[Mapping[str, Any]] = ...,
) -> _T: ...
@overload
def field(
    *,
    init: bool = ...,
    repr: bool = ...,
    hash: Optional[bool] = ...,
    compare: bool = ...,
    metadata: Optional[Mapping[str, Any]] = ...,
) -> Any: ...
def fields(class_or_instance: Any) -> Tuple[Field[Any], ...]: ...
def is_dataclass(obj: Any) -> bool: ...

class FrozenInstanceError(AttributeError): ...

class InitVar(Generic[_T]):
    if sys.version_info >= (3, 9):
        def __class_getitem__(cls, type: Any) -> GenericAlias: ...

# Programmatic equivalent of writing a decorated class statement.
def make_dataclass(
    cls_name: str,
    fields: Iterable[Union[str, Tuple[str, type], Tuple[str, type, Field[Any]]]],
    *,
    bases: Tuple[type, ...] = ...,
    namespace: Optional[Dict[str, Any]] = ...,
    init: bool = ...,
    repr: bool = ...,
    eq: bool = ...,
    order: bool = ...,
    unsafe_hash: bool = ...,
    frozen: bool = ...,
) -> type: ...
def replace(obj: _T, **changes: Any) -> _T: ...
| 2,737 | Python | .tac | 86 | 28.488372 | 121 | 0.588569 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,076 | modwsgi.pyi | DamnWidget_anaconda/anaconda_lib/jedi/third_party/django-stubs/django-stubs/contrib/auth/handlers/modwsgi.pyi | from typing import Any, Dict
# The configured Django user model — presumably resolved via
# get_user_model() at runtime; typed as Any in this stub.
UserModel: Any

# mod_wsgi authentication hooks: Apache calls these with the WSGI
# environ plus the supplied credentials / username.
def check_password(environ: Dict[Any, Any], username: str, password: str) -> Any: ...
def groups_for_user(environ: Dict[Any, Any], username: str) -> Any: ...
| 204 | Python | .wsgi | 4 | 49.5 | 85 | 0.707071 | DamnWidget/anaconda | 2,213 | 260 | 184 | GPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,077 | analyze.py | mozilla_cipherscan/analyze.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contributor: Julien Vehent jvehent@mozilla.com [:ulfr]
from __future__ import print_function
import sys, os, json, subprocess, logging, argparse, platform, re
from collections import namedtuple
from datetime import datetime
from copy import deepcopy
try:
from urllib2 import urlopen, URLError
except ModuleNotFoundError:
from urllib.request import urlopen
from urllib.error import URLError
def str_compat(data):
    """Return *data* as text, decoding UTF-8 bytes if necessary.

    Under Python 3, subprocess output arrives as ``bytes`` and must be
    decoded.  Checking the value type (instead of ``sys.version_info``)
    fixes a crash: the old ``str(data, 'utf-8')`` call raised TypeError
    when handed a value that was already a ``str``.
    """
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return data
# has_good_pfs compares a given PFS configuration with a target
# dh parameter a target elliptic curve, and return true if good
# if `must_match` is True, the exact values are expected, if not
# larger pfs values than the targets are acceptable
def has_good_pfs(pfs, target_dh, target_ecc, must_match=False):
    """Compare a PFS description string against DH/ECC size targets.

    ``pfs`` is formatted like ``'ECDH,P-256,256bits'`` or
    ``'DH,1024bits'``.  With ``must_match`` the parameter size must
    equal the target exactly; otherwise any size >= target passes.
    Strings mentioning neither kind of key exchange pass trivially.
    """
    if target_ecc and 'ECDH,' in pfs:
        # field 2 is e.g. '256bits'; cut at the first 'b'
        curve_bits = int(pfs.split(',')[2].split('b')[0])
        if curve_bits < target_ecc:
            return False
        if must_match and curve_bits != target_ecc:
            return False
    elif target_dh and 'DH,' in pfs:
        # field 1 is e.g. '1024bits'
        dh_bits = int(pfs.split(',')[1].split('b')[0])
        if dh_bits < target_dh:
            return False
        if must_match and dh_bits != target_dh:
            return False
    return True
# is_fubar assumes that a configuration is not completely messed up
# and looks for reasons to think otherwise. it will return True if
# it finds one of these reason
def is_fubar(results):
    """Look for outright-broken configuration elements in *results*.

    Appends remediation advice to the module-global ``failures['fubar']``
    list and returns True when at least one broken condition is found:
    unknown cipher, SSLv2, small RSA/EC public key, weak PFS parameters,
    bad certificate signature or an untrusted certificate.
    """
    logging.debug('entering fubar evaluation')
    lvl = 'fubar'
    fubar = False
    has_ssl2 = False
    has_wrong_pubkey = False
    has_wrong_ec_pubkey = False
    has_bad_sig = False
    has_untrust_cert = False
    has_wrong_pfs = False
    for conn in results['ciphersuite']:
        logging.debug('testing connection %s' % conn)
        pubkey_bits = int(conn['pubkey'][0])
        ec_kex = re.match(r"(ECDHE|EECDH|ECDH)-", conn['cipher'])
        if conn['cipher'] not in (set(old["openssl_ciphers"]) | set(inter["openssl_ciphers"]) | set(modern["openssl_ciphers"])):
            failures[lvl].append("remove cipher " + conn['cipher'])
            logging.debug(conn['cipher'] + ' is in the list of fubar ciphers')
            fubar = True
        if 'SSLv2' in conn['protocols']:
            has_ssl2 = True
            logging.debug('SSLv2 is in the list of fubar protocols')
            fubar = True
        if not ec_kex and pubkey_bits < 2048:
            has_wrong_pubkey = True
            logging.debug(conn['pubkey'][0] + ' is a fubar pubkey size')
            fubar = True
        if ec_kex and pubkey_bits < 256:
            has_wrong_ec_pubkey = True
            logging.debug(conn['pubkey'][0] + ' is a fubar EC pubkey size')
            fubar = True
        if conn['pfs'] != 'None':
            if not has_good_pfs(conn['pfs'], 1024, 160):
                logging.debug(conn['pfs'] + ' is a fubar PFS parameters')
                fubar = True
                has_wrong_pfs = True
        for sigalg in conn['sigalg']:
            if sigalg not in (set(old["certificate_signatures"]) | set(inter["certificate_signatures"]) | set(modern["certificate_signatures"])):
                logging.debug(sigalg + ' is a fubar cert signature')
                # BUG FIX: has_bad_sig was never set to True, so the
                # "bad signature algorithm" advice below could never be
                # emitted even when a bad signature was detected
                has_bad_sig = True
                fubar = True
        if conn['trusted'] == 'False':
            has_untrust_cert = True
            logging.debug('The certificate is not trusted, which is quite fubar')
            fubar = True
    if has_ssl2:
        failures[lvl].append("disable SSLv2")
    if has_bad_sig:
        failures[lvl].append("don't use a cert with a bad signature algorithm")
    if has_wrong_pubkey:
        failures[lvl].append("don't use a public key smaller than 2048 bits")
    if has_wrong_ec_pubkey:
        failures[lvl].append("don't use an EC key smaller than 256 bits")
    if has_untrust_cert:
        failures[lvl].append("don't use an untrusted or self-signed certificate")
    if has_wrong_pfs:
        failures[lvl].append("don't use DHE smaller than 1024bits or ECC smaller than 160bits")
    return fubar
# is_old assumes a configuration *is* old, and will return False if
# the parameters of an old configuration are not found. Those parameters
# are defined in https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility
def is_old(results):
    """Check *results* against the "old" compatibility level.

    Appends remediation advice to the module-global ``failures['old']``
    list and returns True only when the scan matches the old level
    exactly (protocols, ciphers, sha1 signature, exact PFS sizes).
    """
    logging.debug('entering old evaluation')
    lvl = 'old'
    isold = True
    has_3des = False
    has_sha1 = True
    has_pfs = True
    has_ocsp = True
    all_proto = []
    for conn in results['ciphersuite']:
        logging.debug('testing connection %s' % conn)
        # flag unwanted ciphers
        if conn['cipher'] not in old["openssl_ciphers"]:
            logging.debug(conn['cipher'] + ' is not in the list of old ciphers')
            failures[lvl].append("remove cipher " + conn['cipher'])
            isold = False
        # verify required 3des cipher is present
        if conn['cipher'] == 'DES-CBC3-SHA':
            has_3des = True
        # accumulate every protocol seen across all connections
        for proto in conn['protocols']:
            if proto not in all_proto:
                all_proto.append(proto)
        # verify required sha1 signature is used
        if 'sha1WithRSAEncryption' not in conn['sigalg']:
            logging.debug(conn['sigalg'][0] + ' is a not an old signature')
            has_sha1 = False
        # verify required pfs parameter is used (must_match=True: the
        # old level requires the exact DH/ECDH parameter sizes)
        if conn['pfs'] != 'None':
            if not has_good_pfs(conn['pfs'], old["dh_param_size"], old["ecdh_param_size"], True):
                logging.debug(conn['pfs']+ ' is not a good PFS parameter for the old configuration')
                has_pfs = False
        if conn['ocsp_stapling'] == 'False':
            has_ocsp = False
    extra_proto = set(all_proto) - set(old["tls_versions"])
    for proto in extra_proto:
        logging.debug("found protocol not wanted in the old configuration:" + proto)
        failures[lvl].append('disable ' + proto)
        isold = False
    missing_proto = set(old["tls_versions"]) - set(all_proto)
    for proto in missing_proto:
        logging.debug("missing protocol wanted in the old configuration:" + proto)
        failures[lvl].append('enable ' + proto)
        isold = False
    if not has_3des:
        logging.debug("DES-CBC3-SHA is not supported and required by the old configuration")
        failures[lvl].append("add cipher DES-CBC3-SHA")
        isold = False
    if not has_sha1:
        failures[lvl].append("use a certificate with sha1WithRSAEncryption signature")
        isold = False
    if not has_pfs:
        failures[lvl].append("use DHE of {dhe}bits and ECC of {ecdhe}bits".format(
            dhe=old["dh_param_size"], ecdhe=old["ecdh_param_size"]))
        isold = False
    # OCSP stapling is only advisory: it does not affect the verdict
    if not has_ocsp:
        failures[lvl].append("consider enabling OCSP Stapling")
    if results['serverside'] != ('True' if old['server_preferred_order'] else 'False'):
        failures[lvl].append("enforce server side ordering" if old['server_preferred_order'] else "enforce client side ordering")
        isold = False
    return isold
# is_intermediate is similar to is_old but for intermediate configuration from
# https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
def is_intermediate(results):
    """Check *results* against the "intermediate" compatibility level.

    Appends remediation advice to the module-global
    ``failures['intermediate']`` list and returns True when the scan
    matches the intermediate level.
    """
    logging.debug('entering intermediate evaluation')
    lvl = 'intermediate'
    isinter = True
    # NOTE(review): has_tls1 is computed below but never used afterwards
    has_tls1 = False
    has_pfs = True
    has_sigalg = True
    has_ocsp = True
    all_proto = []
    for conn in results['ciphersuite']:
        logging.debug('testing connection %s' % conn)
        if conn['cipher'] not in inter["openssl_ciphers"]:
            logging.debug(conn['cipher'] + ' is not in the list of intermediate ciphers')
            failures[lvl].append("remove cipher " + conn['cipher'])
            isinter = False
        # accumulate every protocol seen across all connections
        for proto in conn['protocols']:
            if proto not in all_proto:
                all_proto.append(proto)
        if 'TLSv1' in conn['protocols']:
            has_tls1 = True
        if conn['sigalg'][0] not in inter["certificate_signatures"]:
            logging.debug(conn['sigalg'][0] + ' is a not an intermediate signature')
            has_sigalg = False
        # PFS sizes only need to reach the target here (no must_match)
        if conn['pfs'] != 'None':
            if not has_good_pfs(conn['pfs'], inter["dh_param_size"], inter["ecdh_param_size"]):
                logging.debug(conn['pfs']+ ' is not a good PFS parameter for the intermediate configuration')
                has_pfs = False
        if conn['ocsp_stapling'] == 'False':
            has_ocsp = False
    extra_proto = set(all_proto) - set(inter["tls_versions"])
    for proto in extra_proto:
        logging.debug("found protocol not wanted in the intermediate configuration:" + proto)
        failures[lvl].append('disable ' + proto)
        isinter = False
    # missing protocols are advisory only at this level
    missing_proto = set(inter["tls_versions"]) - set(all_proto)
    for proto in missing_proto:
        logging.debug("missing protocol wanted in the intermediate configuration:" + proto)
        failures[lvl].append('consider enabling ' + proto)
    if not has_sigalg:
        failures[lvl].append("use a certificate signed with %s" % " or ".join(inter["certificate_signatures"]))
        isinter = False
    # weak PFS and missing OCSP stapling are advisory only
    if not has_pfs:
        failures[lvl].append("consider using DHE of at least 2048bits and ECC 256bit and greater")
    if not has_ocsp:
        failures[lvl].append("consider enabling OCSP Stapling")
    if results['serverside'] != ('True' if inter['server_preferred_order'] else 'False'):
        failures[lvl].append("enforce server side ordering" if inter['server_preferred_order'] else "enforce client side ordering")
        isinter = False
    return isinter
# is_modern is similar to is_old but for modern configuration from
# https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
def is_modern(results):
    """Check *results* against the "modern" compatibility level.

    Appends remediation advice to the module-global
    ``failures['modern']`` list and returns True when the scan matches
    the modern level (exact PFS parameter sizes required).
    """
    logging.debug('entering modern evaluation')
    lvl = 'modern'
    ismodern = True
    has_pfs = True
    has_sigalg = True
    has_ocsp = True
    all_proto = []
    for conn in results['ciphersuite']:
        logging.debug('testing connection %s' % conn)
        if conn['cipher'] not in modern["openssl_ciphers"]:
            logging.debug(conn['cipher'] + ' is not in the list of modern ciphers')
            failures[lvl].append("remove cipher " + conn['cipher'])
            ismodern = False
        # accumulate every protocol seen across all connections
        for proto in conn['protocols']:
            if proto not in all_proto:
                all_proto.append(proto)
        if conn['sigalg'][0] not in modern["certificate_signatures"]:
            logging.debug(conn['sigalg'][0] + ' is a not an modern signature')
            has_sigalg = False
        # must_match=True: modern requires the exact DH/ECDH sizes
        if conn['pfs'] != 'None':
            if not has_good_pfs(conn['pfs'], modern["dh_param_size"], modern["ecdh_param_size"], True):
                logging.debug(conn['pfs']+ ' is not a good PFS parameter for the modern configuration')
                ismodern = False
                has_pfs = False
        if conn['ocsp_stapling'] == 'False':
            has_ocsp = False
    extra_proto = set(all_proto) - set(modern["tls_versions"])
    for proto in extra_proto:
        logging.debug("found protocol not wanted in the modern configuration:" + proto)
        failures[lvl].append('disable ' + proto)
        ismodern = False
    # missing protocols are advisory only
    missing_proto = set(modern["tls_versions"]) - set(all_proto)
    for proto in missing_proto:
        logging.debug("missing protocol wanted in the modern configuration:" + proto)
        failures[lvl].append('consider enabling ' + proto)
    if not has_sigalg:
        failures[lvl].append("use a certificate signed with %s" % " or ".join(modern["certificate_signatures"]))
        ismodern = False
    if not has_pfs:
        failures[lvl].append("use DHE of at least 2048bits and ECC 256bit and greater")
        ismodern = False
    # OCSP stapling is advisory only
    if not has_ocsp:
        failures[lvl].append("consider enabling OCSP Stapling")
    if results['serverside'] != ('True' if modern['server_preferred_order'] else 'False'):
        failures[lvl].append("enforce server side ordering" if modern['server_preferred_order'] else "enforce client side ordering")
        ismodern = False
    return ismodern
def is_ordered(results, ref_ciphersuite, lvl):
    """Verify the target's cipher ordering against a reference suite.

    Appends advice to the module-global ``failures[lvl]`` list and
    returns True when the order of the scanned ciphers is consistent
    with ``ref_ciphersuite``.
    """
    ordered = True
    previous_pos = 0
    # iterate through the list of ciphers returned by the target
    for conn in results['ciphersuite']:
        pos = 0
        # compare against each cipher of the reference ciphersuite
        for ref_cipher in ref_ciphersuite:
            # if the target cipher matches the reference ciphersuite,
            # look for its position against the reference and flag ciphers
            # that violate the reference ordering
            if conn['cipher'] == ref_cipher:
                logging.debug("{0} found in reference ciphersuite at position {1}".format(conn['cipher'], pos))
                if pos < previous_pos:
                    failures[lvl].append("increase priority of {0} over {1}".format(conn['cipher'], ref_ciphersuite[previous_pos]))
                    ordered = False
                # save current position
                previous_pos = pos
            pos += 1
    # BUG FIX: emit the summary advice once, after all connections were
    # checked; previously this lived inside the loop and was appended
    # again for every connection processed after the first violation
    if not ordered:
        failures[lvl].append("fix ciphersuite ordering, use recommended " + lvl + " ciphersuite")
    return ordered
def evaluate_all(results):
    """Determine which configuration level the scan results match.

    Returns "no" when nothing was negotiated, "bad" when fubar
    conditions exist, otherwise the highest matching level name,
    possibly suffixed with " with bad ordering".
    """
    if len(results['ciphersuite']) == 0:
        return "no"
    status = "obscure or unknown"
    level_checks = (("old", old, is_old),
                    ("intermediate", inter, is_intermediate),
                    ("modern", modern, is_modern))
    # every level check runs (each appends its own failure advice);
    # the last matching one wins
    for level_name, level_conf, level_check in level_checks:
        if not level_check(results):
            continue
        status = level_name
        if level_conf["server_preferred_order"] and \
                not is_ordered(results, level_conf["openssl_ciphers"], level_name):
            status = level_name + " with bad ordering"
    if is_fubar(results):
        status = "bad"
    return status
def process_results(data, level=None, do_json=False, do_nagios=False):
    """Parse cipherscan JSON output, evaluate it and report the verdict.

    Prints either a JSON document (do_json) or a human-readable report
    on stdout.  Resets and fills the module-global ``failures`` dict.
    Returns an exit status (0/1/2, nagios-style when do_nagios is set),
    True after JSON output, or False on a processing error.
    """
    logging.debug('processing results on %s' % data)
    exit_status = 0
    results = dict()
    # initialize the failures struct
    global failures
    json_output = dict()
    failures = dict()
    failures['fubar'] = []
    failures['old'] = []
    failures['intermediate'] = []
    failures['modern'] = []
    if not level:
        level='none'
    try:
        results = json.loads(data)
    except ValueError as e:
        # invalid JSON: fall through with empty results
        print("invalid json data: " + str(e))
    try:
        if results:
            if do_json:
                json_output['target'] = results['target']
                d = datetime.utcnow()
                json_output['utctimestamp'] = d.isoformat("T") + "Z"
                json_output['level'] = evaluate_all(results)
                json_output['target_level'] = level
                json_output['compliance'] = False
                # substring match: "old" also matches "old with bad ordering"
                if json_output['target_level'] in json_output['level']:
                    json_output['compliance'] = True
                if operator:
                    json_output['operator'] = operator
            else:
                measured_lvl = evaluate_all(results)
                print(results['target'] + " has " + measured_lvl + " ssl/tls")
                if level != 'none':
                    if level in measured_lvl:
                        print("and complies with the '" + level + "' level")
                    else:
                        print("and DOES NOT comply with the '" + level + "' level")
    except TypeError as e:
        print("Error processing data: " + str(e))
        return False
    if do_json:
        json_output['failures'] = deepcopy(failures)
        print(json.dumps(json_output))
        return True
    if len(failures['fubar']) > 0:
        print("\nThings that are bad:")
        for failure in failures['fubar']:
            print("* " + failure)
        if do_nagios:
            exit_status = 2
    # print failures
    if level != 'none':
        # a specific target level was requested: only report that one
        if len(failures[level]) > 0:
            print("\nChanges needed to match the " + level + " level:")
            for failure in failures[level]:
                print("* " + failure)
            if do_nagios and exit_status < 2:
                exit_status = 1
    else:
        for lvl in ['old', 'intermediate', 'modern']:
            if len(failures[lvl]) > 0:
                print("\nChanges needed to match the " + lvl + " level:")
                for failure in failures[lvl]:
                    print("* " + failure)
                if do_nagios and exit_status < 2:
                    exit_status = 1
    return exit_status
def build_ciphers_lists():
    """Populate the module-global old/inter/modern level definitions.

    Downloads the current server side TLS recommendations from
    Mozilla, falling back to the bundled ``server-side-tls-conf.json``
    when the network is unreachable.  Any other failure (bad JSON,
    unexpected schema, ...) aborts with exit code 23.
    """
    sstlsurl = "https://statics.tls.security.mozilla.org/server-side-tls-conf.json"
    conf = dict()
    try:
        raw = urlopen(sstlsurl).read()
        conf = json.loads(raw)
        logging.debug('retrieving online server side tls recommendations from %s' % sstlsurl)
    except URLError:
        # network failure: fall back to the local snapshot
        with open('server-side-tls-conf.json', 'r') as f:
            conf = json.load(f)
        logging.debug('Error connecting to %s; using local archive of server side tls recommendations' % sstlsurl)
    except Exception:
        # BUG FIX: was a bare "except:" which also swallowed
        # SystemExit and KeyboardInterrupt
        print("failed to retrieve JSON configurations from %s" % sstlsurl)
        sys.exit(23)
    global old, inter, modern
    old = conf["configurations"]["old"]
    inter = conf["configurations"]["intermediate"]
    modern = conf["configurations"]["modern"]
def main():
    """Command-line entry point.

    Parses arguments, then either invokes the neighbouring cipherscan
    script on a target (-t) or reads one line of cipherscan JSON from
    the input file/stdin, and exits with the analysis status code.
    """
    parser = argparse.ArgumentParser(
        description='Analyze cipherscan results and provides guidelines to improve configuration.',
        usage='\n* Analyze a single target, invokes cipherscan: $ ./analyze.py -t [target]' \
            '\n* Evaluate json results passed through stdin: $ python analyze.py target_results.json' \
            '\nexample: ./analyze.py -t mozilla.org',
        epilog='Julien Vehent [:ulfr] - 2014')
    parser.add_argument('-d', dest='debug', action='store_true',
        help='debug output')
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
        default=sys.stdin, help='cipherscan json results')
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
        default=sys.stdout, help='json formatted analysis')
    parser.add_argument('-l', dest='level',
        help='target configuration level [old, intermediate, modern]')
    parser.add_argument('-t', dest='target',
        help='analyze a <target>, invokes cipherscan')
    parser.add_argument('-o', dest='openssl',
        help='path to openssl binary, if you don\'t like the default')
    parser.add_argument('-j', dest='json', action='store_true',
        help='output results in json format')
    parser.add_argument('--ops', dest='operator',
        help='optional name of the operator\'s team added into the JSON output (for database insertion)')
    parser.add_argument('--nagios', dest='nagios', action='store_true',
        help='use nagios-conformant exit codes')
    args = parser.parse_args()
    # directory of this script; used to locate the cipherscan binary
    global mypath
    mypath = os.path.dirname(os.path.realpath(sys.argv[0]))
    if args.debug:
        logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    else:
        logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    global operator
    operator=''
    if args.operator:
        operator=args.operator
    build_ciphers_lists()
    if args.target:
        # evaluate target specified as argument
        logging.debug('Invoking cipherscan with target: ' + args.target)
        data=''
        if args.openssl:
            data = subprocess.check_output([mypath + '/cipherscan', '-o', args.openssl, '-j', args.target])
        else:
            data = subprocess.check_output([mypath + '/cipherscan', '-j', args.target])
        data = str_compat(data)
        exit_status=process_results(str(data), args.level, args.json, args.nagios)
    else:
        # no target given: read one line of cipherscan JSON from infile/stdin
        if os.fstat(args.infile.fileno()).st_size < 2:
            logging.error("invalid input file")
            parser.print_help()
            if args.nagios:
                # nagios code 3 means UNKNOWN
                sys.exit(3)
            else:
                sys.exit(1)
        data = args.infile.readline()
        logging.debug('Evaluating results from stdin: ' + data)
        exit_status=process_results(data, args.level, args.json, args.nagios)
    sys.exit(exit_status)

if __name__ == "__main__":
    main()
| 20,759 | Python | .py | 451 | 37.339246 | 145 | 0.62402 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,078 | cscan.py | mozilla_cipherscan/cscan.py | # Copyright 2016(c) Hubert Kario
# This work is released under the Mozilla Public License Version 2.0
"""tlslite-ng based server configuration (and bug) scanner."""
from __future__ import print_function
from tlslite.messages import ClientHello, ServerHello, ServerHelloDone, Alert
from tlslite.constants import CipherSuite, \
AlertLevel
import sys
import json
import getopt
import itertools
from cscan.scanner import Scanner
from cscan.config import Firefox_42
from cscan.modifiers import set_hello_version
def scan_with_config(host, port, conf, hostname, __sentry=None, __cache={}):
    """Connect to server and return set of exchanged messages.

    Results are memoized per (host, port, conf, hostname): ``__cache``
    is a deliberately shared mutable default that persists across
    calls, and ``__sentry`` guards against callers accidentally
    passing a positional argument that would replace the cache.
    """
    assert __sentry is None
    key = (host, port, conf, hostname)
    if key in __cache:
        # `verbose`/`json_out` are module-level flags — presumably set
        # by the CLI entry point; TODO confirm
        if verbose and not json_out:
            # ':' marks a cache hit in the progress output
            print(":", end='')
        return __cache[key]
    scanner = Scanner(conf, host, port, hostname)
    ret = scanner.scan()
    __cache[key] = ret
    if verbose and not json_out:
        # '.' marks a freshly performed scan in the progress output
        print(".", end='')
        sys.stdout.flush()
    return ret
def simple_inspector(result):
    """Quickly judge whether the exchange in *result* succeeded.

    Returns True when a ServerHelloDone was received and, if both a
    ClientHello and ServerHello are present, the negotiated cipher was
    among those the client offered; False for incomplete responses,
    errors or a cipher-suite mismatch.
    """
    if not any(isinstance(msg, ServerHelloDone) for msg in result):
        # incomplete response or error
        return False
    client_hello = next((m for m in result if isinstance(m, ClientHello)), None)
    server_hello = next((m for m in result if isinstance(m, ServerHello)), None)
    if client_hello and server_hello \
            and server_hello.cipher_suite not in client_hello.cipher_suites:
        # FAILURE: cipher suite mismatch
        return False
    return True
def verbose_inspector(desc, result):
    """Describe the connection result in human-readable form."""
    ret = "{0}:".format(desc)
    if any(isinstance(x, ServerHelloDone) for x in result):
        ch = next((x for x in result if isinstance(x, ClientHello)), None)
        sh = next((x for x in result if isinstance(x, ServerHello)), None)
        if sh and ch:
            if sh.cipher_suite not in ch.cipher_suites:
                ret += " FAILURE cipher suite mismatch"
                return ret
            # prefer the IETF name, fall back to the raw hex id
            name = CipherSuite.ietfNames[sh.cipher_suite] \
                if sh.cipher_suite in CipherSuite.ietfNames \
                else hex(sh.cipher_suite)
            ret += " ok: {0}, {1}".format(sh.server_version,
                                          name)
            return ret
    # failure path: dump every non-ClientHello message received
    ret += " FAILURE "
    errors = []
    for msg in result:
        if isinstance(msg, ClientHello):
            continue
        # check if returned message supports custom formatting
        if msg.__class__.__format__ is not object.__format__:
            # 'vxm' is a tlslite-ng message format spec — presumably a
            # verbose hex dump; TODO confirm against tlslite messages
            errors += ["{:vxm}".format(msg)]
        else:
            errors += [repr(msg)]
        # skip printing close errors after fatal alerts, they are expected
        if isinstance(msg, Alert) and msg.level == AlertLevel.fatal:
            break
    ret += "\n".join(errors)
    return ret
# name -> client configuration generator, filled in by load_configs()
configs = {}

def load_configs():
    """Load known client configurations for later use in scanning."""
    base_configs = [Firefox_42]
    for conf in base_configs:
        # probe every interesting hello version, including the
        # intentionally bogus (3, 5)="TLS 1.4" and (3, 254)="SSL 3.254"
        # values used by the intolerance scan
        for version in ((3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 254)):
            if conf().version != version:
                # just changed version
                gen = set_hello_version(conf(), version)
                if gen.record_version > version:
                    gen.record_version = version
                configs[gen.name] = gen
    # Firefox 42 configs
    gen = Firefox_42()
    configs[gen.name] = gen
def scan_TLS_intolerancies(host, port, hostname):
    """Look for intolerancies (version, extensions, ...) in a TLS server."""
    # cache of probe results: config name -> list of exchanged messages
    results = {}
    def result_iterator(predicate):
        """
        Selecting iterator over cached results.

        Looks for matching result from already performed scans.
        Yields True for every cached probe (matching `predicate`) that
        failed, i.e. True means the server is intolerant to that config.
        """
        return (not simple_inspector(results[name]) for name in results
                if predicate(configs[name]))
    def result_cache(name, conf):
        """Perform scan if config is not in results, caches result."""
        return results[name] if name in results \
            else results.setdefault(name, scan_with_config(host, port, conf,
                                                           hostname))
    def conf_iterator(predicate):
        """
        Caching, selecting iterator over configs.

        Returns an iterator that will go over configs that match the provided
        predicate (a function that returns true or false depending if given
        config is ok for test at hand) while saving the results to the
        cache/verbose `results` log/dictionary.

        The iterator returns False for every connection that succeeded
        (meaning the server is NOT intolerant to the config) and True to
        mean that the server IS intolerant to the config.
        """
        # NOTE: the chain yields (and exhausts) cached results first; only
        # then does scan_iter start scanning and mutating `results`
        scan_iter = (not simple_inspector(result_cache(name, conf))
                     for name, conf in configs.items()
                     if predicate(conf))
        return itertools.chain(result_iterator(predicate), scan_iter)
    # a host that fails every single probe is treated as not speaking SSL/TLS
    host_up = not all(conf_iterator(lambda conf: True))
    intolerancies = {}
    if not host_up:
        if json_out:
            print(json.dumps(intolerancies))
        else:
            print("Host does not seem to support SSL or TLS protocol")
        return
    # an entry is True when every probe using that hello version failed,
    # i.e. the server appears intolerant to that protocol version
    intolerancies["SSL 3.254"] = all(conf_iterator(lambda conf:
                                     conf.version == (3, 254)))
    intolerancies["TLS 1.4"] = all(conf_iterator(lambda conf:
                                   conf.version == (3, 5)))
    intolerancies["TLS 1.3"] = all(conf_iterator(lambda conf:
                                   conf.version == (3, 4)))
    intolerancies["TLS 1.2"] = all(conf_iterator(lambda conf:
                                   conf.version == (3, 3)))
    intolerancies["TLS 1.1"] = all(conf_iterator(lambda conf:
                                   conf.version == (3, 2)))
    intolerancies["TLS 1.0"] = all(conf_iterator(lambda conf:
                                   conf.version == (3, 1)))
    if json_out:
        print(json.dumps(intolerancies))
    else:
        if not no_header:
            if verbose:
                print()
            print("Host {0}:{1} scan complete".format(host, port))
            if hostname:
                print("SNI hostname used: {0}".format(hostname))
        if verbose:
            print()
            print("Individual probe results:")
            for desc, ret in sorted(results.items()):
                print(verbose_inspector(desc, ret))
        print()
        print("Intolerance to:")
        for intolerance, value in sorted(intolerancies.items()):
            print(" {0:20}: {1}".format(intolerance,
                                        "PRESENT" if value else "absent"))
def single_probe(name):
    """Run a single probe against a server, print result."""
    result = scan_with_config(host, port, configs[name], hostname)
    print(verbose_inspector(name, result))
def usage():
    """Print usage information."""
    # keep this list in sync with the getopt() option strings in __main__
    print("./cscan.py [ARGUMENTS] host[:port] [SNI-HOST-NAME]")
    print()
    print("-l, --list           List probe names")
    print("-p name, --probe     Run just a single probe")
    print("-j, --json           Output in JSON format")
    print("-v, --verbose        Use verbose output")
    print("-h, --help           Print this help message")
    print("    --no-header      Skip the header in human-readable output")
if __name__ == "__main__":
    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "jvhlp:",
                                   ["json", "verbose", "help", "list",
                                    "probe=", "no-header"])
    except getopt.GetoptError as err:
        print(err)
        usage()
        sys.exit(2)
    # option defaults
    json_out = False
    verbose = False
    list_probes = False
    run_probe = None
    no_header = False
    for opt, arg in opts:
        if opt in ('-j', '--json'):
            json_out = True
        elif opt in ('-v', '--verbose'):
            verbose = True
        elif opt in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif opt in ('-l', '--list'):
            list_probes = True
        elif opt in ('-p', '--probe'):
            run_probe = arg
        elif opt in ('--no-header', ):
            no_header = True
        else:
            raise AssertionError("Unknown option {0}".format(opt))
    if len(args) > 2:
        print("Too many arguments")
        usage()
        sys.exit(2)
    load_configs()
    if list_probes:
        for desc, ret in sorted(configs.items()):
            print("{0}: {1}".format(desc, ret.__doc__))
        sys.exit(0)
    # BUGFIX: a missing host argument previously crashed with IndexError
    # on args[0]; report the problem and show usage instead
    if not args:
        print("Missing host argument")
        usage()
        sys.exit(2)
    hostname = None
    if len(args) == 2:
        hostname = args[1]
    hostaddr = args[0].split(":")
    if len(hostaddr) > 1:
        # note: port stays a string here; getaddrinfo accepts service strings
        host, port = hostaddr
    else:
        host = hostaddr[0]
        port = 443
    if run_probe:
        single_probe(run_probe)
    else:
        scan_TLS_intolerancies(host, port, hostname)
| 9,233 | Python | .py | 226 | 30.588496 | 77 | 0.571588 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,079 | modifiers.py | mozilla_cipherscan/cscan/modifiers.py | # Copyright (c) 2016 Hubert Kario
# Released under Mozilla Public License 2.0
"""Methods for modifying the scan configurations on the fly."""
from __future__ import print_function
# Human-readable names of the SSL/TLS protocol versions used in probes.
proto_versions = {
    (3, 0): "SSLv3",
    (3, 1): "TLSv1.0",
    (3, 2): "TLSv1.1",
    (3, 3): "TLSv1.2",
    (3, 4): "TLSv1.3",
    (3, 5): "TLSv1.4",
    (3, 6): "TLSv1.5",
}


def version_to_str(version):
    """Convert a version tuple to human-readable string."""
    try:
        return proto_versions[version]
    except KeyError:
        # unknown version: fall back to dotted "major.minor" notation
        return "{0[0]}.{0[1]}".format(version)


def set_hello_version(generator, version):
    """Set client hello version."""
    generator.version = version
    # record the change so it shows up in the config's display name
    generator.modifications.append(version_to_str(version))
    return generator
| 883 | Python | .py | 22 | 32.090909 | 63 | 0.590164 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,080 | messages.py | mozilla_cipherscan/cscan/messages.py | # Copyright (c) 2016 Hubert Kario
# Released under Mozilla Public License 2.0
"""Extensions and modification of the tlslite-ng messages classes."""
import tlslite.messages as messages
from tlslite.utils.compat import b2a_hex
from tlslite.constants import ContentType, CertificateType, ECCurveType, \
HashAlgorithm, SignatureAlgorithm
from tlslite.x509certchain import X509CertChain
from tlslite.utils.cryptomath import secureHash
from .constants import HandshakeType, CipherSuite, GroupName
# gotta go fast
# comparing client hello's using ClientHello.write() is painfully slow
# monkey patch in faster compare methods
def __CH_eq_fun(self, other):
    """
    Check if the other is equal to the object.

    Always returns False when other is not a ClientHello object.
    """
    if not isinstance(other, messages.ClientHello):
        return False
    # field-by-field comparison, short-circuiting on first mismatch;
    # much faster than serialising both hellos with write()
    for attr in ('ssl2', 'client_version', 'random', 'session_id',
                 'cipher_suites', 'compression_methods', 'extensions'):
        if getattr(self, attr) != getattr(other, attr):
            return False
    return True
messages.ClientHello.__eq__ = __CH_eq_fun
def __CH_ne_fun(self, other):
    """
    Check if the other is not equal to the object.

    Always returns True when other is not a ClientHello object.
    """
    is_equal = self.__eq__(other)
    return not is_equal
messages.ClientHello.__ne__ = __CH_ne_fun
def format_bytearray(byte_array, formatstr):
    """Format method for bytearrays."""
    # an 'x' in the format spec selects hex encoding, anything else repr()
    return b2a_hex(byte_array) if 'x' in formatstr else repr(byte_array)
def format_array(array, formatstr):
    """Return string representation of array while formatting elements."""
    if array is None:
        return "None"
    # pass formatstr through to elements that define a custom __format__,
    # fall back to repr() for plain objects
    rendered = ["{0:{1}}".format(item, formatstr)
                if item.__class__.__format__ is not object.__format__
                else repr(item)
                for item in array]
    return "[" + ", ".join(rendered) + "]"
class ServerHello(messages.ServerHello):
    """Class with enhanced human-readable serialisation."""
    def __format__(self, formatstr):
        """Return human readable representation of the object."""
        ext_str = format_array(self.extensions, formatstr)
        rand_str = format_bytearray(self.random, formatstr)
        sess_str = format_bytearray(self.session_id, formatstr)
        suite = CipherSuite.ietfNames.get(self.cipher_suite,
                                          self.cipher_suite)
        if 'v' in formatstr:
            suite = "CipherSuite.{0}".format(suite)
        # TODO cipher_suites (including verbose)
        # TODO compression_method (including verbose)
        template = ("ServerHello(server_version=({0[0]}, {0[1]}), random={1}, "
                    "session_id={2!r}, cipher_suite={3}, "
                    "compression_method={4}, _tack_ext={5}, extensions={6})")
        return template.format(self.server_version, rand_str, sess_str,
                               suite, self.compression_method,
                               self._tack_ext, ext_str)
class Certificate(messages.Certificate):
    """Class with more robust certificate parsing and serialisation."""
    def parse(self, parser):
        """Deserialise the object from binary data."""
        # first try the strict upstream tlslite parser
        index = parser.index
        try:
            return super(Certificate, self).parse(parser)
        except (AssertionError, SyntaxError):
            pass
        # strict parse failed: rewind and re-parse leniently, keeping the
        # certificates as raw byte strings instead of X509 objects
        parser.index = index
        parser.startLengthCheck(3)
        if self.certificateType == CertificateType.x509:
            chainLength = parser.get(3)
            index = 0
            certificate_list = []
            while index != chainLength:
                certBytes = parser.getVarBytes(3)
                certificate_list.append(certBytes)
                index += len(certBytes)+3
            if certificate_list:
                self.certChain = certificate_list
            else:
                # an empty certificate list is still considered malformed
                raise AssertionError()
        parser.stopLengthCheck()
        return self
    def __format__(self, formatstr):
        """
        Advanced formatting for Certificate.

        Only the 'm' flag (print sha256 digests of certificates instead of
        their full bodies) is acted on; 'h' and 'v' are recognised but
        currently unused.
        """
        hexify = False
        verbose = False
        digest = False
        # NOTE(review): hexify and verbose are parsed but never used below
        if 'h' in formatstr:
            hexify = True
        if 'v' in formatstr:
            verbose = True
        if 'm' in formatstr:
            digest = True
        if self.certChain is None:
            cert_list = None
        else:
            # certChain may be a parsed X509CertChain or raw byte strings
            # (see the lenient branch of parse() above)
            if isinstance(self.certChain, X509CertChain):
                cert_list = [cert.bytes for cert in self.certChain.x509List]
            else:
                cert_list = self.certChain
            if digest:
                cert_list = "[" + ", ".join(b2a_hex(secureHash(cert, 'sha256'))
                                            for cert in cert_list) + "]"
            else:
                cert_list = [repr(cert) for cert in cert_list]
        return "Certificate({0})".format(cert_list)
class NewSessionTicket(messages.HandshakeMsg):
    """Class for handling the Session Tickets from RFC 5077."""

    def __init__(self):
        """Initialize new session ticket message object."""
        super(NewSessionTicket, self).__init__(HandshakeType.session_ticket)
        # BUGFIX: this attribute was previously misspelled
        # "ticket_lifetime_hintt" here and in __format__, so the value set
        # by parse() was never displayed (always showed the initial 0)
        self.ticket_lifetime_hint = 0
        self.ticket = None

    def parse(self, parser):
        """Parse the object from on-the-wire data."""
        self.ticket_lifetime_hint = parser.get(4)
        self.ticket = parser.getVarBytes(2)
        return self

    def __format__(self, formatstr):
        """Return human-readable representation of the object."""
        ticket = format_bytearray(self.ticket, formatstr)
        return "NewSessionTicket(ticket_lifetime_hint={0}, ticket={1})"\
            .format(self.ticket_lifetime_hint, ticket)
class CertificateStatus(messages.HandshakeMsg):
    """Class for handling the CertificateStatus OCSP staples from RFC 4366."""
    def __init__(self):
        """Create a certificate status message handling object."""
        super(CertificateStatus, self).__init__(
            HandshakeType.certificate_status)
        self.status_type = 0
        self.response = None
    def parse(self, parser):
        """Deserialise certificate status message from binary data."""
        parser.startLengthCheck(3)
        self.status_type = parser.get(1)
        # only type 1 (ocsp) is understood  # FIXME, create registry
        if self.status_type != 1:
            raise SyntaxError()  # FIXME, use sane-er type
        self.response = parser.getVarBytes(3)
        parser.stopLengthCheck()
        return self
    def __format__(self, formatstr):
        """Return human-readable representation of certificate status."""
        resp = format_bytearray(self.response, formatstr)
        return ("CertificateStatus(status_type={0}, response={1})"
                .format(self.status_type, resp))
class Message(messages.Message):
    """Message class with more robust formatting capability."""
    def __format__(self, formatstr):
        """Advanced formatting for messages."""
        # 'v' prefixes the content type with its enum name,
        # 'h' hex-encodes the payload
        prefix = "ContentType." if 'v' in formatstr else ""
        if 'h' in formatstr:
            data = b2a_hex(self.data)
        else:
            data = repr(self.data)
        return "Message(contentType={0}{1}, data={2})"\
            .format(prefix, ContentType.toStr(self.contentType), data)
class ServerKeyExchange(messages.ServerKeyExchange):
    """ServerKeyExchange class with more robust formatting capability."""
    def parse(self, parser):
        """More robust parser for SKE.

        Tolerates malformed/truncated messages: whatever fields were
        successfully deserialised before the upstream parser gave up are
        kept on the object.
        """
        try:
            super(ServerKeyExchange, self).parse(parser)
        except AssertionError:
            pass
        return self
    def __format__(self, formatstr):
        """Return human-readable representation of the object.

        Only the key-exchange fields that are actually set (SRP, DH,
        ECDH, signature) are included in the output.
        """
        if 'v' in formatstr:
            verbose = "CipherSuite."
        else:
            verbose = ""
        ret = "ServerKeyExchange(cipherSuite={0}{1}, version={2}"\
            .format(verbose, CipherSuite.ietfNames[self.cipherSuite],
                    self.version)
        if self.srp_N:
            ret += ", srp_N={0}, srp_g={1}, srp_s={2}, srp_B={3}"\
                .format(self.srp_N, self.srp_g, self.srp_s, self.srp_B)
        if self.dh_p:
            ret += ", dh_p={0}, dh_g={1}, dh_Ys={2}"\
                .format(self.dh_p, self.dh_g, self.dh_Ys)
        if self.ecdh_Ys:
            ecdh_Ys = format_bytearray(self.ecdh_Ys, formatstr)
            ret += ", curve_type={0}, named_curve={1}, ecdh_Ys={2}"\
                .format(ECCurveType.toStr(self.curve_type),
                        GroupName.toStr(self.named_curve), ecdh_Ys)
        if self.signAlg:
            ret += ", hashAlg={0}, signAlg={1}"\
                .format(HashAlgorithm.toStr(self.hashAlg),
                        SignatureAlgorithm.toStr(self.signAlg))
        if self.signature:
            ret += ", signature={0}"\
                .format(format_bytearray(self.signature, formatstr))
        return ret + ")"
| 9,289 | Python | .py | 214 | 33.485981 | 79 | 0.607756 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,081 | extensions.py | mozilla_cipherscan/cscan/extensions.py | # Copyright 2016(c) Hubert Kario
# This work is released under the Mozilla Public License Version 2.0
"""Extra TLS extensions."""
import tlslite.extensions
from tlslite.utils.codec import Writer
from tlslite.utils.compat import b2a_hex

# BUGFIX: "import .messages" is not valid Python syntax; relative module
# imports must use the "from . import name" form
from . import messages
from .constants import ExtensionType, GroupName
# make TLSExtensions hashable (__eq__ is already defined in base class)
# TLSExtension defines __eq__ but no __hash__; add one derived from the
# same fields (extension type + payload) so equal extensions hash equal
# and extensions can be used in sets and as dict keys
tlslite.extensions.TLSExtension.__hash__ = lambda self: hash(self.extType) ^ \
    hash(bytes(self.extData))
class RenegotiationExtension(tlslite.extensions.TLSExtension):
    """Secure Renegotiation extension RFC 5746."""

    def __init__(self):
        """Initialize secure renegotiation extension."""
        super(RenegotiationExtension, self).__init__(
            extType=ExtensionType.renegotiation_info)
        # verify_data of previous handshake; None when extension is empty
        self.renegotiated_connection = None

    def create(self, data):
        """Set the value of the Finished message."""
        self.renegotiated_connection = data
        # return self for chaining, consistent with other extensions' create()
        return self

    @property
    def extData(self):
        """Serialise the extension."""
        if self.renegotiated_connection is None:
            return bytearray(0)
        writer = Writer()
        writer.addVarSeq(self.renegotiated_connection, 1, 1)
        return writer.bytes

    def parse(self, parser):
        """Deserialise the extension from binary data."""
        if parser.getRemainingLength() == 0:
            self.renegotiated_connection = None
            # BUGFIX: previously returned None here; parse() must always
            # return self as extension dispatch code uses its return value
            return self
        self.renegotiated_connection = parser.getVarBytes(1)
        return self

    def __repr__(self):
        """Human readable representation of extension."""
        return "RenegotiationExtension(renegotiated_connection={0!r})"\
            .format(self.renegotiated_connection)

    def __format__(self, formatstr):
        """Formatted representation of extension."""
        data = messages.format_bytearray(self.renegotiated_connection,
                                         formatstr)
        return "RenegotiationExtension(renegotiated_connection={0})"\
            .format(data)


tlslite.extensions.TLSExtension._universalExtensions[
    ExtensionType.renegotiation_info] = RenegotiationExtension
class SessionTicketExtension(tlslite.extensions.TLSExtension):
    """Session Ticket extension from RFC 5077."""
    def __init__(self):
        """Create Session Ticket extension."""
        super(SessionTicketExtension, self).__init__(
            extType=ExtensionType.session_ticket)
        # opaque ticket payload; empty when offering ticket support only
        self.data = bytearray(0)
    def parse(self, parser):
        """Deserialise the extension from binary data."""
        self.data = parser.bytes
        return self
    def __format__(self, formatstr):
        """Print extension data in human-readable form."""
        payload = messages.format_bytearray(self.data, formatstr)
        return "SessionTicketExtension(data={0})".format(payload)


tlslite.extensions.TLSExtension._universalExtensions[
    ExtensionType.session_ticket] = SessionTicketExtension
class ServerStatusRequestExtension(tlslite.extensions.TLSExtension):
    """Server-side status_request extension; carries no payload."""
    def __init__(self):
        """Create server status request extension."""
        super(ServerStatusRequestExtension, self).__init__(
            extType=ExtensionType.status_request)
    def parse(self, parser):
        """Deserialise the extension from binary data."""
        # the server's status_request extension must be empty
        if parser.getRemainingLength() != 0:
            raise SyntaxError()  # FIXME
        return self
    def __repr__(self):
        """Human readable representation of the object."""
        return "ServerStatusRequestExtension()"


tlslite.extensions.TLSExtension._serverExtensions[
    ExtensionType.status_request] = ServerStatusRequestExtension
class KeyShareExtension(tlslite.extensions.TLSExtension):
    """TLS1.3 extension for handling key negotiation."""

    def __init__(self):
        """Create key share extension object."""
        super(KeyShareExtension, self).__init__(
            extType=ExtensionType.key_share)
        # list of (group id, opaque key share) tuples; None when unset
        self.client_shares = None

    def create(self, shares):
        """
        Set the list of key shares to send.

        @type shares: list of tuples
        @param shares: a list of tuples where the first element is a NamedGroup
        ID while the second element in a tuple is an opaque bytearray encoding
        of the key share.
        """
        self.client_shares = shares
        return self

    @property
    def extData(self):
        """Serialise the extension."""
        if self.client_shares is None:
            return bytearray(0)
        writer = Writer()
        for group_id, share in self.client_shares:
            writer.add(group_id, 2)
            # FFDHE shares use a two-byte length, others a one-byte length
            if group_id in GroupName.allFF:
                share_length_length = 2
            else:
                share_length_length = 1
            writer.addVarSeq(share, 1, share_length_length)
        ext_writer = Writer()
        ext_writer.add(len(writer.bytes), 2)
        ext_writer.bytes += writer.bytes
        return ext_writer.bytes

    def parse(self, parser):
        """Deserialise the extension."""
        if parser.getRemainingLength() == 0:
            self.client_shares = None
            # BUGFIX: previously returned None here; parse() must always
            # return self as extension dispatch code uses its return value
            return self
        self.client_shares = []
        parser.startLengthCheck(2)
        while not parser.atLengthCheck():
            group_id = parser.get(2)
            if group_id in GroupName.allFF:
                share_length_length = 2
            else:
                share_length_length = 1
            share = parser.getVarBytes(share_length_length)
            self.client_shares.append((group_id, share))
        return self

    def __repr__(self):
        """Human readable representation of extension."""
        return "KeyShareExtension({0!r})".format(self.client_shares)

    def __format__(self, formatstr):
        """Formattable representation of extension."""
        if self.client_shares is None:
            return "KeyShareExtension(None)"
        verbose = ""
        hexlify = False
        if 'v' in formatstr:
            verbose = "GroupName."
        if 'h' in formatstr:
            hexlify = True
        shares = []
        for group_id, share in self.client_shares:
            if hexlify:
                share = b2a_hex(share)
            else:
                share = repr(share)
            shares += ["({0}{1}, {2})".format(verbose,
                                              GroupName.toStr(group_id),
                                              share)]
        return "KeyShareExtension([" + ",".join(shares) + "])"


tlslite.extensions.TLSExtension._universalExtensions[
    ExtensionType.key_share] = KeyShareExtension
| 6,685 | Python | .py | 156 | 33.282051 | 79 | 0.634037 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,082 | config.py | mozilla_cipherscan/cscan/config.py | # Copyright (c) 2016 Hubert Kario <hkario@redhat.com>
# Released under Mozilla Public License Version 2.0
"""Typical Client Hello messages sent by different clients."""
import random
from tlslite.messages import ClientHello
from tlslite.constants import \
ECPointFormat, HashAlgorithm, SignatureAlgorithm
from tlslite.extensions import SNIExtension, SupportedGroupsExtension, \
TLSExtension, SignatureAlgorithmsExtension, NPNExtension, \
ECPointFormatsExtension
from tlslite.utils.cryptomath import numberToByteArray
from .constants import CipherSuite, ExtensionType, GroupName
class HelloConfig(object):
    """Base object for all Client Hello configurations."""
    def __init__(self):
        """Initialize object with default settings."""
        # base name; the `name` property appends modification descriptions
        self._name = None
        # human-readable list of changes applied on top of the base config
        self.modifications = []
        # callables applied, in order, to the generated ClientHello
        self.callbacks = []
        self.version = (3, 3)
        self.record_version = (3, 0)
        self.ciphers = []
        self.extensions = None
        # fixed hello random; a fresh one is generated per call when None
        self.random = None
        self.session_id = bytearray(0)
        self.compression_methods = [0]
        self.ssl2 = False
    @property
    def name(self):
        """Return the name of config with all the modifications applied."""
        if self.modifications:
            return "{0} ({1})".format(self._name,
                                      ", ".join(self.modifications))
        else:
            return self._name
    @name.setter
    def name(self, value):
        """Set the base name of the configuration."""
        self._name = value
    def __call__(self, hostname):
        """Generate a client hello object, use hostname in SNI extension."""
        # SNI is special in that we don't want to send it if it is empty
        if self.extensions:
            sni = next((x for x in self.extensions
                        if isinstance(x, SNIExtension)),
                       None)
            if sni:
                if hostname is not None:
                    if sni.serverNames is None:
                        sni.serverNames = []
                    sni.hostNames = [hostname]
                else:
                    # but if we were not provided with a host name, we want
                    # to remove empty extension
                    if sni.serverNames is None:
                        self.extensions = [x for x in self.extensions
                                           if not isinstance(x, SNIExtension)]
        if self.random:
            rand = self.random
        else:
            # we're not doing any crypto with it, just need "something"
            # TODO: place unix time at the beginning
            rand = numberToByteArray(random.getrandbits(256), 32)
        ch = ClientHello(self.ssl2).create(self.version, rand, self.session_id,
                                           self.ciphers,
                                           extensions=self.extensions)
        ch.compression_methods = self.compression_methods
        # let the registered callbacks post-process (or replace) the hello
        for cb in self.callbacks:
            ch = cb(ch)
        return ch
class Firefox_42(HelloConfig):
    """Create Client Hello like Firefox 42."""
    def __init__(self):
        """Set the configuration to Firefox 42."""
        super(Firefox_42, self).__init__()
        self._name = "Firefox 42"
        self.version = (3, 3)
        self.record_version = (3, 1)
        # cipher suites in Firefox 42's preference order
        self.ciphers = [CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
                        CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
                        CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA,
                        CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA,
                        CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA]
        ext = self.extensions = []
        # empty SNI; the host name is filled in by HelloConfig.__call__
        ext.append(SNIExtension())
        # empty renegotiation_info (initial handshake)
        ext.append(TLSExtension(extType=ExtensionType.renegotiation_info)
                   .create(bytearray(1)))
        ext.append(SupportedGroupsExtension().create([GroupName.secp256r1,
                                                      GroupName.secp384r1,
                                                      GroupName.secp521r1]))
        ext.append(ECPointFormatsExtension()
                   .create([ECPointFormat.uncompressed]))
        ext.append(TLSExtension(extType=ExtensionType.session_ticket))
        ext.append(NPNExtension())
        # ALPN: h2, spdy/3.1, http/1.1 (pre-encoded protocol name list)
        ext.append(TLSExtension(extType=ExtensionType.alpn)
                   .create(bytearray(b'\x00\x15' +
                                     b'\x02' + b'h2' +
                                     b'\x08' + b'spdy/3.1' +
                                     b'\x08' + b'http/1.1')))
        # OCSP status_request: type ocsp(1), no responder IDs, no extensions
        ext.append(TLSExtension(extType=ExtensionType.status_request)
                   .create(bytearray(b'\x01' +
                                     b'\x00\x00' +
                                     b'\x00\x00')))
        # signature_algorithms: RSA, ECDSA, then DSA pairs
        sig_algs = []
        for alg in ['sha256', 'sha384', 'sha512', 'sha1']:
            sig_algs.append((getattr(HashAlgorithm, alg),
                             SignatureAlgorithm.rsa))
        for alg in ['sha256', 'sha384', 'sha512', 'sha1']:
            sig_algs.append((getattr(HashAlgorithm, alg),
                             SignatureAlgorithm.ecdsa))
        for alg in ['sha256', 'sha1']:
            sig_algs.append((getattr(HashAlgorithm, alg),
                             SignatureAlgorithm.dsa))
        ext.append(SignatureAlgorithmsExtension()
                   .create(sig_algs))
| 5,861 | Python | .py | 121 | 33.438017 | 79 | 0.55055 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,083 | constants.py | mozilla_cipherscan/cscan/constants.py | # Copyright 2016(c) Hubert Kario
# This work is released under the Mozilla Public License Version 2.0
"""Extend the tlslite-ng constants with values it does not support."""
import tlslite.constants
from tlslite.constants import CipherSuite
# ECDHE-ECDSA cipher suites missing from tlslite: register each as a
# CipherSuite class attribute, record its IETF name, and collect it in
# the ecdheEcdsaSuites list (order matters - it mirrors the original)
CipherSuite.ecdheEcdsaSuites = []
_ECDHE_ECDSA_SUITES = (
    # RFC 5289
    (0xC02C, 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384'),
    (0xC02B, 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256'),
    (0xC024, 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384'),
    (0xC023, 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256'),
    # RFC 4492
    (0xC00A, 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA'),
    (0xC009, 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA'),
    # RFC 7251
    (0xC0AD, 'TLS_ECDHE_ECDSA_WITH_AES_256_CCM'),
    (0xC0AC, 'TLS_ECDHE_ECDSA_WITH_AES_128_CCM'),
    (0xC0AF, 'TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8'),
    (0xC0AE, 'TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8'),
)
for _value, _name in _ECDHE_ECDSA_SUITES:
    setattr(CipherSuite, _name, _value)
    CipherSuite.ietfNames[_value] = _name
    CipherSuite.ecdheEcdsaSuites.append(_value)
CipherSuite.ecdhAllSuites.extend(CipherSuite.ecdheEcdsaSuites)
CipherSuite.certAllSuites.extend(CipherSuite.ecdheEcdsaSuites)
# obsolete single-DES and export-grade suites, registered for
# identification purposes only (name + IETF name, no suite lists)
_LEGACY_SUITES = (
    (0x0009, 'TLS_RSA_WITH_DES_CBC_SHA'),
    (0x0064, 'TLS_RSA_EXPORT1024_WITH_RC4_56_SHA'),
    (0x0062, 'TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA'),
    (0x0003, 'TLS_RSA_EXPORT_WITH_RC4_40_MD5'),
    (0x0006, 'TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5'),
)
for _value, _name in _LEGACY_SUITES:
    setattr(CipherSuite, _name, _value)
    CipherSuite.ietfNames[_value] = _name
# DSS-authenticated DHE suites; collected in dheDssSuites in the same
# order they were originally registered
CipherSuite.dheDssSuites = []
_DHE_DSS_SUITES = (
    (0x0013, 'TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA'),
    (0x0012, 'TLS_DHE_DSS_WITH_DES_CBC_SHA'),
    (0x0063, 'TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA'),
    (0x0032, 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA'),
    (0x0038, 'TLS_DHE_DSS_WITH_AES_256_CBC_SHA'),
    (0x0040, 'TLS_DHE_DSS_WITH_AES_128_CBC_SHA256'),
    (0x006a, 'TLS_DHE_DSS_WITH_AES_256_CBC_SHA256'),
)
for _value, _name in _DHE_DSS_SUITES:
    setattr(CipherSuite, _name, _value)
    CipherSuite.ietfNames[_value] = _name
    CipherSuite.dheDssSuites.append(_value)
class ExtensionType(tlslite.constants.ExtensionType):
    """Definitions of TLS extension IDs."""
    status_request = 5  # RFC 6066
    alpn = 16  # RFC 7301
    session_ticket = 35  # RFC 5077
    heartbeat = 15  # RFC 6520
    status_request_v2 = 17  # RFC 6961
    padding = 21  # RFC 7685
    max_fragment_length = 1  # RFC 6066
    # BUGFIX: the attribute name was misspelled; the old name is kept as
    # an alias for backwards compatibility
    max_fragment_legth = 1
    # From: Eric Rescorla <ekr at rtfm.com>
    # Date: Mon, 7 Dec 2015 05:36:22 -0800
    # [TLS] TLS 1.3 ServerConfiguration
    early_data = 40
    pre_shared_key = 41
    key_share = 42
    cookie = 43
class GroupName(tlslite.constants.GroupName):
    """ECDH and FFDH key exchange group names."""
    # CFRG curves, appended after the base tlslite EC groups
    ecdh_x25519 = 29
    ecdh_x448 = 30
    eddsa_ed25519 = 31
    eddsa_ed448 = 32
    allEC = list(tlslite.constants.GroupName.allEC) + \
        [ecdh_x25519, ecdh_x448, eddsa_ed25519, eddsa_ed448]
    allFF = list(tlslite.constants.GroupName.allFF)
    all = allEC + allFF
class HandshakeType(tlslite.constants.HandshakeType):
    """Type of messages in Handshake protocol."""
    session_ticket = 4  # RFC 5077 NewSessionTicket
    certificate_status = 22  # RFC 6066 CertificateStatus
| 6,385 | Python | .py | 125 | 44.128 | 76 | 0.718489 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,084 | scanner.py | mozilla_cipherscan/cscan/scanner.py | # Copyright (c) 2016 Hubert Kario
# Released under the Mozilla Public License 2.0
"""Classes used for scanning servers and getting their responses."""
import socket
from .constants import CipherSuite, HandshakeType
from .messages import Certificate, ServerHello, Message, NewSessionTicket, \
CertificateStatus, ServerKeyExchange
from tlslite.constants import CertificateType, ContentType
from tlslite.messages import \
CertificateRequest, NextProtocol, ServerHelloDone, Alert
from tlslite.defragmenter import Defragmenter
from tlslite.messagesocket import MessageSocket
from tlslite.errors import TLSAbruptCloseError, TLSIllegalParameterException
class HandshakeParser(object):
    """Intelligent parser for handshake messages.

    Remembers connection parameters (protocol version, cipher suite,
    certificate type) learned from the ServerHello, since later handshake
    messages cannot be deserialised without them.
    """
    def __init__(self, version=(3, 1),
                 cipher_suite=CipherSuite.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
                 certificate_type=CertificateType.x509):
        """Initialize parser object."""
        self.version = version
        self.cipher_suite = cipher_suite
        self.certificate_type = certificate_type
    def parse(self, parser):
        """Parse a handshake message."""
        hs_type = parser.get(1)
        if hs_type == HandshakeType.server_hello:
            msg = ServerHello().parse(parser)
            # remember negotiated parameters for parsing of later messages
            self.version = msg.server_version
            self.cipher_suite = msg.cipher_suite
            self.certificate_type = msg.certificate_type
            return msg
        elif hs_type == HandshakeType.certificate:
            msg = Certificate(self.certificate_type)
        elif hs_type == HandshakeType.server_key_exchange:
            msg = ServerKeyExchange(self.cipher_suite, self.version)
        elif hs_type == HandshakeType.certificate_request:
            msg = CertificateRequest(self.version)
        elif hs_type == HandshakeType.next_protocol:
            # NOTE(review): NextProtocol is parsed here and then parsed
            # again below; the second parse() likely raises SyntaxError and
            # downgrades the message to a raw Message - verify intent
            msg = NextProtocol().parse(parser)
        elif hs_type == HandshakeType.server_hello_done:
            msg = ServerHelloDone()
        elif hs_type == HandshakeType.session_ticket:
            msg = NewSessionTicket()
        elif hs_type == HandshakeType.certificate_status:
            msg = CertificateStatus()
        else:
            raise ValueError("Unknown handshake type: {0}".format(hs_type))
        # don't abort when we can't parse a message, save it as unparsed
        try:
            msg.parse(parser)
        except SyntaxError:
            msg = Message(ContentType.handshake, parser.bytes)
        return msg
class Scanner(object):
"""Helper class for scanning a host and returning serialised responses."""
    def __init__(self, hello_gen, host, port=443, hostname=None):
        """
        Initialize scanner.

        hello_gen: callable that returns a ClientHello when passed the SNI
            host name (or None); may also carry a `record_version` attribute
            used for the record layer (see scan())
        host: address to connect to
        port: TCP port to connect to
        hostname: host name to send in SNI; no SNI when None
        """
        self.host = host
        self.hello_gen = hello_gen
        self.port = port
        self.hostname = hostname
def scan(self):
"""Perform a scan on server."""
defragger = Defragmenter()
defragger.addStaticSize(ContentType.change_cipher_spec, 1)
defragger.addStaticSize(ContentType.alert, 2)
defragger.addDynamicSize(ContentType.handshake, 1, 3)
try:
raw_sock = socket.create_connection((self.host, self.port), 5)
except socket.error as e:
return [e]
sock = MessageSocket(raw_sock, defragger)
if self.hostname is not None:
client_hello = self.hello_gen(bytearray(self.hostname,
'utf-8'))
else:
client_hello = self.hello_gen(None)
# record layer version - TLSv1.x
# use the version from configuration, if present, or default to the
# RFC recommended (3, 1) for TLS and (3, 0) for SSLv3
if hasattr(client_hello, 'record_version'):
sock.version = client_hello.record_version
elif hasattr(self.hello_gen, 'record_version'):
sock.version = self.hello_gen.record_version
elif client_hello.client_version > (3, 1): # TLS1.0
sock.version = (3, 1)
else:
sock.version = client_hello.client_version
# we don't want to send invalid messages (SSLv2 hello in SSL record
# layer), so set the record layer version to SSLv2 if the hello is
# of SSLv2 format
if client_hello.ssl2:
sock.version = (0, 2)
# save the record version used in the end for later analysis
client_hello.record_version = sock.version
messages = [client_hello]
handshake_parser = HandshakeParser()
try:
sock.sendMessageBlocking(client_hello)
except socket.error as e:
messages.append(e)
return messages
except TLSAbruptCloseError as e:
sock.sock.close()
messages.append(e)
return messages
# get all the server messages that affect connection, abort as soon
# as they've been read
try:
while True:
header, parser = sock.recvMessageBlocking()
if header.type == ContentType.alert:
alert = Alert()
alert.parse(parser)
alert.record_version = header.version
messages += [alert]
elif header.type == ContentType.handshake:
msg = handshake_parser.parse(parser)
msg.record_version = header.version
messages += [msg]
if isinstance(msg, ServerHelloDone):
return messages
else:
raise TypeError("Unknown content type: {0}"
.format(header.type))
except (TLSAbruptCloseError, TLSIllegalParameterException,
ValueError, TypeError, socket.error, SyntaxError) as e:
messages += [e]
return messages
finally:
try:
sock.sock.close()
except (socket.error, OSError):
pass
| 6,035 | Python | .py | 134 | 33.761194 | 78 | 0.616366 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,085 | test_extensions.py | mozilla_cipherscan/cscan_tests/test_extensions.py | # Copyright (c) 2015 Hubert Kario
# Released under Mozilla Public License Version 2.0
try:
import unittest2 as unittest
except ImportError:
import unittest
from tlslite.utils.codec import Parser
from cscan.extensions import KeyShareExtension
from cscan.constants import GroupName
class TestKeyShareExtension(unittest.TestCase):
    """Unit tests for the key_share extension codec."""

    def test___init__(self):
        """Bare construction must succeed."""
        extension = KeyShareExtension()
        self.assertIsNotNone(extension)

    def test_create(self):
        """create() must record the (group, share) pairs verbatim."""
        extension = KeyShareExtension()
        extension.create([(1, bytearray(b'\x12')),
                          (2, bytearray(b'\x33'))])
        expected = [(1, bytearray(b'\x12')),
                    (2, bytearray(b'\x33'))]
        self.assertEqual(extension.client_shares, expected)

    def test_write(self):
        """Serialisation must produce the expected wire encoding."""
        extension = KeyShareExtension()
        extension.create([(GroupName.secp256r1, bytearray(b'\xff\xfa')),
                          (GroupName.ffdhe2048, bytearray(b'\xaf\xaa'))])
        expected = bytearray(
            b'\x00\x2a\x00\x0d'
            b'\x00\x0b'
            b'\x00\x17\x02\xff\xfa'
            b'\x01\x00\x00\x02\xaf\xaa')
        self.assertEqual(extension.write(), expected)

    def test_write_with_no_data(self):
        """An empty extension serialises to type plus zero length."""
        extension = KeyShareExtension()
        self.assertEqual(extension.write(),
                         bytearray(b'\x00\x2a\x00\x00'))

    def test_parse(self):
        """Deserialisation must reconstruct the share list."""
        # note: the 4-byte extension type+length header is not included
        byte_parser = Parser(bytearray(
            b'\x00\x0b'
            b'\x00\x17\x02\xff\xfa'
            b'\x01\x00\x00\x02\xaf\xaa'))
        extension = KeyShareExtension()
        extension.parse(byte_parser)
        self.assertEqual(extension.client_shares,
                         [(GroupName.secp256r1, bytearray(b'\xff\xfa')),
                          (GroupName.ffdhe2048, bytearray(b'\xaf\xaa'))])

    def test_parse_with_no_data(self):
        """Parsing an empty buffer must leave the share list unset."""
        extension = KeyShareExtension()
        extension.parse(Parser(bytearray()))
        self.assertIsNone(extension.client_shares)

    def test___repr__(self):
        """repr() must show the stored shares."""
        extension = KeyShareExtension()
        extension.create([(1, bytearray(b'\xff'))])
        self.assertEqual("KeyShareExtension([(1, bytearray(b\'\\xff\'))])",
                         repr(extension))
| 2,158 | Python | .py | 54 | 29.722222 | 75 | 0.585495 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,086 | test_config.py | mozilla_cipherscan/cscan_tests/test_config.py | # Copyright (c) 2015 Hubert Kario
# Released under Mozilla Public License Version 2.0
try:
import unittest2 as unittest
except ImportError:
import unittest
from tlslite.messages import ClientHello
from tlslite.extensions import SNIExtension, SupportedGroupsExtension, \
ECPointFormatsExtension, NPNExtension, SignatureAlgorithmsExtension
from tlslite.utils.codec import Parser
from cscan.config import Firefox_42
from cscan.extensions import RenegotiationExtension
from cscan.constants import ExtensionType
class TestFirefox(unittest.TestCase):
    """Sanity checks for the Firefox 42 ClientHello generator."""

    def test_firefox_42(self):
        generator = Firefox_42()
        hello = generator(bytearray(b'example.com'))

        self.assertIsNotNone(hello)
        self.assertIsInstance(hello, ClientHello)
        self.assertEqual(len(hello.write()), 176)
        self.assertEqual(hello.client_version, (3, 3))
        self.assertEqual(generator.record_version, (3, 1))
        self.assertEqual(len(hello.cipher_suites), 11)

        extensions = hello.extensions
        self.assertIsInstance(extensions[0], SNIExtension)
        self.assertEqual(extensions[1].extType,
                         ExtensionType.renegotiation_info)
        self.assertIsInstance(extensions[2], SupportedGroupsExtension)
        self.assertIsInstance(extensions[3], ECPointFormatsExtension)
        self.assertEqual(extensions[4].extType,
                         ExtensionType.session_ticket)
        # bug in tlslite-ng, removes NPN extensions from provided
        # extensions, so only the extension type can be checked here
        self.assertEqual(extensions[5].extType,
                         ExtensionType.alpn)
        self.assertEqual(extensions[6].extType,
                         ExtensionType.status_request)
        self.assertIsInstance(extensions[7], SignatureAlgorithmsExtension)
        self.assertEqual(hello.compression_methods, [0])
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 2,020 | Python | .py | 44 | 36.272727 | 76 | 0.671066 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,087 | parse_CAs.py | mozilla_cipherscan/top1m/parse_CAs.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Author: Hubert Kario - 2014
from __future__ import division, print_function
# Filesystem locations used by this script.
path = "./results/"  # NOTE(review): rebound inside the chain loop below
ca_certs_path = "./ca_files"  # searched by get_path_for_hash() as fallback
certs_path = "./certs"  # searched by get_path_for_hash() first
""" only root CAs, no cached intermediate certs """
trust_path = "./ca_trusted"
import json
import sys
from collections import defaultdict
import os
from OpenSSL import crypto
from operator import itemgetter
# Global counters accumulated while processing the "parsed" input file.
invocations = defaultdict(int)
total = 0  # number of certificate chains processed
hosts = 0  # number of hosts with at least one valid scan entry
chains = defaultdict(int)  # "complete"/"incomplete"/"untrusted" counts
chain_len = defaultdict(int)  # chain length (as string) -> count
keysize = defaultdict(int)  # CA key type+size -> occurrence count
keysize_per_chain = defaultdict(int)  # CA key type+size -> chain count
root_CA = defaultdict(int)  # root CA display name -> count
sig_alg = defaultdict(int)  # signature algorithm (ex. root) -> count
intermediate_CA = defaultdict(int)  # intermediate CA name -> count
effective_security = defaultdict(int)  # effective LoS of chain -> count
subject_hashes = {}
issuer_hashes = {}
def get_cert_subject_name(cert):
    """Return a printable identifier for the certificate's subject.

    The identifier is "(<subject hash>) <CN>"; the organisation name is
    used when no commonName is present, and just the hash prefix when
    neither attribute exists.

    :param cert: loaded X509 certificate object
    :return: display string for the certificate subject
    """
    subject = cert.get_subject()
    commonName = None
    organization = None
    for elem, val in subject.get_components():
        # get_components() yields bytes under Python 3; normalise so the
        # attribute-name comparison below works on both 2 and 3
        if not isinstance(elem, str):
            elem = elem.decode('ascii')
        if not isinstance(val, str):
            val = val.decode('utf-8', 'replace')
        # keep only the first occurrence of each attribute
        if elem == "CN" and commonName is None:
            commonName = val
        if elem == "O" and organization is None:
            organization = val
    s_hash = "(" + ("%0.8X" % subject.hash()).lower() + ") "
    if commonName is not None:
        return s_hash + commonName
    elif organization is not None:
        return s_hash + organization
    else:
        return s_hash
def get_path_for_hash(cert_hash):
    """Locate the PEM file for a certificate hash.

    Looks in the server-certificate directory first, then in the CA
    certificate directory; writes a warning to stderr and returns None
    when the file exists in neither place.
    """
    candidate = None
    for directory in (certs_path, ca_certs_path):
        candidate = directory + '/' + cert_hash + '.pem'
        if os.path.exists(candidate):
            return candidate
    sys.stderr.write("File with hash {0} ({1}) is missing!\n".format(
        cert_hash, candidate))
    return None
""" convert RSA and DSA key sizes to estimated Level of Security """
def rsa_key_size_to_los(size):
    """Map an RSA/DSA modulus size in bits to an estimated Level of
    Security (in symmetric-equivalent bits)."""
    # (exclusive upper bound on key size, estimated LoS)
    thresholds = ((760, 40),
                  (1020, 64),
                  (2040, 80),
                  (3068, 112),
                  (4094, 128),
                  (7660, 152),
                  (15300, 192))
    for limit, level in thresholds:
        if size < limit:
            return level
    return 256
""" convert signature algotihm to estimated Level of Security """
def sig_alg_to_los(name):
    """Convert a signature algorithm name to an estimated Level of
    Security based on its hash function.

    :param name: algorithm name such as "sha256WithRSAEncryption";
        bytes are accepted and decoded (pyOpenSSL returns bytes on
        Python 3)
    :return: estimated security level in bits
    :raises ValueError: when the hash algorithm is not recognised
    """
    if isinstance(name, bytes):
        name = name.decode('ascii')
    upper_name = name.upper()
    if 'MD5' in upper_name:
        return 64
    elif 'SHA1' in upper_name:
        return 80
    elif 'SHA224' in upper_name:
        return 112
    elif 'SHA256' in upper_name:
        return 128
    elif 'SHA384' in upper_name:
        return 192
    elif 'SHA512' in upper_name:
        return 256
    else:
        # bug fix: previously raised the undefined name
        # UnknownSigAlgError, which surfaced as a NameError
        raise ValueError("Unknown signature algorithm: {0}".format(name))
def collect_key_sizes(file_names):
    """Accumulate key size, signature algorithm and effective Level of
    Security statistics for one certificate chain into the module-level
    counters.

    file_names is ordered server certificate first, self-signed root CA
    last (see the comments below); entries come from get_path_for_hash().
    """
    tmp_keysize = {}
    """ don't collect signature alg for the self signed root """
    # root CA: count its key size and name, and seed the chain's
    # security level from its key strength
    with open(file_names[-1]) as cert_file:
        cert_pem = cert_file.read()
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
    pubkey = cert.get_pubkey()
    if pubkey.type() == crypto.TYPE_RSA:
        keysize['RSA ' + str(pubkey.bits())] += 1
        tmp_keysize['RSA ' + str(pubkey.bits())] = 1
        security_level = rsa_key_size_to_los(pubkey.bits())
    elif pubkey.type() == crypto.TYPE_DSA:
        keysize['DSA ' + str(pubkey.bits())] += 1
        tmp_keysize['DSA ' + str(pubkey.bits())] = 1
        security_level = rsa_key_size_to_los(pubkey.bits())
    # following 408 should be crypto.TYPE_ECDSA, but even new(ish) version
    # of OpenSSL Python module don't define it
    elif pubkey.type() == 408:
        keysize['ECDSA ' + str(pubkey.bits())] += 1
        tmp_keysize['ECDSA ' + str(pubkey.bits())] = 1
        security_level = pubkey.bits()/2
    else:
        keysize[str(pubkey.type()) + ' ' + str(pubkey.bits())] += 1
        security_level = 0
    root_CA[get_cert_subject_name(cert)] += 1
    """ exclude the self signed root and server cert from stats """
    # intermediate CAs: chain security level is the minimum over all
    # key strengths and signature algorithm strengths seen
    for f_name in file_names[1:-1]:
        with open(f_name) as cert_file:
            cert_pem = cert_file.read()
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
        pubkey = cert.get_pubkey()
        if pubkey.type() == crypto.TYPE_RSA:
            keysize['RSA ' + str(pubkey.bits())] += 1
            tmp_keysize['RSA ' + str(pubkey.bits())] = 1
            c_key_level = rsa_key_size_to_los(pubkey.bits())
        elif pubkey.type() == crypto.TYPE_DSA:
            keysize['DSA ' + str(pubkey.bits())] += 1
            tmp_keysize['DSA ' + str(pubkey.bits())] = 1
            c_key_level = rsa_key_size_to_los(pubkey.bits())
        elif pubkey.type() == 408:
            keysize['ECDSA ' + str(pubkey.bits())] += 1
            tmp_keysize['ECDSA ' + str(pubkey.bits())] = 1
            c_key_level = pubkey.bits() / 2
        else:
            keysize[str(pubkey.type()) + ' ' + str(pubkey.bits())] += 1
            c_key_level = 0
        if security_level > c_key_level:
            security_level = c_key_level
        sig_alg[cert.get_signature_algorithm()] += 1
        c_sig_level = sig_alg_to_los(cert.get_signature_algorithm())
        if security_level > c_sig_level:
            security_level = c_sig_level
        intermediate_CA[get_cert_subject_name(cert)] += 1
    # count each CA key type+size at most once per chain
    for key_s in tmp_keysize:
        keysize_per_chain[key_s] += 1
    # XXX doesn't handle the situation in which the CA uses its certificate
    # for a web server properly
    # server certificate: only affects the effective security level,
    # its key size is not added to the CA key statistics
    with open(file_names[0]) as cert_file:
        cert_pem = cert_file.read()
    cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
    pubkey = cert.get_pubkey()
    if pubkey.type() == crypto.TYPE_RSA:
        c_key_level = rsa_key_size_to_los(pubkey.bits())
    elif pubkey.type() == crypto.TYPE_DSA:
        c_key_level = rsa_key_size_to_los(pubkey.bits())
    elif pubkey.type() == 408:
        c_key_level = pubkey.bits() / 2
    else:
        c_key_level = 0
    if security_level > c_key_level:
        security_level = c_key_level
    c_sig_level = sig_alg_to_los(cert.get_signature_algorithm())
    if security_level > c_sig_level:
        security_level = c_sig_level
    effective_security[security_level] += 1
# Main loop: read one JSON object per line from the pre-parsed scan
# output, classify each host's chain status and feed every provided
# chain into collect_key_sizes().
with open("parsed") as res_file:
    for line in res_file:
        try:
            res = json.loads(line)
        except ValueError as e:
            print("can't process line: " + line)
            continue
        f=res
        try:
            server_chain_trusted = False
            server_chain_complete = False
            server_chains = []
            valid = False
            """ Keep certificates in memory for a given file """
            known_certs = {}
            if not "chains" in f:
                continue
            results = f["chains"]
            """ discard hosts with empty results """
            if len(results) < 1:
                continue
            """ loop over list of ciphers """
            for entry in results:
                """ skip invalid results """
                if not 'chain' in entry:
                    continue
                valid = True
                # untrusted chains are counted but not analysed further
                if entry['chain'] == "untrusted":
                    continue
                if entry['chain'] == "complete":
                    server_chain_complete = True
                    server_chain_trusted = True
                if entry['chain'] == "incomplete":
                    server_chain_trusted = True
                server_chains += [entry['certificates']]
            if server_chain_trusted:
                if server_chain_complete:
                    chains["complete"] += 1
                    print("complete: " + f['host'])
                else:
                    chains["incomplete"] += 1
                    print("incomplete: " + f['host'])
            else:
                chains["untrusted"] += 1
                print("untrusted: " + f['host'])
            if valid:
                hosts += 1
            # NOTE(review): 'hash' and 'path' shadow the builtin and the
            # module-level results directory here
            for chain in server_chains:
                f_names = []
                for hash in chain:
                    path = get_path_for_hash(hash)
                    f_names += [path]
                collect_key_sizes(f_names)
                chain_len[str(len(chain))] += 1
                if len(chain) == 1:
                    sys.stderr.write("file with chain 1 long: " + line)
                total += 1
        except TypeError as e:
            sys.stderr.write("can't process: " + line)
            continue
""" Display stats """
# Render the collected counters as aligned plain-text tables.
#print("openssl invocations: " + str(invocations["openssl"]))
print("Statistics from " + str(total) + " chains provided by " + str(hosts) + " hosts")
print("\nServer provided chains    Count     Percent")
print("-------------------------+---------+-------")
for stat in sorted(chains):
    percent = round(chains[stat] / hosts * 100, 4)
    sys.stdout.write(stat.ljust(25) + " " + str(chains[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nTrusted chain statistics")
print("========================")
print("\nChain length              Count     Percent")
print("-------------------------+---------+-------")
for stat in sorted(chain_len):
    percent = round(chain_len[stat] / total * 100, 4)
    sys.stdout.write(stat.ljust(25) + " " + str(chain_len[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nCA key size in chains     Count")
print("-------------------------+---------")
for stat in sorted(keysize):
    sys.stdout.write(stat.ljust(25) + " " + str(keysize[stat]).ljust(10) + "\n")
print("\nChains with CA key        Count     Percent")
print("-------------------------+---------+-------")
for stat in sorted(keysize_per_chain):
    percent = round(keysize_per_chain[stat] / total * 100, 4)
    sys.stdout.write(stat.ljust(25) + " " + str(keysize_per_chain[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nSignature algorithm (ex. root) Count")
print("------------------------------+---------")
for stat in sorted(sig_alg):
    sys.stdout.write(stat.ljust(30) + " " + str(sig_alg[stat]).ljust(10) + "\n")
print("\nEff. host cert chain LoS  Count     Percent")
print("-------------------------+---------+-------")
for stat in sorted(effective_security):
    percent = round(effective_security[stat] / total * 100, 4)
    sys.stdout.write(str(stat).ljust(25) + " " + str(effective_security[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nRoot CAs                                      Count     Percent")
print("---------------------------------------------+---------+-------")
for stat, val in sorted(root_CA.items(), key=itemgetter(1), reverse=True):
    percent = round(val / total * 100, 4)
    sys.stdout.write(stat.ljust(45)[0:45] + " " + str(val).ljust(10) + str(percent).ljust(4) + "\n")
print("\nIntermediate CA                               Count     Percent")
print("---------------------------------------------+---------+-------")
for stat, val in sorted(intermediate_CA.items(), key=itemgetter(1), reverse=True):
    percent = round(val / total * 100, 4)
    sys.stdout.write(stat.ljust(45)[0:45] + " " + str(val).ljust(10) + str(percent).ljust(4) + "\n")
| 11,097 | Python | .py | 267 | 33.629213 | 120 | 0.561408 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,088 | parse_results.py | mozilla_cipherscan/top1m/parse_results.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Author: Julien Vehent [:ulfr] - 2013
# Contributors: Hubert Kario - 2014
from __future__ import division, print_function
# Directory tree with per-host JSON scan results, walked by the main loop.
path = "./results/"
import json
import sys
from collections import defaultdict
import operator
import os
import re
def natural_sort(l):
    """Sort strings so that embedded numbers compare numerically,
    e.g. "item2" sorts before "item10"."""
    def alphanum_key(text):
        # split into alternating non-digit / digit chunks and compare
        # the digit chunks as integers, the rest case-insensitively
        return [int(chunk) if chunk.isdigit() else chunk.lower()
                for chunk in re.split('([0-9]+)', text)]
    return sorted(l, key=alphanum_key)
""" client config cipher simulation """
# Cipher lists used to simulate handshakes of specific browser versions
# against each scanned server (order matters: it is the client's
# preference order).
client_ciphers={}
""" list of ciphers offered by Firefox 29 by default """
client_ciphers['FF 29']=[
        'ECDHE-ECDSA-AES128-GCM-SHA256',
        'ECDHE-RSA-AES128-GCM-SHA256',
        'ECDHE-ECDSA-AES256-SHA',
        'ECDHE-ECDSA-AES128-SHA',
        'ECDHE-RSA-AES128-SHA',
        'ECDHE-RSA-AES256-SHA',
        'ECDHE-RSA-DES-CBC3-SHA',
        'ECDHE-ECDSA-RC4-SHA',
        'ECDHE-RSA-RC4-SHA',
        'DHE-RSA-AES128-SHA',
        'DHE-DSS-AES128-SHA',
        'DHE-RSA-CAMELLIA128-SHA',
        'DHE-RSA-AES256-SHA',
        'DHE-DSS-AES256-SHA',
        'DHE-RSA-CAMELLIA256-SHA',
        'EDH-RSA-DES-CBC3-SHA',
        'AES128-SHA',
        'CAMELLIA128-SHA',
        'AES256-SHA',
        'CAMELLIA256-SHA',
        'DES-CBC3-SHA',
        'RC4-SHA',
        'RC4-MD5']
# Firefox 35 default cipher list
client_ciphers['FF 35']=[
        'ECDHE-ECDSA-AES128-GCM-SHA256',
        'ECDHE-RSA-AES128-GCM-SHA256',
        'ECDHE-ECDSA-AES256-SHA',
        'ECDHE-ECDSA-AES128-SHA',
        'ECDHE-RSA-AES128-SHA',
        'ECDHE-RSA-AES256-SHA',
        'ECDHE-ECDSA-RC4-SHA',
        'ECDHE-RSA-RC4-SHA',
        'DHE-RSA-AES128-SHA',
        'DHE-DSS-AES128-SHA',
        'DHE-RSA-AES256-SHA',
        'AES128-SHA',
        'AES256-SHA',
        'DES-CBC3-SHA',
        'RC4-SHA',
        'RC4-MD5']
# Firefox 44 default cipher list
client_ciphers['FF 44']=[
        'ECDHE-ECDSA-AES128-GCM-SHA256',
        'ECDHE-RSA-AES128-GCM-SHA256',
        'ECDHE-ECDSA-AES256-SHA',
        'ECDHE-ECDSA-AES128-SHA',
        'ECDHE-RSA-AES128-SHA',
        'ECDHE-RSA-AES256-SHA',
        'DHE-RSA-AES128-SHA',
        'DHE-RSA-AES256-SHA',
        'AES128-SHA',
        'AES256-SHA',
        'DES-CBC3-SHA']
# Global configuration and counters for the report.
report_untrused=False
cipherstats = defaultdict(int)
# stats about different client performance
# ciphers selected by them, unsupported, etc.
client_RC4_Only_cipherstats={}
client_RC4_preferred_cipherstats={}
client_3DES_Only_cipherstats={}
client_3DES_preferred_cipherstats={}
client_incompatible_cipherstats={}
client_selected_cipherstats={}
# one counter dictionary per simulated client
for client_name in client_ciphers:
    client_RC4_Only_cipherstats[client_name] = defaultdict(int)
    client_RC4_preferred_cipherstats[client_name] = defaultdict(int)
    client_3DES_Only_cipherstats[client_name] = defaultdict(int)
    client_3DES_preferred_cipherstats[client_name] = defaultdict(int)
    client_incompatible_cipherstats[client_name] = defaultdict(int)
    client_selected_cipherstats[client_name] = defaultdict(int)
cipherordering = defaultdict(int)
pfsstats = defaultdict(int)
protocolstats = defaultdict(int)
handshakestats = defaultdict(int)
keysize = defaultdict(int)
sigalg = defaultdict(int)
tickethint = defaultdict(int)
eccfallback = defaultdict(int)
eccordering = defaultdict(int)
ecccurve = defaultdict(int)
npn = defaultdict(int)
ocspstaple = defaultdict(int)
fallbacks = defaultdict(int)
intolerancies = defaultdict(int)
impl_families = defaultdict(int)
# array with indexes of fallback names for the matrix report
# NOTE(review): padding columns use distinct all-space keys (' ', '  ',
# '   '); identical keys would overwrite each other - verify on update
fallback_ids = defaultdict(int)
i=0
fallback_ids['big-SSLv3'] = i
i+=1
fallback_ids['big-TLSv1.0'] = i
i+=1
fallback_ids['big-TLSv1.1'] = i
i+=1
fallback_ids['big-TLSv1.2'] = i
i+=1
# padding space
fallback_ids[' '] = i
i+=1
fallback_ids['small-SSLv3'] = i
i+=1
fallback_ids['small-TLSv1.0-notlsext'] = i
i+=1
fallback_ids['small-TLSv1.0'] = i
i+=1
fallback_ids['small-TLSv1.1'] = i
i+=1
fallback_ids['small-TLSv1.2'] = i
i+=1
# 2nd padding space
fallback_ids['  '] = i
i+=1
fallback_ids['v2-small-SSLv3'] = i
i+=1
fallback_ids['v2-small-TLSv1.0'] = i
i+=1
fallback_ids['v2-small-TLSv1.1'] = i
i+=1
fallback_ids['v2-small-TLSv1.2'] = i
i+=1
fallback_ids['v2-big-TLSv1.2'] = i
i+=1
# 3rd padding space
fallback_ids['   '] = i
pfssigalgfallback = defaultdict(int)
pfssigalgs = defaultdict(int)
pfssigalgsordering = defaultdict(int)
compression = defaultdict(int)
renegotiation = defaultdict(int)
dsarsastack = 0
total = 0
for r,d,flist in os.walk(path):
for f in flist:
""" initialize variables for stats of the current site """
temppfsstats = {}
tempkeystats = {}
tempecckeystats = {}
tempdsakeystats = {}
tempgostkeystats = {}
tempsigstats = {}
tempticketstats = {}
tempeccfallback = "unknown"
tempeccordering = "unknown"
tempecccurve = {}
tempnpn = {}
tempfallbacks = {}
tempintolerancies = {}
tempimpl_families = {}
""" supported ciphers by the server under scan """
tempcipherstats = {}
temppfssigalgordering = {}
temppfssigalgfallback = {}
temppfssigalgs = {}
tempcompression = {}
temprenegotiation = {}
ciphertypes = 0
AESGCM = False
AESCBC = False
AES = False
CHACHA20 = False
DES3 = False
CAMELLIA = False
RC4 = False
GOST89_cipher = False
""" variables to support handshake simulation for different clients """
client_RC4_Only={}
client_3DES_Only={}
client_compat={}
temp_client_incompat={}
client_RC4_Pref={}
client_3DES_Pref={}
client_selected={}
for client_name in client_ciphers:
# the following depends on client_compat, so by default it can be True
client_RC4_Only[client_name]=True
client_3DES_Only[client_name]=True
client_compat[client_name]=False
temp_client_incompat[client_name]={}
client_RC4_Pref[client_name]=None
client_3DES_Pref[client_name]=None
client_selected[client_name]=None
""" server side list of supported ciphers """
list_of_ciphers = []
ADH = False
DHE = False
AECDH = False
ECDHE = False
RSA = False
ECDH = False
DH = False
GOST2001_kex = False
SSL2 = False
SSL3 = False
TLS1 = False
TLS1_1 = False
TLS1_2 = False
dualstack = False
ECDSA = False
trusted = False
ocsp_stapling = None
""" process the file """
f_abs = os.path.join(r,f)
with open(f_abs) as json_file:
""" discard files that fail to load """
try:
results = json.load(json_file)
except ValueError:
continue
""" discard files with empty results """
if len(results['ciphersuite']) < 1:
# if there are no results from regular scan but there are
# from fallback attempts that means that the scan of a host
# is inconclusive
if 'configs' in results:
tolerance = [' '] * len(fallback_ids)
for entry in results['configs']:
config = results['configs'][entry]
if config['tolerant'] == "True" and \
config['trusted'] == "True":
# save which protocols passed
if entry in fallback_ids:
tolerance[fallback_ids[entry]] = 'v'
else:
fallback_ids[entry] = len(fallback_ids)
tolerance.insert(fallback_ids[entry], 'v')
# analysis of host won't be continued, so we have to add
# results to the permanent, not temporary table, but
# do that only when there actually were detected values
if "".join(tolerance).strip():
fallbacks["".join(tolerance).rstrip()] += 1
continue
""" save ECC fallback (new format) """
if 'curves_fallback' in results:
tempeccfallback = results['curves_fallback']
""" save ECC curve stats (old format) """
if 'curve_fallback' in results:
tempeccfallback = results['curve_fallback']
if 'curve_ordering' in results:
tempeccordering = results['curve_ordering']
if 'curve' in results:
for curve in results['curve']:
tempecccurve[curve] = 1
if len(results['curve']) == 1:
tempecccurve[curve + ' Only'] = 1
""" collect TLSv1.2 PFS ciphersuite sigalgs """
if 'sigalgs' in results:
if results['sigalgs']['ordering']:
temppfssigalgordering[results['sigalgs']['ordering']] = 1
if results['sigalgs']['ECDSA-fallback']:
temppfssigalgfallback['ECDSA ' + results['sigalgs']['ECDSA-fallback']] = 1
if results['sigalgs']['RSA-fallback']:
temppfssigalgfallback['RSA ' + results['sigalgs']['RSA-fallback']] = 1
if 'RSA' in results['sigalgs'] and results['sigalgs']['RSA'][0] != 'Fail':
for pfssigalg in results['sigalgs']['RSA']:
temppfssigalgs['RSA-' + pfssigalg]=1
if len(results['sigalgs']['RSA']) == 1:
temppfssigalgs['RSA-' + results['sigalgs']['RSA'][0] + ' Only'] = 1
if 'ECDSA' in results['sigalgs'] and results['sigalgs']['ECDSA'][0] != 'Fail':
for pfssigalg in results['sigalgs']['ECDSA']:
temppfssigalgs['ECDSA-' + pfssigalg]=1
if len(results['sigalgs']['ECDSA']) == 1:
temppfssigalgs['ECDSA-' + results['sigalgs']['ECDSA'][0] + ' Only'] = 1
if 'configs' in results:
tolerance = [' '] * len(fallback_ids)
for entry in results['configs']:
config = results['configs'][entry]
if not entry in fallback_ids:
fallback_ids[entry] = len(fallback_ids)
tolerance.insert(fallback_ids[entry], ' ')
if config['tolerant'] == "True":
tolerance[fallback_ids[entry]] = 'v'
else:
tolerance[fallback_ids[entry]] = 'X'
tempfallbacks["".join(tolerance).rstrip()] = 1
configs = results['configs']
try:
if configs['big-TLSv1.1']['tolerant'] != "True" and \
configs['big-TLSv1.2']['tolerant'] != "True" and \
configs['small-TLSv1.1']['tolerant'] != "True" and \
configs['small-TLSv1.2']['tolerant'] != "True":
if configs['v2-small-TLSv1.1']['tolerant'] != "True" and \
configs['v2-small-TLSv1.2']['tolerant'] != "True":
tempfallbacks['TLSv1.1+ strict Intolerance'] = 1
else:
tempfallbacks['TLSv1.1+ Intolerant'] = 1
if configs['big-TLSv1.1']['tolerant'] == "True" and \
configs['big-TLSv1.2']['tolerant'] != "True" and \
configs['small-TLSv1.1']['tolerant'] == "True" and \
configs['small-TLSv1.2']['tolerant'] != "True":
if configs['v2-small-TLSv1.2']['tolerant'] != "True":
tempfallbacks['TLSv1.2 strict Intolerance'] = 1
else:
tempfallbacks['TLSv1.2 Intolerant'] = 1
if configs['big-TLSv1.2']['tolerant'] != "True" and \
configs['big-TLSv1.1']['tolerant'] == "True" and \
configs['small-TLSv1.2']['tolerant'] == "True":
tempfallbacks['TLSv1.2 big Intolerance'] = 1
if configs['big-TLSv1.2']['tolerant'] != "True" and \
configs['small-TLSv1.0']['tolerant'] != "True" and \
configs['small-TLSv1.0-notlsext']['tolerant'] == "True":
tempfallbacks['TLS extension Intolerance'] = 1
if configs['big-TLSv1.2']['tolerant'] != "True" and \
configs['big-TLSv1.1']['tolerant'] != "True" and \
configs['big-TLSv1.0']['tolerant'] != "True" and \
(configs['small-TLSv1.2']['tolerant'] == "True" or
configs['v2-small-TLSv1.2']['tolerant'] == "True"):
tempfallbacks['Big handshake intolerance'] = 1
except KeyError:
pass
if 'intolerancies' in results:
intoler = results['intolerancies']
for name, val in intoler.items():
if val is True:
tempintolerancies[name] = 1
intol = [x.replace(' ', '_')
for x in tempintolerancies.keys()]
all_above_tls_1_2 = ('TLS_1.3', 'TLS_1.4', 'SSL_3.254',
'SSL_4.0', 'SSL_4.3', 'SSL_255.255')
if all(i in intol for i in all_above_tls_1_2):
for i in all_above_tls_1_2:
intol.remove(i)
intol.append('TLS_1.3+')
all_above_ssl_4_0 = ('SSL_4.3', 'SSL_4.0', 'SSL_255.255')
if all(i in intol for i in all_above_ssl_4_0):
for i in all_above_ssl_4_0:
intol.remove(i)
intol.append("SSL_4.0+")
if intol:
intol.sort(reverse=True)
tempimpl_families[" ".join(intol)] = 1
else:
tempintolerancies['x:missing information'] = 1
""" get some extra data about server """
if 'renegotiation' in results:
temprenegotiation[results['renegotiation']] = 1
if 'compression' in results:
tempcompression[results['compression']] = 1
""" loop over list of ciphers """
for entry in results['ciphersuite']:
# some servers return different certificates with different
# ciphers, also we may become redirected to other server with
# different config (because over-reactive IPS)
if 'False' in entry['trusted'] and report_untrused == False:
continue
list_of_ciphers.append(entry['cipher'])
# check if the advertised ciphers are not effectively RC4 Only
# for clients or incompatible with them
for client_name in client_ciphers:
if entry['cipher'] in client_ciphers[client_name]:
# if this is first cipher and we already are getting RC4
# then it means that RC4 is preferred (and client is
# compatible with server)
client_compat[client_name]=True
if not 'RC4' in entry['cipher']:
client_RC4_Only[client_name] = False
if not 'CBC3' in entry['cipher']:
client_3DES_Only[client_name] = False
else:
temp_client_incompat[client_name][entry['cipher']] = 1
""" store the ciphers supported """
if 'ADH' in entry['cipher'] or 'AECDH' in entry['cipher'] or \
'EXP' in entry['cipher'] or \
'DES-CBC3-MD5' in entry['cipher'] or \
'RC4-64-MD5' in entry['cipher'] or \
'IDEA-CBC-MD5' in entry['cipher']:
ciphertypes += 1
name = "z:" + entry['cipher']
tempcipherstats[name] = 1
tempcipherstats['Insecure'] = 1
elif 'AES128-GCM' in entry['cipher'] or 'AES256-GCM' in entry['cipher']:
if not AESGCM:
AES = True
AESGCM = True
ciphertypes += 1
elif 'AES' in entry['cipher']:
if not AESCBC:
AES = True
AESCBC = True
ciphertypes += 1
elif 'DES-CBC3' in entry['cipher']:
if not DES3:
DES3 = True
ciphertypes += 1
elif 'CAMELLIA' in entry['cipher']:
if not CAMELLIA:
CAMELLIA = True
ciphertypes += 1
elif 'RC4' in entry['cipher']:
if not RC4:
ciphertypes += 1
RC4 = True
elif 'CHACHA20' in entry['cipher']:
if not CHACHA20:
ciphertypes += 1
CHACHA20 = True
elif 'IDEA' in entry['cipher'] or 'SEED' in entry['cipher']:
ciphertypes += 1
name = "y:" + entry['cipher']
tempcipherstats[name] = 1
elif 'GOST89-GOST89' in entry['cipher']:
GOST89_cipher = True
ciphertypes += 1
name = "y:" + entry['cipher']
tempcipherstats[name] = 1
else:
ciphertypes += 1
name = "z:" + entry['cipher']
tempcipherstats[name] = 1
tempcipherstats['Insecure'] = 1
""" store key handshake methods """
if 'EXP' in entry['cipher']:
pass
elif 'AECDH' in entry['cipher']:
AECDH = True
elif 'ADH' in entry['cipher']:
ADH = True
elif 'ECDHE' in entry['cipher']:
ECDHE = True
temppfsstats[entry['pfs']] = 1
elif 'DHE' in entry['cipher'] or 'EDH' in entry['cipher']:
DHE = True
temppfsstats[entry['pfs']] = 1
elif 'ECDH' in entry['cipher']:
ECDH = True
elif 'DH' in entry['cipher']:
DH = True
elif entry['cipher'].startswith('GOST2001'):
GOST2001_kex = True
else:
RSA = True
""" save the key size """
if 'ECDSA' in entry['cipher'] or 'ECDH-RSA' in entry['cipher']:
ECDSA = True
tempecckeystats[entry['pubkey'][0]] = 1
elif 'DSS' in entry['cipher']:
tempdsakeystats[entry['pubkey'][0]] = 1
elif 'AECDH' in entry['cipher'] or 'ADH' in entry['cipher']:
""" skip """
elif 'GOST' in entry['cipher']:
tempgostkeystats[entry['pubkey'][0]] = 1
else:
tempkeystats[entry['pubkey'][0]] = 1
if ECDSA:
dualstack = True
if 'True' in entry['trusted'] and not 'ADH' in entry['cipher'] and not 'AECDH' in entry['cipher']:
trusted = True
""" save key signatures size """
tempsigstats[entry['sigalg'][0]] = 1
""" save tls ticket hint """
if 'ticket_hint' in entry:
tempticketstats[entry['ticket_hint']] = 1
""" check if OCSP stapling is supported """
if 'ocsp_stapling' in entry:
if entry['ocsp_stapling'] == 'True':
ocsp_stapling=True
else:
ocsp_stapling=False
""" store the versions of TLS supported """
for protocol in entry['protocols']:
if protocol == 'SSLv2':
SSL2 = True
elif protocol == 'SSLv3':
SSL3 = True
elif protocol == 'TLSv1':
TLS1 = True
elif protocol == 'TLSv1.1':
TLS1_1 = True
elif protocol == 'TLSv1.2':
TLS1_2 = True
# save NPN protocols supported
if 'npn' in entry:
for proto in entry['npn']:
tempnpn[proto] = 1
if len(entry['npn']) == 1:
tempnpn[proto + ' Only'] = 1
""" save ECC curves stats """
if 'curves_ordering' in entry:
tempeccordering = entry['curves_ordering']
if 'curves' in entry:
for curve in entry['curves']:
tempecccurve[curve] = 1
if len(entry['curves']) == 1:
tempecccurve[curve + ' Only'] = 1
json_file.close()
""" don't store stats from unusued servers """
if report_untrused == False and trusted == False:
continue
total += 1
""" done with this file, storing the stats """
if DHE or ECDHE:
pfsstats['Support PFS'] += 1
if 'DHE-' in results['ciphersuite'][0]['cipher'] or \
'EDH-' in results['ciphersuite'][0]['cipher']:
pfsstats['Prefer PFS'] += 1
pfsstats['Prefer ' + results['ciphersuite'][0]['pfs']] += 1
for s in temppfsstats:
pfsstats[s] += 1
for s in tempkeystats:
keysize['RSA ' + s] += 1
for s in tempecckeystats:
keysize['ECDSA ' + s] += 1
for s in tempdsakeystats:
keysize['DSA ' + s] += 1
for s in tempgostkeystats:
keysize['GOST ' + s] += 1
if dualstack:
dsarsastack += 1
""" save cipher ordering """
if 'serverside' in results:
if results['serverside'] == "False":
cipherordering['Client side'] += 1
else:
cipherordering['Server side'] += 1
else:
cipherordering['Unknown'] += 1
""" simulate handshake with clients """
for client_name in client_ciphers:
if client_compat[client_name]:
if 'serverside' in results and results['serverside'] == "False":
for cipher in client_ciphers[client_name]:
if cipher in list_of_ciphers:
client_selected[client_name] = cipher
if 'RC4' in cipher:
client_RC4_Pref[client_name] = True
if 'CBC3' in cipher:
client_3DES_Pref[client_name] = True
break
else:
for cipher in list_of_ciphers:
if cipher in client_ciphers[client_name]:
client_selected[client_name] = cipher
if 'RC4' in cipher:
client_RC4_Pref[client_name] = True
if 'CBC3' in cipher:
client_3DES_Pref[client_name] = True
break
for s in tempfallbacks:
fallbacks[s] += 1
for s in tempintolerancies:
intolerancies[s] += 1
for s in tempimpl_families:
impl_families[s] += 1
for s in tempsigstats:
sigalg[s] += 1
for s in temprenegotiation:
renegotiation[s] += 1
for s in tempcompression:
compression[s] += 1
if len(tempticketstats) == 1:
for s in tempticketstats:
tickethint[s + " only"] += 1
for s in tempticketstats:
tickethint[s] += 1
eccfallback[tempeccfallback] += 1
eccordering[tempeccordering] += 1
for s in tempecccurve:
ecccurve[s] += 1
for s in tempnpn:
npn[s] += 1
if ocsp_stapling is None:
ocspstaple['Unknown'] += 1
elif ocsp_stapling:
ocspstaple['Supported'] += 1
else:
ocspstaple['Unsupported'] += 1
for s in temppfssigalgfallback:
pfssigalgfallback[s] += 1
for s in temppfssigalgs:
pfssigalgs[s] += 1
for s in temppfssigalgordering:
pfssigalgsordering[s] += 1
""" store cipher stats """
if AESGCM:
cipherstats['AES-GCM'] += 1
if ciphertypes == 1:
cipherstats['AES-GCM Only'] += 1
if AES:
cipherstats['AES'] += 1
if AESCBC:
cipherstats['AES-CBC'] += 1
if ciphertypes == 1:
cipherstats['AES-CBC Only'] += 1
if (AESCBC and ciphertypes == 1) or (AESGCM and ciphertypes == 1)\
or (AESCBC and AESGCM and ciphertypes == 2):
cipherstats['AES Only'] += 1
if CHACHA20:
cipherstats['CHACHA20'] += 1
if ciphertypes == 1:
cipherstats['CHACHA20 Only'] += 1
if DES3:
cipherstats['3DES'] += 1
if ciphertypes == 1:
cipherstats['3DES Only'] += 1
if 'CBC3' in results['ciphersuite'][0]['cipher']:
if 'TLSv1.1' in results['ciphersuite'][0]['protocols'] or\
'TLSv1.2' in results['ciphersuite'][0]['protocols']:
cipherstats['3DES forced in TLS1.1+'] += 1
cipherstats['3DES Preferred'] += 1
if CAMELLIA:
cipherstats['CAMELLIA'] += 1
if ciphertypes == 1:
cipherstats['CAMELLIA Only'] += 1
if RC4:
cipherstats['RC4'] += 1
if ciphertypes == 1:
cipherstats['RC4 Only'] += 1
if 'RC4' in results['ciphersuite'][0]['cipher']:
if 'TLSv1.1' in results['ciphersuite'][0]['protocols'] or\
'TLSv1.2' in results['ciphersuite'][0]['protocols']:
cipherstats['RC4 forced in TLS1.1+'] += 1
cipherstats['RC4 Preferred'] += 1
for client_name in client_ciphers:
if client_compat[client_name]:
if 'ECDHE' in client_selected[client_name]:
client_selected_cipherstats[client_name]['x:ECDHE'] += 1
elif 'DHE' in client_selected[client_name] or \
'EDH' in client_selected[client_name]:
client_selected_cipherstats[client_name]['x:DHE'] += 1
else:
client_selected_cipherstats[client_name]['x:kRSA'] += 1
client_selected_cipherstats[client_name][client_selected[client_name]] += 1
if client_RC4_Only[client_name]:
cipherstats['x:' + client_name + ' RC4 Only'] += 1
for cipher in temp_client_incompat[client_name]:
client_RC4_Only_cipherstats[client_name][cipher] += 1
if client_RC4_Pref[client_name]:
cipherstats['x:' + client_name + ' RC4 Preferred'] += 1
for cipher in temp_client_incompat[client_name]:
client_RC4_preferred_cipherstats[client_name][cipher] += 1
if client_3DES_Only[client_name]:
cipherstats['x:' + client_name + ' 3DES Only'] += 1
for cipher in temp_client_incompat[client_name]:
client_3DES_Only_cipherstats[client_name][cipher] += 1
if client_3DES_Pref[client_name]:
cipherstats['x:' + client_name + ' 3DES Preferred'] += 1
for cipher in temp_client_incompat[client_name]:
client_3DES_preferred_cipherstats[client_name][cipher] += 1
else:
cipherstats['x:' + client_name + ' incompatible'] += 1
for cipher in temp_client_incompat[client_name]:
client_incompatible_cipherstats[client_name][cipher] += 1
for cipher in tempcipherstats:
cipherstats[cipher] += 1
""" store handshake stats """
if AECDH:
handshakestats['AECDH'] += 1
if ADH:
handshakestats['ADH'] += 1
if ECDHE:
handshakestats['ECDHE'] += 1
if DHE:
handshakestats['DHE'] += 1
if DHE and ECDHE:
handshakestats['ECDHE and DHE'] += 1
if ECDH:
handshakestats['ECDH'] += 1
if DH:
handshakestats['DH'] += 1
if GOST2001_kex:
handshakestats['GOST2001'] += 1
if RSA:
handshakestats['RSA'] += 1
""" store protocol stats """
if SSL2:
protocolstats['SSL2'] += 1
if not SSL3 and not TLS1 and not TLS1_1 and not TLS1_2:
protocolstats['SSL2 Only'] += 1
if SSL3:
protocolstats['SSL3'] += 1
if not SSL2 and not TLS1 and not TLS1_1 and not TLS1_2:
protocolstats['SSL3 Only'] += 1
if not TLS1 and not TLS1_1 and not TLS1_2:
protocolstats['SSL3 or lower Only'] += 1
if TLS1:
protocolstats['TLS1'] += 1
if not SSL2 and not SSL3 and not TLS1_1 and not TLS1_2:
protocolstats['TLS1 Only'] += 1
if not TLS1_1 and not TLS1_2:
protocolstats['TLS1 or lower Only'] += 1
if not SSL2 and (SSL3 or TLS1) and not TLS1_1 and not TLS1_2:
protocolstats['SSL3 or TLS1 Only'] += 1
if not SSL2 and not SSL3 and not TLS1:
protocolstats['TLS1.1 or up Only'] += 1
if TLS1_1:
protocolstats['TLS1.1'] += 1
if not SSL2 and not SSL3 and not TLS1 and not TLS1_2:
protocolstats['TLS1.1 Only'] += 1
if TLS1_2:
protocolstats['TLS1.2'] += 1
if not SSL2 and not SSL3 and not TLS1 and not TLS1_1:
protocolstats['TLS1.2 Only'] += 1
if TLS1_2 and not TLS1_1 and TLS1:
protocolstats['TLS1.2, 1.0 but not 1.1'] += 1
# for testing, break early
#if total % 1999 == 0:
# break
""" The 'x:' + client_name + ' RC4 Preferred' counts only sites that
effectively prefer RC4 when using given client, to make reporting more
readable, sum it with sites that do that for all ciphers"""
print("SSL/TLS survey of %i websites from Alexa's top 1 million" % total)
if report_untrused == False:
print("Stats only from connections that did provide valid certificates")
print("(or anonymous DH from servers that do also have valid certificate installed)\n")
""" Display stats """
print("\nSupported Ciphers Count Percent")
print("-------------------------+---------+-------")
for stat in sorted(cipherstats):
percent = round(cipherstats[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(cipherstats[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nCipher ordering Count Percent")
print("-------------------------+---------+-------")
for stat in sorted(cipherordering):
percent = round(cipherordering[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(cipherordering[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nCLIENT specific statistics\n")
for client_name in client_ciphers:
print("\n" + client_name + " selected ciphers Count Percent")
print("-----------------------------+---------+------")
for stat in sorted(client_selected_cipherstats[client_name]):
percent = round(client_selected_cipherstats[client_name][stat] / total * 100, 4)
sys.stdout.write(stat.ljust(30) + " " + str(client_selected_cipherstats[client_name][stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\n" + client_name + " RC4 Only other ciphers Count Percent")
print("-----------------------------+---------+------")
for stat in sorted(client_RC4_Only_cipherstats[client_name]):
percent = round(client_RC4_Only_cipherstats[client_name][stat] / total * 100, 4)
sys.stdout.write(stat.ljust(30) + " " + str(client_RC4_Only_cipherstats[client_name][stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\n" + client_name + " RC4 pref other ciphers Count Percent")
print("-----------------------------+---------+------")
for stat in sorted(client_RC4_preferred_cipherstats[client_name]):
percent = round(client_RC4_preferred_cipherstats[client_name][stat] / total * 100, 4)
sys.stdout.write(stat.ljust(30) + " " + str(client_RC4_preferred_cipherstats[client_name][stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\n" + client_name + " incompatible ciphers Count Percent")
print("-----------------------------+---------+------")
for stat in sorted(client_incompatible_cipherstats[client_name]):
percent = round(client_incompatible_cipherstats[client_name][stat] / total * 100, 4)
sys.stdout.write(stat.ljust(30) + " " + str(client_incompatible_cipherstats[client_name][stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nSupported Handshakes Count Percent")
print("-------------------------+---------+-------")
for stat in sorted(handshakestats):
percent = round(handshakestats[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(handshakestats[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nSupported NPN protocols Count Percent ")
print("-------------------------+---------+--------")
for name, val in sorted(npn.items()):
percent = round(val / total * 100, 4)
sys.stdout.write(name.ljust(25) + " " + str(val).ljust(10) + str(percent).ljust(9) + "\n")
print("\nSupported PFS Count Percent PFS Percent")
print("-------------------------+---------+--------+-----------")
for stat in sorted(pfsstats):
percent = round(pfsstats[stat] / total * 100, 4)
pfspercent = 0
if "ECDH," in stat:
pfspercent = round(pfsstats[stat] / handshakestats['ECDHE'] * 100, 4)
elif "DH," in stat:
pfspercent = round(pfsstats[stat] / handshakestats['DHE'] * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(pfsstats[stat]).ljust(10) + str(percent).ljust(9) + str(pfspercent) + "\n")
print("\nSupported ECC curves Count Percent ")
print("-------------------------+---------+--------")
for stat in sorted(ecccurve):
percent = round(ecccurve[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(ecccurve[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nUnsupported curve fallback Count Percent ")
print("------------------------------+---------+--------")
for stat in sorted(eccfallback):
percent = round(eccfallback[stat] / total * 100,4)
sys.stdout.write(stat.ljust(30) + " " + str(eccfallback[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nECC curve ordering Count Percent ")
print("-------------------------+---------+--------")
for stat in sorted(eccordering):
percent = round(eccordering[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(eccordering[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nTLSv1.2 PFS supported sigalgs Count Percent ")
print("------------------------------+---------+--------")
for stat in sorted(pfssigalgs):
percent = round(pfssigalgs[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(30) + " " + str(pfssigalgs[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nTLSv1.2 PFS ordering Count Percent ")
print("------------------------------+---------+--------")
for stat in sorted(pfssigalgsordering):
percent = round(pfssigalgsordering[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(30) + " " + str(pfssigalgsordering[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nTLSv1.2 PFS sigalg fallback Count Percent ")
print("------------------------------+---------+--------")
for stat in sorted(pfssigalgfallback):
percent = round(pfssigalgfallback[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(30) + " " + str(pfssigalgfallback[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nRenegotiation Count Percent ")
print("-------------------------+---------+--------")
for stat in natural_sort(renegotiation):
percent = round(renegotiation[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(renegotiation[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nCompression Count Percent ")
print("-------------------------+---------+--------")
for stat in natural_sort(compression):
percent = round(compression[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(compression[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nTLS session ticket hint Count Percent ")
print("-------------------------+---------+--------")
for stat in natural_sort(tickethint):
percent = round(tickethint[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(tickethint[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nCertificate sig alg Count Percent ")
print("-------------------------+---------+--------")
for stat in sorted(sigalg):
percent = round(sigalg[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(sigalg[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nCertificate key size Count Percent ")
print("-------------------------+---------+--------")
for stat in sorted(keysize):
percent = round(keysize[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(keysize[stat]).ljust(10) + str(percent).ljust(9) + "\n")
if total == 0:
total = 1
sys.stdout.write("RSA/ECDSA Dual Stack".ljust(25) + " " + str(dsarsastack).ljust(10) + str(round(dsarsastack/total * 100, 4)) + "\n")
print("\nOCSP stapling Count Percent ")
print("-------------------------+---------+--------")
for stat in sorted(ocspstaple):
percent = round(ocspstaple[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(ocspstaple[stat]).ljust(10) + str(percent).ljust(9) + "\n")
print("\nSupported Protocols Count Percent")
print("-------------------------+---------+-------")
for stat in sorted(protocolstats):
percent = round(protocolstats[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(25) + " " + str(protocolstats[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nRequired fallbacks Count Percent")
print("----------------------------------------+---------+-------")
print("big small v2 ")
print("----+-----+-----+------------------------+---------+-------")
for stat in sorted(fallbacks):
percent = round(fallbacks[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(40) + " " + str(fallbacks[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nFallback column names")
print("------------------------")
fallback_ids_sorted=sorted(fallback_ids.items(), key=operator.itemgetter(1))
for touple in fallback_ids_sorted:
print(str(touple[1]+1).rjust(3) + " " + str(touple[0]))
print("\nClient Hello intolerance Count Percent")
print("----------------------------------------+---------+-------")
for stat in natural_sort(intolerancies):
percent = round(intolerancies[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(40) + " " + str(intolerancies[stat]).ljust(10) + str(percent).ljust(4) + "\n")
print("\nImplementation families Count Percent")
print("-----------------------------------------------------+-----------+-------")
for stat in natural_sort(impl_families):
percent = round(impl_families[stat] / total * 100, 4)
sys.stdout.write(stat.ljust(50) + " " + str(impl_families[stat]).ljust(10) + str(percent).ljust(4) + "\n")
| 41,143 | Python | .py | 867 | 34.982699 | 146 | 0.518307 | mozilla/cipherscan | 1,953 | 265 | 34 | MPL-2.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,089 | setup.py | MTG_sms-tools/setup.py | import sys
import numpy
from Cython.Build import cythonize
from setuptools import Extension, setup
# C and Cython sources compiled together into a single extension module
sourcefiles = [
    "smstools/models/utilFunctions_C/utilFunctions.c",
    "smstools/models/utilFunctions_C/cutilFunctions.pyx",
]
# pick the C math library to link: "m" on POSIX, the MSVC runtime on Windows
if sys.platform == "win32":
    library = "msvcrt"
else:
    library = "m"
extensions = [
    Extension(
        "smstools.models.utilFunctions_C.utilFunctions_C",
        sourcefiles,
        include_dirs=[numpy.get_include()],  # headers for the NumPy C API
        libraries=[library],
    ),
]
setup(
    name="SMS Tools",
    ext_modules=cythonize(extensions),  # translate the .pyx to C, then compile
)
| 570 | Python | .py | 24 | 19.958333 | 58 | 0.709024 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,090 | harmonicTransformations.py | MTG_sms-tools/smstools/transformations/harmonicTransformations.py | # transformations applied to the harmonics of a sound
import numpy as np
from scipy.interpolate import interp1d
def harmonicFreqScaling(
    hfreq, hmag, freqScaling, freqStretching, timbrePreservation, fs
):
    """
    Scale and stretch the frequencies of the harmonics of a sound.
    hfreq, hmag: frequencies and magnitudes of input harmonics
    freqScaling: scaling factors, in time-value pairs (value of 1 is no scaling)
    freqStretching: stretching factors, in time-value pairs (value of 1 is no stretching)
    timbrePreservation: 0 no timbre preservation, 1 timbre preservation
    fs: sampling rate of input sound
    returns yhfreq, yhmag: frequencies and magnitudes of output harmonics
    """
    # both control arrays must come as (time, value) pairs
    if freqScaling.size % 2 != 0:
        raise ValueError("Frequency scaling array does not have an even size")
    if freqStretching.size % 2 != 0:
        raise ValueError("Frequency stretching array does not have an even size")
    nFrames = hfreq.shape[0]
    # sample the scaling control curve once per analysis frame
    scaleEnv = np.interp(
        np.arange(nFrames),
        nFrames * freqScaling[::2] / freqScaling[-2],
        freqScaling[1::2],
    )
    # sample the stretching control curve once per analysis frame
    stretchEnv = np.interp(
        np.arange(nFrames),
        nFrames * freqStretching[::2] / freqStretching[-2],
        freqStretching[1::2],
    )
    yhfreq = np.zeros_like(hfreq)
    yhmag = np.zeros_like(hmag)
    for frame in range(nFrames):
        active = np.where(hfreq[frame, :] != 0)[0]  # harmonics present in this frame
        if active.size == 0:  # silent frame: leave the zeros in place
            continue
        preserveTimbre = (timbrePreservation == 1) & (active.size > 1)
        if preserveTimbre:
            # spectral envelope sampled at the ORIGINAL harmonic locations,
            # anchored at DC and Nyquist so it spans the whole spectrum
            envFreqs = np.append(np.append(0, hfreq[frame, active]), fs / 2)
            envMags = np.append(
                np.append(hmag[frame, 0], hmag[frame, active]), hmag[frame, -1]
            )
            specEnvelope = interp1d(
                envFreqs, envMags, kind="linear", bounds_error=False, fill_value=-100
            )
        # apply the global scaling factor, then the per-harmonic stretching
        yhfreq[frame, active] = hfreq[frame, active] * scaleEnv[frame]
        yhfreq[frame, active] = yhfreq[frame, active] * (stretchEnv[frame] ** active)
        if preserveTimbre:
            # resample the original envelope at the new frequencies
            yhmag[frame, active] = specEnvelope(yhfreq[frame, active])
        else:
            yhmag[frame, active] = hmag[frame, active]  # keep input magnitudes
    return yhfreq, yhmag
| 2,913 | Python | .py | 55 | 45.109091 | 88 | 0.66211 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,091 | stftTransformations.py | MTG_sms-tools/smstools/transformations/stftTransformations.py | # functions that implement transformations using the stft
import math
import os
import sys
import numpy as np
from scipy.signal import resample
from smstools.models import dftModel as DFT
def stftFiltering(x, fs, w, N, H, filter):  # noqa: A002 - 'filter' kept for API compatibility
    """
    Apply a filter to a sound by using the STFT.
    x: input sound, fs: sampling rate, w: analysis window,
    N: FFT size, H: hop size
    filter: magnitude response of filter with frequency-magnitude pairs (in dB)
    returns y: output sound
    """
    M = w.size
    halfUp = int(math.floor((M + 1) / 2))  # half window size, rounded up
    halfDown = int(math.floor(M / 2))  # half window size, rounded down
    # zero-pad so the first/last windows are centered on the first/last samples
    xPadded = np.append(np.zeros(halfDown), x)
    xPadded = np.append(xPadded, np.zeros(halfUp))
    window = w / sum(w)  # normalized analysis window
    y = np.zeros(xPadded.size)
    center = halfUp  # frame center, starts in the middle of the first window
    lastCenter = xPadded.size - halfUp
    while center <= lastCenter:
        # -----analysis-----
        frame = xPadded[center - halfUp : center + halfDown]
        mX, pX = DFT.dftAnal(frame, window, N)
        # -----transformation: add the filter response in dB-----
        mY = mX + filter
        # -----synthesis: inverse DFT and overlap-add-----
        y[center - halfUp : center + halfDown] += H * DFT.dftSynth(mY, pX, M)
        center += H
    # strip the padding that was added before analysis
    y = np.delete(y, range(halfDown))
    y = np.delete(y, range(y.size - halfUp, y.size))
    return y
def stftMorph(x1, x2, fs, w1, N1, w2, N2, H1, smoothf, balancef):
    """
    Morph of two sounds using the STFT
    x1, x2: input sounds, fs: sampling rate
    w1, w2: analysis windows, N1, N2: FFT sizes, H1: hop size of sound 1
    smoothf: smooth factor of sound 2, bigger than 0 to max of 1, where 1 is no smoothing
    balancef: balance between the 2 sounds, from 0 to 1, where 0 is sound 1 and 1 is sound 2
    returns y: output sound (same length as x1; the phase of sound 1 is used throughout)
    """
    if N2 / 2 * smoothf < 3:  # raise exception if decimation factor too small
        raise ValueError("Smooth factor too small")
    if smoothf > 1:  # raise exception if decimation factor too big
        raise ValueError("Smooth factor above 1")
    if balancef > 1 or balancef < 0:  # raise exception if balancef outside 0-1
        raise ValueError("Balance factor outside range")
    if H1 <= 0:  # raise error if hop size 0 or negative
        raise ValueError("Hop size (H1) smaller or equal to 0")
    M1 = w1.size  # size of analysis window of sound 1
    hM1_1 = int(math.floor((M1 + 1) / 2))  # half analysis window size by rounding
    hM1_2 = int(math.floor(M1 / 2))  # half analysis window size by floor
    L = int(x1.size / H1)  # number of frames for x1 (drives the whole morph)
    x1 = np.append(
        np.zeros(hM1_2), x1
    )  # add zeros at beginning to center first window at sample 0
    x1 = np.append(x1, np.zeros(hM1_1))  # add zeros at the end to analyze last sample
    pin1 = hM1_1  # initialize sound pointer in middle of analysis window
    w1 = w1 / sum(w1)  # normalize analysis window
    M2 = w2.size  # size of analysis window of sound 2
    hM2_1 = int(math.floor((M2 + 1) / 2))  # half analysis window size by rounding
    hM2_2 = int(math.floor(M2 / 2))  # half analysis window size by floor
    H2 = int(x2.size / L)  # hop size for second sound, so both sounds span L frames
    x2 = np.append(
        np.zeros(hM2_2), x2
    )  # add zeros at beginning to center first window at sample 0
    x2 = np.append(x2, np.zeros(hM2_1))  # add zeros at the end to analyze last sample
    pin2 = hM2_1  # initialize sound pointer in middle of analysis window
    y = np.zeros(x1.size)  # initialize output array
    for l in range(L):
        # -----analysis: magnitude/phase spectra of one frame of each sound-----
        mX1, pX1 = DFT.dftAnal(x1[pin1 - hM1_1 : pin1 + hM1_2], w1, N1)  # compute dft
        mX2, pX2 = DFT.dftAnal(x2[pin2 - hM2_1 : pin2 + hM2_2], w2, N2)  # compute dft
        # -----transformation-----
        # smooth the second spectrum by downsampling it (floor at -200 dB first
        # so resampling is not dominated by -inf-like magnitude values)
        mX2smooth = resample(
            np.maximum(-200, mX2), int(mX2.size * smoothf)
        )  # smooth spectrum of second sound
        mX2 = resample(mX2smooth, mX1.size)  # generate back the same size spectrum
        mY = balancef * mX2 + (1 - balancef) * mX1  # weighted mix of the two spectra
        # -----synthesis: invert using the phase of sound 1-----
        y[pin1 - hM1_1 : pin1 + hM1_2] += H1 * DFT.dftSynth(
            mY, pX1, M1
        )  # overlap-add to generate output sound
        pin1 += H1  # advance sound pointer of sound 1
        pin2 += H2  # advance sound pointer of sound 2
    y = np.delete(
        y, range(hM1_2)
    )  # delete half of first window which was added before analysis
    y = np.delete(
        y, range(y.size - hM1_1, y.size)
    )  # delete the zeros that were added at the end before analysis
    return y
| 5,016 | Python | .py | 102 | 42.95098 | 92 | 0.635381 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,092 | hpsTransformations.py | MTG_sms-tools/smstools/transformations/hpsTransformations.py | # functions that implement transformations using the hpsModel
import numpy as np
from scipy.interpolate import interp1d
def hpsTimeScale(hfreq, hmag, stocEnv, timeScaling):
    """
    Time scaling of the harmonic plus stochastic representation.
    hfreq, hmag: harmonic frequencies and magnitudes; stocEnv: stochastic envelope
    timeScaling: scaling factors, in time-value pairs
    returns yhfreq, yhmag, ystocEnv: hps output representation
    raises ValueError: if timeScaling is not made of (time, value) pairs
    """
    if timeScaling.size % 2 != 0:  # raise exception if array not even length
        raise ValueError("Time scaling array does not have an even size")
    L = hfreq[:, 0].size  # number of input frames
    maxInTime = max(timeScaling[::2])  # maximum value used as input time
    maxOutTime = max(timeScaling[1::2])  # maximum value used as output time
    outL = int(L * maxOutTime / maxInTime)  # number of output frames
    inFrames = (L - 1) * timeScaling[::2] / maxInTime  # input time values in frames
    outFrames = outL * timeScaling[1::2] / maxOutTime  # output time values in frames
    timeScalingEnv = interp1d(
        outFrames, inFrames, fill_value=0
    )  # interpolation function from output frame to input frame
    indexes = timeScalingEnv(np.arange(outL))  # input frame index per output frame
    yhfreq = np.zeros((outL, hfreq.shape[1]))  # allocate space for yhfreq
    yhmag = np.zeros((outL, hmag.shape[1]))  # allocate space for yhmag
    ystocEnv = np.zeros((outL, stocEnv.shape[1]))  # allocate space for ystocEnv
    # Fill EVERY output frame with its closest input frame. Iterating over all
    # of `indexes` (not indexes[1:]) fixes the original off-by-one, which
    # dropped the first index and left the last pre-allocated row all zeros;
    # this also matches the behavior of sineTimeScaling.
    for frameIdx, inputIdx in enumerate(indexes):
        closest = int(round(inputIdx))  # closest input frame
        yhfreq[frameIdx, :] = hfreq[closest, :]
        yhmag[frameIdx, :] = hmag[closest, :]
        ystocEnv[frameIdx, :] = stocEnv[closest, :]
    return yhfreq, yhmag, ystocEnv
def hpsMorph(
    hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp
):
    """
    Morph between two sounds using the harmonic plus stochastic model.
    hfreq1, hmag1, stocEnv1: hps representation of sound 1
    hfreq2, hmag2, stocEnv2: hps representation of sound 2
    hfreqIntp: interpolation factor between the harmonic frequencies of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
    hmagIntp: interpolation factor between the harmonic magnitudes of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
    stocIntp: interpolation factor between the stochastic representation of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
    returns yhfreq, yhmag, ystocEnv: hps output representation
    raises ValueError: if any interpolation array is not made of (time, value) pairs
    """
    if hfreqIntp.size % 2 != 0:  # raise exception if array not even length
        raise ValueError(
            "Harmonic frequencies interpolation array does not have an even size"
        )
    if hmagIntp.size % 2 != 0:  # raise exception if array not even length
        raise ValueError("Harmonic magnitudes interpolation does not have an even size")
    if stocIntp.size % 2 != 0:  # raise exception if array not even length
        raise ValueError("Stochastic component array does not have an even size")
    L1 = hfreq1[:, 0].size  # number of frames of sound 1
    L2 = hfreq2[:, 0].size  # number of frames of sound 2
    # NOTE: the time axes of the control arrays are normalized IN PLACE,
    # mutating the caller's arrays (kept as-is for backward compatibility)
    hfreqIntp[::2] = (L1 - 1) * hfreqIntp[::2] / hfreqIntp[-2]  # normalize input times
    hmagIntp[::2] = (L1 - 1) * hmagIntp[::2] / hmagIntp[-2]  # normalize input times
    stocIntp[::2] = (L1 - 1) * stocIntp[::2] / stocIntp[-2]  # normalize input times
    hfreqIntpEnv = interp1d(
        hfreqIntp[0::2], hfreqIntp[1::2], fill_value=0
    )  # frequency interpolation factor per frame
    hfreqIndexes = hfreqIntpEnv(np.arange(L1))
    hmagIntpEnv = interp1d(
        hmagIntp[0::2], hmagIntp[1::2], fill_value=0
    )  # magnitude interpolation factor per frame
    hmagIndexes = hmagIntpEnv(np.arange(L1))
    stocIntpEnv = interp1d(
        stocIntp[0::2], stocIntp[1::2], fill_value=0
    )  # stochastic interpolation factor per frame
    stocIndexes = stocIntpEnv(np.arange(L1))
    yhfreq = np.zeros_like(hfreq1)  # create empty output matrix
    yhmag = np.zeros_like(hmag1)  # create empty output matrix
    ystocEnv = np.zeros_like(stocEnv1)  # create empty output matrix
    for l in range(L1):  # generate morphed frames
        # closest frame of sound 2 for this frame of sound 1
        dataIndex = int(round(((L2 - 1) * l) / float(L1 - 1)))
        # harmonics present in both frames; np.nonzero already returns an
        # integer index array (the old `dtype=np.int` alias was removed in
        # NumPy 1.24 and raised an AttributeError)
        harmonics = np.intersect1d(
            np.nonzero(hfreq1[l, :])[0], np.nonzero(hfreq2[dataIndex, :])[0]
        )
        # interpolate the frequencies of the shared harmonics
        yhfreq[l, harmonics] = (1 - hfreqIndexes[l]) * hfreq1[
            l, harmonics
        ] + hfreqIndexes[l] * hfreq2[dataIndex, harmonics]
        # interpolate the magnitudes of the shared harmonics
        yhmag[l, harmonics] = (1 - hmagIndexes[l]) * hmag1[l, harmonics] + hmagIndexes[
            l
        ] * hmag2[dataIndex, harmonics]
        # interpolate the stochastic envelopes of both frames
        ystocEnv[l, :] = (1 - stocIndexes[l]) * stocEnv1[l, :] + stocIndexes[
            l
        ] * stocEnv2[dataIndex, :]
    return yhfreq, yhmag, ystocEnv
| 5,401 | Python | .py | 94 | 50.925532 | 141 | 0.681398 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,093 | sineTransformations.py | MTG_sms-tools/smstools/transformations/sineTransformations.py | # functions that implement transformations using the sineModel
import numpy as np
from scipy.interpolate import interp1d
def sineTimeScaling(sfreq, smag, timeScaling):
    """
    Time scaling of sinusoidal tracks.
    sfreq, smag: frequencies and magnitudes of input sinusoidal tracks
    timeScaling: scaling factors, in time-value pairs
    returns ysfreq, ysmag: frequencies and magnitudes of output sinusoidal
    tracks, always of shape (outputFrames, nTracks)
    raises ValueError: if timeScaling is not made of (time, value) pairs
    """
    if timeScaling.size % 2 != 0:  # raise exception if array not even length
        raise ValueError("Time scaling array does not have an even size")
    L = sfreq.shape[0]  # number of input frames
    maxInTime = max(timeScaling[::2])  # maximum value used as input time
    maxOutTime = max(timeScaling[1::2])  # maximum value used as output time
    outL = int(L * maxOutTime / maxInTime)  # number of output frames
    inFrames = (L - 1) * timeScaling[::2] / maxInTime  # input time values in frames
    outFrames = outL * timeScaling[1::2] / maxOutTime  # output time values in frames
    timeScalingEnv = interp1d(
        outFrames, inFrames, fill_value=0
    )  # interpolation function from output frame to input frame
    indexes = timeScalingEnv(np.arange(outL))  # input frame index per output frame
    # Map every output frame to its closest input frame in one vectorized step.
    # The original grew the output row by row with np.vstack, which copies the
    # whole array each iteration (O(n^2)); fancy indexing does it in O(n) and
    # always yields a 2-D result, even for a single output frame.
    closest = np.round(indexes).astype(int)
    ysfreq = sfreq[closest, :]
    ysmag = smag[closest, :]
    return ysfreq, ysmag
def sineFreqScaling(sfreq, freqScaling):
    """
    Frequency scaling of sinusoidal tracks.
    sfreq: frequencies of input sinusoidal tracks
    freqScaling: scaling factors, in time-value pairs (value of 1 is no scaling)
    returns ysfreq: frequencies of output sinusoidal tracks
    """
    if freqScaling.size % 2 != 0:  # must come as (time, value) pairs
        raise ValueError("Frequency scaling array does not have an even size")
    nFrames = sfreq.shape[0]
    # per-frame scaling factors, linearly interpolated from the control pairs
    scaleEnv = np.interp(
        np.arange(nFrames),
        nFrames * freqScaling[::2] / freqScaling[-2],
        freqScaling[1::2],
    )
    ysfreq = np.zeros_like(sfreq)
    for frame in range(nFrames):
        active = np.where(sfreq[frame, :] != 0)[0]  # tracks present in this frame
        if active.size == 0:  # nothing to scale in an empty frame
            continue
        ysfreq[frame, active] = sfreq[frame, active] * scaleEnv[frame]
    return ysfreq
| 2,788 | Python | .py | 55 | 44.472727 | 88 | 0.682685 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,094 | stochasticTransformations.py | MTG_sms-tools/smstools/transformations/stochasticTransformations.py | # functions that implement transformations using the hpsModel
import numpy as np
from scipy.interpolate import interp1d
def stochasticTimeScale(stocEnv, timeScaling):
    """
    Time scaling of the stochastic representation of a sound.
    stocEnv: stochastic envelope
    timeScaling: scaling factors, in time-value pairs
    returns ystocEnv: time-scaled stochastic envelope
    """
    if timeScaling.size % 2 != 0:  # must come as (time, value) pairs
        raise ValueError("Time scaling array does not have an even size")
    nFrames = stocEnv[:, 0].size  # number of input frames
    nOut = int(nFrames * timeScaling[-1] / timeScaling[-2])  # number of synthesis frames
    # interpolation object mapping normalized output time to normalized input time
    scalingEnv = interp1d(
        timeScaling[::2] / timeScaling[-2], timeScaling[1::2] / timeScaling[-1]
    )
    # fractional input frame index for every output frame
    indexes = (nFrames - 1) * scalingEnv(np.arange(nOut) / float(nOut))
    ystocEnv = stocEnv[0, :]  # first output frame is the same as the input
    for idx in indexes[1:]:  # remaining frames copy the closest input frame
        ystocEnv = np.vstack((ystocEnv, stocEnv[int(round(idx)), :]))
    return ystocEnv
| 1,233 | Python | .py | 27 | 40.074074 | 83 | 0.693844 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,095 | utilFunctions.py | MTG_sms-tools/smstools/models/utilFunctions.py | import copy
import os
import subprocess
import sys
import numpy as np
from scipy.fft import fft, fftshift, ifft
from scipy.io.wavfile import read, write
from scipy.signal import resample
from scipy.signal.windows import blackmanharris, triang
# Try to import the compiled (cython) helpers used by the analysis functions
# below (genSpecSines, twm).  Pure-python fallbacks exist in this module
# (genSpecSines_p, TWM_p), but the module deliberately refuses to run without
# the compiled versions and exits immediately with build instructions.
try:
    from smstools.models.utilFunctions_C import utilFunctions_C as UF_C
except ImportError:
    print("\n")
    print(
        "-------------------------------------------------------------------------------"
    )
    print("Warning:")
    print("Cython modules for some of the core functions were not imported.")
    print("Please refer to the README.md file in the 'sms-tools' directory,")
    print("for the instructions to compile the cython modules.")
    print("Exiting the code!!")
    print(
        "-------------------------------------------------------------------------------"
    )
    print("\n")
    sys.exit(0)
# winsound is only available on Windows; record whether the import succeeded so
# wavplay() can fall back to a message instead of crashing at call time.
winsound_imported = False
if sys.platform == "win32":
    try:
        import winsound
        winsound_imported = True
    except:
        print("You won't be able to play sounds, winsound could not be imported")
def isPower2(num):
    """
    Return True if num is a positive power of two, False otherwise.
    """
    # a power of two has exactly one bit set, so num & (num - 1) clears it to 0
    return num > 0 and (num & (num - 1)) == 0
# Full-scale factors used to normalise integer PCM samples to the [-1, 1) range.
INT16_FAC = 2**15  # full scale of 16-bit signed audio (32768)
INT32_FAC = 2**31  # full scale of 32-bit signed audio
INT64_FAC = 2**63  # full scale of 64-bit signed audio
# map from a numpy dtype name (as returned by x.dtype.name in wavread) to the
# divisor that normalises samples of that type; float data is already normalised
norm_fact = {
    "int16": INT16_FAC,
    "int32": INT32_FAC,
    "int64": INT64_FAC,
    "float32": 1.0,
    "float64": 1.0,
}
def wavread(filename):
    """
    Read a sound file and convert it to a normalized floating point array
    filename: name of file to read
    returns fs: sampling rate of file, x: floating point array
    raises ValueError for a missing file, a non-mono file, or a rate != 44100
    """
    if not os.path.isfile(filename):  # fail early on a missing input file
        raise ValueError("Input file is wrong")
    fs, x = read(filename)
    if x.ndim != 1:  # only mono files are supported
        raise ValueError("Audio file should be mono")
    if fs != 44100:  # only the standard 44.1 kHz rate is supported
        raise ValueError("Sampling rate of input sound should be 44100")
    # normalize by the full scale of the stored sample type (see norm_fact)
    x = np.float32(x) / norm_fact[x.dtype.name]
    return fs, x
def wavplay(filename):
    """
    Play a wav audio file from system using OS calls
    filename: name of file to read
    """
    if not os.path.isfile(filename):  # nothing to play if the file is missing
        print(
            "Input file does not exist. Make sure you computed the analysis/synthesis"
        )
        return
    if sys.platform in ("linux", "linux2"):  # linux
        subprocess.call(["aplay", filename])
    elif sys.platform == "darwin":  # OS X
        subprocess.call(["afplay", filename])
    elif sys.platform == "win32":
        if winsound_imported:  # set at import time when winsound is available
            winsound.PlaySound(filename, winsound.SND_FILENAME)
        else:
            print("Cannot play sound, winsound could not be imported")
    else:
        print("Platform not recognized")
def wavwrite(y, fs, filename):
    """
    Write a sound file from an array with the sound and the sampling rate
    y: floating point array of one dimension, fs: sampling rate
    filename: name of file to create
    """
    samples = copy.deepcopy(y)  # work on a copy so the caller's array is untouched
    samples *= INT16_FAC  # map the [-1, 1) float range onto the int16 full scale
    write(filename, fs, np.int16(samples))  # convert to int16 and write to disk
def peakDetection(mX, t):
    """
    Detect spectral peak locations
    mX: magnitude spectrum, t: threshold
    returns ploc: peak locations (bin indexes into mX)
    """
    inner = mX[1:-1]  # candidate bins; the spectrum edges cannot be peaks
    above_thresh = np.where(np.greater(inner, t), inner, 0)  # bins above threshold
    gt_next = np.where(inner > mX[2:], inner, 0)  # higher than right neighbour
    gt_prev = np.where(inner > mX[:-2], inner, 0)  # higher than left neighbour
    # a peak must satisfy all three conditions; +1 undoes the [1:-1] shift
    ploc = (above_thresh * gt_next * gt_prev).nonzero()[0] + 1
    return ploc
def peakInterp(mX, pX, ploc):
    """
    Interpolate peak values using parabolic interpolation
    mX, pX: magnitude and phase spectrum, ploc: locations of peaks
    returns iploc, ipmag, ipphase: interpolated peak location, magnitude and phase values
    """
    mag = mX[ploc]  # magnitude at each detected bin
    lmag = mX[ploc - 1]  # magnitude one bin to the left
    rmag = mX[ploc + 1]  # magnitude one bin to the right
    # vertex of the parabola through the three points gives the refined location
    iploc = ploc + 0.5 * (lmag - rmag) / (lmag - 2 * mag + rmag)
    ipmag = mag - 0.25 * (lmag - rmag) * (iploc - ploc)  # magnitude at the vertex
    # phase is read off the phase spectrum by linear interpolation
    ipphase = np.interp(iploc, np.arange(0, pX.size), pX)
    return iploc, ipmag, ipphase
def sinc(x, N):
    """
    Generate the main lobe of a sinc function (Dirichlet kernel)
    x: array of indexes to compute; N: size of FFT to simulate
    returns y: samples of the main lobe of a sinc function
    """
    numerator = np.sin(N * x / 2)
    denominator = np.sin(x / 2)
    y = numerator / denominator  # Dirichlet kernel
    y[np.isnan(y)] = N  # replace the 0/0 at x == 0 with the limit value N
    return y
def genBhLobe(x):
    """
    Generate the main lobe of the spectrum of a Blackman-Harris window
    x: bin positions to compute (real values)
    returns y: samples of the main lobe, normalized to peak at 1
    """
    N = 512  # FFT size used to sample the lobe
    f = x * np.pi * 2 / N  # bin positions expressed as radian frequencies
    df = 2 * np.pi / N  # frequency step between adjacent bins
    consts = [0.35875, 0.48829, 0.14128, 0.01168]  # 4-term Blackman-Harris coefficients
    y = np.zeros(x.size)  # accumulator for the shifted Dirichlet kernels
    # the transform of a 4-term cosine window is a sum of frequency-shifted sincs
    for m, c in enumerate(consts):
        y += c / 2 * (sinc(f - df * m, N) + sinc(f + df * m, N))
    return y / N / consts[0]  # normalize so the lobe peaks at 1
def genSpecSines(ipfreq, ipmag, ipphase, N, fs):
    """
    Generate a spectrum from a series of sine values, calling a C function
    ipfreq, ipmag, ipphase: sine peaks frequencies, magnitudes and phases
    N: size of the complex spectrum to generate; fs: sampling frequency
    returns Y: generated complex spectrum of sines
    """
    # frequencies in Hz are converted to fractional bin positions (N*f/fs)
    # before calling the compiled helper; genSpecSines_p below is the
    # pure-python equivalent of UF_C.genSpecSines
    Y = UF_C.genSpecSines(N * ipfreq / float(fs), ipmag, ipphase, N)
    return Y
def genSpecSines_p(ipfreq, ipmag, ipphase, N, fs):
    """
    Generate a spectrum from a series of sine values
    ipfreq, ipmag, ipphase: sine peaks frequencies (Hz), magnitudes (dB) and phases
    N: size of the complex spectrum to generate; fs: sampling rate
    returns Y: generated complex spectrum of sines
    """
    Y = np.zeros(N, dtype=complex)  # initialize output complex spectrum
    hN = N // 2  # size of positive freq. spectrum
    # each sine contributes a 9-bin Blackman-Harris main lobe centred at its
    # (fractional) bin position; lobes falling outside ]0, hN-1[ are skipped
    for i in range(0, ipfreq.size):  # generate all sine spectral lobes
        loc = N * ipfreq[i] / fs  # it should be in range ]0,hN-1[
        if loc == 0 or loc > hN - 1:
            continue
        binremainder = round(loc) - loc
        lb = np.arange(
            binremainder - 4, binremainder + 5
        )  # main lobe (real value) bins to read
        lmag = genBhLobe(lb) * 10 ** (
            ipmag[i] / 20
        )  # lobe magnitudes of the complex exponential (dB -> linear)
        b = np.arange(round(loc) - 4, round(loc) + 5, dtype="int")
        for m in range(0, 9):
            if b[m] < 0:  # peak lobe crosses DC bin: fold back with conjugate phase
                Y[-b[m]] += lmag[m] * np.exp(-1j * ipphase[i])
            elif b[m] > hN:  # peak lobe croses Nyquist bin
                # NOTE(review): these writes land in the negative-frequency
                # region which the final mirror below overwrites — confirm
                # against the compiled UF_C.genSpecSines behaviour
                Y[b[m]] += lmag[m] * np.exp(-1j * ipphase[i])
            elif b[m] == 0 or b[m] == hN:  # peak lobe in the limits of the spectrum
                Y[b[m]] += lmag[m] * np.exp(1j * ipphase[i]) + lmag[m] * np.exp(
                    -1j * ipphase[i]
                )
            else:  # peak lobe in positive freq. range
                Y[b[m]] += lmag[m] * np.exp(1j * ipphase[i])
    Y[hN + 1 :] = Y[
        hN - 1 : 0 : -1
    ].conjugate()  # fill the negative part of the spectrum (hermitian symmetry)
    return Y
def sinewaveSynth(freqs, amp, H, fs):
    """
    Synthesis of one sinusoid with time-varying frequency
    freqs: array of per-frame frequencies of the sinusoid; amp: amplitude
    H: hop size, fs: sampling rate
    returns y: output array sound
    """
    t = np.arange(H) / float(fs)  # time axis for one synthesis frame
    lastphase = 0  # phase carried over between frames
    lastfreq = freqs[0]  # frequency carried over between frames
    y = np.array([])  # synthesized output
    for frame in range(freqs.size):
        cur = freqs[frame]
        if lastfreq == 0 and cur == 0:  # silent frame
            A = np.zeros(H)
            freq = np.zeros(H)
        elif lastfreq == 0 and cur > 0:  # note onset: fade the amplitude in
            A = np.arange(0, amp, amp / H)
            freq = np.ones(H) * cur
        elif lastfreq > 0 and cur > 0:  # sustained note: interpolate frequency
            A = np.ones(H) * amp
            if lastfreq == cur:
                freq = np.ones(H) * lastfreq
            else:
                freq = np.arange(lastfreq, cur, (cur - lastfreq) / H)
        elif lastfreq > 0 and cur == 0:  # note offset: fade the amplitude out
            A = np.arange(amp, 0, -amp / H)
            freq = np.ones(H) * lastfreq
        phase = 2 * np.pi * freq * t + lastphase  # instantaneous phase
        yh = A * np.cos(phase)  # one frame of the sinusoid
        lastfreq = cur  # propagate frequency to the next frame
        lastphase = np.remainder(phase[H - 1], 2 * np.pi)  # propagate wrapped phase
        y = np.append(y, yh)  # append frame to previous one
    return y
def cleaningTrack(track, minTrackLength=3):
    """
    Delete fragments of one single track smaller than minTrackLength
    track: array of values; minTrackLength: minimum duration of tracks in number of frames
    returns cleanTrack: array of clean values
    """
    nFrames = track.size
    cleanTrack = np.copy(track)  # output starts as a copy of the input
    # a contour begins where a non-positive frame is followed by a positive one
    onsets = np.nonzero((track[: nFrames - 1] <= 0) & (track[1:] > 0))[0] + 1
    if track[0] > 0:  # a contour may already be active at the first frame
        onsets = np.insert(onsets, 0, 0)
    # a contour ends where a positive frame is followed by a non-positive one
    offsets = np.nonzero((track[: nFrames - 1] > 0) & (track[1:] <= 0))[0] + 1
    if track[nFrames - 1] > 0:  # a contour may still be active at the last frame
        offsets = np.append(offsets, nFrames - 1)
    lengths = 1 + offsets - onsets  # length of each contour
    for start, length in zip(onsets, lengths):  # zero out contours that are too short
        if length <= minTrackLength:
            cleanTrack[start : start + length] = 0
    return cleanTrack
def f0Twm(pfreq, pmag, ef0max, minf0, maxf0, f0t=0):
    """
    Function that wraps the f0 detection function TWM, selecting the possible f0 candidates
    and calling the function TWM with them
    pfreq, pmag: peak frequencies and magnitudes,
    ef0max: maximum error allowed, minf0, maxf0: minimum and maximum f0
    f0t: f0 of previous frame if stable
    returns f0: fundamental frequency in Hz (0 when no reliable f0 is found)
    raises ValueError for minf0 < 0 or maxf0 >= 10000
    """
    if minf0 < 0:  # raise exception if minf0 is smaller than 0
        raise ValueError("Minimum fundamental frequency (minf0) smaller than 0")
    if maxf0 >= 10000:  # raise exception if maxf0 is bigger than 10000Hz
        raise ValueError("Maximum fundamental frequency (maxf0) bigger than 10000Hz")
    if (pfreq.size < 3) & (
        f0t == 0
    ):  # return 0 if less than 3 peaks and not previous f0
        return 0
    f0c = np.argwhere((pfreq > minf0) & (pfreq < maxf0))[
        :, 0
    ]  # use only peaks within given range
    if f0c.size == 0:  # return 0 if no peaks within range
        return 0
    f0cf = pfreq[f0c]  # frequencies of peak candidates
    f0cm = pmag[f0c]  # magnitude of peak candidates
    if f0t > 0:  # if stable f0 in previous frame, narrow the candidate list
        shortlist = np.argwhere(np.abs(f0cf - f0t) < f0t / 2.0)[
            :, 0
        ]  # use only peaks close to it
        maxc = np.argmax(f0cm)  # strongest candidate peak
        maxcfd = f0cf[maxc] % f0t  # its distance to the nearest multiple of f0t
        if maxcfd > f0t / 2:
            maxcfd = f0t - maxcfd
        if (maxc not in shortlist) and (
            maxcfd > (f0t / 4)
        ):  # keep the maximum magnitude peak even when it is not a harmonic
            shortlist = np.append(maxc, shortlist)
        f0cf = f0cf[shortlist]  # frequencies of candidates
    if f0cf.size == 0:  # return 0 if no peak candidates
        return 0
    f0, f0error = UF_C.twm(
        pfreq, pmag, f0cf
    )  # call the TWM function with peak candidates, cython version
    # f0, f0error = TWM_p(pfreq, pmag, f0cf)  # call the TWM function with peak candidates, python version
    if (f0 > 0) and (
        f0error < ef0max
    ):  # accept and return f0 if below max error allowed
        return f0
    else:
        return 0
def TWM_p(pfreq, pmag, f0c):
    """
    Two-way mismatch algorithm for f0 detection (by Beauchamp&Maher)
    [better to use the C version of this function: UF_C.twm]
    pfreq, pmag: peak frequencies in Hz and magnitudes,
    f0c: frequencies of f0 candidates
    returns f0, f0Error: fundamental frequency detected and its error
    """
    p = 0.5  # weighting by frequency value
    q = 1.4  # weighting related to magnitude of peaks
    r = 0.5  # scaling related to magnitude of peaks
    rho = 0.33  # weighting of MP error
    Amax = max(pmag)  # maximum peak magnitude
    maxnpeaks = 10  # maximum number of peaks used
    # Rewritten with plain ndarrays: the original np.matrix version (deprecated
    # by numpy) produced a (1, n) matrix for Error, so Error[f0index] raised
    # IndexError whenever the best candidate was not the first one.
    # predicted-to-measured error: for each candidate walk up its harmonic
    # series and measure the distance of each predicted harmonic to the
    # closest measured peak
    harmonic = np.array(f0c, dtype=float)  # current harmonic of every candidate
    ErrorPM = np.zeros(harmonic.size)  # accumulated PM error per candidate
    MaxNPM = min(maxnpeaks, pfreq.size)
    for i in range(0, MaxNPM):
        difmatrixPM = np.abs(harmonic[:, np.newaxis] - pfreq[np.newaxis, :])
        FreqDistance = np.amin(difmatrixPM, axis=1)  # distance to closest peak
        peakloc = np.argmin(difmatrixPM, axis=1)  # index of that peak
        Ponddif = FreqDistance * (harmonic ** (-p))  # frequency-weighted distance
        MagFactor = 10 ** ((pmag[peakloc] - Amax) / 20)  # peak magnitude weight
        ErrorPM = ErrorPM + Ponddif + MagFactor * (q * Ponddif - r)
        harmonic = harmonic + f0c  # advance every candidate to its next harmonic
    # measured-to-predicted error: for each candidate measure the distance of
    # each measured peak to its nearest predicted harmonic
    ErrorMP = np.zeros(harmonic.size)
    MaxNMP = min(maxnpeaks, pfreq.size)
    for i in range(0, f0c.size):
        nharm = np.round(pfreq[:MaxNMP] / f0c[i])  # nearest harmonic number
        nharm = (nharm >= 1) * nharm + (nharm < 1)  # clamp to at least 1
        FreqDistance = abs(pfreq[:MaxNMP] - nharm * f0c[i])
        Ponddif = FreqDistance * (pfreq[:MaxNMP] ** (-p))
        MagFactor = 10 ** ((pmag[:MaxNMP] - Amax) / 20)
        ErrorMP[i] = sum(MagFactor * (Ponddif + MagFactor * (q * Ponddif - r)))
    Error = (ErrorPM / MaxNPM) + (rho * ErrorMP / MaxNMP)  # total error
    f0index = np.argmin(Error)  # candidate with the smallest error
    return f0c[f0index], Error[f0index]
def sineSubtraction(x, N, H, sfreq, smag, sphase, fs):
    """
    Subtract sinusoids from a sound
    x: input sound, N: fft-size, H: hop-size
    sfreq, smag, sphase: sinusoidal frequencies, magnitudes and phases (one frame per row)
    fs: sampling rate
    returns xr: residual sound
    """
    hN = N // 2  # half of fft size
    x = np.append(
        np.zeros(hN), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(hN))  # add zeros at the end to analyze last sample
    bh = blackmanharris(N)  # blackman harris window
    w = bh / sum(bh)  # normalize window
    sw = np.zeros(N)  # initialize synthesis window
    # triangular window divided by the analysis window gives unity overlap-add
    sw[hN - H : hN + H] = triang(2 * H) / w[hN - H : hN + H]  # synthesis window
    L = sfreq.shape[0]  # number of frames, this works if no sines
    xr = np.zeros(x.size)  # initialize output array
    pin = 0
    for l in range(L):
        xw = x[pin : pin + N] * w  # window the input sound
        X = fft(fftshift(xw))  # compute FFT (fftshift makes the window zero-phase)
        Yh = UF_C.genSpecSines(
            N * sfreq[l, :] / fs, smag[l, :], sphase[l, :], N
        )  # generate spec sines, cython version
        # Yh = genSpecSines_p(N*sfreq[l,:]/fs, smag[l,:], sphase[l,:], N, fs)   # generate spec sines, python version
        Xr = X - Yh  # subtract sines from original spectrum
        xrw = np.real(fftshift(ifft(Xr)))  # inverse FFT, undoing the zero-phase shift
        xr[pin : pin + N] += xrw * sw  # overlap-add
        pin += H  # advance sound pointer
    xr = np.delete(
        xr, range(hN)
    )  # delete half of first window which was added in stftAnal
    xr = np.delete(
        xr, range(xr.size - hN, xr.size)
    )  # delete half of last window which was added in stftAnal
    return xr
def stochasticResidualAnal(x, N, H, sfreq, smag, sphase, fs, stocf):
    """
    Subtract sinusoids from a sound and approximate the residual with an envelope
    x: input sound, N: fft size, H: hop-size
    sfreq, smag, sphase: sinusoidal frequencies, magnitudes and phases (one frame per row)
    fs: sampling rate; stocf: stochastic factor, used in the approximation
    returns stocEnv: stochastic approximation of residual (one envelope per row)
    """
    hN = N // 2  # half of fft size
    x = np.append(
        np.zeros(hN), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(hN))  # add zeros at the end to analyze last sample
    bh = blackmanharris(N)  # analysis window
    w = bh / sum(bh)  # normalize analysis window
    L = sfreq.shape[0]  # number of frames, this works if no sines
    pin = 0
    # collect the envelope frames in a list and stack once at the end; stacking
    # with np.vstack inside the loop copies the whole array every iteration
    envFrames = []
    for l in range(L):
        xw = x[pin : pin + N] * w  # window the input sound
        X = fft(fftshift(xw))  # compute FFT
        Yh = UF_C.genSpecSines(
            N * sfreq[l, :] / fs, smag[l, :], sphase[l, :], N
        )  # generate spec sines, cython version
        # Yh = genSpecSines_p(N*sfreq[l,:]/fs, smag[l,:], sphase[l,:], N, fs)  # generate spec sines, python version
        Xr = X - Yh  # subtract sines from original spectrum
        mXr = 20 * np.log10(abs(Xr[:hN]))  # magnitude spectrum of residual (dB)
        # scipy.signal.resample requires an integer number of output samples;
        # truncate mXr.size*stocf since stocf may be a fractional factor
        mXrenv = resample(
            np.maximum(-200, mXr), int(mXr.size * stocf)
        )  # decimate the mag spectrum (floor at -200 dB)
        envFrames.append(mXrenv)
        pin += H  # advance sound pointer
    stocEnv = np.vstack(envFrames)  # one envelope per row
    return stocEnv
| 18,324 | Python | .py | 420 | 36.766667 | 119 | 0.612681 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,096 | harmonicModel.py | MTG_sms-tools/smstools/models/harmonicModel.py | # functions that implement analysis and synthesis of sounds using the Harmonic Model
# (for example usage check the interface directory)
import math
import numpy as np
from scipy.fft import ifft
from scipy.signal.windows import blackmanharris, triang
from smstools.models import dftModel as DFT
from smstools.models import sineModel as SM
from smstools.models import utilFunctions as UF
def f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et):
    """
    Fundamental frequency detection of a sound using twm algorithm
    x: input sound; fs: sampling rate; w: analysis window;
    N: FFT size; H: hop size; t: threshold in negative dB,
    minf0: minimum f0 frequency in Hz, maxf0: maximim f0 frequency in Hz,
    f0et: error threshold in the f0 detection (ex: 5),
    returns f0: array with one fundamental frequency value per frame (0 = unvoiced)
    raises ValueError for minf0 < 0, maxf0 >= 10000 or H <= 0
    """
    if minf0 < 0:  # raise exception if minf0 is smaller than 0
        raise ValueError("Minumum fundamental frequency (minf0) smaller than 0")
    if maxf0 >= 10000:  # raise exception if maxf0 is bigger than 10000Hz
        raise ValueError("Maximum fundamental frequency (maxf0) bigger than 10000Hz")
    if H <= 0:  # raise error if hop size 0 or negative
        raise ValueError("Hop size (H) smaller or equal to 0")
    hN = N // 2  # size of positive spectrum
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    x = np.append(
        np.zeros(hM2), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(hM1))  # add zeros at the end to analyze last sample
    pin = hM1  # init sound pointer in middle of anal window
    pend = x.size - hM1  # last sample to start a frame
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    w = w / sum(w)  # normalize analysis window
    f0 = []  # initialize f0 output
    f0t = 0  # initialize f0 track
    f0stable = 0  # initialize f0 stable
    while pin < pend:
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # detect peak locations
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)  # refine peak values
        ipfreq = fs * iploc / N  # convert locations to Hz
        f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable)  # find f0
        # track stability: keep f0stable while the new estimate stays within
        # 20% of it; feeding it back to f0Twm biases the next frame's search
        if ((f0stable == 0) & (f0t > 0)) or (
            (f0stable > 0) & (np.abs(f0stable - f0t) < f0stable / 5.0)
        ):
            f0stable = f0t  # consider a stable f0 if it is close to the previous one
        else:
            f0stable = 0
        f0 = np.append(f0, f0t)  # add f0 to output array
        pin += H  # advance sound pointer
    return f0
def harmonicDetection(pfreq, pmag, pphase, f0, nH, hfreqp, fs, harmDevSlope=0.01):
    """
    Detection of the harmonics of a frame from a set of spectral peaks using f0
    to the ideal harmonic series built on top of a fundamental frequency
    pfreq, pmag, pphase: peak frequencies, magnitudes and phases
    f0: fundamental frequency, nH: number of harmonics,
    hfreqp: harmonic frequencies of previous frame,
    fs: sampling rate; harmDevSlope: slope of change of the deviation allowed to perfect harmonic
    returns hfreq, hmag, hphase: harmonic frequencies, magnitudes, phases
    """
    if f0 <= 0:  # without a fundamental there are no harmonics
        return np.zeros(nH), np.zeros(nH), np.zeros(nH)
    hfreq = np.zeros(nH)  # detected harmonic frequencies
    hmag = np.zeros(nH) - 100  # detected harmonic magnitudes (silence floor)
    hphase = np.zeros(nH)  # detected harmonic phases
    ideal = f0 * np.arange(1, nH + 1)  # ideal harmonic series on top of f0
    if len(hfreqp) == 0:  # no previous tracks: fall back to the ideal series
        hfreqp = ideal
    hi = 0  # index of the harmonic being searched
    while (f0 > 0) and (hi < nH) and (ideal[hi] < fs / 2):  # stay below Nyquist
        closest = np.argmin(abs(pfreq - ideal[hi]))  # peak nearest the ideal harmonic
        devIdeal = abs(pfreq[closest] - ideal[hi])  # deviation from the ideal position
        # deviation from where this harmonic sat in the previous frame
        devPrev = abs(pfreq[closest] - hfreqp[hi]) if hfreqp[hi] > 0 else fs
        threshold = f0 / 3 + harmDevSlope * pfreq[closest]
        if devIdeal < threshold or devPrev < threshold:  # accept close-enough peaks
            hfreq[hi] = pfreq[closest]
            hmag[hi] = pmag[closest]
            hphase[hi] = pphase[closest]
        hi += 1  # move on to the next harmonic either way
    return hfreq, hmag, hphase
def harmonicModel(x, fs, w, N, t, nH, minf0, maxf0, f0et):
    """
    Analysis/synthesis of a sound using the sinusoidal harmonic model
    x: input sound, fs: sampling rate, w: analysis window,
    N: FFT size (minimum 512), t: threshold in negative dB,
    nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz,
    maxf0: maximim f0 frequency in Hz,
    f0et: error threshold in the f0 detection (ex: 5),
    returns y: output array sound
    """
    hN = N // 2  # size of positive spectrum
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    x = np.append(
        np.zeros(hM2), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(hM1))  # add zeros at the end to analyze last sample
    Ns = 512  # FFT size for synthesis (even)
    H = Ns // 4  # Hop size used for analysis and synthesis
    hNs = Ns // 2
    pin = max(hNs, hM1)  # init sound pointer in middle of anal window
    pend = x.size - max(hNs, hM1)  # last sample to start a frame
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    yh = np.zeros(Ns)  # initialize output sound frame
    y = np.zeros(x.size)  # initialize output array
    w = w / sum(w)  # normalize analysis window
    sw = np.zeros(Ns)  # initialize synthesis window
    ow = triang(2 * H)  # overlapping window
    sw[hNs - H : hNs + H] = ow
    bh = blackmanharris(Ns)  # synthesis window
    bh = bh / sum(bh)  # normalize synthesis window
    # dividing the triangular window by the blackman-harris gives unity
    # overlap-add after the blackman-harris used in genSpecSines
    sw[hNs - H : hNs + H] = (
        sw[hNs - H : hNs + H] / bh[hNs - H : hNs + H]
    )  # window for overlap-add
    hfreqp = []  # harmonic frequencies of the previous frame
    f0t = 0  # current f0 estimate
    f0stable = 0  # stable f0 fed back into the next frame's detection
    while pin < pend:
        # -----analysis-----
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # detect peak locations
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)  # refine peak values
        ipfreq = fs * iploc / N  # convert locations to Hz
        f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable)  # find f0
        if ((f0stable == 0) & (f0t > 0)) or (
            (f0stable > 0) & (np.abs(f0stable - f0t) < f0stable / 5.0)
        ):
            f0stable = f0t  # consider a stable f0 if it is close to the previous one
        else:
            f0stable = 0
        hfreq, hmag, hphase = harmonicDetection(
            ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs
        )  # find harmonics
        hfreqp = hfreq
        # -----synthesis-----
        Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)  # generate spec sines
        fftbuffer = np.real(ifft(Yh))  # inverse FFT
        yh[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        yh[hNs - 1 :] = fftbuffer[: hNs + 1]
        y[pin - hNs : pin + hNs] += sw * yh  # overlap-add
        pin += H  # advance sound pointer
    y = np.delete(
        y, range(hM2)
    )  # delete half of first window which was added at the start
    y = np.delete(
        y, range(y.size - hM1, y.size)
    )  # delete half of last window which was added at the start
    return y
def harmonicModelAnal(
    x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope=0.01, minSineDur=0.02
):
    """
    Analysis of a sound using the sinusoidal harmonic model
    x: input sound; fs: sampling rate, w: analysis window; N: FFT size (minimum 512); t: threshold in negative dB,
    nH: maximum number of harmonics; minf0: minimum f0 frequency in Hz,
    maxf0: maximim f0 frequency in Hz; f0et: error threshold in the f0 detection (ex: 5),
    harmDevSlope: slope of harmonic deviation; minSineDur: minimum length of harmonics
    returns xhfreq, xhmag, xhphase: harmonic frequencies, magnitudes and phases (one frame per row)
    raises ValueError if minSineDur < 0
    """
    if minSineDur < 0:  # raise exception if minSineDur is smaller than 0
        raise ValueError("Minimum duration of sine tracks smaller than 0")
    hN = N // 2  # size of positive spectrum
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    x = np.append(
        np.zeros(hM2), x
    )  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(hM2))  # add zeros at the end to analyze last sample
    pin = hM1  # init sound pointer in middle of anal window
    pend = x.size - hM1  # last sample to start a frame
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    w = w / sum(w)  # normalize analysis window
    hfreqp = []  # initialize harmonic frequencies of previous frame
    f0t = 0  # initialize f0 track
    f0stable = 0  # initialize f0 stable
    while pin <= pend:
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # detect peak locations
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)  # refine peak values
        ipfreq = fs * iploc / N  # convert locations to Hz
        f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable)  # find f0
        # track stability: keep f0stable while the new estimate stays within 20%
        if ((f0stable == 0) & (f0t > 0)) or (
            (f0stable > 0) & (np.abs(f0stable - f0t) < f0stable / 5.0)
        ):
            f0stable = f0t  # consider a stable f0 if it is close to the previous one
        else:
            f0stable = 0
        hfreq, hmag, hphase = harmonicDetection(
            ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs, harmDevSlope
        )  # find harmonics
        hfreqp = hfreq
        if pin == hM1:  # first frame
            xhfreq = np.array([hfreq])
            xhmag = np.array([hmag])
            xhphase = np.array([hphase])
        else:  # next frames
            xhfreq = np.vstack((xhfreq, np.array([hfreq])))
            xhmag = np.vstack((xhmag, np.array([hmag])))
            xhphase = np.vstack((xhphase, np.array([hphase])))
        pin += H  # advance sound pointer
    xhfreq = SM.cleaningSineTracks(
        xhfreq, round(fs * minSineDur / H)
    )  # delete tracks shorter than minSineDur
    return xhfreq, xhmag, xhphase
| 10,736 | Python | .py | 216 | 42.805556 | 114 | 0.633022 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,097 | dftModel.py | MTG_sms-tools/smstools/models/dftModel.py | # functions that implement analysis and synthesis of sounds using the Discrete Fourier Transform
# (for example usage check dftModel_function.py in the interface directory)
import math
import numpy as np
from scipy.fft import fft, ifft
from smstools.models import utilFunctions as UF
tol = 1e-14  # real/imag values below this are zeroed in dftAnal before np.angle to avoid noisy phase
def dftModel(x, w, N):
    """
    Analysis/synthesis of a signal using the discrete Fourier transform
    x: input signal, w: analysis window, N: FFT size
    returns y: output signal
    raises ValueError if N is not a power of 2 or the window is larger than N
    """
    if not (UF.isPower2(N)):  # raise error if N not a power of two
        raise ValueError("FFT size (N) is not a power of 2")
    if w.size > N:  # raise error if window size bigger than fft size
        raise ValueError("Window size (M) is bigger than FFT size")
    if all(x == 0):  # if input array is zeros return empty output
        return np.zeros(x.size)
    hN = (N // 2) + 1  # size of positive spectrum, it includes sample 0
    hM1 = (w.size + 1) // 2  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    y = np.zeros(x.size)  # initialize output array
    # ----analysis--------
    xw = x * w  # window the input sound
    # zero-phase windowing: the second half of the windowed frame goes to the
    # start of the buffer and the first half to the end, centering the window
    fftbuffer[:hM1] = xw[hM2:]  # zero-phase window in fftbuffer
    fftbuffer[-hM2:] = xw[:hM2]
    X = fft(fftbuffer)  # compute FFT
    absX = abs(X[:hN])  # compute absolute value of positive side
    absX[absX < np.finfo(float).eps] = np.finfo(
        float
    ).eps  # if zeros add epsilon to handle log
    mX = 20 * np.log10(absX)  # magnitude spectrum of positive frequencies in dB
    pX = np.unwrap(np.angle(X[:hN]))  # unwrapped phase spectrum of positive frequencies
    # -----synthesis-----
    Y = np.zeros(N, dtype=complex)  # clean output spectrum
    Y[:hN] = 10 ** (mX / 20) * np.exp(1j * pX)  # generate positive frequencies
    Y[hN:] = 10 ** (mX[-2:0:-1] / 20) * np.exp(
        -1j * pX[-2:0:-1]
    )  # generate negative frequencies by hermitian symmetry
    fftbuffer = np.real(ifft(Y))  # compute inverse FFT
    y[:hM2] = fftbuffer[-hM2:]  # undo zero-phase window
    y[hM2:] = fftbuffer[:hM1]
    return y
def dftAnal(x, w, N):
    """
    Analysis of a signal using the discrete Fourier transform
    x: input signal, w: analysis window, N: FFT size
    returns mX, pX: magnitude (dB) and unwrapped phase spectrum of the positive frequencies
    raises ValueError if N is not a power of 2 or the window is larger than N
    """
    if not (UF.isPower2(N)):  # raise error if N not a power of two
        raise ValueError("FFT size (N) is not a power of 2")
    if w.size > N:  # raise error if window size bigger than fft size
        raise ValueError("Window size (M) is bigger than FFT size")
    hN = (N // 2) + 1  # size of positive spectrum, it includes sample 0
    hM1 = (w.size + 1) // 2  # half analysis window size by rounding
    hM2 = w.size // 2  # half analysis window size by floor
    fftbuffer = np.zeros(N)  # initialize buffer for FFT
    w = w / sum(w)  # normalize analysis window
    xw = x * w  # window the input sound
    # zero-phase windowing: split the windowed frame around the buffer ends so
    # the window center sits at sample 0 of the FFT input
    fftbuffer[:hM1] = xw[hM2:]  # zero-phase window in fftbuffer
    fftbuffer[-hM2:] = xw[:hM2]
    X = fft(fftbuffer)  # compute FFT
    absX = abs(X[:hN])  # compute absolute value of positive side
    absX[absX < np.finfo(float).eps] = np.finfo(
        float
    ).eps  # if zeros add epsilon to handle log
    mX = 20 * np.log10(absX)  # magnitude spectrum of positive frequencies in dB
    # zero out numerically tiny components (|.| < tol) in place so np.angle
    # does not produce noisy phase values from floating-point residue
    X[:hN].real[
        np.abs(X[:hN].real) < tol
    ] = 0.0  # for phase calculation set to 0 the small values
    X[:hN].imag[
        np.abs(X[:hN].imag) < tol
    ] = 0.0  # for phase calculation set to 0 the small values
    pX = np.unwrap(np.angle(X[:hN]))  # unwrapped phase spectrum of positive frequencies
    return mX, pX
def dftSynth(mX, pX, M):
    """
    Synthesis of a signal using the discrete Fourier transform
    mX: magnitude spectrum (dB), pX: phase spectrum, M: window size
    returns y: output signal of M samples
    """
    hN = mX.size  # size of positive spectrum, it includes sample 0
    N = (hN - 1) * 2  # full FFT size implied by the positive spectrum
    if not UF.isPower2(N):  # mX must correspond to a power-of-2 FFT
        raise ValueError("size of mX is not (N/2)+1")
    hM1 = (M + 1) // 2  # half window size, rounded up
    hM2 = M // 2  # half window size, rounded down
    # rebuild the full complex spectrum from dB magnitude and phase
    Y = np.zeros(N, dtype=complex)
    Y[:hN] = 10 ** (mX / 20) * np.exp(1j * pX)  # positive frequencies
    Y[hN:] = 10 ** (mX[-2:0:-1] / 20) * np.exp(-1j * pX[-2:0:-1])  # hermitian mirror
    fftbuffer = np.real(ifft(Y))  # back to the time domain
    y = np.zeros(M)
    y[:hM2] = fftbuffer[-hM2:]  # undo the zero-phase window
    y[hM2:] = fftbuffer[:hM1]
    return y
| 4,809 | Python | .py | 99 | 43.313131 | 96 | 0.644501 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,098 | spsModel.py | MTG_sms-tools/smstools/models/spsModel.py | # functions that implement analysis and synthesis of sounds using the Sinusoidal plus Stochastic Model
# (for example usage check the interface directory)
import math
import numpy as np
from scipy.fft import fft, ifft
from scipy.signal import resample
from scipy.signal.windows import blackmanharris, hann, triang
from smstools.models import dftModel as DFT
from smstools.models import sineModel as SM
from smstools.models import stochasticModel as STM
from smstools.models import utilFunctions as UF
def spsModelAnal(
    x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope, stocf
):
    """
    Analysis of a sound using the sinusoidal plus stochastic model
    x: input sound, fs: sampling rate, w: analysis window; N: FFT size, t: threshold in negative dB,
    minSineDur: minimum duration of sinusoidal tracks
    maxnSines: maximum number of parallel sinusoids
    freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
    freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
    stocf: decimation factor used for the stochastic approximation
    returns tfreq, tmag, tphase: sinusoidal frequencies, magnitudes and phases; stocEnv: stochastic residual
    """
    # sinusoidal analysis of the input sound
    tfreq, tmag, tphase = SM.sineModelAnal(
        x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope
    )
    synthN = 512  # FFT size used when subtracting the sinusoids
    residual = UF.sineSubtraction(x, synthN, H, tfreq, tmag, tphase, fs)
    # model the residual with a decimated stochastic envelope
    stocEnv = STM.stochasticModelAnal(residual, H, H * 2, stocf)
    return tfreq, tmag, tphase, stocEnv
def spsModelSynth(tfreq, tmag, tphase, stocEnv, N, H, fs):
    """
    Synthesize a sound with the sinusoidal plus stochastic model.
    tfreq, tmag, tphase: sinusoidal frequencies, amplitudes and phases; stocEnv: stochastic envelope
    N: synthesis FFT size; H: hop size, fs: sampling rate
    returns y: output sound, ys: sinusoidal component, yst: stochastic component
    """
    # render each component separately
    ys = SM.sineModelSynth(tfreq, tmag, tphase, N, H, fs)  # sinusoidal part
    yst = STM.stochasticModelSynth(stocEnv, H, 2 * H)  # stochastic residual part
    # the two renderings may differ slightly in length; add them over the common span
    nShared = min(ys.size, yst.size)
    y = ys[:nShared] + yst[:nShared]
    return y, ys, yst
def spsModel(x, fs, w, N, t, stocf):
    """
    Analysis/synthesis of a sound using the sinusoidal plus stochastic model
    x: input sound, fs: sampling rate, w: analysis window,
    N: FFT size (minimum 512), t: threshold in negative dB,
    stocf: decimation factor of mag spectrum for stochastic analysis
    returns y: output sound, ys: sinusoidal component, yst: stochastic component
    """
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))  # half analysis window size by floor
    Ns = 512  # FFT size for synthesis (even)
    H = Ns // 4  # Hop size used for analysis and synthesis
    hNs = Ns // 2  # half synthesis FFT size
    pin = max(hNs, hM1)  # initialize sound pointer in middle of analysis window
    pend = x.size - max(hNs, hM1)  # last sample to start a frame
    ysw = np.zeros(Ns)  # initialize output sinusoidal sound frame
    ystw = np.zeros(Ns)  # initialize output stochastic sound frame
    ys = np.zeros(x.size)  # initialize output sinusoidal component
    yst = np.zeros(x.size)  # initialize output stochastic component
    w = w / sum(w)  # normalize analysis window
    sw = np.zeros(Ns)
    ow = triang(2 * H)  # overlapping window
    sw[hNs - H : hNs + H] = ow
    bh = blackmanharris(Ns)  # synthesis window
    bh = bh / sum(bh)  # normalize synthesis window
    wr = bh  # window for residual analysis
    sw[hNs - H : hNs + H] = sw[hNs - H : hNs + H] / bh[hNs - H : hNs + H]
    sws = H * hann(Ns) / 2  # synthesis window for stochastic component
    while pin < pend:
        # -----analysis-----
        x1 = x[pin - hM1 : pin + hM2]  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)  # compute dft
        ploc = UF.peakDetection(mX, t)  # find peaks
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)  # refine peak values
        ipfreq = fs * iploc / float(N)  # convert peak locations to Hertz
        ri = pin - hNs - 1  # input sound pointer for residual analysis
        xw2 = x[ri : ri + Ns] * wr  # window the input sound
        fftbuffer = np.zeros(Ns)  # reset buffer
        fftbuffer[:hNs] = xw2[hNs:]  # zero-phase window in fftbuffer
        fftbuffer[hNs:] = xw2[:hNs]
        X2 = fft(fftbuffer)  # compute FFT for residual analysis
        # -----synthesis-----
        Ys = UF.genSpecSines(
            ipfreq, ipmag, ipphase, Ns, fs
        )  # generate spec of sinusoidal component
        Xr = X2 - Ys  # get the residual complex spectrum
        mXr = 20 * np.log10(abs(Xr[:hNs]))  # magnitude spectrum of residual
        # decimate the magnitude spectrum (clip at -200 dB to avoid -Inf from log10(0));
        # resample() requires an integer number of output samples, so truncate with int()
        mXrenv = resample(np.maximum(-200, mXr), int(mXr.size * stocf))
        stocEnv = resample(mXrenv, hNs)  # interpolate back to original size
        pYst = 2 * np.pi * np.random.rand(hNs)  # generate random phase values
        Yst = np.zeros(Ns, dtype=complex)
        Yst[:hNs] = 10 ** (stocEnv / 20) * np.exp(1j * pYst)  # generate positive freq.
        Yst[hNs + 1 :] = 10 ** (stocEnv[:0:-1] / 20) * np.exp(
            -1j * pYst[:0:-1]
        )  # generate negative freq.
        fftbuffer = np.real(ifft(Ys))  # inverse FFT of harmonic spectrum
        ysw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        ysw[hNs - 1 :] = fftbuffer[: hNs + 1]
        fftbuffer = np.real(ifft(Yst))  # inverse FFT of stochastic spectrum
        ystw[: hNs - 1] = fftbuffer[hNs + 1 :]  # undo zero-phase window
        ystw[hNs - 1 :] = fftbuffer[: hNs + 1]
        ys[ri : ri + Ns] += sw * ysw  # overlap-add for sines
        yst[ri : ri + Ns] += sws * ystw  # overlap-add for stochastic
        pin += H  # advance sound pointer
    y = ys + yst  # sum of sinusoidal and residual components
    return y, ys, yst
| 6,206 | Python | .py | 119 | 45.92437 | 114 | 0.658263 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |
30,099 | stft.py | MTG_sms-tools/smstools/models/stft.py | # functions that implement analysis and synthesis of sounds using the Short-Time Fourier Transform
# (for example usage check stft_function.py in the interface directory)
import numpy as np
from smstools.models import dftModel as DFT
def stft(x, w, N, H):
    """
    Analysis/synthesis of a sound using the short-time Fourier transform
    x: input sound, w: analysis window, N: FFT size, H: hop size
    returns y: output sound
    """
    if H <= 0:  # a non-positive hop would never advance through the sound
        raise ValueError("Hop size (H) smaller or equal to 0")
    M = w.size  # size of analysis window
    hM1 = (M + 1) // 2  # half analysis window size by rounding
    hM2 = M // 2  # half analysis window size by floor
    # pad so the first window is centered on sample 0 and the last sample is analyzed
    xp = np.concatenate((np.zeros(hM2), x, np.zeros(hM1)))
    w = w / sum(w)  # normalize analysis window
    y = np.zeros(xp.size)  # output accumulator (same size as padded input)
    # step through the padded sound one hop at a time, analyzing and resynthesizing
    for pin in range(hM1, xp.size - hM1 + 1, H):
        frame = xp[pin - hM1 : pin + hM2]  # one frame of the input sound
        mX, pX = DFT.dftAnal(frame, w, N)  # analysis
        y[pin - hM1 : pin + hM2] += H * DFT.dftSynth(mX, pX, M)  # overlap-add synthesis
    # drop the padding that was added at both ends
    return y[hM2 : y.size - hM1]
def stftAnal(x, w, N, H):
    """
    Analysis of a sound using the short-time Fourier transform
    x: input array sound, w: analysis window, N: FFT size, H: hop size
    returns xmX, xpX: magnitude and phase spectra
    """
    if H <= 0:  # a non-positive hop would never advance through the sound
        raise ValueError("Hop size (H) smaller or equal to 0")
    M = w.size  # size of analysis window
    hM1 = (M + 1) // 2  # half analysis window size by rounding
    hM2 = M // 2  # half analysis window size by floor
    # pad so the first window is centered on sample 0 and the last sample is analyzed
    xp = np.concatenate((np.zeros(hM2), x, np.zeros(hM2)))
    w = w / sum(w)  # normalize analysis window
    # compute the DFT of every frame, hopping H samples at a time
    frames = [
        DFT.dftAnal(xp[pin - hM1 : pin + hM2], w, N)
        for pin in range(hM1, xp.size - hM1 + 1, H)
    ]
    xmX = np.array([np.array(mX) for mX, _ in frames])  # stack magnitude spectra
    xpX = np.array([np.array(pX) for _, pX in frames])  # stack phase spectra
    return xmX, xpX
def stftSynth(mY, pY, M, H):
    """
    Synthesis of a sound using the short-time Fourier transform
    mY: magnitude spectra, pY: phase spectra, M: window size, H: hop-size
    returns y: output sound
    """
    hM1 = (M + 1) // 2  # half analysis window size by rounding
    hM2 = M // 2  # half analysis window size by floor
    nFrames = mY[:, 0].size  # number of frames
    y = np.zeros(nFrames * H + hM1 + hM2)  # output accumulator, padded at both ends
    # overlap-add the inverse DFT of each frame at its hop position
    for i in range(nFrames):
        pin = hM1 + i * H  # center of this frame in the padded output
        y[pin - hM1 : pin + hM2] += H * DFT.dftSynth(mY[i, :], pY[i, :], M)
    # drop the padding that stftAnal added at both ends
    return y[hM2 : y.size - hM1].copy()
| 4,173 | Python | .py | 89 | 41.044944 | 98 | 0.634667 | MTG/sms-tools | 1,630 | 751 | 12 | AGPL-3.0 | 9/5/2024, 5:14:06 PM (Europe/Amsterdam) |