Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _get_alternation(extractors, text):
    """Try every extractor on *text* and return the 'best fit'.

    Best fit is the result that consumed the most text.  When several results
    tie, the one produced by the earliest extractor in the list wins.  If every
    extractor raises DeadEnd, DeadEnd is raised here as well.
    """
    results = []
    for ext in extractors:
        try:
            results.append(_call_extractor(ext, text))
        except DeadEnd:
            continue
    if not results:
        raise DeadEnd
    # max() returns the first maximal element, preserving the
    # earliest-extractor-wins tie-break of the original loop.
    return max(results, key=len)
def _get_repetition(extractor, text, *, bounds=(0, None), ignore_whitespace=False):
    """Tries to pull text with extractor repeatedly.

    Bounds is a 2-tuple of (lbound, ubound) where lbound is a number and ubound
    is a number or None.  If ubound is None, extractor is applied to text until
    it raises DeadEnd.  Otherwise, extractor is applied until it raises DeadEnd
    or it has extracted ubound times.  If the number of children extracted is
    >= lbound, a ParseNode with type repetition is returned; otherwise DeadEnd
    is raised.  Bounds are inclusive on both ends: [lbound, ubound].

    This method is used to implement:
    - option (0, 1)
    - zero_or_more (0, None)
    - one_or_more (1, None)
    - exact_repeat (n, n)
    """
    minr, maxr = bounds
    children = []
    # Fixed off-by-one: the previous `len(children) <= maxr` allowed one
    # extraction beyond the upper bound (option() could match twice,
    # exact_repeat(n) could return n+1 children).  Stop once maxr children
    # have been collected.
    while maxr is None or len(children) < maxr:
        ignored_ws, use_text = _split_ignored(text, ignore_whitespace)
        try:
            child = _call_extractor(extractor, use_text)
            child.add_ignored(ignored_ws)
        except DeadEnd:
            break
        # An empty match would loop forever; stop without consuming it.
        if child.is_empty:
            break
        children.append(child)
        text = text[child.consumed:]
    if len(children) >= minr:
        return ParseNode(ParseNodeType.repetition,
                         children=children)
    else:
        raise DeadEnd()
def _get_exclusion(extractor, exclusion, text):
    """Return extractor's result, but only when *exclusion* fails to match.

    The exclusion pattern is tried first: if it raises DeadEnd (did not match),
    extractor is applied to text and its result returned.  If the exclusion
    matches, DeadEnd is raised instead.
    """
    try:
        _call_extractor(exclusion, text)
    except DeadEnd:
        # Exclusion did not match -- the main extractor may proceed.
        return _call_extractor(extractor, text)
    # Exclusion matched, so this alternative is forbidden.
    raise DeadEnd()
def _split_ignored(text, ignore_whitespace=True):
    """Return (leading whitespace, trailing text) when ignore_whitespace is
    true, or ("", text) when it is false.
    """
    if not ignore_whitespace:
        return ("", text)
    cut = _count_leading_whitespace(text)
    return (text[:cut], text[cut:])
def _count_leading_whitespace(text):
"""Returns the number of characters at the beginning of text that are whitespace."""
idx = 0
for idx, char in enumerate(text):
if not char.isspace():
return idx
return idx + 1 |
def _call_extractor(extractor, text):
"""This method calls an extractor on some text.
If extractor is just a string, it is passed as the first value to _get_terminal. Otherwise it is
treated as a callable and text is passed directly to it.
This makes it so you can have a shorthand of terminal(val) <-> val.
"""
if isinstance(extractor, str):
return _get_terminal(extractor, text)
else:
return extractor(text) |
def position(self):
    """Position of the text this ParseNode processed.

    When the node has no position of its own, the first child's position is
    used instead.  Value nodes (terminals) must carry their own position:
    a string child has no ``position`` attribute, so falling through to it
    would raise.
    """
    if self._position is not None or not self.children:
        return self._position
    first = self.children[0]
    return first.position if isinstance(first, ParseNode) else None
def is_empty(self):
    """True when the node has no children, or when every child is itself an
    empty ParseNode (string children always make the node non-empty).
    """
    for child in self.children:
        if not (isinstance(child, ParseNode) and child.is_empty):
            return False
    return True
def add_ignored(self, ignored):
    """Attach ignored (skipped) text to the node.

    New ignored text is prepended to any already present, and its length is
    added to the node's ``consumed`` count.  Empty/None input is a no-op.
    """
    if not ignored:
        return
    self.ignored = (ignored + self.ignored) if self.ignored else ignored
    self.consumed += len(ignored)
def is_type(self, value):
    """True when node_type equals *value*.

    A tuple value is treated as a set of alternatives: True is returned when
    node_type equals any member.
    """
    if isinstance(value, tuple):
        return any(self.node_type == option for option in value)
    return self.node_type == value
def flattened(self, pred=flattened_pred_default):
    """Flattens nodes by hoisting children up to ancestor nodes.

    A node is hoisted if pred(node) returns True.  Empty children are dropped
    entirely; value (terminal) nodes are returned unchanged.  Returns a new
    ParseNode -- self is not mutated.
    """
    if self.is_value:
        return self
    new_children = []
    for child in self.children:
        # drop empty subtrees before recursing
        if child.is_empty:
            continue
        new_child = child.flattened(pred)
        if pred(new_child, self):
            # hoist: splice the already-flattened grandchildren in place
            # of the child itself
            new_children.extend(new_child.children)
        else:
            new_children.append(new_child)
    return ParseNode(self.node_type,
                     children=new_children,
                     consumed=self.consumed,
                     position=self.position,
                     ignored=self.ignored)
def trimmed(self, pred=trimmed_pred_default):
    """Trim a ParseTree.

    Children (recursively trimmed first) for which pred(child, parent) is True
    are removed; everything else is kept.  Returns a new ParseNode.
    """
    kept = []
    for child in self.children:
        # only ParseNode children can be trimmed recursively;
        # string children pass through untouched
        candidate = child.trimmed(pred) if isinstance(child, ParseNode) else child
        if not pred(candidate, self):
            kept.append(candidate)
    return ParseNode(self.node_type,
                     children=kept,
                     consumed=self.consumed,
                     position=self.position,
                     ignored=self.ignored)
def merged(self, other):
    """Return a new ParseNode with this node's type whose children are every
    non-empty child of self followed by every non-empty child of other.
    Consumed counts are summed.
    """
    combined = [child
                for child in itertools.chain(self.children, other.children)
                if len(child) > 0]
    # NOTE: Only terminals should have ignored text attached to them, and
    #       terminals shouldn't be merged (probably), so copying ignored
    #       should be a no-op (always None).  We copy it anyway, accepting
    #       that other's ignored text is lost.
    return ParseNode(self.node_type,
                     children=combined,
                     consumed=self.consumed + other.consumed,
                     ignored=self.ignored)
def retyped(self, new_type):
    """Returns a new node with the same contents as self, but with a new node_type.

    The children list is shallow-copied; consumed, position and ignored are
    carried over unchanged.
    """
    return ParseNode(new_type,
                     children=list(self.children),
                     consumed=self.consumed,
                     position=self.position,
                     ignored=self.ignored)
def compressed(self, new_type=None, *, include_ignored=False):
    """Turns the node into a value node, whose single string child is the
    concatenation of all its children.

    The result keeps the summed consumed count of all children.  When
    include_ignored is False the first child's ignored text is carried on the
    result's ``ignored`` attribute instead of being folded into the value;
    when True, each child's ignored text is prepended to its value string.
    """
    values = []
    consumed = 0
    ignored = None
    for i, child in enumerate(self.children):
        consumed += child.consumed
        # keep the leading ignored text separate so the caller can still
        # reattach it (only the first child's is preserved this way)
        if i == 0 and not include_ignored:
            ignored = child.ignored
        if child.is_value:
            if include_ignored:
                values.append("{0}{1}".format(child.ignored or "", child.value))
            else:
                values.append(child.value)
        else:
            # non-terminal children are compressed recursively first
            values.append(child.compressed(include_ignored=include_ignored).value)
    return ParseNode(new_type or self.node_type,
                     children=["".join(values)],
                     consumed=consumed,
                     ignored=ignored,
                     position=self.position)
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI and builds the JSON payload
    and headers for the host-group API call.
    """
    ApiCli.get_arguments(self)
    # Get the host group name
    if self.args.host_group_name is not None:
        self.host_group_name = self.args.host_group_name
    # Get the list of sources separated by commas
    if self.args.sources is not None:
        self.sources = self.args.sources
    payload = {}
    if self.host_group_name is not None:
        payload['name'] = self.host_group_name
    if self.sources is not None:
        # idiomatic instance-method call (was str.split(self.sources, ','));
        # assigning the split list directly replaces the manual append loop
        payload['hostnames'] = self.sources.split(',')
    self.data = json.dumps(payload, sort_keys=True)
    self.headers = {'Content-Type': 'application/json', "Accept": "application/json"}
def get_scope_list(self) -> list:
    """
    Return the list of all containing scopes, ordered local to global
    (self first, then each successive parent).
    """
    scopes = [self]
    parent = self.get_parent()
    while parent is not None:
        scopes.append(parent)
        parent = parent.get_parent()
    return scopes
def get_scope_names(self) -> list:
    """
    Return the list of all containing scope names, from global to local.
    Scopes without a name (e.g. the global scope may carry None) are skipped.
    """
    # get_scope_list() is ordered local -> global, hence the reversed()
    return [scope.name
            for scope in reversed(self.get_scope_list())
            if scope.name is not None]
def position(self) -> Position:
    """The current position of the cursor.

    Bundles index, line number and column offset into a Position snapshot.
    """
    return Position(self._index, self._lineno, self._col_offset)
def max_readed_position(self) -> Position:
    """The position (index/line/column) of the deepest character read so far.

    Useful for error reporting: the failure point of a backtracking parse is
    the furthest position reached, not the current one.
    """
    return Position(self._maxindex, self._maxline, self._maxcol)
def step_next_char(self):
    """Advance the cursor one character to the right.

    Also updates the high-water mark (deepest index/line/column ever
    reached), which error reporting relies on.
    """
    self._index, self._col_offset = self._index + 1, self._col_offset + 1
    if self._index > self._maxindex:
        self._maxindex, self._maxcol, self._maxline = (
            self._index, self._col_offset, self._lineno)
def step_next_line(self):
    """Register the current position as an end-of-line and move the cursor
    bookkeeping to the beginning of the next line."""
    self._eol.append(self.position)
    self._col_offset = 0
    self._lineno += 1
def step_prev_line(self):
    """Rewind the cursor to the most recently registered end-of-line.

    Silently does nothing when no end-of-line has been recorded.
    """
    # TODO(bps): raise explicit error for unregistered eol
    # assert self._eol[-1].index == self._index
    if self._eol:
        self.position = self._eol.pop()
def last_readed_line(self) -> str:
    """Useful string to compute error messages.

    Returns the full text of the line containing the deepest position read so
    far, without the surrounding newline characters.
    """
    mpos = self._cursor.max_readed_position
    mindex = mpos.index
    # search last \n backwards; start one char earlier when sitting exactly
    # on end-of-stream so a trailing newline is not mistaken for the target
    prevline = mindex - 1 if mindex == self.eos_index else mindex
    while prevline >= 0 and self._content[prevline] != '\n':
        prevline -= 1
    # search next \n forwards (or end of stream)
    nextline = mindex
    while nextline < self.eos_index and self._content[nextline] != '\n':
        nextline += 1
    last_line = self._content[prevline + 1:nextline]
    return last_line
def incpos(self, length: int=1) -> int:
    """Advance the cursor by *length* characters and return the new index.

    A newline under the cursor registers an end-of-line before stepping, so
    line/column tracking stays consistent.  Steps past end-of-stream are
    silently ignored.  Raises ValueError for a negative length.
    """
    if length < 0:
        raise ValueError("length must be positive")
    for _ in range(length):
        if self._cursor.index < self._len:
            if self.peek_char == '\n':
                self._cursor.step_next_line()
            self._cursor.step_next_char()
    return self._cursor.index
def save_context(self) -> bool:
    """Save current position.

    Pushes the cursor position onto the context stack so a later
    restore_context() can roll back to it.  Always returns True so it can be
    chained inside parsing expressions.
    """
    self._contexts.append(self._cursor.position)
    return True
def restore_context(self) -> bool:
    """Rollback to previous saved position.

    Pops the most recently saved position off the context stack and makes it
    the cursor position.  Always returns False (parse-failure convention).
    """
    self._cursor.position = self._contexts.pop()
    return False
def to_fmt(self) -> fmt.indentable:
    """
    Return an Fmt representation of this scope for pretty-printing.

    Layout: the word "scope", the scope name when present, then (when the
    scope holds signatures or type translations) an indented block listing
    translations first and signatures sorted by key.
    """
    qual = "scope"
    txt = fmt.sep(" ", [qual])
    name = self.show_name()
    if name != "":
        txt.lsdata.append(name)
    if len(self._hsig) > 0 or len(self.mapTypeTranslate) > 0:
        lsb = []
        if len(self.mapTypeTranslate) > 0:
            lsb.append("translate:\n")
            lsb.append(fmt.end("\n", self.mapTypeTranslate.to_fmt()))
        # signatures are listed in deterministic (sorted key) order
        for k in sorted(self._hsig.keys()):
            s = self._hsig[k]
            lsb.append(fmt.end("\n", [s.to_fmt()]))
        block = fmt.block(":\n", "", fmt.tab(lsb))
        txt.lsdata.append(block)
    return txt
def to_fmt(self):
    """
    Return an Fmt representation of this evaluation context for
    pretty-printing: the wrapped signature, then (when present) the
    resolution table, the attached translator and any variadic types.
    """
    qual = "evalctx"
    lseval = []
    block = fmt.block(":\n", "", fmt.tab(lseval))
    txt = fmt.sep(" ", [qual, block])
    lseval.append(self._sig.to_fmt())
    if len(self.resolution) > 0:
        lsb = []
        for k in sorted(self.resolution.keys()):
            s = self.resolution[k]
            if s is not None:
                # resolved entries are callables; s() yields the target
                # whose display name is shown alongside
                lsb.append(
                    fmt.end(
                        "\n",
                        ["'%s': %s (%s)" % (k, s, s().show_name())]
                    )
                )
            else:
                lsb.append(fmt.end("\n", ["'%s': Unresolved" % (k)]))
        if self._translate_to is not None:
            lsb.append("use translator:")
            lsb.append(self._translate_to.to_fmt())
        if self._variadic_types is not None:
            lsb.append("variadic types:\n")
            # extra (variadic) parameters are numbered after the fixed arity
            arity = self._sig.arity
            for t in self._variadic_types:
                lsb.append("[%d] : %s\n" % (arity, t))
                arity += 1
        lseval.append(fmt.block("\nresolution :\n", "", fmt.tab(lsb)))
    return txt
def to_fmt(self, with_from=False) -> fmt.indentable:
    """
    Return a Fmt representation of the Translator for pretty-printing.

    Renders "<source> to <target> = <fun>" followed by the notification
    content; with_from controls how much of that notification is shown.
    """
    txt = fmt.sep("\n", [
        fmt.sep(
            " ",
            [
                self._type_source,
                "to",
                self._type_target,
                '=',
                self._fun.to_fmt()
            ]
        ),
        self._notify.get_content(with_from)
    ])
    return txt
def to_fmt(self):
    """
    Return an Fmt representation of this value signature for
    pretty-printing: "val [name] (<value>) : <return type>".
    """
    # removed dead local `params = ""` -- it was never used in this variant
    txt = fmt.sep(" ", ['val'])
    name = self.show_name()
    if name != "":
        txt.lsdata.append(name)
    txt.lsdata.append('(%s)' % self.value)
    txt.lsdata.append(': ' + self.tret)
    return txt
def to_fmt(self):
    """
    Return an Fmt representation of this function signature for
    pretty-printing: "fun [name] : (<params>) -> <return type>", with '...'
    appended to the parameter list for variadic functions.
    """
    params = ""
    txt = fmt.sep(" ", ['fun'])
    name = self.show_name()
    if name != "":
        txt.lsdata.append(name)
    tparams = []
    if self.tparams is not None:
        tparams = list(self.tparams)
    if self.variadic:
        tparams.append('...')
    params = '(' + ", ".join(tparams) + ')'
    txt.lsdata.append(': ' + params)
    txt.lsdata.append('-> ' + self.tret)
    return txt
def walk(self, lc: state.LivingContext, user_data=None, parent=None):
    """Recursively walk a node tree, feeding every attribute, key, index,
    type and value into the LivingContext's matching machinery.

    After each probe the context's event expressions are evaluated and the
    result hooks fired.  The root call passes parent=None, which makes the
    root its own parent.

    TODO: should_test_type??
    """
    # NOTE(review): _cacheid is declared global but never used in this body
    # -- presumably touched elsewhere; confirm before removing.
    global _cacheid
    # root node autoreference
    if parent is None:
        parent = self
    ## walk attributes
    if hasattr(self, '__dict__') and not isinstance(self, node.ListNode):
        for k in sorted(vars(self).keys()):
            print("RECURS key %s ID %d" % (k, id(getattr(self, k))))
            walk(getattr(self, k), lc, user_data, self)
            # k == ?
            #print('test attr .%s' % k)
            lc.checkAttr(k, self)
            # check precond
            lc.checkEventExpr()
            # do sub Event (for unstrict mode)
            lc.doSubEvent()
    # ...as dict, walk values, match keys
    if hasattr(self, 'keys'):
        for k in sorted(self.keys()):
            #print("RECURS ID %d" % id(self[k]))
            walk(self[k], lc, user_data, self)
            # k == ?
            #print('test key [%s]' % repr(k))
            lc.checkKey(k, self)
            # check precond
            lc.checkEventExpr()
            # do sub Event (for unstrict mode)
            lc.doSubEvent()
    # ...as list, walk values, match indices (strings are NOT iterated)
    elif not isinstance(self, str) and hasattr(self, '__iter__'):
        idx = 0
        for i in self:
            #print("RECURS ID %d" % id(i))
            walk(i, lc, user_data, self)
            # idx == ?
            #print('test indice [%s]' % str(idx))
            lc.checkIndice(idx, self)
            idx += 1
            # check precond
            lc.checkEventExpr()
            # do sub Event (for unstrict mode)
            lc.doSubEvent()
    # ...type or value
    # type(self) == ?
    #print("test type %s" % type(self))
    lc.checkType(type(self), self, parent)
    # self == ?
    #print("test value %s" % str(self))
    lc.checkValue(self)
    ## Check EVENTS
    # TODO: what if the event do something
    # but don't change current state and default change it!!!
    lc.checkEventExpr()
    #print("RESULT")
    # check Event
    lc.doResultEvent()
    # check Hook
    lc.doResultHook(self, user_data, parent)
    # no transition, fallback to default
    lc.doDefault()
    # maintain the pool of LivingState
    lc.resetLivingState()
def set_name(self, name: str):
    """ You could set the name after construction.

    Renaming invalidates the mangled keys, so the signature map is rebuilt
    from each signature's current internal_name().
    """
    self.name = name
    # update internal names
    sigs = list(self._hsig.values())
    self._hsig = {}
    self._hsig.update({sig.internal_name(): sig for sig in sigs})
def count_types(self) -> int:
    """ Count subtypes: entries whose class is literally named 'Type'. """
    return sum(1 for s in self._hsig.values()
               if type(s).__name__ == 'Type')
def count_vars(self) -> int:
    """ Count vars defined by this scope (entries with a truthy is_var). """
    return sum(1 for s in self._hsig.values()
               if getattr(s, 'is_var', False))
def count_funs(self) -> int:
    """ Count functions defined by this scope (entries with a truthy is_fun). """
    return sum(1 for s in self._hsig.values()
               if getattr(s, 'is_fun', False))
def __update_count(self):
    """ Update internal counters.

    Caches the number of types/vars/funs so repeated queries don't rescan
    the signature map.  Called after every mutation of _hsig.
    """
    self._ntypes = self.count_types()
    self._nvars = self.count_vars()
    self._nfuns = self.count_funs()
def update(self, sig: list or Scope) -> Scope:
    """ Update the Set with values of another Set.

    Accepts either an iterable of signatures or anything exposing .values()
    (e.g. another Scope).  Namespace scopes adopt each added signature as a
    child; added sub-scopes are marked EMBEDDED.  Returns self for chaining.
    """
    values = sig
    if hasattr(sig, 'values'):
        values = sig.values()
    for s in values:
        if self.is_namespace:
            s.set_parent(self)
        if isinstance(s, Scope):
            s.state = StateScope.EMBEDDED
        # keyed by mangled name, so entries with equal internal_name overwrite
        self._hsig[s.internal_name()] = s
    self.__update_count()
    return self
def union(self, sig: Scope) -> Scope:
    """ Create a new Set produced by the union of 2 Sets.

    Copies this scope's signatures into a fresh Scope, then merges *sig*
    into it via the |= operator; self is left untouched.
    """
    new = Scope(sig=self._hsig.values(), state=self.state)
    new |= sig
    return new
def intersection_update(self, oset: Scope) -> Scope:
    """ Update the Set, keeping only keys also present in *oset*.

    Surviving keys take oset's value for that key.  Returns self.
    """
    for key in list(self._hsig.keys()):
        if key in oset:
            self._hsig[key] = oset.get(key)
        else:
            del self._hsig[key]
    return self
def intersection(self, sig: Scope) -> Scope:
    """ Create a new Set produced by the intersection of 2 Sets.

    Copies this scope's signatures into a fresh Scope, then intersects with
    *sig* via the &= operator; self is left untouched.
    """
    new = Scope(sig=self._hsig.values(), state=self.state)
    new &= sig
    return new
def difference_update(self, oset: Scope) -> Scope:
    """ Remove every entry whose key is also present in *oset*.  Returns self. """
    for key in list(self._hsig.keys()):
        if key in oset:
            del self._hsig[key]
    return self
def difference(self, sig: Scope) -> Scope:
    """ Create a new Set produced by subtracting another Set from this one.

    Copies this scope's signatures into a fresh Scope, then removes *sig*'s
    entries via the -= operator; self is left untouched.
    """
    new = Scope(sig=self._hsig.values(), state=self.state)
    new -= sig
    return new
def symmetric_difference_update(self, oset: Scope) -> Scope:
    """ Keep entries found in exactly one of the two Sets.

    Keys common to both are removed; keys only in *oset* are copied in
    (with oset's values).  Returns self.
    """
    shared = {k for k in list(self._hsig.keys()) if k in oset}
    for k in oset._hsig.keys():
        if k not in shared:
            self._hsig[k] = oset.get(k)
    for k in shared:
        del self._hsig[k]
    return self
def symmetric_difference(self, sig: Scope) -> Scope:
    """ Create a new Set with values present in only one of the two Sets.

    Copies this scope's signatures into a fresh Scope, then applies the ^=
    operator with *sig*; self is left untouched.
    """
    new = Scope(sig=self._hsig.values(), state=self.state)
    new ^= sig
    return new
def add(self, it: Signature) -> bool:
    """ Add *it* to the Set.

    Sub-scopes become EMBEDDED.  The storage key is the signature's mangled
    internal_name(); it is recomputed after parenting for namespace scopes
    since reparenting can change the mangling.  Anonymous entries get a
    positional '_<n>' key.  Raises KeyError on a duplicate key.
    """
    if isinstance(it, Scope):
        it.state = StateScope.EMBEDDED
    txt = it.internal_name()
    it.set_parent(self)
    if self.is_namespace:
        # parenting may change the mangled name -- recompute it
        txt = it.internal_name()
    if txt == "":
        txt = '_' + str(len(self._hsig))
    if txt in self._hsig:
        raise KeyError("Already exists %s" % txt)
    self._hsig[txt] = it
    self.__update_count()
    return True
def remove(self, it: Signature) -> bool:
    """ Remove *it* from the Set; raise KeyError when it is absent.

    A removed sub-scope is demoted from EMBEDDED to LINKED.
    """
    key = it.internal_name()
    if key not in self._hsig:
        raise KeyError(it.show_name() + ' not in Set')
    sig = self._hsig.pop(key)
    if isinstance(sig, Scope):
        sig.state = StateScope.LINKED
    return True
def discard(self, it: Signature) -> bool:
    """ Remove *it* only if present; return whether anything was removed.

    A removed sub-scope is demoted from EMBEDDED to LINKED.
    """
    key = it.internal_name()
    if key not in self._hsig:
        return False
    sig = self._hsig.pop(key)
    if isinstance(sig, Scope):
        sig.state = StateScope.LINKED
    return True
def values(self) -> [Signature]:
    """ Retrieve all values.

    An EMBEDDED scope also exposes its parent's values (note: self.parent is
    callable -- presumably a weakref -- hence self.parent()); otherwise the
    raw dict view of this scope's signatures is returned.
    """
    if self.state == StateScope.EMBEDDED and self.parent is not None:
        return list(self._hsig.values()) + list(self.parent().values())
    else:
        return self._hsig.values()
def first(self) -> Signature:
    """ Retrieve the first Signature ordered by mangling descendant.

    The 'first' entry is the one with the smallest internal (mangled) name.
    """
    return self._hsig[min(self._hsig.keys())]
def last(self) -> Signature:
    """ Retrieve the last Signature ordered by mangling descendant.

    The 'last' entry is the one with the greatest internal (mangled) name.
    """
    return self._hsig[max(self._hsig.keys())]
def get(self, key: str, default=None) -> Signature:
    """ Get a signature instance by its internal_name, or *default* when absent. """
    return self._hsig.get(key, default)
def get_by_symbol_name(self, name: str) -> Scope:
    """ Retrieve a Set of all signatures matching a symbol name.

    Each match is boxed in an EvalCtx.  When nothing matches locally the
    lookup is delegated to the parent scope; otherwise a LINKED result scope
    (parented on self so type/translation info is inherited) is returned --
    possibly empty when there is no parent.
    """
    lst = []
    for s in self.values():
        if s.name == name:
            # create an EvalCtx only when necessary
            lst.append(EvalCtx.from_sig(s))
    # include parent
    # TODO: see all case of local redefinition for
    # global overloads
    # possible algos... take all with different internal_name
    if len(lst) == 0:
        p = self.get_parent()
        if p is not None:
            return p.get_by_symbol_name(name)
    rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
    # inherit type/translation from parent
    rscope.set_parent(self)
    return rscope
def getsig_by_symbol_name(self, name: str) -> Signature:
    """ Retrieve the unique Signature of a symbol.

    Raises KeyError when the symbol does not resolve to exactly one
    candidate.
    """
    candidates = self.get_by_symbol_name(name)
    if len(candidates) != 1:
        raise KeyError("%s have multiple candidates in scope" % name)
    return next(iter(candidates.values()))
def get_by_return_type(self, tname: str) -> Scope:
    """ Retrieve a Set of all signatures whose return type equals *tname*.

    Matches are boxed in EvalCtx; the result is a LINKED scope parented on
    self so type/translation info is inherited.  Entries without a tret
    attribute are skipped.
    """
    lst = []
    for s in self.values():
        if hasattr(s, 'tret') and s.tret == tname:
            lst.append(EvalCtx.from_sig(s))
    rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
    # inherit type/translation from parent
    rscope.set_parent(self)
    return rscope
def get_all_polymorphic_return(self) -> bool:
    """ Collect every signature whose return type is polymorphic.

    For now, polymorphic return types are handled by symbol artefact
    --> possibly multi-polymorphic but with different constraints attached!
    Returns a LINKED scope (note: annotation says bool, but a Scope is what
    is actually returned) parented on self.
    """
    lst = []
    for s in self.values():
        if hasattr(s, 'tret') and s.tret.is_polymorphic:
            # encapsulate s into a EvalCtx for meta-var resolution
            lst.append(EvalCtx.from_sig(s))
    rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
    # inherit type/translation from parent
    rscope.set_parent(self)
    return rscope
def get_by_params(self, params: [Scope]) -> (Scope, [[Scope]]):
    """ Retrieve a Set of all signatures that match the parameter list.

    Return a pair:
        pair[0] the overloads for the functions
        pair[1] the overloads for the parameters
            (a list of candidate lists of parameters)

    For each candidate signature, every call parameter must match either by
    return type, via an available translation (co/contra-variance), through
    a polymorphic parameter/return type, or as a variadic extra argument.
    """
    lst = []
    scopep = []
    # for each of our signatures
    for s in self.values():
        # for each params of this signature
        if hasattr(s, 'tparams'):
            # number of matched params
            mcnt = 0
            # temporary collect
            nbparam_sig = (0 if s.tparams is None else len(s.tparams))
            nbparam_candidates = len(params)
            # don't treat signature too short
            if nbparam_sig > nbparam_candidates:
                continue
            # don't treat call signature too long if not variadic
            if nbparam_candidates > nbparam_sig and not s.variadic:
                continue
            tmp = [None] * nbparam_candidates
            variadic_types = []
            for i in range(nbparam_candidates):
                # one LINKED sub-scope per call parameter collects the
                # matching interpretations of that argument
                tmp[i] = Scope(state=StateScope.LINKED)
                tmp[i].set_parent(self)
                # match param of the expr
                if i < nbparam_sig:
                    if params[i].state == StateScope.EMBEDDED:
                        raise ValueError(
                            ("params[%d] of get_by_params is a StateScope."
                             + "EMBEDDED scope... "
                             + "read the doc and try a StateScope.FREE"
                             + " or StateScope.LINKED.") % i
                        )
                    m = params[i].get_by_return_type(s.tparams[i])
                    if len(m) > 0:
                        mcnt += 1
                        tmp[i].update(m)
                    else:
                        # co/contra-variance
                        # we just need to search a t1->t2
                        # and add it into the tree (with/without warnings)
                        t1 = params[i]
                        t2 = s.tparams[i]
                        # if exist a fun (t1) -> t2
                        (is_convertible,
                         signature,
                         translator
                         ) = t1.findTranslationTo(t2)
                        if is_convertible:
                            # add a translator in the EvalCtx
                            signature.use_translator(translator)
                            mcnt += 1
                            nscope = Scope(
                                sig=[signature],
                                state=StateScope.LINKED,
                                is_namespace=False
                            )
                            nscope.set_parent(self)
                            tmp[i].update(nscope)
                        elif s.tparams[i].is_polymorphic:
                            # handle polymorphic parameter
                            mcnt += 1
                            if not isinstance(params[i], Scope):
                                raise Exception(
                                    "params[%d] must be a Scope" % i
                                )
                            tmp[i].update(params[i])
                        else:
                            # handle polymorphic return type
                            m = params[i].get_all_polymorphic_return()
                            if len(m) > 0:
                                mcnt += 1
                                tmp[i].update(m)
                # for variadic extra parameters
                else:
                    mcnt += 1
                    if not isinstance(params[i], Scope):
                        raise Exception("params[%d] must be a Scope" % i)
                    variadic_types.append(params[i].first().tret)
                    tmp[i].update(params[i])
            # we have match all candidates
            if mcnt == len(params):
                # select this signature but
                # box it (with EvalCtx) for type resolution
                lst.append(EvalCtx.from_sig(s))
                lastentry = lst[-1]
                if lastentry.variadic:
                    lastentry.use_variadic_types(variadic_types)
                scopep.append(tmp)
    rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)
    # inherit type/translation from parent
    rscope.set_parent(self)
    return (rscope, scopep)
def callInjector(self, old: Node, trans: Translator) -> Node:
    """ Run the AST translator injector.

    When this scope has no injector of its own, the call is forwarded to the
    parent scope; with neither injector nor parent a TypeError is raised.
    """
    injector = self.astTranslatorInjector
    if injector is not None:
        return injector(old, trans)
    if self.parent is None:
        raise TypeError("Must define an Translator Injector")
    # TODO: think if we forward for all StateScope
    # forward to parent scope
    return self.parent().callInjector(old, trans)
def findTranslationTo(self, t2: str) -> (bool, Signature, Translator):
    """ Find an arrow (->), i.e. a function able to translate something to t2.

    Returns (True, signature, translator) when exactly one candidate exists,
    and (False, None, None) otherwise (no candidate, ambiguous candidates,
    or a polymorphic target type).
    """
    if t2.is_polymorphic:
        # Fixed: this path previously fell through and implicitly returned
        # None, which broke callers that unpack the 3-tuple
        # (see get_by_params: is_convertible, signature, translator = ...).
        return (False, None, None)
    collect = []
    for s in self.values():
        t1 = s.tret
        # a polymorphic source type cannot select a concrete translation
        if t1.is_polymorphic:
            continue
        if t1 in self.mapTypeTranslate and t2 in self.mapTypeTranslate[t1]:
            collect.append((
                True,
                s,
                self.mapTypeTranslate[t1][t2]
            ))
    # if len > 1 too many candidates
    if len(collect) == 1:
        return collect[0]
    return (False, None, None)
def normalize(ast: Node) -> Node:
    """
    Normalize an AST node, recursively.

    All builtin containers are replaced by referencable subclasses
    (dict -> DictNode, list -> ListNode, tuple -> TupleNode); contained
    values and instance attributes are normalized in depth as well.
    """
    res = ast
    typemap = {DictNode, ListNode, TupleNode}
    # exact type checks on purpose: already-normalized subclasses pass through
    if type(ast) is dict:
        res = DictNode(ast)
    elif type(ast) is list:
        res = ListNode(ast)
    elif type(ast) is tuple:
        res = TupleNode(ast)
    # in-depth change
    if hasattr(res, 'items'):
        for k, v in res.items():
            res[k] = normalize(v)
    elif hasattr(res, '__getitem__'):
        for idx, v in zip(range(len(res)), res):
            res[idx] = normalize(v)
    # normalize plain-object attributes, but not those of the freshly
    # wrapped container nodes themselves
    if type(res) not in typemap and hasattr(res, '__dict__'):
        subattr = vars(res)
        for k, v in subattr.items():
            setattr(res, k, normalize(v))
    return res
def check(self, ndict: dict, info="") -> bool:
"""
Debug method, help detect cycle and/or
other incoherence in a tree of Node
"""
def iscycle(thing, ndict: dict, info: str) -> bool:
# check if not already here
idthing = id(thing)
ndict[info] = idthing
if idthing not in ndict:
# add myself
ndict[idthing] = "%s:%s no cycle" % (type(thing), info)
return False
else:
ndict[idthing] += "\n%s:%s cycle" % (type(thing), info)
return True
def recurs(thing, ndict: dict, info: str) -> bool:
if not iscycle(thing, ndict, info):
res = False
if isinstance(thing, list):
idx = 0
for i in thing:
res |= recurs(i, ndict, "%s[%d]" % (info, idx))
idx += 1
elif isinstance(thing, Node):
res |= thing.check(ndict, info)
elif isinstance(thing, dict):
for k, v in thing.items():
res |= recurs(v, ndict, "%s[%s]" % (info, k))
return res
return True
# add ME FIRST
if len(ndict) == 0:
ndict['self'] = id(self)
info = 'self'
if not iscycle(self, ndict, info):
res = False
if len(self) > 0:
if hasattr(self, 'keys'):
keys = list(self.keys())
for k in keys:
ndict["[" + repr(k) + "]"] = id(self[k])
res |= recurs(self[k], ndict, "%s[%s]" % (info, k))
keys = list(vars(self).keys())
for k in keys:
ndict["." + k] = id(getattr(self, k))
res |= recurs(getattr(self, k), ndict, "%s.%s" % (info, k))
return res
return True |
def set(self, othernode):
    """Allow to completely mutate the node into any subclass of Node.

    Reassigns __class__, clears the current content via clean(), then copies
    othernode's mapping items (the len() guard avoids calling .items() on
    empty/non-mapping nodes) and instance attributes onto self.
    """
    self.__class__ = othernode.__class__
    self.clean()
    if len(othernode) > 0:
        for k, v in othernode.items():
            self[k] = v
    for k, v in vars(othernode).items():
        setattr(self, k, v)
def values(self):
    """Yield each node's data by following the ``next`` links, in order."""
    node = self
    while node is not None:
        yield node.data
        node = node.next
def rvalues(self):
    """Yield each node's data by following the ``prev`` links, i.e. in
    reversed order."""
    node = self
    while node is not None:
        yield node.data
        node = node.prev
def _pixel_masked(hit, array):
''' Checks whether a hit (column/row) is masked or not. Array is 2D array with boolean elements corresponding to pixles indicating whether a pixel is disabled or not.
'''
if array.shape[0] > hit["column"] and array.shape[1] > hit["row"]:
return array[hit["column"], hit["row"]]
else:
return False |
def _finish_cluster(hits, clusters, cluster_size, cluster_hit_indices, cluster_index, cluster_id, charge_correction, noisy_pixels, disabled_pixels):
    ''' Set hit and cluster information of the cluster (e.g. number of hits in
    the cluster (cluster_size), total cluster charge (charge), ...).

    The seed hit is the first hit of the cluster with maximal charge; the
    cluster center is the charge-weighted mean of the hit positions, with
    charge_correction added per hit so zero-charge hits still carry weight.
    '''
    cluster_charge = 0
    max_cluster_charge = -1
    # necessary for charge weighted hit position
    total_weighted_column = 0
    total_weighted_row = 0
    for i in range(cluster_size):
        hit_index = cluster_hit_indices[i]
        # first hit with the maximal charge wins the seed role
        if hits[hit_index]['charge'] > max_cluster_charge:
            seed_hit_index = hit_index
            max_cluster_charge = hits[hit_index]['charge']
        # clear is_seed for every hit; the winner is set to 1 after the loop
        hits[hit_index]['is_seed'] = 0
        hits[hit_index]['cluster_size'] = cluster_size
        # include charge correction in sum
        total_weighted_column += hits[hit_index]['column'] * (hits[hit_index]['charge'] + charge_correction)
        total_weighted_row += hits[hit_index]['row'] * (hits[hit_index]['charge'] + charge_correction)
        cluster_charge += hits[hit_index]['charge']
        hits[hit_index]['cluster_ID'] = cluster_id
    hits[seed_hit_index]['is_seed'] = 1
    clusters[cluster_index]["ID"] = cluster_id
    clusters[cluster_index]["n_hits"] = cluster_size
    clusters[cluster_index]["charge"] = cluster_charge
    clusters[cluster_index]['seed_column'] = hits[seed_hit_index]['column']
    clusters[cluster_index]['seed_row'] = hits[seed_hit_index]['row']
    # correct total charge value and calculate mean column and row
    clusters[cluster_index]['mean_column'] = float(total_weighted_column) / (cluster_charge + cluster_size * charge_correction)
    clusters[cluster_index]['mean_row'] = float(total_weighted_row) / (cluster_charge + cluster_size * charge_correction)
    # Call end of cluster function hook
    _end_of_cluster_function(
        hits=hits,
        clusters=clusters,
        cluster_size=cluster_size,
        cluster_hit_indices=cluster_hit_indices,
        cluster_index=cluster_index,
        cluster_id=cluster_id,
        charge_correction=charge_correction,
        noisy_pixels=noisy_pixels,
        disabled_pixels=disabled_pixels,
        seed_hit_index=seed_hit_index)
def _finish_event(hits, clusters, start_event_hit_index, stop_event_hit_index, start_event_cluster_index, stop_event_cluster_index):
    ''' Set hit and cluster information of the event (e.g. number of clusters
    in the event (n_cluster), ...).

    Index ranges are half-open [start, stop).  Every hit of the event gets
    the event's cluster count; every cluster gets the event number taken
    from the event's first hit.
    '''
    for hit_index in range(start_event_hit_index, stop_event_hit_index):
        hits[hit_index]['n_cluster'] = stop_event_cluster_index - start_event_cluster_index
    for cluster_index in range(start_event_cluster_index, stop_event_cluster_index):
        clusters[cluster_index]['event_number'] = hits[start_event_hit_index]['event_number']
    # Call end of event function hook
    _end_of_event_function(
        hits=hits,
        clusters=clusters,
        start_event_hit_index=start_event_hit_index,
        stop_event_hit_index=stop_event_hit_index,
        start_event_cluster_index=start_event_cluster_index,
        stop_event_cluster_index=stop_event_cluster_index)
def _hit_ok(hit, min_hit_charge, max_hit_charge):
''' Check if given hit is withing the limits.
'''
# Omit hits with charge < min_hit_charge
if hit['charge'] < min_hit_charge:
return False
# Omit hits with charge > max_hit_charge
if max_hit_charge != 0 and hit['charge'] > max_hit_charge:
return False
return True |
def _set_1d_array(array, value, size=-1):
''' Set array elemets to value for given number of elements (if size is negative number set all elements to value).
'''
if size >= 0:
for i in range(size):
array[i] = value
else:
for i in range(array.shape[0]):
array[i] = value |
def _is_in_max_difference(value_1, value_2, max_difference):
''' Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode.
Circumvents numba bug #1653
'''
if value_1 <= value_2:
return value_2 - value_1 <= max_difference
return value_1 - value_2 <= max_difference |
def _cluster_hits(hits, clusters, assigned_hit_array, cluster_hit_indices, column_cluster_distance, row_cluster_distance, frame_cluster_distance, min_hit_charge, max_hit_charge, ignore_same_hits, noisy_pixels, disabled_pixels):
    ''' Main precompiled function that loops over the hits and clusters them.

    Hits are processed event by event (assumes hits are sorted by
    event_number -- NOTE(review): confirm with caller); inside an event a
    cluster is grown breadth-first: every hit already in the cluster is
    compared against the remaining unassigned event hits using the
    column/row/frame distance windows. Results are written in place into
    ``clusters``; invalid hits are flagged via ``_set_hit_invalid``.

    Returns the total number of clusters found (int).
    '''
    total_hits = hits.shape[0]
    if total_hits == 0:
        return 0  # total clusters
    max_cluster_hits = cluster_hit_indices.shape[0]
    if total_hits != clusters.shape[0]:
        raise ValueError("hits and clusters must be the same size")
    if total_hits != assigned_hit_array.shape[0]:
        raise ValueError("hits and assigned_hit_array must be the same size")
    # Correction for charge weighting:
    # some chips report charge 0 for a real signal, so an offset of 1 is
    # added when min_hit_charge == 0 to keep the cluster center-of-gravity
    # calculation correct.
    if min_hit_charge == 0:
        charge_correction = 1
    else:
        charge_correction = 0
    # Temporary variables that are reset for each cluster or event
    start_event_hit_index = 0
    start_event_cluster_index = 0
    cluster_size = 0
    event_number = hits[0]['event_number']
    event_cluster_index = 0
    # Outer loop over all hits in the array (referred to as actual hit)
    for i in range(total_hits):
        # Check for new event and reset event variables
        if _new_event(hits[i]['event_number'], event_number):
            # Finalize the previous event before starting the new one
            _finish_event(
                hits=hits,
                clusters=clusters,
                start_event_hit_index=start_event_hit_index,
                stop_event_hit_index=i,
                start_event_cluster_index=start_event_cluster_index,
                stop_event_cluster_index=start_event_cluster_index + event_cluster_index)
            start_event_hit_index = i
            start_event_cluster_index = start_event_cluster_index + event_cluster_index
            event_number = hits[i]['event_number']
            event_cluster_index = 0
        if assigned_hit_array[i] > 0:  # Hit was already assigned to a cluster in the inner loop, thus skip actual hit
            continue
        # Charge out of range or hit on a disabled pixel: mark invalid
        if not _hit_ok(
                hit=hits[i],
                min_hit_charge=min_hit_charge,
                max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[i], disabled_pixels)):
            _set_hit_invalid(hit=hits[i], cluster_id=-1)
            assigned_hit_array[i] = 1
            continue
        # Set/reset cluster variables for new cluster:
        # reset temp array with hit indices of actual cluster for the next cluster
        _set_1d_array(cluster_hit_indices, -1, cluster_size)
        cluster_hit_indices[0] = i
        assigned_hit_array[i] = 1
        cluster_size = 1  # actual cluster has one hit so far
        for j in cluster_hit_indices:  # Loop over all hits of the actual cluster; cluster_hit_indices is updated within the loop if new hits are found (breadth-first growth)
            if j < 0:  # There are no more cluster hits found
                break
            for k in range(cluster_hit_indices[0] + 1, total_hits):
                # Stop event hits loop if new event is reached
                if _new_event(hits[k]['event_number'], event_number):
                    break
                # Hit is already assigned to a cluster, thus skip actual hit
                if assigned_hit_array[k] > 0:
                    continue
                if not _hit_ok(
                        hit=hits[k],
                        min_hit_charge=min_hit_charge,
                        max_hit_charge=max_hit_charge) or (disabled_pixels.shape[0] != 0 and _pixel_masked(hits[k], disabled_pixels)):
                    _set_hit_invalid(hit=hits[k], cluster_id=-1)
                    assigned_hit_array[k] = 1
                    continue
                # Check if event hit belongs to actual hit and thus to the actual cluster
                if _is_in_max_difference(hits[j]['column'], hits[k]['column'], column_cluster_distance) and _is_in_max_difference(hits[j]['row'], hits[k]['row'], row_cluster_distance) and _is_in_max_difference(hits[j]['frame'], hits[k]['frame'], frame_cluster_distance):
                    if not ignore_same_hits or hits[j]['column'] != hits[k]['column'] or hits[j]['row'] != hits[k]['row']:
                        cluster_size += 1
                        if cluster_size > max_cluster_hits:
                            raise IndexError('cluster_hit_indices is too small to contain all cluster hits')
                        cluster_hit_indices[cluster_size - 1] = k
                        assigned_hit_array[k] = 1
                    else:
                        # Duplicate pixel hit (same column/row) while ignore_same_hits is set
                        _set_hit_invalid(hit=hits[k], cluster_id=-2)
                        assigned_hit_array[k] = 1
        # Check for valid cluster and add it to the array;
        # a single-hit cluster on a noisy pixel is dropped.
        if cluster_size == 1 and noisy_pixels.shape[0] != 0 and _pixel_masked(hits[cluster_hit_indices[0]], noisy_pixels):
            _set_hit_invalid(hit=hits[cluster_hit_indices[0]], cluster_id=-1)
        else:
            _finish_cluster(
                hits=hits,
                clusters=clusters,
                cluster_size=cluster_size,
                cluster_hit_indices=cluster_hit_indices,
                cluster_index=start_event_cluster_index + event_cluster_index,
                cluster_id=event_cluster_index,
                charge_correction=charge_correction,
                noisy_pixels=noisy_pixels,
                disabled_pixels=disabled_pixels)
            event_cluster_index += 1
    # Last event is assumed to be finished at the end of the hit array, thus add info
    _finish_event(
        hits=hits,
        clusters=clusters,
        start_event_hit_index=start_event_hit_index,
        stop_event_hit_index=total_hits,
        start_event_cluster_index=start_event_cluster_index,
        stop_event_cluster_index=start_event_cluster_index + event_cluster_index)
    total_clusters = start_event_cluster_index + event_cluster_index
    return total_clusters
def get_compute_sig(self) -> Signature:
    """
    Compute a signature using the current type resolution.

    Each type component that has been resolved is replaced by the
    resolved type's display name; unresolved components are kept as-is.
    Raises ValueError when the function is variadic but the variadic
    argument types have not been resolved yet.

    TODO: discuss of relevance of a final generation for a signature
    """
    def resolve_components(type_name):
        # Map each component to its resolved display name when available.
        # self.resolution stores weakrefs: call to dereference, then render.
        resolved = []
        for component in type_name.components:
            if component in self.resolution and self.resolution[component] is not None:
                resolved.append(self.resolution[component]().show_name())
            else:
                resolved.append(component)
        return resolved

    tret = resolve_components(self.tret)
    tparams = []
    if hasattr(self, 'tparams'):
        for param in self.tparams:
            tparams.append(" ".join(resolve_components(param)))
    if self.variadic:
        if self._variadic_types is None:
            raise ValueError("Can't compute the sig "
                             + "with unresolved variadic argument"
                             )
        for param in self._variadic_types:
            tparams.append(" ".join(resolve_components(param)))
    ret = Fun(self.name, " ".join(tret), tparams)
    # transform as-is into our internal Signature (Val, Var, whatever)
    ret.__class__ = self._sig.__class__
    return ret
def set_parent(self, parent) -> object:
    """
    When we add a parent (from Symbol), don't forget to resolve.
    """
    if parent is not None:
        # delegate to the wrapped signature, then re-run type resolution
        result = self._sig.set_parent(parent)
        self.resolve()
        return result
    if not hasattr(self, 'parent'):
        # only if parent didn't exist yet
        self.parent = None
    return self
def resolve(self):
    """
    Process the signature and find a definition for each type component.
    """
    # gather every TypeName that needs resolution
    pending = []
    if hasattr(self._sig, 'tret'):
        pending.append(self._sig.tret)
    if hasattr(self._sig, 'tparams') and self._sig.tparams is not None:
        pending.extend(self._sig.tparams)
    if self._translate_to is not None:
        pending.append(self._translate_to.target)
    if self._variadic_types is not None:
        pending.extend(self._variadic_types)
    for type_name in pending:
        for component in type_name.components:
            if component in self.resolution and self.resolution[component] is not None:
                # already resolved
                continue
            # try to find what is component
            parent = self.get_parent()
            if parent is not None:
                symbols = parent.get_by_symbol_name(component)
                if len(symbols) == 1:
                    target = list(symbols.values())[0]
                    # unwrap EvalCtx around Type
                    if isinstance(target, EvalCtx):
                        target = target._sig
                    # store a weakref so resolution does not keep symbols alive
                    self.resolution[component] = weakref.ref(target)
                    continue
            # unresolved
            self.resolution[component] = None
def get_resolved_names(self, type_name: TypeName) -> list:
    """
    Use self.resolution to substitute each component of type_name.

    Allows instantiating polymorphic types (?1, ?toto): resolved
    components are replaced by the resolved type's display name, the
    others are returned unchanged.

    :raises TypeError: if type_name is not a TypeName.
    :raises KeyError: if a component is unknown to this EvalCtx.
    """
    if not isinstance(type_name, TypeName):
        # TypeError (still an Exception subclass) instead of bare Exception
        raise TypeError("Take a TypeName as parameter not a %s"
                        % type(type_name))
    rnames = []
    for name in type_name.components:
        if name not in self.resolution:
            raise KeyError("Unknown type %s in a EvalCtx" % name)
        resolved = self.resolution[name]
        # resolution holds weakrefs; dereference and render when resolved
        rnames.append(resolved().show_name() if resolved is not None else name)
    return rnames
def set_resolved_name(self, ref: dict, type_name2solve: TypeName,
                      type_name_ref: TypeName):
    """
    Copy a resolution from ``ref`` when ours is still missing.

    Warning!!! Need to rethink it when global poly type
    """
    # never overwrite an existing resolution, only fill unresolved slots
    if self.resolution[type_name2solve.value] is not None:
        return
    self.resolution[type_name2solve.value] = ref[type_name_ref.value]
def to_fmt(self) -> fmt.indentable:
    """
    Build the pretty-printing representation: ``tuple(<sig>, ...)``.
    """
    inner = [member.to_fmt() for member in self._lsig]
    parens = fmt.block("(", ")", fmt.sep(', ', inner))
    return fmt.sep("", ["tuple", parens])
def internal_name(self):
    """
    Return the unique internal name
    """
    base = super().internal_name()
    if self.tret is None:
        return base
    # append the return type to disambiguate overloads
    return base + "_" + self.tret
def _delete_local(self, filename):
"""Deletes the specified file from the local filesystem."""
if os.path.exists(filename):
os.remove(filename) |
def _delete_s3(self, filename, bucket_name):
    """Deletes the specified file from the given S3 bucket."""
    connection = S3Connection(self.access_key_id, self.access_key_secret)
    bucket = connection.get_bucket(bucket_name)
    # A boto Key object may be passed instead of a plain path string
    if type(filename).__name__ == 'Key':
        filename = '/' + filename.name
    key = Key(bucket)
    key.key = self._get_s3_path(filename)
    try:
        bucket.delete_key(key)
    except S3ResponseError:
        # best effort: deleting a missing key is treated as success
        pass
def delete(self, filename, storage_type=None, bucket_name=None):
    """Deletes the specified file, either locally or from S3, depending on the file's storage type."""
    if storage_type and bucket_name:
        if storage_type != 's3':
            raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % storage_type)
        self._delete_s3(filename, bucket_name)
    else:
        # no (or incomplete) remote storage info: fall back to local delete
        self._delete_local(filename)
def _save_local(self, temp_file, filename, obj):
"""Saves the specified file to the local file system."""
path = self._get_path(filename)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path), self.permission | 0o111)
fd = open(path, 'wb')
# Thanks to:
# http://stackoverflow.com/a/3253276/2066849
temp_file.seek(0)
t = temp_file.read(1048576)
while t:
fd.write(t)
t = temp_file.read(1048576)
fd.close()
if self.filesize_field:
setattr(obj, self.filesize_field, os.path.getsize(path))
return filename |
def _save_s3(self, temp_file, filename, obj):
    """Saves the specified file to the configured S3 bucket."""
    connection = S3Connection(self.access_key_id, self.access_key_secret)
    bucket = connection.get_bucket(self.bucket_name)
    key = bucket.new_key(self._get_s3_path(filename))
    key.set_contents_from_string(temp_file.getvalue())
    key.set_acl(self.acl)
    if self.filesize_field:
        # record the uploaded size on the model object
        setattr(obj, self.filesize_field, key.size)
    return filename
def save(self, temp_file, filename, obj):
    """Saves the specified file to either S3 or the local filesystem, depending on the currently enabled storage type."""
    if self.storage_type and self.bucket_name:
        if self.storage_type != 's3':
            raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % self.storage_type)
        ret = self._save_s3(temp_file, filename, obj)
    else:
        ret = self._save_local(temp_file, filename, obj)
    if self.field_name:
        setattr(obj, self.field_name, ret)
    # record where the file ended up so it can be found/deleted later
    stored_on_s3 = self.storage_type == 's3'
    if self.storage_type_field:
        setattr(obj, self.storage_type_field,
                self.storage_type if stored_on_s3 else '')
    if self.bucket_name_field:
        setattr(obj, self.bucket_name_field,
                self.bucket_name if stored_on_s3 else '')
    return ret
def _find_by_path_s3(self, path, bucket_name):
    """Finds files by listing an S3 bucket's contents by prefix."""
    connection = S3Connection(self.access_key_id, self.access_key_secret)
    bucket = connection.get_bucket(bucket_name)
    return bucket.list(prefix=self._get_s3_path(path))
def find_by_path(self, path, storage_type=None, bucket_name=None):
    """Finds files at the specified path / prefix, either on S3 or on the local filesystem."""
    if storage_type and bucket_name:
        if storage_type != 's3':
            raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % storage_type)
        return self._find_by_path_s3(path, bucket_name)
    # no (or incomplete) remote storage info: search locally
    return self._find_by_path_local(path)
def enum(*sequential, **named):
    """
    Build an enum-like class from positional and keyword names.

    Positional names get consecutive integer values starting at 0, while
    keyword arguments supply explicit values.  The generated class also
    carries 'map' (name -> value) and 'rmap' (int value -> name).
    """
    members = dict(zip(sequential, range(len(sequential))), **named)
    members['map'] = copy.copy(members)
    # reverse mapping, for int values only (exact type check on purpose)
    members['rmap'] = {value: key for key, value in members.items()
                       if type(value) is int}
    return type('Enum', (), members)
def checktypes(func):
    """Decorator to verify arguments and return types.

    Expected types are taken from the function's annotations; parameters
    whose annotation is missing or not a class are not checked.  Raises
    ValueError at decoration time for a default value that does not match
    its annotation, and at call time for a mismatching argument or return
    value.
    """
    sig = inspect.signature(func)
    types = {}
    for param in sig.parameters.values():
        # Iterate through function's parameters and build the list of
        # arguments types
        param_type = param.annotation
        if param_type is param.empty or not inspect.isclass(param_type):
            # Missing annotation or not a type, skip it
            continue
        types[param.name] = param_type
        # If the argument has a type specified, let's check that its
        # default value (if present) conforms with the type.
        if (param.default is not param.empty and
                not isinstance(param.default, param_type)):
            raise ValueError(
                "{func}: wrong type of a default value for {arg!r}".format(
                    func=func.__qualname__, arg=param.name)
            )

    def check_type(arg_name, arg_type, arg_value):
        # Internal helper that raises if arg_value is not an arg_type
        # (the unused `sig` parameter of the original was dropped)
        if not isinstance(arg_value, arg_type):
            raise ValueError("{func}: wrong type of {arg!r} argument, "
                             "{exp!r} expected, got {got!r}".
                             format(func=func.__qualname__, arg=arg_name,
                                    exp=arg_type.__name__,
                                    got=type(arg_value).__name__))

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Bind the call arguments to the wrapped function's parameters
        ba = sig.bind(*args, **kwargs)
        for arg_name, arg in ba.arguments.items():
            try:
                type_ = types[arg_name]
            except KeyError:
                # no annotation recorded for this parameter
                continue
            # We have a type for the argument; get the corresponding
            # parameter description from the signature object
            param = sig.parameters[arg_name]
            if param.kind == param.VAR_POSITIONAL:
                # *args parameter: check each of its values
                for value in arg:
                    check_type(arg_name, type_, value)
            elif param.kind == param.VAR_KEYWORD:
                # **kwargs parameter: check each value, qualifying the name
                for subname, value in arg.items():
                    check_type(arg_name + ':' + subname, type_, value)
            else:
                # regular parameter
                check_type(arg_name, type_, arg)
        result = func(*ba.args, **ba.kwargs)
        # The last bit - let's check that the result is correct
        return_type = sig.return_annotation
        if (return_type is not sig.empty and
                isinstance(return_type, type) and
                not isinstance(result, return_type)):
            raise ValueError(
                '{func}: wrong return type, {exp} expected, got {got}'.format(
                    func=func.__qualname__, exp=return_type.__name__,
                    got=type(result).__name__)
            )
        return result
    return wrapper
def set_one(chainmap, thing_name, callobject):
    """ Register callobject in chainmap under every namespace suffix of
    thing_name (e.g. "a.b.c" maps the keys "c", "b.c" and "a.b.c").
    """
    parts = thing_name.split(".")
    # shortest suffix first, growing towards the fully qualified name
    for start in range(len(parts) - 1, -1, -1):
        chainmap['.'.join(parts[start:])] = callobject
def add_method(cls):
    """Return a decorator that attaches the decorated function to *cls*.

    The function is set on the class under its own name (silently
    overwriting any existing attribute) and returned unchanged, so it
    remains usable at definition scope too.
    """
    def wrapper(f):
        setattr(cls, f.__name__, f)
        return f
    return wrapper
def hook(cls, hookname=None, erase=False):
    """Attach a method to a parsing class and register it as a parser hook.

    The method is registered with its name unless hookname is provided.
    Unless erase is True, registering over an existing rule or hook of
    the same name raises TypeError.
    """
    if not hasattr(cls, '_hooks'):
        raise TypeError(
            "%s doesn't seem to be a BasicParser subclass" % cls.__name__)
    class_hook_list = cls._hooks
    class_rule_list = cls._rules

    def wrapper(f):
        nonlocal hookname
        add_method(cls)(f)
        if hookname is None:
            hookname = f.__name__
        if not erase and (hookname in class_hook_list or hookname in class_rule_list):
            raise TypeError("%s is already defined as a rule or hook" % hookname)
        if '.' not in hookname:
            # qualify unqualified names with the class's full namespace
            hookname = '.'.join([cls.__module__, cls.__name__, hookname])
        set_one(class_hook_list, hookname, f)
        return f
    return wrapper
def rule(cls, rulename=None, erase=False):
    """Attach a method to a parsing class and register it as a parser rule.

    The method is registered with its name unless rulename is provided.
    Unless erase is True, registering over an existing rule or hook of
    the same name raises TypeError.
    """
    if not hasattr(cls, '_rules'):
        raise TypeError(
            "%s doesn't seem to be a BasicParser subclass" % cls.__name__)
    class_hook_list = cls._hooks
    class_rule_list = cls._rules

    def wrapper(f):
        nonlocal rulename
        add_method(cls)(f)
        if rulename is None:
            rulename = f.__name__
        if not erase and (rulename in class_hook_list or rulename in class_rule_list):
            raise TypeError("%s is already defined as a rule or hook" % rulename)
        if '.' not in rulename:
            # qualify unqualified names with the class's full namespace
            # (same '.'.join style as hook() for consistency)
            rulename = '.'.join([cls.__module__, cls.__name__, rulename])
        set_one(class_rule_list, rulename, f)
        return f
    return wrapper
def directive(directname=None):
    """Attach a class to a parsing class and register it as a parser directive.

    The class is registered with its name unless directname is provided.
    """
    global _directives

    def wrapper(f):
        nonlocal directname
        if directname is None:
            directname = f.__name__
        # remember the namespace name on the class itself
        f.ns_name = directname
        set_one(_directives, directname, f)
        return f
    return wrapper
def decorator(directname=None):
    """
    Attach a class to a parsing decorator and register it to the global
    decorator list.
    The class is registered with its name unless directname is provided.
    """
    global _decorators
    class_deco_list = _decorators

    def wrapper(f):
        nonlocal directname
        if directname is None:
            directname = f.__name__
        f.ns_name = directname
        set_one(class_deco_list, directname, f)
        # Return the decorated object, matching directive()/hook()/rule();
        # without this the decorated class would be rebound to None.
        return f
    return wrapper
def bind(self, dst: str, src: Node) -> bool:
    """Allow to alias a node to another name.
    Useful to bind a node to _ as return of Rule::
        R = [
            __scope__:L [item:I #add_item(L, I]* #bind('_', L)
        ]
    It's also the default behaviour of ':>'
    """
    # walk the ChainMap layers and rebind dst in the first layer owning it
    for mapping in self.rule_nodes.maps:
        if dst in mapping:
            mapping[dst] = src
            return True
    raise Exception('%s not found' % dst)
def read_eol(self) -> bool:
    """Return True if the parser can consume an EOL byte sequence."""
    if self.read_eof():
        return False
    self._stream.save_context()
    # optional '\r' followed by a mandatory '\n' (handles CRLF and LF)
    self.read_char('\r')
    if not self.read_char('\n'):
        return self._stream.restore_context()
    return self._stream.validate_context()
def read_hex_integer(self) -> bool:
    """
    read a hexadecimal number
    Read the following BNF rule else return False::
        readHexInteger = [
            [ '0'..'9' | 'a'..'f' | 'A'..'F' ]+
        ]
    """
    def is_hex_digit(char):
        low = char.lower()
        return char.isdigit() or ('a' <= low <= 'f')

    if self.read_eof():
        return False
    self._stream.save_context()
    if is_hex_digit(self._stream.peek_char):
        self._stream.incpos()
        # consume the remaining hex digits
        while not self.read_eof() and is_hex_digit(self._stream.peek_char):
            self._stream.incpos()
        return self._stream.validate_context()
    return self._stream.restore_context()
def read_cstring(self) -> bool:
    """
    read a double quoted string
    Read following BNF rule else return False::
        '"' -> ['\\' #char | ~'\\'] '"'
    """
    self._stream.save_context()
    # opening quote, then consume up to the closing quote while honouring
    # backslash escapes; the matched text itself is not needed here
    # (an unused capture of the slice was removed as dead code)
    if self.read_char("\"") and self.read_until("\"", "\\"):
        return self._stream.validate_context()
    return self._stream.restore_context()
def push_rule_nodes(self) -> bool:
    """Push a fresh scope onto the rule-node, tag and id ChainMaps."""
    first_scope = self.rule_nodes is None
    if first_scope:
        # lazily create the backing ChainMaps on first use
        self.rule_nodes = collections.ChainMap()
        self.tag_cache = collections.ChainMap()
        self.id_cache = collections.ChainMap()
    else:
        # nested scope: layer a new child mapping on each map
        self.rule_nodes = self.rule_nodes.new_child()
        self.tag_cache = self.tag_cache.new_child()
        self.id_cache = self.id_cache.new_child()
    return True
# NOTE(review): removed trailing non-code residue ("Subsets and Splits ...")
# left over from a dataset-viewer scrape; it was not valid Python.