def pop_rule_nodes(self) -> bool:
"""Pop context variable that store rule nodes"""
self.rule_nodes = self.rule_nodes.parents
self.tag_cache = self.tag_cache.parents
self.id_cache = self.id_cache.parents
return True |
def value(self, n: Node) -> str:
"""Return the text value of the node"""
id_n = id(n)
idcache = self.id_cache
if id_n not in idcache:
return ""
name = idcache[id_n]
tag_cache = self.tag_cache
if name not in tag_cache:
raise Exception("Incoherent tag cache")
tag = tag_cache[name]
k = "%d:%d" % (tag._begin, tag._end)
valcache = self._streams[-1].value_cache
if k not in valcache:
valcache[k] = str(tag)
return valcache[k] |
def parsed_stream(self, content: str, name: str=None):
"""Push a new Stream into the parser.
All subsequently called functions will parse this new stream,
until the 'pop_stream' function is called.
"""
self._streams.append(Stream(content, name)) |
def begin_tag(self, name: str) -> bool:
"""Save the current index under the given name."""
# Check if we could attach tag cache to current rule_nodes scope
self.tag_cache[name] = Tag(self._stream, self._stream.index)
return True |
def end_tag(self, name: str) -> bool:
"""Extract the string between saved and current index."""
self.tag_cache[name].set_end(self._stream.index)
return True |
def set_rules(cls, rules: dict) -> bool:
"""
Merge the internal rule set with the given rules
"""
cls._rules = cls._rules.new_child()
for rule_name, rule_pt in rules.items():
if '.' not in rule_name:
rule_name = cls.__module__ \
+ '.' + cls.__name__ \
+ '.' + rule_name
meta.set_one(cls._rules, rule_name, rule_pt)
return True |
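# Illustration (a sketch): unqualified rule names are namespaced with the
# owning class, so for a hypothetical class mymod.MyParser,
# set_rules({'num': pt}) stores the rule as 'mymod.MyParser.num',
# while an already-dotted name such as 'other.Grammar.num' is kept as-is.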
def set_hooks(cls, hooks: dict) -> bool:
"""
Merge the internal hook set with the given hooks
"""
cls._hooks = cls._hooks.new_child()
for hook_name, hook_pt in hooks.items():
if '.' not in hook_name:
hook_name = cls.__module__ \
+ '.' + cls.__name__ \
+ '.' + hook_name
meta.set_one(cls._hooks, hook_name, hook_pt)
return True |
def set_directives(cls, directives: dict) -> bool:
"""
Merge the internal directive set with the given directives.
For directives to work, attach them only to the dsl.Parser class.
"""
meta._directives = meta._directives.new_child()
for dir_name, dir_pt in directives.items():
meta.set_one(meta._directives, dir_name, dir_pt)
dir_pt.ns_name = dir_name
return True |
def eval_rule(self, name: str) -> Node:
"""Evaluate a rule by name."""
# context created by caller
n = Node()
id_n = id(n)
self.rule_nodes['_'] = n
self.id_cache[id_n] = '_'
# TODO: other behavior for empty rules?
if name not in self.__class__._rules:
self.diagnostic.notify(
error.Severity.ERROR,
"Unknown rule : %s" % name,
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
self._lastRule = name
rule_to_eval = self.__class__._rules[name]
# TODO: add packrat cache here, same rule - same pos == same res
res = rule_to_eval(self)
if res:
res = self.rule_nodes['_']
return res |
def eval_hook(self, name: str, ctx: list) -> Node:
"""Evaluate the hook by its name"""
if name not in self.__class__._hooks:
# TODO: don't always throw an error; could return True by default
self.diagnostic.notify(
error.Severity.ERROR,
"Unknown hook : %s" % name,
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
self._lastRule = '#' + name
res = self.__class__._hooks[name](self, *ctx)
if type(res) is not bool:
raise TypeError("Your hook %r didn't return a bool value" % name)
return res |
def peek_text(self, text: str) -> bool:
"""Same as readText but doesn't consume the stream."""
start = self._stream.index
stop = start + len(text)
if stop > self._stream.eos_index:
return False
return self._stream[self._stream.index:stop] == text |
def one_char(self) -> bool:
"""Read one byte in stream"""
if self.read_eof():
return False
self._stream.incpos()
return True |
def read_char(self, c: str) -> bool:
"""
Consume the head byte if it equals c, increment the current index and
return True, else return False. It uses peek_char; same as 'c' in BNF.
"""
if self.read_eof():
return False
self._stream.save_context()
if c == self._stream.peek_char:
self._stream.incpos()
return self._stream.validate_context()
return self._stream.restore_context() |
def read_until(self, c: str, inhibitor='\\') -> bool:
"""
Consume the stream until the byte c is read, else return False.
e.g.: if the stream is "abcdef", read_until("d") consumes "abcd".
"""
if self.read_eof():
return False
self._stream.save_context()
while not self.read_eof():
if self.peek_char(inhibitor):
# Delete inhibitor and inhibited character
self.one_char()
self.one_char()
if self.peek_char(c):
self._stream.incpos()
return self._stream.validate_context()
self._stream.incpos()
return self._stream.restore_context() |
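# A hedged, pure-Python sketch of read_until's intended semantics; the
# helper read_until_demo below is hypothetical, not part of the parser API:
def read_until_demo(text: str, c: str, inhibitor: str = '\\') -> str:
    """Return the consumed prefix of text, up to and including c."""
    i = 0
    while i < len(text):
        if text[i] == inhibitor:
            i += 2  # drop the inhibitor and the inhibited character
            continue
        if text[i] == c:
            return text[:i + 1]  # the terminator itself is consumed
        i += 1
    return ""  # failure: terminator never found

assert read_until_demo("abcdef", "d") == "abcd"
assert read_until_demo("a\\dbdx", "d") == "a\\dbd"  # the first 'd' is inhibited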
def read_until_eof(self) -> bool:
"""Consume all the stream. Same as EOF in BNF."""
if self.read_eof():
return True
# TODO: read ALL
self._stream.save_context()
while not self.read_eof():
self._stream.incpos()
return self._stream.validate_context() |
def read_text(self, text: str) -> bool:
"""
Consume len(text) characters at the current position in the stream,
else return False.
Same as "text" in BNF,
e.g.: read_text("ls").
"""
if self.read_eof():
return False
self._stream.save_context()
if self.peek_text(text):
self._stream.incpos(len(text))
return self._stream.validate_context()
return self._stream.restore_context() |
def read_range(self, begin: str, end: str) -> bool:
"""
Consume the head byte if it is >= begin and <= end, else return False.
Same as 'a'..'z' in BNF
"""
if self.read_eof():
return False
c = self._stream.peek_char
if begin <= c <= end:
self._stream.incpos()
return True
return False |
def ignore_blanks(self) -> bool:
"""Consume whitespace characters."""
self._stream.save_context()
while (not self.read_eof()
       and self._stream.peek_char in " \t\v\f\r\n"):
    self._stream.incpos()
return self._stream.validate_context() |
def do_call(self, parser: BasicParser) -> Node:
"""
The Decorator call is the one that actually pushes/pops
the decorator in the active decorators list (parsing._decorators)
"""
valueparam = []
for v, t in self.param:
if t is Node:
valueparam.append(parser.rule_nodes[v])
elif type(v) is t:
valueparam.append(v)
else:
raise TypeError(
"Type mismatch expected {} got {}".format(t, type(v)))
if not self.checkParam(self.decorator_class, valueparam):
return False
decorator = self.decorator_class(*valueparam)
global _decorators
_decorators.append(decorator)
res = self.pt(parser)
_decorators.pop()
return res |
def internal_name(self):
"""
Return the unique internal name
"""
unq = 'f_' + super().internal_name()
if self.tparams is not None:
unq += "_" + "_".join(self.tparams)
if self.tret is not None:
unq += "_" + self.tret
return unq |
def set_hit_fields(self, hit_fields):
''' Tell the clusterizer the meaning of the field names.
The hit_fields parameter is a dict, e.g., {"new field name": "standard field name"}.
If None, the default mapping is used.
Example:
--------
Internally, the clusterizer uses the hit field names "column"/"row". If the names of the hit fields are "x"/"y", call:
set_hit_fields(hit_fields={'x': 'column',
'y': 'row'})
'''
if not hit_fields:
hit_fields_mapping_inverse = {}
hit_fields_mapping = {}
else:
# Create also the inverse dictionary for faster lookup
hit_fields_mapping_inverse = dict((k, v) for k, v in hit_fields.items())
hit_fields_mapping = dict((v, k) for k, v in hit_fields.items())
for old_name, new_name in self._default_hit_fields_mapping.items():
if old_name not in hit_fields_mapping:
hit_fields_mapping[old_name] = new_name
hit_fields_mapping_inverse[new_name] = old_name
self._hit_fields_mapping = hit_fields_mapping
self._hit_fields_mapping_inverse = hit_fields_mapping_inverse |
def set_cluster_fields(self, cluster_fields):
''' Tell the clusterizer the meaning of the field names.
The cluster_fields parameter is a dict, e.g., {"new field name": "standard field name"}.
'''
if not cluster_fields:
cluster_fields_mapping_inverse = {}
cluster_fields_mapping = {}
else:
# Create also the inverse dictionary for faster lookup
cluster_fields_mapping_inverse = dict((k, v) for k, v in cluster_fields.items())
cluster_fields_mapping = dict((v, k) for k, v in cluster_fields.items())
for old_name, new_name in self._default_cluster_fields_mapping.items():
if old_name not in cluster_fields_mapping:
cluster_fields_mapping[old_name] = new_name
cluster_fields_mapping_inverse[new_name] = old_name
self._cluster_fields_mapping = cluster_fields_mapping
self._cluster_fields_mapping_inverse = cluster_fields_mapping_inverse |
def set_hit_dtype(self, hit_dtype):
''' Set the data type of the hits.
Fields that are not mentioned here are NOT copied into the clustered hits array.
The clusterizer has to know the hit data type to produce the clustered hit result with the same data types.
Parameters:
-----------
hit_dtype : numpy.dtype or equivalent
Defines the dtype of the hit array.
Example:
--------
hit_dtype = [("column", np.uint16), ("row", np.uint16)], where
"column", "row" is the field name of the input hit array.
'''
if not hit_dtype:
hit_dtype = np.dtype([])
else:
hit_dtype = np.dtype(hit_dtype)
cluster_hits_descr = hit_dtype.descr
# Add default back to description
for dtype_name, dtype in self._default_cluster_hits_descr:
if self._hit_fields_mapping[dtype_name] not in hit_dtype.fields:
cluster_hits_descr.append((dtype_name, dtype))
self._cluster_hits_descr = cluster_hits_descr
self._init_arrays(size=0) |
def set_cluster_dtype(self, cluster_dtype):
''' Set the data type of the cluster.
Parameters:
-----------
cluster_dtype : numpy.dtype or equivalent
Defines the dtype of the cluster array.
'''
if not cluster_dtype:
cluster_dtype = np.dtype([])
else:
cluster_dtype = np.dtype(cluster_dtype)
cluster_descr = cluster_dtype.descr
for dtype_name, dtype in self._default_cluster_descr:
if self._cluster_fields_mapping[dtype_name] not in cluster_dtype.fields:
cluster_descr.append((dtype_name, dtype))
self._cluster_descr = cluster_descr
self._init_arrays(size=0) |
def add_cluster_field(self, description):
''' Adds a field or a list of fields to the cluster result array. Has to be defined as a numpy dtype entry, e.g.: ('parameter', '<i4') '''
if isinstance(description, list):
for item in description:
if len(item) != 2:
raise TypeError("Description needs to be a list of 2-tuples of a string and a dtype.")
self._cluster_descr.append(item)
else:
if len(description) != 2:
raise TypeError("Description needs to be a 2-tuple of a string and a dtype.")
self._cluster_descr.append(description)
self._init_arrays(size=0) |
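# Hedged usage sketch for add_cluster_field; the instance name and field
# names are made up for illustration:
# clusterizer.add_cluster_field(('mean_charge', '<f4'))                # one field
# clusterizer.add_cluster_field([('par_1', '<i4'), ('par_2', '<f4')])  # a list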
def set_end_of_cluster_function(self, function):
''' Add the function to the module.
This is maybe the only way to make the clusterizer work with multiprocessing.
'''
self.cluster_functions._end_of_cluster_function = self._jitted(function)
self._end_of_cluster_function = function |
def set_end_of_event_function(self, function):
''' Add the function to the module.
This is maybe the only way to make the clusterizer work with multiprocessing.
'''
self.cluster_functions._end_of_event_function = self._jitted(function)
self._end_of_event_function = function |
def cluster_hits(self, hits, noisy_pixels=None, disabled_pixels=None):
''' Cluster given hit array.
The noisy_pixels and disabled_pixels parameters are iterables of column/row index pairs, e.g. [[column_1, row_1], [column_2, row_2], ...].
The noisy_pixels parameter allows for removing clusters that consist of a single noisy pixel. Clusters with 2 or more noisy pixels are not removed.
The disabled_pixels parameter allows for ignoring pixels.
'''
# Jitting a second time to work around different behavior of the installation methods on different platforms (pip install vs. python setup.py).
# In some circumstances, the Numba compiler can't compile functions that were pickled previously.
self.cluster_functions._end_of_cluster_function = self._jitted(self._end_of_cluster_function)
self.cluster_functions._end_of_event_function = self._jitted(self._end_of_event_function)
n_hits = hits.shape[0] # Set n_hits to new size
if (n_hits < int(0.5 * self._cluster_hits.size)) or (n_hits > self._cluster_hits.size):
self._init_arrays(size=int(1.1 * n_hits)) # oversize buffer slightly to reduce allocations
else:
self._assigned_hit_array.fill(0) # The hit indices of the actual cluster, 0 means not assigned
self._cluster_hit_indices.fill(-1) # The hit indices of the actual cluster, -1 means not assigned
self._clusters.dtype.names = self._unmap_cluster_field_names(self._clusters.dtype.names) # Reset the data fields from previous renaming
self._cluster_hits.dtype.names = self._unmap_hit_field_names(self._cluster_hits.dtype.names) # Reset the data fields from previous renaming
self._check_struct_compatibility(hits)
# The hit info is extended by the cluster info; this is only possible by creating a new hit info array and copying the data
for field_name in hits.dtype.fields:
if field_name in self._hit_fields_mapping_inverse:
cluster_hits_field_name = self._hit_fields_mapping_inverse[field_name]
else:
cluster_hits_field_name = field_name
if cluster_hits_field_name in self._cluster_hits.dtype.fields:
self._cluster_hits[cluster_hits_field_name][:n_hits] = hits[field_name]
noisy_pixels_array = np.array([]) if noisy_pixels is None else np.array(noisy_pixels)
if noisy_pixels_array.shape[0] != 0:
noisy_pixels_max_range = np.array([max(0, np.max(noisy_pixels_array[:, 0])), max(0, np.max(noisy_pixels_array[:, 1]))])
noisy_pixels = np.zeros(noisy_pixels_max_range + 1, dtype=bool)  # np.bool was removed in NumPy 1.24; the builtin bool is equivalent
noisy_pixels[noisy_pixels_array[:, 0], noisy_pixels_array[:, 1]] = 1
else:
noisy_pixels = np.zeros((0, 0), dtype=bool)
disabled_pixels_array = np.array([]) if disabled_pixels is None else np.array(disabled_pixels)
if disabled_pixels_array.shape[0] != 0:
disabled_pixels_max_range = np.array([np.max(disabled_pixels_array[:, 0]), np.max(disabled_pixels_array[:, 1])])
disabled_pixels = np.zeros(disabled_pixels_max_range + 1, dtype=bool)
disabled_pixels[disabled_pixels_array[:, 0], disabled_pixels_array[:, 1]] = 1
else:
disabled_pixels = np.zeros((0, 0), dtype=bool)
# col_dtype = self._cluster_hits.dtype.fields["column"][0]
# row_dtype = self._cluster_hits.dtype.fields["row"][0]
# mask_dtype = {"names": ["column", "row"],
# "formats": [col_dtype, row_dtype]}
# noisy_pixels = np.recarray(noisy_pixels_array.shape[0], dtype=mask_dtype)
# noisy_pixels[:] = [(item[0], item[1]) for item in noisy_pixels_array]
# disabled_pixels = np.recarray(disabled_pixels_array.shape[0], dtype=mask_dtype)
# disabled_pixels[:] = [(item[0], item[1]) for item in disabled_pixels_array]
n_clusters = self.cluster_functions._cluster_hits( # Set n_clusters to new size
hits=self._cluster_hits[:n_hits],
clusters=self._clusters[:n_hits],
assigned_hit_array=self._assigned_hit_array[:n_hits],
cluster_hit_indices=self._cluster_hit_indices[:n_hits],
column_cluster_distance=self._column_cluster_distance,
row_cluster_distance=self._row_cluster_distance,
frame_cluster_distance=self._frame_cluster_distance,
min_hit_charge=self._min_hit_charge,
max_hit_charge=self._max_hit_charge,
ignore_same_hits=self._ignore_same_hits,
noisy_pixels=noisy_pixels,
disabled_pixels=disabled_pixels)
self._cluster_hits.dtype.names = self._map_hit_field_names(self._cluster_hits.dtype.names) # Rename the data fields for the result
self._clusters.dtype.names = self._map_cluster_field_names(self._clusters.dtype.names) # Rename the data fields for the result
return self._cluster_hits[:n_hits], self._clusters[:n_clusters] |
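# A hedged end-to-end sketch for cluster_hits; the HitClusterizer import and
# the hit dtype follow pixel_clusterizer conventions but are assumptions here:
import numpy as np
from pixel_clusterizer.clusterizer import HitClusterizer

hits = np.zeros(3, dtype=[('event_number', '<i8'), ('frame', '<u1'),
                          ('column', '<u2'), ('row', '<u2'), ('charge', '<u2')])
hits['column'] = [1, 2, 10]
hits['row'] = [5, 5, 20]

clusterizer = HitClusterizer()
cluster_hits, clusters = clusterizer.cluster_hits(
    hits, noisy_pixels=[[10, 20]])  # the single-hit noisy cluster is dropped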
def _check_struct_compatibility(self, hits):
''' Takes the hit array and checks that the important data fields have the same data type as the clustered hit array and that the field names are correct.'''
for key, _ in self._cluster_hits_descr:
if key in self._hit_fields_mapping_inverse:
mapped_key = self._hit_fields_mapping_inverse[key]
else:
mapped_key = key
# Only check hit fields that contain hit information
if mapped_key in ['cluster_ID', 'is_seed', 'cluster_size', 'n_cluster']:
continue
if key not in hits.dtype.names:
raise TypeError('Required hit field "%s" not found.' % key)
if self._cluster_hits.dtype[mapped_key] != hits.dtype[key]:
raise TypeError('The dtype for hit data field "%s" does not match. Got/expected: %s/%s.' % (key, hits.dtype[key], self._cluster_hits.dtype[mapped_key]))
additional_hit_fields = set(hits.dtype.names) - set([key for key, val in self._cluster_hits_descr])
if additional_hit_fields:
logging.warning('Found additional hit fields: %s' % ", ".join(additional_hit_fields)) |
def add_mod(self, seq, mod):
"""Create a tree.{Complement, LookAhead, Neg, Until}"""
modstr = self.value(mod)
if modstr == '~':
seq.parser_tree = parsing.Complement(seq.parser_tree)
elif modstr == '!!':
seq.parser_tree = parsing.LookAhead(seq.parser_tree)
elif modstr == '!':
seq.parser_tree = parsing.Neg(seq.parser_tree)
elif modstr == '->':
seq.parser_tree = parsing.Until(seq.parser_tree)
return True |
def add_ruleclause_name(self, ns_name, rid) -> bool:
"""Create a tree.Rule"""
ns_name.parser_tree = parsing.Rule(self.value(rid))
return True |
def add_rules(self, bnf, r) -> bool:
"""Attach a parser tree to the dict of rules"""
bnf[r.rulename] = r.parser_tree
return True |
def add_rule(self, rule, rn, alts) -> bool:
"""Add the rule name"""
rule.rulename = self.value(rn)
rule.parser_tree = alts.parser_tree
return True |
def add_sequences(self, sequences, cla) -> bool:
"""Create a tree.Seq"""
if not hasattr(sequences, 'parser_tree'):
# forward sublevel of sequence as is
sequences.parser_tree = cla.parser_tree
else:
oldnode = sequences
if isinstance(oldnode.parser_tree, parsing.Seq):
oldpt = list(oldnode.parser_tree.ptlist)
else:
oldpt = [oldnode.parser_tree]
oldpt.append(cla.parser_tree)
sequences.parser_tree = parsing.Seq(*tuple(oldpt))
return True |
def add_alt(self, alternatives, alt) -> bool:
"""Create a tree.Alt"""
if not hasattr(alternatives, 'parser_tree'):
# forward sublevel of alt as is
if hasattr(alt, 'parser_tree'):
alternatives.parser_tree = alt.parser_tree
else:
alternatives.parser_tree = alt
else:
oldnode = alternatives
if isinstance(oldnode.parser_tree, parsing.Alt):
oldpt = list(oldnode.parser_tree.ptlist)
else:
oldpt = [oldnode.parser_tree]
oldpt.append(alt.parser_tree)
alternatives.parser_tree = parsing.Alt(*tuple(oldpt))
return True |
def add_read_sqstring(self, sequence, s):
"""Add a read_char/read_text primitive from simple quote string"""
v = self.value(s).strip("'")
if len(v) > 1:
sequence.parser_tree = parsing.Text(v)
return True
sequence.parser_tree = parsing.Char(v)
return True |
def add_range(self, sequence, begin, end):
"""Add a read_range primitive"""
sequence.parser_tree = parsing.Range(self.value(begin).strip("'"),
self.value(end).strip("'"))
return True |
def add_rpt(self, sequence, mod, pt):
"""Add a repeater to the previous sequence"""
modstr = self.value(mod)
if modstr == '!!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a lookahead rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
if modstr == '!':
# cursor on the REPEATER
self._stream.restore_context()
# log the error
self.diagnostic.notify(
error.Severity.ERROR,
"Cannot repeat a negated rule",
error.LocationInfo.from_stream(self._stream, is_error=True)
)
raise self.diagnostic
oldnode = sequence
sequence.parser_tree = pt.functor(oldnode.parser_tree)
return True |
def add_capture(self, sequence, cpt):
"""Create a tree.Capture"""
cpt_value = self.value(cpt)
sequence.parser_tree = parsing.Capture(cpt_value, sequence.parser_tree)
return True |
def add_bind(self, sequence, cpt):
"""Create a tree.Bind"""
cpt_value = self.value(cpt)
sequence.parser_tree = parsing.Bind(cpt_value, sequence.parser_tree)
return True |
def add_hook(self, sequence, h):
"""Create a tree.Hook"""
sequence.parser_tree = parsing.Hook(h.name, h.listparam)
return True |
def param_num(self, param, n):
"""Parse a int in parameter list"""
param.pair = (int(self.value(n)), int)
return True |
def param_str(self, param, s):
"""Parse a str in parameter list"""
param.pair = (self.value(s).strip('"'), str)
return True |
def param_char(self, param, c):
"""Parse a char in parameter list"""
param.pair = (self.value(c).strip("'"), str)
return True |
def param_id(self, param, i):
"""Parse a node name in parameter list"""
param.pair = (self.value(i), parsing.Node)
return True |
def hook_name(self, hook, n):
"""Parse a hook name"""
hook.name = self.value(n)
hook.listparam = []
return True |
def hook_param(self, hook, p):
"""Parse a hook parameter"""
hook.listparam.append(p.pair)
return True |
def add_directive2(self, sequence, d, s):
"""Add a directive in the sequence"""
sequence.parser_tree = parsing.Directive2(
d.name,
d.listparam,
s.parser_tree
)
return True |
def add_directive(self, sequence, d, s):
"""Add a directive in the sequence"""
if d.name in meta._directives:
the_class = meta._directives[d.name]
sequence.parser_tree = parsing.Directive(the_class(), d.listparam,
s.parser_tree)
elif d.name in meta._decorators:
the_class = meta._decorators[d.name]
sequence.parser_tree = parsing.Decorator(the_class, d.listparam,
s.parser_tree)
else:
raise TypeError("Unkown directive or decorator %s" % d.name)
return True |
def get_rules(self) -> parsing.Node:
"""
Parse the DSL and provide a dictionary of all resulting rules.
Called by the MetaGrammar class.
TODO: could be done in the rules property of parsing.BasicParser???
"""
res = None
try:
res = self.eval_rule('bnf_dsl')
if not res:
# we fail to parse, but error is not set
self.diagnostic.notify(
error.Severity.ERROR,
"Parse error in '%s' in EBNF bnf" % self._lastRule,
error.LocationInfo.from_maxstream(self._stream)
)
raise self.diagnostic
except error.Diagnostic as d:
d.notify(
error.Severity.ERROR,
"Parse error in '%s' in EBNF bnf" % self._lastRule
)
raise d
return res |
def to_yml(self):
"""
Return the YAML string representation of a Node.::
from pyrser.passes import to_yml
t = Node()
...
print(str(t.to_yml()))
"""
pp = fmt.tab([])
to_yml_item(self, pp.lsdata, "")
return str(pp) |
def ignore_cxx(self) -> bool:
"""Consume comments and whitespace characters."""
self._stream.save_context()
while not self.read_eof():
idxref = self._stream.index
if self._stream.peek_char in " \t\v\f\r\n":
while (not self.read_eof()
and self._stream.peek_char in " \t\v\f\r\n"):
self._stream.incpos()
if self.peek_text("//"):
while not self.read_eof() and not self.peek_char("\n"):
self._stream.incpos()
if not self.read_char("\n") and self.read_eof():
return self._stream.validate_context()
if self.peek_text("/*"):
while not self.read_eof() and not self.peek_text("*/"):
self._stream.incpos()
if not self.read_text("*/") and self.read_eof():
return self._stream.restore_context()
if idxref == self._stream.index:
break
return self._stream.validate_context() |
def add_state(self, s: State):
"""
All states in the register have a uid
"""
ids = id(s)
uid = len(self.states)
if ids not in self.states:
self.states[ids] = (uid, s) |
def to_dot(self) -> str:
"""
Provide a '.dot' representation of all States in the register.
"""
txt = ""
txt += "digraph S%d {\n" % id(self)
if self.label is not None:
txt += '\tlabel="%s";\n' % (self.label + '\l').replace('\n', '\l')
txt += "\trankdir=LR;\n"
#txt += '\tlabelloc="t";\n'
txt += '\tgraph [labeljust=l, labelloc=t, nojustify=true];\n'
txt += "\tesep=1;\n"
txt += '\tranksep="equally";\n'
txt += "\tnode [shape = circle];\n"
txt += "\tsplines = ortho;\n"
for s in self.states.values():
txt += s[1].to_dot()
txt += "}\n"
return txt |
def to_dot_file(self, fname: str):
"""
Write a '.dot' file.
"""
with open(fname, 'w') as f:
f.write(self.to_dot()) |
def to_png_file(self, fname: str):
    """
    Write a '.png' file.
    """
    # The 'pipes' module previously used here was removed in Python 3.13;
    # subprocess is a portable replacement (still requires graphviz's 'dot').
    import subprocess
    subprocess.run(['dot', '-Tpng', '-o', fname],
                   input=self.to_dot(), text=True, check=True) |
def to_fmt(self) -> str:
"""
Provide a useful representation of the register.
"""
infos = fmt.end(";\n", [])
s = fmt.sep(', ', [])
for ids in sorted(self.states.keys()):
s.lsdata.append(str(ids))
infos.lsdata.append(fmt.block('(', ')', [s]))
infos.lsdata.append("events:" + repr(self.events))
infos.lsdata.append(
"named_events:" + repr(list(self.named_events.keys()))
)
infos.lsdata.append("uid_events:" + repr(list(self.uid_events.keys())))
return infos |
def nextstate(self, newstate, treenode=None, user_data=None):
"""
Manage transition of state.
"""
if newstate is None:
return self
if isinstance(newstate, State) and id(newstate) != id(self):
return newstate
elif isinstance(newstate, StateEvent):
self.state_register.named_events[newstate.name] = True
return newstate.st
elif isinstance(newstate, StatePrecond):
return newstate.st
elif isinstance(newstate, StateHook):
# final API using PSL
newstate.call(treenode, user_data)
return newstate.st
return self |
def checkValue(self, v) -> State:
"""the str() of Values are stored internally for convenience"""
if self.wild_value:
return self.nextstate(self.values['*'])
elif str(v) in self.values:
return self.nextstate(self.values[str(v)])
return self |
def resetLivingState(self):
"""Only one Living State on the S0 of each StateRegister"""
# TODO: add some test to control number of instantiations of LivingState
# clean all living state on S0
must_delete = []
for idx, ls in enumerate(self.ls):
# TODO: alive by default on False, change to True on the first match
ids = id(ls[1].thestate())
if ids == id(ls[0]) and (ls[1].have_finish or not ls[1].alive):
must_delete.append(idx)
elif ls[1].alive:
ls[1].alive = False
for delete in reversed(must_delete):
self.ls.pop(delete)
self.init_all() |
def infer_type(self, init_scope: Scope=None, diagnostic=None):
"""
Do inference. Write infos into the diagnostic object; if this parameter
is not provided and self is an AST (has its own diagnostic object),
use the diagnostic of self.
"""
# create the first .infer_node
if not hasattr(self, 'infer_node'):
self.infer_node = InferNode(init_scope)
elif init_scope is not None:
# only change the root scope
self.infer_node.scope_node = init_scope
# get algo
type_algo = self.type_algos()
if diagnostic is None and hasattr(self, 'diagnostic'):
diagnostic = self.diagnostic
type_algo[0](type_algo[1], diagnostic) |
def feedback(self, diagnostic=None):
"""
Do feedback. Write infos into the diagnostic object; if this parameter
is not provided and self is an AST (has its own diagnostic object),
use the diagnostic of self.
"""
# get algo
type_algo = self.type_algos()
if diagnostic is None and hasattr(self, 'diagnostic'):
diagnostic = self.diagnostic
type_algo[2](diagnostic) |
def infer_block(self, body, diagnostic=None):
"""
Inferring the type of a block means typing each of its sub-elements
"""
# RootBlockStmt has its own .infer_node (created via infer_type)
for e in body:
e.infer_node = InferNode(parent=self.infer_node)
e.infer_type(diagnostic=diagnostic) |
def infer_subexpr(self, expr, diagnostic=None):
"""
Infer the type of the subexpression
"""
expr.infer_node = InferNode(parent=self.infer_node)
expr.infer_type(diagnostic=diagnostic) |
def infer_id(self, ident, diagnostic=None):
"""
Infer type from an ID!
- check if the ID is declared in the scope
- if not, the ID has a polymorphic type
"""
# check if ID is declared
#defined = self.type_node.get_by_symbol_name(ident)
defined = self.infer_node.scope_node.get_by_symbol_name(ident)
if len(defined) > 0:
# set from matchings declarations
#self.type_node.update(defined)
self.infer_node.scope_node.update(defined)
else:
diagnostic.notify(
Severity.ERROR,
"%s never declared" % self.value,
self.info
) |
def infer_literal(self, args, diagnostic=None):
"""
Infer type from a LITERAL!
The type of a literal depends on the language;
we adopt a basic convention.
"""
literal, t = args
#self.type_node.add(EvalCtx.from_sig(Val(literal, t)))
self.infer_node.scope_node.add(EvalCtx.from_sig(Val(literal, t))) |
def dump_nodes(self):
"""
Dump the tag, rule, id and value caches. For debugging.
example::
R = [
#dump_nodes
]
"""
print("DUMP NODE LOCAL INFOS")
try:
print("map Id->node name")
for k, v in self.id_cache.items():
print("[%d]=%s" % (k, v))
print("map tag->capture infos")
for k, v in self.tag_cache.items():
print("[%s]=%s" % (k, v))
print("map nodes->tag resolution")
for k, v in self.rule_nodes.items():
txt = "['%s']=%d" % (k, id(v))
if k in self.tag_cache:
tag = self.tag_cache[k]
txt += " tag <%s>" % tag
k = "%d:%d" % (tag._begin, tag._end)
if k in self._stream.value_cache:
txt += " cache <%s>" % self._stream.value_cache[k]
print(txt)
except Exception as err:
print("RECV Exception %s" % err)
import sys
sys.stdout.flush()
return True |
def list_dataset_uris(cls, base_uri, config_path):
"""Return list containing URIs with base URI."""
uri_list = []
parse_result = generous_parse_uri(base_uri)
bucket_name = parse_result.netloc
bucket = boto3.resource('s3').Bucket(bucket_name)
for obj in bucket.objects.filter(Prefix='dtool').all():
uuid = obj.key.split('-', 1)[1]
uri = cls.generate_uri(None, uuid, base_uri)
storage_broker = cls(uri, config_path)
if storage_broker.has_admin_metadata():
uri_list.append(uri)
return uri_list |
def get_item_abspath(self, identifier):
"""Return absolute path at which item content can be accessed.
:param identifier: item identifier
:returns: absolute path from which the item content can be accessed
"""
admin_metadata = self.get_admin_metadata()
uuid = admin_metadata["uuid"]
# Create directory for the specific dataset.
dataset_cache_abspath = os.path.join(self._s3_cache_abspath, uuid)
mkdir_parents(dataset_cache_abspath)
bucket_fpath = self.data_key_prefix + identifier
obj = self.s3resource.Object(self.bucket, bucket_fpath)
relpath = obj.get()['Metadata']['handle']
_, ext = os.path.splitext(relpath)
local_item_abspath = os.path.join(
dataset_cache_abspath,
identifier + ext
)
if not os.path.isfile(local_item_abspath):
tmp_local_item_abspath = local_item_abspath + ".tmp"
self.s3resource.Bucket(self.bucket).download_file(
bucket_fpath,
tmp_local_item_abspath
)
os.rename(tmp_local_item_abspath, local_item_abspath)
return local_item_abspath |
def list_overlay_names(self):
"""Return list of overlay names."""
bucket = self.s3resource.Bucket(self.bucket)
overlay_names = []
for obj in bucket.objects.filter(
Prefix=self.overlays_key_prefix
).all():
overlay_file = obj.key.rsplit('/', 1)[-1]
overlay_name, ext = overlay_file.rsplit('.', 1)  # allow dots in overlay names
overlay_names.append(overlay_name)
return overlay_names |
def add_item_metadata(self, handle, key, value):
"""Store the given key:value pair for the item associated with handle.
:param handle: handle for accessing an item before the dataset is
frozen
:param key: metadata key
:param value: metadata value
"""
identifier = generate_identifier(handle)
suffix = '{}.{}.json'.format(identifier, key)
bucket_fpath = self.fragments_key_prefix + suffix
self.s3resource.Object(self.bucket, bucket_fpath).put(
Body=json.dumps(value)
) |
def iter_item_handles(self):
"""Return iterator over item handles."""
bucket = self.s3resource.Bucket(self.bucket)
for obj in bucket.objects.filter(Prefix=self.data_key_prefix).all():
relpath = obj.get()['Metadata']['handle']
yield relpath |
def get_item_metadata(self, handle):
"""Return dictionary containing all metadata associated with handle.
In other words all the metadata added using the ``add_item_metadata``
method.
:param handle: handle for accessing an item before the dataset is
frozen
:returns: dictionary containing item metadata
"""
bucket = self.s3resource.Bucket(self.bucket)
metadata = {}
identifier = generate_identifier(handle)
prefix = self.fragments_key_prefix + '{}'.format(identifier)
for obj in bucket.objects.filter(Prefix=prefix).all():
metadata_key = obj.key.split('.')[-2]
response = obj.get()
value_as_string = response['Body'].read().decode('utf-8')
value = json.loads(value_as_string)
metadata[metadata_key] = value
return metadata |
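# Hedged round-trip sketch for the two methods above (assumes an initialised
# storage broker and an item handle in a not-yet-frozen dataset):
# broker.add_item_metadata(handle, key='mimetype', value='image/png')
# broker.get_item_metadata(handle)   # -> {'mimetype': 'image/png'}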
def parserrule_topython(parser: parsing.BasicParser,
rulename: str) -> ast.FunctionDef:
"""Generates code for a rule.
def rulename(self):
<code for the rule>
return True
"""
visitor = RuleVisitor()
rule = parser._rules[rulename]
fn_args = ast.arguments([ast.arg('self', None)], None, None, [], None,
None, [], [])
body = visitor._clause(rule_topython(rule))
body.append(ast.Return(ast.Name('True', ast.Load())))
return ast.FunctionDef(rulename, fn_args, body, [], None) |
def __exit_scope(self) -> ast.stmt:
"""Create the appropriate scope exiting statement.
The documentation only shows one level and always uses
'return False' in examples.
'raise AltFalse()' within a try.
'break' within a loop.
'return False' otherwise.
"""
if self.in_optional:
return ast.Pass()
if self.in_try:
return ast.Raise(
ast.Call(ast.Name('AltFalse', ast.Load()), [], [], None, None),
None)
if self.in_loop:
return ast.Break()
return ast.Return(ast.Name('False', ast.Load())) |
def _clause(self, pt: parsing.ParserTree) -> [ast.stmt]:
"""Normalize a test expression into a statements list.
Statement lists are returned as-is.
An expression is packaged as:
if not expr:
return False
"""
if isinstance(pt, list):
return pt
return [ast.If(ast.UnaryOp(ast.Not(), pt),
[self.__exit_scope()],
[])] |
def visit_Call(self, node: parsing.Call) -> ast.expr:
"""Generates python code calling the function.
fn(*args)
"""
return ast.Call(
ast.Attribute(
ast.Name('self', ast.Load()),
node.callObject.__name__,
ast.Load()),
[ast.Str(param) for param in node.params],
[],
None,
None) |
def visit_CallTrue(self, node: parsing.CallTrue) -> ast.expr:
"""Generates python code calling the function and returning True.
lambda: fn(*args) or True
"""
return ast.Lambda(
ast.arguments([], None, None, [], None, None, [], []),
ast.BoolOp(
ast.Or(),
[
self.visit_Call(node),
ast.Name('True', ast.Load())])) |
def visit_Hook(self, node: parsing.Hook) -> ast.expr:
"""Generates python code calling a hook.
self.evalHook('hookname', self.ruleNodes[-1])
"""
return ast.Call(
ast.Attribute(
ast.Name('self', ast.Load()), 'evalHook', ast.Load()),
[
ast.Str(node.name),
ast.Subscript(
ast.Attribute(
ast.Name('self', ast.Load()), 'ruleNodes', ast.Load()),
ast.Index(ast.UnaryOp(ast.USub(), ast.Num(1))),
ast.Load())],
[],
None,
None) |
def visit_Rule(self, node: parsing.Rule) -> ast.expr:
"""Generates python code calling a rule.
self.evalRule('rulename')
"""
return ast.Call(
ast.Attribute(ast.Name('self', ast.Load()),
'evalRule', ast.Load()),
[ast.Str(node.name)], [], None, None) |
def visit_Capture(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code to capture text consumed by a clause.
#If all clauses can be inlined
self.beginTag('tagname') and clause and self.endTag('tagname')
if not self.beginTag('tagname'):
return False
<code for the clause>
if not self.endTag('tagname'):
return False
"""
begintag = ast.Attribute(
ast.Name('self', ast.Load()), 'beginTag', ast.Load())
endtag = ast.Attribute(
ast.Name('self', ast.Load()), 'endTag', ast.Load())
begin = ast.Call(begintag, [ast.Str(node.tagname)], [], None, None)
end = ast.Call(endtag, [ast.Str(node.tagname)], [], None, None)
result = [begin, self.visit(node.pt), end]
for clause in result:
if not isinstance(clause, ast.expr):
break
else:
return ast.BoolOp(ast.And(), result)
res = []
for stmt in map(self._clause, result):
res.extend(stmt)
return res |
def visit_Scope(self, node: parsing.Capture) -> [ast.stmt] or ast.expr:
"""Generates python code for a scope.
if not self.begin():
return False
res = self.pt()
if not self.end():
return False
return res
"""
# Scope generation is not implemented yet; emit a placeholder name.
return ast.Name('scope_not_implemented', ast.Load()) |
def visit_Alt(self, node: parsing.Alt) -> [ast.stmt]:
"""Generates python code for alternatives.
try:
try:
<code for clause> #raise AltFalse when alternative is False
raise AltTrue()
except AltFalse:
pass
return False
except AltTrue:
pass
"""
clauses = [self.visit(clause) for clause in node.ptlist]
for clause in clauses:
if not isinstance(clause, ast.expr):
break
else:
return ast.BoolOp(ast.Or(), clauses)
res = ast.Try([], [ast.ExceptHandler(
ast.Name('AltTrue', ast.Load()), None, [ast.Pass()])], [], [])
alt_true = [ast.Raise(ast.Call(
ast.Name('AltTrue', ast.Load()), [], [], None, None), None)]
alt_false = [ast.ExceptHandler(
ast.Name('AltFalse', ast.Load()), None, [ast.Pass()])]
self.in_try += 1
for clause in node.ptlist:
res.body.append(
ast.Try(self._clause(self.visit(clause)) + alt_true,
alt_false, [], []))
self.in_try -= 1
res.body.append(self.__exit_scope())
return [res] |
def visit_Seq(self, node: parsing.Seq) -> [ast.stmt] or ast.expr:
"""Generates python code for clauses.
#Consecutive clauses which can be inlined are combined with and
clause and clause
if not clause:
return False
if not clause:
return False
"""
exprs, stmts = [], []
for clause in node.ptlist:
clause_ast = self.visit(clause)
if isinstance(clause_ast, ast.expr):
exprs.append(clause_ast)
else:
if exprs:
stmts.extend(self.combine_exprs_for_clauses(exprs))
exprs = []
stmts.extend(self._clause(clause_ast))
if not stmts:
return ast.BoolOp(ast.And(), exprs)
if exprs:
stmts.extend(self.combine_exprs_for_clauses(exprs))
return stmts |
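# Illustration (a sketch) of what visit_Seq emits for a sequence `a b c`:
# if every clause compiles to an expression, the result is the single
#     a and b and c
# whereas if `b` needs statements, the output becomes roughly:
#     if not a:
#         return False
#     <statements for b>
#     if not c:
#         return False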
def visit_RepOptional(self, node: parsing.RepOptional) -> ([ast.stmt] or
ast.expr):
"""Generates python code for an optional clause.
<code for the clause>
"""
self.in_optional += 1
cl_ast = self.visit(node.pt)
self.in_optional -= 1
if isinstance(cl_ast, ast.expr):
    return ast.BoolOp(ast.Or(), [cl_ast, ast.Name('True', ast.Load())])
return cl_ast |
def visit_Rep0N(self, node: parsing.Rep0N) -> [ast.stmt]:
"""Generates python code for a clause repeated 0 or more times.
#If all clauses can be inlined
while clause:
pass
while True:
<code for the clause>
"""
cl_ast = self.visit(node.pt)
if isinstance(cl_ast, ast.expr):
return [ast.While(cl_ast, [ast.Pass()], [])]
self.in_loop += 1
clause = self._clause(self.visit(node.pt))
self.in_loop -= 1
return [ast.While(ast.Name('True', ast.Load()), clause, [])] |
def visit_Rep1N(self, node: parsing.Rep1N) -> [ast.stmt]:
"""Generates python code for a clause repeated 1 or more times.
<code for the clause>
while True:
<code for the clause>
"""
clause = self.visit(node.pt)
if isinstance(clause, ast.expr):
return (self._clause(clause) + self.visit_Rep0N(node))
self.in_loop += 1
clause = self._clause(self.visit(node.pt))
self.in_loop -= 1
return self._clause(self.visit(node.pt)) + [
ast.While(ast.Name('True', ast.Load()), clause, [])] |
def synthese(self, month=None):
"""
month format: YYYYMM
"""
if month is None and self.legislature == '2012-2017':
raise AssertionError('Global Synthesis on legislature does not work, see https://github.com/regardscitoyens/nosdeputes.fr/issues/69')
if month is None:
month = 'data'
url = '%s/synthese/%s/%s' % (self.base_url, month, self.format)
data = requests.get(url).json()
return [depute[self.ptype] for depute in data[self.ptype_plural]] |
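# Hedged usage sketch (assumes the CPCApi client of the cpc_api package,
# to which this method appears to belong):
# api = CPCApi()
# api.synthese('201406')  # month as YYYYMM; the legislature caveat above applies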
def catend(dst: str, src: str, indent) -> str:
"""cat two strings but handle \n for tabulation"""
res = dst
txtsrc = src
if not isinstance(src, str):
txtsrc = str(src)
for c in list(txtsrc):
if len(res) > 0 and res[-1] == '\n':
res += (indentable.char_indent * indentable.num_indent) * \
(indent - 1) + c
else:
res += c
return res |
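# Worked example (assuming indentable.char_indent == ' ' and
# indentable.num_indent == 4, i.e. a four-space indent unit):
# catend("x\n", "y", 2) == "x\n    y"   # 'y' lands one indent level in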
def list_set_indent(lst: list, indent: int=1):
"""recurs into list for indentation"""
for i in lst:
if isinstance(i, indentable):
i.set_indent(indent)
if isinstance(i, list):
list_set_indent(i, indent) |
def list_to_str(lst: list, content: str, indent: int=1):
"""recurs into list for string computing """
for i in lst:
if isinstance(i, indentable):
content = i.to_str(content, indent)
elif isinstance(i, list):
content = list_to_str(i, content, indent)
elif isinstance(i, str):
content = catend(content, i, indent)
return content |
def echo_nodes(self, *rest):
"""
Print nodes.
example::
R = [
In : node #echo("coucou", 12, node)
]
"""
txt = ""
for thing in rest:
if isinstance(thing, Node):
txt += self.value(thing)
else:
txt += str(thing)
print(txt)
return True |
def populate_from_sequence(seq: list, r: ref(Edge), sr: state.StateRegister):
""" function that connect each other one sequence of MatchExpr. """
base_state = r
# we need to detect the last state of the sequence
idxlast = len(seq) - 1
idx = 0
for m in seq:
# alternatives are represented by builtin list
if isinstance(m, list):
# so recursively connect all states of each alternative sequences.
for item in m:
populate_from_sequence(item, r, sr)
elif isinstance(m, MatchExpr):
# from the current state, is there an existing edge for this event?
eX = r().get_next_edge(m)
if eX is None:
sX = None
if idx != idxlast:
sX = state.State(sr)
sX.matchDefault(base_state().s)
else:
# last state of sequence return to the base
sX = base_state().s
eX = Edge(sX)
r().next_edge[id(sX)] = eX
m.attach(r().s, sX, sr)
r = ref(eX)
idx += 1 |
def populate_state_register(all_seq: [list], sr: state.StateRegister) -> Edge:
""" function that create a state for all instance
of MatchExpr in the given list and connect each others.
"""
# Basic State
s0 = state.State(sr)
# loops back on itself
s0.matchDefault(s0)
# this is default
sr.set_default_state(s0)
# use Edge to store connection
e0 = Edge(s0)
for seq in all_seq:
r = ref(e0)
# merge all sequences into one tree automaton
populate_from_sequence(seq, r, sr)
# return edge for debug purpose
return e0 |
def build_state_tree(self, tree: list, sr: state.StateRegister):
""" main function for creating a bottom-up tree automata
for a block of matching statements.
"""
all_seq = []
# for all statements populate a list
# from deeper to nearer of MatchExpr instances.
for stmt in self.stmts:
part_seq = list()
stmt.build_state_tree(part_seq)
all_seq.append(part_seq)
# Walk on all MatchExpr instance
# and create State instance into the StateRegister
self.root_edge = populate_state_register(all_seq, sr) |
def pred_eq(self, n, val):
"""
Test if a node set with setint or setstr equals a certain value
example::
R = [
__scope__:n
['a' #setint(n, 12) | 'b' #setint(n, 14)]
C
[#eq(n, 12) D]
]
"""
v1 = n.value
v2 = val
if hasattr(val, 'value'):
v2 = val.value
if isinstance(v1, int) and not isinstance(v2, int):
return v1 == int(v2)
return v1 == v2 |
def from_string(bnf: str, entry=None, *optional_inherit) -> Grammar:
"""
Create a Grammar from a string
"""
inherit = [Grammar] + list(optional_inherit)
scope = {'grammar': bnf, 'entry': entry}
return build_grammar(tuple(inherit), scope) |
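# A hedged usage sketch for from_string; the rule syntax mirrors the EBNF
# used elsewhere in this codebase and should be treated as an assumption:
# Digits = from_string("num = [ ['0'..'9']+ ]", entry='num')
# tree = Digits().parse("42")  # instantiation assumed: from_string builds a class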
def from_file(fn: str, entry=None, *optional_inherit) -> Grammar:
"""
Create a Grammar from a file
"""
import os.path
if os.path.exists(fn):
    with open(fn, 'r') as f:
        bnf = f.read()
    inherit = [Grammar] + list(optional_inherit)
    scope = {'grammar': bnf, 'entry': entry, 'source': fn}
    return build_grammar(tuple(inherit), scope)
raise FileNotFoundError("File not found: %s" % fn) |
def parse(self, source: str=None, entry: str=None) -> parsing.Node:
"""Parse source using the grammar"""
self.from_string = True
if source is not None:
self.parsed_stream(source)
if entry is None:
entry = self.entry
if entry is None:
raise ValueError("No entry rule name defined for {}".format(
self.__class__.__name__))
return self._do_parse(entry) |
def parse_file(self, filename: str, entry: str=None) -> parsing.Node:
"""Parse filename using the grammar"""
self.from_string = False
import os.path
with open(filename, 'r') as f:
self.parsed_stream(f.read(), os.path.abspath(filename))
if entry is None:
entry = self.entry
if entry is None:
raise ValueError("No entry rule name defined for {}".format(
self.__class__.__name__))
return self._do_parse(entry) |