Search is not available for this dataset
text stringlengths 75 104k |
|---|
def get_defining_component(pe_pe):
    '''
    Get the BridgePoint component (C_C) that defines the packeable element
    *pe_pe*.
    '''
    if pe_pe is None:
        return None
    # Normalize to the PE_PE supertype if a subtype instance was passed.
    if type(pe_pe).__name__ != 'PE_PE':
        pe_pe = one(pe_pe).PE_PE[8001]()
    # Walk up through enclosing packages until a component (or nothing)
    # is reached.
    while True:
        ep_pkg = one(pe_pe).EP_PKG[8000]()
        if not ep_pkg:
            return one(pe_pe).C_C[8003]()
        pe_pe = one(ep_pkg).PE_PE[8001]()
def get_attribute_type(o_attr):
    '''
    Get the base data type (S_DT) associated with a BridgePoint attribute.
    '''
    # Referential attributes point at some base attribute; follow the
    # chain until the defining attribute is found.
    ref_o_attr = one(o_attr).O_RATTR[106].O_BATTR[113].O_ATTR[106]()
    if not ref_o_attr:
        return one(o_attr).S_DT[114]()
    return get_attribute_type(ref_o_attr)
def _get_data_type_name(s_dt):
    '''
    Convert a BridgePoint data type to a pyxtuml meta model type.

    Returns None for unsupported data types.
    '''
    core = one(s_dt).S_CDT[17]()
    if core and core.Core_Typ in range(1, 6):
        # Core types 1..5 map directly onto pyxtuml types by name.
        return s_dt.Name.upper()
    if one(s_dt).S_EDT[17]():
        # Enumerations are represented as integers.
        return 'INTEGER'
    base_s_dt = one(s_dt).S_UDT[17].S_DT[18]()
    if base_s_dt:
        # User defined type; resolve its base type recursively.
        return _get_data_type_name(base_s_dt)
def _get_related_attributes(r_rgo, r_rto):
    '''
    The two lists of attributes which relates two classes in an association.
    '''
    rgo_names = []
    rto_names = []
    same_oir = lambda ref: ref.OIR_ID == r_rgo.OIR_ID
    for o_ref in many(r_rto).O_RTIDA[110].O_REF[111](same_oir):
        rgo_names.append(one(o_ref).O_RATTR[108].O_ATTR[106]().Name)
        rto_names.append(one(o_ref).O_RTIDA[111].O_OIDA[110].O_ATTR[105]().Name)
    return rgo_names, rto_names
def mk_enum(s_edt):
    '''
    Create a named tuple from a BridgePoint enumeration.

    Enumerator names that collide with python keywords get a trailing
    underscore appended.
    '''
    s_dt = one(s_edt).S_DT[17]()
    reserved = ['False', 'None', 'True'] + keyword.kwlist
    names = [enum.Name + '_' if enum.Name in reserved else enum.Name
             for enum in many(s_edt).S_ENUM[27]()]
    Enum = collections.namedtuple(s_dt.Name, names)
    return Enum(*range(len(names)))
def mk_bridge(metamodel, s_brg):
    '''
    Create a python function from a BridgePoint bridge.
    '''
    semantics = s_brg.Action_Semantics_internal
    name = s_brg.Name

    def bridge(**kwargs):
        # Interpret the bridge action each time the function is invoked.
        return interpret.run_function(metamodel, name, semantics, kwargs)
    return bridge
def mk_external_entity(metamodel, s_ee):
    '''
    Create a python object from a BridgePoint external entity with bridges
    realized as python member functions.

    Returns a named tuple whose field names are the bridge names and
    whose values are callables interpreting the corresponding bridges.
    '''
    bridges = many(s_ee).S_BRG[19]()
    names = [s_brg.Name for s_brg in bridges]
    EE = collections.namedtuple(s_ee.Key_Lett, names)
    # Reuse the bridge list queried above instead of querying the model a
    # second time; this guarantees funcs always line up with names.
    funcs = [mk_bridge(metamodel, s_brg) for s_brg in bridges]
    return EE(*funcs)
def mk_function(metamodel, s_sync):
    '''
    Create a python function from a BridgePoint function.
    '''
    semantics = s_sync.Action_Semantics_internal
    name = s_sync.Name

    def function(**kwargs):
        # Interpret the function action each time it is invoked.
        return interpret.run_function(metamodel, name, semantics, kwargs)
    return function
def mk_constant(cnst_syc):
    '''
    Create a python value from a BridgePoint constant.

    Returns None for unsupported data types.
    '''
    s_dt = one(cnst_syc).S_DT[1500]()
    cnst_lsc = one(cnst_syc).CNST_LFSC[1502].CNST_LSC[1503]()
    if s_dt.Name == 'boolean':
        return cnst_lsc.Value.lower() == 'true'
    # The remaining supported types map directly onto python builtins.
    converters = {'integer': int, 'real': float, 'string': str}
    if s_dt.Name in converters:
        return converters[s_dt.Name](cnst_lsc.Value)
def mk_operation(metaclass, o_tfr):
    '''
    Create a python function that interprets that action of a BridgePoint class
    operation.

    Instance-based operations become regular methods; class-based ones
    become classmethods.
    '''
    o_obj = one(o_tfr).O_OBJ[115]()
    action = o_tfr.Action_Semantics_internal
    label = '%s::%s' % (o_obj.Name, o_tfr.Name)
    if o_tfr.Instance_Based:
        def operation(self, **kwargs):
            return interpret.run_operation(metaclass, label, action,
                                           kwargs, self)
        return operation

    def operation(cls, **kwargs):
        return interpret.run_operation(metaclass, label, action, kwargs, None)
    return classmethod(operation)
def mk_derived_attribute(metaclass, o_dbattr):
    '''
    Create a python property that interprets that action of a BridgePoint derived
    attribute.
    '''
    o_attr = one(o_dbattr).O_BATTR[107].O_ATTR[106]()
    o_obj = one(o_attr).O_OBJ[102]()
    label = '%s::%s' % (o_obj.Name, o_attr.Name)
    # The getter defers to the interpreter with the action semantics of
    # the derived attribute.
    getter = functools.partial(interpret.run_derived_attribute,
                               metaclass, label,
                               o_dbattr.Action_Semantics_internal,
                               o_attr.Name)
    return property(getter)
def mk_class(m, o_obj, derived_attributes=False):
    '''
    Create a pyxtuml class from a BridgePoint class.

    Walks the attributes of *o_obj* in model order, translating each into
    a (name, type) tuple, then defines the class, its unique identifiers,
    operations and derived attributes on the metamodel *m*.
    '''
    # O_ATTR instances form a linked list via R103 (succeeds/precedes);
    # start with the attribute that has no predecessor.
    first_filter = lambda selected: not one(selected).O_ATTR[103, 'succeeds']()
    o_attr = one(o_obj).O_ATTR[102](first_filter)
    attributes = list()
    while o_attr:
        s_dt = get_attribute_type(o_attr)
        ty = _get_data_type_name(s_dt)
        if not derived_attributes and one(o_attr).O_BATTR[106].O_DBATTR[107]():
            # Derived attributes are omitted silently here; they may be
            # mapped to python properties further down instead.
            pass
        elif not ty:
            logger.warning('Omitting unsupported attribute %s.%s ' %
                           (o_obj.Key_Lett, o_attr.Name))
        else:
            attributes.append((o_attr.Name, ty))
        o_attr = one(o_attr).O_ATTR[103, 'precedes']()
    metaclass = m.define_class(o_obj.Key_Lett, list(attributes), o_obj.Descrip)
    # Unique identifiers (O_ID) become pyxtuml indices I1, I2, ...
    for o_id in many(o_obj).O_ID[104]():
        o_oida = many(o_id).O_OIDA[105]()
        o_attrs = many(o_oida).O_ATTR[105]()
        if not derived_attributes and one(o_attrs).O_BATTR[106].O_DBATTR[107]():
            # Identifiers built from derived attributes cannot be kept.
            logger.warning('Omitting unique identifier %s.I%d' %
                           (o_obj.Key_Lett, o_id.Oid_ID + 1))
            continue
        names = [o_attr.Name for o_attr in o_attrs]
        m.define_unique_identifier(o_obj.Key_Lett, o_id.Oid_ID + 1, *names)
    # Class operations become python methods on the generated class.
    for o_tfr in many(o_obj).O_TFR[115]():
        fn = mk_operation(metaclass, o_tfr)
        setattr(metaclass.clazz, o_tfr.Name, fn)
    # Derived base attributes become read-only python properties.
    for o_dbattr in many(o_obj).O_ATTR[102].O_BATTR[106].O_DBATTR[107]():
        o_attr = one(o_dbattr).O_BATTR[107].O_ATTR[106]()
        fn = mk_derived_attribute(metaclass, o_dbattr)
        setattr(metaclass.clazz, o_attr.Name, fn)
    return metaclass
def mk_simple_association(m, r_simp):
    '''
    Create a pyxtuml association from a simple association in BridgePoint.
    '''
    r_rel = one(r_simp).R_REL[206]()
    r_form = one(r_simp).R_FORM[208]()
    r_part = one(r_simp).R_PART[207]()
    r_rgo = one(r_form).R_RGO[205]()
    r_rto = one(r_part).R_RTO[204]()
    if not r_form:
        # Unformalized association: use the other participant in place of
        # the missing formalizer, and its R_RTO as the referring side.
        logger.info('unformalized association R%s' % (r_rel.Numb))
        r_form = one(r_simp).R_PART[207](lambda sel: sel != r_part)
        r_rgo = one(r_form).R_RTO[204]()
    source_o_obj = one(r_rgo).R_OIR[203].O_OBJ[201]()
    target_o_obj = one(r_rto).R_OIR[203].O_OBJ[201]()
    source_ids, target_ids = _get_related_attributes(r_rgo, r_rto)
    # Phrases are only kept for reflexive associations, where they are
    # needed to tell the two ends apart.
    if source_o_obj.Obj_ID != target_o_obj.Obj_ID:
        source_phrase = target_phrase = ''
    else:
        source_phrase = r_part.Txt_Phrs
        target_phrase = r_form.Txt_Phrs
    m.define_association(rel_id=r_rel.Numb,
                         source_kind=source_o_obj.Key_Lett,
                         target_kind=target_o_obj.Key_Lett,
                         source_keys=source_ids,
                         target_keys=target_ids,
                         source_conditional=r_form.Cond,
                         target_conditional=r_part.Cond,
                         source_phrase=source_phrase,
                         target_phrase=target_phrase,
                         source_many=r_form.Mult,
                         target_many=r_part.Mult)
def mk_linked_association(m, r_assoc):
    '''
    Create pyxtuml associations from a linked association in BridgePoint.

    The association class (R_ASSR) is linked once to each participant,
    producing two pyxtuml associations with the same rel_id.
    '''
    r_rel = one(r_assoc).R_REL[206]()
    # The association class is the referring (formalizing) side.
    r_rgo = one(r_assoc).R_ASSR[211].R_RGO[205]()
    source_o_obj = one(r_rgo).R_OIR[203].O_OBJ[201]()

    def _mk_assoc(side1, side2):
        # Link the association class to the participant on side1;
        # conditionality/multiplicity is taken from the opposite side.
        r_rto = one(side1).R_RTO[204]()
        target_o_obj = one(r_rto).R_OIR[203].O_OBJ[201]()
        source_ids, target_ids = _get_related_attributes(r_rgo, r_rto)
        # Phrases are only kept for reflexive associations.
        if side1.Obj_ID != side2.Obj_ID:
            source_phrase = target_phrase = ''
        else:
            source_phrase = side1.Txt_Phrs
            target_phrase = side2.Txt_Phrs
        m.define_association(rel_id=r_rel.Numb,
                             source_kind=source_o_obj.Key_Lett,
                             target_kind=target_o_obj.Key_Lett,
                             source_keys=source_ids,
                             target_keys=target_ids,
                             source_conditional=side2.Cond,
                             target_conditional=False,
                             source_phrase=source_phrase,
                             target_phrase=target_phrase,
                             source_many=side2.Mult,
                             target_many=False)
    r_aone = one(r_assoc).R_AONE[209]()
    r_aoth = one(r_assoc).R_AOTH[210]()
    _mk_assoc(r_aone, r_aoth)
    _mk_assoc(r_aoth, r_aone)
def mk_subsuper_association(m, r_subsup):
    '''
    Create pyxtuml associations from a sub/super association in BridgePoint.
    '''
    r_rel = one(r_subsup).R_REL[206]()
    r_rto = one(r_subsup).R_SUPER[212].R_RTO[204]()
    super_o_obj = one(r_rto).R_OIR[203].O_OBJ[201]()
    # One association per subtype, each pointing at the supertype.
    for r_sub in many(r_subsup).R_SUB[213]():
        r_rgo = one(r_sub).R_RGO[205]()
        sub_o_obj = one(r_rgo).R_OIR[203].O_OBJ[201]()
        sub_ids, super_ids = _get_related_attributes(r_rgo, r_rto)
        m.define_association(rel_id=r_rel.Numb,
                             source_kind=sub_o_obj.Key_Lett,
                             target_kind=super_o_obj.Key_Lett,
                             source_keys=sub_ids,
                             target_keys=super_ids,
                             source_conditional=True,
                             target_conditional=False,
                             source_phrase='',
                             target_phrase='',
                             source_many=False,
                             target_many=False)
def mk_association(m, r_rel):
    '''
    Create a pyxtuml association from a R_REL in ooaofooa.

    Dispatches on the concrete R_REL subtype (via R206).
    '''
    inst = subtype(r_rel, 206)
    handlers = {
        'R_SIMP': mk_simple_association,
        'R_ASSOC': mk_linked_association,
        'R_SUBSUP': mk_subsuper_association,
        'R_COMP': mk_derived_association,
    }
    fn = handlers.get(type(inst).__name__)
    return fn(m, inst)
def mk_component(bp_model, c_c=None, derived_attributes=False):
    '''
    Create a pyxtuml meta model from a BridgePoint model.
    Optionally, restrict to classes and associations contained in the
    component c_c.
    '''
    target = Domain()
    # With no c_c given, the filter lets every element pass.
    c_c_filt = lambda sel: c_c is None or is_contained_in(sel, c_c)
    # Classes first, then associations between them.
    for o_obj in bp_model.select_many('O_OBJ', c_c_filt):
        mk_class(target, o_obj, derived_attributes)
    for r_rel in bp_model.select_many('R_REL', c_c_filt):
        mk_association(target, r_rel)
    # Model functions become callable symbols on the target domain.
    for s_sync in bp_model.select_many('S_SYNC', c_c_filt):
        fn = mk_function(target, s_sync)
        target.add_symbol(s_sync.Name, fn)
    # Enumerated data types become named tuples.
    for s_dt in bp_model.select_many('S_DT', c_c_filt):
        s_edt = one(s_dt).S_EDT[17]()
        if s_edt:
            enum = mk_enum(s_edt)
            target.add_symbol(s_dt.Name, enum)
    # Symbolic constants become plain python values.
    for cnst_csp in bp_model.select_many('CNST_CSP', c_c_filt):
        for cnst_syc in many(cnst_csp).CNST_SYC[1504]():
            value = mk_constant(cnst_syc)
            target.add_symbol(cnst_syc.Name, value)
    # Associations can only be formalized once all classes are defined.
    for ass in target.associations:
        ass.formalize()
    # Well-known external entities map onto builtin implementations;
    # anything else is interpreted from its bridges.
    for s_ee in bp_model.select_many('S_EE', c_c_filt):
        if s_ee.Key_Lett in ['LOG', 'ARCH', 'TIM', 'NVS', 'PERSIST']:
            target.add_symbol(s_ee.Key_Lett, getattr(builtin_ee, s_ee.Key_Lett))
        else:
            ee = mk_external_entity(target, s_ee)
            target.add_symbol(s_ee.Key_Lett, ee)
    return target
def load_metamodel(resource=None, load_globals=True):
    '''
    Load and return a metamodel expressed in ooaofooa from a *resource*.
    The resource may be either a filename, a path, or a list of filenames
    and/or paths.
    '''
    return _mk_loader(resource, load_globals).build_metamodel()
def load_component(resource, name=None, load_globals=True):
    '''
    Load and return a model from a *resource*. The resource may be either a
    filename, a path, or a list of filenames and/or paths.

    Optionally, restrict the resulting model to the component named *name*.
    '''
    loader = _mk_loader(resource, load_globals)
    # Forward *name* so a specific component can be selected; previously
    # the argument was accepted but silently ignored.
    return loader.build_component(name)
def delete_globals(m, disconnect=False):
    '''
    Remove global instances, e.g. the core data type integer.
    '''
    # The globally defined core data types occupy a fixed, contiguous
    # DT_ID range (presumably derived from the ooaofooa globals UUIDs --
    # TODO confirm against the globals.xtuml resource).
    filt = lambda sel: (247728914420827907967735776184937480192 <=
                        sel.DT_ID <=
                        247728914420827907967735776184937480208)
    for s_dt in m.select_many('S_DT', filt):
        # Delete the packageable-element supertype, the concrete subtype
        # (via R17), and finally the data type itself.
        xtuml.delete(one(s_dt).PE_PE[8001](), disconnect)
        xtuml.delete(subtype(s_dt, 17), disconnect)
        xtuml.delete(s_dt, disconnect)
def filename_input(self, path_or_filename):
    '''
    Open and read input from a *path or filename*, and parse its content.
    If the filename is a directory, files that ends with .xtuml located
    somewhere in the directory or sub directories will be loaded as well.
    '''
    if not os.path.isdir(path_or_filename):
        xtuml.ModelLoader.filename_input(self, path_or_filename)
        return
    # Recursively load every .xtuml file found below the directory.
    for dirpath, _, filenames in os.walk(path_or_filename):
        for filename in filenames:
            if filename.endswith('.xtuml'):
                xtuml.ModelLoader.filename_input(
                    self, os.path.join(dirpath, filename))
def build_component(self, name=None, derived_attributes=False):
    '''
    Instantiate and build a component from ooaofooa named *name* as a
    pyxtuml model. Classes, associations, attributes and unique
    identifiers, i.e. O_OBJ, R_REL, O_ATTR in ooaofooa, are defined in
    the resulting pyxtuml model.
    Optionally, control whether *derived attributes* shall be mapped into
    the resulting pyxtuml model as attributes or not.
    Furthermore, if no *name* is provided, the entire content of the
    ooaofooa model is instantiated into the pyxtuml model.
    '''
    mm = self.build_metamodel()
    c_c = mm.select_any('C_C', where(Name=name))
    # A named component that cannot be found is an error; no name at all
    # means "build everything" (c_c is None).
    if name and not c_c:
        raise OoaOfOoaException('Unable to find the component %s' % name)
    return mk_component(mm, c_c, derived_attributes)
def default_malformed_message_handler(worker, exc_info, message_parts):
    """The default malformed message handler for :class:`Worker`. It warns
    as a :exc:`MalformedMessage`.
    """
    exc_type, exc, tb = exc_info
    lines = traceback.format_exception_only(exc_type, exc)
    summary = lines[0].strip()
    if len(lines) > 1:
        # Multi-line exception descriptions are truncated to their first
        # line with an ellipsis.
        summary += '...'
    warn('<%s> occurred by %r' % (summary, message_parts), MalformedMessage)
def work(self, socket, call, args, kwargs, topics=()):
    """Calls a function and send results to the collector. It supports
    all of function actions. A function could return, yield, raise any
    packable objects.
    """
    task_id = uuid4_bytes()
    reply_socket, topics = self.replier(socket, topics, call.reply_to)
    # Without a reply socket, replies are dropped; the empty channel
    # keeps the send helpers callable.
    if reply_socket:
        channel = (call.call_id, task_id, topics)
    else:
        channel = (None, None, None)
    f, rpc_spec = self.find_call_target(call)
    # reject_if is an unbound predicate; bind it to the app first.
    if rpc_spec.reject_if.__get__(self.app)(call, topics):
        reply_socket and self.reject(reply_socket, call.call_id, topics)
        return
    reply_socket and self.accept(reply_socket, channel)
    success = False
    with self.catch_exceptions():
        try:
            val = self.call(call, args, kwargs, f, rpc_spec)
        except:
            # Report the exception to the caller before re-raising so
            # catch_exceptions() can also handle it locally.
            exc_info = sys.exc_info()
            self.raise_(reply_socket, channel, exc_info)
            reraise(*exc_info)
        success = True
    if not success:
        # catch_exceptions() hides exceptions.
        return
    if isinstance(val, Iterator):
        # Iterator result: stream each item as a YIELD reply, then BREAK.
        vals = val
        with self.catch_exceptions():
            try:
                try:
                    val = next(vals)
                except StopIteration:
                    pass
                else:
                    self.send_reply(reply_socket, YIELD, val, *channel)
                    for val in vals:
                        self.send_reply(reply_socket, YIELD, val, *channel)
                self.send_reply(reply_socket, BREAK, None, *channel)
            except:
                exc_info = sys.exc_info()
                self.raise_(reply_socket, channel, exc_info)
                reraise(*exc_info)
    else:
        # Plain result: a single RETURN reply.
        self.send_reply(reply_socket, RETURN, val, *channel)
def accept(self, reply_socket, channel):
    """Sends ACCEPT reply."""
    # The worker info defaults to an empty byte string when unset.
    self.send_raw(reply_socket, ACCEPT, self.info or b'', *channel)
def reject(self, reply_socket, call_id, topics=()):
    """Sends REJECT reply."""
    # The worker info defaults to an empty byte string when unset.
    self.send_raw(reply_socket, REJECT, self.info or b'', call_id, b'', topics)
def raise_(self, reply_socket, channel, exc_info=None):
    """Sends RAISE reply."""
    if not reply_socket:
        return
    if exc_info is None:
        exc_info = sys.exc_info()
    exc_type, exc, tb = exc_info
    # Walk to the innermost traceback frame; that is where the error
    # actually originated.
    while tb.tb_next is not None:
        tb = tb.tb_next
    if issubclass(exc_type, RemoteException):
        # Unwrap remote exceptions so the original type is reported.
        exc_type = exc_type.exc_type
    filename, lineno = tb.tb_frame.f_code.co_filename, tb.tb_lineno
    val = (exc_type, str(exc), filename, lineno)
    try:
        state = exc.__getstate__()
    except AttributeError:
        pass
    else:
        # Append extra state when the exception chooses to provide it.
        val += (state,)
    self.send_reply(reply_socket, RAISE, val, *channel)
def _call_wait(self, hints, name, args, kwargs, topics=(), raw=False,
               limit=None, retry=False, max_retries=None):
    """Allocates a call id and emit."""
    col = self.collector
    if not col.is_running():
        col.start()
    call_id = uuid4_bytes()
    # DUPLEX replies come back on the caller's own socket; otherwise the
    # collector's topic routes them.
    reply_to = (DUPLEX if self.socket is col.socket else col.topic)
    # Normal tuple is faster than namedtuple.
    header = self._make_header(name, call_id, reply_to, hints)
    payload = self._pack(args, kwargs, raw)
    # Use short names.
    def send_call():
        try:
            safe(send, self.socket, header, payload, topics, zmq.NOBLOCK)
        except zmq.Again:
            raise Undelivered('emission was not delivered')
    # Register the call before sending so replies cannot race the setup.
    col.prepare(call_id, self, name, args, kwargs)
    send_call()
    return col.establish(call_id, self.timeout, limit,
                         send_call if retry else None,
                         max_retries=max_retries)
def establish(self, call_id, timeout, limit=None,
              retry=None, max_retries=None):
    """Waits for the call is accepted by workers and starts to collect the
    results.
    """
    rejected = 0
    retried = 0
    results = []
    result_queue = self.result_queues[call_id]
    try:
        # Collect until the timeout expires or *limit* results arrive.
        with Timeout(timeout, False):
            while True:
                result = result_queue.get()
                if result is None:
                    # None in the queue signals a REJECT from a worker.
                    rejected += 1
                    if retry is not None:
                        if retried == max_retries:
                            break
                        retry()
                        retried += 1
                    continue
                results.append(result)
                if len(results) == limit:
                    break
    finally:
        del result_queue
        self.remove_result_queue(call_id)
    if not results:
        if rejected:
            raise Rejected('%d workers rejected' % rejected
                           if rejected != 1 else
                           'A worker rejected')
        else:
            raise WorkerNotFound('failed to find worker')
    return results
def dispatch_reply(self, reply, value):
    """Dispatches the reply to the proper queue."""
    method = reply.method
    call_id = reply.call_id
    task_id = reply.task_id
    if method & ACK:
        # ACCEPT/REJECT go to the per-call establishment queue.
        try:
            result_queue = self.result_queues[call_id]
        except KeyError:
            raise KeyError('already established or unprepared call')
        if method == ACCEPT:
            worker_info = value
            result = RemoteResult(self, call_id, task_id, worker_info)
            self.results[call_id][task_id] = result
            result_queue.put_nowait(result)
        elif method == REJECT:
            # None signals rejection to establish().
            result_queue.put_nowait(None)
    else:
        # Data replies are routed to the already-created RemoteResult.
        result = self.results[call_id][task_id]
        result.set_reply(reply.method, value)
def guess_type_name(value):
    '''
    Guess the type name of a serialized value.

    Returns one of 'BOOLEAN', 'REAL', 'INTEGER', 'STRING', 'UNIQUE_ID',
    or None when no pattern matches. Note: re.match only anchors at the
    start of the string, so a matching prefix is sufficient.
    '''
    text = str(value)
    if text.upper() in ('TRUE', 'FALSE'):
        return 'BOOLEAN'
    if re.match(r'(-)?(\d+)(\.\d+)', text):
        return 'REAL'
    if re.match(r'(-)?(\d+)', text):
        return 'INTEGER'
    if re.match(r'\'((\'\')|[^\'])*\'', text):
        return 'STRING'
    if re.match(r'\"([^\\\n]|(\\.))*?\"', text):
        return 'UNIQUE_ID'
def deserialize_value(ty, value):
    '''
    Deserialize a value of some type

    BOOLEAN accepts digits or the words true/false (any case), returning
    None otherwise. INTEGER and UNIQUE_ID accept plain integers or quoted
    GUIDs. STRING strips quotes and unescapes doubled single quotes.
    '''
    kind = ty.upper()
    if kind == 'BOOLEAN':
        if value.isdigit():
            return bool(int(value))
        folded = value.upper()
        if folded == 'FALSE':
            return False
        if folded == 'TRUE':
            return True
        return None
    if kind in ('INTEGER', 'UNIQUE_ID'):
        # Identifiers may be serialized as quoted GUIDs.
        if '"' in value:
            return uuid.UUID(value[1:-1]).int
        return int(value)
    if kind == 'REAL':
        return float(value)
    if kind == 'STRING':
        return value[1:-1].replace("''", "'")
def load_metamodel(resource):
    '''
    Load and return a metamodel from a *resource*. The *resource* may be either
    a filename, or a list of filenames.
    Usage example:
    >>> metamodel = xtuml.load_metamodel(['schema.sql', 'data.sql'])
    '''
    # Normalize a single filename into a one-element list.
    filenames = [resource] if isinstance(resource, str) else resource
    loader = ModelLoader()
    for filename in filenames:
        loader.filename_input(filename)
    return loader.build_metamodel()
def input(self, data, name='<string>'):
    '''
    Parse *data* directly from a string. The *name* is used when reporting
    positional information if the parser encounter syntax errors.
    '''
    # Build a PLY lexer bound to this instance; optimize=1 reuses the
    # pre-generated lextab module when available.
    lexer = lex.lex(debuglog=logger,
                    errorlog=logger,
                    optimize=1,
                    module=self,
                    outputdir=os.path.dirname(__file__),
                    lextab="xtuml.__xtuml_lextab")
    # The filename is attached so token/parse errors can reference it.
    lexer.filename = name
    logger.debug('parsing %s' % name)
    s = self.parser.parse(lexer=lexer, input=data, tracking=1)
    self.statements.extend(s)
def file_input(self, file_object):
    '''
    Read and parse data from a *file object*, i.e. the type of object
    returned by the builtin python function *open()*.
    '''
    data = file_object.read()
    return self.input(data, name=file_object.name)
def populate_classes(self, metamodel):
    '''
    Populate a *metamodel* with classes previously encountered from input.
    '''
    for stmt in self.statements:
        if not isinstance(stmt, CreateClassStmt):
            continue
        metamodel.define_class(stmt.kind, stmt.attributes)
def populate_associations(self, metamodel):
    '''
    Populate a *metamodel* with associations previously encountered from
    input.

    Each association is formalized immediately after being defined.
    '''
    for stmt in self.statements:
        if not isinstance(stmt, CreateAssociationStmt):
            continue
        # Cardinality strings encode many-ness ('M') and
        # conditionality ('C').
        assoc = metamodel.define_association(stmt.rel_id,
                                             stmt.source_kind,
                                             stmt.source_keys,
                                             'M' in stmt.source_cardinality,
                                             'C' in stmt.source_cardinality,
                                             stmt.source_phrase,
                                             stmt.target_kind,
                                             stmt.target_keys,
                                             'M' in stmt.target_cardinality,
                                             'C' in stmt.target_cardinality,
                                             stmt.target_phrase)
        assoc.formalize()
def populate_unique_identifiers(self, metamodel):
    '''
    Populate a *metamodel* with class unique identifiers previously
    encountered from input.
    '''
    for stmt in self.statements:
        if not isinstance(stmt, CreateUniqueStmt):
            continue
        metamodel.define_unique_identifier(stmt.kind, stmt.name,
                                           *stmt.attributes)
def _populate_matching_class(metamodel, kind, names, values):
'''
Populate a *metamodel* with a class that matches the given *insert
statement*.
'''
attributes = list()
for name, value in zip(names, values):
ty = guess_type_name(value)
attr = (name, ty)
attributes.append(attr)
return metamodel.define_class(kind, attributes) |
def _populate_instance_with_positional_arguments(metamodel, stmt):
    '''
    Populate a *metamodel* with an instance previously encountered from
    input that was defined using positional arguments.

    If the class is unknown, a matching class with generated attribute
    names (_0, _1, ...) is defined first.

    Raises ParsingException when a value cannot be deserialized.
    '''
    if stmt.kind.upper() not in metamodel.metaclasses:
        names = ['_%s' % idx for idx in range(len(stmt.values))]
        ModelLoader._populate_matching_class(metamodel, stmt.kind,
                                             names, stmt.values)
    metaclass = metamodel.find_metaclass(stmt.kind)
    if len(metaclass.attributes) != len(stmt.values):
        # logger.warn is deprecated; logger.warning is the supported name.
        logger.warning('%s:%d:schema mismatch' % (stmt.filename, stmt.lineno))
    inst = metamodel.new(stmt.kind)
    for attr, value in zip(metaclass.attributes, stmt.values):
        name, ty = attr
        py_value = deserialize_value(ty, value)
        if py_value is None:
            raise ParsingException("%s:%d:unable to deserialize "
                                   "%s to a %s" % (stmt.filename,
                                                   stmt.lineno,
                                                   value,
                                                   ty))
        # Bypass descriptor machinery; referential attributes are fixed
        # up later by populate_connections.
        inst.__dict__[name] = py_value
    return inst
def _populate_instance_with_named_arguments(metamodel, stmt):
    '''
    Populate a *metamodel* with an instance previously encountered from
    input that was defined using named arguments.

    Attributes not present in the statement are set to None.

    Raises ParsingException when a value cannot be deserialized.
    '''
    if stmt.kind.upper() not in metamodel.metaclasses:
        ModelLoader._populate_matching_class(metamodel, stmt.kind,
                                             stmt.names, stmt.values)
    metaclass = metamodel.find_metaclass(stmt.kind)
    # Attribute names are matched case-insensitively against the schema.
    schema_unames = [name.upper() for name in metaclass.attribute_names]
    inst_unames = [name.upper() for name in stmt.names]
    if set(inst_unames) - set(schema_unames):
        # logger.warn is deprecated; logger.warning is the supported name.
        logger.warning('%s:%d:schema mismatch' % (stmt.filename, stmt.lineno))
    inst = metamodel.new(stmt.kind)
    for name, ty in metaclass.attributes:
        uname = name.upper()
        if uname in inst_unames:
            idx = inst_unames.index(uname)
            value = deserialize_value(ty, stmt.values[idx])
            if value is None:
                # Report the original serialized value; previously the
                # (always None) deserialized result was reported, making
                # the message useless.
                raise ParsingException("%s:%d:unable to deserialize "
                                       "%s to a %s" % (stmt.filename,
                                                       stmt.lineno,
                                                       stmt.values[idx],
                                                       ty))
        else:
            value = None
        inst.__dict__[name] = value
    return inst
def populate_instances(self, metamodel):
    '''
    Populate a *metamodel* with instances previously encountered from
    input.
    '''
    for stmt in self.statements:
        if not isinstance(stmt, CreateInstanceStmt):
            continue
        # Statements carry attribute names only when named arguments
        # were used in the insert statement.
        populate = (self._populate_instance_with_named_arguments
                    if stmt.names else
                    self._populate_instance_with_positional_arguments)
        populate(metamodel, stmt)
def populate_connections(self, metamodel):
    '''
    Populate links in a *metamodel* with connections between them.
    '''
    # Index target instances by their identifying key so each source
    # instance can find its counterpart without a quadratic scan.
    # storage layout: {target_class: {key attribute set: {key: instances}}}
    storage = dict()
    for ass in metamodel.associations:
        source_class = ass.source_link.to_metaclass
        target_class = ass.target_link.to_metaclass
        if target_class not in storage:
            storage[target_class] = dict()
        link_key = frozenset(ass.source_link.key_map.values())
        if link_key not in storage[target_class]:
            storage[target_class][link_key] = dict()
        # Index every target instance under its identifying key.
        for other_inst in target_class.storage:
            inst_key = ass.source_link.compute_index_key(other_inst)
            if inst_key is None:
                continue
            if inst_key not in storage[target_class][link_key]:
                storage[target_class][link_key][inst_key] = xtuml.OrderedSet()
            storage[target_class][link_key][inst_key].add(other_inst)
        # Connect each source instance to all targets sharing its key.
        for inst in source_class.storage:
            inst_key = ass.source_link.compute_lookup_key(inst)
            if inst_key is None:
                continue
            if inst_key not in storage[target_class][link_key]:
                continue
            for other_inst in storage[target_class][link_key][inst_key]:
                ass.source_link.connect(other_inst, inst, check=False)
                ass.target_link.connect(inst, other_inst, check=False)
    # Once links are connected, referential attribute values are derived
    # from the links; drop the locally stored copies.
    for inst in metamodel.instances:
        metaclass = xtuml.get_metaclass(inst)
        for attr in metaclass.referential_attributes:
            if attr in inst.__dict__:
                delattr(inst, attr)
def populate(self, metamodel):
    '''
    Populate a *metamodel* with entities previously encountered from input.
    '''
    # Order matters: schema (classes, identifiers, associations) must be
    # in place before instances, and connections are made last.
    for step in (self.populate_classes,
                 self.populate_unique_identifiers,
                 self.populate_associations,
                 self.populate_instances,
                 self.populate_connections):
        step(metamodel)
def build_metamodel(self, id_generator=None):
    '''
    Build and return a *xtuml.MetaModel* containing previously loaded input.
    '''
    metamodel = xtuml.MetaModel(id_generator)
    self.populate(metamodel)
    return metamodel
def t_COMMA(self, t):
    r','
    # PLY token rule; the docstring above is the token regex.
    # endlexpos marks one past the token's last character.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_FRACTION(self, t):
    r'(\d+)(\.\d+)'
    # PLY token rule matching a decimal fraction, e.g. '1.25'.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_RELID(self, t):
    r'R[0-9]+'
    # PLY token rule matching a relation identifier, e.g. 'R12'.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_CARDINALITY(self, t):
    r'(1C)'
    # PLY token rule; only the literal '1C' is matched here ('1', 'M'
    # and 'MC' are handled via NUMBER/ID in the grammar).
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_ID(self, t):
    r'[A-Za-z_][\w_]*'
    # PLY token rule; promote reserved words (case-insensitive) to their
    # own token types.
    vup = t.value.upper()
    if vup in self.reserved:
        t.type = vup
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_LPAREN(self, t):
    r'\('
    # PLY token rule; the docstring above is the token regex.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_MINUS(self, t):
    r'-'
    # PLY token rule; the docstring above is the token regex.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_NUMBER(self, t):
    r'[0-9]+'
    # PLY token rule matching an unsigned integer literal.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_RPAREN(self, t):
    r'\)'
    # PLY token rule; the docstring above is the token regex.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_SEMICOLON(self, t):
    r';'
    # PLY token rule; the docstring above is the token regex.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_GUID(self, t):
    r'\"([^\\\n]|(\\.))*?\"'
    # PLY token rule matching a double-quoted string (used for GUIDs),
    # allowing backslash escapes but no raw newlines.
    t.endlexpos = t.lexpos + len(t.value)
    return t
def t_newline(self, t):
    r'\n+'
    # Track line numbers for error reporting; returning nothing makes
    # the lexer discard the newline run.
    t.lexer.lineno += len(t.value)
    t.endlexpos = t.lexpos + len(t.value)
def p_statement(self, p):
    '''
    statement : create_table_statement SEMICOLON
              | insert_into_statement SEMICOLON
              | create_rop_statement SEMICOLON
              | create_index_statement SEMICOLON
    '''
    # PLY grammar rule; the docstring above is the production.
    p[0] = p[1]
    # Attach positional information for later error reporting.
    p[0].offset = p.lexpos(1)
    p[0].lineno = p.lineno(1)
    p[0].filename = p.lexer.filename
def p_create_rop_statement(self, p):
    '''create_rop_statement : CREATE ROP REF_ID RELID FROM association_end TO association_end'''
    # PLY grammar rule; builds the statement from the relation id (p[4])
    # followed by the fields of both association ends (p[6] and p[8]).
    args = [p[4]]
    args.extend(p[6])
    args.extend(p[8])
    p[0] = CreateAssociationStmt(*args)
def p_cardinality_1(self, p):
    '''cardinality : NUMBER'''
    # Only the number 1 is a legal numeric cardinality.
    if p[1] != '1':
        raise ParsingException("illegal cardinality (%s) at %s:%d" % (p[1],
                               p.lexer.filename, p.lineno(1)))
    p[0] = p[1]
def p_cardinality_many(self, p):
    '''cardinality : ID'''
    # Only 'M' (many) and 'MC' (many conditional) are legal identifiers.
    if p[1] not in ['M', 'MC']:
        raise ParsingException("illegal cardinality (%s) at %s:%d" % (p[1],
                               p.lexer.filename, p.lineno(1)))
    p[0] = p[1]
def append_known_secrets(self):  # type: () -> None
    """
    Read key-value pair files with secrets. For example, .conf and .ini files.

    Each ``key=value`` line contributes its value (stripped of quotes and
    whitespace) when it is longer than 4 characters and not listed in
    self.false_positives.
    :return:
    """
    for file_name in self.files:
        if "~" in file_name:
            file_name = os.path.expanduser(file_name)
        if not os.path.isfile(file_name):
            print(
                "Don't have "
                + Back.BLACK
                + Fore.YELLOW
                + file_name
                + ", won't use."
            )
            continue
        # file_name is already expanded above; no need to expand again.
        with open(file_name, "r") as file:
            for line in file:
                if line and "=" in line:
                    # Split only on the first '=' so values that contain
                    # '=' themselves are kept intact (previously they
                    # were truncated at the second '=').
                    possible = line.split("=", 1)[1].strip(" \"'\n")
                    if len(possible) > 4 and possible not in self.false_positives:
                        self.secrets.append(possible)
def search_known_secrets(self):  # type: () -> None
    """
    Search a path for known secrets, outputing text and file when found
    :return:
    """
    count = 0
    here = os.path.abspath(self.source)
    # python 3 only!
    # for file in glob.glob(here + "/" + "**/*.*", recursive=True):
    # py 2
    # Collect every file below self.source (os.walk works on py2 and py3).
    matches = []
    for root, dirnames, filenames in os.walk(here + "/"):
        for filename in filenames:
            matches.append(os.path.join(root, filename))
    for file in matches:
        if os.path.isdir(file):
            continue
        with open(file) as f:
            try:
                contents = f.read()
            except UnicodeDecodeError:
                # Binary file; skip it.
                continue
            except Exception as e:
                print(e)
                print(file)
                raise
        # Record each (secret, highlighted line) hit per file in
        # self.found; the secret is colorized for terminal output.
        for secret in self.secrets:
            if secret in contents:
                for line in contents.split("\n"):
                    if secret in line:
                        self.found.setdefault(file, []).append(
                            (
                                secret,
                                line.replace(
                                    secret,
                                    Fore.RED
                                    + Back.YELLOW
                                    + secret
                                    + Style.RESET_ALL,
                                ),
                            )
                        )
                        count += 1
def eid(s):
    '''Encode id (bytes) as a Unicode string.
    The encoding is done such that lexicographic order is
    preserved. No concern is given to wasting space.
    The inverse of ``eid`` is ``did``.

    NOTE(review): Python 2 specific -- relies on the ``unicode`` builtin
    and on iterating a byte string yielding one-character strings for
    ``ord``.
    '''
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    # Two lowercase hex digits per byte preserves lexicographic order.
    return u''.join('{:02x}'.format(ord(b)) for b in s)
def did(s):
    '''Decode id (Unicode string) as a bytes.
    The inverse of ``did`` is ``eid``.

    NOTE(review): Python 2 specific (``xrange``; ``chr`` yields a byte
    string character on py2).
    '''
    # Consume the hex string two digits (one byte) at a time.
    return ''.join(chr(int(s[i:i+2], base=16)) for i in xrange(0, len(s), 2))
def get(self, content_id, feature_names=None):
    '''Retrieve a feature collection.
    If a feature collection with the given id does not
    exist, then ``None`` is returned.
    :param str content_id: Content identifier.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: :class:`dossier.fc.FeatureCollection` or ``None``
    '''
    # The previous trailing `except: raise` clause was a no-op and has
    # been removed; any exception other than NotFoundError still
    # propagates unchanged.
    try:
        resp = self.conn.get(index=self.index, doc_type=self.type,
                             id=eid(content_id),
                             _source=self._source(feature_names))
        return self.fc_from_dict(resp['_source']['fc'])
    except NotFoundError:
        return None
def get_many(self, content_ids, feature_names=None):
    '''Returns an iterable of feature collections.
    This efficiently retrieves multiple FCs corresponding to the
    list of ids given. Tuples of identifier and feature collection
    are yielded. If the feature collection for a given id does not
    exist, then ``None`` is returned as the second element of the
    tuple.
    :param [str] content_ids: List of content ids.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    try:
        # NOTE(review): map() returns a list on Python 2; on Python 3
        # this would pass a lazy map object as the request body --
        # confirm the targeted interpreter.
        resp = self.conn.mget(index=self.index, doc_type=self.type,
                              _source=self._source(feature_names),
                              body={'ids': map(eid, content_ids)})
    except TransportError:
        # Best effort: transport failures yield an empty result set.
        return
    for doc in resp['docs']:
        fc = None
        if doc['found']:
            fc = self.fc_from_dict(doc['_source']['fc'])
        yield did(doc['_id']), fc
def put(self, items, indexes=True):
    '''Adds feature collections to the store.
    This efficiently adds multiple FCs to the store. The iterable
    of ``items`` given should yield tuples of ``(content_id, FC)``.
    :param items: Iterable of ``(content_id, FC)``.
    :param bool indexes:
        When True, also populate the configured keyword and fulltext
        indexes for each FC.
    '''
    actions = []
    for cid, fc in items:
        # TODO: If we store features in a columnar order, then we
        # could tell ES to index the feature values directly. ---AG
        # (But is problematic because we want to preserve the ability
        # to selectively index FCs. So we'd probably need two distinct
        # doc types.)
        idxs = defaultdict(list)
        if indexes:
            # Keyword indexes collect every value of the feature.
            for fname in self.indexed_features:
                if fname in fc:
                    idxs[fname_to_idx_name(fname)].extend(fc[fname])
            # Fulltext indexes store strings as-is; other feature
            # types contribute their values individually.
            # NOTE(review): ``basestring`` implies Python 2.
            for fname in self.fulltext_indexed_features:
                if fname not in fc:
                    continue
                if isinstance(fc[fname], basestring):
                    idxs[fname_to_full_idx_name(fname)] = fc[fname]
                else:
                    idxs[fname_to_full_idx_name(fname)].extend(fc[fname])
        # The serialized FC is stored alongside its index fields.
        actions.append({
            '_index': self.index,
            '_type': self.type,
            '_id': eid(cid),
            '_op_type': 'index',
            '_source': dict(idxs, **{
                'fc': self.fc_to_dict(fc),
            }),
        })
    bulk(self.conn, actions, timeout=60, request_timeout=60)
def delete(self, content_id):
    '''Deletes the corresponding feature collection.
    If the FC does not exist, then this is a no-op.
    '''
    try:
        self.conn.delete(index=self.index, doc_type=self.type,
                         id=eid(content_id))
    except NotFoundError:
        # Already gone; deletion is idempotent.
        pass
def delete_all(self):
    '''Deletes all feature collections.
    This does not destroy the ES index, but instead only
    deletes all FCs with the configured document type
    (defaults to ``fc``).
    '''
    try:
        self.conn.indices.delete_mapping(
            index=self.index, doc_type=self.type)
    except TransportError:
        # logger.warn is deprecated; logger.warning is the supported name.
        logger.warning('type %r in index %r already deleted',
                       self.index, self.type, exc_info=True)
def delete_index(self):
    '''Deletes the underlying ES index.
    Only use this if you know what you're doing. This destroys
    the entire underlying ES index, which could be shared by
    multiple distinct ElasticStore instances.
    '''
    if not self.conn.indices.exists(index=self.index):
        return
    self.conn.indices.delete(index=self.index)
def scan(self, *key_ranges, **kwargs):
    '''Scan for FCs whose ids fall in the given ranges.

    :param key_ranges:
        Pairs of ``(lower, upper)`` id bounds. Use ``()`` in either
        position to leave that side unbounded. With no ``key_ranges``,
        every FC in the store is yielded.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    for hit in self._scan(*key_ranges, **kwargs):
        fc = self.fc_from_dict(hit['_source']['fc'])
        yield did(hit['_id']), fc
def scan_ids(self, *key_ranges, **kwargs):
    '''Scan for ids only, in the given id ranges.

    :param key_ranges:
        Pairs of ``(lower, upper)`` id bounds. Use ``()`` in either
        position to leave that side unbounded. With no ``key_ranges``,
        every FC id in the store is yielded.
    :rtype: Iterable of ``content_id``
    '''
    # Suppress feature retrieval entirely; only ids are needed.
    kwargs['feature_names'] = False
    for result in self._scan(*key_ranges, **kwargs):
        yield did(result['_id'])
def scan_prefix(self, prefix, feature_names=None):
    '''Scan for FCs whose id starts with ``prefix``.

    :param str prefix: Identifier prefix.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    hits = self._scan_prefix(prefix, feature_names=feature_names)
    for hit in hits:
        fc = self.fc_from_dict(hit['_source']['fc'])
        yield did(hit['_id']), fc
def scan_prefix_ids(self, prefix):
    '''Scan for ids with a given prefix.

    Features are never fetched here (``feature_names=False``),
    which keeps the scan cheap.

    :param str prefix: Identifier prefix.
    :rtype: Iterable of ``content_id``
    '''
    # DOC FIX: the old docstring documented a ``feature_names``
    # parameter that this method does not accept.
    resp = self._scan_prefix(prefix, feature_names=False)
    for hit in resp:
        yield did(hit['_id'])
def fulltext_scan(self, query_id=None, query_fc=None, feature_names=None,
                  preserve_order=True, indexes=None):
    '''Fulltext search.

    Yields an iterable of triples ``(score, content_id, FC)``
    corresponding to the results of a fulltext search driven by the
    query feature collection.

    At least one of ``query_id`` or ``query_fc`` should be provided;
    presumably, when ``query_fc`` is ``None``, the query FC is looked
    up from ``query_id`` (mirrors :meth:`keyword_scan`) -- confirm in
    ``_fulltext_scan``.

    Note that, unless ``preserve_order`` is set to True, the
    ``score`` will always be 0.0, and the results will be
    unordered. ``preserve_order`` set to True will cause the
    results to be scored and be ordered by score, but you should
    expect to see a decrease in performance.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :param bool preserve_order: Score and order results when True.
    :param indexes: Optional restriction of the fulltext indexes used.
    :rtype: Iterable of ``(score, content_id, FC)``
    '''
    # DOC FIX: the old docstring documented nonexistent ``fname`` and
    # ``query`` parameters instead of the actual signature.
    it = self._fulltext_scan(query_id, query_fc,
                             feature_names=feature_names,
                             preserve_order=preserve_order,
                             indexes=indexes)
    for hit in it:
        fc = self.fc_from_dict(hit['_source']['fc'])
        yield hit['_score'], did(hit['_id']), fc
def fulltext_scan_ids(self, query_id=None, query_fc=None,
                      preserve_order=True, indexes=None):
    '''Fulltext search for identifiers.

    Yields an iterable of pairs ``(score, content_id)`` corresponding
    to the results of a fulltext search driven by the query feature
    collection. At least one of ``query_id`` or ``query_fc`` should be
    provided (mirrors :meth:`fulltext_scan`).

    Note that, unless ``preserve_order`` is set to True, the
    ``score`` will always be 0.0, and the results will be
    unordered. ``preserve_order`` set to True will cause the
    results to be scored and be ordered by score, but you should
    expect to see a decrease in performance.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :param bool preserve_order: Score and order results when True.
    :param indexes: Optional restriction of the fulltext indexes used.
    :rtype: Iterable of ``(score, content_id)``
    '''
    # DOC FIX: the old docstring documented nonexistent ``fname`` and
    # ``query`` parameters instead of the actual signature.
    it = self._fulltext_scan(query_id, query_fc, feature_names=False,
                             preserve_order=preserve_order,
                             indexes=indexes)
    for hit in it:
        yield hit['_score'], did(hit['_id'])
def keyword_scan(self, query_id=None, query_fc=None, feature_names=None):
    '''Keyword scan for feature collections.

    Performs a keyword scan using the given query: searches for FCs
    with terms in each of the query's indexed fields.

    At least one of ``query_id`` or ``query_fc`` must be provided.
    If ``query_fc`` is ``None``, then the query is retrieved
    automatically corresponding to ``query_id``.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :type query_fc: :class:`dossier.fc.FeatureCollection`
    :param [str] feature_names:
        A list of feature names to retrieve. When ``None``, all
        features are retrieved. Wildcards are allowed.
    :rtype: Iterable of ``(content_id, FC)``
    '''
    hits = self._keyword_scan(query_id, query_fc,
                              feature_names=feature_names)
    for hit in hits:
        yield did(hit['_id']), self.fc_from_dict(hit['_source']['fc'])
def keyword_scan_ids(self, query_id=None, query_fc=None):
    '''Keyword scan for ids.

    Performs a keyword scan using the given query: searches for FCs
    with terms in each of the query's indexed fields.

    At least one of ``query_id`` or ``query_fc`` must be provided.
    If ``query_fc`` is ``None``, then the query is retrieved
    automatically corresponding to ``query_id``.

    :param str query_id: Optional query id.
    :param query_fc: Optional query feature collection.
    :type query_fc: :class:`dossier.fc.FeatureCollection`
    :rtype: Iterable of ``content_id``
    '''
    hits = self._keyword_scan(query_id, query_fc, feature_names=False)
    for hit in hits:
        yield did(hit['_id'])
def index_scan_ids(self, fname, val):
    '''Low-level keyword index scan for ids.

    Retrieves identifiers of FCs that have a feature value
    ``val`` in the feature named ``fname``. Note that
    ``fname`` must be indexed.

    :param str fname: Feature name.
    :param str val: Feature value.
    :rtype: Iterable of ``content_id``
    '''
    # One term filter per underlying feature name mapped to this index.
    term_filters = [
        {'term': {fname_to_idx_name(name): val}}
        for name in self.indexes[fname]['feature_names']
    ]
    body = {
        '_source': False,
        'query': {
            'constant_score': {
                'filter': {'or': term_filters},
            },
        },
    }
    for hit in scan(self.conn, index=self.index, doc_type=self.type,
                    query=body):
        yield did(hit['_id'])
def _source(self, feature_names):
'''Maps feature names to ES's "_source" field.'''
if feature_names is None:
return True
elif isinstance(feature_names, bool):
return feature_names
else:
return map(lambda n: 'fc.' + n, feature_names) |
def _range_filters(self, *key_ranges):
'Creates ES filters for key ranges used in scanning.'
filters = []
for s, e in key_ranges:
if isinstance(s, basestring):
s = eid(s)
if isinstance(e, basestring):
# Make the range inclusive.
# We need a valid codepoint, so use the max.
e += u'\U0010FFFF'
e = eid(e)
if s == () and e == ():
filters.append({'match_all': {}})
elif e == ():
filters.append({'range': {'_id': {'gte': s}}})
elif s == ():
filters.append({'range': {'_id': {'lte': e}}})
else:
filters.append({'range': {'_id': {'gte': s, 'lte': e}}})
if len(filters) == 0:
return [{'match_all': {}}]
else:
return filters |
def _create_index(self):
    '''Create the ES index with the configured shard/replica counts.

    Creation failures are logged and ignored, on the assumption that
    the index already exists.
    '''
    try:
        self.conn.indices.create(
            index=self.index, timeout=60, request_timeout=60, body={
                'settings': {
                    'number_of_shards': self.shards,
                    'number_of_replicas': self.replicas,
                },
            })
    except TransportError:
        # Hope that this is an "index already exists" error...
        # (Removed a redundant ``pass`` that followed this log call.)
        logger.warn('index already exists? OK', exc_info=True)
def _create_mappings(self):
    '''Create the field type mapping for this store's document type.'''
    # Build the mapping body up front, then ship it in one call.
    mapping_body = {
        self.type: {
            'dynamic_templates': [{
                'default_no_analyze_fc': {
                    'match': 'fc.*',
                    'mapping': {'index': 'no'},
                },
            }],
            '_all': {
                'enabled': False,
            },
            '_id': {
                'index': 'not_analyzed',  # allows range queries
            },
            'properties': self._get_index_mappings(),
        },
    }
    self.conn.indices.put_mapping(
        index=self.index, doc_type=self.type,
        timeout=60, request_timeout=60,
        body=mapping_body)
    # It is possible to create an index and quickly launch a request
    # that will fail because the index hasn't been set up yet. Usually,
    # you'll get a "no active shards available" error.
    #
    # Since index creation is a very rare operation (it only happens
    # when the index doesn't already exist), we sit and wait for the
    # cluster to become healthy.
    self.conn.cluster.health(index=self.index, wait_for_status='yellow')
def _get_index_mappings(self):
    '''Build the ES field mappings for all indexed features.

    Keyword-indexed features get a non-analyzed field of the
    configured ``es_index_type`` (default ``integer``); fulltext
    features get an analyzed string field.
    '''
    maps = {}
    for fname in self.indexed_features:
        config = self.indexes.get(fname, {})
        # BUG FIX: a leftover debug ``print`` wrote to stdout here;
        # use the module logger instead.
        logger.debug('index mapping for %r: %r', fname, config)
        maps[fname_to_idx_name(fname)] = {
            'type': config.get('es_index_type', 'integer'),
            'store': False,
            'index': 'not_analyzed',
        }
    for fname in self.fulltext_indexed_features:
        maps[fname_to_full_idx_name(fname)] = {
            'type': 'string',
            'store': False,
            'index': 'analyzed',
        }
    return maps
def _get_field_types(self):
    '''Retrieve the field types from ES. Useful for debugging.'''
    resp = self.conn.indices.get_mapping(
        index=self.index, doc_type=self.type)
    return resp[self.index]['mappings'][self.type]['properties']
def _fc_index_disjunction_from_query(self, query_fc, fname):
    '''Creates a disjunction for keyword scan queries.

    Returns a list of ``terms`` clauses, one per underlying feature
    name mapped by the ``fname`` index; empty when the query FC has
    no values for ``fname``.
    '''
    if len(query_fc.get(fname, [])) == 0:
        return []
    terms = query_fc[fname].keys()
    disj = []
    # FIX: the loop variable previously shadowed the ``fname``
    # parameter; behavior was unchanged but the intent was obscured.
    for idx_fname in self.indexes[fname]['feature_names']:
        disj.append({'terms': {fname_to_idx_name(idx_fname): terms}})
    return disj
def fc_bytes(self, fc_dict):
    '''Take a feature collection in dict form and count its size in bytes.

    The size is the sum of ``len(feature)`` over all features.
    '''
    # FIX: ``iteritems`` is Python-2-only; ``values()`` works on both
    # Python 2 and 3 and the keys were never used anyway.
    return sum(len(feat) for feat in fc_dict.values())
def count_bytes(self, filter_preds):
    '''Count bytes of all feature collections whose key satisfies one of
    the predicates in ``filter_preds``. The byte counts are binned
    by filter predicate.
    '''
    totals = defaultdict(int)
    for hit in self._scan():
        # Decode the id once per hit instead of once per predicate.
        cid = did(hit['_id'])
        size = None
        for pred in filter_preds:
            if pred(cid):
                if size is None:
                    size = self.fc_bytes(hit['_source']['fc'])
                totals[pred] += size
    return totals
def pretty_string(fc):
    '''Render a feature collection as a human-readable string.

    Features are listed in sorted name order; StringCounter features
    are expanded to indented ``key: count`` lines.
    '''
    rendered = []
    for fname, feature in sorted(fc.items()):
        if isinstance(feature, StringCounter):
            counts = [u'%s: %d' % (key, count)
                      for key, count in feature.most_common()]
            feature = u'\n\t' + u'\n\t'.join(counts)
        rendered.append(fname + u': ' + feature)
    return u'\n'.join(rendered)
def get_lib_ffi_resource(module_name, libpath, c_hdr):
    '''
    module_name-->str: module name to retrieve resource
    libpath-->str: shared library filename with optional path
    c_hdr-->str: C-style header definitions for functions to wrap
    Returns-->(ffi, lib)

    Use this method when you are loading a package-specific shared
    library. For a system-wide shared library, use
    get_lib_ffi_shared instead.
    '''
    wrapper = SharedLibWrapper(libpath, c_hdr, module_name=module_name)
    return (wrapper.ffi, wrapper)
def get_lib_ffi_shared(libpath, c_hdr):
    '''
    libpath-->str: shared library filename with optional path
    c_hdr-->str: C-style header definitions for functions to wrap
    Returns-->(ffi, lib)
    '''
    wrapper = SharedLibWrapper(libpath, c_hdr)
    return (wrapper.ffi, wrapper)
def __openlib(self):
    '''
    Actual (lazy) dlopen() only when an attribute is accessed.

    Tries, in order: each ABI-tagged candidate from __get_libres(),
    then self._libpath as a package resource, then self._libpath in
    the directory of the already-imported module (REPL fallback).

    NOTE(review): _libloaded is only set in the final fallback path;
    a successful early dlopen leaves it False, so later attribute
    accesses re-enter this method -- confirm this is intended.
    '''
    if self.__getattribute__('_libloaded'):
        return
    libpath_list = self.__get_libres()
    for p in libpath_list:
        try:
            libres = resource_filename(self._module_name, p)
            self.lib = self.ffi.dlopen(libres)
            return
        # FIX: was a bare ``except:``, which also traps SystemExit
        # and KeyboardInterrupt.
        except Exception:
            continue
    # Try self._libpath if nothing in libpath_list worked - will work
    # only if self._module_name is set
    try:
        libres = resource_filename(self._module_name, self._libpath)
        self.lib = self.ffi.dlopen(libres)
    except Exception:
        # If self._module_name is in sys.modules, try self._libpath
        # in the same dir as sys.modules[self._module_name].__file__
        # This allows get_lib_ffi_shared to work in a REPL
        try:
            # We set _libloaded to indicate all options have been tried
            self._libloaded = True
            libdir = ''
            if self._module_name is not None:
                mod = sys.modules.get(self._module_name, None)
                if mod is not None:
                    libdir = os.path.dirname(mod.__file__) or os.getcwd()
            libres = os.path.join(libdir, self._libpath)
            self.lib = self.ffi.dlopen(libres)
        except Exception:
            return None
def __get_libres(self):
    '''
    Computes candidate shared-library paths based on module_name.
    Returns-->list of str lib paths to try

    ABI-version-tagged .so files: https://www.python.org/dev/peps/pep-3149/

    One unexplained bit: pypy appends '-' + multiarch (e.g.
    'x86_64-linux-gnu') while py2/py3 do not. Additionally, older
    pypy releases (e.g. build f3ad1e1e1d62, Aug-28-2015) return ''
    from sysconfig.get_config_var('SOABI') yet still tag the shared
    library with '.pypy-26', so for pypy that variant is tried too.
    Py2/Py3 _MAY_ start adding multiarch at some point, so both
    forms are generated; the plain libpath is always tried last by
    self.__openlib anyway. Order per interpreter (for efficiency):
        Python2/Python3: no-multiarch --> multiarch
        Pypy:            multiarch --> no-multiarch --> .pypy-26
    '''
    if self._module_name is None:
        return []
    suffix = '.so'
    stem = self._libpath.rsplit(suffix, 1)[0]
    soabi = sysconfig.get_config_var('SOABI')
    abi = '' if soabi is None else '.' + soabi
    march = sysconfig.get_config_var('MULTIARCH')
    march = '' if march is None else '-' + march
    if PYPY:
        return [stem + abi + march + suffix,
                stem + abi + suffix,
                stem + '.pypy-26' + suffix]
    return [stem + abi + suffix,
            stem + abi + march + suffix]
def process_docopts():  # type: ()->None
    """
    Parse command line options and dispatch to the secret searcher.
    """
    arguments = docopt(
        __doc__, version="Find Known Secrets {0}".format(__version__))
    logger.debug(arguments)
    if arguments["here"]:
        # All defaults.
        go()
    else:
        # User-supplied configuration.
        searcher = Searcher(source=arguments["--source"],
                            files=arguments["--secrets"])
        searcher.go()
async def api_postcode(request):
    """
    Gets data from a postcode.

    Returns the serialized postcode, 404 for an invalid postcode,
    500 on cache errors, or 503 when the circuit breaker is open.

    :param request: The aiohttp request.
    """
    postcode: Optional[str] = request.match_info.get('postcode', None)
    try:
        coroutine = get_postcode_random() if postcode == "random" else get_postcode(postcode)
        postcode: Optional[Postcode] = await coroutine
    except CachingError as e:
        return web.HTTPInternalServerError(body=e.status)
    except CircuitBreakerError:
        # BUG FIX: this error was silently swallowed, falling through
        # to an implicit ``return None`` which aiohttp rejects.
        # Report the backend as unavailable instead.
        return web.HTTPServiceUnavailable(text="Postcode lookup unavailable")
    if postcode is not None:
        return str_json_response(postcode.serialize())
    return web.HTTPNotFound(body="Invalid Postcode")
async def api_nearby(request):
    """
    Gets wikipedia articles near a given postcode.

    Raises 400 for a bad limit, 404 for an invalid postcode or no
    results, and 500 when the caches/backends fail.

    :param request: The aiohttp request.
    """
    postcode: Optional[str] = request.match_info.get('postcode', None)
    try:
        limit = int(request.match_info.get('limit', 10))
    except ValueError:
        raise web.HTTPBadRequest(text="Invalid Limit")
    try:
        coroutine = get_postcode_random() if postcode == "random" else get_postcode(postcode)
        postcode: Optional[Postcode] = await coroutine
    except CachingError as e:
        raise web.HTTPInternalServerError(body=e.status)
    if postcode is None:
        raise web.HTTPNotFound(text="Invalid Postcode")
    try:
        nearby_items = await fetch_nearby(postcode.lat, postcode.long, limit)
    except ApiError:
        # FIX: was a ``return`` amid ``raise``-style error handling,
        # and used an f-string with no placeholders.
        raise web.HTTPInternalServerError(
            text="No nearby locations cached, and can't be retrieved.")
    if nearby_items is None:
        raise web.HTTPNotFound(text="No Results")
    return str_json_response(nearby_items)
def default_formatter(error):
    """Escape the error, and wrap it in a span with class ``error-message``"""
    escaped = formencode.htmlfill.escape_formatter(error)
    return u'<span class="error-message">%s</span>' % (escaped,)
def pretty_to_link(inst, link):
    '''
    Create a human-readable representation of a link on the 'TO'-side.
    '''
    metaclass = xtuml.get_metaclass(inst)
    parts = []
    for attr_name, attr_ty in metaclass.attributes:
        if attr_name not in link.key_map:
            continue
        raw = getattr(inst, attr_name)
        serialized = xtuml.serialize_value(raw, attr_ty)
        parts.append('%s=%s' % (link.key_map[attr_name], serialized))
    return '%s(%s)' % (link.kind, ', '.join(parts))
def pretty_unique_identifier(inst, identifier):
    '''
    Create a human-readable representation of a unique identifier.
    '''
    metaclass = xtuml.get_metaclass(inst)
    parts = []
    for attr_name, attr_ty in metaclass.attributes:
        if attr_name not in metaclass.identifying_attributes:
            continue
        raw = getattr(inst, attr_name)
        serialized = xtuml.serialize_value(raw, attr_ty)
        parts.append('%s=%s' % (attr_name, serialized))
    return '%s(%s)' % (identifier, ', '.join(parts))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.