def start_engine(self, **kwargs):
"""
Initializes the workflow with given request, response objects and diagram name.
Args:
session:
input:
workflow_name (str): Name of workflow diagram without ".bpmn" suffix.
File must be placed under one of configured :py:attr:`~zengine.settings.WORKFLOW_PACKAGES_PATHS`
"""
self.current = WFCurrent(**kwargs)
self.wf_state = {'in_external': False, 'finished': False}
if not self.current.new_token:
self.wf_state = self.current.wf_cache.get(self.wf_state)
self.current.workflow_name = self.wf_state['name']
# if we have a pre-selected object to work with,
# inserting it as current.input['id'] and task_data['object_id']
if 'subject' in self.wf_state:
self.current.input['id'] = self.wf_state['subject']
self.current.task_data['object_id'] = self.wf_state['subject']
self.check_for_authentication()
self.check_for_permission()
self.workflow = self.load_or_create_workflow()
# if form data exists in input (user submitted)
# put form data in wf task_data
if 'form' in self.current.input:
form = self.current.input['form']
if 'form_name' in form:
self.current.task_data[form['form_name']] = form
# If the WF diagram sets the property init = True,
# the requested initial values are assigned and cached.
start_init_values = self.workflow_spec.wf_properties.get('init', 'False') == 'True'
if start_init_values:
WFInit = get_object_from_path(settings.WF_INITIAL_VALUES)()
WFInit.assign_wf_initial_values(self.current)
log_msg = ("\n\n::::::::::: ENGINE STARTED :::::::::::\n"
"\tWF: %s (Possible) TASK:%s\n"
"\tCMD:%s\n"
"\tSUBCMD:%s" % (
self.workflow.name,
self.workflow.get_tasks(Task.READY),
self.current.input.get('cmd'), self.current.input.get('subcmd')))
log.debug(log_msg)
sys._zops_wf_state_log = log_msg
self.current.workflow = self.workflow
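For illustration, the form handling above can be reduced to plain Python; the payload shape and form name below are assumptions, not zengine API:

# Hypothetical input payload mirroring the 'form' handling in start_engine.
current_input = {'form': {'form_name': 'LeaveRequestForm', 'days': 3}}
task_data = {}
if 'form' in current_input:
    form = current_input['form']
    if 'form_name' in form:
        # The whole form dict is stored in task_data under its own name.
        task_data[form['form_name']] = form
assert task_data['LeaveRequestForm']['days'] == 3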
def generate_wf_state_log(self):
"""
Logs the state of workflow and content of task_data.
"""
output = '\n- - - - - -\n'
output += "WORKFLOW: %s ( %s )" % (self.current.workflow_name.upper(),
self.current.workflow.name)
output += "\nTASK: %s ( %s )\n" % (self.current.task_name, self.current.task_type)
output += "DATA:"
for k, v in self.current.task_data.items():
if v:
output += "\n\t%s: %s" % (k, v)
output += "\nCURRENT:"
output += "\n\tACTIVITY: %s" % self.current.activity
output += "\n\tPOOL: %s" % self.current.pool
output += "\n\tIN EXTERNAL: %s" % self.wf_state['in_external']
output += "\n\tLANE: %s" % self.current.lane_name
output += "\n\tTOKEN: %s" % self.current.token
sys._zops_wf_state_log = output
return output
def switch_from_external_to_main_wf(self):
"""
Main workflow switcher.
This method recreates the main workflow from the `main_wf` dict which
was set previously by the external workflow switcher.
"""
# in_external was set to True in switch_to_external_wf.
# The external WF must reach its EndEvent (both task type and task name
# should be 'EndEvent') before switching back to the main WF.
if self.wf_state['in_external'] and self.current.task_type == 'EndEvent' and \
self.current.task_name == 'EndEvent':
# main_wf holds the information that was copied in switch_to_external_wf.
main_wf = self.wf_state['main_wf']
# Restore the main workflow's name as the current workflow name.
self.current.workflow_name = main_wf['name']
# For the main WF, check permission and authentication, but only after
# clearing the current task.
self._clear_current_task()
# Check auth and perm against the new workflow_name now that the current task is cleared.
self.check_for_authentication()
self.check_for_permission()
# Load the workflow spec of the main WF.
self.workflow_spec = self.get_worfklow_spec()
# Resume the main WF instance from where it left off.
self.workflow = self.deserialize_workflow(main_wf['step'])
# Make this WF instance the current WF.
self.current.workflow = self.workflow
# We are no longer in an external WF.
self.wf_state['in_external'] = False
# The main WF is still in progress, so it is not finished.
self.wf_state['finished'] = False
# Restore the pool info of the main WF.
self.wf_state['pool'] = main_wf['pool']
self.current.pool = self.wf_state['pool']
# Continue executing the main WF.
self.run()
def switch_to_external_wf(self):
"""
External workflow switcher.
This method copies the main workflow information into
a temporary dict `main_wf` and makes the external workflow
act as the main workflow.
"""
# External WF name should be stated at main wf diagram and type should be service task.
if (self.current.task_type == 'ServiceTask' and
self.current.task.task_spec.type == 'external'):
log.debug("Entering to EXTERNAL WF")
# Main wf information is copied to main_wf.
main_wf = self.wf_state.copy()
# workflow name from main wf diagram is assigned to current workflow name.
# workflow name must be either in task_data with key 'external_wf' or in main diagram's
# topic.
self.current.workflow_name = self.current.task_data.pop('external_wf', False) or self.\
current.task.task_spec.topic
# For the external WF, check permission and authentication, but only after
# clearing the current task.
self._clear_current_task()
# Check auth and perm against the new workflow_name now that the current task is cleared.
self.check_for_authentication()
self.check_for_permission()
# Load the workflow spec of the external WF.
self.workflow_spec = self.get_worfklow_spec()
# New WF instance is created for external wf.
self.workflow = self.create_workflow()
# Current WF is this WF instance.
self.current.workflow = self.workflow
# main_wf: the saved main WF information.
# in_external: an external WF is in progress.
# finished: the main WF has not finished; it is paused while the external WF runs.
self.wf_state = {'main_wf': main_wf, 'in_external': True, 'finished': False}
def _clear_current_task(self):
"""
Clears task-related attributes.
While switching from one WF to another, authentication and permissions
are checked against the new WF after this cleanup.
"""
self.current.task_name = None
self.current.task_type = None
self.current.task = None
def run(self):
"""
Main loop of the workflow engine
- Updates :class:`~WFCurrent` object.
- Checks for Permissions.
- Activates all READY tasks.
- Runs referenced activities (method calls).
- Saves WF states.
- Stops if current task is a UserTask or EndTask.
- Deletes state object if we finish the WF.
"""
# FIXME: raise if first task after line change isn't a UserTask
# FIXME: raise if last task of a workflow is a UserTask
# actually this check should be done at parser
is_lane_changed = False
while self._should_we_run():
self.check_for_rerun_user_task()
task = None
for task in self.workflow.get_tasks(state=Task.READY):
self.current.old_lane = self.current.lane_name
self.current._update_task(task)
if self.catch_lane_change():
return
self.check_for_permission()
self.check_for_lane_permission()
self.log_wf_state()
self.switch_lang()
self.run_activity()
self.parse_workflow_messages()
self.workflow.complete_task_from_id(self.current.task.id)
self._save_or_delete_workflow()
self.switch_to_external_wf()
if task is None:
break
self.switch_from_external_to_main_wf()
self.current.output['token'] = self.current.token
# look for incoming ready task(s)
for task in self.workflow.get_tasks(state=Task.READY):
self.current._update_task(task)
self.catch_lane_change()
self.handle_wf_finalization()
def check_for_rerun_user_task(self):
"""
Checks whether the last user task needs to be re-run.
If necessary, the states of the current task and its predecessor are changed
so the user task runs again.
The user task is re-run when 'wf_meta' is not in the input data (i.e. there was
no user interaction in the previous step), the last completed task is a UserTask,
the current step is not an EndEvent, and there is no lane change.
"""
data = self.current.input
if 'wf_meta' in data:
return
current_task = self.workflow.get_tasks(Task.READY)[0]
current_task_type = current_task.task_spec.__class__.__name__
pre_task = current_task.parent
pre_task_type = pre_task.task_spec.__class__.__name__
if pre_task_type != 'UserTask':
return
if current_task_type == 'EndEvent':
return
pre_lane = pre_task.task_spec.lane
current_lane = current_task.task_spec.lane
if pre_lane == current_lane:
pre_task._set_state(Task.READY)
current_task._set_state(Task.MAYBE)
def switch_lang(self):
"""Switch to the language of the current user.
If the current language is already the specified one, nothing will be done.
"""
locale = self.current.locale
translation.InstalledLocale.install_language(locale['locale_language'])
translation.InstalledLocale.install_locale(locale['locale_datetime'], 'datetime')
translation.InstalledLocale.install_locale(locale['locale_number'], 'number')
def catch_lane_change(self):
"""
Triggers a lane_user_change signal if we switched to a new lane
and the new lane's user is different from the current one.
"""
if self.current.lane_name:
if self.current.old_lane and self.current.lane_name != self.current.old_lane:
# if lane_id is not found in the pool or its user differs from the current (old) user
if (self.current.lane_id not in self.current.pool or
self.current.pool[self.current.lane_id] != self.current.user_id):
self.current.log.info("LANE CHANGE : %s >> %s" % (self.current.old_lane,
self.current.lane_name))
if self.current.lane_auto_sendoff:
self.current.sendoff_current_user()
self.current.flow_enabled = False
if self.current.lane_auto_invite:
self.current.invite_other_parties(self._get_possible_lane_owners())
return True
def parse_workflow_messages(self):
"""
Transmits the client message defined in
a workflow task's inputOutput extension.
.. code-block:: xml
<bpmn2:extensionElements>
<camunda:inputOutput>
<camunda:inputParameter name="client_message">
<camunda:map>
<camunda:entry key="title">Teşekkürler</camunda:entry>
<camunda:entry key="body">İşlem Başarılı</camunda:entry>
<camunda:entry key="type">info</camunda:entry>
</camunda:map>
</camunda:inputParameter>
</camunda:inputOutput>
</bpmn2:extensionElements>
"""
if 'client_message' in self.current.spec.data:
m = self.current.spec.data['client_message']
self.current.msg_box(title=m.get('title'),
msg=m.get('body'),
typ=m.get('type', 'info'))
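For illustration, the XML extension above reduces to a plain dict in `spec.data`, so the view effectively makes the following call:

# What self.current.spec.data would carry for the XML example above:
client_message = {'title': 'Teşekkürler', 'body': 'İşlem Başarılı', 'type': 'info'}
# ...so the view effectively calls:
# current.msg_box(title='Teşekkürler', msg='İşlem Başarılı', typ='info')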
def run_activity(self):
"""
Runs the method referenced by the current task.
"""
activity = self.current.activity
if activity:
if activity not in self.wf_activities:
self._load_activity(activity)
self.current.log.debug(
"Calling Activity %s from %s" % (activity, self.wf_activities[activity]))
self.wf_activities[self.current.activity](self.current)
def _import_object(self, path, look_for_cls_method):
"""
Imports the module that contains the referenced method.
Args:
path: python path of class/function
look_for_cls_method (bool): If True, treat the last part of path as class method.
Returns:
Tuple. (class object, class name, method to be called)
"""
last_nth = 2 if look_for_cls_method else 1
path = path.split('.')
module_path = '.'.join(path[:-last_nth])
class_name = path[-last_nth]
module = importlib.import_module(module_path)
# When looking for a class method, the last path component is the method name.
class_method = path[-1] if look_for_cls_method else None
return getattr(module, class_name), class_name, class_method
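A standalone sketch of the same splitting logic, runnable against the stdlib; the path used here is illustrative, not a zengine activity path:

import importlib

def import_object(path, look_for_cls_method):
    # Same splitting logic as _import_object above, made standalone.
    last_nth = 2 if look_for_cls_method else 1
    parts = path.split('.')
    module = importlib.import_module('.'.join(parts[:-last_nth]))
    class_name = parts[-last_nth]
    class_method = parts[-1] if look_for_cls_method else None
    return getattr(module, class_name), class_name, class_method

# 'json.JSONEncoder.encode' -> (<class 'json.JSONEncoder'>, 'JSONEncoder', 'encode')
obj, name, method = import_object('json.JSONEncoder.encode', True)
assert name == 'JSONEncoder' and method == 'encode'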
def _load_activity(self, activity):
"""
Iterates through all enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given activity.
"""
fpths = []
full_path = ''
errors = []
paths = settings.ACTIVITY_MODULES_IMPORT_PATHS
number_of_paths = len(paths)
for index_no in range(number_of_paths):
full_path = "%s.%s" % (paths[index_no], activity)
for look4kls in (0, 1):
try:
self.current.log.info("try to load from %s[%s]" % (full_path, look4kls))
kls, cls_name, cls_method = self._import_object(full_path, look4kls)
if cls_method:
self.current.log.info("WILLCall %s(current).%s()" % (kls, cls_method))
self.wf_activities[activity] = lambda crnt: getattr(kls(crnt), cls_method)()
else:
self.wf_activities[activity] = kls
return
except (ImportError, AttributeError):
fpths.append(full_path)
errmsg = "{activity} not found under these paths:\n\n >>> {paths} \n\n" \
"Error Messages:\n {errors}"
errors.append("\n========================================================>\n"
"| PATH | %s"
"\n========================================================>\n\n"
"%s" % (full_path, traceback.format_exc()))
assert index_no != number_of_paths - 1, errmsg.format(activity=activity,
paths='\n >>> '.join(
set(fpths)),
errors='\n\n'.join(errors)
)
except:
self.current.log.exception("Cannot find the %s" % activity)
def check_for_authentication(self):
"""
Checks current workflow against :py:data:`~zengine.settings.ANONYMOUS_WORKFLOWS` list.
Raises:
HTTPUnauthorized: if WF needs an authenticated user and current user isn't.
"""
auth_required = self.current.workflow_name not in settings.ANONYMOUS_WORKFLOWS
if auth_required and not self.current.is_auth:
self.current.log.debug("LOGIN REQUIRED:::: %s" % self.current.workflow_name)
raise HTTPError(401, "Login required for %s" % self.current.workflow_name)
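A minimal sketch of the rule this check implements; the workflow names below are assumed example values, not shipped settings:

ANONYMOUS_WORKFLOWS = ('login', 'password_reset')  # assumed example values

def needs_login(workflow_name, is_auth):
    # Mirrors check_for_authentication: anonymous WFs never require login.
    return workflow_name not in ANONYMOUS_WORKFLOWS and not is_auth

assert not needs_login('login', is_auth=False)
assert needs_login('leave_request', is_auth=False)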
def check_for_lane_permission(self):
"""
One or more permissions can be associated with a lane
of a workflow. In a similar way, a lane can be
restricted with relation to other lanes of the workflow.
This method is called on lane changes and checks that the user has the
required permissions and relations.
Raises:
HTTPForbidden: if the current user hasn't got the
required permissions and proper relations
"""
# TODO: Cache lane_data in app memory
if self.current.lane_permission:
log.debug("HAS LANE PERM: %s" % self.current.lane_permission)
perm = self.current.lane_permission
if not self.current.has_permission(perm):
raise HTTPError(403, "You don't have required lane permission: %s" % perm)
if self.current.lane_relations:
context = self.get_pool_context()
log.debug("HAS LANE RELS: %s" % self.current.lane_relations)
try:
cond_result = eval(self.current.lane_relations, context)
except:
log.exception("CONDITION EVAL ERROR : %s || %s" % (
self.current.lane_relations, context))
raise
if not cond_result:
log.debug("LANE RELATION ERR: %s %s" % (self.current.lane_relations, context))
raise HTTPError(403, "You aren't qualified for this lane: %s" %
self.current.lane_relations)
def check_for_permission(self):
# TODO: Works but not beautiful, needs review!
"""
Checks if current user (or role) has the required permission
for current workflow step.
Raises:
HTTPError: if user doesn't have required permissions.
"""
if self.current.task:
lane = self.current.lane_id
permission = "%s.%s.%s" % (self.current.workflow_name, lane, self.current.task_name)
else:
permission = self.current.workflow_name
log.debug("CHECK PERM: %s" % permission)
if (self.current.task_type not in PERM_REQ_TASK_TYPES or
permission.startswith(tuple(settings.ANONYMOUS_WORKFLOWS)) or
(self.current.is_auth and permission.startswith(tuple(settings.COMMON_WORKFLOWS)))):
return
# FIXME:needs hardening
log.debug("REQUIRE PERM: %s" % permission)
if not self.current.has_permission(permission):
raise HTTPError(403, "You don't have required permission: %s" % permission)
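For illustration, the permission string built above follows a `workflow.lane.task` pattern; the names below are made up:

workflow_name, lane, task_name = 'leave_request', 'manager', 'approve'
permission = "%s.%s.%s" % (workflow_name, lane, task_name)
assert permission == 'leave_request.manager.approve'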
def handle_wf_finalization(self):
"""
Removes the ``token`` key from ``current.output`` if WF is over.
"""
if ((not self.current.flow_enabled or (
self.current.task_type.startswith('End') and not self.are_we_in_subprocess())) and
'token' in self.current.output):
del self.current.output['token']
def from_rdkit_molecule(data):
"""
RDKit molecule object to MoleculeContainer converter
"""
m = MoleculeContainer()
atoms, mapping = [], []
for a in data.GetAtoms():
atom = {'element': a.GetSymbol(), 'charge': a.GetFormalCharge()}
atoms.append(atom)
mapping.append(a.GetAtomMapNum())
isotope = a.GetIsotope()
if isotope:
atom['isotope'] = isotope
radical = a.GetNumRadicalElectrons()
if radical:
atom['multiplicity'] = radical + 1
conformers = data.GetConformers()
if conformers:
for atom, (x, y, z) in zip(atoms, conformers[0].GetPositions()):
atom['x'] = x
atom['y'] = y
atom['z'] = z
for atom, mapping in zip(atoms, mapping):
a = m.add_atom(atom)
if mapping:
m.atom(a)._parsed_mapping = mapping
for bond in data.GetBonds():
m.add_bond(bond.GetBeginAtomIdx() + 1, bond.GetEndAtomIdx() + 1, _rdkit_bond_map[bond.GetBondType()])
return m
def to_rdkit_molecule(data):
"""
MoleculeContainer to RDKit molecule object converter
"""
mol = RWMol()
conf = Conformer()
mapping = {}
is_3d = False
for n, a in data.atoms():
ra = Atom(a.number)
ra.SetAtomMapNum(n)
if a.charge:
ra.SetFormalCharge(a.charge)
if a.isotope != a.common_isotope:
ra.SetIsotope(a.isotope)
if a.radical:
ra.SetNumRadicalElectrons(a.radical)
mapping[n] = m = mol.AddAtom(ra)
conf.SetAtomPosition(m, (a.x, a.y, a.z))
if a.z:
is_3d = True
if not is_3d:
conf.Set3D(False)
for n, m, b in data.bonds():
mol.AddBond(mapping[n], mapping[m], _bond_map[b.order])
mol.AddConformer(conf)
SanitizeMol(mol)
return mol
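A minimal round-trip sketch, assuming RDKit is installed and a CGRtools-style MoleculeContainer (as used by the two converters above) is importable; this is an illustration, not a test from the source:

# Hypothetical round trip between RDKit and MoleculeContainer.
from rdkit import Chem

rd_mol = Chem.MolFromSmiles('CCO')        # ethanol, no conformer data
container = from_rdkit_molecule(rd_mol)   # -> MoleculeContainer
rd_again = to_rdkit_molecule(container)   # -> sanitized RDKit RWMol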
def __dfs(self, start, weights, depth_limit):
"""
modified NX dfs
"""
adj = self._adj
stack = [(start, depth_limit, iter(sorted(adj[start], key=weights)))]
visited = {start}
disconnected = defaultdict(list)
edges = defaultdict(list)
while stack:
parent, depth_now, children = stack[-1]
try:
child = next(children)
except StopIteration:
stack.pop()
else:
if child not in visited:
edges[parent].append(child)
visited.add(child)
if depth_now > 1:
front = adj[child].keys() - {parent}
if front:
stack.append((child, depth_now - 1, iter(sorted(front, key=weights))))
elif child not in disconnected:
disconnected[parent].append(child)
return visited, edges, disconnected
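A standalone sketch of the same stack-based, weight-ordered traversal for a plain adjacency dict of sets (the `disconnected` bookkeeping of the method above is omitted for brevity):

from collections import defaultdict

def dfs_edges(adj, start, weights, depth_limit):
    # adj: {node: {neighbor, ...}}, unlike self._adj which maps to dicts.
    stack = [(start, depth_limit, iter(sorted(adj[start], key=weights)))]
    visited = {start}
    edges = defaultdict(list)
    while stack:
        parent, depth_now, children = stack[-1]
        try:
            child = next(children)
        except StopIteration:
            stack.pop()
        else:
            if child not in visited:
                edges[parent].append(child)
                visited.add(child)
                if depth_now > 1:
                    front = adj[child] - {parent}
                    if front:
                        stack.append((child, depth_now - 1,
                                      iter(sorted(front, key=weights))))
    return visited, dict(edges)

adj = {1: {2, 3}, 2: {1, 4}, 3: {1}, 4: {2}}
visited, edges = dfs_edges(adj, 1, weights=lambda n: n, depth_limit=5)
assert visited == {1, 2, 3, 4} and edges == {1: [2, 3], 2: [4]}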
def get_args_parser():
"""Return a parser for command line options."""
parser = argparse.ArgumentParser(
description='Marabunta: Migrating ants for Odoo')
parser.add_argument('--migration-file', '-f',
action=EnvDefault,
envvar='MARABUNTA_MIGRATION_FILE',
required=True,
help='The yaml file containing the migration steps')
parser.add_argument('--database', '-d',
action=EnvDefault,
envvar='MARABUNTA_DATABASE',
required=True,
help="Odoo's database")
parser.add_argument('--db-user', '-u',
action=EnvDefault,
envvar='MARABUNTA_DB_USER',
required=True,
help="Odoo's database user")
parser.add_argument('--db-password', '-w',
action=EnvDefault,
envvar='MARABUNTA_DB_PASSWORD',
required=True,
help="Odoo's database password")
parser.add_argument('--db-port', '-p',
default=os.environ.get('MARABUNTA_DB_PORT', 5432),
help="Odoo's database port")
parser.add_argument('--db-host', '-H',
default=os.environ.get('MARABUNTA_DB_HOST',
'localhost'),
help="Odoo's database host")
parser.add_argument('--mode',
action=EnvDefault,
envvar='MARABUNTA_MODE',
required=False,
help="Specify the mode in which we run the migration,"
"such as 'demo' or 'prod'. Additional operations "
"of this mode will be executed after the main "
"operations and the addons list of this mode "
"will be merged with the main addons list.")
parser.add_argument('--allow-serie',
action=BoolEnvDefault,
required=False,
envvar='MARABUNTA_ALLOW_SERIE',
help='Allow to run more than 1 version upgrade at a '
'time.')
parser.add_argument('--force-version',
required=False,
default=os.environ.get('MARABUNTA_FORCE_VERSION'),
help='Force upgrade of a version, even if it has '
'already been applied.')
group = parser.add_argument_group(
title='Web',
description='Configuration related to the internal web server, '
'used to publish a maintenance page during the migration.',
)
group.add_argument('--web-host',
required=False,
default=os.environ.get('MARABUNTA_WEB_HOST', '0.0.0.0'),
help='Host for the web server')
group.add_argument('--web-port',
required=False,
default=os.environ.get('MARABUNTA_WEB_PORT', 8069),
help='Port for the web server')
group.add_argument('--web-custom-html',
required=False,
default=os.environ.get(
'MARABUNTA_WEB_CUSTOM_HTML'
),
help='Path to a custom html file to publish')
return parser
def from_parse_args(cls, args):
"""Constructor from command line args.
:param args: parse command line arguments
:type args: argparse.ArgumentParser
"""
return cls(args.migration_file,
args.database,
db_user=args.db_user,
db_password=args.db_password,
db_port=args.db_port,
db_host=args.db_host,
mode=args.mode,
allow_serie=args.allow_serie,
force_version=args.force_version,
web_host=args.web_host,
web_port=args.web_port,
web_custom_html=args.web_custom_html,
)
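Hypothetical wiring of the two pieces above; `Config` stands in for whichever class defines `from_parse_args`:

parser = get_args_parser()
args = parser.parse_args([
    '-f', 'migration.yml', '-d', 'odoo', '-u', 'odoo', '-w', 'secret',
])
config = Config.from_parse_args(args)  # 'Config' is an assumed name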
def set_current(self, current):
"""
Creates some aliases for attributes of ``current``.
Args:
current: :attr:`~zengine.engine.WFCurrent` object.
"""
self.current = current
self.input = current.input
# self.req = current.request
# self.resp = current.response
self.output = current.output
self.cmd = current.task_data['cmd']
if self.cmd and NEXT_CMD_SPLITTER in self.cmd:
self.cmd, self.next_cmd = self.cmd.split(NEXT_CMD_SPLITTER)
else:
self.next_cmd = None
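An illustration of the cmd splitting above; the actual value of NEXT_CMD_SPLITTER is defined elsewhere in zengine, so '::' here is an assumption:

NEXT_CMD_SPLITTER = '::'  # assumed value for illustration
cmd = 'save::show'
if cmd and NEXT_CMD_SPLITTER in cmd:
    cmd, next_cmd = cmd.split(NEXT_CMD_SPLITTER)
else:
    next_cmd = None
assert (cmd, next_cmd) == ('save', 'show')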
def form_out(self, _form=None):
"""
Renders form. Applies form modifiers, then writes
result to response payload. If supplied, given form
object instance will be used instead of view's
default ObjectForm.
Args:
_form (:py:attr:`~zengine.forms.json_form.JsonForm`):
Form object to override `self.object_form`
"""
_form = _form or self.object_form
self.output['forms'] = _form.serialize()
self._add_meta_props(_form)
self.output['forms']['grouping'] = _form.Meta.grouping
self.output['forms']['constraints'] = _form.Meta.constraints
self._patch_form(self.output['forms'])
self.set_client_cmd('form')
def set_client_cmd(self, *args):
"""
Adds given cmd(s) to ``self.output['client_cmd']``
Args:
*args: Client commands.
"""
self.client_cmd.update(args)
self.output['client_cmd'] = list(self.client_cmd)
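Because `client_cmd` is a set, repeated commands are deduplicated; a pure-Python sketch of the behavior with module-level state:

client_cmd = set()
output = {}

def set_client_cmd(*args):
    # Mirrors the method above, for illustration only.
    client_cmd.update(args)
    output['client_cmd'] = list(client_cmd)

set_client_cmd('form')
set_client_cmd('form', 'reload')  # 'form' is deduplicated by the set
assert sorted(output['client_cmd']) == ['form', 'reload']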
def run(self):
"""
Creates new permissions.
"""
from pyoko.lib.utils import get_object_from_path
from zengine.config import settings
model = get_object_from_path(settings.PERMISSION_MODEL)
perm_provider = get_object_from_path(settings.PERMISSION_PROVIDER)
existing_perms = []
new_perms = []
for code, name, desc in perm_provider():
code = six.text_type(code)
if self.manager.args.dry:
exists = model.objects.filter(code=code, name=name)
if exists:
perm = exists[0]
new = False
else:
new = True
perm = model(code=code, name=name)
else:
try:
perm = model.objects.get(code)
existing_perms.append(perm)
except ObjectDoesNotExist:
perm = model(description=desc, code=code, name=name)
perm.key = code
perm.save()
new_perms.append(perm)
# perm, new = model.objects.get_or_create({'description': desc}, code=code, name=name)
# if new:
# new_perms.append(perm)
# else:
# existing_perms.append(perm)
report = "\n\n%s permission(s) were found in DB. " % len(existing_perms)
if new_perms:
report += "\n%s new permission record added. " % len(new_perms)
else:
report += 'No new perms added. '
if new_perms:
if not self.manager.args.dry:
SelectBoxCache.flush(model.__name__)
report += 'Total %s perms exists.' % (len(existing_perms) + len(new_perms))
report = "\n + " + "\n + ".join([p.name or p.code for p in new_perms]) + report
if self.manager.args.dry:
print("\n~~~~~~~~~~~~~~ DRY RUN ~~~~~~~~~~~~~~\n")
print(report + "\n")
def run(self):
"""
Creates user, encrypts password.
"""
from zengine.models import User
user = User(username=self.manager.args.username, superuser=self.manager.args.super)
user.set_password(self.manager.args.password)
user.save()
print("New user created with ID: %s" % user.key)
def run(self):
"""
Starts a development server for the zengine application
"""
print("Development server started on http://%s:%s. \n\nPress Ctrl+C to stop\n" % (
self.manager.args.addr,
self.manager.args.port)
)
if self.manager.args.server_type == 'falcon':
self.run_with_falcon()
elif self.manager.args.server_type == 'tornado':
self.run_with_tornado()
def run_with_tornado(self):
"""
runs the tornado/websockets based test server
"""
from zengine.tornado_server.server import runserver
runserver(self.manager.args.addr, int(self.manager.args.port))
def run_with_falcon(self):
"""
runs the falcon/http based test server
"""
from wsgiref import simple_server
from zengine.server import app
httpd = simple_server.make_server(self.manager.args.addr, int(self.manager.args.port), app)
httpd.serve_forever()
def run(self):
"""
Starts background worker process(es) for the zengine application.
"""
from zengine.wf_daemon import run_workers, Worker
worker_count = int(self.manager.args.workers or 1)
if not self.manager.args.daemonize:
print("Starting worker(s)")
if worker_count > 1 or self.manager.args.autoreload:
run_workers(worker_count,
self.manager.args.paths.split(' '),
self.manager.args.daemonize)
else:
worker = Worker()
worker.run()
def _prepare_domain(mapping):
"""Prepare a helper dictionary for the domain to temporarily hold some information."""
# Parse the domain-directory mapping
try:
domain, dir = mapping.split(':')
except ValueError:
print("Please provide the sources in the form of '<domain>:<directory>'")
sys.exit(1)
try:
default_language = settings.TRANSLATION_DOMAINS[domain]
except KeyError:
print("Unknown domain {domain}, check the settings file to make sure"
" this domain is set in TRANSLATION_DOMAINS".format(domain=domain))
sys.exit(1)
# Create a temporary file to hold the `.pot` file for this domain
handle, path = tempfile.mkstemp(prefix='zengine_i18n_', suffix='.pot')
return (domain, {
'default': default_language,
'pot': path,
'source': dir,
})
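For illustration, assuming a settings file that maps the 'messages' domain to English:

# Hypothetical call, assuming settings.TRANSLATION_DOMAINS = {'messages': 'en'}:
domain, info = _prepare_domain('messages:path/to/sources')
# domain == 'messages'
# info == {'default': 'en',
#          'pot': '/tmp/zengine_i18n_xxxx.pot',  # temporary file
#          'source': 'path/to/sources'}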
def _validate_domains(domains):
"""Check that all domains specified in the settings was provided in the options."""
missing = set(settings.TRANSLATION_DOMAINS.keys()) - set(domains.keys())
if missing:
print('The following domains have been set in the configuration, '
'but their sources were not provided, use the `--source` '
'option to specify their sources: {domains}'.format(domains=', '.join(missing)))
sys.exit(1)
def _extract_translations(self, domains):
"""Extract the translations into `.pot` files"""
for domain, options in domains.items():
# Create the extractor
extractor = babel_frontend.extract_messages()
extractor.initialize_options()
# The temporary location to write the `.pot` file
extractor.output_file = options['pot']
# Add the comments marked with 'tn:' to the translation file for translators to read. Strip the marker.
extractor.add_comments = ['tn:']
extractor.strip_comments = True
# The directory where the sources for this domain are located
extractor.input_paths = [options['source']]
# Pass the metadata to the translator
extractor.msgid_bugs_address = self.manager.args.contact
extractor.copyright_holder = self.manager.args.copyright
extractor.version = self.manager.args.version
extractor.project = self.manager.args.project
extractor.finalize_options()
# Add keywords for lazy translation functions, based on their non-lazy variants
extractor.keywords.update({
'gettext_lazy': extractor.keywords['gettext'],
'ngettext_lazy': extractor.keywords['ngettext'],
'__': extractor.keywords['gettext'], # double underscore for lazy
})
# Do the extraction
_run_babel_command(extractor)
def _init_update_po_files(self, domains):
"""Update or initialize the `.po` translation files"""
for language in settings.TRANSLATIONS:
for domain, options in domains.items():
if language == options['default']: continue # Default language of the domain doesn't need translations
if os.path.isfile(_po_path(language, domain)):
# If the translation already exists, update it, keeping the parts already translated
self._update_po_file(language, domain, options['pot'])
else:
# The translation doesn't exist, create a new translation file
self._init_po_file(language, domain, options['pot'])
def _cleanup(self, domains):
"""Remove the temporary '.pot' files that were created for the domains."""
for option in domains.values():
try:
os.remove(option['pot'])
except (IOError, OSError):
# It is not a problem if we can't actually remove the temporary file
pass
def run(self):
"""
Reads workflows, checks whether each one is updated, and
tries to update it if there are no running instances of that WF.
"""
from zengine.lib.cache import WFSpecNames
if self.manager.args.clear:
self._clear_models()
return
if self.manager.args.wf_path:
paths = self.get_wf_from_path(self.manager.args.wf_path)
else:
paths = self.get_workflows()
self.count = 0
self.do_with_submit(self.load_diagram, paths, threads=self.manager.args.threads)
WFSpecNames().refresh()
print("%s BPMN file loaded" % self.count)
def get_wf_from_path(self, path):
"""
Loads the BPMN XML from the given path.
Args:
path: diagram path
Returns:
A single-item list of (diagram name, XML content) tuples.
"""
with open(path) as fp:
content = fp.read()
return [(os.path.basename(os.path.splitext(path)[0]), content), ]
def get_workflows(self):
"""
Scans and loads all wf found under WORKFLOW_PACKAGES_PATHS
Yields: XML content of diagram file
"""
for pth in settings.WORKFLOW_PACKAGES_PATHS:
for f in glob.glob("%s/*.bpmn" % pth):
with open(f) as fp:
yield os.path.basename(os.path.splitext(f)[0]), fp.read()
def check_migration_and_solr(self):
"""
The model or models are checked for migrations that need to be done.
Solr is also checked.
"""
from pyoko.db.schema_update import SchemaUpdater
from socket import error as socket_error
from pyoko.conf import settings
from importlib import import_module
import_module(settings.MODELS_MODULE)
registry = import_module('pyoko.model').model_registry
models = [model for model in registry.get_base_models()]
try:
print(__(u"Checking migration and solr ..."))
updater = SchemaUpdater(models, 1, False)
updater.run(check_only=True)
except socket_error as e:
print(__(u"{0}Error not connected, open redis and rabbitmq{1}").format(CheckList.FAIL,
CheckList.ENDC))
def check_redis():
"""
Checks the Redis connection.
Displays on the screen whether or not a connection could be made.
"""
from pyoko.db.connection import cache
from redis.exceptions import ConnectionError
try:
cache.ping()
print(__(u"{0}Redis is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
except ConnectionError as e:
print(__(u"{0}Redis is not working{1} ").format(CheckList.FAIL,
CheckList.ENDC), e.message)
def check_riak():
"""
Checks the Riak connection.
Displays on the screen whether or not a connection could be made.
"""
from pyoko.db.connection import client
from socket import error as socket_error
try:
if client.ping():
print(__(u"{0}Riak is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
else:
print(__(u"{0}Riak is not working{1}").format(CheckList.FAIL, CheckList.ENDC))
except socket_error as e:
print(__(u"{0}Riak is not working{1}").format(CheckList.FAIL,
CheckList.ENDC), e.message)
def check_mq_connection(self):
"""
Checks the RabbitMQ connection.
Displays on the screen whether or not a connection could be made.
"""
import pika
from zengine.client_queue import BLOCKING_MQ_PARAMS
from pika.exceptions import ProbableAuthenticationError, ConnectionClosed
try:
connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
channel = connection.channel()
if channel.is_open:
print(__(u"{0}RabbitMQ is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
elif channel.is_closed or channel.is_closing:
print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC))
except ConnectionClosed as e:
print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC), e)
except ProbableAuthenticationError as e:
print(__(u"{0}RabbitMQ username and password wrong{1}").format(CheckList.FAIL,
CheckList.ENDC))
def check_encoding_and_env():
"""
Prints the relevant environment variables to the screen
so the user can verify that the correct values are in use.
"""
import sys
import os
if sys.getfilesystemencoding() in ['utf-8', 'UTF-8']:
print(__(u"{0}File system encoding correct{1}").format(CheckList.OKGREEN,
CheckList.ENDC))
else:
print(__(u"{0}File system encoding wrong!!{1}").format(CheckList.FAIL,
CheckList.ENDC))
check_env_list = ['RIAK_PROTOCOL', 'RIAK_SERVER', 'RIAK_PORT', 'REDIS_SERVER',
'DEFAULT_BUCKET_TYPE', 'PYOKO_SETTINGS',
'MQ_HOST', 'MQ_PORT', 'MQ_USER', 'MQ_VHOST',
]
env = os.environ
for k, v in env.items():
if k in check_env_list:
print(__(u"{0}{1} : {2}{3}").format(CheckList.BOLD, k, v, CheckList.ENDC))
def no_moves(position):
"""
Finds if the game is over.
:type position: Board
:rtype: bool
"""
return position.no_moves(color.white) \
or position.no_moves(color.black)
def is_checkmate(position, input_color):
"""
Finds if particular King is checkmated.
:type position: Board
:type input_color: Color
:rtype: bool
"""
return position.no_moves(input_color) and \
position.get_king(input_color).in_check(position)
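Following the same pattern, stalemate (no legal moves while the king is not in check) could be sketched like this, using the same Board API as above:

def is_stalemate(position, input_color):
    """
    Finds if particular King is stalemated (sketch, not from the source).
    :type position: Board
    :type input_color: Color
    :rtype: bool
    """
    return position.no_moves(input_color) and \
        not position.get_king(input_color).in_check(position)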
def _paginate(self, current_page, query_set, per_page=10):
"""
Handles pagination of object listings.
Args:
current_page int:
Current page number
query_set (:class:`QuerySet<pyoko:pyoko.db.queryset.QuerySet>`):
Object listing queryset.
per_page int:
Objects per page.
Returns:
QuerySet object, pagination data dict as a tuple
"""
total_objects = query_set.count()
total_pages = int(total_objects / per_page or 1)
# add orphans to last page
current_per_page = per_page + (
total_objects % per_page if current_page == total_pages else 0)
pagination_data = dict(page=current_page,
total_pages=total_pages,
total_objects=total_objects,
per_page=current_per_page)
query_set = query_set.set_params(rows=current_per_page, start=(current_page - 1) * per_page)
return query_set, pagination_data
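A worked example of the arithmetic above, with 25 objects at 10 per page:

total_objects, per_page = 25, 10
total_pages = int(total_objects / per_page or 1)        # 2 pages
# Orphans (25 % 10 == 5) are folded into the last page:
last_page_size = per_page + total_objects % per_page    # 15 objects
# Page 1 fetches rows 0-9, page 2 fetches rows 10-24.
assert (total_pages, last_page_size) == (2, 15)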
def create_message(current):
"""
Creates a message for the given channel.
.. code-block:: python
# request:
{
'view':'_zops_create_message',
'message': {
'channel': key, # of channel
'body': string, # message text.,
'type': int, # zengine.messaging.model.MSG_TYPES,
'attachments': [{
'description': string, # can be blank,
'name': string, # file name with extension,
'content': string, # base64 encoded file content
}]}
# response:
{
'status': 'Created',
'code': 201,
'msg_key': key, # key of the message object,
}
"""
msg = current.input['message']
msg_obj = Channel.add_message(msg['channel'], body=msg['body'], typ=msg['type'],
sender=current.user,
title=msg['title'], receiver=msg['receiver'] or None)
current.output = {
'msg_key': msg_obj.key,
'status': 'Created',
'code': 201
}
if 'attachments' in msg:
for atch in msg['attachments']:
typ = current._dedect_file_type(atch['name'], atch['content'])
Attachment(channel_id=msg['channel'], msg=msg_obj, name=atch['name'],
file=atch['content'], description=atch['description'], typ=typ).save()
def show_channel(current, waited=False):
"""
Initial display of channel content.
Returns channel description, members, no of members, last 20 messages etc.
.. code-block:: python
# request:
{
'view':'_zops_show_channel',
'key': key,
}
# response:
{
'channel_key': key,
'description': string,
'no_of_members': int,
'member_list': [
{'name': string,
'is_online': bool,
'avatar_url': string,
}],
'name': string,
'last_messages': [MSG_DICT]
'status': 'OK',
'code': 200
}
"""
ch = Channel(current).objects.get(current.input['key'])
sbs = ch.get_subscription_for_user(current.user_id)
current.output = {'key': current.input['key'],
'description': ch.description,
'name': sbs.name,
'actions': sbs.get_actions(),
'avatar_url': ch.get_avatar(current.user),
'no_of_members': len(ch.subscriber_set),
'member_list': [{'name': sb.user.full_name,
'is_online': sb.user.is_online(),
'avatar_url': sb.user.get_avatar_url()
} for sb in ch.subscriber_set.objects.all()],
'last_messages': [],
'status': 'OK',
'code': 200
}
for msg in ch.get_last_messages():
current.output['last_messages'].insert(0, msg.serialize(current.user))
def channel_history(current):
"""
Get old messages for a channel. 20 messages per request
.. code-block:: python
# request:
{
'view':'_zops_channel_history',
'channel_key': key,
'timestamp': datetime, # timestamp data of oldest shown message
}
# response:
{
'messages': [MSG_DICT, ],
'status': 'OK',
'code': 200
}
"""
current.output = {
'status': 'OK',
'code': 201,
'messages': []
}
for msg in list(Message.objects.filter(channel_id=current.input['channel_key'],
updated_at__lte=current.input['timestamp'])[:20]):
current.output['messages'].insert(0, msg.serialize(current.user))
# FIXME: looks like pyoko's __lt is broken
# TODO: convert lte to lt and remove this block, when __lt filter fixed
if current.output['messages']:
current.output['messages'].pop(-1)
def report_last_seen_message(current):
"""
Push timestamp of latest message of an ACTIVE channel.
This view should be called with timestamp of latest message;
- When user opens (clicks on) a channel.
- Periodically (e.g. with a 15 sec setInterval) while the user stays in a channel.
.. code-block:: python
# request:
{
'view':'_zops_last_seen_msg',
'channel_key': key,
'key': key,
'timestamp': datetime,
}
# response:
{
'status': 'OK',
'code': 200,
}
"""
sbs = Subscriber(current).objects.filter(channel_id=current.input['channel_key'],
user_id=current.user_id)[0]
sbs.last_seen_msg_time = current.input['timestamp']
sbs.save()
current.output = {
'status': 'OK',
'code': 200}
def list_channels(current):
"""
List channel memberships of current user
.. code-block:: python
# request:
{
'view':'_zops_list_channels',
}
# response:
{
'channels': [
{'name': string, # name of channel
'key': key, # key of channel
'unread': int, # unread message count
'type': int, # channel type,
# 15: public channels (chat room/broadcast channel distinction
comes from "read_only" flag)
# 10: direct channels
# 5: one and only private channel which is "Notifications"
'read_only': boolean,
# true if this is a read-only subscription to a broadcast channel
# false if it's a public chat room
'actions':[('action name', 'view name'),]
},]
}
"""
current.output = {
'status': 'OK',
'code': 200,
'channels': []}
for sbs in current.user.subscriptions.objects.filter(is_visible=True):
try:
current.output['channels'].append(sbs.get_channel_listing())
except ObjectDoesNotExist:
# FIXME: This should not happen,
log.exception("UNPAIRED DIRECT EXCHANGES!!!!")
sbs.delete()
def unread_count(current):
"""
Number of unread messages for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_count',
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': int,
'messages': int,
}
"""
unread_ntf = 0
unread_msg = 0
for sbs in current.user.subscriptions.objects.filter(is_visible=True):
try:
if sbs.channel.key == current.user.prv_exchange:
unread_ntf += sbs.unread_count()
else:
unread_msg += sbs.unread_count()
except ObjectDoesNotExist:
# FIXME: This should not happen,
log.exception("MULTIPLE PRV EXCHANGES!!!!")
sbs.delete()
current.output = {
'status': 'OK',
'code': 200,
'notifications': unread_ntf,
'messages': unread_msg
}
def get_notifications(current):
"""
Returns last N notifications for current user
.. code-block:: python
# request:
{
'view':'_zops_unread_messages',
'amount': int, # Optional, defaults to 8
}
# response:
{
'status': 'OK',
'code': 200,
'notifications': [{'title':string,
'body': string,
'channel_key': key,
'type': int,
'url': string, # could be a in app JS URL prefixed with "#" or
# full blown URL prefixed with "http"
'message_key': key,
'timestamp': datetime},],
}
"""
current.output = {
'status': 'OK',
'code': 200,
'notifications': [],
}
amount = current.input.get('amount', 8)
try:
notif_sbs = current.user.subscriptions.objects.get(channel_id=current.user.prv_exchange)
except MultipleObjectsReturned:
# FIXME: This should not happen,
log.exception("MULTIPLE PRV EXCHANGES!!!!")
sbs = current.user.subscriptions.objects.filter(channel_id=current.user.prv_exchange)
sbs[0].delete()
notif_sbs = sbs[1]
for msg in notif_sbs.channel.message_set.objects.all()[:amount]:
current.output['notifications'].insert(0, {
'title': msg.msg_title,
'body': msg.body,
'type': msg.typ,
'url': msg.url,
'channel_key': msg.channel.key,
'message_key': msg.key,
'timestamp': msg.updated_at})
def create_channel(current):
"""
Create a public channel. Can be a broadcast channel or normal chat room.
Chat room and broadcast distinction will be made at user subscription phase.
.. code-block:: python
# request:
{
'view':'_zops_create_channel',
'name': string,
'description': string,
}
# response:
{
'description': string,
'name': string,
'no_of_members': int,
'member_list': [
{'name': string,
'is_online': bool,
'avatar_url': string,
}],
'last_messages': [MSG_DICT]
'status': 'Created',
'code': 201,
'key': key, # of just created channel
}
"""
channel = Channel(name=current.input['name'],
description=current.input['description'],
owner=current.user,
typ=15).save()
with BlockSave(Subscriber):
Subscriber.objects.get_or_create(user=channel.owner,
channel=channel,
can_manage=True,
can_leave=False)
current.input['key'] = channel.key
show_channel(current)
current.output.update({
'status': 'Created',
'code': 201
})
def add_members(current):
"""
Subscribe member(s) to a channel
.. code-block:: python
# request:
{
'view':'_zops_add_members',
'channel_key': key,
'read_only': boolean, # true if this is a Broadcast channel,
# false if it's a normal chat room
'members': [key, key],
}
# response:
{
'existing': [key,], # existing members
'newly_added': [key,], # newly added members
'status': 'Created',
'code': 201
}
"""
newly_added, existing = [], []
read_only = current.input['read_only']
for member_key in current.input['members']:
sb, new = Subscriber(current).objects.get_or_create(user_id=member_key,
read_only=read_only,
channel_id=current.input['channel_key'])
if new:
newly_added.append(member_key)
else:
existing.append(member_key)
current.output = {
'existing': existing,
'newly_added': newly_added,
'status': 'OK',
'code': 201
}
def add_unit_to_channel(current):
"""
Subscribe users of a given unit to given channel
JSON API:
.. code-block:: python
# request:
{
'view':'_zops_add_unit_to_channel',
'unit_key': key,
'channel_key': key,
'read_only': boolean, # true if this is a Broadcast channel,
# false if it's a normal chat room
}
# response:
{
'existing': [key,], # existing members
'newly_added': [key,], # newly added members
'status': 'Created',
'code': 201
}
"""
read_only = current.input['read_only']
newly_added, existing = [], []
for member_key in UnitModel.get_user_keys(current, current.input['unit_key']):
sb, new = Subscriber(current).objects.get_or_create(user_id=member_key,
read_only=read_only,
channel_id=current.input['channel_key'])
if new:
newly_added.append(member_key)
else:
existing.append(member_key)
current.output = {
'existing': existing,
'newly_added': newly_added,
'status': 'OK',
'code': 201
}
def search_user(current):
"""
Search users to add to a public room
or to start one-to-one direct messaging.
.. code-block:: python
# request:
{
'view':'_zops_search_user',
'query': string,
}
# response:
{
'results': [('full_name', 'key', 'avatar_url'), ],
'status': 'OK',
'code': 200
}
"""
current.output = {
'results': [],
'status': 'OK',
'code': 201
}
qs = UserModel(current).objects.exclude(key=current.user_id).search_on(
*settings.MESSAGING_USER_SEARCH_FIELDS,
contains=current.input['query'])
# FIXME: somehow exclude(key=current.user_id) not working with search_on()
for user in qs:
if user.key != current.user_id:
current.output['results'].append((user.full_name, user.key, user.get_avatar_url()))
def search_unit(current):
"""
Search units in order to subscribe their users to a channel.
.. code-block:: python
# request:
{
'view':'_zops_search_unit',
'query': string,
}
# response:
{
'results': [('name', 'key'), ],
'status': 'OK',
'code': 200
}
"""
current.output = {
'results': [],
'status': 'OK',
'code': 201
}
for user in UnitModel(current).objects.search_on(*settings.MESSAGING_UNIT_SEARCH_FIELDS,
contains=current.input['query']):
current.output['results'].append((user.name, user.key))
def create_direct_channel(current):
"""
Create a One-To-One channel between current and selected user.
.. code-block:: python
# request:
{
'view':'_zops_create_direct_channel',
'user_key': key,
}
# response:
{
'description': string,
'no_of_members': int,
'member_list': [
{'name': string,
'is_online': bool,
'avatar_url': string,
}],
'last_messages': [MSG_DICT]
'status': 'Created',
'code': 201,
'channel_key': key, # of just created channel
'name': string, # name of subscribed channel
}
"""
channel, sub_name = Channel.get_or_create_direct_channel(current.user_id,
current.input['user_key'])
current.input['key'] = channel.key
show_channel(current)
current.output.update({
'status': 'Created',
'code': 201
})
def find_message(current):
"""
Search in messages. If "channel_key" given, search will be limited to that channel,
otherwise search will be performed on all of user's subscribed channels.
.. code-block:: python
# request:
{
'view':'_zops_find_message',
'channel_key': key,
'query': string,
'page': int,
}
# response:
{
'results': [MSG_DICT, ],
'pagination': {
'page': int, # current page
'total_pages': int,
'total_objects': int,
'per_page': int, # object per page
},
'status': 'OK',
'code': 200
}
"""
current.output = {
'results': [],
'status': 'OK',
'code': 201
}
query_set = Message(current).objects.search_on(['msg_title', 'body', 'url'],
contains=current.input['query'])
if current.input['channel_key']:
query_set = query_set.filter(channel_id=current.input['channel_key'])
else:
subscribed_channels = Subscriber.objects.filter(user_id=current.user_id).values_list(
"channel_id", flatten=True)
query_set = query_set.filter(channel_id__in=subscribed_channels)
query_set, pagination_data = _paginate(current_page=current.input['page'], query_set=query_set)
current.output['pagination'] = pagination_data
for msg in query_set:
current.output['results'].append(msg.serialize(current.user))
|
Search in messages. If "channel_key" given, search will be limited to that channel,
otherwise search will be performed on all of user's subscribed channels.
.. code-block:: python
# request:
{
'view':'_zops_search_unit,
'channel_key': key,
'query': string,
'page': int,
}
# response:
{
'results': [MSG_DICT, ],
'pagination': {
'page': int, # current page
'total_pages': int,
'total_objects': int,
'per_page': int, # object per page
},
'status': 'OK',
'code': 200
}
|
entailment
|
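The `_paginate` helper used by `find_message` is not part of this excerpt. Below is a minimal sketch consistent with the `pagination` dict in the docstring, assuming a count()-able, sliceable query set and a hypothetical default page size:
.. code-block:: python

    def _paginate(current_page, query_set, per_page=20):
        # per_page is an assumed default; the real helper may differ.
        total_objects = query_set.count()
        total_pages = max(1, -(-total_objects // per_page))  # ceiling division
        current_page = max(1, min(current_page, total_pages))
        start = (current_page - 1) * per_page
        pagination_data = {
            'page': current_page,
            'total_pages': total_pages,
            'total_objects': total_objects,
            'per_page': per_page,
        }
        return query_set[start:start + per_page], pagination_data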
def delete_channel(current):
"""
Delete a channel
.. code-block:: python
# request:
{
'view':'_zops_delete_channel',
'channel_key': key,
}
# response:
{
'status': 'Deleted',
'code': 200
}
"""
ch_key = current.input['channel_key']
ch = Channel(current).objects.get(owner_id=current.user_id, key=ch_key)
ch.delete()
Subscriber.objects.filter(channel_id=ch_key).delete()
Message.objects.filter(channel_id=ch_key).delete()
current.output = {'status': 'Deleted', 'code': 200}
|
Delete a channel
.. code-block:: python
# request:
{
'view':'_zops_delete_channel',
'channel_key': key,
}
# response:
{
'status': 'Deleted',
'code': 200
}
|
entailment
|
def edit_channel(current):
"""
Update channel name or description
.. code-block:: python
# request:
{
'view':'_zops_edit_channel',
'channel_key': key,
'name': string,
'description': string,
}
# response:
{
'status': 'OK',
'code': 200
}
"""
ch = Channel(current).objects.get(owner_id=current.user_id,
key=current.input['channel_key'])
ch.name = current.input['name']
ch.description = current.input['description']
ch.save()
for sbs in ch.subscriber_set.objects.all():
sbs.name = ch.name
sbs.save()
current.output = {'status': 'OK', 'code': 200}
|
Update channel name or description
.. code-block:: python
# request:
{
'view':'_zops_edit_channel',
'channel_key': key,
'name': string,
'description': string,
}
# response:
{
'status': 'OK',
'code': 200
}
|
entailment
|
def pin_channel(current):
"""
Pin a channel to top of channel list
.. code-block:: python
# request:
{
'view':'_zops_pin_channel',
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200
}
"""
try:
Subscriber(current).objects.filter(user_id=current.user_id,
channel_id=current.input['channel_key']).update(
pinned=True)
current.output = {'status': 'OK', 'code': 200}
except ObjectDoesNotExist:
raise HTTPError(404, "")
|
Pin a channel to top of channel list
.. code-block:: python
# request:
{
'view':'_zops_pin_channel',
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200
}
|
entailment
|
def delete_message(current):
"""
Delete a message
.. code-block:: python
# request:
{
'view':'_zops_delete_message',
'key': key,
}
# response:
{
'key': key,
'status': 'Deleted',
'code': 200
}
"""
try:
Message(current).objects.get(sender_id=current.user_id,
key=current.input['key']).delete()
current.output = {'status': 'Deleted', 'code': 200, 'key': current.input['key']}
except ObjectDoesNotExist:
raise HTTPError(404, "")
|
Delete a message
.. code-block:: python
# request:
{
'view':'_zops_delete_message',
'key': key,
}
# response:
{
'key': key,
'status': 'Deleted',
'code': 200
}
|
entailment
|
def edit_message(current):
"""
Edit a message the user owns.
.. code-block:: python
# request:
{
'view':'_zops_edit_message',
'message': {
'body': string, # message text
'key': key
}
}
# response:
{
'status': string, # 'OK' for success
'code': int, # 200 for success
}
"""
current.output = {'status': 'OK', 'code': 200}
in_msg = current.input['message']
try:
msg = Message(current).objects.get(sender_id=current.user_id, key=in_msg['key'])
msg.body = in_msg['body']
msg.save()
except ObjectDoesNotExist:
raise HTTPError(404, "")
|
Edit a message the user owns.
.. code-block:: python
# request:
{
'view':'_zops_edit_message',
'message': {
'body': string, # message text
'key': key
}
}
# response:
{
'status': string, # 'OK' for success
'code': int, # 200 for success
}
|
entailment
|
def flag_message(current):
"""
Flag inappropriate messages
.. code-block:: python
# request:
{
'view':'_zops_flag_message',
'key': key,
}
# response:
{
'status': 'Created',
'code': 201,
}
"""
current.output = {'status': 'Created', 'code': 201}
FlaggedMessage.objects.get_or_create(user_id=current.user_id,
message_id=current.input['key'])
|
Flag inappropriate messages
.. code-block:: python
# request:
{
'view':'_zops_flag_message',
'key': key,
}
# response:
{
'status': 'Created',
'code': 201,
}
|
entailment
|
def unflag_message(current):
"""
Remove the flag from a message
.. code-block:: python
# request:
{
'view':'_zops_unflag_message',
'key': key,
}
# response:
{
'status': 'OK',
'code': 200,
}
"""
current.output = {'status': 'OK', 'code': 200}
FlaggedMessage(current).objects.filter(user_id=current.user_id,
message_id=current.input['key']).delete()
|
Remove the flag from a message
.. code-block:: python
# request:
{
'view':'_zops_unflag_message',
'key': key,
}
# response:
{
'status': 'OK',
'code': 200,
}
|
entailment
|
def get_message_actions(current):
"""
Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions': [('name_string', 'cmd_string'), ],
'status': string, # 'OK' for success
'code': int, # 200 for success
}
"""
current.output = {'status': 'OK',
'code': 200,
'actions': Message.objects.get(
current.input['key']).get_actions_for(current.user)}
|
Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions': [('name_string', 'cmd_string'), ],
'status': string, # 'OK' for success
'code': int, # 200 for success
}
|
entailment
|
def add_to_favorites(current):
"""
Favorite a message
.. code-block:: python
# request:
{
'view':'_zops_add_to_favorites',
'key': key,
}
# response:
{
'status': 'Created',
'code': 201,
'favorite_key': key
}
"""
msg = Message.objects.get(current.input['key'])
current.output = {'status': 'Created', 'code': 201}
fav, new = Favorite.objects.get_or_create(user_id=current.user_id, message=msg)
current.output['favorite_key'] = fav.key
|
Favorite a message
.. code-block:: python
# request:
{
'view':'_zops_add_to_favorites',
'key': key,
}
# response:
{
'status': 'Created',
'code': 201,
'favorite_key': key
}
|
entailment
|
def remove_from_favorites(current):
"""
Remove a message from favorites
.. code-block:: python
# request:
{
'view':'_zops_remove_from_favorites',
'key': key,
}
# response:
{
'status': 'OK',
'code': 200
}
"""
try:
current.output = {'status': 'OK', 'code': 200}
Favorite(current).objects.get(user_id=current.user_id,
key=current.input['key']).delete()
except ObjectDoesNotExist:
raise HTTPError(404, "")
|
Remove a message from favorites
.. code-block:: python
# request:
{
'view':'_zops_remove_from_favorites',
'key': key,
}
# response:
{
'status': 'OK',
'code': 200
}
|
entailment
|
def list_favorites(current):
"""
List the user's favorites. If "channel_key" is given, only favorites belonging to that channel are returned.
.. code-block:: python
# request:
{
'view':'_zops_list_favorites',
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200,
'favorites':[{'key': key,
'channel_key': key,
'message_key': key,
'message_summary': string, # max 60 char
'channel_name': string,
},]
}
"""
current.output = {'status': 'OK', 'code': 200, 'favorites': []}
query_set = Favorite(current).objects.filter(user_id=current.user_id)
if current.input.get('channel_key'):
query_set = query_set.filter(channel_id=current.input['channel_key'])
current.output['favorites'] = [{
'key': fav.key,
'channel_key': fav.channel.key,
'message_key': fav.message.key,
'message_summary': fav.summary,
'channel_name': fav.channel_name
} for fav in query_set]
|
List the user's favorites. If "channel_key" is given, only favorites belonging to that channel are returned.
.. code-block:: python
# request:
{
'view':'_zops_list_favorites',
'channel_key': key,
}
# response:
{
'status': 'OK',
'code': 200,
'favorites':[{'key': key,
'channel_key': key,
'message_key': key,
'message_summary': string, # max 60 char
'channel_name': string,
},]
}
|
entailment
|
def get_or_create_direct_channel(cls, initiator_key, receiver_key):
"""
Creates a direct messaging channel between two users
Args:
initiator_key: Key of the user who wants to make the first contact
receiver_key: Key of the other party
Returns:
(Channel, receiver_name)
"""
existing = cls.objects.OR().filter(
code_name='%s_%s' % (initiator_key, receiver_key)).filter(
code_name='%s_%s' % (receiver_key, initiator_key))
receiver_name = UserModel.objects.get(receiver_key).full_name
if existing:
channel = existing[0]
else:
channel_name = '%s_%s' % (initiator_key, receiver_key)
channel = cls(is_direct=True, code_name=channel_name, typ=10).blocking_save()
with BlockSave(Subscriber):
Subscriber.objects.get_or_create(channel=channel,
user_id=initiator_key,
name=receiver_name)
Subscriber.objects.get_or_create(channel=channel,
user_id=receiver_key,
name=UserModel.objects.get(initiator_key).full_name)
return channel, receiver_name
|
Creates a direct messaging channel between two users
Args:
initiator_key: Key of the user who wants to make the first contact
receiver_key: Key of the other party
Returns:
(Channel, receiver_name)
|
entailment
|
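A short usage sketch for the class method above (the user keys are hypothetical):
.. code-block:: python

    # Looks up the channel under both key orderings, so the same channel is
    # reused no matter which party initiated the conversation.
    channel, receiver_name = Channel.get_or_create_direct_channel(
        'initiator_user_key', 'receiver_user_key')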
def create_exchange(self):
"""
Creates MQ exchange for this channel
Needs to be defined only once.
"""
mq_channel = self._connect_mq()
mq_channel.exchange_declare(exchange=self.code_name,
exchange_type='fanout',
durable=True)
|
Creates MQ exchange for this channel
Needs to be defined only once.
|
entailment
|
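`_connect_mq` is not shown in this excerpt. Assuming the underlying client is pika (a common RabbitMQ client), the equivalent standalone declaration would be roughly:
.. code-block:: python

    import pika

    # A fanout exchange copies every message to all bound queues/exchanges;
    # durable=True lets the exchange survive broker restarts.
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    mq_channel = connection.channel()
    mq_channel.exchange_declare(exchange='channel_code_name',
                                exchange_type='fanout',
                                durable=True)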
def delete_exchange(self):
"""
Deletes the MQ exchange of this channel.
"""
mq_channel = self._connect_mq()
mq_channel.exchange_delete(exchange=self.code_name)
|
Deletes the MQ exchange of this channel.
|
entailment
|
def get_channel_listing(self):
"""
Serialized form for channel listing.
"""
return {'name': self.name,
'key': self.channel.key,
'type': self.channel.typ,
'read_only': self.read_only,
'is_online': self.is_online(),
'actions': self.get_actions(),
'unread': self.unread_count()}
|
Serialized form for channel listing.
|
entailment
|
def create_exchange(self):
"""
Creates the user's private exchange
The user's private exchange needs to be declared only once, ideally
when the user is first created. Since re-declaring it costs little,
we call this before every bind to the currently subscribed channel,
just to be safe.
"""
channel = self._connect_mq()
channel.exchange_declare(exchange=self.user.prv_exchange,
exchange_type='fanout',
durable=True)
|
Creates the user's private exchange
The user's private exchange needs to be declared only once, ideally
when the user is first created. Since re-declaring it costs little,
we call this before every bind to the currently subscribed channel,
just to be safe.
|
entailment
|
def bind_to_channel(self):
"""
Binds (subscribes) the user's private exchange to the channel exchange
Automatically called at creation of subscription record.
"""
if self.channel.code_name != self.user.prv_exchange:
channel = self._connect_mq()
channel.exchange_bind(source=self.channel.code_name, destination=self.user.prv_exchange)
|
Binds (subscribes) the user's private exchange to the channel exchange
Automatically called at creation of subscription record.
|
entailment
|
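Exchange-to-exchange binding is a RabbitMQ extension; assuming pika again, a standalone sketch of the same bind (both exchange names are placeholders):
.. code-block:: python

    import pika

    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    mq_channel = connection.channel()
    # After this, messages published to the channel exchange are also
    # forwarded to the user's private exchange.
    mq_channel.exchange_bind(source='channel_code_name',
                             destination='user_prv_exchange')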
def serialize(self, user=None):
"""
Serializes message for given user.
Note:
Should be called before first save(). Otherwise "is_update" will get wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
"""
return {
'content': self.body,
'type': self.typ,
'updated_at': self.updated_at,
'timestamp': self.updated_at,
'is_update': not hasattr(self, 'unsaved'),
'attachments': [attachment.serialize() for attachment in self.attachment_set],
'title': self.msg_title,
'url': self.url,
'sender_name': self.sender.full_name,
'sender_key': self.sender.key,
'channel_key': self.channel.key,
'cmd': 'message',
'avatar_url': self.sender.avatar,
'key': self.key,
}
|
Serializes message for given user.
Note:
Should be called before first save(). Otherwise "is_update" will get wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
|
entailment
|
def _republish(self):
"""
Re-publishes updated message
"""
mq_channel = self.channel._connect_mq()
mq_channel.basic_publish(exchange=self.channel.code_name, routing_key='',
body=json.dumps(self.serialize()))
|
Re-publishes updated message
|
entailment
|
def defaultCrawlId():
"""
Provide a reasonable default crawl name using the user name and date
"""
timestamp = datetime.now().isoformat().replace(':', '_')
user = getuser()
return '_'.join(('crawl', user, timestamp))
|
Provide a reasonable default crawl name using the user name and date
|
entailment
|
def main(argv=None):
"""Run Nutch command using REST API."""
global Verbose, Mock
if argv is None:
argv = sys.argv
if len(argv) < 5: die('Bad args')
try:
opts, argv = getopt.getopt(argv[1:], 'hs:p:mv',
['help', 'server=', 'port=', 'mock', 'verbose'])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
die()
serverEndpoint = DefaultServerEndpoint
# TODO: Fix this
for opt, val in opts:
if opt in ('-h', '--help'): echo2(USAGE); sys.exit()
elif opt in ('-s', '--server'): serverEndpoint = val
elif opt in ('-p', '--port'): serverEndpoint = 'http://localhost:%s' % val
elif opt in ('-m', '--mock'): Mock = 1
elif opt in ('-v', '--verbose'): Verbose = 1
else: die(USAGE)
cmd = argv[0]
crawlId = argv[1]
confId = argv[2]
urlDir = argv[3]
args = {}
if len(argv) > 4: args = eval(argv[4])
nt = Nutch(crawlId, confId, serverEndpoint, urlDir)
nt.Jobs().create(cmd, **args)
|
Run Nutch command using REST API.
|
entailment
|
def call(self, verb, servicePath, data=None, headers=None, forceText=False, sendJson=True):
"""Call the Nutch Server, do some error checking, and return the response.
:param verb: One of nutch.RequestVerbs
:param servicePath: path component of URL to append to endpoint, e.g. '/config'
:param data: Data to attach to this request
:param headers: headers to attach to this request, default are JsonAcceptHeader
:param forceText: don't trust the response headers and just get the text
:param sendJson: Whether to treat attached data as JSON or not
"""
default_data = {} if sendJson else ""
data = data if data else default_data
headers = headers if headers else JsonAcceptHeader.copy()
if not sendJson:
headers.update(TextSendHeader)
if verb not in RequestVerbs:
die('Server call verb must be one of %s' % str(RequestVerbs.keys()))
if Verbose:
echo2("%s Endpoint:" % verb.upper(), servicePath)
echo2("%s Request data:" % verb.upper(), data)
echo2("%s Request headers:" % verb.upper(), headers)
verbFn = RequestVerbs[verb]
if sendJson:
resp = verbFn(self.serverEndpoint + servicePath, json=data, headers=headers)
else:
resp = verbFn(self.serverEndpoint + servicePath, data=data, headers=headers)
if Verbose:
echo2("Response headers:", resp.headers)
echo2("Response status:", resp.status_code)
if resp.status_code != 200:
if self.raiseErrors:
error = NutchException("Unexpected server response: %d" % resp.status_code)
error.status_code = resp.status_code
raise error
else:
warn('Nutch server returned status:', resp.status_code)
if forceText or 'content-type' not in resp.headers or resp.headers['content-type'] == 'text/plain':
if Verbose:
echo2("Response text:", resp.text)
return resp.text
content_type = resp.headers['content-type']
if content_type == 'application/json' and not forceText:
if Verbose:
echo2("Response JSON:", resp.json())
return resp.json()
else:
die('Did not understand server response: %s' % resp.headers)
|
Call the Nutch Server, do some error checking, and return the response.
:param verb: One of nutch.RequestVerbs
:param servicePath: path component of URL to append to endpoint, e.g. '/config'
:param data: Data to attach to this request
:param headers: headers to attach to this request, default are JsonAcceptHeader
:param forceText: don't trust the response headers and just get the text
:param sendJson: Whether to treat attached data as JSON or not
|
entailment
|
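`RequestVerbs` is referenced but not defined in this excerpt. Here is a sketch consistent with how `call` uses it, assuming it simply maps verb names to `requests` functions:
.. code-block:: python

    import requests

    # Each value is invoked as fn(url, json=..., headers=...) or
    # fn(url, data=..., headers=...), matching the requests API.
    RequestVerbs = {
        'get': requests.get,
        'put': requests.put,
        'post': requests.post,
        'delete': requests.delete,
    }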
def create(self, cid, configData):
"""
Create a new named (cid) configuration from a parameter dictionary (configData).
"""
configArgs = {'configId': cid, 'params': configData, 'force': True}
cid = self.server.call('post', "/config/create", configArgs, forceText=True, headers=TextAcceptHeader)
new_config = Config(cid, self.server)
return new_config
|
Create a new named (cid) configuration from a parameter dictionary (configData).
|
entailment
|
def list(self, allJobs=False):
"""
Return list of jobs at this endpoint.
Call list(allJobs=True) to see all jobs, not just the ones managed by this client
"""
jobs = self.server.call('get', '/job')
return [Job(job['id'], self.server) for job in jobs if allJobs or self._job_owned(job)]
|
Return list of jobs at this endpoint.
Call list(allJobs=True) to see all jobs, not just the ones managed by this client
|
entailment
|
def create(self, command, **args):
"""
Create a job given a command
:param command: Nutch command, one of nutch.LegalJobs
:param args: Additional arguments to pass to the job
:return: The created Job
"""
command = command.upper()
if command not in LegalJobs:
warn('Nutch command must be one of: %s' % ', '.join(LegalJobs))
else:
echo2('Starting %s job with args %s' % (command, str(args)))
parameters = self.parameters.copy()
parameters['type'] = command
parameters['crawlId'] = self.crawlId
parameters['confId'] = self.confId
parameters['args'].update(args)
job_info = self.server.call('post', "/job/create", parameters, JsonAcceptHeader)
job = Job(job_info['id'], self.server)
return job
|
Create a job given a command
:param command: Nutch command, one of nutch.LegalJobs
:param args: Additional arguments to pass to the job
:return: The created Job
|
entailment
|
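A hedged usage sketch; the constructor arguments follow the call in `main` above, and the endpoint and paths are placeholders:
.. code-block:: python

    nt = Nutch('my_crawl', 'default', 'http://localhost:8081', '/data/seed_dir')
    jobs = nt.Jobs()                  # crawlId defaults via defaultCrawlId()
    job = jobs.create('INJECT', url_dir='/data/seed_dir')
    print(job.info()['state'])        # e.g. 'RUNNING'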
def inject(self, seed=None, urlDir=None, **args):
"""
:param seed: A Seed object (this or urlDir must be specified)
:param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
:param args: Extra arguments for the job
:return: a created Job object
"""
if seed:
if urlDir and urlDir != seed.seedPath:
raise NutchException("Can't specify both seed and urlDir")
urlDir = seed.seedPath
elif urlDir:
pass
else:
raise NutchException("Must specify seed or urlDir")
args['url_dir'] = urlDir
return self.create('INJECT', **args)
|
:param seed: A Seed object (this or urlDir must be specified)
:param urlDir: The directory on the server containing the seed list (this or urlDir must be specified)
:param args: Extra arguments for the job
:return: a created Job object
|
entailment
|
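A sketch of the two equivalent ways to start an inject job (`nt.Seeds()` is referenced by `Crawl` below, but its definition lies outside this excerpt; `nt` and `jobs` follow the earlier sketch):
.. code-block:: python

    # Through a Seed object uploaded via the seed client:
    seed = nt.Seeds().create('my_seeds', ('http://example.com/',))
    job = jobs.inject(seed=seed)          # uses seed.seedPath as urlDir
    # Or by pointing at a seed directory already on the server:
    job = jobs.inject(urlDir='/data/seed_dir')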
def create(self, sid, seedList):
"""
Create a new named (sid) Seed from a list of seed URLs
:param sid: the name to assign to the new seed list
:param seedList: the list of seeds to use
:return: the created Seed object
"""
seedUrl = lambda uid, url: {"id": uid, "url": url}
if not isinstance(seedList,tuple):
seedList = (seedList,)
seedListData = {
"id": "12345",
"name": sid,
"seedUrls": [seedUrl(uid, url) for uid, url in enumerate(seedList)]
}
# As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123
seedPath = self.server.call('post', "/seed/create", seedListData, TextAcceptHeader)
new_seed = Seed(sid, seedPath, self.server)
return new_seed
|
Create a new named (sid) Seed from a list of seed URLs
:param sid: the name to assign to the new seed list
:param seedList: the list of seeds to use
:return: the created Seed object
|
entailment
|
def createFromFile(self, sid, filename):
"""
Create a new named (sid) Seed from a file containing URLs
It's assumed the URLs are whitespace separated.
:param sid: the name to assign to the new seed list
:param filename: the name of the file that contains URLs
:return: the created Seed object
"""
urls = []
with open(filename) as f:
for line in f:
for url in line.split():
urls.append(url)
return self.create(sid, tuple(urls))
|
Create a new named (sid) Seed from a file containing URLs
It's assumed the URLs are whitespace separated.
:param sid: the name to assign to the new seed list
:param filename: the name of the file that contains URLs
:return: the created Seed object
|
entailment
|
def _nextJob(self, job, nextRound=True):
"""
Given a completed job, start the next job in the round, or return None
:param nextRound: whether to start jobs from the next round if the current round is completed.
:return: the newly started Job, or None if no job was started
"""
jobInfo = job.info()
assert jobInfo['state'] == 'FINISHED'
roundEnd = False
if jobInfo['type'] == 'INJECT':
nextCommand = 'GENERATE'
elif jobInfo['type'] == 'GENERATE':
nextCommand = 'FETCH'
elif jobInfo['type'] == 'FETCH':
nextCommand = 'PARSE'
elif jobInfo['type'] == 'PARSE':
nextCommand = 'UPDATEDB'
elif jobInfo['type'] == 'UPDATEDB':
nextCommand = 'INVERTLINKS'
elif jobInfo['type'] == 'INVERTLINKS':
nextCommand = 'DEDUP'
elif jobInfo['type'] == 'DEDUP':
if self.enable_index:
nextCommand = 'INDEX'
else:
roundEnd = True
elif jobInfo['type'] == 'INDEX':
roundEnd = True
else:
raise NutchException("Unrecognized job type {}".format(jobInfo['type']))
if roundEnd:
if nextRound and self.currentRound < self.totalRounds:
nextCommand = 'GENERATE'
self.currentRound += 1
else:
return None
return self.jobClient.create(nextCommand)
|
Given a completed job, start the next job in the round, or return None
:param nextRound: whether to start jobs from the next round if the current round is completed.
:return: the newly started Job, or None if no job was started
|
entailment
|
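The if/elif chain above is equivalent to a lookup table, which can be easier to extend; a behavior-preserving sketch:
.. code-block:: python

    # Successor of each job type within a crawl round; DEDUP and INDEX
    # end the round (INDEX only runs when indexing is enabled).
    NEXT_COMMAND = {
        'INJECT': 'GENERATE',
        'GENERATE': 'FETCH',
        'FETCH': 'PARSE',
        'PARSE': 'UPDATEDB',
        'UPDATEDB': 'INVERTLINKS',
        'INVERTLINKS': 'DEDUP',
    }

    def next_command(job_type, enable_index):
        if job_type in NEXT_COMMAND:
            return NEXT_COMMAND[job_type]
        if job_type == 'DEDUP' and enable_index:
            return 'INDEX'
        if job_type in ('DEDUP', 'INDEX'):
            return None  # round complete
        raise NutchException("Unrecognized job type {}".format(job_type))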
def progress(self, nextRound=True):
"""
Check the status of the current job, activate the next job if it's finished, and return the active job
If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
:param nextRound: whether to start jobs from the next round if the current job/round is completed.
:return: the currently running Job, or None if no jobs are running.
"""
currentJob = self.currentJob
if currentJob is None:
return currentJob
jobInfo = currentJob.info()
if jobInfo['state'] == 'RUNNING':
return currentJob
elif jobInfo['state'] == 'FINISHED':
nextJob = self._nextJob(currentJob, nextRound)
self.currentJob = nextJob
return nextJob
else:
error = NutchCrawlException("Unexpected job state: {}".format(jobInfo['state']))
error.current_job = currentJob
raise error
|
Check the status of the current job, activate the next job if it's finished, and return the active job
If the current job has failed, a NutchCrawlException will be raised with no jobs attached.
:param nextRound: whether to start jobs from the next round if the current job/round is completed.
:return: the currently running Job, or None if no jobs are running.
|
entailment
|
def nextRound(self):
"""
Execute all jobs in the current round and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached
to the exception.
:return: a list of all completed Jobs
"""
finishedJobs = []
if self.currentJob is None:
self.currentJob = self.jobClient.create('GENERATE')
activeJob = self.progress(nextRound=False)
while activeJob:
oldJob = activeJob
activeJob = self.progress(nextRound=False) # updates self.currentJob
if oldJob and oldJob != activeJob:
finishedJobs.append(oldJob)
sleep(self.sleepTime)
self.currentRound += 1
return finishedJobs
|
Execute all jobs in the current round and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs from this round attached
to the exception.
:return: a list of all completed Jobs
|
entailment
|
def waitAll(self):
"""
Execute all queued rounds and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs attached
to the exception
:return: a list of jobs completed for each round, organized by round (list-of-lists)
"""
finishedRounds = [self.nextRound()]
while self.currentRound < self.totalRounds:
finishedRounds.append(self.nextRound())
return finishedRounds
|
Execute all queued rounds and return when they have finished.
If a job fails, a NutchCrawlException will be raised, with all completed jobs attached
to the exception
:return: a list of jobs completed for each round, organized by round (list-of-lists)
|
entailment
|
def Jobs(self, crawlId=None):
"""
Create a JobClient for listing and creating jobs.
The JobClient inherits the confId from the Nutch client.
:param crawlId: crawlId to use for this client. If not provided, one will be generated
by nutch.defaultCrawlId()
:return: a JobClient
"""
crawlId = crawlId if crawlId else defaultCrawlId()
return JobClient(self.server, crawlId, self.confId)
|
Create a JobClient for listing and creating jobs.
The JobClient inherits the confId from the Nutch client.
:param crawlId: crawlId to use for this client. If not provided, one will be generated
by nutch.defaultCrawlId()
:return: a JobClient
|
entailment
|
def Crawl(self, seed, seedClient=None, jobClient=None, rounds=1, index=True):
"""
Launch a crawl using the given seed
:param seed: Type (Seed or SeedList) - used for crawl
:param seedClient: if a SeedList is given, the SeedClient to upload, if None a default will be created
:param jobClient: the JobClient to be used, if None a default will be created
:param rounds: the number of rounds in the crawl
:param index: whether to run the INDEX job at the end of each round
:return: a CrawlClient to monitor and control the crawl
"""
if seedClient is None:
seedClient = self.Seeds()
if jobClient is None:
jobClient = self.Jobs()
if not isinstance(seed, Seed):
seed = seedClient.create(jobClient.crawlId + '_seeds', seed)
return CrawlClient(self.server, seed, jobClient, rounds, index)
|
Launch a crawl using the given seed
:param seed: Type (Seed or SeedList) - used for crawl
:param seedClient: if a SeedList is given, the SeedClient to upload, if None a default will be created
:param jobClient: the JobClient to be used, if None a default will be created
:param rounds: the number of rounds in the crawl
:param index: whether to run the INDEX job at the end of each round
:return: a CrawlClient to monitor and control the crawl
|
entailment
|
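Putting the client pieces together, an end-to-end sketch (endpoint, seed URL, and paths are placeholders):
.. code-block:: python

    nt = Nutch('my_crawl', 'default', 'http://localhost:8081', '/data/seed_dir')
    crawl = nt.Crawl(('http://example.com/',), rounds=2)  # uploads a seed list
    finished_rounds = crawl.waitAll()     # blocks until every round is done
    for round_no, round_jobs in enumerate(finished_rounds, 1):
        print(round_no, [job.info()['type'] for job in round_jobs])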
def haproxy(line):
#TODO Handle all message formats
'''
>>> import pprint
>>> input_line1 = 'Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] pre-staging~ pre-staging_doc/pre-staging_active 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018} "GET /doc/api/get?call=apple HTTP/1.1"'
>>> output_line1 = haproxy(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'Tc': 2.0,
'Tq': 261.0,
'Tr': 8.0,
'Tw': 0.0,
'_api': '/doc/api/get?call=apple',
'_headers': ['AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018'],
'actconn': 4,
'backend': 'pre-staging_doc/pre-staging_active',
'backend_queue': 0,
'beconn': 1,
'bytes_read': 2406.0,
'client_port': '48660',
'client_server': '1.1.1.1',
'feconn': 4,
'front_end': 'pre-staging~',
'haproxy_server': 'node',
'method': 'GET',
'resp_time': 271.0,
'retries': 0,
'srv_conn': 0,
'srv_queue': 0,
'status': '200',
'timestamp': '2019-04-24T00:00:02.358000'},
'event': 'haproxy_event',
'timestamp': '2019-04-24T00:00:02.358000',
'type': 'metric'}
'''
_line = line.strip().split()
log = {}
client = _line[5].split(':')
log['client_server'] = client[0].strip()
log['client_port'] = client[1].strip()
_timestamp = re.findall(r'\[(.*?)\]', _line[6])[0]
log['timestamp'] = datetime.datetime.strptime(_timestamp, '%d/%b/%Y:%H:%M:%S.%f').isoformat()
log['front_end'] = _line[7].strip()
log['backend'] = _line[8].strip()
timers = _line[9].split('/')  # Tq/Tw/Tc/Tr and total response time
log['Tq'] = float(timers[0].strip())
log['Tw'] = float(timers[1].strip())
log['Tc'] = float(timers[2].strip())
log['Tr'] = float(timers[3].strip())
log['resp_time'] = float(timers[-1].strip())
log['status'] = _line[10].strip()
log['bytes_read'] = float(_line[11].strip())
log['_headers'] = re.findall(r'{(.*)}', line)
log['haproxy_server'] = _line[3].strip()
log['method'] = _line[-3].strip('"').strip()
log['_api'] = _line[-2].strip()
conns = _line[15].split('/')  # connection counters
log['retries'] = int(conns[-1].strip())
log['actconn'] = int(conns[0].strip())
log['feconn'] = int(conns[1].strip())
log['beconn'] = int(conns[-2].strip())
log['srv_conn'] = int(conns[-3].strip())
queues = _line[16].split('/')  # srv_queue/backend_queue
log['srv_queue'] = int(queues[0].strip())
log['backend_queue'] = int(queues[1].strip())
return dict(
data=log,
event='haproxy_event',
timestamp=log.get('timestamp'),
type='metric'
)
|
>>> import pprint
>>> input_line1 = 'Apr 24 00:00:02 node haproxy[12298]: 1.1.1.1:48660 [24/Apr/2019:00:00:02.358] pre-staging~ pre-staging_doc/pre-staging_active 261/0/2/8/271 200 2406 - - ---- 4/4/0/1/0 0/0 {AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018} "GET /doc/api/get?call=apple HTTP/1.1"'
>>> output_line1 = haproxy(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'Tc': 2.0,
'Tq': 261.0,
'Tr': 8.0,
'Tw': 0.0,
'_api': '/doc/api/get?call=apple',
'_headers': ['AAAAAA:AAAAA_AAAAA:AAAAA_AAAAA_AAAAA:300A||| user@mail.net:sdasdasdasdsdasAHDivsjd=|user@mail.net|2018'],
'actconn': 4,
'backend': 'pre-staging_doc/pre-staging_active',
'backend_queue': 0,
'beconn': 1,
'bytes_read': 2406.0,
'client_port': '48660',
'client_server': '1.1.1.1',
'feconn': 4,
'front_end': 'pre-staging~',
'haproxy_server': 'node',
'method': 'GET',
'resp_time': 271.0,
'retries': 0,
'srv_conn': 0,
'srv_queue': 0,
'status': '200',
'timestamp': '2019-04-24T00:00:02.358000'},
'event': 'haproxy_event',
'timestamp': '2019-04-24T00:00:02.358000',
'type': 'metric'}
|
entailment
|
def nginx_access(line):
'''
>>> import pprint
>>> input_line1 = '{ \
"remote_addr": "127.0.0.1","remote_user": "-","timestamp": "1515144699.201", \
"request": "GET / HTTP/1.1","status": "200","request_time": "0.000", \
"body_bytes_sent": "396","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line1 = nginx_access(input_line1)
>>> pprint.pprint(output_line1)
{'data': {u'body_bytes_sent': 396.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'127.0.0.1',
u'remote_user': u'-',
u'request': u'GET / HTTP/1.1',
u'request_time': 0.0,
u'status': u'200',
u'timestamp': '2018-01-05T09:31:39.201000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:31:39.201000',
'type': 'metric'}
>>> input_line2 = '{ \
"remote_addr": "192.158.0.51","remote_user": "-","timestamp": "1515143686.415", \
"request": "POST /mpub?topic=heartbeat HTTP/1.1","status": "404","request_time": "0.000", \
"body_bytes_sent": "152","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line2 = nginx_access(input_line2)
>>> pprint.pprint(output_line2)
{'data': {u'body_bytes_sent': 152.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'192.158.0.51',
u'remote_user': u'-',
u'request': u'POST /mpub?topic=heartbeat HTTP/1.1',
u'request_time': 0.0,
u'status': u'404',
u'timestamp': '2018-01-05T09:14:46.415000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:14:46.415000',
'type': 'metric'}
'''
#TODO Handle nginx error logs
log = json.loads(line)
timestamp_iso = datetime.datetime.utcfromtimestamp(float(log['timestamp'])).isoformat()
log.update({'timestamp':timestamp_iso})
if '-' in log.get('upstream_response_time'):
log['upstream_response_time'] = 0.0
log['body_bytes_sent'] = float(log['body_bytes_sent'])
log['request_time'] = float(log['request_time'])
log['upstream_response_time'] = float(log['upstream_response_time'])
return dict(
timestamp=log.get('timestamp',' '),
data=log,
type='metric',
event='nginx_event',
)
|
>>> import pprint
>>> input_line1 = '{ \
"remote_addr": "127.0.0.1","remote_user": "-","timestamp": "1515144699.201", \
"request": "GET / HTTP/1.1","status": "200","request_time": "0.000", \
"body_bytes_sent": "396","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line1 = nginx_access(input_line1)
>>> pprint.pprint(output_line1)
{'data': {u'body_bytes_sent': 396.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'127.0.0.1',
u'remote_user': u'-',
u'request': u'GET / HTTP/1.1',
u'request_time': 0.0,
u'status': u'200',
u'timestamp': '2018-01-05T09:31:39.201000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:31:39.201000',
'type': 'metric'}
>>> input_line2 = '{ \
"remote_addr": "192.158.0.51","remote_user": "-","timestamp": "1515143686.415", \
"request": "POST /mpub?topic=heartbeat HTTP/1.1","status": "404","request_time": "0.000", \
"body_bytes_sent": "152","http_referer": "-","http_user_agent": "python-requests/2.18.4", \
"http_x_forwarded_for": "-","upstream_response_time": "-" \
}'
>>> output_line2 = nginx_access(input_line2)
>>> pprint.pprint(output_line2)
{'data': {u'body_bytes_sent': 152.0,
u'http_referer': u'-',
u'http_user_agent': u'python-requests/2.18.4',
u'http_x_forwarded_for': u'-',
u'remote_addr': u'192.158.0.51',
u'remote_user': u'-',
u'request': u'POST /mpub?topic=heartbeat HTTP/1.1',
u'request_time': 0.0,
u'status': u'404',
u'timestamp': '2018-01-05T09:14:46.415000',
u'upstream_response_time': 0.0},
'event': 'nginx_event',
'timestamp': '2018-01-05T09:14:46.415000',
'type': 'metric'}
|
entailment
|
def mongodb(line):
'''
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'}
'''
keys = ['timestamp', 'severity', 'component', 'context', 'message']
values = re.split(r'\s+', line, maxsplit=4)
mongodb_log = dict(zip(keys,values))
return dict(
timestamp=values[0],
data=mongodb_log,
type='log',
)
|
>>> import pprint
>>> input_line1 = '2017-08-17T07:56:33.489+0200 I REPL [signalProcessingThread] shutting down replication subsystems'
>>> output_line1 = mongodb(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'component': 'REPL',
'context': '[signalProcessingThread]',
'message': 'shutting down replication subsystems',
'severity': 'I',
'timestamp': '2017-08-17T07:56:33.489+0200'},
'timestamp': '2017-08-17T07:56:33.489+0200',
'type': 'log'}
>>> input_line2 = '2017-08-17T07:56:33.515+0200 W NETWORK [initandlisten] No primary detected for set confsvr_repl1'
>>> output_line2 = mongodb(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'component': 'NETWORK',
'context': '[initandlisten]',
'message': 'No primary detected for set confsvr_repl1',
'severity': 'W',
'timestamp': '2017-08-17T07:56:33.515+0200'},
'timestamp': '2017-08-17T07:56:33.515+0200',
'type': 'log'}
|
entailment
|
def django(line):
'''
>>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true }
'''
#TODO we need to handle case2 logs
data = {}
log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
if len(log) == 1:
data['timestamp'] = datetime.datetime.strptime(re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)',\
log[0])[0],"%d/%b/%Y %H:%M:%S").isoformat()
data['loglevel'] = re.findall(r'[A-Z]+', log[0])[1]
data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
message = re.findall(r'\{.+\}', log[0])
try:
if len(message) > 0:
message = json.loads(message[0])
else:
message = re.split(']', log[0])
message = ''.join(message[2:])
except ValueError:
message = re.split(']', log[0])
message = ''.join(message[2:])
data['message'] = message
return dict(
timestamp=data['timestamp'],
level=data['loglevel'],
data=data,
)
else:
return dict(
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()),
data={'raw': line}
)
|
>>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true }
|
entailment
|
def basescript(line):
'''
>>> import pprint
>>> input_line = '{"level": "warning", "timestamp": "2018-02-07T06:37:00.297610Z", "event": "exited via keyboard interrupt", "type": "log", "id": "20180207T063700_4d03fe800bd111e89ecb96000007bc65", "_": {"ln": 58, "file": "/usr/local/lib/python2.7/dist-packages/basescript/basescript.py", "name": "basescript.basescript", "fn": "start"}}'
>>> output_line1 = basescript(input_line)
>>> pprint.pprint(output_line1)
{'data': {u'_': {u'file': u'/usr/local/lib/python2.7/dist-packages/basescript/basescript.py',
u'fn': u'start',
u'ln': 58,
u'name': u'basescript.basescript'},
u'event': u'exited via keyboard interrupt',
u'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
u'level': u'warning',
u'timestamp': u'2018-02-07T06:37:00.297610Z',
u'type': u'log'},
'event': u'exited via keyboard interrupt',
'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
'level': u'warning',
'timestamp': u'2018-02-07T06:37:00.297610Z',
'type': u'log'}
'''
log = json.loads(line)
return dict(
timestamp=log['timestamp'],
data=log,
id=log['id'],
type=log['type'],
level=log['level'],
event=log['event']
)
|
>>> import pprint
>>> input_line = '{"level": "warning", "timestamp": "2018-02-07T06:37:00.297610Z", "event": "exited via keyboard interrupt", "type": "log", "id": "20180207T063700_4d03fe800bd111e89ecb96000007bc65", "_": {"ln": 58, "file": "/usr/local/lib/python2.7/dist-packages/basescript/basescript.py", "name": "basescript.basescript", "fn": "start"}}'
>>> output_line1 = basescript(input_line)
>>> pprint.pprint(output_line1)
{'data': {u'_': {u'file': u'/usr/local/lib/python2.7/dist-packages/basescript/basescript.py',
u'fn': u'start',
u'ln': 58,
u'name': u'basescript.basescript'},
u'event': u'exited via keyboard interrupt',
u'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
u'level': u'warning',
u'timestamp': u'2018-02-07T06:37:00.297610Z',
u'type': u'log'},
'event': u'exited via keyboard interrupt',
'id': u'20180207T063700_4d03fe800bd111e89ecb96000007bc65',
'level': u'warning',
'timestamp': u'2018-02-07T06:37:00.297610Z',
'type': u'log'}
|
entailment
|
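Since the parsers above embed doctests, they can be verified directly. Note the expected outputs use Python 2 reprs (`u'...'`, `L` suffixes), so they only pass under Python 2. A minimal runner, assuming the functions live in a hypothetical `log_parsers` module:
.. code-block:: python

    import doctest
    import log_parsers  # hypothetical module with haproxy, nginx_access, ...

    # Reports each doctest whose actual output differs from its docstring.
    results = doctest.testmod(log_parsers)
    print("attempted=%d failed=%d" % (results.attempted, results.failed))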