sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def write(self, obj: BioCDocument or BioCPassage or BioCSentence):
"""
Encode and write a single object.
Args:
obj: an instance of BioCDocument, BioCPassage, or BioCSentence
Returns:
"""
if self.level == DOCUMENT and not isinstance(obj, BioCDocument):
raise ValueError
if self.level == PASSAGE and not isinstance(obj, BioCPassage):
raise ValueError
if self.level == SENTENCE and not isinstance(obj, BioCSentence):
raise ValueError
self.writer.write(BioCJSONEncoder().default(obj)) | Encode and write a single object.
Args:
obj: an instance of BioCDocument, BioCPassage, or BioCSentence
Returns: | entailment |
def execute(self, input_data):
''' Execute method '''
# Spin up the rekall adapter
adapter = RekallAdapter()
adapter.set_plugin_name(self.plugin_name)
rekall_output = adapter.execute(input_data)
# Process the output data
for line in rekall_output:
if line['type'] == 'm': # Meta
self.output['meta'] = line['data']
elif line['type'] == 's': # New Session (Table)
self.current_table_name = line['data']['name'][1]
elif line['type'] == 't': # New Table Headers (column names)
self.column_map = {item['cname']: item['name'] if 'name' in item else item['cname'] for item in line['data']}
elif line['type'] == 'r': # Row
# Add the row to our current table
row = RekallAdapter.process_row(line['data'], self.column_map)
self.output['tables'][self.current_table_name].append(row)
else:
print 'Note: Ignoring rekall message of type %s: %s' % (line['type'], line['data'])
# All done
return self.output | Execute method | entailment |
def execute(self, input_data):
''' Execute the ViewMemoryDeep worker '''
# Aggregate the output from all the memory workers, clearly this could be kewler
output = input_data['view_memory']
output['tables'] = {}
for data in [input_data[key] for key in ViewMemoryDeep.dependencies]:
for name,table in data['tables'].iteritems():
output['tables'].update({name: table})
return output | Execute the ViewMemoryDeep worker | entailment |
def execute(self, input_data):
''' Execute the ViewMemory worker '''
# Aggregate the output from all the memory workers into concise summary info
output = {'meta': input_data['mem_meta']['tables']['info']}
output['connscan'] = list(set([item['Remote Address'] for item in input_data['mem_connscan']['tables']['connscan']]))
pslist_md5s = {self.file_to_pid(item['filename']): item['md5'] for item in input_data['mem_procdump']['tables']['dumped_files']}
output['pslist'] = ['PPID: %d PID: %d Name: %s - %s' % (item['PPID'], item['PID'], item['Name'], pslist_md5s[item['PID']])
for item in input_data['mem_pslist']['tables']['pslist']]
return output | Execute the ViewMemory worker | entailment |
def store(self, name, value, atype, new_name=None, multiplier=None, allowed_values=None):
''' store a config value in a dictionary, these values are used to populate a trasnfer spec
validation -- check type, check allowed values and rename if required '''
if value is not None:
_bad_type = (not isinstance(value, atype))
if not _bad_type:
# special case
_bad_type = (isinstance(value, bool) and atype == int)
if _bad_type:
# could be a special value
if allowed_values and value in allowed_values:
allowed_values = None
else:
raise ValueError("%s should be value of type (%s)" % (name, atype.__name__))
if allowed_values:
if isinstance(value, str):
if value not in allowed_values:
raise ValueError("%s can be %s" % (name, allowed_values))
elif isinstance(value, int):
if isinstance(allowed_values[0], int):
if value < allowed_values[0]:
raise ValueError("%s must be >= %d" % (name, allowed_values[0]))
_val = value if not multiplier else (multiplier * value)
_name = name if not new_name else new_name
self._dict[_name] = _val | store a config value in a dictionary, these values are used to populate a trasnfer spec
validation -- check type, check allowed values and rename if required | entailment |
def multi_session(self):
''' convert the multi_session param a number '''
_val = 0
if "multi_session" in self._dict:
_val = self._dict["multi_session"]
if str(_val).lower() == 'all':
_val = -1
return int(_val) | convert the multi_session param a number | entailment |
def _raw_aspera_metadata(self, bucket):
''' get the Aspera connection details on Aspera enabled buckets '''
response = self._client.get_bucket_aspera(Bucket=bucket)
# Parse metadata from response
aspera_access_key = response['AccessKey']['Id']
aspera_secret_key = response['AccessKey']['Secret']
ats_endpoint = response['ATSEndpoint']
return aspera_access_key, aspera_secret_key, ats_endpoint | get the Aspera connection details on Aspera enabled buckets | entailment |
def _fetch_transfer_spec(self, node_action, token, bucket_name, paths):
''' make hhtp call to Aspera to fetch back trasnfer spec '''
aspera_access_key, aspera_secret_key, ats_endpoint = self._get_aspera_metadata(bucket_name)
_headers = {'accept': "application/json",
'Content-Type': "application/json"}
credentials = {'type': 'token',
'token': {'delegated_refresh_token': token}}
_url = ats_endpoint
_headers['X-Aspera-Storage-Credentials'] = json.dumps(credentials)
_data = {'transfer_requests': [
{'transfer_request': {'paths': paths, 'tags': {'aspera': {
'node': {'storage_credentials': credentials}}}}}]}
_session = requests.Session()
_response = _session.post(url=_url + "/files/" + node_action,
auth=(aspera_access_key, aspera_secret_key),
headers=_headers, json=_data, verify=self._config.verify_ssl)
return _response | make hhtp call to Aspera to fetch back trasnfer spec | entailment |
def _create_transfer_spec(self, call_args):
''' pass the transfer details to aspera and receive back a
populated transfer spec complete with access token '''
_paths = []
for _file_pair in call_args.file_pair_list:
_path = OrderedDict()
if call_args.direction == enumAsperaDirection.SEND:
_action = "upload_setup"
_path['source'] = _file_pair.fileobj
_path['destination'] = _file_pair.key
else:
_action = "download_setup"
_path['source'] = _file_pair.key
_path['destination'] = _file_pair.fileobj
_paths.append(_path)
# Add credentials before the transfer spec is requested.
delegated_token = self._delegated_token_manager.get_token()
_response = self._fetch_transfer_spec(_action, delegated_token, call_args.bucket, _paths)
tspec_dict = json.loads(_response.content)['transfer_specs'][0]['transfer_spec']
tspec_dict["destination_root"] = "/"
if (call_args.transfer_config):
tspec_dict.update(call_args.transfer_config.dict)
if call_args.transfer_config.is_multi_session_all:
tspec_dict['multi_session'] = 0
_remote_host = tspec_dict['remote_host'].split('.')
# now we append '-all' to the remote host
_remote_host[0] += "-all"
tspec_dict['remote_host'] = ".".join(_remote_host)
logger.info("New remote_host(%s)" % tspec_dict['remote_host'])
call_args.transfer_spec = json.dumps(tspec_dict)
return True | pass the transfer details to aspera and receive back a
populated transfer spec complete with access token | entailment |
def upload_directory(self, directory, bucket, key, transfer_config=None, subscribers=None):
''' upload a directory using Aspera '''
check_io_access(directory, os.R_OK)
return self._queue_task(bucket, [FilePair(key, directory)], transfer_config,
subscribers, enumAsperaDirection.SEND) | upload a directory using Aspera | entailment |
def download_directory(self, bucket, key, directory, transfer_config=None, subscribers=None):
''' download a directory using Aspera '''
check_io_access(directory, os.W_OK)
return self._queue_task(bucket, [FilePair(key, directory)], transfer_config,
subscribers, enumAsperaDirection.RECEIVE) | download a directory using Aspera | entailment |
def upload(self, fileobj, bucket, key, transfer_config=None, subscribers=None):
''' upload a file using Aspera '''
check_io_access(fileobj, os.R_OK, True)
return self._queue_task(bucket, [FilePair(key, fileobj)], transfer_config,
subscribers, enumAsperaDirection.SEND) | upload a file using Aspera | entailment |
def download(self, bucket, key, fileobj, transfer_config=None, subscribers=None):
''' download a file using Aspera '''
check_io_access(os.path.dirname(fileobj), os.W_OK)
return self._queue_task(bucket, [FilePair(key, fileobj)], transfer_config,
subscribers, enumAsperaDirection.RECEIVE) | download a file using Aspera | entailment |
def set_log_details(aspera_log_path=None,
sdk_log_level=logging.NOTSET):
''' set the aspera log path - used by th Ascp process
set the internal aspera sdk activity - for debug purposes '''
if aspera_log_path:
check_io_access(aspera_log_path, os.W_OK)
AsperaTransferCoordinator.set_log_location(aspera_log_path)
if sdk_log_level != logging.NOTSET:
if logger:
if not len(logger.handlers):
handler = logging.StreamHandler()
_fmt = '%(asctime)s %(levelname)s %(message)s'
handler.setFormatter(logging.Formatter(_fmt))
logger.addHandler(handler)
logger.setLevel(sdk_log_level) | set the aspera log path - used by th Ascp process
set the internal aspera sdk activity - for debug purposes | entailment |
def _validate_args(self, args):
''' validate the user arguments '''
assert(args.bucket)
if args.subscribers:
for _subscriber in args.subscribers:
assert(isinstance(_subscriber, AsperaBaseSubscriber))
if (args.transfer_config):
assert(isinstance(args.transfer_config, AsperaConfig))
# number of sessions requested cant be greater than max ascps
if args.transfer_config.multi_session > self._config.ascp_max_concurrent:
raise ValueError("Max sessions is %d" % self._config.ascp_max_concurrent)
for _pair in args.file_pair_list:
if not _pair.key or not _pair.fileobj:
raise ValueError("Invalid file pair") | validate the user arguments | entailment |
def _queue_task(self, bucket, file_pair_list, transfer_config, subscribers, direction):
''' queue the upload/download - when get processed when resources available
Use class level transfer_config if not defined. '''
config = transfer_config if transfer_config else self._transfer_config
_call_args = CallArgs(bucket=bucket,
file_pair_list=file_pair_list,
transfer_config=config,
subscribers=subscribers,
direction=direction,
transfer_spec=None,
transfer_spec_func=self._create_transfer_spec,
transfer_id=str(uuid.uuid4()))
self._validate_args(_call_args)
return self._coordinator_controller._queue_task(_call_args) | queue the upload/download - when get processed when resources available
Use class level transfer_config if not defined. | entailment |
def _shutdown(self, cancel, cancel_msg, exc_type=CancelledError):
''' Internal shutdown used by 'shutdown' method above '''
if cancel:
# Cancel all in-flight transfers if requested, before waiting
# for them to complete.
self._coordinator_controller.cancel(cancel_msg, exc_type)
try:
# Wait until there are no more in-progress transfers. This is
# wrapped in a try statement because this can be interrupted
# with a KeyboardInterrupt that needs to be caught.
self._coordinator_controller.wait()
except KeyboardInterrupt:
# If not errors were raised in the try block, the cancel should
# have no coordinators it needs to run cancel on. If there was
# an error raised in the try statement we want to cancel all of
# the inflight transfers before shutting down to speed that
# process up.
self._coordinator_controller.cancel('KeyboardInterrupt()')
raise
finally:
self._coordinator_controller.cleanup() | Internal shutdown used by 'shutdown' method above | entailment |
def cleanup(self):
''' Stop backgroud thread and cleanup resources '''
self._processing_stop = True
self._wakeup_processing_thread()
self._processing_stopped_event.wait(3) | Stop backgroud thread and cleanup resources | entailment |
def tracked_coordinator_count(self, count_ascps=False):
''' count the number of cooridnators currently being processed
or count the number of ascps currently being used '''
with self._lock:
_count = 0
if count_ascps:
for _coordinator in self._tracked_transfer_coordinators:
_count += _coordinator.session_count
else:
_count = len(self._tracked_transfer_coordinators)
return _count | count the number of cooridnators currently being processed
or count the number of ascps currently being used | entailment |
def _queue_task(self, args):
''' add transfer to waiting queue if possible
then notify the background thread to process it '''
if self._cancel_called:
raise AsperaTransferQueueError("Cancel already called")
elif self._wait_called:
raise AsperaTransferQueueError("Cant queue items during wait")
elif self.waiting_coordinator_count() >= self._config.max_submission_queue_size:
raise AsperaTransferQueueError("Max queued items reached")
else:
_coordinator = AsperaTransferCoordinator(args)
_components = {'meta': TransferMeta(args, transfer_id=args.transfer_id),
'coordinator': _coordinator}
_transfer_future = AsperaTransferFuture(**_components)
_coordinator.add_subscribers(args.subscribers, future=_transfer_future)
_coordinator.add_done_callback(self.remove_aspera_coordinator,
transfer_coordinator=_coordinator)
self.append_waiting_queue(_coordinator)
if not self._processing_thread:
self._processing_thread = threading.Thread(target=self._process_waiting_queue)
self._processing_thread.daemon = True
self._processing_thread.start()
self._wakeup_processing_thread()
return _transfer_future | add transfer to waiting queue if possible
then notify the background thread to process it | entailment |
def remove_aspera_coordinator(self, transfer_coordinator):
''' remove entry from the waiting waiting
or remove item from processig queue and add to processed quque
notify background thread as it may be able to process watiign requests
'''
# usually called on processing completion - but can be called for a cancel
if self._in_waiting_queue(transfer_coordinator):
logger.info("Remove from waiting queue count=%d" % self.waiting_coordinator_count())
with self._lockw:
self._waiting_transfer_coordinators.remove(transfer_coordinator)
else:
logger.info("Remove from processing queue count=%d" % self.tracked_coordinator_count())
try:
self.remove_transfer_coordinator(transfer_coordinator)
self.append_processed_queue(transfer_coordinator)
except Exception:
pass
self._wakeup_processing_thread() | remove entry from the waiting waiting
or remove item from processig queue and add to processed quque
notify background thread as it may be able to process watiign requests | entailment |
def append_waiting_queue(self, transfer_coordinator):
''' append item to waiting queue '''
logger.debug("Add to waiting queue count=%d" % self.waiting_coordinator_count())
with self._lockw:
self._waiting_transfer_coordinators.append(transfer_coordinator) | append item to waiting queue | entailment |
def free_processed_queue(self):
''' call the Aspera sdk to freeup resources '''
with self._lock:
if len(self._processed_coordinators) > 0:
for _coordinator in self._processed_coordinators:
_coordinator.free_resources()
self._processed_coordinators = [] | call the Aspera sdk to freeup resources | entailment |
def is_stop(self):
''' has either of the stop processing flags been set '''
if len(self._processed_coordinators) > 0:
self.free_processed_queue()
return self._cancel_called or self._processing_stop | has either of the stop processing flags been set | entailment |
def _process_waiting_queue(self):
''' thread to processes the waiting queue
fetches transfer spec
then calls start transfer
ensures that max ascp is not exceeded '''
logger.info("Queue processing thread started")
while not self.is_stop():
self._processing_event.wait(3)
self._processing_event.clear()
if self.is_stop():
break
while self.waiting_coordinator_count() > 0:
if self.is_stop():
break
_used_slots = self.tracked_coordinator_count(True)
_free_slots = self._config.ascp_max_concurrent - _used_slots
if _free_slots <= 0:
break
with self._lockw:
# check are there enough free slots
_req_slots = self._waiting_transfer_coordinators[0].session_count
if _req_slots > _free_slots:
break
_coordinator = self._waiting_transfer_coordinators.popleft()
self.add_transfer_coordinator(_coordinator)
if not _coordinator.set_transfer_spec():
self.remove_aspera_coordinator(_coordinator)
else:
logger.info("ASCP process queue - Max(%d) InUse(%d) Free(%d) New(%d)" %
(self._config.ascp_max_concurrent,
_used_slots,
_free_slots,
_req_slots))
_coordinator.start_transfer()
logger.info("Queue processing thread stopped")
self._processing_stopped_event.set() | thread to processes the waiting queue
fetches transfer spec
then calls start transfer
ensures that max ascp is not exceeded | entailment |
def clear_waiting_coordinators(self, cancel=False):
''' remove all entries from waiting queue or cancell all in waiting queue '''
with self._lockw:
if cancel:
for _coordinator in self._waiting_transfer_coordinators:
_coordinator.notify_cancelled("Clear Waiting Queue", False)
self._waiting_transfer_coordinators.clear() | remove all entries from waiting queue or cancell all in waiting queue | entailment |
def cancel(self, *args, **kwargs):
""" Cancel all queue items - then attempt to cancel all in progress items """
self._cancel_called = True
self.clear_waiting_coordinators(cancel=True)
super(AsperaTransferCoordinatorController, self).cancel(*args, **kwargs) | Cancel all queue items - then attempt to cancel all in progress items | entailment |
def wait(self):
""" Wait until all in progress and queued items are processed """
self._wait_called = True
while self.tracked_coordinator_count() > 0 or \
self.waiting_coordinator_count() > 0:
time.sleep(1)
super(AsperaTransferCoordinatorController, self).wait()
self._wait_called = False | Wait until all in progress and queued items are processed | entailment |
def execute(self, input_data):
''' Execute the ViewZip worker '''
# Just a small check to make sure we haven't been called on the wrong file type
if (input_data['meta']['type_tag'] != 'zip'):
return {'error': self.__class__.__name__+': called on '+input_data['meta']['type_tag']}
view = {}
view['payload_md5s'] = input_data['unzip']['payload_md5s']
view['yara_sigs'] = input_data['yara_sigs']['matches'].keys()
view.update(input_data['meta'])
# Okay this view is going to also give the meta data about the payloads
view['payload_meta'] = [self.workbench.work_request('meta', md5) for md5 in input_data['unzip']['payload_md5s']]
return view | Execute the ViewZip worker | entailment |
def set_exception(self, exception):
"""Sets the exception on the future."""
if not self.is_done():
raise TransferNotDoneError(
'set_exception can only be called once the transfer is '
'complete.')
self._coordinator.set_exception(exception, override=True) | Sets the exception on the future. | entailment |
def transferReporter(self, xferId, message):
''' the callback method used by the Aspera sdk during transfer
to notify progress, error or successful completion
'''
if self.is_stopped():
return True
_asp_message = AsperaMessage(message)
if not _asp_message.is_msg_type(
[enumAsperaMsgType.INIT,
enumAsperaMsgType.DONE,
enumAsperaMsgType.ERROR,
enumAsperaMsgType.FILEERROR,
enumAsperaMsgType.STATS]):
return
_session_id = _asp_message.get_session_id()
_msg = self.debug_id(xferId, _session_id) + " : " + _asp_message._msg_type
logger.info(_msg)
with self._session_lock:
if _asp_message.is_msg_type([enumAsperaMsgType.INIT]):
assert(_session_id not in self._sessions)
_session = AsperaSession(_session_id)
self._sessions[_session_id] = _session
self.notify_init()
else:
_session = self._sessions[_session_id]
if _asp_message.is_msg_type([enumAsperaMsgType.DONE]):
if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()):
self.notify_progress()
_session.set_success()
self.notify_done()
elif _asp_message.is_msg_type([enumAsperaMsgType.ERROR, enumAsperaMsgType.FILEERROR]):
_session.set_error(_asp_message.get_error_descr())
self.notify_done(error=True)
elif _asp_message.is_msg_type([enumAsperaMsgType.STATS]):
if _session.set_bytes_transferred(_asp_message.get_bytes_transferred()):
self.notify_progress() | the callback method used by the Aspera sdk during transfer
to notify progress, error or successful completion | entailment |
def start_transfer(self):
''' pass the transfer spec to the Aspera sdk and start the transfer '''
try:
if not self.is_done():
faspmanager2.startTransfer(self.get_transfer_id(),
None,
self.get_transfer_spec(),
self)
except Exception as ex:
self.notify_exception(ex) | pass the transfer spec to the Aspera sdk and start the transfer | entailment |
def is_running(self, is_stopped):
''' check whether a transfer is currently running '''
if is_stopped and self.is_stopped():
return False
return faspmanager2.isRunning(self.get_transfer_id()) | check whether a transfer is currently running | entailment |
def is_stopped(self, is_stopping=True):
''' check whether a transfer is stopped or is being stopped '''
if is_stopping:
return self._is_stopped or self._is_stopping
return self._is_stopped | check whether a transfer is stopped or is being stopped | entailment |
def _modify_transfer(self, option, value=0):
''' call Apsera sdk modify an in progress eg pause/resume
allowed values defined in enumAsperaModifyTransfer class '''
_ret = False
try:
if self.is_running(True):
logger.info("ModifyTransfer called %d = %d" % (option, value))
_ret = faspmanager2.modifyTransfer(self.get_transfer_id(), option, value)
logger.info("ModifyTransfer returned %s" % _ret)
except Exception as ex:
self.notify_exception(ex)
return _ret | call Apsera sdk modify an in progress eg pause/resume
allowed values defined in enumAsperaModifyTransfer class | entailment |
def stop(self, free_resource=False):
''' send a stop transfer request to the Aspera sdk, can be done for:
cancel - stop an in progress transfer
free_resource - request to the Aspera sdk free resouces related to trasnfer_id
'''
if not self.is_stopped():
self._is_stopping = True
try:
if free_resource or self.is_running(False):
if not free_resource:
logger.info("StopTransfer called - %s" % self.get_transfer_id())
self._is_stopped = faspmanager2.stopTransfer(self.get_transfer_id())
if not free_resource:
logger.info("StopTransfer returned %s - %s" % (
self._is_stopped, self.get_transfer_id()))
except Exception as ex:
self.notify_exception(ex)
self._is_stopping = False
return self.is_stopped(False) | send a stop transfer request to the Aspera sdk, can be done for:
cancel - stop an in progress transfer
free_resource - request to the Aspera sdk free resouces related to trasnfer_id | entailment |
def free_resources(self):
''' call stop to free up resources '''
if not self.is_stopped():
logger.info("Freeing resources: %s" % self.get_transfer_id())
self.stop(True) | call stop to free up resources | entailment |
def extract_message_value(self, name):
''' search message to find and extract a named value '''
name += ":"
assert(self._message)
_start = self._message.find(name)
if _start >= 0:
_start += len(name) + 1
_end = self._message.find("\n", _start)
_value = self._message[_start:_end]
return _value.strip()
return None | search message to find and extract a named value | entailment |
def _set_status(self, status, ex=None):
''' set session status - eg failed, success --
valid values contained in enumAsperaControllerStatus class '''
self._status = status
logger.debug("Set status(%s) for %s" % (self._status, self.session_id))
self.set_done()
if ex:
self._exception = ex | set session status - eg failed, success --
valid values contained in enumAsperaControllerStatus class | entailment |
def set_bytes_transferred(self, bytes_transferred):
''' set the number of bytes transferred - if it has changed return True '''
_changed = False
if bytes_transferred:
_changed = (self._bytes_transferred != int(bytes_transferred))
if _changed:
self._bytes_transferred = int(bytes_transferred)
logger.debug("(%s) BytesTransferred: %d" % (
self.session_id, self._bytes_transferred))
if AsperaSession.PROGRESS_MSGS_SEND_ALL:
return True
return _changed | set the number of bytes transferred - if it has changed return True | entailment |
def set_exception(self, exception):
''' set the exception message and set the status to failed '''
logger.error("%s : %s" % (exception.__class__.__name__, str(exception)))
self._set_status(enumAsperaControllerStatus.FAILED, exception) | set the exception message and set the status to failed | entailment |
def wait(self):
''' wait for the done event to be set - no timeout'''
self._done_event.wait(MAXINT)
return self._status, self._exception | wait for the done event to be set - no timeout | entailment |
def cancel(self, msg='', exc_type=CancelledError):
"""Cancels the TransferFuture
:param msg: The message to attach to the cancellation
:param exc_type: The type of exception to set for the cancellation
"""
_ret = False
if not self.is_done():
self.notify_cancelled(msg, True)
_ret = True
return _ret | Cancels the TransferFuture
:param msg: The message to attach to the cancellation
:param exc_type: The type of exception to set for the cancellation | entailment |
def _update_session_count(self, type=0, actutal_session_count=0):
''' update the session/ascp count
0 : set the number of sessions being used to 1 or number specified in transfer config
-1: decrement the session count by one
1: set the session count to param value
'''
if type == 0: # init
_count = 0
if self._args.transfer_config:
_count = self._args.transfer_config.multi_session
self._session_count = _count if _count > 0 else 1
elif type == -1: # decrement
self._session_count -= 1
elif type == 1: # set from number of actual session objects
self._session_count = actutal_session_count | update the session/ascp count
0 : set the number of sessions being used to 1 or number specified in transfer config
-1: decrement the session count by one
1: set the session count to param value | entailment |
def result(self, raise_exception=True):
"""Waits until TransferFuture is done and returns the result
If the TransferFuture succeeded, it will return the result. If the
TransferFuture failed, it will raise the exception associated to the
failure.
"""
_status = None
_exception = None
self._done_event.wait(MAXINT) # first wait for session global
if self.is_failed(): # global exception set
_exception = self._exception
_status = enumAsperaControllerStatus.FAILED
else:
for _session in self._sessions.values():
_status_tmp, _exception_tmp = _session.wait()
if _exception_tmp and not _exception:
_exception = _exception_tmp
_status = _status_tmp
# Once done waiting, raise an exception if present or return the final status
if _exception and raise_exception:
raise _exception
return _status | Waits until TransferFuture is done and returns the result
If the TransferFuture succeeded, it will return the result. If the
TransferFuture failed, it will raise the exception associated to the
failure. | entailment |
def notify_init(self):
''' run the queed callback for just the first session only '''
_session_count = len(self._sessions)
self._update_session_count(1, _session_count)
if _session_count == 1:
self._run_queued_callbacks() | run the queed callback for just the first session only | entailment |
def notify_done(self, error=False, run_done_callbacks=True):
''' if error clear all sessions otherwise check to see if all other sessions are complete
then run the done callbacks
'''
if error:
for _session in self._sessions.values():
_session.set_done()
self._session_count = 0
else:
self._update_session_count(-1)
for _session in self._sessions.values():
if not _session.is_done():
return
if run_done_callbacks:
self._run_done_callbacks()
self._done_event.set() | if error clear all sessions otherwise check to see if all other sessions are complete
then run the done callbacks | entailment |
def notify_progress(self):
''' only call the progress callback if total has changed
or PROGRESS_MSGS_SEND_ALL is set '''
_total = 0
for _session in self._sessions.values():
_total += _session.bytes_transferred
if AsperaSession.PROGRESS_MSGS_SEND_ALL:
self._run_progress_callbacks(_total)
else:
# dont call progress callback unless total has changed
if self._total_bytes_transferred != _total:
self._total_bytes_transferred = _total
self._run_progress_callbacks(_total) | only call the progress callback if total has changed
or PROGRESS_MSGS_SEND_ALL is set | entailment |
def notify_exception(self, exception, run_done_callbacks=True):
''' set the exception message, stop transfer if running and set the done event '''
logger.error("%s : %s" % (exception.__class__.__name__, str(exception)))
self._exception = exception
if self.is_running(True):
# wait for a short 5 seconds for it to finish
for _cnt in range(0, 5):
if not self._cancel():
time.sleep(1)
else:
break
self.notify_done(error=True, run_done_callbacks=run_done_callbacks) | set the exception message, stop transfer if running and set the done event | entailment |
def is_success(self):
''' check all sessions to see if they have completed successfully '''
for _session in self._sessions.values():
if not _session.is_success():
return False
return True | check all sessions to see if they have completed successfully | entailment |
def set_transfer_spec(self):
''' run the function to set the transfer spec on error set associated exception '''
_ret = False
try:
self._args.transfer_spec_func(self._args)
_ret = True
except Exception as ex:
self.notify_exception(AsperaTransferSpecError(ex), False)
return _ret | run the function to set the transfer spec on error set associated exception | entailment |
def _add_subscribers_for_type(self, callback_type, subscribers, callbacks, **kwargs):
''' add a done/queued/progress callback to the appropriate list '''
for subscriber in subscribers:
callback_name = 'on_' + callback_type
if hasattr(subscriber, callback_name):
_function = functools.partial(getattr(subscriber, callback_name), **kwargs)
callbacks.append(_function) | add a done/queued/progress callback to the appropriate list | entailment |
def add_done_callback(self, function, **kwargs):
"""Add a done callback to be invoked when transfer is complete """
with self._callbacks_lock:
_function = functools.partial(function, **kwargs)
self._done_callbacks.append(_function) | Add a done callback to be invoked when transfer is complete | entailment |
def add_subscribers(self, subscribers, **kwargs):
""" Add a callbacks to be invoked during transfer """
if subscribers:
with self._callbacks_lock:
self._add_subscribers_for_type(
'done', subscribers, self._done_callbacks, **kwargs)
self._add_subscribers_for_type(
'queued', subscribers, self._queued_callbacks, **kwargs)
self._add_subscribers_for_type(
'progress', subscribers, self._progress_callbacks, **kwargs) | Add a callbacks to be invoked during transfer | entailment |
def _run_queued_callbacks(self):
''' run the init/quued calback when the trasnfer is initiated on apsera '''
for callback in self._queued_callbacks:
try:
callback()
except Exception as ex:
logger.error("Exception: %s" % str(ex)) | run the init/quued calback when the trasnfer is initiated on apsera | entailment |
def _run_progress_callbacks(self, bytes_transferred):
''' pass the number of bytes process to progress callbacks '''
if bytes_transferred:
for callback in self._progress_callbacks:
try:
callback(bytes_transferred=bytes_transferred)
except Exception as ex:
logger.error("Exception: %s" % str(ex)) | pass the number of bytes process to progress callbacks | entailment |
def _run_done_callbacks(self):
''' Run the callbacks and remove the callbacks from the internal
List so they do not get run again if done is notified more than once.
'''
with self._callbacks_lock:
for callback in self._done_callbacks:
try:
callback()
# We do not want a callback interrupting the process, especially
# in the failure cleanups. So log and catch, the excpetion.
except Exception as ex:
logger.error("Exception: %s" % str(ex))
logger.error("Exception raised in %s." % callback, exc_info=True)
self._done_callbacks = [] | Run the callbacks and remove the callbacks from the internal
List so they do not get run again if done is notified more than once. | entailment |
def total_span(self) -> BioCLocation:
"""The total span of this annotation. Discontinued locations will be merged."""
if not self.locations:
raise ValueError('BioCAnnotation must have at least one location')
start = min(l.offset for l in self.locations)
end = max(l.end for l in self.locations)
return BioCLocation(start, end - start) | The total span of this annotation. Discontinued locations will be merged. | entailment |
def get_node(self, role: str, default=None) -> BioCNode:
"""
Get the first node with role
Args:
role: role
default: node returned instead of raising StopIteration
Returns:
the first node with role
"""
return next((node for node in self.nodes if node.role == role), default) | Get the first node with role
Args:
role: role
default: node returned instead of raising StopIteration
Returns:
the first node with role | entailment |
def get_sentence(self, offset: int) -> BioCSentence or None:
"""
Gets sentence with specified offset
Args:
offset: sentence offset
Return:
the sentence with specified offset
"""
for sentence in self.sentences:
if sentence.offset == offset:
return sentence
return None | Gets sentence with specified offset
Args:
offset: sentence offset
Return:
the sentence with specified offset | entailment |
def get_passage(self, offset: int) -> BioCPassage or None:
"""
Gets passage
Args:
offset: passage offset
Return:
the passage with specified offset
"""
for passage in self.passages:
if passage.offset == offset:
return passage
return None | Gets passage
Args:
offset: passage offset
Return:
the passage with specified offset | entailment |
def of(cls, *passages: BioCPassage):
"""
Returns a collection passages
"""
if len(passages) <= 0:
raise ValueError("There has to be at least one passage.")
c = BioCDocument()
for passage in passages:
if passage is None:
raise ValueError('Passage is None')
c.add_passage(passage)
return c | Returns a collection passages | entailment |
def of(cls, *documents: BioCDocument):
"""
Returns a collection documents
"""
if len(documents) <= 0:
raise ValueError("There has to be at least one document.")
c = BioCCollection()
for document in documents:
if document is None:
raise ValueError('Document is None')
c.add_document(document)
return c | Returns a collection documents | entailment |
def add_it(workbench, file_list, labels):
"""Add the given file_list to workbench as samples, also add them as nodes.
Args:
workbench: Instance of Workbench Client.
file_list: list of files.
labels: labels for the nodes.
Returns:
A list of md5s.
"""
md5s = []
for filename in file_list:
if filename != '.DS_Store':
with open(filename, 'rb') as pe_file:
base_name = os.path.basename(filename)
md5 = workbench.store_sample(pe_file.read(), base_name, 'exe')
workbench.add_node(md5, md5[:6], labels)
md5s.append(md5)
return md5s | Add the given file_list to workbench as samples, also add them as nodes.
Args:
workbench: Instance of Workbench Client.
file_list: list of files.
labels: labels for the nodes.
Returns:
A list of md5s. | entailment |
def jaccard_sims(feature_list):
"""Compute Jaccard similarities between all the observations in the feature list.
Args:
feature_list: a list of dictionaries, each having structure as
{ 'md5' : String, 'features': list of Strings }
Returns:
list of dictionaries with structure as
{'source': md5 String, 'target': md5 String, 'sim': Jaccard similarity Number}
"""
sim_info_list = []
for feature_info in feature_list:
md5_source = feature_info['md5']
features_source = feature_info['features']
for feature_info in feature_list:
md5_target = feature_info['md5']
features_target = feature_info['features']
if md5_source == md5_target:
continue
sim = jaccard_sim(features_source, features_target)
if sim > .5:
sim_info_list.append({'source': md5_source, 'target': md5_target, 'sim': sim})
return sim_info_list | Compute Jaccard similarities between all the observations in the feature list.
Args:
feature_list: a list of dictionaries, each having structure as
{ 'md5' : String, 'features': list of Strings }
Returns:
list of dictionaries with structure as
{'source': md5 String, 'target': md5 String, 'sim': Jaccard similarity Number} | entailment |
def jaccard_sim(features1, features2):
"""Compute similarity between two sets using Jaccard similarity.
Args:
features1: list of PE Symbols.
features2: list of PE Symbols.
Returns:
Returns an int.
"""
set1 = set(features1)
set2 = set(features2)
try:
return len(set1.intersection(set2))/float(max(len(set1), len(set2)))
except ZeroDivisionError:
return 0 | Compute similarity between two sets using Jaccard similarity.
Args:
features1: list of PE Symbols.
features2: list of PE Symbols.
Returns:
Returns an int. | entailment |
def run():
"""This client generates a similarity graph from features in PE Files."""
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
# Test out PEFile -> pe_deep_sim -> pe_jaccard_sim -> graph
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data/pe/bad')
bad_files = [os.path.join(data_path, child) for child in os.listdir(data_path)][:5]
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data/pe/good')
good_files = [os.path.join(data_path, child) for child in os.listdir(data_path)][:5]
# Clear any graph in the Neo4j database
workbench.clear_graph_db()
# First throw them into workbench and add them as nodes into the graph
all_md5s = add_it(workbench, bad_files, ['exe', 'bad']) + add_it(workbench, good_files, ['exe', 'good'])
# Make a sample set
sample_set = workbench.store_sample_set(all_md5s)
# Compute pe_features on all files of type pe, just pull back the sparse features
import_gen = workbench.set_work_request('pe_features', sample_set, ['md5', 'sparse_features.imported_symbols'])
imports = [{'md5': r['md5'], 'features': r['imported_symbols']} for r in import_gen]
# Compute pe_features on all files of type pe, just pull back the sparse features
warning_gen = workbench.set_work_request('pe_features', sample_set, ['md5', 'sparse_features.pe_warning_strings'])
warnings = [{'md5': r['md5'], 'features': r['pe_warning_strings']} for r in warning_gen]
# Compute strings on all files of type pe, just pull back the string_list
string_gen = workbench.set_work_request('strings', sample_set, ['md5', 'string_list'])
strings = [{'md5': r['md5'], 'features': r['string_list']} for r in string_gen]
# Compute pe_peid on all files of type pe, just pull back the match_list
# Fixme: commenting this out until we figure out why peid is SO slow
'''
peid_gen = workbench.set_work_request('pe_peid', sample_set, ['md5', 'match_list']})
peids = [{'md5': r['md5'], 'features': r['match_list']} for r in peid_gen]
'''
# Compute the Jaccard Index between imported systems and store as relationships
sims = jaccard_sims(imports)
for sim_info in sims:
workbench.add_rel(sim_info['source'], sim_info['target'], 'imports')
# Compute the Jaccard Index between warnings and store as relationships
sims = jaccard_sims(warnings)
for sim_info in sims:
workbench.add_rel(sim_info['source'], sim_info['target'], 'warnings')
# Compute the Jaccard Index between strings and store as relationships
sims = jaccard_sims(strings)
for sim_info in sims:
workbench.add_rel(sim_info['source'], sim_info['target'], 'strings')
# Compute the Jaccard Index between peids and store as relationships
# Fixme: commenting this out until we figure out why peid is SO slow
'''
sims = jaccard_sims(peids)
for sim_info in sims:
workbench.add_rel(sim_info['source'], sim_info['target'], 'peids')
'''
# Compute pe_deep_sim on all files of type pe
results = workbench.set_work_request('pe_deep_sim', sample_set)
# Store the ssdeep sims as relationships
for result in list(results):
for sim_info in result['sim_list']:
workbench.add_rel(result['md5'], sim_info['md5'], 'ssdeep')
# Let them know where they can get there graph
print 'All done: go to http://localhost:7474/browser and execute this query: "%s"' % \
('match (n)-[r]-() return n,r') | This client generates a similarity graph from features in PE Files. | entailment |
def pad_char(text: str, width: int, char: str = '\n') -> str:
"""Pads a text until length width."""
dis = width - len(text)
if dis < 0:
raise ValueError
if dis > 0:
text += char * dis
return text | Pads a text until length width. | entailment |
def get_text(obj) -> Tuple[int, str]:
"""
Return text with its offset in the document
Args:
obj: BioCDocument, BioCPassage, or BioCSentence
Returns:
offset, text
"""
from bioc.bioc import BioCDocument, BioCPassage, BioCSentence
if isinstance(obj, BioCSentence):
return obj.offset, obj.text
if isinstance(obj, BioCPassage):
if obj.text:
return obj.offset, obj.text
text = ''
for sentence in obj.sentences:
try:
text = pad_char(text, sentence.offset - obj.offset, ' ')
assert sentence.text, f'BioC sentence has no text: {sentence.offset}'
text += sentence.text
except ValueError:
raise ValueError(f'Overlapping sentences {sentence.offset}')
return obj.offset, text
if isinstance(obj, BioCDocument):
text = ''
for passage in obj.passages:
try:
text = pad_char(text, passage.offset)
text += get_text(passage)[1]
except ValueError:
raise ValueError(f'{obj.id}: overlapping passages {passage.offset}')
return 0, text
raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '
f'BioCDocument, BioCPassage, or BioCSentence') | Return text with its offset in the document
Args:
obj: BioCDocument, BioCPassage, or BioCSentence
Returns:
offset, text | entailment |
def pretty_print(source, dest):
"""
Pretty print the XML file
"""
parser = etree.XMLParser(remove_blank_text=True)
if not isinstance(source, str):
source = str(source)
tree = etree.parse(source, parser)
docinfo = tree.docinfo
with open(dest, 'wb') as fp:
fp.write(etree.tostring(tree, pretty_print=True,
encoding=docinfo.encoding, standalone=docinfo.standalone)) | Pretty print the XML file | entailment |
def shorten_text(text: str):
"""Return a short repr of text if it is longer than 40"""
if len(text) <= 40:
text = text
else:
text = text[:17] + ' ... ' + text[-17:]
return repr(text) | Return a short repr of text if it is longer than 40 | entailment |
def execute(self, input_data):
''' This worker computes meta data for any file type. '''
raw_bytes = input_data['sample']['raw_bytes']
self.meta['md5'] = hashlib.md5(raw_bytes).hexdigest()
self.meta['tags'] = input_data['tags']['tags']
self.meta['type_tag'] = input_data['sample']['type_tag']
with magic.Magic() as mag:
self.meta['file_type'] = mag.id_buffer(raw_bytes[:1024])
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
self.meta['mime_type'] = mag.id_buffer(raw_bytes[:1024])
with magic.Magic(flags=magic.MAGIC_MIME_ENCODING) as mag:
try:
self.meta['encoding'] = mag.id_buffer(raw_bytes[:1024])
except magic.MagicError:
self.meta['encoding'] = 'unknown'
self.meta['file_size'] = len(raw_bytes)
self.meta['filename'] = input_data['sample']['filename']
self.meta['import_time'] = input_data['sample']['import_time']
self.meta['customer'] = input_data['sample']['customer']
self.meta['length'] = input_data['sample']['length']
return self.meta | This worker computes meta data for any file type. | entailment |
def execute(self, input_data):
''' Execute the Strings worker '''
raw_bytes = input_data['sample']['raw_bytes']
strings = self.find_strings.findall(raw_bytes)
return {'string_list': strings} | Execute the Strings worker | entailment |
def execute(self, input_data):
''' Execute the PEIndicators worker '''
raw_bytes = input_data['sample']['raw_bytes']
# Analyze the output of pefile for any anomalous conditions.
# Have the PE File module process the file
try:
self.pefile_handle = pefile.PE(data=raw_bytes, fast_load=False)
except (AttributeError, pefile.PEFormatError), error:
return {'error': str(error), 'indicator_list': [{'Error': 'PE module failed!'}]}
indicators = []
indicators += [{'description': warn, 'severity': 2, 'category': 'PE_WARN'}
for warn in self.pefile_handle.get_warnings()]
# Automatically invoke any method of this class that starts with 'check'
check_methods = self._get_check_methods()
for check_method in check_methods:
hit_data = check_method()
if hit_data:
indicators.append(hit_data)
return {'indicator_list': indicators} | Execute the PEIndicators worker | entailment |
def check_checksum_mismatch(self):
''' Checking for a checksum that doesn't match the generated checksum '''
if self.pefile_handle.OPTIONAL_HEADER:
if self.pefile_handle.OPTIONAL_HEADER.CheckSum != self.pefile_handle.generate_checksum():
return {'description': 'Reported Checksum does not match actual checksum',
'severity': 2, 'category': 'MALFORMED'}
return None | Checking for a checksum that doesn't match the generated checksum | entailment |
def check_nonstandard_section_name(self):
''' Checking for an non-standard section name '''
std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata',
'.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls',
'.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata']
for i in range(200):
std_sections.append('/'+str(i))
non_std_sections = []
for section in self.pefile_handle.sections:
name = convert_to_ascii_null_term(section.Name).lower()
if (name not in std_sections):
non_std_sections.append(name)
if non_std_sections:
return{'description': 'Section(s) with a non-standard name, tamper indication',
'severity': 3, 'category': 'MALFORMED', 'attributes': non_std_sections}
return None | Checking for an non-standard section name | entailment |
def check_image_size_incorrect(self):
''' Checking if the reported image size matches the actual image size '''
last_virtual_address = 0
last_virtual_size = 0
section_alignment = self.pefile_handle.OPTIONAL_HEADER.SectionAlignment
total_image_size = self.pefile_handle.OPTIONAL_HEADER.SizeOfImage
for section in self.pefile_handle.sections:
if section.VirtualAddress > last_virtual_address:
last_virtual_address = section.VirtualAddress
last_virtual_size = section.Misc_VirtualSize
# Just pad the size to be equal to the alignment and check for mismatch
last_virtual_size += section_alignment - (last_virtual_size % section_alignment)
if (last_virtual_address + last_virtual_size) != total_image_size:
return {'description': 'Image size does not match reported size',
'severity': 3, 'category': 'MALFORMED'}
return None | Checking if the reported image size matches the actual image size | entailment |
def check_section_unaligned(self):
''' Checking if any of the sections are unaligned '''
file_alignment = self.pefile_handle.OPTIONAL_HEADER.FileAlignment
unaligned_sections = []
for section in self.pefile_handle.sections:
if section.PointerToRawData % file_alignment:
unaligned_sections.append(section.Name)
# If we had any unaligned sections, return them
if unaligned_sections:
return {'description': 'Unaligned section, tamper indication',
'severity': 3, 'category': 'MALFORMED', 'attributes': unaligned_sections}
return None | Checking if any of the sections are unaligned | entailment |
def check_section_oversized(self):
''' Checking if any of the sections go past the total size of the image '''
total_image_size = self.pefile_handle.OPTIONAL_HEADER.SizeOfImage
for section in self.pefile_handle.sections:
if section.PointerToRawData + section.SizeOfRawData > total_image_size:
return {'description': 'Oversized section, storing addition data within the PE',
'severity': 3, 'category': 'MALFORMED', 'attributes': section.Name}
return None | Checking if any of the sections go past the total size of the image | entailment |
def _search_within_pe_warnings(self, matches):
''' Just encapsulating a search that takes place fairly often '''
pattern = '|'.join(re.escape(match) for match in matches)
exp = re.compile(pattern)
if any(exp.search(warning) for warning in self.pefile_handle.get_warnings()):
return True
return False | Just encapsulating a search that takes place fairly often | entailment |
def _search_for_import_symbols(self, matches):
''' Just encapsulating a search that takes place fairly often '''
# Sanity check
if not hasattr(self.pefile_handle, 'DIRECTORY_ENTRY_IMPORT'):
return []
# Find symbols that match
pattern = '|'.join(re.escape(match) for match in matches)
exp = re.compile(pattern)
symbol_list = []
for module in self.pefile_handle.DIRECTORY_ENTRY_IMPORT:
for symbol in module.imports:
if (symbol.name):
symbol_list.append(symbol.name.lower())
symbol_matches = []
for symbol in symbol_list:
if exp.search(symbol):
symbol_matches.append(symbol)
return symbol_matches | Just encapsulating a search that takes place fairly often | entailment |
def _search_for_export_symbols(self, matches):
''' Just encapsulating a search that takes place fairly often '''
pattern = '|'.join(re.escape(match) for match in matches)
exp = re.compile(pattern)
symbol_list = []
try:
for symbol in self.pefile_handle.DIRECTORY_ENTRY_EXPORT.symbols:
if symbol.name:
symbol_list.append(symbol.name.lower())
symbol_matches = []
for symbol in symbol_list:
if exp.search(symbol):
symbol_matches.append(symbol)
return symbol_matches
except AttributeError:
return [] | Just encapsulating a search that takes place fairly often | entailment |
def annotations(obj: BioCCollection or BioCDocument or BioCPassage or BioCSentence,
docid: str = None, level: int = PASSAGE) -> Generator[BioCAnnotation, None, None]:
"""
Get all annotations in document id.
Args:
obj: BioCCollection, BioCDocument, BioCPassage, or BioCSentence
docid: document id. If None, all documents
level: one of DOCUMENT, PASSAGE, SENTENCE
Yields:
one annotation
"""
if isinstance(obj, BioCCollection):
for document in filter(lambda d: docid is None or docid == d.id, obj.documents):
yield from annotations(document, level=level)
elif isinstance(obj, BioCDocument):
if level == DOCUMENT:
yield from obj.annotations
elif level in (PASSAGE, SENTENCE):
for passage in obj.passages:
yield from annotations(passage, level=level)
else:
raise ValueError('level must be DOCUMENT, PASSAGE, or SENTENCE')
elif isinstance(obj, BioCPassage):
if level == PASSAGE:
yield from obj.annotations
elif level == SENTENCE:
for sentence in obj.sentences:
yield from annotations(sentence, level=level)
else:
raise ValueError('level must be PASSAGE or SENTENCE')
elif isinstance(obj, BioCSentence):
if level == SENTENCE:
yield from obj.annotations
else:
raise ValueError('level must be SENTENCE')
else:
raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '
f'BioCDocument, BioCPassage, or BioCSentence') | Get all annotations in document id.
Args:
obj: BioCCollection, BioCDocument, BioCPassage, or BioCSentence
docid: document id. If None, all documents
level: one of DOCUMENT, PASSAGE, SENTENCE
Yields:
one annotation | entailment |
def sentences(obj: BioCCollection or BioCDocument or BioCPassage) \
-> Generator[BioCSentence, None, None]:
"""
Get all sentences in document id.
Args:
obj: BioCCollection, BioCDocument, or BioCPassage
Yields:
one sentence
"""
if isinstance(obj, BioCCollection):
for document in obj.documents:
yield from sentences(document)
elif isinstance(obj, BioCDocument):
for passage in obj.passages:
yield from sentences(passage)
elif isinstance(obj, BioCPassage):
yield from obj.sentences
else:
raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '
f'BioCDocument, BioCPassage, or BioCSentence') | Get all sentences in document id.
Args:
obj: BioCCollection, BioCDocument, or BioCPassage
Yields:
one sentence | entailment |
def execute(self, input_data):
''' This worker classifies PEFiles as Evil or AOK (TOY not a real classifier at this point)'''
# In general you'd do something different with these two outputs
# for this toy example will just smash them in a big string
pefile_output = input_data['pe_features']
indicators = input_data['pe_indicators']
all_input = str(pefile_output) + str(indicators)
flag = 'Reported Checksum does not match actual checksum'
if flag in all_input:
self.output['classification'] = 'Toy/Fake Classifier says Evil!'
return self.output | This worker classifies PEFiles as Evil or AOK (TOY not a real classifier at this point) | entailment |
def dump(collection: BioCCollection, fp, pretty_print: bool = True):
"""
Serialize ``collection`` as a BioC formatted stream to ``fp``.
Args:
collection: the BioC collection
fp: a ``.write()``-supporting file-like object
pretty_print: enables formatted XML
"""
fp.write(dumps(collection, pretty_print)) | Serialize ``collection`` as a BioC formatted stream to ``fp``.
Args:
collection: the BioC collection
fp: a ``.write()``-supporting file-like object
pretty_print: enables formatted XML | entailment |
def dumps(collection: BioCCollection, pretty_print: bool = True) -> str:
"""
Serialize ``collection`` to a BioC formatted ``str``.
Args:
collection: the BioC collection
pretty_print: enables formatted XML
Returns:
a BioC formatted ``str``
"""
doc = etree.ElementTree(BioCXMLEncoder().encode(collection))
s = etree.tostring(doc, pretty_print=pretty_print, encoding=collection.encoding,
standalone=collection.standalone)
return s.decode(collection.encoding) | Serialize ``collection`` to a BioC formatted ``str``.
Args:
collection: the BioC collection
pretty_print: enables formatted XML
Returns:
a BioC formatted ``str`` | entailment |
def encode_location(location: BioCLocation):
"""Encode a single location."""
return etree.Element('location',
{'offset': str(location.offset), 'length': str(location.length)}) | Encode a single location. | entailment |
def encode_relation(relation: BioCRelation):
"""Encode a single relation."""
tree = etree.Element('relation', {'id': relation.id})
encode_infons(tree, relation.infons)
for node in relation.nodes:
tree.append(encode_node(node))
return tree | Encode a single relation. | entailment |
def encode_annotation(annotation):
"""Encode a single annotation."""
tree = etree.Element('annotation', {'id': annotation.id})
encode_infons(tree, annotation.infons)
for location in annotation.locations:
tree.append(encode_location(location))
etree.SubElement(tree, 'text').text = annotation.text
return tree | Encode a single annotation. | entailment |
def encode_sentence(sentence):
"""Encode a single sentence."""
tree = etree.Element('sentence')
encode_infons(tree, sentence.infons)
etree.SubElement(tree, 'offset').text = str(sentence.offset)
if sentence.text:
etree.SubElement(tree, 'text').text = sentence.text
for ann in sentence.annotations:
tree.append(encode_annotation(ann))
for rel in sentence.relations:
tree.append(encode_relation(rel))
return tree | Encode a single sentence. | entailment |
def encode_passage(passage):
"""Encode a single passage."""
tree = etree.Element('passage')
encode_infons(tree, passage.infons)
etree.SubElement(tree, 'offset').text = str(passage.offset)
if passage.text:
etree.SubElement(tree, 'text').text = passage.text
for sen in passage.sentences:
tree.append(encode_sentence(sen))
for ann in passage.annotations:
tree.append(encode_annotation(ann))
for rel in passage.relations:
tree.append(encode_relation(rel))
return tree | Encode a single passage. | entailment |
def encode_document(document):
"""Encode a single document."""
tree = etree.Element('document')
etree.SubElement(tree, 'id').text = document.id
encode_infons(tree, document.infons)
for passage in document.passages:
tree.append(encode_passage(passage))
for ann in document.annotations:
tree.append(encode_annotation(ann))
for rel in document.relations:
tree.append(encode_relation(rel))
return tree | Encode a single document. | entailment |
def encode_collection(collection):
"""Encode a single collection."""
tree = etree.Element('collection')
etree.SubElement(tree, 'source').text = collection.source
etree.SubElement(tree, 'date').text = collection.date
etree.SubElement(tree, 'key').text = collection.key
encode_infons(tree, collection.infons)
for doc in collection.documents:
tree.append(encode_document(doc))
return tree | Encode a single collection. | entailment |
def default(self, obj):
"""Implement this method in a subclass such that it returns a tree for ``o``."""
if isinstance(obj, BioCDocument):
return encode_document(obj)
if isinstance(obj, BioCCollection):
return encode_collection(obj)
raise TypeError(f'Object of type {obj.__class__.__name__} is not BioC XML serializable') | Implement this method in a subclass such that it returns a tree for ``o``. | entailment |
def write_collection_info(self, collection: BioCCollection):
"""
Writes the collection information: encoding, version, DTD, source, date, key, infons, etc.
"""
elem = etree.Element('source')
elem.text = collection.source
self.__writer.send(elem)
elem = etree.Element('date')
elem.text = collection.date
self.__writer.send(elem)
elem = etree.Element('key')
elem.text = collection.key
self.__writer.send(elem)
for k, v in collection.infons.items():
elem = etree.Element('infon', {'key': str(k)})
elem.text = str(v)
self.__writer.send(elem) | Writes the collection information: encoding, version, DTD, source, date, key, infons, etc. | entailment |
def write_document(self, document: BioCDocument):
"""Encode and write a single document."""
tree = self.encoder.encode(document)
self.__writer.send(tree) | Encode and write a single document. | entailment |
def run():
"""This client generates customer reports on all the samples in workbench."""
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
all_set = workbench.generate_sample_set()
results = workbench.set_work_request('view_customer', all_set)
for customer in results:
print customer['customer'] | This client generates customer reports on all the samples in workbench. | entailment |
def check_io_access(ioobj, access, is_file=False):
''' check if a file/folder exists and has a given IO access '''
if ((is_file and not os.path.isfile(ioobj)) or
(not is_file and not os.path.isdir(ioobj)) or
not os.access(ioobj, access)):
_objtype = "File" if is_file else "Directory"
raise IOError("Error accessing %s: %s" % (_objtype, ioobj)) | check if a file/folder exists and has a given IO access | entailment |
def validate(collection, onerror: Callable[[str, List], None] = None):
"""Validate BioC data structure."""
BioCValidator(onerror).validate(collection) | Validate BioC data structure. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.