sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def r_to_s(func):
    """Decorate a Workbench method so its string result repr()s as itself."""

    class _ReprIsStr(str):
        """str subclass whose __repr__ is its plain string value."""

        def __repr__(self):
            return str(self)

    @functools.wraps(func)
    def _wrapped(*args, **kwargs):
        """Wrap the decorated function's return value in the repr-friendly str."""
        return _ReprIsStr(func(*args, **kwargs))

    return _wrapped
Decorator method for Workbench methods returning a str
entailment
def all_files_in_directory(path):
    """Recursively list all files under a directory.

    Args:
        path: root directory to walk.

    Returns:
        A list of full file paths for every file below ``path``.
    """
    file_list = []
    # os.walk visits every subdirectory; we only care about the files
    for dirname, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            file_list.append(os.path.join(dirname, filename))
    return file_list
Recursively list all files under a directory
entailment
def run(): """This client pushes a big directory of different files into Workbench.""" # Grab server args args = client_helper.grab_server_args() # Start up workbench connection workbench = zerorpc.Client(timeout=300, heartbeat=60) workbench.connect('tcp://'+args['server']+':'+args['port']) # Grab all the filenames from the data directory data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data') file_list = all_files_in_directory(data_dir) # Upload the files into workbench md5_list = [] for path in file_list: # Skip OS generated files if '.DS_Store' in path: continue with open(path,'rb') as f: filename = os.path.basename(path) # Here we're going to save network traffic by asking # Workbench if it already has this md5 raw_bytes = f.read() md5 = hashlib.md5(raw_bytes).hexdigest() md5_list.append(md5) if workbench.has_sample(md5): print 'Workbench already has this sample %s' % md5 else: # Store the sample into workbench md5 = workbench.store_sample(raw_bytes, filename, 'unknown') print 'Filename %s uploaded: type_tag %s, md5 %s' % (filename, 'unknown', md5) # Okay now explode any container types zip_files = workbench.generate_sample_set('zip') _foo = workbench.set_work_request('unzip', zip_files); list(_foo) # See Issue #306 pcap_files = workbench.generate_sample_set('pcap') _foo = workbench.set_work_request('pcap_bro', pcap_files); list(_foo) # See Issue #306 mem_files = workbench.generate_sample_set('mem') _foo = workbench.set_work_request('mem_procdump', mem_files); list(_foo) # See Issue #306 # Make sure all files are properly identified print 'Info: Ensuring File Identifications...' type_tag_set = set() all_files = workbench.generate_sample_set() meta_all = workbench.set_work_request('meta', all_files) for meta in meta_all: type_tag_set.add(meta['type_tag']) if meta['type_tag'] in ['unknown', 'own']: print meta pprint.pprint(type_tag_set)
This client pushes a big directory of different files into Workbench.
entailment
def load_all_plugins(self): """Load all the plugins in the plugin directory""" # Go through the existing python files in the plugin directory self.plugin_path = os.path.realpath(self.plugin_dir) sys.path.append(self.plugin_dir) print '<<< Plugin Manager >>>' for f in [os.path.join(self.plugin_dir, child) for child in os.listdir(self.plugin_dir)]: # Skip certain files if '.DS_Store' in f or '__init__.py' in f: continue # Add the plugin self.add_plugin(f)
Load all the plugins in the plugin directory
entailment
def remove_plugin(self, f): """Remvoing a deleted plugin. Args: f: the filepath for the plugin. """ if f.endswith('.py'): plugin_name = os.path.splitext(os.path.basename(f))[0] print '- %s %sREMOVED' % (plugin_name, color.Red) print '\t%sNote: still in memory, restart Workbench to remove...%s' % \ (color.Yellow, color.Normal)
Removing a deleted plugin. Args: f: the filepath for the plugin.
entailment
def add_plugin(self, f):
    """Adding and verifying plugin.

    Args:
        f: the filepath for the plugin.
    """
    if f.endswith('.py'):

        # Just the basename without extension
        plugin_name = os.path.splitext(os.path.basename(f))[0]

        # It's possible the plugin has been modified and needs to be reloaded
        if plugin_name in sys.modules:
            try:
                handler = reload(sys.modules[plugin_name])
                print'\t- %s %sRELOAD%s' % (plugin_name, color.Yellow, color.Normal)
            except ImportError, error:
                print 'Failed to import plugin: %s (%s)' % (plugin_name, error)
                return
        else:
            # Not already loaded so try to import it
            try:
                handler = __import__(plugin_name, globals(), locals(), [], -1)
            except ImportError, error:
                print 'Failed to import plugin: %s (%s)' % (plugin_name, error)
                return

        # Run the handler through plugin validation
        plugin = self.validate(handler)
        # NOTE(review): 'OK' is printed before checking the validation result,
        # so a failing plugin prints both a failure and an OK line -- confirm intended.
        print '\t- %s %sOK%s' % (plugin_name, color.Green, color.Normal)
        if plugin:
            # Okay must be successfully loaded so capture the plugin meta-data,
            # modification time and register the plugin through the callback
            plugin['name'] = plugin_name
            plugin['dependencies'] = plugin['class'].dependencies
            plugin['docstring'] = plugin['class'].__doc__
            plugin['mod_time'] = datetime.utcfromtimestamp(os.path.getmtime(f))
            # Plugin may accept sample_sets as input
            try:
                plugin['sample_set_input'] = getattr(plugin['class'], 'sample_set_input')
            except AttributeError:
                plugin['sample_set_input'] = False
            # Now pass the plugin back to workbench
            self.plugin_callback(plugin)
Adding and verifying plugin. Args: f: the filepath for the plugin.
entailment
def validate(self, handler): """Validate the plugin, each plugin must have the following: 1) The worker class must have an execute method: execute(self, input_data). 2) The worker class must have a dependencies list (even if it's empty). 3) The file must have a top level test() method. Args: handler: the loaded plugin. """ # Check for the test method first test_method = self.plugin_test_validation(handler) if not test_method: return None # Here we iterate through the classes found in the module and pick # the first one that satisfies the validation for name, plugin_class in inspect.getmembers(handler, inspect.isclass): if self.plugin_class_validation(plugin_class): return {'class':plugin_class, 'test':test_method} # If we're here the plugin didn't pass validation print 'Failure for plugin: %s' % (handler.__name__) print 'Validation Error: Worker class is required to have a dependencies list and an execute method' return None
Validate the plugin, each plugin must have the following: 1) The worker class must have an execute method: execute(self, input_data). 2) The worker class must have a dependencies list (even if it's empty). 3) The file must have a top level test() method. Args: handler: the loaded plugin.
entailment
def plugin_class_validation(self, plugin_class):
    """Check that a worker class exposes the required plugin interface.

    Every workbench plugin class must provide a ``dependencies`` list
    (even an empty one) and an ``execute`` method.

    Args:
        plugin_class: The loaded plugin class.

    Returns:
        True if both attributes are present, else False.
    """
    for required in ('dependencies', 'execute'):
        try:
            getattr(plugin_class, required)
        except AttributeError:
            return False
    return True
Plugin validation Every workbench plugin must have a dependencies list (even if it's empty). Every workbench plugin must have an execute method. Args: plugin_class: The loaded plugin class. Returns: True if dependencies and execute are present, else False.
entailment
def store_sample(self, sample_bytes, filename, type_tag): """Store a sample into the datastore. Args: filename: Name of the file. sample_bytes: Actual bytes of sample. type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...) Returns: md5 digest of the sample. """ # Temp sanity check for old clients if len(filename) > 1000: print 'switched bytes/filename... %s %s' % (sample_bytes[:100], filename[:100]) exit(1) sample_info = {} # Compute the MD5 hash sample_info['md5'] = hashlib.md5(sample_bytes).hexdigest() # Check if sample already exists if self.has_sample(sample_info['md5']): return sample_info['md5'] # Run the periodic operations self.periodic_ops() # Check if we need to expire anything self.expire_data() # Okay start populating the sample for adding to the data store # Filename, length, import time and type_tag sample_info['filename'] = filename sample_info['length'] = len(sample_bytes) sample_info['import_time'] = datetime.datetime.utcnow() sample_info['type_tag'] = type_tag # Random customer for now import random sample_info['customer'] = random.choice(['Mega Corp', 'Huge Inc', 'BearTron', 'Dorseys Mom']) # Push the file into the MongoDB GridFS sample_info['__grid_fs'] = self.gridfs_handle.put(sample_bytes) self.database[self.sample_collection].insert(sample_info) # Print info print 'Sample Storage: %.2f out of %.2f MB' % (self.sample_storage_size(), self.samples_cap) # Return the sample md5 return sample_info['md5']
Store a sample into the datastore. Args: filename: Name of the file. sample_bytes: Actual bytes of sample. type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...) Returns: md5 digest of the sample.
entailment
def sample_storage_size(self):
    """Return the size (in MB) of the samples storage collection."""
    try:
        coll_stats = self.database.command('collStats', 'fs.chunks')
    except pymongo.errors.OperationFailure:
        # Stats unavailable (e.g. the collection doesn't exist yet)
        return 0
    # collStats reports bytes; convert to MB
    return coll_stats['size']/1024.0/1024.0
Get the storage size of the samples storage collection.
entailment
def expire_data(self):
    """Expire data within the samples collection."""
    # Keep deleting the oldest sample until we're back under the cap
    while self.sample_storage_size() > self.samples_cap:
        # Oldest record = smallest import_time
        oldest = self.database[self.sample_collection].find().sort('import_time', pymongo.ASCENDING).limit(1)[0]
        self.remove_sample(oldest['md5'])
Expire data within the samples collection.
entailment
def remove_sample(self, md5): """Delete a specific sample""" # Grab the sample record = self.database[self.sample_collection].find_one({'md5': md5}) if not record: return # Delete it print 'Deleting sample: %s (%.2f MB)...' % (record['md5'], record['length']/1024.0/1024.0) self.database[self.sample_collection].remove({'md5': record['md5']}) self.gridfs_handle.delete(record['__grid_fs']) # Print info print 'Sample Storage: %.2f out of %.2f MB' % (self.sample_storage_size(), self.samples_cap)
Delete a specific sample
entailment
def clean_for_serialization(self, data):
    """Clean data in preparation for serialization.

    Removes keys starting with '__' and values that are BSON ObjectIds,
    converts datetime values to ISO-8601 strings, and recurses into
    nested dicts and lists.

    Args:
        data: Sample data to be serialized.

    Returns:
        Cleaned data dictionary.
    """
    if isinstance(data, dict):
        # Iterate over a snapshot of the keys: we delete entries while
        # looping, which would break dict iteration on Python 3.
        for k in list(data.keys()):
            if (k.startswith('__')):
                del data[k]
            elif isinstance(data[k], bson.objectid.ObjectId):
                del data[k]
            elif isinstance(data[k], datetime.datetime):
                # 'Z' suffix marks the timestamp as UTC
                data[k] = data[k].isoformat()+'Z'
            elif isinstance(data[k], dict):
                data[k] = self.clean_for_serialization(data[k])
            elif isinstance(data[k], list):
                data[k] = [self.clean_for_serialization(item) for item in data[k]]
    return data
Clean data in preparation for serialization. Deletes items having key either a BSON, datetime, dict or a list instance, or starting with __. Args: data: Sample data to be serialized. Returns: Cleaned data dictionary.
entailment
def clean_for_storage(self, data):
    """Clean data in preparation for storage.

    Drops the '_id' key, rewrites keys containing '.' to use '_'
    (MongoDB forbids dots in keys), and recurses into nested dicts
    and lists. Strings are converted to unicode first.

    Args:
        data: Sample data dictionary to be cleaned.

    Returns:
        Cleaned data dictionary.
    """
    data = self.data_to_unicode(data)
    if isinstance(data, dict):
        # Iterate a copied key list: entries get renamed/deleted in the loop
        for key in dict(data).keys():
            if key == '_id':
                del data[key]
                continue
            if '.' in key:
                # Mongo keys cannot contain '.', swap for '_'
                safe_key = key.replace('.', '_')
                data[safe_key] = data[key]
                del data[key]
                key = safe_key
            if isinstance(data[key], dict):
                data[key] = self.clean_for_storage(data[key])
            elif isinstance(data[key], list):
                data[key] = [self.clean_for_storage(item) for item in data[key]]
    return data
Clean data in preparation for storage. Deletes items with key having a '.' or is '_id'. Also deletes those items whose value is a dictionary or a list. Args: data: Sample data dictionary to be cleaned. Returns: Cleaned data dictionary.
entailment
def get_full_md5(self, partial_md5, collection): """Support partial/short md5s, return the full md5 with this method""" print 'Notice: Performing slow md5 search...' starts_with = '%s.*' % partial_md5 sample_info = self.database[collection].find_one({'md5': {'$regex' : starts_with}},{'md5':1}) return sample_info['md5'] if sample_info else None
Support partial/short md5s, return the full md5 with this method
entailment
def get_sample(self, md5): """Get the sample from the data store. This method first fetches the data from datastore, then cleans it for serialization and then updates it with 'raw_bytes' item. Args: md5: The md5 digest of the sample to be fetched from datastore. Returns: The sample dictionary or None """ # Support 'short' md5s but don't waste performance if the full md5 is provided if len(md5) < 32: md5 = self.get_full_md5(md5, self.sample_collection) # Grab the sample sample_info = self.database[self.sample_collection].find_one({'md5': md5}) if not sample_info: return None # Get the raw bytes from GridFS (note: this could fail) try: grid_fs_id = sample_info['__grid_fs'] sample_info = self.clean_for_serialization(sample_info) sample_info.update({'raw_bytes':self.gridfs_handle.get(grid_fs_id).read()}) return sample_info except gridfs.errors.CorruptGridFile: # If we don't have the gridfs files, delete the entry from samples self.database[self.sample_collection].update({'md5': md5}, {'md5': None}) return None
Get the sample from the data store. This method first fetches the data from datastore, then cleans it for serialization and then updates it with 'raw_bytes' item. Args: md5: The md5 digest of the sample to be fetched from datastore. Returns: The sample dictionary or None
entailment
def get_sample_window(self, type_tag, size=10):
    """Get a window of samples not to exceed size (in MB).

    Args:
        type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...).
        size: Size of samples in MBs.

    Returns:
        a list of md5s.
    """
    # Convert the MB budget into bytes
    size = size * 1024 * 1024

    # Newest samples first; pull only md5 and length
    cursor = self.database[self.sample_collection].find({'type_tag': type_tag},
        {'md5': 1,'length': 1}).sort('import_time',pymongo.DESCENDING)

    md5_list = []
    running_total = 0
    for sample in cursor:
        if running_total > size:
            # Budget exhausted
            return md5_list
        md5_list.append(sample['md5'])
        running_total += sample['length']

    # Fewer than 'size' MB of samples exist; return everything found
    return md5_list
Get a window of samples not to exceed size (in MB). Args: type_tag: Type of sample ('exe','pcap','pdf','json','swf', or ...). size: Size of samples in MBs. Returns: a list of md5s.
entailment
def has_sample(self, md5):
    """Checks if data store has this sample.

    Args:
        md5: The md5 digest of the required sample.

    Returns:
        True if sample with this md5 is present, else False.
    """
    # Fetching the sample is the simplest existence check:
    # a hit means True, a miss (None) means False.
    return bool(self.get_sample(md5))
Checks if data store has this sample. Args: md5: The md5 digest of the required sample. Returns: True if sample with this md5 is present, else False.
entailment
def _list_samples(self, predicate=None): """List all samples that meet the predicate or all if predicate is not specified. Args: predicate: Match samples against this predicate (or all if not specified) Returns: List of the md5s for the matching samples """ cursor = self.database[self.sample_collection].find(predicate, {'_id':0, 'md5':1}) return [item['md5'] for item in cursor]
List all samples that meet the predicate or all if predicate is not specified. Args: predicate: Match samples against this predicate (or all if not specified) Returns: List of the md5s for the matching samples
entailment
def tag_match(self, tags=None): """List all samples that match the tags or all if tags are not specified. Args: tags: Match samples against these tags (or all if not specified) Returns: List of the md5s for the matching samples """ if 'tags' not in self.database.collection_names(): print 'Warning: Searching on non-existance tags collection' return None if not tags: cursor = self.database['tags'].find({}, {'_id':0, 'md5':1}) else: cursor = self.database['tags'].find({'tags': {'$in': tags}}, {'_id':0, 'md5':1}) # We have the tags, now make sure we only return those md5 which # also exist in the samples collection tag_md5s = set([item['md5'] for item in cursor]) sample_md5s = set(item['md5'] for item in self.database['samples'].find({}, {'_id':0, 'md5':1})) return list(tag_md5s.intersection(sample_md5s))
List all samples that match the tags or all if tags are not specified. Args: tags: Match samples against these tags (or all if not specified) Returns: List of the md5s for the matching samples
entailment
def tags_all(self): """List of the tags and md5s for all samples Args: None Returns: List of the tags and md5s for all samples """ if 'tags' not in self.database.collection_names(): print 'Warning: Searching on non-existance tags collection' return None cursor = self.database['tags'].find({}, {'_id':0, 'md5':1, 'tags':1}) return [item for item in cursor]
List of the tags and md5s for all samples Args: None Returns: List of the tags and md5s for all samples
entailment
def store_work_results(self, results, collection, md5): """Store the output results of the worker. Args: results: a dictionary. collection: the database collection to store the results in. md5: the md5 of sample data to be updated. """ # Make sure the md5 and time stamp is on the data before storing results['md5'] = md5 results['__time_stamp'] = datetime.datetime.utcnow() # If the data doesn't have a 'mod_time' field add one now if 'mod_time' not in results: results['mod_time'] = results['__time_stamp'] # Fixme: Occasionally a capped collection will not let you update with a # larger object, if you have MongoDB 2.6 or above this shouldn't # really happen, so for now just kinda punting and giving a message. try: self.database[collection].update({'md5':md5}, self.clean_for_storage(results), True) except pymongo.errors.OperationFailure: #self.database[collection].insert({'md5':md5}, self.clean_for_storage(results), True) print 'Could not update exising object in capped collection, punting...' print 'collection: %s md5:%s' % (collection, md5)
Store the output results of the worker. Args: results: a dictionary. collection: the database collection to store the results in. md5: the md5 of sample data to be updated.
entailment
def all_sample_md5s(self, type_tag=None):
    """Return a list of all md5 matching the type_tag ('exe','pdf', etc).

    Args:
        type_tag: the type of sample.

    Returns:
        a list of matching samples.
    """
    # Filter on type_tag when given, otherwise match everything
    if type_tag:
        cursor = self.database[self.sample_collection].find({'type_tag': type_tag}, {'md5': 1, '_id': 0})
    else:
        cursor = self.database[self.sample_collection].find({}, {'md5': 1, '_id': 0})
    # The projection returns only the md5 field; grab it by key instead
    # of the fragile values()[0] (order-dependent, breaks on Python 3).
    return [match['md5'] for match in cursor]
Return a list of all md5 matching the type_tag ('exe','pdf', etc). Args: type_tag: the type of sample. Returns: a list of matching samples.
entailment
def clear_worker_output(self): """Drops all of the worker output collections""" print 'Dropping all of the worker output collections... Whee!' # Get all the collections in the workbench database all_c = self.database.collection_names() # Remove collections that we don't want to cap try: all_c.remove('system.indexes') all_c.remove('fs.chunks') all_c.remove('fs.files') all_c.remove('sample_set') all_c.remove('tags') all_c.remove(self.sample_collection) except ValueError: print 'Catching a benign exception thats expected...' for collection in all_c: self.database.drop_collection(collection)
Drops all of the worker output collections
entailment
def periodic_ops(self): """Run periodic operations on the the data store. Operations like making sure collections are capped and indexes are set up. """ # Only run every 30 seconds if (time.time() - self.last_ops_run) < 30: return try: # Reset last ops run self.last_ops_run = time.time() print 'Running Periodic Ops' # Get all the collections in the workbench database all_c = self.database.collection_names() # Remove collections that we don't want to cap try: all_c.remove('system.indexes') all_c.remove('fs.chunks') all_c.remove('fs.files') all_c.remove('info') all_c.remove('tags') all_c.remove(self.sample_collection) except ValueError: print 'Catching a benign exception thats expected...' # Convert collections to capped if desired if self.worker_cap: size = self.worker_cap * pow(1024, 2) # MegaBytes per collection for collection in all_c: self.database.command('convertToCapped', collection, size=size) # Loop through all collections ensuring they have an index on MD5s for collection in all_c: self.database[collection].ensure_index('md5') # Add required indexes for samples collection self.database[self.sample_collection].create_index('import_time') # Create an index on tags self.database['tags'].create_index('tags') # Mongo may throw an autoreconnect exception so catch it and just return # the autoreconnect means that some operations didn't get executed but # because this method gets called every 30 seconds no biggy... except pymongo.errors.AutoReconnect as e: print 'Warning: MongoDB raised an AutoReconnect...' % e return except Exception as e: print 'Critical: MongoDB raised an exception' % e return
Run periodic operations on the data store. Operations like making sure collections are capped and indexes are set up.
entailment
def to_unicode(self, s):
    """Convert an elementary datatype to unicode.

    Args:
        s: the datatype to be unicoded.

    Returns:
        Unicoded data.
    """
    # Fixme: This is total horseshit
    # Already unicode: hand it straight back
    if isinstance(s, unicode):
        return s
    # Byte string: decode, silently dropping undecodable bytes
    if isinstance(s, str):
        return unicode(s, errors='ignore')
    # Anything else (ints, lists, ...) passes through untouched
    return s
Convert an elementary datatype to unicode. Args: s: the datatype to be unicoded. Returns: Unicoded data.
entailment
def data_to_unicode(self, data):
    """Recursively convert a list or dictionary to unicode.

    Args:
        data: The data to be unicoded.

    Returns:
        Unicoded data.
    """
    if isinstance(data, dict):
        # Unicode both the keys and the values
        return {self.to_unicode(k): self.to_unicode(v) for k, v in data.iteritems()}
    if isinstance(data, list):
        return [self.to_unicode(element) for element in data]
    # Scalars fall through to the elementary converter
    return self.to_unicode(data)
Recursively convert a list or dictionary to unicode. Args: data: The data to be unicoded. Returns: Unicoded data.
entailment
def execute(self, input_data):
    """Parse an SWF sample and return header, tag, and meta information.

    Args:
        input_data: dict with ['sample']['raw_bytes'] and ['meta'].

    Returns:
        dict with SWF header fields, the stringified tag list, and the
        sample's meta data merged in.
    """
    # Spin up SWF class
    swf = SWF()

    # Get the raw_bytes
    raw_bytes = input_data['sample']['raw_bytes']

    # Parse it (pyswf wants a file-like object)
    swf.parse(StringIO(raw_bytes))

    # Header info
    head = swf.header
    output = {'version':head.version,'file_length':head.file_length,'frame_count':head.frame_count,
              'frame_rate':head.frame_rate,'frame_size':head.frame_size.__str__(),'compressed':head.compressed}

    # Loop through all the tags
    output['tags'] = [tag.__str__() for tag in swf.tags]

    # Add the meta data to the output
    output.update(input_data['meta'])
    return output
# Map all tag names to indexes tag_map = {tag.name:index for tag,index in enumerate(swf.tags)} # FileAttribute Info file_attr_tag = swf.tags[tag_map]
entailment
def grab_server_args():
    """Grab server info from configuration file"""
    # Read the defaults out of config.ini (located next to this script)
    workbench_conf = ConfigParser.ConfigParser()
    config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini')
    workbench_conf.read(config_path)
    server = workbench_conf.get('workbench', 'server_uri')
    port = workbench_conf.get('workbench', 'server_port')

    # Command line arguments override the config file values
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--server', type=str, default=server, help='location of workbench server')
    parser.add_argument('-p', '--port', type=int, default=port, help='port used by workbench server')
    args, commands = parser.parse_known_args()
    return {'server':str(args.server), 'port':str(args.port), 'commands': commands}
Grab server info from configuration file
entailment
def _make_attachment(self, attachment, str_encoding=None):
    """Returns EmailMessage.attachments item formatted for sending with Mailjet

    Returns mailjet_dict, is_inline_image
    """
    is_inline_image = False
    if isinstance(attachment, MIMEBase):
        name = attachment.get_filename()
        content = attachment.get_payload(decode=True)
        mimetype = attachment.get_content_type()
        # An image part carrying a Content-ID is treated as inline;
        # Mailjet wants the Content-ID as the attachment's name then
        if attachment.get_content_maintype() == 'image' and attachment['Content-ID'] is not None:
            is_inline_image = True
            name = attachment['Content-ID']
    else:
        # Plain (name, content, mimetype) triple
        (name, content, mimetype) = attachment

    # Guess missing mimetype from filename, borrowed from
    # django.core.mail.EmailMessage._create_attachment()
    if mimetype is None and name is not None:
        mimetype, _ = mimetypes.guess_type(name)
    if mimetype is None:
        mimetype = DEFAULT_ATTACHMENT_MIME_TYPE

    try:
        # noinspection PyUnresolvedReferences
        if isinstance(content, unicode):
            # Python 2.x unicode string
            content = content.encode(str_encoding)
    except NameError:
        # Python 3 doesn't differentiate between strings and unicode
        # Convert python3 unicode str to bytes attachment:
        if isinstance(content, str):
            content = content.encode(str_encoding)

    # Mailjet expects base64 text in the payload dict
    content_b64 = b64encode(content)
    mj_attachment = {
        'Content-type': mimetype,
        'Filename': name or '',
        'content': content_b64.decode('ascii'),
    }
    return mj_attachment, is_inline_image
Returns EmailMessage.attachments item formatted for sending with Mailjet Returns mailjet_dict, is_inline_image
entailment
def index_data(self, data, index_name, doc_type): """Take an arbitrary dictionary of data and index it with ELS. Args: data: data to be Indexed. Should be a dictionary. index_name: Name of the index. doc_type: The type of the document. Raises: RuntimeError: When the Indexing fails. """ # Index the data (which needs to be a dict/object) if it's not # we're going to toss an exception if not isinstance(data, dict): raise RuntimeError('Index failed, data needs to be a dict!') try: self.els_search.index(index=index_name, doc_type=doc_type, body=data) except Exception, error: print 'Index failed: %s' % str(error) raise RuntimeError('Index failed: %s' % str(error))
Take an arbitrary dictionary of data and index it with ELS. Args: data: data to be Indexed. Should be a dictionary. index_name: Name of the index. doc_type: The type of the document. Raises: RuntimeError: When the Indexing fails.
entailment
def search(self, index_name, query): """Search the given index_name with the given ELS query. Args: index_name: Name of the Index query: The string to be searched. Returns: List of results. Raises: RuntimeError: When the search query fails. """ try: results = self.els_search.search(index=index_name, body=query) return results except Exception, error: error_str = 'Query failed: %s\n' % str(error) error_str += '\nIs there a dynamic script in the query?, see www.elasticsearch.org' print error_str raise RuntimeError(error_str)
Search the given index_name with the given ELS query. Args: index_name: Name of the Index query: The string to be searched. Returns: List of results. Raises: RuntimeError: When the search query fails.
entailment
def index_data(self, data, index_name, doc_type):
    """Index data in Stub Indexer.

    No-op stand-in for a real ELS indexer: it only logs the call.
    """
    print 'ELS Stub Indexer getting called...'
    print '%s %s %s %s' % (self, data, index_name, doc_type)
Index data in Stub Indexer.
entailment
def execute(self, input_data):
    ''' Execute method '''
    # This sample's fuzzy hash and md5 (computed by the meta_deep worker)
    my_ssdeep = input_data['meta_deep']['ssdeep']
    my_md5 = input_data['meta_deep']['md5']

    # For every PE sample in the database compute my ssdeep fuzzy match
    sample_set = self.workbench.generate_sample_set('exe')
    results = self.workbench.set_work_request('meta_deep', sample_set, ['md5','ssdeep'])
    sim_list = []
    for result in results:
        # Don't compare the sample against itself
        if result['md5'] != my_md5:
            sim_list.append({'md5':result['md5'], 'sim':ssd.compare(my_ssdeep, result['ssdeep'])})

    # Sort and return the sim_list (with some logic for threshold)
    # Highest similarity first; only keep matches with a positive score
    sim_list.sort(key=itemgetter('sim'), reverse=True)
    output_list = [sim for sim in sim_list if sim['sim'] > 0]
    return {'md5': my_md5, 'sim_list':output_list}
Execute method
entailment
def execute(self, input_data):
    ''' Do CLI formatting and coloring based on the type_tag '''
    input_data = input_data['help_base']
    type_tag = input_data['type_tag']

    # Standard help text
    if type_tag == 'help':
        output = '%s%s%s' % (color.LightBlue, input_data['help'], color.Normal)

    # Worker: name, dependency list and docstring
    elif type_tag == 'worker':
        output = '%s%s' % (color.Yellow, input_data['name'])
        output += '\n %sInput: %s%s%s' % (color.LightBlue, color.Green, input_data['dependencies'], color.Normal)
        output += '\n %s%s' % (color.Green, input_data['docstring'])

    # Command: name, signature and docstring
    elif type_tag == 'command':
        output = '%s%s%s %s' % (color.Yellow, input_data['command'], color.LightBlue, input_data['sig'])
        output += '\n %s%s%s' % (color.Green, input_data['docstring'], color.Normal)

    # WTF: Alert on unknown type_tag and return a string of the input_data
    else:
        print 'Alert: help_formatter worker received malformed object: %s' % str(input_data)
        output = '\n%s%s%s' % (color.Red, str(input_data), color.Normal)

    # Return the formatted and colored help
    return {'help': output}
Do CLI formatting and coloring based on the type_tag
entailment
def run(): """ Run the workbench server """ # Load the configuration file relative to this script location config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini') workbench_conf = ConfigParser.ConfigParser() config_ini = workbench_conf.read(config_path) if not config_ini: print 'Could not locate config.ini file, tried %s : exiting...' % config_path exit(1) # Pull configuration settings datastore_uri = workbench_conf.get('workbench', 'datastore_uri') database = workbench_conf.get('workbench', 'database') worker_cap = workbench_conf.getint('workbench', 'worker_cap') samples_cap = workbench_conf.getint('workbench', 'samples_cap') # Spin up Workbench ZeroRPC try: store_args = {'uri': datastore_uri, 'database': database, 'worker_cap':worker_cap, 'samples_cap':samples_cap} workbench = zerorpc.Server(WorkBench(store_args=store_args), name='workbench', heartbeat=60) workbench.bind('tcp://0.0.0.0:4242') print '\nWorkbench is ready and feeling super duper!' gevent_signal(signal.SIGTERM, workbench.stop) gevent_signal(signal.SIGINT, workbench.stop) gevent_signal(signal.SIGKILL, workbench.stop) workbench.run() print '\nWorkbench Server Shutting Down... and dreaming of sheep...' except zmq.error.ZMQError: print '\nInfo: Could not start Workbench server (no worries, probably already running...)\n'
Run the workbench server
entailment
def store_sample(self, input_bytes, filename, type_tag): """ Store a sample into the DataStore. Args: input_bytes: the actual bytes of the sample e.g. f.read() filename: name of the file (used purely as meta data not for lookup) type_tag: ('exe','pcap','pdf','json','swf', or ...) Returns: the md5 of the sample. """ # If the sample comes in with an unknown type_tag try to determine it if type_tag == 'unknown': print 'Info: Unknown File -- Trying to Determine Type...' type_tag = self.guess_type_tag(input_bytes, filename) # Do we have a compressed sample? If so decompress it if type_tag == 'lz4': input_bytes = lz4.loads(input_bytes) # Store the sample md5 = self.data_store.store_sample(input_bytes, filename, type_tag) # Add the type_tags to tags if type_tag != 'lz4': self.add_tags(md5, type_tag) return md5
Store a sample into the DataStore. Args: input_bytes: the actual bytes of the sample e.g. f.read() filename: name of the file (used purely as meta data not for lookup) type_tag: ('exe','pcap','pdf','json','swf', or ...) Returns: the md5 of the sample.
entailment
def get_sample(self, md5):
    """ Get a sample from the DataStore.
        Args:
            md5: the md5 of the sample
        Returns:
            A dictionary of meta data about the sample which includes
            a ['raw_bytes'] key that contains the raw bytes.
        Raises:
            Workbench.DataNotFound if the sample is not found.
    """
    # Try a direct sample lookup first; when that misses, treat the md5
    # as a sample_set handle instead.
    sample = self.data_store.get_sample(md5)
    if sample:
        return {'sample': sample}
    return {'sample_set': {'md5_list': self.get_sample_set(md5)}}
Get a sample from the DataStore. Args: md5: the md5 of the sample Returns: A dictionary of meta data about the sample which includes a ['raw_bytes'] key that contains the raw bytes. Raises: Workbench.DataNotFound if the sample is not found.
entailment
def is_sample_set(self, md5):
    """ Does the md5 represent a sample_set?
        Args:
            md5: the md5 of the sample_set
        Returns:
            True/False
    """
    # A successful sample_set lookup means yes; DataNotFound means no
    try:
        self.get_sample_set(md5)
    except WorkBench.DataNotFound:
        return False
    return True
Does the md5 represent a sample_set? Args: md5: the md5 of the sample_set Returns: True/False
entailment
def get_sample_window(self, type_tag, size):
    """ Get a sample from the DataStore.
        Args:
            type_tag: the type of samples ('pcap','exe','pdf')
            size: the size of the window in MegaBytes (10 = 10MB)
        Returns:
            A sample_set handle which represents the newest samples within the size window
    """
    # Ask the datastore for the newest md5s within the window and wrap
    # them up as a stored sample_set handle.
    window_md5s = self.data_store.get_sample_window(type_tag, size)
    return self.store_sample_set(window_md5s)
Get a sample from the DataStore. Args: type_tag: the type of samples ('pcap','exe','pdf') size: the size of the window in MegaBytes (10 = 10MB) Returns: A sample_set handle which represents the newest samples within the size window
entailment
def combine_samples(self, md5_list, filename, type_tag):
    """Combine samples together. This may have various use cases the most
       significant involving a bunch of sample 'chunks' got uploaded and
       now we combine them together
        Args:
            md5_list: The list of md5s to combine, order matters!
            filename: name of the file (used purely as meta data not for lookup)
            type_tag: ('exe','pcap','pdf','json','swf', or ...)
        Returns:
            the computed md5 of the combined samples
    """
    # Gather the raw bytes of every chunk in order, removing each chunk
    # from the store as it is consumed.
    chunks = []
    for md5 in md5_list:
        chunks.append(self.get_sample(md5)['sample']['raw_bytes'])
        self.remove_sample(md5)

    # Store the concatenated payload as a single new sample
    return self.store_sample(''.join(chunks), filename, type_tag)
Combine samples together. This may have various use cases the most significant involving a bunch of sample 'chunks' got uploaded and now we combine them together Args: md5_list: The list of md5s to combine, order matters! filename: name of the file (used purely as meta data not for lookup) type_tag: ('exe','pcap','pdf','json','swf', or ...) Returns: the computed md5 of the combined samples
entailment
def stream_sample(self, md5, kwargs=None): """ Stream the sample by giving back a generator, typically used on 'logs'. Args: md5: the md5 of the sample kwargs: a way of specifying subsets of samples (None for all) max_rows: the maximum number of rows to return Returns: A generator that yields rows of the file/log """ # Get the max_rows if specified max_rows = kwargs.get('max_rows', None) if kwargs else None # Grab the sample and it's raw bytes sample = self.get_sample(md5)['sample'] raw_bytes = sample['raw_bytes'] # Figure out the type of file to be streamed type_tag = sample['type_tag'] if type_tag == 'bro': bro_log = bro_log_reader.BroLogReader(convert_datetimes=False) mem_file = StringIO(raw_bytes) generator = bro_log.read_log(mem_file) return generator elif type_tag == 'els_query': els_log = json.loads(raw_bytes) # Try to determine a couple of different types of ELS query results if 'fields' in els_log['hits']['hits'][0]: generator = (row['fields'] for row in els_log['hits']['hits'][:max_rows]) else: generator = (row['_source'] for row in els_log['hits']['hits'][:max_rows]) return generator elif type_tag == 'log': generator = ({'row':row} for row in raw_bytes.split('\n')[:max_rows]) return generator elif type_tag == 'json': generator = (row for row in json.loads(raw_bytes)[:max_rows]) return generator else: raise RuntimeError('Cannot stream file %s with type_tag:%s' % (md5, type_tag))
Stream the sample by giving back a generator, typically used on 'logs'. Args: md5: the md5 of the sample kwargs: a way of specifying subsets of samples (None for all) max_rows: the maximum number of rows to return Returns: A generator that yields rows of the file/log
entailment
def get_dataframe(self, md5, compress='lz4'):
    """Return a dataframe from the DataStore. This is just a convenience
        method that uses get_sample internally.
        Args:
            md5: the md5 of the dataframe
            compress: compression to use: (defaults to 'lz4' but can be set to None)
        Returns:
            A msgpack'd Pandas DataFrame
        Raises:
            Workbench.DataNotFound if the dataframe is not found.
    """
    # Grab the sample bytes straight from the datastore
    sample = self.data_store.get_sample(md5)
    if not sample:
        # Bug fix: the message was never %-formatted (the md5 was passed
        # as a stray second constructor argument instead)
        raise WorkBench.DataNotFound('Could not find %s in the data store' % md5)

    # No compression requested: hand back the raw msgpack'd bytes
    if not compress:
        return sample['raw_bytes']

    # Compress with lz4 and report the compressed/original size ratio
    compress_df = lz4.dumps(sample['raw_bytes'])
    print('Info: DataFrame compression %.0f%%' % (len(compress_df) * 100.0 / float(len(sample['raw_bytes']))))
    return compress_df
Return a dataframe from the DataStore. This is just a convenience method that uses get_sample internally. Args: md5: the md5 of the dataframe compress: compression to use: (defaults to 'lz4' but can be set to None) Returns: A msgpack'd Pandas DataFrame Raises: Workbench.DataNotFound if the dataframe is not found.
entailment
def guess_type_tag(self, input_bytes, filename):
    """ Try to guess the type_tag for this sample.
        Args:
            input_bytes: the raw bytes of the sample
            filename: used as a fallback hint when the mime type is generic
        Returns:
            the guessed type_tag string, or 'unknown' if no determination
            could be made.
    """
    mime_to_type = {'application/jar': 'jar',
                    'application/java-archive': 'jar',
                    'application/octet-stream': 'data',
                    'application/pdf': 'pdf',
                    'application/vnd.ms-cab-compressed': 'cab',
                    'application/vnd.ms-fontobject': 'ms_font',
                    'application/vnd.tcpdump.pcap': 'pcap',
                    'application/x-dosexec': 'exe',
                    'application/x-empty': 'empty',
                    'application/x-shockwave-flash': 'swf',
                    'application/xml': 'xml',
                    'application/zip': 'zip',
                    'image/gif': 'gif',
                    'text/html': 'html',
                    'image/jpeg': 'jpg',
                    'image/png': 'png',
                    'image/x-icon': 'icon',
                    'text/plain': 'txt'
                    }

    # See what filemagic can determine (first 1k is enough for mime sniffing)
    with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as mag:
        mime_type = mag.id_buffer(input_bytes[:1024])

    if mime_type not in mime_to_type:
        print('Alert: Sample Type could not be Determined')
        return 'unknown'

    type_tag = mime_to_type[mime_type]

    # Generic 'data' mime: fall back to the file extension
    if type_tag == 'data':
        print('Info: File -- Trying to Determine Type from filename...')
        ext = os.path.splitext(filename)[1][1:]
        if ext in ['mem', 'vmem']:
            type_tag = 'mem'
        else:
            # Bug fix: this used to exit(1), killing the entire server on one
            # unrecognized file; report the failure and fall back to 'unknown'
            print('Alert: Failed to Determine Type for %s' % filename)
            type_tag = 'unknown'
    return type_tag
Try to guess the type_tag for this sample
entailment
def add_tags(self, md5, tags):
    """Add tags to this sample (union with any existing tags).
        Args:
            md5: the md5 of the sample
            tags: a single tag string or a list of tags
        Returns:
            Nothing (stores the merged tag list as work results)
    """
    # No tags? Nothing to do
    if not tags:
        return

    # Bug fix: the original called self.get_tags(md5) twice; fetch once
    existing = self.get_tags(md5)
    tag_set = set(existing) if existing else set()

    # Normalize a single tag string to a list and merge
    if isinstance(tags, str):
        tags = [tags]
    tag_set.update(tags)
    self.data_store.store_work_results({'tags': list(tag_set)}, 'tags', md5)
Add tags to this sample
entailment
def set_tags(self, md5, tags):
    """Set (overwrite) the tags for this sample"""
    # Normalize to a list, de-dupe, and overwrite the stored tag list
    tag_list = [tags] if isinstance(tags, str) else tags
    self.data_store.store_work_results({'tags': list(set(tag_list))}, 'tags', md5)
Set the tags for this sample
entailment
def get_tags(self, md5):
    """Get tags for this sample (None when the sample has no tag data)"""
    tag_data = self.data_store.get_work_results('tags', md5)
    if not tag_data:
        return None
    return tag_data['tags']
Get tags for this sample
entailment
def index_sample(self, md5, index_name):
    """ Index a stored sample with the Indexer.
        Args:
            md5: the md5 of the sample
            index_name: the name of the index
        Returns:
            Nothing
    """
    # Stream the sample row by row straight into the indexer
    for row in self.stream_sample(md5):
        self.indexer.index_data(row, index_name)
Index a stored sample with the Indexer. Args: md5: the md5 of the sample index_name: the name of the index Returns: Nothing
entailment
def index_worker_output(self, worker_name, md5, index_name, subfield):
    """ Index worker output with the Indexer.
        Args:
            worker_name: 'strings', 'pe_features', whatever
            md5: the md5 of the sample
            index_name: the name of the index
            subfield: index just this subfield (None for all)
        Returns:
            Nothing
    """
    # Grab the worker output, narrowing to one subfield when requested
    output = self.work_request(worker_name, md5)[worker_name]
    data = output[subfield] if subfield else output

    # Hand the data off to the indexer
    self.indexer.index_data(data, index_name=index_name, doc_type='unknown')
Index worker output with the Indexer. Args: worker_name: 'strings', 'pe_features', whatever md5: the md5 of the sample index_name: the name of the index subfield: index just this subfield (None for all) Returns: Nothing
entailment
def add_node(self, node_id, name, labels):
    """ Add a node to the graph with name and labels.
        Args:
            node_id: the unique node_id e.g. 'www.evil4u.com'
            name: the display name of the node e.g. 'evil4u'
            labels: a list of labels e.g. ['domain','evil']
        Returns:
            Nothing
    """
    # Thin delegate to the Neo4j datastore wrapper
    self.neo_db.add_node(node_id, name, labels)
Add a node to the graph with name and labels. Args: node_id: the unique node_id e.g. 'www.evil4u.com' name: the display name of the node e.g. 'evil4u' labels: a list of labels e.g. ['domain','evil'] Returns: Nothing
entailment
def add_rel(self, source_id, target_id, rel):
    """ Add a relationship: source, target must already exist (see add_node)
        'rel' is the name of the relationship 'contains' or whatever.
        Args:
            source_id: the unique node_id of the source
            target_id: the unique node_id of the target
            rel: name of the relationship
        Returns:
            Nothing
    """
    # Thin delegate to the Neo4j datastore wrapper
    self.neo_db.add_rel(source_id, target_id, rel)
Add a relationship: source, target must already exist (see add_node) 'rel' is the name of the relationship 'contains' or whatever. Args: source_id: the unique node_id of the source target_id: the unique node_id of the target rel: name of the relationship Returns: Nothing
entailment
def clear_db(self):
    """ Clear the Main Database of all samples and worker output.
        Args:
            None
        Returns:
            Nothing
    """
    # Wipe the datastore, then rebuild the plugin and info state that
    # the wipe just destroyed.
    self.data_store.clear_db()
    self.plugin_manager.load_all_plugins()
    self._store_information()
Clear the Main Database of all samples and worker output. Args: None Returns: Nothing
entailment
def clear_worker_output(self):
    """Drops all of the worker output collections
        Args:
            None
        Returns:
            Nothing
    """
    # Drop only the worker output, then rebuild plugin and info state
    self.data_store.clear_worker_output()
    self.plugin_manager.load_all_plugins()
    self._store_information()
Drops all of the worker output collections Args: None Returns: Nothing
entailment
def work_request(self, worker_name, md5, subkeys=None): """ Make a work request for an existing stored sample. Args: worker_name: 'strings', 'pe_features', whatever md5: the md5 of the sample (or sample_set!) subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all) Returns: The output of the worker. """ # Pull the worker output work_results = self._recursive_work_resolver(worker_name, md5) # Subkeys (Fixme this is super klutzy) if subkeys: if isinstance(subkeys, str): subkeys = [subkeys] try: sub_results = {} for subkey in subkeys: tmp = work_results[worker_name] # Traverse any subkeys for key in subkey.split('.')[:-1]: tmp = tmp[key] # Last subkey key = subkey.split('.')[-1] if key == '*': for key in tmp.keys(): sub_results[key] = tmp[key] else: sub_results[key] = tmp[key] # Set the output work_results = sub_results except (KeyError, TypeError): raise RuntimeError('Could not get one or more subkeys for: %s' % (work_results)) # Clean it and ship it return self.data_store.clean_for_serialization(work_results)
Make a work request for an existing stored sample. Args: worker_name: 'strings', 'pe_features', whatever md5: the md5 of the sample (or sample_set!) subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all) Returns: The output of the worker.
entailment
def set_work_request(self, worker_name, sample_set, subkeys=None):
    """ Make a work request for an existing stored sample (or sample_set).
        Args:
            worker_name: 'strings', 'pe_features', whatever
            sample_set: the md5 of a sample_set in the Workbench data store
            subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all)
        Returns:
            The output is a generator of the results of the worker output for the sample_set
    """
    # Workers that understand sample_sets get the whole set in one request
    if self.plugin_meta[worker_name]['sample_set_input']:
        yield self.work_request(worker_name, sample_set, subkeys)
        return

    # Otherwise fan out over each md5 in the set, yielding per-sample results
    for md5 in self.get_sample_set(sample_set):
        if subkeys:
            yield self.work_request(worker_name, md5, subkeys)
        else:
            yield self.work_request(worker_name, md5)[worker_name]
Make a work request for an existing stored sample (or sample_set). Args: worker_name: 'strings', 'pe_features', whatever sample_set: the md5 of a sample_set in the Workbench data store subkeys: just get a subkey of the output: 'foo' or 'foo.bar' (None for all) Returns: The output is a generator of the results of the worker output for the sample_set
entailment
def store_sample_set(self, md5_list):
    """ Store a sample set (which is just a list of md5s).

        Note: All md5s must already be in the data store.

        Args:
            md5_list: a list of the md5s in this set (all must exist in data store)

        Returns:
            The md5 of the set (the actual md5 of the set)
    """
    # Sanity check
    if not md5_list:
        print('Warning: Trying to store an empty sample_set')
        return None

    # Bug fix: de-dupe with a *sorted* list. list(set(...)) has arbitrary,
    # hash-randomized ordering, which made the computed set md5
    # non-deterministic across processes for the exact same members.
    md5_list = sorted(set(md5_list))
    for md5 in md5_list:
        if not self.has_sample(md5):
            raise RuntimeError('%s: Not found! All items in sample_set must be in the datastore' % (md5))

    # The set's handle is the md5 of its (sorted) member list;
    # encode() so hashing works on both Python 2 and 3.
    set_md5 = hashlib.md5(str(md5_list).encode('utf-8')).hexdigest()
    self._store_work_results({'md5_list': md5_list}, 'sample_set', set_md5)
    return set_md5
Store a sample set (which is just a list of md5s). Note: All md5s must already be in the data store. Args: md5_list: a list of the md5s in this set (all must exist in data store) Returns: The md5 of the set (the actual md5 of the set)
entailment
def generate_sample_set(self, tags=None):
    """Generate a sample_set that maches the tags or all if tags are not specified.
        Args:
            tags: Match samples against this tag list (or all if not specified)
        Returns:
            The sample_set of those samples matching the tags
    """
    # A bare string becomes a one-element tag list
    if isinstance(tags, str):
        tags = [tags]
    matching_md5s = self.data_store.tag_match(tags)
    return self.store_sample_set(matching_md5s)
Generate a sample_set that maches the tags or all if tags are not specified. Args: tags: Match samples against this tag list (or all if not specified) Returns: The sample_set of those samples matching the tags
entailment
def help(self, topic=None):
    """ Returns the formatted, colored help """
    topic = topic or 'workbench'

    # Asking for help on something that doesn't exist raises DataNotFound,
    # so catch it and hand back a friendly, colorized explanation that
    # includes both the md5 looked up and what probably happened.
    try:
        return self.work_request('help_formatter', topic)['help_formatter']['help']
    except WorkBench.DataNotFound as e:
        sample_md5 = e.args[0]
        return '%s%s\n\t%s%s%s' % (color.Yellow, sample_md5, color.Green, e.message(), color.Normal)
Returns the formatted, colored help
entailment
def _help_workbench(self):
    """ Help on Workbench """
    # Build the banner from its pieces, then join once
    parts = []
    parts.append('%sWelcome to Workbench Help:%s' % (color.Yellow, color.Normal))
    parts.append('\n\t%s- workbench.help(\'basic\') %s for getting started help' % (color.Green, color.LightBlue))
    parts.append('\n\t%s- workbench.help(\'workers\') %s for help on available workers' % (color.Green, color.LightBlue))
    parts.append('\n\t%s- workbench.help(\'commands\') %s for help on workbench commands' % (color.Green, color.LightBlue))
    parts.append('\n\t%s- workbench.help(topic) %s where topic can be a help, command or worker' % (color.Green, color.LightBlue))
    parts.append('\n\n%sSee http://github.com/SuperCowPowers/workbench for more information\n%s' % (color.Yellow, color.Normal))
    return ''.join(parts)
Help on Workbench
entailment
def _help_basic(self):
    """ Help for Workbench Basics """
    # Build the getting-started text from its pieces, then join once
    parts = []
    parts.append('%sWorkbench: Getting started...' % (color.Yellow))
    parts.append('\n%sStore a sample into Workbench:' % (color.Green))
    parts.append('\n\t%s$ workbench.store_sample(raw_bytes, filename, type_tag)' % (color.LightBlue))
    parts.append('\n\n%sNotice store_sample returns an md5 of the sample...' % (color.Yellow))
    parts.append('\n%sRun workers on the sample (view, meta, whatever...):' % (color.Green))
    parts.append('\n\t%s$ workbench.work_request(\'view\', md5)%s' % (color.LightBlue, color.Normal))
    return ''.join(parts)
Help for Workbench Basics
entailment
def _help_commands(self): """ Help on all the available commands """ help = 'Workbench Commands:' for command in self.list_all_commands(): full_help = self.work_request('help_formatter', command)['help_formatter']['help'] compact_help = full_help.split('\n')[:2] help += '\n\n%s' % '\n'.join(compact_help) return help
Help on all the available commands
entailment
def _help_workers(self): """ Help on all the available workers """ help = 'Workbench Workers:' for worker in self.list_all_workers(): full_help = self.work_request('help_formatter', worker)['help_formatter']['help'] compact_help = full_help.split('\n')[:4] help += '\n\n%s' % '\n'.join(compact_help) return help
Help on all the available workers
entailment
def list_all_commands(self):
    """ Returns a list of all the Workbench commands"""
    # Public commands are all routine members whose name has no
    # leading underscore.
    commands = []
    for name, _routine in inspect.getmembers(self, predicate=inspect.isroutine):
        if not name.startswith('_'):
            commands.append(name)
    return commands
Returns a list of all the Workbench commands
entailment
def get_info(self, component):
    """ Get the information about this component """
    # Fetch the stored info blob, sanitize it for serialization, ship it
    info = self._get_work_results('info', component)
    return self.data_store.clean_for_serialization(info)
Get the information about this component
entailment
def store_info(self, info_dict, component, type_tag):
    """ Store information about a component. The component could be a
        worker or a commands or a class, or whatever you want, the
        only thing to be aware of is name collisions. """
    # Enforce dictionary input
    if not isinstance(info_dict, dict):
        print('Critical: info_dict must be a python dictionary, got %s' % type(info_dict))
        return

    # Strip out any callables (functions/methods/classes) before storage
    info_storage = {}
    for key, value in info_dict.items():
        if not hasattr(value, '__call__'):
            info_storage[key] = value

    # Mark the info with its type_tag and store it
    info_storage['type_tag'] = type_tag
    self._store_work_results(info_storage, 'info', component)
Store information about a component. The component could be a worker or a commands or a class, or whatever you want, the only thing to be aware of is name collisions.
entailment
def _store_information(self):
    """ Store information about Workbench and its commands into the
        info collection (command signatures plus the static help pages).
    """

    print '<<< Generating Information Storage >>>'

    # Stores information on Workbench commands and signatures
    # (funcsigs provides signature() on Python 2)
    for name, meth in inspect.getmembers(self, predicate=inspect.isroutine):
        if not name.startswith('_'):
            info = {'command': name, 'sig': str(funcsigs.signature(meth)), 'docstring': meth.__doc__}
            self.store_info(info, name, type_tag='command')

    # Stores help text into the workbench information system
    self.store_info({'help': '<<< Workbench Server Version %s >>>' % self.version}, 'version', type_tag='help')
    self.store_info({'help': self._help_workbench()}, 'workbench', type_tag='help')
    self.store_info({'help': self._help_basic()}, 'basic', type_tag='help')
    self.store_info({'help': self._help_commands()}, 'commands', type_tag='help')
    self.store_info({'help': self._help_workers()}, 'workers', type_tag='help')
Store infomation about Workbench and its commands
entailment
def _new_plugin(self, plugin): """ Internal: This method handles the mechanics around new plugins. """ # First store the plugin info into our data store self.store_info(plugin, plugin['name'], type_tag='worker') # Place it into our active plugin list self.plugin_meta[plugin['name']] = plugin
Internal: This method handles the mechanics around new plugins.
entailment
def _store_work_results(self, results, collection, md5): """ Internal: Stores the work results of a worker.""" self.data_store.store_work_results(results, collection, md5)
Internal: Stores the work results of a worker.
entailment
def _get_work_results(self, collection, md5): """ Internal: Method for fetching work results.""" results = self.data_store.get_work_results(collection, md5) if not results: raise WorkBench.DataNotFound(md5 + ': Data/Sample not found...') return {collection: results}
Internal: Method for fetching work results.
entailment
def _work_chain_mod_time(self, worker_name): """ Internal: We compute a modification time of a work chain. Returns: The newest modification time of any worker in the work chain. """ # Bottom out on sample, info or tags if worker_name=='sample' or worker_name=='info' or worker_name=='tags': return datetime.datetime(1970, 1, 1) my_mod_time = self._get_work_results('info', worker_name)['info']['mod_time'] dependencies = self.plugin_meta[worker_name]['dependencies'] if not dependencies: return my_mod_time else: depend_mod_times = [my_mod_time] for depend in dependencies: depend_mod_times.append(self._work_chain_mod_time(depend)) return max(depend_mod_times)
Internal: We compute a modification time of a work chain. Returns: The newest modification time of any worker in the work chain.
entailment
def _recursive_work_resolver(self, worker_name, md5): """ Internal: Input dependencies are recursively backtracked, invoked and then passed down the pipeline until getting to the requested worker. """ # Looking for the sample? if worker_name == 'sample': return self.get_sample(md5) # Looking for info? if worker_name == 'info': return self._get_work_results('info', md5) # Looking for tags? if worker_name == 'tags': return self._get_work_results('tags', md5) # Do I actually have this plugin? (might have failed, etc) if (worker_name not in self.plugin_meta): print 'Alert: Request for non-existing or failed plugin: %s' % (worker_name) return {} # If the results exist and the time_stamp is newer than the entire work_chain, I'm done collection = self.plugin_meta[worker_name]['name'] try: work_results = self._get_work_results(collection, md5) work_chain_mod_time = self._work_chain_mod_time(worker_name) if work_chain_mod_time < work_results[collection]['__time_stamp']: return work_results elif self.VERBOSE: print 'VERBOSE: %s work_chain is newer than data' % (worker_name) except WorkBench.DataNotFound: if self.VERBOSE: print 'Verbose: %s data not found generating' % (worker_name) # Okay either need to generate (or re-generate) the work results dependencies = self.plugin_meta[worker_name]['dependencies'] dependant_results = {} for dependency in dependencies: dependant_results.update(self._recursive_work_resolver(dependency, md5)) if self.VERBOSE: print 'Verbose: new work for plugin: %s' % (worker_name) work_results = self.plugin_meta[worker_name]['class']().execute(dependant_results) # Enforce dictionary output if not isinstance(work_results, dict): print 'Critical: Plugin %s MUST produce a python dictionary!' % worker_name return None # Store the results and return self._store_work_results(work_results, collection, md5) return self._get_work_results(collection, md5)
Internal: Input dependencies are recursively backtracked, invoked and then passed down the pipeline until getting to the requested worker.
entailment
def load_sample(self, file_path, tags=None): """Load a sample (or samples) into workbench Args: file_path: path to a file or directory tags (optional): a list of tags for the sample/samples ['bad','aptz13'] Returns: The list of md5s for all samples """ # Recommend a tag if not tags: print '\n%sRequired: Add a list of tags when you load samples (put \'unknown\' if you must). \ \n\t%sExamples: [\'bad\'], [\'good\'], [\'bad\',\'aptz13\']%s' % (color.Yellow, color.Green, color.Normal) return # Do they want everything under a directory? if os.path.isdir(file_path): file_list = self._all_files_in_directory(file_path) else: file_list = [file_path] # Upload the files into workbench md5_list = [] for path in file_list: with open(path, 'rb') as my_file: raw_bytes = my_file.read() md5 = hashlib.md5(raw_bytes).hexdigest() if not self.workbench.has_sample(md5): print '%sStreaming Sample...%s' % (color.LightPurple, color.Normal) basename = os.path.basename(path) md5 = self.streamer.stream_to_workbench(raw_bytes, basename, 'unknown', tags) print '\n%s %s%s %sLocked and Loaded...%s\n' % \ (self.beer, color.LightPurple, md5[:6], color.Yellow, color.Normal) # Add tags to the sample self.workbench.add_tags(md5, tags) md5_list.append(md5) # Pivot on the sample_set set_md5 = self.workbench.store_sample_set(md5_list) self.pivot(set_md5, '_'.join(tags)) # Dump out tag information self.tags()
Load a sample (or samples) into workbench Args: file_path: path to a file or directory tags (optional): a list of tags for the sample/samples ['bad','aptz13'] Returns: The list of md5s for all samples
entailment
def pivot(self, md5, tag=''): """Pivot on an md5 (md5 can be a single sample or a sample_set) Args: md5: The md5 can be a single sample or a sample_set tags (optional): a tag for the sample (for the prompt) Returns: Nothing but it's sets the active sample/sample_set """ # Is the md5 a tag? ss = self.workbench.generate_sample_set(md5) if ss: tag = md5 if not tag else tag md5 = ss # Is the md5 a sample_set? if self.workbench.is_sample_set(md5): # Is the sample_set one sample? ss = self.workbench.get_sample_set(md5) if len(ss) == 1: md5 = ss[0] deco = '(%s:%d)' % (tag, len(ss)) self.ipshell.push({'prompt_deco': deco}) else: deco = '(%s:1)' % tag self.ipshell.push({'prompt_deco': deco}) # Set the new md5 self.session.md5 = md5 self.session.short_md5 = md5[:6] self.ipshell.push({'md5': self.session.md5}) self.ipshell.push({'short_md5': self.session.short_md5})
Pivot on an md5 (md5 can be a single sample or a sample_set) Args: md5: The md5 can be a single sample or a sample_set tags (optional): a tag for the sample (for the prompt) Returns: Nothing but it's sets the active sample/sample_set
entailment
def tags(self):
    '''Display tag information for all samples in database'''
    all_tags = self.workbench.get_all_tags()
    if not all_tags:
        return

    # One-hot encode the tag lists, then show counts and correlations
    tag_df = self.vectorize(pd.DataFrame(all_tags), 'tags')
    print('\n%sSamples in Database%s' % (color.LightPurple, color.Normal))
    self.top_corr(tag_df)
Display tag information for all samples in database
entailment
def pull_df(self, md5):
    """Wrapper for the Workbench get_dataframe method
        Args:
            md5: pull the dataframe identified by this md5
        Returns:
            The uncompressed/unserialized dataframe, or a printable
            'data not found' message when the server raises a RemoteError.
    """
    try:
        # Server returns lz4-compressed msgpack bytes; unpack both layers
        _packed_df = self.workbench.get_dataframe(md5)
        _df = pd.read_msgpack(lz4.loads(_packed_df))
        return _df
    except zerorpc.exceptions.RemoteError as e:
        # Wrap the not-found handler so its output repr()s as a string
        return repr_to_str_decorator.r_to_s(self._data_not_found)(e)
Wrapper for the Workbench get_dataframe method Args: md5: pull the dataframe identified by this md5 Returns: The uncompressed/unserialized dataframe
entailment
def vectorize(self, df, column_name):
    """Vectorize a column in the dataframe"""
    # One-hot encode the list-valued column: join the lists with '-'
    # and expand them back out into indicator columns.
    joined = df[column_name].str.join(sep='-')
    return joined.str.get_dummies(sep='-')
Vectorize a column in the dataframe
entailment
def flatten(self, df, column_name):
    """Flatten a column in the dataframe that contains lists"""
    # Explode each (md5, [v1, v2, ...]) row into one (md5, v) row per value
    rows = []
    for md5, values in zip(df['md5'], df[column_name]):
        for value in values:
            rows.append([md5, value])
    return pd.DataFrame(rows, columns=['md5', column_name])
Flatten a column in the dataframe that contains lists
entailment
def top_corr(self, df): """Give aggregation counts and correlations""" tag_freq = df.sum() tag_freq.sort(ascending=False) corr = df.corr().fillna(1) corr_dict = corr.to_dict() for tag, count in tag_freq.iteritems(): print ' %s%s: %s%s%s (' % (color.Green, tag, color.LightBlue, count, color.Normal), tag_corrs = sorted(corr_dict[tag].iteritems(), key=operator.itemgetter(1), reverse=True) for corr_tag, value in tag_corrs[:5]: if corr_tag != tag and (value > .2): print '%s%s:%s%.1f' % (color.Green, corr_tag, color.LightBlue, value), print '%s)' % color.Normal
Give aggregation counts and correlations
entailment
def search(self, tags=None):
    """Wrapper for the Workbench search method
        Args:
            tags: a single tag 'pcap' or a list of tags to search for ['bad','aptz13']
        Returns:
            A sample_set that contains the md5s for all matching samples
    """
    # A bare string becomes a one-element tag list before the server call
    if isinstance(tags, str):
        tags = [tags]
    return self.workbench.generate_sample_set(tags)
Wrapper for the Workbench search method Args: tags: a single tag 'pcap' or a list of tags to search for ['bad','aptz13'] Returns: A sample_set that contains the md5s for all matching samples
entailment
def versions(self):
    """Announce Versions of CLI and Server
        Args:
            None
        Returns:
            The running versions of both the CLI and the Workbench Server
    """
    # CLI version (local) followed by the server's own version banner
    print('%s<<< Workbench CLI Version %s >>>%s' % (color.LightBlue, self.version, color.Normal))
    print(self.workbench.help('version'))
Announce Versions of CLI and Server Args: None Returns: The running versions of both the CLI and the Workbench Server
entailment
def run(self): ''' Running the workbench CLI ''' # Announce versions self.versions() # Sample/Tag info and Help self.tags() print '\n%s' % self.workbench.help('cli') # Now that we have the Workbench connection spun up, we register some stuff # with the embedded IPython interpreter and than spin it up # cfg = IPython.config.loader.Config() cfg = Config() cfg.InteractiveShellEmbed.autocall = 2 cfg.InteractiveShellEmbed.colors = 'Linux' cfg.InteractiveShellEmbed.color_info = True cfg.InteractiveShellEmbed.autoindent = True cfg.InteractiveShellEmbed.deep_reload = True cfg.PromptManager.in_template = ( r'{color.LightPurple}{short_md5}{color.Yellow}{prompt_deco}{color.LightBlue} Workbench{color.Green}[\#]> ') # cfg.PromptManager.out_template = '' # Create the IPython shell self.ipshell = IPython.terminal.embed.InteractiveShellEmbed( config=cfg, banner1='', exit_msg='\nWorkbench has SuperCowPowers...') # Register our transformer, the shell will use this for 'shortcut' commands auto_quoter = auto_quote_xform.AutoQuoteTransformer(self.ipshell, self.ipshell.prefilter_manager) auto_quoter.register_command_set(self.command_set) # Setting up some Pandas options pd.set_option('display.width', 140) pd.set_option('max_colwidth', 15) # Start up the shell with our set of workbench commands self.ipshell(local_ns=self.command_dict)
Running the workbench CLI
entailment
def _connect(self, server_info): """Connect to the workbench server""" # First we do a temp connect with a short heartbeat _tmp_connect = zerorpc.Client(timeout=300, heartbeat=2) _tmp_connect.connect('tcp://'+server_info['server']+':'+server_info['port']) try: _tmp_connect._zerorpc_name() _tmp_connect.close() del _tmp_connect except zerorpc.exceptions.LostRemote: print '%sError: Could not connect to Workbench Server at %s:%s%s' % \ (color.Red, server_info['server'], server_info['port'], color.Normal) sys.exit(1) # Okay do the real connection if self.workbench: self.workbench.close() self.workbench = zerorpc.Client(timeout=300, heartbeat=60) self.workbench.connect('tcp://'+server_info['server']+':'+server_info['port']) print '\n%s<<< Connected: %s:%s >>>%s' % (color.Green, server_info['server'], server_info['port'], color.Normal)
Connect to the workbench server
entailment
def _progress_print(self, sent, total):
    """Print an in-place textual progress bar for the current upload.

    Credits: http://redino.net/blog/2013/07/display-a-progress-bar-in-console-using-python/

    Args:
        sent: bytes sent so far (may exceed total on the final chunk)
        total: total bytes to send
    """
    # Guard against a zero/negative total (e.g. empty upload) instead of
    # raising ZeroDivisionError; treat it as already complete
    if total <= 0:
        percent = 100
    else:
        percent = min(int(sent * 100.0 / total), 100)
    # Floor division keeps the bar width an int on both Python 2 and 3
    # (plain '/' yields a float under Python 3 and breaks '#' * width)
    width = percent // 2
    sys.stdout.write('\r{0}[{1}{2}] {3}{4}%{5}'.
                     format(color.Green, '#'*width, ' '*(50-width),
                            color.Yellow, percent, color.Normal))
    sys.stdout.flush()
Progress print show the progress of the current upload with a neat progress bar Credits: http://redino.net/blog/2013/07/display-a-progress-bar-in-console-using-python/
entailment
def _work_request(self, worker, md5=None): """Wrapper for a work_request to workbench""" # I'm sure there's a better way to do this if not md5 and not self.session.md5: return 'Must call worker with an md5 argument...' elif not md5: md5 = self.session.md5 # Is the md5 a sample_set? if self.workbench.is_sample_set(md5): return self.workbench.set_work_request(worker, md5) # Make the work_request with worker and md5 args try: return self.workbench.work_request(worker, md5) except zerorpc.exceptions.RemoteError as e: return repr_to_str_decorator.r_to_s(self._data_not_found)(e)
Wrapper for a work_request to workbench
entailment
def _generate_command_dict(self): """Create a customized namespace for Workbench with a bunch of shortcuts and helper/alias functions that will make using the shell MUCH easier. """ # First add all the workers commands = {} for worker in self.workbench.list_all_workers(): commands[worker] = lambda md5=None, worker=worker: self._work_request(worker, md5) # Next add all the commands for command in self.workbench.list_all_commands(): # Fixme: is there a better way to get the lambda function from ZeroRPC commands[command] = self.workbench.__getattr__(command) # Now the general commands which are often overloads # for some of the workbench commands general = { 'workbench': self.workbench, 'help': self._help, 'load_sample': self.load_sample, 'pull_df': self.pull_df, 'flatten': self.flatten, 'vectorize': self.vectorize, 'top_corr': self.top_corr, 'tags': self.tags, 'pivot': self.pivot, 'search': self.search, 'reconnect': lambda info=self.server_info: self._connect(info), 'version': self.versions, 'versions': self.versions, 'short_md5': self.session.short_md5, 'prompt_deco': self.session.prompt_deco } commands.update(general) # Return the list of workbench commands return commands
Create a customized namespace for Workbench with a bunch of shortcuts and helper/alias functions that will make using the shell MUCH easier.
entailment
def _register_info(self): """Register local methods in the Workbench Information system""" # Stores information on Workbench commands and signatures for name, meth in inspect.getmembers(self, predicate=inspect.isroutine): if not name.startswith('_') and name != 'run': info = {'command': name, 'sig': str(funcsigs.signature(meth)), 'docstring': meth.__doc__} self.workbench.store_info(info, name, 'command') # Register help information self.workbench.store_info({'help': self.help.help_cli()}, 'cli', 'help') self.workbench.store_info({'help': self.help.help_cli_basic()}, 'cli_basic', 'help') self.workbench.store_info({'help': self.help.help_cli_search()}, 'search', 'help') self.workbench.store_info({'help': self.help.help_dataframe()}, 'dataframe', 'help') self.workbench.store_info({'help': self.help.help_dataframe_memory()}, 'dataframe_memory', 'help') self.workbench.store_info({'help': self.help.help_dataframe_pe()}, 'dataframe_pe', 'help')
Register local methods in the Workbench Information system
entailment
def transform(self, line, _continue_prompt):
    """Shortcut Workbench commands by using 'auto-quotes'.

    Bare tokens get wrapped in quotes so shell-style input such as
    'meta deadbeef' becomes the valid Python call meta("deadbeef").
    """
    # Get tokens from all the currently active namespaces
    ns_token_set = set([token for nspace in self.shell.all_ns_refs for token in nspace])

    # Tokenize the incoming line. A character class is used so ( ) ' "
    # are literal delimiters; the previous alternation pattern left the
    # parens unescaped, so the regex engine parsed them as an (empty)
    # capture group instead of literal characters.
    token_list = [item for item in re.split(r'[ ;,()\'"]', line) if item]
    if not token_list:
        # Blank or delimiter-only line: nothing to transform
        return line
    num_tokens = len(token_list)
    first_token = token_list[0]
    token_set = set(token_list)

    # Very conservative logic (but possibly flawed)
    # 1) Lines with any of these symbols ; , ' " ( ) aren't touched
    # 2) Need to have more than one token
    # 3) First token in line must be in the workbench command set
    # 4) If first token is 'help' then all other tokens are quoted
    # 5) Otherwise only tokens that are not in any namespace are quoted

    # Fixme: Horse shit temp hack for load_sample
    # 0) If load_sample do special processing
    if first_token == 'load_sample':
        # Quote the second arg if present and not already a known name
        # (the length guard avoids an IndexError on a bare 'load_sample')
        if num_tokens > 1 and token_list[1] not in ns_token_set:
            line = line.replace(token_list[1], '"'+token_list[1]+'",')
        return line

    # Fixme: Horse shit temp hack for pivot
    # 0) If pivot do special processing
    if first_token == 'pivot':
        # Quote every token not already defined in a namespace
        for token in token_list:
            if token not in ns_token_set:
                line = line.replace(token, '"' + token + '",')
        return line

    # 1) Lines with any of these symbols ; , ' " ( ) aren't touched
    skip_symbols = [';', ',', '\'', '"', '(', ')']
    if any(sym in line for sym in skip_symbols):
        return line

    # 2) Need more than one token and 3) first token must be a command
    if num_tokens > 1 and first_token in self.command_set:
        if first_token == 'help':
            # 4) 'help': quote every other token
            token_set.remove('help')
            for token in token_set:
                line = line.replace(token, '"'+token+'"')
        else:
            # 5) Otherwise quote only tokens unknown to every namespace.
            # NOTE(review): str.replace quotes every occurrence of the
            # token substring, not just whole-word matches -- preserved
            # from the original behavior.
            for token in token_set:
                if token not in ns_token_set:
                    line = line.replace(token, '"'+token+'"')

    # Return the processed line
    return line
Shortcut Workbench commands by using 'auto-quotes'
entailment
def decodes(self, s: str) -> BioCCollection:
    """
    Deserialize ``s`` to a BioC collection object.

    Args:
        s: a "str" instance containing a BioC collection

    Returns:
        an object of BioCCollection
    """
    # lxml wants bytes, so encode the string and wrap it in a buffer
    source = io.BytesIO(s.encode('UTF-8'))
    tree = etree.parse(source)
    collection = self.__parse_collection(tree.getroot())
    # Carry the XML declaration details over onto the collection
    docinfo = tree.docinfo
    collection.encoding = docinfo.encoding
    collection.standalone = docinfo.standalone
    collection.version = docinfo.xml_version
    return collection
Deserialize ``s`` to a BioC collection object. Args: s: a "str" instance containing a BioC collection Returns: an object of BioCollection
entailment
def decode(self, fp: TextIO) -> BioCCollection:
    """
    Deserialize ``fp`` to a BioC collection object.

    Args:
        fp: a ``.read()``-supporting file-like object containing a BioC collection

    Returns:
        an object of BioCCollection
    """
    tree = etree.parse(fp)
    root = tree.getroot()
    collection = self.__parse_collection(root)
    # Carry the XML declaration details over onto the collection
    docinfo = tree.docinfo
    collection.encoding = docinfo.encoding
    collection.standalone = docinfo.standalone
    collection.version = docinfo.xml_version
    return collection
Deserialize ``fp`` to a BioC collection object. Args: fp: a ``.read()``-supporting file-like object containing a BioC collection Returns: an object of BioCollection
entailment
def run(): """This client pushes a file into Workbench.""" # Grab server args args = client_helper.grab_server_args() # Start up workbench connection workbench = zerorpc.Client(timeout=300, heartbeat=60) workbench.connect('tcp://'+args['server']+':'+args['port']) # Upload the file into workbench my_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/pe/bad/033d91aae8ad29ed9fbb858179271232') with open(my_file,'rb') as f: # Throw file into workbench filename = os.path.basename(my_file) raw_bytes = f.read() md5 = workbench.store_sample(raw_bytes, filename, 'exe') results = workbench.work_request('view', md5) print 'Filename: %s' % filename pprint.pprint(results)
This client pushes a file into Workbench.
entailment
def execute(self, input_data):
    """Execute the worker: run self.plugin_name over the sample's raw
    bytes and return the captured renderer output."""
    sample_bytes = input_data['sample']['raw_bytes']

    # Build a Rekall session around the bytes and a renderer that
    # captures plugin output instead of printing it
    session = MemSession(sample_bytes)
    renderer = WorkbenchRenderer(session=session)
    session.RunPlugin(self.plugin_name, renderer=renderer)

    return renderer.get_output()
Execute method
entailment
def process_row(cls, data, column_map):
    """Normalize one row of Rekall plugin output into a flat dict.

    Args:
        data: dict of raw cell values from Rekall (cells may be None,
            lists, nested dicts for typed values, or plain scalars)
        column_map: maps raw Rekall keys to output column names

    Returns:
        dict keyed by the mapped column names; falsy cells become '-',
        UnixTimeStamp dicts become datetimes (epoch 0 becomes '-').
    """
    row = {}
    # .items() (not the Python-2-only .iteritems()) so this also runs
    # under Python 3; results are identical under Python 2.
    for key, value in data.items():
        if not value:
            # Empty/None/0 cells are displayed as a dash
            value = '-'
        elif isinstance(value, list):
            # Rekall list cells: the second element holds the value
            # (assumes at least two elements -- TODO confirm upstream)
            value = value[1]
        elif isinstance(value, dict):
            if 'type_name' in value:
                if 'UnixTimeStamp' in value['type_name']:
                    value = datetime.datetime.utcfromtimestamp(value['epoch'])
                    if value == datetime.datetime(1970, 1, 1, 0, 0):
                        # Epoch zero means 'no timestamp'
                        value = '-'
        # Assume the value is somehow well formed when we get here
        row[column_map[key]] = value
    return row
Process the row data from Rekall
entailment
def start(self, plugin_name=None, kwargs=None):
    """Start method: initial data structures and store some meta data.

    Called at the beginning of a plugin run; resets the captured output
    and defers the rest to the base renderer.

    Returns:
        self, so calls can be chained
    """
    # Start basically resets the output data
    self.output = []
    # NOTE(review): 'kwargs' is accepted but not forwarded to the base
    # class start() -- confirm this is intentional
    super(WorkbenchRenderer, self).start(plugin_name=plugin_name)
    return self
Start method: initial data structures and store some meta data.
entailment
def format(self, formatstring, *args):
    """Presentation Information from the Plugin.

    Only the first format() call after a section break emits a section
    message; subsequent calls are ignored until a new section begins.
    """
    if not self.incoming_section:
        return
    # Open a new section named after the format args
    self.SendMessage(['s', {'name': args}])
    self.incoming_section = False
Presentation Information from the Plugin
entailment
def SendMessage(self, statement):
    """Capture a renderer message into our output list.

    Rekall 'encapsulates' messages as [type, data] pairs; each one is
    flattened into a dict for easier downstream consumption.
    """
    kind = statement[0]
    payload = statement[1]
    self.output.append({'type': kind, 'data': payload})
Here we're actually capturing messages and putting them into our output
entailment
def open(self, directory=None, filename=None, mode="rb"):
    """Open ``directory/filename`` in the given mode and return the file object.

    NOTE(review): both parts are joined unconditionally, so callers are
    expected to supply both -- confirm against the renderer API.
    """
    target = os.path.join(directory, filename)
    return open(target, mode)
Opens a file for writing or reading.
entailment
def _file_chunks(self, data, chunk_size): """ Yield compressed chunks from a data array""" for i in xrange(0, len(data), chunk_size): yield self.compressor(data[i:i+chunk_size])
Yield compressed chunks from a data array
entailment
def stream_to_workbench(self, raw_bytes, filename, type_tag, tags):
    """Split up a large file into chunks and send to Workbench.

    Each chunk is stored individually, then the server is asked to
    combine them into one sample, which is tagged. Returns the md5 of
    the combined sample.
    """
    total_bytes = len(raw_bytes)
    sent_bytes = 0
    chunk_md5s = []
    for chunk in self._file_chunks(raw_bytes, self.chunk_size):
        chunk_md5s.append(self.workbench.store_sample(chunk, filename, self.compress_ident))
        # Advances by chunk_size even on a short final chunk, so the
        # reported count may overshoot the total slightly
        sent_bytes += self.chunk_size
        self.progress(sent_bytes, total_bytes)

    # Ask Workbench to stitch the chunks back into a single sample
    full_md5 = self.workbench.combine_samples(chunk_md5s, filename, type_tag)

    # Tag the finalized sample and hand its md5 back
    self.workbench.add_tags(full_md5, tags)
    return full_md5
Split up a large file into chunks and send to Workbench
entailment
def dumps(obj, **kwargs) -> str:
    """
    Serialize a BioC ``obj`` to a JSON formatted ``str``.

    Extra keyword arguments are forwarded to :func:`json.dumps`.
    """
    # BioCJSONEncoder knows how to encode the BioC object tree
    serialized = json.dumps(obj, cls=BioCJSONEncoder, **kwargs)
    return serialized
Serialize a BioC ``obj`` to a JSON formatted ``str``.
entailment
def dump(obj, fp, **kwargs):
    """
    Serialize ``obj`` as a JSON formatted stream to ``fp``
    (a ``.write()``-supporting file-like object).

    Extra keyword arguments are forwarded to :func:`json.dump`.
    """
    # Delegate to json.dump with the BioC-aware encoder
    result = json.dump(obj, fp, cls=BioCJSONEncoder, **kwargs)
    return result
Serialize obj as a JSON formatted stream to fp (a .write()-supporting file-like object)
entailment