| sentence1 | sentence2 | label |
|---|---|---|
def validate_doc(self, document: BioCDocument):
"""Validate a single document."""
annotations = []
annotations.extend(document.annotations)
annotations.extend(document.relations)
for passage in document.passages:
annotations.extend(passage.annotations)
annotations.extend(passage.relations)
for sentence in passage.sentences:
annotations.extend(sentence.annotations)
annotations.extend(sentence.relations)
self.current_docid = document.id
self.traceback.append(document)
text = self.__get_doc_text(document)
self.__validate_ann(document.annotations, text, 0)
self.__validate_rel(annotations, document.relations, f'document {document.id}')
for passage in document.passages:
self.traceback.append(passage)
text = self.__get_passage_text(passage)
self.__validate_ann(passage.annotations, text, passage.offset)
self.__validate_rel(annotations, passage.relations,
f'document {document.id} --> passage {passage.offset}')
for sentence in passage.sentences:
self.traceback.append(sentence)
self.__validate_ann(sentence.annotations, sentence.text, sentence.offset)
self.__validate_rel(annotations, sentence.relations,
f'document {document.id} --> sentence {sentence.offset}')
self.traceback.pop()
self.traceback.pop()
self.traceback.pop() | Validate a single document. | entailment |
def validate(self, collection: BioCCollection):
"""Validate a single collection."""
for document in collection.documents:
self.validate_doc(document) | Validate a single collection. | entailment |
def run():
"""This client pushes PE Files -> ELS Indexer."""
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
# Test out PEFile -> strings -> indexer -> search
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data/pe/bad')
file_list = [os.path.join(data_path, child) for child in os.listdir(data_path)][:20]
for filename in file_list:
# Skip OS generated files
if '.DS_Store' in filename:
continue
with open(filename, 'rb') as f:
base_name = os.path.basename(filename)
md5 = workbench.store_sample(f.read(), base_name, 'exe')
# Index the strings and features output (notice we can ask for any worker output)
# Also (super important) it all happens on the server side.
workbench.index_worker_output('strings', md5, 'strings', None)
print '\n<<< Strings for PE: %s Indexed>>>' % (base_name)
workbench.index_worker_output('pe_features', md5, 'pe_features', None)
print '<<< Features for PE: %s Indexed>>>' % (base_name)
# Well we should execute some queries against ElasticSearch at this point but as of
# version 1.2+ dynamic scripting is disabled by default, see
# http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#_enabling_dynamic_scripting
# Now actually do something interesting with our ELS index
# ES Facets are kewl (http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html)
facet_query = '{"facets" : {"tag" : {"terms" : {"field" : "string_list"}}}}'
results = workbench.search_index('strings', facet_query)
try:
print '\nQuery: %s' % facet_query
print 'Number of hits: %d' % results['hits']['total']
print 'Max Score: %f' % results['hits']['max_score']
pprint.pprint(results['facets'])
except TypeError:
print 'Probably using a Stub Indexer, if you want an ELS Indexer see the readme'
# Fuzzy is kewl (http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html)
fuzzy_query = '{"fields":["md5","sparse_features.imported_symbols"],' \
'"query": {"fuzzy" : {"sparse_features.imported_symbols" : "loadlibrary"}}}'
results = workbench.search_index('pe_features', fuzzy_query)
try:
print '\nQuery: %s' % fuzzy_query
print 'Number of hits: %d' % results['hits']['total']
print 'Max Score: %f' % results['hits']['max_score']
pprint.pprint([(hit['fields']['md5'], hit['fields']['sparse_features.imported_symbols'])
for hit in results['hits']['hits']])
except TypeError:
print 'Probably using a Stub Indexer, if you want an ELS Indexer see the readme' | This client pushes PE Files -> ELS Indexer. | entailment |
def execute(self, input_data):
''' Execute method '''
# Spin up the rekall adapter
adapter = RekallAdapter()
adapter.set_plugin_name(self.plugin_name)
# Create a temporary directory and run this plugin from there
with self.goto_temp_directory():
# Run the procdump plugin
rekall_output = adapter.execute(input_data)
# Process the output data
for line in rekall_output:
if line['type'] == 'm': # Meta
self.output['meta'] = line['data']
elif line['type'] == 't': # New Table Headers (column names)
self.column_map = {item['cname']: item['name'] if 'name' in item else item['cname'] for item in line['data']}
elif line['type'] == 'r': # Row
# Add the row to our current table
row = RekallAdapter.process_row(line['data'], self.column_map)
self.output['tables'][self.current_table_name].append(row)
# Scrape any extracted files
print 'mem_procdump: Scraping dumped files...'
for output_file in glob.glob('*'):
# Store the output into workbench, put md5s in the 'dumped_files' field
output_name = os.path.basename(output_file)
output_name = output_name.replace('executable.', '')
with open(output_file, 'rb') as dumped_file:
raw_bytes = dumped_file.read()
md5 = self.c.store_sample(raw_bytes, output_name, 'exe')
# Remove some columns from meta data
meta = self.c.work_request('meta', md5)['meta']
del meta['customer']
del meta['encoding']
del meta['import_time']
del meta['mime_type']
self.output['tables'][self.current_table_name].append(meta)
# All done
return self.output | Execute method | entailment |
def convert_to_utf8(string):
''' Convert string to UTF8 '''
if (isinstance(string, unicode)):
return string.encode('utf-8')
try:
u = unicode(string, 'utf-8')
except TypeError:
return str(string)
utf8 = u.encode('utf-8')
return utf8 | Convert string to UTF8 | entailment |
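The helper above is Python 2 code (it relies on the `unicode` built-in). For comparison, a minimal Python 3 sketch of the same idea; the function name and the assumption that incoming bytes are already UTF-8 are mine, not the original author's:

```python
def convert_to_utf8_py3(value):
    """Return UTF-8 encoded bytes for str, bytes, or arbitrary values (Python 3 sketch)."""
    if isinstance(value, bytes):
        return value                       # assume raw bytes are already UTF-8
    if isinstance(value, str):
        return value.encode('utf-8')
    return str(value).encode('utf-8')      # fall back to the value's string form
```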
def execute(self, input_data):
''' Process the input bytes with pefile '''
raw_bytes = input_data['sample']['raw_bytes']
# Have the PE File module process the file
pefile_handle, error_str = self.open_using_pefile('unknown', raw_bytes)
if not pefile_handle:
return {'error': error_str, 'dense_features': [], 'sparse_features': []}
# Now extract the various features using pefile
dense_features, sparse_features = self.extract_features_using_pefile(pefile_handle)
# Okay set my response
return {'dense_features': dense_features, 'sparse_features': sparse_features, 'tags': input_data['tags']['tags']} | Process the input bytes with pefile | entailment |
def open_using_pefile(input_name, input_bytes):
''' Open the PE File using the Python pefile module. '''
try:
pef = pefile.PE(data=input_bytes, fast_load=False)
except (AttributeError, pefile.PEFormatError), error:
print 'warning: pe_fail (with exception from pefile module) on file: %s' % input_name
error_str = '(Exception):, %s' % (str(error))
return None, error_str
# Now test to see if the features are there/extractable if not return FAIL flag
if pef.PE_TYPE is None or pef.OPTIONAL_HEADER is None or len(pef.OPTIONAL_HEADER.DATA_DIRECTORY) < 7:
print 'warning: pe_fail on file: %s' % input_name
error_str = 'warning: pe_fail on file: %s' % input_name
return None, error_str
# Success
return pef, None | Open the PE File using the Python pefile module. | entailment |
def extract_features_using_pefile(self, pef):
''' Process the PE File using the Python pefile module. '''
# Store all extracted features into feature lists
extracted_dense = {}
extracted_sparse = {}
# Now slog through the info and extract the features
feature_not_found_flag = -99
feature_default_value = 0
self._warnings = []
# Set all the dense features and sparse features to 'feature not found'
# value and then check later to see if it was found
for feature in self._dense_feature_list:
extracted_dense[feature] = feature_not_found_flag
for feature in self._sparse_feature_list:
extracted_sparse[feature] = feature_not_found_flag
# Check to make sure all the section names are standard
std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata',
'.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls',
'.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata']
for i in range(200):
std_sections.append('/'+str(i))
std_section_names = 1
extracted_sparse['section_names'] = []
for section in pef.sections:
name = convert_to_ascii_null_term(section.Name).lower()
extracted_sparse['section_names'].append(name)
if name not in std_sections:
std_section_names = 0
extracted_dense['std_section_names'] = std_section_names
extracted_dense['debug_size'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[6].Size
extracted_dense['major_version'] = pef.OPTIONAL_HEADER.MajorImageVersion
extracted_dense['minor_version'] = pef.OPTIONAL_HEADER.MinorImageVersion
extracted_dense['iat_rva'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[1].VirtualAddress
extracted_dense['export_size'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[0].Size
extracted_dense['check_sum'] = pef.OPTIONAL_HEADER.CheckSum
try:
extracted_dense['generated_check_sum'] = pef.generate_checksum()
except ValueError:
extracted_dense['generated_check_sum'] = 0
if len(pef.sections) > 0:
extracted_dense['virtual_address'] = pef.sections[0].VirtualAddress
extracted_dense['virtual_size'] = pef.sections[0].Misc_VirtualSize
extracted_dense['number_of_sections'] = pef.FILE_HEADER.NumberOfSections
extracted_dense['compile_date'] = pef.FILE_HEADER.TimeDateStamp
extracted_dense['number_of_rva_and_sizes'] = pef.OPTIONAL_HEADER.NumberOfRvaAndSizes
extracted_dense['total_size_pe'] = len(pef.__data__)
# Number of import and exports
if hasattr(pef, 'DIRECTORY_ENTRY_IMPORT'):
extracted_dense['number_of_imports'] = len(pef.DIRECTORY_ENTRY_IMPORT)
num_imported_symbols = 0
for module in pef.DIRECTORY_ENTRY_IMPORT:
num_imported_symbols += len(module.imports)
extracted_dense['number_of_import_symbols'] = num_imported_symbols
if hasattr(pef, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
extracted_dense['number_of_bound_imports'] = len(pef.DIRECTORY_ENTRY_BOUND_IMPORT)
num_imported_symbols = 0
for module in pef.DIRECTORY_ENTRY_BOUND_IMPORT:
num_imported_symbols += len(module.entries)
extracted_dense['number_of_bound_import_symbols'] = num_imported_symbols
if hasattr(pef, 'DIRECTORY_ENTRY_EXPORT'):
try:
extracted_dense['number_of_export_symbols'] = len(pef.DIRECTORY_ENTRY_EXPORT.symbols)
symbol_set = set()
for symbol in pef.DIRECTORY_ENTRY_EXPORT.symbols:
symbol_info = 'unknown'
if not symbol.name:
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
symbol_set.add(convert_to_utf8('%s' % (symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['ExportedSymbols'] = list(symbol_set)
except AttributeError:
extracted_sparse['ExportedSymbols'] = ['AttributeError']
# Specific Import info (Note this will be a sparse field woo hoo!)
if hasattr(pef, 'DIRECTORY_ENTRY_IMPORT'):
symbol_set = set()
for module in pef.DIRECTORY_ENTRY_IMPORT:
for symbol in module.imports:
symbol_info = 'unknown'
if symbol.import_by_ordinal is True:
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
# symbol_info['hint'] = symbol.hint
if symbol.bound:
symbol_info += ' bound=' + str(symbol.bound)
symbol_set.add(convert_to_utf8('%s:%s' % (module.dll, symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['imported_symbols'] = list(symbol_set)
# Do we have a second section
if len(pef.sections) >= 2:
extracted_dense['virtual_size_2'] = pef.sections[1].Misc_VirtualSize
extracted_dense['size_image'] = pef.OPTIONAL_HEADER.SizeOfImage
extracted_dense['size_code'] = pef.OPTIONAL_HEADER.SizeOfCode
extracted_dense['size_initdata'] = pef.OPTIONAL_HEADER.SizeOfInitializedData
extracted_dense['size_uninit'] = pef.OPTIONAL_HEADER.SizeOfUninitializedData
extracted_dense['pe_majorlink'] = pef.OPTIONAL_HEADER.MajorLinkerVersion
extracted_dense['pe_minorlink'] = pef.OPTIONAL_HEADER.MinorLinkerVersion
extracted_dense['pe_driver'] = 1 if pef.is_driver() else 0
extracted_dense['pe_exe'] = 1 if pef.is_exe() else 0
extracted_dense['pe_dll'] = 1 if pef.is_dll() else 0
extracted_dense['pe_i386'] = 1
if pef.FILE_HEADER.Machine != 0x014c:
extracted_dense['pe_i386'] = 0
extracted_dense['pe_char'] = pef.FILE_HEADER.Characteristics
# Data directory features!!
datadirs = {
0: 'IMAGE_DIRECTORY_ENTRY_EXPORT', 1: 'IMAGE_DIRECTORY_ENTRY_IMPORT',
2: 'IMAGE_DIRECTORY_ENTRY_RESOURCE', 5: 'IMAGE_DIRECTORY_ENTRY_BASERELOC',
12: 'IMAGE_DIRECTORY_ENTRY_IAT'}
for idx, datadir in datadirs.items():
datadir = pefile.DIRECTORY_ENTRY[idx]
if len(pef.OPTIONAL_HEADER.DATA_DIRECTORY) <= idx:
continue
directory = pef.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
extracted_dense['datadir_%s_size' % datadir] = directory.Size
# Section features
section_flags = ['IMAGE_SCN_MEM_EXECUTE', 'IMAGE_SCN_CNT_CODE', 'IMAGE_SCN_MEM_WRITE', 'IMAGE_SCN_MEM_READ']
rawexecsize = 0
vaexecsize = 0
for sec in pef.sections:
if not sec:
continue
for char in section_flags:
# does the section have one of our attribs?
if hasattr(sec, char):
rawexecsize += sec.SizeOfRawData
vaexecsize += sec.Misc_VirtualSize
break
# Take out any weird characters in section names
secname = convert_to_ascii_null_term(sec.Name).lower()
secname = secname.replace('.', '')
if secname in std_sections:
extracted_dense['sec_entropy_%s' % secname] = sec.get_entropy()
extracted_dense['sec_rawptr_%s' % secname] = sec.PointerToRawData
extracted_dense['sec_rawsize_%s' % secname] = sec.SizeOfRawData
extracted_dense['sec_vasize_%s' % secname] = sec.Misc_VirtualSize
extracted_dense['sec_va_execsize'] = vaexecsize
extracted_dense['sec_raw_execsize'] = rawexecsize
# Imphash (implemented in pefile 1.2.10-139 or later)
try:
extracted_sparse['imp_hash'] = pef.get_imphash()
except AttributeError:
extracted_sparse['imp_hash'] = 'Not found: Install pefile 1.2.10-139 or later'
# Register if there were any pe warnings
warnings = pef.get_warnings()
if warnings:
extracted_dense['pe_warnings'] = 1
extracted_sparse['pe_warning_strings'] = warnings
else:
extracted_dense['pe_warnings'] = 0
# Issue a warning if the feature isn't found
for feature in self._dense_feature_list:
if extracted_dense[feature] == feature_not_found_flag:
extracted_dense[feature] = feature_default_value
if (self._verbose):
print 'info: Feature: %s not found! Setting to %d' % (feature, feature_default_value)
# Issue a warning if the feature isn't found
for feature in self._sparse_feature_list:
if extracted_sparse[feature] == feature_not_found_flag:
extracted_sparse[feature] = [] # For sparse data probably best default
if (self._verbose):
print 'info: Feature: %s not found! Setting to %d' % (feature, feature_default_value)
# Set the features for the class var
self._dense_features = extracted_dense
self._sparse_features = extracted_sparse
return self.get_dense_features(), self.get_sparse_features() | Process the PE File using the Python pefile module. | entailment |
def read_log(self, logfile):
"""The read_log method returns a memory efficient generator for rows in a Bro log.
Usage:
rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Args:
logfile: The Bro Log file.
"""
# Make sure we're at the beginning
logfile.seek(0)
# First parse the header of the bro log
field_names, _ = self._parse_bro_header(logfile)
# Note: SO stupid to write a csv reader, but csv.DictReader on Bro
# files was doing something weird with generator output that
# affected zeroRPC and gave 'could not route _zpc_more' error.
# So wrote my own, put a sleep at the end, seems to fix it.
while 1:
_line = next(logfile).strip()
if not _line.startswith('#close'):
yield self._cast_dict(dict(zip(field_names, _line.split(self.delimiter))))
else:
time.sleep(.1) # Give time for zeroRPC to finish messages
break | The read_log method returns a memory efficient generator for rows in a Bro log.
Usage:
rows = my_bro_reader.read_log(logfile)
for row in rows:
do something with row
Args:
logfile: The Bro Log file. | entailment |
def _parse_bro_header(self, logfile):
"""This method tries to parse the Bro log header section.
Note: My googling is failing me on the documentation on the format,
so just making a lot of assumptions and skipping some shit.
Assumption 1: The delimiter is a tab.
Assumption 2: Types are either time, string, int or float
Assumption 3: The header always ends with #fields and #types as
the last two lines.
Format example:
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path httpheader_recon
#fields ts origin useragent header_events_json
#types time string string string
Args:
logfile: The Bro log file.
Returns:
A tuple of 2 lists. One for field names and other for field types.
"""
# Skip until you find the #fields line
_line = next(logfile)
while (not _line.startswith('#fields')):
_line = next(logfile)
# Read in the field names
_field_names = _line.strip().split(self.delimiter)[1:]
# Read in the types
_line = next(logfile)
_field_types = _line.strip().split(self.delimiter)[1:]
# Return the header info
return _field_names, _field_types | This method tries to parse the Bro log header section.
Note: My googling is failing me on the documentation on the format,
so just making a lot of assumptions and skipping some shit.
Assumption 1: The delimiter is a tab.
Assumption 2: Types are either time, string, int or float
Assumption 3: The header always ends with #fields and #types as
the last two lines.
Format example:
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path httpheader_recon
#fields ts origin useragent header_events_json
#types time string string string
Args:
logfile: The Bro log file.
Returns:
A tuple of 2 lists. One for field names and other for field types. | entailment |
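To make the header format concrete, here is a small standalone sketch that applies the same skip-to-#fields logic to the example header from the docstring (io.StringIO stands in for a real Bro log file handle; this is an illustration, not the worker's actual code path):

```python
import io

# The example header from the docstring above, tab-delimited.
header = (
    "#separator \\x09\n"
    "#fields\tts\torigin\tuseragent\theader_events_json\n"
    "#types\ttime\tstring\tstring\tstring\n"
)
log = io.StringIO(header)
line = next(log)
while not line.startswith('#fields'):
    line = next(log)
field_names = line.strip().split('\t')[1:]
field_types = next(log).strip().split('\t')[1:]
print(field_names)   # ['ts', 'origin', 'useragent', 'header_events_json']
print(field_types)   # ['time', 'string', 'string', 'string']
```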
def _cast_dict(self, data_dict):
"""Internal method that makes sure any dictionary elements
are properly cast into the correct types, instead of
just treating everything like a string from the csv file.
Args:
data_dict: dictionary containing bro log data.
Returns:
Cleaned Data dict.
"""
for key, value in data_dict.iteritems():
data_dict[key] = self._cast_value(value)
# Fixme: resp_body_data can be very large so removing it for now
if 'resp_body_data' in data_dict:
del data_dict['resp_body_data']
return data_dict | Internal method that makes sure any dictionary elements
are properly cast into the correct types, instead of
just treating everything like a string from the csv file.
Args:
data_dict: dictionary containing bro log data.
Returns:
Cleaned Data dict. | entailment |
def _cast_value(self, value):
"""Internal method that makes sure every value in dictionary
is properly cast into the correct types, instead of
just treating everything like a string from the csv file.
Args:
value : The value to be cast.
Returns:
The cast value.
"""
# Try to convert to a datetime (if requested)
if (self.convert_datetimes):
try:
date_time = datetime.datetime.fromtimestamp(float(value))
if datetime.datetime(1970, 1, 1) > date_time:
raise ValueError
else:
return date_time
# Next try a set of primitive types
except ValueError:
pass
# Try conversion to basic types
tests = (int, float, str)
for test in tests:
try:
return test(value)
except ValueError:
continue
return value | Internal method that makes sure every value in dictionary
is properly cast into the correct types, instead of
just treating everything like a string from the csv file.
Args:
value : The value to be cast.
Returns:
The cast value. | entailment |
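A self-contained sketch of the same cast ladder, outside the class (the function name and example values are hypothetical):

```python
import datetime

def cast_value(value, convert_datetimes=False):
    """Try datetime (optionally), then int, float, and str, mirroring the method above."""
    if convert_datetimes:
        try:
            ts = datetime.datetime.fromtimestamp(float(value))
            if ts >= datetime.datetime(1970, 1, 1):
                return ts
        except ValueError:
            pass
    for cast in (int, float, str):
        try:
            return cast(value)
        except ValueError:
            continue
    return value

print(cast_value('42'))     # 42 (int)
print(cast_value('3.14'))   # 3.14 (float)
print(cast_value('-'))      # '-' (left as a string)
```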
def run():
"""This client shows workbench extacting files from a zip file."""
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
# Test out zip data
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data/zip')
file_list = [os.path.join(data_path, child) for child in os.listdir(data_path)]
for filename in file_list:
with open(filename,'rb') as f:
base_name = os.path.basename(filename)
md5 = workbench.store_sample(f.read(), base_name, 'zip')
results = workbench.work_request('view', md5)
print 'Filename: %s ' % (base_name)
pprint.pprint(results)
# The unzip worker gives you a list of md5s back
# Run meta on all the unzipped files.
results = workbench.work_request('unzip', md5)
print '\n*** Filename: %s ***' % (base_name)
for child_md5 in results['unzip']['payload_md5s']:
pprint.pprint(workbench.work_request('meta', child_md5)) | This client shows workbench extracting files from a zip file. | entailment |
def execute(self, input_data):
''' Execute the VTQuery worker '''
md5 = input_data['meta']['md5']
response = requests.get('http://www.virustotal.com/vtapi/v2/file/report',
params={'apikey':self.apikey,'resource':md5, 'allinfo':1})
# Make sure we got a json blob back
try:
vt_output = response.json()
except ValueError:
return {'vt_error': 'VirusTotal Query Error, no valid response... past per min quota?'}
# Just pull some of the fields
output = {field:vt_output[field] for field in vt_output.keys() if field not in self.exclude}
# Check for not-found
not_found = False if output else True
# Add in file_type
output['file_type'] = input_data['meta']['file_type']
# Toss back a not found
if not_found:
output['not_found'] = True
return output
# Organize the scans fields
scan_results = collections.Counter()
for scan in vt_output['scans'].values():
if 'result' in scan:
if scan['result']:
scan_results[scan['result']] += 1
output['scan_results'] = scan_results.most_common(5)
return output | Execute the VTQuery worker | entailment |
def get_peid_db():
''' Grab the peid_userdb.txt file from local disk '''
# Try to find the yara rules directory relative to the worker
my_dir = os.path.dirname(os.path.realpath(__file__))
db_path = os.path.join(my_dir, 'peid_userdb.txt')
if not os.path.exists(db_path):
raise RuntimeError('peid could not find peid_userdb.txt under: %s' % db_path)
# Okay load up signature
signatures = peutils.SignatureDatabase(data = open(db_path, 'rb').read())
return signatures | Grab the peid_userdb.txt file from local disk | entailment |
def execute(self, input_data):
''' Execute the PEIDWorker '''
raw_bytes = input_data['sample']['raw_bytes']
# Have the PE File module process the file
try:
pefile_handle = pefile.PE(data=raw_bytes, fast_load=False)
except (AttributeError, pefile.PEFormatError), error:
return {'error': str(error), 'match_list': []}
# Now get information from PEID module
peid_match = self.peid_features(pefile_handle)
return {'match_list': peid_match} | Execute the PEIDWorker | entailment |
def peid_features(self, pefile_handle):
''' Get features from PEid signature database'''
peid_match = self.peid_sigs.match(pefile_handle)
return peid_match if peid_match else [] | Get features from PEid signature database | entailment |
def run():
"""This client pulls PCAP files for building report.
Returns:
A list with `view_pcap` , `meta` and `filename` objects.
"""
global WORKBENCH
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
WORKBENCH = zerorpc.Client(timeout=300, heartbeat=60)
WORKBENCH.connect('tcp://'+args['server']+':'+args['port'])
data_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../data/pcap')
file_list = [os.path.join(data_path, child) for child in \
os.listdir(data_path)]
results = []
for filename in file_list:
# Skip OS generated files
if '.DS_Store' in filename: continue
# Process the pcap file
with open(filename,'rb') as f:
md5 = WORKBENCH.store_sample(f.read(), filename, 'pcap')
result = WORKBENCH.work_request('view_pcap', md5)
result.update(WORKBENCH.work_request('meta', result['view_pcap']['md5']))
result['filename'] = result['meta']['filename'].split('/')[-1]
results.append(result)
return results | This client pulls PCAP files for building a report.
Returns:
A list with `view_pcap` , `meta` and `filename` objects. | entailment |
def show_files(md5):
'''Renders template with `view` of the md5.'''
if not WORKBENCH:
return flask.redirect('/')
md5_view = WORKBENCH.work_request('view', md5)
return flask.render_template('templates/md5_view.html', md5_view=md5_view['view'], md5=md5) | Renders template with `view` of the md5. | entailment |
def show_md5_view(md5):
'''Renders template with `stream_sample` of the md5.'''
if not WORKBENCH:
return flask.redirect('/')
md5_view = WORKBENCH.stream_sample(md5)
return flask.render_template('templates/md5_view.html', md5_view=list(md5_view), md5=md5) | Renders template with `stream_sample` of the md5. | entailment |
def execute(self, input_data):
"""This worker puts the output of pe_features into a dictionary of dataframes"""
if 'sample' in input_data:
print 'Warning: PEFeaturesDF is supposed to be called on a sample_set'
self.samples.append(input_data['sample']['md5'])
else:
self.samples = input_data['sample_set']['md5_list']
# Make a sample set
sample_set = self.workbench.store_sample_set(self.samples)
# Dense Features
dense_features = self.workbench.set_work_request('pe_features', sample_set, ['md5', 'tags', 'dense_features'])
# Fixme: There's probably a nicer/better way to do this
flat_features = []
for feat in dense_features:
feat['dense_features'].update({'md5': feat['md5'], 'tags': feat['tags']})
flat_features.append(feat['dense_features'])
dense_df = pd.DataFrame(flat_features)
df_packed = dense_df.to_msgpack()
dense_df_md5 = self.workbench.store_sample(df_packed, 'pe_features_dense_df', 'dataframe')
# Sparse Features
sparse_features = self.workbench.set_work_request('pe_features', sample_set, ['md5', 'tags', 'sparse_features'])
# Fixme: There's probably a nicer/better way to do this
flat_features = []
for feat in sparse_features:
feat['sparse_features'].update({'md5': feat['md5'], 'tags': feat['tags']})
flat_features.append(feat['sparse_features'])
sparse_df = pd.DataFrame(flat_features)
df_packed = sparse_df.to_msgpack()
sparse_df_md5 = self.workbench.store_sample(df_packed, 'pe_features_sparse_df', 'dataframe')
# Return the dataframes
return {'dense_features': dense_df_md5, 'sparse_features': sparse_df_md5} | This worker puts the output of pe_features into a dictionary of dataframes | entailment |
def add_node(self, node_id, name, labels):
"""Add the node with name and labels.
Args:
node_id: Id for the node.
name: Name for the node.
labels: Label for the node.
Raises:
NotImplementedError: When adding labels is not supported.
"""
node = self.graph_db.get_or_create_indexed_node('Node', 'node_id', node_id, {'node_id': node_id, 'name': name})
try:
node.add_labels(*labels)
except NotImplementedError:
pass | Add the node with name and labels.
Args:
node_id: Id for the node.
name: Name for the node.
labels: Label for the node.
Raises:
NotImplementedError: When adding labels is not supported. | entailment |
def add_rel(self, source_node_id, target_node_id, rel):
"""Add a relationship between nodes.
Args:
source_node_id: Node Id for the source node.
target_node_id: Node Id for the target node.
rel: Name of the relationship 'contains'
"""
# Add the relationship
n1_ref = self.graph_db.get_indexed_node('Node', 'node_id', source_node_id)
n2_ref = self.graph_db.get_indexed_node('Node', 'node_id', target_node_id)
# Sanity check
if not n1_ref or not n2_ref:
print 'Cannot add relationship between unfound nodes: %s --> %s' % (source_node_id, target_node_id)
return
path = neo4j.Path(n1_ref, rel, n2_ref)
path.get_or_create(self.graph_db) | Add a relationship between nodes.
Args:
source_node_id: Node Id for the source node.
target_node_id: Node Id for the target node.
rel: Name of the relationship 'contains' | entailment |
def add_node(self, node_id, name, labels):
"""NeoDB Stub."""
print 'NeoDB Stub getting called...'
print '%s %s %s %s' % (self, node_id, name, labels) | NeoDB Stub. | entailment |
def add_rel(self, source_node_id, target_node_id, rel):
"""NeoDB Stub."""
print 'NeoDB Stub getting called...'
print '%s %s %s %s' % (self, source_node_id, target_node_id, rel) | NeoDB Stub. | entailment |
def tail_file(filename):
''' Tail a file using pygtail. Note: this could probably be improved '''
with make_temp_file() as offset_file:
while True:
for line in pygtail.Pygtail(filename, offset_file=offset_file):
yield line
time.sleep(1.0) | Tail a file using pygtail. Note: this could probably be improved | entailment |
def execute(self, input_data):
''' Execute the ViewPE worker '''
# Just a small check to make sure we haven't been called on the wrong file type
if (input_data['meta']['type_tag'] != 'exe'):
return {'error': self.__class__.__name__+': called on '+input_data['meta']['type_tag']}
view = {}
view['indicators'] = list(set([item['category'] for item in input_data['pe_indicators']['indicator_list']]))
view['peid_matches'] = input_data['pe_peid']['match_list']
view['yara_sigs'] = input_data['yara_sigs']['matches'].keys()
view['classification'] = input_data['pe_classifier']['classification']
view['disass'] = self.safe_get(input_data, ['pe_disass', 'decode'])[:15]
view.update(input_data['meta'])
return view | Execute the ViewPE worker | entailment |
def safe_get(data, key_list):
''' Safely access dictionary keys when plugin may have failed '''
for key in key_list:
data = data.get(key, {})
return data if data else 'plugin_failed' | Safely access dictionary keys when plugin may have failed | entailment |
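A quick illustration of safe_get's fallback behavior (a standalone copy of the helper; the example data is made up):

```python
def safe_get(data, key_list):
    """Walk nested dict keys, returning 'plugin_failed' if any hop is missing or empty."""
    for key in key_list:
        data = data.get(key, {})
    return data if data else 'plugin_failed'

blob = {'pe_disass': {'decode': ['push ebp', 'mov ebp, esp']}}
print(safe_get(blob, ['pe_disass', 'decode']))   # ['push ebp', 'mov ebp, esp']
print(safe_get(blob, ['pe_disass', 'missing']))  # 'plugin_failed'
```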
def execute(self):
''' Begin capturing PCAPs and sending them to workbench '''
# Create a temporary directory
self.temp_dir = tempfile.mkdtemp()
os.chdir(self.temp_dir)
# Spin up the directory watcher
DirWatcher(self.temp_dir, self.file_created)
# Spin up tcpdump
self.subprocess_manager(self.tcpdump_cmd) | Begin capturing PCAPs and sending them to workbench | entailment |
def file_created(self, filepath):
''' File created callback '''
# Send the on-deck pcap to workbench
if self.on_deck:
self.store_file(self.on_deck)
os.remove(self.on_deck)
# Now put the newly created file on-deck
self.on_deck = filepath | File created callback | entailment |
def store_file(self, filename):
''' Store a file into workbench '''
# Spin up workbench
self.workbench = zerorpc.Client(timeout=300, heartbeat=60)
self.workbench.connect("tcp://127.0.0.1:4242")
# Open the file and send it to workbench
storage_name = "streaming_pcap" + str(self.pcap_index)
print filename, storage_name
with open(filename,'rb') as f:
self.workbench.store_sample(f.read(), storage_name, 'pcap')
self.pcap_index += 1
# Close workbench client
self.workbench.close() | Store a file into workbench | entailment |
def encode_document(obj):
"""Encode a single document."""
warnings.warn("deprecated. Please use bioc.biocxml.encoder.encode_document", DeprecationWarning)
return bioc.biocxml.encoder.encode_document(obj) | Encode a single document. | entailment |
def encode_passage(obj):
"""Encode a single passage."""
warnings.warn("deprecated. Please use bioc.biocxml.encoder.encode_passage", DeprecationWarning)
return bioc.biocxml.encoder.encode_passage(obj) | Encode a single passage. | entailment |
def encode_sentence(obj):
"""Encode a single sentence."""
warnings.warn("deprecated. Please use bioc.biocxml.encoder.encode_sentence", DeprecationWarning)
return bioc.biocxml.encoder.encode_sentence(obj) | Encode a single sentence. | entailment |
def encode_annotation(obj):
"""Encode a single annotation."""
warnings.warn("deprecated. Please use bioc.biocxml.encoder.encode_annotation",
DeprecationWarning)
return bioc.biocxml.encoder.encode_annotation(obj) | Encode a single annotation. | entailment |
def encode_relation(obj):
"""Encode a single relation."""
warnings.warn("deprecated. Please use bioc.biocxml.encoder.encode_relation", DeprecationWarning)
return bioc.biocxml.encoder.encode_relation(obj) | Encode a single relation. | entailment |
def parse_eprocess(self, eprocess_data):
"""Parse the EProcess object we get from some rekall output"""
Name = eprocess_data['_EPROCESS']['Cybox']['Name']
PID = eprocess_data['_EPROCESS']['Cybox']['PID']
PPID = eprocess_data['_EPROCESS']['Cybox']['Parent_PID']
return {'Name': Name, 'PID': PID, 'PPID': PPID} | Parse the EProcess object we get from some rekall output | entailment |
def execute(self, input_data):
''' Execute the Unzip worker '''
raw_bytes = input_data['sample']['raw_bytes']
zipfile_output = zipfile.ZipFile(StringIO(raw_bytes))
payload_md5s = []
for name in zipfile_output.namelist():
filename = os.path.basename(name)
payload_md5s.append(self.workbench.store_sample(zipfile_output.read(name), name, 'unknown'))
return {'payload_md5s': payload_md5s} | Execute the Unzip worker | entailment |
def execute(self, input_data):
''' Execute method '''
# Spin up the rekall adapter
adapter = RekallAdapter()
adapter.set_plugin_name(self.plugin_name)
rekall_output = adapter.execute(input_data)
# Process the output data
for line in rekall_output:
if line['type'] == 'm': # Meta
self.output['meta'] = line['data']
elif line['type'] == 's': # New Session (Table)
if line['data']['name']:
self.current_table_name = str(line['data']['name'][1].v())
elif line['type'] == 't': # New Table Headers (column names)
self.column_map = {item['cname']: item['name'] if 'name' in item else item['cname'] for item in line['data']}
elif line['type'] == 'r': # Row
# Add the row to our current table
row = RekallAdapter.process_row(line['data'], self.column_map)
self.output['tables'][self.current_table_name].append(row)
# Process Base entries
if 'Base' in row:
base_info = self.parse_base(row)
row.update(base_info)
else:
print 'Got unknown line %s: %s' % (line['type'], line['data'])
# All done
return self.output | Execute method | entailment |
def help_cli(self):
""" Help on Workbench CLI """
help = '%sWelcome to Workbench CLI Help:%s' % (color.Yellow, color.Normal)
help += '\n\t%s> help cli_basic %s for getting started help' % (color.Green, color.LightBlue)
help += '\n\t%s> help workers %s for help on available workers' % (color.Green, color.LightBlue)
help += '\n\t%s> help search %s for help on searching samples' % (color.Green, color.LightBlue)
help += '\n\t%s> help dataframe %s for help on making dataframes' % (color.Green, color.LightBlue)
help += '\n\t%s> help commands %s for help on workbench commands' % (color.Green, color.LightBlue)
help += '\n\t%s> help topic %s where topic can be a help, command or worker' % (color.Green, color.LightBlue)
help += '\n\n%sNote: cli commands are transformed into python calls' % (color.Yellow)
help += '\n\t%s> help cli_basic --> help("cli_basic")%s' % (color.Green, color.Normal)
return help | Help on Workbench CLI | entailment |
def help_cli_basic(self):
""" Help for Workbench CLI Basics """
help = '%sWorkbench: Getting started...' % (color.Yellow)
help += '\n%sLoad in a sample:' % (color.Green)
help += '\n\t%s> load_sample /path/to/file' % (color.LightBlue)
help += '\n\n%sNotice the prompt now shows the md5 of the sample...'% (color.Yellow)
help += '\n%sRun workers on the sample:' % (color.Green)
help += '\n\t%s> view' % (color.LightBlue)
help += '\n%sType the \'help workers\' or the first part of the worker <tab>...' % (color.Green)
help += '\n\t%s> help workers (lists all possible workers)' % (color.LightBlue)
help += '\n\t%s> pe_<tab> (will give you pe_classifier, pe_deep_sim, pe_features, pe_indicators, pe_peid)%s' % (color.LightBlue, color.Normal)
return help | Help for Workbench CLI Basics | entailment |
def help_cli_search(self):
""" Help for Workbench CLI Search """
help = '%sSearch: %s returns sample_sets, a sample_set is a set/list of md5s.' % (color.Yellow, color.Green)
help += '\n\n\t%sSearch for all samples in the database that are known bad pe files,' % (color.Green)
help += '\n\t%sthis command returns the sample_set containing the matching items'% (color.Green)
help += '\n\t%s> my_bad_exes = search([\'bad\', \'exe\'])' % (color.LightBlue)
help += '\n\n\t%sRun workers on this sample_set:' % (color.Green)
help += '\n\t%s> pe_outputs = pe_features(my_bad_exes) %s' % (color.LightBlue, color.Normal)
help += '\n\n\t%sLoop on the generator (or make a DataFrame see >help dataframe)' % (color.Green)
help += '\n\t%s> for output in pe_outputs: %s' % (color.LightBlue, color.Normal)
help += '\n\t\t%s print output %s' % (color.LightBlue, color.Normal)
return help | Help for Workbench CLI Search | entailment |
def help_dataframe(self):
""" Help for making a DataFrame with Workbench CLI """
help = '%sMaking a DataFrame: %s how to make a dataframe from raw data (pcap, memory, pe files)' % (color.Yellow, color.Green)
help += '\n\t%sNote: for memory_image and pe_files see > help dataframe_memory or dataframe_pe' % (color.Green)
help += '\n\n%sPCAP Example:' % (color.Green)
help += '\n\t%s> load_sample /path/to/pcap/gold_xxx.pcap [\'bad\', \'threatglass\']' % (color.LightBlue)
help += '\n\t%s> view # view is your friend use it often' % (color.LightBlue)
help += '\n\n%sGrab the http_log from the pcap (also play around with other logs):' % (color.Green)
help += '\n\t%s> http_log_md5 = view()[\'view\'][\'bro_logs\'][\'http_log\']' % (color.LightBlue)
help += '\n\t%s> http_log_md5 (returns the md5 of the http_log)' % (color.LightBlue)
help += '\n\n%sStream back the ^contents^ of the http_log:' % (color.Green)
help += '\n\t%s> http_log = stream_sample(http_log_md5)' % (color.LightBlue)
help += '\n\n%sPut the http_log into a dataframe:' % (color.Green)
help += '\n\t%s> http_df = pd.DataFrame(http_log)' % (color.LightBlue)
help += '\n\t%s> http_df.head()' % (color.LightBlue)
help += '\n\t%s> http_df.groupby([\'host\',\'id.resp_h\',\'resp_mime_types\'])[[\'response_body_len\']].sum()' % (color.LightBlue)
help += '\n\t%s> http_df.describe() %s' % (color.LightBlue, color.Normal)
return help | Help for making a DataFrame with Workbench CLI | entailment |
def help_dataframe_memory(self):
""" Help for making a DataFrame with Workbench CLI """
help = '%sMaking a DataFrame: %s how to make a dataframe from memory_forensics sample' % (color.Yellow, color.Green)
help += '\n\n%sMemory Images Example:' % (color.Green)
help += '\n\t%s> load_sample /path/to/pcap/exemplar4.vmem [\'bad\', \'aptz13\']' % (color.LightBlue)
help += '\n\t%s> view # view is your friend use it often' % (color.LightBlue)
help += '\n\t%s> <<< TODO :) >>> %s' % (color.LightBlue, color.Normal)
return help | Help for making a DataFrame with Workbench CLI | entailment |
def help_dataframe_pe(self):
""" Help for making a DataFrame with Workbench CLI """
help = '%sMaking a DataFrame: %s how to make a dataframe from pe files' % (color.Yellow, color.Green)
help += '\n\n%sPE Files Example (loading a directory):' % (color.Green)
help += '\n\t%s> load_sample /path/to/pe/bad [\'bad\', \'case_69\']' % (color.LightBlue)
help += '\n\n\t%sSearch for all samples in the database that are pe files,' % (color.Green)
help += '\n\t%sthis command returns the sample_set containing the matching items'% (color.Green)
help += '\n\t%s> my_exes = search([\'exe\'])' % (color.LightBlue)
help += '\n\n\t%sRun workers on this sample_set:' % (color.Green)
help += '\n\t%s> pe_outputs = set_work_request(\'pe_features\', my_exes, [\'md5\', \'dense_features.*\', \'tags\'])' % (color.LightBlue)
help += '\n\n\t%sMake a DataFrame:' % (color.Green)
help += '\n\t%s> pe_df = pd.DataFrame(pe_outputs) %s' % (color.LightBlue, color.Normal)
help += '\n\t%s> pe_df.head() %s' % (color.LightBlue, color.Normal)
help += '\n\t%s> pe_df = flatten_tags(pe_df) %s' % (color.LightBlue, color.Normal)
help += '\n\t%s> pe_df.hist(\'check_sum\',\'tags\') %s' % (color.LightBlue, color.Normal)
help += '\n\t%s> pe_df.boxplot(\'check_sum\',\'tags\') %s' % (color.LightBlue, color.Normal)
return help | Help for making a DataFrame with Workbench CLI | entailment |
def _all_help_methods(self):
""" Returns a list of all the Workbench commands"""
methods = {name:method for name, method in inspect.getmembers(self, predicate=inspect.isroutine) if not name.startswith('_')}
return methods | Returns a list of all the Workbench commands | entailment |
def execute(self, input_data):
''' ViewPcapDeep execute method '''
# Copy info from input
view = input_data['view_pcap']
# Grab a couple of handles
extracted_files = input_data['view_pcap']['extracted_files']
# Dump a couple of fields
del view['extracted_files']
# Grab additional info about the extracted files
view['extracted_files'] = [self.workbench.work_request('meta_deep', md5,
['md5', 'sha256', 'entropy', 'ssdeep', 'file_size', 'file_type']) for md5 in extracted_files]
return view | ViewPcapDeep execute method | entailment |
def execute(self, input_data):
''' Execute Method '''
# View on all the meta data files in the sample
fields = ['filename', 'md5', 'length', 'customer', 'import_time', 'type_tag']
view = {key:input_data['meta'][key] for key in fields}
return view | Execute Method | entailment |
def execute(self, input_data):
""" Info objects all have a type_tag of ('help','worker','command', or 'other') """
input_data = input_data['info']
type_tag = input_data['type_tag']
if type_tag == 'help':
return {'help': input_data['help'], 'type_tag': input_data['type_tag']}
elif type_tag == 'worker':
out_keys = ['name', 'dependencies', 'docstring', 'type_tag']
return {key: value for key, value in input_data.iteritems() if key in out_keys}
elif type_tag == 'command':
out_keys = ['command', 'sig', 'docstring', 'type_tag']
return {key: value for key, value in input_data.iteritems() if key in out_keys}
elif type_tag == 'other':
return input_data
else:
print 'Got a malformed info object %s' % input_data
return input_data | Info objects all have a type_tag of ('help','worker','command', or 'other') | entailment |
def parse_collection(obj: dict) -> BioCCollection:
"""Deserialize a dict obj to a BioCCollection object"""
collection = BioCCollection()
collection.source = obj['source']
collection.date = obj['date']
collection.key = obj['key']
collection.infons = obj['infons']
for doc in obj['documents']:
collection.add_document(parse_doc(doc))
return collection | Deserialize a dict obj to a BioCCollection object | entailment |
def parse_annotation(obj: dict) -> BioCAnnotation:
"""Deserialize a dict obj to a BioCAnnotation object"""
ann = BioCAnnotation()
ann.id = obj['id']
ann.infons = obj['infons']
ann.text = obj['text']
for loc in obj['locations']:
ann.add_location(BioCLocation(loc['offset'], loc['length']))
return ann | Deserialize a dict obj to a BioCAnnotation object | entailment |
def parse_relation(obj: dict) -> BioCRelation:
"""Deserialize a dict obj to a BioCRelation object"""
rel = BioCRelation()
rel.id = obj['id']
rel.infons = obj['infons']
for node in obj['nodes']:
rel.add_node(BioCNode(node['refid'], node['role']))
return rel | Deserialize a dict obj to a BioCRelation object | entailment |
def parse_sentence(obj: dict) -> BioCSentence:
"""Deserialize a dict obj to a BioCSentence object"""
sentence = BioCSentence()
sentence.offset = obj['offset']
sentence.infons = obj['infons']
sentence.text = obj['text']
for annotation in obj['annotations']:
sentence.add_annotation(parse_annotation(annotation))
for relation in obj['relations']:
sentence.add_relation(parse_relation(relation))
return sentence | Deserialize a dict obj to a BioCSentence object | entailment |
def parse_passage(obj: dict) -> BioCPassage:
"""Deserialize a dict obj to a BioCPassage object"""
passage = BioCPassage()
passage.offset = obj['offset']
passage.infons = obj['infons']
if 'text' in obj:
passage.text = obj['text']
for sentence in obj['sentences']:
passage.add_sentence(parse_sentence(sentence))
for annotation in obj['annotations']:
passage.add_annotation(parse_annotation(annotation))
for relation in obj['relations']:
passage.add_relation(parse_relation(relation))
return passage | Deserialize a dict obj to a BioCPassage object | entailment |
def parse_doc(obj: dict) -> BioCDocument:
"""Deserialize a dict obj to a BioCDocument object"""
doc = BioCDocument()
doc.id = obj['id']
doc.infons = obj['infons']
for passage in obj['passages']:
doc.add_passage(parse_passage(passage))
for annotation in obj['annotations']:
doc.add_annotation(parse_annotation(annotation))
for relation in obj['relations']:
doc.add_relation(parse_relation(relation))
return doc | Deserialize a dict obj to a BioCDocument object | entailment |
def load(fp, **kwargs) -> BioCCollection:
"""
Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to
a BioCCollection object
Args:
fp: a file containing a JSON document
**kwargs:
Returns:
BioCCollection: a collection
"""
obj = json.load(fp, **kwargs)
return parse_collection(obj) | Deserialize fp (a .read()-supporting text file or binary file containing a JSON document) to
a BioCCollection object
Args:
fp: a file containing a JSON document
**kwargs:
Returns:
BioCCollection: a collection | entailment |
def loads(s: str, **kwargs) -> BioCCollection:
"""
Deserialize s (a str, bytes or bytearray instance containing a JSON document) to
a BioCCollection object.
Args:
s(str):
**kwargs:
Returns:
BioCCollection: a collection
"""
obj = json.loads(s, **kwargs)
return parse_collection(obj) | Deserialize s (a str, bytes or bytearray instance containing a JSON document) to
a BioCCollection object.
Args:
s(str):
**kwargs:
Returns:
BioCCollection: a collection | entailment |
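A minimal end-to-end sketch of the JSON shape these parsers expect, inferred from parse_collection/parse_doc/parse_passage above (the field set is an assumption, optional keys may be omitted in real data, and it presumes the functions and BioC data classes above are importable):

```python
import json

minimal = {
    "source": "example", "date": "20240101", "key": "example.key", "infons": {},
    "documents": [{
        "id": "doc-1", "infons": {}, "annotations": [], "relations": [],
        "passages": [{
            "offset": 0, "infons": {}, "text": "Hello world.",
            "sentences": [], "annotations": [], "relations": []
        }]
    }]
}
collection = loads(json.dumps(minimal))           # equivalently: parse_collection(minimal)
print(collection.documents[0].passages[0].text)   # Hello world.
```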
def execute(self, input_data):
''' Okay this worker is going build graphs from PCAP Bro output logs '''
# Grab the Bro log handles from the input
bro_logs = input_data['pcap_bro']
# Weird log
if 'weird_log' in bro_logs:
stream = self.workbench.stream_sample(bro_logs['weird_log'])
self.weird_log_graph(stream)
# HTTP log
gsleep()
stream = self.workbench.stream_sample(bro_logs['http_log'])
self.http_log_graph(stream)
# Files log
gsleep()
stream = self.workbench.stream_sample(bro_logs['files_log'])
self.files_log_graph(stream)
return {'output':'go to http://localhost:7474/browser and execute this query "match (s:origin), (t:file), p=allShortestPaths((s)--(t)) return p"'} | Okay this worker is going build graphs from PCAP Bro output logs | entailment |
def http_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro http.log) '''
print 'Entering http_log_graph...'
for row in list(stream):
# Skip '-' hosts
if (row['id.orig_h'] == '-'):
continue
# Add the originating host
self.add_node(row['id.orig_h'], row['id.orig_h'], ['host', 'origin'])
# Add the response host and response ip
self.add_node(row['host'], row['host'], ['host'])
self.add_node(row['id.resp_h'], row['id.resp_h'], ['host'])
# Add the http request relationships
self.add_rel(row['id.orig_h'], row['host'], 'http_request')
self.add_rel(row['host'], row['id.resp_h'], 'A') | Build up a graph (nodes and edges from a Bro http.log) | entailment |
def files_log_graph(self, stream):
''' Build up a graph (nodes and edges from a Bro files.log) '''
for row in list(stream):
# dataframes['files_log'][['md5','mime_type','missing_bytes','rx_hosts','source','tx_hosts']]
# If the mime-type is interesting add the uri and the host->uri->host relationships
if row['mime_type'] not in self.exclude_mime_types:
# Check for weird conditions
if (row['total_bytes'] == '-'):
continue
if ('-' in row['md5']):
continue
# Check for missing bytes and small file
if row['missing_bytes']:
labels = ['missing', 'file']
elif row['total_bytes'] < 50*1024:
labels = ['small','file']
else:
labels = ['file']
# Make the file node name kewl
name = '%6s %s %.0f-KB' % (row['md5'][:6], row['mime_type'], row['total_bytes']/1024.0)
if row['missing_bytes']:
name += '*'
name = name.replace('application/','')
# Add the file node
self.add_node(row['md5'], name, labels)
# Add the tx_host
self.add_node(row['tx_hosts'], row['tx_hosts'], ['host'])
# Add the file->tx_host relationship
self.add_rel(row['tx_hosts'], row['md5'], 'file') | Build up a graph (nodes and edges from a Bro files.log) | entailment |
def register_callbacks(self, on_create, on_modify, on_delete):
""" Register callbacks for file creation, modification, and deletion """
self.on_create = on_create
self.on_modify = on_modify
self.on_delete = on_delete | Register callbacks for file creation, modification, and deletion | entailment |
def _start_monitoring(self):
""" Internal method that monitors the directory for changes """
# Grab all the timestamp info
before = self._file_timestamp_info(self.path)
while True:
gevent.sleep(1)
after = self._file_timestamp_info(self.path)
added = [fname for fname in after.keys() if fname not in before.keys()]
removed = [fname for fname in before.keys() if fname not in after.keys()]
modified = []
for fname in before.keys():
if fname not in removed:
if os.path.getmtime(fname) != before.get(fname):
modified.append(fname)
if added:
self.on_create(added)
if removed:
self.on_delete(removed)
if modified:
self.on_modify(modified)
before = after | Internal method that monitors the directory for changes | entailment |
def _file_timestamp_info(self, path):
""" Grab all the timestamps for the files in the directory """
files = [os.path.join(path, fname) for fname in os.listdir(path) if '.py' in fname]
return dict ([(fname, os.path.getmtime(fname)) for fname in files]) | Grab all the timestamps for the files in the directory | entailment |
def get_rules_from_disk(self):
''' Recursively traverse the yara/rules directory for rules '''
# Try to find the yara rules directory relative to the worker
my_dir = os.path.dirname(os.path.realpath(__file__))
yara_rule_path = os.path.join(my_dir, 'yara/rules')
if not os.path.exists(yara_rule_path):
raise RuntimeError('yara could not find yara rules directory under: %s' % my_dir)
# Okay load in all the rules under the yara rule path
self.rules = yara.load_rules(rules_rootpath=yara_rule_path)
# Save rules to Workbench
self.save_rules_to_workbench(self.rules)
return self.rules | Recursively traverse the yara/rules directory for rules | entailment |
def execute(self, input_data):
''' yara worker execute method '''
raw_bytes = input_data['sample']['raw_bytes']
matches = self.rules.match_data(raw_bytes)
# The matches data is organized in the following way
# {'filename1': [match_list], 'filename2': [match_list]}
# match_list = list of match
# match = {'meta':{'description':'blah}, tags=[], matches:True,
# strings:[string_list]}
# string = {'flags':blah, 'identifier':'$', 'data': FindWindow, 'offset'}
#
# So we're going to flatten a bit (shrug)
# {filename_match_meta_description: string_list}
flat_data = collections.defaultdict(list)
for filename, match_list in matches.iteritems():
for match in match_list:
if 'description' in match['meta']:
new_tag = filename+'_'+match['meta']['description']
else:
new_tag = filename+'_'+match['rule']
for match in match['strings']:
flat_data[new_tag].append(match['data'])
# Remove duplicates
flat_data[new_tag] = list(set(flat_data[new_tag]))
return {'matches': flat_data} | yara worker execute method | entailment |
def chunks(data, chunk_size):
""" Yield chunk_size chunks from data."""
for i in xrange(0, len(data), chunk_size):
yield data[i:i+chunk_size] | Yield chunk_size chunks from data. | entailment |
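The generator above uses xrange, so it is Python 2 only; a Python 3 friendly version plus a one-line usage sketch:

```python
def chunks(data, chunk_size):
    """Yield chunk_size chunks from data (range works on both Python 2 and 3)."""
    for i in range(0, len(data), chunk_size):
        yield data[i:i + chunk_size]

print(list(chunks(b'abcdefghij', 4)))   # [b'abcd', b'efgh', b'ij']
```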
def run():
"""This client pushes a file into Workbench."""
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
# Upload the files into workbench
my_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../data/pcap/gold_xxx.pcap')
with open(my_file,'rb') as f:
# We're going to upload the file in chunks to workbench
filename = os.path.basename(my_file)
raw_bytes = f.read()
md5_list = []
for chunk in chunks(raw_bytes, 1024*1024):
md5_list.append(workbench.store_sample(chunk, filename, 'exe'))
# Now we just ask Workbench to combine these
combined_md5 = workbench.combine_samples(md5_list, filename, 'exe')
real_md5 = workbench.store_sample(raw_bytes, filename, 'exe')
assert(combined_md5 == real_md5) | This client pushes a file into Workbench. | entailment |
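The closing assert holds because hashing the reassembled bytes yields the same digest as hashing the original buffer; a small hashlib-only sketch of that property (no workbench server required, and the sizes are arbitrary):

```python
import hashlib

data = b'x' * (3 * 1024 * 1024)              # stand-in for the pcap bytes
whole_md5 = hashlib.md5(data).hexdigest()

h = hashlib.md5()
for i in range(0, len(data), 1024 * 1024):   # feed the same 1 MB chunks incrementally
    h.update(data[i:i + 1024 * 1024])
assert h.hexdigest() == whole_md5
```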
def get_rules_from_disk():
''' Recursively traverse the yara/rules directory for rules '''
# Try to find the yara rules directory relative to the worker
my_dir = os.path.dirname(os.path.realpath(__file__))
yara_rule_path = os.path.join(my_dir, 'yara/rules')
if not os.path.exists(yara_rule_path):
raise RuntimeError('yara could not find yara rules directory under: %s' % my_dir)
# Okay load in all the rules under the yara rule path
rules = yara.load_rules(rules_rootpath=yara_rule_path, fast_match=True)
return rules | Recursively traverse the yara/rules directory for rules | entailment |
def setup_pcap_inputs(self, input_data):
''' Write the PCAPs to disk for Bro to process and return the pcap filenames '''
# Setup the pcap in the input data for processing by Bro. The input
# may be either an individual sample or a sample set.
file_list = []
if 'sample' in input_data:
raw_bytes = input_data['sample']['raw_bytes']
filename = os.path.basename(input_data['sample']['filename'])
file_list.append({'filename': filename, 'bytes': raw_bytes})
else:
for md5 in input_data['sample_set']['md5_list']:
sample = self.workbench.get_sample(md5)['sample']
raw_bytes = sample['raw_bytes']
filename = os.path.basename(sample['filename'])
file_list.append({'filename': filename, 'bytes': raw_bytes})
# Write the pcaps to disk and keep the filenames for Bro to process
for file_info in file_list:
with open(file_info['filename'], 'wb') as pcap_file:
pcap_file.write(file_info['bytes'])
# Return filenames
return [file_info['filename'] for file_info in file_list] | Write the PCAPs to disk for Bro to process and return the pcap filenames | entailment |
def execute(self, input_data):
''' Execute '''
# Get the bro script path (workers/bro/__load__.bro)
script_path = self.bro_script_dir
# Create a temporary directory
with self.goto_temp_directory() as temp_dir:
# Get the pcap inputs (filenames)
print 'pcap_bro: Setting up PCAP inputs...'
filenames = self.setup_pcap_inputs(input_data)
command_line = ['bro']
for filename in filenames:
command_line += ['-C', '-r', filename]
if script_path:
command_line.append(script_path)
# Execute command line as a subprocess
print 'pcap_bro: Executing subprocess...'
self.subprocess_manager(command_line)
# Scrape up all the output log files
gsleep()
print 'pcap_bro: Scraping output logs...'
my_output = {}
for output_log in glob.glob('*.log'):
# Store the output into workbench, put the name:md5 in my output
output_name = os.path.splitext(output_log)[0] + '_log'
with open(output_log, 'rb') as bro_file:
raw_bytes = bro_file.read()
my_output[output_name] = self.workbench.store_sample(raw_bytes, output_name, 'bro')
# Scrape any extracted files
gsleep()
print 'pcap_bro: Scraping extracted files...'
my_output['extracted_files'] = []
for output_file in glob.glob('extract_files/*'):
# Store the output into workbench, put md5s in the 'extracted_files' field
output_name = os.path.basename(output_file)
with open(output_file, 'rb') as extracted_file:
if output_name.endswith('exe'):
type_tag = 'exe'
else:
type_tag = output_name[-3:]
raw_bytes = extracted_file.read()
my_output['extracted_files'].append(self.workbench.store_sample(raw_bytes, output_name, type_tag))
# Construct back-pointers to the PCAPs
if 'sample' in input_data:
my_output['pcaps'] = [input_data['sample']['md5']]
else:
my_output['pcaps'] = input_data['sample_set']['md5_list']
# Return my output
return my_output | Execute | entailment |
def subprocess_manager(self, exec_args):
''' Bro subprocess manager '''
try:
sp = gevent.subprocess.Popen(exec_args, stdout=gevent.subprocess.PIPE, stderr=gevent.subprocess.PIPE)
except OSError:
raise RuntimeError('Could not run bro executable (either not installed or not in path): %s' % (exec_args))
out, err = sp.communicate()
if out:
print 'standard output of subprocess: %s' % out
if err:
raise RuntimeError('%s\npcap_bro had output on stderr: %s' % (exec_args, err))
if sp.returncode:
raise RuntimeError('%s\npcap_bro had returncode: %d' % (exec_args, sp.returncode)) | Bro subprocess manager | entailment |
def getConsole(rh):
"""
Get the virtual machine's console output.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter getVM.getConsole")
# Transfer the console to this virtual machine.
parms = ["-T", rh.userid]
results = invokeSMCLI(rh, "Image_Console_Get", parms)
if results['overallRC'] != 0:
if (results['overallRC'] == 8 and results['rc'] == 8 and
results['rs'] == 8):
# Give a more specific message. Userid is either
# not logged on or not spooling their console.
msg = msgs.msg['0409'][1] % (modId, rh.userid)
else:
msg = results['response']
rh.updateResults(results) # Use results from invokeSMCLI
rh.printLn("ES", msg)
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Check whether the reader is online
with open('/sys/bus/ccw/drivers/vmur/0.0.000c/online', 'r') as myfile:
out = myfile.read().replace('\n', '')
myfile.close()
# Nope, offline, error out and exit
if int(out) != 1:
msg = msgs.msg['0411'][1]
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0411'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# We should set class to *, otherwise we will get errors like:
# vmur: Reader device class does not match spool file class
cmd = ["sudo", "/sbin/vmcp", "spool reader class *"]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# If we couldn't change the class, that's not fatal
# But we want to warn about possibly incomplete
# results
msg = msgs.msg['0407'][1] % (modId, strCmd, e.output)
rh.printLn("WS", msg)
except Exception as e:
# All other exceptions.
# If we couldn't change the class, that's not fatal
# But we want to warn about possibly incomplete
# results
rh.printLn("ES", msgs.msg['0422'][1] % (modId, strCmd,
type(e).__name__, str(e)))
rh.printLn("ES", msgs.msg['0423'][1] % modId, strCmd,
type(e).__name__, str(e))
# List the spool files in the reader
cmd = ["sudo", "/usr/sbin/vmur", "list"]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
files = subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# Uh oh, vmur list command failed for some reason
msg = msgs.msg['0408'][1] % (modId, rh.userid,
strCmd, e.output)
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0408'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
except Exception as e:
# All other exceptions.
rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
type(e).__name__, str(e)))
rh.updateResults(msgs.msg['0421'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Now for each line that contains our user and is a
# class T console file, add the spool id to our list
spoolFiles = files.split('\n')
outstr = ""
for myfile in spoolFiles:
if (myfile != "" and
myfile.split()[0] == rh.userid and
myfile.split()[2] == "T" and
myfile.split()[3] == "CON"):
fileId = myfile.split()[1]
outstr += fileId + " "
# No files in our list
if outstr == "":
msg = msgs.msg['0410'][1] % (modId, rh.userid)
rh.printLn("ES", msg)
rh.updateResults(msgs.msg['0410'][0])
rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
# Output the list
rh.printLn("N", "List of spool files containing "
"console logs from %s: %s" % (rh.userid, outstr))
rh.results['overallRC'] = 0
rh.printSysLog("Exit getVM.getConsole, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | Get the virtual machine's console output.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | entailment |
def getDirectory(rh):
"""
Get the virtual machine's directory statements.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter getVM.getDirectory")
parms = ["-T", rh.userid]
results = invokeSMCLI(rh, "Image_Query_DM", parms)
if results['overallRC'] == 0:
results['response'] = re.sub('\*DVHOPT.*', '', results['response'])
rh.printLn("N", results['response'])
else:
# SMAPI API failed.
rh.printLn("ES", results['response'])
rh.updateResults(results) # Use results from invokeSMCLI
rh.printSysLog("Exit getVM.getDirectory, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | Get the virtual machine's directory statements.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | entailment |
def getStatus(rh):
"""
Get the basic status of a virtual machine.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter getVM.getStatus, userid: " + rh.userid)
results = isLoggedOn(rh, rh.userid)
if results['rc'] != 0:
# Uhoh, can't determine if guest is logged on or not
rh.updateResults(results)
rh.printSysLog("Exit getVM.getStatus, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
if results['rs'] == 1:
# Guest is logged off, everything is 0
powerStr = "Power state: off"
memStr = "Total Memory: 0M"
usedMemStr = "Used Memory: 0M"
procStr = "Processors: 0"
timeStr = "CPU Used Time: 0 sec"
else:
powerStr = "Power state: on"
if 'power' in rh.parms:
# Test here to see if we only need power state
# Then we can return early
rh.printLn("N", powerStr)
rh.updateResults(results)
rh.printSysLog("Exit getVM.getStatus, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
if results['rs'] != 1:
# Guest is logged on, go get more info
results = getPerfInfo(rh, rh.userid)
if results['overallRC'] != 0:
# Something went wrong in subroutine, exit
rh.updateResults(results)
rh.printSysLog("Exit getVM.getStatus, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC']
else:
# Everything went well, response should be good
memStr = results['response'].split("\n")[0]
usedMemStr = results['response'].split("\n")[1]
procStr = results['response'].split("\n")[2]
timeStr = results['response'].split("\n")[3]
# Build our output string according
# to what information was asked for
if 'memory' in rh.parms:
outStr = memStr + "\n" + usedMemStr
elif 'cpu' in rh.parms:
outStr = procStr + "\n" + timeStr
else:
# Default to all
outStr = powerStr + "\n" + memStr + "\n" + usedMemStr
outStr += "\n" + procStr + "\n" + timeStr
rh.printLn("N", outStr)
rh.printSysLog("Exit getVM.getStatus, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | Get the basic status of a virtual machine.
Input:
Request Handle with the following properties:
function - 'CMDVM'
subfunction - 'CMD'
userid - userid of the virtual machine
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | entailment |
def extract_fcp_data(raw_data, status):
"""
extract data from smcli System_WWPN_Query output.
Input:
raw data returned from smcli
Output:
data extracted would be like:
'status:Free \n
fcp_dev_no:1D2F\n
physical_wwpn:C05076E9928051D1\n
channel_path_id:8B\n
npiv_wwpn:NONE\n
status:Free\n
fcp_dev_no:1D29\n
physical_wwpn:C05076E9928051D1\n
channel_path_id:8B\n
npiv_wwpn:NONE
"""
raw_data = raw_data.split('\n')
# clear blank lines
data = []
for i in raw_data:
i = i.strip(' \n')
if i == '':
continue
else:
data.append(i)
# process data into one list of dicts
results = []
for i in range(0, len(data), 5):
temp = data[i + 1].split(':')[-1].strip()
# only return results match the status
if temp.lower() == status.lower():
results.extend(data[i:i + 5])
return '\n'.join(results) | extract data from smcli System_WWPN_Query output.
Input:
raw data returned from smcli
Output:
data extracted would be like:
'status:Free \n
fcp_dev_no:1D2F\n
physical_wwpn:C05076E9928051D1\n
channel_path_id:8B\n
npiv_wwpn:NONE\n
status:Free\n
fcp_dev_no:1D29\n
physical_wwpn:C05076E9928051D1\n
channel_path_id:8B\n
npiv_wwpn:NONE | entailment |
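A minimal usage sketch for extract_fcp_data, assuming the function above is in scope; the raw text below is a hypothetical System_WWPN_Query output with five lines per FCP device and the status on the second line of each block:
# Hypothetical smcli System_WWPN_Query output for two FCP devices
raw = (
    "FCP device number: 1D2F\n"
    "  Status: Free\n"
    "  NPIV world wide port number: NONE\n"
    "  Channel path ID: 8B\n"
    "  Physical world wide port number: C05076E9928051D1\n"
    "\n"
    "FCP device number: 1D30\n"
    "  Status: Active\n"
    "  NPIV world wide port number: NONE\n"
    "  Channel path ID: 8B\n"
    "  Physical world wide port number: C05076E9928051D1\n"
)
# Only the first device matches 'free', so only its five lines are returned
print(extract_fcp_data(raw, 'free'))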
def fcpinfo(rh):
"""
Get fcp info and filter by the status.
Input:
Request Handle with the following properties:
function - 'GETVM'
subfunction - 'FCPINFO'
userid - userid of the virtual machine
parms['status'] - The status for filter results.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter changeVM.dedicate")
parms = ["-T", rh.userid]
hideList = []
results = invokeSMCLI(rh,
"System_WWPN_Query",
parms,
hideInLog=hideList)
if results['overallRC'] == 0:
# extract data from smcli return
ret = extract_fcp_data(results['response'], rh.parms['status'])
# write the ret into results['response']
rh.printLn("N", ret)
else:
# SMAPI API failed.
rh.printLn("ES", results['response'])
rh.updateResults(results) # Use results from invokeSMCLI
return rh.results['overallRC'] | Get fcp info and filter by the status.
Input:
Request Handle with the following properties:
function - 'GETVM'
subfunction - 'FCPINFO'
userid - userid of the virtual machine
parms['status'] - The status for filter results.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error | entailment |
def _no_auto_update_getter(self):
""":class:`bool`. Boolean controlling whether the :meth:`start_update`
method is automatically called by the :meth:`update` method
Examples
--------
You can disable the automatic update via
>>> with data.no_auto_update:
... data.update(time=1)
... data.start_update()
To permanently disable the automatic update, simply set
>>> data.no_auto_update = True
>>> data.update(time=1)
>>> data.no_auto_update = False # re-enable automatic update"""
if getattr(self, '_no_auto_update', None) is not None:
return self._no_auto_update
else:
self._no_auto_update = utils._TempBool()
return self._no_auto_update | :class:`bool`. Boolean controlling whether the :meth:`start_update`
method is automatically called by the :meth:`update` method
Examples
--------
You can disable the automatic update via
>>> with data.no_auto_update:
... data.update(time=1)
... data.start_update()
To permanently disable the automatic update, simply set
>>> data.no_auto_update = True
>>> data.update(time=1)
>>> data.no_auto_update = False # re-enable automatic update | entailment |
def _infer_interval_breaks(coord):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
Taken from xarray.plotting.plot module
"""
coord = np.asarray(coord)
deltas = 0.5 * (coord[1:] - coord[:-1])
first = coord[0] - deltas[0]
last = coord[-1] + deltas[-1]
return np.r_[[first], coord[:-1] + deltas, [last]] | >>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
Taken from xarray.plotting.plot module | entailment |
def _get_variable_names(arr):
"""Return the variable names of an array"""
if VARIABLELABEL in arr.dims:
return arr.coords[VARIABLELABEL].tolist()
else:
return arr.name | Return the variable names of an array | entailment |
def setup_coords(arr_names=None, sort=[], dims={}, **kwargs):
"""
Sets up the arr_names dictionary for the plot
Parameters
----------
arr_names: string, list of strings or dictionary
Set the unique array names of the resulting arrays and (optionally)
dimensions.
- if string: same as list of strings (see below). Strings may
include {0} which will be replaced by a counter.
- list of strings: those will be used for the array names. The final
number of dictionaries in the return depends in this case on the
`dims` and ``**furtherdims``
- dictionary:
Then nothing happens and an :class:`OrderedDict` version of
`arr_names` is returned.
sort: list of strings
This parameter defines how the dictionaries are ordered. It has no
effect if `arr_names` is a dictionary (use a
:class:`~collections.OrderedDict` for that). It can be a list of
dimension strings matching to the dimensions in `dims` for the
variable.
dims: dict
Keys must be variable names of dimensions (e.g. time, level, lat or
lon) or 'name' for the variable name you want to choose.
Values must be values of that dimension or iterables of the values
(e.g. lists). Note that strings will be put into a list.
For example dims = {'name': 't2m', 'time': 0} will result in one plot
for the first time step, whereas dims = {'name': 't2m', 'time': [0, 1]}
will result in two plots, one for the first (time == 0) and one for the
second (time == 1) time step.
``**kwargs``
The same as `dims` (those will update what is specified in `dims`)
Returns
-------
~collections.OrderedDict
A mapping from the keys in `arr_names` to dictionaries. Each
dictionary defines the coordinates of one data array to
load"""
try:
return OrderedDict(arr_names)
except (ValueError, TypeError):
# ValueError for cyordereddict, TypeError for collections.OrderedDict
pass
if arr_names is None:
arr_names = repeat('arr{0}')
elif isstring(arr_names):
arr_names = repeat(arr_names)
dims = OrderedDict(dims)
for key, val in six.iteritems(kwargs):
dims.setdefault(key, val)
sorted_dims = OrderedDict()
if sort:
for key in sort:
sorted_dims[key] = dims.pop(key)
for key, val in six.iteritems(dims):
sorted_dims[key] = val
else:
# make sure, it is first sorted for the variable names
if 'name' in dims:
sorted_dims['name'] = None
for key, val in sorted(dims.items()):
sorted_dims[key] = val
for key, val in six.iteritems(kwargs):
sorted_dims.setdefault(key, val)
for key, val in six.iteritems(sorted_dims):
sorted_dims[key] = iter(safe_list(val))
return OrderedDict([
(arr_name.format(i), dict(zip(sorted_dims.keys(), dim_tuple)))
for i, (arr_name, dim_tuple) in enumerate(zip(
arr_names, product(
*map(list, sorted_dims.values()))))]) | Sets up the arr_names dictionary for the plot
Parameters
----------
arr_names: string, list of strings or dictionary
Set the unique array names of the resulting arrays and (optionally)
dimensions.
- if string: same as list of strings (see below). Strings may
include {0} which will be replaced by a counter.
- list of strings: those will be used for the array names. The final
number of dictionaries in the return depends in this case on the
`dims` and ``**furtherdims``
- dictionary:
Then nothing happens and an :class:`OrderedDict` version of
`arr_names` is returned.
sort: list of strings
This parameter defines how the dictionaries are ordered. It has no
effect if `arr_names` is a dictionary (use a
:class:`~collections.OrderedDict` for that). It can be a list of
dimension strings matching to the dimensions in `dims` for the
variable.
dims: dict
Keys must be variable names of dimensions (e.g. time, level, lat or
lon) or 'name' for the variable name you want to choose.
Values must be values of that dimension or iterables of the values
(e.g. lists). Note that strings will be put into a list.
For example dims = {'name': 't2m', 'time': 0} will result in one plot
for the first time step, whereas dims = {'name': 't2m', 'time': [0, 1]}
will result in two plots, one for the first (time == 0) and one for the
second (time == 1) time step.
``**kwargs``
The same as `dims` (those will update what is specified in `dims`)
Returns
-------
~collections.OrderedDict
A mapping from the keys in `arr_names` to dictionaries. Each
dictionary defines the coordinates of one data array to
load | entailment |
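An illustrative call of setup_coords, assuming it is imported from psyplot.data; the variable name 't2m' and the time values are made up:
from psyplot.data import setup_coords

# Two time steps for one variable expand to two array definitions
coords = setup_coords(dims={'name': 't2m', 'time': [0, 1]})
# -> OrderedDict([('arr0', {'name': 't2m', 'time': 0}),
#                 ('arr1', {'name': 't2m', 'time': 1})])
print(coords)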
def to_slice(arr):
"""Test whether `arr` is an integer array that can be replaced by a slice
Parameters
----------
arr: numpy.array
Numpy integer array
Returns
-------
slice or None
If `arr` could be converted to a slice, the slice is returned, otherwise
`None` is returned
See Also
--------
get_index_from_coord"""
if isinstance(arr, slice):
return arr
if len(arr) == 1:
return slice(arr[0], arr[0] + 1)
step = np.unique(arr[1:] - arr[:-1])
if len(step) == 1:
return slice(arr[0], arr[-1] + step[0], step[0]) | Test whether `arr` is an integer array that can be replaced by a slice
Parameters
----------
arr: numpy.array
Numpy integer array
Returns
-------
slice or None
If `arr` could be converted to a slice, the slice is returned, otherwise
`None` is returned
See Also
--------
get_index_from_coord | entailment |
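A short sketch of the conversion behaviour of to_slice, assuming the function above is in scope (index values chosen arbitrarily):
import numpy as np

# Evenly spaced indices collapse to a slice
print(to_slice(np.array([2, 4, 6, 8])))   # slice(2, 10, 2)
# A single index becomes a length-one slice
print(to_slice(np.array([3])))            # slice(3, 4, None)
# Irregular steps cannot be expressed as a slice, so None is returned
print(to_slice(np.array([1, 2, 5])))      # None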
def get_index_from_coord(coord, base_index):
"""Function to return the coordinate as integer, integer array or slice
If `coord` is zero-dimensional, the corresponding integer in `base_index`
will be supplied. Otherwise it is first tried to return a slice, if that
does not work an integer array with the corresponding indices is returned.
Parameters
----------
coord: xarray.Coordinate or xarray.Variable
Coordinate to convert
base_index: pandas.Index
The base index from which the `coord` was extracted
Returns
-------
int, array of ints or slice
The indexer that can be used to access the `coord` in the
`base_index`
"""
try:
values = coord.values
except AttributeError:
values = coord
if values.ndim == 0:
return base_index.get_loc(values[()])
if len(values) == len(base_index) and (values == base_index).all():
return slice(None)
values = np.array(list(map(lambda i: base_index.get_loc(i), values)))
return to_slice(values) or values | Function to return the coordinate as integer, integer array or slice
If `coord` is zero-dimensional, the corresponding integer in `base_index`
will be supplied. Otherwise it is first tried to return a slice, if that
does not work an integer array with the corresponding indices is returned.
Parameters
----------
coord: xarray.Coordinate or xarray.Variable
Coordinate to convert
base_index: pandas.Index
The base index from which the `coord` was extracted
Returns
-------
int, array of ints or slice
The indexer that can be used to access the `coord` in the
`base_index` | entailment |
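A small sketch of get_index_from_coord with made-up values, assuming the function above is in scope:
import numpy as np
import pandas as pd

base = pd.Index([10, 20, 30, 40])
# A contiguous subset of the index comes back as a slice
print(get_index_from_coord(np.array([20, 30]), base))          # slice(1, 3, 1)
# The full index is reduced to slice(None)
print(get_index_from_coord(np.array([10, 20, 30, 40]), base))  # slice(None, None, None)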
def get_tdata(t_format, files):
"""
Get the time information from file names
Parameters
----------
t_format: str
The string that can be used to get the time information in the files.
Any numeric datetime format string (e.g. %Y, %m, %H) can be used, but
not non-numeric strings like %b, etc. See [1]_ for the datetime format
strings
files: list of str
The files that contain the time information
Returns
-------
pandas.Index
The time coordinate
list of str
The file names as they are sorted in the returned index
References
----------
.. [1] https://docs.python.org/2/library/datetime.html"""
def median(arr):
return arr.min() + (arr.max() - arr.min())/2
import re
from pandas import Index
t_pattern = t_format
for fmt, patt in t_patterns.items():
t_pattern = t_pattern.replace(fmt, patt)
t_pattern = re.compile(t_pattern)
time = list(range(len(files)))
for i, f in enumerate(files):
time[i] = median(np.array(list(map(
lambda s: np.datetime64(dt.datetime.strptime(s, t_format)),
t_pattern.findall(f)))))
ind = np.argsort(time) # sort according to time
files = np.array(files)[ind]
time = np.array(time)[ind]
return to_datetime(Index(time, name='time')), files | Get the time information from file names
Parameters
----------
t_format: str
The string that can be used to get the time information in the files.
Any numeric datetime format string (e.g. %Y, %m, %H) can be used, but
not non-numeric strings like %b, etc. See [1]_ for the datetime format
strings
files: list of str
The files that contain the time information
Returns
-------
pandas.Index
The time coordinate
list of str
The file names as they are sorted in the returned index
References
----------
.. [1] https://docs.python.org/2/library/datetime.html | entailment |
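A small sketch of get_tdata, assuming the function above is in scope; the file names are hypothetical and only the part matching %Y%m%d is parsed:
files = ['t2m_20150201.nc', 't2m_20150101.nc']
time, sorted_files = get_tdata('%Y%m%d', files)
# sorted_files is the input reordered chronologically:
#   ['t2m_20150101.nc', 't2m_20150201.nc']
# time is roughly DatetimeIndex(['2015-01-01', '2015-02-01'], name='time')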
def to_netcdf(ds, *args, **kwargs):
"""
Store the given dataset as a netCDF file
This function works essentially the same as the usual
:meth:`xarray.Dataset.to_netcdf` method but can also encode absolute time
units
Parameters
----------
ds: xarray.Dataset
The dataset to store
%(xarray.Dataset.to_netcdf.parameters)s
"""
to_update = {}
for v, obj in six.iteritems(ds.variables):
units = obj.attrs.get('units', obj.encoding.get('units', None))
if units == 'day as %Y%m%d.%f' and np.issubdtype(
obj.dtype, np.datetime64):
to_update[v] = xr.Variable(
obj.dims, AbsoluteTimeEncoder(obj), attrs=obj.attrs.copy(),
encoding=obj.encoding)
to_update[v].attrs['units'] = units
if to_update:
ds = ds.copy()
ds.update(to_update)
return xarray_api.to_netcdf(ds, *args, **kwargs) | Store the given dataset as a netCDF file
This function works essentially the same as the usual
:meth:`xarray.Dataset.to_netcdf` method but can also encode absolute time
units
Parameters
----------
ds: xarray.Dataset
The dataset to store
%(xarray.Dataset.to_netcdf.parameters)s | entailment |
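A hedged usage sketch, assuming open_dataset and to_netcdf are the psyplot.data functions shown in this section and that the input file (hypothetical) uses absolute time units:
from psyplot.data import open_dataset, to_netcdf

# Round-trip a dataset whose time variable uses 'day as %Y%m%d.%f';
# to_netcdf re-encodes the decoded datetimes back into that unit
ds = open_dataset('input_with_absolute_times.nc')  # hypothetical file
to_netcdf(ds, 'copy.nc')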
def _get_fname_nio(store):
"""Try to get the file name from the NioDataStore store"""
try:
f = store.ds.file
except AttributeError:
return None
try:
return f.path
except AttributeError:
return None | Try to get the file name from the NioDataStore store | entailment |
def get_filename_ds(ds, dump=True, paths=None, **kwargs):
"""
Return the filename of the corresponding to a dataset
This method returns the path to the `ds` or saves the dataset
if there exists no filename
Parameters
----------
ds: xarray.Dataset
The dataset you want the path information for
dump: bool
If True and the dataset has not been dumped so far, it is dumped to a
temporary file or the one generated by `paths` is used
paths: iterable or True
An iterator over filenames to use if a dataset has no filename.
If paths is ``True``, an iterator over temporary files will be
created without raising a warning
Other Parameters
----------------
``**kwargs``
Any other keyword for the :func:`to_netcdf` function
%(xarray.Dataset.to_netcdf.parameters)s
Returns
-------
str or None
None, if the dataset has not yet been dumped to the harddisk and
`dump` is False, otherwise the complete path to the input
file
str
The module of the :class:`xarray.backends.common.AbstractDataStore`
instance that is used to hold the data
str
The class name of the
:class:`xarray.backends.common.AbstractDataStore` instance that is
used to open the data
"""
from tempfile import NamedTemporaryFile
# if already specified, return that filename
if ds.psy._filename is not None:
return tuple([ds.psy._filename] + list(ds.psy.data_store))
def dump_nc():
# make sure that the data store is not closed by providing a
# write argument
if xr_version < (0, 11):
kwargs.setdefault('writer', xarray_api.ArrayWriter())
store = to_netcdf(ds, fname, **kwargs)
else:
# `writer` parameter was removed by
# https://github.com/pydata/xarray/pull/2261
kwargs.setdefault('multifile', True)
store = to_netcdf(ds, fname, **kwargs)[1]
store_mod = store.__module__
store_cls = store.__class__.__name__
ds._file_obj = store
return store_mod, store_cls
def tmp_it():
while True:
yield NamedTemporaryFile(suffix='.nc').name
fname = None
if paths is True or (dump and paths is None):
paths = tmp_it()
elif paths is not None:
if isstring(paths):
paths = iter([paths])
else:
paths = iter(paths)
# try to get the filename from the data store of the obj
store_mod, store_cls = ds.psy.data_store
if store_mod is not None:
store = ds._file_obj
# try several engines
if hasattr(store, 'file_objs'):
fname = []
store_mod = []
store_cls = []
for obj in store.file_objs: # mfdataset
_fname = None
for func in get_fname_funcs:
if _fname is None:
_fname = func(obj)
if _fname is not None:
fname.append(_fname)
store_mod.append(obj.__module__)
store_cls.append(obj.__class__.__name__)
fname = tuple(fname)
store_mod = tuple(store_mod)
store_cls = tuple(store_cls)
else:
for func in get_fname_funcs:
fname = func(store)
if fname is not None:
break
# check if paths is provided and if yes, save the file
if fname is None and paths is not None:
fname = next(paths, None)
if dump and fname is not None:
store_mod, store_cls = dump_nc()
ds.psy.filename = fname
ds.psy.data_store = (store_mod, store_cls)
return fname, store_mod, store_cls | Return the filename of the corresponding to a dataset
This method returns the path to the `ds` or saves the dataset
if there exists no filename
Parameters
----------
ds: xarray.Dataset
The dataset you want the path information for
dump: bool
If True and the dataset has not been dumped so far, it is dumped to a
temporary file or the one generated by `paths` is used
paths: iterable or True
An iterator over filenames to use if a dataset has no filename.
If paths is ``True``, an iterator over temporary files will be
created without raising a warning
Other Parameters
----------------
``**kwargs``
Any other keyword for the :func:`to_netcdf` function
%(xarray.Dataset.to_netcdf.parameters)s
Returns
-------
str or None
None, if the dataset has not yet been dumped to the harddisk and
`dump` is False, otherwise the complete path to the input
file
str
The module of the :class:`xarray.backends.common.AbstractDataStore`
instance that is used to hold the data
str
The class name of the
:class:`xarray.backends.common.AbstractDataStore` instance that is
used to open the data | entailment |
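A rough sketch of get_filename_ds on an in-memory dataset, assuming psyplot.data is imported so that the .psy accessor is registered; with dump=True the dataset is written to a temporary netCDF file and that path is returned:
import xarray as xr
from psyplot.data import get_filename_ds

ds = xr.Dataset({'tas': ('time', [280.0, 281.5])})  # never written to disk
fname, store_mod, store_cls = get_filename_ds(ds, dump=True)
print(fname, store_mod, store_cls)  # temporary *.nc path plus the store info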
def open_dataset(filename_or_obj, decode_cf=True, decode_times=True,
decode_coords=True, engine=None, gridfile=None, **kwargs):
"""
Open an instance of :class:`xarray.Dataset`.
This method has the same functionality as the :func:`xarray.open_dataset`
method except that it supports an additional 'gdal' engine to open
gdal Rasters (e.g. GeoTiffs) and that it supports absolute time units like
``'day as %Y%m%d.%f'`` (if `decode_cf` and `decode_times` are True).
Parameters
----------
%(xarray.open_dataset.parameters.no_engine)s
engine: {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'gdal'}, optional
Engine to use when reading netCDF files. If not provided, the default
engine is chosen based on available dependencies, with a preference for
'netcdf4'.
%(CFDecoder.decode_coords.parameters.gridfile)s
Returns
-------
xarray.Dataset
The dataset that contains the variables from `filename_or_obj`"""
# use the absolute path name (is safer when saving the project)
if isstring(filename_or_obj) and osp.exists(filename_or_obj):
filename_or_obj = osp.abspath(filename_or_obj)
if engine == 'gdal':
from psyplot.gdal_store import GdalStore
filename_or_obj = GdalStore(filename_or_obj)
engine = None
ds = xr.open_dataset(filename_or_obj, decode_cf=decode_cf,
decode_coords=False, engine=engine,
decode_times=decode_times, **kwargs)
if decode_cf:
ds = CFDecoder.decode_ds(
ds, decode_coords=decode_coords, decode_times=decode_times,
gridfile=gridfile)
return ds | Open an instance of :class:`xarray.Dataset`.
This method has the same functionality as the :func:`xarray.open_dataset`
method except that it supports an additional 'gdal' engine to open
gdal Rasters (e.g. GeoTiffs) and that it supports absolute time units like
``'day as %Y%m%d.%f'`` (if `decode_cf` and `decode_times` are True).
Parameters
----------
%(xarray.open_dataset.parameters.no_engine)s
engine: {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'gdal'}, optional
Engine to use when reading netCDF files. If not provided, the default
engine is chosen based on available dependencies, with a preference for
'netcdf4'.
%(CFDecoder.decode_coords.parameters.gridfile)s
Returns
-------
xarray.Dataset
The dataset that contains the variables from `filename_or_obj` | entailment |
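Two illustrative calls, assuming open_dataset is imported from psyplot.data; the file names are hypothetical:
from psyplot.data import open_dataset

# Plain netCDF input (CF decoding also handles absolute time units)
ds = open_dataset('temperature.nc')
# GeoTiff input through the additional 'gdal' engine
raster = open_dataset('elevation.tif', engine='gdal')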
def open_mfdataset(paths, decode_cf=True, decode_times=True,
decode_coords=True, engine=None, gridfile=None,
t_format=None, **kwargs):
"""
Open multiple files as a single dataset.
This function is essentially the same as the :func:`xarray.open_mfdataset`
function but (as the :func:`open_dataset`) supports additional decoding
and the ``'gdal'`` engine.
You can further specify the `t_format` parameter to get the time
information from the files and use the results to concatenate the files
Parameters
----------
%(xarray.open_mfdataset.parameters.no_engine)s
%(open_dataset.parameters.engine)s
%(get_tdata.parameters.t_format)s
%(CFDecoder.decode_coords.parameters.gridfile)s
Returns
-------
xarray.Dataset
The dataset that contains the variables from `filename_or_obj`"""
if t_format is not None or engine == 'gdal':
if isinstance(paths, six.string_types):
paths = sorted(glob(paths))
if not paths:
raise IOError('no files to open')
if t_format is not None:
time, paths = get_tdata(t_format, paths)
kwargs['concat_dim'] = time
if engine == 'gdal':
from psyplot.gdal_store import GdalStore
paths = list(map(GdalStore, paths))
engine = None
kwargs['lock'] = False
ds = xr.open_mfdataset(
paths, decode_cf=decode_cf, decode_times=decode_times, engine=engine,
decode_coords=False, **kwargs)
if decode_cf:
ds = CFDecoder.decode_ds(ds, gridfile=gridfile,
decode_coords=decode_coords,
decode_times=decode_times)
ds.psy._concat_dim = kwargs.get('concat_dim')
return ds | Open multiple files as a single dataset.
This function is essentially the same as the :func:`xarray.open_mfdataset`
function but (as the :func:`open_dataset`) supports additional decoding
and the ``'gdal'`` engine.
You can further specify the `t_format` parameter to get the time
information from the files and use the results to concatenate the files
Parameters
----------
%(xarray.open_mfdataset.parameters.no_engine)s
%(open_dataset.parameters.engine)s
%(get_tdata.parameters.t_format)s
%(CFDecoder.decode_coords.parameters.gridfile)s
Returns
-------
xarray.Dataset
The dataset that contains the variables from `filename_or_obj` | entailment |
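An illustrative call of open_mfdataset, assuming it is imported from psyplot.data; the glob pattern is hypothetical:
from psyplot.data import open_mfdataset

# Concatenate per-day files along time, taking the time stamp from the
# file names (via t_format) instead of from the file contents
ds = open_mfdataset('t2m_*.nc', t_format='%Y%m%d')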
def _open_ds_from_store(fname, store_mod=None, store_cls=None, **kwargs):
"""Open a dataset and return it"""
if isinstance(fname, xr.Dataset):
return fname
if not isstring(fname):
try: # test iterable
fname[0]
except TypeError:
pass
else:
if store_mod is not None and store_cls is not None:
if isstring(store_mod):
store_mod = repeat(store_mod)
if isstring(store_cls):
store_cls = repeat(store_cls)
fname = [_open_store(sm, sc, f)
for sm, sc, f in zip(store_mod, store_cls, fname)]
kwargs['engine'] = None
kwargs['lock'] = False
return open_mfdataset(fname, **kwargs)
if store_mod is not None and store_cls is not None:
fname = _open_store(store_mod, store_cls, fname)
return open_dataset(fname, **kwargs) | Open a dataset and return it | entailment |
def disconnect(self, func=None):
"""Disconnect a function call to the signal. If None, all connections
are disconnected"""
if func is None:
self._connections = []
else:
self._connections.remove(func) | Disconnect a function call to the signal. If None, all connections
are disconnected | entailment |
def logger(self):
""":class:`logging.Logger` of this instance"""
try:
return self._logger
except AttributeError:
name = '%s.%s' % (self.__module__, self.__class__.__name__)
self._logger = logging.getLogger(name)
self.logger.debug('Initializing...')
return self._logger | :class:`logging.Logger` of this instance | entailment |
def get_decoder(cls, ds, var):
"""
Class method to get the right decoder class that can decode the
given dataset and variable
Parameters
----------
%(CFDecoder.can_decode.parameters)s
Returns
-------
CFDecoder
The decoder for the given dataset that can decode the variable
`var`"""
for decoder_cls in cls._registry:
if decoder_cls.can_decode(ds, var):
return decoder_cls(ds)
return CFDecoder(ds) | Class method to get the right decoder class that can decode the
given dataset and variable
Parameters
----------
%(CFDecoder.can_decode.parameters)s
Returns
-------
CFDecoder
The decoder for the given dataset that can decode the variable
`var` | entailment |
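A minimal sketch of get_decoder, assuming CFDecoder and open_dataset come from psyplot.data; the file and variable names are made up:
from psyplot.data import CFDecoder, open_dataset

ds = open_dataset('icon_output.nc')
# Returns the first registered decoder subclass that can handle 't2m',
# falling back to a plain CFDecoder otherwise
decoder = CFDecoder.get_decoder(ds, ds['t2m'])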
def decode_coords(ds, gridfile=None):
"""
Sets the coordinates and bounds in a dataset
This static method sets those coordinates and bounds that are
marked in the netCDF attributes as coordinates in :attr:`ds` (without
deleting them from the variable attributes because this information is
necessary for visualizing the data correctly)
Parameters
----------
ds: xarray.Dataset
The dataset to decode
gridfile: str
The path to a separate grid file or a xarray.Dataset instance which
may store the coordinates used in `ds`
Returns
-------
xarray.Dataset
`ds` with additional coordinates"""
def add_attrs(obj):
if 'coordinates' in obj.attrs:
extra_coords.update(obj.attrs['coordinates'].split())
obj.encoding['coordinates'] = obj.attrs.pop('coordinates')
if 'bounds' in obj.attrs:
extra_coords.add(obj.attrs['bounds'])
if gridfile is not None and not isinstance(gridfile, xr.Dataset):
gridfile = open_dataset(gridfile)
extra_coords = set(ds.coords)
for k, v in six.iteritems(ds.variables):
add_attrs(v)
add_attrs(ds)
if gridfile is not None:
ds.update({k: v for k, v in six.iteritems(gridfile.variables)
if k in extra_coords})
if xr_version < (0, 11):
ds.set_coords(extra_coords.intersection(ds.variables),
inplace=True)
else:
ds._coord_names.update(extra_coords.intersection(ds.variables))
return ds | Sets the coordinates and bounds in a dataset
This static method sets those coordinates and bounds that are
marked in the netCDF attributes as coordinates in :attr:`ds` (without
deleting them from the variable attributes because this information is
necessary for visualizing the data correctly)
Parameters
----------
ds: xarray.Dataset
The dataset to decode
gridfile: str
The path to a separate grid file or a xarray.Dataset instance which
may store the coordinates used in `ds`
Returns
-------
xarray.Dataset
`ds` with additional coordinates | entailment |
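A hedged usage sketch of decode_coords, assuming CFDecoder comes from psyplot.data; the data and grid file names are hypothetical:
import xarray as xr
from psyplot.data import CFDecoder

# Unstructured-grid output often references bounds stored in a separate
# grid file; pull those in while marking them as coordinates
ds = xr.open_dataset('icon_data.nc', decode_coords=False)
ds = CFDecoder.decode_coords(ds, gridfile='icon_grid.nc')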
def is_triangular(self, var):
"""
Test if a variable is on a triangular grid
This method first checks the `grid_type` attribute of the variable (if
existent) whether it is equal to ``"unstructured"``, then it checks
whether the bounds are not two-dimensional.
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to check
Returns
-------
bool
True, if the grid is triangular, else False"""
warn("The 'is_triangular' method is depreceated and will be removed "
"soon! Use the 'is_unstructured' method!", DeprecationWarning,
stacklevel=1)
return str(var.attrs.get('grid_type')) == 'unstructured' or \
self._check_triangular_bounds(var)[0] | Test if a variable is on a triangular grid
This method first checks the `grid_type` attribute of the variable (if
existent) whether it is equal to ``"unstructured"``, then it checks
whether the bounds are not two-dimensional.
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to check
Returns
-------
bool
True, if the grid is triangular, else False | entailment |
def get_cell_node_coord(self, var, coords=None, axis='x', nans=None):
"""
Checks whether the bounds in the variable attribute are triangular
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to check
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
axis: {'x', 'y'}
The spatial axis to check
nans: {None, 'skip', 'only'}
Determines whether values with nan shall be left (None), skipped
(``'skip'``) or shall be the only one returned (``'only'``)
Returns
-------
xarray.DataArray or None
the bounds coordinate (if existent)"""
if coords is None:
coords = self.ds.coords
axis = axis.lower()
get_coord = self.get_x if axis == 'x' else self.get_y
coord = get_coord(var, coords=coords)
if coord is not None:
bounds = self._get_coord_cell_node_coord(coord, coords, nans,
var=var)
if bounds is None:
bounds = self.get_plotbounds(coord)
dim0 = coord.dims[-1]
bounds = xr.DataArray(
np.dstack([bounds[:-1], bounds[1:]])[0],
dims=(dim0, '_bnds'), attrs=coord.attrs.copy(),
name=coord.name + '_bnds')
if bounds is not None and bounds.shape[-1] == 2:
# normal CF-Conventions for rectangular grids
arr = bounds.values
if axis == 'y':
stacked = np.repeat(
np.dstack([arr, arr]).reshape((-1, 4)),
len(self.get_x(var, coords)), axis=0)
else:
stacked = np.tile(np.c_[arr, arr[:, ::-1]],
(len(self.get_y(var, coords)), 1))
bounds = xr.DataArray(
stacked,
dims=('cell', bounds.dims[1]), name=bounds.name,
attrs=bounds.attrs)
return bounds
return None | Checks whether the bounds in the variable attribute are triangular
Parameters
----------
var: xarray.Variable or xarray.DataArray
The variable to check
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
axis: {'x', 'y'}
The spatial axis to check
nans: {None, 'skip', 'only'}
Determines whether values with nan shall be left (None), skipped
(``'skip'``) or shall be the only one returned (``'only'``)
Returns
-------
xarray.DataArray or None
the bounds coordinate (if existent) | entailment |
def _get_coord_cell_node_coord(self, coord, coords=None, nans=None,
var=None):
"""
Get the boundaries of an unstructured coordinate
Parameters
----------
coord: xr.Variable
The coordinate whose bounds should be returned
%(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s
Returns
-------
%(CFDecoder.get_cell_node_coord.returns)s
"""
bounds = coord.attrs.get('bounds')
if bounds is not None:
bounds = self.ds.coords.get(bounds)
if bounds is not None:
if coords is not None:
bounds = bounds.sel(**{
key: coords[key]
for key in set(coords).intersection(bounds.dims)})
if nans is not None and var is None:
raise ValueError("Need the variable to deal with NaN!")
elif nans is None:
pass
elif nans == 'skip':
bounds = bounds[~np.isnan(var.values)]
elif nans == 'only':
bounds = bounds[np.isnan(var.values)]
else:
raise ValueError(
"`nans` must be either None, 'skip', or 'only'! "
"Not {0}!".format(str(nans)))
return bounds | Get the boundaries of an unstructed coordinate
Parameters
----------
coord: xr.Variable
The coordinate whose bounds should be returned
%(CFDecoder.get_cell_node_coord.parameters.no_var|axis)s
Returns
-------
%(CFDecoder.get_cell_node_coord.returns)s | entailment |
def _check_triangular_bounds(self, var, coords=None, axis='x', nans=None):
"""
Checks whether the bounds in the variable attribute are triangular
Parameters
----------
%(CFDecoder.get_cell_node_coord.parameters)s
Returns
-------
bool or None
True, if the bounds are triangular, None if it could not be determined
xarray.Coordinate or None
the bounds coordinate (if existent)"""
# !!! WILL BE REMOVED IN THE NEAR FUTURE! !!!
bounds = self.get_cell_node_coord(var, coords, axis=axis,
nans=nans)
if bounds is not None:
return bounds.shape[-1] == 3, bounds
else:
return None, None | Checks whether the bounds in the variable attribute are triangular
Parameters
----------
%(CFDecoder.get_cell_node_coord.parameters)s
Returns
-------
bool or None
True, if the bounds are triangular, None if it could not be determined
xarray.Coordinate or None
the bounds coordinate (if existent) | entailment |
def is_unstructured(self, var):
"""
Test if a variable is on an unstructured grid
Parameters
----------
%(CFDecoder.is_triangular.parameters)s
Returns
-------
%(CFDecoder.is_triangular.returns)s
Notes
-----
Currently this is the same as :meth:`is_triangular` method, but may
change in the future to support hexagonal grids"""
if str(var.attrs.get('grid_type')) == 'unstructured':
return True
xcoord = self.get_x(var)
if xcoord is not None:
bounds = self._get_coord_cell_node_coord(xcoord)
if bounds is not None and bounds.shape[-1] > 2:
return True | Test if a variable is on an unstructured grid
Parameters
----------
%(CFDecoder.is_triangular.parameters)s
Returns
-------
%(CFDecoder.is_triangular.returns)s
Notes
-----
Currently this is the same as :meth:`is_triangular` method, but may
change in the future to support hexagonal grids | entailment |
def is_circumpolar(self, var):
"""
Test if a variable is on a circumpolar grid
Parameters
----------
%(CFDecoder.is_triangular.parameters)s
Returns
-------
%(CFDecoder.is_triangular.returns)s"""
xcoord = self.get_x(var)
return xcoord is not None and xcoord.ndim == 2 | Test if a variable is on a circumpolar grid
Parameters
----------
%(CFDecoder.is_triangular.parameters)s
Returns
-------
%(CFDecoder.is_triangular.returns)s | entailment |
def get_variable_by_axis(self, var, axis, coords=None):
"""Return the coordinate matching the specified axis
This method uses the ``'axis'`` attribute in coordinates to return the
corresponding coordinate of the given variable
Possible types
--------------
var: xarray.Variable
The variable to get the dimension for
axis: {'x', 'y', 'z', 't'}
The axis string that identifies the dimension
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The coordinate for `var` that matches the given `axis` or None if
no coordinate with the right `axis` could be found.
Notes
-----
This is a rather low-level function that only interprets the
CF Conventions. It is used by the :meth:`get_x`,
:meth:`get_y`, :meth:`get_z` and :meth:`get_t` methods
Warning
-------
If none of the coordinates have an ``'axis'`` attribute, we use the
``'coordinates'`` attribute of `var` (if existent).
Since however the CF Conventions do not determine the order in which
the coordinates shall be saved, we try to use a pattern matching
for latitude (``'lat'``) and longitude (``'lon'``). If these patterns
do not match, we interpret the coordinates such that x: -1, y: -2,
z: -3. This is all not very safe for awkward dimension names,
but works for most cases. If you want to be a hundred percent sure,
use the :attr:`x`, :attr:`y`, :attr:`z` and :attr:`t` attribute.
See Also
--------
get_x, get_y, get_z, get_t"""
axis = axis.lower()
if axis not in list('xyzt'):
raise ValueError("Axis must be one of X, Y, Z, T, not {0}".format(
axis))
# we first check for the dimensions and then for the coordinates
# attribute
coords = coords or self.ds.coords
coord_names = var.attrs.get('coordinates', var.encoding.get(
'coordinates', '')).split()
if not coord_names:
return
ret = []
for coord in map(lambda dim: coords[dim], filter(
lambda dim: dim in coords, chain(
coord_names, var.dims))):
# check for the axis attribute or whether the coordinate is in the
# list of possible coordinate names
if (coord.name not in (c.name for c in ret) and
(coord.attrs.get('axis', '').lower() == axis or
coord.name in getattr(self, axis))):
ret.append(coord)
if ret:
return None if len(ret) > 1 else ret[0]
# If the coordinates attribute is specified but the coordinate
# variables themselves have no 'axis' attribute, we interpret the
# coordinates such that x: -1, y: -2, z: -3
# Since however the CF Conventions do not determine the order on how
# the coordinates shall be saved, we try to use a pattern matching
# for latitude and longitude. This is not very nice, hence it is
# better to specify the :attr:`x` and :attr:`y` attribute
tnames = self.t.intersection(coord_names)
if axis == 'x':
for cname in filter(lambda cname: re.search('lon', cname),
coord_names):
return coords[cname]
return coords.get(coord_names[-1])
elif axis == 'y' and len(coord_names) >= 2:
for cname in filter(lambda cname: re.search('lat', cname),
coord_names):
return coords[cname]
return coords.get(coord_names[-2])
elif (axis == 'z' and len(coord_names) >= 3 and
coord_names[-3] not in tnames):
return coords.get(coord_names[-3])
elif axis == 't' and tnames:
tname = next(iter(tnames))
if len(tnames) > 1:
warn("Found multiple matches for time coordinate in the "
"coordinates: %s. I use %s" % (', '.join(tnames), tname),
PsyPlotRuntimeWarning)
return coords.get(tname) | Return the coordinate matching the specified axis
This method uses the ``'axis'`` attribute in coordinates to return the
corresponding coordinate of the given variable
Possible types
--------------
var: xarray.Variable
The variable to get the dimension for
axis: {'x', 'y', 'z', 't'}
The axis string that identifies the dimension
coords: dict
Coordinates to use. If None, the coordinates of the dataset in the
:attr:`ds` attribute are used.
Returns
-------
xarray.Coordinate or None
The coordinate for `var` that matches the given `axis` or None if
no coordinate with the right `axis` could be found.
Notes
-----
This is a rather low-level function that only interprets the
CF Conventions. It is used by the :meth:`get_x`,
:meth:`get_y`, :meth:`get_z` and :meth:`get_t` methods
Warning
-------
If none of the coordinates have an ``'axis'`` attribute, we use the
``'coordinates'`` attribute of `var` (if existent).
Since however the CF Conventions do not determine the order in which
the coordinates shall be saved, we try to use a pattern matching
for latitude (``'lat'``) and longitude (``'lon'``). If these patterns
do not match, we interpret the coordinates such that x: -1, y: -2,
z: -3. This is all not very safe for awkward dimension names,
but works for most cases. If you want to be a hundred percent sure,
use the :attr:`x`, :attr:`y`, :attr:`z` and :attr:`t` attribute.
See Also
--------
get_x, get_y, get_z, get_t | entailment |
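A minimal sketch of get_variable_by_axis, assuming CFDecoder and open_dataset come from psyplot.data; the file and variable names are made up:
from psyplot.data import CFDecoder, open_dataset

ds = open_dataset('temperature.nc')
decoder = CFDecoder(ds)
# Resolve the longitude coordinate of 't2m' from its 'axis'/'coordinates'
# metadata rather than guessing from dimension names
xcoord = decoder.get_variable_by_axis(ds['t2m'], 'x')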