| idx (int64, 0–63k) | question (string, 53–5.28k chars) | target (string, 5–805 chars) |
|---|---|---|
2,500
|
def del_instance ( self , obj ) : to_remove = set ( ) for wrkey , _obj in self . iter_instances ( ) : if obj is _obj : to_remove . add ( wrkey ) for wrkey in to_remove : del self [ wrkey ]
|
Remove any stored instance methods that belong to an object
|
2,501
|
def iter_instances ( self ) : for wrkey in set ( self . keys ( ) ) : obj = self . get ( wrkey ) if obj is None : continue yield wrkey , obj
|
Iterate over the stored objects
|
2,502
|
def iter_methods ( self ) : for wrkey , obj in self . iter_instances ( ) : f , obj_id = wrkey if f == 'function' : yield self [ wrkey ] else : yield getattr ( obj , f . __name__ )
|
Iterate over stored functions and instance methods
|
2,503
|
def load_data_subject_areas ( subject_file ) : lst = [ ] if os . path . exists ( subject_file ) : with open ( subject_file , 'r' ) as f : for line in f : lst . append ( line . strip ( ) ) else : print ( 'MISSING DATA FILE (subject_file) ' , subject_file ) print ( 'update your config.py or config.txt' ) return lst
|
reads the subject file into a list to confirm the config is set up
|
2,504
|
def check_ontology ( fname ) : with open ( fname , 'r' ) as stream : y = yaml . safe_load ( stream ) import pprint pprint . pprint ( y )
|
reads the ontology YAML file and does basic verification
|
2,505
|
def find_type ( self , txt ) : searchString = txt . upper ( ) match = 'Unknown' for i in self . lst_type : if searchString in i : match = i return match
|
top-level function used to simply return the ONE ACTUAL string used for data types
|
2,506
|
def get_full_filename ( self , dataType , subjectArea ) : return dataPath + os . sep + 'core' + os . sep + dataType + '_' + subjectArea + '.CSV'
|
returns the file based on dataType and subjectArea
|
2,507
|
def load_plan ( self , fname ) : with open ( fname , "r" ) as f : for line in f : if line != '' : tpe , txt = self . parse_plan_from_string ( line ) if tpe == 'name' : self . name = txt elif tpe == 'version' : self . plan_version = txt elif tpe == 'belief' : self . beliefs . add ( txt ) elif tpe == 'desire' : self . desires . add ( txt ) elif tpe == 'intention' : self . intentions . add ( txt )
|
read the list of thoughts from a text file
|
2,508
|
def add_constraint ( self , name , tpe , val ) : self . constraint . append ( [ name , tpe , val ] )
|
adds a constraint for the plan
|
2,509
|
def get_maps_stats ( self ) : tpes = { } for m in self . maps : if m . tpe in tpes : tpes [ m . tpe ] += 1 else : tpes [ m . tpe ] = 1 return tpes
|
calculates basic stats on the MapRule elements of the maps to give a quick overview .
|
2,510
|
def save_rules ( self , op_file ) : with open ( op_file , 'w' ) as f : for m in self . maps : f . write ( m . format_for_file_output ( ) )
|
save the rules to file after web updates or program changes
|
2,511
|
def process_rule ( self , m , dct , tpe ) : print ( 'TODO - ' + tpe + ' + applying rule ' + str ( m ) . replace ( '\n' , '' ) )
|
uses the MapRule m to run through the dict and extract data based on the rule
|
2,512
|
def format_raw_data ( self , tpe , raw_data ) : if tpe == 'text' : formatted_raw_data = self . parse_text_to_dict ( raw_data ) elif tpe == 'file' : formatted_raw_data = self . parse_file_to_dict ( raw_data ) else : formatted_raw_data = { 'ERROR' : 'unknown data type' , 'data' : [ raw_data ] } return formatted_raw_data
|
uses type to format the raw information to a dictionary usable by the mapper
|
2,513
|
def parse_text_to_dict ( self , txt ) : op = { } print ( 'TODO - import NLP, split into verbs / nouns' ) op [ 'nouns' ] = txt op [ 'verbs' ] = txt return op
|
takes a string and parses via NLP ready for mapping
|
2,514
|
def parse_file_to_dict ( self , fname ) : print ( 'TODO - parse_file_to_dict' + fname ) for m in self . maps : if m . tpe == 'file' : if m . key [ 0 : 3 ] == 'col' : print ( 'reading column..' )
|
process the file according to the mapping rules. The cols list must match the columns in the filename
|
2,515
|
def create_map_from_file ( self , data_filename ) : op_filename = data_filename + '.rule' dataset = mod_datatable . DataTable ( data_filename , ',' ) dataset . load_to_array ( ) l_map = self . generate_map_from_dataset ( dataset ) with open ( op_filename , 'w' ) as f : f . write ( '# rules file autogenerated by mapper.py v0.1\n' ) f . write ( 'filename:source=' + data_filename + '\n' ) f . write ( 'filename:rule=' + op_filename + '\n\n' ) for row in l_map : if type ( row ) is str : f . write ( row + '\n' ) else : for v in row : f . write ( v )
|
reads the data_filename into a matrix and calls the main function to generate a .rule file based on the data in the map. For every data file mapped there exists a .rule file to define it
|
2,516
|
def run ( self ) : while self . status != 'EXIT' : print ( self . process_input ( self . get_input ( ) ) ) print ( 'Bye' )
|
loops until exit command given
|
2,517
|
def process_input ( self , question ) : ans = '' if self . status == 'EXIT' : print ( 'bye' ) sys . exit ( ) if '?' in question : ans = self . info . find_answer ( question ) elif question . startswith ( ':LIST' ) : ans = 'List of Raw Input\n' for i in self . info . raw_input : ans += str ( i ) + '\n' else : ans = 'Adding info..' self . info . raw_input . append ( question ) self . lg . record_process ( 'aggie.py' , 'Question > ' + question ) self . lg . record_process ( 'aggie.py' , 'Answer > ' + ans ) return ans
|
takes a question and returns the best answer based on known skills
|
2,518
|
def show_data_file ( fname ) : txt = '<H2>' + fname + '</H2>' print ( fname ) txt += web . read_csv_to_html_table ( fname , 'Y' ) txt += '</div>\n' return txt
|
shows a data file in CSV format - all files live in CORE folder
|
2,519
|
def managed_process ( process ) : try : yield process finally : for stream in [ process . stdout , process . stdin , process . stderr ] : if stream : stream . close ( ) process . wait ( )
|
Wrapper for subprocess.Popen to work across various Python versions when using the with syntax.
|
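The row above stores `managed_process` as a plain generator; a minimal sketch (not from the dataset) of how such a generator is usually wrapped with `contextlib.contextmanager` and used around `subprocess.Popen` to guarantee cleanup. The decorator and the example command are assumptions.

```python
import subprocess
from contextlib import contextmanager

@contextmanager
def managed_process(process):
    try:
        yield process
    finally:
        # Close any open streams and reap the child process.
        for stream in (process.stdout, process.stdin, process.stderr):
            if stream:
                stream.close()
        process.wait()

with managed_process(subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)) as proc:
    print(proc.stdout.read().decode())
```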
2,520
|
def get_temporary_scripts_path ( self ) : result = None if len ( self . config . temporary_scripts_path ) > 0 : if os . path . isdir ( self . config . temporary_scripts_path ) : result = self . config . temporary_scripts_path return result
|
Get path for temporary scripts .
|
2,521
|
def create_file_for ( self , script ) : temp = tempfile . NamedTemporaryFile ( prefix = "pipeline-script-" , mode = 'w+t' , suffix = ".sh" , delete = False , dir = self . get_temporary_scripts_path ( ) ) self . update_environment_variables ( temp . name ) rendered_script = render ( script , model = self . config . model , env = self . env , item = self . config . item , variables = self . config . variables ) if rendered_script is None : self . success = False temp . close ( ) os . remove ( temp . name ) return None to_file_map = { 2 : lambda s : s . encode ( 'utf-8' ) , 3 : lambda s : s } if all ( ord ( ch ) < 128 for ch in rendered_script ) and os . path . isfile ( rendered_script ) : with open ( rendered_script ) as handle : content = str ( handle . read ( ) ) temp . writelines ( content ) else : temp . write ( u"#!/bin/bash\n%s" % self . render_bash_options ( ) ) temp . write ( to_file_map [ sys . version_info . major ] ( rendered_script ) ) temp . close ( ) os . chmod ( temp . name , 0o700 ) return temp . name
|
Create a temporary executable bash file .
|
2,522
|
def render_bash_options ( self ) : options = '' if self . config . debug : options += "set -x\n" if self . config . strict : options += "set -euo pipefail\n" return options
|
Rendering Bash options .
|
2,523
|
def process_file ( self , filename ) : if self . config . dry_run : if not self . config . internal : self . logger . info ( "Dry run mode for script %s" , filename ) with open ( filename ) as handle : for line in handle : yield line [ 0 : - 1 ] if line [ - 1 ] == '\n' else line else : if not self . config . internal : self . logger . info ( "Running script %s" , filename ) for line in self . process_script ( filename ) : yield line
|
Processing one file .
|
2,524
|
def unregister ( self , fileobj ) : try : key = self . _fd_to_key . pop ( self . _fileobj_lookup ( fileobj ) ) except KeyError : raise KeyError ( "{0!r} is not registered" . format ( fileobj ) ) except socket . error as err : if err . errno != errno . EBADF : raise else : for key in self . _fd_to_key . values ( ) : if key . fileobj is fileobj : self . _fd_to_key . pop ( key . fd ) break else : raise KeyError ( "{0!r} is not registered" . format ( fileobj ) ) return key
|
Unregister a file object from being monitored .
|
2,525
|
def prepare ( self ) : try : assert ( type ( self . sender ) is Channel ) assert ( type ( self . receiver ) is Channel ) return True except : return False
|
does some basic validation
|
2,526
|
def send ( self ) : if self . prepare ( ) : print ( 'sending message' ) lg . record_process ( 'comms.py' , 'Sending message ' + self . title ) return True else : return False
|
this handles the message transmission
|
2,527
|
def buildIndex ( ipFile , ndxFile , append = 'Y' , silent = 'N' , useShortFileName = 'Y' ) : if silent == 'N' : pass if append == 'N' : try : os . remove ( ndxFile ) except Exception as ex : print ( 'file already deleted - ignore' + str ( ex ) ) delims = [ ',' , chr ( 31 ) , '' , '$' , '&' , '"' , '%' , '/' , '\\' , '.' , ';' , ':' , '!' , '?' , '-' , '_' , ' ' , '\n' , '*' , '\'' , '(' , ')' , '[' , ']' , '{' , '}' ] totWords , totLines , uniqueWords = getWordList ( ipFile , delims ) AppendIndexDictionaryToFile ( uniqueWords , ndxFile , ipFile , useShortFileName ) if silent == 'N' : print ( format_op_row ( ipFile , totLines , totWords , uniqueWords ) ) show ( 'uniqueWords' , uniqueWords , 5 ) DisplayIndexAsDictionary ( uniqueWords )
|
this creates an index of a text file, specifically for use in AIKIF: it separates the ontology descriptions first, followed by values, and lastly a final pass to get all delimited word parts.
|
2,528
|
def format_op_row ( ipFile , totLines , totWords , uniqueWords ) : txt = os . path . basename ( ipFile ) . ljust ( 36 ) + ' ' txt += str ( totLines ) . rjust ( 7 ) + ' ' txt += str ( totWords ) . rjust ( 7 ) + ' ' txt += str ( len ( uniqueWords ) ) . rjust ( 7 ) + ' ' return txt
|
Format the output row with stats
|
2,529
|
def format_op_hdr ( ) : txt = 'Base Filename' . ljust ( 36 ) + ' ' txt += 'Lines' . rjust ( 7 ) + ' ' txt += 'Words' . rjust ( 7 ) + ' ' txt += 'Unique' . ljust ( 8 ) + '' return txt
|
Build the header
|
2,530
|
def AppendIndexDictionaryToFile ( uniqueWords , ndxFile , ipFile , useShortFileName = 'Y' ) : if useShortFileName == 'Y' : f = os . path . basename ( ipFile ) else : f = ipFile with open ( ndxFile , "a" , encoding = 'utf-8' , errors = 'replace' ) as ndx : word_keys = uniqueWords . keys ( ) for word in sorted ( word_keys ) : if word != '' : line_nums = uniqueWords [ word ] ndx . write ( f + ', ' + word + ', ' ) for line_num in line_nums : ndx . write ( str ( line_num ) ) ndx . write ( '\n' )
|
Save the list of unique words to the master list
|
2,531
|
def DisplayIndexAsDictionary ( word_occurrences ) : word_keys = word_occurrences . keys ( ) for num , word in enumerate ( word_keys ) : line_nums = word_occurrences [ word ] print ( word + " " ) if num > 3 : break
|
print the index as a dict
|
2,532
|
def show ( title , lst , full = - 1 ) : txt = title + ' (' + str ( len ( lst ) ) + ') items :\n ' num = 0 for i in lst : if full == - 1 or num < full : if type ( i ) is str : txt = txt + i + ',\n ' else : txt = txt + i + ', [' for j in i : txt = txt + j + ', ' txt = txt + ']\n' num = num + 1 try : print ( txt ) except Exception as ex : print ( 'index.show() - cant print line, error ' + str ( ex ) )
|
for testing, simply shows the details of a list
|
2,533
|
def getWordList ( ipFile , delim ) : indexedWords = { } totWords = 0 totLines = 0 with codecs . open ( ipFile , "r" , encoding = 'utf-8' , errors = 'replace' ) as f : for line in f : totLines = totLines + 1 words = multi_split ( line , delim ) totWords = totWords + len ( words ) for word in words : cleanedWord = word . lower ( ) . strip ( ) if cleanedWord not in indexedWords : indexedWords [ cleanedWord ] = str ( totLines ) else : indexedWords [ cleanedWord ] = indexedWords [ cleanedWord ] + ' ' + str ( totLines ) return totWords , totLines , indexedWords
|
extracts a unique list of words along with the line numbers on which each word appears
|
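A hedged, self-contained sketch of the word-to-line-number index that `getWordList` builds, using a `defaultdict` of lists instead of space-joined number strings. The sample lines are invented for illustration.

```python
from collections import defaultdict

lines = ["the cat sat", "the dog ran", "cat and dog"]
index = defaultdict(list)                      # word -> list of line numbers
for line_num, line in enumerate(lines, start=1):
    for word in line.split():
        index[word.lower().strip()].append(line_num)
print(dict(index))                             # e.g. {'the': [1, 2], 'cat': [1, 3], ...}
```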
2,534
|
def multi_split ( txt , delims ) : res = [ txt ] for delimChar in delims : txt , res = res , [ ] for word in txt : if len ( word ) > 1 : res += word . split ( delimChar ) return res
|
split by multiple delimiters
|
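For comparison, the same multi-delimiter split as `multi_split` can be expressed with the standard library's `re.split`; this is an illustrative alternative, not the dataset's implementation.

```python
import re

def multi_split_re(txt, delims):
    # Build one alternation pattern from the delimiters, escaping each one.
    pattern = "|".join(re.escape(d) for d in delims if d)
    return [word for word in re.split(pattern, txt) if word]

print(multi_split_re("alpha,beta;gamma delta", [",", ";", " "]))
# ['alpha', 'beta', 'gamma', 'delta']
```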
2,535
|
def creator ( entry , config ) : script = render ( config . script , model = config . model , env = config . env , item = config . item ) temp = tempfile . NamedTemporaryFile ( prefix = "script-" , suffix = ".py" , mode = 'w+t' , delete = False ) temp . writelines ( script ) temp . close ( ) language = 'python' if 'type' not in entry else entry [ 'type' ] template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/%s-script.sh.j2' % language ) with open ( template_file ) as handle : template = handle . read ( ) config . script = render ( template , script = temp . name ) return Script ( config )
|
Preparing and creating script .
|
2,536
|
def force_to_string ( unknown ) : result = '' if type ( unknown ) is str : result = unknown if type ( unknown ) is int : result = str ( unknown ) if type ( unknown ) is float : result = str ( unknown ) if type ( unknown ) is dict : result = Dict2String ( unknown ) if type ( unknown ) is list : result = List2String ( unknown ) return result
|
converts an unknown type to a string for display purposes.
|
2,537
|
def add_watch_point ( self , string , rating , importance = 5 ) : d = { } d [ 'string' ] = string d [ 'rating' ] = rating d [ 'importance' ] = importance self . watch_points . append ( d )
|
For a log session you can add as many watch points as you like; these are used in the aggregation and extraction of key things that happen. Each watch point has a rating (up to you; it can range from success to total failure) and an importance for finer control of display.
|
2,538
|
def estimate_complexity ( self , x , y , z , n ) : num_calculations = x * y * z * n run_time = num_calculations / 100000 return self . show_time_as_short_string ( run_time )
|
calculates a rough guess of runtime based on product of parameters
|
2,539
|
def show_time_as_short_string ( self , seconds ) : if seconds < 60 : return str ( seconds ) + ' seconds' elif seconds < 3600 : return str ( round ( seconds / 60 , 1 ) ) + ' minutes' elif seconds < 3600 * 24 : return str ( round ( seconds / 3600 , 1 ) ) + ' hours' elif seconds < 3600 * 24 * 365 : return str ( round ( seconds / ( 3600 * 24 ) , 1 ) ) + ' days' else : print ( 'WARNING - this will take ' + str ( seconds / ( 3600 * 24 * 365 ) ) + ' YEARS to run' ) return str ( round ( seconds / ( 3600 * 24 * 365 ) , 1 ) ) + ' years'
|
converts seconds to a string in terms of seconds -> years to show the complexity of the algorithm
|
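A standalone sketch of the same conversion with the unit constants written out (60 s/minute, 3600 s/hour, 86400 s/day, 31536000 s/year); purely illustrative and not part of the dataset.

```python
def short_time_string(seconds):
    # Pick the largest unit the value fits under and round to one decimal.
    if seconds < 60:
        return f"{seconds} seconds"
    if seconds < 3600:
        return f"{round(seconds / 60, 1)} minutes"
    if seconds < 86400:
        return f"{round(seconds / 3600, 1)} hours"
    if seconds < 31536000:
        return f"{round(seconds / 86400, 1)} days"
    return f"{round(seconds / 31536000, 1)} years"

print(short_time_string(5000))    # 83.3 minutes
print(short_time_string(500000))  # 5.8 days
```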
2,540
|
def _log ( self , fname , txt , prg = '' ) : if os . sep not in fname : fname = self . log_folder + os . sep + fname delim = ',' q = '"' dte = TodayAsString ( ) usr = GetUserName ( ) hst = GetHostName ( ) i = self . session_id if prg == '' : prg = 'cls_log.log' logEntry = q + dte + q + delim + q + i + q + delim + q + usr + q + delim + q + hst + q + delim + q + prg + q + delim + q + txt + q + delim + '\n' with open ( fname , "a" , encoding = 'utf-8' , errors = 'replace' ) as myfile : myfile . write ( logEntry )
|
logs an entry to fname along with standard date and user details
|
2,541
|
def record_source ( self , src , prg = '' ) : self . _log ( self . logFileSource , force_to_string ( src ) , prg )
|
function to collect raw data from the web and hard drive. Examples: a new source file for ontologies, an email contacts list, a folder of xmas photos
|
2,542
|
def record_command ( self , cmd , prg = '' ) : self . _log ( self . logFileCommand , force_to_string ( cmd ) , prg )
|
record the command passed - this is usually the name of the program being run or task being run
|
2,543
|
def record_result ( self , res , prg = '' ) : self . _log ( self . logFileResult , force_to_string ( res ) , prg )
|
records the output of the command. A command can have multiple results, so a consistent way to aggregate them still needs to be worked out
|
2,544
|
def extract_logs ( self , fname , prg ) : op = [ ] with open ( fname , 'r' ) as f : for line in f : if prg in line : op . append ( line ) return op
|
read a logfile and return entries for a program
|
2,545
|
def summarise_events ( self ) : all_dates = [ ] d_command = self . _count_by_date ( self . command_file , all_dates ) d_result = self . _count_by_date ( self . result_file , all_dates ) d_process = self . _count_by_date ( self . process_file , all_dates ) d_source = self . _count_by_date ( self . source_file , all_dates ) with open ( self . log_sum , "w" ) as sum_file : sum_file . write ( 'date,command,result,process,source\n' ) for dte in sorted ( set ( all_dates ) ) : sum_file . write ( dte + ',' ) if dte in d_command : sum_file . write ( str ( d_command [ dte ] ) + ',' ) else : sum_file . write ( '0,' ) if dte in d_result : sum_file . write ( str ( d_result [ dte ] ) + ',' ) else : sum_file . write ( '0,' ) if dte in d_process : sum_file . write ( str ( d_process [ dte ] ) + ',' ) else : sum_file . write ( '0,' ) if dte in d_source : sum_file . write ( str ( d_source [ dte ] ) + '\n' ) else : sum_file . write ( '0\n' )
|
takes the logfiles and produces an event summary matrix with columns date, command, result, process, source; e.g. 20140421: 9, 40, 178, 9; 20140423: 0, 0, 6, 0; 20140424: 19, 1, 47, 19; 20140425: 24, 0, 117, 24; 20140426: 16, 0, 83, 16; 20140427: 1, 0, 6, 1; 20140429: 0, 0, 0, 4
|
2,546
|
def _count_by_date ( self , fname , all_dates ) : if not os . path . isfile ( fname ) : return { } d_log_sum = { } with open ( fname , "r" ) as raw_log : for line in raw_log : cols = line . split ( ',' ) dte = cols [ 0 ] . strip ( '"' ) [ 0 : 10 ] . replace ( '-' , '' ) all_dates . append ( dte ) if dte in d_log_sum : d_log_sum [ dte ] += 1 else : d_log_sum [ dte ] = 1 return d_log_sum
|
reads a logfile and returns a dictionary by date showing the count of log entries
|
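The date-count aggregation in `_count_by_date` can also be sketched with `collections.Counter`; the log lines below are invented for illustration and file handling is omitted.

```python
from collections import Counter

log_lines = [
    '"2014-04-21 09:15:02","s1","user","host","prog","did a thing"',
    '"2014-04-21 10:02:11","s1","user","host","prog","did another"',
    '"2014-04-23 08:00:00","s2","user","host","prog","later run"',
]
# First CSV column, quotes stripped, first 10 chars, dashes removed -> YYYYMMDD key.
counts = Counter(line.split(",")[0].strip('"')[:10].replace("-", "") for line in log_lines)
print(counts)  # Counter({'20140421': 2, '20140423': 1})
```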
2,547
|
def map_data ( self ) : with open ( self . src_file , "r" ) as f : for line in f : cols = line . split ( ',' ) print ( cols )
|
provides a mapping from the CSV file to the aikif data structures .
|
2,548
|
def variablename ( var ) : s = [ tpl [ 0 ] for tpl in itertools . ifilter ( lambda x : var is x [ 1 ] , globals ( ) . items ( ) ) ] s = s [ 0 ] . upper ( ) return s
|
Returns the string of a variable name .
|
2,549
|
def BLASTquery ( query , database , program , filter = None , format_type = None , expect = None , nucl_reward = None , nucl_penalty = None , gapcosts = None , matrix = None , hitlist_size = None , descriptions = None , alignments = None , ncbi_gi = None , threshold = None , word_size = None , composition_based_statistics = None , organism = None , others = None , num_threads = None , baseURL = "http://blast.ncbi.nlm.nih.gov" , verbose = False ) : if organism : organism = organism . replace ( " " , "%20" ) . replace ( "(" , "%28" ) . replace ( ")" , "%29" ) . replace ( ":" , "%3A" ) EQ_MENU = organism else : EQ_MENU = None URL = baseURL + "/Blast.cgi?" URL = URL + "QUERY=" + str ( query ) + "&DATABASE=" + str ( database ) + "&PROGRAM=" + str ( program ) for o , varname in zip ( [ filter , format_type , expect , nucl_reward , nucl_penalty , gapcosts , matrix , hitlist_size , descriptions , alignments , ncbi_gi , threshold , word_size , composition_based_statistics , EQ_MENU , num_threads ] , [ 'FILTER' , 'FORMAT_TYPE' , 'EXPECT' , 'NUCL_REWARD' , 'NUCL_PENALTY' , 'GAPCOSTS' , 'MATRIX' , 'HITLIST_SIZE' , 'DESCRIPTIONS' , 'ALIGNMENTS' , 'NCBI_GI' , 'THRESHOLD' , 'WORD_SIZE' , 'COMPOSITION_BASED_STATISTICS' , 'EQ_MENU' , 'NUM_THREADS' ] ) : if o : URL = URL + "&" + varname + "=" + str ( o ) if others : URL = URL + "&" + others URL = URL + "&CMD=Put" if verbose : print ( URL ) sys . stdout . flush ( ) response = requests . get ( url = URL ) r = response . content . split ( "\n" ) RID = [ s for s in r if "RID = " in s ] if len ( RID ) > 0 : RID = RID [ 0 ] . split ( " " ) [ - 1 ] else : print ( "Could not return an RID for this query." ) RID = None return RID
|
Performs a BLAST query online.
|
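The URL construction in `BLASTquery` follows an append-only-if-set pattern; a minimal hedged sketch with invented parameter values (no network call is made).

```python
params = {"FILTER": None, "FORMAT_TYPE": "Tabular", "HITLIST_SIZE": 50}
url = "http://blast.ncbi.nlm.nih.gov/Blast.cgi?QUERY=ACGT&DATABASE=nt&PROGRAM=blastn"
for name, value in params.items():
    if value is not None:          # optional parameters are only added when set
        url += "&" + name + "=" + str(value)
url += "&CMD=Put"
print(url)
```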
2,550
|
def BLASTcheck ( rid , baseURL = "http://blast.ncbi.nlm.nih.gov" ) : URL = baseURL + "/Blast.cgi?" URL = URL + "FORMAT_OBJECT=SearchInfo&RID=" + rid + "&CMD=Get" response = requests . get ( url = URL ) r = response . content . split ( "\n" ) try : status = [ s for s in r if "Status=" in s ] [ 0 ] . split ( "=" ) [ - 1 ] ThereAreHits = [ s for s in r if "ThereAreHits=" in s ] [ 0 ] . split ( "=" ) [ - 1 ] except : status = None ThereAreHits = None print ( rid , status , ThereAreHits ) sys . stdout . flush ( ) return status , ThereAreHits
|
Checks the status of a query .
|
2,551
|
def BLASTresults ( rid , format_type = "Tabular" , hitlist_size = None , alignments = None , ncbi_gi = None , format_object = None , baseURL = "http://blast.ncbi.nlm.nih.gov" ) : URL = baseURL + "/Blast.cgi?" URL = URL + "RID=" + str ( rid ) + "&FORMAT_TYPE=" + str ( format_type ) for o , varname in zip ( [ hitlist_size , alignments , ncbi_gi , format_object ] , [ 'HITLIST_SIZE' , 'ALIGNMENTS' , 'NCBI_GI' , 'FORMAT_OBJECT' ] ) : if o : URL = URL + "&" + varname + "=" + str ( o ) URL = URL + "&CMD=Get" response = requests . get ( url = URL ) response = response . content if format_type == "Tabular" : result = response . split ( "\n" ) result = [ s . split ( "\t" ) for s in result ] [ 6 : ] header = result [ : 7 ] content = result [ 7 : ] fields = header [ 5 ] [ 0 ] . strip ( "# Fields: " ) . split ( ", " ) result = pd . DataFrame ( content , columns = fields ) response = result [ : int ( header [ - 1 ] [ 0 ] . split ( " " ) [ 1 ] ) ] return response
|
Retrieves results for an RID .
|
2,552
|
def generate_html ( store ) : spline = { 'version' : VERSION , 'url' : 'https://github.com/Nachtfeuer/pipeline' , 'generated' : datetime . now ( ) . strftime ( "%A, %d. %B %Y - %I:%M:%S %p" ) } html_template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/report.html.j2' ) with open ( html_template_file ) as handle : html_template = handle . read ( ) return render ( html_template , spline = spline , store = store )
|
Generating HTML report .
|
2,553
|
def __begin_of_list ( self , ast_token ) : self . list_level += 1 if self . list_level == 1 : self . final_ast_tokens . append ( ast_token )
|
Handle begin of a list .
|
2,554
|
def __end_of_list ( self , ast_token ) : self . list_level -= 1 if self . list_level == 0 : if self . list_entry is not None : self . final_ast_tokens . append ( self . list_entry ) self . list_entry = None self . final_ast_tokens . append ( ast_token )
|
Handle end of a list .
|
2,555
|
def __default ( self , ast_token ) : if self . list_level == 1 : if self . list_entry is None : self . list_entry = ast_token elif not isinstance ( ast_token , type ( self . list_entry ) ) : self . final_ast_tokens . append ( ast_token ) elif self . list_level == 0 : self . final_ast_tokens . append ( ast_token )
|
Handle tokens inside the list or outside the list .
|
2,556
|
def compress ( self ) : for ast_token in self . ast_tokens : if type ( ast_token ) in self . dispatcher : self . dispatcher [ type ( ast_token ) ] ( ast_token ) else : self . dispatcher [ 'default' ] ( ast_token )
|
Main function of compression .
|
2,557
|
def get_tokens ( condition ) : try : ast_tokens = list ( ast . walk ( ast . parse ( condition . strip ( ) ) ) ) except SyntaxError as exception : Logger . get_logger ( __name__ ) . error ( "Syntax error: %s" , exception ) ast_tokens = [ ] return ast_tokens
|
Get AST tokens for Python condition .
|
2,558
|
def match_tokens ( ast_tokens , ast_types ) : ast_final_types = [ ast . Module , ast . Expr ] + ast_types return all ( isinstance ( ast_token , ast_type ) for ast_token , ast_type in zip ( ast_tokens , ast_final_types ) )
|
Verify that each token in order does match the expected types .
|
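A small illustration of the AST type-matching idea used by `match_tokens`: parse a condition and check the leading node types. The expected list here is an assumption for a simple comparison expression.

```python
import ast

tokens = list(ast.walk(ast.parse("2 == 2")))
expected = [ast.Module, ast.Expr, ast.Compare]
# zip stops at the shorter sequence, so only the leading tokens are checked.
print(all(isinstance(tok, tpe) for tok, tpe in zip(tokens, expected)))  # True
```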
2,559
|
def find_rule ( condition ) : final_condition = re . sub ( '{{.*}}' , '42' , condition ) ast_tokens = Condition . get_tokens ( final_condition ) ast_compressed_tokens = Condition . compress_tokens ( ast_tokens ) name = 'undefined' function = lambda tokens : False if len ( ast_compressed_tokens ) > 0 : for rule in Condition . RULES : if Condition . match_tokens ( ast_compressed_tokens , rule [ 'types' ] ) : name = rule [ 'name' ] function = rule [ 'evaluate' ] break return name , ast_tokens , function
|
Find rule for given condition .
|
2,560
|
def evaluate ( condition ) : success = False if len ( condition ) > 0 : try : rule_name , ast_tokens , evaluate_function = Condition . find_rule ( condition ) if not rule_name == 'undefined' : success = evaluate_function ( ast_tokens ) except AttributeError as exception : Logger . get_logger ( __name__ ) . error ( "Attribute error: %s" , exception ) else : success = True return success
|
Evaluate simple condition .
|
2,561
|
def start_aikif ( ) : if sys . platform [ 0 : 3 ] == 'win' : os . system ( "start go_web_aikif.bat" ) else : os . system ( "../aikif/web_app/web_aikif.py" ) import webbrowser import time time . sleep ( 1 ) webbrowser . open ( 'http://127.0.0.1:5000' )
|
starts the web interface and possibly other processes
|
2,562
|
def get_creator_by_name ( name ) : return { 'docker(container)' : Container . creator , 'shell' : Bash . creator , 'docker(image)' : Image . creator , 'python' : Script . creator , 'packer' : Packer . creator , 'ansible(simple)' : Ansible . creator } [ name ]
|
Get creator function by name .
|
2,563
|
def worker ( data ) : creator = get_creator_by_name ( data [ 'creator' ] ) shell = creator ( data [ 'entry' ] , ShellConfig ( script = data [ 'entry' ] [ 'script' ] , title = data [ 'entry' ] [ 'title' ] if 'title' in data [ 'entry' ] else '' , model = data [ 'model' ] , env = data [ 'env' ] , item = data [ 'item' ] , dry_run = data [ 'dry_run' ] , debug = data [ 'debug' ] , strict = data [ 'strict' ] , variables = data [ 'variables' ] , temporary_scripts_path = data [ 'temporary_scripts_path' ] ) ) output = [ ] for line in shell . process ( ) : output . append ( line ) Logger . get_logger ( __name__ + '.worker' ) . info ( " | %s" , line ) return { 'id' : data [ 'id' ] , 'success' : shell . success , 'output' : output }
|
Running on shell via multiprocessing .
|
2,564
|
def get_merged_env ( self , include_os = False ) : env = { } if include_os : env . update ( os . environ . copy ( ) ) for level in range ( 3 ) : env . update ( self . pipeline . data . env_list [ level ] . copy ( ) ) return env
|
Copying and merging environment variables .
|
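The layered merge in `get_merged_env` boils down to successive `dict.update` calls where later levels win; a hedged standalone sketch with illustrative names.

```python
import os

def merged_env(levels, include_os=False):
    env = dict(os.environ) if include_os else {}
    for level in levels:               # later levels override earlier ones
        env.update(level)
    return env

print(merged_env([{"STAGE": "build"}, {"STAGE": "deploy", "USER_VAR": "x"}]))
# {'STAGE': 'deploy', 'USER_VAR': 'x'}
```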
2,565
|
def prepare_shell_data ( self , shells , key , entry ) : if self . can_process_shell ( entry ) : if key in [ 'python' ] : entry [ 'type' ] = key if 'with' in entry and isinstance ( entry [ 'with' ] , str ) : rendered_with = ast . literal_eval ( render ( entry [ 'with' ] , variables = self . pipeline . variables , model = self . pipeline . model , env = self . get_merged_env ( include_os = True ) ) ) elif 'with' in entry : rendered_with = entry [ 'with' ] else : rendered_with = [ '' ] for item in rendered_with : shells . append ( { 'id' : self . next_task_id , 'creator' : key , 'entry' : entry , 'model' : self . pipeline . model , 'env' : self . get_merged_env ( ) , 'item' : item , 'dry_run' : self . pipeline . options . dry_run , 'debug' : self . pipeline . options . debug , 'strict' : self . pipeline . options . strict , 'variables' : self . pipeline . variables , 'temporary_scripts_path' : self . pipeline . options . temporary_scripts_path } ) self . next_task_id += 1
|
Prepare one shell or docker task .
|
2,566
|
def process ( self , document ) : self . logger . info ( "Processing group of tasks (parallel=%s)" , self . get_parallel_mode ( ) ) self . pipeline . data . env_list [ 2 ] = { } output , shells = [ ] , [ ] result = Adapter ( { 'success' : True , 'output' : [ ] } ) for task_entry in document : key , entry = list ( task_entry . items ( ) ) [ 0 ] if ( not self . parallel or key == 'env' ) and len ( shells ) > 0 : result = Adapter ( self . process_shells ( shells ) ) output += result . output shells = [ ] if not result . success : break if key == 'env' : self . pipeline . data . env_list [ 2 ] . update ( entry ) elif key in [ 'shell' , 'docker(container)' , 'docker(image)' , 'python' , 'packer' , 'ansible(simple)' ] : self . prepare_shell_data ( shells , key , entry ) if result . success : result = Adapter ( self . process_shells ( shells ) ) output += result . output self . event . delegate ( result . success ) return { 'success' : result . success , 'output' : output }
|
Processing a group of tasks .
|
2,567
|
def process_shells_parallel ( self , shells ) : output = [ ] success = True with closing ( multiprocessing . Pool ( multiprocessing . cpu_count ( ) ) ) as pool : for result in [ Adapter ( entry ) for entry in pool . map ( worker , [ shell for shell in shells ] ) ] : output += result . output the_shell = [ shell for shell in shells if shell [ 'id' ] == result . id ] [ 0 ] self . __handle_variable ( the_shell [ 'entry' ] , result . output ) if not result . success : success = False if success : self . logger . info ( "Parallel Processing Bash code: finished" ) return { 'success' : True , 'output' : output } for line in self . run_cleanup ( shells [ 0 ] [ 'env' ] , 99 ) : output . append ( line ) self . logger . error ( "Pipeline has failed: immediately leaving!" ) self . event . failed ( ) return { 'success' : False , 'output' : output }
|
Processing a list of shells parallel .
|
2,568
|
def process_shells_ordered ( self , shells ) : output = [ ] for shell in shells : entry = shell [ 'entry' ] config = ShellConfig ( script = entry [ 'script' ] , title = entry [ 'title' ] if 'title' in entry else '' , model = shell [ 'model' ] , env = shell [ 'env' ] , item = shell [ 'item' ] , dry_run = shell [ 'dry_run' ] , debug = shell [ 'debug' ] , strict = shell [ 'strict' ] , variables = shell [ 'variables' ] , temporary_scripts_path = shell [ 'temporary_scripts_path' ] ) result = Adapter ( self . process_shell ( get_creator_by_name ( shell [ 'creator' ] ) , entry , config ) ) output += result . output self . __handle_variable ( entry , result . output ) if not result . success : return { 'success' : False , 'output' : output } return { 'success' : True , 'output' : output }
|
Processing a list of shells one after the other .
|
2,569
|
def process_shells ( self , shells ) : result = { 'success' : True , 'output' : [ ] } if self . parallel and len ( shells ) > 1 : result = self . process_shells_parallel ( shells ) elif len ( shells ) > 0 : result = self . process_shells_ordered ( shells ) return result
|
Processing a list of shells .
|
2,570
|
def process_shell ( self , creator , entry , config ) : self . logger . info ( "Processing Bash code: start" ) output = [ ] shell = creator ( entry , config ) for line in shell . process ( ) : output . append ( line ) self . logger . info ( " | %s" , line ) if shell . success : self . logger . info ( "Processing Bash code: finished" ) return { 'success' : True , 'output' : output } for line in self . run_cleanup ( config . env , shell . exit_code ) : output . append ( line ) self . logger . error ( "Pipeline has failed: leaving as soon as possible!" ) self . event . failed ( ) return { 'success' : False , 'output' : output }
|
Processing a shell entry .
|
2,571
|
def run_cleanup ( self , env , exit_code ) : output = [ ] if self . pipeline . data . hooks and len ( self . pipeline . data . hooks . cleanup ) > 0 : env . update ( { 'PIPELINE_RESULT' : 'FAILURE' } ) env . update ( { 'PIPELINE_SHELL_EXIT_CODE' : str ( exit_code ) } ) config = ShellConfig ( script = self . pipeline . data . hooks . cleanup , model = self . pipeline . model , env = env , dry_run = self . pipeline . options . dry_run , debug = self . pipeline . options . debug , strict = self . pipeline . options . strict , temporary_scripts_path = self . pipeline . options . temporary_scripts_path ) cleanup_shell = Bash ( config ) for line in cleanup_shell . process ( ) : output . append ( line ) self . logger . info ( " | %s" , line ) return output
|
Run cleanup hook when configured .
|
2,572
|
def __handle_variable ( self , shell_entry , output ) : if 'variable' in shell_entry : variable_name = shell_entry [ 'variable' ] self . pipeline . variables [ variable_name ] = "\n" . join ( output )
|
Saving output for configured variable name .
|
2,573
|
def main ( ) : print ( 'AIKIF example: Processing Finance data\n' ) data = read_bank_statements ( 'your_statement.csv' ) print ( data ) maps = load_column_maps ( ) rules = load_rules ( ) for m in maps : print ( 'AIKIF mapping : ' + m [ 0 ] + ' => ' + m [ 1 ] ) for rule in rules : if rule [ 0 ] == 'agg' : print ( 'summing : ' + rule [ 1 ] + ' into ' + rule [ 2 ] ) elif rule [ 0 ] == 'derive' : print ( 'New column : ' + rule [ 1 ] + ' = ' + rule [ 2 ] + ' WHERE ' + rule [ 1 ] + ' ELSE ' + rule [ 3 ] ) print ( 'Done\n' )
|
This is the main body of the process that does the work .
|
2,574
|
def _clean_params ( self , params ) : clean_params = { } for key , value in params . iteritems ( ) : if value is not None : clean_params [ key ] = value return clean_params
|
Removes parameters whose values are set to None .
|
2,575
|
def distance_to ( self , other_catchment ) : try : if self . country == other_catchment . country : try : return 0.001 * hypot ( self . descriptors . centroid_ngr . x - other_catchment . descriptors . centroid_ngr . x , self . descriptors . centroid_ngr . y - other_catchment . descriptors . centroid_ngr . y ) except TypeError : return float ( '+inf' ) else : return float ( '+inf' ) except ( TypeError , KeyError ) : raise InsufficientDataError ( "Catchment `descriptors` attribute must be set first." )
|
Returns the distance between the centroids of two catchments in kilometers .
|
2,576
|
def urbext ( self , year ) : urban_expansion = 0.7851 + 0.2124 * atan ( ( year - 1967.5 ) / 20.331792998 ) try : return self . catchment . descriptors . urbext2000 * urban_expansion except TypeError : return 0
|
Estimate the urbext2000 parameter for a given year assuming a nation-wide urbanisation curve.
|
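A worked evaluation of the urban-expansion curve from the row above; the `urbext2000` value of 0.3 is an assumption purely for illustration.

```python
from math import atan

def urban_expansion(year):
    # Same curve as in the row above.
    return 0.7851 + 0.2124 * atan((year - 1967.5) / 20.331792998)

factor = urban_expansion(2000)
print(round(factor, 4))            # expansion factor for the year 2000
print(round(0.3 * factor, 4))      # scaled by an assumed urbext2000 of 0.3
```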
2,577
|
def continuous_periods ( self ) : result = [ ] start_date = self . start_date for gap in self . pot_data_gaps : end_date = gap . start_date - timedelta ( days = 1 ) result . append ( PotPeriod ( start_date , end_date ) ) start_date = gap . end_date + timedelta ( days = 1 ) end_date = self . end_date result . append ( PotPeriod ( start_date , end_date ) ) return result
|
Return a list of continuous data periods by removing the data gaps from the overall record .
|
2,578
|
def add_path ( self , path , path_filter = None ) : for root , _ , files in os . walk ( path ) : for filename in files : full_path_and_filename = os . path . join ( root , filename ) if path_filter is None or path_filter ( full_path_and_filename ) : relative_path_and_filename = full_path_and_filename . replace ( path + '/' , '' ) with open ( full_path_and_filename , 'rb' ) as handle : self . files [ relative_path_and_filename ] = b64encode ( handle . read ( ) ) . decode ( 'utf-8' )
|
Adding all files from given path to the object .
|
2,579
|
def from_json ( data ) : memfiles = InMemoryFiles ( ) memfiles . files = json . loads ( data ) return memfiles
|
Convert JSON into an in-memory file storage.
|
2,580
|
def delete_file ( f , ignore_errors = False ) : try : os . remove ( f ) except Exception as ex : if ignore_errors : return print ( 'ERROR deleting file ' + str ( ex ) )
|
delete a single file
|
2,581
|
def delete_files_in_folder ( fldr ) : fl = glob . glob ( fldr + os . sep + '*.*' ) for f in fl : delete_file ( f , True )
|
delete all files in folder fldr
|
2,582
|
def copy_file ( src , dest ) : try : shutil . copy2 ( src , dest ) except Exception as ex : print ( 'ERROR copying file' + str ( ex ) )
|
copy single file
|
2,583
|
def copy_files_to_folder ( src , dest , xtn = '*.txt' ) : try : all_files = glob . glob ( os . path . join ( src , xtn ) ) for f in all_files : copy_file ( f , dest ) except Exception as ex : print ( 'ERROR copy_files_to_folder - ' + str ( ex ) )
|
copies all the files from src to dest folder
|
2,584
|
def main ( ) : print ( '\n\n /------- AIKIF Installation --------\\' ) print ( ' | s. show current setup |' ) print ( ' | f. setup folder structures |' ) print ( ' | c. create sample data |' ) print ( ' | q. quit |' ) print ( ' \\-----------------------------------/' ) cmd = input ( '?' ) if cmd == 's' : show_setup ( ) elif cmd == 'f' : setup_folders ( ) elif cmd == 'c' : create_sample_data ( ) elif cmd == 'q' : exit ( 0 ) main ( )
|
script to set up folder structures for AIKIF and prepare data tables.
|
2,585
|
def load_graph_from_rdf ( fname ) : print ( "reading RDF from " + fname + "...." ) store = Graph ( ) store . parse ( fname , format = "n3" ) print ( "Loaded " + str ( len ( store ) ) + " tuples" ) return store
|
reads an RDF file into a graph
|
2,586
|
def show_graph_summary ( g ) : sample_data = [ ] print ( "list(g[RDFS.Class]) = " + str ( len ( list ( g [ RDFS . Class ] ) ) ) ) num_subj = 0 for subj in g . subjects ( RDF . type ) : num_subj += 1 if num_subj < 5 : sample_data . append ( "subjects.subject: " + get_string_from_rdf ( subj ) ) print ( "g.subjects(RDF.type) = " + str ( num_subj ) ) num_subj = 0 for subj , pred , obj in g : num_subj += 1 if num_subj < 5 : sample_data . append ( "g.subject : " + get_string_from_rdf ( pred ) ) sample_data . append ( "g.predicate : " + get_string_from_rdf ( subj ) ) sample_data . append ( "g.object : " + get_string_from_rdf ( obj ) ) print ( "g.obj(RDF.type) = " + str ( num_subj ) ) print ( "------ Sample Data ------" ) for line in sample_data : print ( line )
|
display sample data from a graph
|
2,587
|
def export ( g , csv_fname ) : with open ( csv_fname , "w" ) as f : num_tuples = 0 f . write ( '"num","subject","predicate","object"\n' ) for subj , pred , obj in g : num_tuples += 1 f . write ( '"' + str ( num_tuples ) + '",' ) f . write ( '"' + get_string_from_rdf ( subj ) + '",' ) f . write ( '"' + get_string_from_rdf ( pred ) + '",' ) f . write ( '"' + get_string_from_rdf ( obj ) + '"\n' ) print ( "Finished exporting " , num_tuples , " tuples" )
|
export a graph to CSV for simpler viewing
|
2,588
|
def get_string_from_rdf ( src ) : res = src . split ( "/" ) return "" . join ( [ l . replace ( '"' , '""' ) for l in res [ len ( res ) - 1 ] ] )
|
extracts the real content from an RDF info object
|
2,589
|
def create_sample_file ( ip , op , num_lines ) : with open ( ip , "rb" ) as f : with open ( op , "wb" ) as fout : for _ in range ( num_lines ) : fout . write ( f . readline ( ) )
|
make a short version of an RDF file
|
2,590
|
def flatten ( * sequence ) : result = [ ] for entry in sequence : if isinstance ( entry , list ) : result += Select . flatten ( * entry ) elif isinstance ( entry , tuple ) : result += Select . flatten ( * entry ) else : result . append ( entry ) return result
|
Flatten nested sequences into one .
|
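A self-contained version of the recursive flattening shown above (lists and tuples handled in one isinstance check), with a usage example; illustrative only.

```python
def flatten(*sequence):
    result = []
    for entry in sequence:
        if isinstance(entry, (list, tuple)):
            result += flatten(*entry)   # recurse into nested sequences
        else:
            result.append(entry)
    return result

print(flatten(1, [2, (3, [4, 5])], 6))  # [1, 2, 3, 4, 5, 6]
```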
2,591
|
def build ( self ) : result = [ ] for entry in self . sequence : ignore = False for filter_function in self . filter_functions : if not filter_function ( entry ) : ignore = True break if not ignore : value = entry for transform_function in self . transform_functions : value = transform_function ( value ) result . append ( value ) return result
|
Do the query .
|
2,592
|
def extract_all ( zipfile , dest_folder ) : z = ZipFile ( zipfile ) print ( z ) z . extractall ( dest_folder )
|
reads the zip file, determines the compression, and unzips recursively until the source files are extracted
|
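For reference, a hedged round-trip with the standard `zipfile` module; the file names are illustrative.

```python
import zipfile

with zipfile.ZipFile("example.zip", "w") as zf:
    zf.writestr("notes.txt", "hello")          # create a small archive
with zipfile.ZipFile("example.zip") as zf:
    zf.extractall("unzipped")                  # extract every member
```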
2,593
|
def create_zip_from_file ( zip_file , fname ) : with zipfile . ZipFile ( zip_file , 'w' ) as myzip : myzip . write ( fname )
|
add a file to the archive
|
2,594
|
def create_zip_from_folder ( zip_file , fldr , mode = "r" ) : zipf = zipfile . ZipFile ( zip_file , 'w' ) for root , dirs , files in os . walk ( fldr ) : for file in files : fullname = os . path . join ( root , file ) zipf . write ( fullname ) zipf . close ( )
|
add all the files from the folder fldr to the archive
|
2,595
|
def add_method ( self , loop , callback ) : f , obj = get_method_vars ( callback ) wrkey = ( f , id ( obj ) ) self [ wrkey ] = obj self . event_loop_map [ wrkey ] = loop
|
Add a coroutine function
|
2,596
|
def iter_methods ( self ) : for wrkey , obj in self . iter_instances ( ) : f , obj_id = wrkey loop = self . event_loop_map [ wrkey ] m = getattr ( obj , f . __name__ ) yield loop , m
|
Iterate over stored coroutine functions
|
2,597
|
def submit_coroutine ( self , coro , loop ) : async def _do_call ( _coro ) : with _IterationGuard ( self ) : await _coro asyncio . run_coroutine_threadsafe ( _do_call ( coro ) , loop = loop )
|
Schedule and await a coroutine on the specified loop
|
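`submit_coroutine` relies on `asyncio.run_coroutine_threadsafe`; a minimal hedged sketch of submitting a coroutine to a loop running in a worker thread (the coroutine and its return value are invented).

```python
import asyncio
import threading

loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def greet():
    return "hello from the loop thread"

# Schedule the coroutine on the other thread's loop and wait for the result.
future = asyncio.run_coroutine_threadsafe(greet(), loop)
print(future.result(timeout=5))
loop.call_soon_threadsafe(loop.stop)
```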
2,598
|
def launch ( self ) : import subprocess try : retcode = subprocess . call ( self . fullname , shell = True ) if retcode < 0 : print ( "Child was terminated by signal" , - retcode , file = sys . stderr ) return False else : print ( "Child returned" , retcode , file = sys . stderr ) return True except OSError as e : print ( "Execution failed:" , e , file = sys . stderr ) return False
|
launch a file - used for starting html pages
|
2,599
|
def delete ( self ) : if self . fullname != "" : try : os . remove ( self . fullname ) except IOError : print ( "Cant delete " , self . fullname )
|
delete a file; we don't really care if it doesn't exist
|