Columns: idx (int64, 0 to 63k) | question (string, lengths 53 to 5.28k) | target (string, lengths 5 to 805)
2,200
def search_aikif ( txt , formatHTML = True ) : results = [ ] num_found = 0 import aikif . lib . cls_filelist as mod_fl my_files = mod_fl . FileList ( [ aikif_folder ] , [ '*.*' ] , [ '*.pyc' ] ) files = my_files . get_list ( ) for f in files : try : num_found = 0 with open ( f , 'r' ) as cur : line_num = 0 for line in cur : line_num += 1 if txt in line : num_found += 1 if formatHTML is True : results . append ( format_result ( line , line_num , txt ) ) else : results . append ( [ f , line , line_num , txt ] ) if num_found > 0 : if formatHTML is True : results . append ( '<h3>' + f + ' = ' + str ( num_found ) + ' results</h3>' ) else : print ( f + ' = ' + str ( num_found ) + '' ) except Exception : results . append ( 'problem with file ' + f ) if len ( results ) == 0 : results . append ( "No results" ) return results
search for text - currently this looks in all folders in the root of AIKIF, but that also contains binaries, so this will need to use agent_filelist.py to specify the list of folders. NOTE - this needs to use indexes rather than a full search each time
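The indexing idea in the NOTE above could look something like this rough sketch (illustrative only - build_word_index and search_index are assumed helper names, not part of AIKIF): the files are read once, and repeated searches are then answered from memory.

import collections

def build_word_index(files):
    # Map each lower-cased word to the (filename, line_num) pairs it appears on.
    index = collections.defaultdict(list)
    for f in files:
        try:
            with open(f, 'r') as cur:
                for line_num, line in enumerate(cur, start=1):
                    for word in set(line.lower().split()):
                        index[word].append((f, line_num))
        except Exception:
            pass  # skip unreadable (e.g. binary) files
    return index

def search_index(index, txt):
    # Single-word lookup; avoids re-reading every file on each search.
    return index.get(txt.lower(), [])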
2,201
def format_result ( line , line_num , txt ) : return '&nbsp;&nbsp;' + str ( line_num ) + ': ' + line . replace ( txt , '<span style="background-color: #FFFF00">' + txt + '</span>' )
highlight the search result
2,202
def TEST ( ) : w = World ( 'Mars' , [ 0 , 0.0 , 0.9 , 0.0 ] ) print ( w ) p = Person ( 'Rover' , { 'tax_min' : 0.0 , 'tax_max' : 0.9 , 'tradition' : 0.9 , 'equity' : 0.0 } ) print ( p ) h = Happiness ( p , w ) h . add_factor ( HappinessFactors ( 'tax' , 'Economic' , 0.1 , 0.3 ) ) h . add_factor ( HappinessFactors ( 'tradition' , 'Personal' , 0.3 , 0.9 ) ) h . add_factor ( HappinessFactors ( 'equity' , 'Personal' , 0.1 , 0.9 ) ) h . add_factor ( HappinessFactors ( 'growth' , 'Economic' , 0.01 , 0.09 ) ) print ( h . show_details ( ) )
Modules for testing the happiness of persons in worlds based on simplistic preferences. Just a toy - don't take it seriously
2,203
def solve ( self , max_worlds = 10000 , silent = False ) : self . num_worlds = 0 num_unhappy = 0 for tax_rate in range ( self . tax_range [ 0 ] , self . tax_range [ 1 ] ) : for equity in range ( self . equity_range [ 0 ] , self . equity_range [ 1 ] ) : for tradition in range ( self . tradition_range [ 0 ] , self . tradition_range [ 1 ] ) : self . num_worlds += 1 if self . num_worlds > max_worlds : break w = World ( str ( self . num_worlds ) . zfill ( 6 ) , [ 5000 , tax_rate / 10 , tradition / 10 , equity / 10 ] ) world_happiness = 0 num_unhappy = 0 for person in self . all_people : wh = Happiness ( person , w ) world_happiness += wh . rating if wh . rating < 0 : num_unhappy += 1 if world_happiness > self . net_happiness : self . net_happiness = world_happiness self . unhappy_people = num_unhappy if not silent : print ( 'found better world - ' + w . nme + ' = ' + str ( world_happiness ) + ' - total unhappy_people = ' + str ( self . unhappy_people ) )
find the best world to make people happy
2,204
def show_details ( self ) : res = str ( self ) res += '\nDETAILS\n' for f in self . factors : res += str ( f ) return res
extended print details of happiness parameters
2,205
def match_value_to_text ( self , text ) : if self . nme in text : res = 0.8 else : res = 0.2 return self . nme + ' = ' + str ( res ) + ' match against ' + text
this is going to be the tricky bit - probably not possible to get the exact rating for a value. Will need to do sentiment analysis of the text to see how it matches the rating. Even that sounds like it won't work - maybe an ML algorithm would do it, but that requires a large body of text already matched to values - and values aren't even defined as far as I have found.
2,206
def list2html ( lst ) : txt = '<TABLE width=100% border=0>' for l in lst : txt += '<TR>\n' if type ( l ) is str : txt += '<TD>' + l + '</TD>\n' elif type ( l ) is list : txt += '<TD>' for i in l : txt += i + ', ' txt += '</TD>' else : txt += '<TD>' + str ( l ) + '</TD>\n' txt += '</TR>\n' txt += '</TABLE><BR>\n' return txt
convert a list to html using table formatting
2,207
def build_edit_form ( title , id , cols , return_page ) : txt = '<H3>' + title + '</H3>' txt += '<form action="' + return_page + '" method="POST">\n' txt += ' updating id:' + str ( id ) + '\n<BR>' txt += ' <input type="hidden" name="rec_id" readonly value="' + str ( id ) + '"> ' txt += ' <TABLE width=80% valign=top border=1>' for col_num , col in enumerate ( cols ) : txt += ' <TR>\n' txt += ' <TD><div id="form_label">' + col + '</div></TD>\n' txt += ' <TD><div id="form_input"><input type="text" name="col_' + str ( col_num ) + '"></div></TD>\n' txt += ' </TR>\n' txt += ' <TR><TD></TD>\n' txt += ' <TD>\n' txt += ' <input type="submit" name="update-form" value="Save Changes">\n' txt += ' <input type="submit" name="delete-form" value="Delete">\n' txt += ' <input type="submit" name="add-form" value="Add">\n' txt += ' </TD></TR></TABLE>' txt += '</form>\n' return txt
returns the html for a simple edit form
2,208
def build_html_listbox ( lst , nme ) : res = '<select name="' + nme + '" multiple="multiple">\n' for l in lst : res += ' <option>' + str ( l ) + '</option>\n' res += '</select>\n' return res
returns the html to display a listbox
2,209
def build_data_list ( lst ) : txt = '<H3>List</H3><UL>' for i in lst : txt += '<LI>' + i + '</LI>' txt += '</UL>' return txt
returns the HTML for the supplied list as an HTML unordered list
2,210
def filelist2html ( lst , fldr , hasHeader = 'N' ) : txt = '<TABLE width=100% border=0>' numRows = 1 if lst : for l in lst : if hasHeader == 'Y' : if numRows == 1 : td_begin = '<TH>' td_end = '</TH>' else : td_begin = '<TD>' td_end = '</TD>' else : td_begin = '<TD>' td_end = '</TD>' numRows += 1 txt += '<TR>' if type ( l ) is str : txt += td_begin + link_file ( l , fldr ) + td_end elif type ( l ) is list : txt += td_begin for i in l : txt += link_file ( i , fldr ) + '; ' txt += td_end else : txt += td_begin + str ( l ) + td_end txt += '</TR>\n' txt += '</TABLE><BR>\n' return txt
formats a standard filelist to HTML using table formatting
2,211
def link_file ( f , fldr ) : fname = os . path . join ( fldr , f ) if os . path . isfile ( fname ) : return '<a href="/aikif/data/core/' + f + '">' + f + '</a>' else : return f
creates an HTML link for a file using folder fldr
2,212
def dict_to_htmlrow ( d ) : res = "<TR>\n" for k , v in d . items ( ) : if type ( v ) == str : res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + v + '</p></TD>' else : res = res + '<TD><p>' + k + ':</p></TD><TD><p>' + str ( v ) + '</p></TD>' res += '</TR>\n' return res
converts a dictionary to an HTML table row
2,213
def read_csv_to_html_table ( csvFile , hasHeader = 'N' ) : txt = '<table class="as-table as-table-zebra as-table-horizontal">' with open ( csvFile , "r" ) as f : numRows = 1 for row in f : if hasHeader == 'Y' : if numRows == 1 : td_begin = '<TH>' td_end = '</TH>' else : td_begin = '<TD>' td_end = '</TD>' else : td_begin = '<TD>' td_end = '</TD>' cols = row . split ( ',' ) numRows += 1 txt += "<TR>" for col in cols : txt += td_begin try : colString = col except Exception : colString = '<font color=red>Error decoding column data</font>' txt += colString . strip ( '"' ) txt += td_end txt += "</TR>\n" txt += "</TABLE>\n\n" return txt
reads a CSV file and converts it to HTML
2,214
def read_csv_to_html_list ( csvFile ) : txt = '' with open ( csvFile ) as csv_file : for row in csv . reader ( csv_file , delimiter = ',' ) : txt += '<div id="table_row">' for col in row : txt += " " try : txt += col except Exception : txt += 'Error' txt += " " txt += "</div>\n" return txt
reads a CSV file and converts it to an HTML list
2,215
def do_your_job ( self ) : y , x = self . get_intended_direction ( ) if self . target_x == self . current_x and self . target_y == self . current_y : if len ( self . results ) == 0 : self . results . append ( "TARGET ACQUIRED" ) self . lg_mv ( 2 , self . name + ": TARGET ACQUIRED" ) return self . num_steps += 1 accessible = [ '\\' , '-' , '|' , '/' , '.' ] if y != 0 and x != 0 and self . backtrack == [ 0 , 0 ] : if random . randint ( 1 , 10 ) > 6 : if self . grd . get_tile ( self . current_y + y , self . current_x ) in accessible : self . current_y += y self . lg_mv ( 3 , self . name + ": randomly moving Y axis " + str ( self . num_steps ) ) return if x == 1 : if self . grd . get_tile ( self . current_y , self . current_x + 1 ) in accessible : self . current_x += 1 self . lg_mv ( 3 , self . name + ": move# " + str ( self . num_steps ) + " - moving West" ) return elif x == - 1 : if self . grd . get_tile ( self . current_y , self . current_x - 1 ) in accessible : self . current_x -= 1 self . lg_mv ( 3 , self . name + ": move# " + str ( self . num_steps ) + " - moving East" ) return elif y == 1 : if self . grd . get_tile ( self . current_y + 1 , self . current_x ) in accessible : self . current_y += 1 self . lg_mv ( 3 , self . name + ": move# " + str ( self . num_steps ) + " - moving South" ) return elif y == - 1 : if self . grd . get_tile ( self . current_y - 1 , self . current_x ) in accessible : self . current_y -= 1 self . lg_mv ( 3 , self . name + ": move# " + str ( self . num_steps ) + " - moving North" ) return self . grd . set_tile ( self . start_y , self . start_x , 'A' ) self . grd . save ( os . path . join ( os . getcwd ( ) , 'agent.txt' ) )
the goal of the explore agent is to move to the target while avoiding blockages on the grid. This function is messy and needs to be looked at. It currently has a bug in that the backtrack oscillates, so a new method of doing this is needed - probably checking whether it previously backtracked in that direction for those coords, i.e. keep track of cells visited and the number of times visited?
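One possible way to implement the visit-count idea from the comment above (a sketch only, not the project's code - it assumes the agent keeps a visited collections.Counter() and that grd.get_tile and the accessible list behave as in do_your_job):

def least_visited_step(agent, accessible):
    # Prefer the accessible neighbour visited the fewest times, which breaks the
    # backtrack oscillation described above. agent.visited is assumed to be a
    # collections.Counter keyed by (y, x).
    candidates = []
    for dy, dx in [(0, 1), (0, -1), (1, 0), (-1, 0)]:
        y, x = agent.current_y + dy, agent.current_x + dx
        if agent.grd.get_tile(y, x) in accessible:
            candidates.append((agent.visited[(y, x)], y, x))
    if not candidates:
        return None  # boxed in
    _, y, x = min(candidates)
    agent.visited[(y, x)] += 1
    return y, x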
2,216
def lg_mv ( self , log_lvl , txt ) : if log_lvl <= self . LOG_LEVEL : print ( txt + str ( self . current_y ) + "," + str ( self . current_x ) )
wrapper for debugging print and log methods
2,217
def get_intended_direction ( self ) : x = 0 y = 0 if self . target_x == self . current_x and self . target_y == self . current_y : return y , x if self . target_y > self . current_y : y = 1 elif self . target_y < self . current_y : y = - 1 if self . target_x > self . current_x : x = 1 elif self . target_x < self . current_x : x = - 1 return y , x
returns a Y X value showing which direction the agent should move in order to get to the target
2,218
def show_status ( self ) : txt = 'Agent Status:\n' print ( txt ) txt += "start_x = " + str ( self . start_x ) + "\n" txt += "start_y = " + str ( self . start_y ) + "\n" txt += "target_x = " + str ( self . target_x ) + "\n" txt += "target_y = " + str ( self . target_y ) + "\n" txt += "current_x = " + str ( self . current_x ) + "\n" txt += "current_y = " + str ( self . current_y ) + "\n" print ( self . grd ) return txt
dumps the status of the agent
2,219
def get_audio_metadata_old ( fname ) : audio_dict = { } print ( "IDv2 tag info for %s:" % fname ) try : audio = mutagenx . id3 . ID3 ( fname , translate = False ) except Exception as err : print ( "ERROR = " + str ( err ) ) return audio_dict try : audio_dict [ "title" ] = audio [ "title" ] except KeyError : print ( "No title" ) try : audio_dict [ "artist" ] = audio [ "artist" ] except KeyError : print ( "No artist" ) try : audio_dict [ "album" ] = audio [ "album" ] except KeyError : print ( "No album" ) try : audio_dict [ "length" ] = audio [ "length" ] except KeyError : print ( "No length" ) return audio_dict
retrieve the metadata from an MP3 file
2,220
def calculate_columns ( sequence ) : columns = { } for row in sequence : for key in row . keys ( ) : if key not in columns : columns [ key ] = len ( key ) value_length = len ( str ( row [ key ] ) ) if value_length > columns [ key ] : columns [ key ] = value_length return columns
Find all row names and the maximum column widths.
2,221
def calculate_row_format ( columns , keys = None ) : row_format = '' if keys is None : keys = columns . keys ( ) else : keys = [ key for key in keys if key in columns ] for key in keys : if len ( row_format ) > 0 : row_format += "|" row_format += "%%(%s)-%ds" % ( key , columns [ key ] ) return '|' + row_format + '|'
Calculate row format.
2,222
def pprint ( sequence , keys = None ) : if len ( sequence ) > 0 : columns = calculate_columns ( sequence ) row_format = calculate_row_format ( columns , keys ) header = row_format % dict ( [ ( key , key . title ( ) ) for key in columns ] ) separator = row_format % dict ( [ ( key , '-' * columns [ key ] ) for key in columns ] ) print ( separator ) print ( header ) print ( separator ) for row in sequence : print ( row_format % row ) print ( separator )
Print sequence as an ASCII table to stdout.
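As a quick illustration of the three helpers above, a small list of dicts prints roughly like this (column widths come from calculate_columns; output shown is indicative):

rows = [{'name': 'alpha', 'count': 3}, {'name': 'beta', 'count': 12}]
pprint(rows, keys=['name', 'count'])
# |-----|-----|
# |Name |Count|
# |-----|-----|
# |alpha|3    |
# |beta |12   |
# |-----|-----|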
2,223
def matrix_worker ( data ) : matrix = data [ 'matrix' ] Logger . get_logger ( __name__ + '.worker' ) . info ( "Processing pipeline for matrix entry '%s'" , matrix [ 'name' ] ) env = matrix [ 'env' ] . copy ( ) env . update ( { 'PIPELINE_MATRIX' : matrix [ 'name' ] } ) pipeline = Pipeline ( model = data [ 'model' ] , env = env , options = data [ 'options' ] ) pipeline . hooks = data [ 'hooks' ] return pipeline . process ( data [ 'pipeline' ] )
Run pipelines in parallel.
2,224
def can_process_matrix ( entry , matrix_tags ) : if len ( matrix_tags ) == 0 : return True count = 0 if 'tags' in entry : for tag in matrix_tags : if tag in entry [ 'tags' ] : count += 1 return count > 0
Check whether the given entry's tags appear in the given list of matrix tags.
2,225
def run_matrix_ordered ( self , process_data ) : output = [ ] for entry in self . matrix : env = entry [ 'env' ] . copy ( ) env . update ( { 'PIPELINE_MATRIX' : entry [ 'name' ] } ) if Matrix . can_process_matrix ( entry , process_data . options . matrix_tags ) : self . logger . info ( "Processing pipeline for matrix entry '%s'" , entry [ 'name' ] ) pipeline = Pipeline ( model = process_data . model , env = env , options = process_data . options ) pipeline . hooks = process_data . hooks result = pipeline . process ( process_data . pipeline ) output += result [ 'output' ] if not result [ 'success' ] : return { 'success' : False , 'output' : output } return { 'success' : True , 'output' : output }
Running pipelines one after the other.
2,226
def run_matrix_in_parallel ( self , process_data ) : worker_data = [ { 'matrix' : entry , 'pipeline' : process_data . pipeline , 'model' : process_data . model , 'options' : process_data . options , 'hooks' : process_data . hooks } for entry in self . matrix if Matrix . can_process_matrix ( entry , process_data . options . matrix_tags ) ] output = [ ] success = True with closing ( multiprocessing . Pool ( multiprocessing . cpu_count ( ) ) ) as pool : for result in pool . map ( matrix_worker , worker_data ) : output += result [ 'output' ] if not result [ 'success' ] : success = False return { 'success' : success , 'output' : output }
Running pipelines in parallel.
2,227
def process ( self , process_data ) : if self . parallel and not process_data . options . dry_run : return self . run_matrix_in_parallel ( process_data ) return self . run_matrix_ordered ( process_data )
Process the pipeline per matrix item.
2,228
def _sqlfile_to_statements ( sql ) : statements = ( sqlparse . format ( stmt , strip_comments = True ) . strip ( ) for stmt in sqlparse . split ( sql ) ) return [ stmt for stmt in statements if stmt ]
Takes a SQL string containing 0 or more statements and returns a list of individual statements as strings. Comments and empty statements are ignored.
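For example (assuming sqlparse is installed), a script with a comment and two statements splits roughly like this:

sql = """
-- create the table
CREATE TABLE users (id INTEGER);

INSERT INTO users VALUES (1);
"""
print(_sqlfile_to_statements(sql))
# ['CREATE TABLE users (id INTEGER);', 'INSERT INTO users VALUES (1);']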
2,229
def generate_migration_name ( self , name , suffix ) : return os . path . join ( self . dir , 'm{datestr}_{name}.{suffix}' . format ( datestr = datetime . datetime . utcnow ( ) . strftime ( '%Y%m%d%H%M%S' ) , name = name . replace ( ' ' , '_' ) , suffix = suffix ) )
Returns a name of a new migration. It will usually be a filename with a valid and unique name.
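As an illustration of the naming scheme (the directory, name and timestamp below are made up), a standalone version of the same format string:

import datetime
import os

dir_, name, suffix = 'migrations', 'add users', 'sql'
print(os.path.join(dir_, 'm{datestr}_{name}.{suffix}'.format(
    datestr=datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S'),
    name=name.replace(' ', '_'),
    suffix=suffix)))
# e.g. migrations/m20240102030405_add_users.sql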
2,230
def _call_migrate ( self , module , connection_param ) : args = [ connection_param ] spec = inspect . getargspec ( module . migrate ) if len ( spec . args ) == 2 : args . append ( self . db_config ) return module . migrate ( * args )
Subclasses should call this method instead of module.migrate directly, to support the optional db_config argument.
2,231
def _identify_datatype ( self , input_data ) : if isinstance ( input_data , ( int , float ) ) : self . data_type = 'number' elif isinstance ( input_data , ( list ) ) : self . data_type = 'list' elif isinstance ( input_data , dict ) : self . data_type = 'dict' elif type ( input_data ) is str : if self . input_data [ 0 : 4 ] == 'http' : self . data_type = 'url' elif os . path . exists ( input_data ) : self . data_type = 'file' else : self . data_type = 'str' lg . record_result ( '_identify_datatype' , self . name + ' is ' + self . data_type )
uses the input data, which may be a string, list, number or file, to work out how to load the data (this can be overridden by passing the data_type on the command line)
2,232
def _calc_size_stats ( self ) : self . total_records = 0 self . total_length = 0 self . total_nodes = 0 if type ( self . content [ 'data' ] ) is dict : self . total_length += len ( str ( self . content [ 'data' ] ) ) self . total_records += 1 self . total_nodes = sum ( len ( x ) for x in self . content [ 'data' ] . values ( ) ) elif hasattr ( self . content [ 'data' ] , '__iter__' ) and type ( self . content [ 'data' ] ) is not str : self . _get_size_recursive ( self . content [ 'data' ] ) else : self . total_records += 1 self . total_length += len ( str ( self . content [ 'data' ] ) ) return str ( self . total_records ) + ' records [or ' + str ( self . total_nodes ) + ' nodes], taking ' + str ( self . total_length ) + ' bytes'
get the size in bytes and num records of the content
2,233
def _get_size_recursive ( self , dat ) : self . total_records += 1 for rec in dat : if hasattr ( rec , '__iter__' ) and type ( rec ) is not str : self . _get_size_recursive ( rec ) else : self . total_nodes += 1 self . total_length += len ( str ( rec ) )
recursively walk through a data set or json file to get the total number of nodes
2,234
def _make_version ( major , minor , micro , releaselevel , serial ) : assert releaselevel in [ 'alpha' , 'beta' , 'candidate' , 'final' ] version = "%d.%d" % ( major , minor ) if micro : version += ".%d" % ( micro , ) if releaselevel != 'final' : short = { 'alpha' : 'a' , 'beta' : 'b' , 'candidate' : 'rc' } [ releaselevel ] version += "%s%d" % ( short , serial ) return version
Create a readable version string from version_info tuple components.
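A few example calls (values picked purely for illustration) show the resulting version strings:

print(_make_version(4, 0, 0, 'final', 0))      # 4.0
print(_make_version(4, 0, 2, 'beta', 3))       # 4.0.2b3
print(_make_version(4, 1, 0, 'candidate', 1))  # 4.1rc1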
2,235
def _make_url ( major , minor , micro , releaselevel , serial ) : url = "https://django-pagination-bootstrap.readthedocs.io" if releaselevel != 'final' : url += "/en/" + _make_version ( major , minor , micro , releaselevel , serial ) return url
Make the URL people should start at for this version of coverage.py.
2,236
def get_list_of_paths ( self ) : all_paths = [ ] for p in self . fl_metadata : try : all_paths . append ( p [ 'path' ] ) except : try : print ( 'cls_filelist - no key path, ignoring folder ' + str ( p ) ) except : print ( 'cls_filelist - no key path, ignoring odd character folder' ) return list ( set ( all_paths ) )
return a list of unique paths in the file list
2,237
def add_file_metadata ( self , fname ) : file_dict = { } file_dict [ "fullfilename" ] = fname try : file_dict [ "name" ] = os . path . basename ( fname ) file_dict [ "date" ] = self . GetDateAsString ( fname ) file_dict [ "size" ] = os . path . getsize ( fname ) file_dict [ "path" ] = os . path . dirname ( fname ) except IOError : print ( 'Error getting metadata for file' ) self . fl_metadata . append ( file_dict )
collects the file's metadata - note that this will fail with strange errors if the network connection drops out to a shared folder, but it is better to stop the program rather than do a try/except, otherwise you will get an incomplete set of files.
2,238
def print_file_details_as_csv ( self , fname , col_headers ) : line = '' qu = '"' d = ',' for fld in col_headers : if fld == "fullfilename" : line = line + qu + fname + qu + d if fld == "name" : line = line + qu + os . path . basename ( fname ) + qu + d if fld == "date" : line = line + qu + self . GetDateAsString ( fname ) + qu + d if fld == "size" : line = line + qu + self . get_size_as_string ( fname ) + qu + d if fld == "path" : try : line = line + qu + os . path . dirname ( fname ) + qu + d except IOError : line = line + qu + 'ERROR_PATH' + qu + d return line
saves in CSV format
2,239
def save_filelist ( self , opFile , opFormat , delim = ',' , qu = '"' ) : op_folder = os . path . dirname ( opFile ) if op_folder is not None : if not os . path . exists ( op_folder ) : os . makedirs ( op_folder ) with open ( opFile , 'w' ) as fout : fout . write ( "fullFilename" + delim ) for colHeading in opFormat : fout . write ( colHeading + delim ) fout . write ( '\n' ) for f in self . filelist : line = qu + f + qu + delim try : for fld in opFormat : if fld == "name" : line = line + qu + os . path . basename ( f ) + qu + delim if fld == "date" : line = line + qu + self . GetDateAsString ( f ) + qu + delim if fld == "size" : line = line + qu + str ( os . path . getsize ( f ) ) + qu + delim if fld == "path" : line = line + qu + os . path . dirname ( f ) + qu + delim except IOError : line += '\n' try : fout . write ( str ( line . encode ( 'ascii' , 'ignore' ) . decode ( 'utf-8' ) ) ) fout . write ( '\n' ) except IOError : pass
uses a list of files, collects metadata on them and saves to a text file as a list or with metadata, depending on opFormat.
2,240
def login ( self , schema , username , password ) : self . schema = schema self . username = username self . password = password self . connection = schema
connect here - use the other classes cls_oracle, cls_mysql etc; otherwise this has the credentials used to access a shared folder
2,241
def type ( self , sequence_coverage_collection , min_gene_percent_covg_threshold = 99 ) : best_versions = self . get_best_version ( sequence_coverage_collection . values ( ) , min_gene_percent_covg_threshold ) return [ self . presence_typer . type ( best_version ) for best_version in best_versions ]
Types a collection of genes, returning the most likely gene version in the collection with its genotype
2,242
def list_all_python_programs ( self ) : self . tot_lines = 0 self . tot_bytes = 0 self . tot_files = 0 self . tot_loc = 0 self . lstPrograms = [ ] fl = mod_fl . FileList ( [ self . fldr ] , [ '*.py' ] , [ "__pycache__" , "/venv/" , "/venv2/" , ".git" ] ) for fip in fl . get_list ( ) : if '__init__.py' not in fip : self . add ( fip , 'TODO - add comment' ) f = mod_file . TextFile ( fip ) self . tot_lines += f . count_lines_in_file ( ) self . tot_loc += f . count_lines_of_code ( ) self . tot_bytes += f . size self . tot_files += 1 print ( 'All Python Program Statistics' ) print ( 'Files = ' , self . tot_files , ' Bytes = ' , self . tot_bytes , ' Lines = ' , self . tot_lines , ' Lines of Code = ' , self . tot_loc )
collects a filelist of all .py programs
2,243
def save ( self , fname = '' ) : if fname != '' : with open ( fname , 'w' ) as f : for i in self . lstPrograms : f . write ( self . get_file_info_line ( i , ',' ) ) filemap = mod_filemap . FileMap ( [ ] , [ ] ) object_fileList = filemap . get_full_filename ( filemap . find_type ( 'OBJECT' ) , filemap . find_ontology ( 'FILE-PROGRAM' ) [ 0 ] ) print ( 'object_fileList = ' + object_fileList + '\n' ) if os . path . exists ( object_fileList ) : os . remove ( object_fileList ) self . lstPrograms . sort ( ) try : with open ( object_fileList , 'a' ) as f : f . write ( '\n' . join ( [ i [ 0 ] for i in self . lstPrograms ] ) ) except Exception as ex : print ( 'ERROR = cant write to object_filelist ' , object_fileList , str ( ex ) )
Save the list of items to AIKIF core and optionally to local file fname
2,244
def collect_program_info ( self , fname ) : md = '#AIKIF Technical details\n' md += 'Autogenerated list of programs with comments and progress\n' md += '\nFilename | Comment | Date | Size\n' md += '--- | --- | --- | ---\n' for i in self . lstPrograms : md += self . get_file_info_line ( i , ' | ' ) with open ( fname , 'w' ) as f : f . write ( md )
gets details on the program (size, date, list of functions) and produces a Markdown file for documentation
2,245
def id_nameDAVID ( df , GTF = None , name_id = None ) : if name_id is None : gene_name = retrieve_GTF_field ( 'gene_name' , GTF ) gene_id = retrieve_GTF_field ( 'gene_id' , GTF ) GTF = pd . concat ( [ gene_name , gene_id ] , axis = 1 ) else : GTF = name_id . copy ( ) df [ 'Gene_names' ] = "genes" terms = df [ 'termName' ] . tolist ( ) enrichN = pd . DataFrame ( ) for term in terms : tmp = df [ df [ 'termName' ] == term ] tmp = tmp . reset_index ( drop = True ) ids = tmp . xs ( 0 ) [ 'geneIds' ] ids = pd . DataFrame ( data = ids . split ( ", " ) ) ids . columns = [ 'geneIds' ] ids [ 'geneIds' ] = ids [ 'geneIds' ] . map ( str . lower ) GTF [ 'gene_id' ] = GTF [ 'gene_id' ] . astype ( str ) GTF [ 'gene_id' ] = GTF [ 'gene_id' ] . map ( str . lower ) ids = pd . merge ( ids , GTF , how = 'left' , left_on = 'geneIds' , right_on = 'gene_id' ) names = ids [ 'gene_name' ] . tolist ( ) names = ', ' . join ( names ) tmp [ "Gene_names" ] = names enrichN = pd . concat ( [ enrichN , tmp ] ) enrichN = enrichN . reset_index ( drop = True ) gene_names = enrichN [ [ 'Gene_names' ] ] gpos = enrichN . columns . get_loc ( "geneIds" ) enrichN = enrichN . drop ( [ 'Gene_names' ] , axis = 1 ) cols = enrichN . columns . tolist ( ) enrichN = pd . concat ( [ enrichN [ cols [ : gpos + 1 ] ] , gene_names , enrichN [ cols [ gpos + 1 : ] ] ] , axis = 1 ) return enrichN
Given a DAVIDenrich output, it converts Ensembl gene ids to gene names and adds this column to the output
2,246
def DAVIDgetGeneAttribute ( x , df , refCol = "ensembl_gene_id" , fieldTOretrieve = "gene_name" ) : l = x . split ( ", " ) l = [ s . upper ( ) for s in l ] tmpdf = pd . DataFrame ( { refCol : l } , index = range ( len ( l ) ) ) df_fix = df [ [ refCol , fieldTOretrieve ] ] . drop_duplicates ( ) df_fix [ refCol ] = df_fix [ refCol ] . apply ( lambda x : x . upper ( ) ) ids = pd . merge ( tmpdf , df_fix , how = "left" , on = [ refCol ] ) ids = ids [ fieldTOretrieve ] . tolist ( ) ids = [ str ( s ) for s in ids ] ids = ", " . join ( ids ) return ids
Returns a list of gene names for given gene ids.
2,247
def main ( ** options ) : application = Application ( ** options ) if not application . run ( ) : sys . exit ( 1 ) return application
Spline loc tool.
2,248
def load_configuration ( self ) : filename = os . path . join ( os . path . dirname ( __file__ ) , 'templates/spline-loc.yml.j2' ) with open ( filename ) as handle : return Adapter ( safe_load ( handle ) ) . configuration
Loading configuration.
2,249
def ignore_path ( path ) : ignore = False for name in [ '.tox' , 'dist' , 'build' , 'node_modules' , 'htmlcov' ] : if path . find ( name ) >= 0 : ignore = True break return ignore
Verify whether to ignore a path.
2,250
def walk_files_for ( paths , supported_extensions ) : for path in paths : for root , _ , files in os . walk ( path ) : if Application . ignore_path ( root . replace ( path , '' ) ) : continue for filename in files : extension = os . path . splitext ( filename ) [ 1 ] if extension in supported_extensions : yield path , os . path . join ( root , filename ) , extension
Iterating files for given extensions.
2,251
def analyse ( self , path_and_filename , pattern ) : with open ( path_and_filename ) as handle : content = handle . read ( ) loc = content . count ( '\n' ) + 1 com = 0 for match in re . findall ( pattern , content , re . DOTALL ) : com += match . count ( '\n' ) + 1 return max ( 0 , loc - com ) , com
Find out lines of code and lines of comments.
2,252
def datasetsBM ( host = biomart_host ) : stdout_ = sys . stdout stream = StringIO ( ) sys . stdout = stream server = BiomartServer ( host ) server . show_datasets ( ) sys . stdout = stdout_ variable = stream . getvalue ( ) v = variable . replace ( "{" , " " ) v = v . replace ( "}" , " " ) v = v . replace ( ": " , "\t" ) print ( v )
Lists BioMart datasets.
2,253
def filtersBM ( dataset , host = biomart_host ) : stdout_ = sys . stdout stream = StringIO ( ) sys . stdout = stream server = BiomartServer ( host ) d = server . datasets [ dataset ] d . show_filters ( ) sys . stdout = stdout_ variable = stream . getvalue ( ) v = variable . replace ( "{" , " " ) v = v . replace ( "}" , " " ) v = v . replace ( ": " , "\t" ) print ( v )
Lists BioMart filters for a specific dataset.
2,254
def format_csv ( self , delim = ',' , qu = '"' ) : res = qu + self . name + qu + delim if self . data : for d in self . data : res += qu + str ( d ) + qu + delim return res + '\n'
Prepares the data in CSV format
2,255
def format_all ( self ) : res = '\n--- Format all : ' + str ( self . name ) + ' -------------\n' res += ' parent = ' + str ( self . parent ) + '\n' res += self . _get_all_children ( ) res += self . _get_links ( ) return res
return a trace of parents and children of the object
2,256
def _get_all_children ( self , ) : res = '' if self . child_nodes : for c in self . child_nodes : res += ' child = ' + str ( c ) + '\n' if c . child_nodes : for grandchild in c . child_nodes : res += ' child = ' + str ( grandchild ) + '\n' else : res += ' child = None\n' return res
return the list of children of a node
2,257
def _get_links ( self , ) : res = '' if self . links : for l in self . links : res += ' links = ' + str ( l [ 0 ] ) + '\n' if l [ 0 ] . child_nodes : for chld in l [ 0 ] . child_nodes : res += ' child = ' + str ( chld ) + '\n' if l [ 0 ] . links : for lnk in l [ 0 ] . links : res += ' sublink = ' + str ( lnk [ 0 ] ) + '\n' else : res += ' links = None\n' return res
return the list of links of a node
2,258
def get_child_by_name ( self , name ) : for c in self . child_nodes : if c . name == name : return c return None
find the child object by name and return the object
2,259
def get_filename ( self , year ) : res = self . fldr + os . sep + self . type + year + '.' + self . user return res
returns the filename
2,260
def save ( self , file_tag = '2016' , add_header = 'N' ) : fname = self . get_filename ( file_tag ) with open ( fname , 'a' ) as f : if add_header == 'Y' : f . write ( self . format_hdr ( ) ) for e in self . table : f . write ( e . format_csv ( ) )
save table to folder in appropriate files. NOTE - ONLY APPEND AT THIS STAGE - THEN USE DATABASE
2,261
def format_hdr ( self , delim = ',' , qu = '"' ) : res = '' if self . header : for d in self . header : res += qu + str ( d ) + qu + delim return res + '\n'
Prepares the header in CSV format
2,262
def generate_diary ( self ) : print ( 'Generate diary files from Event rows only' ) for r in self . table : print ( str ( type ( r ) ) + ' = ' , r )
extracts event information from core tables into diary files
2,263
def type ( self , variant_probe_coverages , variant = None ) : if not isinstance ( variant_probe_coverages , list ) : variant_probe_coverages = [ variant_probe_coverages ] calls = [ ] for variant_probe_coverage in variant_probe_coverages : calls . append ( self . _type_variant_probe_coverages ( variant_probe_coverage , variant ) ) hom_alt_calls = [ c for c in calls if sum ( c [ "genotype" ] ) > 1 ] het_calls = [ c for c in calls if sum ( c [ "genotype" ] ) == 1 ] if hom_alt_calls : hom_alt_calls . sort ( key = lambda x : x [ "info" ] [ "conf" ] , reverse = True ) return hom_alt_calls [ 0 ] elif het_calls : het_calls . sort ( key = lambda x : x [ "info" ] [ "conf" ] , reverse = True ) return het_calls [ 0 ] else : calls . sort ( key = lambda x : x [ "info" ] [ "conf" ] , reverse = True ) return calls [ 0 ]
Takes a list of VariantProbeCoverages and returns a Call for the Variant. Note in the simplest case the list will be of length one. However, we may be typing the Variant on multiple backgrounds, leading to multiple VariantProbes for a single Variant.
2,264
def creator ( entry , config ) : ansible_playbook = "ansible.playbook.dry.run.see.comment" ansible_inventory = "ansible.inventory.dry.run.see.comment" ansible_playbook_content = render ( config . script , model = config . model , env = config . env , variables = config . variables , item = config . item ) ansible_inventory_content = render ( entry [ 'inventory' ] , model = config . model , env = config . env , variables = config . variables , item = config . item ) if not config . dry_run : ansible_playbook = write_temporary_file ( ansible_playbook_content , 'ansible-play-' , '.yaml' ) ansible_playbook_content = '' ansible_inventory = write_temporary_file ( ansible_inventory_content , prefix = 'ansible-inventory-' ) ansible_inventory_content = '' template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/ansible.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) config . script = render ( template , debug = config . debug , ansible_playbook_content = ansible_playbook_content , ansible_playbook = ansible_playbook , ansible_inventory_content = ansible_inventory_content , ansible_inventory = ansible_inventory , limit = entry [ 'limit' ] ) return Ansible ( config )
Creator function for creating an instance of an Ansible script.
2,265
def update_gol ( self ) : updated_grid = [ [ self . update_cell ( row , col ) for col in range ( self . get_grid_width ( ) ) ] for row in range ( self . get_grid_height ( ) ) ] self . replace_grid ( updated_grid )
Function that performs one step of the Game of Life
2,266
def update_cell ( self , row , col ) : neighbors = self . eight_neighbors ( row , col ) living_neighbors = 0 for neighbor in neighbors : if not self . is_empty ( neighbor [ 0 ] , neighbor [ 1 ] ) : living_neighbors += 1 if ( living_neighbors == 3 ) or ( living_neighbors == 2 and not self . is_empty ( row , col ) ) : return mod_grid . FULL else : return mod_grid . EMPTY
Function that computes the update for one cell in the Game of Life
2,267
def random_offset ( self , lst ) : res = [ ] x = random . randint ( 4 , self . max_x - 42 ) y = random . randint ( 4 , self . max_y - 10 ) for itm in lst : res . append ( [ itm [ 0 ] + y , itm [ 1 ] + x ] ) return res
offsets a pattern list generated below to a random position in the grid
2,268
def get_random ( self , size = 10 ) : bin_i = np . random . choice ( np . arange ( len ( self . bin_centers ) ) , size = size , p = self . normalized_histogram ) return self . bin_centers [ bin_i ] + np . random . uniform ( - 0.5 , 0.5 , size = size ) * self . bin_volumes ( ) [ bin_i ]
Returns random variates from the histogram. Note this assumes the histogram is events per bin, not a pdf. Inside the bins a uniform distribution is assumed.
2,269
def std ( self , bessel_correction = True ) : if bessel_correction : n = self . n bc = n / ( n - 1 ) else : bc = 1 return np . sqrt ( np . average ( ( self . bin_centers - self . mean ) ** 2 , weights = self . histogram ) ) * bc
Estimates std of underlying data assuming each datapoint was exactly in the center of its bin.
2,270
def percentile ( self , percentile ) : return self . bin_centers [ np . argmin ( np . abs ( self . cumulative_density * 100 - percentile ) ) ]
Return bin center nearest to percentile
2,271
def _data_to_hist ( self , data , ** kwargs ) : if hasattr ( self , 'bin_edges' ) : kwargs . setdefault ( 'bins' , self . bin_edges ) if len ( data ) == 1 and isinstance ( data [ 0 ] , COLUMNAR_DATA_SOURCES ) : data = data [ 0 ] if self . axis_names is None : raise ValueError ( "When histogramming from a columnar data source, " "axis_names or dimensions is mandatory" ) is_dask = False if WE_HAVE_DASK : is_dask = isinstance ( data , dask . dataframe . DataFrame ) if is_dask : fake_histogram = Histdd ( axis_names = self . axis_names , bins = kwargs [ 'bins' ] ) partial_hists = [ ] for partition in data . to_delayed ( ) : ph = dask . delayed ( Histdd ) ( partition , axis_names = self . axis_names , bins = kwargs [ 'bins' ] ) ph = dask . delayed ( lambda x : x . histogram ) ( ph ) ph = dask . array . from_delayed ( ph , shape = fake_histogram . histogram . shape , dtype = fake_histogram . histogram . dtype ) partial_hists . append ( ph ) partial_hists = dask . array . stack ( partial_hists , axis = 0 ) compute_options = kwargs . get ( 'compute_options' , { } ) for k , v in DEFAULT_DASK_COMPUTE_KWARGS . items ( ) : compute_options . setdefault ( k , v ) histogram = partial_hists . sum ( axis = 0 ) . compute ( ** compute_options ) bin_edges = fake_histogram . bin_edges return histogram , bin_edges else : data = np . vstack ( [ data [ x ] . values for x in self . axis_names ] ) data = np . array ( data ) . T return np . histogramdd ( data , bins = kwargs . get ( 'bins' ) , weights = kwargs . get ( 'weights' ) , range = kwargs . get ( 'range' ) )
Return histogram and bin_edges arrays
2,272
def axis_names_without ( self , axis ) : if self . axis_names is None : return None return itemgetter ( * self . other_axes ( axis ) ) ( self . axis_names )
Return axis names without axis or None if axis_names is None
2,273
def bin_centers ( self , axis = None ) : if axis is None : return np . array ( [ self . bin_centers ( axis = i ) for i in range ( self . dimensions ) ] ) axis = self . get_axis_number ( axis ) return 0.5 * ( self . bin_edges [ axis ] [ 1 : ] + self . bin_edges [ axis ] [ : - 1 ] )
Return bin centers along an axis, or a list of bin_centers along each axis if axis = None
2,274
def get_axis_bin_index ( self , value , axis ) : axis = self . get_axis_number ( axis ) bin_edges = self . bin_edges [ axis ] if value == bin_edges [ - 1 ] : return len ( bin_edges ) - 2 result = np . searchsorted ( bin_edges , [ value ] , side = 'right' ) [ 0 ] - 1 if not 0 <= result <= len ( bin_edges ) - 1 : raise CoordinateOutOfRangeException ( "Value %s is not in range (%s-%s) of axis %s" % ( value , bin_edges [ 0 ] , bin_edges [ - 1 ] , axis ) ) return result
Returns index along axis of the bin in the histogram which contains value. Inclusive on both endpoints.
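The lookup is a right-sided searchsorted minus one, with the top edge folded into the last bin; the same arithmetic standalone (example values made up):

import numpy as np

bin_edges = np.array([0.0, 1.0, 2.0, 3.0])   # three bins: [0,1), [1,2), [2,3]
print(np.searchsorted(bin_edges, [1.5], side='right')[0] - 1)  # 1 -> 1.5 falls in bin 1
print(np.searchsorted(bin_edges, [0.0], side='right')[0] - 1)  # 0 -> left edge is inclusive
# a value equal to the last edge (3.0) is special-cased to len(bin_edges) - 2 == 2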
2,275
def get_bin_indices ( self , values ) : return tuple ( [ self . get_axis_bin_index ( values [ ax_i ] , ax_i ) for ax_i in range ( self . dimensions ) ] )
Returns index tuple in histogram of bin which contains value
2,276
def all_axis_bin_centers ( self , axis ) : axis = self . get_axis_number ( axis ) return np . meshgrid ( * self . bin_centers ( ) , indexing = 'ij' ) [ axis ]
Return ndarray of same shape as histogram containing bin center value along axis at each point
2,277
def sum ( self , axis ) : axis = self . get_axis_number ( axis ) if self . dimensions == 2 : new_hist = Hist1d else : new_hist = Histdd return new_hist . from_histogram ( np . sum ( self . histogram , axis = axis ) , bin_edges = itemgetter ( * self . other_axes ( axis ) ) ( self . bin_edges ) , axis_names = self . axis_names_without ( axis ) )
Sums all data along axis; returns a (d-1)-dimensional histogram
2,278
def slicesum ( self , start , stop = None , axis = 0 ) : return self . slice ( start , stop , axis ) . sum ( axis )
Slices the histogram along axis, then sums over that slice, returning a (d-1)-dimensional histogram
2,279
def projection ( self , axis ) : axis = self . get_axis_number ( axis ) projected_hist = np . sum ( self . histogram , axis = self . other_axes ( axis ) ) return Hist1d . from_histogram ( projected_hist , bin_edges = self . bin_edges [ axis ] )
Sums all data along all other axes, then returns a Hist1d
2,280
def cumulate ( self , axis ) : axis = self . get_axis_number ( axis ) return Histdd . from_histogram ( np . cumsum ( self . histogram , axis = axis ) , bin_edges = self . bin_edges , axis_names = self . axis_names )
Returns new histogram with all data cumulated along axis.
2,281
def central_likelihood ( self , axis ) : result = self . cumulative_density ( axis ) result . histogram = 1 - 2 * np . abs ( result . histogram - 0.5 ) return result
Returns new histogram with all values replaced by their central likelihoods along axis.
2,282
def lookup_hist ( self , mh ) : result = mh . similar_blank_histogram ( ) points = np . stack ( [ mh . all_axis_bin_centers ( i ) for i in range ( mh . dimensions ) ] ) . reshape ( mh . dimensions , - 1 ) values = self . lookup ( * points ) result . histogram = values . reshape ( result . histogram . shape ) return result
Return histogram within binning of Histdd mh, with values looked up in this histogram.
2,283
def create_roadmap_doc ( dat , opFile ) : op = format_title ( 'Roadmap for AIKIF' ) for h1 in dat [ 'projects' ] : op += format_h1 ( h1 ) if dat [ h1 ] is None : op += '(No details)\n' else : for h2 in dat [ h1 ] : op += '\n' + format_h2 ( h2 ) if dat [ h1 ] [ h2 ] is None : op += '(blank text)\n' else : for txt in dat [ h1 ] [ h2 ] : op += ' - ' + txt + '\n' op += '\n' with open ( opFile , 'w' ) as f : f . write ( op )
takes a dictionary read from a yaml file and converts it to the roadmap documentation
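The expected shape of the YAML data is a 'projects' list plus one mapping per project; a minimal made-up example (format_title, format_h1 and format_h2 are assumed to exist as used in the function above):

dat = {
    'projects': ['core', 'web'],
    'core': {'indexing': ['build word index', 'speed up search'], 'agents': None},
    'web': None,
}
create_roadmap_doc(dat, 'roadmap.md')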
2,284
def clear ( self ) : self . grid = [ [ EMPTY for dummy_col in range ( self . grid_width ) ] for dummy_row in range ( self . grid_height ) ]
Clears grid to be EMPTY
2,285
def save ( self , fname ) : try : with open ( fname , "w" ) as f : f . write ( str ( self ) ) except Exception as ex : print ( 'ERROR = cant save grid results to ' + fname + str ( ex ) )
saves a grid to file as ASCII text
2,286
def load ( self , fname ) : self . grid_width = 4 self . grid_height = 4 self . grid = [ [ 0 for dummy_l in range ( self . grid_width ) ] for dummy_l in range ( self . grid_height ) ] with open ( fname , 'r' ) as f : for row_num , row in enumerate ( f ) : if row . strip ( '\n' ) == '' : break for col_num , col in enumerate ( row . strip ( '\n' ) ) : self . set_tile ( row_num , col_num , col )
loads an ASCII text file grid to self
2,287
def extract_col ( self , col ) : new_col = [ row [ col ] for row in self . grid ] return new_col
get column number col
2,288
def extract_row ( self , row ) : new_row = [ ] for col in range ( self . get_grid_width ( ) ) : new_row . append ( self . get_tile ( row , col ) ) return new_row
get row number row
2,289
def replace_row ( self , line , ndx ) : for col in range ( len ( line ) ) : self . set_tile ( ndx , col , line [ col ] )
replace a grid's row at index ndx with line
2,290
def replace_col ( self , line , ndx ) : for row in range ( len ( line ) ) : self . set_tile ( row , ndx , line [ row ] )
replace a grid's column at index ndx with line
2,291
def new_tile ( self , num = 1 ) : for _ in range ( num ) : if random . random ( ) > .5 : new_tile = self . pieces [ 0 ] else : new_tile = self . pieces [ 1 ] blanks = self . count_blank_positions ( ) if blanks == 0 : print ( "GAME OVER" ) else : res = self . find_random_blank_cell ( ) row = res [ 0 ] col = res [ 1 ] self . set_tile ( row , col , new_tile )
Create a new tile in a randomly selected empty square. The tile should be 2 90% of the time and 4 10% of the time.
2,292
def set_tile ( self , row , col , value ) : if col < 0 : print ( "ERROR - x less than zero" , col ) col = 0 if col > self . grid_width - 1 : print ( "ERROR - x larger than grid" , col ) col = self . grid_width - 1 if row < 0 : print ( "ERROR - y less than zero" , row ) row = 0 if row > self . grid_height - 1 : print ( "ERROR - y larger than grid" , row ) row = self . grid_height - 1 self . grid [ row ] [ col ] = value
Set the tile at position (row, col) to have the given value.
2,293
def replace_grid ( self , updated_grid ) : for col in range ( self . get_grid_width ( ) ) : for row in range ( self . get_grid_height ( ) ) : if updated_grid [ row ] [ col ] == EMPTY : self . set_empty ( row , col ) else : self . set_full ( row , col )
replace all cells in current grid with updated grid
2,294
def find_safe_starting_point ( self ) : y = random . randint ( 2 , self . grid_height - 4 ) x = random . randint ( 2 , self . grid_width - 4 ) return y , x
finds a place on the grid which is clear on all sides to avoid starting in the middle of a blockage
2,295
def resize ( fname , basewidth , opFilename ) : if basewidth == 0 : basewidth = 300 img = Image . open ( fname ) wpercent = ( basewidth / float ( img . size [ 0 ] ) ) hsize = int ( ( float ( img . size [ 1 ] ) * float ( wpercent ) ) ) img = img . resize ( ( basewidth , hsize ) , Image . ANTIALIAS ) img . save ( opFilename )
resize an image to basewidth
2,296
def print_stats ( img ) : stat = ImageStat . Stat ( img ) print ( "extrema : " , stat . extrema ) print ( "count : " , stat . count ) print ( "sum : " , stat . sum ) print ( "sum2 : " , stat . sum2 ) print ( "mean : " , stat . mean ) print ( "median : " , stat . median ) print ( "rms : " , stat . rms ) print ( "var : " , stat . var ) print ( "stddev : " , stat . stddev )
prints stats - remember that img should already have been loaded
2,297
def print_all_metadata ( fname ) : print ( "Filename :" , fname ) print ( "Basename :" , os . path . basename ( fname ) ) print ( "Path :" , os . path . dirname ( fname ) ) print ( "Size :" , os . path . getsize ( fname ) ) img = Image . open ( fname ) width , height = img . size print ( "Width :" , width ) print ( "Height :" , height ) print ( "Format :" , img . format ) print ( "palette :" , img . palette ) print_stats ( img ) exif_data = get_exif_data ( img ) ( lat , lon ) = get_lat_lon ( exif_data ) print ( "GPS Lat :" , lat ) print ( "GPS Long :" , lon )
high-level function that prints all metadata as a long list
2,298
def get_metadata_as_dict ( fname ) : imgdict = { } try : imgdict [ 'filename' ] = fname imgdict [ 'size' ] = str ( os . path . getsize ( fname ) ) imgdict [ 'basename' ] = os . path . basename ( fname ) imgdict [ 'path' ] = os . path . dirname ( fname ) img = Image . open ( fname ) width , height = img . size imgdict [ 'width' ] = str ( width ) imgdict [ 'height' ] = str ( height ) imgdict [ 'format' ] = str ( img . format ) imgdict [ 'palette' ] = str ( img . palette ) stat = ImageStat . Stat ( img ) imgdict [ 'count' ] = List2String ( stat . count , "," ) imgdict [ 'sum' ] = List2String ( stat . sum , "," ) imgdict [ 'sum2' ] = List2String ( stat . sum2 , "," ) imgdict [ 'mean' ] = List2String ( stat . mean , "," ) imgdict [ 'median' ] = List2String ( stat . median , "," ) imgdict [ 'rms' ] = List2String ( stat . rms , "," ) imgdict [ 'var' ] = List2String ( stat . var , "," ) imgdict [ 'stddev' ] = List2String ( stat . stddev , "," ) exif_data = get_exif_data ( img ) print ( 'exif_data = ' , exif_data ) ( lat , lon ) = get_lat_lon ( exif_data ) print ( '(lat, lon)' , ( lat , lon ) ) imgdict [ 'lat' ] = str ( lat ) imgdict [ 'lon' ] = str ( lon ) except Exception as ex : print ( 'problem reading image file metadata in ' , fname , str ( ex ) ) imgdict [ 'lat' ] = 'ERROR' imgdict [ 'lon' ] = 'ERROR' return imgdict
Gets all metadata and puts it into a dictionary
2,299
def get_metadata_as_csv ( fname ) : q = chr ( 34 ) d = "," res = q + fname + q + d res = res + q + os . path . basename ( fname ) + q + d res = res + q + os . path . dirname ( fname ) + q + d try : res = res + q + str ( os . path . getsize ( fname ) ) + q + d img = Image . open ( fname ) width , height = img . size res = res + q + str ( width ) + q + d res = res + q + str ( height ) + q + d res = res + q + str ( img . format ) + q + d res = res + q + str ( img . palette ) + q + d stat = ImageStat . Stat ( img ) res = res + q + List2String ( stat . count , "," ) + q + d res = res + q + List2String ( stat . sum , "," ) + q + d res = res + q + List2String ( stat . sum2 , "," ) + q + d res = res + q + List2String ( stat . mean , "," ) + q + d res = res + q + List2String ( stat . median , "," ) + q + d res = res + q + List2String ( stat . rms , "," ) + q + d res = res + q + List2String ( stat . var , "," ) + q + d res = res + q + List2String ( stat . stddev , "," ) + q + d exif_data = get_exif_data ( img ) ( lat , lon ) = get_lat_lon ( exif_data ) res = res + q + str ( lat ) + q + d res = res + q + str ( lon ) + q + d except Exception as ex : print ( 'problem reading image file metadata in ' , fname , str ( ex ) ) return res
Gets all metadata and puts it into CSV format