idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
2,400
def parse ( self , file_name ) : self . object = self . parsed_class ( ) with open ( file_name , encoding = 'utf-8' ) as f : self . parse_str ( f . read ( ) ) return self . object
Parse entire file and return relevant object .
2,401
def has_next ( self ) : try : next_item = self . paginator . object_list [ self . paginator . per_page ] except IndexError : return False return True
Checks for one more item than last on this page .
2,402
def parse_miss_cann ( node , m , c ) : if node [ 2 ] : m1 = node [ 0 ] m2 = m - node [ 0 ] c1 = node [ 1 ] c2 = c - node [ 1 ] else : m1 = m - node [ 0 ] m2 = node [ 0 ] c1 = c - node [ 1 ] c2 = node [ 1 ] return m1 , c1 , m2 , c2
extracts names from the node to get counts of miss + cann on both sides
2,403
def solve ( m , c ) : G = { ( m , c , 1 ) : [ ] } frontier = [ ( m , c , 1 ) ] while len ( frontier ) > 0 : hold = list ( frontier ) for node in hold : newnode = [ ] frontier . remove ( node ) newnode . extend ( pick_next_boat_trip ( node , m , c , frontier ) ) for neighbor in newnode : if neighbor not in G : G [ node ] . append ( neighbor ) G [ neighbor ] = [ node ] frontier . append ( neighbor ) return mod_plan . find_path_BFS ( G , ( m , c , 1 ) , ( 0 , 0 , 0 ) )
run the algorithm to find the path list
2,404
def create_script_fact ( self ) : self . ddl_text += '---------------------------------------------\n' self . ddl_text += '-- CREATE Fact Table - ' + self . fact_table + '\n' self . ddl_text += '---------------------------------------------\n' self . ddl_text += 'DROP TABLE ' + self . fact_table + ' CASCADE CONSTRAINTS;\n' self . ddl_text += 'CREATE TABLE ' + self . fact_table + ' (\n' self . ddl_text += ' ' . join ( [ col + ' VARCHAR2(200), \n' for col in self . col_list ] ) self . ddl_text += ' ' + self . date_updated_col + ' DATE \n' self . ddl_text += ');\n'
appends the CREATE TABLE index etc to self . ddl_text
2,405
def create_script_staging_table ( self , output_table , col_list ) : self . ddl_text += '---------------------------------------------\n' self . ddl_text += '-- CREATE Staging Table - ' + output_table + '\n' self . ddl_text += '---------------------------------------------\n' self . ddl_text += 'DROP TABLE ' + output_table + ' CASCADE CONSTRAINTS;\n' self . ddl_text += 'CREATE TABLE ' + output_table + ' (\n ' self . ddl_text += ' ' . join ( [ col + ' VARCHAR2(200), \n' for col in col_list ] ) self . ddl_text += ' ' + self . date_updated_col + ' DATE \n' self . ddl_text += ');\n'
appends the CREATE TABLE index etc to another table
2,406
def distinct_values ( t_old , t_new ) : res = [ ] res . append ( [ ' -- NOT IN check -- ' ] ) for new_col in t_new . header : dist_new = t_new . get_distinct_values_from_cols ( [ new_col ] ) for old_col in t_old . header : if old_col == new_col : dist_old = t_old . get_distinct_values_from_cols ( [ old_col ] ) not_in_new = [ x for x in dist_old [ 0 ] if x not in dist_new [ 0 ] ] if not_in_new != [ ] : res . append ( [ 'Not in New' , old_col , not_in_new ] ) not_in_old = [ x for x in dist_new [ 0 ] if x not in dist_old [ 0 ] ] if not_in_old != [ ] : res . append ( [ 'Not in Old' , new_col , not_in_old ] ) return sorted ( res )
for all columns check which values are not in the other table
2,407
def aikif_web_menu ( cur = '' ) : pgeHdg = '' pgeBlurb = '' if cur == '' : cur = 'Home' txt = get_header ( cur ) txt += '<div id = "container">\n' txt += ' <div id = "header">\n' txt += ' <!-- Banner txt += ' <img src = "' + os . path . join ( '/static' , 'aikif_banner.jpg' ) + '" alt="AIKIF Banner"/>\n' txt += ' <ul id = "menu_list">\n' for m in menu : if m [ 1 ] == cur : txt += ' <LI id="top_menu_selected"><a href=' + m [ 0 ] + '>' + m [ 1 ] + '</a></li>\n' pgeHdg = m [ 1 ] try : pgeBlurb = m [ 2 ] except Exception : pass else : txt += ' <LI id="top_menu"><a href=' + m [ 0 ] + '>' + m [ 1 ] + '</a></li>\n' txt += " </ul>\n </div>\n\n" txt += '<H1>AIKIF ' + pgeHdg + '</H1>\n' txt += '<H4>' + pgeBlurb + '</H4>\n' return txt
returns the web page header containing standard AIKIF top level web menu
2,408
def main ( ) : print ( "Generating research notes..." ) if os . path . exists ( fname ) : os . remove ( fname ) append_rst ( '================================================\n' ) append_rst ( 'Comparison of Information Aggregation Techniques\n' ) append_rst ( '================================================\n\n' ) append_rst ( '.. contents::\n\n' ) append_rst ( open ( 'res_core_data_HEADER.rst' , 'r' ) . read ( ) ) append_rst ( res_core_data_mthd1 . get_method ( ) ) append_rst ( res_core_data_mthd2 . get_method ( ) ) append_rst ( 'Results\n' ) append_rst ( '=====================================\n' ) for dat in data_files : append_rst ( '\nData File : ' + dat + '\n---------------------------------------\n\n' ) res_core_data_mthd1 . get_results ( fname , dat ) res_core_data_mthd2 . get_results ( fname , dat ) append_rst ( open ( 'res_core_data_FOOTER.rst' , 'r' ) . read ( ) ) print ( "Done!" )
This generates the research document based on the results of the various programs and includes RST imports for introduction and summary
2,409
def find ( self , txt ) : result = [ ] for d in self . data : if txt in d : result . append ( d ) return result
returns a list of records containing text
2,410
def schema_complete ( ) : return Schema ( { 'stage' : And ( str , len ) , 'timestamp' : int , 'status' : And ( str , lambda s : s in [ 'started' , 'succeeded' , 'failed' ] ) , Optional ( 'matrix' , default = 'default' ) : And ( str , len ) , Optional ( 'information' , default = { } ) : { Optional ( Regex ( r'([a-z][_a-z]*)' ) ) : object } } )
Schema for data in CollectorUpdate .
2,411
def schema_event_items ( ) : return { 'timestamp' : And ( int , lambda n : n > 0 ) , Optional ( 'information' , default = { } ) : { Optional ( Regex ( r'([a-z][_a-z]*)' ) ) : object } }
Schema for event items .
2,412
def schema_complete ( ) : return Schema ( { 'stage' : And ( str , len ) , 'status' : And ( str , lambda s : s in [ 'started' , 'succeeded' , 'failed' ] ) , Optional ( 'events' , default = [ ] ) : And ( len , [ CollectorStage . schema_event_items ( ) ] ) } )
Schema for data in CollectorStage .
2,413
def add ( self , timestamp , information ) : try : item = Schema ( CollectorStage . schema_event_items ( ) ) . validate ( { 'timestamp' : timestamp , 'information' : information } ) self . events . append ( item ) except SchemaError as exception : Logger . get_logger ( __name__ ) . error ( exception ) raise RuntimeError ( str ( exception ) )
Add event information .
2,414
def duration ( self ) : duration = 0.0 if len ( self . events ) > 0 : first = datetime . fromtimestamp ( self . events [ 0 ] [ 'timestamp' ] ) last = datetime . fromtimestamp ( self . events [ - 1 ] [ 'timestamp' ] ) duration = ( last - first ) . total_seconds ( ) return duration
Calculate how long the stage took .
2,415
def count_stages ( self , matrix_name ) : return len ( self . data [ matrix_name ] ) if matrix_name in self . data else 0
Number of registered stages for given matrix name .
2,416
def get_stage ( self , matrix_name , stage_name ) : found_stage = None if matrix_name in self . data : result = Select ( self . data [ matrix_name ] ) . where ( lambda entry : entry . stage == stage_name ) . build ( ) found_stage = result [ 0 ] if len ( result ) > 0 else None return found_stage
Get Stage of a concrete matrix .
2,417
def get_duration ( self , matrix_name ) : duration = 0.0 if matrix_name in self . data : duration = sum ( [ stage . duration ( ) for stage in self . data [ matrix_name ] ] ) return duration
Get duration for a concrete matrix .
2,418
def update ( self , item ) : if item . matrix not in self . data : self . data [ item . matrix ] = [ ] result = Select ( self . data [ item . matrix ] ) . where ( lambda entry : entry . stage == item . stage ) . build ( ) if len ( result ) > 0 : stage = result [ 0 ] stage . status = item . status stage . add ( item . timestamp , item . information ) else : stage = CollectorStage ( stage = item . stage , status = item . status ) stage . add ( item . timestamp , item . information ) self . data [ item . matrix ] . append ( stage )
Add a collector item .
2,419
def run ( self ) : while True : data = self . queue . get ( ) if data is None : Logger . get_logger ( __name__ ) . info ( "Stopping collector process ..." ) break self . store . update ( data ) generate ( self . store , 'html' , os . getcwd ( ) )
Collector main loop .
2,420
def read_map ( fname ) : lst = [ ] with open ( fname , "r" ) as f : for line in f : lst . append ( line ) return lst
reads a saved text file to list
2,421
def show_grid_from_file ( self , fname ) : with open ( fname , "r" ) as f : for y , row in enumerate ( f ) : for x , val in enumerate ( row ) : self . draw_cell ( y , x , val )
reads a saved grid file and paints it on the canvas
2,422
def draw_cell ( self , row , col , val ) : if val == 'T' : self . paint_target ( row , col ) elif val == '#' : self . paint_block ( row , col ) elif val == 'X' : self . paint_hill ( row , col ) elif val == '.' : self . paint_land ( row , col ) elif val in [ 'A' ] : self . paint_agent_location ( row , col ) elif val in [ '1' , '2' , '3' , '4' , '5' , '6' , '7' , '8' , '9' ] : self . paint_agent_trail ( row , col , val )
draw a cell as position row col containing val
2,423
def paint_agent_trail ( self , y , x , val ) : for j in range ( 1 , self . cell_height - 1 ) : for i in range ( 1 , self . cell_width - 1 ) : self . img . put ( self . agent_color ( val ) , ( x * self . cell_width + i , y * self . cell_height + j ) )
paint an agent trail as ONE pixel to allow for multiple agent trails to be seen in the same cell
2,424
def agent_color ( self , val ) : if val == '0' : colour = 'blue' elif val == '1' : colour = 'navy' elif val == '2' : colour = 'firebrick' elif val == '3' : colour = 'blue' elif val == '4' : colour = 'blue2' elif val == '5' : colour = 'blue4' elif val == '6' : colour = 'gray22' elif val == '7' : colour = 'gray57' elif val == '8' : colour = 'red4' elif val == '9' : colour = 'red3' return colour
gets a colour for agent 0 - 9
2,425
def create_random_population ( num = 100 ) : people = [ ] for _ in range ( num ) : nme = 'blah' tax_min = random . randint ( 1 , 40 ) / 100 tax_max = tax_min + random . randint ( 1 , 40 ) / 100 tradition = random . randint ( 1 , 100 ) / 100 equity = random . randint ( 1 , 100 ) / 100 pers = mod_hap_env . Person ( nme , { 'tax_min' : tax_min , 'tax_max' : tax_max , 'tradition' : tradition , 'equity' : equity } ) people . append ( pers ) print ( pers ) return people
create a list of people with randomly generated names and stats
2,426
def cleanup ( self ) : if self . data . hooks and len ( self . data . hooks . cleanup ) > 0 : env = self . data . env_list [ 0 ] . copy ( ) env . update ( { 'PIPELINE_RESULT' : 'SUCCESS' , 'PIPELINE_SHELL_EXIT_CODE' : '0' } ) config = ShellConfig ( script = self . data . hooks . cleanup , model = self . model , env = env , dry_run = self . options . dry_run , debug = self . options . debug , strict = self . options . strict , temporary_scripts_path = self . options . temporary_scripts_path ) cleanup_shell = Bash ( config ) for line in cleanup_shell . process ( ) : yield line
Run cleanup script of pipeline when hook is configured .
2,427
def process ( self , pipeline ) : output = [ ] for entry in pipeline : key = list ( entry . keys ( ) ) [ 0 ] if key == "env" : self . data . env_list [ 0 ] . update ( entry [ key ] ) self . logger . debug ( "Updating environment at level 0 with %s" , self . data . env_list [ 0 ] ) continue stage = Stage ( self , re . match ( r"stage\((?P<title>.*)\)" , key ) . group ( "title" ) ) result = stage . process ( entry [ key ] ) output += result [ 'output' ] if not result [ 'success' ] : return { 'success' : False , 'output' : output } for line in self . cleanup ( ) : output . append ( line ) self . logger . info ( " | %s" , line ) self . event . succeeded ( ) return { 'success' : True , 'output' : output }
Processing the whole pipeline definition .
2,428
def process ( self , txt , mode ) : result = '' if mode == 'ADD' : if txt in self . all_commands [ 'cmd' ] [ 0 ] : self . show_output ( 'Returning to Command mode' ) mode = 'COMMAND' self . prompt = '> ' else : self . show_output ( 'Adding Text : ' , txt ) result = self . cmd_add ( txt ) elif mode == 'QUERY' : if txt in self . all_commands [ 'cmd' ] [ 0 ] : self . show_output ( 'Returning to Command mode' ) mode = 'COMMAND' self . prompt = '> ' else : self . show_output ( 'Query : ' , txt ) result = self . cmd_query ( txt ) else : if txt in self . all_commands [ 'exit' ] [ 0 ] : self . cmd_exit ( ) elif txt in self . all_commands [ 'help' ] [ 0 ] : self . cmd_help ( ) elif txt in self . all_commands [ 'cmd' ] [ 0 ] : result = 'Returning to Command mode' mode = 'COMMAND' self . prompt = '> ' elif txt in self . all_commands [ 'add' ] [ 0 ] : result = 'Entering Add mode' mode = 'ADD' self . prompt = 'ADD > ' elif txt in self . all_commands [ 'query' ] [ 0 ] : result = 'Entering Query mode' mode = 'QUERY' self . prompt = '?? > ' else : result = 'Unknown command - type help for list of commands' return result , mode
Top level function to process the command mainly depending on mode . This should work by using the function name defined in all_commamnds
2,429
def cmd_add ( self , txt ) : self . show_output ( 'Adding ' , txt ) self . raw . add ( txt ) print ( self . raw ) return 'Added ' + txt
Enter add mode - all text entered now will be processed as adding information until cancelled
2,430
def cmd_query ( self , txt ) : self . show_output ( 'Searching for ' , txt ) res = self . raw . find ( txt ) for d in res : self . show_output ( d ) return str ( len ( res ) ) + ' results for ' + txt
search and query the AIKIF
2,431
def verify_integrity ( self ) : if not self . __integrity_check : if not self . __appid : raise Exception ( 'U2F_APPID was not defined! Please define it in configuration file.' ) if self . __facets_enabled and not len ( self . __facets_list ) : raise Exception ( ) undefined_message = 'U2F {name} handler is not defined! Please import {name} through {method}!' if not self . __get_u2f_devices : raise Exception ( undefined_message . format ( name = 'Read' , method = '@u2f.read' ) ) if not self . __save_u2f_devices : raise Exception ( undefined_message . format ( name = 'Save' , method = '@u2f.save' ) ) if not self . __call_success_enroll : raise Exception ( undefined_message . format ( name = 'enroll onSuccess' , method = '@u2f.enroll_on_success' ) ) if not self . __call_success_sign : raise Exception ( undefined_message . format ( name = 'sign onSuccess' , method = '@u2f.sign_on_success' ) ) self . __integrity_check = True return True
Verifies that all required functions been injected .
2,432
def devices ( self ) : self . verify_integrity ( ) if session . get ( 'u2f_device_management_authorized' , False ) : if request . method == 'GET' : return jsonify ( self . get_devices ( ) ) , 200 elif request . method == 'DELETE' : response = self . remove_device ( request . json ) if response [ 'status' ] == 'ok' : return jsonify ( response ) , 200 else : return jsonify ( response ) , 404 return jsonify ( { 'status' : 'failed' , 'error' : 'Unauthorized!' } ) , 401
Manages users enrolled u2f devices
2,433
def facets ( self ) : self . verify_integrity ( ) if self . __facets_enabled : data = json . dumps ( { 'trustedFacets' : [ { 'version' : { 'major' : 1 , 'minor' : 0 } , 'ids' : self . __facets_list } ] } , sort_keys = True , indent = 2 , separators = ( ',' , ': ' ) ) mime = 'application/fido.trusted-apps+json' resp = Response ( data , mimetype = mime ) return resp , 200 else : return jsonify ( { } ) , 404
Provides facets support . REQUIRES VALID HTTPS!
2,434
def get_enroll ( self ) : devices = [ DeviceRegistration . wrap ( device ) for device in self . __get_u2f_devices ( ) ] enroll = start_register ( self . __appid , devices ) enroll [ 'status' ] = 'ok' session [ '_u2f_enroll_' ] = enroll . json return enroll
Returns new enroll seed
2,435
def verify_enroll ( self , response ) : seed = session . pop ( '_u2f_enroll_' ) try : new_device , cert = complete_register ( seed , response , self . __facets_list ) except Exception as e : if self . __call_fail_enroll : self . __call_fail_enroll ( e ) return { 'status' : 'failed' , 'error' : 'Invalid key handle!' } finally : pass devices = self . __get_u2f_devices ( ) new_device [ 'counter' ] = 0 new_device [ 'index' ] = 0 for device in devices : if new_device [ 'index' ] <= device [ 'index' ] : new_device [ 'index' ] = device [ 'index' ] + 1 devices . append ( new_device ) self . __save_u2f_devices ( devices ) self . __call_success_enroll ( ) return { 'status' : 'ok' , 'message' : 'Successfully enrolled new U2F device!' }
Verifies and saves U2F enroll
2,436
def get_signature_challenge ( self ) : devices = [ DeviceRegistration . wrap ( device ) for device in self . __get_u2f_devices ( ) ] if devices == [ ] : return { 'status' : 'failed' , 'error' : 'No devices been associated with the account!' } challenge = start_authenticate ( devices ) challenge [ 'status' ] = 'ok' session [ '_u2f_challenge_' ] = challenge . json return challenge
Returns new signature challenge
2,437
def remove_device ( self , request ) : devices = self . __get_u2f_devices ( ) for i in range ( len ( devices ) ) : if devices [ i ] [ 'keyHandle' ] == request [ 'id' ] : del devices [ i ] self . __save_u2f_devices ( devices ) return { 'status' : 'ok' , 'message' : 'Successfully deleted your device!' } return { 'status' : 'failed' , 'error' : 'No device with such an id been found!' }
Removes device specified by id
2,438
def verify_counter ( self , signature , counter ) : devices = self . __get_u2f_devices ( ) for device in devices : if device [ 'keyHandle' ] == signature [ 'keyHandle' ] : if counter > device [ 'counter' ] : device [ 'counter' ] = counter self . __save_u2f_devices ( devices ) return True else : return False
Verifies that counter value is greater than previous signature
2,439
def validate ( data ) : try : return Schema ( Validator . SCHEMA ) . validate ( data ) except SchemaError as exception : logging . getLogger ( __name__ ) . error ( exception ) return None
Validate data against the schema .
2,440
def include ( self , node ) : result = None if isinstance ( node , ScalarNode ) : result = Loader . include_file ( self . construct_scalar ( node ) ) else : raise RuntimeError ( "Not supported !include on type %s" % type ( node ) ) return result
Include the defined yaml file .
2,441
def load ( filename ) : if os . path . isfile ( filename ) : with open ( filename ) as handle : return yaml_load ( handle , Loader = Loader ) raise RuntimeError ( "File %s doesn't exist!" % filename )
Load yaml file with specific include loader .
2,442
def pivot ( self ) : self . op_data = [ list ( i ) for i in zip ( * self . ip_data ) ]
transposes rows and columns
2,443
def key_value_pairs ( self ) : self . op_data = [ ] hdrs = self . ip_data [ 0 ] for row in self . ip_data [ 1 : ] : id_col = row [ 0 ] for col_num , col in enumerate ( row ) : self . op_data . append ( [ id_col , hdrs [ col_num ] , col ] )
convert list to key value pairs This should also create unique id s to allow for any dataset to be transposed and then later manipulated r1c1 r1c2 r1c3 r2c1 r2c2 r2c3 should be converted to ID COLNUM VAL r1c1
2,444
def links_to_data ( self , col_name_col_num , col_val_col_num , id_a_col_num , id_b_col_num ) : print ( 'Converting links to data' ) self . op_data unique_ids = [ ] unique_vals = [ ] self . op_data . append ( [ 'Name' , self . ip_data [ 1 ] [ col_name_col_num ] ] ) for r in self . ip_data [ 1 : ] : if r [ id_a_col_num ] not in unique_ids : unique_ids . append ( r [ id_a_col_num ] ) self . op_data . append ( [ r [ id_a_col_num ] , r [ col_val_col_num ] ] ) if r [ id_b_col_num ] not in unique_ids : unique_ids . append ( r [ id_b_col_num ] ) if r [ col_val_col_num ] not in unique_vals : unique_vals . append ( r [ col_val_col_num ] ) print ( 'unique_ids = ' , unique_ids ) print ( 'unique_vals= ' , unique_vals ) print ( 'op_data = ' , self . op_data ) return self . op_data
This is the reverse of data_to_links and takes a links table and generates a data table as follows Input Table Output Table Cat_Name CAT_val Person_a person_b NAME Location Location Perth John Fred John Perth Location Perth John Cindy Cindy Perth Location Perth Fred Cindy Fred Perth
2,445
def find_best_plan ( self ) : for plan in self . plans : for strat in self . strategy : self . run_plan ( plan , strat )
try each strategy with different amounts
2,446
def load_data ( fname ) : print ( 'Loading ' + fname + ' to redis' ) r = redis . StrictRedis ( host = '127.0.0.1' , port = 6379 , db = 0 ) with open ( fname , 'r' ) as f : for line_num , row in enumerate ( f ) : if row . strip ( '' ) != '' : if line_num < 100000000 : l_key , l_val = parse_n3 ( row , 'csv' ) if line_num % 1000 == 0 : print ( 'loading line #' , line_num , 'key=' , l_key , ' = ' , l_val ) if l_key != '' : r . set ( l_key , l_val )
loads previously exported CSV file to redis database
2,447
def parse_n3 ( row , src = 'csv' ) : if row . strip ( ) == '' : return '' , '' l_root = 'opencyc' key = '' val = '' if src == 'csv' : cols = row . split ( ',' ) if len ( cols ) < 3 : return '' , '' key = '' val = '' key = l_root + ':' + cols [ 1 ] . strip ( '"' ) . strip ( ) + ':' + cols [ 2 ] . strip ( '"' ) . strip ( ) try : val = cols [ 3 ] . strip ( '"' ) . strip ( ) except Exception : val = "Error parsing " + row elif src == 'n3' : pass return key , val
takes a row from an n3 file and returns the triple NOTE - currently parses a CSV line already split via cyc_extract . py
2,448
def summarise_file_as_html ( fname ) : txt = '<H1>' + fname + '</H1>' num_lines = 0 print ( 'Reading OpenCyc file - ' , fname ) with open ( ip_folder + os . sep + fname , 'r' ) as f : txt += '<PRE>' for line in f : if line . strip ( ) != '' : num_lines += 1 if num_lines < 80 : txt += str ( num_lines ) + ': ' + escape_html ( line ) + '' txt += '</PRE>' txt += 'Total lines = ' + str ( num_lines ) + '<BR><BR>' return txt
takes a large data file and produces a HTML summary as html
2,449
def main ( ) : iterations = 9 years = 3 width = 22 height = 78 time_delay = 0.03 lg = mod_log . Log ( 'test' ) lg . record_process ( 'Game of Life' , 'game_of_life_console.py' ) for _ in range ( iterations ) : s , e = run_game_of_life ( years , width , height , time_delay , 'N' ) lg . record_result ( "Started with " + str ( s ) + " cells and ended with " + str ( e ) + " cells" )
Example to show AIKIF logging of results . Generates a sequence of random grids and runs the Game of Life saving results
2,450
def run_game_of_life ( years , width , height , time_delay , silent = "N" ) : lfe = mod_grid . GameOfLife ( width , height , [ '.' , 'x' ] , 1 ) set_random_starting_grid ( lfe ) lg . record_source ( lfe , 'game_of_life_console.py' ) print ( lfe ) start_cells = lfe . count_filled_positions ( ) for ndx , dummy_idx in enumerate ( range ( years ) ) : lfe . update_gol ( ) if silent == "N" : print_there ( 1 , 1 , "Game of Life - Iteration # " + str ( ndx ) ) print_there ( 1 , 2 , lfe ) time . sleep ( time_delay ) end_cells = lfe . count_filled_positions ( ) return start_cells , end_cells
run a single game of life for years and log start and end living cells to aikif
2,451
def print_there ( x , y , text ) : sys . stdout . write ( "\x1b7\x1b[%d;%df%s\x1b8" % ( x , y , text ) ) sys . stdout . flush ( )
allows display of a game of life on a console via resetting cursor position to a set point - looks ok for testing but not production quality .
2,452
def identify_col_pos ( txt ) : res = [ ] lines = txt . split ( '\n' ) prev_ch = '' for col_pos , ch in enumerate ( lines [ 0 ] ) : if _is_white_space ( ch ) is False and _is_white_space ( prev_ch ) is True : res . append ( col_pos ) prev_ch = ch res . append ( col_pos ) return res
assume no delimiter in this file so guess the best fixed column widths to split by
2,453
def load_tbl_from_csv ( fname ) : import csv rows_to_load = [ ] with open ( fname , 'r' , encoding = 'cp1252' , errors = 'ignore' ) as csvfile : csvreader = csv . reader ( csvfile , delimiter = ',' ) reader = csv . reader ( csvfile ) rows_to_load = list ( reader ) return rows_to_load
read a CSV file to list without worrying about odd characters
2,454
def _get_dict_char_count ( txt ) : dct = { } for letter in txt : if letter in dct : dct [ letter ] += 1 else : dct [ letter ] = 1 return dct
reads the characters in txt and returns a dictionary of all letters
2,455
def creator ( entry , config ) : template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/docker-container.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) wrapped_script = render ( template , container = { 'image' : 'centos:7' if 'image' not in entry else entry [ 'image' ] , 'remove' : True if 'remove' not in entry else str ( entry [ 'remove' ] ) . lower ( ) , 'background' : False if 'background' not in entry else str ( entry [ 'background' ] ) . lower ( ) , 'mount' : False if 'mount' not in entry else str ( entry [ 'mount' ] ) . lower ( ) , 'network' : '' if 'network' not in entry else entry [ 'network' ] , 'labels' : { } if 'labels' not in entry else entry [ 'labels' ] , 'script' : config . script } ) config . script = wrapped_script return Container ( config )
Creator function for creating an instance of a Bash .
2,456
def creator ( entry , config ) : dockerfile = render ( config . script , model = config . model , env = config . env , variables = config . variables , item = config . item ) filename = "dockerfile.dry.run.see.comment" if not config . dry_run : temp = tempfile . NamedTemporaryFile ( prefix = "dockerfile-" , mode = 'w+t' , delete = False ) temp . writelines ( dockerfile ) temp . close ( ) filename = temp . name dockerfile = '' name = entry [ 'name' ] + "-%s" % os . getpid ( ) if entry [ 'unique' ] else entry [ 'name' ] tag = render ( entry [ 'tag' ] , model = config . model , env = config . env , item = config . item ) template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/docker-image.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) config . script = render ( template , name = name , tag = tag , dockerfile_content = dockerfile , dockerfile_filename = filename ) return Image ( config )
Creator function for creating an instance of a Docker image script .
2,457
def stdout_redirector ( ) : old_stdout = sys . stdout sys . stdout = Stream ( ) try : yield sys . stdout finally : sys . stdout . close ( ) sys . stdout = old_stdout
Simplify redirect of stdout .
2,458
def write_temporary_file ( content , prefix = '' , suffix = '' ) : temp = tempfile . NamedTemporaryFile ( prefix = prefix , suffix = suffix , mode = 'w+t' , delete = False ) temp . writelines ( content ) temp . close ( ) return temp . name
Generating a temporary file with content .
2,459
def print_new ( ctx , name , migration_type ) : click . echo ( ctx . obj . repository . generate_migration_name ( name , migration_type ) )
Prints filename of a new migration
2,460
def start ( self ) : self . running = True self . status = 'RUNNING' self . mylog . record_process ( 'agent' , self . name + ' - starting' )
Starts an agent with standard logging
2,461
def set_coords ( self , x = 0 , y = 0 , z = 0 , t = 0 ) : self . coords = { } self . coords [ 'x' ] = x self . coords [ 'y' ] = y self . coords [ 'z' ] = z self . coords [ 't' ] = t
set coords of agent in an arbitrary world
2,462
def from_file ( file_path , incl_pot = True ) : filename , ext = os . path . splitext ( file_path ) am_file_path = filename + '.AM' pot_file_path = filename + '.PT' parser_by_ext = { '.cd3' : parsers . Cd3Parser , '.xml' : parsers . XmlCatchmentParser } catchment = parser_by_ext [ ext . lower ( ) ] ( ) . parse ( file_path ) try : catchment . amax_records = parsers . AmaxParser ( ) . parse ( am_file_path ) except FileNotFoundError : catchment . amax_records = [ ] if incl_pot : try : catchment . pot_dataset = parsers . PotParser ( ) . parse ( pot_file_path ) except FileNotFoundError : pass return catchment
Load catchment object from a . CD3 or . xml file .
2,463
def to_db ( catchment , session , method = 'create' , autocommit = False ) : if not catchment . id : raise ValueError ( "Catchment/station number (`catchment.id`) must be set." ) if method == 'create' : session . add ( catchment ) elif method == 'update' : session . merge ( catchment ) else : raise ValueError ( "Method `{}` invalid. Use either `create` or `update`." ) if autocommit : session . commit ( )
Load catchment object into the database .
2,464
def userdata_to_db ( session , method = 'update' , autocommit = False ) : try : folder = config [ 'import' ] [ 'folder' ] except KeyError : return if folder : folder_to_db ( folder , session , method = method , autocommit = autocommit )
Add catchments from a user folder to the database .
2,465
def send_text ( hwnd , txt ) : try : for c in txt : if c == '\n' : win32api . SendMessage ( hwnd , win32con . WM_KEYDOWN , win32con . VK_RETURN , 0 ) win32api . SendMessage ( hwnd , win32con . WM_KEYUP , win32con . VK_RETURN , 0 ) else : win32api . SendMessage ( hwnd , win32con . WM_CHAR , ord ( c ) , 0 ) except Exception as ex : print ( 'error calling SendMessage ' + str ( ex ) )
sends the text txt to the window handle hwnd using SendMessage
2,466
def launch_app ( app_path , params = [ ] , time_before_kill_app = 15 ) : import subprocess try : res = subprocess . call ( [ app_path , params ] , timeout = time_before_kill_app , shell = True ) print ( 'res = ' , res ) if res == 0 : return True else : return False except Exception as ex : print ( 'error launching app ' + str ( app_path ) + ' with params ' + str ( params ) + '\n' + str ( ex ) ) return False
start an app
2,467
def app_activate ( caption ) : try : shell = win32com . client . Dispatch ( "WScript.Shell" ) shell . AppActivate ( caption ) except Exception as ex : print ( 'error calling win32com.client.Dispatch (AppActivate)' )
use shell to bring the application with caption to front
2,468
def most_similar_catchments ( self , subject_catchment , similarity_dist_function , records_limit = 500 , include_subject_catchment = 'auto' ) : if include_subject_catchment not in [ 'auto' , 'force' , 'exclude' ] : raise ValueError ( "Parameter `include_subject_catchment={}` invalid." . format ( include_subject_catchment ) + "Must be one of `auto`, `force` or `exclude`." ) query = ( self . db_session . query ( Catchment ) . join ( Catchment . descriptors ) . join ( Catchment . amax_records ) . filter ( Catchment . id != subject_catchment . id , Catchment . is_suitable_for_pooling , or_ ( Descriptors . urbext2000 < 0.03 , Descriptors . urbext2000 == None ) , AmaxRecord . flag == 0 ) . group_by ( Catchment ) . having ( func . count ( AmaxRecord . catchment_id ) >= 10 ) ) catchments = query . all ( ) if include_subject_catchment == 'force' : if len ( subject_catchment . amax_records ) >= 10 : catchments . append ( subject_catchment ) elif include_subject_catchment == 'auto' : if len ( subject_catchment . amax_records ) >= 10 and subject_catchment . is_suitable_for_pooling and ( subject_catchment . descriptors . urbext2000 < 0.03 or subject_catchment . descriptors . urbext2000 is None ) : catchments . append ( subject_catchment ) for catchment in catchments : catchment . similarity_dist = similarity_dist_function ( subject_catchment , catchment ) catchments . sort ( key = attrgetter ( 'similarity_dist' ) ) amax_records_count = 0 catchments_limited = [ ] for catchment in catchments : catchments_limited . append ( catchment ) amax_records_count += catchment . record_length if amax_records_count >= records_limit : break return catchments_limited
Return a list of catchments sorted by hydrological similarity defined by similarity_distance_function
2,469
def readSAM ( SAMfile , header = False ) : if header == True : f = open ( SAMfile , "r+" ) head = [ ] for line in f . readlines ( ) : if line [ 0 ] == "@" : head . append ( line ) else : continue f . close ( ) sam = pd . read_table ( SAMfile , sep = "this_gives_one_column" , comment = "@" , header = None ) sam = pd . DataFrame ( sam [ 0 ] . str . split ( "\t" ) . tolist ( ) ) acols = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 ] sam_ = sam [ acols ] samcols = sam . columns . tolist ( ) bcols = [ s for s in samcols if s not in acols ] sam_ [ 10 ] = sam [ bcols [ 0 ] ] if len ( bcols ) > 1 : for c in bcols [ 1 : ] : sam_ [ 10 ] = sam_ [ 10 ] . astype ( str ) sam [ c ] = sam [ c ] . astype ( str ) sam_ [ 10 ] = sam_ [ 10 ] + "\t" + sam [ c ] sam_ . columns = [ 'QNAME' , 'FLAG' , 'RNAME' , 'POS' , 'MAPQ' , 'CIGAR' , 'RNEXT' , 'PNEXT' , 'TLEN' , 'SEQ' , 'QUAL' ] if header == True : return sam_ , head else : return sam_
Reads and parses a sam file .
2,470
def SAMflags ( x ) : flags = [ ] if x & 1 : l = "1: Read paired" else : l = "0: Read unpaired" flags . append ( l ) if x & 2 : l = "1: Read mapped in proper pair" else : l = "0: Read not mapped in proper pair" flags . append ( l ) if x & 4 : l = "1: Read unmapped" else : l = "0: Read mapped" flags . append ( l ) if x & 8 : l = "1: Mate unmapped" else : l = "0: Mate mapped" flags . append ( l ) if x & 16 : l = "1: Read reverse strand" else : l = "0: Read direct strand" flags . append ( l ) if x & 32 : l = "1: Mate reverse strand" else : l = "0: Mate direct strand" flags . append ( l ) if x & 64 : l = "1: First in pair" else : l = "0: Second in pair" flags . append ( l ) if x & 128 : l = "1: Second in pair" else : l = "0: First in pair" flags . append ( l ) if x & 256 : l = "1: Not primary alignment" else : l = "0: Primary alignment" flags . append ( l ) if x & 512 : l = "1: Read fails platform/vendor quality checks" else : l = "0: Read passes platform/vendor quality checks" flags . append ( l ) if x & 1024 : l = "1: Read is PCR or optical duplicate" else : l = "0: Read is not PCR or optical duplicate" flags . append ( l ) if x & 2048 : l = "1: Supplementary alignment" else : l = "0: Not supplementary alignment" flags . append ( l ) return flags
Explains a SAM flag .
2,471
def get_bias_details ( self ) : res = 'Bias File Details\n' for b in self . bias_details : if len ( b ) > 2 : res += b [ 0 ] . ljust ( 35 ) res += b [ 1 ] . ljust ( 35 ) res += b [ 2 ] . ljust ( 9 ) res += '\n' return res
returns a string representation of the bias details
2,472
def _read_bias_rating ( self , short_filename ) : res = { } full_name = os . path . join ( root_fldr , 'aikif' , 'data' , 'ref' , short_filename ) lg . record_process ( 'bias.py' , 'reading ' + full_name ) with open ( full_name , 'r' ) as f : for line in f : if line . strip ( '' ) == '' : break bias_line = [ ] cols = line . split ( ',' ) bias_line . extend ( [ short_filename ] ) for col in cols : bias_line . extend ( [ col . strip ( '"' ) . strip ( '\n' ) ] ) self . bias_details . append ( bias_line )
read the bias file based on the short_filename and return as a dictionary
2,473
def get_root_folder ( ) : locations = { 'linux' : { 'hme' : '/home/duncan/' , 'core_folder' : '/home/duncan/dev/src/python/AIKIF' } , 'win32' : { 'hme' : 'T:\\user\\' , 'core_folder' : 'T:\\user\\dev\\src\\python\\AIKIF' } , 'cygwin' : { 'hme' : os . getcwd ( ) + os . sep , 'core_folder' : os . getcwd ( ) } , 'darwin' : { 'hme' : os . getcwd ( ) + os . sep , 'core_folder' : os . getcwd ( ) } } hme = locations [ sys . platform ] [ 'hme' ] core_folder = locations [ sys . platform ] [ 'core_folder' ] if not os . path . exists ( core_folder ) : hme = os . getcwd ( ) core_folder = os . getcwd ( ) print ( 'config.py : running on CI build (or you need to modify the paths in config.py)' ) return hme , core_folder
returns the home folder and program root depending on OS
2,474
def read_credentials ( fname ) : with open ( fname , 'r' ) as f : username = f . readline ( ) . strip ( '\n' ) password = f . readline ( ) . strip ( '\n' ) return username , password
read a simple text file from a private location to get username and password
2,475
def show_config ( ) : res = '' res += '\n---------- Folder Locations ---------\n' for k , v in fldrs . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' res += '\n---------- Logfiles ---------\n' for k , v in logs . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' res += '\n---------- Parameters ---------\n' for k , v in params . items ( ) : res += str ( k ) + ' = ' + str ( v ) + '\n' print ( "\nusage from other programs - returns " + fldr_root ( ) ) return res
module intended to be imported in most AIKIF utils to manage folder paths user settings etc . Modify the parameters at the top of this file to suit
2,476
def filterMotifs ( memeFile , outFile , minSites ) : with open ( memeFile , "r" ) as mF : oldMEME = mF . readlines ( ) newMEME = oldMEME [ : 7 ] i = 7 while i < len ( oldMEME ) : if oldMEME [ i ] . split ( " " ) [ 0 ] == "MOTIF" : print ( oldMEME [ i ] . split ( "\n" ) [ 0 ] , int ( oldMEME [ i + 2 ] . split ( "nsites= " ) [ 1 ] . split ( " " ) [ 0 ] ) ) sys . stdout . flush ( ) if int ( oldMEME [ i + 2 ] . split ( "nsites= " ) [ 1 ] . split ( " " ) [ 0 ] ) > minSites : newMEME . append ( oldMEME [ i ] ) f = i + 1 while oldMEME [ f ] . split ( " " ) [ 0 ] != "MOTIF" : newMEME . append ( oldMEME [ f ] ) f = f + 1 i = i + 1 else : i = i + 1 else : i = i + 1 with open ( outFile , "w+" ) as out : out . write ( "" . join ( newMEME ) ) return newMEME
Selectes motifs from a meme file based on the number of sites .
2,477
def _read_file ( self ) : self . raw = [ ] with open ( self . fname , 'r' ) as f : for line in f : if line . startswith ( '#' ) : pass elif line . strip ( '\n' ) == '' : pass else : self . raw . append ( line . strip ( '\n' ) )
reads the file and cleans into standard text ready for parsing
2,478
def reset ( self ) : try : os . remove ( self . _user_config_file ) except FileNotFoundError : pass for section_name in self . sections ( ) : self . remove_section ( section_name ) self . read_defaults ( )
Restore the default configuration and remove the user s config file .
2,479
def save ( self ) : with open ( self . _user_config_file , 'w' , encoding = 'utf-8' ) as f : self . write ( f )
Write data to user config file .
2,480
def _magic_data ( filename = os . path . join ( here , 'magic_data.json' ) ) : with open ( filename ) as f : data = json . load ( f ) headers = [ _create_puremagic ( x ) for x in data [ 'headers' ] ] footers = [ _create_puremagic ( x ) for x in data [ 'footers' ] ] return headers , footers
Read the magic file
2,481
def _max_lengths ( ) : max_header_length = max ( [ len ( x . byte_match ) + x . offset for x in magic_header_array ] ) max_footer_length = max ( [ len ( x . byte_match ) + abs ( x . offset ) for x in magic_footer_array ] ) return max_header_length , max_footer_length
The length of the largest magic string + its offset
2,482
def _confidence ( matches , ext = None ) : results = [ ] for match in matches : con = ( 0.8 if len ( match . extension ) > 9 else float ( "0.{0}" . format ( len ( match . extension ) ) ) ) if ext == match . extension : con = 0.9 results . append ( PureMagicWithConfidence ( confidence = con , ** match . _asdict ( ) ) ) return sorted ( results , key = lambda x : x . confidence , reverse = True )
Rough confidence based on string length and file extension
2,483
def _identify_all ( header , footer , ext = None ) : matches = list ( ) for magic_row in magic_header_array : start = magic_row . offset end = magic_row . offset + len ( magic_row . byte_match ) if end > len ( header ) : continue if header [ start : end ] == magic_row . byte_match : matches . append ( magic_row ) for magic_row in magic_footer_array : start = magic_row . offset if footer [ start : ] == magic_row . byte_match : matches . append ( magic_row ) if not matches : raise PureError ( "Could not identify file" ) return _confidence ( matches , ext )
Attempt to identify data by its magic numbers
2,484
def _magic ( header , footer , mime , ext = None ) : if not header : raise ValueError ( "Input was empty" ) info = _identify_all ( header , footer , ext ) [ 0 ] if mime : return info . mime_type return info . extension if not isinstance ( info . extension , list ) else info [ 0 ] . extension
Discover what type of file it is based on the incoming string
2,485
def _file_details ( filename ) : max_head , max_foot = _max_lengths ( ) with open ( filename , "rb" ) as fin : head = fin . read ( max_head ) try : fin . seek ( - max_foot , os . SEEK_END ) except IOError : fin . seek ( 0 ) foot = fin . read ( ) return head , foot
Grab the start and end of the file
2,486
def ext_from_filename ( filename ) : try : base , ext = filename . lower ( ) . rsplit ( "." , 1 ) except ValueError : return '' ext = ".{0}" . format ( ext ) all_exts = [ x . extension for x in chain ( magic_header_array , magic_footer_array ) ] if base [ - 4 : ] . startswith ( "." ) : long_ext = base [ - 4 : ] + ext if long_ext in all_exts : return long_ext return ext
Scan a filename for it s extension .
2,487
def from_file ( filename , mime = False ) : head , foot = _file_details ( filename ) return _magic ( head , foot , mime , ext_from_filename ( filename ) )
Opens file attempts to identify content based off magic number and will return the file extension . If mime is True it will return the mime type instead .
2,488
def from_string ( string , mime = False , filename = None ) : head , foot = _string_details ( string ) ext = ext_from_filename ( filename ) if filename else None return _magic ( head , foot , mime , ext )
Reads in string attempts to identify content based off magic number and will return the file extension . If mime is True it will return the mime type instead . If filename is provided it will be used in the computation .
2,489
def retrieve_GTF_field ( field , gtf ) : inGTF = gtf . copy ( ) def splits ( x ) : l = x . split ( ";" ) l = [ s . split ( " " ) for s in l ] res = np . nan for s in l : if field in s : if '"' in s [ - 1 ] : res = s [ - 1 ] [ 1 : - 1 ] else : res = s [ - 1 ] return res inGTF [ field ] = inGTF [ 'attribute' ] . apply ( lambda x : splits ( x ) ) return inGTF [ [ field ] ]
Returns a field of choice from the attribute column of the GTF
2,490
def attributesGTF ( inGTF ) : df = pd . DataFrame ( inGTF [ 'attribute' ] . str . split ( ";" ) . tolist ( ) ) desc = [ ] for i in df . columns . tolist ( ) : val = df [ [ i ] ] . dropna ( ) val = pd . DataFrame ( val [ i ] . str . split ( ' "' ) . tolist ( ) ) [ 0 ] val = list ( set ( val ) ) for v in val : if len ( v ) > 0 : l = v . split ( " " ) if len ( l ) > 1 : l = l [ 1 ] else : l = l [ 0 ] desc . append ( l ) desc = list ( set ( desc ) ) finaldesc = [ ] for d in desc : if len ( d ) > 0 : finaldesc . append ( d ) return finaldesc
List the type of attributes in a the attribute section of a GTF file
2,491
def parseGTF ( inGTF ) : desc = attributesGTF ( inGTF ) ref = inGTF . copy ( ) ref . reset_index ( inplace = True , drop = True ) df = ref . drop ( [ 'attribute' ] , axis = 1 ) . copy ( ) for d in desc : field = retrieve_GTF_field ( d , ref ) df = pd . concat ( [ df , field ] , axis = 1 ) return df
Reads an extracts all attributes in the attributes section of a GTF and constructs a new dataframe wiht one collumn per attribute instead of the attributes column
2,492
def writeGTF ( inGTF , file_path ) : cols = inGTF . columns . tolist ( ) if len ( cols ) == 9 : if 'attribute' in cols : df = inGTF else : df = inGTF [ cols [ : 8 ] ] df [ 'attribute' ] = "" for c in cols [ 8 : ] : if c == cols [ len ( cols ) - 1 ] : df [ 'attribute' ] = df [ 'attribute' ] + c + ' "' + inGTF [ c ] . astype ( str ) + '";' else : df [ 'attribute' ] = df [ 'attribute' ] + c + ' "' + inGTF [ c ] . astype ( str ) + '"; ' df . to_csv ( file_path , sep = "\t" , header = None , index = None , quoting = csv . QUOTE_NONE )
Write a GTF dataframe into a file
2,493
def GTFtoBED ( inGTF , name ) : bed = inGTF . copy ( ) bed . reset_index ( inplace = True , drop = True ) if name not in bed . columns . tolist ( ) : field = retrieve_GTF_field ( name , bed ) bed = pd . concat ( [ bed , field ] , axis = 1 ) bed = bed [ [ 'seqname' , 'start' , 'end' , name , 'score' , 'strand' ] ] bed . columns = [ 'chrom' , 'chromStart' , 'chromEnd' , 'name' , 'score' , 'strand' ] bed . drop_duplicates ( inplace = True ) bed . reset_index ( inplace = True , drop = True ) return bed
Transform a GTF dataframe into a bed dataframe
2,494
def MAPGenoToTrans ( parsedGTF , feature ) : GenTransMap = parsedGTF [ parsedGTF [ "feature" ] == feature ] def getExonsPositions ( df ) : start = int ( df [ "start" ] ) stop = int ( df [ "end" ] ) strand = df [ "strand" ] r = range ( start , stop + 1 ) if strand == "-" : r . sort ( reverse = True ) r = [ str ( s ) for s in r ] return "," . join ( r ) GenTransMap [ "feature_bases" ] = GenTransMap . apply ( getExonsPositions , axis = 1 ) GenTransMap = GenTransMap . sort_values ( by = [ "transcript_id" , "exon_number" ] , ascending = True ) def CombineExons ( df ) : return pd . Series ( dict ( feature_bases = ',' . join ( df [ 'feature_bases' ] ) ) ) GenTransMap = GenTransMap . groupby ( "transcript_id" ) . apply ( CombineExons ) GenTransMap = GenTransMap . to_dict ( ) . get ( "feature_bases" ) return GenTransMap
Gets all positions of all bases in an exon
2,495
def GetTransPosition ( df , field , dic , refCol = "transcript_id" ) : try : gen = str ( int ( df [ field ] ) ) transid = df [ refCol ] bases = dic . get ( transid ) . split ( "," ) bases = bases . index ( str ( gen ) ) + 1 except : bases = np . nan return bases
Maps a genome position to transcript positon
2,496
def get_protected_page ( url , user , pwd , filename ) : import requests r = requests . get ( url , auth = ( user , pwd ) ) print ( r . status_code ) if r . status_code == 200 : print ( 'success' ) with open ( filename , 'wb' ) as fd : for chunk in r . iter_content ( 4096 ) : fd . write ( chunk ) lg . record_result ( "Success - downloaded " + url ) else : lg . record_result ( 'network_tools.get_protected_page:Failed to downloaded ' + url + ' (status code = ' + str ( r . status_code ) + ')' )
having problems with urllib on a specific site so trying requests
2,497
def read_rawFilesTable ( filename ) : exp = pd . read_table ( filename ) expected_columns = { 'File' , 'Exists' , 'Size' , 'Data format' , 'Parameter group' , 'Experiment' , 'Fraction' } found_columns = set ( exp . columns ) if len ( expected_columns - found_columns ) > 0 : message = '\n' . join ( [ 'The raw files table has the wrong format!' , 'It should contain columns:' , ', ' . join ( sorted ( expected_columns ) ) , 'Found columns:' , ', ' . join ( sorted ( found_columns ) ) ] ) raise ValueError ( message ) exp [ 'Raw file' ] = exp [ 'File' ] . apply ( path . basename ) . apply ( path . splitext ) . str . get ( 0 ) exp [ 'Experiment' ] = exp [ 'Experiment' ] . astype ( str ) return exp
parse the rawFilesTable . txt file into a pandas dataframe
2,498
def add_method ( self , m , ** kwargs ) : if isinstance ( m , types . FunctionType ) : self [ 'function' , id ( m ) ] = m else : f , obj = get_method_vars ( m ) wrkey = ( f , id ( obj ) ) self [ wrkey ] = obj
Add an instance method or function
2,499
def del_method ( self , m ) : if isinstance ( m , types . FunctionType ) and not iscoroutinefunction ( m ) : wrkey = ( 'function' , id ( m ) ) else : f , obj = get_method_vars ( m ) wrkey = ( f , id ( obj ) ) if wrkey in self : del self [ wrkey ]
Remove an instance method or function if it exists