idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
51,100
def contrast_rms(data, *kwargs):
    """Compute the RMS contrast of an image.

    RMS contrast is the square root of the mean squared deviation of the
    pixel values from their average, i.e. the standard deviation of the
    image intensities.  Extra positional arguments are forwarded to
    ``np.average``.
    """
    mean_value = np.average(data, *kwargs)
    n_pixels = data.shape[0] * data.shape[1]
    squared_deviation = np.square(data - mean_value)
    return np.sqrt(np.sum(squared_deviation) / n_pixels)
Compute RMS contrast norm of an image
51,101
def spectral(data, lambd, *kwargs):
    """Compute the spectral contrast of an image.

    The image is Fourier-transformed, the DC component zeroed, and all
    angular frequencies above ``kmax = pi / lambd`` removed; the contrast is
    the sum of log-magnitudes of the remaining spectrum, normalized by the
    square root of the pixel count.

    NOTE(review): ``kx`` is built from ``data.shape[0]`` but reshaped to a
    row vector while ``ky`` uses ``data.shape[1]`` as a column vector — for
    non-square images the axes look swapped; confirm the intended frequency
    convention.  ``*kwargs`` is accepted but unused.
    """
    fftdata = np.fft.fftn(data)
    # Zero the DC component so the mean intensity does not dominate the sum.
    fftdata[0, 0] = 0
    kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(1, -1)
    ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(-1, 1)
    # Cutoff frequency corresponding to wavelength lambd (factor 2: Nyquist-style).
    kmax = (2 * np.pi) / (2 * lambd)
    fftdata[np.where(kx ** 2 + ky ** 2 > kmax ** 2)] = 0
    spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))
    return spec
Compute spectral contrast of image
51,102
def write(s, path, encoding="utf-8"):
    """Write string *s* to the file at *path*.

    The text is encoded with *encoding*; when the target path is recognized
    as a gzip file (via ``is_gzip_file``), the payload is zlib-compressed
    before being written.
    """
    payload = s.encode(encoding)
    if is_gzip_file(path):
        payload = zlib.compress(payload)
    with open(path, "wb") as handle:
        handle.write(payload)
Write string to text file .
51,103
def read(path, encoding="utf-8"):
    """Read and return the text content of the file at *path*.

    When the path is recognized as a gzip file (via ``is_gzip_file``), the
    raw bytes are zlib-decompressed before decoding with *encoding*.
    """
    compressed = is_gzip_file(path)
    with open(path, "rb") as handle:
        raw = handle.read()
    if compressed:
        raw = zlib.decompress(raw)
    return raw.decode(encoding)
Read string from text file .
51,104
def smartread(path):
    """Read text from a file, auto-detecting its encoding (requires chardet)."""
    with open(path, "rb") as handle:
        raw = handle.read()
    detected = chardet.detect(raw)
    return raw.decode(detected["encoding"])
Read text from a file, automatically detecting its encoding (requires chardet).
51,105
def to_utf8(path, output_path=None):
    """Convert a text file of any encoding to UTF-8.

    When *output_path* is omitted, the result is written next to the input
    with an ``-UTF8Encode`` suffix inserted before the extension.
    """
    if output_path is None:
        stem, extension = os.path.splitext(path)
        output_path = stem + "-UTF8Encode" + extension
    write(smartread(path), output_path)
Convert any text file to utf8 encoding .
51,106
def load_cache(self):
    """Load previously cached Zotero data from ``self.cache_path``.

    Restores the reference list, reference types and reference templates
    that were stored by ``cache()``.
    """
    with open(self.cache_path, "rb") as handle:
        print("Loading cached Zotero data...")
        cached = pickle.load(handle)
    self._references = cached[self.CACHE_REFERENCE_LIST]
    self.reference_types = cached[self.CACHE_REFERENCE_TYPES]
    self.reference_templates = cached[self.CACHE_REFERENCE_TEMPLATES]
    print("Cached Zotero data loaded.")
Load the cached Zotero data .
51,107
def load_distant(self):
    """Fetch references, types and templates from the Zotero server, then cache them."""
    print("Loading distant Zotero data...")
    references = self.get_references()
    ref_types = self.get_reference_types()
    templates = self.get_reference_templates(ref_types)
    self._references = references
    self.reference_types = ref_types
    self.reference_templates = templates
    print("Distant Zotero data loaded.")
    self.cache()
Load the distant Zotero data .
51,108
def cache(self):
    """Pickle the current Zotero data (references, types, templates) to ``self.cache_path``."""
    snapshot = {
        self.CACHE_REFERENCE_LIST: self._references,
        self.CACHE_REFERENCE_TYPES: self.reference_types,
        self.CACHE_REFERENCE_TEMPLATES: self.reference_templates,
    }
    with open(self.cache_path, "wb") as handle:
        pickle.dump(snapshot, handle)
Cache the Zotero data .
51,109
def create_distant_reference(self, ref_data):
    """Validate *ref_data*, create the item in Zotero and return the created item.

    Raises CreateZoteroItemError when the server response does not contain
    the newly created item.
    """
    self.validate_reference_data(ref_data)
    creation_status = self._zotero_lib.create_items([ref_data])
    try:
        return creation_status["successful"]["0"]
    except KeyError as e:
        print(creation_status)
        raise CreateZoteroItemError from e
Validate and create the reference in Zotero and return the created item .
51,110
def update_local_reference(self, index, ref):
    """Replace the reference at *index* in the local reference list and persist the cache."""
    self._references[index] = ref
    self.cache()
Replace the reference in the reference list and cache it .
51,111
def update_distant_reference(self, ref):
    """Validate the reference's data and push the update to Zotero."""
    self.validate_reference_data(ref["data"])
    self._zotero_lib.update_item(ref)
Validate and update the reference in Zotero .
51,112
def validate_reference_data(self, ref_data):
    """Validate *ref_data* against Zotero's item schema.

    Raises InvalidZoteroItemError (chained from the library's
    InvalidItemFields) when validation fails.
    """
    try:
        self._zotero_lib.check_items([ref_data])
    except InvalidItemFields as e:
        raise InvalidZoteroItemError from e
Validate the reference data .
51,113
def get_reference_types(self):
    """Return all Zotero item types, sorted alphabetically."""
    return sorted(entry["itemType"] for entry in self._zotero_lib.item_types())
Return the reference types .
51,114
def get_reference_templates(self, ref_types):
    """Return an ordered mapping from each type in *ref_types* to its template."""
    templates = OrderedDict()
    for ref_type in ref_types:
        templates[ref_type] = self.get_reference_template(ref_type)
    return templates
Return the reference templates for the types as an ordered dictionary .
51,115
def get_reference_template(self, ref_type):
    """Return the template for *ref_type* with its fields sorted by field name."""
    template = self._zotero_lib.item_template(ref_type)
    sorted_fields = sorted(template.items(), key=lambda item: item[0])
    return OrderedDict(sorted_fields)
Return the reference template for the type as an ordered dictionary .
51,116
def reference_extra_field(self, field, index):
    """Return the value of *field* stored in the reference's 'extra' text.

    Extra lines have the form ``Field: value``; returns '' when the field
    is absent.
    """
    prefix = field + ":"
    for line in self.reference_data(index)["extra"].split("\n"):
        if line.startswith(prefix):
            return line.replace(prefix, "", 1).strip()
    return ""
Return the value of the field from the reference's "extra" data, or an empty string otherwise.
51,117
def reference_doi(self, index):
    """Return the reference DOI.

    Prefers the dedicated 'DOI' field and falls back to the 'DOI' entry of
    the 'extra' field when the dedicated field is missing *or empty*.  The
    original used ``dict.get("DOI", fallback)``, which (a) eagerly computed
    the fallback on every call and (b) never fell back for items whose
    template pre-fills ``"DOI": ""``.
    """
    doi = self.reference_data(index).get("DOI")
    if doi:
        return doi
    return self.reference_extra_field("DOI", index)
Return the reference DOI .
51,118
def reference_year(self, index):
    """Return the publication year of the reference.

    Parses the reference date; when full parsing fails, falls back to the
    first four-digit group in the string, and to '' when none exists.
    """
    date_text = self.reference_date(index)
    try:
        return parse(date_text).year
    except ValueError:
        four_digits = re.search(r"\d{4}", date_text)
        return int(four_digits.group()) if four_digits else ""
Return the reference publication year .
51,119
def reference_journal(self, index):
    """Return the journal name, or '(<type>)' for non-journal references."""
    ref_type = self.reference_type(index)
    if ref_type != "journalArticle":
        return "({})".format(ref_type)
    return self.reference_data(index)["publicationTitle"]
Return the reference journal name .
51,120
def reference_index(self, ref_id):
    """Return the index of the first reference whose ID equals *ref_id*.

    Raises ReferenceNotFoundError when no reference matches.
    """
    matches = (i for i in range(self.reference_count())
               if self.reference_id(i) == ref_id)
    try:
        return next(matches)
    except StopIteration as exc:
        raise ReferenceNotFoundError("ID: " + ref_id) from exc
Return the first reference with this ID .
51,121
def computePDFSimilarity(paperId, userPDF):
    """Compute the TF-IDF cosine similarity between a user-supplied PDF and
    the stored text of the paper *paperId*.

    Returns None when the paper is not in the database; otherwise a float in
    [0, 1].  Text is normalized by removing punctuation, lowercasing and
    Porter-stemming.  Requires ``pdftotext`` on PATH, plus nltk and sklearn.

    NOTE(review): uses fixed names "temp.pdf"/"temp.txt" in the CWD — not
    safe for concurrent calls; files are also leaked if pdftotext fails.
    """
    if not isPDFInDb(paperId):
        return None
    # Convert the uploaded PDF to plain text via the pdftotext CLI.
    userPDF.save("temp.pdf")
    check_call(['pdftotext', '-enc', 'UTF-8', "temp.pdf", "temp.txt"])
    os.remove("temp.pdf")
    a = open("temp.txt", 'r').read()
    b = open(join(dbPath, paperId) + ".txt", 'r').read()
    # Heavy deps imported lazily so the module loads without them.
    import nltk, string
    from sklearn.feature_extraction.text import TfidfVectorizer
    stemmer = nltk.stem.porter.PorterStemmer()
    remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
    def stem_tokens(tokens):
        # Porter-stem each token.
        return [stemmer.stem(item) for item in tokens]
    def normalize(text):
        # lowercase -> strip punctuation -> tokenize -> stem
        return stem_tokens(nltk.word_tokenize(text.lower().translate(remove_punctuation_map)))
    vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words='english')
    def cosine_sim(text1, text2):
        tfidf = vectorizer.fit_transform([text1, text2])
        # Off-diagonal entry of the 2x2 gram matrix is the cosine similarity.
        return ((tfidf * tfidf.T).A)[0, 1]
    similarity = cosine_sim(a, b)
    os.remove("temp.txt")
    return similarity
Compute the TF-IDF cosine similarity between the user's PDF and the stored paper text (normalizing by removing punctuation, lowercasing, and stemming).
51,122
def install_plugins(plugins, app, install_type, config):
    """Discover and install the named plugins onto *app*.

    For every name in *plugins*, loads all setuptools entry points in the
    group ``backendai_<name>_v10`` (skipping those listed in
    ``config.disable_plugins``), registers each plugin in a PluginRegistry,
    and attaches the registry to *app* either as an attribute
    (``install_type == 'attr'``) or a mapping item (``'dict'``).

    Raises ValueError for any other *install_type*.
    """
    # config.disable_plugins may be absent or falsy — normalize to [].
    try:
        disable_plugins = config.disable_plugins
        if not disable_plugins:
            disable_plugins = []
    except AttributeError:
        disable_plugins = []
    for plugin_name in plugins:
        plugin_group = f'backendai_{plugin_name}_v10'
        registry = PluginRegistry(plugin_name)
        for entrypoint in pkg_resources.iter_entry_points(plugin_group):
            if entrypoint.name in disable_plugins:
                continue
            log.info('Installing plugin: {}.{}', plugin_group, entrypoint.name)
            plugin_module = entrypoint.load()
            # Each plugin module must expose a get_plugin(config) factory.
            plugin = getattr(plugin_module, 'get_plugin')(config)
            registry.register(plugin)
        if install_type == 'attr':
            setattr(app, plugin_name, registry)
        elif install_type == 'dict':
            assert isinstance(app, typing.MutableMapping), (
                f"app must be an instance of MutableMapping "
                f"for 'dict' install_type.")
            app[plugin_name] = registry
        else:
            raise ValueError(f'Invalid install type: {install_type}')
Automatically install plugins to the app .
51,123
def load_cell(fname="HL60_field.zip"):
    """Load a zipped pair of real/imag text arrays from the package's
    ``data`` directory and return them as one complex field array.

    The archive is expected to contain one file whose name contains "imag"
    and one containing "real".  NOTE(review): if either file is missing,
    ``real``/``imag`` is unbound and this raises NameError — confirm the
    archives always ship both parts.
    """
    here = op.dirname(op.abspath(__file__))
    data = op.join(here, "data")
    arc = zipfile.ZipFile(op.join(data, fname))
    for f in arc.filelist:
        with arc.open(f) as fd:
            if f.filename.count("imag"):
                imag = np.loadtxt(fd)
            elif f.filename.count("real"):
                real = np.loadtxt(fd)
    field = real + 1j * imag
    return field
Load zip file and return complex field
51,124
def bootstrap(nside, rand, nbar, *data):
    """Bootstrap *data* based on the sky coverage of *rand*.

    The random catalog (RA, DEC) is split into healpix pixels of resolution
    *nside*; pixels are then greedily merged (smallest-area-first via a heap)
    into chunks of roughly ``Abar`` square degrees each, where
    ``Abar = 41252.96 / npix`` is the mean pixel area.  Yields
    ``[area, rand_chunk, data_chunk...]`` per merged chunk.  Not a
    traditional bootstrap, but gives the correct answer for correlation
    functions with less computation.
    """
    def split(data, indices, axis):
        # Slice `data` along `axis` at the given cumulative indices.
        s = []
        s.append(slice(0, indices[0]))
        for i in range(len(indices) - 1):
            s.append(slice(indices[i], indices[i + 1]))
        s.append(slice(indices[-1], None))
        rt = []
        for ss in s:
            ind = [slice(None, None, None) for i in range(len(data.shape))]
            ind[axis] = ss
            ind = tuple(ind)
            rt.append(data[ind])
        return rt
    def hpsplit(nside, data):
        # Sort (RA, DEC) by healpix pixel and split into per-pixel chunks.
        RA, DEC = data
        pix = radec2pix(nside, RA, DEC)
        n = numpy.bincount(pix)
        a = numpy.argsort(pix)
        data = numpy.array(data)[:, a]
        rt = split(data, n.cumsum(), axis=-1)
        return rt
    # Mean area of one healpix pixel in square degrees (41252.96 = full sky).
    Abar = 41252.96 / nside2npix(nside)
    rand = hpsplit(nside, rand)
    if len(data) > 0:
        data = [list(i) for i in zip(*[hpsplit(nside, d1) for d1 in data])]
    else:
        data = [[] for i in range(len(rand))]
    heap = []
    j = 0  # tie-breaker so heap never compares arrays
    for r, d in zip(rand, data):
        if len(r[0]) == 0:
            continue
        # Effective area of this pixel, estimated from the random density.
        a = 1.0 * len(r[0]) / nbar
        j = j + 1
        if len(heap) == 0:
            heapq.heappush(heap, (a, j, r, d))
        else:
            a0, j0, r0, d0 = heapq.heappop(heap)
            if a0 + a < Abar:
                # Merge this pixel into the smallest existing chunk.
                a0 += a
                d0 = [numpy.concatenate((d0[i], d[i]), axis=-1) for i in range(len(d))]
                r0 = numpy.concatenate((r0, r), axis=-1)
            else:
                heapq.heappush(heap, (a, j, r, d))
            heapq.heappush(heap, (a0, j0, r0, d0))
    for i in range(len(heap)):
        area, j, r, d = heapq.heappop(heap)
        rt = [area, r] + d
        yield rt
This function will bootstrap data based on the sky coverage of rand . It is different from bootstrap in the traditional sense but for correlation functions it gives the correct answer with less computation .
51,125
def load_project_flag_list_file(self, project_exceptions, project):
    """Load project-specific exception lists and merge them into the global
    flag list ``fl``.

    *project_exceptions* is a list of {project: exception_file} mappings.
    Idempotent: does nothing once ``self.loaded`` is set.

    NOTE(review): the original indentation was lost; this reconstruction
    assumes the ``if exception_file is not None`` check sits after the
    search loop — confirm against upstream.  Also note the error path calls
    ``sys.exit(0)`` (success exit code) after logging an error.
    """
    if self.loaded:
        return
    exception_file = None
    for item in project_exceptions:
        if project in item:
            exception_file = item.get(project)
    if exception_file is not None:
        try:
            with open(exception_file, 'r') as f:
                ex = yaml.safe_load(f)
        except IOError:
            logger.error('File not found: %s', exception_file)
            sys.exit(0)
        # Merge each section of the project file into the global flag list.
        for key in ex:
            if key in fl:
                fl[key][project] = _merge(fl[key][project], ex.get(key, None)) \
                    if project in fl[key] else ex.get(key, None)
        self.loaded = True
    else:
        logger.info('%s not found in %s', project, ignore_list)
        logger.info('No project specific exceptions will be applied')
Loads project specific lists
51,126
def binary_hash(self, project, patch_file):
    """Return the expected sha256 hash for *patch_file* from the binary
    lists, checking the project-specific exception file first and the
    global ignore list ``il`` second; '' when no hash is known.

    NOTE(review): indentation was lost in extraction; this reconstruction
    mirrors the anteater-style original but the nesting of the fallback
    branches should be confirmed upstream.
    """
    global il
    exception_file = None
    try:
        project_exceptions = il.get('project_exceptions')
    except KeyError:
        logger.info('project_exceptions missing in %s for %s', ignore_list, project)
    for project_files in project_exceptions:
        if project in project_files:
            exception_file = project_files.get(project)
            with open(exception_file, 'r') as f:
                bl = yaml.safe_load(f)
            for key, value in bl.items():
                if key == 'binaries':
                    if patch_file in value:
                        hashvalue = value[patch_file]
                        return hashvalue
                    else:
                        # Fall back to the global binary list.
                        for key, value in il.items():
                            if key == 'binaries':
                                if patch_file in value:
                                    hashvalue = value[patch_file]
                                    return hashvalue
                                else:
                                    hashvalue = ""
                                    return hashvalue
        else:
            logger.info('%s not found in %s', project, ignore_list)
            logger.info('No project specific exceptions will be applied')
            hashvalue = ""
            return hashvalue
Gathers sha256 hashes from binary lists
51,127
def file_audit_list(self, project):
    """Build the file-name audit regexes.

    Returns ``(default_re, project_re)`` — case-insensitive alternation
    patterns compiled from the global ``fl['file_audits']['file_names']``
    list and from the project-specific list (an always-matching empty
    pattern when the project has none).
    """
    project_list = False
    self.load_project_flag_list_file(il.get('project_exceptions'), project)
    try:
        default_list = set((fl['file_audits']['file_names']))
    except KeyError:
        logger.error('Key Error processing file_names list values')
    try:
        project_list = set((fl['file_audits'][project]['file_names']))
        logger.info('Loaded %s specific file_audits entries', project)
    except KeyError:
        logger.info('No project specific file_names section for project %s', project)
    file_names_re = re.compile("|".join(default_list), flags=re.IGNORECASE)
    if project_list:
        file_names_proj_re = re.compile("|".join(project_list), flags=re.IGNORECASE)
        return file_names_re, file_names_proj_re
    else:
        # Empty pattern matches everything — callers rely on the tuple shape.
        file_names_proj_re = re.compile("")
        return file_names_re, file_names_proj_re
Gathers file name lists
51,128
def file_content_list(self, project):
    """Build the file-content audit lists.

    Returns ``(flag_list, ignore_list_re)`` where *flag_list* is the global
    content-flag mapping and *ignore_list_re* is a case-insensitive regex of
    strings to ignore, extended with project-specific entries when present.
    """
    project_list = False
    self.load_project_flag_list_file(il.get('project_exceptions'), project)
    try:
        flag_list = (fl['file_audits']['file_contents'])
    except KeyError:
        logger.error('Key Error processing file_contents list values')
    try:
        ignore_list = il['file_audits']['file_contents']
    except KeyError:
        logger.error('Key Error processing file_contents list values')
    try:
        project_list = fl['file_audits'][project]['file_contents']
        logger.info('Loaded %s specific file_contents entries', project)
    except KeyError:
        logger.info('No project specific file_contents section for project %s', project)
    if project_list:
        ignore_list_merge = project_list + ignore_list
        ignore_list_re = re.compile("|".join(ignore_list_merge), flags=re.IGNORECASE)
        return flag_list, ignore_list_re
    else:
        ignore_list_re = re.compile("|".join(ignore_list), flags=re.IGNORECASE)
        return flag_list, ignore_list_re
gathers content strings
51,129
def ignore_directories(self, project):
    """Return the list of directories to ignore: the global
    ``il['ignore_directories']`` plus any project-specific entries read from
    the project's exception file."""
    project_list = False
    try:
        ignore_directories = il['ignore_directories']
    except KeyError:
        logger.error('Key Error processing ignore_directories list values')
    try:
        project_exceptions = il.get('project_exceptions')
        for item in project_exceptions:
            if project in item:
                exception_file = item.get(project)
                with open(exception_file, 'r') as f:
                    test_list = yaml.safe_load(f)
                    project_list = test_list['ignore_directories']
    except KeyError:
        logger.info('No ignore_directories for %s', project)
    if project_list:
        ignore_directories = ignore_directories + project_list
        return ignore_directories
    else:
        return ignore_directories
Gathers a list of directories to ignore
51,130
def download_from_github(fname, path):
    """Download *fname* from the ImagingReso GitHub reference-data folder,
    unzip it into *path*, and delete the downloaded archive.

    Prompts the user when a local file of a different size already exists.

    NOTE(review): indentation was lost in extraction.  As reconstructed, the
    "skip" and "keep without overwriting" branches only print and then fall
    through to re-download anyway — confirm whether the original returned
    early there.
    """
    base_url = 'https://github.com/ornlneutronimaging/ImagingReso/blob/master/ImagingReso/reference_data/'
    f = fname + '?raw=true'
    url = base_url + f
    block_size = 16384
    req = urlopen(url)
    # Content length retrieval differs between Python 2 and 3.
    if sys.version_info[0] < 3:
        file_size = int(req.info().getheaders('Content-Length')[0])
    else:
        file_size = req.length
    if os.path.exists(fname):
        if os.path.getsize(fname) == file_size:
            print("Skipping downloading '{}'".format(fname))
        else:
            overwrite = input("File size changed, overwrite '{}'? ([y]/n) ".format(fname))
            if overwrite.lower().startswith('n'):
                print("Local file '{}' kept without overwriting.".format(fname))
    print("Downloading '{}'... ".format(fname))
    with open(fname, 'wb') as fh:
        while True:
            chunk = req.read(block_size)
            if not chunk:
                break
            fh.write(chunk)
    print('')
    print('Download completed.')
    print("Unzipping '{}'... ".format(fname))
    _database_zip = zipfile.ZipFile(fname)
    _database_zip.extractall(path=path)
    print("'{}' has been unzipped and database '{}' is ready to use.".format(fname, fname.replace('.zip', '')))
    os.remove(fname)
    print("'{}' has been deleted".format(fname))
Download database from GitHub
51,131
def get_list_element_from_database(database='ENDF_VII'):
    """Return a sorted list of element symbols available in *database*.

    Downloads the database from GitHub on first use, derives the element
    list from the ``*.csv``/``*.h5`` file names, caches it in
    ``_elements_list.csv``, and reads the cache on later calls.

    Raises ValueError when the database folder contains no data files.
    """
    _file_path = os.path.abspath(os.path.dirname(__file__))
    _ref_data_folder = os.path.join(_file_path, 'reference_data')
    _database_folder = os.path.join(_ref_data_folder, database)
    if not os.path.exists(_ref_data_folder):
        os.makedirs(_ref_data_folder)
        print("Folder to store database files has been created: '{}'".format(_ref_data_folder))
    if not os.path.exists(_database_folder):
        print("First time using database '{}'? ".format(database))
        print("I will retrieve and store a local copy of database'{}': ".format(database))
        download_from_github(fname=database + '.zip', path=_ref_data_folder)
    if not os.path.exists(_database_folder + '/_elements_list.csv'):
        _list_files = glob.glob(_database_folder + '/*.csv')
        if not _list_files:
            _list_files = glob.glob(_database_folder + '/*.h5')
        _empty_list_boo = not _list_files
        if _empty_list_boo is True:
            raise ValueError("'{}' does not contain any '*.csv' or '*.h5' file.".format(_database_folder))
        _list_short_filename_without_extension = [os.path.splitext(os.path.basename(_file))[0] for _file in _list_files]
        # Two file-naming schemes: "El-### .csv" (split on '-') or "El###"
        # (split letters from digits, keep 1-2 letter element symbols).
        if '-' in _list_short_filename_without_extension[0]:
            _list_element = list(set([_name.split('-')[0] for _name in _list_short_filename_without_extension]))
        else:
            _list_letter_part = list(set([re.split(r'(\d+)', _name)[0] for _name in _list_short_filename_without_extension]))
            _list_element = []
            for each_letter_part in _list_letter_part:
                if len(each_letter_part) <= 2:
                    _list_element.append(each_letter_part)
        _list_element.sort()
        # Cache the derived list for subsequent calls.
        df_to_save = pd.DataFrame()
        df_to_save['elements'] = _list_element
        df_to_save.to_csv(_database_folder + '/_elements_list.csv')
    else:
        df_to_read = pd.read_csv(_database_folder + '/_elements_list.csv')
        _list_element = list(df_to_read['elements'])
    return _list_element
return a string array of all the element from the database
51,132
def get_sigma(database_file_name='', e_min=np.nan, e_max=np.nan, e_step=np.nan, t_kelvin=None):
    """Retrieve the energy and cross-section (sigma) axes for an isotope.

    Returns ``{'energy_eV': ..., 'sigma_b': ...}`` interpolated over
    [e_min, e_max] with step e_step.

    Raises ValueError when *t_kelvin* is given (Doppler broadening is not
    supported yet) and IOError when the file is not a '.csv'.

    Changes: ``np.NaN`` defaults replaced by ``np.nan`` (the ``NaN`` alias
    was removed in NumPy 2.0 — same value, same interface); fixed the typo
    "in not yet supported" in the error message; flattened into guard
    clauses.
    """
    if t_kelvin is not None:
        raise ValueError("Doppler broadened cross-section is not yet supported in current version.")
    file_extension = os.path.splitext(database_file_name)[1]
    if file_extension != '.csv':
        raise IOError("Cross-section File type must be '.csv'")
    _df = get_database_data(file_name=database_file_name)
    _dict = get_interpolated_data(df=_df, e_min=e_min, e_max=e_max, e_step=e_step)
    return {'energy_eV': _dict['x_axis'], 'sigma_b': _dict['y_axis']}
retrieve the Energy and sigma axis for the given isotope
51,133
def temp_repo(url, branch, commit=''):
    """Clone a git repository into a temporary folder, yield the folder,
    then delete the folder.

    When *commit* is given, that commit is checked out after cloning.
    The cleanup now runs in a ``finally`` block, so the temporary folder is
    also removed when the clone/checkout fails or the consumer raises while
    the generator is suspended (the original leaked it in those cases).
    """
    tmp_folder = tempfile.mkdtemp()
    try:
        git.Repo.clone_from(url, tmp_folder, branch=branch)
        if commit:
            git_cmd = git.Git(tmp_folder)
            git_cmd.checkout(commit)
        yield tmp_folder
    finally:
        shutil.rmtree(tmp_folder)
Clone a git repository inside a temporary folder yield the folder then delete the folder .
51,134
def force_move(source, destination):
    """Move *source* into *destination*, replacing any same-named entry
    already present there.

    Raises RuntimeError when *destination* does not exist.
    """
    if not os.path.exists(destination):
        raise RuntimeError(
            'The code could not be moved to {destination} '
            'because the folder does not exist'.format(destination=destination))
    target = os.path.join(destination, os.path.split(source)[-1])
    if os.path.exists(target):
        shutil.rmtree(target)
    shutil.move(source, destination)
Force the move of the source inside the destination even if the destination has already a folder with the name inside . In the case the folder will be replaced .
51,135
def _run_command_inside_folder(command, folder):
    """Run *command* (a whitespace-separated string) with *folder* as the
    working directory and return ``(returncode, stdout_bytes)``."""
    logger.debug("command: %s", command)
    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, cwd=folder)
    stdout_data, _stderr = process.communicate()
    logger.debug("%s stdout: %s (RC %s)", command, stdout_data, process.returncode)
    return process.returncode, stdout_data
Run a command inside the given folder .
51,136
def parse_url(url):
    """Render the mustache template *url*, substituting each ``{{VAR}}``
    placeholder with the value of the environment variable VAR.

    Raises KeyError when a referenced variable is not set in the
    environment.
    """
    # Python 2 compatibility: coerce to unicode; on Python 3 `unicode` does
    # not exist and the NameError branch keeps the str unchanged.
    try:
        url = unicode(url)
    except NameError:
        url = url
    parsed = pystache.parse(url)
    # NOTE(review): relies on pystache's private `_parse_tree`; may break on
    # pystache upgrades.
    variables = (element.key for element in parsed._parse_tree if isinstance(element, _EscapeNode))
    return pystache.render(url, {variable: os.environ[variable] for variable in variables})
Parse the given url and update it with environment value if required .
51,137
def _move_modules ( self , temp_repo , destination ) : folders = self . _get_module_folders ( temp_repo ) for folder in folders : force_move ( folder , destination )
Move modules from the temp directory to the destination.
51,138
def _get_module_folders ( self , temp_repo ) : paths = ( os . path . join ( temp_repo , path ) for path in os . listdir ( temp_repo ) if self . _is_module_included ( path ) ) return ( path for path in paths if os . path . isdir ( path ) )
Get a list of module paths contained in a temp directory .
51,139
def _is_module_included ( self , module ) : if module in self . exclude_modules : return False if self . include_modules is None : return True return module in self . include_modules
Evaluate if the module must be included in the Odoo addons .
51,140
def _move_modules(self, temp_repo, destination):
    """Move Odoo core modules: flatten the repository's ``addons`` folder
    into ``odoo/addons``, then move the whole ``odoo`` folder to
    *destination*."""
    addons_dir = os.path.join(temp_repo, 'addons')
    core_addons_dir = os.path.join(temp_repo, 'odoo/addons')
    for module_folder in self._get_module_folders(addons_dir):
        force_move(module_folder, core_addons_dir)
    force_move(os.path.join(temp_repo, 'odoo'), destination)
Move odoo modules from the temp directory to the destination .
51,141
def apply(self, folder):
    """Merge the patch repository (``self.url`` @ ``self.branch``, commit
    ``self.commit``) into the git repository contained in *folder*.

    Raises RuntimeError when any git command fails.
    """
    logger.info("Apply Patch %s@%s (commit %s)", self.url, self.branch, self.commit)
    remote_name = 'patch'
    commands = (
        "git remote add {} {}".format(remote_name, self.url),
        "git fetch {} {}".format(remote_name, self.branch),
        'git merge {} -m "patch"'.format(self.commit),
        "git remote remove {}".format(remote_name),
    )
    for command in commands:
        return_code, stream_data = _run_command_inside_folder(command, folder)
        if return_code:
            msg = "Could not apply patch from {}@{}: {}. Error: {}".format(
                self.url, self.branch, command, stream_data)
            logger.error(msg)
            raise RuntimeError(msg)
Merge code from the given repo url to the git repo contained in the given folder .
51,142
def apply(self, folder):
    """Apply the git patch file at ``self.file_path`` to the repository in
    *folder*; raise RuntimeError when ``git apply`` fails."""
    logger.info("Apply Patch File %s", self.file_path)
    return_code, stream_data = _run_command_inside_folder(
        "git apply {}".format(self.file_path), folder)
    if return_code:
        msg = "Could not apply patch file at {}. Error: {}".format(self.file_path, stream_data)
        logger.error(msg)
        raise RuntimeError(msg)
Apply a patch from a git patch file .
51,143
def _set_up_context(cls):
    """Create the working context that holds all intermediate build state."""
    # AttributeDict presumably supports both attribute and item access —
    # TODO confirm against its definition.
    cls.context = AttributeDict()
    cls.context.new_meta = {}          # meta options collected for the new class
    cls.context.new_transitions = {}   # state -> set of allowed target states
    cls.context.new_methods = {}       # generated methods to attach later
Create context to keep all needed variables in .
51,144
def _check_states_enum(cls):
    """Ensure the configured states enum exists on the new class and is an
    Enum subclass; raise ValueError otherwise."""
    enum_attr_name = cls.context.get_config('states_enum_name')
    try:
        cls.context['states_enum'] = getattr(cls.context.new_class, enum_attr_name)
    except AttributeError:
        raise ValueError('No states enum given!')
    # issubclass raises TypeError for non-class objects — treat as invalid.
    try:
        valid = issubclass(cls.context.states_enum, Enum)
    except TypeError:
        valid = False
    if not valid:
        raise ValueError('Please provide enum instance to define available states.')
Check if states enum exists and is proper one .
51,145
def _check_if_states_are_strings(cls):
    """Ensure every state enum member has a string value; raise ValueError otherwise."""
    for member in list(cls.context.states_enum):
        if isinstance(member.value, six.string_types):
            continue
        raise ValueError(
            'Item {name} is not string. Only strings are allowed.'.format(name=member.name))
Check if all states are strings .
51,146
def _check_state_value(cls):
    """Resolve, validate and translate the machine's initial state value.

    The value comes from the 'initial_state' config option, falling back to
    the state attribute already set on the new class; an empty/missing
    value raises ValueError.
    """
    state_value = cls.context.get_config('initial_state', None)
    state_value = state_value or getattr(cls.context.new_class, cls.context.state_name, None)
    if not state_value:
        raise ValueError("Empty state is disallowed, yet no initial state is given!")
    # Normalize to the canonical enum representation via the translator.
    state_value = (cls.context.new_meta['translator'].translate(state_value))
    cls.context.state_value = state_value
Check initial state value - if is proper and translate it .
51,147
def _add_standard_attributes(cls):
    """Attach the state storage attribute and the generic helpers shared by
    all generated state machines to the new class."""
    new_class = cls.context.new_class
    setattr(new_class, cls.context.new_meta['state_attribute_name'], cls.context.state_value)
    setattr(new_class, cls.context.state_name, utils.state_property)
    setattr(new_class, 'is_', utils.is_)
    setattr(new_class, 'can_be_', utils.can_be_)
    setattr(new_class, 'set_', utils.set_)
Add attributes common to all state machines .
51,148
def _generate_standard_transitions(cls):
    """Build the transition table from the 'transitions' config option.

    Both source and target states are translated to enum members; states
    with no configured transitions get an empty set so every state has an
    entry.
    """
    allowed_transitions = cls.context.get_config('transitions', {})
    for key, transitions in allowed_transitions.items():
        key = cls.context.new_meta['translator'].translate(key)
        new_transitions = set()
        for trans in transitions:
            # Targets may already be enum members; translate plain values.
            if not isinstance(trans, Enum):
                trans = cls.context.new_meta['translator'].translate(trans)
            new_transitions.add(trans)
        cls.context.new_transitions[key] = new_transitions
    # Guarantee an (empty) entry for every state.
    for state in cls.context.states_enum:
        if state not in cls.context.new_transitions:
            cls.context.new_transitions[state] = set()
Generate methods used for transitions .
51,149
def _generate_standard_methods(cls):
    """Generate is_<state>/set_<state>/can_be_<state> helpers for every
    state, plus the generic accessors, into the pending methods dict."""
    methods = cls.context.new_methods
    for state in cls.context.states_enum:
        methods['is_{name}'.format(name=state.value)] = utils.generate_getter(state)
        methods['set_{name}'.format(name=state.value)] = utils.generate_setter(state)
        methods['can_be_{name}'.format(name=state.value)] = utils.generate_checker(state)
    methods['actual_state'] = utils.actual_state
    methods['as_enum'] = utils.as_enum
    methods['force_set'] = utils.force_set
Generate standard setters getters and checkers .
51,150
def _add_new_methods ( cls ) : for name , method in cls . context . new_methods . items ( ) : if hasattr ( cls . context . new_class , name ) : raise ValueError ( "Name collision in state machine class - '{name}'." . format ( name ) ) setattr ( cls . context . new_class , name , method )
Add all generated methods to result class .
51,151
def _set_complete_option ( cls ) : get_config = cls . context . get_config complete = get_config ( 'complete' , None ) if complete is None : conditions = [ get_config ( 'transitions' , False ) , get_config ( 'named_transitions' , False ) , ] complete = not any ( conditions ) cls . context . new_meta [ 'complete' ] = complete
Check and set complete option .
51,152
def data_directory():
    """Return the absolute path to the package's bundled ``data`` directory."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
Return the absolute path to the directory containing the package data .
51,153
def filterMapAttrs(records=None, **tags):
    """Return the records whose attributes match all given *tags*.

    *records* defaults to the full map index.  Bug fix: the original used
    ``records=getIndex()`` as the default, which evaluated the index once
    at import time (running its I/O on import and freezing a stale
    snapshot); the default is now resolved at call time.  With no tags the
    input records are returned unchanged.
    """
    if records is None:
        records = getIndex()
    if not tags:
        return records
    return [record for record in records if matchRecordAttrs(record, tags)]
matches available maps if their attributes match as specified
51,154
def matchRecordAttrs(mapobj, attrs):
    """Check whether *mapobj* has every attribute in *attrs* with a matching value.

    A missing attribute fails the match only when the expected value is
    truthy (so ``{'flag': 0}`` tolerates objects without ``flag``).

    Change: uses ``dict.items()`` directly instead of the Python-2 era
    ``iteritems`` helper — the surrounding code already uses f-strings, so
    this file is Python 3 only and the behavior is identical.
    """
    for key, expected in attrs.items():
        try:
            actual = getattr(mapobj, key)
        except AttributeError:
            if bool(expected):
                return False
            continue
        if actual != expected:
            return False
    return True
attempt to match given attributes against a single map object s attributes
51,155
def to_boolean(value, ctx):
    """Try to convert any value to a boolean.

    bools pass through; numbers are compared to zero; the strings
    'true'/'false' (any case) map to True/False; dates and times are always
    True.  Raises EvaluationError otherwise.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return value != 0
    if isinstance(value, Decimal):
        return value != Decimal(0)
    if isinstance(value, str):
        lowered = value.lower()
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    elif isinstance(value, datetime.date) or isinstance(value, datetime.time):
        return True
    raise EvaluationError("Can't convert '%s' to a boolean" % str(value))
Tries conversion of any value to a boolean
51,156
def to_integer(value, ctx):
    """Try to convert any value to an int.

    Decimals round half-up; numeric strings are parsed.  Raises
    EvaluationError when conversion is impossible.
    """
    if isinstance(value, bool):
        return 1 if value else 0
    if isinstance(value, int):
        return value
    if isinstance(value, Decimal):
        try:
            rounded = int(value.to_integral_exact(ROUND_HALF_UP))
            if isinstance(rounded, int):
                return rounded
        except ArithmeticError:
            pass
    elif isinstance(value, str):
        try:
            return int(value)
        except ValueError:
            pass
    raise EvaluationError("Can't convert '%s' to an integer" % str(value))
Tries conversion of any value to an integer
51,157
def to_decimal(value, ctx):
    """Try to convert any value to a Decimal; raise EvaluationError on failure."""
    if isinstance(value, bool):
        return Decimal(1) if value else Decimal(0)
    if isinstance(value, int):
        return Decimal(value)
    if isinstance(value, Decimal):
        return value
    if isinstance(value, str):
        try:
            return Decimal(value)
        except Exception:
            pass
    raise EvaluationError("Can't convert '%s' to a decimal" % str(value))
Tries conversion of any value to a decimal
51,158
def to_string(value, ctx):
    """Try to convert any value to its canonical string form.

    Booleans become TRUE/FALSE, dates use the context's date format, times
    use HH:MM, datetimes are rendered as ISO-8601 in the context timezone.
    Raises EvaluationError otherwise.
    """
    if isinstance(value, bool):
        return "TRUE" if value else "FALSE"
    if isinstance(value, int):
        return str(value)
    if isinstance(value, Decimal):
        return format_decimal(value)
    if isinstance(value, str):
        return value
    # Exact type check: datetime.datetime instances are handled further down.
    if type(value) == datetime.date:
        return value.strftime(ctx.get_date_format(False))
    if isinstance(value, datetime.time):
        return value.strftime('%H:%M')
    if isinstance(value, datetime.datetime):
        return value.astimezone(ctx.timezone).isoformat()
    raise EvaluationError("Can't convert '%s' to a string" % str(value))
Tries conversion of any value to a string
51,159
def to_date(value, ctx):
    """Try to convert any value to a date; raise EvaluationError on failure.

    Strings go through the context's auto date parser; datetimes are
    truncated to their date part.
    """
    if isinstance(value, str):
        temporal = ctx.get_date_parser().auto(value)
        if temporal is not None:
            return to_date(temporal, ctx)
    elif type(value) == datetime.date:
        # Exact date (not a datetime subclass instance).
        return value
    elif isinstance(value, datetime.datetime):
        return value.date()
    raise EvaluationError("Can't convert '%s' to a date" % str(value))
Tries conversion of any value to a date
51,160
def to_datetime(value, ctx):
    """Try to convert any value to a timezone-aware datetime in ctx.timezone.

    Strings go through the context's auto date parser; plain dates become
    midnight localized to the context timezone.  Raises EvaluationError on
    failure.
    """
    if isinstance(value, str):
        temporal = ctx.get_date_parser().auto(value)
        if temporal is not None:
            return to_datetime(temporal, ctx)
    elif type(value) == datetime.date:
        # pytz-style localize — assumes ctx.timezone is a pytz zone; TODO confirm.
        midnight = datetime.datetime.combine(value, datetime.time(0, 0))
        return ctx.timezone.localize(midnight)
    elif isinstance(value, datetime.datetime):
        return value.astimezone(ctx.timezone)
    raise EvaluationError("Can't convert '%s' to a datetime" % str(value))
Tries conversion of any value to a datetime
51,161
def to_date_or_datetime(value, ctx):
    """Try to convert any value to a date or datetime, keeping whichever
    kind it naturally is; raise EvaluationError otherwise."""
    # Datetimes first: they are also instances of date, so order matters
    # relative to the exact-type date check below.
    if isinstance(value, datetime.datetime):
        return value.astimezone(ctx.timezone)
    if type(value) == datetime.date:
        return value
    if isinstance(value, str):
        temporal = ctx.get_date_parser().auto(value)
        if temporal is not None:
            return temporal
    raise EvaluationError("Can't convert '%s' to a date or datetime" % str(value))
Tries conversion of any value to a date or datetime
51,162
def to_time(value, ctx):
    """Tries conversion of any value to a time.

    Strings are parsed with the context's time parser; datetimes are
    converted to the evaluation timezone before taking the time part.
    """
    if isinstance(value, str):
        parsed = ctx.get_date_parser().time(value)
        if parsed is not None:
            return parsed
    elif isinstance(value, datetime.time):
        return value
    elif isinstance(value, datetime.datetime):
        return value.astimezone(ctx.timezone).time()
    raise EvaluationError("Can't convert '%s' to a time" % str(value))
Tries conversion of any value to a time
51,163
def to_same(value1, value2, ctx):
    """Converts a pair of arguments to their most-likely common type.

    This deviates from Excel (which doesn't auto-convert values) but is
    necessary for us to intuitively handle contact fields which don't use
    the correct value type.

    Tries, in order: leave as-is (already the same type), both as
    decimals, both as dates/datetimes, and finally both as strings.
    """
    if value1 is not None and type(value1) == type(value2):
        return value1, value2
    try:
        # numeric comparison is preferred when both sides parse as decimals
        return to_decimal(value1, ctx), to_decimal(value2, ctx)
    except EvaluationError:
        pass
    try:
        d1, d2 = to_date_or_datetime(value1, ctx), to_date_or_datetime(value2, ctx)
        if type(value1) != type(value2):
            # mixed date/datetime pair: promote both to datetime
            d1, d2 = to_datetime(d1, ctx), to_datetime(d2, ctx)
        return d1, d2
    except EvaluationError:
        pass
    # fallback: compare string representations
    return to_string(value1, ctx), to_string(value2, ctx)
Converts a pair of arguments to their most-likely types. This deviates from Excel, which doesn't auto-convert values, but is necessary for us to intuitively handle contact fields which don't use the correct value type.
51,164
def is_containerized() -> bool:
    """Check if I am running inside a Linux container.

    Looks for docker/lxc markers in /proc/self/cgroup.

    Returns:
        True when a container marker is found, False otherwise —
        including when /proc/self/cgroup cannot be read (e.g. on
        non-Linux hosts).
    """
    try:
        cginfo = Path('/proc/self/cgroup').read_text()
    except IOError:
        return False
    # Fix: the original fell through and implicitly returned None when no
    # marker was present; always return a real bool as annotated.
    return '/docker/' in cginfo or '/lxc/' in cginfo
Check if I am running inside a Linux container .
51,165
def detect_cloud() -> str:
    """Detect the cloud provider where I am running on.

    Returns 'amazon', 'google' or 'azure'; returns None when no provider
    is detected or on non-Linux platforms. NOTE(review): the ``-> str``
    annotation does not reflect the None case — confirm whether callers
    expect ``Optional[str]``.
    """
    if sys.platform.startswith('linux'):
        # EC2 exposes Amazon via the DMI board vendor.
        try:
            mb = Path('/sys/devices/virtual/dmi/id/board_vendor').read_text().lower()
            if 'amazon' in mb:
                return 'amazon'
        except IOError:
            pass
        # GCE (and some EC2 generations) are identifiable by the BIOS version.
        try:
            bios = Path('/sys/devices/virtual/dmi/id/bios_version').read_text().lower()
            if 'google' in bios:
                return 'google'
            if 'amazon' in bios:
                return 'amazon'
        except IOError:
            pass
        # Azure's DHCP responses contain the proprietary option 245.
        try:
            dhcp = Path('/var/lib/dhcp/dhclient.eth0.leases').read_text()
            if 'unknown-245' in dhcp:
                return 'azure'
        except IOError:
            pass
    else:
        log.warning('Cloud detection is implemented for Linux only yet.')
    return None
Detect the cloud provider where I am running on .
51,166
def refocus(field, d, nm, res, method="helmholtz", num_cpus=1, padding=True):
    """Refocus a 1D or 2D field by numerical propagation.

    Arguments are forwarded to :func:`fft_propagate` by matching local
    variable names against its signature via introspection.

    Parameters
    ----------
    field : ndarray
        1D or 2D input field.
    d : float
        Propagation distance (units presumably match `res` — TODO confirm
        against fft_propagate's documentation).
    nm : float
        Refractive index of the medium.
    res : float
        Resolution parameter passed through to fft_propagate.
    method : str
        Propagation model, "helmholtz" or "fresnel".
    num_cpus : int
        Accepted for API symmetry; not used in this function.
    padding : bool
        If True, pad the field before propagation and crop afterwards to
        reduce FFT wrap-around artifacts.

    Returns
    -------
    ndarray
        The refocused field.
    """
    fshape = len(field.shape)
    assert fshape in [1, 2], "Dimension of `field` must be 1 or 2."
    func = fft_propagate
    # collect the subset of our locals that fft_propagate accepts
    names = func.__code__.co_varnames[:func.__code__.co_argcount]
    loc = locals()
    vardict = dict()
    for name in names:
        if name in loc:
            vardict[name] = loc[name]
    if padding:
        field = pad.pad_add(field)
    vardict["fftfield"] = np.fft.fftn(field)
    refoc = func(**vardict)
    if padding:
        refoc = pad.pad_rem(refoc)
    return refoc
Refocus a 1D or 2D field
51,167
def refocus_stack(fieldstack, d, nm, res, method="helmholtz", num_cpus=_cpu_count, copy=True, padding=True):
    """Refocus a stack of 1D or 2D fields.

    Each slice ``fieldstack[m]`` is refocused with :func:`refocus` in a
    separate process of a multiprocessing pool.

    Parameters
    ----------
    fieldstack : ndarray
        Stack of fields; the first axis enumerates the individual fields.
    d, nm, res, method, padding :
        Forwarded unchanged to :func:`refocus`.
    num_cpus : int
        Number of worker processes.
    copy : bool
        If True, return a new array; otherwise overwrite `fieldstack`
        in place (and also return it).

    Returns
    -------
    ndarray
        The refocused stack.
    """
    func = refocus
    # build the keyword set that `refocus` accepts from our locals
    names = func.__code__.co_varnames[:func.__code__.co_argcount]
    loc = locals()
    vardict = dict()
    for name in names:
        if name in loc.keys():
            vardict[name] = loc[name]
    # defaults of `refocus`, reversed so indices align with reversed names below
    func_def = func.__defaults__[::-1]
    # each worker handles a single field, so no nested parallelism
    vardict["num_cpus"] = 1
    vardict["padding"] = padding
    M = fieldstack.shape[0]
    stackargs = list()
    # assemble one positional argument list per field for the worker pool
    for m in range(M):
        kwarg = vardict.copy()
        kwarg["field"] = fieldstack[m]
        args = list()
        for i, a in enumerate(names[::-1]):
            if i < len(func_def):
                # start from the parameter's default (when it has one)
                val = func_def[i]
            if a in kwarg:
                val = kwarg[a]
            args.append(val)
        stackargs.append(args[::-1])
    p = mp.Pool(num_cpus)
    result = p.map_async(_refocus_wrapper, stackargs).get()
    p.close()
    p.terminate()
    p.join()
    if copy:
        data = np.zeros(fieldstack.shape, dtype=result[0].dtype)
    else:
        data = fieldstack
    for m in range(M):
        data[m] = result[m]
    return data
Refocus a stack of 1D or 2D fields
51,168
def fft_propagate(fftfield, d, nm, res, method="helmholtz", ret_fft=False):
    """Propagate a 1D or 2D Fourier-transformed field.

    Dispatches to :func:`fft_propagate_2d` for 1D spectra and to
    :func:`fft_propagate_3d` for 2D spectra, forwarding all arguments.
    """
    ndim = len(fftfield.shape)
    assert ndim in [1, 2], "Dimension of `fftfield` must be 1 or 2."
    propagate = fft_propagate_2d if ndim == 1 else fft_propagate_3d
    return propagate(fftfield=fftfield, d=d, nm=nm, res=res,
                     method=method, ret_fft=ret_fft)
Propagates a 1D or 2D Fourier transformed field
51,169
def fft_propagate_2d ( fftfield , d , nm , res , method = "helmholtz" , ret_fft = False ) : assert len ( fftfield . shape ) == 1 , "Dimension of `fftfield` must be 1." km = ( 2 * np . pi * nm ) / res kx = np . fft . fftfreq ( len ( fftfield ) ) * 2 * np . pi if method == "helmholtz" : root_km = km ** 2 - kx ** 2 rt0 = ( root_km > 0 ) fstemp = np . exp ( 1j * ( np . sqrt ( root_km * rt0 ) - km ) * d ) * rt0 elif method == "fresnel" : fstemp = np . exp ( - 1j * d * ( kx ** 2 / ( 2 * km ) ) ) else : raise ValueError ( "Unknown method: {}" . format ( method ) ) if ret_fft : return fftfield * fstemp else : return np . fft . ifft ( fftfield * fstemp )
Propagate a 1D Fourier transformed field in 2D
51,170
def fft_propagate_3d ( fftfield , d , nm , res , method = "helmholtz" , ret_fft = False ) : assert len ( fftfield . shape ) == 2 , "Dimension of `fftfield` must be 2." km = ( 2 * np . pi * nm ) / res kx = ( np . fft . fftfreq ( fftfield . shape [ 0 ] ) * 2 * np . pi ) . reshape ( - 1 , 1 ) ky = ( np . fft . fftfreq ( fftfield . shape [ 1 ] ) * 2 * np . pi ) . reshape ( 1 , - 1 ) if method == "helmholtz" : root_km = km ** 2 - kx ** 2 - ky ** 2 rt0 = ( root_km > 0 ) fstemp = np . exp ( 1j * ( np . sqrt ( root_km * rt0 ) - km ) * d ) * rt0 elif method == "fresnel" : fstemp = np . exp ( - 1j * d * ( kx ** 2 + ky ** 2 ) / ( 2 * km ) ) else : raise ValueError ( "Unknown method: {}" . format ( method ) ) if ret_fft : return fftfield * fstemp else : return np . fft . ifft2 ( fftfield * fstemp )
Propagate a 2D Fourier transformed field in 3D
51,171
def autofocus(field, nm, res, ival, roi=None, metric="average gradient", padding=True, ret_d=False, ret_grad=False, num_cpus=1):
    """Numerical autofocusing of a field using the Helmholtz equation.

    Parameters
    ----------
    field : ndarray
        1D or 2D input field.
    nm : float
        Refractive index of the medium.
    res : float
        Resolution parameter (see refocus).
    ival : tuple
        Interval of propagation distances to search.
    roi : slice or None
        Region of interest on which the metric is evaluated.
    metric : str
        One of "average gradient", "rms contrast", "spectrum".
    padding : bool
        Pad before propagation to reduce FFT artifacts.
    ret_d : bool
        Also return the optimal propagation distance.
    ret_grad : bool
        Also return the metric values from the minimization.
    num_cpus : int
        Accepted for API symmetry; not used in this function.

    Returns
    -------
    ndarray or tuple
        The refocused field, optionally followed by distance/metric data.
    """
    if metric == "average gradient":
        def metric_func(x):
            return metrics.average_gradient(np.abs(x))
    elif metric == "rms contrast":
        # negated so that minimization maximizes phase contrast
        def metric_func(x):
            return -metrics.contrast_rms(np.angle(x))
    elif metric == "spectrum":
        def metric_func(x):
            return metrics.spectral(np.abs(x), res)
    else:
        raise ValueError("No such metric: {}".format(metric))
    field, d, grad = minimize_metric(field, metric_func, nm, res, ival,
                                     roi=roi, padding=padding)
    ret_list = [field]
    if ret_d:
        ret_list += [d]
    if ret_grad:
        ret_list += [grad]
    if len(ret_list) == 1:
        return ret_list[0]
    else:
        return tuple(ret_list)
Numerical autofocusing of a field using the Helmholtz equation .
51,172
def autofocus_stack(fieldstack, nm, res, ival, roi=None, metric="average gradient", padding=True, same_dist=False, ret_ds=False, ret_grads=False, num_cpus=_cpu_count, copy=True):
    """Numerical autofocusing of a stack using the Helmholtz equation.

    Runs :func:`autofocus` on every slice in a multiprocessing pool.
    If `same_dist` is True, the individual optimal distances are averaged
    and the whole stack is refocused once to that common distance.

    Returns the refocused stack, optionally followed by the list of
    per-slice distances (`ret_ds`) and metric data (`ret_grads`).
    """
    dopt = list()
    grad = list()
    M = fieldstack.shape[0]
    stackargs = list()
    # one argument tuple per slice; ret_d and ret_grad forced True so the
    # per-slice distances/metrics can be collected below
    for s in range(M):
        # NOTE(review): ndarray.copy expects an order string; passing the
        # bool `copy` here looks suspicious — confirm intent.
        stackargs.append([fieldstack[s].copy(copy), nm, res, ival, roi,
                          metric, padding, True, True, 1])
    p = mp.Pool(num_cpus)
    result = p.map_async(_autofocus_wrapper, stackargs).get()
    p.close()
    p.terminate()
    p.join()
    newstack = np.zeros(fieldstack.shape, dtype=fieldstack.dtype)
    for s in range(M):
        field, ds, gs = result[s]
        dopt.append(ds)
        grad.append(gs)
        newstack[s] = field
    if same_dist:
        # refocus the original stack once to the average optimal distance
        davg = np.average(dopt)
        newstack = refocus_stack(fieldstack, davg, nm, res,
                                 num_cpus=num_cpus, copy=copy,
                                 padding=padding)
    ret_list = [newstack]
    if ret_ds:
        ret_list += [dopt]
    if ret_grads:
        ret_list += [grad]
    if len(ret_list) == 1:
        return ret_list[0]
    else:
        return tuple(ret_list)
Numerical autofocusing of a stack using the Helmholtz equation .
51,173
def getIndex(folderPath=None):
    """Parse the Maps subfolder directory, divining criteria for valid maps.

    The result is memoized on ``cache.structure``; subsequent calls return
    the cached list without rescanning the filesystem.

    Parameters
    ----------
    folderPath : str or None
        Root folder to scan; when None the package's configured map
        folder (from ``sc2maptool.startup.setup``) is used.

    Returns
    -------
    list of MapRecord
        One record per ``*.SC2Map`` file found, tagged with the names of
        its parent folders as attributes.
    """
    try:
        return cache.structure  # already indexed
    except AttributeError:
        pass
    if folderPath == None:
        from sc2maptool.startup import setup
        folderPath = setup()
    def folderSearch(path, attrList=[]):
        # NOTE(review): the mutable default is safe here because attrList
        # is never mutated in place (only re-passed as attrList + [name])
        ret = []
        for item in glob(os.path.join(path, '*')):
            if item == os.sep:
                continue
            itemName = os.path.basename(item)
            if os.path.isdir(item):
                # recurse, recording the folder name as a map attribute
                ret += folderSearch(item, attrList + [itemName])
            elif itemName.endswith(c.SC2_MAP_EXT):
                ret.append(MapRecord(itemName, item, attrList))
        return ret
    cache.structure = folderSearch(folderPath)
    return cache.structure
parse the Maps subfolder directory divining criteria for valid maps
51,174
def _stringify_number ( v ) : if isinstance ( v , ( float , Decimal ) ) : if math . isinf ( v ) and v > 0 : v = 'Infinity' elif math . isinf ( v ) and v < 0 : v = '-Infinity' else : v = '{:f}' . format ( v ) elif isinstance ( v , BinarySize ) : v = '{:d}' . format ( int ( v ) ) elif isinstance ( v , int ) : v = '{:d}' . format ( v ) else : v = str ( v ) return v
Stringify a number preventing unwanted scientific notations .
51,175
async def resolve_alias(cls, alias_key: str, etcd: etcd.AsyncEtcd):
    """Resolve the tag using etcd so that the current instance indicates
    a concrete, latest image.

    Follows the ``images/_aliases/<key>`` chain (at most 8 hops) until a
    key with no further alias entry is found; that key is the concrete
    target.

    Raises
    ------
    AliasResolutionFailed
        If the alias chain does not terminate within 8 lookups.
    """
    alias_target = None
    repeats = 0
    while repeats < 8:
        prev_alias_key = alias_key
        alias_key = await etcd.get(f'images/_aliases/{alias_key}')
        if alias_key is None:
            # no further alias entry: the previous key is the target
            alias_target = prev_alias_key
            break
        repeats += 1
    else:
        raise AliasResolutionFailed('Could not resolve the given image name!')
    known_registries = await get_known_registries(etcd)
    return cls(alias_target, known_registries)
Resolve the tag using etcd so that the current instance indicates a concrete latest image .
51,176
def _init_logging(anteater_log):
    """Setup root logger for package.

    Logs DEBUG and above both to stderr and to the file `anteater_log`,
    creating the log directory if necessary. Any pre-existing root
    handlers are discarded.
    """
    LOG.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                  '%(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    ch.setLevel(logging.DEBUG)
    path = os.path.dirname(anteater_log)
    try:
        os.makedirs(path)
    except OSError as e:
        # an already-existing directory is fine; anything else is fatal
        if e.errno != errno.EEXIST:
            raise
    handler = logging.FileHandler(anteater_log)
    handler.setFormatter(formatter)
    handler.setLevel(logging.DEBUG)
    # replace any handlers configured elsewhere
    del logging.root.handlers[:]
    logging.root.addHandler(ch)
    logging.root.addHandler(handler)
Setup root logger for package
51,177
def check_dir():
    """Create the directory used for scan reports, ignoring the case
    where it already exists."""
    try:
        os.makedirs(reports_dir)
        logger.info('Creating reports directory: %s', reports_dir)
    except OSError as err:
        # an already-existing directory is fine; re-raise anything else
        if err.errno != errno.EEXIST:
            raise
Creates a directory for scan reports
51,178
def main():
    """Main function, mostly for passing arguments.

    Configures logging and the report directory, then dispatches to the
    patchset- or project-scanning entry point based on docopt arguments.
    """
    _init_logging(config.get('config', 'anteater_log'))
    check_dir()
    arguments = docopt(__doc__, version=__version__)
    if arguments['<patchset>']:
        prepare_patchset(arguments['<project>'], arguments['<patchset>'],
                         arguments['--binaries'], arguments['--ips'],
                         arguments['--urls'])
    elif arguments['<project_path>']:
        prepare_project(arguments['<project>'], arguments['<project_path>'],
                        arguments['--binaries'], arguments['--ips'],
                        arguments['--urls'])
Main function mostly for passing arguments
51,179
def fit(model_code, *args, **kwargs):
    """Fit a Stan model. Caches the compiled model.

    The cache key is the model source (`model_code`); a cache hit skips
    the pystan.stan call entirely and returns the stored fit.
    """
    kwargs = dict(kwargs)
    kwargs['model_code'] = model_code
    if 'n_jobs' not in kwargs:
        kwargs['n_jobs'] = -1  # use all available cores by default
    if model_code in FIT_CACHE:
        print("Reusing model.")
        # NOTE(review): this assignment has no visible effect — pystan.stan
        # is not called on this branch; presumably 'fit' was meant to be
        # passed to pystan to reuse the compiled model. Confirm intent.
        kwargs['fit'] = FIT_CACHE[model_code]
    else:
        print("NOT reusing model.")
        start = time.time()
        FIT_CACHE[model_code] = pystan.stan(*args, **kwargs)
        print("Ran in %0.3f sec." % (time.time() - start))
    return FIT_CACHE[model_code]
Fit a Stan model . Caches the compiled model .
51,180
def count(self, other, r, attrs=None, info=None):
    """Gray & Moore based fast dual tree counting.

    Parameters
    ----------
    other : KDNode
        The other tree to count pairs against.
    r : array_like
        Counting radius (or radii), converted to float64.
    attrs : optional
        Per-point attributes forwarded to the C implementation.
    info : dict, optional
        Scratch/diagnostic dictionary forwarded to the C implementation.

    Returns
    -------
    The pair counts computed by the C extension.
    """
    if info is None:
        info = {}  # fix: avoid a shared mutable default argument
    r = numpy.array(r, dtype='f8')
    return _core.KDNode.count(self, other, r, attrs, info=info)
Gray & Moore based fast dual tree counting .
51,181
def fof(self, linkinglength, out=None, method='splay'):
    """Friend-of-Friend clustering with linking length.

    Allocates the output label array when none is supplied and delegates
    to the C implementation.
    """
    labels = numpy.empty(self.size, dtype='intp') if out is None else out
    return _core.KDNode.fof(self, linkinglength, labels, method)
Friend - of - Friend clustering with linking length .
51,182
def integrate(self, min, max, attr=None, info=None):
    """Calculate the total number of points between [min, max).

    Scalars are broadcast to all dimensions; `min` and `max` are
    broadcast against each other before delegating to the C
    implementation.

    Raises
    ------
    ValueError
        If the trailing dimension of min/max does not match the tree.
    """
    if info is None:
        info = {}  # fix: avoid a shared mutable default argument
    if numpy.isscalar(min):
        min = [min for i in range(self.ndims)]
    if numpy.isscalar(max):
        max = [max for i in range(self.ndims)]
    min = numpy.array(min, dtype='f8', order='C')
    max = numpy.array(max, dtype='f8', order='C')
    if min.shape[-1] != self.ndims:
        raise ValueError("dimension of min does not match Node")
    if max.shape[-1] != self.ndims:
        raise ValueError("dimension of max does not match Node")
    min, max = broadcast_arrays(min, max)
    return _core.KDNode.integrate(self, min, max, attr, info)
Calculate the total number of points between [ min max ) .
51,183
def make_forest(self, chunksize):
    """Divide a tree branch into a forest; each yielded subtree has size
    at most `chunksize` (or is an indivisible leaf).

    The heap always pops the largest unsplit subtree first (stored with
    negative size); finalized subtrees are stored with weight 0, so the
    loop terminates only once no splittable subtree remains.
    """
    heap = []
    heappush(heap, (-self.size, self))
    while True:
        w, x = heappop(heap)
        if w == 0:
            # only finalized entries remain
            heappush(heap, (0, x))
            break
        if x.less is None or (x.size < chunksize):
            # leaf or small enough: finalize with weight 0
            heappush(heap, (0, x))
            continue
        # Fix: push children with NEGATIVE sizes. With positive weights a
        # finalized (0-weight) entry would pop before unsplit siblings and
        # break the loop early, yielding subtrees larger than chunksize.
        heappush(heap, (-x.less.size, x.less))
        heappush(heap, (-x.greater.size, x.greater))
    for w, x in heap:
        yield x
Divide a tree branch into a forest; each subtree has size at most chunksize.
51,184
def _install_all(destination='', conf_file=''):
    """Use the conf file to list all the third-party Odoo add-ons that
    will be installed, and the patches that should be applied.

    Parameters
    ----------
    destination : str
        Target folder for the add-ons; defaults to ``../3rd`` relative
        to this file.
    conf_file : str
        YAML configuration path; defaults to ``../third_party_addons.yaml``.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    destination = destination or os.path.join(dir_path, '..', '3rd')
    conf_file = conf_file or os.path.join(dir_path, '..', "third_party_addons.yaml")
    work_directory = os.path.dirname(os.path.realpath(conf_file))
    with open(conf_file, "r") as conf_data:
        # SECURITY: yaml.load without an explicit Loader can construct
        # arbitrary Python objects; prefer yaml.safe_load if this file
        # could ever come from an untrusted source.
        data = yaml.load(conf_data)
    for addons in data:
        _install_one(addons['url'], addons['branch'],
                     os.path.abspath(destination),
                     commit=addons.get('commit'),
                     patches=addons.get('patches'),
                     exclude_modules=addons.get('excludes'),
                     include_modules=addons.get('includes'),
                     base=addons.get('base'),
                     work_directory=work_directory,
                     )
Use the conf file to list all the third party Odoo add - ons that will be installed and the patches that should be applied .
51,185
def find_lt(a, x):
    """Find rightmost value less than x.

    Parameters
    ----------
    a : sequence
        Sorted (ascending) sequence to search.
    x :
        Exclusive upper bound.

    Raises
    ------
    ValueError
        If no element of `a` is less than `x`.
    """
    pos = bisect.bisect_left(a, x)
    if pos == 0:
        raise ValueError
    return a[pos - 1]
Find rightmost value less than x
51,186
def parse(isatab_ref):
    """Entry point to parse an ISA-Tab directory.

    isatab_ref can point to a directory of ISA-Tab data, in which case we
    search for the investigation file, or be a reference to the high
    level investigation file itself.
    """
    if os.path.isdir(isatab_ref):
        # locate the single investigation file inside the directory
        fnames = (glob.glob(os.path.join(isatab_ref, "i_*.txt")) +
                  glob.glob(os.path.join(isatab_ref, "*.idf.txt")))
        assert len(fnames) == 1
        isatab_ref = fnames[0]
    assert os.path.exists(isatab_ref), "Did not find investigation file: %s" % isatab_ref
    i_parser = InvestigationParser()
    # fix: "rU" mode was removed in Python 3.11; "r" already applies
    # universal newline handling in Python 3
    with open(isatab_ref, "r") as in_handle:
        rec = i_parser.parse(in_handle)
    s_parser = StudyAssayParser(isatab_ref)
    rec = s_parser.parse(rec)
    return rec
Entry point to parse an ISA - Tab directory . isatab_ref can point to a directory of ISA - Tab data in which case we search for the investigator file or be a reference to the high level investigation file .
51,187
def _parse_region(self, rec, line_iter):
    """Parse a section of an ISA-Tab, assigning information to a supplied record.

    Consumes key/value sections from `line_iter` until the STUDY section
    starts. Returns the record plus a flag telling whether any section
    beyond the initial metadata was found.
    """
    had_info = False
    keyvals, section = self._parse_keyvals(line_iter)
    if keyvals:
        rec.metadata = keyvals[0]
    while section and section[0] != "STUDY":
        had_info = True
        keyvals, next_section = self._parse_keyvals(line_iter)
        attr_name = self._sections[section[0]]
        # some sections hold a single dict instead of a list of dicts
        if attr_name in self._nolist:
            try:
                keyvals = keyvals[0]
            except IndexError:
                keyvals = {}
        setattr(rec, attr_name, keyvals)
        section = next_section
    return rec, had_info
Parse a section of an ISA - Tab assigning information to a supplied record .
51,188
def _line_iter ( self , in_handle ) : reader = csv . reader ( in_handle , dialect = "excel-tab" ) for line in reader : if len ( line ) > 0 and line [ 0 ] : if line [ 0 ] . upper ( ) == line [ 0 ] and "" . join ( line [ 1 : ] ) == "" : line = [ line [ 0 ] ] yield line
Read tab delimited file handling ISA - Tab special case headers .
51,189
def parse(self, rec):
    """Retrieve row data from files associated with the ISATabRecord.

    For each study, loads the sample/source nodes from the study file,
    builds assay records with their node and process information, and
    replaces ``rec.studies`` with the enriched list.
    """
    final_studies = []
    for study in rec.studies:
        source_data = self._parse_study(study.metadata["Study File Name"],
                                        ["Source Name", "Sample Name",
                                         "Comment[ENA_SAMPLE]"])
        if source_data:
            study.nodes = source_data
            final_assays = []
            for assay in study.assays:
                cur_assay = ISATabAssayRecord(assay)
                assay_data = self._parse_study(assay["Study Assay File Name"],
                                               ["Sample Name", "Extract Name",
                                                "Raw Data File",
                                                "Derived Data File",
                                                "Image File",
                                                "Acquisition Parameter Data File",
                                                "Free Induction Decay Data File"])
                cur_assay.nodes = assay_data
                self._get_process_nodes(assay["Study Assay File Name"], cur_assay)
                final_assays.append(cur_assay)
            study.assays = final_assays
            # process nodes for the study-level file as well
            self._get_process_nodes(study.metadata["Study File Name"], study)
            final_studies.append(study)
    rec.studies = final_studies
    return rec
Retrieve row data from files associated with the ISATabRecord .
51,190
def _parse_study(self, fname, node_types):
    """Parse a study or assay row-oriented file around the supplied base nodes.

    Returns a dict mapping node indexes to NodeRecord objects, or None if
    the file does not exist.
    """
    if not os.path.exists(os.path.join(self._dir, fname)):
        return None
    nodes = {}
    # fix: "rU" mode was removed in Python 3.11; "r" already applies
    # universal newline handling in Python 3
    with open(os.path.join(self._dir, fname), "r") as in_handle:
        reader = csv.reader(in_handle, dialect="excel-tab")
        header = self._swap_synonyms(next(reader))
        hgroups = self._collapse_header(header)
        htypes = self._characterize_header(header, hgroups)
        for node_type in node_types:
            try:
                name_index = header.index(node_type)
            except ValueError:
                name_index = None
            if name_index is None:
                continue
            # rewind so every node type sees all rows; the header row is
            # skipped below by the `name in header` check
            in_handle.seek(0, 0)
            for line in reader:
                name = line[name_index]
                node_index = self._build_node_index(node_type, name)
                if name in header:
                    continue
                if (not name):
                    continue
                try:
                    node = nodes[node_index]
                except KeyError:
                    node = NodeRecord(name, node_type)
                    node.metadata = collections.defaultdict(set)
                    nodes[node_index] = node
                attrs = self._line_keyvals(line, header, hgroups, htypes,
                                           node.metadata)
                nodes[node_index].metadata = attrs
    return dict([(k, self._finalize_metadata(v)) for k, v in nodes.items()])
Parse study or assay row oriented file around the supplied base node .
51,191
def _finalize_metadata ( self , node ) : final = { } for key , val in iter ( node . metadata . items ( ) ) : final [ key ] = list ( val ) node . metadata = final return node
Convert node metadata back into a standard dictionary and list .
51,192
def _line_by_type ( self , line , header , hgroups , htypes , out , want_type , collapse_quals_fn = None ) : for index , htype in ( ( i , t ) for i , t in enumerate ( htypes ) if t == want_type ) : col = hgroups [ index ] [ 0 ] key = header [ col ] if collapse_quals_fn : val = collapse_quals_fn ( line , header , hgroups [ index ] ) else : val = line [ col ] out [ key ] . add ( val ) return out
Parse out key value pairs for line information based on a group of values .
51,193
def _collapse_attributes ( self , line , header , indexes ) : names = [ ] vals = [ ] pat = re . compile ( "[\W]+" ) for i in indexes : names . append ( pat . sub ( "_" , self . _clean_header ( header [ i ] ) ) ) vals . append ( line [ i ] ) Attrs = collections . namedtuple ( 'Attrs' , names ) return Attrs ( * vals )
Combine attributes in multiple columns into single named tuple .
51,194
def _characterize_header ( self , header , hgroups ) : out = [ ] for h in [ header [ g [ 0 ] ] for g in hgroups ] : this_ctype = None for ctype , names in self . _col_types . items ( ) : if h . startswith ( names ) : this_ctype = ctype break out . append ( this_ctype ) return out
Characterize header groups into different data types .
51,195
def _collapse_header ( self , header ) : out = [ ] for i , h in enumerate ( header ) : if h . startswith ( self . _col_quals ) : out [ - 1 ] . append ( i ) else : out . append ( [ i ] ) return out
Combine header columns into related groups .
51,196
def load(abspath, default=None, enable_verbose=True):
    """Load a pickled object from file. If the file does not exist,
    return `default` (an empty dict when not given).

    Gzip-compressed files (detected from the extension) are decompressed
    transparently.
    """
    if default is None:
        default = dict()
    prt("\nLoad from '%s' ..." % abspath, enable_verbose)
    abspath = lower_ext(str(abspath))
    is_pickle = is_pickle_file(abspath)
    if not os.path.exists(abspath):
        prt("    File not found, use default value: %r" % default, enable_verbose)
        return default
    # fix: time.clock() was removed in Python 3.8; perf_counter is the
    # documented replacement for interval timing
    st = time.perf_counter()
    if is_pickle:
        data = pickle.loads(textfile.readbytes(abspath))
    else:
        data = pickle.loads(compress.read_gzip(abspath))
    prt("    Complete! Elapse %.6f sec." % (time.perf_counter() - st), enable_verbose)
    return data
Load Pickle from file . If file are not exists returns default .
51,197
def dump(data, abspath, pk_protocol=py23.pk_protocol, overwrite=False, enable_verbose=True):
    """Dump a picklable object to file. Provides multiple choices to
    customize the behavior.

    Refuses to overwrite an existing file unless `overwrite` is True;
    gzip compression is applied when the extension indicates it.
    """
    prt("\nDump to '%s' ..." % abspath, enable_verbose)
    abspath = lower_ext(str(abspath))
    is_pickle = is_pickle_file(abspath)
    if os.path.exists(abspath):
        if not overwrite:
            prt("    Stop! File exists and overwrite is not allowed", enable_verbose)
            return
    # fix: time.clock() was removed in Python 3.8; perf_counter is the
    # documented replacement for interval timing
    st = time.perf_counter()
    content = pickle.dumps(data, pk_protocol)
    if is_pickle:
        textfile.writebytes(content, abspath)
    else:
        compress.write_gzip(content, abspath)
    prt("    Complete! Elapse %.6f sec." % (time.perf_counter() - st), enable_verbose)
Dump a picklable object to file. Provides multiple choices to customize the behavior.
51,198
def obj2bytes(obj, pk_protocol=py23.pk_protocol):
    """Convert an arbitrary picklable Python object to bytes."""
    return pickle.dumps(obj, protocol=pk_protocol)
Convert arbitrary pickable Python Object to bytes .
51,199
def obj2str(obj, pk_protocol=py23.pk_protocol):
    """Convert an arbitrary picklable object to a base64-encoded string.

    The result is URL-safe (uses ``-``/``_`` instead of ``+``/``/``).
    """
    return base64.urlsafe_b64encode(pickle.dumps(obj, protocol=pk_protocol)).decode("utf-8")
Convert arbitrary object to base64 encoded string .