Columns: idx (int64, 0 to 63k), question (string, lengths 53 to 5.28k), target (string, lengths 5 to 805)
4,000
def forkexec_pty ( argv , env = None , size = None ) : child_pid , child_fd = pty . fork ( ) if child_pid == 0 : os . closerange ( 3 , MAXFD ) environ = os . environ . copy ( ) if env is not None : environ . update ( env ) os . execve ( argv [ 0 ] , argv , environ ) if size is None : try : size = get_terminal_size ( 1 ) except Exception : size = ( 80 , 24 ) set_terminal_size ( child_fd , size ) return child_pid , child_fd
Fork a child process attached to a pty .
4,001
def get_ancestor_processes ( ) : if not _ANCESTOR_PROCESSES and psutil is not None : proc = psutil . Process ( os . getpid ( ) ) while proc . parent ( ) is not None : try : _ANCESTOR_PROCESSES . append ( proc . parent ( ) . exe ( ) ) proc = proc . parent ( ) except psutil . Error : break return _ANCESTOR_PROCESSES
Get a list of the executables of all ancestor processes .
4,002
def get_default_shell ( environ = None , fallback = _UNSPECIFIED ) : if environ is None : environ = os . environ if "PIAS_OPT_SHELL" in environ : return environ [ "PIAS_OPT_SHELL" ] shells = [ ] for filename in ( environ . get ( "SHELL" ) , "bash" , "sh" ) : if filename is not None : filepath = find_executable ( filename , environ ) if filepath is not None : shells . append ( filepath ) for ancestor in get_ancestor_processes ( ) : if ancestor in shells : return ancestor for shell in shells : return shell if fallback is not _UNSPECIFIED : return fallback raise ValueError ( "Could not find a shell" )
Get the user's default shell program.
4,003
def get_default_terminal ( environ = None , fallback = _UNSPECIFIED ) : if environ is None : environ = os . environ if "PIAS_OPT_TERMINAL" in environ : return environ [ "PIAS_OPT_TERMINAL" ] terminals = [ ] colorterm = environ . get ( "COLORTERM" ) for filename in ( colorterm , "gnome-terminal" , "konsole" , "xterm" ) : if filename is not None : filepath = find_executable ( filename , environ ) if filepath is not None : terminals . append ( filepath ) for ancestor in get_ancestor_processes ( ) : if ancestor in terminals : return ancestor for term in terminals : return term if fallback is not _UNSPECIFIED : return fallback raise ValueError ( "Could not find a terminal" )
Get the user's default terminal program.
4,004
def get_pias_script ( environ = None ) : if os . path . basename ( sys . argv [ 0 ] ) == "pias" : return sys . argv [ 0 ] filepath = find_executable ( "pias" , environ ) if filepath is not None : return filepath filepath = os . path . join ( os . path . dirname ( __file__ ) , "__main__.py" ) if os . path . exists ( filepath ) : return filepath raise RuntimeError ( "Could not locate the pias script." )
Get the path to the playitagainsam command-line script.
4,005
def airwires ( board , showgui = 0 ) : 'search for airwires in eagle board' board = Path ( board ) . expand ( ) . abspath ( ) file_out = tempfile . NamedTemporaryFile ( suffix = '.txt' , delete = 0 ) file_out . close ( ) ulp = ulp_templ . replace ( 'FILE_NAME' , file_out . name ) file_ulp = tempfile . NamedTemporaryFile ( suffix = '.ulp' , delete = 0 ) file_ulp . write ( ulp . encode ( 'utf-8' ) ) file_ulp . close ( ) commands = [ 'run ' + file_ulp . name , 'quit' , ] command_eagle ( board , commands = commands , showgui = showgui ) n = int ( Path ( file_out . name ) . text ( ) ) Path ( file_out . name ) . remove ( ) Path ( file_ulp . name ) . remove ( ) return n
Search for airwires in an Eagle board.
4,006
def _initialize ( self , con ) : if self . initialized : return SQLite3Database ( ) . _initialize ( con ) cur = con . execute ( 'SELECT COUNT(*) FROM sqlite_master WHERE name="reaction"' ) if cur . fetchone ( ) [ 0 ] == 0 : for init_command in init_commands : con . execute ( init_command ) con . commit ( ) self . initialized = True
Set up tables in SQL
4,007
def write_publication ( self , values ) : con = self . connection or self . _connect ( ) self . _initialize ( con ) cur = con . cursor ( ) values = ( values [ 'pub_id' ] , values [ 'title' ] , json . dumps ( values [ 'authors' ] ) , values [ 'journal' ] , values [ 'volume' ] , values [ 'number' ] , values [ 'pages' ] , values [ 'year' ] , values [ 'publisher' ] , values [ 'doi' ] , json . dumps ( values [ 'tags' ] ) ) q = self . default + ',' + ', ' . join ( '?' * len ( values ) ) cur . execute ( 'INSERT OR IGNORE INTO publication VALUES ({})' . format ( q ) , values ) pid = self . get_last_id ( cur , table = 'publication' ) if self . connection is None : con . commit ( ) con . close ( ) return pid
Write publication info to db
4,008
def write ( self , values , data = None ) : con = self . connection or self . _connect ( ) self . _initialize ( con ) cur = con . cursor ( ) pub_id = values [ 'pub_id' ] ase_ids = values [ 'ase_ids' ] energy_corrections = values [ 'energy_corrections' ] if ase_ids is not None : check_ase_ids ( values , ase_ids ) else : ase_ids = { } values = ( values [ 'chemical_composition' ] , values [ 'surface_composition' ] , values [ 'facet' ] , json . dumps ( values [ 'sites' ] ) , json . dumps ( values [ 'coverages' ] ) , json . dumps ( values [ 'reactants' ] ) , json . dumps ( values [ 'products' ] ) , values [ 'reaction_energy' ] , values [ 'activation_energy' ] , values [ 'dft_code' ] , values [ 'dft_functional' ] , values [ 'username' ] , values [ 'pub_id' ] ) q = self . default + ',' + ', ' . join ( '?' * len ( values ) ) cur . execute ( 'INSERT INTO reaction VALUES ({})' . format ( q ) , values ) id = self . get_last_id ( cur ) reaction_structure_values = [ ] for name , ase_id in ase_ids . items ( ) : if name in energy_corrections : energy_correction = energy_corrections [ name ] else : energy_correction = 0 reaction_structure_values . append ( [ name , energy_correction , ase_id , id ] ) insert_statement = cur . execute ( insert_statement , [ ase_id , pub_id ] ) cur . executemany ( 'INSERT INTO reaction_system VALUES (?, ?, ?, ?)' , reaction_structure_values ) if self . connection is None : con . commit ( ) con . close ( ) return id
Write reaction info to db file
4,009
def update ( self , id , values , key_names = 'all' ) : con = self . connection or self . _connect ( ) self . _initialize ( con ) cur = con . cursor ( ) pub_id = values [ 'pub_id' ] ase_ids = values [ 'ase_ids' ] energy_corrections = values [ 'energy_corrections' ] if ase_ids is not None : check_ase_ids ( values , ase_ids ) else : ase_ids = { } key_list , value_list = get_key_value_list ( key_names , values ) N_keys = len ( key_list ) value_strlist = get_value_strlist ( value_list ) execute_str = ', ' . join ( '{}={}' . format ( key_list [ i ] , value_strlist [ i ] ) for i in range ( N_keys ) ) update_command = 'UPDATE reaction SET {} WHERE id = {};' . format ( execute_str , id ) cur . execute ( update_command ) delete_command = 'DELETE from reaction_system WHERE id = {}' . format ( id ) cur . execute ( delete_command ) reaction_structure_values = [ ] for name , ase_id in ase_ids . items ( ) : reaction_structure_values . append ( [ name , energy_corrections . get ( name ) , ase_id , id ] ) insert_statement = cur . execute ( insert_statement , [ ase_id , pub_id ] ) cur . executemany ( 'INSERT INTO reaction_system VALUES (?, ?, ?, ?)' , reaction_structure_values ) if self . connection is None : con . commit ( ) con . close ( ) return id
Update reaction info for a selected row
4,010
def get_last_id ( self , cur , table = 'reaction' ) : cur . execute ( "SELECT seq FROM sqlite_sequence WHERE name='{0}'" . format ( table ) ) result = cur . fetchone ( ) if result is not None : id = result [ 0 ] else : id = 0 return id
Get the id of the last written row in table
4,011
def read_wv_master_from_array ( master_table , lines = 'brightest' , debugplot = 0 ) : if lines not in [ 'brightest' , 'all' ] : raise ValueError ( 'Unexpected lines=' + str ( lines ) ) if master_table . ndim == 1 : wv_master = master_table else : wv_master_all = master_table [ : , 0 ] if master_table . shape [ 1 ] == 2 : wv_master = np . copy ( wv_master_all ) elif master_table . shape [ 1 ] == 3 : if lines == 'brightest' : wv_flag = master_table [ : , 1 ] wv_master = wv_master_all [ np . where ( wv_flag == 1 ) ] else : wv_master = np . copy ( wv_master_all ) else : raise ValueError ( 'Lines_catalog file does not have the ' 'expected number of columns' ) if abs ( debugplot ) >= 10 : print ( "Reading master table from numpy array" ) print ( "wv_master:\n" , wv_master ) return wv_master
read arc line wavelengths from numpy array
4,012
def read_wv_master_file ( wv_master_file , lines = 'brightest' , debugplot = 0 ) : if lines not in [ 'brightest' , 'all' ] : raise ValueError ( 'Unexpected lines=' + str ( lines ) ) master_table = np . genfromtxt ( wv_master_file ) wv_master = read_wv_master_from_array ( master_table , lines ) if abs ( debugplot ) >= 10 : print ( "Reading master table: " + wv_master_file ) print ( "wv_master:\n" , wv_master ) return wv_master
read arc line wavelengths from external file .
4,013
def wvcal_spectrum ( sp , fxpeaks , poly_degree_wfit , wv_master , wv_ini_search = None , wv_end_search = None , wvmin_useful = None , wvmax_useful = None , geometry = None , debugplot = 0 ) : if len ( fxpeaks ) <= poly_degree_wfit : print ( ">>> Warning: not enough lines to fit spectrum" ) return None naxis1 = sp . shape [ 0 ] wv_master_range = wv_master [ - 1 ] - wv_master [ 0 ] delta_wv_master_range = 0.20 * wv_master_range if wv_ini_search is None : wv_ini_search = wv_master [ 0 ] - delta_wv_master_range if wv_end_search is None : wv_end_search = wv_master [ - 1 ] + delta_wv_master_range xchannel = fxpeaks + 1.0 list_of_wvfeatures = arccalibration ( wv_master = wv_master , xpos_arc = xchannel , naxis1_arc = naxis1 , crpix1 = 1.0 , wv_ini_search = wv_ini_search , wv_end_search = wv_end_search , wvmin_useful = wvmin_useful , wvmax_useful = wvmax_useful , error_xpos_arc = 3 , times_sigma_r = 3.0 , frac_triplets_for_sum = 0.50 , times_sigma_theil_sen = 10.0 , poly_degree_wfit = poly_degree_wfit , times_sigma_polfilt = 10.0 , times_sigma_cook = 10.0 , times_sigma_inclusion = 10.0 , geometry = geometry , debugplot = debugplot ) title = "Wavelength calibration" solution_wv = fit_list_of_wvfeatures ( list_of_wvfeatures = list_of_wvfeatures , naxis1_arc = naxis1 , crpix1 = 1.0 , poly_degree_wfit = poly_degree_wfit , weighted = False , plot_title = title , geometry = geometry , debugplot = debugplot ) if abs ( debugplot ) % 10 != 0 : xplot = np . arange ( 1 , naxis1 + 1 , dtype = float ) ax = ximplotxy ( xplot , sp , title = title , show = False , xlabel = 'pixel (from 1 to NAXIS1)' , ylabel = 'number of counts' , geometry = geometry ) ymin = sp . min ( ) ymax = sp . max ( ) dy = ymax - ymin ymin -= dy / 20. ymax += dy / 20. ax . set_ylim ( [ ymin , ymax ] ) for feature in solution_wv . features : xpos = feature . xpos reference = feature . reference ax . text ( xpos , sp [ int ( xpos + 0.5 ) - 1 ] , str ( reference ) , fontsize = 8 , horizontalalignment = 'center' ) print ( 'Plot with identified lines' ) pause_debugplot ( 12 , pltshow = True ) return solution_wv
Execute wavelength calibration of a spectrum using fixed line peaks .
4,014
def get_thumbnail ( file_ , name ) : options = settings . OPTIONS_DICT [ name ] opt = copy ( options ) geometry = opt . pop ( 'geometry' ) return original_get_thumbnail ( file_ , geometry , ** opt )
get_thumbnail version that uses aliases defined in THUMBNAIL_OPTIONS_DICT
4,015
def bitset ( name , members , base = bases . BitSet , list = False , tuple = False ) : if not name : raise ValueError ( 'empty bitset name: %r' % name ) if not hasattr ( members , '__getitem__' ) or not hasattr ( members , '__len__' ) : raise ValueError ( 'non-sequence bitset members: %r' % members ) if not len ( members ) : raise ValueError ( 'less than one bitset member: %r' % ( members , ) ) if len ( set ( members ) ) != len ( members ) : raise ValueError ( 'bitset members contains duplicates: %r' % ( members , ) ) if not issubclass ( base . __class__ , meta . MemberBitsMeta ) : raise ValueError ( 'base does not subclass bitset.bases: %r' % base ) list = { False : None , True : series . List } . get ( list , list ) tuple = { False : None , True : series . Tuple } . get ( tuple , tuple ) return base . _make_subclass ( name , members , listcls = list , tuplecls = tuple )
Return a new bitset class with given name and members .
4,016
def _extrapolation ( self , extrapolate ) : modes = [ 'extrapolate' , 'raise' , 'const' , 'border' ] if extrapolate not in modes : msg = 'invalid extrapolation mode {}' . format ( extrapolate ) raise ValueError ( msg ) if extrapolate == 'raise' : self . bounds_error = True self . extrapolate = False else : self . extrapolate = True self . bounds_error = False self . extrapolate_mode = extrapolate
Check permitted values of extrapolation.
4,017
def _create_h ( x ) : h = np . zeros_like ( x ) h [ : - 1 ] = x [ 1 : ] - x [ : - 1 ] h [ - 1 ] = h [ - 2 ] return h
Compute the increase between consecutive samples.
4,018
def _eval ( self , v , in_bounds , der ) : result = np . zeros_like ( v , dtype = 'float' ) x_indices = np . searchsorted ( self . _x , v , side = 'right' ) ids = x_indices [ in_bounds ] - 1 u = v [ in_bounds ] - self . _x [ ids ] result [ in_bounds ] = self . _poly_eval ( u , ids , der ) return result
Eval polynomial inside bounds .
4,019
def _extrapolate ( self , result , v , below_bounds , above_bounds , der ) : if self . extrapolate_mode == 'const' : fill_b = fill_a = self . fill_value elif self . extrapolate_mode == 'border' : fill_b = self . _poly_eval ( 0 , 0 , der ) fill_a = self . _poly_eval ( 0 , - 1 , der ) elif self . extrapolate_mode == 'extrapolate' : u = v [ above_bounds ] - self . _x [ - 2 ] fill_a = self . _poly_eval ( u , - 2 , der ) u = v [ below_bounds ] - self . _x [ 0 ] fill_b = self . _poly_eval ( u , 0 , der ) else : raise ValueError ( "extrapolation method doesn't exist" ) result [ below_bounds ] = fill_b result [ above_bounds ] = fill_a
Extrapolate result based on extrapolation mode .
4,020
def _check_bounds ( self , v ) : below_bounds = v < self . _x [ 0 ] above_bounds = v > self . _x [ - 1 ] if self . bounds_error and below_bounds . any ( ) : raise ValueError ( "A value in x_new is below the interpolation " "range." ) if self . bounds_error and above_bounds . any ( ) : raise ValueError ( "A value in x_new is above the interpolation " "range." ) return below_bounds , above_bounds
Check which values are out of bounds .
4,021
def filtmask ( sp , fmin = 0.02 , fmax = 0.15 , debugplot = 0 ) : xf = np . fft . fftfreq ( sp . size ) yf = np . fft . fft ( sp ) if abs ( debugplot ) in ( 21 , 22 ) : iok = np . where ( xf > 0 ) ximplotxy ( xf [ iok ] , yf [ iok ] . real , plottype = 'loglog' , xlabel = 'frequency' , ylabel = 'power' , title = 'before masking' , debugplot = debugplot ) cut = ( np . abs ( xf ) > fmax ) yf [ cut ] = 0.0 cut = ( np . abs ( xf ) < fmin ) yf [ cut ] = 0.0 if abs ( debugplot ) in ( 21 , 22 ) : iok = np . where ( xf > 0 ) ximplotxy ( xf [ iok ] , yf [ iok ] . real , plottype = 'loglog' , xlabel = 'frequency' , ylabel = 'power' , title = 'after masking' , debugplot = debugplot ) sp_filt = np . fft . ifft ( yf ) . real if abs ( debugplot ) in ( 21 , 22 ) : xdum = np . arange ( 1 , sp_filt . size + 1 ) ximplotxy ( xdum , sp_filt , title = "filtered median spectrum" , debugplot = debugplot ) sp_filtmask = sp_filt * cosinebell ( sp_filt . size , 0.1 ) if abs ( debugplot ) in ( 21 , 22 ) : xdum = np . arange ( 1 , sp_filt . size + 1 ) ximplotxy ( xdum , sp_filtmask , title = "filtered and masked median spectrum" , debugplot = debugplot ) return sp_filtmask
Filter spectrum in Fourier space and apply cosine bell .
4,022
def cosinebell ( n , fraction ) : mask = np . ones ( n ) nmasked = int ( fraction * n ) for i in range ( nmasked ) : yval = 0.5 * ( 1 - np . cos ( np . pi * float ( i ) / float ( nmasked ) ) ) mask [ i ] = yval mask [ n - i - 1 ] = yval return mask
Return a cosine bell spanning n pixels masking a fraction of pixels
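A minimal numeric sketch (not part of the dataset) of the taper that cosinebell builds, here for n=10 and fraction=0.2 so that nmasked=2 border pixels are ramped on each side:

import numpy as np

n, fraction = 10, 0.2
nmasked = int(fraction * n)                         # 2 ramped pixels per border
i = np.arange(nmasked)
ramp = 0.5 * (1.0 - np.cos(np.pi * i / nmasked))    # [0.0, 0.5]
mask = np.ones(n)
mask[:nmasked] = ramp                               # rising edge
mask[-nmasked:] = ramp[::-1]                        # mirrored falling edge
# mask -> [0.0, 0.5, 1, 1, 1, 1, 1, 1, 0.5, 0.0]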
4,023
def convolve_comb_lines ( lines_wave , lines_flux , sigma , crpix1 , crval1 , cdelt1 , naxis1 ) : xwave = crval1 + ( np . arange ( naxis1 ) + 1 - crpix1 ) * cdelt1 spectrum = np . zeros ( naxis1 ) for wave , flux in zip ( lines_wave , lines_flux ) : sp_tmp = gauss_box_model ( x = xwave , amplitude = flux , mean = wave , stddev = sigma ) spectrum += sp_tmp return xwave , spectrum
Convolve a set of lines of known wavelengths and flux .
4,024
def _split_refextract_authors_str ( authors_str ) : author_seq = ( x . strip ( ) for x in RE_SPLIT_AUTH . split ( authors_str ) if x ) res = [ ] current = '' for author in author_seq : if not isinstance ( author , six . text_type ) : author = six . text_type ( author . decode ( 'utf8' , 'ignore' ) ) author = re . sub ( r'\(|\)' , '' , author , re . U ) author = re . sub ( r'^[\W\d]+' , '' , author , re . U ) author = re . sub ( r'[^.\w]+$' , '' , author , re . U ) if RE_INITIALS_ONLY . match ( author ) : current += ', ' + author . strip ( ) . replace ( '. ' , '.' ) else : if current : res . append ( current ) current = author if current : res . append ( current ) filters = [ lambda a : a == 'ed' , lambda a : a . startswith ( ',' ) , lambda a : len ( a ) == 1 ] res = [ r for r in res if all ( not f ( r ) for f in filters ) ] return res
Extract author names out of refextract authors output .
4,025
def _set_publication_info_field ( self , field_name , value ) : self . _ensure_reference_field ( 'publication_info' , { } ) self . obj [ 'reference' ] [ 'publication_info' ] [ field_name ] = value
Put a value in the publication info of the reference .
4,026
def set_pubnote ( self , pubnote ) : if 'publication_info' in self . obj . get ( 'reference' , { } ) : self . add_misc ( u'Additional pubnote: {}' . format ( pubnote ) ) return if self . RE_VALID_PUBNOTE . match ( pubnote ) : pubnote = split_pubnote ( pubnote ) pubnote = convert_old_publication_info_to_new ( [ pubnote ] ) [ 0 ] self . _ensure_reference_field ( 'publication_info' , pubnote ) else : self . add_misc ( pubnote )
Parse pubnote and populate correct fields .
4,027
def _add_uid ( self , uid , skip_handle = False ) : uid = uid or '' if is_arxiv ( uid ) : self . _ensure_reference_field ( 'arxiv_eprint' , normalize_arxiv ( uid ) ) elif idutils . is_doi ( uid ) : self . _ensure_reference_field ( 'dois' , [ ] ) self . obj [ 'reference' ] [ 'dois' ] . append ( idutils . normalize_doi ( uid ) ) elif idutils . is_handle ( uid ) and not skip_handle : self . _ensure_reference_field ( 'persistent_identifiers' , [ ] ) self . obj [ 'reference' ] [ 'persistent_identifiers' ] . append ( { 'schema' : 'HDL' , 'value' : idutils . normalize_handle ( uid ) , } ) elif idutils . is_urn ( uid ) : self . _ensure_reference_field ( 'persistent_identifiers' , [ ] ) self . obj [ 'reference' ] [ 'persistent_identifiers' ] . append ( { 'schema' : 'URN' , 'value' : uid , } ) elif self . RE_VALID_CNUM . match ( uid ) : self . _ensure_reference_field ( 'publication_info' , { } ) self . obj [ 'reference' ] [ 'publication_info' ] [ 'cnum' ] = uid elif is_cds_url ( uid ) : self . _ensure_reference_field ( 'external_system_identifiers' , [ ] ) self . obj [ 'reference' ] [ 'external_system_identifiers' ] . append ( { 'schema' : 'CDS' , 'value' : extract_cds_id ( uid ) , } ) elif is_ads_url ( uid ) : self . _ensure_reference_field ( 'external_system_identifiers' , [ ] ) self . obj [ 'reference' ] [ 'external_system_identifiers' ] . append ( { 'schema' : 'ADS' , 'value' : extract_ads_id ( uid ) , } ) else : try : isbn = str ( ISBN ( uid ) ) self . _ensure_reference_field ( 'isbn' , { } ) self . obj [ 'reference' ] [ 'isbn' ] = isbn except Exception : raise ValueError ( 'Unrecognized uid type' )
Add unique identifier in correct field .
4,028
def set_page_artid ( self , page_start = None , page_end = None , artid = None ) : if page_end and not page_start : raise ValueError ( 'End_page provided without start_page' ) self . _ensure_reference_field ( 'publication_info' , { } ) publication_info = self . obj [ 'reference' ] [ 'publication_info' ] if page_start : publication_info [ 'page_start' ] = page_start if page_end : publication_info [ 'page_end' ] = page_end if artid : publication_info [ 'artid' ] = artid
Add artid and start/end pages to the publication info of a reference.
4,029
def separate_globs ( globs ) : exclude = [ ] include = [ ] for path in globs : if path . startswith ( "!" ) : exclude . append ( path [ 1 : ] ) else : include . append ( path ) return ( exclude , include )
Separate include and exclude globs .
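A tiny illustration (a sketch mirroring the logic of separate_globs above, with made-up patterns) of how leading "!" entries are routed to the exclude list:

globs = ["src/**/*.py", "!src/vendor/**"]
exclude = [p[1:] for p in globs if p.startswith("!")]     # strip the leading "!"
include = [p for p in globs if not p.startswith("!")]
assert (exclude, include) == (["src/vendor/**"], ["src/**/*.py"])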
4,030
def parse_glob ( path , included ) : files = glob . glob ( path , recursive = True ) array = [ ] for file in files : file = os . path . abspath ( file ) if file not in included : array . append ( file ) included += array return array
Parse a glob .
4,031
def find_base ( path ) : result = _pattern . match ( path ) if result : base = result . group ( 0 ) else : base = "./" if base . endswith ( '/' ) or base . endswith ( '\\' ) : return os . path . abspath ( base ) else : return os . path . dirname ( os . path . abspath ( base ) )
Find the base of a glob .
4,032
def fun_wv ( xchannel , crpix1 , crval1 , cdelt1 ) : wv = crval1 + ( xchannel - crpix1 ) * cdelt1 return wv
Compute wavelengths from channels .
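A minimal numeric check of the linear dispersion relation used by fun_wv; the reference values below are made up for illustration:

crpix1, crval1, cdelt1 = 1.0, 4000.0, 2.0     # reference pixel, wavelength, step per pixel
wv = crval1 + (101 - crpix1) * cdelt1         # channel 101
assert wv == 4200.0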
4,033
def update_poly_wlcalib ( coeff_ini , coeff_residuals , naxis1_ini , debugplot ) : coeff = [ ] for fdum in coeff_ini : coeff . append ( fdum ) poly_ini = np . polynomial . Polynomial ( coeff ) poldeg_wlcalib = len ( coeff ) - 1 if len ( coeff_residuals ) == 0 : return poly_ini . coef else : if np . count_nonzero ( poly_ini . coef ) == 0 : return poly_ini . coef coeff = [ ] for fdum in coeff_residuals : coeff . append ( fdum ) poly_residuals = np . polynomial . Polynomial ( coeff ) xfit = np . zeros ( naxis1_ini ) yfit = np . zeros ( naxis1_ini ) for i in range ( naxis1_ini ) : xfit [ i ] = float ( i + 1 ) wv_tmp = poly_ini ( xfit [ i ] ) yfit [ i ] = wv_tmp + poly_residuals ( wv_tmp ) if len ( xfit ) > poldeg_wlcalib : poldeg_effective = poldeg_wlcalib else : poldeg_effective = len ( xfit ) - 1 poly_updated , ydum = polfit_residuals ( x = xfit , y = yfit , deg = poldeg_effective , debugplot = debugplot ) return poly_updated . coef
Update wavelength calibration polynomial using the residuals fit .
4,034
def generate_docs ( klass ) : import numina . types . datatype attrh = ( 'Attributes\n' '----------\n' ) doc = getattr ( klass , '__doc__' , None ) if doc is None or doc == '' : doc = "%s documentation." % klass . __name__ if len ( klass . stored ( ) ) : doc = doc + '\n\n' + attrh skeys = sorted ( klass . stored ( ) . keys ( ) ) for key in skeys : y = klass . stored ( ) [ key ] if isinstance ( y , Requirement ) : modo = 'requirement' elif isinstance ( y , Result ) : modo = 'product' else : modo = "" if y . type . isproduct ( ) : tipo = y . type . __class__ . __name__ elif isinstance ( y . type , numina . types . datatype . PlainPythonType ) : tipo = y . type . internal_type . __name__ else : tipo = y . type . __class__ . __name__ if y . optional : if y . default_value ( ) : modo = "%s, optional, default=%s" % ( modo , y . default ) else : modo = "%s, optional" % ( modo , ) descript = y . description if descript : field = "%s : %s, %s\n %s\n" % ( key , tipo , modo , descript ) else : field = "%s : %s, %s\n" % ( key , tipo , modo ) doc = doc + field klass . __doc__ = doc return klass
Add documentation to generated classes
4,035
def reinverted ( n , r ) : result = 0 r = 1 << ( r - 1 ) while n : if not n & 1 : result |= r r >>= 1 n >>= 1 if r : result |= ( r << 1 ) - 1 return result
Integer with reversed and inverted bits of n assuming bit length r .
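A hypothetical string-based reference implementation (for illustration only, not the library's code) that cross-checks what reinverted computes for a 3-bit example:

def reinverted_reference(n, r):
    bits = format(n, '0{}b'.format(r))[::-1]                         # reverse the r-bit pattern
    return int(''.join('1' if b == '0' else '0' for b in bits), 2)   # invert every bit

assert reinverted_reference(0b101, 3) == 0b010   # 101 -> reversed 101 -> inverted 010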
4,036
def rank ( items , sequence = string . ascii_lowercase ) : items = set ( items ) return sum ( 1 << i for i , s in enumerate ( sequence ) if s in items )
Rank items from sequence in colexicographical order .
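A small worked example of the colexicographic rank (a sketch using the same expression as rank above): 'a' and 'c' occupy bit positions 0 and 2 of the lowercase alphabet.

import string

items = set('ac')
value = sum(1 << i for i, s in enumerate(string.ascii_lowercase) if s in items)
assert value == 5    # (1 << 0) + (1 << 2)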
4,037
def unrank ( n , sequence = string . ascii_lowercase ) : return list ( map ( sequence . __getitem__ , indexes ( n ) ) )
Unrank n from sequence in colexicographical order .
4,038
def background_estimator ( bdata ) : crowded = False std = numpy . std ( bdata ) std0 = std mean = bdata . mean ( ) while True : prep = len ( bdata ) numpy . clip ( bdata , mean - 3 * std , mean + 3 * std , out = bdata ) if prep == len ( bdata ) : if std < 0.8 * std0 : crowded = True break std = numpy . std ( bdata ) mean = bdata . mean ( ) if crowded : median = numpy . median ( bdata ) mean = bdata . mean ( ) std = bdata . std ( ) return 2.5 * median - 1.5 * mean , std return bdata . mean ( ) , bdata . std ( )
Estimate the background in a 2D array
4,039
def create_background_map ( data , bsx , bsy ) : sx , sy = data . shape mx = sx // bsx my = sy // bsy comp = [ ] rms = [ ] sp = numpy . split ( data , numpy . arange ( bsx , sx , bsx ) , axis = 0 ) for s in sp : rp = numpy . split ( s , numpy . arange ( bsy , sy , bsy ) , axis = 1 ) for r in rp : b , r = background_estimator ( r ) comp . append ( b ) rms . append ( r ) z = numpy . array ( comp ) z . shape = ( mx , my ) ndfilter . median_filter ( z , size = ( 3 , 3 ) , output = z ) new = _interpolation ( z , sx , sy , mx , my ) z = numpy . array ( rms ) z . shape = ( mx , my ) nrms = _interpolation ( z , sx , sy , mx , my ) return new , nrms
Create a background map with a given mesh size
4,040
def _read_columns_file ( f ) : try : columns = json . loads ( open ( f , 'r' ) . read ( ) , object_pairs_hook = collections . OrderedDict ) except Exception as err : raise InvalidColumnsFileError ( "There was an error while reading {0}: {1}" . format ( f , err ) ) if '__options' in columns : del columns [ '__options' ] return columns
Return the list of column queries read from the given JSON file .
4,041
def _table_to_csv ( table_ ) : f = cStringIO . StringIO ( ) try : _write_csv ( f , table_ ) return f . getvalue ( ) finally : f . close ( )
Return the given table converted to a CSV string .
4,042
def table ( dicts , columns , csv = False , pretty = False ) : if isinstance ( columns , basestring ) : columns = _read_columns_file ( columns ) for column in columns . values ( ) : if "pattern" in column : assert "pattern_path" not in column , ( 'A column must have either a "pattern" or a "pattern_path"' "but not both" ) column [ "pattern_path" ] = column [ "pattern" ] del column [ "pattern" ] table_ = [ ] for d in dicts : row = collections . OrderedDict ( ) for column_title , column_spec in columns . items ( ) : if not column_spec . get ( 'return_multiple_columns' , False ) : row [ column_title ] = query ( dict_ = d , ** column_spec ) else : multiple_columns = query ( dict_ = d , ** column_spec ) for k , v in multiple_columns . items ( ) : row [ k ] = v table_ . append ( row ) if pretty : return tabulate . tabulate ( table_ , tablefmt = "grid" , headers = "keys" ) elif csv : return _table_to_csv ( table_ ) else : return table_
Query a list of dicts with a list of queries and return a table .
4,043
def query ( pattern_path , dict_ , max_length = None , strip = False , case_sensitive = False , unique = False , deduplicate = False , string_transformations = None , hyperlink = False , return_multiple_columns = False ) : if string_transformations is None : string_transformations = [ ] if max_length : string_transformations . append ( lambda x : x [ : max_length ] ) if hyperlink : string_transformations . append ( lambda x : '=HYPERLINK("{0}")' . format ( x ) ) if isinstance ( pattern_path , basestring ) : pattern_path = [ pattern_path ] original_pattern_path = pattern_path pattern_path = pattern_path [ : ] pattern_path . reverse ( ) result = _process_object ( pattern_path , dict_ , string_transformations = string_transformations , strip = strip , case_sensitive = case_sensitive , return_multiple_columns = return_multiple_columns ) if not result : return None elif isinstance ( result , dict ) : return _flatten ( result ) elif len ( result ) == 1 : return result [ 0 ] else : if unique : msg = "pattern_path: {0}\n\n" . format ( original_pattern_path ) msg = msg + pprint . pformat ( dict_ ) raise UniqueError ( msg ) if deduplicate : new_result = [ ] for item in result : if item not in new_result : new_result . append ( item ) result = new_result return result
Query the given dict with the given pattern path and return the result .
4,044
def get_reactions ( columns = 'all' , n_results = 20 , write_db = False , ** kwargs ) : if write_db or columns == 'all' : columns = all_columns [ 'reactions' ] queries = { } for key , value in kwargs . items ( ) : key = map_column_names ( key ) if key == 'distinct' : if value in [ True , 'True' , 'true' ] : queries . update ( { key : True } ) continue if isinstance ( value , int ) or isinstance ( value , float ) : queries . update ( { key : value } ) else : queries . update ( { key : '{0}' . format ( value ) } ) subtables = [ ] if write_db : subtables = [ 'reactionSystems' , 'publication' ] else : subtables = [ ] data = query ( table = 'reactions' , subtables = subtables , columns = columns , n_results = n_results , queries = queries ) if not write_db : return data print ( 'Writing result to Reactions.db' ) unique_ids = [ ] for row in data [ 'reactions' ] [ 'edges' ] : with CathubSQLite ( 'Reactions.db' ) as db : row = row [ 'node' ] key_values = { } for key in all_columns [ 'reactions' ] : v = row [ key ] try : v = json . loads ( v ) except BaseException : pass key_values [ convert ( key ) ] = v ase_ids = { } energy_corrections = { } for row_rs in row [ 'reactionSystems' ] : if row_rs [ 'name' ] == 'N/A' : continue ase_ids [ row_rs [ 'name' ] ] = row_rs [ 'aseId' ] energy_corrections [ row_rs [ 'name' ] ] = row_rs [ 'energyCorrection' ] if not ase_ids : ase_ids = None energy_corrections = None else : unique_ids += ase_ids . values ( ) key_values [ 'ase_ids' ] = ase_ids key_values [ 'energy_corrections' ] = ase_ids pub_key_values = { } row_p = row [ 'publication' ] for key in all_columns [ 'publications' ] : pub_key_values [ convert ( key ) ] = row_p [ key ] db . write_publication ( pub_key_values ) id = db . check ( key_values [ 'chemical_composition' ] , key_values [ 'reaction_energy' ] ) if id is None : id = db . write ( key_values ) else : db . update ( id , key_values ) if ase_ids is not None : with ase . db . connect ( 'Reactions.db' ) as ase_db : con = ase_db . connection cur = con . cursor ( ) cur . execute ( 'SELECT unique_id from systems;' ) unique_ids0 = cur . fetchall ( ) unique_ids0 = [ un [ 0 ] for un in unique_ids0 ] unique_ids = [ un for un in unique_ids if un not in unique_ids0 ] for unique_id in list ( set ( unique_ids ) ) : atomsrow = get_atomsrow_by_id ( unique_id ) ase_db . write ( atomsrow ) print ( 'Writing complete!' ) return data
Get reactions from server
4,045
def create_recipe_file_logger ( logger , logfile , logformat ) : recipe_formatter = logging . Formatter ( logformat ) fh = logging . FileHandler ( logfile , mode = 'w' ) fh . setLevel ( logging . DEBUG ) fh . setFormatter ( recipe_formatter ) return fh
Redirect Recipe log messages to a file .
4,046
def run_recipe ( recipe , task , rinput , workenv , logger_control ) : recipe_logger = logging . getLogger ( logger_control [ 'default' ] ) if logger_control [ 'enabled' ] : logfile = os . path . join ( workenv . resultsdir , logger_control [ 'logfile' ] ) logformat = logger_control [ 'format' ] _logger . debug ( 'creating file logger %r from Recipe logger' , logfile ) fh = create_recipe_file_logger ( recipe_logger , logfile , logformat ) else : fh = logging . NullHandler ( ) recipe_logger . addHandler ( fh ) with working_directory ( workenv . workdir ) : try : run_recipe_timed ( task , recipe , rinput ) return task finally : recipe_logger . removeHandler ( fh )
Recipe execution mode of numina .
4,047
def run_recipe_timed ( task , recipe , rinput ) : _logger . info ( 'running recipe' ) now1 = datetime . datetime . now ( ) task . state = 1 task . time_start = now1 result = recipe ( rinput ) _logger . info ( 'result: %r' , result ) task . result = result now2 = datetime . datetime . now ( ) task . state = 2 task . time_end = now2 return task
Run the recipe and measure the time it takes.
4,048
def stop_db_session ( exc = None ) : if has_db_session ( ) : exc_type = None tb = None if exc : exc_type , exc , tb = get_exc_info ( exc ) db_session . __exit__ ( exc_type , exc , tb )
Stops the last db_session
4,049
def get_path ( dest , file , cwd = None ) : if callable ( dest ) : return dest ( file ) if not cwd : cwd = file . cwd if not os . path . isabs ( dest ) : dest = os . path . join ( cwd , dest ) relative = os . path . relpath ( file . path , file . base ) return os . path . join ( dest , relative )
Get the writing path of a file .
4,050
def write_file ( path , contents ) : os . makedirs ( os . path . dirname ( path ) , exist_ok = True ) with open ( path , "w" ) as file : file . write ( contents )
Write contents to a local file .
4,051
def get_pub_id ( title , authors , year ) : "construct publication id" if len ( title . split ( ' ' ) ) > 1 and title . split ( ' ' ) [ 0 ] . lower ( ) in [ 'the' , 'a' ] : _first_word = title . split ( ' ' ) [ 1 ] . split ( '_' ) [ 0 ] else : _first_word = title . split ( ' ' ) [ 0 ] . split ( '_' ) [ 0 ] pub_id = authors [ 0 ] . split ( ',' ) [ 0 ] . split ( ' ' ) [ 0 ] + _first_word + str ( year ) return pub_id
construct publication id
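A worked example (assuming get_pub_id above is in scope; the title, author, and year are made up): a leading "The" or "A" is skipped, then first-author surname, first title word, and year are concatenated.

assert get_pub_id('The Theory of Everything', ['Hawking, S.'], 2010) == 'HawkingTheory2010'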
4,052
def extract_atoms ( molecule ) : if molecule == '' : return molecule try : return float ( molecule ) except BaseException : pass atoms = '' if not molecule [ 0 ] . isalpha ( ) : i = 0 while not molecule [ i ] . isalpha ( ) : i += 1 prefactor = float ( molecule [ : i ] ) if prefactor < 0 : prefactor = abs ( prefactor ) sign = '-' else : sign = '' molecule = molecule [ i : ] else : prefactor = 1 sign = '' for k in range ( len ( molecule ) ) : if molecule [ k ] . isdigit ( ) : for j in range ( int ( molecule [ k ] ) - 1 ) : atoms += molecule [ k - 1 ] else : atoms += molecule [ k ] if prefactor % 1 == 0 : atoms *= int ( prefactor ) elif prefactor % 1 == 0.5 : atoms_sort = sorted ( atoms ) N = len ( atoms ) atoms = '' for n in range ( N ) : for m in range ( int ( prefactor - 0.5 ) ) : atoms += atoms_sort [ n ] if n % 2 == 0 : atoms += atoms_sort [ n ] return sign + '' . join ( sorted ( atoms ) )
Return a string with all atoms in molecule
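Two worked examples (assuming extract_atoms above is in scope): a digit repeats the preceding element symbol, and a leading numeric prefactor multiplies the sorted result.

assert extract_atoms('CO2') == 'COO'       # '2' repeats the preceding 'O'
assert extract_atoms('2H2O') == 'HHHHOO'   # prefactor 2 doubles the sorted 'HHO'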
4,053
def check_reaction ( reactants , products ) : reactant_list = [ reactant . split ( '@' ) [ 0 ] . strip ( 'star' ) . strip ( 'gas' ) for reactant in reactants ] product_list = [ product . split ( '@' ) [ 0 ] . strip ( 'star' ) . strip ( 'gas' ) for product in products ] reactant_atoms = [ extract_atoms ( reactant ) for reactant in reactant_list ] product_atoms = [ extract_atoms ( product ) for product in product_list ] reactants = add_atoms ( reactant_atoms ) products = add_atoms ( product_atoms ) r_stars = 0 p_stars = 0 for i , a in enumerate ( reactant_atoms ) : if a == '' or 'star' in reactant_list [ i ] : r_stars += 1 elif isinstance ( a , float ) : r_stars += a for a in product_atoms : if a == '' : p_stars += 1 elif isinstance ( a , float ) : p_stars += a assert '' . join ( sorted ( reactants ) ) == '' . join ( sorted ( products ) )
Check the stoichiometry and format of a chemical reaction used for the folder structure: list of reactants -> list of products.
4,054
def check_section ( node , section , keys = None ) : if keys : for key in keys : if key not in node : raise ValueError ( 'Missing key %r inside %r node' % ( key , section ) )
Validate keys in a section
4,055
def drp_load ( package , resource , confclass = None ) : data = pkgutil . get_data ( package , resource ) return drp_load_data ( package , data , confclass = confclass )
Load the DRPS from a resource file .
4,056
def drp_load_data ( package , data , confclass = None ) : drpdict = yaml . safe_load ( data ) ins = load_instrument ( package , drpdict , confclass = confclass ) if ins . version == 'undefined' : pkg = importlib . import_module ( package ) ins . version = getattr ( pkg , '__version__' , 'undefined' ) return ins
Load the DRPS from data .
4,057
def load_modes ( node ) : if isinstance ( node , list ) : values = [ load_mode ( child ) for child in node ] keys = [ mode . key for mode in values ] return dict ( zip ( keys , values ) ) elif isinstance ( node , dict ) : values = { key : load_mode ( child ) for key , child in node } return values else : raise NotImplementedError
Load all observing modes
4,058
def load_mode ( node ) : obs_mode = ObservingMode ( ) obs_mode . __dict__ . update ( node ) load_mode_validator ( obs_mode , node ) load_mode_builder ( obs_mode , node ) load_mode_tagger ( obs_mode , node ) return obs_mode
Load one observing mode.
4,059
def load_mode_tagger ( obs_mode , node ) : ntagger = node . get ( 'tagger' ) if ntagger is None : pass elif isinstance ( ntagger , list ) : def full_tagger ( obsres ) : return get_tags_from_full_ob ( obsres , reqtags = ntagger ) obs_mode . tagger = full_tagger elif isinstance ( ntagger , six . string_types ) : obs_mode . tagger = import_object ( ntagger ) else : raise TypeError ( 'tagger must be None, a list or a string' ) return obs_mode
Load observing mode OB tagger
4,060
def load_mode_builder ( obs_mode , node ) : nval1 = node . get ( 'builder' ) if nval1 is not None : if isinstance ( nval1 , str ) : newmethod = import_object ( nval1 ) obs_mode . build_ob = newmethod . __get__ ( obs_mode ) else : raise TypeError ( 'builder must be None or a string' ) else : nval2 = node . get ( 'builder_options' ) if nval2 is not None : if isinstance ( nval2 , list ) : for opt_dict in nval2 : if 'result_of' in opt_dict : fields = opt_dict [ 'result_of' ] obs_mode . build_ob_options = ResultOf ( ** fields ) break else : raise TypeError ( 'builder_options must be None or a list' ) return obs_mode
Load observing mode OB builder
4,061
def load_mode_validator ( obs_mode , node ) : nval = node . get ( 'validator' ) if nval is None : pass elif isinstance ( nval , str ) : obs_mode . validator = import_object ( nval ) else : raise TypeError ( 'validator must be None or a string' ) return obs_mode
Load observing mode validator
4,062
def frommembers ( cls , members ) : return cls . frombitsets ( map ( cls . BitSet . frommembers , members ) )
Series from iterable of member iterables .
4,063
def frombools ( cls , bools ) : return cls . frombitsets ( map ( cls . BitSet . frombools , bools ) )
Series from iterable of boolean evaluable iterables .
4,064
def frombits ( cls , bits ) : return cls . frombitsets ( map ( cls . BitSet . frombits , bits ) )
Series from binary string arguments .
4,065
def fromints ( cls , ints ) : return cls . frombitsets ( map ( cls . BitSet . fromint , ints ) )
Series from integer rank arguments .
4,066
def index_sets ( self , as_set = False ) : indexes = frozenset if as_set else tuple return [ indexes ( b . iter_set ( ) ) for b in self ]
Return the series as list of index set tuples .
4,067
def append_arrays ( many , single ) : assert np . ndim ( single ) == 1 diff = single . shape [ 0 ] - many . shape [ 0 ] if diff < 0 : single = np . pad ( single , ( 0 , - diff ) , 'constant' , constant_values = np . nan ) elif diff > 0 : many = np . pad ( many , ( ( 0 , diff ) , ) , 'constant' , constant_values = np . nan ) else : pass return np . c_ [ many , single ]
Append an array to another, padding with NaNs to keep a constant length.
4,068
def _get_locations ( self , calc ) : return ( self . _location_in ( calc . profile ) , self . _location_out ( calc . profile ) )
Locate the in and out positions within the profile.
4,069
def find_pix_borders ( sp , sought_value ) : if sp . ndim != 1 : raise ValueError ( 'Unexpected number of dimensions:' , sp . ndim ) naxis1 = len ( sp ) jborder_min = - 1 jborder_max = naxis1 if not np . alltrue ( sp == sought_value ) : while True : jborder_min += 1 if sp [ jborder_min ] != sought_value : break while True : jborder_max -= 1 if sp [ jborder_max ] != sought_value : break return jborder_min , jborder_max
Find useful region of a given spectrum
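A small check of the border search (assuming find_pix_borders above is in scope): for sought_value=0 it returns the first and last indices whose value differs from 0.

import numpy as np

sp = np.array([0, 0, 5, 7, 0])
assert find_pix_borders(sp, sought_value=0) == (2, 3)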
4,070
def fix_pix_borders ( image2d , nreplace , sought_value , replacement_value ) : naxis2 , naxis1 = image2d . shape for i in range ( naxis2 ) : jborder_min , jborder_max = find_pix_borders ( image2d [ i , : ] , sought_value = sought_value ) if jborder_min != - 1 : j1 = jborder_min j2 = min ( j1 + nreplace , naxis1 ) image2d [ i , j1 : j2 ] = replacement_value if jborder_max != naxis1 : j2 = jborder_max + 1 j1 = max ( j2 - nreplace , 0 ) image2d [ i , j1 : j2 ] = replacement_value return image2d
Replace a few pixels at the borders of each spectrum .
4,071
def define_mask_borders ( image2d , sought_value , nadditional = 0 ) : naxis2 , naxis1 = image2d . shape mask2d = np . zeros ( ( naxis2 , naxis1 ) , dtype = bool ) borders = [ ] for i in range ( naxis2 ) : jborder_min , jborder_max = find_pix_borders ( image2d [ i , : ] , sought_value = sought_value ) borders . append ( ( jborder_min , jborder_max ) ) if ( jborder_min , jborder_max ) != ( - 1 , naxis1 ) : if jborder_min != - 1 : j1 = 0 j2 = jborder_min + nadditional + 1 mask2d [ i , j1 : j2 ] = True if jborder_max != naxis1 : j1 = jborder_max - nadditional j2 = naxis1 mask2d [ i , j1 : j2 ] = True return mask2d , borders
Generate mask avoiding undesired values at the borders .
4,072
def update_features ( self , poly ) : for feature in self . features : feature . wavelength = poly ( feature . xpos )
Evaluate wavelength at xpos using the provided polynomial .
4,073
def dataframe_from_list ( values ) : if ( isinstance ( values , six . string_types ) ) : return DataFrame ( filename = values ) elif ( isinstance ( values , fits . HDUList ) ) : return DataFrame ( frame = values ) else : return None
Build a DataFrame object from a list .
4,074
def obsres_from_dict ( values ) : obsres = ObservationResult ( ) ikey = 'frames' if 'images' in values : ikey = 'images' obsres . id = values . get ( 'id' , 1 ) obsres . mode = values [ 'mode' ] obsres . instrument = values [ 'instrument' ] obsres . configuration = values . get ( 'configuration' , 'default' ) obsres . pipeline = values . get ( 'pipeline' , 'default' ) obsres . children = values . get ( 'children' , [ ] ) obsres . parent = values . get ( 'parent' , None ) obsres . results = values . get ( 'results' , { } ) obsres . requirements = values . get ( 'requirements' , { } ) try : obsres . frames = [ dataframe_from_list ( val ) for val in values [ ikey ] ] except Exception : obsres . frames = [ ] return obsres
Build a ObservationResult object from a dictionary .
4,075
def get_sample_frame ( self ) : for frame in self . frames : return frame . open ( ) for res in self . results . values ( ) : return res . open ( ) return None
Return first available image in observation result
4,076
def fowler_array ( fowlerdata , ti = 0.0 , ts = 0.0 , gain = 1.0 , ron = 1.0 , badpixels = None , dtype = 'float64' , saturation = 65631 , blank = 0 , normalize = False ) : import numina . array . _nirproc as _nirproc if gain <= 0 : raise ValueError ( "invalid parameter, gain <= 0.0" ) if ron <= 0 : raise ValueError ( "invalid parameter, ron < 0.0" ) if ti < 0 : raise ValueError ( "invalid parameter, ti < 0.0" ) if ts < 0 : raise ValueError ( "invalid parameter, ts < 0.0" ) if saturation <= 0 : raise ValueError ( "invalid parameter, saturation <= 0" ) fowlerdata = numpy . asarray ( fowlerdata ) if fowlerdata . ndim != 3 : raise ValueError ( 'fowlerdata must be 3D' ) npairs = fowlerdata . shape [ 0 ] // 2 if 2 * npairs != fowlerdata . shape [ 0 ] : raise ValueError ( 'axis-0 in fowlerdata must be even' ) ndtype = fowlerdata . dtype . newbyteorder ( '=' ) fowlerdata = numpy . asarray ( fowlerdata , dtype = ndtype ) fdtype = numpy . result_type ( fowlerdata . dtype , dtype ) mdtype = numpy . dtype ( 'uint8' ) fshape = ( fowlerdata . shape [ 1 ] , fowlerdata . shape [ 2 ] ) if badpixels is None : badpixels = numpy . zeros ( fshape , dtype = mdtype ) else : if badpixels . shape != fshape : raise ValueError ( 'shape of badpixels is not ' 'compatible with shape of fowlerdata' ) if badpixels . dtype != mdtype : raise ValueError ( 'dtype of badpixels must be uint8' ) result = numpy . empty ( fshape , dtype = fdtype ) var = numpy . empty_like ( result ) npix = numpy . empty ( fshape , dtype = mdtype ) mask = badpixels . copy ( ) _nirproc . _process_fowler_intl ( fowlerdata , ti , ts , gain , ron , badpixels , saturation , blank , result , var , npix , mask ) return result , var , npix , mask
Loop over the first axis applying Fowler processing .
4,077
def ramp_array ( rampdata , ti , gain = 1.0 , ron = 1.0 , badpixels = None , dtype = 'float64' , saturation = 65631 , blank = 0 , nsig = None , normalize = False ) : import numina . array . _nirproc as _nirproc if ti <= 0 : raise ValueError ( "invalid parameter, ti <= 0.0" ) if gain <= 0 : raise ValueError ( "invalid parameter, gain <= 0.0" ) if ron <= 0 : raise ValueError ( "invalid parameter, ron < 0.0" ) if saturation <= 0 : raise ValueError ( "invalid parameter, saturation <= 0" ) rampdata = numpy . asarray ( rampdata ) if rampdata . ndim != 3 : raise ValueError ( 'rampdata must be 3D' ) ndtype = rampdata . dtype . newbyteorder ( '=' ) rampdata = numpy . asarray ( rampdata , dtype = ndtype ) fdtype = numpy . result_type ( rampdata . dtype , dtype ) mdtype = numpy . dtype ( 'uint8' ) fshape = ( rampdata . shape [ 1 ] , rampdata . shape [ 2 ] ) if badpixels is None : badpixels = numpy . zeros ( fshape , dtype = mdtype ) else : if badpixels . shape != fshape : msg = 'shape of badpixels is not compatible with shape of rampdata' raise ValueError ( msg ) if badpixels . dtype != mdtype : raise ValueError ( 'dtype of badpixels must be uint8' ) result = numpy . empty ( fshape , dtype = fdtype ) var = numpy . empty_like ( result ) npix = numpy . empty ( fshape , dtype = mdtype ) mask = badpixels . copy ( ) _nirproc . _process_ramp_intl ( rampdata , ti , gain , ron , badpixels , saturation , blank , result , var , npix , mask ) return result , var , npix , mask
Loop over the first axis applying ramp processing .
4,078
def accept_freeware_license ( ) : ntab = 3 if version ( ) . startswith ( '6.6.' ) else 2 for _ in range ( ntab ) : EasyProcess ( 'xdotool key KP_Tab' ) . call ( ) time . sleep ( 0.5 ) EasyProcess ( 'xdotool key KP_Space' ) . call ( ) time . sleep ( 0.5 ) EasyProcess ( 'xdotool key KP_Space' ) . call ( )
Different Eagle versions need a different TAB count: 6.5 -> 2, 6.6 -> 3, 7.4 -> 2.
4,079
def fully_qualified_name ( obj , sep = '.' ) : if inspect . isclass ( obj ) : return obj . __module__ + sep + obj . __name__ else : return obj . __module__ + sep + obj . __class__ . __name__
Return fully qualified name from object
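A quick illustration of the dotted name produced for a class (a sketch reproducing the class branch of fully_qualified_name):

import collections

obj = collections.OrderedDict                    # a class, so __module__ + sep + __name__
assert obj.__module__ + '.' + obj.__name__ == 'collections.OrderedDict'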
4,080
def fun_residuals ( params , xnor , ynor , w , bbox , k , ext ) : spl = LSQUnivariateSpline ( x = xnor , y = ynor , t = [ item . value for item in params . values ( ) ] , w = w , bbox = bbox , k = k , ext = ext , check_finite = False ) return spl . get_residual ( )
Compute fit residuals
4,081
def parse_as_yaml ( strdict ) : interm = "" for key , val in strdict . items ( ) : interm = "%s: %s, %s" % ( key , val , interm ) fin = '{%s}' % interm return yaml . load ( fin )
Parse a dictionary of strings as if YAML had read it.
4,082
def folder2db ( folder_name , debug , energy_limit , skip_folders , goto_reaction ) : folder_name = folder_name . rstrip ( '/' ) skip = [ ] for s in skip_folders . split ( ', ' ) : for sk in s . split ( ',' ) : skip . append ( sk ) pub_id = _folder2db . main ( folder_name , debug , energy_limit , skip , goto_reaction ) if pub_id : print ( '' ) print ( '' ) print ( 'Ready to release the data?' ) print ( " Send it to the Catalysis-Hub server with 'cathub db2server {folder_name}/{pub_id}.db'." . format ( ** locals ( ) ) ) print ( " Then log in at www.catalysis-hub.org/upload/ to verify and release. " )
Read folder and collect data in local sqlite3 database
4,083
def db2server ( dbfile , block_size , dbuser , dbpassword ) : _db2server . main ( dbfile , write_reaction = True , write_ase = True , write_publication = True , write_reaction_system = True , block_size = block_size , start_block = 0 , user = dbuser , password = dbpassword )
Transfer data from local database to Catalysis Hub server
4,084
def reactions ( columns , n_results , write_db , queries ) : if not isinstance ( queries , dict ) : query_dict = { } for q in queries : key , value = q . split ( '=' ) if key == 'distinct' : if value in [ 'True' , 'true' ] : query_dict . update ( { key : True } ) continue try : value = int ( value ) query_dict . update ( { key : value } ) except BaseException : query_dict . update ( { key : '{0}' . format ( value ) } ) if write_db and n_results > 1000 : print ( ) data = query . get_reactions ( columns = columns , n_results = n_results , write_db = write_db , ** query_dict ) if write_db : return table = [ ] headers = [ ] for row in data [ 'reactions' ] [ 'edges' ] : table += [ list ( row [ 'node' ] . values ( ) ) ] headers = list ( row [ 'node' ] . keys ( ) ) print ( tabulate ( table , headers ) + '\n' )
Search for reactions
4,085
def publications ( columns , n_results , queries ) : if not isinstance ( queries , dict ) : query_dict = { } for q in queries : key , value = q . split ( '=' ) if key == 'distinct' : if value in [ 'True' , 'true' ] : query_dict . update ( { key : True } ) continue try : value = int ( value ) query_dict . update ( { key : value } ) except BaseException : query_dict . update ( { key : '{0}' . format ( value ) } ) if 'sort' not in query_dict : query_dict . update ( { 'order' : '-year' } ) data = query . query ( table = 'publications' , columns = columns , n_results = n_results , queries = query_dict ) table = [ ] headers = [ ] for row in data [ 'publications' ] [ 'edges' ] : value = list ( row [ 'node' ] . values ( ) ) for n , v in enumerate ( value ) : if isinstance ( v , str ) and len ( v ) > 20 : splited = v . split ( ' ' ) size = 0 sentence = '' for word in splited : if size < 20 : size += len ( word ) sentence += ' ' + word else : sentence += '\n' + word size = 0 sentence += '\n' value [ n ] = sentence table += [ value ] headers = list ( row [ 'node' ] . keys ( ) ) print ( tabulate ( table , headers , tablefmt = "grid" ) + '\n' )
Search for publications
4,086
def make_folders ( template , custom_base ) : def dict_representer ( dumper , data ) : return dumper . represent_dict ( data . items ( ) ) Dumper . add_representer ( collections . OrderedDict , dict_representer ) if custom_base is None : custom_base = os . path . abspath ( os . path . curdir ) template = custom_base + '/' + template template_data = ase_tools . REACTION_TEMPLATE if not os . path . exists ( template ) : with open ( template , 'w' ) as outfile : outfile . write ( yaml . dump ( template_data , indent = 4 , Dumper = Dumper ) + '\n' ) print ( "Created template file: {template}\n" . format ( ** locals ( ) ) + ' Please edit it and run the script again to create your folderstructure.\n' + ' Run cathub make_folders --help for instructions' ) return with open ( template ) as infile : template_data = yaml . load ( infile ) title = template_data [ 'title' ] authors = template_data [ 'authors' ] journal = template_data [ 'journal' ] volume = template_data [ 'volume' ] number = template_data [ 'number' ] pages = template_data [ 'pages' ] year = template_data [ 'year' ] email = template_data [ 'email' ] publisher = template_data [ 'publisher' ] doi = template_data [ 'doi' ] dft_code = template_data [ 'DFT_code' ] dft_functionals = template_data [ 'DFT_functionals' ] reactions = template_data [ 'reactions' ] crystal_structures = template_data [ 'crystal_structures' ] bulk_compositions = template_data [ 'bulk_compositions' ] facets = template_data [ 'facets' ] energy_corrections = template_data [ 'energy_corrections' ] make_folders_template . main ( title = title , authors = eval ( authors ) if isinstance ( authors , six . string_types ) else authors , journal = journal , volume = volume , number = number , pages = pages , year = year , email = email , publisher = publisher , doi = doi , DFT_code = dft_code , DFT_functionals = dft_functionals , reactions = eval ( reactions ) if isinstance ( reactions , six . string_types ) else reactions , custom_base = custom_base , bulk_compositions = bulk_compositions , crystal_structures = crystal_structures , facets = facets , energy_corrections = energy_corrections ) pub_id = tools . get_pub_id ( title , authors , year ) print ( "Now dump your DFT output files into the folder, and run 'cathub folder2db {pub_id}'" . format ( ** locals ( ) ) )
Create a basic folder tree for dumping DFT calculations for reaction energies.
4,087
def organize ( ** kwargs ) : if len ( kwargs [ 'adsorbates' ] ) == 0 : print ( ) print ( " Enter adsorbates like so --adsorbates CO,O,CO2" ) print ( " [Comma-separated list without spaces.]" ) kwargs [ 'adsorbates' ] = list ( map ( lambda x : ( '' . join ( sorted ( string2symbols ( x ) ) ) ) , kwargs [ 'adsorbates' ] . split ( ',' ) , ) ) if kwargs [ 'energy_corrections' ] : e_c_dict = { } for e_c in kwargs [ 'energy_corrections' ] . split ( ',' ) : key , value = e_c . split ( '=' ) e_c_dict . update ( { key : float ( value ) } ) kwargs [ 'energy_corrections' ] = e_c_dict options = collections . namedtuple ( 'options' , kwargs . keys ( ) ) ( ** kwargs ) _organize . main ( options = options )
Read reactions from a non-organized folder.
4,088
def initCurses ( self ) : curses . noecho ( ) curses . cbreak ( ) curses . curs_set ( 0 ) curses . start_color ( ) curses . use_default_colors ( ) curses . init_pair ( 1 , curses . COLOR_WHITE , - 1 ) curses . init_pair ( 2 , curses . COLOR_YELLOW , - 1 ) curses . init_pair ( 3 , curses . COLOR_MAGENTA , - 1 ) curses . init_pair ( 4 , curses . COLOR_CYAN , - 1 ) curses . init_pair ( 5 , curses . COLOR_GREEN , - 1 ) curses . init_pair ( 6 , curses . COLOR_BLUE , - 1 ) curses . init_pair ( 7 , curses . COLOR_RED , - 1 )
Set up screen properties
4,089
def patchCurses ( self ) : if ( sys . version_info ) [ : 3 ] == ( 3 , 4 , 0 ) : self . addchar = lambda y , x , * args : self . win . addch ( x , y , * args ) else : self . addchar = self . win . addch
Fix the curses addch function for Python 3.4.0.
4,090
def splash ( self ) : dirname = os . path . split ( os . path . abspath ( __file__ ) ) [ 0 ] try : splash = open ( os . path . join ( dirname , "splash" ) , "r" ) . readlines ( ) except IOError : return width = len ( max ( splash , key = len ) ) y = int ( self . y_grid / 2 ) - len ( splash ) x = int ( self . x_grid / 2 ) - int ( width / 2 ) if self . x_grid > width : for i , line in enumerate ( splash ) : self . win . addstr ( y + i , x , line , curses . color_pair ( 5 ) )
Draw splash screen
4,091
def drawHUD ( self ) : self . win . move ( self . height - 2 , self . x_pad ) self . win . clrtoeol ( ) self . win . box ( ) self . addstr ( 2 , self . x_pad + 1 , "Population: %i" % len ( self . grid ) ) self . addstr ( 3 , self . x_pad + 1 , "Generation: %s" % self . current_gen ) self . addstr ( 3 , self . x_grid - 21 , "s: start p: pause" ) self . addstr ( 2 , self . x_grid - 21 , "r: restart q: quit" )
Draw information on population size and current generation
4,092
def drawGrid ( self ) : for cell in self . grid : y , x = cell y += self . y_pad x += self . x_pad if self . traditional : sprite = '.' color = curses . color_pair ( 4 ) else : sprite = self . char [ self . grid [ cell ] - 1 ] color = curses . color_pair ( self . grid [ cell ] ) self . addchar ( y , x , sprite , color ) self . win . refresh ( )
Redraw the grid with the new generation
4,093
def nextGen ( self ) : self . current_gen += 1 self . change_gen [ self . current_gen % 3 ] = copy . copy ( self . grid ) grid_cp = copy . copy ( self . grid ) for cell in self . grid : y , x = cell y1 = ( y - 1 ) % self . y_grid y2 = ( y + 1 ) % self . y_grid x1 = ( x - 1 ) % self . x_grid x2 = ( x + 1 ) % self . x_grid n = self . countNeighbours ( cell ) if n < 2 or n > 3 : del grid_cp [ cell ] self . addchar ( y + self . y_pad , x + self . x_pad , ' ' ) else : grid_cp [ cell ] = min ( self . grid [ cell ] + 1 , self . color_max ) for neighbour in product ( [ y1 , y , y2 ] , [ x1 , x , x2 ] ) : if not self . grid . get ( neighbour ) : if self . countNeighbours ( neighbour ) == 3 : y , x = neighbour y = y % self . y_grid x = x % self . x_grid neighbour = y , x grid_cp [ neighbour ] = 1 self . grid = grid_cp
Decide the fate of the cells
4,094
def countNeighbours ( self , cell ) : count = 0 y , x = cell y = y % self . y_grid x = x % self . x_grid y1 = ( y - 1 ) % self . y_grid y2 = ( y + 1 ) % self . y_grid x1 = ( x - 1 ) % self . x_grid x2 = ( x + 1 ) % self . x_grid cell = y , x for neighbour in product ( [ y1 , y , y2 ] , [ x1 , x , x2 ] ) : if neighbour != cell and self . grid . get ( neighbour ) : count += 1 return count
Return the number of active neighbours within one position of the cell.
4,095
def initGrid ( self ) : blinker = [ ( 4 , 4 ) , ( 4 , 5 ) , ( 4 , 6 ) ] toad = [ ( 9 , 5 ) , ( 9 , 6 ) , ( 9 , 7 ) , ( 10 , 4 ) , ( 10 , 5 ) , ( 10 , 6 ) ] glider = [ ( 4 , 11 ) , ( 5 , 12 ) , ( 6 , 10 ) , ( 6 , 11 ) , ( 6 , 12 ) ] r_pentomino = [ ( 10 , 60 ) , ( 9 , 61 ) , ( 10 , 61 ) , ( 11 , 61 ) , ( 9 , 62 ) ] self . grid = { } if self . test : for cell in chain ( blinker , toad , glider , r_pentomino ) : self . grid [ cell ] = 1 else : for _ in range ( self . initsize ) : ry = random . randint ( self . y_pad , self . y_grid - 1 ) rx = random . randint ( self . x_pad , self . x_grid - 1 ) self . grid [ ( ry , rx ) ] = 1
Initialise the game grid
4,096
def restart ( self ) : self . initGrid ( ) self . win . clear ( ) self . current_gen = 1 self . start ( )
Restart the game from a new generation 0
4,097
def end ( self ) : if self . loop : self . restart ( ) return self . addstr ( 2 , self . x_grid / 2 - 4 , "GAMEOVER" , 7 ) if self . hud : self . addstr ( 2 , self . x_pad + 13 , len ( self . grid ) , 5 ) self . addstr ( 3 , self . x_pad + 13 , self . current_gen , 5 ) if self . test : exit ( ) while self . state == 'stopped' : key = self . win . getch ( ) if key == ord ( 'q' ) : exit ( ) if key in [ ord ( 's' ) , ord ( 'r' ) ] : self . restart ( )
Game Finished - Restart or Quit
4,098
def mean ( arrays , masks = None , dtype = None , out = None , zeros = None , scales = None , weights = None ) : return generic_combine ( intl_combine . mean_method ( ) , arrays , masks = masks , dtype = dtype , out = out , zeros = zeros , scales = scales , weights = weights )
Combine arrays using the mean with masks and offsets .
4,099
def median ( arrays , masks = None , dtype = None , out = None , zeros = None , scales = None , weights = None ) : return generic_combine ( intl_combine . median_method ( ) , arrays , masks = masks , dtype = dtype , out = out , zeros = zeros , scales = scales , weights = weights )
Combine arrays using the median with masks .