| idx (int64, 0-63k) | question (string, lengths 53-5.28k) | target (string, lengths 5-805) |
|---|---|---|
3,700
|
def runstring ( self ) : cfile = self . template % self . last self . last += 1 return cfile
|
Return the run number and the file name .
|
3,701
|
def obsres_from_oblock_id ( self , obsid , configuration = None ) : este = self . ob_table [ obsid ] obsres = obsres_from_dict ( este ) _logger . debug ( "obsres_from_oblock_id id='%s', mode='%s' START" , obsid , obsres . mode ) try : this_drp = self . drps . query_by_name ( obsres . instrument ) except KeyError : raise ValueError ( 'no DRP for instrument {}' . format ( obsres . instrument ) ) if obsres . mode in self . _RESERVED_MODE_NAMES : selected_mode = None else : selected_mode = this_drp . modes [ obsres . mode ] if selected_mode : obsres = selected_mode . build_ob ( obsres , self ) obsres = selected_mode . tag_ob ( obsres ) if configuration : pass else : pass key , date_obs , keyname = this_drp . select_profile ( obsres ) obsres . configuration = self . assembly_instrument ( key , date_obs , keyname ) obsres . profile = obsres . configuration _logger . debug ( 'obsres_from_oblock_id %s END' , obsid ) return obsres
|
Override instrument configuration if configuration is not None
|
3,702
|
def map_tree ( visitor , tree ) : newn = [ map_tree ( visitor , node ) for node in tree . nodes ] return visitor ( tree , newn )
|
Apply function to nodes
|
3,703
|
def filter_tree ( condition , tree ) : if condition ( tree ) : for node in tree . nodes : for n in filter_tree ( condition , node ) : yield n yield tree
|
Return parts of the tree that fulfill condition
|
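Both tree helpers above assume nodes exposing a `nodes` sequence of children. A minimal self-contained sketch, with a hypothetical `Node` class standing in for the real tree type and assuming `map_tree` and `filter_tree` as defined above:

```python
# Hypothetical minimal tree type; the real code presumably has richer nodes.
class Node:
    def __init__(self, name, nodes=()):
        self.name = name
        self.nodes = list(nodes)

tree = Node('root', [Node('a', [Node('b')]), Node('c')])

# map_tree: collect every node name; each visitor call sees its children's results.
names = map_tree(lambda node, children: [node.name] + sum(children, []), tree)
assert names == ['root', 'a', 'b', 'c']

# filter_tree yields matching nodes in post-order (descendants before parent).
assert [n.name for n in filter_tree(lambda n: True, tree)] == ['b', 'a', 'c', 'root']
```

Note that `filter_tree` only descends into a subtree when its root satisfies the condition, so a non-matching interior node prunes its whole branch.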
3,704
|
def fill_placeholders ( self , tags ) : def change_p_node_tags ( node , children ) : if isinstance ( node , Placeholder ) : value = ConstExpr ( tags [ node . name ] ) return value else : return node . clone ( children ) return map_tree ( change_p_node_tags , self )
|
Substitute Placeholder nodes by their values in tags
|
3,705
|
def molecules2symbols ( molecules , add_hydrogen = True ) : symbols = sorted ( list ( set ( ase . symbols . string2symbols ( '' . join ( map ( lambda _x : '' . join ( ase . symbols . string2symbols ( _x ) ) , molecules ) ) ) ) ) , key = lambda _y : ase . data . atomic_numbers [ _y ] ) if add_hydrogen and 'H' not in symbols : symbols . insert ( 0 , 'H' ) return symbols
|
Take a list of molecules and return just a list of atomic symbols possibly adding hydrogen
|
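A usage sketch, assuming the `ase` package is installed (the function relies on `ase.symbols.string2symbols` and `ase.data.atomic_numbers`):

```python
# Symbols come back deduplicated and sorted by atomic number; 'H' is
# prepended when absent and add_hydrogen is True (the default).
assert molecules2symbols(['CH4', 'H2O']) == ['H', 'C', 'O']
assert molecules2symbols(['N2'], add_hydrogen=False) == ['N']
```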
3,706
|
def construct_reference_system ( symbols , candidates = None , options = None , ) : if hasattr ( options , 'no_hydrogen' ) and options . no_hydrogen : add_hydrogen = False else : add_hydrogen = True references = { } sorted_candidates = [ 'H2' , 'H2O' , 'NH3' , 'N2' , 'CH4' , 'CO' , 'H2S' , 'HCl' , 'O2' ] if candidates is None : candidates = sorted_candidates else : odd_candidates = [ c for c in candidates if c not in sorted_candidates ] candidates = [ c for c in sorted_candidates if c in candidates ] + odd_candidates added_symbols = [ ] for symbol in symbols : added_symbols . append ( symbol ) for candidate in candidates : _symbols = ase . symbols . string2symbols ( candidate ) if set ( added_symbols ) <= set ( list ( references . keys ( ) ) + _symbols ) and set ( list ( references . keys ( ) ) + _symbols ) <= set ( symbols ) and candidate not in references . values ( ) : references [ symbol ] = candidate break else : raise UserWarning ( ( "No candidate satisfied {symbol}. Add more candidates\n" " Symbols {symbols}\n" " _Symbols {_symbols}\n" " References {references}\n" " Candidates {candidates}\n" ) . format ( symbol = symbol , symbols = symbols , _symbols = _symbols , candidates = candidates , references = list ( references . keys ( ) ) , ) ) sorted_references = [ ] references = list ( references . items ( ) ) return references
|
Take a list of symbols and construct a gas phase reference system , avoiding O2 when possible . Candidates can be rearranged , where earlier candidates get higher preference than later candidates
|
3,707
|
def get_stoichiometry_factors ( adsorbates , references ) : stoichiometry = get_atomic_stoichiometry ( references ) stoichiometry_factors = { } for adsorbate in adsorbates : for symbol in ase . symbols . string2symbols ( adsorbate ) : symbol_index = list ( map ( lambda _x : _x [ 0 ] , references ) ) . index ( symbol ) for ( factor , ( ref_symbol , ref_molecule ) ) in zip ( stoichiometry [ symbol_index ] , references ) : stoichiometry_factors . setdefault ( adsorbate , { } ) [ ref_molecule ] = stoichiometry_factors . setdefault ( adsorbate , { } ) . get ( ref_molecule , 0 ) + factor nonzero_factors = { } for key , value in stoichiometry_factors [ adsorbate ] . items ( ) : if not np . isclose ( value , 0. ) : nonzero_factors [ key ] = value stoichiometry_factors [ adsorbate ] = nonzero_factors return stoichiometry_factors
|
Take a list of adsorbates and a corresponding reference system and return a dictionary of per - adsorbate dictionaries encoding the stoichiometry factors converting between adsorbates and reference molecules .
|
3,708
|
def get_fields_dict ( self , row ) : return { k : getattr ( self , 'clean_{}' . format ( k ) , lambda x : x ) ( v . strip ( ) if isinstance ( v , str ) else None ) for k , v in zip_longest ( self . get_fields ( ) , row ) }
|
Returns a dict of field name and cleaned value pairs to initialize the model . Beware it aligns the lists of fields and row values with Nones to allow for adding fields not found in the CSV . Whitespace around the value of the cell is stripped .
|
3,709
|
def process_node ( node ) : value = node [ 'value' ] mname = node [ 'name' ] typeid = node [ 'typeid' ] if typeid == 52 : obj = { } for el in value [ 'elements' ] : key , val = process_node ( el ) obj [ key ] = val if value [ 'struct_type' ] != 'dict' : klass = objimp . import_object ( value [ 'struct_type' ] ) newobj = klass . __new__ ( klass ) if hasattr ( newobj , '__setstate__' ) : newobj . __setstate__ ( obj ) else : newobj . __dict__ = obj obj = newobj elif typeid == 9 : data = value [ 'data' ] dim = value [ 'dimension' ] shape = dim [ 'height' ] , dim [ 'width' ] obj = data elif typeid == 90 : obj = [ ] for el in value : sobj = { } for sel in el [ 'elements' ] : key , val = process_node ( sel ) sobj [ key ] = val obj . append ( sobj ) elif typeid == 45 : obj = dataframe . DataFrame ( frame = os . path . abspath ( value [ 'path' ] ) ) else : obj = value return mname , obj
|
Process a node in result . json structure
|
3,710
|
def build_result ( data ) : more = { } for key , value in data . items ( ) : if key != 'elements' : newnode = value else : newnode = { } for el in value : nkey , nvalue = process_node ( el ) newnode [ nkey ] = nvalue more [ key ] = newnode return more
|
Create a dictionary with the contents of result . json
|
3,711
|
def _finalize ( self , all_msg_errors = None ) : if all_msg_errors is None : all_msg_errors = [ ] for key in self . stored ( ) : try : getattr ( self , key ) except ( ValueError , TypeError ) as err : all_msg_errors . append ( err . args [ 0 ] ) if all_msg_errors : raise ValueError ( all_msg_errors )
|
Access all the instance descriptors
|
3,712
|
def validate ( self ) : for key , req in self . stored ( ) . items ( ) : val = getattr ( self , key ) req . validate ( val ) self . _run_checks ( )
|
Validate myself .
|
3,713
|
def validate ( method ) : @ wraps ( method ) def mod_run ( self , rinput ) : self . validate_input ( rinput ) result = method ( self , rinput ) self . validate_result ( result ) return result return mod_run
|
Decorate the run method so that inputs and outputs are validated
|
3,714
|
def as_list ( callable ) : @ wraps ( callable ) def wrapper ( value_iter ) : return [ callable ( value ) for value in value_iter ] return wrapper
|
Convert a scalar validator into a list validator
|
3,715
|
def range_validator ( minval = None , maxval = None ) : def checker_func ( value ) : if minval is not None and value < minval : msg = "must be >= {}" . format ( minval ) raise ValidationError ( msg ) if maxval is not None and value > maxval : msg = "must be <= {}" . format ( maxval ) raise ValidationError ( msg ) return value return checker_func
|
Generates a function that validates that a number is within range
|
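Taken together, `as_list` and `range_validator` compose naturally: the factory builds a scalar checker and the decorator lifts it over lists. A sketch, assuming the `ValidationError` class these validators raise:

```python
percent = range_validator(minval=0, maxval=100)   # scalar checker
percent_list = as_list(percent)                   # element-wise variant

assert percent(42) == 42
assert percent_list([10, 99]) == [10, 99]
try:
    percent(120)
except ValidationError as err:
    print(err)   # must be <= 100
```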
3,716
|
def run ( path , tasks ) : readable_path = make_readable_path ( path ) if not os . path . isfile ( path ) : logger . log ( logger . red ( "Can't read pylpfile " ) , logger . magenta ( readable_path ) ) sys . exit ( - 1 ) else : logger . log ( "Using pylpfile " , logger . magenta ( readable_path ) ) try : runpy . run_path ( path , None , "pylpfile" ) except Exception as e : traceback . print_exc ( file = sys . stdout ) logger . log ( logger . red ( "\nAn error has occurred during the execution of the pylpfile" ) ) sys . exit ( - 1 ) for name in tasks : pylp . start ( name ) loop = asyncio . get_event_loop ( ) loop . run_until_complete ( wait_and_quit ( loop ) )
|
Run a pylpfile .
|
3,717
|
async def wait_and_quit ( loop ) : from pylp . lib . tasks import running if running : await asyncio . wait ( map ( lambda runner : runner . future , running ) )
|
Wait until all tasks are executed .
|
3,718
|
def is_published ( self ) : citeable = 'publication_info' in self . record and is_citeable ( self . record [ 'publication_info' ] ) submitted = 'dois' in self . record and any ( 'journal_title' in el for el in force_list ( self . record . get ( 'publication_info' ) ) ) return citeable or submitted
|
Return True if a record is published .
|
3,719
|
def get_page_artid_for_publication_info ( publication_info , separator ) : if 'artid' in publication_info : return publication_info [ 'artid' ] elif 'page_start' in publication_info and 'page_end' in publication_info : page_start = publication_info [ 'page_start' ] page_end = publication_info [ 'page_end' ] return text_type ( '{}{}{}' ) . format ( page_start , text_type ( separator ) , page_end ) return ''
|
Return the page range or the article id of a publication_info entry .
|
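A quick sketch of the three branches (the function is presumably a static method on `LiteratureReader`, as the next entry suggests; here it is assumed to be directly importable):

```python
# An explicit artid wins over page numbers; missing page_end falls through to ''.
assert get_page_artid_for_publication_info({'artid': '042'}, '-') == '042'
assert get_page_artid_for_publication_info(
    {'page_start': '123', 'page_end': '130'}, '-') == '123-130'
assert get_page_artid_for_publication_info({'page_start': '123'}, '-') == ''
```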
3,720
|
def get_page_artid ( self , separator = '-' ) : publication_info = get_value ( self . record , 'publication_info[0]' , default = { } ) return LiteratureReader . get_page_artid_for_publication_info ( publication_info , separator )
|
Return the page range or the article id of a record .
|
3,721
|
def chunkreverse ( integers , dtype = 'L' ) : if dtype in ( 'B' , 8 ) : return map ( RBYTES . __getitem__ , integers ) fmt = '{0:0%db}' % NBITS [ dtype ] return ( int ( fmt . format ( chunk ) [ : : - 1 ] , 2 ) for chunk in integers )
|
Yield integers of dtype bit - length , reversing their bit - order .
|
3,722
|
def pack ( chunks , r = 32 ) : if r < 1 : raise ValueError ( 'pack needs r > 0' ) n = shift = 0 for c in chunks : n += c << shift shift += r return n
|
Return integer concatenating integer chunks of r > 0 bit - length .
|
3,723
|
def unpack ( n , r = 32 ) : if r < 1 : raise ValueError ( 'unpack needs r > 0' ) mask = ( 1 << r ) - 1 while n : yield n & mask n >>= r
|
Yield r > 0 bit - length integers splitting n into chunks .
|
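`pack` places the first chunk in the lowest bits and `unpack` walks them back out, so the two round-trip for positive chunks. A small check:

```python
n = pack([1, 2, 3], r=4)          # 1 + (2 << 4) + (3 << 8)
assert n == 0x321 == 801
assert list(unpack(n, r=4)) == [1, 2, 3]
# Caveat: unpack stops once the remainder is zero, so trailing zero
# chunks (e.g. pack([1, 0], r=4)) are not recovered.
```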
3,724
|
def packbools ( bools , dtype = 'L' ) : r = NBITS [ dtype ] atoms = ATOMS [ dtype ] for chunk in zip_longest ( * [ iter ( bools ) ] * r , fillvalue = False ) : yield sum ( compress ( atoms , chunk ) )
|
Yield integers concatenating bools in chunks of dtype bit - length .
|
3,725
|
def unpackbools ( integers , dtype = 'L' ) : atoms = ATOMS [ dtype ] for chunk in integers : for a in atoms : yield not not chunk & a
|
Yield booleans unpacking integers of dtype bit - length .
|
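Both helpers lean on module-level `NBITS` (bits per dtype code) and `ATOMS` (single-bit masks) tables that are not shown here; the sketch below assumes a plausible layout for them:

```python
from itertools import compress, zip_longest

NBITS = {'B': 8, 'L': 32}                                  # assumed widths
ATOMS = {k: [1 << i for i in range(v)] for k, v in NBITS.items()}

bools = [True, False, True]                 # bits 0 and 2 set
packed = list(packbools(bools, dtype='B'))
assert packed == [0b101]
# unpackbools always emits full dtype-width runs, so trim to the input size.
assert list(unpackbools(packed, dtype='B'))[:3] == bools
```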
3,726
|
def select_data_for_fit ( list_of_wvfeatures ) : nlines_arc = len ( list_of_wvfeatures ) nfit = 0 ifit = [ ] xfit = np . array ( [ ] ) yfit = np . array ( [ ] ) wfit = np . array ( [ ] ) for i in range ( nlines_arc ) : if list_of_wvfeatures [ i ] . line_ok : ifit . append ( i ) xfit = np . append ( xfit , [ list_of_wvfeatures [ i ] . xpos ] ) yfit = np . append ( yfit , [ list_of_wvfeatures [ i ] . reference ] ) wfit = np . append ( wfit , [ list_of_wvfeatures [ i ] . funcost ] ) nfit += 1 return nfit , ifit , xfit , yfit , wfit
|
Select information from valid arc lines to facilitate subsequent fits .
|
3,727
|
def gen_triplets_master ( wv_master , geometry = None , debugplot = 0 ) : nlines_master = wv_master . size wv_previous = wv_master [ 0 ] for i in range ( 1 , nlines_master ) : if wv_previous >= wv_master [ i ] : raise ValueError ( 'Wavelengths:\n' + str ( wv_previous ) + '\n' + str ( wv_master [ i ] ) + '\nin master table are duplicated or not sorted' ) wv_previous = wv_master [ i ] iter_comb_triplets = itertools . combinations ( range ( nlines_master ) , 3 ) triplets_master_list = [ val for val in iter_comb_triplets ] ntriplets_master = len ( triplets_master_list ) if ntriplets_master == comb ( nlines_master , 3 , exact = True ) : if abs ( debugplot ) >= 10 : print ( '>>> Total number of lines in master table:' , nlines_master ) print ( '>>> Number of triplets in master table...:' , ntriplets_master ) else : raise ValueError ( 'Invalid number of combinations' ) ratios_master = np . zeros ( ntriplets_master ) for index , value in enumerate ( triplets_master_list ) : i1 , i2 , i3 = value delta1 = wv_master [ i2 ] - wv_master [ i1 ] delta2 = wv_master [ i3 ] - wv_master [ i1 ] ratios_master [ index ] = delta1 / delta2 isort_ratios_master = np . argsort ( ratios_master ) ratios_master_sorted = ratios_master [ isort_ratios_master ] triplets_master_sorted_list = [ triplets_master_list [ i ] for i in isort_ratios_master ] if abs ( debugplot ) in [ 21 , 22 ] : bins_in = np . linspace ( 0.0 , 1.0 , 41 ) hist , bins_out = np . histogram ( ratios_master , bins = bins_in ) from numina . array . display . matplotlib_qt import plt fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) width_hist = 0.8 * ( bins_out [ 1 ] - bins_out [ 0 ] ) center = ( bins_out [ : - 1 ] + bins_out [ 1 : ] ) / 2 ax . bar ( center , hist , align = 'center' , width = width_hist ) ax . set_xlabel ( 'distance ratio in each triplet' ) ax . set_ylabel ( 'Number of triplets' ) ax . set_title ( "Number of lines/triplets: " + str ( nlines_master ) + "/" + str ( ntriplets_master ) ) set_window_geometry ( geometry ) pause_debugplot ( debugplot , pltshow = True , tight_layout = True ) return ntriplets_master , ratios_master_sorted , triplets_master_sorted_list
|
Compute information associated with triplets in master table .
|
3,728
|
def arccalibration ( wv_master , xpos_arc , naxis1_arc , crpix1 , wv_ini_search , wv_end_search , wvmin_useful , wvmax_useful , error_xpos_arc , times_sigma_r , frac_triplets_for_sum , times_sigma_theil_sen , poly_degree_wfit , times_sigma_polfilt , times_sigma_cook , times_sigma_inclusion , geometry = None , debugplot = 0 ) : ntriplets_master , ratios_master_sorted , triplets_master_sorted_list = gen_triplets_master ( wv_master = wv_master , geometry = geometry , debugplot = debugplot ) list_of_wvfeatures = arccalibration_direct ( wv_master = wv_master , ntriplets_master = ntriplets_master , ratios_master_sorted = ratios_master_sorted , triplets_master_sorted_list = triplets_master_sorted_list , xpos_arc = xpos_arc , naxis1_arc = naxis1_arc , crpix1 = crpix1 , wv_ini_search = wv_ini_search , wv_end_search = wv_end_search , wvmin_useful = wvmin_useful , wvmax_useful = wvmax_useful , error_xpos_arc = error_xpos_arc , times_sigma_r = times_sigma_r , frac_triplets_for_sum = frac_triplets_for_sum , times_sigma_theil_sen = times_sigma_theil_sen , poly_degree_wfit = poly_degree_wfit , times_sigma_polfilt = times_sigma_polfilt , times_sigma_cook = times_sigma_cook , times_sigma_inclusion = times_sigma_inclusion , geometry = geometry , debugplot = debugplot ) return list_of_wvfeatures
|
Performs arc line identification for arc calibration .
|
3,729
|
def match_wv_arrays ( wv_master , wv_expected_all_peaks , delta_wv_max ) : wv_verified_all_peaks = np . zeros_like ( wv_expected_all_peaks ) wv_unused = np . ones_like ( wv_expected_all_peaks , dtype = bool ) minimum_delta_wv = np . ones_like ( wv_expected_all_peaks , dtype = float ) minimum_delta_wv *= np . infty for i in range ( len ( wv_master ) ) : j = np . searchsorted ( wv_expected_all_peaks , wv_master [ i ] ) if j == 0 : delta_wv = abs ( wv_master [ i ] - wv_expected_all_peaks [ j ] ) if delta_wv < delta_wv_max : if wv_unused [ j ] : wv_verified_all_peaks [ j ] = wv_master [ i ] wv_unused [ j ] = False minimum_delta_wv [ j ] = delta_wv else : if delta_wv < minimum_delta_wv [ j ] : wv_verified_all_peaks [ j ] = wv_master [ i ] minimum_delta_wv [ j ] = delta_wv elif j == len ( wv_expected_all_peaks ) : delta_wv = abs ( wv_master [ i ] - wv_expected_all_peaks [ j - 1 ] ) if delta_wv < delta_wv_max : if wv_unused [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] wv_unused [ j - 1 ] = False else : if delta_wv < minimum_delta_wv [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] else : delta_wv1 = abs ( wv_master [ i ] - wv_expected_all_peaks [ j - 1 ] ) delta_wv2 = abs ( wv_master [ i ] - wv_expected_all_peaks [ j ] ) if delta_wv1 < delta_wv2 : if delta_wv1 < delta_wv_max : if wv_unused [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] wv_unused [ j - 1 ] = False else : if delta_wv1 < minimum_delta_wv [ j - 1 ] : wv_verified_all_peaks [ j - 1 ] = wv_master [ i ] else : if delta_wv2 < delta_wv_max : if wv_unused [ j ] : wv_verified_all_peaks [ j ] = wv_master [ i ] wv_unused [ j ] = False else : if delta_wv2 < minimum_delta_wv [ j ] : wv_verified_all_peaks [ j ] = wv_master [ i ] return wv_verified_all_peaks
|
Match two lists with wavelengths .
|
3,730
|
def set_window_geometry ( geometry ) : if geometry is not None : x_geom , y_geom , dx_geom , dy_geom = geometry mngr = plt . get_current_fig_manager ( ) if 'window' in dir ( mngr ) : try : mngr . window . setGeometry ( x_geom , y_geom , dx_geom , dy_geom ) except AttributeError : pass else : pass
|
Set window geometry .
|
3,731
|
def parse_fixed_width ( types , lines ) : values = [ ] line = [ ] for width , parser in types : if not line : line = lines . pop ( 0 ) . replace ( '\n' , '' ) values . append ( parser ( line [ : width ] ) ) line = line [ width : ] return values
|
Parse a fixed width line .
|
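`parse_fixed_width` consumes characters field by field, popping a fresh line from `lines` whenever the current one is exhausted (note the list is mutated). A sketch, using `str.strip` as a stand-in for the `to_str` parser used elsewhere in this file:

```python
lines = ['   12   34stationA  \n']
values = parse_fixed_width([(5, int), (5, int), (10, str.strip)], lines)
assert values == [12, 34, 'stationA']
assert lines == []   # the consumed line was popped off
```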
3,732
|
def _parse_curves ( block , ** kwargs ) : count = int ( block . pop ( 0 ) ) curves = [ ] for i in range ( count ) : for param in [ 'mod_reduc' , 'damping' ] : length , name = parse_fixed_width ( [ ( 5 , int ) , ( 65 , to_str ) ] , block ) curves . append ( site . NonlinearProperty ( name , parse_fixed_width ( length * [ ( 10 , float ) ] , block ) , parse_fixed_width ( length * [ ( 10 , float ) ] , block ) , param ) ) length = int ( block [ 0 ] [ : 5 ] ) soil_types = parse_fixed_width ( ( length + 1 ) * [ ( 5 , int ) ] , block ) [ 1 : ] return { ( soil_types [ i // 2 ] , c . param ) : c for i , c in enumerate ( curves ) }
|
Parse nonlinear curves block .
|
3,733
|
def _parse_soil_profile ( block , units , curves , ** kwargs ) : wt_layer , length , _ , name = parse_fixed_width ( 3 * [ ( 5 , int ) ] + [ ( 55 , to_str ) ] , block ) layers = [ ] soil_types = [ ] for i in range ( length ) : index , soil_idx , thickness , shear_mod , damping , unit_wt , shear_vel = parse_fixed_width ( [ ( 5 , int ) , ( 5 , int ) , ( 15 , to_float ) ] + 4 * [ ( 10 , to_float ) ] , block ) st = site . SoilType ( soil_idx , unit_wt , curves [ ( soil_idx , 'mod_reduc' ) ] , curves [ ( soil_idx , 'damping' ) ] , ) try : st = soil_types [ soil_types . index ( st ) ] except ValueError : soil_types . append ( st ) layers . append ( site . Layer ( st , thickness , shear_vel ) ) if units == 'english' : for st in soil_types : st . unit_wt *= 0.00015708746 for l in layers : l . thickness *= 0.3048 l . shear_vel *= 0.3048 p = site . Profile ( layers ) p . update_layers ( ) p . wt_depth = p [ wt_layer - 1 ] . depth return p
|
Parse soil profile block .
|
3,734
|
def _parse_motion ( block , ** kwargs ) : _ , fa_length , time_step , name , fmt = parse_fixed_width ( [ ( 5 , int ) , ( 5 , int ) , ( 10 , float ) , ( 30 , to_str ) , ( 30 , to_str ) ] , block ) scale , pga , _ , header_lines , _ = parse_fixed_width ( 3 * [ ( 10 , to_float ) ] + 2 * [ ( 5 , int ) ] , block ) m = re . search ( r'(\d+)\w(\d+)\.\d+' , fmt ) count_per_line = int ( m . group ( 1 ) ) width = int ( m . group ( 2 ) ) fname = os . path . join ( os . path . dirname ( kwargs [ 'fname' ] ) , name ) accels = np . genfromtxt ( fname , delimiter = ( count_per_line * [ width ] ) , skip_header = header_lines , ) if np . isfinite ( scale ) : pass elif np . isfinite ( pga ) : scale = pga / np . abs ( accels ) . max ( ) else : scale = 1. accels *= scale m = motion . TimeSeriesMotion ( fname , '' , time_step , accels , fa_length ) return m
|
Parse motion specification block .
|
3,735
|
def _parse_input_loc ( block , profile , ** kwargs ) : layer , wave_field = parse_fixed_width ( 2 * [ ( 5 , int ) ] , block ) return profile . location ( motion . WaveField [ wave_field ] , index = ( layer - 1 ) , )
|
Parse input location block .
|
3,736
|
def _parse_run_control ( block ) : _ , max_iterations , strain_ratio , _ , _ = parse_fixed_width ( 2 * [ ( 5 , int ) ] + [ ( 10 , float ) ] + 2 * [ ( 5 , int ) ] , block ) return propagation . EquivalentLinearCalculation ( strain_ratio , max_iterations , tolerance = 10. )
|
Parse run control block .
|
3,737
|
def blockgen1d ( block , size ) : def numblock ( blk , x ) : a , b = x if b - a <= blk : return [ x ] else : result = [ ] d = int ( b - a ) // 2 for i in imap ( numblock , [ blk , blk ] , [ ( a , a + d ) , ( a + d , b ) ] ) : result . extend ( i ) return result return [ slice ( * l ) for l in numblock ( block , ( 0 , size ) ) ]
|
Compute 1d block intervals to be used by combine .
|
3,738
|
def blockgen ( blocks , shape ) : iterables = [ blockgen1d ( l , s ) for ( l , s ) in zip ( blocks , shape ) ] return product ( * iterables )
|
Generate a list of slice tuples to be used by combine .
|
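`blockgen1d` bisects an interval until every piece fits in the block size, and `blockgen` takes the Cartesian product across axes (`imap` is presumably an alias for `map`, or `itertools.imap` on Python 2). For example:

```python
assert blockgen1d(512, 1024) == [slice(0, 512), slice(512, 1024)]

# One slice tuple per 2-D block of a (1024, 600) array:
tiles = list(blockgen((512, 512), (1024, 600)))
assert tiles[0] == (slice(0, 512), slice(0, 300))
assert len(tiles) == 4
```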
3,739
|
def blk_coverage_1d ( blk , size ) : rem = size % blk maxpix = size - rem return maxpix , rem
|
Return the part of a 1d array covered by a block .
|
3,740
|
def max_blk_coverage ( blk , shape ) : return tuple ( blk_coverage_1d ( b , s ) [ 0 ] for b , s in zip ( blk , shape ) )
|
Return the maximum shape of an array covered by a block .
|
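Two quick numbers make the coverage helpers concrete:

```python
# A block of 3 covers 9 of 10 pixels, leaving a remainder of 1.
assert blk_coverage_1d(3, 10) == (9, 1)
# Per-axis maximum covered shape for a (3, 4) block over a (10, 10) array.
assert max_blk_coverage((3, 4), (10, 10)) == (9, 8)
```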
3,741
|
def blk_nd_short ( blk , shape ) : internals = ( blk_1d_short ( b , s ) for b , s in zip ( blk , shape ) ) return product ( * internals )
|
Iterate through the blocks that strictly cover an array .
|
3,742
|
def blk_nd ( blk , shape ) : internals = ( blk_1d ( b , s ) for b , s in zip ( blk , shape ) ) return product ( * internals )
|
Iterate through the blocks that cover an array .
|
3,743
|
def block_view ( arr , block = ( 3 , 3 ) ) : shape = ( arr . shape [ 0 ] // block [ 0 ] , arr . shape [ 1 ] // block [ 1 ] ) + block strides = ( block [ 0 ] * arr . strides [ 0 ] , block [ 1 ] * arr . strides [ 1 ] ) + arr . strides return ast ( arr , shape = shape , strides = strides )
|
Provide a 2D block view of a 2D array .
|
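`ast` here is presumably `numpy.lib.stride_tricks.as_strided`; the result is a zero-copy view whose first two axes index tiles. A sketch under that assumption:

```python
import numpy
from numpy.lib.stride_tricks import as_strided as ast  # assumed alias

arr = numpy.arange(36).reshape(6, 6)
blocks = block_view(arr, block=(3, 3))
assert blocks.shape == (2, 2, 3, 3)
# blocks[i, j] is the (i, j) tile, sharing memory with arr (no copy).
assert (blocks[0, 1] == arr[0:3, 3:6]).all()
```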
3,744
|
def is_citeable ( publication_info ) : def _item_has_pub_info ( item ) : return all ( key in item for key in ( 'journal_title' , 'journal_volume' ) ) def _item_has_page_or_artid ( item ) : return any ( key in item for key in ( 'page_start' , 'artid' ) ) has_pub_info = any ( _item_has_pub_info ( item ) for item in publication_info ) has_page_or_artid = any ( _item_has_page_or_artid ( item ) for item in publication_info ) return has_pub_info and has_page_or_artid
|
Check some fields in order to determine whether the article is citeable .
|
3,745
|
def add_abstract ( self , abstract , source = None ) : self . _append_to ( 'abstracts' , self . _sourced_dict ( source , value = abstract . strip ( ) , ) )
|
Add abstract .
|
3,746
|
def add_arxiv_eprint ( self , arxiv_id , arxiv_categories ) : self . _append_to ( 'arxiv_eprints' , { 'value' : arxiv_id , 'categories' : arxiv_categories , } ) self . set_citeable ( True )
|
Add arxiv eprint .
|
3,747
|
def add_doi ( self , doi , source = None , material = None ) : if doi is None : return try : doi = idutils . normalize_doi ( doi ) except AttributeError : return if not doi : return dois = self . _sourced_dict ( source , value = doi ) if material is not None : dois [ 'material' ] = material self . _append_to ( 'dois' , dois )
|
Add doi .
|
3,748
|
def make_author ( self , full_name , affiliations = ( ) , roles = ( ) , raw_affiliations = ( ) , source = None , ids = ( ) , emails = ( ) , alternative_names = ( ) ) : builder = SignatureBuilder ( ) builder . set_full_name ( full_name ) for affiliation in affiliations : builder . add_affiliation ( affiliation ) for role in roles : builder . add_inspire_role ( role ) for raw_affiliation in raw_affiliations : builder . add_raw_affiliation ( raw_affiliation , source or self . source ) for id_schema , id_value in ids : if id_schema and id_value : builder . set_uid ( id_value , schema = id_schema ) for email in emails : builder . add_email ( email ) for alternative_name in alternative_names : builder . add_alternative_name ( alternative_name ) return builder . obj
|
Make a subrecord representing an author .
|
3,749
|
def add_book ( self , publisher = None , place = None , date = None ) : imprint = { } if date is not None : imprint [ 'date' ] = normalize_date ( date ) if place is not None : imprint [ 'place' ] = place if publisher is not None : imprint [ 'publisher' ] = publisher self . _append_to ( 'imprints' , imprint )
|
Make a dictionary representing a book .
|
3,750
|
def add_inspire_categories ( self , subject_terms , source = None ) : for category in subject_terms : category_dict = self . _sourced_dict ( source , term = category , ) self . _append_to ( 'inspire_categories' , category_dict )
|
Add inspire categories .
|
3,751
|
def add_keyword ( self , keyword , schema = None , source = None ) : keyword_dict = self . _sourced_dict ( source , value = keyword ) if schema is not None : keyword_dict [ 'schema' ] = schema self . _append_to ( 'keywords' , keyword_dict )
|
Add a keyword .
|
3,752
|
def add_private_note ( self , private_notes , source = None ) : self . _append_to ( '_private_notes' , self . _sourced_dict ( source , value = private_notes , ) )
|
Add private notes .
|
3,753
|
def add_publication_info ( self , year = None , cnum = None , artid = None , page_end = None , page_start = None , journal_issue = None , journal_title = None , journal_volume = None , pubinfo_freetext = None , material = None , parent_record = None , parent_isbn = None , ) : if journal_title and all ( not field for field in ( cnum , artid , journal_issue , journal_volume , page_start , page_end ) ) : self . add_public_note ( 'Submitted to {}' . format ( journal_title ) ) return publication_item = { } for key in ( 'cnum' , 'artid' , 'page_end' , 'page_start' , 'journal_issue' , 'journal_title' , 'journal_volume' , 'year' , 'pubinfo_freetext' , 'material' ) : if locals ( ) [ key ] is not None : publication_item [ key ] = locals ( ) [ key ] if parent_record is not None : parent_item = { '$ref' : parent_record } publication_item [ 'parent_record' ] = parent_item if parent_isbn is not None : publication_item [ 'parent_isbn' ] = normalize_isbn ( parent_isbn ) if page_start and page_end : try : self . add_number_of_pages ( int ( page_end ) - int ( page_start ) + 1 ) except ( TypeError , ValueError ) : pass self . _append_to ( 'publication_info' , publication_item ) if is_citeable ( self . record [ 'publication_info' ] ) : self . set_citeable ( True )
|
Add publication info .
|
3,754
|
def add_thesis ( self , defense_date = None , degree_type = None , institution = None , date = None ) : self . record . setdefault ( 'thesis_info' , { } ) thesis_item = { } for key in ( 'defense_date' , 'date' ) : if locals ( ) [ key ] is not None : thesis_item [ key ] = locals ( ) [ key ] if degree_type is not None : thesis_item [ 'degree_type' ] = degree_type . lower ( ) if institution is not None : thesis_item [ 'institutions' ] = [ { 'name' : institution } ] self . record [ 'thesis_info' ] = thesis_item
|
Add thesis info .
|
3,755
|
def add_license ( self , url = None , license = None , material = None , imposing = None ) : hep_license = { } try : license_from_url = get_license_from_url ( url ) if license_from_url is not None : license = license_from_url except ValueError : pass for key in ( 'url' , 'license' , 'material' , 'imposing' ) : if locals ( ) [ key ] is not None : hep_license [ key ] = locals ( ) [ key ] self . _append_to ( 'license' , hep_license )
|
Add license .
|
3,756
|
def add_public_note ( self , public_note , source = None ) : self . _append_to ( 'public_notes' , self . _sourced_dict ( source , value = public_note , ) )
|
Add public note .
|
3,757
|
def add_title ( self , title , subtitle = None , source = None ) : title_entry = self . _sourced_dict ( source , title = title , ) if subtitle is not None : title_entry [ 'subtitle' ] = subtitle self . _append_to ( 'titles' , title_entry )
|
Add title .
|
3,758
|
def add_title_translation ( self , title , language , source = None ) : title_translation = self . _sourced_dict ( source , title = title , language = language , ) self . _append_to ( 'title_translations' , title_translation )
|
Add title translation .
|
3,759
|
def add_report_number ( self , report_number , source = None ) : self . _append_to ( 'report_numbers' , self . _sourced_dict ( source , value = report_number , ) )
|
Add report numbers .
|
3,760
|
def add_collaboration ( self , collaboration ) : collaborations = normalize_collaboration ( collaboration ) for collaboration in collaborations : self . _append_to ( 'collaborations' , { 'value' : collaboration } )
|
Add collaboration .
|
3,761
|
def add_copyright ( self , material = None , holder = None , statement = None , url = None , year = None ) : copyright = { } for key in ( 'holder' , 'statement' , 'url' ) : if locals ( ) [ key ] is not None : copyright [ key ] = locals ( ) [ key ] if material is not None : copyright [ 'material' ] = material . lower ( ) if year is not None : copyright [ 'year' ] = int ( year ) self . _append_to ( 'copyright' , copyright )
|
Add Copyright .
|
3,762
|
def add_figure ( self , key , url , ** kwargs ) : figure = self . _check_metadata_for_file ( key = key , url = url , ** kwargs ) for dict_key in ( 'caption' , 'label' , 'material' , 'filename' , 'url' , 'original_url' , ) : if kwargs . get ( dict_key ) is not None : figure [ dict_key ] = kwargs [ dict_key ] if key_already_there ( figure , self . record . get ( 'figures' , ( ) ) ) : raise ValueError ( 'There\'s already a figure with the key %s.' % figure [ 'key' ] ) self . _append_to ( 'figures' , figure ) self . add_document
|
Add a figure .
|
3,763
|
def fit_offset_and_rotation ( coords0 , coords1 ) : coords0 = numpy . asarray ( coords0 ) coords1 = numpy . asarray ( coords1 ) cp = coords0 . mean ( axis = 0 ) cq = coords1 . mean ( axis = 0 ) p0 = coords0 - cp q0 = coords1 - cq crossvar = numpy . dot ( numpy . transpose ( p0 ) , q0 ) u , _ , vt = linalg . svd ( crossvar ) d = linalg . det ( u ) * linalg . det ( vt ) if d < 0 : u [ : , - 1 ] = - u [ : , - 1 ] rot = numpy . transpose ( numpy . dot ( u , vt ) ) off = - numpy . dot ( rot , cp ) + cq return off , rot
|
Fit a rotation and a translation between two sets of points .
|
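This is the classic SVD (Kabsch/Procrustes) fit, with the determinant check guarding against reflections. A numerical sanity check under the convention `coords1 = rot @ coords0 + off`:

```python
import numpy

theta = numpy.deg2rad(30.0)
rot_true = numpy.array([[numpy.cos(theta), -numpy.sin(theta)],
                        [numpy.sin(theta),  numpy.cos(theta)]])
coords0 = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [2.0, 3.0]])
coords1 = coords0 @ rot_true.T + numpy.array([5.0, -2.0])

off, rot = fit_offset_and_rotation(coords0, coords1)
assert numpy.allclose(rot, rot_true)
assert numpy.allclose(off, [5.0, -2.0])
assert numpy.allclose(coords0 @ rot.T + off, coords1)
```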
3,764
|
def pil_image3d ( input , size = ( 800 , 600 ) , pcb_rotate = ( 0 , 0 , 0 ) , timeout = 20 , showgui = False ) : f = tempfile . NamedTemporaryFile ( suffix = '.png' , prefix = 'eagexp_' ) output = f . name export_image3d ( input , output = output , size = size , pcb_rotate = pcb_rotate , timeout = timeout , showgui = showgui ) im = Image . open ( output ) return im
|
Same as export_image3d , but no output file is written ; a PIL image object is returned instead
|
3,765
|
def _make_color_fn ( color ) : def _color ( text = "" ) : return ( _color_sep + color + _color_sep2 + text + _color_sep + "default" + _color_sep2 ) return _color
|
Create a function that sets the foreground color .
|
3,766
|
def just_log ( * texts , sep = "" ) : if config . silent : return text = _color_sep + "default" + _color_sep2 + sep . join ( texts ) array = text . split ( _color_sep ) for part in array : parts = part . split ( _color_sep2 , 1 ) if len ( parts ) != 2 or not parts [ 1 ] : continue if not config . color : print ( parts [ 1 ] , end = '' ) else : colors . foreground ( parts [ 0 ] ) print ( parts [ 1 ] , end = '' , flush = colors . is_win32 ) if config . color : colors . foreground ( "default" ) print ( )
|
Log a text without adding the current time .
|
3,767
|
def log ( * texts , sep = "" ) : text = sep . join ( texts ) count = text . count ( "\n" ) just_log ( "\n" * count , * get_time ( ) , text . replace ( "\n" , "" ) , sep = sep )
|
Log a text .
|
3,768
|
def find_files ( globs ) : last_cwd = os . getcwd ( ) os . chdir ( config . cwd ) gex , gin = separate_globs ( globs ) exclude = [ ] for glob in gex : parse_glob ( glob , exclude ) files = [ ] include = [ ] order = 0 for glob in gin : order += 1 array = parse_glob ( glob , include ) base = find_base ( glob ) for file in array : if file not in exclude : files . append ( ( order , base , file ) ) os . chdir ( last_cwd ) return files
|
Find files to include .
|
3,769
|
def src ( globs , ** options ) : if isinstance ( globs , str ) : globs = [ globs ] files = find_files ( globs ) stream = Stream ( ) options [ "cwd" ] = config . cwd if "base" in options : options [ "base" ] = os . path . abspath ( options [ "base" ] ) for infile in files : file = File ( infile [ 2 ] , ** options ) file . relpath = file . path file . order = infile [ 0 ] file . base = options . get ( "base" , infile [ 1 ] ) stream . append_file ( file ) stream . end_of_stream ( ) if options . get ( "read" , True ) : return stream . pipe ( FileReader ( ) ) return stream
|
Read some files and return a stream .
|
3,770
|
def log_to_history ( logger , name ) : def log_to_history_decorator ( method ) : def l2h_method ( self , ri ) : history_header = fits . Header ( ) fh = FITSHistoryHandler ( history_header ) fh . setLevel ( logging . INFO ) logger . addHandler ( fh ) try : result = method ( self , ri ) field = getattr ( result , name , None ) if field : with field . open ( ) as hdulist : hdr = hdulist [ 0 ] . header hdr . extend ( history_header . cards ) return result finally : logger . removeHandler ( fh ) return l2h_method return log_to_history_decorator
|
Decorate a function , adding a logger handler whose records are stored in the FITS header .
|
3,771
|
def create_db_info ( ) : result = { } result [ 'instrument' ] = '' result [ 'uuid' ] = '' result [ 'tags' ] = { } result [ 'type' ] = '' result [ 'mode' ] = '' result [ 'observation_date' ] = "" result [ 'origin' ] = { } return result
|
Create metadata structure
|
3,772
|
def task ( obj = None , deps = None ) : if callable ( obj ) : __task ( obj . __name__ , obj ) return obj def __decorated ( func ) : __task ( obj if obj else func . __name__ , deps , func ) return func return __decorated
|
Decorator for creating a task .
|
3,773
|
def _read_one_byte ( self , fd ) : c = os . read ( fd , 1 ) if not c : raise OSError return c
|
Read a single byte or raise OSError on failure .
|
3,774
|
def arg_file_is_new ( parser , arg , mode = 'w' ) : if os . path . exists ( arg ) : parser . error ( "\nThe file \"%s\"\nalready exists and " "cannot be overwritten!" % arg ) else : handler = open ( arg , mode = mode ) return handler
|
Auxiliary function to give an error if the file already exists .
|
3,775
|
def intersection_spectrail_arcline ( spectrail , arcline ) : expected_x = ( arcline . xlower_line + arcline . xupper_line ) / 2.0 rootfunct = arcline . poly_funct ( spectrail . poly_funct ) rootfunct . coef [ 1 ] -= 1 tmp_xroots = rootfunct . roots ( ) xroot = tmp_xroots [ np . abs ( tmp_xroots - expected_x ) . argmin ( ) ] if np . isreal ( xroot ) : xroot = xroot . real else : raise ValueError ( "xroot=" + str ( xroot ) + " is a complex number" ) yroot = spectrail . poly_funct ( xroot ) return xroot , yroot
|
Compute intersection of spectrum trail with arc line .
|
3,776
|
def offset ( self , offset_value ) : new_instance = deepcopy ( self ) new_instance . poly_funct . coef [ 0 ] += offset_value return new_instance
|
Return a copy of self shifted a constant offset .
|
3,777
|
def compute_operation ( file1 , file2 , operation , output , display , args_z1z2 , args_bbox , args_keystitle , args_geometry ) : with fits . open ( file1 ) as hdulist : image_header1 = hdulist [ 0 ] . header image1 = hdulist [ 0 ] . data . astype ( np . float ) naxis1 = image_header1 [ 'naxis1' ] naxis2 = image_header1 [ 'naxis2' ] if display == 'all' : ximshow_file ( file1 . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 ) with fits . open ( file2 ) as hdulist : image_header2 = hdulist [ 0 ] . header image2 = hdulist [ 0 ] . data . astype ( np . float ) naxis1_ = image_header2 [ 'naxis1' ] naxis2_ = image_header2 [ 'naxis2' ] if display == 'all' : ximshow_file ( file2 . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 ) if naxis1 != naxis1_ : raise ValueError ( "NAXIS1 values are different." ) if naxis2 != naxis2_ : raise ValueError ( "NAXIS2 values are different." ) if operation == "+" : solution = image1 + image2 elif operation == "-" : solution = image1 - image2 elif operation == "*" : solution = image1 * image2 elif operation == "/" : solution = image1 / image2 else : raise ValueError ( "Unexpected operation=" + str ( operation ) ) hdu = fits . PrimaryHDU ( solution . astype ( np . float ) , image_header1 ) hdu . writeto ( output , overwrite = True ) if display in [ 'all' , 'result' ] : ximshow_file ( output . name , args_z1z2 = args_z1z2 , args_bbox = args_bbox , args_keystitle = args_keystitle , args_geometry = args_geometry , debugplot = 12 )
|
Compute output = file1 operation file2 .
|
3,778
|
def robust_std ( x , debug = False ) : x = numpy . asarray ( x ) q25 = numpy . percentile ( x , 25 ) q75 = numpy . percentile ( x , 75 ) sigmag = 0.7413 * ( q75 - q25 ) if debug : print ( 'debug|sigmag -> q25......................:' , q25 ) print ( 'debug|sigmag -> q75......................:' , q75 ) print ( 'debug|sigmag -> Robust standard deviation:' , sigmag ) return sigmag
|
Compute a robust estimator of the standard deviation
|
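The 0.7413 factor is 1/1.349, the reciprocal of the interquartile range of a unit Gaussian, so for clean normal data the estimate converges to the true sigma while staying insensitive to outliers:

```python
import numpy

rng = numpy.random.default_rng(0)
x = rng.normal(loc=10.0, scale=2.0, size=100_000)
x[:10] = 1e6                      # a few wild outliers
print(numpy.std(x))               # blown up by the outliers
print(robust_std(x))              # still close to 2.0
```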
3,779
|
def summary ( x , rm_nan = False , debug = False ) : if type ( x ) is np . ndarray : xx = np . copy ( x ) else : if type ( x ) is list : xx = np . array ( x ) else : raise ValueError ( 'x=' + str ( x ) + ' must be a numpy.ndarray' ) if xx . ndim is not 1 : raise ValueError ( 'xx.dim=' + str ( xx . ndim ) + ' must be 1' ) if rm_nan : xx = xx [ np . logical_not ( np . isnan ( xx ) ) ] npoints = len ( xx ) ok = npoints > 0 result = { 'npoints' : npoints , 'minimum' : np . min ( xx ) if ok else 0 , 'percentile25' : np . percentile ( xx , 25 ) if ok else 0 , 'median' : np . percentile ( xx , 50 ) if ok else 0 , 'mean' : np . mean ( xx ) if ok else 0 , 'percentile75' : np . percentile ( xx , 75 ) if ok else 0 , 'maximum' : np . max ( xx ) if ok else 0 , 'std' : np . std ( xx ) if ok else 0 , 'robust_std' : robust_std ( xx ) if ok else 0 , 'percentile15' : np . percentile ( xx , 15.86553 ) if ok else 0 , 'percentile84' : np . percentile ( xx , 84.13447 ) if ok else 0 } if debug : print ( '>>> ========================================' ) print ( '>>> STATISTICAL SUMMARY:' ) print ( '>>> ----------------------------------------' ) print ( '>>> Number of points.........:' , result [ 'npoints' ] ) print ( '>>> Minimum..................:' , result [ 'minimum' ] ) print ( '>>> 1st Quartile.............:' , result [ 'percentile25' ] ) print ( '>>> Median...................:' , result [ 'median' ] ) print ( '>>> Mean.....................:' , result [ 'mean' ] ) print ( '>>> 3rd Quartile.............:' , result [ 'percentile75' ] ) print ( '>>> Maximum..................:' , result [ 'maximum' ] ) print ( '>>> ----------------------------------------' ) print ( '>>> Standard deviation.......:' , result [ 'std' ] ) print ( '>>> Robust standard deviation:' , result [ 'robust_std' ] ) print ( '>>> 0.1586553 percentile.....:' , result [ 'percentile15' ] ) print ( '>>> 0.8413447 percentile.....:' , result [ 'percentile84' ] ) print ( '>>> ========================================' ) return result
|
Compute basic statistical parameters .
|
3,780
|
def fit_trace_polynomial ( trace , deg , axis = 0 ) : dispaxis = axis_to_dispaxis ( axis ) pfit = numpy . polyfit ( trace [ : , 0 ] , trace [ : , 1 ] , deg ) start = trace [ 0 , 0 ] stop = trace [ - 1 , 0 ] return PolyTrace ( start , stop , axis , pfit )
|
Fit a trace information table to a polynomial .
|
3,781
|
def price_humanized ( value , inst , currency = None ) : return ( natural_number_with_currency ( value , ugettext ( 'CZK' ) if currency is None else currency ) if value is not None else ugettext ( '(None)' ) )
|
Return a humanized price
|
3,782
|
def get_imgid ( self , img ) : imgid = img . filename ( ) hdr = self . get_header ( img ) if 'checksum' in hdr : return hdr [ 'checksum' ] if 'filename' in hdr : return hdr [ 'filename' ] if not imgid : imgid = repr ( img ) return imgid
|
Obtain a unique identifier of the image .
|
3,783
|
def log_starting ( self ) : self . start_time = time . perf_counter ( ) logger . log ( "Starting '" , logger . cyan ( self . name ) , "'..." )
|
Log that the task has started .
|
3,784
|
def log_finished ( self ) : delta = time . perf_counter ( ) - self . start_time logger . log ( "Finished '" , logger . cyan ( self . name ) , "' after " , logger . magenta ( time_to_text ( delta ) ) )
|
Log that this task is done .
|
3,785
|
def call_task_fn ( self ) : if not self . fn : return self . log_finished ( ) future = asyncio . Future ( ) future . add_done_callback ( lambda x : self . log_finished ( ) ) if inspect . iscoroutinefunction ( self . fn ) : f = asyncio . ensure_future ( self . fn ( ) ) f . add_done_callback ( lambda x : self . bind_end ( x . result ( ) , future ) ) else : self . bind_end ( self . fn ( ) , future ) return future
|
Call the function attached to the task .
|
3,786
|
def bind_end ( self , stream , future ) : if not isinstance ( stream , Stream ) : future . set_result ( None ) else : stream . pipe ( TaskEndTransformer ( future ) )
|
Bind a TaskEndTransformer to a stream .
|
3,787
|
async def start_deps ( self , deps ) : deps = list ( filter ( lambda dep : dep not in self . called , deps ) ) self . called += deps runners = list ( filter ( lambda x : x and x . future , map ( lambda dep : pylp . start ( dep ) , deps ) ) ) if len ( runners ) != 0 : await asyncio . wait ( map ( lambda runner : runner . future , runners ) ) future = self . call_task_fn ( ) if future : await future
|
Start running dependencies .
|
3,788
|
def frommembers ( cls , members = ( ) ) : return cls . fromint ( sum ( map ( cls . _map . __getitem__ , set ( members ) ) ) )
|
Create a set from an iterable of members .
|
3,789
|
def frombools ( cls , bools = ( ) ) : return cls . fromint ( sum ( compress ( cls . _atoms , bools ) ) )
|
Create a set from an iterable of boolean evaluable items .
|
3,790
|
def frombits ( cls , bits = '0' ) : if len ( bits ) > cls . _len : raise ValueError ( 'too many bits %r' % ( bits , ) ) return cls . fromint ( bits [ : : - 1 ] , 2 )
|
Create a set from binary string .
|
3,791
|
def atoms ( self , reverse = False ) : if reverse : return filter ( self . __and__ , reversed ( self . _atoms ) ) return filter ( self . __and__ , self . _atoms )
|
Yield the singleton for every set member .
|
3,792
|
def inatoms ( self , reverse = False ) : if reverse : return filterfalse ( self . __and__ , reversed ( self . _atoms ) ) return filterfalse ( self . __and__ , self . _atoms )
|
Yield the singleton for every non - member .
|
3,793
|
def powerset ( self , start = None , excludestart = False ) : if start is None : start = self . infimum other = self . atoms ( ) else : if self | start != self : raise ValueError ( '%r is no subset of %r' % ( start , self ) ) other = self . fromint ( self & ~ start ) . atoms ( ) return map ( self . frombitset , combos . shortlex ( start , list ( other ) ) )
|
Yield combinations from start to self in short lexicographic order .
|
3,794
|
def change ( obj , ** changed_fields ) : obj_field_names = { field . name for field in obj . _meta . fields } | { field . attname for field in obj . _meta . fields } | { 'pk' } for field_name , value in changed_fields . items ( ) : if field_name not in obj_field_names : raise ValueError ( "'{}' is an invalid field name" . format ( field_name ) ) setattr ( obj , field_name , value ) return obj
|
Change the given changed_fields on the object and return the changed object .
|
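A hypothetical Django usage sketch (assumes a `User` model with these fields; `change` mutates the instance in place and returns it for chaining):

```python
user = User.objects.get(pk=1)                              # hypothetical model
change(user, first_name='Ada', last_name='Lovelace').save()

change(user, no_such_field='x')  # raises ValueError: "'no_such_field' is an invalid field name"
```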
3,795
|
def change_and_save ( obj , update_only_changed_fields = False , save_kwargs = None , ** changed_fields ) : from chamber . models import SmartModel change ( obj , ** changed_fields ) if update_only_changed_fields and not isinstance ( obj , SmartModel ) : raise TypeError ( 'update_only_changed_fields can be used only with SmartModel' ) save_kwargs = save_kwargs if save_kwargs is not None else { } if update_only_changed_fields : save_kwargs [ 'update_only_changed_fields' ] = True obj . save ( ** save_kwargs ) return obj
|
Change the given changed_fields on the object , save it and return the changed object .
|
3,796
|
def bulk_change_and_save ( iterable , update_only_changed_fields = False , save_kwargs = None , ** changed_fields ) : return [ change_and_save ( obj , update_only_changed_fields = update_only_changed_fields , save_kwargs = save_kwargs , ** changed_fields ) for obj in iterable ]
|
Change the given changed_fields on each object in the given iterable , save the objects and return the changed objects .
|
3,797
|
def gauss_box_model ( x , amplitude = 1.0 , mean = 0.0 , stddev = 1.0 , hpix = 0.5 ) : z = ( x - mean ) / stddev z2 = z + hpix / stddev z1 = z - hpix / stddev return amplitude * ( norm . cdf ( z2 ) - norm . cdf ( z1 ) )
|
Integrate a Gaussian profile .
|
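Because each sample is the integral of the profile over a bin of width `2 * hpix`, summing unit-spaced samples with the default `hpix=0.5` recovers the total flux: `amplitude` is the integrated area, not the peak height. A check, relying on `scipy.stats.norm` as the function itself does:

```python
import numpy

x = numpy.arange(-10, 11)                     # unit-spaced pixel centers
y = gauss_box_model(x, amplitude=1.0, mean=0.0, stddev=1.5)
assert numpy.isclose(y.sum(), 1.0)            # the CDF differences telescope
assert y.max() < 1.0                          # peak sample < amplitude
```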
3,798
|
def gauss_box_model_deriv ( x , amplitude = 1.0 , mean = 0.0 , stddev = 1.0 , hpix = 0.5 ) : z = ( x - mean ) / stddev z2 = z + hpix / stddev z1 = z - hpix / stddev da = norm . cdf ( z2 ) - norm . cdf ( z1 ) fp2 = norm_pdf_t ( z2 ) fp1 = norm_pdf_t ( z1 ) dl = - amplitude / stddev * ( fp2 - fp1 ) ds = - amplitude / stddev * ( fp2 * z2 - fp1 * z1 ) dd = amplitude / stddev * ( fp2 + fp1 ) return da , dl , ds , dd
|
Derivative of the integral of a Gaussian profile .
|
3,799
|
def find_peaks_spectrum ( sx , nwinwidth , threshold = 0 , debugplot = 0 ) : if type ( sx ) is not np . ndarray : raise ValueError ( "sx=" + str ( sx ) + " must be a numpy.ndarray" ) elif sx . ndim is not 1 : raise ValueError ( "sx.ndim=" + str ( sx . ndim ) + " must be 1" ) sx_shape = sx . shape nmed = nwinwidth // 2 if debugplot >= 10 : print ( 'find_peaks_spectrum> sx shape......:' , sx_shape ) print ( 'find_peaks_spectrum> nwinwidth.....:' , nwinwidth ) print ( 'find_peaks_spectrum> nmed..........:' , nmed ) print ( 'find_peaks_spectrum> data_threshold:' , threshold ) print ( 'find_peaks_spectrum> the first and last' , nmed , 'pixels will be ignored' ) xpeaks = [ ] if sx_shape [ 0 ] < nwinwidth : print ( 'find_peaks_spectrum> sx shape......:' , sx_shape ) print ( 'find_peaks_spectrum> nwinwidth.....:' , nwinwidth ) raise ValueError ( 'sx.shape < nwinwidth' ) i = nmed while i < sx_shape [ 0 ] - nmed : if sx [ i ] > threshold : peak_ok = True j = 0 loop = True while loop : if sx [ i - nmed + j ] > sx [ i - nmed + j + 1 ] : peak_ok = False j += 1 loop = ( j < nmed ) and peak_ok if peak_ok : j = nmed + 1 loop = True while loop : if sx [ i - nmed + j - 1 ] < sx [ i - nmed + j ] : peak_ok = False j += 1 loop = ( j < nwinwidth ) and peak_ok if peak_ok : xpeaks . append ( i ) i += nwinwidth - 1 else : i += 1 else : i += 1 ixpeaks = np . array ( xpeaks ) if debugplot >= 10 : print ( 'find_peaks_spectrum> number of peaks found:' , len ( ixpeaks ) ) print ( ixpeaks ) return ixpeaks
|
Find peaks in array .
|