idx: int64 (0 to 63k)
question: string (lengths 53 to 5.28k)
target: string (lengths 5 to 805)
3,600
def rotate ( self ) : self . _logger . info ( 'Rotating data files. New batch number will be: %s' , self . batchno + 1 ) self . estore . close ( ) self . estore = None self . batchno += 1 self . estore = self . _open_event_store ( )
Rotate the files to disk .
3,601
def _find_batch_containing_event ( self , uuid ) : if self . estore . key_exists ( uuid ) : return self . batchno else : for batchno in range ( self . batchno - 1 , - 1 , - 1 ) : db = self . _open_event_store ( batchno ) with contextlib . closing ( db ) : if db . key_exists ( uuid ) : return batchno return None
Find the batch number that contains a certain event .
3,602
def from_config ( config , ** options ) : required_args = ( 'storage-backends' , ) optional_args = { 'events_per_batch' : 25000 } rconfig . check_config_options ( "SyncedRotationEventStores" , required_args , tuple ( optional_args . keys ( ) ) , options ) if "events_per_batch" in options : events_per_batch = int ( options [ "events_per_batch" ] ) else : events_per_batch = optional_args [ "events_per_batch" ] estore = SyncedRotationEventStores ( events_per_batch ) for section in options [ 'storage-backends' ] . split ( ' ' ) : try : substore = rconfig . construct_eventstore ( config , section ) estore . add_rotated_store ( substore ) except Exception as e : _logger . exception ( 'Could not instantiate substore from' ' section %s' , section ) estore . close ( ) raise return estore
Instantiate a SyncedRotationEventStores from config.
3,603
def hexdump ( stream ) : if isinstance ( stream , six . string_types ) : stream = BytesIO ( stream ) row = 0 while True : data = stream . read ( 16 ) if not data : break hextets = data . encode ( 'hex' ) . ljust ( 32 ) canonical = printable ( data ) print ( '%08x %s %s |%s|' % ( row * 16 , ' ' . join ( hextets [ x : x + 2 ] for x in range ( 0x00 , 0x10 , 2 ) ) , ' ' . join ( hextets [ x : x + 2 ] for x in range ( 0x10 , 0x20 , 2 ) ) , canonical , ) ) row += 1
Display stream contents in hexadecimal and ASCII format. The stream specified must either be a file-like object that supports the read method to receive bytes or it can be a string.
3,604
def printable ( sequence ) : return '' . join ( list ( map ( lambda c : c if c in PRINTABLE else '.' , sequence ) ) )
Return a printable string from the input sequence
3,605
def sparkline ( data ) : min_value = float ( min ( data ) ) max_value = float ( max ( data ) ) steps = ( max_value - min_value ) / float ( len ( SPARKCHAR ) - 1 ) return '' . join ( [ SPARKCHAR [ int ( ( float ( value ) - min_value ) / steps ) ] for value in data ] )
Return a spark line for the given data set .
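For illustration, a brief usage sketch of the sparkline helper above. The SPARKCHAR constant is not shown in these rows, so the eight-level bar ramp used here is an assumption; the function body is repeated from the row above only so the snippet runs on its own.

# assumed value of the module-level constant referenced by sparkline()
SPARKCHAR = '▁▂▃▄▅▆▇█'

def sparkline(data):
    # same logic as the row above: scale each value into an index of SPARKCHAR
    min_value = float(min(data))
    max_value = float(max(data))
    steps = (max_value - min_value) / float(len(SPARKCHAR) - 1)
    return ''.join([SPARKCHAR[int((float(value) - min_value) / steps)] for value in data])

# an evenly spaced series maps onto the full ramp, one character per value
print(sparkline([1, 2, 3, 4, 5, 6, 7, 8]))   # ▁▂▃▄▅▆▇█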
3,606
def get_language_stemmer ( language ) : from lunr . languages import SUPPORTED_LANGUAGES from nltk . stem . snowball import SnowballStemmer return SnowballStemmer ( SUPPORTED_LANGUAGES [ language ] )
Retrieves the SnowballStemmer for a particular language .
3,607
def nltk_stemmer ( stemmer , token , i = None , tokens = None ) : def wrapped_stem ( token , metadata = None ) : return stemmer . stem ( token ) return token . update ( wrapped_stem )
Wrapper around an NLTK SnowballStemmer which includes stop words for each language.
3,608
def is_seq ( obj ) : if not hasattr ( obj , '__iter__' ) : return False if isinstance ( obj , basestring ) : return False return True
Returns True if object is not a string but is iterable
3,609
def register ( cls , func ) : cls . _add_version_info ( func ) cls . _upgrade_funcs . add ( func ) return func
Decorate a migration function with this method to make it available for migrating cases .
3,610
def _add_version_info ( func ) : pattern = r'v(?P<source>\d+)_to_(?P<target>\d+)$' match = re . match ( pattern , func . __name__ ) if not match : raise ValueError ( "migration function name must match " + pattern ) func . source , func . target = map ( int , match . groups ( ) )
Add . source and . target attributes to the registered function .
3,611
def migrate_doc ( self , doc ) : orig_ver = doc . get ( self . version_attribute_name , 0 ) funcs = self . _get_migrate_funcs ( orig_ver , self . target_version ) for func in funcs : func ( self , doc ) doc [ self . version_attribute_name ] = func . target return doc
Migrate the doc from its current version to the target version and return it .
3,612
def _get_func ( cls , source_ver , target_ver ) : matches = ( func for func in cls . _upgrade_funcs if func . source == source_ver and func . target == target_ver ) try : match , = matches except ValueError : raise ValueError ( f"No migration from {source_ver} to {target_ver}" ) return match
Return exactly one function to convert from source to target
3,613
def get_uid ( brain_or_object ) : if is_portal ( brain_or_object ) : return '0' if is_brain ( brain_or_object ) and base_hasattr ( brain_or_object , "UID" ) : return brain_or_object . UID return get_object ( brain_or_object ) . UID ( )
Get the Plone UID for this object
3,614
def get_icon ( brain_or_object , html_tag = True ) : portal_types = get_tool ( "portal_types" ) fti = portal_types . getTypeInfo ( brain_or_object . portal_type ) icon = fti . getIcon ( ) if not icon : return "" url = "%s/%s" % ( get_url ( get_portal ( ) ) , icon ) if not html_tag : return url tag = '<img width="16" height="16" src="{url}" title="{title}" />' . format ( url = url , title = get_title ( brain_or_object ) ) return tag
Get the icon of the content object
3,615
def get_review_history ( brain_or_object , rev = True ) : obj = get_object ( brain_or_object ) review_history = [ ] try : workflow = get_tool ( "portal_workflow" ) review_history = workflow . getInfoFor ( obj , 'review_history' ) except WorkflowException as e : message = str ( e ) logger . error ( "Cannot retrieve review_history on {}: {}" . format ( obj , message ) ) if not isinstance ( review_history , ( list , tuple ) ) : logger . error ( "get_review_history: expected list, recieved {}" . format ( review_history ) ) review_history = [ ] if rev is True : review_history . reverse ( ) return review_history
Get the review history for the given brain or context .
3,616
def get_cancellation_status ( brain_or_object , default = "active" ) : if is_brain ( brain_or_object ) : return getattr ( brain_or_object , "cancellation_state" , default ) workflows = get_workflows_for ( brain_or_object ) if 'bika_cancellation_workflow' not in workflows : return default return get_workflow_status_of ( brain_or_object , 'cancellation_state' )
Get the cancellation_state of an object
3,617
def get_inactive_status ( brain_or_object , default = "active" ) : if is_brain ( brain_or_object ) : return getattr ( brain_or_object , "inactive_state" , default ) workflows = get_workflows_for ( brain_or_object ) if 'bika_inactive_workflow' not in workflows : return default return get_workflow_status_of ( brain_or_object , 'inactive_state' )
Get the inactive_state of an object
3,618
def set_log_level ( verbose , quiet ) : if quiet : verbose = - 1 if verbose < 0 : verbose = logging . CRITICAL elif verbose == 0 : verbose = logging . WARNING elif verbose == 1 : verbose = logging . INFO elif 1 < verbose : verbose = logging . DEBUG LOGGER . setLevel ( verbose )
Sets the logging level of the script based on command line options.
3,619
def detect_pattern_format ( pattern_filename , encoding , on_word_boundaries ) : tsv = True boundaries = on_word_boundaries with open_file ( pattern_filename ) as input_file : for line in input_file : line = line . decode ( encoding ) if line . count ( '\t' ) != 1 : tsv = False if '\\b' in line : boundaries = True if boundaries and not tsv : break return tsv , boundaries
Automatically detects the pattern file format and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not.
3,620
def sub_escapes ( sval ) : sval = sval . replace ( '\\a' , '\a' ) sval = sval . replace ( '\\b' , '\x00' ) sval = sval . replace ( '\\f' , '\f' ) sval = sval . replace ( '\\n' , '\n' ) sval = sval . replace ( '\\r' , '\r' ) sval = sval . replace ( '\\t' , '\t' ) sval = sval . replace ( '\\v' , '\v' ) sval = sval . replace ( '\\\\' , '\\' ) return sval
Process escaped characters in sval .
3,621
def build_trie ( pattern_filename , pattern_format , encoding , on_word_boundaries ) : boundaries = on_word_boundaries if pattern_format == 'auto' or not on_word_boundaries : tsv , boundaries = detect_pattern_format ( pattern_filename , encoding , on_word_boundaries ) if pattern_format == 'auto' : if tsv : pattern_format = 'tsv' else : pattern_format = 'sed' trie = fsed . ahocorasick . AhoCorasickTrie ( ) num_candidates = 0 with open_file ( pattern_filename ) as pattern_file : for lineno , line in enumerate ( pattern_file ) : line = line . decode ( encoding ) . rstrip ( '\n' ) if not line . strip ( ) : continue if pattern_format == 'tsv' : fields = line . split ( '\t' ) if len ( fields ) != 2 : LOGGER . warning ( ( 'skipping line {} of pattern file (not ' 'in tab-separated format): {}' ) . format ( lineno , line ) ) continue before , after = fields elif pattern_format == 'sed' : before = after = None line = line . lstrip ( ) if line [ 0 ] == 's' : delim = line [ 1 ] if delim in '.^$*+?[](){}|\\' : delim = '\\' + delim fields = re . split ( r'(?<!\\){}' . format ( delim ) , line ) if len ( fields ) == 4 : before , after = fields [ 1 ] , fields [ 2 ] before = re . sub ( r'(?<!\\)\\{}' . format ( delim ) , delim , before ) after = re . sub ( r'(?<!\\)\\{}' . format ( delim ) , delim , after ) if before is None or after is None : LOGGER . warning ( ( 'skipping line {} of pattern file (not ' 'in sed format): {}' ) . format ( lineno , line ) ) continue num_candidates += 1 if on_word_boundaries and before != before . strip ( ) : LOGGER . warning ( ( 'before pattern on line {} padded whitespace; ' 'this may interact strangely with the --words ' 'option: {}' ) . format ( lineno , line ) ) before = sub_escapes ( before ) after = sub_escapes ( after ) if boundaries : before = fsed . ahocorasick . boundary_transform ( before , on_word_boundaries ) trie [ before ] = after LOGGER . info ( '{} patterns loaded from {}' . format ( num_candidates , pattern_filename ) ) return trie , boundaries
Constructs a finite state machine for performing string rewriting .
3,622
def warn_prefix_values ( trie ) : for current , _parent in trie . dfs ( ) : if current . has_value and current . longest_prefix is not None : LOGGER . warn ( ( 'pattern {} (value {}) is a superstring of pattern ' '{} (value {}) and will never be matched' ) . format ( current . prefix , current . value , current . longest_prefix . prefix , current . longest_prefix . value ) )
Prints warning messages for every node that has both a value and a longest_prefix .
3,623
def rewrite_str_with_trie ( sval , trie , boundaries = False , slow = False ) : if boundaries : sval = fsed . ahocorasick . boundary_transform ( sval ) if slow : sval = trie . replace ( sval ) else : sval = trie . greedy_replace ( sval ) if boundaries : sval = '' . join ( fsed . ahocorasick . boundary_untransform ( sval ) ) return sval
Rewrites a string using the given trie object .
3,624
def register_function ( cls , fn , label ) : if label in cls . registered_functions : log . warning ( "Overwriting existing registered function %s" , label ) fn . label = label cls . registered_functions [ fn . label ] = fn
Register a function with the pipeline .
3,625
def load ( cls , serialised ) : pipeline = cls ( ) for fn_name in serialised : try : fn = cls . registered_functions [ fn_name ] except KeyError : raise BaseLunrException ( "Cannot load unregistered function {}" . format ( fn_name ) ) else : pipeline . add ( fn ) return pipeline
Loads a previously serialised pipeline .
3,626
def add ( self , * args ) : for fn in args : self . warn_if_function_not_registered ( fn ) self . _stack . append ( fn )
Adds new functions to the end of the pipeline .
3,627
def after ( self , existing_fn , new_fn ) : self . warn_if_function_not_registered ( new_fn ) try : index = self . _stack . index ( existing_fn ) self . _stack . insert ( index + 1 , new_fn ) except ValueError as e : six . raise_from ( BaseLunrException ( "Cannot find existing_fn" ) , e )
Adds a single function after a function that already exists in the pipeline .
3,628
def run ( self , tokens ) : for fn in self . _stack : results = [ ] for i , token in enumerate ( tokens ) : result = fn ( token , i , tokens ) if not result : continue if isinstance ( result , ( list , tuple ) ) : results . extend ( result ) else : results . append ( result ) tokens = results return tokens
Runs the current list of functions that make up the pipeline against the passed tokens .
3,629
def run_string ( self , string , metadata = None ) : token = Token ( string , metadata ) return [ str ( tkn ) for tkn in self . run ( [ token ] ) ]
Convenience method for passing a string through a pipeline and getting strings out . This method takes care of wrapping the passed string in a token and mapping the resulting tokens back to strings .
3,630
def get_client ( ) : with contextlib . suppress ( Exception ) : store = Storage . from_URI ( ) assert isinstance ( store , pmxbot . storage . MongoDBStorage ) return store . db . database . client
Use the same MongoDB client as pmxbot if available .
3,631
def create_db_in_shard ( db_name , shard , client = None ) : client = client or pymongo . MongoClient ( ) res = client . admin . command ( 'flushRouterConfig' ) if not res . get ( 'ok' ) : raise RuntimeError ( "unable to flush router config" ) if shard not in get_ids ( client . config . shards ) : raise ValueError ( f"Unknown shard {shard}" ) if db_name in get_ids ( client . config . databases ) : raise ValueError ( "database already exists" ) client [ db_name ] . foo . insert ( { 'foo' : 1 } ) client [ db_name ] . foo . drop ( ) if client [ db_name ] . collection_names ( ) : raise ValueError ( "database has collections" ) primary = client [ 'config' ] . databases . find_one ( db_name ) [ 'primary' ] if primary != shard : res = client . admin . command ( 'movePrimary' , value = db_name , to = shard ) if not res . get ( 'ok' ) : raise RuntimeError ( str ( res ) ) return ( f"Successfully created {db_name} in {shard} via {client.nodes} " f"from {hostname}" )
In a sharded cluster create a database in a particular shard .
3,632
def luhn_checksum ( number , chars = DIGITS ) : length = len ( chars ) number = [ chars . index ( n ) for n in reversed ( str ( number ) ) ] return ( sum ( number [ : : 2 ] ) + sum ( sum ( divmod ( i * 2 , length ) ) for i in number [ 1 : : 2 ] ) ) % length
Calculates the Luhn checksum for number
3,633
def luhn_calc ( number , chars = DIGITS ) : checksum = luhn_checksum ( str ( number ) + chars [ 0 ] , chars ) return chars [ - checksum ]
Calculate the Luhn check digit for number .
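As a worked example of the two Luhn helpers above (luhn_checksum and luhn_calc): the DIGITS constant is not part of these rows, so the usual base-10 alphabet is assumed, and the function bodies are repeated only so the sketch runs on its own.

DIGITS = '0123456789'  # assumed value of the module-level constant

def luhn_checksum(number, chars=DIGITS):
    # sum the untouched digits plus the digit-sums of every second (doubled) digit, mod the alphabet size
    length = len(chars)
    number = [chars.index(n) for n in reversed(str(number))]
    return (sum(number[::2]) + sum(sum(divmod(i * 2, length)) for i in number[1::2])) % length

def luhn_calc(number, chars=DIGITS):
    # append a zero placeholder, then pick the digit that brings the total checksum to 0
    checksum = luhn_checksum(str(number) + chars[0], chars)
    return chars[-checksum]

# classic Luhn example: doubling every second digit of 7992739871, starting from the
# rightmost, gives 2, 16 -> 7, 6, 4, 18 -> 9; adding the remaining digits 7+9+7+9+7
# brings the total to 67, so the check digit is (10 - 67 % 10) % 10 = 3
assert luhn_calc('7992739871') == '3'
# a complete number whose check digit is valid has checksum 0
assert luhn_checksum('79927398713') == 0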
3,634
def to_decimal ( number , strip = '- ' ) : if isinstance ( number , six . integer_types ) : return str ( number ) number = str ( number ) number = re . sub ( r'[%s]' % re . escape ( strip ) , '' , number ) if number . startswith ( '0x' ) : return to_decimal ( int ( number [ 2 : ] , 16 ) ) elif number . startswith ( 'o' ) : return to_decimal ( int ( number [ 1 : ] , 8 ) ) elif number . startswith ( 'b' ) : return to_decimal ( int ( number [ 1 : ] , 2 ) ) else : return str ( int ( number ) )
Converts a number to a string of decimals in base 10 .
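A small usage sketch of to_decimal above, assuming the function as defined in that row is available in the current namespace:

# hexadecimal, octal and binary prefixes are converted to base-10 strings
assert to_decimal('0x1f') == '31'
assert to_decimal('o17') == '15'
assert to_decimal('b101') == '5'
# characters listed in `strip` (dashes and spaces by default) are removed first
assert to_decimal('12-34 56') == '123456'
# plain integers are returned unchanged, as strings
assert to_decimal(42) == '42'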
3,635
def get_class_method ( cls_or_inst , method_name ) : cls = cls_or_inst if isinstance ( cls_or_inst , type ) else cls_or_inst . __class__ meth = getattr ( cls , method_name , None ) if isinstance ( meth , property ) : meth = meth . fget elif isinstance ( meth , cached_property ) : meth = meth . func return meth
Returns a method from a given class or instance. When the method does not exist it returns None. Also works with properties and cached properties.
3,636
def manage_fits ( list_of_frame ) : import astropy . io . fits as fits import numina . types . dataframe as df refs = [ ] for frame in list_of_frame : if isinstance ( frame , str ) : ref = fits . open ( frame ) refs . append ( ref ) elif isinstance ( frame , fits . HDUList ) : refs . append ( frame ) elif isinstance ( frame , df . DataFrame ) : ref = frame . open ( ) refs . append ( ref ) else : refs . append ( frame ) try : yield refs finally : for obj in refs : obj . close ( )
Manage a list of FITS resources
3,637
def logging_from_debugplot ( debugplot ) : if isinstance ( debugplot , int ) : if abs ( debugplot ) >= 10 : logging . basicConfig ( level = logging . DEBUG ) else : logging . basicConfig ( level = logging . INFO ) else : raise ValueError ( "Unexpected debugplot=" + str ( debugplot ) )
Set debugging level based on debugplot value .
3,638
def ximplot ( ycut , title = None , show = True , plot_bbox = ( 0 , 0 ) , geometry = ( 0 , 0 , 640 , 480 ) , tight_layout = True , debugplot = None ) : if type ( ycut ) is not np . ndarray : raise ValueError ( "ycut=" + str ( ycut ) + " must be a numpy.ndarray" ) elif ycut . ndim is not 1 : raise ValueError ( "ycut.ndim=" + str ( ycut . dim ) + " must be 1" ) nc1 , nc2 = plot_bbox plot_coord = ( nc1 == 0 and nc2 == 0 ) naxis1_ = ycut . size if not plot_coord : if naxis1_ != nc2 - nc1 + 1 : raise ValueError ( "ycut.size=" + str ( ycut . size ) + " does not correspond to bounding box size" ) from numina . array . display . matplotlib_qt import plt if not show : plt . ioff ( ) fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) ax . autoscale ( False ) ymin = ycut . min ( ) ymax = ycut . max ( ) if plot_coord : xmin = - 0.5 xmax = ( naxis1_ - 1 ) + 0.5 xcut = np . arange ( naxis1_ , dtype = np . float ) ax . set_xlabel ( 'image array index in the X direction' ) ax . set_ylabel ( 'pixel value' ) else : xmin = float ( nc1 ) - 0.5 xmax = float ( nc2 ) + 0.5 xcut = np . linspace ( start = nc1 , stop = nc2 , num = nc2 - nc1 + 1 ) ax . set_xlabel ( 'image pixel in the X direction' ) ax . set_ylabel ( 'pixel value' ) ax . set_xlim ( xmin , xmax ) ax . set_ylim ( ymin , ymax ) ax . plot ( xcut , ycut , '-' ) if title is not None : ax . set_title ( title ) set_window_geometry ( geometry ) if show : pause_debugplot ( debugplot , pltshow = show , tight_layout = tight_layout ) else : if tight_layout : plt . tight_layout ( ) return ax
Auxiliary function to display 1d plot .
3,639
def oversample1d ( sp , crval1 , cdelt1 , oversampling = 1 , debugplot = 0 ) : if sp . ndim != 1 : raise ValueError ( 'Unexpected array dimensions' ) naxis1 = sp . size naxis1_over = naxis1 * oversampling cdelt1_over = cdelt1 / oversampling xmin = crval1 - cdelt1 / 2 crval1_over = xmin + cdelt1_over / 2 sp_over = np . zeros ( naxis1_over ) for i in range ( naxis1 ) : i1 = i * oversampling i2 = i1 + oversampling sp_over [ i1 : i2 ] = sp [ i ] if abs ( debugplot ) in ( 21 , 22 ) : crvaln = crval1 + ( naxis1 - 1 ) * cdelt1 crvaln_over = crval1_over + ( naxis1_over - 1 ) * cdelt1_over xover = np . linspace ( crval1_over , crvaln_over , naxis1_over ) ax = ximplotxy ( np . linspace ( crval1 , crvaln , naxis1 ) , sp , 'bo' , label = 'original' , show = False ) ax . plot ( xover , sp_over , 'r+' , label = 'resampled' ) pause_debugplot ( debugplot , pltshow = True ) return sp_over , crval1_over , cdelt1_over
Oversample spectrum .
3,640
def map_borders ( wls ) : midpt_wl = 0.5 * ( wls [ 1 : ] + wls [ : - 1 ] ) all_borders = np . zeros ( ( wls . shape [ 0 ] + 1 , ) ) all_borders [ 1 : - 1 ] = midpt_wl all_borders [ 0 ] = 2 * wls [ 0 ] - midpt_wl [ 0 ] all_borders [ - 1 ] = 2 * wls [ - 1 ] - midpt_wl [ - 1 ] return all_borders
Compute borders of pixels for interpolation .
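For a concrete picture of what map_borders above returns, a short sketch (numpy is assumed to be imported as np and the function above to be available):

import numpy as np

# three pixel centres at wavelengths 1.0, 2.0 and 3.0
wls = np.array([1.0, 2.0, 3.0])
# interior borders are the midpoints (1.5, 2.5); the outer borders are
# extrapolated symmetrically, giving 0.5 and 3.5
print(map_borders(wls))   # [0.5 1.5 2.5 3.5]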
3,641
def import_object ( path ) : spl = path . split ( '.' ) if len ( spl ) == 1 : return importlib . import_module ( path ) cls = spl [ - 1 ] mods = '.' . join ( spl [ : - 1 ] ) mm = importlib . import_module ( mods ) try : obj = getattr ( mm , cls ) return obj except AttributeError : pass rr = importlib . import_module ( path ) return rr
Import an object given its fully qualified name .
3,642
def make_parser ( add_help = True , exclude_args = None ) : if exclude_args is None : exclude_args = [ ] parser = argparse . ArgumentParser ( add_help = add_help ) parser . description = ( "Filter, transform and export a list of JSON " "objects on stdin to JSON or CSV on stdout" ) if "--columns" not in exclude_args : parser . add_argument ( "--columns" , dest = "columns_file" , help = "the JSON file specifying the columns to be output" , ) if ( "-i" not in exclude_args ) and ( "--input" not in exclude_args ) : parser . add_argument ( "-i" , "--input" , help = "read input from the given file instead of from stdin" , dest = 'input_data' , ) if ( "-c" not in exclude_args ) and ( "--column" not in exclude_args ) : parser . add_argument ( "-c" , "--column" , action = ColumnsAction ) if "--pattern" not in exclude_args : parser . add_argument ( "--pattern" , action = ColumnsAction , nargs = '+' ) if "--max-length" not in exclude_args : parser . add_argument ( "--max-length" , action = ColumnsAction ) if "--strip" not in exclude_args : parser . add_argument ( "--strip" , nargs = "?" , action = ColumnsAction ) if "--deduplicate" not in exclude_args : parser . add_argument ( "--deduplicate" , nargs = '?' , action = ColumnsAction ) if "--case-sensitive" not in exclude_args : parser . add_argument ( "--case-sensitive" , nargs = '?' , action = ColumnsAction ) if "--unique" not in exclude_args : parser . add_argument ( "--unique" , nargs = "?" , action = ColumnsAction ) if ( "-p" not in exclude_args ) and ( "--pretty" not in exclude_args ) : parser . add_argument ( "-p" , "--pretty" , action = "store_true" ) return parser
Return an argparse.ArgumentParser object with losser's arguments.
3,643
def parse ( parser = None , args = None ) : if not parser : parser = make_parser ( ) try : parsed_args = parser . parse_args ( args ) except SystemExit as err : raise CommandLineExit ( err . code ) try : columns = parsed_args . columns except AttributeError : columns = collections . OrderedDict ( ) parsed_args . columns = columns for title , spec in columns . items ( ) : if "pattern" not in spec : raise ColumnWithoutPatternError ( 'Column "{0}" needs a pattern' . format ( title ) ) if len ( spec [ "pattern" ] ) == 1 : spec [ "pattern" ] = spec [ "pattern" ] [ 0 ] if columns and parsed_args . columns_file : raise ColumnsAndColumnsFileError ( "You can't use the --column and --columns options together (yet)" ) elif parsed_args . columns_file and not columns : parsed_args . columns = parsed_args . columns_file elif ( not columns ) and ( not parsed_args . columns_file ) : raise NoColumnsError ( "You must give either a --columns or at least one -c/--column " "argument" ) else : assert columns return parsed_args
Parse the command line arguments and return an argparse namespace object.
3,644
def do ( parser = None , args = None , in_ = None , table_function = None ) : in_ = in_ or sys . stdin table_function = table_function or losser . table parsed_args = parse ( parser = parser , args = args ) if parsed_args . input_data : input_data = open ( parsed_args . input_data , 'r' ) . read ( ) else : input_data = in_ . read ( ) dicts = json . loads ( input_data ) csv_string = table_function ( dicts , parsed_args . columns , csv = True , pretty = parsed_args . pretty ) return csv_string
Read command-line args and stdin and return the result.
3,645
def generate_gaussian_profile ( seeing_fwhm ) : FWHM_G = 2 * math . sqrt ( 2 * math . log ( 2 ) ) sigma = seeing_fwhm / FWHM_G amplitude = 1.0 / ( 2 * math . pi * sigma * sigma ) seeing_model = Gaussian2D ( amplitude = amplitude , x_mean = 0.0 , y_mean = 0.0 , x_stddev = sigma , y_stddev = sigma ) return seeing_model
Generate a normalized Gaussian profile from its FWHM
3,646
def generate_moffat_profile ( seeing_fwhm , alpha ) : scale = 2 * math . sqrt ( 2 ** ( 1.0 / alpha ) - 1 ) gamma = seeing_fwhm / scale amplitude = 1.0 / math . pi * ( alpha - 1 ) / gamma ** 2 seeing_model = Moffat2D ( amplitude = amplitude , x_mean = 0.0 , y_mean = 0.0 , gamma = gamma , alpha = alpha ) return seeing_model
Generate a normalized Moffat profile from its FWHM and alpha
3,647
def field_to_dict ( field , instance ) : from django . db . models . fields . related import ManyToManyField return ( many_to_many_field_to_dict ( field , instance ) if isinstance ( field , ManyToManyField ) else field . value_from_object ( instance ) )
Converts a model field to a dictionary
3,648
def model_to_dict ( instance , fields = None , exclude = None ) : return { field . name : field_to_dict ( field , instance ) for field in chain ( instance . _meta . concrete_fields , instance . _meta . many_to_many ) if not should_exclude_field ( field , fields , exclude ) }
The same implementation as Django's model_to_dict, but non-editable fields are included as well.
3,649
def change_and_save ( self , update_only_changed_fields = False , ** changed_fields ) : bulk_change_and_save ( self , update_only_changed_fields = update_only_changed_fields , ** changed_fields ) return self . filter ( )
Changes the given changed_fields on each object in the queryset, saves the objects, and returns the changed objects in the queryset.
3,650
def extent ( self ) : return ( self . intervals [ 1 ] . pix1 - 0.5 , self . intervals [ 1 ] . pix2 - 0.5 , self . intervals [ 0 ] . pix1 - 0.5 , self . intervals [ 0 ] . pix2 - 0.5 , )
Helper for matplotlib imshow
3,651
def readout ( self ) : elec = self . simulate_poisson_variate ( ) elec_pre = self . saturate ( elec ) elec_f = self . pre_readout ( elec_pre ) adu_r = self . base_readout ( elec_f ) adu_p = self . post_readout ( adu_r ) self . clean_up ( ) return adu_p
Readout the detector .
3,652
def parse_arg_line ( fargs ) : fargs = fargs . strip ( ) if fargs == '' : return { } pairs = [ s . strip ( ) for s in fargs . split ( ',' ) ] result = [ ] for p in pairs : fe = p . find ( "=" ) if fe == - 1 : raise ValueError ( "malformed" ) key = p [ : fe ] val = p [ fe + 1 : ] tok = "'{}': {}" . format ( key , val ) result . append ( tok ) tokj = ',' . join ( result ) result = "{{ {0} }}" . format ( tokj ) state = ast . literal_eval ( result ) return state
Parse a limited form of function arguments.
3,653
def natural_number_with_currency ( number , currency , show_decimal_place = True , use_nbsp = True ) : humanized = '{} {}' . format ( numberformat . format ( number = number , decimal_sep = ',' , decimal_pos = 2 if show_decimal_place else 0 , grouping = 3 , thousand_sep = ' ' , force_grouping = True ) , force_text ( currency ) ) return mark_safe ( humanized . replace ( ' ' , '\u00a0' ) ) if use_nbsp else humanized
Return a given number formatted as a price for humans.
3,654
def extract_db_info ( self , obj , keys ) : objl = self . convert ( obj ) result = super ( DataFrameType , self ) . extract_db_info ( objl , keys ) ext = self . datamodel . extractor_map [ 'fits' ] if objl : with objl . open ( ) as hdulist : for field in keys : result [ field ] = ext . extract ( field , hdulist ) tags = result [ 'tags' ] for field in self . tags_keys : tags [ field ] = ext . extract ( field , hdulist ) return result else : return result
Extract tags from serialized file
3,655
def readc ( prompt , default = None , valid = None , question_mark = True ) : cresult = None if question_mark : cquestion_mark = ' ? ' else : cquestion_mark = '' loop = True while loop : if default is None : print ( prompt + cquestion_mark , end = '' ) sys . stdout . flush ( ) else : print ( prompt + ' [' + str ( default ) + ']' + cquestion_mark , end = '' ) sys . stdout . flush ( ) cresult = sys . stdin . readline ( ) . strip ( ) if cresult == '' and default is not None : cresult = str ( default ) if len ( cresult ) == 1 : loop = False if valid is not None : for c in cresult : if c not in str ( valid ) : print ( '*** Error: invalid characters found.' ) print ( '*** Valid characters are:' , valid ) print ( '*** Try again!' ) loop = True else : print ( '*** Error: invalid string length. Try again!' ) return cresult
Return a single character read from keyboard
3,656
def read_value ( ftype , prompt , default = None , minval = None , maxval = None , allowed_single_chars = None , question_mark = True ) : result = None if question_mark : cquestion_mark = ' ? ' else : cquestion_mark = '' if minval is not None : try : iminval = ftype ( minval ) except ValueError : raise ValueError ( "'" + str ( minval ) + "' cannot " + "be used as an minval in readi()" ) else : iminval = None if maxval is not None : try : imaxval = ftype ( maxval ) except ValueError : raise ValueError ( "'" + str ( maxval ) + "' cannot " + "be used as an maxval in readi()" ) else : imaxval = None if minval is None and maxval is None : cminmax = '' elif minval is None : cminmax = ' (number <= ' + str ( imaxval ) + ')' elif maxval is None : cminmax = ' (number >= ' + str ( iminval ) + ')' else : cminmax = ' (' + str ( minval ) + ' <= number <= ' + str ( maxval ) + ')' loop = True while loop : if default is None : print ( prompt + cminmax + cquestion_mark , end = '' ) sys . stdout . flush ( ) else : print ( prompt + cminmax + ' [' + str ( default ) + ']' + cquestion_mark , end = '' ) sys . stdout . flush ( ) cresult = sys . stdin . readline ( ) . strip ( ) if cresult == '' and default is not None : cresult = str ( default ) if len ( cresult ) == 1 : if allowed_single_chars is not None : if cresult in allowed_single_chars : return cresult try : result = ftype ( cresult ) except ValueError : print ( "*** Error: invalid " + str ( ftype ) + " value. Try again!" ) else : if minval is None and maxval is None : loop = False elif minval is None : if result <= imaxval : loop = False else : print ( "*** Error: number out of range. Try again!" ) elif maxval is None : if result >= iminval : loop = False else : print ( "*** Error: number out of range. Try again!" ) else : if iminval <= result <= imaxval : loop = False else : print ( "*** Error: number out of range. Try again!" ) return result
Return value read from keyboard
3,657
def load_product_object ( self , name ) : product_entry = self . products [ name ] product = self . _get_base_object ( product_entry ) return product
Load product object according to name
3,658
def depsolve ( self ) : requires = { } provides = { } for mode , r in self . recipes . items ( ) : l = self . load_recipe_object ( mode ) for field , vv in l . requirements ( ) . items ( ) : if vv . type . isproduct ( ) : name = vv . type . name ( ) pe = ProductEntry ( name , mode , field ) requires [ name ] = pe for field , vv in l . products ( ) . items ( ) : if vv . type . isproduct ( ) : name = vv . type . name ( ) pe = ProductEntry ( name , mode , field ) provides [ name ] = pe return requires , provides
Load all recipes to search for products
3,659
def search_mode_provides ( self , product , pipeline = 'default' ) : pipeline = self . pipelines [ pipeline ] for obj , mode , field in self . iterate_mode_provides ( self . modes , pipeline ) : if obj . name ( ) == product : return ProductEntry ( obj . name ( ) , mode . key , field ) else : raise ValueError ( 'no mode provides %s' % product )
Search the mode that provides a given product
3,660
def select_configuration ( self , obresult ) : logger = logging . getLogger ( __name__ ) logger . debug ( 'calling default configuration selector' ) ref = obresult . get_sample_frame ( ) extr = self . datamodel . extractor_map [ 'fits' ] if ref : result = extr . extract ( 'insconf' , ref ) if result : logger . debug ( 'found insconf config uuid=%s' , result ) if result in self . configurations : return self . configurations [ result ] else : for conf in self . configurations . values ( ) : if conf . name == result : return conf else : raise KeyError ( 'insconf {} does not match any config' . format ( result ) ) date_obs = extr . extract ( 'observation_date' , ref ) for key , conf in self . configurations . items ( ) : if key == 'default' : continue if conf . date_end is not None : upper_t = date_obs < conf . date_end else : upper_t = True if upper_t and ( date_obs >= conf . date_start ) : logger . debug ( 'found date match, config uuid=%s' , key ) return conf else : logger . debug ( 'no match, using default configuration' ) return self . configurations [ 'default' ]
Select instrument configuration based on OB
3,661
def select_profile ( self , obresult ) : logger = logging . getLogger ( __name__ ) logger . debug ( 'calling default profile selector' ) insconf = obresult . configuration if insconf != 'default' : key = insconf date_obs = None keyname = 'uuid' else : ref = obresult . get_sample_frame ( ) if ref is None : key = obresult . instrument date_obs = None keyname = 'name' else : extr = self . datamodel . extractor_map [ 'fits' ] date_obs = extr . extract ( 'observation_date' , ref ) key = extr . extract ( 'insconf' , ref ) if key is not None : keyname = 'uuid' else : key = extr . extract ( 'instrument' , ref ) keyname = 'name' return key , date_obs , keyname
Select instrument profile based on OB
3,662
def get_recipe_object ( self , mode_name , pipeline_name = 'default' ) : active_mode = self . modes [ mode_name ] active_pipeline = self . pipelines [ pipeline_name ] recipe = active_pipeline . get_recipe_object ( active_mode ) return recipe
Build a recipe object from a given mode name
3,663
def pause_debugplot ( debugplot , optional_prompt = None , pltshow = False , tight_layout = True ) : if debugplot not in DEBUGPLOT_CODES : raise ValueError ( 'Invalid debugplot value:' , debugplot ) if debugplot < 0 : debugplot_ = - debugplot pltclose = True else : debugplot_ = debugplot pltclose = False if pltshow : if debugplot_ in [ 1 , 2 , 11 , 12 , 21 , 22 ] : if tight_layout : plt . tight_layout ( ) if debugplot_ in [ 1 , 11 , 21 ] : plt . show ( block = False ) plt . pause ( 0.2 ) elif debugplot_ in [ 2 , 12 , 22 ] : print ( 'Press "q" to continue...' , end = '' ) sys . stdout . flush ( ) plt . show ( ) print ( '' ) else : if debugplot_ in [ 2 , 12 , 22 ] : if optional_prompt is None : print ( 'Press <RETURN> to continue...' , end = '' ) else : print ( optional_prompt , end = '' ) sys . stdout . flush ( ) cdummy = sys . stdin . readline ( ) . strip ( ) if debugplot_ in [ 1 , 2 , 11 , 12 , 21 , 22 ] and pltclose : plt . close ( )
Ask the user to press RETURN to continue after plotting .
3,664
def mode_half_sample ( a , is_sorted = False ) : a = np . asanyarray ( a ) if not is_sorted : sdata = np . sort ( a ) else : sdata = a n = len ( sdata ) if n == 1 : return sdata [ 0 ] elif n == 2 : return 0.5 * ( sdata [ 0 ] + sdata [ 1 ] ) elif n == 3 : ind = - sdata [ 0 ] + 2 * sdata [ 1 ] - sdata [ 2 ] if ind < 0 : return 0.5 * ( sdata [ 0 ] + sdata [ 1 ] ) elif ind > 0 : return 0.5 * ( sdata [ 1 ] + sdata [ 2 ] ) else : return sdata [ 1 ] else : N = int ( math . ceil ( n / 2.0 ) ) w = sdata [ ( N - 1 ) : ] - sdata [ : ( n - N + 1 ) ] ar = w . argmin ( ) return mode_half_sample ( sdata [ ar : ar + N ] , is_sorted = True )
Estimate the mode using the half-sample mode (HSM) method.
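A brief sketch of how the half-sample mode estimator above behaves on a skewed sample (numpy is assumed to be imported as np and mode_half_sample above to be available):

import numpy as np

# most of the mass sits near 1.0, with two outliers in a long right tail
data = np.array([0.9, 1.0, 1.0, 1.1, 1.2, 1.3, 5.0, 9.0])
# the estimator repeatedly keeps the half of the sorted sample with the
# smallest spread: [0.9, 1.0, 1.0, 1.1] -> [1.0, 1.0] -> 1.0,
# so the outliers do not drag the estimate towards the tail
print(mode_half_sample(data))   # 1.0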
3,665
def overplot_ds9reg ( filename , ax ) : with open ( filename ) as f : file_content = f . read ( ) . splitlines ( ) first_line = file_content [ 0 ] if "# Region file format: DS9" not in first_line : raise ValueError ( "Unrecognized ds9 region file format" ) for line in file_content : if line [ 0 : 4 ] == "line" : line_fields = line . split ( ) x1 = float ( line_fields [ 1 ] ) y1 = float ( line_fields [ 2 ] ) x2 = float ( line_fields [ 3 ] ) y2 = float ( line_fields [ 4 ] ) if "color" in line : i = line . find ( "color=" ) color = line [ i + 6 : i + 13 ] else : color = "green" ax . plot ( [ x1 , x2 ] , [ y1 , y2 ] , '-' , color = color ) elif line [ 0 : 4 ] == "text" : line_fields = line . split ( ) x0 = float ( line_fields [ 1 ] ) y0 = float ( line_fields [ 2 ] ) text = line_fields [ 3 ] [ 1 : - 1 ] if "color" in line : i = line . find ( "color=" ) color = line [ i + 6 : i + 13 ] else : color = "green" ax . text ( x0 , y0 , text , fontsize = 8 , bbox = dict ( boxstyle = "round,pad=0.1" , fc = "white" , ec = "grey" , ) , color = color , fontweight = 'bold' , backgroundcolor = 'white' , ha = 'center' ) else : pass
Overplot a ds9 region file .
3,666
def find_peaks_indexes ( arr , window_width = 5 , threshold = 0.0 , fpeak = 0 ) : _check_window_width ( window_width ) if ( fpeak < 0 or fpeak + 1 >= window_width ) : raise ValueError ( 'fpeak must be in the range 0- window_width - 2' ) kernel_peak = kernel_peak_function ( threshold , fpeak ) out = generic_filter ( arr , kernel_peak , window_width , mode = "reflect" ) result , = numpy . nonzero ( out ) return filter_array_margins ( arr , result , window_width )
Find indexes of peaks in a 1d array .
3,667
def refine_peaks ( arr , ipeaks , window_width ) : _check_window_width ( window_width ) step = window_width // 2 ipeaks = filter_array_margins ( arr , ipeaks , window_width ) winoff = numpy . arange ( - step , step + 1 , dtype = 'int' ) peakwin = ipeaks [ : , numpy . newaxis ] + winoff ycols = arr [ peakwin ] ww = return_weights ( window_width ) coff2 = numpy . dot ( ww , ycols . T ) uc = - 0.5 * coff2 [ 1 ] / coff2 [ 2 ] yc = coff2 [ 0 ] + uc * ( coff2 [ 1 ] + coff2 [ 2 ] * uc ) xc = ipeaks + 0.5 * ( window_width - 1 ) * uc return xc , yc
Refine the peak location previously found by find_peaks_indexes
3,668
def complete_config ( config ) : if not config . has_section ( 'run' ) : config . add_section ( 'run' ) values = { 'basedir' : os . getcwd ( ) , 'task_control' : 'control.yaml' , } for k , v in values . items ( ) : if not config . has_option ( 'run' , k ) : config . set ( 'run' , k , v ) return config
Complete config with default values
3,669
def centering_centroid ( data , xi , yi , box , nloop = 10 , toldist = 1e-3 , maxdist = 10.0 ) : cxy = ( xi , yi ) origin = ( xi , yi ) back = 0.0 if nloop == 0 : return xi , yi , 0.0 , 0 , 'not recentering' for i in range ( nloop ) : nxy , back = _centering_centroid_loop_xy ( data , cxy , box ) dst = distance . euclidean ( origin , nxy ) if dst > maxdist : msg = 'maximum distance (%5.2f) from origin reached' % maxdist return cxy [ 0 ] , cxy [ 1 ] , back , 2 , msg dst = distance . euclidean ( nxy , cxy ) if dst < toldist : return nxy [ 0 ] , nxy [ 1 ] , back , 1 , 'converged in iteration %i' % i else : cxy = nxy return nxy [ 0 ] , nxy [ 1 ] , back , 3 , 'not converged in %i iterations' % nloop
Return x, y, background, status and message
3,670
def cache_for ( ** timedelta_kw ) : max_age_timedelta = timedelta ( ** timedelta_kw ) def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , ** kw ) : callback = SetCacheControlHeadersFromTimedeltaCallback ( max_age_timedelta ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , ** kw ) return decorate_func_call return decorate_func
Set Cache - Control headers and Expires - header .
3,671
def cache ( * cache_control_items , ** cache_control_kw ) : cache_control_kw . update ( cache_control_items ) def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , ** kw ) : callback = SetCacheControlHeadersCallback ( ** cache_control_kw ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , ** kw ) return decorate_func_call return decorate_func
Set Cache - Control headers .
3,672
def dont_cache ( ) : def decorate_func ( func ) : @ wraps ( func ) def decorate_func_call ( * a , ** kw ) : callback = SetCacheControlHeadersForNoCachingCallback ( ) registry_provider = AfterThisRequestCallbackRegistryProvider ( ) registry = registry_provider . provide ( ) registry . add ( callback ) return func ( * a , ** kw ) return decorate_func_call return decorate_func
Set Cache - Control headers for no caching
3,673
def filter_empty_parameters ( func ) : @ wraps ( func ) def func_wrapper ( self , * args , ** kwargs ) : my_kwargs = { key : value for key , value in kwargs . items ( ) if value not in EMPTIES } args_is_empty = all ( arg in EMPTIES for arg in args ) if ( { 'source' , 'material' } . issuperset ( my_kwargs ) or not my_kwargs ) and args_is_empty : return return func ( self , * args , ** my_kwargs ) return func_wrapper
Decorator that is filtering empty parameters .
3,674
def author_id_normalize_and_schema ( uid , schema = None ) : def _get_uid_normalized_in_schema ( _uid , _schema ) : regex , template = _RE_AUTHORS_UID [ _schema ] match = regex . match ( _uid ) if match : return template . format ( match . group ( 'uid' ) ) if idutils . is_orcid ( uid ) and schema in ( None , 'ORCID' ) : return idutils . normalize_orcid ( uid ) , 'ORCID' if schema and schema not in _RE_AUTHORS_UID : raise UnknownUIDSchema ( uid ) if schema : normalized_uid = _get_uid_normalized_in_schema ( uid , schema ) if normalized_uid : return normalized_uid , schema else : raise SchemaUIDConflict ( schema , uid ) match_schema , normalized_uid = None , None for candidate_schema in _RE_AUTHORS_UID : candidate_uid = _get_uid_normalized_in_schema ( uid , candidate_schema ) if candidate_uid : if match_schema : raise UnknownUIDSchema ( uid ) match_schema = candidate_schema normalized_uid = candidate_uid if match_schema : return normalized_uid , match_schema raise UnknownUIDSchema ( uid )
Detect and normalize an author UID schema .
3,675
def normalize_arxiv_category ( category ) : category = _NEW_CATEGORIES . get ( category . lower ( ) , category ) for valid_category in valid_arxiv_categories ( ) : if ( category . lower ( ) == valid_category . lower ( ) or category . lower ( ) . replace ( '-' , '.' ) == valid_category . lower ( ) ) : return valid_category return category
Normalize arXiv category to be schema compliant .
3,676
def valid_arxiv_categories ( ) : schema = load_schema ( 'elements/arxiv_categories' ) categories = schema [ 'enum' ] categories . extend ( _NEW_CATEGORIES . keys ( ) ) return categories
List of all arXiv categories that ever existed .
3,677
def classify_field ( value ) : if not ( isinstance ( value , six . string_types ) and value ) : return schema = load_schema ( 'elements/inspire_field' ) inspire_categories = schema [ 'properties' ] [ 'term' ] [ 'enum' ] for inspire_category in inspire_categories : if value . upper ( ) == inspire_category . upper ( ) : return inspire_category category = normalize_arxiv_category ( value ) return ARXIV_TO_INSPIRE_CATEGORY_MAPPING . get ( category , 'Other' )
Normalize value to an Inspire category .
3,678
def split_pubnote ( pubnote_str ) : pubnote = { } parts = pubnote_str . split ( ',' ) if len ( parts ) > 2 : pubnote [ 'journal_title' ] = parts [ 0 ] pubnote [ 'journal_volume' ] = parts [ 1 ] pubnote [ 'page_start' ] , pubnote [ 'page_end' ] , pubnote [ 'artid' ] = split_page_artid ( parts [ 2 ] ) return { key : val for ( key , val ) in six . iteritems ( pubnote ) if val is not None }
Split pubnote into journal information .
3,679
def get_schema_path ( schema , resolved = False ) : def _strip_first_path_elem ( path ) : stripped_path = path . split ( os . path . sep , 1 ) [ 1 : ] return '' . join ( stripped_path ) def _schema_to_normalized_path ( schema ) : path = os . path . normpath ( os . path . sep + urlsplit ( schema ) . path ) if path . startswith ( os . path . sep ) : path = path [ 1 : ] if not path . endswith ( '.json' ) : path += '.json' return path path = _schema_to_normalized_path ( schema ) while path : if resolved : schema_path = os . path . abspath ( os . path . join ( _resolved_schema_root_path , path ) ) else : schema_path = os . path . abspath ( os . path . join ( _schema_root_path , path ) ) if os . path . exists ( schema_path ) : return os . path . abspath ( schema_path ) path = _strip_first_path_elem ( path ) raise SchemaNotFound ( schema = schema )
Retrieve the installed path for the given schema .
3,680
def load_schema ( schema_name , resolved = False ) : schema_data = '' with open ( get_schema_path ( schema_name , resolved ) ) as schema_fd : schema_data = json . loads ( schema_fd . read ( ) ) return schema_data
Load the given schema from wherever it s installed .
3,681
def _load_schema_for_record ( data , schema = None ) : if schema is None : if '$schema' not in data : raise SchemaKeyNotFound ( data = data ) schema = data [ '$schema' ] if isinstance ( schema , six . string_types ) : schema = load_schema ( schema_name = schema ) return schema
Load the schema from a given record .
3,682
def validate ( data , schema = None ) : schema = _load_schema_for_record ( data , schema ) return jsonschema_validate ( instance = data , schema = schema , resolver = LocalRefResolver . from_schema ( schema ) , format_checker = inspire_format_checker , )
Validate the given dictionary against the given schema .
3,683
def get_validation_errors ( data , schema = None ) : schema = _load_schema_for_record ( data , schema ) errors = Draft4Validator ( schema , resolver = LocalRefResolver . from_schema ( schema ) , format_checker = inspire_format_checker ) return errors . iter_errors ( data )
Validation errors for a given record .
3,684
def normalize_collaboration ( collaboration ) : if not collaboration : return [ ] collaboration = collaboration . strip ( ) if collaboration . startswith ( '(' ) and collaboration . endswith ( ')' ) : collaboration = collaboration [ 1 : - 1 ] collaborations = _RE_AND . split ( collaboration ) collaborations = ( _RE_COLLABORATION_LEADING . sub ( '' , collab ) for collab in collaborations ) collaborations = ( _RE_COLLABORATION_TRAILING . sub ( '' , collab ) for collab in collaborations ) return [ collab . strip ( ) for collab in collaborations ]
Normalize collaboration string .
3,685
def get_license_from_url ( url ) : if not url : return split_url = urlsplit ( url , scheme = 'http' ) if split_url . netloc . lower ( ) == 'creativecommons.org' : if 'publicdomain' in split_url . path : match = _RE_PUBLIC_DOMAIN_URL . match ( split_url . path ) if match is None : license = [ 'public domain' ] else : license = [ 'CC0' ] license . extend ( part for part in match . groups ( ) if part ) else : license = [ 'CC' ] match = _RE_LICENSE_URL . match ( split_url . path ) license . extend ( part . upper ( ) for part in match . groups ( ) if part ) elif split_url . netloc == 'arxiv.org' : license = [ 'arXiv' ] match = _RE_LICENSE_URL . match ( split_url . path ) license . extend ( part for part in match . groups ( ) if part ) else : raise ValueError ( 'Unknown license URL' ) return u' ' . join ( license )
Get the license abbreviation from an URL .
3,686
def convert_old_publication_info_to_new ( publication_infos ) : result = [ ] hidden_publication_infos = [ ] for publication_info in publication_infos : _publication_info = copy . deepcopy ( publication_info ) journal_title = _publication_info . get ( 'journal_title' ) try : journal_title = _JOURNALS_RENAMED_OLD_TO_NEW [ journal_title ] _publication_info [ 'journal_title' ] = journal_title result . append ( _publication_info ) continue except KeyError : pass journal_volume = _publication_info . get ( 'journal_volume' ) if journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and journal_volume and len ( journal_volume ) == 4 : try : was_last_century = int ( journal_volume [ : 2 ] ) > 50 except ValueError : pass else : _publication_info [ 'year' ] = int ( '19' + journal_volume [ : 2 ] if was_last_century else '20' + journal_volume [ : 2 ] ) _publication_info [ 'journal_volume' ] = journal_volume [ 2 : ] result . append ( _publication_info ) continue if journal_title and journal_volume and journal_title . lower ( ) not in JOURNALS_IGNORED_IN_OLD_TO_NEW : volume_starts_with_a_letter = _RE_VOLUME_STARTS_WITH_A_LETTER . match ( journal_volume ) volume_ends_with_a_letter = _RE_VOLUME_ENDS_WITH_A_LETTER . match ( journal_volume ) match = volume_starts_with_a_letter or volume_ends_with_a_letter if match : _publication_info . pop ( 'journal_record' , None ) if journal_title in _JOURNALS_RENAMED_OLD_TO_NEW . values ( ) : _publication_info [ 'journal_title' ] = journal_title else : _publication_info [ 'journal_title' ] = '' . join ( [ journal_title , '' if journal_title . endswith ( '.' ) else ' ' , match . group ( 'letter' ) , ] ) _publication_info [ 'journal_volume' ] = match . group ( 'volume' ) hidden = _publication_info . pop ( 'hidden' , None ) if hidden : hidden_publication_infos . append ( _publication_info ) else : result . append ( _publication_info ) for publication_info in hidden_publication_infos : if publication_info not in result : publication_info [ 'hidden' ] = True result . append ( publication_info ) return result
Convert a publication_info value from the old format to the new .
3,687
def convert_new_publication_info_to_old ( publication_infos ) : def _needs_a_hidden_pubnote ( journal_title , journal_volume ) : return ( journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE [ journal_title ] ) result = [ ] for publication_info in publication_infos : _publication_info = copy . deepcopy ( publication_info ) journal_title = _publication_info . get ( 'journal_title' ) try : journal_title = _JOURNALS_RENAMED_NEW_TO_OLD [ journal_title ] _publication_info [ 'journal_title' ] = journal_title result . append ( _publication_info ) continue except KeyError : pass journal_volume = _publication_info . get ( 'journal_volume' ) year = _publication_info . get ( 'year' ) if ( journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and journal_volume and len ( journal_volume ) == 2 ) : two_digit_year = str ( year ) [ 2 : ] _publication_info [ 'journal_volume' ] = '' . join ( [ two_digit_year , journal_volume ] ) result . append ( _publication_info ) continue if journal_title and journal_volume : match = _RE_TITLE_ENDS_WITH_A_LETTER . match ( journal_title ) if match and _needs_a_hidden_pubnote ( journal_title , journal_volume ) : _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = journal_volume + match . group ( 'letter' ) result . append ( _publication_info ) _publication_info = copy . deepcopy ( publication_info ) _publication_info [ 'hidden' ] = True _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = match . group ( 'letter' ) + journal_volume elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER : _publication_info [ 'journal_title' ] = match . group ( 'title' ) _publication_info [ 'journal_volume' ] = match . group ( 'letter' ) + journal_volume result . append ( _publication_info ) return result
Convert back a publication_info value from the new format to the old .
3,688
def fix_reference_url ( url ) : new_url = url new_url = fix_url_bars_instead_of_slashes ( new_url ) new_url = fix_url_add_http_if_missing ( new_url ) new_url = fix_url_replace_tilde ( new_url ) try : rfc3987 . parse ( new_url , rule = "URI" ) return new_url except ValueError : return url
Used to parse an incorect url to try to fix it with the most common ocurrences for errors . If the fixed url is still incorrect it returns None .
3,689
def is_arxiv ( obj ) : arxiv_test = obj . split ( ) if not arxiv_test : return False matched_arxiv = ( RE_ARXIV_PRE_2007_CLASS . match ( arxiv_test [ 0 ] ) or RE_ARXIV_POST_2007_CLASS . match ( arxiv_test [ 0 ] ) ) if not matched_arxiv : return False if not matched_arxiv . group ( 'category' ) : return True valid_arxiv_categories_lower = [ category . lower ( ) for category in valid_arxiv_categories ( ) ] category = matched_arxiv . group ( 'category' ) . lower ( ) return ( category in valid_arxiv_categories_lower or category . replace ( '-' , '.' ) in valid_arxiv_categories_lower )
Return True if obj contains an arXiv identifier .
3,690
def normalize_arxiv ( obj ) : obj = obj . split ( ) [ 0 ] matched_arxiv_pre = RE_ARXIV_PRE_2007_CLASS . match ( obj ) if matched_arxiv_pre : return ( '/' . join ( matched_arxiv_pre . group ( "extraidentifier" , "identifier" ) ) ) . lower ( ) matched_arxiv_post = RE_ARXIV_POST_2007_CLASS . match ( obj ) if matched_arxiv_post : return matched_arxiv_post . group ( "identifier" ) return None
Return a normalized arXiv identifier from obj .
3,691
def resolve_remote ( self , uri ) : try : return super ( LocalRefResolver , self ) . resolve_remote ( uri ) except ValueError : return super ( LocalRefResolver , self ) . resolve_remote ( 'file://' + get_schema_path ( uri . rsplit ( '.json' , 1 ) [ 0 ] ) )
Resolve a uri or relative path to a schema .
3,692
def set_path ( self , path ) : if os . path . isabs ( path ) : path = os . path . normpath ( os . path . join ( self . cwd , path ) ) self . path = path self . relative = os . path . relpath ( self . path , self . base )
Set the path of the file .
3,693
def clone ( self , path = None , * , with_contents = True , ** options ) : file = File ( path if path else self . path , cwd = options . get ( "cwd" , self . cwd ) ) file . base = options . get ( "base" , self . base ) if with_contents : file . contents = options . get ( "contents" , self . contents ) return file
Clone the file .
3,694
def launch_cli ( ) : parser = argparse . ArgumentParser ( prog = "pylp" , description = "Call some tasks defined in your pylpfile." ) parser . add_argument ( "-v" , "--version" , action = "version" , version = "Pylp %s" % version , help = "get the Pylp version and exit" ) parser . add_argument ( '--pylpfile' , nargs = 1 , help = "manually set path of pylpfile" , metavar = "<path>" ) parser . add_argument ( '--cwd' , nargs = 1 , help = "manually set the CWD" , metavar = "<dir path>" ) parser . add_argument ( '--no-color' , action = "store_false" , help = "force Pylp to not display colors" ) parser . add_argument ( '--silent' , action = "store_true" , help = "disable all Pylp logging" ) parser . add_argument ( 'tasks' , nargs = "*" , default = [ "default" ] , help = "tasks to execute (if none, execute the 'default' task)" , metavar = "<task>" ) args = parser . parse_args ( ) if args . cwd : config . cwd = args . cwd [ 0 ] else : config . cwd = os . getcwd ( ) if args . pylpfile : pylpfile = args . pylpfile [ 0 ] if not args . pylpfile : pylpfile = path . join ( config . cwd , "pylpfile.py" ) elif not args . cwd : config . cwd = path . dirname ( pylpfile ) config . color = args . no_color config . silent = args . silent run ( pylpfile , args . tasks )
Launch the CLI .
3,695
def add_affiliation ( self , value , curated_relation = None , record = None ) : if value : affiliation = { 'value' : value } if record : affiliation [ 'record' ] = record if curated_relation is not None : affiliation [ 'curated_relation' ] = curated_relation self . _ensure_list_field ( 'affiliations' , affiliation )
Add an affiliation .
3,696
def set_uid ( self , uid , schema = None ) : try : uid , schema = author_id_normalize_and_schema ( uid , schema ) except UnknownUIDSchema : pass self . _ensure_field ( 'ids' , [ ] ) self . obj [ 'ids' ] = [ id_ for id_ in self . obj [ 'ids' ] if id_ . get ( 'schema' ) != schema ] self . _add_uid ( uid , schema )
Set a unique ID .
3,697
def singleton ( klass ) : instances = { } def getinstance ( * args , ** kwargs ) : if klass not in instances : instances [ klass ] = klass ( * args , ** kwargs ) return instances [ klass ] return wraps ( klass ) ( getinstance )
Create singleton from class
3,698
def translation_activate_block ( function = None , language = None ) : def _translation_activate_block ( function ) : def _decorator ( * args , ** kwargs ) : tmp_language = translation . get_language ( ) try : translation . activate ( language or settings . LANGUAGE_CODE ) return function ( * args , ** kwargs ) finally : translation . activate ( tmp_language ) return wraps ( function ) ( _decorator ) if function : return _translation_activate_block ( function ) else : return _translation_activate_block
Activate language only for one method or function
3,699
async def uv_protection_window ( self , low : float = 3.5 , high : float = 3.5 ) -> dict : return await self . request ( 'get' , 'protection' , params = { 'from' : str ( low ) , 'to' : str ( high ) } )
Get data on when a UV protection window occurs.