idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
54,600 | def get_capabilities_by_type ( self , strict_type_matching : bool = False ) -> Dict [ Type , Dict [ str , Dict [ str , Parser ] ] ] : check_var ( strict_type_matching , var_types = bool , var_name = 'strict_matching' ) res = dict ( ) for typ in self . get_all_supported_types ( ) : res [ typ ] = self . get_capabilities_for_type ( typ , strict_type_matching ) return res | For all types that are supported lists all extensions that can be parsed into such a type . For each extension provides the list of parsers supported . The order is most pertinent first |
54,601 | def get_capabilities_by_ext ( self , strict_type_matching : bool = False ) -> Dict [ str , Dict [ Type , Dict [ str , Parser ] ] ] : check_var ( strict_type_matching , var_types = bool , var_name = 'strict_matching' ) res = dict ( ) for ext in self . get_all_supported_exts_for_type ( type_to_match = JOKER , strict = strict_type_matching ) : res [ ext ] = self . get_capabilities_for_ext ( ext , strict_type_matching ) return res | For all extensions that are supported lists all types that can be parsed from this extension . For each type provide the list of parsers supported . The order is most pertinent first |
54,602 | def get_capabilities_for_ext ( self , ext , strict_type_matching : bool = False ) -> Dict [ Type , Dict [ str , Parser ] ] : r = dict ( ) for typ in self . get_all_supported_types_for_ext ( ext ) : matching = self . find_all_matching_parsers ( strict_type_matching , desired_type = typ , required_ext = ext ) [ 0 ] r [ typ ] = dict ( ) exact = list ( reversed ( matching [ 2 ] ) ) if len ( exact ) > 0 : r [ typ ] [ '1_exact_match' ] = exact approx = list ( reversed ( matching [ 1 ] ) ) if len ( approx ) > 0 : r [ typ ] [ '2_approx_match' ] = approx generic = list ( reversed ( matching [ 0 ] ) ) if len ( generic ) > 0 : r [ typ ] [ '3_generic' ] = generic return r | Utility method to return for a given file extension all known ways to parse a file with this extension organized by target object type . |
54,603 | def get_all_supported_types_for_ext ( self , ext_to_match : str , strict_type_matching : bool = False ) -> Set [ Type ] : matching = self . find_all_matching_parsers ( required_ext = ext_to_match , strict = strict_type_matching ) [ 0 ] return { typ for types in [ p . supported_types for p in ( matching [ 0 ] + matching [ 1 ] + matching [ 2 ] ) ] for typ in types } | Utility method to return the set of all supported types that may be parsed from files with the given extension . ext = JOKER is a joker that means all extensions |
54,604 | def get_all_supported_exts_for_type ( self , type_to_match : Type [ Any ] , strict : bool ) -> Set [ str ] : matching = self . find_all_matching_parsers ( desired_type = type_to_match , strict = strict ) [ 0 ] return { ext for exts in [ p . supported_exts for p in ( matching [ 0 ] + matching [ 1 ] + matching [ 2 ] ) ] for ext in exts } | Utility method to return the set of all supported file extensions that may be converted to objects of the given type . type = JOKER is a joker that means all types |
54,605 | def find_all_matching_parsers ( self , strict : bool , desired_type : Type [ Any ] = JOKER , required_ext : str = JOKER ) -> Tuple [ Tuple [ List [ Parser ] , List [ Parser ] , List [ Parser ] ] , List [ Parser ] , List [ Parser ] , List [ Parser ] ] : check_var ( strict , var_types = bool , var_name = 'strict' ) desired_type = get_validated_type ( desired_type , 'desired_type' , enforce_not_joker = False ) matching_parsers_generic = [ ] matching_parsers_approx = [ ] matching_parsers_exact = [ ] no_type_match_but_ext_match = [ ] no_ext_match_but_type_match = [ ] no_match = [ ] for p in self . _generic_parsers : match = p . is_able_to_parse ( desired_type = desired_type , desired_ext = required_ext , strict = strict ) if match : if is_any_type ( desired_type ) : matching_parsers_exact . append ( p ) else : matching_parsers_generic . append ( p ) else : if p . is_able_to_parse ( desired_type = desired_type , desired_ext = JOKER , strict = strict ) : no_ext_match_but_type_match . append ( p ) else : pass for p in self . _specific_parsers : match , exact_match = p . is_able_to_parse_detailed ( desired_type = desired_type , desired_ext = required_ext , strict = strict ) if match : if is_any_type ( desired_type ) : no_type_match_but_ext_match . append ( p ) else : if exact_match is None or exact_match : matching_parsers_exact . append ( p ) else : matching_parsers_approx . append ( p ) else : if p . is_able_to_parse ( desired_type = JOKER , desired_ext = required_ext , strict = strict ) : no_type_match_but_ext_match . append ( p ) elif p . is_able_to_parse ( desired_type = desired_type , desired_ext = JOKER , strict = strict ) : no_ext_match_but_type_match . append ( p ) else : no_match . 
append ( p ) return ( matching_parsers_generic , matching_parsers_approx , matching_parsers_exact ) , no_type_match_but_ext_match , no_ext_match_but_type_match , no_match | Implementation of the parent method by lookin into the registry to find the most appropriate parsers to use in order |
54,606 | def _build_parser_for_fileobject_and_desiredtype ( self , obj_on_filesystem : PersistedObject , object_typ : Type [ T ] , logger : Logger = None ) -> Dict [ Type , Parser ] : parsers = OrderedDict ( ) errors = OrderedDict ( ) try : p = self . __build_parser_for_fileobject_and_desiredtype ( obj_on_filesystem , object_typ = object_typ , logger = logger ) parsers [ object_typ ] = p except NoParserFoundForObjectExt as e : logger . warning ( "{} - {}" . format ( type ( e ) . __name__ , e ) ) errors [ e ] = e except NoParserFoundForObjectType as f : logger . warning ( "{} - {}" . format ( type ( f ) . __name__ , f ) ) errors [ f ] = f if is_collection ( object_typ , strict = True ) : if len ( errors ) > 0 : raise next ( iter ( errors . values ( ) ) ) else : return parsers subclasses = get_all_subclasses ( object_typ ) for subclass in subclasses [ 0 : GLOBAL_CONFIG . dict_to_object_subclass_limit ] : try : parsers [ subclass ] = self . __build_parser_for_fileobject_and_desiredtype ( obj_on_filesystem , object_typ = subclass , logger = logger ) except NoParserFoundForObjectExt as e : logger . warning ( "{} - {}" . format ( type ( e ) . __name__ , e ) ) errors [ e ] = e except NoParserFoundForObjectType as f : logger . warning ( "{} - {}" . format ( type ( f ) . __name__ , f ) ) errors [ f ] = f if len ( subclasses ) > GLOBAL_CONFIG . dict_to_object_subclass_limit : warn ( 'Type {} has more than {} subclasses, only {} were tried to convert it, with no success. You ' 'can raise this limit by setting the appropriate option with `parsyfiles_global_config()`' '' . format ( object_typ , len ( subclasses ) , GLOBAL_CONFIG . dict_to_object_subclass_limit ) ) return parsers | Builds a parser for each subtype of object_typ |
54,607 | def get_all_conversion_chains_to_type ( self , to_type : Type [ Any ] ) -> Tuple [ List [ Converter ] , List [ Converter ] , List [ Converter ] ] : return self . get_all_conversion_chains ( to_type = to_type ) | Utility method to find all converters to a given type |
54,608 | def get_all_conversion_chains_from_type ( self , from_type : Type [ Any ] ) -> Tuple [ List [ Converter ] , List [ Converter ] , List [ Converter ] ] : return self . get_all_conversion_chains ( from_type = from_type ) | Utility method to find all converters from a given type . |
54,609 | def get_all_conversion_chains ( self , from_type : Type [ Any ] = JOKER , to_type : Type [ Any ] = JOKER ) -> Tuple [ List [ Converter ] , List [ Converter ] , List [ Converter ] ] : pass | Utility method to find all converters or conversion chains matching the provided query . |
54,610 | def get_all_conversion_chains ( self , from_type : Type [ Any ] = JOKER , to_type : Type [ Any ] = JOKER ) -> Tuple [ List [ Converter ] , List [ Converter ] , List [ Converter ] ] : if from_type is JOKER and to_type is JOKER : matching_dest_generic = self . _generic_nonstrict_conversion_chains . copy ( ) + self . _generic_conversion_chains . copy ( ) matching_dest_approx = [ ] matching_dest_exact = self . _specific_non_strict_conversion_chains . copy ( ) + self . _specific_conversion_chains . copy ( ) else : matching_dest_generic , matching_dest_approx , matching_dest_exact = [ ] , [ ] , [ ] to_type = get_validated_type ( to_type , 'to_type' , enforce_not_joker = False ) for c in ( self . _generic_nonstrict_conversion_chains + self . _generic_conversion_chains ) : match , source_exact , dest_exact = c . is_able_to_convert_detailed ( strict = self . strict , from_type = from_type , to_type = to_type ) if match : if is_any_type ( to_type ) : matching_dest_exact . append ( c ) else : matching_dest_generic . append ( c ) for c in ( self . _specific_non_strict_conversion_chains + self . _specific_conversion_chains ) : match , source_exact , dest_exact = c . is_able_to_convert_detailed ( strict = self . strict , from_type = from_type , to_type = to_type ) if match : if not is_any_type ( to_type ) : if dest_exact : matching_dest_exact . append ( c ) else : matching_dest_approx . append ( c ) else : pass return matching_dest_generic , matching_dest_approx , matching_dest_exact | Utility method to find matching converters or conversion chains . |
54,611 | def find_all_matching_parsers ( self , strict : bool , desired_type : Type [ Any ] = JOKER , required_ext : str = JOKER ) -> Tuple [ Tuple [ List [ Parser ] , List [ Parser ] , List [ Parser ] ] , List [ Parser ] , List [ Parser ] , List [ Parser ] ] : desired_type = get_validated_type ( desired_type , 'desired_type' , enforce_not_joker = False ) matching , no_type_match_but_ext_match , no_ext_match_but_type_match , no_match = super ( ParserRegistryWithConverters , self ) . find_all_matching_parsers ( strict = self . is_strict , desired_type = desired_type , required_ext = required_ext ) matching_p_generic , matching_p_approx , matching_p_exact = matching if desired_type is JOKER : parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx + matching_p_exact else : parsers_to_complete_with_converters = no_type_match_but_ext_match + matching_p_generic + matching_p_approx matching_c_generic_to_type , matching_c_approx_to_type , matching_c_exact_to_type = self . get_all_conversion_chains_to_type ( to_type = desired_type ) all_matching_converters = matching_c_generic_to_type + matching_c_approx_to_type + matching_c_exact_to_type matching_p_generic_with_approx_chain , matching_p_approx_with_approx_chain , matching_p_exact_with_approx_chain = [ ] , [ ] , [ ] for parser in reversed ( parsers_to_complete_with_converters ) : for typ in parser . supported_types : match_results = self . 
_complete_parsers_with_converters ( parser , typ , desired_type , matching_c_generic_to_type , matching_c_approx_to_type , matching_c_exact_to_type ) matching_p_generic = match_results [ 1 ] + matching_p_generic matching_p_approx = match_results [ 3 ] + matching_p_approx matching_p_exact = match_results [ 5 ] + matching_p_exact matching_p_generic_with_approx_chain = match_results [ 0 ] + matching_p_generic_with_approx_chain matching_p_approx_with_approx_chain = match_results [ 2 ] + matching_p_approx_with_approx_chain matching_p_exact_with_approx_chain = match_results [ 4 ] + matching_p_exact_with_approx_chain matching_p_generic = matching_p_generic_with_approx_chain + matching_p_generic matching_p_approx = matching_p_approx_with_approx_chain + matching_p_approx matching_p_exact = matching_p_exact_with_approx_chain + matching_p_exact for parser in reversed ( no_match ) : for typ in parser . supported_types : for converter in reversed ( all_matching_converters ) : if converter . is_able_to_convert ( self . is_strict , from_type = typ , to_type = desired_type ) : if ParsingChain . are_worth_chaining ( parser , typ , converter ) : no_ext_match_but_type_match . insert ( 0 , ParsingChain ( parser , converter , strict = self . is_strict , base_parser_chosen_dest_type = typ ) ) matching_p_generic = sorted ( matching_p_generic , key = len , reverse = True ) matching_p_approx = sorted ( matching_p_approx , key = len , reverse = True ) matching_p_exact = sorted ( matching_p_exact , key = len , reverse = True ) return ( matching_p_generic , matching_p_approx , matching_p_exact ) , no_type_match_but_ext_match , no_ext_match_but_type_match , no_match | Overrides the parent method to find parsers appropriate to a given extension and type . This leverages both the parser registry and the converter registry to propose parsing chains in a relevant order |
54,612 | def _complete_parsers_with_converters ( self , parser , parser_supported_type , desired_type , matching_c_generic_to_type , matching_c_approx_to_type , matching_c_exact_to_type ) : matching_p_generic , matching_p_generic_with_approx_chain , matching_p_approx , matching_p_approx_with_approx_chain , matching_p_exact , matching_p_exact_with_approx_chain = [ ] , [ ] , [ ] , [ ] , [ ] , [ ] desired_types = get_alternate_types_resolving_forwardref_union_and_typevar ( desired_type ) for desired_type in desired_types : desired_type = get_validated_type ( desired_type , 'desired_type' , enforce_not_joker = False ) if not is_any_type ( parser_supported_type ) : for cv in matching_c_generic_to_type : if cv . is_able_to_convert ( strict = True , from_type = parser_supported_type , to_type = desired_type ) : if ParsingChain . are_worth_chaining ( parser , parser_supported_type , cv ) : chain = ParsingChain ( parser , cv , strict = True , base_parser_chosen_dest_type = parser_supported_type ) matching_p_generic . append ( chain ) elif ( not self . strict ) and cv . is_able_to_convert ( strict = False , from_type = parser_supported_type , to_type = desired_type ) : if ParsingChain . are_worth_chaining ( parser , parser_supported_type , cv ) : chain = ParsingChain ( parser , cv , strict = False , base_parser_chosen_dest_type = parser_supported_type ) matching_p_generic_with_approx_chain . append ( chain ) for cv in matching_c_approx_to_type : if cv . is_able_to_convert ( strict = True , from_type = parser_supported_type , to_type = desired_type ) : if ParsingChain . are_worth_chaining ( parser , parser_supported_type , cv ) : chain = ParsingChain ( parser , cv , strict = True , base_parser_chosen_dest_type = parser_supported_type ) matching_p_approx . append ( chain ) elif ( not self . strict ) and cv . is_able_to_convert ( strict = False , from_type = parser_supported_type , to_type = desired_type ) : if ParsingChain . 
are_worth_chaining ( parser , parser_supported_type , cv ) : chain = ParsingChain ( parser , cv , strict = False , base_parser_chosen_dest_type = parser_supported_type ) matching_p_approx_with_approx_chain . append ( chain ) for cv in matching_c_exact_to_type : if cv . is_able_to_convert ( strict = True , from_type = parser_supported_type , to_type = desired_type ) : if ParsingChain . are_worth_chaining ( parser , parser_supported_type , cv ) : chain = ParsingChain ( parser , cv , strict = True , base_parser_chosen_dest_type = parser_supported_type ) matching_p_exact . append ( chain ) elif ( not self . strict ) and cv . is_able_to_convert ( strict = False , from_type = parser_supported_type , to_type = desired_type ) : if ParsingChain . are_worth_chaining ( parser , parser_supported_type , cv ) : chain = ParsingChain ( parser , cv , strict = False , base_parser_chosen_dest_type = parser_supported_type ) matching_p_exact_with_approx_chain . append ( chain ) return matching_p_generic_with_approx_chain , matching_p_generic , matching_p_approx_with_approx_chain , matching_p_approx , matching_p_exact_with_approx_chain , matching_p_exact | Internal method to create parsing chains made of a parser and converters from the provided lists . Once again a JOKER for a type means joker here . |
54,613 | def get_changed_files ( include_staged = False ) : process = subprocess . Popen ( [ 'git' , 'status' , '--porcelain' ] , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) stdout , __ = process . communicate ( ) if process . returncode != 0 : raise ValueError ( stdout ) files = [ ] for line in stdout . decode ( ) . split ( '\n' ) : if not line or line . startswith ( '#' ) : continue assert line [ 2 ] == ' ' if not include_staged and line [ 1 ] == ' ' : continue files . append ( line [ 3 : ] ) return files | Returns a list of the files that changed in the Git repository . This is used to check if the files that are supposed to be upgraded have changed . If so the upgrade will be prevented . |
54,614 | def _parse_doc ( docs ) : name = "(?:[a-zA-Z][a-zA-Z0-9-_]*)" re_var = re . compile ( r"^ *(%s)(?: */(%s))? *:(.*)$" % ( name , name ) ) re_opt = re . compile ( r"^ *(?:(-[a-zA-Z0-9]),? +)?--(%s)(?: *=(%s))? *:(.*)$" % ( name , name ) ) shorts , metavars , helps , description , epilog = { } , { } , { } , "" , "" if docs : for line in docs . split ( "\n" ) : line = line . strip ( ) if line . startswith ( ':param' ) : line = line [ len ( ':param' ) : ] if line . startswith ( ':rtype:' ) : continue if line . strip ( ) == "----" : break m = re_var . match ( line ) if m : if epilog : helps [ prev ] += epilog . strip ( ) epilog = "" if m . group ( 2 ) : shorts [ m . group ( 1 ) ] = m . group ( 2 ) helps [ m . group ( 1 ) ] = m . group ( 3 ) . strip ( ) prev = m . group ( 1 ) previndent = len ( line ) - len ( line . lstrip ( ) ) continue m = re_opt . match ( line ) if m : if epilog : helps [ prev ] += epilog . strip ( ) epilog = "" name = m . group ( 2 ) . replace ( "-" , "_" ) helps [ name ] = m . group ( 4 ) prev = name if m . group ( 1 ) : shorts [ name ] = m . group ( 1 ) if m . group ( 3 ) : metavars [ name ] = m . group ( 3 ) previndent = len ( line ) - len ( line . lstrip ( ) ) continue if helps : if line . startswith ( " " * ( previndent + 1 ) ) : helps [ prev ] += "\n" + line . strip ( ) else : epilog += "\n" + line . strip ( ) else : description += "\n" + line . strip ( ) if line . strip ( ) : previndent = len ( line ) - len ( line . lstrip ( ) ) return shorts , metavars , helps , description , epilog | Converts a well - formed docstring into documentation to be fed into argparse . |
54,615 | def quote ( text ) : 'Handle quote characters' if not isinstance ( text , unicode ) : text = text . decode ( 'utf-8' ) for qp in QUOTEPAIRS : if text [ 0 ] == qp [ 0 ] and text [ - 1 ] == qp [ - 1 ] and len ( text ) >= 2 : return text for qp in QUOTEPAIRS : if qp [ 1 ] not in text : return qp [ 0 ] + text + qp [ 1 ] raise ValueError ( u'The value "%s" is not quoted and contains too many quote characters to quote' % text ) | Handle quote characters |
54,616 | def reltags ( self , src , cache = None ) : if not self . _tag_assocs : return set ( ) if cache == None : cache = { } q = _otq ( ) q . append ( src ) updateq = _otq ( ) while q : i = q . popleft ( ) if i in cache : continue cache [ i ] = set ( ) for ( s , t ) in self . transitions_to ( i ) : q . append ( s ) if self . is_tagged ( t , s , i ) : cache [ i ] . add ( ( self . tag ( t , s , i ) , s , i ) ) updateq . appendleft ( ( i , s ) ) while updateq : i = updateq . popleft ( ) cache [ i [ 0 ] ] . update ( cache [ i [ 1 ] ] ) return cache [ src ] | returns all the tags that are relevant at this state cache should be a dictionary and it is updated by the function |
54,617 | def make_log_metric ( level = logging . INFO , msg = "%d items in %.2f seconds" ) : def log_metric ( name , count , elapsed ) : log_name = 'instrument.{}' . format ( name ) if name else 'instrument' logging . getLogger ( log_name ) . log ( level , msg , count , elapsed ) return log_metric | Make a new metric function that logs at the given level |
54,618 | def DBObject ( table_name , versioning = VersioningTypes . NONE ) : def wrapped ( cls ) : field_names = set ( ) all_fields = [ ] for name in dir ( cls ) : fld = getattr ( cls , name ) if fld and isinstance ( fld , Field ) : fld . name = name all_fields . append ( fld ) field_names . add ( name ) def add_missing_field ( name , default = '' , insert_pos = None ) : if name not in field_names : fld = Field ( default = default ) fld . name = name all_fields . insert ( len ( all_fields ) if insert_pos is None else insert_pos , fld ) add_missing_field ( 'id' , insert_pos = 0 ) add_missing_field ( '_create_date' ) add_missing_field ( '_last_update' ) if versioning == VersioningTypes . DELTA_HISTORY : add_missing_field ( '_version_hist' , default = list ) cls . __table_name__ = table_name cls . __versioning__ = versioning cls . __fields__ = all_fields if not ctor_overridable ( cls ) : raise TypeError ( 'Classes with user-supplied __init__ should not be decorated ' 'with DBObject. Use the setup method' ) cls . __init__ = _auto_init cls . get_table_name = classmethod ( _get_table_name ) cls . get_id = _get_id cls . set_id = _set_id cls . to_data = _to_data cls . from_data = classmethod ( _from_data ) cls . index_names = classmethod ( _index_names ) cls . indexes = _indexes cls . get_version_hist = _get_version_hist Storable . register ( cls ) cls = DatabaseEnabled ( cls ) if versioning == VersioningTypes . DELTA_HISTORY : cls . save = _delta_save ( cls . save ) return cls return wrapped | Classes annotated with DBObject gain persistence methods . |
54,619 | def train ( self , data , target , ** kwargs ) : non_predictors = [ i . replace ( " " , "_" ) . lower ( ) for i in list ( set ( data [ 'team' ] ) ) ] + [ "team" , "next_year_wins" ] self . column_names = [ l for l in list ( data . columns ) if l not in non_predictors ] results , folds = self . cross_validate ( data , non_predictors , ** kwargs ) self . gather_results ( results , folds , data ) | Used in the training phase . Override . |
54,620 | def as_command ( self ) : try : params = self . unbound_func . __click_params__ params . reverse ( ) del self . unbound_func . __click_params__ except AttributeError : params = [ ] help = inspect . getdoc ( self . real_func ) if isinstance ( help , bytes ) : help = help . decode ( 'utf-8' ) self . options . setdefault ( 'help' , help ) @ pass_script_info_decorator def callback ( info , * args , ** kwargs ) : if self . with_reloader : app = info . load_app ( ) if app . debug : def inner ( ) : return self . command_callback ( info , * args , ** kwargs ) run_with_reloader ( inner , extra_files = get_reloader_extra_files ( ) ) return self . command_callback ( info , * args , ** kwargs ) return self . cls ( name = self . name , callback = callback , params = params , ** self . options ) | Creates the click command wrapping the function |
54,621 | def main ( command_line_arguments = None ) : args = command_line_options ( command_line_arguments ) cmap = mpl . cm . get_cmap ( name = 'hsv' ) count = len ( args . files ) + ( len ( args . baselines ) if args . baselines else 0 ) colors = [ cmap ( i ) for i in numpy . linspace ( 0 , 1.0 , count + 1 ) ] logger . info ( "Loading %d score files" % len ( args . files ) ) scores = [ read_score_file ( os . path . join ( args . directory , f ) ) for f in args . files ] false_alarms = [ ] detection_rate = [ ] logger . info ( "Computing FROC curves" ) for score in scores : tmin = min ( score [ 2 ] ) tmax = max ( score [ 2 ] ) count = 100 thresholds = [ tmin + float ( x ) / count * ( tmax - tmin ) for x in range ( count + 2 ) ] false_alarms . append ( [ ] ) detection_rate . append ( [ ] ) for threshold in thresholds : detection_rate [ - 1 ] . append ( numpy . count_nonzero ( numpy . array ( score [ 1 ] ) >= threshold ) / float ( score [ 0 ] ) ) false_alarms [ - 1 ] . append ( numpy . count_nonzero ( numpy . array ( score [ 2 ] ) >= threshold ) ) if args . baselines is not None : for baseline in args . baselines : dr = [ ] fa = [ ] with open ( os . path . join ( args . baseline_directory , baseline ) ) as f : for line in f : splits = line . rstrip ( ) . split ( ) dr . append ( float ( splits [ 0 ] ) ) fa . append ( int ( splits [ 1 ] ) ) false_alarms . append ( fa ) detection_rate . append ( dr ) logger . info ( "Plotting FROC curves to file '%s'" , args . output ) pdf = PdfPages ( args . output ) figure = _plot_froc ( false_alarms , detection_rate , colors , args . legends , args . title , args . max ) mpl . xlabel ( 'False Alarm (of %d pruned)' % len ( scores [ 0 ] [ 2 ] ) ) mpl . ylabel ( 'Detection Rate in \%% (total %d faces)' % scores [ 0 ] [ 0 ] ) pdf . savefig ( figure ) pdf . close ( ) if args . count_detections : for i , f in enumerate ( args . 
files ) : det , all = count_detections ( f ) print ( "The number of detected faces for %s is %d out of %d" % ( args . legends [ i ] , det , all ) ) | Reads score files computes error measures and plots curves . |
54,622 | def get_resources ( minify = False ) : all_resources = dict ( ) subclasses = resource_base . ResourceBase . __subclasses__ ( ) + resource_definitions . ResourceAngular . __subclasses__ ( ) for resource in subclasses : obj = resource ( minify ) all_resources [ resource . RESOURCE_NAME ] = dict ( css = tuple ( obj . resources_css ) , js = tuple ( obj . resources_js ) ) return all_resources | Find all resources which subclass ResourceBase . |
54,623 | def search ( self , dsl , params ) : query_parameters = [ ] for key , value in params : query_parameters . append ( self . CITEDBY_THRIFT . kwargs ( str ( key ) , str ( value ) ) ) try : result = self . client . search ( dsl , query_parameters ) except self . CITEDBY_THRIFT . ServerError : raise ServerError ( 'you may trying to run a bad DSL Query' ) try : return json . loads ( result ) except : return None | Free queries to ES index . |
54,624 | def raise_error ( error ) : exc_type = error . get ( 'exc_type' ) if exc_type and exc_type . startswith ( ERROR_PREFIX ) : exc_type = exc_type [ len ( ERROR_PREFIX ) : ] exc_cls = ERROR_TYPE_MAP . get ( exc_type , DashiError ) else : exc_cls = DashiError raise exc_cls ( ** error ) | Intakes a dict of remote error information and raises a DashiError |
54,625 | def fire ( self , name , operation , args = None , ** kwargs ) : if args : if kwargs : raise TypeError ( "specify args dict or keyword arguments, not both" ) else : args = kwargs d = dict ( op = operation , args = args ) headers = { 'sender' : self . add_sysname ( self . name ) } dest = self . add_sysname ( name ) def _fire ( channel ) : with Producer ( channel ) as producer : producer . publish ( d , routing_key = dest , headers = headers , serializer = self . _serializer , exchange = self . _exchange , declare = [ self . _exchange ] ) log . debug ( "sending message to %s" , dest ) with connections [ self . _pool_conn ] . acquire ( block = True ) as conn : _ , channel = self . ensure ( conn , _fire ) conn . maybe_close_channel ( channel ) | Send a message without waiting for a reply |
54,626 | def call ( self , name , operation , timeout = 10 , args = None , ** kwargs ) : if args : if kwargs : raise TypeError ( "specify args dict or keyword arguments, not both" ) else : args = kwargs msg_id = uuid . uuid4 ( ) . hex queue_arguments = { 'x-expires' : int ( ( timeout + 1 ) * 1000 ) } queue = Queue ( name = msg_id , exchange = self . _exchange , routing_key = msg_id , durable = False , queue_arguments = queue_arguments ) messages = [ ] event = threading . Event ( ) def _callback ( body , message ) : messages . append ( body ) message . ack ( ) event . set ( ) d = dict ( op = operation , args = args ) headers = { 'reply-to' : msg_id , 'sender' : self . add_sysname ( self . name ) } dest = self . add_sysname ( name ) def _declare_and_send ( channel ) : consumer = Consumer ( channel , ( queue , ) , callbacks = ( _callback , ) ) with Producer ( channel ) as producer : producer . publish ( d , routing_key = dest , headers = headers , exchange = self . _exchange , serializer = self . _serializer ) return consumer log . debug ( "sending call to %s:%s" , dest , operation ) with connections [ self . _pool_conn ] . acquire ( block = True ) as conn : consumer , channel = self . ensure ( conn , _declare_and_send ) try : self . _consume ( conn , consumer , timeout = timeout , until_event = event ) try : queue = queue . bind ( channel ) queue . delete ( nowait = True ) except Exception : log . exception ( "error deleting queue" ) finally : conn . maybe_close_channel ( channel ) msg_body = messages [ 0 ] if msg_body . get ( 'error' ) : raise_error ( msg_body [ 'error' ] ) else : return msg_body . get ( 'result' ) | Send a message and wait for reply |
54,627 | def handle ( self , operation , operation_name = None , sender_kwarg = None ) : if not self . _consumer : self . _consumer = DashiConsumer ( self , self . _conn , self . _name , self . _exchange , sysname = self . _sysname ) self . _consumer . add_op ( operation_name or operation . __name__ , operation , sender_kwarg = sender_kwarg ) | Handle an operation using the specified function |
54,628 | def link_exceptions ( self , custom_exception = None , dashi_exception = None ) : if custom_exception is None : raise ValueError ( "custom_exception must be set" ) if dashi_exception is None : raise ValueError ( "dashi_exception must be set" ) self . _linked_exceptions [ custom_exception ] = dashi_exception | Link a custom exception thrown on the receiver to a dashi exception |
54,629 | def ensure ( self , connection , func , * args , ** kwargs ) : channel = None while 1 : try : if channel is None : channel = connection . channel ( ) return func ( channel , * args , ** kwargs ) , channel except ( connection . connection_errors , IOError ) : self . _call_errback ( ) channel = self . connect ( connection ) | Perform an operation until success |
54,630 | def re_tab ( s ) : l = [ ] p = 0 for i in range ( 8 , len ( s ) , 8 ) : if s [ i - 2 : i ] == " " : l . append ( s [ p : i ] . rstrip ( ) + "\t" ) p = i if p == 0 : return s else : l . append ( s [ p : ] ) return "" . join ( l ) | Return a tabbed string from an expanded one . |
54,631 | def read_next_line ( self ) : next_line = self . file . readline ( ) if not next_line or next_line [ - 1 : ] != '\n' : self . file = None else : next_line = next_line [ : - 1 ] expanded = next_line . expandtabs ( ) edit = urwid . Edit ( "" , expanded , allow_tab = True ) edit . set_edit_pos ( 0 ) edit . original_text = next_line self . lines . append ( edit ) return next_line | Read another line from the file . |
54,632 | def _get_at_pos ( self , pos ) : if pos < 0 : return None , None if len ( self . lines ) > pos : return self . lines [ pos ] , pos if self . file is None : return None , None assert pos == len ( self . lines ) , "out of order request?" self . read_next_line ( ) return self . lines [ - 1 ] , pos | Return a widget for the line number passed . |
54,633 | def split_focus ( self ) : focus = self . lines [ self . focus ] pos = focus . edit_pos edit = urwid . Edit ( "" , focus . edit_text [ pos : ] , allow_tab = True ) edit . original_text = "" focus . set_edit_text ( focus . edit_text [ : pos ] ) edit . set_edit_pos ( 0 ) self . lines . insert ( self . focus + 1 , edit ) | Divide the focus edit widget at the cursor location . |
54,634 | def combine_focus_with_prev ( self ) : above , ignore = self . get_prev ( self . focus ) if above is None : return focus = self . lines [ self . focus ] above . set_edit_pos ( len ( above . edit_text ) ) above . set_edit_text ( above . edit_text + focus . edit_text ) del self . lines [ self . focus ] self . focus -= 1 | Combine the focus edit widget with the one above . |
54,635 | def combine_focus_with_next ( self ) : below , ignore = self . get_next ( self . focus ) if below is None : return focus = self . lines [ self . focus ] focus . set_edit_text ( focus . edit_text + below . edit_text ) del self . lines [ self . focus + 1 ] | Combine the focus edit widget with the one below . |
54,636 | def handle_keypress ( self , k ) : if k == "esc" : self . save_file ( ) raise urwid . ExitMainLoop ( ) elif k == "delete" : self . walker . combine_focus_with_next ( ) elif k == "backspace" : self . walker . combine_focus_with_prev ( ) elif k == "enter" : self . walker . split_focus ( ) self . view . keypress ( size , "down" ) self . view . keypress ( size , "home" ) | Last resort for keypresses . |
54,637 | def save_file ( self ) : l = [ ] walk = self . walker for edit in walk . lines : if edit . original_text . expandtabs ( ) == edit . edit_text : l . append ( edit . original_text ) else : l . append ( re_tab ( edit . edit_text ) ) while walk . file is not None : l . append ( walk . read_next_line ( ) ) outfile = open ( self . save_name , "w" ) l_iter = iter ( l ) line = next ( l_iter ) prefix = "" while True : try : outfile . write ( prefix + line ) prefix = "\n" line = next ( l_iter ) except StopIteration : if line != "\n" : outfile . write ( "\n" ) break | Write the file out to disk . |
54,638 | def _media ( self ) : css = [ 'markymark/css/markdown-editor.css' ] iconlibrary_css = getattr ( settings , 'MARKYMARK_FONTAWESOME_CSS' , 'markymark/fontawesome/fontawesome.min.css' ) if iconlibrary_css : css . append ( iconlibrary_css ) media = forms . Media ( css = { 'all' : css } , js = ( 'markymark/js/markdown-editor.js' , ) ) renderer = initialize_renderer ( ) for extension in renderer . registeredExtensions : if hasattr ( extension , 'media' ) : media += extension . media return media | Returns a forms . Media instance with the basic editor media and media from all registered extensions . |
54,639 | def getsuffix ( subject ) : index = subject . rfind ( '.' ) if index > subject . replace ( '\\' , '/' ) . rfind ( '/' ) : return subject [ index + 1 : ] return None | Returns the suffix of a filename . If the file has no suffix returns None . Can return an empty string if the filename ends with a period .
54,640 | def init_app ( self , app ) : app . config . setdefault ( 'STATICS_MINIFY' , False ) self . all_resources = ALL_RESOURCES_MINIFIED if app . config . get ( 'STATICS_MINIFY' ) else ALL_RESOURCES self . all_variables = ALL_VARIABLES if not hasattr ( app , 'extensions' ) : app . extensions = dict ( ) if 'statics' in app . extensions : raise ValueError ( 'Already registered extension STATICS.' ) app . extensions [ 'statics' ] = _StaticsState ( self , app ) name = 'flask_statics_helper' static_url_path = '{0}/{1}' . format ( app . static_url_path , name ) self . blueprint = Blueprint ( name , __name__ , template_folder = 'templates' , static_folder = 'static' , static_url_path = static_url_path ) self . blueprint . add_app_template_global ( self . all_variables , '_flask_statics_helper_all_variables' ) self . blueprint . add_app_template_global ( self . all_resources , '_flask_statics_helper_all_resources' ) app . register_blueprint ( self . blueprint ) | Initialize the extension . |
54,641 | def measure_board_rms ( control_board , n_samples = 10 , sampling_ms = 10 , delay_between_samples_ms = 0 ) : try : results = control_board . measure_impedance ( n_samples , sampling_ms , delay_between_samples_ms , True , True , [ ] ) except RuntimeError : logger . warning ( 'Error encountered during high-voltage RMS ' 'measurement.' , exc_info = True ) data = pd . DataFrame ( None , columns = [ 'board measured V' , 'divider resistor index' ] ) else : data = pd . DataFrame ( { 'board measured V' : results . V_hv } ) data [ 'divider resistor index' ] = results . hv_resistor return data | Read RMS voltage samples from control board high - voltage feedback circuit . |
54,642 | def find_good ( control_board , actuation_steps , resistor_index , start_index , end_index ) : lower = start_index upper = end_index while lower < upper - 1 : index = lower + ( upper - lower ) / 2 v = actuation_steps [ index ] control_board . set_waveform_voltage ( v ) data = measure_board_rms ( control_board ) valid_data = data [ data [ 'divider resistor index' ] >= 0 ] if ( valid_data [ 'divider resistor index' ] < resistor_index ) . sum ( ) : upper = index else : lower = index control_board . set_waveform_voltage ( actuation_steps [ lower ] ) data = measure_board_rms ( control_board ) return lower , data | Use a binary search over the range of provided actuation_steps to find the maximum actuation voltage that is measured by the board feedback circuit using the specified feedback resistor . |
54,643 | def resistor_max_actuation_readings ( control_board , frequencies , oscope_reading_func ) : control_board . set_waveform_voltage ( 0 ) control_board . auto_adjust_amplifier_gain = False control_board . amplifier_gain = 1. target_voltage = 0.1 control_board . set_waveform_voltage ( target_voltage ) oscope_rms = oscope_reading_func ( ) estimated_amplifier_gain = oscope_rms / target_voltage max_post_gain_V = 0.8 * control_board . max_waveform_voltage max_actuation_V = max_post_gain_V / estimated_amplifier_gain actuation_steps = np . linspace ( 0.005 , max_actuation_V , num = 50 ) resistor_count = len ( control_board . a0_series_resistance ) conditions = pd . DataFrame ( [ [ r , f ] for r in range ( resistor_count - 1 , - 1 , - 1 ) for f in frequencies ] , columns = [ 'resistor index' , 'frequency' ] ) def max_actuation_reading ( x ) : r = x [ 'resistor index' ] . values [ 0 ] f = x [ 'frequency' ] . values [ 0 ] control_board . set_waveform_frequency ( f ) actuation_index , data = find_good ( control_board , actuation_steps , r , 0 , len ( actuation_steps ) - 1 ) board_measured_rms = data . loc [ data [ 'divider resistor index' ] >= 0 , 'board measured V' ] . mean ( ) oscope_rms = oscope_reading_func ( ) print 'R=%s, f=%s' % ( r , f ) return pd . DataFrame ( [ [ r , f , actuation_index , board_measured_rms , oscope_rms ] ] , columns = [ 'resistor index' , 'frequency' , 'actuation index' , 'board measured V' , 'oscope measured V' ] ) return ( conditions . groupby ( [ 'resistor index' , 'frequency' ] ) . apply ( max_actuation_reading ) . reset_index ( drop = True ) ) | For each resistor in the high - voltage feedback resistor bank read the board measured voltage and the oscilloscope measured voltage for an actuation voltage that nearly saturates the feedback resistor . |
54,644 | def fit_feedback_params ( calibration , max_resistor_readings ) : R1 = 10e6 def fit_resistor_params ( x ) : resistor_index = x [ 'resistor index' ] . values [ 0 ] p0 = [ calibration . R_hv [ resistor_index ] , calibration . C_hv [ resistor_index ] ] def error ( p , df , R1 ) : v1 = compute_from_transfer_function ( calibration . hw_version . major , 'V1' , V2 = df [ 'board measured V' ] , R1 = R1 , R2 = p [ 0 ] , C2 = p [ 1 ] , f = df [ 'frequency' ] . values ) e = df [ 'oscope measured V' ] - v1 return e p1 , success = optimize . leastsq ( error , p0 , args = ( x , R1 ) ) p1 = np . abs ( p1 ) return pd . DataFrame ( [ p0 + p1 . tolist ( ) ] , columns = [ 'original R' , 'original C' , 'fitted R' , 'fitted C' ] ) . T results = ( max_resistor_readings [ max_resistor_readings [ 'resistor index' ] >= 0 ] . groupby ( [ 'resistor index' ] ) . apply ( fit_resistor_params ) ) data = results . unstack ( ) data . columns = data . columns . droplevel ( ) return data | Fit model of control board high - voltage feedback resistor and parasitic capacitance values based on measured voltage readings . |
54,645 | def update_control_board_calibration ( control_board , fitted_params ) : control_board . a0_series_resistance = fitted_params [ 'fitted R' ] . values control_board . a0_series_capacitance = fitted_params [ 'fitted C' ] . values | Update the control board with the specified fitted parameters . |
54,646 | def load ( self ) : data = self . dict_class ( ) for path in self . paths : if path in self . paths_loaded : continue try : with open ( path , 'r' ) as file : path_data = yaml . load ( file . read ( ) ) data = dict_merge ( data , path_data ) self . paths_loaded . add ( path ) except IOError : if not path . endswith ( '.local.yml' ) : print 'CONFIG NOT FOUND: %s' % ( path ) self . data = data | Load each path in order . Remember paths already loaded and only load new ones . |
54,647 | def _initialize ( self , settings_module ) : self . settings_list = [ ] for setting in dir ( global_settings ) : if setting == setting . upper ( ) : setattr ( self , setting , getattr ( global_settings , setting ) ) self . settings_list . append ( setting ) if settings_module is not None : self . SETTINGS_MODULE = settings_module try : mod = import_module ( self . SETTINGS_MODULE ) except ImportError : error_message = "Could not import settings at {0}" . format ( self . SETTINGS_MODULE ) log . exception ( error_message ) raise ImportError ( error_message ) for setting in dir ( mod ) : if setting == setting . upper ( ) : if setting == "INSTALLED_APPS" : self . INSTALLED_APPS += getattr ( mod , setting ) else : setattr ( self , setting , getattr ( mod , setting ) ) self . settings_list . append ( setting ) if hasattr ( self , "PATH_SETTINGS" ) : for path in self . PATH_SETTINGS : sys . path . extend ( getattr ( self , path ) ) self . settings_list = list ( set ( self . settings_list ) ) | Initialize the settings from a given settings_module settings_module - path to settings module |
54,648 | def _setup ( self ) : settings_module = None try : settings_module = os . environ [ global_settings . MODULE_VARIABLE ] except KeyError : error_message = "Settings not properly configured. Cannot find the environment variable {0}" . format ( global_settings . MODULE_VARIABLE ) log . exception ( error_message ) self . _initialize ( settings_module ) self . _configure_logging ( ) | Perform initial setup of the settings class such as getting the settings module and setting the settings |
54,649 | def _configure_logging ( self ) : if not self . LOGGING_CONFIG : dictConfig ( self . DEFAULT_LOGGING ) else : dictConfig ( self . LOGGING_CONFIG ) | Setting up logging from logging config in settings |
54,650 | def ensure_context ( ** vars ) : ctx = _context_stack . top stacked = False if not ctx : ctx = Context ( ) stacked = True _context_stack . push ( ctx ) ctx . update ( vars ) try : yield ctx finally : if stacked : _context_stack . pop ( ) | Ensures that a context is in the stack , creating one otherwise .
54,651 | def request_context ( app , request ) : vars = { } if request . view_args is not None : vars . update ( request . view_args ) vars . update ( { "request" : request , "GET" : AttrDict ( request . args . to_dict ( ) ) , "POST" : AttrDict ( request . form . to_dict ( ) ) , "app" : app , "config" : app . config , "session" : session , "g" : g , "now" : datetime . datetime . now , "utcnow" : datetime . datetime . utcnow , "today" : datetime . date . today } ) context = Context ( vars ) context . vars [ "current_context" ] = context return context | Creates a Context instance from the given request object |
54,652 | def clone ( self , ** override_vars ) : c = Context ( self . vars , self . data ) c . executed_actions = set ( self . executed_actions ) c . vars . update ( override_vars ) return c | Creates a copy of this context |
54,653 | def mpl_get_cb_bound_below_plot ( ax ) : position = ax . get_position ( ) figW , figH = ax . get_figure ( ) . get_size_inches ( ) fig_aspect = figH / figW box_aspect = ax . get_data_ratio ( ) pb = position . frozen ( ) pb1 = pb . shrunk_to_aspect ( box_aspect , pb , fig_aspect ) . bounds ax_size = ax . get_position ( ) . bounds sizes = [ ax_size [ 0 ] , ax_size [ 1 ] - 0.14 , pb1 [ 2 ] , 0.03 ] return sizes | Return the coordinates for a colorbar axes below the provided axes object . Take into account the changes of the axes due to aspect ratio settings . |
54,654 | def main ( ) : table = docraptor = DocRaptor ( ) print ( "Create test_basic.xls" ) with open ( "test_basic.xls" , "wb" ) as pdf_file : pdf_file . write ( docraptor . create ( { "document_content" : table , "document_type" : "xls" , "test" : True } ) . content ) | Generate an XLS with specified content . |
54,655 | def restore_gc_state ( ) : old_isenabled = gc . isenabled ( ) old_flags = gc . get_debug ( ) try : yield finally : gc . set_debug ( old_flags ) ( gc . enable if old_isenabled else gc . disable ) ( ) | Restore the garbage collector state on leaving the with block . |
54,656 | def add_view_file_mapping ( self , pattern , cls ) : if isinstance ( pattern , str ) : if not pattern . endswith ( "*" ) : _ , ext = os . path . splitext ( pattern ) self . allowed_extensions . add ( ext ) pattern = re . compile ( "^" + re . escape ( pattern ) . replace ( "\\*" , ".+" ) + "$" , re . I ) self . view_class_files_map . append ( ( pattern , cls ) ) | Adds a mapping between a file and a view class . Pattern can be an extension in the form . EXT or a filename . |
54,657 | def get_file_view_cls ( self , filename ) : if filename is None : return self . default_view_class for pattern , cls in self . view_class_files_map : if pattern . match ( filename ) : return cls return self . default_view_class | Returns the view class associated to a filename |
54,658 | def children ( self , vertex ) : return [ self . head ( edge ) for edge in self . out_edges ( vertex ) ] | Return the list of immediate children of the given vertex . |
54,659 | def parents ( self , vertex ) : return [ self . tail ( edge ) for edge in self . in_edges ( vertex ) ] | Return the list of immediate parents of this vertex . |
54,660 | def descendants ( self , start , generations = None ) : visited = self . vertex_set ( ) visited . add ( start ) to_visit = deque ( [ ( start , 0 ) ] ) while to_visit : vertex , depth = to_visit . popleft ( ) if depth == generations : continue for child in self . children ( vertex ) : if child not in visited : visited . add ( child ) to_visit . append ( ( child , depth + 1 ) ) return self . full_subgraph ( visited ) | Return the subgraph of all nodes reachable from the given start vertex including that vertex . |
54,661 | def ancestors ( self , start , generations = None ) : visited = self . vertex_set ( ) visited . add ( start ) to_visit = deque ( [ ( start , 0 ) ] ) while to_visit : vertex , depth = to_visit . popleft ( ) if depth == generations : continue for parent in self . parents ( vertex ) : if parent not in visited : visited . add ( parent ) to_visit . append ( ( parent , depth + 1 ) ) return self . full_subgraph ( visited ) | Return the subgraph of all nodes from which the given vertex is reachable including that vertex . |
54,662 | def _component_graph ( self ) : sccs = [ ] stack = [ ] boundaries = [ ] identified = self . vertex_set ( ) index = self . vertex_dict ( ) to_do = [ ] def visit_vertex ( v ) : index [ v ] = len ( stack ) stack . append ( ( 'VERTEX' , v ) ) boundaries . append ( index [ v ] ) to_do . append ( ( leave_vertex , v ) ) to_do . extend ( ( visit_edge , w ) for w in self . children ( v ) ) def visit_edge ( v ) : if v in identified : stack . append ( ( 'EDGE' , v ) ) elif v in index : while index [ v ] < boundaries [ - 1 ] : boundaries . pop ( ) else : to_do . append ( ( visit_vertex , v ) ) def leave_vertex ( v ) : if boundaries [ - 1 ] == index [ v ] : root = boundaries . pop ( ) scc = stack [ root : ] del stack [ root : ] for item_type , w in scc : if item_type == 'VERTEX' : identified . add ( w ) del index [ w ] sccs . append ( scc ) stack . append ( ( 'EDGE' , v ) ) for v in self . vertices : if v not in identified : to_do . append ( ( visit_vertex , v ) ) while to_do : operation , v = to_do . pop ( ) operation ( v ) stack . pop ( ) return sccs | Compute the graph of strongly connected components . |
54,663 | def source_components ( self ) : raw_sccs = self . _component_graph ( ) vertex_to_root = self . vertex_dict ( ) non_sources = self . vertex_set ( ) for scc in raw_sccs : root = scc [ 0 ] [ 1 ] for item_type , w in scc : if item_type == 'VERTEX' : vertex_to_root [ w ] = root elif item_type == 'EDGE' : non_sources . add ( vertex_to_root [ w ] ) sccs = [ ] for raw_scc in raw_sccs : root = raw_scc [ 0 ] [ 1 ] if root not in non_sources : sccs . append ( [ v for vtype , v in raw_scc if vtype == 'VERTEX' ] ) return [ self . full_subgraph ( scc ) for scc in sccs ] | Return the strongly connected components not reachable from any other component . Any component in the graph is reachable from one of these . |
54,664 | def strongly_connected_components ( self ) : raw_sccs = self . _component_graph ( ) sccs = [ ] for raw_scc in raw_sccs : sccs . append ( [ v for vtype , v in raw_scc if vtype == 'VERTEX' ] ) return [ self . full_subgraph ( scc ) for scc in sccs ] | Return list of strongly connected components of this graph . |
54,665 | def signature ( self ) : hashable_fields = [ 'MERCHANT' , 'ORDER_REF' , 'ORDER_DATE' , 'ORDER_SHIPPING' , 'PRICES_CURRENCY' , 'DISCOUNT' , 'DESTINATION_CITY' , 'DESTINATION_STATE' , 'DESTINATION_COUNTRY' , 'PAY_METHOD' , 'SELECTED_INSTALLMENTS_NO' , 'TESTORDER' ] result = text_type ( ) suffix = text_type ( ) for field in self : if field . name == 'ORDER_HASH' : continue field_value = field . value ( ) if field . name in hashable_fields and field_value : encoded_value = text_type ( '{length}{value}' ) . format ( length = len ( text_type ( field_value ) . encode ( 'utf-8' ) ) , value = field_value ) if field . name == 'TESTORDER' or field . name == 'SELECTED_INSTALLMENTS_NO' : suffix += encoded_value else : result += encoded_value if field . name == 'ORDER' : for detail in PAYU_ORDER_DETAILS : if any ( [ detail in order and order [ detail ] for order in field_value ] ) : for order in field_value : value = order . get ( detail , '' ) item = text_type ( '{length}{value}' ) . format ( length = len ( text_type ( value ) . encode ( 'utf-8' ) ) , value = value ) if detail == 'PRICE_TYPE' : suffix += item else : result += item result += suffix result = result . encode ( 'utf-8' ) return hmac . new ( PAYU_MERCHANT_KEY , result ) . hexdigest ( ) | Compute the ORDER_HASH of the request . |
54,666 | def _prepare_orders ( self , orders ) : for detail in PAYU_ORDER_DETAILS : if not any ( [ detail in order for order in orders ] ) : for order in orders : order [ detail ] = PAYU_ORDER_DETAILS_DEFAULTS . get ( detail , None ) return orders | Each order needs to have all its details filled with a default value or None in case those are not already filled .
54,667 | def staticfiles_url_fetcher ( url ) : if url . startswith ( '/' ) : base_url = staticfiles_storage . base_url filename = url . replace ( base_url , '' , 1 ) path = finders . find ( filename ) if path : with open ( path , 'rb' ) as f : data = f . read ( ) else : f = staticfiles_storage . open ( filename ) data = f . read ( ) f . close ( ) return { 'string' : data , 'mime_type' : mimetypes . guess_type ( url ) [ 0 ] , } else : return default_url_fetcher ( url ) | Returns the file matching url . |
54,668 | def render_pdf ( template , file_ , url_fetcher = staticfiles_url_fetcher , context = None , ) : context = context or { } html = get_template ( template ) . render ( context ) HTML ( string = html , base_url = 'not-used://' , url_fetcher = url_fetcher , ) . write_pdf ( target = file_ , ) | Writes the PDF data into file_ . Note that file_ can actually be a Django Response object as well . |
54,669 | def encode_bytes ( src_buf , dst_file ) : if not isinstance ( src_buf , bytes ) : raise TypeError ( 'src_buf must by bytes.' ) len_src_buf = len ( src_buf ) assert 0 <= len_src_buf <= 2 ** 16 - 1 num_written_bytes = len_src_buf + 2 len_buf = FIELD_U16 . pack ( len_src_buf ) dst_file . write ( len_buf ) dst_file . write ( src_buf ) return num_written_bytes | Encode a buffer length followed by the bytes of the buffer itself . |
54,670 | def decode_bytes ( f ) : buf = f . read ( FIELD_U16 . size ) if len ( buf ) < FIELD_U16 . size : raise UnderflowDecodeError ( ) ( num_bytes , ) = FIELD_U16 . unpack_from ( buf ) num_bytes_consumed = FIELD_U16 . size + num_bytes buf = f . read ( num_bytes ) if len ( buf ) < num_bytes : raise UnderflowDecodeError ( ) return num_bytes_consumed , buf | Decode a buffer length from a 2 - byte unsigned int then read the subsequent bytes . |
54,671 | def encode_utf8 ( s , f ) : encode = codecs . getencoder ( 'utf8' ) encoded_str_bytes , num_encoded_chars = encode ( s ) num_encoded_str_bytes = len ( encoded_str_bytes ) assert 0 <= num_encoded_str_bytes <= 2 ** 16 - 1 num_encoded_bytes = num_encoded_str_bytes + 2 f . write ( FIELD_U8 . pack ( ( num_encoded_str_bytes & 0xff00 ) >> 8 ) ) f . write ( FIELD_U8 . pack ( num_encoded_str_bytes & 0x00ff ) ) f . write ( encoded_str_bytes ) return num_encoded_bytes | UTF - 8 encodes string s to file - like object f according to the MQTT Version 3 . 1 . 1 specification in section 1 . 5 . 3 . |
54,672 | def encode_varint ( v , f ) : assert v >= 0 num_bytes = 0 while True : b = v % 0x80 v = v // 0x80 if v > 0 : b = b | 0x80 f . write ( FIELD_U8 . pack ( b ) ) num_bytes += 1 if v == 0 : break return num_bytes | Encode integer v to file f . |
54,673 | def unpack ( self , struct ) : v = struct . unpack ( self . read ( struct . size ) ) return v | Read as many bytes as are required to extract struct then unpack and return a tuple of the values . |
54,674 | def unpack_bytes ( self ) : num_bytes_consumed , b = decode_bytes ( self . __f ) self . __num_bytes_consumed += num_bytes_consumed return num_bytes_consumed , b | Unpack a bytes field encoded as described in MQTT Version 3 . 1 . 1 section 1 . 5 . 3 . This is a 16 - bit unsigned length followed by that many bytes .
54,675 | def read ( self , num_bytes ) : buf = self . __f . read ( num_bytes ) assert len ( buf ) <= num_bytes if len ( buf ) < num_bytes : raise UnderflowDecodeError ( ) self . __num_bytes_consumed += num_bytes return buf | Read num_bytes and return them . |
54,676 | def timeout ( self , value ) : if not self . params : self . params = dict ( timeout = value ) return self self . params [ 'timeout' ] = value return self | Specifies a timeout on the search query |
54,677 | def filtered ( self , efilter ) : if not self . params : self . params = { 'filter' : efilter } return self if not self . params . has_key ( 'filter' ) : self . params [ 'filter' ] = efilter return self self . params [ 'filter' ] . update ( efilter ) return self | Applies a filter to the search |
54,678 | def size ( self , value ) : if not self . params : self . params = dict ( size = value ) return self self . params [ 'size' ] = value return self | The number of hits to return . Defaults to 10 |
54,679 | def from_offset ( self , value ) : if not self . params : self . params = dict ( { 'from' : value } ) return self self . params [ 'from' ] = value return self | The starting from index of the hits to return . Defaults to 0 . |
54,680 | def sorted ( self , fsort ) : if not self . params : self . params = dict ( ) self . params [ 'sort' ] = fsort return self | Allows to add one or more sort on specific fields . Each sort can be reversed as well . The sort is defined on a per field level with special field name for _score to sort by score . |
54,681 | def doc_create ( self , index , itype , value ) : request = self . session url = 'http://%s:%s/%s/%s/' % ( self . host , self . port , index , itype ) if self . verbose : print value response = request . post ( url , value ) return response | Creates a document |
54,682 | def search_index_simple ( self , index , key , search_term ) : request = self . session url = 'http://%s:%s/%s/_search?q=%s:%s' % ( self . host , self . port , index , key , search_term ) response = request . get ( url ) return response | Search the index using a simple key and search_term |
54,683 | def search_index_advanced ( self , index , query ) : request = self . session url = 'http://%s:%s/%s/_search' % ( self . host , self . port , index ) if self . params : content = dict ( query = query , ** self . params ) else : content = dict ( query = query ) if self . verbose : print content response = request . post ( url , content ) return response | Advanced search query against an entire index |
54,684 | def map ( self , index_name , index_type , map_value ) : request = self . session url = 'http://%s:%s/%s/%s/_mapping' % ( self . host , self . port , index_name , index_type ) content = { index_type : { 'properties' : map_value } } if self . verbose : print content response = request . put ( url , content ) return response | Enable a specific map for an index and type |
54,685 | def list_types ( index_name , host = 'localhost' , port = '9200' ) : return ElasticSearch ( host = host , port = port ) . type_list ( index_name ) | Lists the context types available in an index |
54,686 | def type_list ( self , index_name ) : request = self . session url = 'http://%s:%s/%s/_mapping' % ( self . host , self . port , index_name ) response = request . get ( url ) if request . status_code == 200 : return response [ index_name ] . keys ( ) else : return response | List the types available in an index |
54,687 | def raw ( self , module , method = 'GET' , data = None ) : request = self . session url = 'http://%s:%s/%s' % ( self . host , self . port , module ) if self . verbose : print data if method == 'GET' : response = request . get ( url ) elif method == 'POST' : response = request . post ( url , data ) elif method == 'PUT' : response = request . put ( url , data ) elif method == 'DELETE' : response = request . delete ( url ) else : return { 'error' : 'No such request method %s' % method } return response | Submits or requsts raw input |
54,688 | def inverse ( self , N ) : if N == 0 : return 0 lm , hm = 1 , 0 low , high = N % self . P , self . P while low > 1 : r = high // low nm , new = hm - lm * r , high - low * r lm , low , hm , high = nm , new , lm , low return lm % self . P | Returns the modular inverse of an integer with respect to the field characteristic P . |
54,689 | def is_on_curve ( self , point ) : X , Y = point . X , point . Y return ( pow ( Y , 2 , self . P ) - pow ( X , 3 , self . P ) - self . a * X - self . b ) % self . P == 0 | Checks whether a point is on the curve . |
54,690 | def generate_private_key ( self ) : random_string = base64 . b64encode ( os . urandom ( 4096 ) ) . decode ( 'utf-8' ) binary_data = bytes ( random_string , 'utf-8' ) hash_object = hashlib . sha256 ( binary_data ) message_digest_bin = hash_object . digest ( ) message_digest_hex = binascii . hexlify ( message_digest_bin ) return message_digest_hex | Generates a private key based on the password . |
54,691 | def generate_public_key ( self ) : private_key = int ( self . private_key , 16 ) if private_key >= self . N : raise Exception ( 'Invalid private key.' ) G = JacobianPoint ( self . Gx , self . Gy , 1 ) public_key = G * private_key x_hex = '{0:0{1}x}' . format ( public_key . X , 64 ) y_hex = '{0:0{1}x}' . format ( public_key . Y , 64 ) return '04' + x_hex + y_hex | Generates a public key from the hex - encoded private key using elliptic curve cryptography . The private key is multiplied by a predetermined point on the elliptic curve called the generator point G resulting in the corresponding public key . The generator point is always the same for all Bitcoin users .
54,692 | def to_affine ( self ) : X , Y , Z = self . x , self . y , self . inverse ( self . z ) return ( ( X * Z ** 2 ) % P , ( Y * Z ** 3 ) % P ) | Converts this point to an affine representation . |
54,693 | def slope ( self , other ) : X1 , Y1 , X2 , Y2 = self . X , self . Y , other . X , other . Y Y3 = Y1 - Y2 X3 = X1 - X2 return ( Y3 * self . inverse ( X3 ) ) % self . P | Determines the slope between this point and another point . |
54,694 | def to_jacobian ( self ) : if not self : return JacobianPoint ( X = 0 , Y = 0 , Z = 0 ) return JacobianPoint ( X = self . X , Y = self . Y , Z = 1 ) | Converts this point to a Jacobian representation . |
54,695 | def import_model ( self , name , path = "floyd.db.models" ) : if name in self . _model_cache : return self . _model_cache [ name ] try : model = getattr ( __import__ ( path , None , None , [ name ] ) , name ) self . _model_cache [ name ] = model except ImportError : return False return model | Imports a model named name from path , returning it from the local model cache if it has been previously loaded and importing it otherwise .
54,696 | def parse_md ( self ) : post_content = _MARKDOWN . convert ( self . raw_src ) if hasattr ( _MARKDOWN , 'Meta' ) : for key in _MARKDOWN . Meta : print "\t meta: %s: %s (%s)" % ( key , _MARKDOWN . Meta [ key ] [ 0 ] , type ( _MARKDOWN . Meta [ key ] [ 0 ] ) ) if key == 'pubdate' : setattr ( self , key , datetime . datetime . fromtimestamp ( float ( _MARKDOWN . Meta [ key ] [ 0 ] ) ) ) else : setattr ( self , key , _MARKDOWN . Meta [ key ] [ 0 ] ) self . content = post_content self . stub = self . __key__ if not hasattr ( self , 'pubdate' ) : print '\t Notice: setting default pubdate' setattr ( self , 'pubdate' , datetime . datetime . now ( ) ) | Takes a post path and returns a dictionary of variables |
54,697 | def execute_train_task_with_dependencies ( self , task_cls , ** kwargs ) : log . info ( "Task {0}" . format ( get_task_name ( task_cls ) ) ) task_inst = task_cls ( ) for arg in task_inst . args : if arg not in kwargs : kwargs [ arg ] = task_inst . args [ arg ] if hasattr ( task_inst , "dependencies" ) : deps = task_inst . dependencies dep_results = [ ] for dep in deps : log . info ( "Dependency {0}" . format ( get_task_name ( dep ) ) ) dep_results . append ( self . execute_train_task_with_dependencies ( dep . cls , ** dep . args ) ) trained_dependencies = [ ] for i in xrange ( 0 , len ( deps ) ) : dep = deps [ i ] dep_result = dep_results [ i ] name = dep . name namespace = dep . namespace category = dep . category trained_dependencies . append ( TrainedDependency ( category = category , namespace = namespace , name = name , inst = dep ) ) task_inst . trained_dependencies = trained_dependencies task_inst . train ( ** kwargs ) return task_inst | Run the training as well as any dependencies of the training . task_cls - class of a task .
54,698 | def execute_predict_task ( self , task_inst , predict_data , ** kwargs ) : result = task_inst . predict ( predict_data , ** task_inst . args ) return result | Do a prediction . task_inst - instance of a task .
54,699 | def train ( self , ** kwargs ) : log . info ( "Starting to train..." ) if not self . setup_run : self . setup ( ) self . trained_tasks = [ ] for task in self . tasks : data = self . reformatted_input [ task . data_format ] [ 'data' ] target = self . reformatted_input [ task . data_format ] [ 'target' ] if data is None : raise Exception ( "Data cannot be none. Check the config file to make sure the right input is being read." ) kwargs [ 'data' ] = data kwargs [ 'target' ] = target trained_task = self . execute_train_task_with_dependencies ( task , ** kwargs ) self . trained_tasks . append ( trained_task ) if hasattr ( trained_task , 'data' ) : self . reformatted_input [ task . data_format ] [ 'data' ] = trained_task . data log . info ( "Finished training." ) | Do the workflow training |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.