| idx (int64, 0–63k) | question (string, 53–5.28k chars) | target (string, 5–805 chars) |
|---|---|---|
1,700
|
def get_reference(root):
    reference = {}
    elem = root.find('bibliographyLink')
    if elem is None:
        raise MissingElementError('bibliographyLink')

    # Try to get reference information via DOI; fall back to preferredKey.
    ref_doi = elem.get('doi', None)
    ref_key = elem.get('preferredKey', None)

    if ref_doi is not None:
        try:
            ref = crossref_api.works(ids=ref_doi)['message']
        except (HTTPError, habanero.RequestError, ConnectionError):
            if ref_key is None:
                raise KeywordError('DOI not found and preferredKey attribute not set')
            else:
                warn('Missing doi attribute in bibliographyLink or lookup failed. '
                     'Setting "detail" key as a fallback; please update to the appropriate fields.')
                reference['detail'] = ref_key
                if reference['detail'][-1] != '.':
                    reference['detail'] += '.'
        else:
            if ref_key is not None:
                warn('Using DOI to obtain reference information, rather than preferredKey.')
            reference['doi'] = elem.attrib['doi']
            reference['journal'] = ref.get('container-title')[0]
            ref_year = ref.get('published-print') or ref.get('published-online')
            reference['year'] = int(ref_year['date-parts'][0][0])
            reference['volume'] = int(ref.get('volume'))
            reference['pages'] = ref.get('page')
            reference['authors'] = []
            for author in ref['author']:
                auth = {}
                auth['name'] = ' '.join([author['given'], author['family']])
                # ORCID may be stored as a URL; keep only the identifier part.
                orcid = author.get('ORCID')
                if orcid:
                    auth['ORCID'] = orcid.lstrip('http://orcid.org/')
                reference['authors'].append(auth)
    elif ref_key is not None:
        warn('Missing doi attribute in bibliographyLink. '
             'Setting "detail" key as a fallback; please update to the appropriate fields.')
        reference['detail'] = ref_key
        if reference['detail'][-1] != '.':
            reference['detail'] += '.'
    else:
        raise MissingAttributeError('preferredKey', 'bibliographyLink')
    return reference
|
Read reference information from the root of a ReSpecTh XML file.
|
1,701
|
def get_ignition_type(root):
    properties = {}
    elem = root.find('ignitionType')
    if elem is None:
        raise MissingElementError('ignitionType')
    elem = elem.attrib

    if 'target' in elem:
        ign_target = elem['target'].rstrip(';').upper()
    else:
        raise MissingAttributeError('target', 'ignitionType')
    if 'type' in elem:
        ign_type = elem['type']
        if ign_type == 'baseline max intercept from d/dt':
            ign_type = 'd/dt max extrapolated'
    else:
        raise MissingAttributeError('type', 'ignitionType')

    # ReSpecTh allows multiple semicolon-separated targets; not supported here.
    if len(ign_target.split(';')) > 1:
        raise NotImplementedError('Multiple ignition targets not supported.')

    # Translate ReSpecTh target names to their ChemKED equivalents.
    if ign_target == 'OHEX':
        ign_target = 'OH*'
    elif ign_target == 'CHEX':
        ign_target = 'CH*'
    elif ign_target == 'P':
        ign_target = 'pressure'
    elif ign_target == 'T':
        ign_target = 'temperature'

    if ign_target not in ['pressure', 'temperature', 'OH', 'OH*', 'CH*', 'CH']:
        raise KeywordError(ign_target + ' not valid ignition target')
    if ign_type not in ['max', 'd/dt max', '1/2 max', 'min', 'd/dt max extrapolated']:
        raise KeywordError(ign_type + ' not valid ignition type')

    properties['type'] = ign_type
    properties['target'] = ign_target
    return properties
|
Get the ignition type and target.
|
1,702
|
def ReSpecTh_to_ChemKED ( filename_xml , file_author = '' , file_author_orcid = '' , * , validate = False ) : tree = etree . parse ( filename_xml ) root = tree . getroot ( ) properties = get_file_metadata ( root ) properties [ 'reference' ] = get_reference ( root ) properties [ 'reference' ] [ 'detail' ] = ( properties [ 'reference' ] . get ( 'detail' , '' ) + 'Converted from ReSpecTh XML file ' + os . path . basename ( filename_xml ) ) properties . update ( get_experiment_kind ( root ) ) properties [ 'common-properties' ] = get_common_properties ( root ) properties [ 'common-properties' ] [ 'ignition-type' ] = get_ignition_type ( root ) properties [ 'datapoints' ] = get_datapoints ( root ) has_pres_rise = ( 'pressure-rise' in properties [ 'common-properties' ] or any ( [ True for dp in properties [ 'datapoints' ] if 'pressure-rise' in dp ] ) ) if has_pres_rise and properties [ 'apparatus' ] [ 'kind' ] == 'rapid compression machine' : raise KeywordError ( 'Pressure rise cannot be defined for RCM.' ) has_vol_hist = any ( [ t . get ( 'type' ) == 'volume' for dp in properties [ 'datapoints' ] for t in dp . get ( 'time-histories' , [ { } ] ) ] ) if has_vol_hist and properties [ 'apparatus' ] [ 'kind' ] == 'shock tube' : raise KeywordError ( 'Volume history cannot be defined for shock tube.' ) if file_author_orcid and not file_author : raise KeywordError ( 'If file_author_orcid is specified, file_author must be as well' ) if file_author : temp_author = { 'name' : file_author } if file_author_orcid : temp_author [ 'ORCID' ] = file_author_orcid properties [ 'file-authors' ] . append ( temp_author ) for idx in range ( len ( properties [ 'datapoints' ] ) ) : for prop in properties [ 'common-properties' ] : properties [ 'datapoints' ] [ idx ] [ prop ] = properties [ 'common-properties' ] [ prop ] if validate : chemked . ChemKED ( dict_input = properties ) return properties
|
Convert a ReSpecTh XML file to a ChemKED-compliant dictionary.
|
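A minimal usage sketch for the converter above; the file name and author details are hypothetical placeholders, and `validate=True` exercises the `chemked.ChemKED` validation pass visible in the code.

```python
# Hypothetical usage: file name and author details are placeholders.
properties = ReSpecTh_to_ChemKED(
    'experiment.xml',
    file_author='Jane Doe',
    file_author_orcid='0000-0000-0000-0000',
    validate=True,  # validate the result via chemked.ChemKED
)
print(properties['reference']['detail'])
```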
1,703
|
def respth2ck ( argv = None ) : parser = ArgumentParser ( description = 'Convert a ReSpecTh XML file to a ChemKED YAML file.' ) parser . add_argument ( '-i' , '--input' , type = str , required = True , help = 'Input filename (e.g., "file1.yaml")' ) parser . add_argument ( '-o' , '--output' , type = str , required = False , default = '' , help = 'Output filename (e.g., "file1.xml")' ) parser . add_argument ( '-fa' , '--file-author' , dest = 'file_author' , type = str , required = False , default = '' , help = 'File author name to override original' ) parser . add_argument ( '-fo' , '--file-author-orcid' , dest = 'file_author_orcid' , type = str , required = False , default = '' , help = 'File author ORCID' ) args = parser . parse_args ( argv ) filename_ck = args . output filename_xml = args . input properties = ReSpecTh_to_ChemKED ( filename_xml , args . file_author , args . file_author_orcid , validate = True ) if not filename_ck : filename_ck = os . path . join ( os . path . dirname ( filename_xml ) , os . path . splitext ( os . path . basename ( filename_xml ) ) [ 0 ] + '.yaml' ) with open ( filename_ck , 'w' ) as outfile : yaml . dump ( properties , outfile , default_flow_style = False ) print ( 'Converted to ' + filename_ck )
|
Command-line entry point for converting a ReSpecTh XML file to a ChemKED YAML file.
|
1,704
|
def ck2respth ( argv = None ) : parser = ArgumentParser ( description = 'Convert a ChemKED YAML file to a ReSpecTh XML file.' ) parser . add_argument ( '-i' , '--input' , type = str , required = True , help = 'Input filename (e.g., "file1.xml")' ) parser . add_argument ( '-o' , '--output' , type = str , required = False , default = '' , help = 'Output filename (e.g., "file1.yaml")' ) args = parser . parse_args ( argv ) c = chemked . ChemKED ( yaml_file = args . input ) c . convert_to_ReSpecTh ( args . output )
|
Command-line entry point for converting a ChemKED YAML file to a ReSpecTh XML file.
|
1,705
|
def main ( argv = None ) : parser = ArgumentParser ( description = 'Convert between ReSpecTh XML file and ChemKED YAML file ' 'automatically based on file extension.' ) parser . add_argument ( '-i' , '--input' , type = str , required = True , help = 'Input filename (e.g., "file1.yaml" or "file2.xml")' ) parser . add_argument ( '-o' , '--output' , type = str , required = False , default = '' , help = 'Output filename (e.g., "file1.xml" or "file2.yaml")' ) parser . add_argument ( '-fa' , '--file-author' , dest = 'file_author' , type = str , required = False , default = '' , help = 'File author name to override original' ) parser . add_argument ( '-fo' , '--file-author-orcid' , dest = 'file_author_orcid' , type = str , required = False , default = '' , help = 'File author ORCID' ) args = parser . parse_args ( argv ) if os . path . splitext ( args . input ) [ 1 ] == '.xml' and os . path . splitext ( args . output ) [ 1 ] == '.yaml' : respth2ck ( [ '-i' , args . input , '-o' , args . output , '-fa' , args . file_author , '-fo' , args . file_author_orcid ] ) elif os . path . splitext ( args . input ) [ 1 ] == '.yaml' and os . path . splitext ( args . output ) [ 1 ] == '.xml' : c = chemked . ChemKED ( yaml_file = args . input ) c . convert_to_ReSpecTh ( args . output ) elif os . path . splitext ( args . input ) [ 1 ] == '.xml' and os . path . splitext ( args . output ) [ 1 ] == '.xml' : raise KeywordError ( 'Cannot convert .xml to .xml' ) elif os . path . splitext ( args . input ) [ 1 ] == '.yaml' and os . path . splitext ( args . output ) [ 1 ] == '.yaml' : raise KeywordError ( 'Cannot convert .yaml to .yaml' ) else : raise KeywordError ( 'Input/output args need to be .xml/.yaml' )
|
General function for converting between ReSpecTh and ChemKED files based on extension .
|
1,706
|
def process_exception(self, request, exception):
    # Attach the authenticated user's email to the request metadata.
    if request.user and hasattr(request.user, 'email'):
        request.META['USER'] = request.user.email
|
Add user details .
|
1,707
|
def process_response ( self , request , response ) : if response . status_code == 404 and not settings . DEBUG : domain = request . get_host ( ) path = request . get_full_path ( ) referer = force_text ( request . META . get ( 'HTTP_REFERER' , '' ) , errors = 'replace' ) if not self . is_ignorable_request ( request , path , domain , referer ) : ua = request . META . get ( 'HTTP_USER_AGENT' , '<none>' ) ip = request . META . get ( 'REMOTE_ADDR' , '<none>' ) user = None if request . user and hasattr ( request . user , 'email' ) : user = request . user . email content = ( "Referrer: %s\n" "Requested URL: %s\n" "User agent: %s\n" "IP address: %s\n" "User: %s\n" ) % ( referer , path , ua , ip , user ) if self . is_internal_request ( domain , referer ) : internal = 'INTERNAL ' else : internal = '' mail_managers ( "Broken %slink on %s" % ( internal , domain ) , content , fail_silently = True ) return response
|
Send broken link emails for relevant 404 NOT FOUND responses .
|
1,708
|
def is_internal_request(self, domain, referer):
    # Match referers that start with the current domain (http or https).
    return bool(re.match("^https?://%s/" % re.escape(domain), referer))
|
Return True if the referring URL is on the same domain as the current request.
|
1,709
|
def parse(self, representation):
    elements = extract_tokens(representation)
    try:
        scales = [DurationRepresentation(float(p[0]), Scale(p[1])) for p in elements]
    except ValueError:
        raise ScaleFormatError(
            "Malformed duration representation: {0}".format(representation))
    return scales
|
Parse a duration string representation.
|
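A sketch of how `parse` might be called, assuming `extract_tokens` splits a representation such as `'2h 30m'` into (value, scale) pairs; the owner class name and the exact output shape are illustrative, not taken from the source.

```python
# Illustrative only: DurationParser is a hypothetical owner class for parse().
parser = DurationParser()
scales = parser.parse('2h 30m')
# Assumed result shape: [DurationRepresentation(2.0, Scale('h')),
#                        DurationRepresentation(30.0, Scale('m'))]
```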
1,710
|
def get_or_create_from_ip(ip):
    data = ip_api_handler.get(ip)
    if data and any(v for v in data.values()):
        # Make sure the looked-up record carries the IP it was created from.
        if data.get('ip_address', None) is None or not data['ip_address']:
            data['ip_address'] = ip
        return IPInfo.objects.get_or_create(**data)
    return None, False
|
Get or create an entry using information obtained from an IP.
|
1,711
|
def update_ip_info ( self , since_days = 10 , save = False , force = False ) : try : last_check = IPInfoCheck . objects . get ( ip_address = self . client_ip_address ) since_last = datetime . date . today ( ) - last_check . date if since_last <= datetime . timedelta ( days = since_days ) : if not self . ip_info or ( self . ip_info != last_check . ip_info and force ) : self . ip_info = last_check . ip_info self . save ( ) return True elif save : self . save ( ) return False ip_info , created = IPInfo . get_or_create_from_ip ( self . client_ip_address ) last_check . date = datetime . date . today ( ) last_check . save ( ) if created : last_check . ip_info = ip_info self . ip_info = ip_info self . save ( ) return True elif save : self . save ( ) return False except IPInfoCheck . DoesNotExist : self . ip_info = IPInfoCheck . check_ip ( self . client_ip_address ) self . save ( ) return True
|
Update the IP info .
|
1,712
|
def start_daemon ( ) : if RequestLog . daemon is None : parser = get_nginx_parser ( ) RequestLog . daemon = RequestLog . ParseToDBThread ( parser , daemon = True ) RequestLog . daemon . start ( ) return RequestLog . daemon
|
Start a thread to continuously read log files and append lines in DB .
|
1,713
|
def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''):
    properties = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid,
                                     validate=False)
    return cls(dict_input=properties)
|
Construct a ChemKED instance directly from a ReSpecTh file .
|
1,714
|
def validate_yaml ( self , properties ) : validator = OurValidator ( schema ) if not validator . validate ( properties ) : for key , value in validator . errors . items ( ) : if any ( [ 'unallowed value' in v for v in value ] ) : print ( ( '{key} has an illegal value. Allowed values are {values} and are case ' 'sensitive.' ) . format ( key = key , values = schema [ key ] [ 'allowed' ] ) ) raise ValueError ( validator . errors )
|
Validate the parsed YAML file for adherence to the ChemKED format.
|
1,715
|
def write_file(self, filename, *, overwrite=False):
    if exists(filename) and not overwrite:
        raise OSError(filename + ' already present. Specify "overwrite=True" '
                      'to overwrite, or rename.')
    with open(filename, 'w') as yaml_file:
        yaml.dump(self._properties, yaml_file)
|
Write a new ChemKED YAML file based on the object.
|
1,716
|
def process_quantity ( self , properties ) : quant = Q_ ( properties [ 0 ] ) if len ( properties ) > 1 : unc = properties [ 1 ] uncertainty = unc . get ( 'uncertainty' , False ) upper_uncertainty = unc . get ( 'upper-uncertainty' , False ) lower_uncertainty = unc . get ( 'lower-uncertainty' , False ) uncertainty_type = unc . get ( 'uncertainty-type' ) if uncertainty_type == 'relative' : if uncertainty : quant = quant . plus_minus ( float ( uncertainty ) , relative = True ) elif upper_uncertainty and lower_uncertainty : warn ( 'Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.' ) uncertainty = max ( float ( upper_uncertainty ) , float ( lower_uncertainty ) ) quant = quant . plus_minus ( uncertainty , relative = True ) else : raise ValueError ( 'Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.' ) elif uncertainty_type == 'absolute' : if uncertainty : uncertainty = Q_ ( uncertainty ) quant = quant . plus_minus ( uncertainty . to ( quant . units ) . magnitude ) elif upper_uncertainty and lower_uncertainty : warn ( 'Asymmetric uncertainties are not supported. The ' 'maximum of lower-uncertainty and upper-uncertainty ' 'has been used as the symmetric uncertainty.' ) uncertainty = max ( Q_ ( upper_uncertainty ) , Q_ ( lower_uncertainty ) ) quant = quant . plus_minus ( uncertainty . to ( quant . units ) . magnitude ) else : raise ValueError ( 'Either "uncertainty" or "upper-uncertainty" and ' '"lower-uncertainty" need to be specified.' ) else : raise ValueError ( 'uncertainty-type must be one of "absolute" or "relative"' ) return quant
|
Process the uncertainty information for a given quantity and return it.
|
1,717
|
def get_cantera_composition_string ( self , species_conversion = None ) : if self . composition_type in [ 'mole fraction' , 'mass fraction' ] : factor = 1.0 elif self . composition_type == 'mole percent' : factor = 100.0 else : raise ValueError ( 'Unknown composition type: {}' . format ( self . composition_type ) ) if species_conversion is None : comps = [ '{!s}:{:.4e}' . format ( c . species_name , c . amount . magnitude / factor ) for c in self . composition . values ( ) ] else : comps = [ ] for c in self . composition . values ( ) : amount = c . amount . magnitude / factor idents = [ getattr ( c , s , False ) for s in [ 'species_name' , 'InChI' , 'SMILES' ] ] present = [ i in species_conversion for i in idents ] if not any ( present ) : comps . append ( '{!s}:{:.4e}' . format ( c . species_name , amount ) ) else : if len ( [ i for i in present if i ] ) > 1 : raise ValueError ( 'More than one conversion present for species {}' . format ( c . species_name ) ) ident = idents [ present . index ( True ) ] species_replacement_name = species_conversion . pop ( ident ) comps . append ( '{!s}:{:.4e}' . format ( species_replacement_name , amount ) ) if len ( species_conversion ) > 0 : raise ValueError ( 'Unknown species in conversion: {}' . format ( species_conversion ) ) return ', ' . join ( comps )
|
Get the composition in a string format suitable for input to Cantera .
|
1,718
|
def get_cantera_mole_fraction(self, species_conversion=None):
    if self.composition_type == 'mass fraction':
        raise ValueError('Cannot get mole fractions from the given composition.\n'
                         '{}'.format(self.composition))
    else:
        return self.get_cantera_composition_string(species_conversion)
|
Get the mole fractions in a string format suitable for input to Cantera .
|
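A hedged usage sketch: `dp` stands for a hypothetical object exposing this method, and the species mapping renames one component in the returned Cantera string.

```python
# Hypothetical usage: `dp` is an object exposing get_cantera_mole_fraction.
mole_fractions = dp.get_cantera_mole_fraction(
    species_conversion={'nC7H16': 'NC7H16'}  # rename a species for Cantera
)
# e.g. 'NC7H16:5.0000e-02, O2:1.1000e-01, N2:8.4000e-01' (illustrative)
```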
1,719
|
def get_cantera_mass_fraction ( self , species_conversion = None ) : if self . composition_type in [ 'mole fraction' , 'mole percent' ] : raise ValueError ( 'Cannot get mass fractions from the given composition.\n' '{}' . format ( self . composition ) ) else : return self . get_cantera_composition_string ( species_conversion )
|
Get the mass fractions in a string format suitable for input to Cantera .
|
1,720
|
def lockfile(lockfile_name, lock_wait_timeout=-1):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            lock = FileLock(lockfile_name)
            try:
                lock.acquire(lock_wait_timeout)
            except AlreadyLocked:
                return
            except LockTimeout:
                return
            try:
                result = func(*args, **kwargs)
            finally:
                lock.release()
            return result
        return wrapper
    return decorator
|
Run the wrapped function only if the lockfile is not already held.
|
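A short sketch of the decorator in use: a periodic job guarded so overlapping runs are skipped; the lock path and function are hypothetical.

```python
# Hypothetical usage: overlapping invocations return immediately.
@lockfile('/tmp/send_digest.lock', lock_wait_timeout=0)
def send_digest():
    ...  # body runs only when the lock is acquired
```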
1,721
|
def get_username(identifier):
    # Raw string avoids the invalid-escape warning on '\w'.
    pattern = re.compile(r'.+@\w+\..+')
    if pattern.match(identifier):
        try:
            user = User.objects.get(email=identifier)
        except User.DoesNotExist:  # narrowed from a bare except
            raise Http404
        else:
            return user.username
    else:
        return identifier
|
Check whether a string is an email address or not.
|
1,722
|
def _create_payload(self, symbols):
    payload = {'access_key': self.access_key}
    if symbols is not None:
        payload['symbols'] = ','.join(symbols)
    return payload
|
Create a payload, omitting None values.
|
1,723
|
def historical_rates ( self , date , symbols = None ) : try : if isinstance ( date , datetime . date ) : date = date . isoformat ( ) symbols = symbols or self . symbols payload = self . _create_payload ( symbols ) url = BASE_URL + date response = requests . get ( url , params = payload ) response . raise_for_status ( ) return response . json ( ) except requests . exceptions . RequestException as ex : raise FixerioException ( str ( ex ) )
|
Get historical rates for any day since the given date.
|
1,724
|
def distinct(l):
    seen = set()
    seen_add = seen.add  # local alias for speed inside the generator expression
    return (_ for _ in l if not (_ in seen or seen_add(_)))
|
Return a generator over the input with duplicates removed.
|
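Because `distinct` returns a lazy generator, it is usually wrapped in `list()`; the example below follows directly from the code.

```python
list(distinct([1, 2, 1, 3, 2]))  # -> [1, 2, 3]
```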
1,725
|
def iter_format_modules ( lang ) : if check_for_language ( lang ) : format_locations = [ ] for path in CUSTOM_FORMAT_MODULE_PATHS : format_locations . append ( path + '.%s' ) format_locations . append ( 'django.conf.locale.%s' ) locale = to_locale ( lang ) locales = [ locale ] if '_' in locale : locales . append ( locale . split ( '_' ) [ 0 ] ) for location in format_locations : for loc in locales : try : yield import_module ( '.formats' , location % loc ) except ImportError : pass
|
Does the heavy lifting of finding format modules .
|
1,726
|
def get_format_modules ( lang = None , reverse = False ) : if lang is None : lang = get_language ( ) modules = _format_modules_cache . setdefault ( lang , list ( iter_format_modules ( lang ) ) ) if reverse : return list ( reversed ( modules ) ) return modules
|
Return a list of the format modules found.
|
1,727
|
def as_view ( cls , ** initkwargs ) : for key in initkwargs : if key in cls . http_method_names : raise TypeError ( "You tried to pass in the %s method name as a " "keyword argument to %s(). Don't do that." % ( key , cls . __name__ ) ) if not hasattr ( cls , key ) : raise TypeError ( "%s() received an invalid keyword %r. as_view " "only accepts arguments that are already " "attributes of the class." % ( cls . __name__ , key ) ) def view ( request , * args , ** kwargs ) : self = cls ( ** initkwargs ) if hasattr ( self , 'get' ) and not hasattr ( self , 'head' ) : self . head = self . get self . request = request self . args = args self . kwargs = kwargs self . authed_view = initkwargs . get ( 'authed_view' ) self . authed_view_kwargs = initkwargs . get ( 'authed_view_kwargs' ) self . anonymous_view = initkwargs . get ( 'anonymous_view' ) self . anonymous_view_kwargs = initkwargs . get ( 'anonymous_view_kwargs' ) return self . dispatch ( request , * args , ** kwargs ) update_wrapper ( view , cls , updated = ( ) ) update_wrapper ( view , cls . dispatch , assigned = ( ) ) return view
|
Main entry point for a request - response process .
|
1,728
|
def context ( self ) : stats = status_codes_by_date_stats ( ) attacks_data = [ { 'type' : 'line' , 'zIndex' : 9 , 'name' : _ ( 'Attacks' ) , 'data' : [ ( v [ 0 ] , v [ 1 ] [ 'attacks' ] ) for v in stats ] } ] codes_data = [ { 'zIndex' : 4 , 'name' : '2xx' , 'data' : [ ( v [ 0 ] , v [ 1 ] [ 200 ] ) for v in stats ] } , { 'zIndex' : 5 , 'name' : '3xx' , 'data' : [ ( v [ 0 ] , v [ 1 ] [ 300 ] ) for v in stats ] } , { 'zIndex' : 6 , 'name' : '4xx' , 'data' : [ ( v [ 0 ] , v [ 1 ] [ 400 ] ) for v in stats ] } , { 'zIndex' : 8 , 'name' : '5xx' , 'data' : [ ( v [ 0 ] , v [ 1 ] [ 500 ] ) for v in stats ] } ] return { 'generic_chart' : json . dumps ( status_codes_by_date_chart ( ) ) , 'attacks_data' : json . dumps ( attacks_data ) , 'codes_data' : json . dumps ( codes_data ) }
|
Get the context .
|
1,729
|
def widgets ( self ) : widgets = [ ] for i , chart in enumerate ( most_visited_pages_charts ( ) ) : widgets . append ( Widget ( html_id = 'most_visited_chart_%d' % i , content = json . dumps ( chart ) , template = 'meerkat/widgets/highcharts.html' , js_code = [ 'plotOptions.tooltip.pointFormatter' ] ) ) return widgets
|
Get the widgets.
|
1,730
|
def get(self, str_representation):
    for scale in self.SCALES:
        if str_representation in scale:
            return scale
    raise ScaleFormatError(
        "Unsupported scale format: {0}".format(str_representation))
|
Retrieve a scale from its string representation.
|
1,731
|
def valid_token(token):
    is_scale = False
    try:
        Scale(token)
        is_scale = True
    except ScaleFormatError:
        pass
    if any([token.isdigit(), token in SEPARATOR_TOKENS, is_scale]):
        return True
    return False
|
Check whether a provided string is a valid duration token representation.
|
1,732
|
def extract_tokens ( representation , separators = SEPARATOR_CHARACTERS ) : buff = "" elements = [ ] last_index = 0 last_token = None for index , c in enumerate ( representation ) : if c in separators : if buff : if not valid_token ( buff ) : raise InvalidTokenError ( "Duration representation {0} contains " "an invalid token: {1}" . format ( representation , buff ) ) if not buff . strip ( ) in SEPARATOR_TOKENS : elements . append ( buff ) buff = "" last_token = None else : token = compute_char_token ( c ) if ( token is not None and last_token is not None and token != last_token ) : elements . append ( buff ) buff = c else : buff += c last_token = token elements . append ( buff ) return list ( zip ( elements [ : : 2 ] , elements [ 1 : : 2 ] ) )
|
Extract duration tokens from a duration representation.
|
1,733
|
def create_random_string(length=7, chars='ABCDEFGHJKMNPQRSTUVWXYZ23456789',
                         repetitions=False):
    if repetitions:
        return ''.join(random.choice(chars) for _ in range(length))
    return ''.join(random.sample(chars, length))
|
Return a random string based on the provided arguments.
|
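Worth noting from the code: without `repetitions`, characters are drawn via `random.sample`, so `length` cannot exceed the size of `chars` (31 by default); with `repetitions=True` any length works.

```python
code = create_random_string()                           # e.g. 'K3MZT7Q' (illustrative)
long_code = create_random_string(40, repetitions=True)  # repetition allows > 31 chars
```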
1,734
|
def load_member(fqn):
    modulename, member_name = split_fqn(fqn)
    module = __import__(modulename, globals(), locals(), member_name)
    return getattr(module, member_name)
|
Load and return a class for a given fully qualified name.
|
1,735
|
def split_fqn(fqn):
    # Accept either a string or a callable returning the string.
    if hasattr(fqn, '__call__'):
        fqn_string = fqn()
    else:
        fqn_string = fqn
    return fqn_string.rsplit('.', 1)
|
Return the left and right parts of the import path.
|
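The two helpers above compose as shown below; the stdlib target is chosen purely for illustration.

```python
# Resolve a dotted path to an attribute; urllib.parse.urljoin is illustrative.
urljoin = load_member('urllib.parse.urljoin')
assert split_fqn('urllib.parse.urljoin') == ['urllib.parse', 'urljoin']
```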
1,736
|
def send ( self , data ) : self . logger . debug ( 'Send data: {}' . format ( data ) ) if not self . connected : self . logger . warning ( 'Connection not established. Return...' ) return self . websocket . send ( json . dumps ( data ) )
|
Sends data to the server .
|
1,737
|
def _on_message ( self , socket , message ) : data = json . loads ( message ) message_type = None identifier = None subscription = None if 'type' in data : message_type = data [ 'type' ] if 'identifier' in data : identifier = json . loads ( data [ 'identifier' ] ) if identifier is not None : subscription = self . find_subscription ( identifier ) if subscription is not None : subscription . received ( data ) elif message_type == 'welcome' : self . logger . debug ( 'Welcome message received.' ) for subscription in self . subscriptions . values ( ) : if subscription . state == 'connection_pending' : subscription . create ( ) elif message_type == 'ping' : if self . log_ping : self . logger . debug ( 'Ping received.' ) else : self . logger . warning ( 'Message not supported. (Message: {})' . format ( message ) )
|
Called whenever a message arrives.
|
1,738
|
def _on_close ( self , socket ) : self . logger . debug ( 'Connection closed.' ) for subscription in self . subscriptions . values ( ) : if subscription . state == 'subscribed' : subscription . state = 'connection_pending'
|
Called when the connection was closed .
|
1,739
|
def connected ( self ) : return self . websocket is not None and self . websocket . sock is not None and self . websocket . sock . connected
|
Whether the client is connected to the server.
|
1,740
|
def find_subscription(self, identifier):
    for subscription in self.subscriptions.values():
        if subscription.identifier == identifier:
            return subscription
|
Find a subscription by its identifier.
|
1,741
|
def create ( self ) : self . logger . debug ( 'Create subscription on server...' ) if not self . connection . connected : self . state = 'connection_pending' return data = { 'command' : 'subscribe' , 'identifier' : self . _identifier_string ( ) } self . connection . send ( data ) self . state = 'pending'
|
Subscribe on the server.
|
1,742
|
def remove ( self ) : self . logger . debug ( 'Remove subscription from server...' ) data = { 'command' : 'unsubscribe' , 'identifier' : self . _identifier_string ( ) } self . connection . send ( data ) self . state = 'unsubscribed'
|
Removes the subscription .
|
1,743
|
def send ( self , message ) : self . logger . debug ( 'Send message: {}' . format ( message ) ) if self . state == 'pending' or self . state == 'connection_pending' : self . logger . info ( 'Connection not established. Add message to queue.' ) self . message_queue . append ( message ) return elif self . state == 'unsubscribed' or self . state == 'rejected' : self . logger . warning ( 'Not subscribed! Message discarded.' ) return data = { 'command' : 'message' , 'identifier' : self . _identifier_string ( ) , 'data' : message . raw_message ( ) } self . connection . send ( data )
|
Sends data to the server on the subscription channel .
|
1,744
|
def received ( self , data ) : self . logger . debug ( 'Data received: {}' . format ( data ) ) message_type = None if 'type' in data : message_type = data [ 'type' ] if message_type == 'confirm_subscription' : self . _subscribed ( ) elif message_type == 'reject_subscription' : self . _rejected ( ) elif self . receive_callback is not None and 'message' in data : self . receive_callback ( data [ 'message' ] ) else : self . logger . warning ( 'Message type unknown. ({})' . format ( message_type ) )
|
API for the connection to forward information to this subscription instance .
|
1,745
|
def _subscribed ( self ) : self . logger . debug ( 'Subscription confirmed.' ) self . state = 'subscribed' for message in self . message_queue : self . send ( message )
|
Called when the subscription was accepted successfully .
|
1,746
|
def cli_opts ( ) : parser = argparse . ArgumentParser ( ) parser . add_argument ( "--homeassistant-config" , type = str , required = False , dest = "config" , help = "Create configuration section for home assistant" , ) parser . add_argument ( "-f" , "--filter" , type = str , required = False , dest = "filter" , help = "Ignore events related with these devices" , ) parser . add_argument ( "-o" , "--output" , type = str , required = False , dest = "output" , help = "Send output to file" , ) parser . add_argument ( "-v" , "--verbose" , action = "store_true" , dest = "verbose" , help = "Verbose output" , ) parser . add_argument ( 'device' ) return parser . parse_args ( )
|
Handle the command line options
|
1,747
|
def _setup_signal_handler ( self ) : signal . signal ( signal . SIGTERM , self . _signal_handler ) signal . signal ( signal . SIGINT , self . _signal_handler ) signal . signal ( signal . SIGQUIT , self . _signal_handler )
|
Register signal handlers
|
1,748
|
def _signal_handler ( self , signum , frame ) : if self . _options . config : with open ( self . _options . config , "w" ) as cfg : yaml . dump ( self . _home_assistant_config ( ) , cfg ) print ( "Dumped home assistant configuration at" , self . _options . config ) self . _connection . close ( ) sys . exit ( 0 )
|
Method called when handling signals
|
1,749
|
def start ( self ) : print ( "Entering monitoring mode, press CTRL-C to quit" ) serial = self . _connection . serial while True : serial . write ( b"@R" ) length = int ( serial . read ( ) , 16 ) data = serial . read ( length * 2 ) message = messages . parse ( data ) if not ( self . _options . filter and message . entity and message . entity in self . _devices ) : logging . debug ( " " . join ( message . bytes ) ) if not self . _options . config or message . entity is None or message . entity in self . _devices : continue print ( "New device found" ) ha_id = input ( "Enter home assistant unique ID: " ) name = input ( "Enter name: " ) self . _add_device ( scs_id = message . entity , ha_id = ha_id , name = name )
|
Monitor the bus for events and handle them
|
1,750
|
def _add_device ( self , scs_id , ha_id , name ) : if scs_id in self . _devices : return self . _devices [ scs_id ] = { 'name' : name , 'ha_id' : ha_id }
|
Add device to the list of known ones
|
1,751
|
def _home_assistant_config ( self ) : devices = { } for scs_id , dev in self . _devices . items ( ) : devices [ dev [ 'ha_id' ] ] = { 'name' : dev [ 'name' ] , 'scs_id' : scs_id } return { 'devices' : devices }
|
Creates home assistant configuration for the known devices
|
1,752
|
def _load_filter ( self , config ) : path = pathlib . Path ( config ) if not path . is_file ( ) : return with open ( config , 'r' ) as conf : devices = yaml . load ( conf ) [ 'devices' ] for ha_id , dev in devices . items ( ) : self . _devices [ dev [ 'scs_id' ] ] = { ha_id : dev , 'name' : dev [ 'name' ] }
|
Load the filter file and populate self._devices accordingly.
|
1,753
|
def close ( self ) : self . _serial . write ( b"@c" ) self . _serial . read ( ) self . _serial . close ( )
|
Close the connection to the serial port, ensuring no pending operations are left.
|
1,754
|
def load ( self , value ) : if self . property_type is None : return value elif not isinstance ( self . property_type , BaseType ) : raise TypeError ( 'property_type must be schematics BaseType' ) else : native_value = self . property_type . to_native ( value ) self . property_type . validate ( native_value ) return native_value
|
Load a value, converting it to the proper type if property_type exists.
|
1,755
|
def _update_property_keys ( cls ) : for attr_name , config_prop in cls . _iter_config_props ( ) : if config_prop . property_key is None : config_prop . property_key = attr_name
|
Set unspecified property_keys for each ConfigProperty to the name of the class attribute.
|
1,756
|
def _set_instance_prop ( self , attr_name , config_prop , value ) : setattr ( self , attr_name , value ) if not config_prop . exclude_from_varz : self . varz [ attr_name ] = value
|
Set an instance property to a value and add it to varz if needed.
|
1,757
|
def _load ( self ) : for attr_name , config_prop in self . _iter_config_props ( ) : found = False for loader in self . _loaders : if loader . exists ( config_prop . property_key ) : raw_value = loader . get ( config_prop . property_key ) converted_value = config_prop . load ( raw_value ) self . _set_instance_prop ( attr_name , config_prop , converted_value ) found = True break if not found : if not config_prop . required or config_prop . default is not None : self . _set_instance_prop ( attr_name , config_prop , config_prop . default ) else : raise ValueError ( 'Missing required ConfigProperty {}' . format ( attr_name ) )
|
Load values for all ConfigProperty attributes
|
1,758
|
def in_same_dir(as_file, target_file):
    return os.path.abspath(os.path.join(os.path.dirname(as_file), target_file))
|
Return an absolute path to a target file located in the same directory as as_file.
|
1,759
|
def compare_name(given_name, family_name, question_name):
    given_name = given_name.lower()
    family_name = family_name.lower()
    question_name = question_name.lower()

    # Rearrange names given as "Last, First" into "First Last".
    if ',' in question_name:
        name_split = question_name.split(',')
        name_split.reverse()
        question_name = ' '.join(name_split).strip()

    # Remove periods from initials.
    question_name = question_name.replace('.', '')
    given_name = given_name.replace('.', '')
    family_name = family_name.replace('.', '')

    given_name = list(filter(None, re.split(r"[, \-.]+", given_name)))
    num_family_names = len(list(filter(None, re.split(r"[, .]+", family_name))))
    name_split = list(filter(None, re.split(r"[, \-.]+", question_name)))
    first_name = [name_split[0]]
    if len(name_split) > 2:
        first_name += [n for n in name_split[1:-num_family_names]]

    if len(first_name) > 1 and len(given_name) == len(first_name):
        # Both have middle names: compare middle names by initial only.
        for i in range(1, len(first_name)):
            first_name[i] = first_name[i][0]
            given_name[i] = given_name[i][0]
    elif len(given_name) != len(first_name):
        # Different numbers of first/middle names; compare the common part.
        min_names = min(len(given_name), len(first_name))
        first_name = first_name[:min_names]
        given_name = given_name[:min_names]

    # If either leading name is an initial, compare initials; otherwise
    # reduce both full first names to their initials.
    if len(first_name[0]) == 1 or len(given_name[0]) == 1:
        given_name[0] = given_name[0][0]
        first_name[0] = first_name[0][0]
    if len(first_name[0]) > 1 or len(given_name[0]) > 1:
        given_name[0] = given_name[0][0]
        first_name[0] = name_split[0][0]

    # Handle hyphenated family names.
    if num_family_names == 1 and '-' in family_name:
        num_hyphen = family_name.count('-')
        family_name_compare = '-'.join(name_split[-(num_hyphen + 1):])
    else:
        family_name_compare = ' '.join(name_split[-num_family_names:])

    return given_name == first_name and family_name == family_name_compare
|
Compare a name in question to a specified name separated into given and family parts.
|
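Illustrative checks that follow from the code above: comma-separated names are reversed, and leading given names are compared by initial.

```python
compare_name('Kyle', 'Niemeyer', 'Niemeyer, Kyle')  # True: 'Last, First' reversed
compare_name('Kyle', 'Niemeyer', 'K. Niemeyer')     # True: initials match
compare_name('Kyle', 'Niemeyer', 'Kyle Brown')      # False: family name differs
```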
1,760
|
def _validate_isvalid_history ( self , isvalid_history , field , value ) : history_type = value [ 'type' ] if history_type . endswith ( 'emission' ) : history_type = 'emission' elif history_type . endswith ( 'absorption' ) : history_type = 'absorption' quantity = 1.0 * ( units ( value [ 'quantity' ] [ 'units' ] ) ) try : quantity . to ( property_units [ history_type ] ) except pint . DimensionalityError : self . _error ( field , 'incompatible units; should be consistent ' 'with ' + property_units [ history_type ] ) time = 1.0 * ( units ( value [ 'time' ] [ 'units' ] ) ) try : time . to ( property_units [ 'time' ] ) except pint . DimensionalityError : self . _error ( field , 'incompatible units; should be consistent ' 'with ' + property_units [ 'time' ] ) n_cols = len ( value [ 'values' ] [ 0 ] ) max_cols = max ( value [ 'time' ] [ 'column' ] , value [ 'quantity' ] [ 'column' ] , value . get ( 'uncertainty' , { } ) . get ( 'column' , 0 ) ) + 1 if n_cols > max_cols : self . _error ( field , 'too many columns in the values' ) elif n_cols < max_cols : self . _error ( field , 'not enough columns in the values' )
|
Checks that the given time history is properly formatted .
|
1,761
|
def _validate_isvalid_quantity ( self , isvalid_quantity , field , value ) : quantity = Q_ ( value [ 0 ] ) low_lim = 0.0 * units ( property_units [ field ] ) try : if quantity <= low_lim : self . _error ( field , 'value must be greater than 0.0 {}' . format ( property_units [ field ] ) , ) except pint . DimensionalityError : self . _error ( field , 'incompatible units; should be consistent ' 'with ' + property_units [ field ] )
|
Checks for valid given value and appropriate units .
|
1,762
|
def _validate_isvalid_uncertainty ( self , isvalid_uncertainty , field , value ) : self . _validate_isvalid_quantity ( True , field , value ) if len ( value ) > 1 and value [ 1 ] [ 'uncertainty-type' ] != 'relative' : if value [ 1 ] . get ( 'uncertainty' ) is not None : self . _validate_isvalid_quantity ( True , field , [ value [ 1 ] [ 'uncertainty' ] ] ) if value [ 1 ] . get ( 'upper-uncertainty' ) is not None : self . _validate_isvalid_quantity ( True , field , [ value [ 1 ] [ 'upper-uncertainty' ] ] ) if value [ 1 ] . get ( 'lower-uncertainty' ) is not None : self . _validate_isvalid_quantity ( True , field , [ value [ 1 ] [ 'lower-uncertainty' ] ] )
|
Checks for valid given value and appropriate units with uncertainty .
|
1,763
|
def _validate_isvalid_orcid ( self , isvalid_orcid , field , value ) : if isvalid_orcid and 'ORCID' in value : try : res = search_orcid ( value [ 'ORCID' ] ) except ConnectionError : warn ( 'network not available, ORCID not validated.' ) return except HTTPError : self . _error ( field , 'ORCID incorrect or invalid for ' + value [ 'name' ] ) return family_name = res [ 'name' ] [ 'family-name' ] [ 'value' ] given_name = res [ 'name' ] [ 'given-names' ] [ 'value' ] if not compare_name ( given_name , family_name , value [ 'name' ] ) : self . _error ( field , 'Name and ORCID do not match. Name supplied: ' + value [ 'name' ] + '. Name associated with ORCID: ' + ' ' . join ( [ given_name , family_name ] ) )
|
Checks for valid ORCID if given .
|
1,764
|
def _validate_isvalid_composition ( self , isvalid_composition , field , value ) : sum_amount = 0.0 if value [ 'kind' ] in [ 'mass fraction' , 'mole fraction' ] : low_lim = 0.0 up_lim = 1.0 total_amount = 1.0 elif value [ 'kind' ] in [ 'mole percent' ] : low_lim = 0.0 up_lim = 100.0 total_amount = 100.0 else : self . _error ( field , 'composition kind must be "mole percent", "mass fraction", or ' '"mole fraction"' ) return False for sp in value [ 'species' ] : amount = sp [ 'amount' ] [ 0 ] sum_amount += amount if amount < low_lim : self . _error ( field , 'Species ' + sp [ 'species-name' ] + ' ' + value [ 'kind' ] + ' must be greater than {:.1f}' . format ( low_lim ) ) elif amount > up_lim : self . _error ( field , 'Species ' + sp [ 'species-name' ] + ' ' + value [ 'kind' ] + ' must be less than {:.1f}' . format ( up_lim ) ) if not np . isclose ( total_amount , sum_amount ) : self . _error ( field , 'Species ' + value [ 'kind' ] + 's do not sum to {:.1f}: ' . format ( total_amount ) + '{:f}' . format ( sum_amount ) )
|
Checks for valid specification of composition .
|
1,765
|
def convert_types_slow ( df ) : dtypes = get_types ( df ) for k , v in dtypes . items ( ) : t = df [ df [ 'key' ] == k ] t [ 'value' ] = t [ 'value' ] . astype ( v ) df = df . apply ( convert_row , axis = 1 ) return df
|
This is a slow operation .
|
1,766
|
def plot_all ( * args , ** kwargs ) : dfs = do_all ( * args , ** kwargs ) ps = [ ] for line in dfs : f , df , config = line df . plot ( title = config [ 'name' ] ) ps . append ( df ) return ps
|
Read all the trial data and plot the result of applying a function on them .
|
1,767
|
def serialize(v, known_modules=[]):
    tname = name(v, known_modules=known_modules)
    func = serializer(tname)
    return func(v), tname
|
Get a text representation of an object .
|
1,768
|
def deserialize ( type_ , value = None , ** kwargs ) : if not isinstance ( type_ , str ) : return type_ des = deserializer ( type_ , ** kwargs ) if value is None : return des return des ( value )
|
Get an object from a text representation
|
1,769
|
def content ( self ) : if self . _content is None : self . _content = self . parse_files ( ) return self . _content
|
Return parsed data . Parse it if not already parsed .
|
1,770
|
def parse_files ( self ) : log_re = self . log_format_regex log_lines = [ ] for log_file in self . matching_files ( ) : with open ( log_file ) as f : matches = re . finditer ( log_re , f . read ( ) ) for match in matches : log_lines . append ( match . groupdict ( ) ) return log_lines
|
Find the files and parse them .
|
1,771
|
def serialize_distribution ( network_agents , known_modules = [ ] ) : d = deepcopy ( list ( network_agents ) ) for v in d : if 'threshold' in v : del v [ 'threshold' ] v [ 'agent_type' ] = serialize_type ( v [ 'agent_type' ] , known_modules = known_modules ) return d
|
When serializing an agent distribution remove the thresholds in order to avoid cluttering the YAML definition file .
|
1,772
|
def _validate_states ( states , topology ) : states = states or [ ] if isinstance ( states , dict ) : for x in states : assert x in topology . node else : assert len ( states ) <= len ( topology ) return states
|
Validate states to avoid ignoring states during initialization
|
1,773
|
def _convert_agent_types ( ind , to_string = False , ** kwargs ) : if to_string : return serialize_distribution ( ind , ** kwargs ) return deserialize_distribution ( ind , ** kwargs )
|
Convenience method to allow specifying agents by class or class name .
|
1,774
|
def _agent_from_distribution ( distribution , value = - 1 , agent_id = None ) : if value < 0 : value = random . random ( ) for d in sorted ( distribution , key = lambda x : x [ 'threshold' ] ) : threshold = d [ 'threshold' ] if not ( ( agent_id is not None and threshold == STATIC_THRESHOLD and agent_id in d [ 'ids' ] ) or ( value >= threshold [ 0 ] and value < threshold [ 1 ] ) ) : continue state = { } if 'state' in d : state = deepcopy ( d [ 'state' ] ) return d [ 'agent_type' ] , state raise Exception ( 'Distribution for value {} not found in: {}' . format ( value , distribution ) )
|
Used in the initialization of agents given an agent distribution .
|
1,775
|
def launch ( self , port = None ) : if port is not None : self . port = port url = 'http://127.0.0.1:{PORT}' . format ( PORT = self . port ) print ( 'Interface starting at {url}' . format ( url = url ) ) self . listen ( self . port ) tornado . ioloop . IOLoop . instance ( ) . start ( )
|
Run the app .
|
1,776
|
def status_codes_by_date_stats ( ) : def date_counter ( queryset ) : return dict ( Counter ( map ( lambda dt : ms_since_epoch ( datetime . combine ( make_naive ( dt ) , datetime . min . time ( ) ) ) , list ( queryset . values_list ( 'datetime' , flat = True ) ) ) ) ) codes = { low : date_counter ( RequestLog . objects . filter ( status_code__gte = low , status_code__lt = high ) ) for low , high in ( ( 200 , 300 ) , ( 300 , 400 ) , ( 400 , 500 ) ) } codes [ 500 ] = date_counter ( RequestLog . objects . filter ( status_code__gte = 500 ) ) codes [ 'attacks' ] = date_counter ( RequestLog . objects . filter ( status_code__in = ( 400 , 444 , 502 ) ) ) stats = { } for code in ( 200 , 300 , 400 , 500 , 'attacks' ) : for date , count in codes [ code ] . items ( ) : if stats . get ( date , None ) is None : stats [ date ] = { 200 : 0 , 300 : 0 , 400 : 0 , 500 : 0 , 'attacks' : 0 } stats [ date ] [ code ] += count stats = sorted ( [ ( k , v ) for k , v in stats . items ( ) ] , key = lambda x : x [ 0 ] ) return stats
|
Get stats for status codes by date .
|
1,777
|
def enter ( self , pub_id , * nodes ) : try : pub = self [ 'pubs' ] [ pub_id ] except KeyError : raise ValueError ( 'Pub {} is not available' . format ( pub_id ) ) if not pub [ 'open' ] or ( pub [ 'capacity' ] < ( len ( nodes ) + pub [ 'occupancy' ] ) ) : return False pub [ 'occupancy' ] += len ( nodes ) for node in nodes : node [ 'pub' ] = pub_id return True
|
Agents will try to enter . The pub checks if it is possible
|
1,778
|
def exit ( self , pub_id , * node_ids ) : try : pub = self [ 'pubs' ] [ pub_id ] except KeyError : raise ValueError ( 'Pub {} is not available' . format ( pub_id ) ) for node_id in node_ids : node = self . get_agent ( node_id ) if pub_id == node [ 'pub' ] : del node [ 'pub' ] pub [ 'occupancy' ] -= 1
|
Agents will notify the pub they want to leave
|
1,779
|
def looking_for_friends ( self ) : self . info ( 'I am looking for friends' ) available_friends = list ( self . get_agents ( drunk = False , pub = None , state_id = self . looking_for_friends . id ) ) if not available_friends : self . info ( 'Life sucks and I\'m alone!' ) return self . at_home befriended = self . try_friends ( available_friends ) if befriended : return self . looking_for_pub
|
Look for friends to drink with
|
1,780
|
def looking_for_pub ( self ) : if self [ 'pub' ] != None : return self . sober_in_pub self . debug ( 'I am looking for a pub' ) group = list ( self . get_neighboring_agents ( ) ) for pub in self . env . available_pubs ( ) : self . debug ( 'We\'re trying to get into {}: total: {}' . format ( pub , len ( group ) ) ) if self . env . enter ( pub , self , * group ) : self . info ( 'We\'re all {} getting in {}!' . format ( len ( group ) , pub ) ) return self . sober_in_pub
|
Look for a pub that accepts me and my friends
|
1,781
|
def befriend ( self , other_agent , force = False ) : if force or self [ 'openness' ] > random ( ) : self . env . add_edge ( self , other_agent ) self . info ( 'Made some friend {}' . format ( other_agent ) ) return True return False
|
Try to become friends with another agent. The chances of success depend on both agents' openness.
|
1,782
|
def try_friends ( self , others ) : befriended = False k = int ( 10 * self [ 'openness' ] ) shuffle ( others ) for friend in islice ( others , k ) : if friend == self : continue if friend . befriend ( self ) : self . befriend ( friend , force = True ) self . debug ( 'Hooray! new friend: {}' . format ( friend . id ) ) befriended = True else : self . debug ( '{} does not want to be friends' . format ( friend . id ) ) return befriended
|
Look for random agents around me and try to befriend them
|
1,783
|
def profile_distribution(data):
    if len(data) == 0:
        return (data, np.nan, np.nan, np.nan, np.nan,
                np.nan, np.nan, np.nan, np.nan, np.nan)
    else:
        # Use the sample standard deviation unless there is only one point.
        ddof = 1 if len(data) > 1 else 0
        dist_mean = np.mean(data)
        dist_stdev = np.std(data, ddof=ddof)
        dist_min, dist_quartile1, dist_quartile2, dist_quartile3, dist_max = (
            np.percentile(data, [0, 25, 50, 75, 100]))
        dist_skew = skew(data)
        dist_kurtosis = kurtosis(data)
        return (data, dist_mean, dist_stdev, dist_skew, dist_kurtosis,
                dist_min, dist_quartile1, dist_quartile2, dist_quartile3, dist_max)
|
Compute the mean, standard deviation, skew, kurtosis, min, quartile1, quartile2, quartile3, and max of a vector.
|
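A usage sketch, assuming `numpy` and `scipy.stats.skew`/`kurtosis` are imported in the module as the function body implies.

```python
import numpy as np

data = np.array([1.0, 2.0, 2.0, 3.0, 9.0])
(_, mean, stdev, skw, kurt,
 lo, q1, q2, q3, hi) = profile_distribution(data)
```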
1,784
|
def to_native ( self , value ) : if isinstance ( value , dict ) : return value elif isinstance ( value , six . string_types ) : native_value = json . loads ( value ) if isinstance ( native_value , dict ) : return native_value else : raise ConversionError ( u'Cannot load value as a dict: {}' . format ( value ) )
|
Return the value as a dict, raising an error if conversion to dict is not possible.
|
1,785
|
def to_native ( self , value ) : if isinstance ( value , six . string_types ) : value_list = value . split ( self . string_delim ) else : value_list = value to_native = self . member_type . to_native if self . member_type is not None else lambda x : x return [ to_native ( item ) for item in value_list ]
|
Load a value as a list converting items if necessary
|
1,786
|
def validate_member_type ( self , value ) : if self . member_type : for item in value : self . member_type . validate ( item )
|
Validate each member of the list if member_type exists
|
1,787
|
def validate_length ( self , value ) : list_len = len ( value ) if value else 0 if self . max_length is not None and list_len > self . max_length : raise ValidationError ( u'List has {} values; max length is {}' . format ( list_len , self . max_length ) ) if self . min_length is not None and list_len < self . min_length : raise ValidationError ( u'List has {} values; min length is {}' . format ( list_len , self . min_length ) )
|
Validate the length of value if min_length or max_length was specified
|
1,788
|
def validate_resource ( self , value ) : def do_backoff ( * args , ** kwargs ) : attempts = 0 while True : try : self . _test_connection ( * args , ** kwargs ) break except ValidationError : wait_secs = min ( self . _max_wait , 2 ** attempts ) attempts += 1 if attempts < self . _max_tries : time . sleep ( wait_secs ) else : raise do_backoff ( value )
|
Validate the network resource with exponential backoff
|
1,789
|
def list_metafeatures ( cls , group = "all" ) : if group == "all" : return copy . deepcopy ( cls . IDS ) elif group == "landmarking" : return list ( filter ( lambda mf_id : "ErrRate" in mf_id or "Kappa" in mf_id , cls . IDS ) ) elif group == "target_dependent" : return list ( filter ( cls . _resource_is_target_dependent , cls . IDS ) ) else : raise ValueError ( f"Unknown group {group}" )
|
Returns a list of metafeatures computable by the Metafeatures class .
|
1,790
|
def _sample_rows ( self , X , Y , sample_shape , seed ) : if sample_shape [ 0 ] is None or X . shape [ 0 ] <= sample_shape [ 0 ] : X_sample , Y_sample = X , Y elif Y is None : np . random . seed ( seed ) row_indices = np . random . choice ( X . shape [ 0 ] , size = sample_shape [ 0 ] , replace = False ) X_sample , Y_sample = X . iloc [ row_indices ] , Y else : drop_size = X . shape [ 0 ] - sample_shape [ 0 ] sample_size = sample_shape [ 0 ] sss = StratifiedShuffleSplit ( n_splits = 2 , test_size = drop_size , train_size = sample_size , random_state = seed ) row_indices , _ = next ( sss . split ( X , Y ) ) X_sample , Y_sample = X . iloc [ row_indices ] , Y . iloc [ row_indices ] return ( X_sample , Y_sample )
|
Stratified uniform sampling of rows according to the classes in Y . Ensures there are enough samples from each class in Y for cross validation .
|
1,791
|
def _fetch_secrets ( vault_url , path , token ) : url = _url_joiner ( vault_url , 'v1' , path ) resp = requests . get ( url , headers = VaultLoader . _get_headers ( token ) ) resp . raise_for_status ( ) data = resp . json ( ) if data . get ( 'errors' ) : raise VaultException ( u'Error fetching Vault secrets from path {}: {}' . format ( path , data [ 'errors' ] ) ) return data [ 'data' ]
|
Read data from the vault path
|
1,792
|
def _fetch_app_role_token ( vault_url , role_id , secret_id ) : url = _url_joiner ( vault_url , 'v1/auth/approle/login' ) resp = requests . post ( url , data = { 'role_id' : role_id , 'secret_id' : secret_id } ) resp . raise_for_status ( ) data = resp . json ( ) if data . get ( 'errors' ) : raise VaultException ( u'Error fetching Vault token: {}' . format ( data [ 'errors' ] ) ) return data [ 'auth' ] [ 'client_token' ]
|
Get a Vault token using the RoleID and SecretID
|
1,793
|
def reload(self):
    self._source = self._fetch_secrets(self._vault_url, self._path, self._token)
|
Reread secrets from the vault path
|
1,794
|
def sorted_options(sort_options):
    return [
        {
            'title': v['title'],
            'value': ('-{0}'.format(k)
                      if v.get('default_order', 'asc') == 'desc' else k),
        }
        for k, v in sorted(sort_options.items(), key=lambda x: x[1].get('order', 0))
    ]
|
Sort the sort options for display.
|
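The behavior follows from the code: items are ordered by their `order` key, and a descending default prefixes the value with `-`.

```python
opts = {
    'date': {'title': 'Date', 'default_order': 'desc', 'order': 1},
    'name': {'title': 'Name', 'order': 2},
}
sorted_options(opts)
# -> [{'title': 'Date', 'value': '-date'}, {'title': 'Name', 'value': 'name'}]
```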
1,795
|
def html_to_plain_text ( html ) : soup = BeautifulSoup ( html , "html.parser" ) parser = HTML2PlainParser ( ) parser . feed ( str ( soup . encode ( 'utf-8' ) ) ) result = parser . text . rstrip ( ) if parser . links : result += '\n\n' for link in parser . links : result += '[{}]: {}\n' . format ( link [ 0 ] , link [ 1 ] ) return result
|
Convert HTML code into formatted plain text.
|
1,796
|
def handle_data ( self , data ) : if self . lasttag not in self . ignored_elements : text = data . replace ( '\n' , '' ) if text : if self . lasttag == 'li' : self . text += ' * ' self . text += text if self . lasttag in self . newline_after_elements : self . text += '\n'
|
Handles data between tags .
|
1,797
|
def run ( self ) : task = None monitor_task = MonitorTask ( notification_endpoint = self . _handle_message ) while True : if self . _terminate : self . _logger . info ( "scsgate.Reactor exiting" ) self . _connection . close ( ) break try : task = self . _request_queue . get_nowait ( ) self . _logger . debug ( "scsgate.Reactor: got task {}" . format ( task ) ) except queue . Empty : task = monitor_task try : task . execute ( connection = self . _connection ) except ExecutionError as err : self . _logger . error ( err )
|
Starts the thread
|
1,798
|
def mygenerator ( n = 5 , n_edges = 5 ) : G = nx . Graph ( ) for i in range ( n ) : G . add_node ( i ) for i in range ( n_edges ) : nodes = list ( G . nodes ) n_in = choice ( nodes ) nodes . remove ( n_in ) n_out = choice ( nodes ) G . add_edge ( n_in , n_out ) return G
|
Just a simple generator that creates a network with n nodes and n_edges edges . Edges are assigned randomly only avoiding self loops .
|
1,799
|
def insert_files ( self , rootpath , directoryInFilter = None , directoryExFilter = None , compileInFilter = None , compileExFilter = None , contentInFilter = None , contentExFilter = None ) : directoryInFilter = self . DirectoryInFilter if directoryInFilter is None else directoryInFilter directoryExFilter = self . DirectoryExFilter if directoryExFilter is None else directoryExFilter compileInFilter = self . CompileInFilter if compileInFilter is None else compileInFilter compileExFilter = self . CompileExFilter if compileExFilter is None else compileExFilter contentInFilter = self . ContentInFilter if contentInFilter is None else contentInFilter contentExFilter = self . ContentExFilter if contentExFilter is None else contentExFilter def filter ( text , filters , explicit ) : if explicit : return any ( fnmatch . fnmatch ( text , f ) for f in filters ) return not filters or any ( fnmatch . fnmatch ( text , f ) for f in filters ) for root , dirnames , filenames in os . walk ( rootpath ) : searchdir = os . path . normpath ( os . path . normcase ( root ) ) if filter ( searchdir , directoryExFilter , True ) : dirnames [ : ] = [ ] elif filter ( searchdir , directoryInFilter , False ) : for filepath in [ os . path . join ( root , filename ) for filename in filenames ] : if filter ( filepath , compileInFilter , False ) and not filter ( filepath , compileExFilter , True ) : self . CompileFiles . append ( filepath ) elif filter ( filepath , contentInFilter , False ) and not filter ( filepath , contentExFilter , True ) : self . ContentFiles . append ( filepath )
|
Insert files by recursively traversing rootpath, selecting files according to the given filter parameters.
|