idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
15,300
def remove(self, w):
    """Delete word *w* from the vocabulary; remaining indices are untouched."""
    if w not in self.f2i:
        raise ValueError("'{}' does not exist.".format(w))
    if w in self.reserved:
        raise ValueError("'{}' is one of the reserved words, and thus"
                         "cannot be removed.".format(w))
    idx = self.f2i.pop(w)
    self.i2f.pop(idx)
    self.words.remove(w)
Removes a word from the vocab . The indices are unchanged .
15,301
def reconstruct_indices(self):
    """Rebuild f2i/i2f from self.words after removals leave index gaps."""
    del self.i2f, self.f2i
    self.f2i = {word: pos for pos, word in enumerate(self.words)}
    self.i2f = {pos: word for pos, word in enumerate(self.words)}
Reconstruct word indices in case of word removals . Vocabulary does not handle empty indices when words are removed , hence it needs to be told explicitly when to reconstruct them .
15,302
def run(self, data):
    """Run the check method and store a Result(code, messages) on self.result.

    self.passes True/False short-circuits the check; otherwise self.check()
    is invoked and its return value is normalized into a Checker.Code.
    """
    result_type = namedtuple('Result', 'code messages')
    if self.passes is True:
        # Forced pass: skip the check entirely.
        result = result_type(Checker.Code.PASSED, '')
    elif self.passes is False:
        # Forced fail, possibly downgraded to IGNORED.
        if self.allow_failure:
            result = result_type(Checker.Code.IGNORED, '')
        else:
            result = result_type(Checker.Code.FAILED, '')
    else:
        try:
            result = self.check(data, **self.arguments)
            messages = ''
            if isinstance(result, tuple):
                # check() may return (code, messages).
                result, messages = result
            if result not in Checker.Code:
                # Coerce truthy/falsy return values into explicit codes.
                result = Checker.Code.PASSED if bool(result) else Checker.Code.FAILED
            if result == Checker.Code.FAILED and self.allow_failure:
                result = Checker.Code.IGNORED
            result = result_type(result, messages)
        except NotImplementedError:
            result = result_type(Checker.Code.NOT_IMPLEMENTED, '')
    self.result = result
Run the check method and format the result for analysis .
15,303
def is_numeric(value):
    """Test if a value is numeric.

    Uses an exact ``type(value) in ...`` test deliberately, so ``bool``
    (a subclass of ``int``) is NOT considered numeric.

    Fix: ``np.float128`` does not exist on every platform (e.g. Windows
    builds of NumPy), which made the original raise AttributeError; it is
    now included only when available.
    """
    numeric_types = [int, float, np.int8, np.int16, np.int32, np.int64,
                     np.float16, np.float32, np.float64]
    if hasattr(np, 'float128'):
        numeric_types.append(np.float128)
    return type(value) in numeric_types
Test if a value is numeric .
15,304
def cmd(send, msg, args):
    """Return a fortune; the 'list' argument enumerates available fortunes."""
    if msg == 'list':
        fortunes = list_fortunes() + list_fortunes(True)
        send(" ".join(fortunes), ignore_length=True)
        return
    for line in get_fortune(msg, args['name']).splitlines():
        send(line)
Returns a fortune .
15,305
def compile_theme(theme_id=None):
    """Compile one theme (by id) or every registered theme's LESS to CSS."""
    from engineer.processors import convert_less
    from engineer.themes import ThemeManager

    if theme_id is None:
        themes = ThemeManager.themes().values()
    else:
        themes = [ThemeManager.theme(theme_id)]

    with indent(2):
        puts(colored.yellow("Compiling %s themes." % len(themes)))
        for theme in themes:
            theme_output_path = (theme.static_root /
                                 ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
            puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
            with indent(4):
                puts("Compiling...")
                convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
                             theme_output_path,
                             minify=True)
                puts(colored.green("Done.", bold=True))
Compiles a theme .
15,306
def list_theme():
    """List all available Engineer themes, aligned with their root paths.

    Fix: the original used the Python-2-only dict methods ``itervalues``/
    ``iterkeys``, which raise AttributeError on Python 3; ``values()``/
    ``keys()`` behave the same here on both versions.
    """
    from engineer.themes import ThemeManager

    themes = ThemeManager.themes()
    # Column widths: longest id / root path plus padding.
    col1, col2 = map(max, zip(*[(len(t.id) + 2, len(t.root_path) + 2)
                                for t in themes.values()]))
    themes = ThemeManager.themes_by_finder()
    for finder in sorted(themes.keys()):
        if len(themes[finder]) > 0:
            puts("%s: " % finder)
            for theme in sorted(themes[finder], key=lambda _: _.id):
                with indent(4):
                    puts(columns([colored.cyan("%s:" % theme.id), col1],
                                 [colored.white(theme.root_path, bold=True), col2]))
List all available Engineer themes .
15,307
def quantile_norm(X):
    """Normalize the columns of X so each shares the same distribution.

    The shared distribution is the per-rank mean across columns; ties get
    fractional ranks truncated to int by the index conversion.
    """
    mean_per_rank = np.sort(X, axis=0).mean(axis=1)
    column_ranks = np.apply_along_axis(stats.rankdata, 0, X)
    return mean_per_rank[column_ranks.astype(int) - 1]
Normalize the columns of X to each have the same distribution .
15,308
def corrdfs(df1, df2, method):
    """Correlate every column of df1 against every column of df2.

    df1's columns become the result columns, df2's the rows.

    method: 'spearman' (NaN-tolerant) or 'pearson'.
    Returns (correlation DataFrame, p-value DataFrame).
    """
    dcorr = pd.DataFrame(columns=df1.columns, index=df2.columns)
    dpval = pd.DataFrame(columns=df1.columns, index=df2.columns)
    for c1 in df1:
        for c2 in df2:
            if method == 'spearman':
                dcorr.loc[c2, c1], dpval.loc[c2, c1] = spearmanr(
                    df1[c1], df2[c2], nan_policy='omit')
            elif method == 'pearson':
                dcorr.loc[c2, c1], dpval.loc[c2, c1] = pearsonr(df1[c1], df2[c2])
    # Propagate axis names when present (idiom fix: "is not None").
    if df1.columns.name is not None:
        dcorr.columns.name = df1.columns.name
        dpval.columns.name = df1.columns.name
    if df2.columns.name is not None:
        dcorr.index.name = df2.columns.name
        dpval.index.name = df2.columns.name
    return dcorr, dpval
df1 in columns df2 in rows
15,309
def pretty_description(description, wrap_at=None, indent=0):
    """Return a pretty formatted string given some text.

    description: raw, possibly multi-line text.
    wrap_at: target line width; None -> detected console width; a negative
             value is treated as an offset added to the console width.
    indent: number of spaces prepended to every wrapped line.
    """
    if wrap_at is None or wrap_at < 0:
        width = console_width(default=79)
        if wrap_at is None:
            wrap_at = width
        else:
            # Negative wrap_at: offset relative to the console width.
            wrap_at += width
    indent = ' ' * indent
    text_wrapper = textwrap.TextWrapper(
        width=wrap_at, replace_whitespace=False,
        initial_indent=indent, subsequent_indent=indent)
    new_desc = []
    for line in description.split('\n'):
        new_desc.append(line.replace('\n', '').strip())
    # Trim leading and trailing blank lines.
    while not new_desc[0]:
        del new_desc[0]
    while not new_desc[-1]:
        del new_desc[-1]
    # Interior blank lines separate paragraphs.
    separators = [i for i, l in enumerate(new_desc) if not l]
    paragraphs = []
    if separators:
        start, end = 0, separators[0]
        paragraphs.append(new_desc[start:end])
        for i in range(len(separators) - 1):
            start = end + 1
            end = separators[i + 1]
            paragraphs.append(new_desc[start:end])
        paragraphs.append(new_desc[end + 1:])
        return '\n\n'.join(text_wrapper.fill(' '.join(p)) for p in paragraphs)
    return text_wrapper.fill(' '.join(new_desc))
Return a pretty formatted string given some text .
15,310
def print_name(self, indent=0, end='\n'):
    """Print self.name in bright style, preceded by *indent* spaces."""
    prefix = ' ' * indent
    print(Style.BRIGHT + prefix + self.name, end=end)
Print name with optional indent and end .
15,311
def print(self):
    """Print identifier, name and description, then any argument list."""
    print('{dim}Identifier:{none} {cyan}{identifier}{none}\n'
          '{dim}Name:{none} {name}\n'
          '{dim}Description:{none}\n{description}'.format(
              dim=Style.DIM, cyan=Fore.CYAN, none=Style.RESET_ALL,
              identifier=self.identifier, name=self.name,
              description=pretty_description(self.description, indent=2)))
    # argument_list is optional on some subclasses, hence the hasattr guard.
    if hasattr(self, 'argument_list') and self.argument_list:
        print('{dim}Arguments:{none}'.format(dim=Style.DIM, none=Style.RESET_ALL))
        for argument in self.argument_list:
            argument.print(indent=2)
Print self .
15,312
def filter(self, value, table=None):
    """Return True if the value should be pruned; False otherwise."""
    if table is None:
        return self.filterable_func(value)
    return self.filterable_func(value, table)
Return True if the value should be pruned ; False otherwise .
15,313
def get_data(self, file_path=sys.stdin, delimiter=',', categories_delimiter=None):
    """Implement get_dsm method from Provider class.

    Reads a square, delimiter-separated matrix from stdin or *file_path*
    and returns a DesignStructureMatrix.
    """
    if file_path == sys.stdin:
        logger.info('Read data from standard input')
        lines = [line.replace('\n', '') for line in file_path]
    else:
        logger.info('Read data from file ' + file_path)
        with open(file_path) as file:
            lines = list(file)
    # First row holds the column headers; the leading cell is ignored.
    columns = lines[0].rstrip('\n').split(delimiter)[1:]
    categories = None
    if categories_delimiter:
        # Headers look like "category<delim>name": split once on the first hit.
        columns, categories = zip(*[c.split(categories_delimiter, 1) for c in columns])
    size = len(columns)
    # Matrix is assumed square: read exactly len(columns) data rows,
    # dropping each row's leading label cell.
    data = [list(map(int, l.split(delimiter)[1:])) for l in lines[1:size + 1]]
    return DesignStructureMatrix(data, columns, categories)
Implement get_dsm method from Provider class .
15,314
def parse_tags(self):
    """Parse the tags in this tag group.

    Returns a list of Tag objects; when the group has no "tags" entry the
    list collected so far (possibly empty) is returned.

    Fix: the bare ``except:`` is narrowed so genuine programming errors
    (e.g. inside Tag()) are no longer silently swallowed.
    """
    tags = []
    try:
        for tag in self._tag_group_dict["tags"]:
            tags.append(Tag(tag))
    except (KeyError, TypeError):
        # Missing "tags" key or non-iterable value: best-effort return.
        return tags
    return tags
Parses tags in tag group
15,315
def update(self):
    """Update the backing dictionary of the tag group in place.

    Refreshes each tag dict from the matching Tag object, then recurses
    into sub-groups.
    """
    if self._is_ignored or "tags" not in self._tag_group_dict:
        return
    for i in range(len(self._tag_group_dict["tags"])):
        tag_dict = self._tag_group_dict["tags"][i]
        for tag in self._tags:
            # Match on the tag name and write the refreshed dict back in place.
            if tag.name == tag_dict["common.ALLTYPES_NAME"]:
                self._tag_group_dict["tags"][i] = tag.as_dict()
                break
    for i in range(len(self._sub_groups)):
        sub_group = self._sub_groups[i]
        sub_group.update()
        # NOTE(review): assumes tag_groups[i] pairs with _sub_groups[i] — confirm.
        self._tag_group_dict["tag_groups"][i] = sub_group.as_dict()
Updates the dictionary of the tag group
15,316
def cmd(send, msg, args):
    """Query WolframAlpha and send up to three lines of the primary result."""
    if not msg:
        send("Evaluate what?")
        return
    params = {'format': 'plaintext',
              'reinterpret': 'true',
              'input': msg,
              'appid': args['config']['api']['wolframapikey']}
    req = get('http://api.wolframalpha.com/v2/query', params=params)
    if req.status_code == 403:
        send("WolframAlpha is having issues.")
        return
    if not req.content:
        send("WolframAlpha returned an empty response.")
        return
    xml = fromstring(req.content)
    output = xml.findall('./pod')
    key = args['config']['api']['bitlykey']
    # Shorten a link back to the full WolframAlpha result page.
    url = get_short("http://www.wolframalpha.com/input/?i=%s" % quote(msg), key)
    text = "No output found."
    for x in output:
        # Only the pod flagged 'primary' carries the answer; the last wins.
        if 'primary' in x.keys():
            text = x.find('./subpod/plaintext').text
    if text is None:
        send("No Output parsable")
    else:
        # Cap at three lines to avoid flooding the channel.
        for t in text.splitlines()[:3]:
            send(t)
        send("See %s for more info" % url)
Queries WolframAlpha .
15,317
def _get_soup(page):
    """Fetch *page* over HTTP and return it parsed as a BeautifulSoup object."""
    body = requests.get(page).text
    return bs4.BeautifulSoup(body)
Return BeautifulSoup object for given page
15,318
def search(term, category=Categories.ALL, pages=1, sort=None, order=None):
    """Return a search result for *term* in *category*.

    Supports sorting and spanning multiple pages.
    """
    result = Search()
    result.search(term=term, category=category, pages=pages, sort=sort, order=order)
    return result
Return a search result for term in category . Can also be sorted and span multiple pages .
15,319
def popular(category=None, sortOption="title"):
    """Return torrents from the KAT home page.

    Can be categorized; cannot be sorted or span multiple pages.
    """
    result = Search()
    result.popular(category, sortOption)
    return result
Return a search result containing torrents appearing on the KAT home page . Can be categorized . Cannot be sorted or contain multiple pages
15,320
def recent(category=None, pages=1, sort=None, order=None):
    """Return the most recently added torrents; sortable, pageable."""
    result = Search()
    result.recent(category, pages, sort, order)
    return result
Return most recently added torrents . Can be sorted and categorized and contain multiple pages .
15,321
def print_details(self):
    """Print the torrent's details, one field per line."""
    print("Title:", self.title)
    print("Category:", self.category)
    print("Page: ", self.page)
    print("Size: ", self.size)
    print("Files: ", self.files)
    print("Age: ", self.age)
    print("Seeds:", self.seeders)
    print("Leechers: ", self.leechers)
    print("Magnet: ", self.magnet)
    print("Download: ", self.download)
    print("Verified:", self.isVerified)
Print torrent details
15,322
def search(self, term=None, category=None, pages=1, url=search_url, sort=None, order=None):
    """Search a given URL for torrent results and accumulate them on self."""
    if not self.current_url:
        self.current_url = url
    if self.current_url == Search.base_url:
        # Home page: one page of results, no query string.
        results = self._get_results(self.current_url)
        self._add_results(results)
    else:
        search = self._format_search(term, category)
        sorting = self._format_sort(sort, order)
        # Fetch each requested page; _current_page tracks position.
        for i in range(pages):
            results = self._get_results(search + "/" + str(self._current_page) + "/" + sorting)
            self._add_results(results)
            self._current_page += 1
        # Step back so _current_page points at the last fetched page.
        self._current_page -= 1
Search a given URL for torrent results .
15,323
def _categorize ( self , category ) : self . torrents = [ result for result in self . torrents if result . category == category ]
Remove torrents with unwanted category from self . torrents
15,324
def page(self, i):
    """Jump to page *i* of the current search, replacing self.torrents."""
    self.torrents = []
    self._current_page = i
    self.search(term=self.term, category=self.category, sort=self.sort, order=self.order)
Get page i of search results
15,325
def _get_results(self, page):
    """Collect the odd/even torrent rows from *page*, interleave them back
    into display order, and parse them into Torrent objects."""
    soup = _get_soup(page)
    rows = soup.find_all("tr", class_="odd")
    for offset, row in enumerate(soup.find_all("tr", class_="even")):
        rows.insert(offset * 2 + 1, row)
    return self._parse_details(rows)
Find every div tag containing torrent details on given page then parse the results into a list of Torrents and return them
15,326
def _parse_details(self, tag_list):
    """Parse a list of row tags (search page or home page) into Torrents."""
    result = list()
    for i, item in enumerate(tag_list):
        title = item.find("a", class_="cellMainLink")
        title_text = title.text
        link = title.get("href")
        # Centered cells hold, in order: size, files, age, seeds, leeches.
        tds = item.find_all("td", class_="center")
        size = tds[0].text
        files = tds[1].text
        age = tds[2].text
        seed = tds[3].text
        leech = tds[4].text
        magnet = item.find("a", class_="imagnet icon16")
        download = item.find("a", class_="idownload icon16")
        isVerified = item.find("a", class_="iverify icon16") != None
        if magnet:
            magnet = magnet.get("href")
        if download:
            download = download.get("href")
        if self.current_url == self.base_url:
            # Home-page rows need the positional index to find the category.
            category = self._get_torrent_category(item, result=i)
        else:
            category = self._get_torrent_category(item)
        result.append(Torrent(title_text, category, link, size, seed, leech,
                              magnet, download, files, age, isVerified))
    return result
Given a list of tags from either a search page or the KAT home page parse the details and return a list of Torrents
15,327
def init_weights(self):
    """He-initialize W (n_neurons x n_inputs) and zero the bias column b."""
    he_scale = np.sqrt(2 / self.n_inputs)
    self.W = np.random.randn(self.n_neurons, self.n_inputs) * he_scale
    self.b = np.zeros((self.n_neurons, 1))
Performs He initialization
15,328
def bootstrap_executive_office_states(self, election):
    """Create state page content exclusively for the U.S. president."""
    content_type = ContentType.objects.get_for_model(election.race.office)
    # One PageContent per state division for the office itself.
    for division in Division.objects.filter(level=self.STATE_LEVEL):
        PageContent.objects.get_or_create(
            content_type=content_type,
            object_id=election.race.office.pk,
            election_day=election.election_day,
            division=division)
    # National-level office page type, plus its page content.
    page_type, created = PageType.objects.get_or_create(
        model_type=ContentType.objects.get(app_label='government', model='office'),
        election_day=election.election_day,
        division_level=self.NATIONAL_LEVEL,
        jurisdiction=self.FEDERAL_JURISDICTION,
        office=election.race.office,
    )
    PageContent.objects.get_or_create(
        content_type=ContentType.objects.get_for_model(page_type),
        object_id=page_type.pk,
        election_day=election.election_day,
    )
    # State-level office page type, plus its page content.
    page_type, created = PageType.objects.get_or_create(
        model_type=ContentType.objects.get(app_label='government', model='office'),
        election_day=election.election_day,
        division_level=self.STATE_LEVEL,
        jurisdiction=self.FEDERAL_JURISDICTION,
        office=election.race.office,
    )
    PageContent.objects.get_or_create(
        content_type=ContentType.objects.get_for_model(page_type),
        object_id=page_type.pk,
        election_day=election.election_day,
    )
Create state page content exclusively for the U . S . president .
15,329
def average_last_builds(connection, package, limit=5):
    """Find the average duration of the last *limit* completed builds.

    Twisted inlineCallbacks-style coroutine: yields the koji listBuilds
    call and returns (via defer.returnValue) a timedelta, or None when the
    package has no completed builds.
    """
    state = build_states.COMPLETE
    opts = {'limit': limit, 'order': '-completion_time'}
    builds = yield connection.listBuilds(package, state=state, queryOpts=opts)
    if not builds:
        defer.returnValue(None)
    durations = [build.duration for build in builds]
    # Seed sum() with a zero timedelta so the division yields a timedelta.
    average = sum(durations, timedelta()) / len(durations)
    defer.returnValue(average)
Find the average duration time for the last couple of builds .
15,330
def model_resources(self):
    """Return the JSON listing of all supported resources (never cached)."""
    payload = {'apiVersion': '0.1',
               'swaggerVersion': '1.1',
               'basePath': '%s%s' % (self.base_uri(), self.api.url_prefix),
               'apis': self.get_model_resources()}
    response = jsonify(payload)
    response.headers.add('Cache-Control', 'max-age=0')
    return response
Listing of all supported resources .
15,331
def model_resource(self, resource_name):
    """Return the JSON details of one model resource, looked up by API name."""
    matches = [resource for resource in self.api._registry.values()
               if resource.get_api_name() == resource_name]
    resource = first(matches)
    data = {'apiVersion': '0.1',
            'swaggerVersion': '1.1',
            'basePath': '%s%s' % (self.base_uri(), self.api.url_prefix),
            'resourcePath': '/meta/%s' % resource.get_api_name(),
            'apis': self.get_model_apis(resource),
            'models': self.get_model(resource)}
    response = jsonify(data)
    response.headers.add('Cache-Control', 'max-age=0')
    return response
Details of a specific model resource .
15,332
def _high_dim_sim ( self , v , w , normalize = False , X = None , idx = 0 ) : sim = np . exp ( ( - np . linalg . norm ( v - w ) ** 2 ) / ( 2 * self . _sigma [ idx ] ** 2 ) ) if normalize : return sim / sum ( map ( lambda x : x [ 1 ] , self . _knn ( idx , X , high_dim = True ) ) ) else : return sim
Similarity measurement based on Gaussian Distribution
15,333
def init(confdir="/etc/cslbot"):
    """The bot's main entry point.

    Parses CLI flags, configures logging, builds the IrcBot and runs it
    until interrupted; exits the process with 0 on Ctrl-C, 1 on a crash.
    """
    # Spawn (not fork) so worker processes start from a clean interpreter.
    multiprocessing.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', help='Enable debug logging.', action='store_true')
    parser.add_argument('--validate', help='Initialize the db and perform other sanity checks.', action='store_true')
    args = parser.parse_args()
    loglevel = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=loglevel, format="%(asctime)s %(levelname)s:%(module)s:%(message)s")
    # Quiet the requests library's own logging.
    logging.getLogger("requests").setLevel(logging.WARNING)
    cslbot = IrcBot(confdir)
    if args.validate:
        # Construction alone initializes the db; tear down and report.
        cslbot.shutdown_mp()
        print("Everything is ready to go!")
        return
    try:
        cslbot.start()
    except KeyboardInterrupt:
        cslbot.disconnect('Bot received a Ctrl-C')
        cslbot.shutdown_mp()
        sys.exit(0)
    except Exception as ex:
        cslbot.shutdown_mp(False)
        logging.error("The bot died! %s", ex)
        output = "".join(traceback.format_exc()).strip()
        for line in output.split('\n'):
            logging.error(line)
        sys.exit(1)
The bot's main entry point .
15,334
def get_version(self):
    """Return a human-readable cslbot version string."""
    _, version = misc.get_version(self.confdir)
    return "Can't get the version." if version is None else "cslbot - %s" % version
Get the version .
15,335
def shutdown_mp(self, clean=True):
    """Shut down all the multiprocessing helpers (server socket, workers).

    clean: passed through to stop_workers; False signals an abnormal stop.
    """
    if hasattr(self, 'server'):
        try:
            # shutdown() may fail if the socket was never connected/closed.
            self.server.socket.shutdown(socket.SHUT_RDWR)
        except OSError:
            pass
        self.server.socket.close()
        self.server.shutdown()
    if hasattr(self, 'handler'):
        self.handler.workers.stop_workers(clean)
Shutdown all the multiprocessing .
15,336
def handle_msg(self, c, e):
    """Dispatch a message to the handler, reporting any crash via backtrace."""
    try:
        self.handler.handle_msg(c, e)
    except Exception as exc:
        backtrace.handle_traceback(exc, c, self.get_target(e), self.config)
Handles all messages .
15,337
def reload_handler(self, c, e):
    """Handle a reload request, restricted to bot admins.

    Fix: the original tested ``if self.reload_event.set():`` — Event.set()
    returns None (always falsy) and sets the event as a side effect, so the
    owner-only branch was unreachable and the event was set prematurely.
    The intended call is ``is_set()``.
    """
    cmd = self.is_reload(e)
    cmdchar = self.config['core']['cmdchar']
    if cmd is not None:
        if self.reload_event.is_set():
            # A reload is already in flight: only the owner may force one.
            admins = [self.config['auth']['owner']]
        else:
            with self.handler.db.session_scope() as session:
                admins = [x.nick for x in session.query(orm.Permissions).all()]
        if e.source.nick not in admins:
            c.privmsg(self.get_target(e), "Nope, not gonna do it.")
            return
        importlib.reload(reloader)
        self.reload_event.set()
        cmdargs = cmd[len('%sreload' % cmdchar) + 1:]
        try:
            if reloader.do_reload(self, e, cmdargs):
                if self.config.getboolean('feature', 'server'):
                    self.server = server.init_server(self)
                self.reload_event.clear()
                logging.info("Successfully reloaded")
        except Exception as ex:
            backtrace.handle_traceback(ex, c, self.get_target(e), self.config)
This handles reloads .
15,338
def _options_to_dict ( df ) : kolums = [ "k1" , "k2" , "value" ] d = df [ kolums ] . values . tolist ( ) dc = { } for x in d : dc . setdefault ( x [ 0 ] , { } ) dc [ x [ 0 ] ] [ x [ 1 ] ] = x [ 2 ] return dc
Make a dictionary to print .
15,339
def _get_repo():
    """Return the git toplevel path of the current repo ('' on py2 failure)."""
    command = ['git', 'rev-parse', '--show-toplevel']
    if not six.PY2:
        proc = run(command, stdout=PIPE, stderr=PIPE)
        return proc.stdout.decode('utf-8').strip()
    try:
        return check_output(command).decode('utf-8').strip()
    except CalledProcessError:
        return ''
Identify the path to the repository origin .
15,340
def _entry_must_exist ( df , k1 , k2 ) : count = df [ ( df [ 'k1' ] == k1 ) & ( df [ 'k2' ] == k2 ) ] . shape [ 0 ] if count == 0 : raise NotRegisteredError ( "Option {0}.{1} not registered" . format ( k1 , k2 ) )
Evaluate key - subkey existence .
15,341
def _entry_must_not_exist ( df , k1 , k2 ) : count = df [ ( df [ 'k1' ] == k1 ) & ( df [ 'k2' ] == k2 ) ] . shape [ 0 ] if count > 0 : raise AlreadyRegisteredError ( "Option {0}.{1} already registered" . format ( k1 , k2 ) )
Evaluate key - subkey non - existence .
15,342
def register_option(self, key, subkey, default, _type, definition, values=None, locked=False):
    """Create a new option.

    No-op when the manager is closed. Raises on duplicates and validates
    *default* against *_type* via ev.value_eval.
    """
    if not self.open:
        return
    key, subkey = _lower_keys(key, subkey)
    _entry_must_not_exist(self.gc, key, subkey)
    ev.value_eval(default, _type)
    # values=False means "no restriction"; normalize it to None.
    values = None if values is False else values
    # The current value starts out equal to the default.
    new_opt = pd.Series([key, subkey, default, _type, default, locked, definition, values],
                        index=self.clmn)
    self.gc = self.gc.append(new_opt, ignore_index=True)
Create a new option .
15,343
def unregister_option(self, key, subkey):
    """Remove an option from the manager (no-op when the manager is closed)."""
    if not self.open:
        return
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    keep = ~((self.gc['k1'] == key) & (self.gc['k2'] == subkey))
    self.gc = self.gc[keep]
Removes an option from the manager .
15,344
def get_option(self, key, subkey, in_path_none=False):
    """Get the current value of the option, cast according to its type.

    in_path_none: allow a 'path_in' option to be None without raising.
    """
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    if df["type"].values[0] == "bool":
        return bool(df["value"].values[0])
    elif df["type"].values[0] == "int":
        return int(df["value"].values[0])
    elif df["type"].values[0] == "path_in":
        # Input paths must be set before use, unless explicitly tolerated.
        if df["value"].values[0] is None and not in_path_none:
            raise ValueError('Unspecified path for {0}.{1}'.format(key, subkey))
        return df["value"].values[0]
    else:
        return df["value"].values[0]
Get the current value of the option .
15,345
def get_option_default(self, key, subkey):
    """Return the default value of an option, cast to bool/int when typed so."""
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    row = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    kind = row["type"].values[0]
    default = row["default"].values[0]
    if kind == "bool":
        return bool(default)
    if kind == "int":
        return int(default)
    return default
Get the default value of the option .
15,346
def get_option_description(self, key, subkey):
    """Return the descriptive string of a particular option."""
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    mask = (self.gc["k1"] == key) & (self.gc["k2"] == subkey)
    return self.gc[mask]["description"].values[0]
Get the string describing a particular option .
15,347
def get_option_type(self, key, subkey):
    """Return the declared type of a particular option."""
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    mask = (self.gc["k1"] == key) & (self.gc["k2"] == subkey)
    return self.gc[mask]["type"].values[0]
Get the type of a particular option .
15,348
def get_option_alternatives(self, key, subkey):
    """Return the list of accepted values for an option (None = unrestricted)."""
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    mask = (self.gc["k1"] == key) & (self.gc["k2"] == subkey)
    return self.gc[mask]["values"].values[0]
Get list of available values for an option .
15,349
def set_option(self, key, subkey, value):
    """Set the value of an option.

    Raises ValueError when the option is locked, or when *value* is not
    among the option's accepted values; type-checks via ev.value_eval.
    """
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    df = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    if df["locked"].values[0]:
        raise ValueError("{0}.{1} option is locked".format(key, subkey))
    ev.value_eval(value, df["type"].values[0])
    if not self.check_option(key, subkey, value):
        info = "{0}.{1} accepted options are: ".format(key, subkey)
        info += "[{}]".format(", ".join(df["values"].values[0]))
        raise ValueError(info)
    self.gc.loc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey), "value"] = value
Sets the value of an option .
15,350
def check_option(self, key, subkey, value):
    """Return True when *value* is acceptable for the (key, subkey) option."""
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    row = self.gc[(self.gc["k1"] == key) & (self.gc["k2"] == subkey)]
    ev.value_eval(value, row["type"].values[0])
    allowed = row["values"].values[0]
    if allowed is None:
        return True
    return value in allowed
Evaluate if a given value fits the option .
15,351
def reset_option(self, key, subkey):
    """Reset one option to its default (no-op when the manager is closed)."""
    if not self.open:
        return
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    mask = (self.gc["k1"] == key) & (self.gc["k2"] == subkey)
    row = self.gc[mask]
    if row["locked"].values[0]:
        raise ValueError("{0}.{1} option is locked".format(key, subkey))
    self.gc.loc[mask, "value"] = row["default"].values[0]
Resets a single option to the default values .
15,352
def lock_option(self, key, subkey):
    """Mark an option as immutable."""
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    mask = (self.gc["k1"] == key) & (self.gc["k2"] == subkey)
    self.gc.loc[mask, "locked"] = True
Make an option unmutable .
15,353
def reset_options(self, empty=True):
    """Drop every option (empty=True) or reset all values to their defaults."""
    if not empty:
        self.gc["value"] = self.gc["default"]
    else:
        self.gc = pd.DataFrame(columns=self.clmn)
Empty ALL options .
15,354
def set_options_from_file(self, filename, file_format='yaml'):
    """Load options from *filename*; file_format is 'yaml' or 'json'."""
    fmt = file_format.lower()
    if fmt == 'yaml':
        return self.set_options_from_YAML(filename)
    if fmt == 'json':
        return self.set_options_from_JSON(filename)
    raise ValueError('Unknown format {}'.format(file_format))
Load options from file .
15,355
def set_options_from_dict(self, data_dict, filename=None):
    """Load options from a dict of dicts ({key: {subkey: value}}).

    filename: when given, its directory is used to re-resolve relative
    path values that fail to set directly.
    """
    if filename is not None:
        filename = os.path.dirname(filename)
    for k in data_dict:
        if not isinstance(data_dict[k], dict):
            raise ValueError("The input data has to be a dict of dict")
        for sk in data_dict[k]:
            # Silently skip options that are not registered.
            if self.gc[(self.gc["k1"] == k) & (self.gc["k2"] == sk)].shape[0] == 0:
                continue
            if isinstance(data_dict[k][sk], six.string_types):
                data_dict[k][sk] = str(data_dict[k][sk])
            _type = self.gc[(self.gc["k1"] == k) & (self.gc["k2"] == sk)][["type"]].values[0]
            data_dict[k][sk] = ev.cast(data_dict[k][sk], _type)
            if self.get_option(k, sk, True) != data_dict[k][sk]:
                try:
                    self.set_option(k, sk, data_dict[k][sk])
                except IOError:
                    # Retry path options relative to the source file's dir.
                    if filename is None:
                        raise IOError('Error path: {0}.{1}'.format(k, sk))
                    npat = os.path.join(filename, data_dict[k][sk])
                    self.set_option(k, sk, os.path.normpath(npat))
                except ValueError:
                    # Locked or invalid values are ignored during bulk load.
                    pass
Load options from a dictionary .
15,356
def write_options_to_file(self, filename, file_format='yaml'):
    """Write options to *filename* in 'yaml' or 'json' format."""
    fmt = file_format.lower()
    if fmt == 'yaml':
        self.write_options_to_YAML(filename)
    elif fmt == 'json':
        self.write_options_to_JSON(filename)
    else:
        raise ValueError('Unknown format {}'.format(file_format))
Write options to file .
15,357
def write_options_to_YAML(self, filename):
    """Write the options in YAML format to *filename*.

    Fix: use a context manager so the file handle is closed even when
    yaml.dump raises.
    """
    with open(filename, "w") as fd:
        yaml.dump(_options_to_dict(self.gc), fd, default_flow_style=False)
Writes the options in YAML format to a file .
15,358
def write_options_to_JSON(self, filename):
    """Write the options in JSON format to *filename*.

    Fix: use a context manager so the file handle is closed even when
    serialization raises.
    """
    with open(filename, "w") as fd:
        fd.write(json.dumps(_options_to_dict(self.gc), indent=2, separators=(',', ': ')))
Writes the options in JSON format to a file .
15,359
def document_options(self):
    """Generate a reST table documenting every registered option."""
    # Column widths: widest key plus padding, never narrower than the header.
    k1 = max([len(_) for _ in self.gc['k1']]) + 4
    k1 = max([k1, len('Option Class')])
    k2 = max([len(_) for _ in self.gc['k2']]) + 4
    k2 = max([k2, len('Option ID')])
    separators = " ".join(["".join(["=", ] * k1),
                           "".join(["=", ] * k2),
                           "".join(["=", ] * 11)])
    line = ("{0:>" + str(k1) + "} {1:>" + str(k2) + "} {2}")
    data = []
    data.append(separators)
    data.append(line.format('Option Class', 'Option ID', 'Description'))
    data.append(separators)
    for _, row in self.gc.iterrows():
        data.append(line.format("**" + row['k1'] + "**",
                                "**" + row['k2'] + "**",
                                row['description']))
    data.append(separators)
    return "\n".join(data)
Generates a docstring table to add to the library documentation .
15,360
def get_local_config_file(cls, filename):
    """Find a local file to load default values from.

    Looks, in order, in the working directory, the git repository root,
    and the user's home directory; returns the first existing path or None.

    Fix: the original raised bare Exception as control flow to fall
    through to the home-directory check; the lookups are now straight-line
    with identical behavior.
    """
    if os.path.isfile(filename):
        return filename
    try:
        # _get_repo may fail outside a git checkout; fall through to $HOME.
        repo = _get_repo()
        if len(repo) > 0:
            candidate = os.path.join(repo, filename)
            if os.path.isfile(candidate):
                return candidate
    except Exception:
        pass
    home = os.getenv("HOME", os.path.expanduser("~"))
    candidate = os.path.join(home, filename)
    if os.path.isfile(candidate):
        return candidate
    return None
Find local file to setup default values .
15,361
def cmd(send, msg, args):
    """Get a man page summary from linux.die.net.

    Usage: [section] command; without a section, sections 0-7 are probed.
    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('section', nargs='?')
    parser.add_argument('command')
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.section:
        html = get('http://linux.die.net/man/%s/%s' % (cmdargs.section, cmdargs.command))
        # The page's meta description holds the one-line summary.
        short = fromstring(html.text).find('.//meta[@name="description"]')
        if short is not None:
            short = short.get('content')
            send("%s -- http://linux.die.net/man/%s/%s" % (short, cmdargs.section, cmdargs.command))
        else:
            send("No manual entry for %s in section %s" % (cmdargs.command, cmdargs.section))
    else:
        # Probe each section until one responds with a real page.
        for section in range(0, 8):
            html = get('http://linux.die.net/man/%d/%s' % (section, cmdargs.command))
            if html.status_code == 200:
                short = fromstring(html.text).find('.//meta[@name="description"]')
                if short is not None:
                    short = short.get('content')
                    send("%s -- http://linux.die.net/man/%d/%s" % (short, section, cmdargs.command))
                    return
        send("No manual entry for %s" % cmdargs.command)
Gets a man page .
15,362
def cmd(send, msg, args):
    """Gets a definition from urban dictionary.

    Admins may also --blacklist/--unblacklist terms; otherwise the
    remaining text is looked up and the definition (plus a shortened
    link when available) is sent back.
    """
    key = args['config']['api']['bitlykey']
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--blacklist')
    parser.add_argument('--unblacklist')
    try:
        # parse_known_args so the search term survives as leftover tokens.
        cmdargs, msg = parser.parse_known_args(msg)
        msg = ' '.join(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.blacklist:
        if args['is_admin'](args['nick']):
            send(blacklist_word(args['db'], cmdargs.blacklist))
        else:
            send("Blacklisting is admin-only")
    elif cmdargs.unblacklist:
        if args['is_admin'](args['nick']):
            send(unblacklist_word(args['db'], cmdargs.unblacklist))
        else:
            send("Unblacklisting is admin-only")
    else:
        # get_urban() presumably returns (definition, short_url-or-falsy)
        # -- TODO confirm against its definition.
        defn, url = get_urban(msg, args['db'], key)
        send(defn)
        if url:
            send("See full definition at %s" % url)
Gets a definition from urban dictionary .
15,363
def people(self):
    """Fetch every person on the account, wrapped as Person objects."""
    return [Person(self, entry['user'])
            for entry in self.get_request('people/')]
Generates a list of all People .
15,364
def tasks(self):
    """Fetch every task on the account, wrapped as Task objects."""
    return [Task(self, entry['task'])
            for entry in self.get_request('tasks/')]
Generates a list of all Tasks .
15,365
def clients(self):
    """Fetch every client on the account, wrapped as Client objects."""
    return [Client(self, entry['client'])
            for entry in self.get_request('clients/')]
Generates a list of all Clients .
15,366
def get_client(self, client_id):
    """Look up a single client by its id; returns a Client object."""
    return Client(self, self.get_request('clients/%s' % client_id)['client'])
Gets a single client by id .
15,367
def get_project(self, project_id):
    """Look up a single project by its id; returns a Project object."""
    return Project(self, self.get_request('projects/%s' % project_id)['project'])
Gets a single project by id .
15,368
def create_person(self, first_name, last_name, email, department=None,
                  default_rate=None, admin=False, contractor=False):
    """Create a Person with the given details.

    Returns the new Person on success, None when the API call fails.
    """
    user = {
        'first_name': first_name,
        'last_name': last_name,
        'email': email,
        'department': department,
        'default_hourly_rate': default_rate,
        'is_admin': admin,
        'is_contractor': contractor,
    }
    # follow=True so the freshly created resource is fetched back.
    response = self.post_request('people/', {'user': user}, follow=True)
    return Person(self, response['user']) if response else None
Creates a Person with the given information .
15,369
def create_project(self, name, client_id, budget=None, budget_by='none',
                   notes=None, billable=True):
    """Create a Project with the given details.

    Returns the new Project on success, None when the API call fails.
    """
    project = {
        'name': name,
        'client_id': client_id,
        'budget_by': budget_by,
        'budget': budget,
        'notes': notes,
        'billable': billable,
    }
    # follow=True so the freshly created resource is fetched back.
    response = self.post_request('projects/', {'project': project}, follow=True)
    return Project(self, response['project']) if response else None
Creates a Project with the given information .
15,370
def create_client(self, name):
    """Create a Client named *name*.

    Returns the new Client on success, None when the API call fails.
    """
    response = self.post_request('clients/', {'client': {'name': name}}, follow=True)
    return Client(self, response['client']) if response else None
Creates a Client with the given information .
15,371
def delete(self):
    """Delete this person immediately; returns the raw API response."""
    return self.hv.delete_request('people/' + str(self.id))
Deletes the person immediately .
15,372
def task_assignments(self):
    """List every TaskAssignment attached to this project."""
    endpoint = 'projects/%s/task_assignments' % self.id
    return [TaskAssignment(self.hv, entry['task_assignment'])
            for entry in self.hv.get_request(endpoint)]
Retrieves all tasks currently assigned to this project .
15,373
def partial(cls, prefix, source):
    """Build a new mapping from the entries of *source* whose keys start
    with ``prefix + '.'``, with that prefix stripped from each key.

    Raises ValueError when no key matches.
    """
    dotted = prefix + "."
    found = cls([(key[len(dotted):], value)
                 for key, value in source.items()
                 if key.startswith(dotted)])
    if not found:
        raise ValueError()
    return found
Strip a prefix from the keys of another dictionary returning a Bunch containing only valid key value pairs .
15,374
def cmd(send, msg, args):
    """Greps the log for a string.

    Supports --nick to restrict the search to one speaker and
    -i/--ignore-case. Reports the most recent matching public message
    along with a total occurrence count.
    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--nick', action=arguments.NickParser)
    parser.add_argument('--ignore-case', '-i', action='store_true')
    parser.add_argument('string', nargs='*')
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if not cmdargs.string:
        send('Please specify a search term.')
        return
    cmdchar = args['config']['core']['cmdchar']
    term = ' '.join(cmdargs.string)
    # Only public messages, and skip bot commands (lines beginning with
    # the configured command character).
    if cmdargs.nick:
        query = args['db'].query(Log).filter(Log.type == 'pubmsg', Log.source == cmdargs.nick, ~Log.msg.startswith(cmdchar))
    else:
        query = args['db'].query(Log).filter(Log.type == 'pubmsg', ~Log.msg.startswith(cmdchar))
    # escape() presumably quotes SQL LIKE wildcards in the term -- TODO
    # confirm against its definition.
    if cmdargs.ignore_case:
        query = query.filter(Log.msg.ilike('%%%s%%' % escape(term)))
    else:
        query = query.filter(Log.msg.like('%%%s%%' % escape(term)))
    query = query.order_by(Log.time.desc())
    result = query.limit(1).first()
    count = query.count()
    if result is not None:
        logtime = result.time.strftime('%Y-%m-%d %H:%M:%S')
        send("%s was last said by %s at %s (%d occurrences)" % (result.msg, result.source, logtime, count))
    elif cmdargs.nick:
        send('%s has never said %s.' % (cmdargs.nick, term))
    else:
        send('%s has never been said.' % term)
Greps the log for a string .
15,375
def pick_action_todo():
    """Pick the next action automatically (testing / AI helper).

    Walks the module-level ``things_to_do`` list; the first entry that
    passes its dice roll AND whose stat precondition holds wins. Falls
    back to ``actions[3]`` (presumably an idle/default action -- TODO
    confirm) when nothing fires.
    """
    for ndx, todo in enumerate(things_to_do):
        if roll_dice(todo["chance"]):
            cur_act = actions[get_action_by_name(todo["name"])]
            # WHERE_COL/WHERE_VAL encode the precondition: only take the
            # action while the named stat is still above the threshold.
            if todo["WHERE_COL"] == "energy" and my_char["energy"] > todo["WHERE_VAL"]:
                return cur_act
            if todo["WHERE_COL"] == "gold" and my_char["gold"] > todo["WHERE_VAL"]:
                return cur_act
    return actions[3]
Picks an action automatically; used only for testing and by the AI, since a human user will usually choose an action themselves. Works only approximately.
15,376
def do_action(character, action):
    """Apply *action* to *character* for one game tick.

    Spends energy, grants skill experience, rolls for the reward item
    and prints a one-line status report.

    BUG FIX: the original read stats from the *character* parameter but
    mutated the module-global ``my_char``; the two diverged whenever any
    other character was passed. All reads and writes now go through
    *character* (callers passing ``my_char`` see identical behavior).
    """
    stats = "Energy=" + str(round(character["energy"], 0)) + ", "
    stats += "Gold=" + str(round(character["gold"], 0)) + ", "
    ndx_action_skill = get_skill_by_name(action["name"], character)
    stats += "Skill=" + str(round(character["skills"][ndx_action_skill]["level"], 1))
    character["energy"] -= action["cost_energy"]
    character["skills"][ndx_action_skill]["level"] += action["exp_gain"]
    reward_item = action["reward_item"]
    inv = get_inventory_by_name(reward_item, character)
    if roll_dice(action["reward_chance"]):
        character["inventory"][inv]["val"] += 1
        print(character["name"] + " is " + action["name"] + ". " + stats + ' FOUND ' + reward_item)
    else:
        print(character["name"] + " is " + action["name"] + ". " + stats)
called by main game loop to run an action
15,377
def get_inventory_by_name(nme, character):
    """Return the index of the inventory entry named *nme*, or 0 when no
    entry matches."""
    return next((pos for pos, item in enumerate(character["inventory"])
                 if item["name"] == nme), 0)
returns the inventory index by name
15,378
def get_skill_by_name(nme, character):
    """Return the index of the skill named *nme* in *character*, or 0
    when no skill matches."""
    return next((pos for pos, skill in enumerate(character["skills"])
                 if skill["name"] == nme), 0)
returns the skill by name in a character
15,379
def attribute_checker(operator, attribute, value=''):
    """Build a predicate for CSS attribute selectors.

    Given a selector operator ('=', '~', '^', '$', '*', '|', or anything
    else for bare [attr] presence), an attribute name and an optional
    value, return a one-argument function that reports whether an
    element's attribute matches.

    BUG FIX: the presence check used ``el.has_key(attribute)``, which is
    Python-2-only (removed from dicts in Python 3); it now tests
    ``el.get(attribute) is not None``, which behaves the same for both
    mappings and soup elements whose attribute values are strings.
    """
    return {
        '=': lambda el: el.get(attribute) == value,                # exact match
        '~': lambda el: value in el.get(attribute, '').split(),    # whitespace-separated word
        '^': lambda el: el.get(attribute, '').startswith(value),   # prefix
        '$': lambda el: el.get(attribute, '').endswith(value),     # suffix
        '*': lambda el: value in el.get(attribute, ''),            # substring
        '|': lambda el: el.get(attribute, '') == value
             or el.get(attribute, '').startswith('%s-' % value),   # exact or dash-prefixed
    }.get(operator, lambda el: el.get(attribute) is not None)
Takes an operator attribute and optional value ; returns a function that will return True for elements that match that combination .
15,380
def select(soup, selector):
    """soup should be a BeautifulSoup instance; selector is a CSS
    selector specifying the elements you want to retrieve.

    Supports a whitespace-separated chain of simple selectors: attribute
    selectors (tag[attr<op>value]), #id, .class, the universal '*' and
    bare tag names. Returns the list of matching elements (empty on no
    match or on an unparseable token).
    """
    tokens = selector.split()
    current_context = [soup]
    for token in tokens:
        # 1) Attribute selector, e.g. a[href^=http].
        m = attribselect_re.match(token)
        if m:
            tag, attribute, operator, value = m.groups()
            if not tag:
                tag = True  # True => match any tag in BeautifulSoup
            checker = attribute_checker(operator, attribute, value)
            found = []
            for context in current_context:
                found.extend([el for el in context.findAll(tag) if checker(el)])
            current_context = found
            continue
        # 2) ID selector: at most one element can match; searched only
        # under the first context element.
        if '#' in token:
            tag, id = token.split('#', 1)
            if not tag:
                tag = True
            el = current_context[0].find(tag, {'id': id})
            if not el:
                return []
            current_context = [el]
            continue
        # 3) Class selector: the class attribute is whitespace-separated.
        if '.' in token:
            tag, klass = token.split('.', 1)
            if not tag:
                tag = True
            found = []
            for context in current_context:
                found.extend(context.findAll(tag, {'class': lambda attr: attr and klass in attr.split()}))
            current_context = found
            continue
        # 4) Universal selector: every descendant tag.
        if token == '*':
            found = []
            for context in current_context:
                found.extend(context.findAll(True))
            current_context = found
            continue
        # 5) Plain tag name; reject anything that is not one.
        if not tag_re.match(token):
            return []
        found = []
        for context in current_context:
            found.extend(context.findAll(token))
        current_context = found
    return current_context
soup should be a BeautifulSoup instance ; selector is a CSS selector specifying the elements you want to retrieve .
15,381
def gradient(self, P, Q, Y, i):
    """Gradient of the KL divergence with respect to embedding point Y[i].

    Sums, over every point j, the term (P_ij - Q_ij) * (Y_i - Y_j)
    weighted by the Student-t kernel 1 / (1 + ||Y_i - Y_j||^2), then
    scales by 4 (the standard t-SNE gradient form).
    """
    total = 0
    for j in range(Y.shape[0]):
        diff = Y[i] - Y[j]
        kernel = (1 + np.linalg.norm(diff) ** 2) ** -1
        total = total + (P[i, j] - Q[i, j]) * diff * kernel
    return 4 * total
Computes the gradient of KL divergence with respect to the i th example of Y
15,382
def try_(block, except_=None, else_=None, finally_=None):
    """Emulate a ``try`` block as an expression.

    :param block: zero-argument callable to attempt.
    :param except_: exception handler -- either a single callable
        (handles any Exception), or an iterable / ordered mapping of
        (exception_type, handler) pairs tried in declaration order.
    :param else_: called (and its value returned) when *block* raised
        nothing; only allowed together with *except_*.
    :param finally_: always-run cleanup callable.
    :return: the value of *block*, of the matching handler, or of
        *else_*, mirroring ``try`` semantics.
    """
    ensure_callable(block)
    if not (except_ or else_ or finally_):
        raise TypeError("at least one of `except_`, `else_` or `finally_` "
                        "functions must be provided")
    if else_ and not except_:
        raise TypeError("`else_` can only be provided along with `except_`")

    if except_:
        if callable(except_):
            # A bare callable handles every Exception.
            except_ = [(Exception, except_)]
        else:
            ensure_iterable(except_)
            if is_mapping(except_):
                # First matching type wins, so order matters: plain
                # unordered mappings are rejected.
                ensure_ordered_mapping(except_)
                except_ = except_.items()

        def handle_exception():
            # Dispatch the active exception to the first matching handler;
            # re-raise unchanged when none matches.
            exc_type, exc_object = sys.exc_info()[:2]
            for t, handler in except_:
                if issubclass(exc_type, t):
                    return handler(exc_object)
            raise
        if else_:
            ensure_callable(else_)
            if finally_:
                ensure_callable(finally_)
                try:
                    block()
                except:
                    return handle_exception()
                else:
                    return else_()
                finally:
                    finally_()
            else:
                try:
                    block()
                except:
                    return handle_exception()
                else:
                    return else_()
        else:
            if finally_:
                ensure_callable(finally_)
                try:
                    return block()
                except:
                    return handle_exception()
                finally:
                    finally_()
            else:
                try:
                    return block()
                except:
                    return handle_exception()
    elif finally_:
        ensure_callable(finally_)
        try:
            return block()
        finally:
            finally_()
Emulate a try block .
15,383
def with_(contextmanager, do):
    """Emulate a ``with`` statement as an expression.

    Enters *contextmanager*, passes the value it yields to *do*, and
    returns do's result; the context is exited normally afterwards.
    """
    ensure_contextmanager(contextmanager)
    ensure_callable(do)
    with contextmanager as value:
        return do(value)
Emulate a with statement performing an operation within context .
15,384
def _cast_repr(self, caster, *args, **kwargs):
    """Cast this constant's repr content with *caster*, passing along any
    extra positional and keyword arguments.

    The truncated-hash repr content is computed lazily on first use and
    cached on the instance.
    """
    if self.__repr_content is None:
        # hash_and_truncate() presumably derives a short, stable digest
        # of this constant -- TODO confirm against its definition.
        self.__repr_content = hash_and_truncate(self)
        # Only meaningful while the default repr is in effect.
        assert self.__uses_default_repr
    return caster(self.__repr_content, *args, **kwargs)
Will cast this constant with the provided caster passing args and kwargs .
15,385
def cmd(send, *_):
    """Enumerate threads.

    Lists the bot's live threads sorted by thread number, labelling the
    known worker kinds (server workers and multiprocessing pool
    workers).
    """
    thread_names = []
    for x in sorted(threading.enumerate(), key=lambda k: k.name):
        # Plain worker threads are named exactly 'Thread-<n>'.
        res = re.match(r'Thread-(\d+$)', x.name)
        if res:
            tid = int(res.group(1))
            # NOTE(review): _target is a private Thread attribute; a
            # thread created without a target would raise AttributeError
            # here -- confirm this cannot happen in practice.
            if x._target.__name__ == '_worker':
                thread_names.append((tid, "%s running server thread" % x.name))
            elif x._target.__module__ == 'multiprocessing.pool':
                thread_names.append((tid, "%s running multiprocessing pool worker thread" % x.name))
        else:
            # Other names (e.g. 'MainThread'): sort first (tid 0) unless
            # a thread number can still be extracted from the prefix.
            res = re.match(r'Thread-(\d+)', x.name)
            tid = 0
            if res:
                tid = int(res.group(1))
            thread_names.append((tid, x.name))
    for x in sorted(thread_names, key=lambda k: k[0]):
        send(x[1])
Enumerate threads .
15,386
def pipe():
    """Return a (reader_fd, writer_fd) pair.

    Prefers the real ``os.pipe()``; on platforms where that is
    unavailable or fails, falls back to the socket-based Pipe emulation
    defined in this module.

    BUG FIX: the fallback was guarded by a bare ``except:``, which also
    swallowed KeyboardInterrupt/SystemExit; it now catches only the
    failures the os.pipe path can realistically produce.
    """
    try:
        from os import pipe
        return pipe()
    except (ImportError, AttributeError, OSError):
        fallback = Pipe()
        return fallback.reader_fd, fallback.writer_fd
Return the optimum pipe implementation for the capabilities of the active system .
15,387
def read(self):
    """Emulate a file descriptor's read method.

    Returns one byte received from the reader socket. A would-block
    condition is translated into IOError (mirroring non-blocking fd
    semantics); any other socket error propagates.
    """
    try:
        return self.reader.recv(1)
    except socket.error:
        # exception() is presumably a py2/py3 compatibility helper whose
        # .exception is the active exception -- TODO confirm against its
        # definition.
        ex = exception().exception
        if ex.args[0] == errno.EWOULDBLOCK:
            raise IOError
        raise
Emulate a file descriptors read method
15,388
def replace_all(text, dic):
    """Return *text* with every occurrence of each key of *dic* replaced
    by its value.

    Replacement happens in the dict's iteration order, so overlapping
    keys may interact.

    BUG FIX: used ``dic.iteritems()``, which exists only on Python 2;
    ``items()`` behaves identically on both Python 2 and 3.
    """
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
Takes a string and dictionary . replaces all occurrences of i with j
15,389
def replace_u_start_month(month):
    """Resolve 'u' digits in an interval-start month to the earliest
    legitimate month ('uu'/'0u' -> '01', 'u0' -> '10', otherwise each
    'u' becomes '0'). A leading '-' is stripped first."""
    month = month.lstrip('-')
    if month in ('uu', '0u'):
        return '01'
    if month == 'u0':
        return '10'
    return month.replace('u', '0')
Find the earliest legitimate month .
15,390
def replace_u_end_month(month):
    """Resolve 'u' digits in an interval-end month to the latest
    legitimate month. A leading '-' is stripped first."""
    month = month.lstrip('-')
    special = {'uu': '12', '1u': '12', 'u0': '10', '0u': '09'}
    if month in special:
        return special[month]
    # 'u1'/'u2' can still be a second-half month (11/12); any other
    # trailing digit forces a first-half month ('u' -> '0').
    fill = '1' if month[1] in ['1', '2'] else '0'
    return month.replace('u', fill)
Find the latest legitimate month .
15,391
def replace_u_start_day(day):
    """Resolve 'u' digits in an interval-start day to the earliest
    legitimate day ('uu'/'0u' -> '01', 'u0' -> '10', otherwise each 'u'
    becomes '0'). A leading '-' is stripped first."""
    day = day.lstrip('-')
    if day in ('uu', '0u'):
        return '01'
    if day == 'u0':
        return '10'
    return day.replace('u', '0')
Find the earliest legitimate day .
15,392
def replace_u_end_day(day, year, month):
    """Resolve 'u' digits in an interval-end day to the latest legitimate
    day for the given year and month.

    *day* may carry a leading '-'; *year* and *month* arrive as strings
    (the month possibly '-'-prefixed) and are converted to ints.

    BUG FIX: the February check compared the already-converted int month
    against the string '02' (always unequal), so '2u'/'u9' resolved to
    '29' even in non-leap Februaries; the comparison is now numeric.
    """
    day = day.lstrip('-')
    year = int(year)
    month = int(month.lstrip('-'))
    if day == 'uu' or day == '3u':
        # Fully unknown (or 3x) day: the month's last day.
        return str(calendar.monthrange(year, month)[1])
    if day == '0u' or day == '1u':
        # 09 / 19 always exist.
        return day.replace('u', '9')
    if day == '2u' or day == 'u9':
        if month != 2 or calendar.isleap(year):
            return '29'
        elif day == '2u':
            return '28'
        else:
            return '19'
    if 1 < int(day[1]) < 9:
        # 'u2'..'u8': the 2x day exists in every month.
        return day.replace('u', '2')
    if day == 'u1':
        if calendar.monthrange(year, month)[1] == 31:
            return '31'
        else:
            return '21'
    if day == 'u0':
        if calendar.monthrange(year, month)[1] >= 30:
            return '30'
        else:
            return '20'
Find the latest legitimate day .
15,393
def replace_u(matchobj):
    """Break the interval regex match into parts and resolve its 'u's.

    The match groups hold the start endpoint (indices 1-3: year, month,
    day) and the end endpoint (indices 5-7), with '-' prefixes kept in
    the month/day groups -- TODO confirm the layout against U_PATTERN.
    Start parts resolve to their earliest legitimate values, end parts
    to their latest, and the halves are re-joined with '/'.
    """
    pieces = list(matchobj.groups(''))
    # Start year: unknown digits become 0 (earliest possible year).
    if 'u' in pieces[1]:
        pieces[1] = pieces[1].replace('u', '0')
    # End year: unknown digits become 9 (latest possible year).
    if 'u' in pieces[5]:
        pieces[5] = pieces[5].replace('u', '9')
    if 'u' in pieces[2]:
        pieces[2] = '-' + replace_u_start_month(pieces[2])
    if 'u' in pieces[6]:
        pieces[6] = '-' + replace_u_end_month(pieces[6])
    if 'u' in pieces[3]:
        pieces[3] = '-' + replace_u_start_day(pieces[3])
    if 'u' in pieces[7]:
        # The latest day depends on the already-resolved end year/month.
        pieces[7] = '-' + replace_u_end_day(pieces[7], year=pieces[5], month=pieces[6])
    return ''.join((''.join(pieces[:4]), '/', ''.join(pieces[4:])))
Break the interval into parts and replace u s .
15,394
def zero_year_special_case(from_date, to_date, start, end):
    """Order interval endpoints when a year-0000 date is involved.

    strptime cannot represent year 0000, so intervals touching it are
    compared by hand. *start*/*end* are 'pos'/'neg' sign markers for the
    two endpoints (signs already stripped from the date strings).
    Returns True when the interval is validly ordered.

    BUG FIX: when both endpoints were in year 0000 the original required
    month <= month AND day <= day independently, wrongly rejecting e.g.
    0000-01-31/0000-02-01; the (month, day) pairs are now compared
    lexicographically.
    """
    if start == 'pos' and end == 'pos':
        if from_date.startswith('0000') and not to_date.startswith('0000'):
            # Year 0000 precedes every other positive year.
            return True
        if not from_date.startswith('0000') and to_date.startswith('0000'):
            return False
        if from_date.startswith('0000') and to_date.startswith('0000'):
            # Both in year 0000: compare (month, day), defaulting missing
            # parts to 1. Dates are 'YYYY', 'YYYY-MM' or 'YYYY-MM-DD'.
            if len(from_date) == 4:
                fm, fd = 1, 1
            elif len(from_date) == 7:
                fm, fd = int(from_date[5:7]), 1
            elif len(from_date) == 10:
                fm, fd = int(from_date[5:7]), int(from_date[8:10])
            if len(to_date) == 4:
                tm, td = 1, 1
            elif len(to_date) == 7:
                tm, td = int(to_date[5:7]), 1
            elif len(to_date) == 10:
                tm, td = int(to_date[5:7]), int(to_date[8:10])
            return (fm, fd) <= (tm, td)
    elif start == 'neg' and end == 'neg':
        # A negative year 0000 ('-0000') is never a legitimate endpoint.
        return False
    elif start == 'neg' and end == 'pos':
        # Negative start before positive end is fine, except '-0000'.
        if from_date.startswith("0000"):
            return False
        else:
            return True
strptime cannot parse a 0000 year, so we must handle that case separately.
15,395
def is_valid_interval(edtf_candidate):
    """Test to see if the edtf candidate is a valid interval.

    Normalises approximate/uncertain markers and 'u' (unknown digit)
    placeholders, parses the two endpoints at whatever precision they
    carry, and checks their ordering. Only candidates containing exactly
    one '/' are considered.
    """
    from_date = None
    to_date = None
    end, start = 'pos', 'pos'
    if edtf_candidate.count('/') == 1:
        # Normalise markers, then resolve 'u's to earliest/latest values.
        edtf_candidate = replace_all(edtf_candidate, interval_replacements)
        edtf_candidate = re.sub(U_PATTERN, replace_u, edtf_candidate)
        parts = edtf_candidate.split('/')
        # Record and strip the sign of each endpoint.
        if parts[0].startswith("-"):
            start = 'neg'
            parts[0] = parts[0][1:]
        if parts[1].startswith("-"):
            end = 'neg'
            parts[1] = parts[1][1:]
        if start == 'pos' and end == 'neg':
            # A positive start can never precede a negative end.
            return False
        # strptime cannot represent year 0000; handle it separately.
        if parts[0].startswith("0000") or parts[1].startswith("0000"):
            return zero_year_special_case(parts[0], parts[1], start, end)
        # Parse each endpoint at its precision: YYYY-MM-DD, YYYY-MM or
        # YYYY; 'unknown'/'open' endpoints act as wildcards.
        if parts[0].count("-") == 2:
            from_date = datetime.datetime.strptime(parts[0], "%Y-%m-%d")
        if parts[1].count("-") == 2:
            to_date = datetime.datetime.strptime(parts[1], "%Y-%m-%d")
        if parts[0].count("-") == 1:
            from_date = datetime.datetime.strptime(parts[0], "%Y-%m")
        if parts[1].count("-") == 1:
            to_date = datetime.datetime.strptime(parts[1], "%Y-%m")
        if parts[0].count("-") == 0:
            if parts[0] == 'unknown':
                # An unknown start is treated as the earliest parseable year.
                from_date = datetime.datetime.strptime("0001", "%Y")
            else:
                from_date = datetime.datetime.strptime(parts[0], "%Y")
        if parts[1].count("-") == 0:
            if parts[1] == 'open' or parts[1] == 'unknown':
                to_date = 'open'
            else:
                to_date = datetime.datetime.strptime(parts[1], "%Y")
        if start == 'neg' and end == 'pos':
            return True
        elif start == 'neg' and end == 'neg':
            # Negative years compare reversed: the larger year number is
            # the earlier date, so from must be >= to.
            if from_date >= to_date and from_date and to_date:
                return True
        elif (parts[1] == 'unknown' or parts[1] == 'open'
                or parts[0] == 'unknown'):
            # Wildcard endpoints are accepted without ordering checks.
            return True
        elif start == 'pos' and end == 'pos':
            if from_date <= to_date and from_date and to_date:
                return True
            else:
                return False
        else:
            return False
    else:
        return False
Test to see if the edtf candidate is a valid interval
15,396
def isLevel2(edtf_candidate):
    """Checks to see if the date is level 2 valid.

    NOTE(review): level2Expression is presumably a parser object (e.g. a
    pyparsing element) that overloads ``==`` against strings to mean
    "matches"; the comparisons below are match tests, not string
    equality -- confirm against its definition.
    """
    if "[" in edtf_candidate or "{" in edtf_candidate:
        # Set/choice notation is level-2-only syntax.
        result = edtf_candidate == level2Expression
    elif " " in edtf_candidate:
        # Embedded spaces are never valid at level 2.
        result = False
    else:
        result = edtf_candidate == level2Expression
    return result
Checks to see if the date is level 2 valid
15,397
def is_valid(edtf_candidate):
    """Return True when *edtf_candidate* is a valid EDTF date at level 0,
    1 or 2; interval candidates additionally get their endpoint ordering
    checked via is_valid_interval()."""
    if (
            isLevel0(edtf_candidate)
            or isLevel1(edtf_candidate)
            or isLevel2(edtf_candidate)):
        if '/' in edtf_candidate:
            return is_valid_interval(edtf_candidate)
        else:
            return True
    else:
        return False
isValid takes a candidate date and returns if it is valid or not
15,398
def is_direct_subclass(class_, of):
    """True when *of* appears among *class_*'s immediate bases
    (inheriting through an intermediate class does not count).
    Both arguments are validated to be classes."""
    for candidate in (class_, of):
        ensure_class(candidate)
    return of in class_.__bases__
Check whether given class is a direct subclass of the other .
15,399
def ensure_direct_subclass(class_, of):
    """Validate that *class_* directly subclasses *of*.

    Returns *class_* unchanged on success; raises TypeError otherwise.
    """
    if is_direct_subclass(class_, of):
        return class_
    raise TypeError("expected a direct subclass of %r, got %s instead" % (
        of, class_.__name__))
Check whether given class is a direct subclass of another .