idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
14,800
def get_queryset(self):
    """Return executive offices (those without a legislative body) holding an
    election on the requested date."""
    try:
        election_day = ElectionDay.objects.get(date=self.kwargs['date'])
    except Exception:
        raise APIException('No elections on {}.'.format(self.kwargs['date']))
    uids = [
        election.race.office.uid
        for election in election_day.elections.all()
        if not election.race.office.body
    ]
    return Office.objects.filter(uid__in=uids)
Returns a queryset of all executive offices holding an election on a date .
14,801
def cmd(send, msg, args):
    """Slap somebody.

    Syntax: ``<target> [with <implement>] [for <reason>]``. With no message,
    slaps a random user in the channel. Fixes the "Synatx" typo in the
    duplicate-with error and stops reusing the name ``args`` (the handler
    dict) as the clause-seen flag.
    """
    implements = ['the golden gate bridge', 'a large trout', 'a clue-by-four', 'a fresh haddock', 'moon', 'an Itanium', 'fwilson', 'a wombat']
    methods = ['around a bit', 'upside the head']
    if not msg:
        # No target: pick a random user from the channel.
        channel = args['target'] if args['target'] != 'private' else args['config']['core']['channel']
        with args['handler'].data_lock:
            users = list(args['handler'].channels[channel].users())
        slap = 'slaps %s %s with %s'
        send(slap % (choice(users), choice(methods), choice(implements)), 'action')
    else:
        reason = ''
        method = choice(methods)
        implement = ''
        msg = msg.split()
        slapee = msg[0]
        i = 1
        # True once a 'for'/'with' clause starts; further bare words are
        # no longer part of the target name. (Previously this flag shadowed
        # the `args` parameter.)
        clause_seen = False
        while i < len(msg):
            if msg[i] == 'for':
                clause_seen = True
                if reason:
                    send("Invalid Syntax: You can only have one for clause!")
                    return
                i += 1
                while i < len(msg):
                    if msg[i] == 'with':
                        break
                    reason += " "
                    reason += msg[i]
                    i += 1
                reason = reason.strip()
            elif msg[i] == 'with':
                clause_seen = True
                if implement:
                    # Typo fix: was "Invalid Synatx".
                    send("Invalid Syntax: You can only have one with clause!")
                    return
                i += 1
                while i < len(msg):
                    if msg[i] == 'for':
                        break
                    implement += msg[i]
                    implement += ' '
                    i += 1
                implement = implement.strip()
            elif not clause_seen:
                slapee += ' ' + msg[i]
                i += 1
        if not implement:
            implement = choice(implements)
        if reason:
            slap = 'slaps %s %s with %s for %s' % (slapee, method, implement, reason)
        else:
            slap = 'slaps %s %s with %s' % (slapee, method, implement)
        send(slap, 'action')
Slap somebody .
14,802
def cmd(send, msg, args):
    """Report the nick-change chain for a user (random channel user if none given)."""
    if not msg:
        with args['handler'].data_lock:
            if args['target'] != 'private':
                users = list(args['handler'].channels[args['target']].users())
            else:
                users = [args['nick']]
        msg = choice(users)
    chain = get_chain(args['db'], msg)
    if chain:
        send(" -> ".join(chain))
    else:
        send("%s has never changed their nick." % msg)
Gets previous nicks .
14,803
def clear_database(engine: Connectable, schemas: Iterable[str] = ()) -> None:
    """Drop all tables and views reflected from the default schema plus any
    extra *schemas* in the given database."""
    assert check_argument_types()
    metadatas = []
    # Reflect everything first, then drop, so a failed reflection aborts
    # before anything is destroyed.
    for schema in (None,) + tuple(schemas):
        metadata = MetaData()
        metadata.reflect(engine, schema=schema, views=True)
        metadatas.append(metadata)
    for metadata in metadatas:
        metadata.drop_all(engine, checkfirst=False)
Clear any tables from an existing database .
14,804
def cmd(send, msg, args):
    """Inspect a bot handler attribute by name and send its string form."""
    handler = args['handler']
    if not hasattr(handler, msg):
        send("That attribute was not found in the handler.")
        return
    send(str(getattr(handler, msg)))
Inspects a bot attribute .
14,805
def cmd(send, msg, args):
    """Correct a previous message with a sed-style s<delim>pattern<delim>replacement<delim>flags expression."""
    if not msg:
        send("Invalid Syntax.")
        return
    # The first character is the delimiter (conventionally '/').
    char = msg[0]
    # Split on unescaped delimiters, then unescape any '\/' in the pieces.
    msg = [x.replace(r'\/', '/') for x in re.split(r'(?<!\\)\%s' % char, msg[1:], maxsplit=2)]
    if len(msg) == 2 and args['config']['feature'].getboolean('lazyregex'):
        # "lazyregex" feature: allow the trailing delimiter/flags to be omitted.
        msg.append('')
    if not msg or len(msg) < 3:
        send("Invalid Syntax.")
        return
    if args['type'] == 'privmsg':
        send("Don't worry, %s is not a grammar Nazi." % args['botnick'])
        return
    string = msg[0]
    replacement = msg[1]
    modifiers = get_modifiers(msg[2], args['nick'], args['config']['core']['nickregex'])
    if modifiers is None:
        send("Invalid modifiers.")
        return
    try:
        regex = re.compile(string, re.IGNORECASE) if modifiers['ignorecase'] else re.compile(string)
        log = get_log(args['db'], args['target'], modifiers['nick'])
        workers = args['handler'].workers
        # Run the substitution in a worker pool so a pathological regex can be
        # timed out (5s) instead of hanging the bot.
        result = workers.run_pool(do_replace, [log, args['config']['core'], char, regex, replacement])
        try:
            msg = result.get(5)
        except multiprocessing.TimeoutError:
            workers.restart_pool()
            send("Sed regex timed out.")
            return
        if msg:
            send(msg)
        else:
            send("No match found.")
    except sre_constants.error as ex:
        # Invalid user-supplied regex.
        raise CommandFailedException(ex)
Corrects a previous message .
14,806
def resize(self, size):
    """Re-size the Stream field: append zero bytes to grow, or drop bytes
    from the end to shrink; then refresh the size/alignment bookkeeping."""
    delta = max(int(size), 0) - len(self)
    if delta > 0:
        self._value += b'\x00' * delta
    elif delta < 0:
        if -delta == len(self):
            self._value = bytes()
        else:
            self._value = self._value[:delta]
    new_size = len(self)
    self._bit_size = new_size * 8
    self._align_to_byte_size = new_size
Re - sizes the Stream field by appending zero bytes or removing bytes from the end .
14,807
def value(self):
    """Return the field bytes as an ASCII string, truncated at the first NUL."""
    nul = self._value.find(b'\x00')
    data = self._value if nul < 0 else self._value[:nul]
    return data.decode('ascii')
Field value as an ascii encoded string .
14,808
def _set_alignment(self, group_size, bit_offset=0, auto_align=False):
    """Set the alignment (byte size, bit offset) of the Decimal field.

    :param group_size: alignment byte size, used when *auto_align* is False.
    :param bit_offset: bit offset of the field within its alignment group.
    :param auto_align: derive the byte size from the bit offset instead of
        using *group_size*.
    :raises FieldAlignmentError: if the resulting alignment is invalid.
    """
    field_offset = int(bit_offset)
    if auto_align:
        # Smallest whole number of bytes able to contain the bit offset.
        field_size, bit_offset = divmod(field_offset, 8)
        if bit_offset != 0:  # fixed: was `is not 0` (identity test on an int literal)
            field_size += 1
        field_size = max(field_size, 1)
    else:
        field_size = int(group_size)
    alignment = Alignment(field_size, field_offset)
    # NOTE(review): range(1, 8) permits byte sizes 1..7 only — confirm that
    # excluding an 8-byte alignment is intentional.
    if field_size not in range(1, 8):
        raise FieldAlignmentError(self, self.index, alignment)
    if not (0 <= field_offset <= 63):
        raise FieldAlignmentError(self, self.index, alignment)
    if field_offset >= field_size * 8:
        raise FieldAlignmentError(self, self.index, alignment)
    self._align_to_byte_size = alignment.byte_size
    self._align_to_bit_offset = alignment.bit_offset
Sets the alignment of the Decimal field .
14,809
def value(self):
    """Return the enum member name for the raw value; fall back to the raw
    (unsigned integer) value when no name is known."""
    if self._enum and issubclass(self._enum, Enumeration):
        label = self._enum.get_name(self._value)
        if label:
            return label
    return self._value
Field value as an enum name string . Fall back is an unsigned integer number .
14,810
def to_dict(self):
    """Return a dictionary representation of the dataset, merging in any
    extra keyword arguments stored on the instance."""
    result = {'doses': self.doses, 'ns': self.ns, 'incidences': self.incidences}
    result.update(self.kwargs)
    return result
Returns a dictionary representation of the dataset .
14,811
def _calculate_plotting(n, incidence):
    """Return (proportion, lower CL, upper CL) for one dichotomous group.

    Confidence-interval formulas from bmds231_manual.pdf pp. 124-5.
    """
    p = incidence / float(n)
    q = 1. - p
    z = stats.norm.ppf(0.975)  # two-sided 95% critical value
    denominator = 2 * (n + 2 * z)
    ll = ((2 * n * p + 2 * z - 1) - z * np.sqrt(2 * z - (2 + 1 / n) + 4 * p * (n * q + 1))) / denominator
    ul = ((2 * n * p + 2 * z + 1) + z * np.sqrt(2 * z + (2 + 1 / n) + 4 * p * (n * q - 1))) / denominator
    return p, ll, ul
Add confidence intervals to dichotomous datasets . From bmds231_manual . pdf pg 124 - 5 .
14,812
def cd(path, on=os):
    """Change the current working directory within this context.

    *on* may be any os-like object providing ``getcwd``/``chdir`` (e.g. a
    remote connection). The previous working directory is now restored even
    if the managed block raises (the original lacked try/finally, leaking
    the directory change on error).
    """
    original = on.getcwd()
    on.chdir(path)
    try:
        yield
    finally:
        on.chdir(original)
Change the current working directory within this context. Preserves the previous working directory, and can be applied to remote connections that offer `getcwd` and `chdir` operations.
14,813
def url_builder(self, endpoint, *, root=None, params=None, url_params=None):
    """Create a URL for the given endpoint: root + endpoint + optional query
    string, with ``{placeholders}`` filled from *params*."""
    base = self.ROOT if root is None else root
    query = '?' + urlencode(url_params) if url_params else ''
    return (base + endpoint + query).format(**params or {})
Create a URL for the specified endpoint .
14,814
def from_env(cls):
    """Create a service instance with the API token read from the class's
    environment variable.

    :raises ValueError: if the variable is unset.
    """
    var = cls.TOKEN_ENV_VAR
    token = getenv(var)
    if token is not None:
        return cls(api_token=token)
    raise ValueError('missing environment variable: {!r}'.format(var))
Create a service instance from an environment variable .
14,815
def url_builder(self, endpoint, params=None, url_params=None):
    """Build a URL via the parent class, injecting the API token as an
    authentication query parameter."""
    query = OrderedDict() if url_params is None else url_params
    query[self.AUTH_PARAM] = self.api_token
    return super().url_builder(endpoint, params=params, url_params=query)
Add authentication URL parameter .
14,816
def cmd(send, msg, args):
    """Send a random post from the named subreddit (or a random subreddit).

    Fixes the misspelled user-facing message ("Non-existant").
    """
    if msg and not check_exists(msg):
        send("Non-existent subreddit.")
        return
    subreddit = msg if msg else None
    send(random_post(subreddit, args['config']['api']['bitlykey']))
Gets a random Reddit post .
14,817
def cmd(send, msg, args):
    """Choose randomly among ' or '-separated options, with a random flourish."""
    if not msg:
        send("Choose what?")
        return
    options = msg.split(' or ')
    flourishes = [
        'draws a slip of paper from a hat and gets...',
        'says eenie, menie, miney, moe and chooses...',
        'picks a random number and gets...',
        'rolls dice and gets...',
        'asks a random person and gets...',
        'plays rock, paper, scissors, lizard, spock and gets...',
    ]
    send("%s %s" % (choice(flourishes), choice(options)), 'action')
Chooses between multiple choices .
14,818
def cmd(send, msg, args):
    """Pester somebody by repeating a message at them three times."""
    if not msg or len(msg.split()) < 2:
        send("Pester needs at least two arguments.")
        return
    match = re.match('(%s+) (.*)' % args['config']['core']['nickregex'], msg)
    if not match:
        send("Invalid Syntax.")
        return
    nick = match.group(1)
    text = match.group(2) + " "
    send('%s: %s' % (nick, text * 3))
Pesters somebody .
14,819
def get_model(cls, version, model_name):
    """Return the BMDS model class for a given version and model name.

    :raises ValueError: if no model with that name exists for the version.
    """
    for keystore in cls.versions[version].model_options.values():
        if model_name in keystore:
            return keystore[model_name]
    raise ValueError("Unknown model name")
Return BMDS model class given BMDS version and model - name .
14,820
def _add_to_to_ordered_dict(self, d, dataset_index, recommended_only=False):
    """Serialize this session into *d*, emitting any dose-dropped
    sub-sessions (in sorted key order) before the final session itself."""
    if self.doses_dropped_sessions:
        for key in sorted(self.doses_dropped_sessions):
            sub_session = self.doses_dropped_sessions[key]
            sub_session._add_single_session_to_to_ordered_dict(d, dataset_index, recommended_only)
    self._add_single_session_to_to_ordered_dict(d, dataset_index, recommended_only)
Save a session to an ordered dictionary . In some cases a single session may include a final session as well as other BMDS executions where doses were dropped . This will include all sessions .
14,821
def _add_single_session_to_to_ordered_dict(self, d, dataset_index, recommended_only):
    """Append one row per reportable model of this session to the
    dict-of-lists *d* (consumed later as a dataframe)."""
    for model_index, model in enumerate(self.models):
        # show_null marks a placeholder row emitted when no model can be
        # recommended but a row is still wanted for the dataset.
        show_null = False
        if recommended_only:
            if self.recommendation_enabled:
                if self.recommended_model is None:
                    # No recommendation made: emit a single null row, using
                    # the first model's slot only.
                    if model_index == 0:
                        show_null = True
                    else:
                        continue
                elif self.recommended_model == model:
                    pass
                else:
                    continue
            else:
                # Recommendation logic disabled: likewise emit one null row.
                if model_index == 0:
                    show_null = True
                else:
                    continue
        d["dataset_index"].append(dataset_index)
        d["doses_dropped"].append(self.doses_dropped)
        model._to_df(d, model_index, show_null)
Save a single session to an ordered dictionary .
14,822
def _group_models(self):
    """Group models whose numeric AIC and (positive) BMD are identical.

    Returns a list of lists: each inner list holds equivalent models sorted
    by ascending parameter count; non-comparable models form singleton
    groups keyed by their position.
    """
    grouped = OrderedDict()
    for position, model in enumerate(self.models):
        output = getattr(model, "output", {})
        if output.get("AIC") and output.get("BMD") and output["BMD"] > 0:
            key = "{}-{}".format(output["AIC"], output["BMD"])
            grouped.setdefault(key, []).append(model)
        else:
            # Missing/zero AIC or BMD: keep the model in its own group.
            grouped[position] = [model]

    def _num_params(model):
        has_params = hasattr(model, "output") and "parameters" in model.output
        return len(model.output["parameters"]) if has_params else 0

    for group in grouped.values():
        group.sort(key=_num_params)
    return list(grouped.values())
If AIC and BMD are numeric and identical then treat models as identical . Returns a list of lists . The outer list is a list of related models the inner list contains each individual model sorted by the number of parameters in ascending order .
14,823
def is_numeric(obj):
    """Duck-type test: True when *obj* supports +, -, *, ** and /."""
    try:
        obj + obj
        obj - obj
        obj * obj
        obj ** obj
        obj / obj
    except ZeroDivisionError:
        # Zero can't divide by itself, but it is certainly numeric.
        return True
    except Exception:
        return False
    return True
This detects whether an input object is numeric or not .
14,824
def handle(send, msg, args):
    """Scan a message for nick++ / nick-- tokens and update stored scores."""
    session = args['db']
    # Nick of 2-16 chars followed by ++ or --; (?<!-) avoids matching the
    # tail of a longer --- run.
    matches = re.findall(r"\b(?<!-)(%s{2,16})(\+\+|--)" % args['config']['core']['nickregex'], msg)
    if not matches:
        return
    if args['type'] == 'privmsg':
        send('Hey, no points in private messages!')
        return
    for match in matches:
        # Abuse/rate limiting: stop processing entirely when triggered.
        if args['abuse'](args['nick'], 5, 'scores'):
            return
        name, direction = match[0].lower(), match[1]
        if direction == "++":
            score = 1
            if name == args['nick'].lower():
                # Upvoting yourself is penalized instead.
                send("%s: No self promotion! You lose 10 points." % args['nick'])
                score = -10
        else:
            score = -1
        row = session.query(Scores).filter(Scores.nick == name).first()
        if row is None:
            session.add(Scores(score=score, nick=name))
            session.commit()
        else:
            row.score += score
            session.commit()
Handles scores .
14,825
def handle(send, msg, args):
    """Apply xkcd-style substitutions to a message and announce the correction."""
    output = textutils.gen_xkcd_sub(msg, True)
    if output is None:
        return
    nick = args['nick']
    if args['type'] == 'action':
        send("correction: * %s %s" % (nick, output))
    else:
        send("%s actually meant: %s" % (nick, output))
Implements several XKCD comics .
14,826
def cmd(send, msg, args):
    """Nuke somebody (by nick) in the current channel.

    Fix: the self-nuke check now runs before the channel-membership check.
    Previously, because the bot's own nick is normally in the channel user
    list, ``msg in users`` matched first and the bot would nuke itself.
    """
    c, nick = args['handler'].connection, args['nick']
    channel = args['target'] if args['target'] != 'private' else args['config']['core']['channel']
    if not msg:
        send("Nuke who?")
        return
    if msg == args['botnick']:
        send("Sorry, Self-Nuking is disabled pending aquisition of a Lead-Lined Fridge.")
        return
    with args['handler'].data_lock:
        users = args['handler'].channels[channel].users()
    if msg in users:
        do_nuke(c, nick, msg, channel)
    else:
        send("I'm sorry. Anonymous Nuking is not allowed")
Nukes somebody .
14,827
def trim(s, prefix=None, suffix=None, strict=False):
    """Remove the given *prefix* or *suffix* (exactly one) from *s*.

    With ``strict=True``, raise ValueError when the affix is absent instead
    of returning *s* unchanged.
    """
    ensure_string(s)
    if (prefix is None) == (suffix is None):
        raise ValueError("exactly one of either prefix or suffix must be provided")
    if prefix is not None:
        ensure_string(prefix)
        if s.startswith(prefix):
            return s[len(prefix):]
        if strict:
            raise ValueError("string %r does not start with expected prefix %r" % (s, prefix))
    else:
        ensure_string(suffix)
        if s.endswith(suffix):
            # Empty suffix: s[:-0] would be '', so return s unchanged.
            return s[:-len(suffix)] if suffix else s
        if strict:
            raise ValueError("string %r does not end with expected suffix %r" % (s, suffix))
    return s
Trim a string removing given prefix or suffix .
14,828
def join(delimiter, iterable, **kwargs):
    """Concatenate strings from *iterable* separated by *delimiter*.

    The optional ``errors`` keyword controls non-string elements:
    'raise'/True coerces (raising on failure), 'ignore'/None drops them,
    'cast'/False casts to the delimiter's type, and 'replace' substitutes
    them via the required ``with_`` keyword (a string or a callable).
    """
    ensure_string(delimiter)
    ensure_iterable(iterable)
    ensure_keyword_args(kwargs, optional=('errors', 'with_'))
    errors = kwargs.get('errors', True)
    if errors in ('raise', True):
        iterable = imap(ensure_string, iterable)
    elif errors in ('ignore', None):
        iterable = ifilter(is_string, iterable)
    elif errors in ('cast', False):
        iterable = imap(delimiter.__class__, iterable)
    elif errors == 'replace':
        if 'with_' not in kwargs:
            raise ValueError("'replace' error policy requires specifying "
                             "replacement through with_=")
        with_ = kwargs['with_']
        if is_string(with_):
            replacement = lambda x: with_
        elif callable(with_):
            replacement = with_
        else:
            raise TypeError("error replacement must be a string or function, "
                            "got %s" % type(with_).__name__)
        iterable = (x if is_string(x) else ensure_string(replacement(x))
                    for x in iterable)
    else:
        raise TypeError("%r is not a valid error handling policy for join()"
                        % (errors,))
    return delimiter.join(iterable)
Returns a string which is a concatenation of strings in iterable separated by given delimiter .
14,829
def camel_case(arg, capitalize=None):
    """Convert whitespace-separated words into a camel-cased string.

    ``capitalize`` governs the first word: True capitalizes it, False
    lower-cases its first letter, None keeps it exactly as given.
    """
    ensure_string(arg)
    if not arg:
        return arg
    words = split(arg)
    head = words[0] if len(words) > 0 else None
    words = [word.capitalize() for word in words]
    if head is not None:
        if capitalize is True:
            head = head.capitalize()
        elif capitalize is False:
            head = head[0].lower() + head[1:]
        # With capitalize=None the original (unmodified) first word is kept.
        words[0] = head
    return join(arg.__class__(), words)
Converts given text with whitespaces between words into equivalent camel - cased one .
14,830
def random(length, chars=None):
    """Generate a random string of *length* characters drawn from *chars*.

    *length* may be an integer or a (min, max) pair; *chars* defaults to
    ASCII letters plus digits.
    """
    if chars is None:
        chars = string.ascii_letters + string.digits
    else:
        ensure_string(chars)
        if not chars:
            raise ValueError("character set must not be empty")
    if is_pair(length):
        # A pair means: pick the length uniformly from this inclusive range.
        length = randint(*length)
    elif isinstance(length, Integral):
        if not length > 0:
            raise ValueError("random string length must be positive (got %r)" % (length,))
    else:
        raise TypeError("random string length must be an integer; "
                        "got '%s'" % type(length).__name__)
    return join(chars.__class__(), (choice(chars) for _ in xrange(length)))
Generates a random string .
14,831
def with_(self, replacement):
    """Provide the replacement string for all pending needles; returns self
    for chaining.

    :raises ReplacementError: if replacements were already provided.
    """
    ensure_string(replacement)
    if is_mapping(self._replacements):
        raise ReplacementError("string replacements already provided")
    self._replacements = dict.fromkeys(self._replacements, replacement)
    return self
Provide replacement for string needles .
14,832
def in_(self, haystack):
    """Perform all configured replacements within *haystack* and return the result."""
    from taipan.collections import dicts
    ensure_string(haystack)
    if not is_mapping(self._replacements):
        raise ReplacementError("string replacements not provided")
    if not self._replacements:
        return haystack
    if len(self._replacements) == 1:
        # Single needle: plain str.replace avoids the regex machinery.
        return haystack.replace(*dicts.peekitem(self._replacements))
    # Build one alternation regex from all needles, longest first so that
    # overlapping needles match greedily; keep the haystack's string type.
    or_ = haystack.__class__('|')
    regex = join(or_, imap(re.escape, sorted(self._replacements, key=len, reverse=True)))
    do_replace = lambda match: self._replacements[match.group()]
    return re.sub(regex, do_replace, haystack)
Perform replacement in given string .
14,833
def wrap_list(item):
    """Coerce *item* into a list: None -> [], list passes through unchanged,
    tuple/set are converted, anything else is wrapped as a singleton."""
    if item is None:
        return []
    if isinstance(item, list):
        return item
    if isinstance(item, (tuple, set)):
        return list(item)
    return [item]
Returns an object as a list .
14,834
def update_additive(dict1, dict2):
    """Merge *dict2* into *dict1* in place, recursing into nested mappings.

    Asserts that mapping-ness agrees on both sides when a key collides.
    """
    for key, value in dict2.items():
        if key in dict1 and isinstance(dict1[key], collections.Mapping):
            # Both sides must be mappings; merge them recursively.
            assert isinstance(value, collections.Mapping)
            update_additive(dict1[key], value)
        else:
            if key in dict1:
                assert not isinstance(value, collections.Mapping)
            dict1[key] = value
A utility method to update a dict or other mapping type with the contents of another dict .
14,835
def diff_dir(dir_cmp, left_path=True):
    """Recursively yield the path of every differing file in a
    filecmp.dircmp tree.

    *left_path* selects whether paths are rooted at the comparison's left
    or right side.
    """
    root = dir_cmp.left if left_path else dir_cmp.right
    for name in dir_cmp.diff_files:
        yield path.joinpath(root, name)
    for sub_cmp in dir_cmp.subdirs.values():
        for differing in diff_dir(sub_cmp, left_path):
            yield differing
A generator that given a filecmp . dircmp object yields the paths to all files that are different . Works recursively .
14,836
def get_params(self, *keys):
    """Return all instance parameters as a dict, or just the values for the
    requested *keys* (in order) as a list."""
    params = vars(self)
    if not keys:
        return params
    return [params[k] for k in keys]
Returns the specified parameters for the current preprocessor .
14,837
def _import_single_searches(self):
    """Populate self.output from one-line regex searches common across
    dataset types.

    Numeric matches are stored as floats, non-numeric matches as strings,
    and missing matches as -999. The bare ``except`` has been narrowed to
    ValueError (the only exception ``float`` raises on a matched string).
    """
    searches = {
        "BMD": r"(?<!Setting )BMD = +(%s)" % self.re_num,
        "BMDL": r"BMDL = +(%s)" % self.re_num,
        "BMDU": r"BMDU = +(%s)" % self.re_num,
        "CSF": r"Cancer Slope Factor = +(%s)" % self.re_num,
        "AIC": r"AIC: +(%s)" % (self.re_num),
        "model_version": r"Version: ([\d\.]+);",
        "model_date": r"Date: ([\d/]+)",
    }
    for key, pattern in searches.items():
        m = re.search(pattern, self.output_text)
        if not m:
            self.output[key] = -999
            continue
        try:
            self.output[key] = float(m.group(1))
        except ValueError:
            self.output[key] = m.group(1)
Look for simple one - line regex searches common across dataset types .
14,838
def _import_warnings(self):
    """Collect known BMDS warning messages found in the output text into
    self.output['warnings']."""
    known_warnings = (
        r"Warning: BMDL computation is at best imprecise for these data",
        r"THE MODEL HAS PROBABLY NOT CONVERGED!!!",
        "THIS USUALLY MEANS THE MODEL HAS NOT CONVERGED!",
        r"BMR value is not in the range of the mean function",
        r"BMD = 100\*\(maximum dose\)",
        r"BMDL computation failed\.",
        "Warning: optimum may not have been found. Bad completion code in Optimization routine.",
        "Warning: Likelihood for fitted model larger than the Likelihood for model A3.",
    )
    found = []
    for pattern in known_warnings:
        m = re.search(pattern, self.output_text)
        if m:
            found.append(m.group())
    self.output["warnings"] = found
Add custom warnings found in output files .
14,839
def _import_dich_vals(self):
    """Parse the dichotomous goodness-of-fit line (Chi^2, df, P-value) into
    self.output.

    Missing or non-numeric values are stored as -999. The original used a
    bare ``except`` that also hid the AttributeError raised when the line
    was absent (``m is None``); both cases are now handled explicitly.
    """
    m = re.search(
        r"Chi\^2 = ({0}|\w+) +d.f. = +({0}|\w+) +P-value = +({0}|\w+)".format(self.re_num),
        self.output_text,
    )
    cw = {1: "Chi2", 2: "df", 3: "p_value4"}
    for group_index, key in cw.items():
        if m is None:
            # Line not present at all: every value is missing.
            self.output[key] = -999
            continue
        try:
            self.output[key] = float(m.group(group_index))
        except ValueError:
            # \w+ can match non-numeric placeholders such as "NA".
            self.output[key] = -999
Import simple dichotomous values .
14,840
def transformation(func):
    """Decorator turning an array-returning function into an image transform.

    The wrapped function's result is converted to an Image, the transform
    name is appended to the image's history, and (when AutoWrite.on) the
    result is also written to an automatically named file.
    """
    @wraps(func)
    def func_as_transformation(*args, **kwargs):
        # Carry over the history of the (first) input image, if present.
        # NOTE(review): the list is shared, so the append below also mutates
        # the input image's history — confirm that is intended.
        if hasattr(args[0], 'history'):
            history = args[0].history
        else:
            history = []
        image = func(*args, **kwargs)
        image = Image.from_array(image, log_in_history=False)
        image.history = history
        image.history.append('Applied {} transform'.format(func.__name__))
        if AutoWrite.on:
            fpath = AutoName.name(func)
            try:
                if AutoWrite.auto_safe_dtype:
                    # Rescale to 0..255 so PIL can always write uint8.
                    safe_range_im = 255 * normalise(image)
                    pil_im = PIL.Image.fromarray(safe_range_im.astype(np.uint8))
                else:
                    pil_im = PIL.Image.fromarray(image)
            except TypeError:
                raise(TypeError("Cannot handle this data type: {}".format(image.dtype)))
            pil_im.save(fpath)
        return image
    return func_as_transformation
Function decorator to turn another function into a transformation .
14,841
def smooth_gaussian(image, sigma=1):
    """Return the image smoothed by a Gaussian kernel of the given *sigma*,
    using 'nearest' edge handling."""
    return scipy.ndimage.filters.gaussian_filter(image, sigma=sigma, mode="nearest")
Returns Gaussian smoothed image .
14,842
def equalize_adaptive_clahe(image, ntiles=8, clip_limit=0.01):
    """Return a contrast-limited adaptive histogram equalized (CLAHE) image.

    The return value is normalised to the range 0 to 1 (asserted below).
    """
    skimage_float_im = normalise(image)
    # NOTE(review): np.all() is True when every pixel is non-zero, which is
    # not the same as "no variation" — confirm this guard matches the error
    # message's intent.
    if np.all(skimage_float_im):
        raise(RuntimeError("Cannot equalise when there is no variation."))
    normalised = skimage.exposure.equalize_adapthist(skimage_float_im, ntiles_x=ntiles, ntiles_y=ntiles, clip_limit=clip_limit)
    # Sanity-check the documented 0..1 normalisation contract.
    assert np.max(normalised) == 1.0
    assert np.min(normalised) == 0.0
    return normalised
Return contrast limited adaptive histogram equalized image . The return value is normalised to the range 0 to 1 .
14,843
def threshold_otsu(image, multiplier=1.0):
    """Return a boolean mask: pixels above Otsu's threshold scaled by
    *multiplier*."""
    cutoff = skimage.filters.threshold_otsu(image) * multiplier
    return image > cutoff
Return image thresholded using Otsu's method.
14,844
def cmd(send, msg, args):
    """Report or compare the bot's git revision.

    Actions: no action prints the version; 'master' prints upstream's HEAD
    sha; 'check' compares local and upstream; 'commit' prints the local sha.
    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('action', choices=['check', 'master', 'commit'], nargs='?')
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    # Upstream master branch metadata from the GitHub API.
    api_output = get('https://api.github.com/repos/%s/branches/master' % args['config']['api']['githubrepo']).json()
    commit, version = misc.get_version(args['handler'].confdir)
    if not cmdargs.action:
        send(version)
        return
    if cmdargs.action == 'master':
        send(api_output['commit']['sha'])
    elif cmdargs.action == 'check':
        # commit is None when not running from a git checkout.
        if commit is None:
            send("Not running from git, version %s" % version)
        else:
            check = 'Same' if api_output['commit']['sha'] == commit else 'Different'
            send(check)
    elif cmdargs.action == 'commit':
        if commit is None:
            send("Not running from git, version %s" % version)
        else:
            send(commit)
Check the git revison .
14,845
def cmd(send, _, args):
    """Shout the command's name and append a random entry from `squirrels`."""
    shout = args['name'].upper()
    send("%s! %s" % (shout, random.choice(squirrels)))
Ships a product .
14,846
def getAsGrassAsciiRaster(self, tableName, rasterId=1, rasterIdFieldName='id', rasterFieldName='raster', newSRID=None):
    """Return a string representation of the raster in GRASS ASCII format.

    Exports via GDAL's AAIGrid (Arc/Info ASCII Grid) driver, then rewrites
    the Arc/Info header into the GRASS ASCII header form.
    """
    arcInfoGrid = self.getAsGdalRaster(rasterFieldName, tableName, rasterIdFieldName, rasterId, 'AAIGrid', newSRID).splitlines()
    # Parse the Arc/Info header: each line is "<key> <value>".
    nCols = int(arcInfoGrid[0].split()[1])
    nRows = int(arcInfoGrid[1].split()[1])
    xLLCorner = float(arcInfoGrid[2].split()[1])
    yLLCorner = float(arcInfoGrid[3].split()[1])
    cellSize = float(arcInfoGrid[4].split()[1])
    # Strip the five header lines, plus the optional NODATA line.
    for i in range(0, 5):
        arcInfoGrid.pop(0)
    if 'NODATA_value' in arcInfoGrid[0]:
        arcInfoGrid.pop(0)
    # Convert lower-left-corner origin + cell size to GRASS edge coordinates.
    north = yLLCorner + (cellSize * nRows)
    south = yLLCorner
    east = xLLCorner + (cellSize * nCols)
    west = xLLCorner
    grassHeader = ['cols: %s' % nCols, 'rows: %s' % nRows, 'west: %s' % west, 'east: %s' % east, 'south: %s' % south, 'north: %s' % north]
    # Insert in reverse so the header ends up in grassHeader's listed order.
    for header in grassHeader:
        arcInfoGrid.insert(0, header)
    arcInfoGridString = '\n'.join(arcInfoGrid)
    return arcInfoGridString
Returns a string representation of the raster in GRASS ASCII raster format .
14,847
def supportedGdalRasterFormats(cls, sqlAlchemyEngineOrSession):
    """Return a dict of GDAL raster formats supported by the database.

    Keys are format short names; values hold the description and creation
    options reported by ``st_gdaldrivers()``. Accepts either an Engine (a
    temporary session is created) or an existing Session.

    :raises TypeError: if the argument is neither an Engine nor a Session
        (previously this fell through and raised NameError on `session`).
    """
    if isinstance(sqlAlchemyEngineOrSession, Engine):
        sessionMaker = sessionmaker(bind=sqlAlchemyEngineOrSession)
        session = sessionMaker()
    elif isinstance(sqlAlchemyEngineOrSession, Session):
        session = sqlAlchemyEngineOrSession
    else:
        raise TypeError('sqlAlchemyEngineOrSession must be an Engine or a Session')
    statement = 'SELECT * FROM st_gdaldrivers() ORDER BY short_name;'
    result = session.execute(statement)
    supported = dict()
    for row in result:
        # Per st_gdaldrivers: row[1]=short name, row[2]=long name/description,
        # row[3]=creation options.
        supported[row[1]] = {'description': row[2], 'options': row[3]}
    return supported
Return a list of the supported GDAL raster formats .
14,848
def setColorRamp(self, colorRamp=None):
    """Set the color ramp of the raster converter instance (hue default)."""
    if not colorRamp:
        # NOTE(review): setDefaultColorRamp appears to be an instance method
        # that assigns self._colorRamp and returns None; calling it unbound
        # here passes the enum as `self` and assigns None below — confirm
        # this is the intended behavior.
        self._colorRamp = RasterConverter.setDefaultColorRamp(ColorRampEnum.COLOR_RAMP_HUE)
    else:
        self._colorRamp = colorRamp
Set the color ramp of the raster converter instance
14,849
def setDefaultColorRamp(self, colorRampEnum=ColorRampEnum.COLOR_RAMP_HUE):
    """Set this instance's color ramp to a generated default for the given
    enum (note: assigns self._colorRamp; does not return the ramp)."""
    self._colorRamp = ColorRampGenerator.generateDefaultColorRamp(colorRampEnum)
Sets the color ramp of the converter instance to a generated default for the given color ramp enum.
14,850
def isNumber(self, value):
    """Return True if *value* can be interpreted as a number.

    Now also returns False (instead of propagating TypeError) for values
    like None or lists that float() cannot convert; the redundant
    ``str(value)`` call was removed.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
Validate whether a value is a number or not
14,851
def check_exists(subreddit):
    """Return True when the subreddit's about.json responds like a real
    subreddit (HTTP 200 and not a 'Listing' payload)."""
    response = get('http://www.reddit.com/r/%s/about.json' % subreddit,
                   headers={'User-Agent': 'CslBot/1.0'})
    # A 'Listing' kind here appears to indicate reddit returned search
    # results rather than a subreddit — treated as non-existent.
    if response.json().get('kind') == 'Listing':
        return False
    return response.status_code == 200
Make sure that a subreddit actually exists .
14,852
def random_post(subreddit, apikey):
    """Return '** <title> - <shortlink>' for a random post in the given
    subreddit (or a random subreddit when None)."""
    where = '/r/random' if subreddit is None else '/r/%s' % subreddit
    # time.time() in the query string acts as a cache-buster for /random.
    urlstr = 'http://reddit.com%s/random?%s' % (where, time.time())
    url = get(urlstr, headers={'User-Agent': 'CslBot/1.0'}).url
    return '** %s - %s' % (get_title(url, apikey), get_short(url, apikey))
Gets a random post from a subreddit and returns a title and shortlink to it .
14,853
def parse_to_dict(self):
    """Parse the raw CSV file into a list of row dicts keyed by header.

    Rows whose column count differs from the header are reported and
    skipped. Also stores the raw header list on self.hdrs.
    """
    rows = []
    with open(self.fname, 'r') as f:
        self.hdrs = f.readline().split(',')
        for line in f:
            cols = line.split(',')
            if len(cols) != len(self.hdrs):
                print("Error parsing " + self.fname + " line : " + line)
                continue
            row = {}
            for ndx, col_header in enumerate(self.hdrs):
                row[col_header.strip('\n').strip()] = cols[ndx].strip('\n').strip()
            rows.append(row)
    return rows
parse raw CSV into dictionary
14,854
def get_random_choice(self):
    """Return the 'name' field of a randomly selected record from self.dat."""
    picked = self.dat[random.randint(0, len(self.dat) - 1)]
    return picked['name']
returns a random name from the class
14,855
def random_stats(self, all_stats, race, ch_class):
    """Build a stat dict for a character from the class and race tables.

    Each stat's base value comes from the matching class row, the race row's
    value is added on top, and the result is clamped to 1..10 (health stats
    are uncapped above 10).

    NOTE(review): despite the name, no randomness is applied in this body.
    """
    stats = []
    res = {}
    for s in all_stats:
        stats.append(s['stat'])
        res[s['stat']] = 0
    cur_stat = 0
    for stat in stats:
        # Base value from the character-class table.
        for ndx, i in enumerate(self.classes.dat):
            if i['name'] == ch_class:
                cur_stat = int(i[stat])
        # Racial modifier added on top.
        for ndx, i in enumerate(self.races.dat):
            if i['name'] == race:
                cur_stat += int(i[stat])
        # Clamp: minimum 1; maximum 10 except for health-type stats.
        if cur_stat < 1:
            cur_stat = 1
        elif cur_stat > 10:
            if stat not in ('Health', 'max_health'):
                cur_stat = 10
        res[stat] = cur_stat
    return res
create random stats based on the characters class and race This looks up the tables from CharacterCollection to get base stats and applies a close random fit
14,856
def load_from_file(self, fname):
    """OVERWRITE this character's attributes from stats saved in *fname*.

    Splits each line on the first ' = ' only (maxsplit=1), so values that
    themselves contain ' = ' no longer raise ValueError.
    """
    with open(fname, 'r') as f:
        for line in f:
            k, v = line.split(' = ', 1)
            self._parse_char_line_to_self(k, v)
OVERWRITES the current character object from stats in file
14,857
def _parse_char_line_to_self ( self , k , v ) : k = k . strip ( ' ' ) . strip ( '\n' ) v = v . strip ( ' ' ) . strip ( '\n' ) if k == 'CHARACTER' : self . name = v elif k == 'Race' : self . race = v elif k == 'Class' : self . ch_class = v elif k == 'STATS' : self . stats = self . _extract_stats_from_line ( v ) elif k == 'Story' : self . story = v . strip ( ' ' ) . strip ( '\n' ) elif k == 'SKILLS' : self . skills = v . split ( ', ' ) elif k == 'INVENTORY' : self . inventory = v . split ( ', ' )
takes a line from a saved file split into key and values and updates the appropriate self parameters of character .
14,858
def save_to_file(self, fname):
    """Write this character's string representation to *fname* (overwriting)."""
    with open(fname, 'w') as out:
        out.write(str(self))
saves a characters data to file
14,859
def copy(self):
    """Return a new Character built from this one's fields (shallow copy)."""
    return Character(
        self.name, self.race, self.ch_class,
        self.stats, self.skills, self.story, self.inventory,
    )
make an identical copy of the character
14,860
def palette(fg, bg=-1):
    """Return a curses color_pair for (fg, bg).

    curses only supports a finite number of initialised colour pairs, so
    previously seen (fg, bg) selections are memoised as attributes on this
    function itself.
    """
    # Function attributes serve as persistent memoisation state.
    if not hasattr(palette, "counter"):
        palette.counter = 1
    if not hasattr(palette, "selections"):
        palette.selections = {}
    selection = "%s%s" % (str(fg), str(bg))
    if not selection in palette.selections:
        palette.selections[selection] = palette.counter
        palette.counter += 1
    colors = [c for c in dir(_curses) if c.startswith('COLOR')]
    # Map colour names (e.g. "red") to curses COLOR_* constants; unknown
    # names fall back to -1 (the terminal default).
    if isinstance(fg, str):
        if not "COLOR_" + fg.upper() in colors:
            fg = -1
        else:
            fg = getattr(_curses, "COLOR_" + fg.upper())
    if isinstance(bg, str):
        if not "COLOR_" + bg.upper() in colors:
            bg = -1
        else:
            bg = getattr(_curses, "COLOR_" + bg.upper())
    _curses.init_pair(palette.selections[selection], fg, bg)
    return _curses.color_pair(palette.selections[selection])
Since curses only supports a finite number of initialised colour pairs, we memoise any selections you've made as an attribute on this function.
14,861
def start(self):
    """Initialise curses and run the window event loop until stopped."""
    self.window = _curses.initscr()
    _curses.savetty()  # snapshot TTY state so stop() can restore it
    _curses.start_color()
    _curses.use_default_colors()
    self.window.leaveok(1)
    _curses.raw()
    self.window.keypad(1)
    _curses.noecho()
    _curses.cbreak()
    _curses.nonl()
    _curses.curs_set(0)
    # Blocking mode waits for input each cycle; non-blocking polls.
    if self.blocking:
        self.window.nodelay(0)
    else:
        self.window.nodelay(1)
    self.running = True
    while self.running:
        self.cycle()
        # "Friendly" non-blocking mode sleeps between cycles to spare CPU.
        if self.friendly and not self.blocking:
            time.sleep(self.delay)
    self.stop()
Window event loop
14,862
def stop(self):
    """Restore the TTY to its original state and end the curses session."""
    _curses.nocbreak()
    self.window.keypad(0)
    _curses.echo()
    _curses.resetty()  # restore the state saved by savetty() in start()
    _curses.endwin()
    self.running = False
Restore the TTY to its original state .
14,863
def coordinate(self, panes=None, index=0):
    """Update pane coordinate tuples based on their height and width
    relative to other panes, within the dimensions of the current window.

    ``panes`` and ``index`` are accepted for backward compatibility but
    unused; the method always works on ``self.panes``.  The original
    mutable default ``panes=[]`` has been replaced with ``None``.
    """
    def span(y, x, width, height):
        # Upper and lower bounding lines for a pane at (y, x); panes of
        # height <= 1 collapse onto a single screen line.
        drop = height if height > 1 else 0
        upper = ((y, x), (y, x + width))
        lower = ((y + drop, x), (y + drop, x + width))
        return [upper, lower]

    y = 0
    for element in self.panes:
        x = 0
        if isinstance(element, list):
            # A row of panes laid out left-to-right on the same line(s).
            current_height = 0
            for pane in element:
                if pane.hidden:
                    continue
                pane.coords = span(y, x, pane.width, pane.height)
                current_height = pane.height
                x += pane.width
            y += current_height + 1 if current_height > 1 else 1
        else:
            if element.hidden:
                continue
            element.coords = span(y, x, element.width, element.height)
            y += element.height + 1 if element.height > 1 else 1

    if self.debug:
        coordinates = "Coordinates: " + str([p.coords for p in self])
        if len(coordinates) > self.width:
            coordinates = coordinates[:self.width - 3]
            coordinates += '...'
        self.addstr(self.height - 3, 0, coordinates)
Update pane coordinate tuples based on their height and width relative to other panes within the dimensions of the current window .
14,864
def addstr(self, h, w, text, attrs=0):
    """A safe ``addstr`` wrapper: refresh cached dimensions, skip writes
    that fall outside the window, and swallow curses drawing errors."""
    self.update_window_size()
    out_of_bounds = h > self.height or w > self.width
    if out_of_bounds:
        return
    try:
        self.window.addstr(h, w, text, attrs)
    except Exception:
        # NOTE(review): drawing at the very edge can raise in curses;
        # the original deliberately ignores it, so we do too.
        pass
A safe addstr wrapper
14,865
def update_window_size(self):
    """Refresh the cached window dimensions, clearing the screen when
    they have changed (i.e. on a terminal resize)."""
    height, width = self.window.getmaxyx()
    resized = (self.height, self.width) != (height, width)
    if resized:
        self.height, self.width = height, width
        self.window.clear()
Update the current window object with its current height and width and clear the screen if they ve changed .
14,866
def add(self, pane):
    """Add a new pane — or a row given as a list of panes — to the window,
    initialising each pane first."""
    if isinstance(pane, list):
        self.panes.append([self.init_pane(p) for p in pane])
    else:
        self.panes.append(self.init_pane(pane))
Adds new panes to the window
14,867
def get(self, name, default=None, cache=False):
    """Look up a pane by name.

    When ``cache`` is truthy the cached panes are searched instead of the
    live ones.  Returns ``default`` when no pane matches.  (Replaces the
    original ``cache == True`` comparison with idiomatic truthiness.)
    """
    source = self.cache if cache else self
    for pane in source:
        if pane.name == name:
            return pane
    return default
Get a pane by name, possibly from the cache. Return the given default if not found.
14,868
def process_input(self, character):
    """Dispatch *character* to a ``handle_<char>`` method if one exists.

    Subclasses implement ``handle_x``-style methods to react to keys.
    Replaces the original bare ``except:`` with the specific exceptions
    ``chr()`` can raise for invalid code points.
    """
    func = None
    try:
        func = getattr(self, "handle_%s" % chr(character), None)
    except (TypeError, ValueError):
        # chr() rejects non-ints and out-of-range codes; treat as unhandled.
        pass
    if func:
        func()
A subclassable method for dealing with input characters .
14,869
def cmd(send, msg, args):
    """Compiles stuff.

    Writes the message body to a temporary file, runs gcc on it, sends
    the first few diagnostic lines back to the invoking nick and then
    announces success or failure with a generated slogan.
    """
    if args['type'] == 'privmsg':
        send('GCC is a group exercise!')
        return
    # Joke policy: refuse code that pulls in headers or modules.
    if 'include' in msg:
        send("We're not a terribly inclusive community around here.")
        return
    if 'import' in msg:
        send("I'll have you know that standards compliance is important.")
        return
    tmpfile = tempfile.NamedTemporaryFile()
    for line in msg.splitlines():
        line = line + '\n'
        tmpfile.write(line.encode())
    tmpfile.flush()
    # Compile to /dev/null: only the diagnostics matter.
    process = subprocess.run(['gcc', '-o', '/dev/null', '-xc', tmpfile.name],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                             timeout=5, universal_newlines=True)
    tmpfile.close()
    output = process.stdout.splitlines()[:3]  # cap the spam at three lines
    for line in output:
        send(line, target=args['nick'])
    if process.returncode == 0:
        send(gen_slogan("gcc victory"))
    else:
        send(gen_slogan("gcc failed"))
Compiles stuff .
14,870
def get_image_list(self, page=1, per_page=20):
    """Return a list of the user's saved images.

    ``page`` and ``per_page`` control API pagination.
    """
    response = self._request_url(
        self.api_url + '/api/images',
        'get',
        params={'page': page, 'per_page': per_page},
        with_access_token=True,
    )
    headers, result = self._parse_and_check(response)
    images = ImageList.from_list(result)
    images.set_attributes_from_headers(headers)
    return images
Return a list of user s saved images
14,871
def upload_image(self, image_file, referer_url=None, title=None, desc=None,
                 created_at=None, collection_id=None):
    """Upload an image and return the created Image resource.

    Optional metadata fields are only sent when they are given.
    """
    data = {}
    optional = (
        ('referer_url', referer_url),
        ('title', title),
        ('desc', desc),
        ('created_at', None if created_at is None else str(created_at)),
        ('collection_id', collection_id),
    )
    for key, value in optional:
        if value is not None:
            data[key] = value
    response = self._request_url(
        self.upload_url + '/api/upload',
        'post',
        data=data,
        files={'imagedata': image_file},
        with_access_token=True,
    )
    headers, result = self._parse_and_check(response)
    return Image.from_dict(result)
Upload an image
14,872
def get_oembed(self, url):
    """Return an oEmbed-format JSON dictionary describing *url*."""
    response = self._request_url(
        self.api_url + '/api/oembed',
        'get',
        params={'url': url},
    )
    headers, result = self._parse_and_check(response)
    return result
Return an oEmbed format json dictionary
14,873
def _request_url(self, url, method, params=None, data=None, files=None,
                 with_client_id=False, with_access_token=False):
    """Send an HTTP request, optionally attaching the client id as a
    query parameter and/or the access token as a Bearer header.

    Wraps any requests-level failure in GyazoError.
    """
    params = {} if params is None else params
    data = {} if data is None else data
    headers = {}
    if with_client_id and self._client_id is not None:
        params['client_id'] = self._client_id
    if with_access_token and self._access_token is not None:
        headers['Authorization'] = "Bearer " + self._access_token
    try:
        return requests.request(method, url, params=params, data=data,
                                files=files, headers=headers)
    except requests.RequestException as e:
        raise GyazoError(str(e))
Send HTTP request
14,874
def validate_rows_length(data, length, message=None, exception=MatrixError):
    """Raise *exception* unless every row of *data* has *length* items."""
    if message is None:
        message = 'All rows must have the same length (same number of columns)'
    if any(len(row) != length for row in data):
        raise exception(message)
Validate that all rows have the same length .
14,875
def validate_square(data, message=None, exception=MatrixError):
    """Raise *exception* unless *data* has as many rows as columns."""
    rows = len(data)
    columns = len(data[0]) if data else 0
    if rows == columns:
        return
    if message is None:
        message = 'Number of rows: %s != number of columns: %s in matrix' % (
            rows, columns)
    raise exception(message)
Validate that the matrix has equal number of rows and columns .
14,876
def validate_categories_equal_entities(categories, entities, message=None,
                                       exception=MatrixError):
    """Raise *exception* when categories are given but their count differs
    from the number of entities."""
    nb_categories, nb_entities = len(categories), len(entities)
    if not categories or nb_categories == nb_entities:
        return
    if message is None:
        message = 'Number of categories: %s != number of entities: %s' % (
            nb_categories, nb_entities)
    raise exception(message)
Validate that the matrix has equal number of entities and categories .
14,877
def validate(self):
    """Run the base validation plus the constraint len(entities) == rows."""
    super().validate()
    nb_entities = len(self.entities)
    if nb_entities == self.rows:
        return
    raise self.error(
        'Number of entities: %s != number of rows: %s' % (nb_entities, self.rows))
Base validation + entities = rows .
14,878
def transitive_closure(self):
    """Return the boolean transitive closure of the matrix, computed with
    Floyd-Warshall.  ``self.data`` itself is left untouched."""
    closure = [[1 if cell else 0 for cell in row] for row in self.data]
    n = self.rows
    for k in range(n):
        for i in range(n):
            row_i = closure[i]
            if not row_i[k]:
                continue
            row_k = closure[k]
            for j in range(n):
                if row_k[j]:
                    row_i[j] = 1
    return closure
Compute the transitive closure of the matrix .
14,879
def validate(self):
    """Run the base validation plus: len(entities) == rows + columns."""
    super().validate()
    nb_entities = len(self.entities)
    expected = self.rows + self.columns
    if nb_entities != expected:
        raise self.error(
            'Number of entities: %s != number of rows + '
            'number of columns: %s+%s=%s' % (
                nb_entities, self.rows, self.columns, expected))
Base validation + entities = rows + columns .
14,880
def default_entities(self):
    """Return the stringified range 0 .. rows + columns - 1."""
    return list(map(str, range(self.rows + self.columns)))
Return range from 0 to rows + columns .
14,881
def validate(self):
    """Run the base validation plus the MDM structure check: diagonal
    cells must be DSM/MDM instances, off-diagonal cells DMM/MDM
    instances.  All violations are collected and raised together."""
    super().validate()
    message_dsm = ('Matrix at [%s:%s] is not an instance of '
                   'DesignStructureMatrix or MultipleDomainMatrix.')
    message_ddm = ('Matrix at [%s:%s] is not an instance of '
                   'DomainMappingMatrix or MultipleDomainMatrix.')
    messages = []
    for i, row in enumerate(self.data):
        for j, cell in enumerate(row):
            if i == j:
                expected = (DesignStructureMatrix, MultipleDomainMatrix)
                template = message_dsm
            else:
                expected = (DomainMappingMatrix, MultipleDomainMatrix)
                template = message_ddm
            if not isinstance(cell, expected):
                messages.append(template % (i, j))
    if messages:
        raise self.error('\n'.join(messages))
Base validation + each cell is instance of DSM or MDM .
14,882
def _calc_distortion ( self ) : m = self . _X . shape [ 0 ] self . distortion = 1 / m * sum ( linalg . norm ( self . _X [ i , : ] - self . centroids [ self . clusters [ i ] ] ) ** 2 for i in range ( m ) ) return self . distortion
Calculates the distortion value of the current clusters
14,883
def _move_centroids ( self ) : for k in range ( self . n_clusters ) : if k in self . clusters : centroid = np . mean ( self . _X [ self . clusters == k , : ] , axis = 0 ) self . centroids [ k ] = centroid else : self . n_clusters -= 1 self . centroids = self . centroids [ : self . n_clusters ] self . clusters -= 1 k -= 1
Calculate new centroids as the means of the samples in each cluster
14,884
def _closest_centroid ( self , x ) : closest_centroid = 0 distance = 10 ^ 9 for i in range ( self . n_clusters ) : current_distance = linalg . norm ( x - self . centroids [ i ] ) if current_distance < distance : closest_centroid = i distance = current_distance return closest_centroid
Returns the index of the closest centroid to the sample
14,885
def _assign_clusters ( self ) : self . clusters = np . array ( [ self . _closest_centroid ( x ) for x in self . _X ] )
Assign the samples to the closest centroids to create clusters
14,886
def fit(self, X):
    """The K-Means itself.

    Runs Lloyd's algorithm ``n_runs`` times from random initialisations
    and keeps the run with the lowest distortion.
    """
    self._X = super().cluster(X)
    candidates = []
    for _ in range(self.n_runs):
        self._init_random_centroids()
        # Iterate until the cluster assignment stops changing.
        # NOTE(review): assumes self.clusters already exists before the
        # first comparison — presumably set during initialisation; confirm.
        while True:
            prev_clusters = self.clusters
            self._assign_clusters()
            self._move_centroids()
            if np.all(prev_clusters == self.clusters):
                break
        self._calc_distortion()
        candidates.append((self.distortion, self.centroids, self.clusters))
    # Keep the best (lowest-distortion) run.
    candidates.sort(key=lambda x: x[0])
    self.distortion = candidates[0][0]
    self.centroids = candidates[0][1]
    self.clusters = candidates[0][2]
    return self
The K - Means itself
14,887
def cmd(send, *_):
    """Causes the bot to generate some jargon.

    Picks a random sentence template and fills each %s slot with a random
    word drawn from the module-level word lists (verb, noun, abbrev, adj,
    ingverb).  Note all eight templates are built before one is chosen.
    """
    # Word-list sequences matching the %s slots of each template below.
    words = [[verb, noun, abbrev, noun, adj, abbrev, noun],
             [verb, adj, abbrev, noun],
             [verb, abbrev, noun, verb, adj, noun],
             [verb, noun, ingverb, adj, abbrev, noun],
             [adj, abbrev, noun, verb, adj, noun],
             [abbrev, noun, verb, adj, noun, verb, abbrev, noun],
             [ingverb, noun, verb, adj, abbrev, noun],
             [verb, adj, abbrev, noun, verb, abbrev, noun]]
    msgtype = ["If we %s the %s, we can get to the %s %s through the %s %s %s!" % tuple(map(choice, words[0])),
               "We need to %s the %s %s %s!" % tuple(map(choice, words[1])),
               "Try to %s the %s %s, maybe it will %s the %s %s!" % tuple(map(choice, words[2])),
               "You can't %s the %s without %s the %s %s %s!" % tuple(map(choice, words[3])),
               "Use the %s %s %s, then you can %s the %s %s!" % tuple(map(choice, words[4])),
               "The %s %s is down, %s the %s %s so we can %s the %s %s!" % tuple(map(choice, words[5])),
               "%s the %s won't do anything, we need to %s the %s %s %s!" % tuple(map(choice, words[6])),
               "I'll %s the %s %s %s, that should %s the %s %s!" % tuple(map(choice, words[7]))]
    send(choice(msgtype))
Causes the bot to generate some jargon .
14,888
def _GenerateStaticsTable(self, title='Current Statistics'):
    """Generate a statistics table for the collected categories.

    Returns '' when no categories have been recorded.  Otherwise fills
    the module-level template ``r`` with the title, an underline, and the
    comma-joined category names and counts.  NOTE(review): ``r`` is
    defined elsewhere in this module — presumably an RST chart/table
    directive; confirm its placeholder order matches (title, underline,
    categories, values).
    """
    if len(self.__categories.keys()) < 1:
        return ''
    d = self.__categories
    keys = sorted(d.keys())
    cats = ', '.join(['"%s"' % k for k in keys])
    vals = ', '.join(['%d' % d[k] for k in keys])
    return r % (title, '-' * len(title), cats, vals)
Generates a statics table based on set categories
14,889
def _ProduceSingleContent(self, mod, showprivate=False, showinh=False):
    """Create the documentation page for a single module.

    Generates the RST needed to document ``mod`` (a ``(name, module)``
    pair) and writes it to its own file under ``content/``.  Returns the
    file's basename formatted as a toctree entry line.

    Raises RuntimeError when the module does not define ``__all__``.
    """
    try:
        all = mod[1].__all__
    except AttributeError:
        raise RuntimeError('Module (%s) MUST have `__all__` defined.' % mod[1].__name__)
    # Prefer a human-friendly display name when the module provides one.
    try:
        name = mod[1].__displayname__
    except AttributeError:
        name = mod[0]
    # Track per-category page counts for the statistics table.
    try:
        category = mod[1].__category__
        self.__categories.setdefault(category, 0)
        self.__categories[category] += 1
    except AttributeError:
        pass
    feats = inspect.getmembers(mod[1])
    fname = 'content/' + mod[1].__name__.replace('.', '/').replace(' ', '-') + '.rst'
    # Only document members exported in __all__ (optionally private ones too).
    feats = [f for f in feats if f[0] in all and (showprivate or not f[0][0:1] == '_')]
    with open(fname, 'w') as fid:
        fid.write(Classifier.GetModuleText(name, mod[1].__name__, showprivate=showprivate))
        for f in feats:
            if inspect.isclass(f[1]) or inspect.isfunction(f[1]):
                try:
                    featname = f[1].__displayname__
                except AttributeError:
                    featname = f[1].__name__
                # Count feature categories as well.
                try:
                    category = f[1].__category__
                    self.__categories.setdefault(category, 0)
                    self.__categories[category] += 1
                except AttributeError:
                    pass
                if inspect.isclass(f[1]):
                    fid.write(Classifier.GetClassText(featname,
                                                      '%s.%s' % (mod[1].__name__, f[1].__name__),
                                                      showprivate=showprivate,
                                                      showinh=showinh))
                elif inspect.isfunction(f[1]):
                    fid.write(Classifier.GetFunctionText(featname,
                                                         '%s.%s' % (mod[1].__name__, f[1].__name__)))
        fid.close()  # NOTE(review): redundant inside `with`; kept as-is
    return '\n %s' % (fname.split('/')[-1])
An internal helper to create a page for a single module. This will automatically generate the needed RST to document the module and save the module to its own page in its appropriate location.
14,890
def _ProduceContent ( self , mods , showprivate = False , showinh = False ) : result = '' nestedresult = '' for mod in mods : try : all = mod [ 1 ] . __all__ except AttributeError : raise RuntimeError ( 'Module (%s) MUST have `__all__` defined.' % mod [ 1 ] . __name__ ) if not showprivate and mod [ 0 ] [ 0 : 1 ] == '_' : continue if mod [ 0 ] [ 0 : 2 ] == '__' : continue result += self . _ProduceSingleContent ( mod , showprivate , showinh ) return result
An internal helper to create pages for several modules that do not have nested modules. This will automatically generate the needed RST to document each module and save each module to its own page appropriately.
14,891
def _MakePackagePages(self, package, showprivate=False, nested=False, showinh=False):
    """Generate all of the documentation pages for a given package.

    Modules without nested sub-modules get individual pages; sub-packages
    are handled recursively.  When *nested* is true an ``index.rst`` is
    written for the package and its toctree entry is returned; otherwise
    the joined page entries are returned.  NOTE(review): ``r`` is a
    module-level RST template defined elsewhere in this module.
    """
    def checkNoNested(mod):
        # True when the module exports no nested modules via __all__.
        try:
            all = mod.__all__
        except AttributeError:
            return False
        mems = inspect.getmembers(mod, inspect.ismodule)
        mems = [m for m in mems if m[0] in mod.__all__]
        if len(mems) > 0:
            return False
        return True
    mods = inspect.getmembers(package, inspect.ismodule)
    # Split members into plain modules, private modules and sub-packages.
    nmods, pvt, npkgs = [], [], []
    for mod in mods:
        if checkNoNested(mod[1]):
            if mod[0][0] == '_':
                pvt.append(mod)
            else:
                nmods.append(mod)
        else:
            npkgs.append(mod)
    if showprivate:
        nmods += pvt
    files = []
    ignore = []
    for pkg in npkgs:
        # Recreate the output directory for each sub-package, then recurse.
        pt = '%s/%s/%s' % (self.path, package.__name__.replace('.', '/'), pkg[1].__name__.split('.')[-1])
        if os.path.exists(pt):
            shutil.rmtree(pt)
        os.makedirs(pt)
        ignore += inspect.getmembers(pkg[1])
        f = self._MakePackagePages(pkg[1], showprivate=showprivate, nested=True, showinh=showinh)
        files.append(f.split(package.__name__.replace('.', '/') + '/')[1])
    if nested:
        try:
            name = package.__displayname__
        except AttributeError:
            name = package.__name__
        index = r % (name, '*' * len(name))
        index += '\n '.join(files)
        index += '\n ' + self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh)
        findex = 'content/%s/index.rst' % (package.__name__.replace('.', '/'))
        with open(findex, 'w') as f:
            # Lead with the package docstring when it has one.
            if package.__doc__:
                f.write(package.__doc__)
            f.write(index)
        return '\n ' + findex
    names = '\n %s/%s/' % (self.path, package.__name__.replace('.', '/'))
    # Modules already documented by a sub-package recursion are skipped.
    nmods = [m for m in nmods if m not in ignore]
    return names.join(self._ProduceContent(nmods, showprivate=showprivate, showinh=showinh).split('\n ') + files)
An internal helper to generate all of the pages for a given package
14,892
def _DocPackageFromTop(self, packages, showprivate=False, showinh=False):
    """Generate documentation for the given package(s) from the top level.

    Rebuilds the ``content`` tree, writes one About/index page per
    package (author/license/copyright/version metadata plus docstring
    and toctree) and returns the accumulated API-index toctree text.
    NOTE(review): ``r`` is a module-level RST template defined elsewhere.
    """
    appIndex = ''
    if not isinstance(packages, list):
        packages = [packages]
    # Start from a clean output directory.
    if os.path.exists('content'):
        shutil.rmtree('content')
    os.makedirs('content')
    appIndex += r % ('API Index')
    for i in range(len(packages)):
        package = packages[i]
        try:
            name = package.__displayname__
        except AttributeError:
            name = package.__name__
        path = 'content/%s' % package.__name__
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)
        # Collect optional package metadata for the About section.
        meta = 'About %s\n%s\n' % (name, '=' * len('About ' + name))
        author = getattr(package, "__author__", None)
        license = getattr(package, "__license__", None)
        copyright = getattr(package, "__copyright__", None)
        version = getattr(package, "__version__", None)
        if author:
            meta += '\n* Author: %s' % author
        if license:
            meta += '\n* License: %s' % license
        if copyright:
            meta += '\n* Copyright: %s' % copyright
        if version:
            meta += '\n* Version: %s' % version
        about = '%s/%s' % (path, 'index.rst')
        this_toc = r % (name)
        this_toc += self._MakePackagePages(package, showprivate=showprivate, showinh=showinh)
        # Toctree entries are written relative to the package's directory.
        this_toc = this_toc.replace('%s/' % path, '')
        with open(about, 'w') as f:
            f.write('%s\n\n' % meta)
            if package.__doc__:
                f.write(package.__doc__)
            f.write(this_toc)
        appIndex += '\n %s' % about
    return appIndex
Generates all of the documentation for given packages and appends new tocrees to the index . All documentation pages will be under the set relative path .
14,893
def eye(root=None, zodb_uri=None, port=8080):
    """Serve a WSGI app to browse objects based on a root object or ZODB URI.

    Exactly one of *root* / *zodb_uri* must be given; a bare filesystem
    path is promoted to a ``file://`` URI.  Setting the ``DEBUG``
    environment variable wraps the app in a post-mortem debugger.
    Blocks, serving on 127.0.0.1:*port*.

    Raises RuntimeError when neither root nor zodb_uri is supplied.
    """
    if root is not None:
        root_factory = lambda request: Node(root)
    elif zodb_uri is not None:
        if '://' not in zodb_uri:
            zodb_uri = 'file://' + os.path.abspath(zodb_uri)
        from repoze.zodbconn.finder import PersistentApplicationFinder
        finder = PersistentApplicationFinder(zodb_uri, appmaker=lambda root: Node(root))
        root_factory = lambda request: finder(request.environ)
    else:
        raise RuntimeError("Must specify root object or ZODB URI.")
    app = Eye(root_factory)
    if 'DEBUG' in os.environ:
        from repoze.debug.pdbpm import PostMortemDebug
        app = PostMortemDebug(app)
    serve(app, host='127.0.0.1', port=port)
Serves a WSGI app to browse objects based on a root object or ZODB URI .
14,894
def cmd(send, _, args):
    """Returns stats on the active users of the channel (voiced vs. total)."""
    if args['target'] == 'private':
        send("You're all alone!")
        return
    handler = args['handler']
    with handler.data_lock:
        channel = handler.channels[args['target']]
        active = sum(1 for x in handler.voiced[args['target']].values() if x)
        total = len(channel.users())
        send("%d active users, %d total users, %g%% active" % (active, total, active / total * 100))
Returns stats on the active users .
14,895
def determine_end_point(http_request, url):
    """Return 'aggregates', 'detail' or 'list' for the given request/url."""
    if url.endswith(('aggregates', 'aggregates/')):
        return 'aggregates'
    return 'detail' if is_detail_url(http_request, url) else 'list'
returns detail list or aggregates
14,896
def run_simulation(self):
    """Run ``num_fights`` battles between c1 and c2, tallying wins and
    recording the overall winner's name."""
    for _ in range(self.num_fights):
        # Reset both fighters to full health before each battle.
        self.c1.stats['Health'] = self.c1.stats['max_health']
        self.c2.stats['Health'] = self.c2.stats['max_health']
        bout = Battle(self.c1, self.c2, self.traits, self.rules, print_console='No')
        if bout.status == self.c1.name:
            self.num_c1 += 1
        else:
            self.num_c2 += 1
    self.winner = self.c1.name if self.num_c1 > self.num_c2 else self.c2.name
runs the simulation
14,897
def take_damage(self, c, dmg):
    """Apply *dmg* points of damage to whichever combatant matches *c* by name."""
    target = self.c1 if c.name == self.c1.name else self.c2
    target.stats['Health'] -= dmg
wrapper to apply damage taken to a character
14,898
def show_message(self, c_attack, c_defend, result, dmg, print_console='Yes'):
    """Format and (optionally) print one battle log line.

    Shows attacker/defender health percentages and the outcome; 'Miss'
    lines carry no damage amount.
    """
    def health_tag(c):
        # e.g. '[50%]' — percentage of max health remaining.
        return '[' + str(round((c.stats['Health'] * 100) / c.stats['max_health'])) + '%]'

    att = health_tag(c_attack).rjust(6)
    dfd = health_tag(c_defend).rjust(6)
    if result == 'Miss':
        txt = c_attack.name + ' ' + att + ' miss ' + c_defend.name + ' ' + dfd
    elif result == 'Crit':
        txt = c_attack.name + ' ' + att + ' CRIT ' + c_defend.name + ' ' + dfd + ' for ' + str(dmg)
    else:
        txt = c_attack.name + ' ' + att + ' hits ' + c_defend.name + ' ' + dfd + ' for ' + str(dmg)
    if print_console == 'Yes':
        print(txt)
function to wrap the display of the battle messages
14,899
def iterqueue(queue, expected):
    """Yield every item from *queue* until *expected* EXIT sentinels
    (compared by equality, like ``iter(callable, sentinel)``) are seen."""
    remaining = expected
    while remaining > 0:
        item = queue.get()
        if item == EXIT:
            remaining -= 1
            continue
        yield item
Iterate all values from the queue until the expected number of EXIT sentinel elements has been received.