idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
54,700
def read_input(self, input_cls, filename, **kwargs):
    """Read an input file and do minimal preformatting.

    input_cls - the class to use to read the input
    filename - input filename

    Returns the data produced by the reader's get_data().
    """
    reader = input_cls()
    reader.read_input(filename)
    return reader.get_data()
Read in input and do some minimal preformatting input_cls - the class to use to read the input filename - input filename
54,701
def reformat_file(self, input_file, input_format, output_format):
    """Reformat one input data file into a format the tasks can use.

    input_file - filename (may be None)
    input_format - format of the file on disk (may be None)
    output_format - format required by the tasks

    Returns the reformatted data, or None when the file/format is missing,
    the input class cannot be found, or the file cannot be read.
    Raises Exception when no formatter can convert between the formats.
    """
    if input_file is None or input_format is None:
        return None

    # Locate and instantiate the reader class for the on-disk format.
    try:
        input_cls = self.find_input(input_format)
        input_inst = input_cls()
    except TypeError:
        return None

    # Missing/unreadable files are treated as "no data".
    try:
        input_inst.read_input(self.absolute_filepath(input_file))
    except IOError:
        return None

    formatter = find_needed_formatter(input_format, output_format)
    if formatter is None:
        # BUG FIX: previously formatted self.input_format, which need not be
        # the format actually passed into this call.
        raise Exception(
            "Cannot find a formatter that can convert from {0} to {1}".format(
                input_format, output_format))
    formatter_inst = formatter()
    formatter_inst.read_input(input_inst.get_data(), input_format)
    return formatter_inst.get_data(output_format)
Reformat input data files to a format the tasks can use
54,702
def reformat_input(self, **kwargs):
    """Reformat the input and target files into every format the tasks need.

    Returns a dict mapping each needed format to {'data': ..., 'target': ...}.
    """
    self.needed_formats = list({task_cls.data_format for task_cls in self.tasks})
    return {
        fmt: {
            'data': self.reformat_file(self.input_file, self.input_format, fmt),
            'target': self.reformat_file(self.target_file, self.target_format, fmt),
        }
        for fmt in self.needed_formats
    }
Reformat input data
54,703
def _create_modulename(cdef_sources, source, sys_version):
    """Build a deterministic CFFI module name from the sources and a
    truncated interpreter version.

    This is the same scheme as CFFI's own ``_create_modulename`` except the
    CFFI version is not included.
    """
    combined = '\x00'.join([sys_version[:3], source, cdef_sources]).encode('utf-8')
    # NOTE: lstrip('0x') strips any leading '0'/'x' characters (not only the
    # literal "0x" prefix) -- kept as-is for compatibility with existing names.
    part_a = hex(binascii.crc32(combined[0::2]) & 0xffffffff).lstrip('0x').rstrip('L')
    part_b = hex(binascii.crc32(combined[1::2]) & 0xffffffff).lstrip('0').rstrip('L')
    return '_xprintidle_cffi_{0}{1}'.format(part_a, part_b)
This is the same as CFFI's create_modulename, except we don't include the CFFI version.
54,704
def is_authenticated_with_token(self):
    """GPGAuth stage 2: verify the user token against the login endpoint.

    Raises GPGAuthStage2Exception when the server reply is malformed.
    Persists the session cookies and returns True on success.
    """
    response = post_log_in(
        self,
        keyid=self.user_fingerprint,
        user_token_result=self.user_auth_token,
    )
    if not check_server_login_stage2_response(response):
        raise GPGAuthStage2Exception("Login endpoint wrongly formatted")
    self.cookies.save(ignore_discard=True)
    logger.info('is_authenticated_with_token: OK')
    return True
GPGAuth Stage 2
54,705
def save(self, obj, run_id):
    """Save a workflow instance under an identifier derived from the run.

    obj - workflow instance to save
    run_id - unique id of the run
    """
    identifier = self.generate_save_identifier(obj, run_id)
    self.store.save(obj, identifier)
Save a workflow obj - instance of a workflow to save run_id - unique id to give the run
54,706
def setup_tasks(self, tasks):
    """Resolve "category.namespace.name" strings into task classes.

    tasks - list of dotted identifier strings

    Raises TypeError when a task cannot be found in the registry.
    """
    resolved = []
    for spec in tasks:
        category, namespace, name = spec.split(".")
        try:
            cls = find_in_registry(category=category, namespace=namespace, name=name)[0]
        except TypeError:
            log.error("Could not find the task with category.namespace.name {0}".format(spec))
            raise TypeError
        resolved.append(cls)
    self.tasks = resolved
Find task classes from category . namespace . name strings tasks - list of strings
54,707
def initialize_workflow(self, workflow):
    """Instantiate ``workflow`` and copy this run's configuration onto it.

    workflow - a workflow class
    """
    instance = workflow()
    instance.tasks = self.tasks
    instance.input_file = self.input_file
    instance.input_format = self.input_format
    instance.target_file = self.target_file
    instance.target_format = self.target_format
    instance.run_id = self.run_id
    self.workflow = instance
    self.workflow.setup()
Create a workflow workflow - a workflow class
54,708
def reformat_filepath(self, config_file, filename):
    """Turn a path relative to the config file into an absolute path.

    Absolute paths (starting with "/") are returned unchanged.
    """
    if filename.startswith("/"):
        return filename
    return self.config_file_format.format(config_file, filename)
Convert relative paths in config file to absolute
54,709
def item_lister(command, _connection, page_size, page_number, sort_by, sort_order,
                item_class, result_set, **kwargs):
    """Generator yielding Video/Playlist items page by page until exhausted.

    Updates ``result_set.total_count`` and ``result_set.page_number`` as it
    walks the pages.
    """
    current_page = page_number
    while True:
        collection = _connection.get_list(
            command, page_size=page_size, page_number=current_page,
            sort_by=sort_by, sort_order=sort_order, item_class=item_class,
            **kwargs)
        result_set.total_count = collection.total_count
        result_set.page_number = current_page
        for item in collection.items:
            yield item
        # A negative total or a zero page size signals the end of listing.
        if collection.total_count < 0 or collection.page_size == 0:
            break
        if not collection.items:
            break
        current_page += 1
A generator function for listing Video and Playlist objects .
54,710
def get_manifest(self, asset_xml):
    """Construct and return the XML manifest to deliver along with a video file.

    asset_xml - pre-rendered <asset .../> fragment to embed
    """
    manifest = '<?xml version="1.0" encoding="utf-8"?>'
    manifest += '<publisher-upload-manifest publisher-id="%s" ' % self.publisher_id
    manifest += 'preparer="%s" ' % self.preparer
    if self.report_success:
        manifest += 'report-success="TRUE">\n'
    else:
        # BUG FIX: the opening tag was previously left unclosed when
        # report_success was falsy, producing malformed XML.
        manifest += '>\n'
    for notify in self.notifications:
        manifest += '<notify email="%s"/>' % notify
    if self.callback:
        manifest += '<callback entity-url="%s"/>' % self.callback
    manifest += asset_xml
    manifest += '</publisher-upload-manifest>'
    return manifest
Construct and return the xml manifest to deliver along with video file .
54,711
def _send_file(self, filename):
    """Send a file via FTP in binary mode.

    filename - local path of the file to upload; it is stored on the server
    under its basename.

    BUG FIX: the FTP connection and the local file handle were previously
    never closed, leaking both; also replaces the Python-2-only ``file()``
    builtin with ``open()``.
    """
    ftp = ftplib.FTP(host=self.host)
    try:
        ftp.login(user=self.user, passwd=self.password)
        ftp.set_pasv(True)
        with open(filename, 'rb') as fp:
            ftp.storbinary("STOR %s" % os.path.basename(filename), fp)
    finally:
        ftp.close()
Sends a file via FTP .
54,712
def _post(self, data, file_to_upload=None):
    # Make the POST request to the write API.
    #
    # data - JSON-RPC payload (dict) to serialize into the request
    # file_to_upload - optional local path; when given the request is sent
    #   as multipart/form-data with the file attached, otherwise as a simple
    #   urlencoded POST.
    #
    # Returns the 'result' member of the decoded JSON response; raises a
    # BrightcoveError when the response carries a truthy 'error' member.
    # NOTE(review): uses Python-2-only names (file, urllib.urlencode,
    # urllib2) -- this module appears to target Python 2.
    params = {"JSONRPC": simplejson.dumps(data)}
    req = None
    if file_to_upload:
        # Multipart upload path: JSON payload plus the file body.
        req = http_core.HttpRequest(self.write_url)
        req.method = 'POST'
        req.add_body_part("JSONRPC", simplejson.dumps(data), 'text/plain')
        upload = file(file_to_upload, "rb")
        req.add_body_part("filePath", upload, 'application/octet-stream')
        req.end_of_parts()
        content_type = "multipart/form-data; boundary=%s" % http_core.MIME_BOUNDARY
        req.headers['Content-Type'] = content_type
        req.headers['User-Agent'] = config.USER_AGENT
        req = http_core.ProxiedHttpClient().request(req)
    else:
        # Plain POST path: payload goes in the urlencoded 'json' field.
        msg = urllib.urlencode({'json': params['JSONRPC']})
        req = urllib2.urlopen(self.write_url, msg)
    if req:
        result = simplejson.loads(req.read())
        if 'error' in result and result['error']:
            exceptions.BrightcoveError.raise_exception(result['error'])
        return result['result']
Make the POST request .
54,713
def _get_response(self, **kwargs):
    """Make the GET request against the read API.

    Keyword arguments with truthy values are appended as query parameters
    (lists/tuples are comma-joined).  Returns the decoded JSON payload.
    Raises BrightcoveError on an API error and NoDataFoundError when the
    response is empty.
    """
    url = self.read_url + "?output=JSON&token=%s" % self.read_token
    for key, val in kwargs.items():
        if key and val:
            if isinstance(val, (list, tuple)):
                val = ",".join(val)
            url += "&%s=%s" % (key, val)
    self._api_url = url
    req = urllib2.urlopen(url)
    data = simplejson.loads(req.read())
    self._api_raw_data = data
    # BUG FIX: was `data == None`; use an identity check for None.
    if data is None:
        raise exceptions.NoDataFoundError("No data found for %s" % repr(kwargs))
    if data.get('error', None):
        exceptions.BrightcoveError.raise_exception(data['error'])
    return data
Make the GET request .
54,714
def get_list(self, command, item_class, page_size, page_number, sort_by, sort_order, **kwargs):
    """Fetch one page of items from the API.

    Not intended to be called directly; used by the ItemResultSet iterator.
    """
    data = self._get_response(
        command=command,
        page_size=page_size,
        page_number=page_number,
        sort_by=sort_by,
        sort_order=sort_order,
        video_fields=None,
        get_item_count="true",
        **kwargs)
    return ItemCollection(data=data, item_class=item_class, _connection=self)
Not intended to be called directly, but rather by the ItemResultSet object iterator.
54,715
def setup_formats(self):
    """Inspect this instance's methods to record what it can convert from
    (``from_*`` methods) and to (``to_*`` methods)."""
    for name in self.get_methods():
        # BUG FIX: re.sub removed *every* occurrence of the prefix string
        # anywhere in the name (e.g. "from_from_x" -> "x"); strip only the
        # leading prefix instead.
        if name.startswith("from_"):
            self.input_formats.append(name[len("from_"):])
        elif name.startswith("to_"):
            self.output_formats.append(name[len("to_"):])
Inspects its methods to see what it can convert from and to
54,716
def get_data(self, data_format):
    """Convert the common internal format to ``data_format`` output.

    data_format - the desired output format

    Raises Exception when no ``to_<data_format>`` converter is available.
    """
    if data_format not in self.output_formats:
        raise Exception(
            "Output format {0} not available with this class. Available formats are {1}.".format(
                data_format, self.output_formats))
    return getattr(self, "to_" + data_format)()
Reads the common format and converts to output data data_format - the format of the output data . See utils . input . dataformats
54,717
def from_csv(self, input_data):
    """Convert csv rows (first row is the header) into a list of dicts."""
    rows = iter(input_data)
    try:
        headers = next(rows)
    except StopIteration:
        return []
    # Index each row by the header positions (extra row cells are ignored,
    # short rows raise IndexError, matching the header-driven lookup).
    return [{h: row[j] for (j, h) in enumerate(headers)} for row in rows]
Reads csv format input data and converts to json .
54,718
def to_dataframe(self):
    """Convert the common format (list of dicts in ``self.data``) to a
    pandas DataFrame, casting numeric-looking columns to float.

    NOTE(review): assumes self.data is non-empty and every row shares the
    keys of the first row -- confirm with callers.
    """
    keys = self.data[0].keys()
    # One column per key, preserving row order.
    column_list = [[row[k] for row in self.data] for k in keys]
    df = DataFrame(np.asarray(column_list).transpose(), columns=keys)
    # BUG FIX: xrange() is Python-2-only; range() behaves identically here.
    for i in range(df.shape[1]):
        if is_number(df.iloc[:, i]):
            df.iloc[:, i] = df.iloc[:, i].astype(float)
    return df
Reads the common format self . data and writes out to a dataframe .
54,719
def check_extensions(extensions: Set[str], allow_multifile: bool = False):
    """Check that every extension in the provided set is valid.

    extensions - set of extension strings to validate
    allow_multifile - whether multifile extensions are acceptable
    """
    check_var(extensions, var_types=set, var_name='extensions')
    for extension in extensions:
        check_extension(extension, allow_multifile=allow_multifile)
Utility method to check that all extensions in the provided set are valid
54,720
def are_worth_chaining(parser, to_type: Type[S], converter: Converter[S, T]) -> bool:
    """Return True when chaining ``parser`` with ``converter`` towards
    ``to_type`` would bring value (the chain is not redundant)."""
    if not parser.can_chain:
        return False
    # A specific target reached through an "any"-typed converter is useful.
    if not is_any_type(to_type) and is_any_type(converter.to_type):
        return True
    # Converting to a subtype of what is already produced adds nothing.
    return not issubclass(to_type, converter.to_type)
Utility method to check if it makes sense to chain this parser with the given destination type and the given converter to create a parsing chain . Returns True if it brings value to chain them .
54,721
def _execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
    """Perform the parsing and return the parsed object.

    Abstract hook: implementing classes should perform the parsing here,
    possibly using custom methods of ``self.parser``.

    logger - logger to report progress to
    options - per-parser option dictionaries
    """
    pass
Implementing classes should perform the parsing here possibly using custom methods of self . parser .
54,722
def create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject,
                        logger: Logger, options: Dict[str, Dict[str, Any]]) -> ParsingPlan[T]:
    """Create a parsing plan to parse ``filesystem_object`` into
    ``desired_type``.

    Abstract hook: implementing classes may wish to support additional
    parameters through ``options``.

    desired_type - the type the caller wants to obtain
    filesystem_object - the persisted object to parse
    logger - logger to report progress to
    options - per-parser option dictionaries
    """
    pass
Creates a parsing plan to parse the given filesystem object into the given desired_type . Implementing classes may wish to support additional parameters .
54,723
def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer,
        f_asset_group, f_confirmed):
    """Add a t_hosts record by delegating to the send client."""
    return self.send.host_add(
        f_ipaddr, f_macaddr, f_hostname, f_netbios_name,
        f_engineer, f_asset_group, f_confirmed)
Add a t_hosts record
54,724
def retrieve_data(self):
    """Retrieve historic data as a DataFrame with zeros masked as NaN."""
    frame = self.manager.get_historic_data(self.start.date(), self.end.date())
    frame.replace(0, np.nan, inplace=True)
    return frame
Retrives data as a DataFrame .
54,725
def get_min_risk(self, weights, cov_matrix):
    """Return the allocation that minimizes portfolio variance w' C w."""
    def variance(w):
        # Portfolio variance: w' * C * w
        return np.matmul(np.matmul(w.transpose(), cov_matrix), w)

    def variance_gradient(w):
        # d/dw (w' C w) = w' C' + w' C
        return (np.matmul(w.transpose(), cov_matrix.transpose())
                + np.matmul(w.transpose(), cov_matrix))

    constraints = ({'type': 'eq', 'fun': lambda w: (w.sum() - 1)})
    solution = self.solve_minimize(variance, weights, constraints,
                                   func_deriv=variance_gradient)
    return solution.x
Minimizes the variance of a portfolio .
54,726
def get_max_return(self, weights, returns):
    """Return the maximum achievable portfolio return (weights sum to 1)."""
    def negated_return(w):
        # Negate so the minimizer maximizes the dot product.
        return np.dot(w, returns.values) * -1

    constraints = ({'type': 'eq', 'fun': lambda w: (w.sum() - 1)})
    solution = self.solve_minimize(negated_return, weights, constraints)
    return solution.fun * -1
Maximizes the returns of a portfolio .
54,727
def efficient_frontier(self, returns, cov_matrix, min_return, max_return, count):
    """Return a DataFrame of efficient portfolio allocations (percent per
    coin) for ``count`` target returns between ``min_return`` and
    ``max_return``.
    """
    coins = list(self.SUPPORTED_COINS)
    weights = [1 / len(coins)] * len(coins)

    def variance(w):
        return np.matmul(np.matmul(w.transpose(), cov_matrix), w)

    def variance_gradient(w):
        return (np.matmul(w.transpose(), cov_matrix.transpose())
                + np.matmul(w.transpose(), cov_matrix))

    rows = []
    for target in np.linspace(min_return, max_return, count):
        constraints = (
            {'type': 'eq', 'fun': lambda w: (w.sum() - 1)},
            # Bind the loop variable so each constraint keeps its own target.
            {'type': 'ineq', 'fun': lambda w, t=target: (np.dot(w, returns.values) - t)},
        )
        solution = self.solve_minimize(variance, weights, constraints,
                                       func_deriv=variance_gradient)
        # Truncate (not round) each allocation to 2 decimals, in percent.
        rows.append({coin: math.floor(solution.x[i] * 100 * 100) / 100
                     for i, coin in enumerate(coins)})
    # BUG FIX: DataFrame.append() was removed in pandas 2.0; build the frame
    # from the collected rows instead (also stops shadowing `columns`).
    return pd.DataFrame(rows, columns=coins)
Returns a DataFrame of efficient portfolio allocations for count risk indices .
54,728
def solve_minimize(self, func, weights, constraints, lower_bound=0.0,
                   upper_bound=1.0, func_deriv=False):
    """Solve a bounded SLSQP minimization and return the scipy solution.

    Every coin weight is bounded to [lower_bound, upper_bound].
    """
    bounds = ((lower_bound, upper_bound),) * len(self.SUPPORTED_COINS)
    return minimize(
        fun=func, x0=weights, jac=func_deriv, bounds=bounds,
        constraints=constraints, method='SLSQP', options={'disp': False})
Returns the solution to a minimization problem .
54,729
def allocate(self):
    """Build the efficient frontier from historic coin prices.

    Computes per-coin relative changes, their nan-aware risks and mean
    returns, patches the covariance diagonal with those variances, then
    evaluates the frontier between the minimum-risk return and the maximum
    attainable return (6 points).
    """
    df = self.manager.get_historic_data()[self.SUPPORTED_COINS]
    change_columns = []
    for column in df:
        if column in self.SUPPORTED_COINS:
            change_column = '{}_change'.format(column)
            # Relative change measured against the *next* row's value
            # (note the shift(-1) in both numerator and denominator).
            values = pd.Series((df[column].shift(-1) - df[column]) / -df[column].shift(-1)).values
            df[change_column] = values
            change_columns.append(change_column)
    columns = change_columns
    risks = df[columns].apply(np.nanvar, axis=0)
    returns = df[columns].apply(np.nanmean, axis=0)
    cov_matrix = df[columns].cov()
    # Overwrite the covariance diagonal with the nan-aware variances.
    cov_matrix.values[[np.arange(len(self.SUPPORTED_COINS))] * 2] = df[columns].apply(np.nanvar, axis=0)
    # Equal starting weights as a column vector.
    # NOTE(review): `1 / len(...)` is integer division on Python 2 -- confirm
    # the intended interpreter.
    weights = np.array([1 / len(self.SUPPORTED_COINS)] * len(self.SUPPORTED_COINS)).reshape(len(self.SUPPORTED_COINS), 1)
    min_risk = self.get_min_risk(weights, cov_matrix)
    min_return = np.dot(min_risk, returns.values)
    max_return = self.get_max_return(weights, returns)
    frontier = self.efficient_frontier(returns, cov_matrix, min_return, max_return, 6)
    return frontier
Returns an efficient portfolio allocation for the given risk index .
54,730
def handle_default_options(options):
    """Handle the shared ``settings`` and ``pythonpath`` options.

    options - a Values instance from OptionParser

    Side effects: exports PERCEPT_SETTINGS_MODULE and appends the expanded
    pythonpath plus its parent directory to sys.path.
    """
    if options.settings:
        os.environ['PERCEPT_SETTINGS_MODULE'] = options.settings
    if options.pythonpath:
        expanded = os.path.abspath(os.path.expanduser(options.pythonpath))
        options.pythonpath = expanded
        parent = os.path.abspath(os.path.join(expanded, ".."))
        sys.path.append(expanded)
        sys.path.append(parent)
    return options
Pass in a Values instance from OptionParser . Handle settings and pythonpath options - Values from OptionParser
54,731
def create_parser ( self , prog_name , subcommand ) : parser = OptionParser ( prog = prog_name , usage = self . usage ( subcommand ) , option_list = self . option_list ) return parser
Create an OptionParser prog_name - Name of a command subcommand - Name of a subcommand
54,732
def hook(name=None, *args, **kwargs):
    """Decorator registering the wrapped function as a hook.

    The hook name defaults to the function's own name; extra positional and
    keyword arguments are stored alongside it on ``f.hooks``.
    """
    def decorator(f):
        if not hasattr(f, "hooks"):
            f.hooks = []
        hook_name = name or f.__name__
        f.hooks.append((hook_name, args, kwargs))
        return f
    return decorator
Decorator to register the function as a hook
54,733
def expose(rule, **options):
    """Decorator adding a URL rule to the wrapped function.

    A list/tuple rule is merged item-by-item into ``f.urls``; a single rule
    is stored together with its options as a ``(rule, options)`` pair.
    """
    def decorator(f):
        if not hasattr(f, "urls"):
            f.urls = []
        if isinstance(rule, (list, tuple)):
            # Pre-built (rule, options) pairs: merge as-is.
            f.urls.extend(rule)
        else:
            f.urls.append((rule, options))
        return f
    return decorator
Decorator to add an url rule to a function
54,734
def _create_unicode_map():
    """Create the inverse map from unicode to betacode.

    Both the NFC-normalized and the raw form of every character map back to
    its betacode token; the final lowercase sigma maps to plain 's'.
    """
    unicode_map = {}
    for beta, uni in _map.BETACODE_MAP.items():
        unicode_map[unicodedata.normalize('NFC', uni)] = beta
        unicode_map[uni] = beta
    unicode_map[unicodedata.normalize('NFC', _FINAL_LC_SIGMA)] = 's'
    unicode_map[_FINAL_LC_SIGMA] = 's'
    return unicode_map
Create the inverse map from unicode to betacode .
54,735
def _create_conversion_trie(strict):
    """Create the trie for betacode conversion.

    In strict mode tokens are registered verbatim; otherwise every
    permutation of a token's diacritics is accepted, in lower and upper case.
    """
    trie = pygtrie.CharTrie()
    for beta, uni in _map.BETACODE_MAP.items():
        if strict:
            trie[beta] = uni
            continue
        base, diacritics = beta[0], beta[1:]
        for perm in itertools.permutations(diacritics):
            token = base + ''.join(perm)
            trie[token.lower()] = uni
            trie[token.upper()] = uni
    return trie
Create the trie for betacode conversion .
54,736
def _find_max_beta_token_len():
    """Return the length of the longest betacode token (-1 for an empty map)."""
    return max((len(beta) for beta in _map.BETACODE_MAP), default=-1)
Finds the maximum length of a single betacode token .
54,737
def beta_to_uni(text, strict=False):
    """Convert the given text from betacode to unicode.

    text - betacode string to convert
    strict - when False, diacritics within a token may appear in any order

    Returns the converted string, with medial sigmas at word boundaries
    rewritten to final sigmas.
    """
    # Memoize the conversion trie per (strict,) parameter combination.
    param_key = (strict,)
    try:
        t = _BETA_CONVERSION_TRIES[param_key]
    except KeyError:
        t = _create_conversion_trie(*param_key)
        _BETA_CONVERSION_TRIES[param_key] = t
    transform = []
    idx = 0
    possible_word_boundary = False
    while idx < len(text):
        if possible_word_boundary and _penultimate_sigma_word_final(transform):
            # The sigma two entries back turned out to be word-final.
            transform[-2] = _FINAL_LC_SIGMA
        # Greedily match the longest betacode token starting at idx.
        step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])
        if step:
            possible_word_boundary = text[idx] in _BETA_PUNCTUATION
            key, value = step
            transform.append(value)
            idx += len(key)
        else:
            # No token matches here; copy the character through unchanged.
            possible_word_boundary = True
            transform.append(text[idx])
            idx += 1
    # Handle a sigma at the very end of the input.
    if possible_word_boundary and _penultimate_sigma_word_final(transform):
        transform[-2] = _FINAL_LC_SIGMA
    elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:
        transform[-1] = _FINAL_LC_SIGMA
    converted = ''.join(transform)
    return converted
Converts the given text from betacode to unicode .
54,738
def uni_to_beta(text):
    """Convert unicode text to its betacode equivalent.

    Characters without a betacode mapping are passed through unchanged.
    """
    mapping = _UNICODE_MAP
    return ''.join(mapping.get(ch, ch) for ch in text)
Convert unicode text to a betacode equivalent .
54,739
def __calculate_order(self, node_dict):
    """Determine a valid ordering of the nodes in which no node is placed
    before all of its dependencies.

    node_dict - mapping of node -> iterable of dependency node names

    Raises DependencyTreeException for duplicate keys, missing dependencies
    or cyclic dependencies.
    """
    if len(node_dict.keys()) != len(set(node_dict.keys())):
        raise DependencyTreeException("Duplicate Keys Exist in node dictionary!")
    valid_order = [node for node, dependencies in node_dict.items() if len(dependencies) == 0]
    remaining_nodes = [node for node in node_dict.keys() if node not in valid_order]
    while len(remaining_nodes) > 0:
        node_added = False
        # BUG FIX: iterate over a snapshot -- the original removed items from
        # the very list it was iterating, which skips elements.
        for node in list(remaining_nodes):
            unresolved = [d for d in node_dict[node] if d not in valid_order]
            if not unresolved:
                valid_order.append(node)
                remaining_nodes.remove(node)
                node_added = True
        if not node_added:
            invalid_node = remaining_nodes[0]
            unresolved = [d for d in node_dict[invalid_node] if d not in valid_order]
            # BUG FIX: the original tested the comma-joined string of *all*
            # dependencies for list membership, so any node with more than
            # one dependency was always reported as "missing".  Check each
            # dependency individually instead.
            if any(d not in node_dict for d in unresolved):
                raise DependencyTreeException(
                    "Missing dependency! One or more of ({dependency}) are missing for "
                    "{dependant}.".format(dependant=invalid_node,
                                          dependency=', '.join(node_dict[invalid_node])))
            else:
                raise DependencyTreeException(
                    "The dependency %s is cyclic or dependent on a cyclic dependency"
                    % ', '.join(unresolved))
    return valid_order
Determine a valid ordering of the nodes in which a node is not called before all of its dependencies.
54,740
def warn_import_error(type_of_obj_support: str, caught: ImportError):
    """Emit a warning that support for ``type_of_obj_support`` failed to
    import, including the traceback of the caught ImportError."""
    buf = StringIO()
    buf.writelines('Import Error while trying to add support for '
                   + type_of_obj_support + '. You may continue but '
                   'the associated parsers and converters wont be available : \n')
    traceback.print_tb(caught.__traceback__, file=buf)
    buf.writelines(str(caught.__class__.__name__) + ' : ' + str(caught) + '\n')
    warn(buf.getvalue())
Utility method to print a warning message about failed import of some modules
54,741
def create_parser_options(lazy_mfcollection_parsing: bool = False) -> Dict[str, Dict[str, Any]]:
    """Create a default options structure carrying the multifile-collection
    lazy-parsing flag."""
    parser_name = MultifileCollectionParser.__name__
    return {parser_name: {'lazy_parsing': lazy_mfcollection_parsing}}
Utility method to create a default options structure with the lazy parsing inside
54,742
def register_default_plugins(root_parser: ParserRegistryWithConverters):
    """Register all default parser/converter plugins on the given registry.

    Each plugin family is imported lazily; when its dependencies are missing
    an ImportError warning is emitted and the remaining plugins still load.
    """
    # --- base plugins -------------------------------------------------
    try:
        from parsyfiles.plugins_base.support_for_primitive_types import get_default_primitive_parsers, get_default_primitive_converters
        root_parser.register_parsers(get_default_primitive_parsers())
        root_parser.register_converters(get_default_primitive_converters())
    except ImportError as e:
        warn_import_error('primitive types', e)
    try:
        from parsyfiles.plugins_base.support_for_collections import get_default_collection_parsers, get_default_collection_converters
        root_parser.register_parsers(get_default_collection_parsers(root_parser, root_parser))
        root_parser.register_converters(get_default_collection_converters(root_parser))
    except ImportError as e:
        warn_import_error('dict', e)
    try:
        from parsyfiles.plugins_base.support_for_objects import get_default_object_parsers, get_default_object_converters
        root_parser.register_parsers(get_default_object_parsers(root_parser, root_parser))
        root_parser.register_converters(get_default_object_converters(root_parser))
    except ImportError as e:
        warn_import_error('objects', e)
    try:
        from parsyfiles.plugins_base.support_for_configparser import get_default_config_parsers, get_default_config_converters
        root_parser.register_parsers(get_default_config_parsers())
        root_parser.register_converters(get_default_config_converters(root_parser))
    except ImportError as e:
        warn_import_error('config', e)
    # --- optional plugins (third-party dependencies) ------------------
    try:
        from parsyfiles.plugins_optional.support_for_jprops import get_default_jprops_parsers
        root_parser.register_parsers(get_default_jprops_parsers(root_parser, root_parser))
    except ImportError as e:
        warn_import_error('jprops', e)
    try:
        from parsyfiles.plugins_optional.support_for_yaml import get_default_yaml_parsers
        root_parser.register_parsers(get_default_yaml_parsers(root_parser, root_parser))
    except ImportError as e:
        warn_import_error('yaml', e)
    try:
        from parsyfiles.plugins_optional.support_for_numpy import get_default_np_parsers, get_default_np_converters
        root_parser.register_parsers(get_default_np_parsers())
        root_parser.register_converters(get_default_np_converters())
    except ImportError as e:
        warn_import_error('numpy', e)
    try:
        from parsyfiles.plugins_optional.support_for_pandas import get_default_pandas_parsers, get_default_pandas_converters
        root_parser.register_parsers(get_default_pandas_parsers())
        root_parser.register_converters(get_default_pandas_converters())
    except ImportError as e:
        warn_import_error('pandas', e)
Utility method to register all default plugins on the given parser + converter registry
54,743
def parse_collection(self, item_file_prefix: str, base_item_type: Type[T],
                     item_name_for_log: str = None,
                     file_mapping_conf: FileMappingConfiguration = None,
                     options: Dict[str, Dict[str, Any]] = None) -> Dict[str, T]:
    """Parse a collection of items of ``base_item_type`` found under
    ``item_file_prefix`` and return them as a dict keyed by item name."""
    item_name_for_log = item_name_for_log or ''
    check_var(item_name_for_log, var_types=str, var_name='item_name_for_log')
    # The collection is parsed as a Dict[str, base_item_type].
    collection_type = Dict[str, base_item_type]
    if len(item_name_for_log) > 0:
        item_name_for_log = item_name_for_log + ' '
    self.logger.debug('**** Starting to parse ' + item_name_for_log + 'collection of <'
                      + get_pretty_type_str(base_item_type) + '> at location '
                      + item_file_prefix + ' ****')
    return self._parse__item(collection_type, item_file_prefix, file_mapping_conf,
                             options=options)
Main method to parse a collection of items of type base_item_type .
54,744
def parse_item(self, location: str, item_type: Type[T],
               item_name_for_log: str = None,
               file_mapping_conf: FileMappingConfiguration = None,
               options: Dict[str, Dict[str, Any]] = None) -> T:
    """Parse a single object of ``item_type`` from ``location``."""
    item_name_for_log = item_name_for_log or ''
    check_var(item_name_for_log, var_types=str, var_name='item_name_for_log')
    if len(item_name_for_log) > 0:
        item_name_for_log = item_name_for_log + ' '
    self.logger.debug('**** Starting to parse single object ' + item_name_for_log
                      + 'of type <' + get_pretty_type_str(item_type) + '> at location '
                      + location + ' ****')
    return self._parse__item(item_type, location, file_mapping_conf, options=options)
Main method to parse an item of type item_type
54,745
def _parse__item(self, item_type: Type[T], item_file_prefix: str,
                 file_mapping_conf: FileMappingConfiguration = None,
                 options: Dict[str, Dict[str, Any]] = None) -> T:
    """Common parsing pipeline: persisted object -> parsing plan -> execute."""
    options = options or create_parser_options()
    file_mapping_conf = file_mapping_conf or WrappedFileMappingConfiguration()
    # 1) locate the file(s) making up the object
    obj = file_mapping_conf.create_persisted_object(item_file_prefix, logger=self.logger)
    self.logger.debug('')
    # 2) plan how to parse them
    pp = self.create_parsing_plan(item_type, obj, logger=self.logger)
    self.logger.debug('')
    # 3) execute the plan
    res = pp.execute(logger=self.logger, options=options)
    self.logger.debug('')
    return res
Common parsing steps to parse an item
54,746
def SpamsumDistance(ssA, ssB):
    """Return the spamsum distance between two spamsum signatures.

    Signatures look like "<blocksize>:<hash>".  When the block sizes differ
    the maximum possible distance (the longer hash length) is assumed;
    otherwise the Levenshtein distance of the hashes is returned.

    Raises ValueError when either argument is not a spamsum signature.
    """
    mA = re.match(r'^(\d+)[:](.*)$', ssA)
    mB = re.match(r'^(\d+)[:](.*)$', ssB)
    if mA is None or mB is None:
        # BUG FIX: `raise "string"` is itself a TypeError at runtime;
        # raise a proper exception type instead.
        raise ValueError("do not appear to be spamsum signatures")
    if mA.group(1) != mB.group(1):
        # Different block sizes: assume maximum distance.
        return max([len(mA.group(2)), len(mB.group(2))])
    return LevDistance(mA.group(2), mB.group(2))
returns the spamsum distance between ssA and ssB if they use a different block size assume maximum distance otherwise returns the LevDistance
54,747
def add_image(self, image_path, annotations):
    """Append an image and the bounding boxes built from its annotations."""
    self.image_paths.append(image_path)
    boxes = [bounding_box_from_annotation(**annotation) for annotation in annotations]
    self.bounding_boxes.append(boxes)
Adds an image and its bounding boxes to the current list of files
54,748
def save(self, list_file):
    """Save the current list of annotations to ``list_file``.

    Writes one image path per line followed by tab-separated
    "[top left height width]" bounding boxes.
    """
    bob.io.base.create_directories_safe(os.path.dirname(list_file))
    with open(list_file, 'w') as f:
        for index in range(len(self.image_paths)):
            f.write(self.image_paths[index])
            for bbx in self.bounding_boxes[index]:
                f.write("\t[%f %f %f %f]" % (bbx.top_f, bbx.left_f,
                                             bbx.size_f[0], bbx.size_f[1]))
            f.write("\n")
Saves the current list of annotations to the given file .
54,749
def _feature_file(self, parallel=None, index=None):
    """Return the name of an intermediate HDF5 file for storing features.

    When ``index`` is not given it is taken from the SGE_TASK_ID environment
    variable (if ``parallel`` is set and the variable exists), else 0.
    """
    if index is None:
        if parallel is None or "SGE_TASK_ID" not in os.environ:
            index = 0
        else:
            index = int(os.environ["SGE_TASK_ID"])
    return os.path.join(self.feature_directory, "Features_%02d.hdf5" % index)
Returns the name of an intermediate file for storing features .
54,750
def get(self, param, default=EMPTY):
    # Return the value for ``param``, specializing "%(key)s"-style
    # references against the manifest context.  Falls back to ``default``
    # when the parameter is absent; raises ParamNotFoundException when no
    # default was supplied.
    if not self.has(param):
        if default is not EMPTY:
            return default
        raise ParamNotFoundException("value for %s not found" % param)
    # Build the substitution context: the manifest context plus this
    # feature's own raw values namespaced as "<feature>:<key>".
    context_dict = copy.deepcopy(self.manifest.get_context_dict())
    for k, v in self.raw_dict.items():
        context_dict["%s:%s" % (self.feature_name, k)] = v
    cur_value = self.raw_dict[param]
    prev_value = None
    # Substitute repeatedly until the value stops changing, bounded to 5
    # rounds so chained references cannot loop forever.
    max_depth = 5
    while cur_value != prev_value and max_depth > 0:
        prev_value = cur_value
        try:
            cur_value = str(prev_value) % context_dict
        except KeyError:
            e = sys.exc_info()[1]
            key = e.args[0]
            if key.startswith('config:'):
                # "config:x" keys are resolved lazily from the manifest
                # inputs and added to the context for the next round.
                missing_key = key.split(':')[1]
                if self.manifest.inputs.is_input(missing_key):
                    val = self.manifest.inputs.get_input(missing_key)
                    context_dict[key] = val
            else:
                # Unknown key: give up and return the raw value unchanged.
                logger.warn("Could not specialize %s! Error: %s" % (self.raw_dict[param], e))
                return self.raw_dict[param]
        except ValueError:
            # Not a substitutable format string; return as-is.
            return cur_value
        max_depth -= 1
    return cur_value
Returns the param value, or the default if the param doesn't exist. If no default is given, an exception is raised instead.
54,751
def set(self, param, value):
    """Set ``param`` to ``value``, mirroring the change into the manifest."""
    self.raw_dict[param] = value
    self.manifest.set(self.feature_name, param, value)
sets the param to the value provided
54,752
def remove(self, param):
    """Remove ``param`` from the raw config and the manifest; a missing
    parameter is a no-op."""
    if not self.has(param):
        return
    del self.raw_dict[param]
    self.manifest.remove_option(self.feature_name, param)
Remove a parameter from the manifest
54,753
def set_if_empty(self, param, default):
    """Set ``param`` to ``default`` only when it does not exist yet."""
    if self.has(param):
        return
    self.set(param, default)
Set the parameter to the default if it doesn't exist.
54,754
def to_dict(self):
    """Return the fully specialized context as a dict of strings."""
    return {key: str(self.get(key)) for key in self.raw_dict}
Returns the context fully specialized as a dictionary
54,755
def write_to_manifest(self):
    """Overwrite this feature's manifest section with the raw config values."""
    self.manifest.remove_section(self.feature_name)
    self.manifest.add_section(self.feature_name)
    for key, value in self.raw_dict.items():
        self.manifest.set(self.feature_name, key, value)
Overwrites the section of the manifest with the feature config's values.
54,756
def round_to_05(n, exp=None, mode='s'):
    """Round to the next 0.5-value on the decade given by ``exp``.

    n - number or array to round
    exp - decimal exponent to round within (defaults to the magnitude of n)
    mode - 's' rounds towards zero (smaller); anything else rounds away

    Returns an array of the same shape as ``n``.
    """
    n = np.asarray(n)
    if exp is None:
        exp = np.floor(np.log10(np.abs(n)))
    scaled = np.abs(n) / 10. ** exp
    if mode == 's':
        upper = scaled
        shift = 1.
        lower = base = np.floor(scaled)
    else:
        upper = base = np.ceil(scaled)
        shift = -1.
        lower = scaled
    # A gap larger than 0.5 means the value crosses a half step.
    return np.where(upper - lower > 0.5,
                    np.sign(n) * (base + shift * 0.5) * 10. ** exp,
                    np.sign(n) * base * 10. ** exp)
Round to the next 0 . 5 - value .
54,757
def convert_radian(coord, *variables):
    """Return ``coord`` converted from radian to degree when any of the
    given variables declares radian units; otherwise return it unchanged."""
    uses_radian = any(v.attrs.get('units') == 'radian' for v in variables)
    return coord * 180. / np.pi if uses_radian else coord
Convert the given coordinate from radian to degree
54,758
def replace_coord(self, i):
    """Replace the last dimension of the i-th data array with its
    alternative coordinate and return the renamed array."""
    da = next(islice(self.data_iterator, i, i + 1))
    name, coord = self.get_alternative_coord(da, i)
    # Keep every non-dimension coordinate of the original array.
    other_coords = {key: da.coords[key]
                    for key in set(da.coords).difference(da.dims)}
    return (da.rename({da.dims[-1]: name})
              .assign_coords(**{name: coord})
              .assign_coords(**other_coords))
Replace the coordinate for the data array at the given position
54,759
def value2pickle(self):
    """Return the current edge color of every axis spine, keyed by spine."""
    return {key: spine.get_edgecolor()
            for key, spine in self.ax.spines.items()}
Return the current axis colors
54,760
def set_default_formatters(self, which=None):
    """Store the axis' current formatters as the defaults.

    which - None (both), 'minor' or 'major'
    """
    if which in (None, 'minor'):
        self.default_formatters['minor'] = self.axis.get_minor_formatter()
    if which in (None, 'major'):
        self.default_formatters['major'] = self.axis.get_major_formatter()
Sets the default formatters that is used for updating to None
54,761
def plotted_data(self):
    """Return an InteractiveList of the data arrays currently shown to the
    user (those whose corresponding value is not None)."""
    shown = [arr
             for arr, val in zip(self.iter_data, cycle(slist(self.value)))
             if val is not None]
    return InteractiveList(shown)
The data that is shown to the user
54,762
def axis(self):
    """Return the ticks axis of the colorbar (x- or y-axis depending on the
    colorbar position).  Overwritten during the update process."""
    attr_name = self.axis_locations[self.position] + 'axis'
    return getattr(self.colorbar.ax, attr_name)
axis of the colorbar with the ticks . Will be overwritten during update process .
54,763
def default_formatters ( self ) : if self . _default_formatters : return self . _default_formatters else : self . set_default_formatters ( ) return self . _default_formatters
Default formatters of the colorbar axis
54,764
def get_xyz_2d(self, xcoord, x, ycoord, y, u, v):
    """Get the closest x, y and the corresponding u, v values for the
    given (x, y) point when the coordinates are two-dimensional.

    Distances are computed in the complex plane; NaN distances are
    ignored by ``nanargmin``.
    """
    flat_x = xcoord.values.ravel()
    flat_y = ycoord.values.ravel()
    points = flat_x + 1j * flat_y
    target = x + 1j * y
    idx = np.nanargmin(np.abs(points - target))
    closest = points[idx]
    return (closest.real, closest.imag,
            u.values.ravel()[idx], v.values.ravel()[idx])
Get closest x y and z for the given x and y in data for 2d coords
54,765
def hist2d ( self , da , ** kwargs ) : if self . value is None or self . value == 'counts' : normed = False else : normed = True y = da . values x = da . coords [ da . dims [ 0 ] ] . values counts , xedges , yedges = np . histogram2d ( x , y , normed = normed , ** kwargs ) if self . value == 'counts' : counts = counts / counts . sum ( ) . astype ( float ) return counts , xedges , yedges
Make the two dimensional histogram
54,766
def _statsmodels_bivariate_kde(self, x, y, bws, xsize, ysize, xyranges):
    """Compute a bivariate kernel density estimate using statsmodels.

    Mainly motivated by ``seaborn.distributions._statsmodels_bivariate_kde``.

    Parameters
    ----------
    x, y : array-like
        The two data samples.
    bws : list
        Bandwidths for ``x`` and ``y``. Each entry may be a number or the
        name of a statsmodels bandwidth function (e.g. ``'scott'``).
        String entries are resolved in place (the list is mutated).
    xsize, ysize : int
        Number of grid points in x- and y-direction.
    xyranges : sequence
        ``[(xmin, xmax), (ymin, ymax)]`` evaluation ranges.

    Returns
    -------
    tuple
        ``(x_support, y_support, z)`` where ``z`` has the shape of the
        meshgrid built from the two supports.
    """
    import statsmodels.nonparametric.api as smnp
    # Resolve string bandwidths (e.g. 'scott') to numeric values via the
    # corresponding smnp.bandwidths.bw_* function; note this mutates bws
    for i, (coord, bw) in enumerate(zip([x, y], bws)):
        if isinstance(bw, six.string_types):
            bw_func = getattr(smnp.bandwidths, "bw_" + bw)
            bws[i] = bw_func(coord)
    # "cc": both variables are continuous
    kde = smnp.KDEMultivariate([x, y], "cc", bws)
    x_support = np.linspace(xyranges[0][0], xyranges[0][1], xsize)
    y_support = np.linspace(xyranges[1][0], xyranges[1][1], ysize)
    xx, yy = np.meshgrid(x_support, y_support)
    # Evaluate the density on the flattened grid, then restore 2D shape
    z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
    return x_support, y_support, z
Compute a bivariate kde using statsmodels . This function is mainly motivated through seaborn . distributions . _statsmodels_bivariate_kde
54,767
def append_diff_hist(diff, diff_hist=list()):
    """Append a diff record (as generated by ``record_diff``) to the list
    of diff-history records and return the updated list.

    NOTE(review): the mutable default argument is an anti-pattern, but it
    is effectively harmless here: an empty (falsy) ``diff_hist`` is
    replaced by a fresh list below, so the shared default object is never
    mutated.
    """
    # presumably normalizes JSON string/object input — TODO confirm
    # against _norm_json_params
    diff, diff_hist = _norm_json_params(diff, diff_hist)
    if not diff_hist:
        diff_hist = list()
    # Each history entry carries the diff together with its timestamp
    diff_hist.append({'diff': diff, 'diff_date': now_field()})
    return diff_hist
Given a diff as generated by record_diff append a diff record to the list of diff_hist records .
54,768
def _find_video(self):
    """Look up and populate this Video from the API, preferring the
    numeric id over the reference id. Does nothing if neither is set or
    the lookup returns no data."""
    if self.id:
        data = self.connection.get_item(
            'find_video_by_id', video_id=self.id)
    elif self.reference_id:
        data = self.connection.get_item(
            'find_video_by_reference_id', reference_id=self.reference_id)
    else:
        data = None
    if data:
        self._load(data)
Lookup and populate pybrightcove . video . Video object given a video id or reference_id .
54,769
def to_xml(self):
    """Serialize this video and its assets into the Brightcove batch
    provisioning XML format and return it as a string.

    The ``%(...)s`` placeholders accumulated in the <title> element are
    filled in at the very end from ``self._to_dict()``.
    """
    xml = ''
    # One <asset/> element per file, emitting only the attributes that
    # are actually set on the asset dict
    for asset in self.assets:
        xml += '<asset filename="%s" ' % os.path.basename(asset['filename'])
        xml += ' refid="%(refid)s"' % asset
        xml += ' size="%(size)s"' % asset
        # indexed directly because '-' is not valid in %(...)s with dicts
        xml += ' hash-code="%s"' % asset['hash-code']
        xml += ' type="%(type)s"' % asset
        if asset.get('encoding-rate', None):
            xml += ' encoding-rate="%s"' % asset['encoding-rate']
        if asset.get('frame-width', None):
            xml += ' frame-width="%s"' % asset['frame-width']
        if asset.get('frame-height', None):
            xml += ' frame-height="%s"' % asset['frame-height']
        if asset.get('display-name', None):
            xml += ' display-name="%s"' % asset['display-name']
        if asset.get('encode-to', None):
            xml += ' encode-to="%s"' % asset['encode-to']
        if asset.get('encode-multiple', None):
            xml += ' encode-multiple="%s"' % asset['encode-multiple']
        if asset.get('h264-preserve-as-rendition', None):
            xml += ' h264-preserve-as-rendition="%s"' % asset['h264-preserve-as-rendition']
        if asset.get('h264-no-processing', None):
            xml += ' h264-no-processing="%s"' % asset['h264-no-processing']
        xml += ' />\n'
    # <title> element; the %(...)s names refer to keys of self._to_dict()
    xml += '<title name="%(name)s" refid="%(referenceId)s" active="TRUE" '
    if self.start_date:
        xml += 'start-date="%(start_date)s" '
    if self.end_date:
        xml += 'end-date="%(end_date)s" '
    # Assets without an encoding rate are referenced by their role
    # (full video, thumbnail, still, bumper)
    for asset in self.assets:
        if asset.get('encoding-rate', None) == None:
            choice = enums.AssetTypeEnum
            if asset.get('type', None) == choice.VIDEO_FULL:
                xml += 'video-full-refid="%s" ' % asset.get('refid')
            if asset.get('type', None) == choice.THUMBNAIL:
                xml += 'thumbnail-refid="%s" ' % asset.get('refid')
            if asset.get('type', None) == choice.VIDEO_STILL:
                xml += 'video-still-refid="%s" ' % asset.get('refid')
            if asset.get('type', None) == choice.FLV_BUMPER:
                xml += 'flash-prebumper-refid="%s" ' % asset.get('refid')
    xml += '>\n'
    if self.short_description:
        xml += '<short-description><![CDATA[%(shortDescription)s]]>'
        xml += '</short-description>\n'
    if self.long_description:
        xml += '<long-description><![CDATA[%(longDescription)s]]>'
        xml += '</long-description>\n'
    for tag in self.tags:
        xml += '<tag><![CDATA[%s]]></tag>\n' % tag
    # Assets with an encoding rate become renditions of the title
    for asset in self.assets:
        if asset.get('encoding-rate', None):
            xml += '<rendition-refid>%s</rendition-refid>\n' % asset['refid']
    for meta in self.metadata:
        xml += '<custom-%s-value name="%s">%s</custom-%s-value>' % (meta['type'], meta['key'], meta['value'], meta['type'])
    xml += '</title>'
    # Fill in the %(...)s placeholders accumulated above
    xml = xml % self._to_dict()
    return xml
Converts object into an XML string .
54,770
def _load(self, data):
    """Deserialize a dict of Brightcove API data into this Video object.

    ``startDate`` and ``endDate`` are optional in the payload; any other
    missing key raises ``KeyError``.
    """
    # Keep the raw payload around for debugging / later inspection
    self.raw_data = data
    # _convert_tstamp (project helper) turns raw timestamp values into
    # datetimes — presumably epoch-based; TODO confirm its contract
    self.creation_date = _convert_tstamp(data['creationDate'])
    self.economics = data['economics']
    self.id = data['id']
    self.last_modified_date = _convert_tstamp(data['lastModifiedDate'])
    self.length = data['length']
    self.link_text = data['linkText']
    self.link_url = data['linkURL']
    self.long_description = data['longDescription']
    self.name = data['name']
    self.plays_total = data['playsTotal']
    self.plays_trailing_week = data['playsTrailingWeek']
    self.published_date = _convert_tstamp(data['publishedDate'])
    # Optional fields: default to None if absent
    self.start_date = _convert_tstamp(data.get('startDate', None))
    self.end_date = _convert_tstamp(data.get('endDate', None))
    self.reference_id = data['referenceId']
    self.short_description = data['shortDescription']
    # Copy the tags into a fresh list owned by this instance
    self.tags = []
    for tag in data['tags']:
        self.tags.append(tag)
    self.thumbnail_url = data['thumbnailURL']
    self.video_still_url = data['videoStillURL']
Deserialize a dictionary of data into a pybrightcove . video . Video object .
54,771
def get_custom_metadata ( self ) : if self . id is not None : data = self . connection . get_item ( 'find_video_by_id' , video_id = self . id , video_fields = "customFields" ) for key in data . get ( "customFields" , { } ) . keys ( ) : val = data [ "customFields" ] . get ( key ) if val is not None : self . add_custom_metadata ( key , val )
Fetches custom metadata for an already existing Video.
54,772
def add_custom_metadata ( self , key , value , meta_type = None ) : self . metadata . append ( { 'key' : key , 'value' : value , 'type' : meta_type } )
Add custom metadata to the Video . meta_type is required for XML API .
54,773
def add_asset ( self , filename , asset_type , display_name , encoding_rate = None , frame_width = None , frame_height = None , encode_to = None , encode_multiple = False , h264_preserve_as_rendition = False , h264_no_processing = False ) : m = hashlib . md5 ( ) fp = file ( filename , 'rb' ) bits = fp . read ( 262144 ) while bits : m . update ( bits ) bits = fp . read ( 262144 ) fp . close ( ) hash_code = m . hexdigest ( ) refid = "%s-%s" % ( os . path . basename ( filename ) , hash_code ) asset = { 'filename' : filename , 'type' : asset_type , 'size' : os . path . getsize ( filename ) , 'refid' : refid , 'hash-code' : hash_code } if encoding_rate : asset . update ( { 'encoding-rate' : encoding_rate } ) if frame_width : asset . update ( { 'frame-width' : frame_width } ) if frame_height : asset . update ( { 'frame-height' : frame_height } ) if display_name : asset . update ( { 'display-name' : display_name } ) if encode_to : asset . update ( { 'encode-to' : encode_to } ) asset . update ( { 'encode-multiple' : encode_multiple } ) if encode_multiple and h264_preserve_as_rendition : asset . update ( { 'h264-preserve-as-rendition' : h264_preserve_as_rendition } ) else : if h264_no_processing : asset . update ( { 'h264-no-processing' : h264_no_processing } ) self . assets . append ( asset )
Add an asset to the Video object .
54,774
def save(self, create_multiple_renditions=True, preserve_source_rendition=True, encode_to=enums.EncodeToEnum.FLV):
    """Create or update this video on Brightcove.

    Exactly one of four mutually exclusive paths is taken:

    * FTP connection with assets: batch-provisioning XML upload
    * no id yet and a local file: 'create_video' with the file upload
    * no id yet and renditions:   'create_video' with metadata only
    * existing id:                'update_video' (reloads on response)
    """
    if is_ftp_connection(self.connection) and len(self.assets) > 0:
        # FTP batch provisioning: ship the XML manifest plus the assets
        self.connection.post(xml=self.to_xml(), assets=self.assets)
    elif not self.id and self._filename:
        # First save with a local media file: upload and remember the id
        self.id = self.connection.post('create_video', self._filename, create_multiple_renditions=create_multiple_renditions, preserve_source_rendition=preserve_source_rendition, encode_to=encode_to, video=self._to_dict())
    elif not self.id and len(self.renditions) > 0:
        # First save with pre-built renditions; no file upload needed
        self.id = self.connection.post('create_video', video=self._to_dict())
    elif self.id:
        # Update; the API may return the fresh record — reload if so
        data = self.connection.post('update_video', video=self._to_dict())
        if data:
            self._load(data)
Creates or updates the video
54,775
def delete ( self , cascade = False , delete_shares = False ) : if self . id : self . connection . post ( 'delete_video' , video_id = self . id , cascade = cascade , delete_shares = delete_shares ) self . id = None
Deletes the video .
54,776
def get_upload_status(self):
    """Return the upload status of this video, or None if it was never
    saved (no id)."""
    if not self.id:
        return None
    return self.connection.post('get_upload_status', video_id=self.id)
Get the status of the video that has been uploaded .
54,777
def share(self, accounts):
    """Create a share of this video with the given accounts.

    Currently a stub: argument validation is performed, after which a
    "not yet implemented" error is always raised.
    """
    if not isinstance(accounts, (list, tuple)):
        raise exceptions.PyBrightcoveError(
            "Video.share expects an iterable argument")
    # Validation passed, but the actual API call is not implemented yet.
    raise exceptions.PyBrightcoveError("Not yet implemented")
Create a share
54,778
def set_image(self, image, filename=None, resize=False):
    """Set the poster or thumbnail of this video.

    Does nothing if the video has no id yet; updates ``self.image`` from
    the API response when one is returned.
    """
    if not self.id:
        return
    data = self.connection.post('add_image', filename, video_id=self.id,
                                image=image.to_dict(), resize=resize)
    if data:
        self.image = Image(data=data)
Set the poster or thumbnail of this Video.
54,779
def find_related(self, _connection=None, page_size=100, page_number=0):
    """List all videos related to this one as a lazy result set, or None
    if this video has no id yet."""
    if not self.id:
        return None
    return connection.ItemResultSet(
        'find_related_videos', Video, _connection, page_size, page_number,
        None, None, video_id=self.id)
List all videos that are related to this one .
54,780
def delete_video(video_id, cascade=False, delete_shares=False, _connection=None):
    """Delete the video identified by *video_id*.

    A default API connection is created when none is supplied.
    """
    conn = _connection or connection.APIConnection()
    conn.post('delete_video', video_id=video_id, cascade=cascade,
              delete_shares=delete_shares)
Delete the video represented by the video_id parameter .
54,781
def get_status(video_id, _connection=None):
    """Return the upload status of the video identified by *video_id*.

    A default API connection is created when none is supplied.
    """
    conn = _connection or connection.APIConnection()
    return conn.post('get_upload_status', video_id=video_id)
Get the status of a video given the video_id parameter .
54,782
def activate(video_id, _connection=None):
    """Mark the video identified by *video_id* as active and return the
    updated Video object."""
    conn = _connection or connection.APIConnection()
    data = conn.post('update_video', video={
        'id': video_id,
        'itemState': enums.ItemStateEnum.ACTIVE})
    return Video(data=data, _connection=conn)
Mark a video as Active
54,783
def find_modified(since, filter_list=None, _connection=None, page_size=25,
                  page_number=0, sort_by=enums.DEFAULT_SORT_BY,
                  sort_order=enums.DEFAULT_SORT_ORDER):
    """List all videos modified since the given local datetime.

    Parameters
    ----------
    since : datetime.datetime
        Lower bound; converted to minutes since the epoch as expected by
        the Brightcove API.
    filter_list : list, optional
        Extra API filters (defaults to no filters).

    Raises
    ------
    exceptions.PyBrightcoveError
        If *since* is not a datetime object.
    """
    import time
    filters = filter_list if filter_list is not None else []
    if not isinstance(since, datetime):
        raise exceptions.PyBrightcoveError(
            'The parameter "since" must be a datetime object.')
    # BUGFIX: strftime("%s") is a non-portable glibc extension (fails on
    # Windows), and plain "/" yields a float under Python 3. mktime on
    # the local timetuple matches the old %s semantics, and "//" keeps
    # from_date an integer number of minutes.
    fdate = int(time.mktime(since.timetuple())) // 60
    return connection.ItemResultSet(
        'find_modified_videos', Video, _connection, page_size, page_number,
        sort_by, sort_order, from_date=fdate, filter=filters)
List all videos modified since a certain date .
54,784
def find_all(_connection=None, page_size=100, page_number=0,
             sort_by=enums.DEFAULT_SORT_BY,
             sort_order=enums.DEFAULT_SORT_ORDER):
    """List every video in the account as a lazy result set."""
    return connection.ItemResultSet(
        'find_all_videos', Video, _connection,
        page_size, page_number, sort_by, sort_order)
List all videos .
54,785
def find_by_tags(and_tags=None, or_tags=None, _connection=None,
                 page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY,
                 sort_order=enums.DEFAULT_SORT_ORDER):
    """List videos matching the given tags.

    Parameters
    ----------
    and_tags : list or tuple, optional
        Videos must carry ALL of these tags.
    or_tags : list or tuple, optional
        Videos must carry ANY of these tags.

    Raises
    ------
    exceptions.PyBrightcoveError
        If neither argument is given or either is not a list/tuple.
    """
    err = None
    if not and_tags and not or_tags:
        err = "You must supply at least one of either and_tags or or_tags."
    if and_tags and not isinstance(and_tags, (tuple, list)):
        # BUGFIX: messages previously read "must an iterable"
        err = "The and_tags argument for Video.find_by_tags must be an iterable"
    if or_tags and not isinstance(or_tags, (tuple, list)):
        err = "The or_tags argument for Video.find_by_tags must be an iterable"
    if err:
        raise exceptions.PyBrightcoveError(err)
    # The API expects comma separated tag lists (or None when unused)
    atags = ','.join(str(t) for t in and_tags) if and_tags else None
    otags = ','.join(str(t) for t in or_tags) if or_tags else None
    return connection.ItemResultSet(
        'find_videos_by_tags', Video, _connection, page_size, page_number,
        sort_by, sort_order, and_tags=atags, or_tags=otags)
List videos given a certain set of tags .
54,786
def find_by_text(text, _connection=None, page_size=100, page_number=0,
                 sort_by=enums.DEFAULT_SORT_BY,
                 sort_order=enums.DEFAULT_SORT_ORDER):
    """List videos whose title or description matches *text*."""
    return connection.ItemResultSet(
        'find_videos_by_text', Video, _connection,
        page_size, page_number, sort_by, sort_order, text=text)
List videos that match the text in title or description .
54,787
def find_by_campaign(campaign_id, _connection=None, page_size=100,
                     page_number=0, sort_by=enums.DEFAULT_SORT_BY,
                     sort_order=enums.DEFAULT_SORT_ORDER):
    """List all videos belonging to the given campaign."""
    return connection.ItemResultSet(
        'find_videos_by_campaign_id', Video, _connection,
        page_size, page_number, sort_by, sort_order,
        campaign_id=campaign_id)
List all videos for a given campaign .
54,788
def find_by_user(user_id, _connection=None, page_size=100, page_number=0,
                 sort_by=enums.DEFAULT_SORT_BY,
                 sort_order=enums.DEFAULT_SORT_ORDER):
    """List all videos uploaded by the given user."""
    return connection.ItemResultSet(
        'find_videos_by_user_id', Video, _connection,
        page_size, page_number, sort_by, sort_order, user_id=user_id)
List all videos uploaded by a certain user .
54,789
def find_by_reference_ids(reference_ids, _connection=None, page_size=100,
                          page_number=0, sort_by=enums.DEFAULT_SORT_BY,
                          sort_order=enums.DEFAULT_SORT_ORDER):
    """List all videos identified by a list of reference ids.

    Raises
    ------
    exceptions.PyBrightcoveError
        If *reference_ids* is not a list or tuple.
    """
    if not isinstance(reference_ids, (list, tuple)):
        raise exceptions.PyBrightcoveError(
            "Video.find_by_reference_ids expects an iterable argument")
    return connection.ItemResultSet(
        'find_videos_by_reference_ids', Video, _connection,
        page_size, page_number, sort_by, sort_order,
        reference_ids=','.join(reference_ids))
List all videos identified by a list of reference ids
54,790
def find_by_ids(ids, _connection=None, page_size=100, page_number=0,
                sort_by=enums.DEFAULT_SORT_BY,
                sort_order=enums.DEFAULT_SORT_ORDER):
    """List all videos identified by a list of Brightcove video ids.

    Raises
    ------
    exceptions.PyBrightcoveError
        If *ids* is not a list or tuple.
    """
    if not isinstance(ids, (list, tuple)):
        raise exceptions.PyBrightcoveError(
            "Video.find_by_ids expects an iterable argument")
    joined = ','.join(str(i) for i in ids)
    return connection.ItemResultSet(
        'find_videos_by_ids', Video, _connection,
        page_size, page_number, sort_by, sort_order, video_ids=joined)
List all videos identified by a list of Brightcove video ids
54,791
def __wrap(self, func):
    """Wrap *func* so that keyword arguments missing from the call fall
    back to the registered defaults in ``self._defaults``.

    Explicitly passed keyword arguments always win over defaults; the
    returned wrapper keeps *func*'s metadata, with a warning prepended
    to the docstring.
    """
    def with_defaults(*args, **kwargs):
        # Determine which parameter names func accepts
        if hasattr(inspect, 'signature'):
            accepted = inspect.signature(func).parameters
        else:  # Python 2 fallback
            accepted = inspect.getargspec(func).args
        call_kwargs = kwargs.copy()
        for name in accepted:
            if name in kwargs:
                call_kwargs[name] = kwargs[name]
            elif name in self._defaults:
                call_kwargs[name] = self._defaults[name]
        return func(*args, **call_kwargs)
    wrapped = functools.update_wrapper(with_defaults, func)
    wrapped.__doc__ = (
        'WARNING: this function has been modified by the Presets '
        'package.\nDefault parameter values described in the '
        'documentation below may be inaccurate.\n\n{}'.format(
            wrapped.__doc__))
    return wrapped
This decorator overrides the default arguments of a function .
54,792
def _sumDiceRolls(self, rollList):
    """Collapse a dice-roll structure to a single integer result,
    recording RollList instances on ``self.rolls`` along the way."""
    if not isinstance(rollList, RollList):
        # Already a plain integer result
        return rollList
    self.rolls.append(rollList)
    return rollList.sum()
convert from dice roll structure to a single integer result
54,793
def annotated_references(obj):
    """Return known information about references held by the given object.

    Returns
    -------
    KeyTransformDict
        Maps (by id) each referent to a list of annotations describing
        how *obj* refers to it.
    """
    references = KeyTransformDict(transform=id, default_factory=list)
    # Walk the MRO so annotators registered for base classes also run
    for type_ in type(obj).__mro__:
        if type_ in type_based_references:
            type_based_references[type_](obj, references)
    # Attributes common to (almost) all objects
    add_attr(obj, "__dict__", references)
    add_attr(obj, "__class__", references)
    if isinstance(obj, type):
        add_attr(obj, "__mro__", references)
    return references
Return known information about references held by the given object .
54,794
def object_annotation(obj):
    """Return a string to be used for Graphviz nodes.

    The string should be short but as informative as possible. The
    ``\\n`` sequences are literal backslash-n line breaks for Graphviz.
    """
    # Basic types (numbers, strings, ...) are shown via their repr
    if isinstance(obj, BASE_TYPES):
        return repr(obj)
    if type(obj).__name__ == 'function':
        return "function\\n{}".format(obj.__name__)
    elif isinstance(obj, types.MethodType):
        if six.PY2:
            # Python 2 methods carry their class on im_class
            im_class = obj.im_class
            if im_class is None:
                im_class_name = "<None>"
            else:
                im_class_name = im_class.__name__
            try:
                func_name = obj.__func__.__name__
            except AttributeError:
                func_name = "<anonymous>"
            return "instancemethod\\n{}.{}".format(
                im_class_name,
                func_name,
            )
        else:
            # Python 3: the qualified name already includes the class
            try:
                func_name = obj.__func__.__qualname__
            except AttributeError:
                func_name = "<anonymous>"
            return "instancemethod\\n{}".format(func_name)
    elif isinstance(obj, list):
        return "list[{}]".format(len(obj))
    elif isinstance(obj, tuple):
        return "tuple[{}]".format(len(obj))
    elif isinstance(obj, dict):
        return "dict[{}]".format(len(obj))
    elif isinstance(obj, types.ModuleType):
        return "module\\n{}".format(obj.__name__)
    elif isinstance(obj, type):
        return "type\\n{}".format(obj.__name__)
    elif six.PY2 and isinstance(obj, types.InstanceType):
        # Old-style class instance (Python 2 only)
        return "instance\\n{}".format(obj.__class__.__name__)
    elif isinstance(obj, weakref.ref):
        referent = obj()
        if referent is None:
            return "weakref (dead referent)"
        else:
            return "weakref to id 0x{:x}".format(id(referent))
    elif isinstance(obj, types.FrameType):
        # Truncate long filenames from the left so the tail stays visible
        filename = obj.f_code.co_filename
        if len(filename) > FRAME_FILENAME_LIMIT:
            filename = "..." + filename[-(FRAME_FILENAME_LIMIT - 3):]
        return "frame\\n{}:{}".format(
            filename,
            obj.f_lineno,
        )
    else:
        # Fallback: fully qualified type name
        return "object\\n{}.{}".format(
            type(obj).__module__,
            type(obj).__name__,
        )
Return a string to be used for Graphviz nodes . The string should be short but as informative as possible .
54,795
def disttar ( target , source , env ) : import tarfile env_dict = env . Dictionary ( ) if env_dict . get ( "DISTTAR_FORMAT" ) in [ "gz" , "bz2" ] : tar_format = env_dict [ "DISTTAR_FORMAT" ] else : tar_format = "" base_name = str ( target [ 0 ] ) . split ( '.tar' ) [ 0 ] ( target_dir , dir_name ) = os . path . split ( base_name ) if target_dir and not os . path . exists ( target_dir ) : os . makedirs ( target_dir ) print >> sys . stderr , 'DistTar: Writing %s' % str ( target [ 0 ] ) print >> sys . stderr , ' with contents: %s' % [ str ( s ) for s in source ] tar = tarfile . open ( str ( target [ 0 ] ) , "w:%s" % tar_format ) for item in source : item = str ( item ) sys . stderr . write ( "." ) tar . add ( item , '%s/%s' % ( dir_name , item ) ) sys . stderr . write ( "\n" ) tar . close ( )
tar archive builder
54,796
def disttar_suffix(env, sources):
    """SCons tar archive suffix generator.

    Returns '.tar.gz' / '.tar.bz2' when ``DISTTAR_FORMAT`` selects a
    compression, otherwise plain '.tar'.
    """
    env_dict = env.Dictionary()
    # BUGFIX: dict.has_key() was removed in Python 3; .get() covers both
    # the missing-key and wrong-value cases in one test.
    if env_dict.get("DISTTAR_FORMAT") in ["gz", "bz2"]:
        return ".tar." + env_dict["DISTTAR_FORMAT"]
    return ".tar"
tar archive suffix generator
54,797
def generate(env):
    """Add builders and construction variables for the DistTar builder.

    Registers ``env.DistTar`` backed by the ``disttar`` action and sets
    the default archive compression.
    """
    disttar_action = SCons.Action.Action(disttar, disttar_string)
    env['BUILDERS']['DistTar'] = Builder(action=disttar_action, emitter=disttar_emitter, suffix=disttar_suffix, target_factory=env.fs.Entry)
    # Default to gzip compression unless the caller overrides it
    env.AppendUnique(DISTTAR_FORMAT='gz')
Add builders and construction variables for the DistTar builder .
54,798
def find_one(self, cls, id):
    """Find the single keyed row for *cls* with the given id — as per the
    gludb spec. Returns None when no row matches."""
    matches = self.find_by_index(cls, 'id', id)
    if matches:
        return matches[0]
    return None
Find single keyed row - as per the gludb spec .
54,799
def save(self, obj):
    """Save (upsert) the given instance — as per the gludb spec.

    Assigns a fresh id when the object has none, then inserts or updates
    the row via PostgreSQL ``ON CONFLICT`` upsert. Index columns are
    populated from ``obj.indexes()``.
    """
    # BUGFIX: a cursor used to be created here from a separate
    # self._conn() call and was never used nor closed (leaking a
    # connection/cursor per save); the real work happens in the context
    # managers below.
    tabname = obj.__class__.get_table_name()
    index_names = obj.__class__.index_names() or []
    col_names = ['id', 'value'] + index_names
    value_holders = ['%s'] * len(col_names)
    # Update every column except the id on conflict
    updates = ['%s = EXCLUDED.%s' % (cn, cn) for cn in col_names[1:]]
    if not obj.id:
        obj.id = uuid()
    query = 'insert into {0} ({1}) values ({2}) on conflict(id) do update set {3};'.format(
        tabname,
        ','.join(col_names),
        ','.join(value_holders),
        ','.join(updates),
    )
    values = [obj.id, obj.to_data()]
    index_vals = obj.indexes() or {}
    # NOTE(review): missing index values are stored as the literal string
    # 'NULL', not SQL NULL — preserved for backward compatibility; confirm
    # against the index-query code before changing.
    values += [index_vals.get(name, 'NULL') for name in index_names]
    with self._conn() as conn:
        with conn.cursor() as cur:
            cur.execute(query, tuple(values))
Save current instance - as per the gludb spec .