idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
48,800
def reverse_index_mapping(self):
    """Map base-array indexes to this segment's indexes (lazily cached).

    Positions of the base array that have no counterpart in this
    segment map to -1.
    """
    if self._reverse_index_mapping is None:
        if self.is_indexed:
            # Indexed segment: scatter sequential positions through `order`.
            mapping = np.full(self.base_length, -1, dtype=np.int32)
            mapping[self.order] = np.arange(len(self.order), dtype=np.int32)
        elif self.data.base is None:
            # Segment owns its data outright: identity mapping.
            mapping = np.arange(self.data_length, dtype=np.int32)
        else:
            # Contiguous view into the base array.
            mapping = np.full(self.base_length, -1, dtype=np.int32)
            lo = self.data_start - self.base_start
            hi = self.data_end - self.base_start
            mapping[lo:hi] = np.arange(self.data_length, dtype=np.int32)
        self._reverse_index_mapping = mapping
    return self._reverse_index_mapping
Get the mapping from this segment's indexes to the indexes of the base array.
48,801
def get_reverse_index(self, base_index):
    """Return this segment's index for an index into the base data.

    Raises:
        IndexError: when the base index has no counterpart here.
    """
    segment_index = self.reverse_index_mapping[base_index]
    if segment_index < 0:
        raise IndexError("index %d not mapped in this segment" % base_index)
    return segment_index
Get the index into this segment's data given the index into the base data.
48,802
def resize(self, newsize, zeros=True):
    """Resize the underlying data arrays.

    Only resizable container (base) segments may be resized.  Newly
    added bytes (data and style) are zeroed when ``zeros`` is true.

    Returns:
        tuple of (size before, size after).
    """
    if not self.can_resize:
        raise ValueError("Segment %s can't be resized" % str(self))
    if not self.rawdata.is_base:
        raise ValueError("Only container segments can be resized")
    before = len(self)
    self.rawdata.resize(newsize)
    # Re-point our views at the (possibly reallocated) raw arrays.
    self.set_raw(self.rawdata)
    after = len(self)
    if zeros and after > before:
        self.data[before:] = 0
        self.style[before:] = 0
    return before, after
Resize the data arrays .
48,803
def reconstruct_raw(self, rawdata):
    """Reconstruct the pointers to the parent data arrays.

    Uses the serialized byte bounds (and optional index order) stashed
    on this instance during serialization, discarding the temporaries.
    """
    start, end = self._rawdata_bounds
    raw = rawdata[start:end]
    delattr(self, '_rawdata_bounds')
    try:
        if self._order_list:
            raw = raw.get_indexed(to_numpy_list(self._order_list))
            delattr(self, '_order_list')
    except AttributeError:
        # No saved order list: nothing to re-index.
        pass
    self.set_raw(raw)
Reconstruct the pointers to the parent data arrays
48,804
def get_parallel_raw_data(self, other):
    """Get raw data from this segment covering the same bytes as `other`.

    Fix: the original subscripted the method (``r.get_indexed[...]``)
    instead of calling it, raising TypeError whenever ``other`` was an
    indexed segment.
    """
    start, end = other.byte_bounds_offset()
    r = self.rawdata[start:end]
    if other.rawdata.is_indexed:
        r = r.get_indexed(other.order)
    return r
Get the raw data that is similar to the specified other segment
48,805
def serialize_session(self, mdict):
    """Save extra metadata (style ranges, comments) into ``mdict``."""
    mdict["comment ranges"] = [list(a) for a in self.get_style_ranges(comment=True)]
    mdict["data ranges"] = [list(a) for a in self.get_style_ranges(data=True)]
    # One slot per user style bit that actually has ranges.
    for bit in range(1, user_bit_mask):
        user_ranges = [list(a) for a in self.get_style_ranges(user=bit)]
        if user_ranges:
            mdict["user style %d" % bit] = user_ranges
    mdict["comments"] = self.get_sorted_comments()
Save extra metadata to a dict so that it can be serialized
48,806
def get_index_from_base_index(self, base_index):
    """Return the index into this segment's data for a base-array index.

    Raises:
        IndexError: when ``base_index`` is not covered by this segment.
    """
    try:
        index = self.rawdata.get_reverse_index(base_index)
    except IndexError:
        raise IndexError("index %d not in this segment" % base_index)
    if index < 0:
        # Negative entries mark base bytes with no counterpart here.
        raise IndexError("index %d not in this segment" % base_index)
    return int(index)
Get the index into this array's data given the index into the base array.
48,807
def get_style_ranges(self, **kwargs):
    """Return a list of (start, end) pairs matching the given style."""
    bits = self.get_style_bits(**kwargs)
    return self.bool_to_ranges((self.style & bits) == bits)
Return a list of start end pairs that match the specified style
48,808
def fixup_comments(self):
    """Reconcile comment style bits with stored comment text.

    Clears comment style bits on bytes that have no comment text and
    sets them on bytes that do.

    Fix: replaced ``np.alen`` (deprecated in NumPy 1.18, removed in
    1.23) with the builtin ``len``; added parentheses to make the
    bitwise-test precedence explicit.
    """
    style_base = self.rawdata.style_base
    comment_text_indexes = np.asarray(
        list(self.rawdata.extra.comments.keys()), dtype=np.uint32)
    comment_mask = self.get_style_mask(comment=True)
    # Bytes whose style claims a comment is present.
    has_comments = np.where((style_base & comment_bit_mask) > 0)[0]
    both = np.intersect1d(comment_text_indexes, has_comments)
    log.info("fixup comments: %d correctly marked, %d without style, %d empty text" % (
        len(both),
        len(comment_text_indexes) - len(both),
        len(has_comments) - len(both)))
    # Clear all comment bits, then re-mark only bytes with actual text.
    style_base &= comment_mask
    comment_style = self.get_style_bits(comment=True)
    style_base[comment_text_indexes] |= comment_style
Remove any style bytes that are marked as commented but have no comment, and add any style bytes where there's a comment but it isn't marked in the style data.
48,809
def get_entire_style_ranges(self, split_comments=None, **kwargs):
    """Find sections of the segment that share the same style value.

    Returns a list of ``((start, end), masked_style)`` entries.
    Adjacent groups with equal masked style are merged, except for
    commented groups whose masked style appears in ``split_comments``,
    which are always kept as separate ranges.

    Fixes: an empty segment now returns ``[]`` instead of ``None`` so
    callers can always iterate the result; the duplicated merge logic
    of the two branches is consolidated into one helper.
    """
    style_bits = self.get_style_bits(**kwargs)
    matches = self.get_comment_locations(**kwargs)
    # Split into runs of identical style values.
    groups = np.split(matches, np.where(np.diff(matches) != 0)[0] + 1)
    if split_comments is None:
        split_comments = []
    ranges = []
    if len(groups) == 1 and len(groups[0]) == 0:
        return ranges

    def merge_or_append(start, end, masked, last):
        # Extend the previous range when it carries the same style.
        if last == masked and ranges:
            (prev_start, _), _ = ranges.pop()
            ranges.append(((prev_start, end), masked))
        else:
            ranges.append(((start, end), masked))

    last_end = 0
    last_style = -1
    for group in groups:
        next_end = last_end + len(group)
        style = matches[last_end]
        masked_style = style & style_bits
        if (style & comment_bit_mask) and masked_style in split_comments:
            # Requested split-comment styles never merge with neighbors.
            ranges.append(((last_end, next_end), masked_style))
        else:
            merge_or_append(last_end, next_end, masked_style, last_style)
        last_style = masked_style
        last_end = next_end
    return ranges
Find sections of the segment that have the same style value .
48,810
def restore_comments(self, restore_data):
    """Restore comment styles and comment text from ``restore_data``."""
    for start, end, styles, items in restore_data:
        log.debug("range: %d-%d" % (start, end))
        self.style[start:end] = styles
        for i in range(start, end):
            rawindex, comment = items[i]
            if comment:
                log.debug(" restoring comment: rawindex=%d, '%s'" % (rawindex, comment))
                self.rawdata.extra.comments[rawindex] = comment
            else:
                # No comment in the restore data: drop any current one.
                try:
                    del self.rawdata.extra.comments[rawindex]
                    log.debug(" no comment in original data, removed comment in current data at rawindex=%d" % rawindex)
                except KeyError:
                    log.debug(" no comment in original data or current data at rawindex=%d" % rawindex)
Restore comment styles and data
48,811
def copy_user_data(self, source, index_offset=0):
    """Copy comments and other user data from ``source`` into this segment."""
    for src_index, text in source.iter_comments_in_segment():
        self.set_comment_at(src_index + index_offset, text)
Copy comments and other user data from the source segment to this segment .
48,812
def execute_xpath(xpath_string, sql_function, uuid, version):
    """Execute the given xpath SQL function and yield result rows.

    Yields dicts with ``name``/``uuid``/``version``/``xpath_results``
    keys.  A database error is translated into an HTTPBadRequest
    carrying the ``pgerror`` text.

    Fix: dropped the unused ``settings`` local that was fetched from
    the registry but never read.
    """
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            try:
                cursor.execute(SQL[sql_function],
                               {'document_uuid': uuid,
                                'document_version': version,
                                'xpath_string': xpath_string})
            except psycopg2.Error as e:
                exc = httpexceptions.HTTPBadRequest()
                exc.explanation = e.pgerror
                raise exc
            for res in cursor.fetchall():
                yield {'name': res[0],
                       'uuid': res[1],
                       'version': res[2],
                       'xpath_results': res[3]}
Executes either the xpath or xpath-module SQL function with the given input params.
48,813
def tree_to_html(tree):
    """Return the HTML list version of a book tree."""
    root = etree.Element('ul')
    html_listify([tree], root)
    return HTML_WRAPPER.format(etree.tostring(root))
Return html list version of book tree .
48,814
def _get_content_json(ident_hash=None):
    """Return content as a dict from its ident-hash (uuid@version).

    Resolves collection trees (collated or raw), pages inside books
    (redirecting when the page is not in the requested book location)
    and module HTML content.
    """
    request = get_current_request()
    routing_args = request and request.matchdict or {}
    if not ident_hash:
        ident_hash = routing_args['ident_hash']
    as_collated = asbool(request.GET.get('as_collated', True))
    page_ident_hash = routing_args.get('page_ident_hash', '')
    p_id, p_version = None, None
    if page_ident_hash:
        try:
            p_id, p_version = split_ident_hash(page_ident_hash)
        except IdentHashShortId as e:
            p_id = get_uuid(e.id)
            p_version = e.version
        except IdentHashMissingVersion as e:
            p_id = e.id
            p_version = None
    id, version = split_ident_hash(ident_hash, containing=p_id)
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            result = get_content_metadata(id, version, cursor)
            result['canon_url'] = get_canonical_url(result, request)
            if result['mediaType'] == COLLECTION_MIMETYPE:
                # Book: attach the (possibly collated) tree.
                result['tree'] = get_tree(ident_hash, cursor,
                                          as_collated=as_collated)
                result['collated'] = as_collated
                if not result['tree']:
                    # Fall back to the raw (non-collated) tree.
                    result['tree'] = get_tree(ident_hash, cursor)
                    result['collated'] = False
                if page_ident_hash:
                    for id_ in flatten_tree_to_ident_hashes(result['tree']):
                        id, version = split_ident_hash(id_)
                        if id == p_id and (version == p_version or
                                           not p_version):
                            content = None
                            if as_collated:
                                content = get_collated_content(
                                    id_, ident_hash, cursor)
                            if content:
                                result = get_content_metadata(
                                    id, version, cursor)
                                result['canon_url'] = get_canonical_url(
                                    result, request)
                                result['content'] = content[:]
                                return result
                            # Page is in the book but has no collated
                            # content: redirect to the bare page.
                            raise httpexceptions.HTTPFound(
                                request.route_path(
                                    request.matched_route.name,
                                    _query=request.params,
                                    ident_hash=join_ident_hash(id, version),
                                    ext=routing_args['ext']),
                                headers=[("Cache-Control",
                                          "max-age=60, public")])
                    raise httpexceptions.HTTPNotFound()
            else:
                result = get_content_metadata(id, version, cursor)
                result['canon_url'] = get_canonical_url(result, request)
                args = dict(id=id, version=result['version'],
                            filename='index.cnxml.html')
                cursor.execute(SQL['get-resource-by-filename'], args)
                try:
                    content = cursor.fetchone()[0]
                except (TypeError, IndexError,):
                    logger.debug("module found, but "
                                 "'index.cnxml.html' is missing.")
                    raise httpexceptions.HTTPNotFound()
                result['content'] = content[:]
    return result
Return content as a dict from its ident-hash (uuid@version).
48,815
def get_content_json(request):
    """Retrieve content as JSON using its ident-hash."""
    result = _get_content_json()
    response = request.response
    response.status = "200 OK"
    response.content_type = 'application/json'
    response.body = json.dumps(result)
    return result, response
Retrieve content as JSON using the ident - hash ( uuid
48,816
def get_content_html(request):
    """Retrieve content as HTML using its ident-hash."""
    result = _get_content_json()
    # Books render their tree as a nested list; modules already have HTML.
    if result['mediaType'] == COLLECTION_MIMETYPE:
        body = tree_to_html(result['tree'])
    else:
        body = result['content']
    response = request.response
    response.body = body
    response.status = "200 OK"
    response.content_type = 'application/xhtml+xml'
    return result, response
Retrieve content as HTML using the ident - hash ( uuid
48,817
def html_listify(tree, root_ul_element, parent_id=None):
    """Recursively build a nested HTML list from a book tree.

    External callers should omit ``parent_id``; it is derived on the
    first level and threaded through the recursive calls.
    """
    request = get_current_request()
    is_first_node = parent_id is None
    if is_first_node:
        parent_id = tree[0]['id']
    for node in tree:
        li_elm = etree.SubElement(root_ul_element, 'li')
        a_elm = etree.SubElement(li_elm, 'a')
        a_elm.text = node['title']
        if node['id'] != 'subcol':
            if is_first_node:
                href = request.route_path('content',
                                          ident_hash=node['id'],
                                          ext='.html')
            else:
                href = request.route_path('content', separator=':',
                                          ident_hash=parent_id,
                                          page_ident_hash=node['id'],
                                          ext='.html')
            a_elm.set('href', href)
        if 'contents' in node:
            sub_ul = etree.SubElement(li_elm, 'ul')
            html_listify(node['contents'], sub_ul, parent_id)
Recursively construct HTML nested list version of book tree . The original caller should not call this function with the parent_id defined .
48,818
def get_export_allowable_types(cursor, exports_dirs, id, version):
    """Yield export-type descriptors available for a piece of content."""
    request = get_current_request()
    type_settings = request.registry.settings['_type_info']
    type_names = [name for name, _ in type_settings]
    file_tuples = get_export_files(cursor, id, version, type_names,
                                   exports_dirs, read_file=False)
    type_infos = dict(type_settings)
    for (filename, mimetype, file_size, file_created,
         state, file_content) in file_tuples:
        type_name = filename.rsplit('.', 1)[-1]
        type_info = type_infos[type_name]
        yield {
            'format': type_info['user_friendly_name'],
            'filename': filename,
            'size': file_size,
            'created': file_created and file_created.isoformat() or None,
            'state': state,
            'details': type_info['description'],
            'path': request.route_path(
                'export', ident_hash=join_ident_hash(id, version),
                type=type_name, ignore=u'/{}'.format(filename)),
        }
Return export types .
48,819
# Return information about a given book (404 if the page is not in it).
# NOTE(review): `sql_statement` is read before it is ever assigned below
# (`real_dict_cursor.execute(sql_statement, ...)`), which raises
# UnboundLocalError at runtime, and the assignment then stores execute()'s
# return value (None) rather than SQL text.  The SQL query string appears
# to have been lost from this source; restore it before shipping.
def get_book_info ( cursor , real_dict_cursor , book_id , book_version , page_id , page_version ) : book_ident_hash = join_ident_hash ( book_id , book_version ) page_ident_hash = join_ident_hash ( page_id , page_version ) tree = get_tree ( book_ident_hash , cursor ) if not tree or page_ident_hash not in flatten_tree_to_ident_hashes ( tree ) : raise httpexceptions . HTTPNotFound ( ) sql_statement = real_dict_cursor . execute ( sql_statement , vars = ( book_ident_hash , ) ) return real_dict_cursor . fetchone ( )
Return information about a given book .
48,820
# Return the module's portal_type, or None when the module is unknown.
# NOTE(review): as in get_book_info, `sql_statement` is referenced before
# assignment (`cursor.execute(sql_statement, ...)`) -- the actual SQL text
# appears to have been lost from this source and must be restored; the
# current code raises UnboundLocalError at runtime.
def get_portal_type ( cursor , id , version ) : args = join_ident_hash ( id , version ) sql_statement = cursor . execute ( sql_statement , vars = ( args , ) ) res = cursor . fetchone ( ) if res is None : return None else : return res [ 0 ]
Return the module's portal_type.
48,821
def get_books_containing_page(cursor, uuid, version,
                              context_uuid=None, context_version=None):
    """Return books (name/UUID rows) that contain the given page."""
    with db_connect() as db_connection:
        factory = psycopg2.extras.RealDictCursor
        with db_connection.cursor(cursor_factory=factory) as real_dict_cursor:
            if context_uuid and context_version:
                # An explicit book context was supplied; describe just it.
                return [get_book_info(cursor, real_dict_cursor,
                                      context_uuid, context_version,
                                      uuid, version)]
            if get_portal_type(cursor, uuid, version) == 'Module':
                real_dict_cursor.execute(
                    SQL['get-books-containing-page'],
                    {'document_uuid': uuid,
                     'document_version': version})
                return real_dict_cursor.fetchall()
            # Books are not contained within other books.
            return []
Return a list of book names and UUIDs that contain a given module UUID .
48,822
def get_canonical_url(metadata, request):
    """Build the canonical in-book URL from a page's metadata.

    Fixes: ``request.host`` and the ``archive.`` prefix are plain text,
    not regular expressions -- the original passed them to ``re.sub``,
    where an unescaped ``.`` matches any character and a host containing
    regex metacharacters could mis-substitute.  Replaced with literal
    string operations.
    """
    slug_title = u'/{}'.format('-'.join(metadata['title'].split()))
    settings = get_current_registry().settings
    canon_host = settings.get('canonical-hostname',
                              re.sub(r'archive\.', '', request.host))
    if metadata['canonical'] is None:
        canon_url = request.route_url('content',
                                      ident_hash=metadata['id'],
                                      ignore=slug_title)
    else:
        # Canonical location is a page inside another (canonical) book.
        canon_url = request.route_url('content',
                                      ident_hash=metadata['canonical'],
                                      separator=':',
                                      page_ident_hash=metadata['id'],
                                      ignore=slug_title)
    return canon_url.replace(request.host, canon_host)
Builds the canonical in-book URL from a page's metadata.
48,823
def map_indices_child2parent(child, child_indices):
    """Map child RTDCBase event indices to parent RTDCBase indices."""
    # Parent indices of the events that survive into the child; the
    # position within this array is the child index.
    visible_in_parent = np.where(child.hparent.filter.all)[0]
    return visible_in_parent[child_indices]
Map child RTDCBase event indices to parent RTDCBase
48,824
def map_indices_child2root(child, child_indices):
    """Map RTDC_Hierarchy event indices up to the root RTDCBase."""
    node = child
    indices = child_indices
    while True:
        # One hop up the hierarchy per iteration.
        indices = map_indices_child2parent(child=node, child_indices=indices)
        if not isinstance(node.hparent, RTDC_Hierarchy):
            break
        node = node.hparent
    return indices
Map RTDC_Hierarchy event indices to root RTDCBase
48,825
def map_indices_parent2child(child, parent_indices):
    """Map parent RTDCBase event indices to RTDC_Hierarchy (child) indices.

    Parent indices that are filtered out of the child are silently
    dropped, matching the original loop's behaviour.

    Fix: the original tested ``ii in parent_indices`` once per parent
    event (O(n*m)); this vectorized form is a single O(n) pass.
    """
    pf = child.hparent.filter.all
    # Parent indices of the surviving events, in order; the position
    # within this array is the corresponding child index.
    visible = np.where(pf)[0]
    return np.where(np.isin(visible, parent_indices))[0]
Map parent RTDCBase event indices to RTDC_Hierarchy
48,826
def map_indices_root2child(child, root_indices):
    """Map root RTDCBase event indices down to the child RTDCBase."""
    # Collect the chain of hierarchy ancestors, child first.
    chain = [child]
    node = child
    while isinstance(node.hparent, RTDC_Hierarchy):
        node = node.hparent
        chain.append(node)
    # Walk from the root-most ancestor back down to `child`.
    indices = root_indices
    for level in reversed(chain):
        indices = map_indices_parent2child(child=level,
                                           parent_indices=indices)
    return indices
Map root RTDCBase event indices to child RTDCBase
48,827
def apply_manual_indices(self, manual_indices):
    """Write to self.manual: exclude the given root event indices."""
    if self.parent_changed:
        msg = ("Cannot apply filter, because parent changed: "
               + "dataset {}. ".format(self.rtdc_ds)
               + "Run `RTDC_Hierarchy.apply_filter()` first!")
        raise HierarchyFilterError(msg)
    self._man_root_ids = list(manual_indices)
    cidx = map_indices_root2child(child=self.rtdc_ds,
                                  root_indices=manual_indices)
    if len(cidx):
        self.manual[cidx] = False
Write to self . manual
48,828
# Read from self.manual and reconcile it with the cached root indices.
# pbool: root indices currently excluded via self.manual; pold: previously
# cached exclusions; phid: cached exclusions no longer visible in the child.
# NOTE(review): the flattened source makes the indentation of the final
# `return` ambiguous -- it presumably sits at function level (so the cached
# `_man_root_ids` is returned unchanged when `parent_changed` is set), but
# confirm against the original layout before restructuring.
def retrieve_manual_indices ( self ) : if self . parent_changed : pass else : pbool = map_indices_child2root ( child = self . rtdc_ds , child_indices = np . where ( ~ self . manual ) [ 0 ] ) . tolist ( ) pold = self . _man_root_ids pall = sorted ( list ( set ( pbool + pold ) ) ) pvis_c = map_indices_root2child ( child = self . rtdc_ds , root_indices = pall ) . tolist ( ) pvis_p = map_indices_child2root ( child = self . rtdc_ds , child_indices = pvis_c ) . tolist ( ) phid = list ( set ( pall ) - set ( pvis_p ) ) all_idx = list ( set ( pbool + phid ) ) self . _man_root_ids = sorted ( all_idx ) return self . _man_root_ids
Read from self . manual
48,829
def apply_filter(self, *args, **kwargs):
    """Overridden apply_filter performing hierarchy-child bookkeeping."""
    if self.filter is not None:
        # Preserve manually-excluded root indices before the parent refilters.
        self.filter.retrieve_manual_indices()
    self.hparent.apply_filter()
    event_count = np.sum(self.hparent._filter)
    self._events = {}
    self._events["index"] = np.arange(1, event_count + 1)
    # Re-create lazy child views for the image-like features present.
    if "contour" in self.hparent:
        self._events["contour"] = ChildContour(self)
    if "image" in self.hparent:
        self._events["image"] = ChildImage(self)
    if "mask" in self.hparent:
        self._events["mask"] = ChildMask(self)
    if "trace" in self.hparent:
        traces = {}
        for flname in dfn.FLUOR_TRACES:
            if flname in self.hparent["trace"]:
                traces[flname] = ChildTrace(self, flname)
        self._events["trace"] = traces
    self.config["experiment"]["event count"] = event_count
    self._init_filters()
    super(RTDC_Hierarchy, self).apply_filter(*args, **kwargs)
Overridden apply_filter to perform tasks for hierarchy child
48,830
def hash(self):
    """Return a hash that changes whenever the parent or its filter does."""
    parent_hash = self.hparent.hash
    filter_hash = hashobj(self.hparent._filter)
    return hashobj(parent_hash + filter_hash)
The hash of a hierarchy child changes if the parent changes.
48,831
def find_video_file(rtdc_dataset):
    """Try to find a video file that belongs to an RT-DC dataset.

    Returns the path of the best matching ``*.avi`` file, or None.
    """
    if not rtdc_dataset._fdir.exists():
        return None
    meas_id = rtdc_dataset._mid
    candidates = sorted(v.name for v in rtdc_dataset._fdir.rglob("*.avi")
                        if v.name.split("_")[0] == meas_id)
    if not candidates:
        return None
    # Default to the alphabetically first video, but prefer files
    # ending in "imag.avi" or "imaq.avi" when one exists.
    video = candidates[0]
    for name in candidates:
        if name.endswith("imag.avi") or name.endswith("imaq.avi"):
            video = name
            break
    return rtdc_dataset._fdir / video
Tries to find a video file that belongs to an RTDC dataset
48,832
def as_new_format(self, format="ATR"):
    """Create a new disk image in the specified format."""
    # Strip our header, then wrap the payload with an ATR header.
    payload = self.rawdata[len(self.header):]
    return self.__class__(SegmentData(add_atr_header(payload)))
Create a new disk image in the specified format
48,833
def set_source(self, source):
    """Select the given input source and remember it."""
    keystrokes = Navigator(source=self.source).navigate_to_source(source)
    self.source = source
    return self._send_keystroke(keystrokes, wait=True)
Selects and saves source .
48,834
def find_migrations_directory():
    """Return the location of the database migrations directory.

    Used from a setuptools entry point for db-migrator.
    """
    return os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'sql/migrations')
Finds and returns the location of the database migrations directory . This function is used from a setuptools entry - point for db - migrator .
48,835
def declare_api_routes(config):
    """Declare all API routes using a custom pregenerator."""

    def pregenerator(path):
        # Default every {placeholder} to '' so generated URLs keep their shape.
        variables = [(token.split(':')[0], '')
                     for token in path.split('{')[1:]]

        def wrapper(request, elements, kwargs):
            modified_kwargs = dict(variables)
            modified_kwargs.update(kwargs)
            return elements, modified_kwargs
        return wrapper

    def add_route(name, path, *args, **kwargs):
        return config.add_route(name, path, *args,
                                pregenerator=pregenerator(path), **kwargs)

    add_route('content', '/contents/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}{ignore:(/[^/.]*?/?)?}{ext:([.](html|json))?}')
    add_route('resource', '/resources/{hash}{ignore:(/.*)?}')
    add_route('export', '/exports/{ident_hash}.{type}{ignore:(/.*)?}')
    add_route('extras', '/extras{key:(/(featured|messages|licenses|subjects|languages))?}')
    add_route('content-extras', '/extras/{ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)}{separator:(:?)}{page_ident_hash:([^:/@.]+(@[0-9.]*[0-9]+)?)?}')
    add_route('search', '/search')
    add_route('in-book-search', '/search/{ident_hash:([^:/]+)}')
    add_route('in-book-search-page', '/search/{ident_hash:([^:/]+)}:{page_ident_hash}')
    add_route('sitemap-index', '/sitemap_index.xml')
    add_route('sitemap', '/sitemap-{from_id}.xml')
    add_route('robots', '/robots.txt')
    add_route('legacy-redirect', '/content/{objid}{ignore:(/)?}')
    add_route('legacy-redirect-latest', '/content/{objid}/latest{ignore:(/)?}{filename:(.+)?}')
    add_route('legacy-redirect-w-version', '/content/{objid}/{objver}{ignore:(/)?}{filename:(.+)?}')
    add_route('recent', '/feeds/recent.rss')
    add_route('oai', '/feeds/oai')
    add_route('xpath', '/xpath.html')
    add_route('xpath-json', '/xpath.json')
Declare routes with a custom pregenerator .
48,836
def declare_type_info(config):
    """Parse export type info from configuration into settings['_type_info'].

    Each non-blank line has the shape
    ``name:extension,mimetype,friendly name,description``.
    """
    settings = config.registry.settings
    settings['_type_info'] = []
    for line in settings['exports-allowable-types'].splitlines():
        line = line.strip()
        if not line:
            continue
        type_name, remainder = line.split(':', 1)
        parts = remainder.split(',', 3)
        settings['_type_info'].append((type_name, {
            'type_name': type_name,
            'file_extension': parts[0],
            'mimetype': parts[1],
            'user_friendly_name': parts[2],
            'description': parts[3],
        }))
Lookup type info from app configuration .
48,837
def main(global_config, **settings):
    """Main WSGI application factory."""
    initialize_sentry_integration()
    config = Configurator(settings=settings)
    declare_api_routes(config)
    declare_type_info(config)

    config.include('pyramid_jinja2')
    config.add_jinja2_renderer('.rss')
    config.add_jinja2_renderer('.xml')

    # Fail fast when a required setting is absent or empty.
    for setting in ['exports-directories', 'exports-allowable-types']:
        if not settings.get(setting, None):
            raise ValueError('Missing {} config setting.'.format(setting))

    config.scan(ignore='.tests')
    config.include('cnxarchive.events.main')
    config.add_tween('cnxarchive.tweens.conditional_http_tween_factory')
    return config.make_wsgi_app()
Main WSGI application factory .
48,838
def _init_lsr ( n_items , alpha , initial_params ) : if initial_params is None : weights = np . ones ( n_items ) else : weights = exp_transform ( initial_params ) chain = alpha * np . ones ( ( n_items , n_items ) , dtype = float ) return weights , chain
Initialize the LSR Markov chain and the weights .
48,839
def _ilsr(fun, params, max_iter, tol):
    """Iteratively refine LSR estimates until convergence.

    Raises:
        RuntimeError: when `max_iter` iterations pass without converging.
    """
    converged = NormOfDifferenceTest(tol, order=1)
    for _iteration in range(max_iter):
        params = fun(initial_params=params)
        if converged(params):
            return params
    raise RuntimeError("Did not converge after {} iterations".format(max_iter))
Iteratively refine LSR estimates until convergence .
48,840
def lsr_pairwise_dense(comp_mat, alpha=0.0, initial_params=None):
    """Compute the LSR estimate of model parameters from a dense matrix."""
    n_items = comp_mat.shape[0]
    weights, chain = _init_lsr(n_items, alpha, initial_params)
    # pairwise_sums[i, j] holds weights[j]; its transpose holds weights[i].
    pairwise_sums = np.tile(weights, (n_items, 1))
    chain += comp_mat.T / (pairwise_sums + pairwise_sums.T)
    chain -= np.diag(chain.sum(axis=1))
    return log_transform(statdist(chain))
Compute the LSR estimate of model parameters given dense data .
48,841
def ilsr_pairwise_dense(comp_mat, alpha=0.0, initial_params=None,
                        max_iter=100, tol=1e-8):
    """Compute the ML estimate of model parameters given dense data."""
    fixed_point = functools.partial(lsr_pairwise_dense,
                                    comp_mat=comp_mat, alpha=alpha)
    return _ilsr(fixed_point, initial_params, max_iter, tol)
Compute the ML estimate of model parameters given dense data .
48,842
def rank_centrality(n_items, data, alpha=0.0):
    """Compute the Rank Centrality estimate of model parameters."""
    _, chain = _init_lsr(n_items, alpha, None)
    for winner, loser in data:
        chain[loser, winner] += 1.0
    # Turn win counts into pairwise win fractions.
    positive = chain > 0
    chain[positive] = chain[positive] / (chain + chain.T)[positive]
    chain -= np.diag(chain.sum(axis=1))
    return log_transform(statdist(chain))
Compute the Rank Centrality estimate of model parameters .
48,843
# Stable computation of log(Phi(z)) (Normal CDF) and its derivative.
# Three regimes: a polynomial series for |z| near zero (z*z < 0.0492), an
# asymptotic rational approximation for the far-left tail (z < -11.3137)
# where normal_cdf underflows, and the direct log(normal_cdf(z)) otherwise.
# NOTE(review): CS, RS, QS, SQRT2, SQRT2PI and normal_cdf are module-level
# constants/helpers defined elsewhere; the specific cutoffs and coefficients
# are presumably taken from a published approximation -- verify against the
# library's reference before altering.
def _log_phi ( z ) : if z * z < 0.0492 : coef = - z / SQRT2PI val = functools . reduce ( lambda acc , c : coef * ( c + acc ) , CS , 0 ) res = - 2 * val - log ( 2 ) dres = exp ( - ( z * z ) / 2 - res ) / SQRT2PI elif z < - 11.3137 : num = functools . reduce ( lambda acc , r : - z * acc / SQRT2 + r , RS , 0.5641895835477550741 ) den = functools . reduce ( lambda acc , q : - z * acc / SQRT2 + q , QS , 1.0 ) res = log ( num / ( 2 * den ) ) - ( z * z ) / 2 dres = abs ( den / num ) * sqrt ( 2.0 / pi ) else : res = log ( normal_cdf ( z ) ) dres = exp ( - ( z * z ) / 2 - res ) / SQRT2PI return res , dres
Stable computation of the log of the Normal CDF and its derivative .
48,844
def _init_ws(n_items, comparisons, prior_inv, tau, nu):
    """Initialize parameters in the weight space."""
    prec = np.zeros((n_items, n_items))
    xs = np.zeros(n_items)
    for i, (item_a, item_b) in enumerate(comparisons):
        # Accumulate this comparison's 2x2 precision contribution.
        rows = (item_a, item_a, item_b, item_b)
        cols = (item_a, item_b, item_a, item_b)
        prec[rows, cols] += tau[i] * MAT_ONE_FLAT
        xs[item_a] += nu[i]
        xs[item_b] -= nu[i]
    cov = inv_posdef(prior_inv + prec)
    mean = cov.dot(xs)
    return mean, cov, xs, prec
Initialize parameters in the weight space .
48,845
def exp_transform(params):
    """Transform parameters into exp-scale weights summing to len(params)."""
    centered = np.asarray(params) - np.mean(params)
    weights = np.exp(centered)
    return weights * (len(weights) / weights.sum())
Transform parameters into exp - scale weights .
48,846
def softmax(xs):
    """Numerically stable softmax (shifts by the max before exponentiating)."""
    shifted = np.exp(xs - np.max(xs))
    return shifted / shifted.sum(axis=0)
Stable implementation of the softmax function .
48,847
def inv_posdef(mat):
    """Stable inverse of a positive-definite matrix via Cholesky."""
    chol = np.linalg.cholesky(mat)
    # Solve L X = I for X = L^{-1}; then inv(mat) = X^T X.
    chol_inv = solve_triangular(chol, np.eye(mat.shape[0]),
                                lower=True, overwrite_b=True)
    return chol_inv.T.dot(chol_inv)
Stable inverse of a positive definite matrix .
48,848
def footrule_dist(params1, params2=None):
    r"""Compute Spearman's footrule distance between two models.

    When ``params2`` is omitted, compares against the identity
    ranking 1..n.
    """
    assert params2 is None or len(params1) == len(params2)
    ranks1 = rankdata(params1, method="average")
    ranks2 = (np.arange(1, len(params1) + 1, dtype=float)
              if params2 is None
              else rankdata(params2, method="average"))
    return np.sum(np.abs(ranks1 - ranks2))
Compute Spearman's footrule distance between two models.
48,849
def kendalltau_dist(params1, params2=None):
    r"""Compute the Kendall tau distance between two models.

    When ``params2`` is omitted, compares against the identity
    ranking 1..n.
    """
    assert params2 is None or len(params1) == len(params2)
    ranks1 = rankdata(params1, method="ordinal")
    ranks2 = (np.arange(1, len(params1) + 1, dtype=float)
              if params2 is None
              else rankdata(params2, method="ordinal"))
    tau, _ = kendalltau(ranks1, ranks2)
    n_items = len(params1)
    n_pairs = n_items * (n_items - 1) / 2
    # Convert the correlation back into a count of discordant pairs.
    return round((n_pairs - n_pairs * tau) / 2)
r Compute the Kendall tau distance between two models .
48,850
def rmse(params1, params2):
    r"""Compute the root-mean-squared error between two (centered) models."""
    assert len(params1) == len(params2)
    # Center both parameter vectors before comparing.
    centered1 = np.asarray(params1) - np.mean(params1)
    centered2 = np.asarray(params2) - np.mean(params2)
    return np.linalg.norm(centered1 - centered2, ord=2) / math.sqrt(len(centered1))
r Compute the root - mean - squared error between two models .
48,851
def statdist(generator):
    """Compute the stationary distribution of a Markov chain.

    The generator matrix is singular by construction; the last LU pivot
    is dropped and the final coordinate fixed to 1 before renormalizing
    so the result sums to ``n``.

    Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception``.
    """
    generator = np.asarray(generator)
    n = generator.shape[0]
    with warnings.catch_warnings():
        # LU of a singular generator emits expected warnings; silence them.
        warnings.filterwarnings("ignore")
        lu, piv = spl.lu_factor(generator.T, check_finite=False)
        # The last row of the factorization is ~0; solve the reduced system.
        left = lu[:-1, :-1]
        right = -lu[:-1, -1]
        try:
            res = spl.solve_triangular(left, right, check_finite=False)
        except Exception:
            raise ValueError(
                "stationary distribution could not be computed. "
                "Perhaps the Markov chain has more than one absorbing class?")
    res = np.append(res, 1.0)
    return (n / res.sum()) * res
Compute the stationary distribution of a Markov chain .
48,852
def generate_params(n_items, interval=5.0, ordered=False):
    r"""Generate random, zero-mean model parameters."""
    draws = np.random.uniform(low=0, high=interval, size=n_items)
    if ordered:
        draws.sort()
    # Center so the parameters sum to zero.
    return draws - draws.mean()
r Generate random model parameters .
48,853
def compare(items, params, rank=False):
    """Draw a comparison outcome following Luce's axiom.

    Returns a full random ranking of ``items`` when ``rank`` is true,
    otherwise a single sampled winner.
    """
    probs = probabilities(items, params)
    if rank:
        return np.random.choice(items, size=len(items),
                                replace=False, p=probs)
    return np.random.choice(items, p=probs)
Generate a comparison outcome that follows Luce's axiom.
48,854
def probabilities(items, params):
    """Return outcome probabilities for the given subset of items."""
    return softmax(np.asarray(params).take(items))
Compute the comparison outcome probabilities given a subset of items .
48,855
def from_rankings(cls, data, penalty):
    """Alternative constructor turning rankings into top-1 observations."""
    top1 = []
    for ranking in data:
        # Each item "wins" over everything ranked after it.
        for pos, winner in enumerate(ranking[:-1]):
            top1.append((winner, ranking[pos + 1:]))
    return cls(top1, penalty)
Alternative constructor for ranking data .
48,856
def _mm(n_items, data, initial_params, alpha, max_iter, tol, mm_fun):
    """Iteratively refine MM estimates until convergence.

    Raises:
        RuntimeError: when `max_iter` iterations pass without converging.
    """
    params = np.zeros(n_items) if initial_params is None else initial_params
    converged = NormOfDifferenceTest(tol=tol, order=1)
    for _iteration in range(max_iter):
        nums, denoms = mm_fun(n_items, data, params)
        params = log_transform((nums + alpha) / (denoms + alpha))
        if converged(params):
            return params
    raise RuntimeError("Did not converge after {} iterations".format(max_iter))
Iteratively refine MM estimates until convergence .
48,857
def _mm_pairwise(n_items, data, params):
    """Inner loop of the MM algorithm for pairwise data."""
    weights = exp_transform(params)
    wins = np.zeros(n_items, dtype=float)
    denoms = np.zeros(n_items, dtype=float)
    for winner, loser in data:
        wins[winner] += 1.0
        contribution = 1.0 / (weights[winner] + weights[loser])
        denoms[winner] += contribution
        denoms[loser] += contribution
    return wins, denoms
Inner loop of MM algorithm for pairwise data .
48,858
def _mm_rankings(n_items, data, params):
    """Inner loop of the MM algorithm for ranking data."""
    weights = exp_transform(params)
    wins = np.zeros(n_items, dtype=float)
    denoms = np.zeros(n_items, dtype=float)
    for ranking in data:
        # Running total weight of the items still in contention.
        remaining = weights.take(ranking).sum()
        for pos, winner in enumerate(ranking[:-1]):
            wins[winner] += 1
            contribution = 1.0 / remaining
            for item in ranking[pos:]:
                denoms[item] += contribution
            remaining -= weights[winner]
    return wins, denoms
Inner loop of MM algorithm for ranking data .
48,859
def _mm_top1(n_items, data, params):
    """Inner loop of the MM algorithm for top-1 data."""
    weights = exp_transform(params)
    wins = np.zeros(n_items, dtype=float)
    denoms = np.zeros(n_items, dtype=float)
    for winner, losers in data:
        wins[winner] += 1
        contribution = 1 / (weights.take(losers).sum() + weights[winner])
        for item in itertools.chain([winner], losers):
            denoms[item] += contribution
    return wins, denoms
Inner loop of MM algorithm for top1 data .
48,860
def _choicerank(n_items, data, params):
    """Inner loop of the ChoiceRank algorithm."""
    weights = exp_transform(params)
    adj, adj_t, traffic_in, traffic_out = data
    zs = adj.dot(weights)
    # Nodes with no outgoing traffic produce 0/0; silence the warning.
    with np.errstate(invalid="ignore"):
        denoms = adj_t.dot(traffic_out / zs)
    return traffic_in, denoms
Inner loop of ChoiceRank algorithm .
48,861
def choicerank(digraph, traffic_in, traffic_out, weight=None,
        initial_params=None, alpha=1.0, max_iter=10000, tol=1e-8):
    """Compute the MAP estimate of a network choice model's parameters.

    Builds the (sparse) adjacency matrix of *digraph* and runs the MM loop
    with the ChoiceRank inner step.
    """
    import networkx as nx
    n_items = len(digraph)
    node_order = np.arange(n_items)
    adj = nx.to_scipy_sparse_matrix(digraph, nodelist=node_order, weight=weight)
    adj_t = adj.T.tocsr()
    data = (adj, adj_t, np.asarray(traffic_in), np.asarray(traffic_out))
    return _mm(n_items, data, initial_params, alpha, max_iter, tol, _choicerank)
Compute the MAP estimate of a network choice model's parameters.
48,862
def decorate(cls, function):
    """Wrap *function* so every call is validated.

    Functions already validated, marked with ``typesafety_skip``, or needing
    no argument/return checks are returned unchanged.
    """
    if cls.is_function_validated(function) or getattr(function, 'typesafety_skip', False):
        return function
    validator = cls(function)
    if not (validator.need_validate_arguments or validator.need_validate_return_value):
        return function

    @functools.wraps(function)
    def __wrapper(*args, **kwargs):
        return validator(*args, **kwargs)

    __wrapper.__validator__ = validator
    return __wrapper
Decorate a function so the function call is checked whenever a call is made . The calls that do not need any checks are skipped .
48,863
def undecorate(cls, function):
    """Return the original, unvalidated function behind *function*.

    Non-validated functions are passed through untouched.
    """
    if not cls.is_function_validated(function):
        return function
    return cls.get_function_validator(function).function
Remove validator decoration from a function .
48,864
def validate_arguments(self, locals_dict):
    """Check every argument against its annotation.

    Raises TypesafetyError on the first argument that fails validation.
    """
    for key, value, validator in self.__map_arguments(locals_dict):
        if self.__is_valid(value, validator):
            continue
        annotation = self.__argument_annotation.get(key)
        message = self.ARG_TYPE_ERROR_MESSAGE.format(
            repr(key),
            self.__function.__name__,
            self.__format_expectation(annotation),
            value.__class__.__name__,
        )
        raise TypesafetyError(message)
Validate the arguments passed to a function. If validation fails, a TypesafetyError is raised.
48,865
def validate_return_value(self, retval):
    """Check the return value against the return annotation.

    No-op when there is no return annotation; raises TypesafetyError when
    the value fails validation.
    """
    annotation = self.__return_annotation
    if annotation is None:
        return
    if not self.__is_valid(retval, annotation):
        msg = self.RET_TYPE_ERROR_MESSAGE.format(
            self.__function.__name__,
            self.__format_expectation(annotation),
            retval.__class__.__name__,
        )
        raise TypesafetyError(msg)
Validate the return value of a function call. If validation fails, a TypesafetyError is raised.
48,866
def find_module(self, fullname, path=None):
    """Python meta-import hook: return a ModuleLoader for accepted modules.

    Returns None (declining to load) when the configured filter rejects
    *fullname*.
    """
    if self.__filter is not None and not self.__filter(fullname):
        return None
    return ModuleLoader(self, fullname, path)
Find the module . Required for the Python meta - loading mechanism .
48,867
def load_module(self, loader):
    """Python meta-import hook: load, register, and auto-decorate the module."""
    source_file, pathname, description = loader.info
    module = imp.load_module(loader.fullname, source_file, pathname, description)
    sys.modules[loader.fullname] = module
    self.__loaded_modules.add(loader.fullname)
    # Apply the typesafety decorator to everything in the freshly loaded module.
    autodecorator.decorate_module(module, decorator=self.__decorator)
    return module
Load the module . Required for the Python meta - loading mechanism .
48,868
def after_commit_listener(self, session):
    """Process queued indexing orders after a commit.

    New and dirty items are (re)indexed and deleted items are removed from
    the index — via a redis job queue when available, synchronously
    otherwise. The per-class pending sets are cleared afterwards.
    """
    # Fixed typo in the original log message ("Commiting").
    log.info('Committing indexing orders for session %s' % session)
    try:
        # Hoist the repeated per-class set lookups.
        new = session.index_new[self.cls_name]
        dirty = session.index_dirty[self.cls_name]
        deleted = session.index_deleted[self.cls_name]
        if not any((new, dirty, deleted)):
            return
        if session.redis is not None:
            self._queue_job(session.redis, self.settings['redis.queue_name'],
                            self.index_operation_name, new, dirty, deleted,
                            self.settings)
        else:
            log.info('Redis not found, falling back to indexing synchronously without redis')
            self.index_operation(new, dirty, deleted, self.settings)
        new.clear()
        dirty.clear()
        deleted.clear()
    except AttributeError:
        log.warning('Trying to commit indexing orders, but indexing sets are not present.')
Processing the changes . All new or changed items are now indexed . All deleted items are now removed from the index .
48,869
def after_rollback_listener(self, session):
    """Discard queued indexing orders when the transaction is rolled back."""
    log.info('Removing indexing orders.')
    try:
        for pending in (session.index_new, session.index_dirty, session.index_deleted):
            pending[self.cls_name].clear()
    except (AttributeError, KeyError):
        log.warning('Trying to remove indexing orders, but indexing sets are not present.')
Rolling back the transaction discards the pending indexing orders. When the transaction is terminated, the queued indexing assignments are reset.
48,870
def parse_sort_string(sort):
    """Parse a comma-separated sort string into an elasticsearch sort list.

    A leading '-' means descending, a leading '+' (or nothing) ascending.
    Relevance ('_score') is always appended; None yields relevance only.
    """
    if sort is None:
        return ['_score']
    sortlist = []
    for entry in sort.rsplit(','):
        entry = entry.strip()
        descending = entry[0:1] == '-'
        field = entry[1:] if entry[0:1] in ('-', '+') else entry
        sortlist.append({
            field.strip(): {
                "order": 'desc' if descending else 'asc',
                "unmapped_type": "string",
                "missing": "_last",
            }
        })
    sortlist.append('_score')
    return sortlist
Parse a sort string for use with elasticsearch
48,871
def parse_filter_params(query_params, filterable):
    """Extract the filterable entries from *query_params*.

    ``query_params.mixed()`` merges multiple values for one key into a list.
    Keys not listed in *filterable* are dropped. Returns {} when
    *query_params* is None.
    """
    if query_params is None:
        return {}
    # The original re-built the merged dict on every iteration by calling
    # mixed() per key; call it once instead.
    mixed = query_params.mixed()
    return {key: value for key, value in mixed.items() if key in filterable}
Parse query_params into a filter params dict. Merge multiple values for one key into a list. Filter out keys that aren't filterable.
48,872
def init_atom_feed(self, feed):
    """Build a ``feedgen.feed.FeedGenerator`` pre-populated for *feed*.

    Sets the id/self-link from the feed route, the language, and links to
    the previous/next sibling feeds.
    """
    atom_feed = FeedGenerator()
    atom_feed.id(id=self.request.route_url(self.get_atom_feed_url, id=feed.id))
    atom_feed.link(
        href=self.request.route_url(self.get_atom_feed_url, id=feed.id),
        rel='self')
    atom_feed.language('nl-BE')
    for sibling in ('previous', 'next'):
        self.link_to_sibling(feed, sibling, atom_feed)
    return atom_feed
Initializing an atom feed feedgen . feed . FeedGenerator given a feed object
48,873
def _generate_atom_feed ( self , feed ) : atom_feed = self . init_atom_feed ( feed ) atom_feed . title ( "Feed" ) return atom_feed
A function returning a feed like feedgen . feed . FeedGenerator . The function can be overwritten when used in other applications .
48,874
def from_private_key(account_name, private_key=None, private_key_path=None,
        storage=None, storage_path=None, api_version="v3", readonly=False,
        http_client=None, ga_hook=None):
    """Create an analytics client authenticated as a service account.

    Either *private_key* or *private_key_path* (a path string or an open
    file) must be supplied; raises GapyError otherwise.
    """
    if not private_key:
        if not private_key_path:
            raise GapyError("Must provide either a private_key or a private_key_file")
        # NOTE(review): `basestring` implies Python 2 — confirm target runtime.
        if isinstance(private_key_path, basestring):
            private_key_path = open(private_key_path)
        private_key = private_key_path.read()
    store = _get_storage(storage, storage_path)
    scope = GOOGLE_API_SCOPE_READONLY if readonly else GOOGLE_API_SCOPE
    credentials = SignedJwtAssertionCredentials(account_name, private_key, scope)
    credentials.set_store(store)
    return Client(_build(credentials, api_version, http_client), ga_hook)
Create a client for a service account .
48,875
def _build(credentials, api_version, http_client=None):
    """Build the analytics service client over an authorised HTTP client.

    A default httplib2 client is created when none is supplied.
    """
    authorised_client = credentials.authorize(http_client or httplib2.Http())
    return build("analytics", api_version, http=authorised_client)
Build the client object .
48,876
def ad_unif_inf(statistic):
    """Limiting distribution of the Anderson-Darling statistic.

    Two-branch polynomial approximation, accurate to about 5 decimal
    digits (cf. Marsaglia & Marsaglia's ``adinf`` — TODO confirm source).
    """
    z = statistic
    if z < 2:
        poly = 2.00012 + (.247105 - (.0649821 - (.0347962 - (.011672
            - .00168691 * z) * z) * z) * z) * z
        return exp(-1.2337141 / z) / sqrt(z) * poly
    inner = 1.0776 - (2.30695 - (.43424 - (.082433 - (.008056
        - .0003146 * z) * z) * z) * z) * z
    return exp(-exp(inner))
Approximates the limiting distribution to about 5 decimal digits .
48,877
def ad_unif_fix(samples, pinf):
    """Finite-sample correction to the limiting Anderson-Darling value.

    *samples* is the sample count and *pinf* the limiting-distribution
    probability; g1/g2/g3 are the piecewise correction shapes.
    """
    n = samples
    c = .01265 + .1757 / n
    if pinf < c:
        return (((.0037 / n + .00078) / n + .00006) / n) * g1(pinf / c)
    if pinf < .8:
        return ((.01365 / n + .04213) / n) * g2((pinf - c) / (.8 - c))
    return g3(pinf) / n
Corrects the limiting distribution for a finite sample size .
48,878
def check_metadata(self):
    """Ensure all required meta-data are supplied.

    Warns (via ``self.warn``) about missing required fields and about
    maintainer information that merely duplicates the author fields.
    """
    metadata = self.distribution.metadata
    missing = []
    for attr in ("name", "version", "url"):
        if not (hasattr(metadata, attr) and getattr(metadata, attr)):
            missing.append(attr)
    # Either an author or a maintainer must be present.
    if not metadata.author and not metadata.maintainer:
        missing.append("author")
        if self.enforce_email:
            missing.append("author_email")
    else:
        # An email is required for whichever contact fields are present.
        if (metadata.author and self.enforce_email
                and not metadata.author_email):
            missing.append("author_email")
        if (metadata.maintainer and self.enforce_email
                and not metadata.maintainer_email):
            missing.append("maintainer_email")
    if (metadata.author and metadata.maintainer
            and metadata.author == metadata.maintainer):
        self.warn("Maintainer should be omitted if identical to Author.\n"
                  "See https://www.python.org/dev/peps/pep-0345/"
                  "#maintainer-email-optional")
    if (metadata.author_email and metadata.maintainer_email
            and metadata.author_email == metadata.maintainer_email):
        # Fixed: the original concatenated "...omitted if" + "identical..."
        # without a separating space, producing "ifidentical" in the warning.
        self.warn("Maintainer Email should be omitted if "
                  "identical to Author's.\n"
                  "See https://www.python.org/dev/peps/pep-0345/"
                  "#maintainer-email-optional")
    if missing:
        self.warn("missing required meta-data: %s" % ", ".join(missing))
Ensure all required meta - data are supplied .
48,879
def from_url(cls, url):
    """Construct a PostgresConfig from a database URL.

    The query string is parsed into ``query_params``; values may themselves
    contain '=' characters.
    """
    parsed = urlparse(url)
    if parsed.query:
        # Split on the first '=' only, so values like "opts=a=b" do not
        # raise ValueError (the original unbounded split did).
        query_params = dict(
            param.split('=', 1) for param in parsed.query.split('&'))
    else:
        query_params = {}
    return cls(
        username=parsed.username,
        password=parsed.password,
        hostname=parsed.hostname,
        port=parsed.port,
        database=parsed.path.lstrip('/'),
        query_params=query_params,
    )
Construct a PostgresConfig from a URL .
48,880
def get_last_modified_date(filename):
    """Return the file's mtime formatted as 'dd/mm/YYYY', or None if not a file."""
    if not os.path.isfile(filename):
        return None
    mtime = os.path.getmtime(filename)
    return datetime.date.fromtimestamp(mtime).strftime('%d/%m/%Y')
Get the last modified date of a given file
48,881
def get_file_size(filename):
    """Return the human-readable size of *filename*, or None if not a file."""
    if not os.path.isfile(filename):
        return None
    return convert_size(os.path.getsize(filename))
Get the file size of a given file
48,882
def convert_size(size_bytes):
    """Convert a byte count to a human-readable string, e.g. '1.5 KB'."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
Transform bytesize to a human readable filesize
48,883
def validate_all_attributes(self):
    """Touch every trait so its validation runs.

    Collects all TraitError failures and raises them together as a single
    MultipleTraitErrors.
    """
    failures = {}
    for trait_name in self.trait_names():
        try:
            getattr(self, trait_name)
        except TraitError as exc:
            failures[trait_name] = exc
    if failures:
        raise MultipleTraitErrors(failures)
Force validation of all traits .
48,884
def example_instance(cls, skip=()):
    """Build an example instance of a Serializable subclass.

    Each trait's ``example_value`` is used as a constructor keyword, except
    for names in *skip* and traits whose example is Undefined.
    """
    kwargs = {}
    for name, trait in iteritems(cls.class_traits()):
        if name in skip:
            continue
        example = trait.example_value
        if example is not Undefined:
            kwargs[name] = example
    return cls(**kwargs)
Generate an example instance of a Serializable subclass .
48,885
def example_yaml(cls, skip=()):
    """Generate an example yaml string for a Serializable subclass."""
    instance = cls.example_instance(skip=skip)
    return instance.to_yaml(skip=skip)
Generate an example yaml string for a Serializable subclass .
48,886
def write_example_yaml(cls, dest, skip=()):
    """Write an example yaml document for a Serializable subclass to *dest*."""
    example = cls.example_instance(skip=skip)
    with open(dest, 'w') as stream:
        example.to_yaml(stream=stream, skip=skip)
Write a file containing an example yaml string for a Serializable subclass .
48,887
def to_base64(self, skip=()):
    """Return this object serialized as base64-encoded JSON bytes."""
    json_payload = self.to_json(skip=skip)
    return base64.b64encode(ensure_bytes(json_payload, encoding='utf-8'))
Serialize this object to base64-encoded JSON.
48,888
def delete(self, object_id):
    """Delete and return the object whose id is *object_id*.

    Propagates the ORM error when no row (or more than one) matches.
    """
    query = self.session.query(self.cls)
    obj = query.filter_by(id=object_id).one()
    self.session.delete(obj)
    return obj
Delete an object by its id
48,889
def save(self, obj):
    """Persist *obj*: add if new to the session, merge otherwise.

    Flushes and refreshes so generated values are populated, then returns
    the persisted object.
    """
    session = self.session
    if obj in session:
        obj = session.merge(obj)
    else:
        session.add(obj)
    session.flush()
    session.refresh(obj)
    return obj
save an object
48,890
def add_named_concept_filters(self, named_filter_concepts):
    """Register a concept filter for each (key, display name) pair."""
    for key, name in named_filter_concepts.items():
        self.add_concept_filter(key, concept_name=name)
Adds named concept filters
48,891
def add_concept_filter(self, concept, concept_name=None):
    """Append an elasticsearch filter for *concept*, if present in the params.

    A list value becomes an OR filter ('or' syntax for ES 1.x, bool/should
    otherwise); a scalar becomes a single term. *concept_name* defaults to
    *concept* when falsy.
    """
    # Membership test directly on the dict instead of .keys(); hoist the
    # repeated self.query_params[concept] lookups.
    if concept not in self.query_params:
        return
    if not concept_name:
        concept_name = concept
    value = self.query_params[concept]
    if isinstance(value, list):
        terms = [self._build_concept_term(concept_name, v) for v in value]
        if self.es_version == '1':
            es_filter = {'or': terms}
        else:
            es_filter = {"bool": {"should": terms}}
    else:
        es_filter = self._build_concept_term(concept_name, value)
    self.filters.append(es_filter)
Add a concept filter
48,892
def build(self):
    """Assemble the final elasticsearch query dict from the query and filters."""
    if self.es_version == '1':
        # ES 1.x: filtered-query with an 'and' filter list.
        if not self.filters:
            return self.query
        return {
            'filtered': {
                'query': self.query,
                'filter': {'and': self.filters},
            }
        }
    # Newer ES: bool query with an optional filter context.
    query = {'bool': {'must': self.query}}
    if self.filters:
        query["bool"]["filter"] = self.filters
    return query
Builds the query string which can be used for a search query
48,893
def parse(cls, request, default_start=0, default_end=9, max_end=50):
    """Parse a Range header or page query params into a range object.

    Preference order: a valid ``Range: items=a-b`` header, then a page
    query parameter, then the supplied defaults. Raises RangeParseException
    on a malformed Range header.
    """
    settings = request.registry.settings
    page_param = settings.get('oe.paging.page.queryparam', 'pagina')
    # Fixed: the original used `is not ''`, an identity comparison on a
    # string literal (SyntaxWarning on modern Python, unreliable semantics).
    if 'Range' in request.headers and request.headers['Range'] != '':
        match = re.match('^items=([0-9]+)-([0-9]+)$', request.headers['Range'])
        if not match:
            raise RangeParseException('range header does not match expected format')
        start = int(match.group(1))
        end = max(int(match.group(2)), start)
        if max_end and end > start + max_end:
            end = start + max_end
        return cls(start, end)
    elif page_param in request.params:
        per_page_param = settings.get('oe.paging.per_page.queryparam', 'per_pagina')
        page = int(request.params.get(page_param))
        items_per_page = int(
            request.params.get(per_page_param, default_end - default_start + 1))
        start = default_start + items_per_page * (page - 1)
        end = start + items_per_page - 1
        return cls(start, end, page)
    else:
        return cls(default_start, default_end)
Parse the range headers into a range object . When there are no range headers check for a page pagina parameter otherwise use the defaults defaults
48,894
def set_response_headers(self, request, total_count):
    """Set Content-Range and paging Link headers on the response.

    Raises RangeParseException when *total_count* is None.
    """
    response = request.response
    response.headerlist.append(
        ('Access-Control-Expose-Headers', 'Content-Range, X-Content-Range'))
    response.accept_ranges = 'items'
    if total_count is None:
        raise RangeParseException('Provided length value is null')
    if total_count > 0:
        response.content_range = self.content_range(total_count)
    self.set_link_headers(request, total_count)
Set the correct range headers on the response
48,895
def set_link_headers(self, request, total_count):
    """Set first/last/prev/next Link headers on the response.

    Skipped entirely when the client used a Range header: link paging only
    applies to page/per-page query-parameter navigation.
    """
    response = request.response
    if request.headers.get('Range'):
        return
    settings = request.registry.settings
    page_param = settings.get('oe.paging.page.queryparam', 'pagina')
    per_page_param = settings.get('oe.paging.per_page.queryparam', 'per_pagina')
    url = request.path_url
    # params may be a webob multidict (with mixed()) or a plain mapping.
    try:
        queryparams = request.params.mixed()
    except AttributeError:
        queryparams = request.params
    page_size = self.get_page_size()
    current_page = self.start // page_size + 1
    queryparams[per_page_param] = page_size
    links = {'first': 1,
             'last': int(math.ceil(float(total_count) / page_size))}
    # Only emit prev/next when they differ from the boundary pages.
    if current_page != links['first']:
        links['prev'] = current_page - 1
    if current_page != links['last']:
        links['next'] = current_page + 1
    response.headers['Link'] = self._make_link_headers(links, page_param, queryparams, url)
Sets Link headers on the response .
48,896
def preparer(value):
    """Normalize a raw value into a candidate KBO number.

    Strips surrounding whitespace and removes dots; None or colander.null
    stay colander.null.
    """
    if value is not None and value != colander.null:
        return value.strip().replace('.', '')
    return colander.null
Normalize a value into a form that can be validated as a KBO number.
48,897
def cvm_unif_inf(statistic):
    """Limiting distribution of the Cramer-von Mises statistic.

    Evaluates the Bessel-function (K_{1/4}) series at the precomputed
    module-level arguments/coefficients (inf_args, inf_cs).
    """
    args = inf_args / statistic
    terms = inf_cs * exp(-args) * kv(.25, args)
    return terms.sum() / statistic ** .5
Calculates the limiting distribution of the Cramer - von Mises statistic .
48,898
def cvm_unif_fix1(statistic):
    """Approximate the first-order term of the small-sample Gotze expansion.

    Combines modified Bessel functions K_{1/4}, K_{3/4}, K_{5/4} evaluated
    at the precomputed fix1_args with the fix1_csa/fix1_csb coefficient
    tables, then subtracts from the scaled limiting distribution.
    """
    args = fix1_args / statistic
    # Broadcast the three Bessel orders against every argument.
    kvs = kv((.25, .75, 1.25), args[:, :, newaxis])
    # Contract the order axis with two fixed weight vectors to get g and h.
    gs, hs = exp(-args) * tensordot(((1, 1, 0), (2, 3, -1)), kvs, axes=(1, 2))
    a = dot((7, 16, 7), fix1_csa * gs).sum() / statistic ** 1.5
    b = dot((1, 0, 24), fix1_csb * hs).sum() / statistic ** 2.5
    return cvm_unif_inf(statistic) / 12 - a - b
Approximates the first - term of the small sample count Gotze expansion .
48,899
def merge(*ds):
    """Merge a sequence of dictionaries into one; later keys win.

    Raises ValueError when called with no arguments.
    """
    if not ds:
        raise ValueError("Must provide at least one dict to merge().")
    merged = {}
    for mapping in ds:
        merged.update(mapping)
    return merged
Merge together a sequence of dictionaries.