idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
11,600
def is_open(self, id, time, day):
    """Return True if venue `id` is open at `time` on `day`.

    Returns False when hours are listed for the day but no interval
    matches, and None when the day has no opening-hours data at all.
    Times compare lexicographically, so "HH:MM"-style strings are
    assumed -- TODO confirm the format delivered by get_details.
    """
    details = self.get_details(id)
    found_hours = False
    for venue in details["objects"]:
        day_hours = venue["open_hours"][day]
        if not day_hours:
            continue
        found_hours = True
        for raw in day_hours:
            parts = raw.replace(' ', '').split('-')
            if parts[0] < time < parts[1]:
                return True
    return False if found_hours else None
Checks if the venue is open at the time of day given a venue id .
11,601
def search(self, name=None, category=None, description=None, price=None,
           price__gt=None, price__gte=None, price__lt=None, price__lte=None,
           location=(None, None), radius=None, tl_coord=(None, None),
           br_coord=(None, None), country=None, locality=None, region=None,
           postal_code=None, street_address=None, website_url=None):
    """Locu Menu Item Search API call wrapper.

    Builds the parameter dict from the given filters and issues a
    'search' query.  NOTE(review): `category` is accepted but never
    forwarded to `_get_params` -- confirm whether that is intentional.
    """
    params = self._get_params(
        name=name,
        description=description,
        price=price,
        price__gt=price__gt,
        price__gte=price__gte,
        price__lt=price__lt,
        price__lte=price__lte,
        location=location,
        radius=radius,
        tl_coord=tl_coord,
        br_coord=br_coord,
        country=country,
        locality=locality,
        region=region,
        postal_code=postal_code,
        street_address=street_address,
        website_url=website_url,
    )
    return self._create_query('search', params)
Locu Menu Item Search API Call Wrapper
11,602
def main(ctx, root_project_dir, verbose):
    """stack-docs CLI entry point: stash shared options and configure logging."""
    root_project_dir = discover_conf_py_directory(root_project_dir)
    # Shared state handed to subcommands via the click context.
    ctx.obj = {'root_project_dir': root_project_dir, 'verbose': verbose}
    log_level = logging.DEBUG if verbose else logging.INFO
    logger = logging.getLogger('documenteer')
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(log_level)
stack - docs is a CLI for building LSST Stack documentation such as pipelines . lsst . io .
11,603
def help(ctx, topic, **kw):
    """Show help for any command (top-level help when no topic is given)."""
    if topic is None:
        click.echo(ctx.parent.get_help())
        return
    click.echo(main.commands[topic].get_help(ctx))
Show help for any command .
11,604
def clean(ctx):
    """Delete Sphinx build products under the project root."""
    logger = logging.getLogger(__name__)
    root = ctx.obj['root_project_dir']
    for dirname in ('py-api', '_build', 'modules', 'packages'):
        target = os.path.join(root, dirname)
        if os.path.isdir(target):
            shutil.rmtree(target)
            logger.debug('Cleaned up %r', target)
        else:
            logger.debug('Did not clean up %r (missing)', target)
Clean Sphinx build products .
11,605
def query_with_attributes(type_to_query, client):
    """Query all entities of a given type and pivot attributes into columns."""
    session = client.create_session()
    query = (session.query(Attribute.name, Attribute.value, Entity.id)
             .join(Entity)
             .filter(Entity.type == type_to_query))
    df = client.df_query(query)
    session.close()
    # Drop incomplete rows, then pivot (id, name) -> one column per attribute.
    df = df.dropna(how='any')
    df = df.set_index(['id', 'name']).unstack().reset_index()
    df.columns = ['id'] + list(df.columns.get_level_values(1)[1:])
    return df
Query all entities of a specific type with their attributes
11,606
def reset(self):
    """Reset every public attribute to a fresh default of its own class.

    Private names (leading underscore) are left alone; falsy values are
    kept as-is rather than re-instantiated.
    """
    for attr_name in self.__dict__:
        if attr_name.startswith("_"):
            continue
        current = getattr(self, attr_name)
        setattr(self, attr_name, current and current.__class__())
Reset all fields of this object to class defaults
11,607
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
    """Rasterize a GeoJSON (Multi)Polygon feature into a numpy mask of `shape`.

    Outer rings are filled and interior rings are drawn in the opposite
    color so holes are cut out.  `lat_idx`/`lon_idx` map degrees to
    fractional array indices.  Returns a float array with 1.0 inside the
    polygon and 0.0 outside, flipped so row order matches the source grid.

    Raises ValueError for any other geometry type.
    """
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from matplotlib import patches
    import numpy as np
    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
        raise ValueError("Cannot handle feature of type " + feature.geometry.type)
    dpi = 100
    fig = plt.figure(frameon=False, dpi=dpi)
    fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    ax.set_xlim([0, shape[1]])
    ax.set_ylim([0, shape[0]])
    fig.add_axes(ax)
    if feature.geometry.type == 'Polygon':
        coords = [feature.geometry.coordinates]
    else:
        coords = feature.geometry.coordinates
    for poly_coords in coords:
        for i, outline in enumerate(poly_coords):
            # Ring 0 is the outer boundary (black); later rings are holes (white).
            value = 0. if i == 0 else 1.
            outline = np.array(outline)
            xs = lon_idx(outline[:, 0])
            ys = lat_idx(outline[:, 1])
            poly = patches.Polygon(list(zip(xs, ys)),
                                   facecolor=(value, value, value),
                                   edgecolor='none', antialiased=True)
            ax.add_patch(poly)
    fig.canvas.draw()
    # np.fromstring on binary data was deprecated and removed in modern
    # NumPy; np.frombuffer is the supported equivalent.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
    assert data.shape[0] == shape[0]
    assert data.shape[1] == shape[1]
    data = 1. - data.astype(float) / 255.
    data = data[::-1, :]
    plt.close('all')
    return data
Convert a GeoJSON polygon feature to a numpy array
11,608
def load(self):
    """Load NUTS codes from the input CSV as a key/name DataFrame.

    Keeps only 4-character (level-2) codes and drops the 'ZZ'
    extra-regio codes.
    """
    df = pd.read_csv(self.input_file, sep=',', quotechar='"',
                     encoding='utf-8', dtype=object)
    df = df[['NUTS-Code', 'Description']]
    df.columns = ['key', 'name']
    df = df[df['key'].str.len() == 4]
    df = df[df['key'].str[2:] != 'ZZ']
    return df
Load data from default location
11,609
def input_file(self):
    """Default input path: data/tgs<number>.tsv next to this module."""
    filename = 'tgs{:s}.tsv'.format(self.number)
    return path.join(path.dirname(__file__), 'data', filename)
Returns the input file name with a default relative path
11,610
def load(self, key_filter=None, header_preproc=None):
    """Load a Eurostat-style TSV table from the default location.

    Parameters
    ----------
    key_filter : str, optional
        Regex; keep only rows whose first-column value matches it.
    header_preproc : callable, optional
        Applied to each data-column header before it is cast to int.

    Returns
    -------
    pandas.DataFrame with a 'key' column plus one int (year) column per
    data column; values are numeric with footnote flags stripped.
    """
    df = pd.read_csv(self.input_file, sep='\t', dtype=object)
    if key_filter is not None:
        df = df[df[df.columns[0]].str.match(key_filter)]
    # The first column holds comma-separated metadata; the region key is
    # its last field.
    meta_col = df.columns[0]
    df[meta_col] = df[meta_col].str.split(',').str[-1]
    for col_name in df.columns[1:]:
        # Strip footnote letters (e.g. '12.3b').  regex=True preserves the
        # historical behavior on pandas >= 2, where the default flipped to
        # literal replacement.
        stripped = df[col_name].str.replace(r'[a-z]', '', regex=True)
        df[col_name] = pd.to_numeric(stripped, errors='coerce')
    if header_preproc is not None:
        df.columns = list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]]
    df.columns = ['key'] + [int(y) for y in df.columns[1:]]
    return df
Load data table from tsv file from default location
11,611
def load(self):
    """Load the climate data as a dict of arrays and index interpolators.

    Returns a dict with:
      - 'data': masked 3-D array (time, lat, lon) of self.variable_name,
        with the longitude axis rolled so the grid runs -180..180
        instead of 0..360.
      - 'lat_idx' / 'lon_idx': interp1d objects mapping degrees to
        fractional array indices.
    """
    from scipy.io import netcdf_file
    from scipy import interpolate
    import numpy as np
    f = netcdf_file(self.input_file)
    out = dict()
    # .copy() so the arrays survive closing the (possibly mmap-backed) file.
    lats = f.variables['lat'][:].copy()
    lons = f.variables['lon'][:].copy()
    # Roll longitudes by half their length: 0..360 layout -> -180..180.
    out['data'] = np.roll(f.variables[self.variable_name][:, :, :].copy(),
                          shift=len(lons) // 2, axis=2)
    lons = np.roll(lons, shift=len(lons) // 2)
    lons[lons > 180] -= 360
    # Values below -1e6 are treated as the file's missing-value sentinel
    # -- presumably the fill value; TODO confirm against the dataset.
    out['data'] = np.ma.array(out['data'])
    out['data'][out['data'] < -1.e6] = np.ma.masked
    # Interpolators converting degrees to fractional array indices.
    out['lat_idx'] = interpolate.interp1d(x=lats, y=np.arange(len(lats)))
    out['lon_idx'] = interpolate.interp1d(x=lons, y=np.arange(len(lons)))
    f.close()
    return out
Load the climate data as a map
11,612
def clear(self):
    """Delete this variable's per-season climate indicators from the database."""
    self.mark_incomplete()
    for suffix in list(CLIMATE_SEASON_SUFFIXES.values()):
        query = self.session.query(models.ClimateIndicator).filter(
            models.ClimateIndicator.description == self.description + suffix)
        try:
            self.session.delete(query.one())
        except NoResultFound:
            # Nothing stored for this season yet -- nothing to delete.
            pass
    self.close_session()
Clear output of one climate variable
11,613
def run(self):
    """Load climate data and store per-region, per-season indicator values."""
    import numpy as np
    # Map NUTS2 region keys to their database ids.
    query = self.session.query(models.NUTS2Region.key, models.NUTS2Region.id)
    region_ids = self.client.df_query(query).set_index('key')['id'].to_dict()
    data = next(self.requires()).load()
    nuts = NUTS2GeoJSONInputFile().load()
    indicator_ids = dict()
    t_data = dict()
    for season, suffix in CLIMATE_SEASON_SUFFIXES.items():
        # One indicator row per season; commit immediately so its id exists.
        indicator = models.ClimateIndicator(description=self.description + suffix)
        self.session.add(indicator)
        self.session.commit()
        indicator_ids[season] = indicator.id
        if season == 'summer':
            # Month indices 3..8 (Apr-Sep, assuming index 0 is January --
            # TODO confirm the time axis origin).
            t_data[season] = np.ma.average(data['data'][3:9, :, :], axis=0)
        else:
            # Remaining months: average of the Jan-Mar and Oct-Dec blocks.
            t_data[season] = np.ma.average(0.5 * (data['data'][0:3, :, :] + data['data'][9:12, :, :]), axis=0)
    objects = []
    current_value_id = models.ClimateValue.get_max_id(self.session)
    for feature in nuts:
        # Rasterize the region polygon to weight the gridded averages.
        mask = geojson_polygon_to_mask(feature=feature, shape=t_data['summer'].shape, lat_idx=data['lat_idx'], lon_idx=data['lon_idx'])
        for season in list(CLIMATE_SEASON_SUFFIXES.keys()):
            value = np.ma.average(t_data[season], weights=mask)
            region_id = region_ids.get(feature.properties['NUTS_ID'], None)
            if region_id is not None:
                region_id = int(region_id)
            # Ids are assigned manually so bulk_save_objects can be used.
            current_value_id += 1
            objects.append(models.ClimateValue(id=current_value_id, value=value, region_id=region_id, indicator_id=indicator_ids[season]))
    self.session.bulk_save_objects(objects)
    self.session.commit()
    self.done()
Load climate data and convert to indicator objects
11,614
def lose(spin):
    """Close the TCP connection and unregister `spin` from the reactor.

    A close error is surfaced via the CLOSE_ERR event; destruction and
    the LOST event always happen.
    """
    try:
        spin.close()
    except Exception as excpt:
        spin.drive(CLOSE_ERR, excpt.args[0])
    finally:
        spin.destroy()
        spin.drive(LOST)
Close the TCP connection and unregister the Spin instance from the untwisted reactor.
11,615
def create_server(addr, port, backlog):
    """Set up a TCP server; accepted clients get the basic Stdin/Stdout handles."""
    server = Spin()
    server.bind((addr, port))
    server.listen(backlog)
    Server(server)
    # On ACCEPT the first argument is the listening spin; only the new
    # client spin gets the handles.
    server.add_map(ACCEPT, lambda server, spin: install_basic_handles(spin))
    return server
Set up a TCP server and installs the basic handles Stdin Stdout in the clients .
11,616
def create_client(addr, port):
    """Set up a TCP client with the basic Stdin/Stdout handles installed on connect."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # connect_ex does not raise; failures surface via the CONNECT_ERR event.
    sock.connect_ex((addr, port))
    spin = Spin(sock)
    Client(spin)
    spin.add_map(CONNECT, install_basic_handles)
    spin.add_map(CONNECT_ERR, lambda con, err: lose(con))
    return spin
Set up a TCP client and installs the basic handles Stdin Stdout .
11,617
def main(argv=None):
    """Execute the main bit of the application."""
    app = application.Application()
    app.run(argv)
    app.exit()
Execute the main bit of the application .
11,618
def fingerprint_similarity(mol1, mol2):
    """Tanimoto similarity of the Indigo 'sim' fingerprints, rounded to 2 dp."""
    fp1, fp2 = (to_real_mol(m).fingerprint("sim") for m in (mol1, mol2))
    return round(idg.similarity(fp1, fp2, "tanimoto"), 2)
Calculate Indigo fingerprint similarity
11,619
def devmodel_to_array(model_name, train_fraction=1):
    """Turn a dev_model object into shuffled train/test arrays.

    The last `Data_summary.shape[0] - 6` numeric columns are treated as
    outputs (Y); the remaining numeric columns are inputs (X).  Rows are
    shuffled before the split, so results vary between calls.
    """
    devmodel = model_name
    n_outputs = devmodel.Data_summary.shape[0] - 6
    shuffled = devmodel.Data.sample(frac=1)
    data = np.array(shuffled.select_dtypes(include=[np.number]))
    n_rows = data.shape[0]
    n_features = data.shape[1] - n_outputs
    n_train = int(n_rows * train_fraction)
    n_test = n_rows - n_train
    X_train = np.zeros((n_train, n_features))
    X_test = np.zeros((n_test, n_features))
    Y_train = np.zeros((n_train, n_outputs))
    Y_test = np.zeros((n_test, n_outputs))
    X_train[:] = data[:n_train, :-n_outputs]
    Y_train[:] = data[:n_train, -n_outputs:].astype(float)
    X_test[:] = data[n_train:, :-n_outputs]
    Y_test[:] = data[n_train:, -n_outputs:].astype(float)
    return X_train, Y_train, X_test, Y_test
a standardized method of turning a dev_model object into training and testing arrays
11,620
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs):
    """Apply one or more functions to each step object in the index.

    Any kwarg passed as a multi-element list defines a search dimension:
    the function(s) are run once per combination (cartesian product) and
    the results are concatenated with a (function, kwargs) column
    MultiIndex.  With pairwise=True the function is applied to pairs of
    steps via apply_pairwise instead of to single steps.

    Returns a StepFrame, or a StepSeries for a single series result.
    """
    # kwargs whose value is a multi-element list become search dimensions.
    search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1]
    functions = util.make_list(fn)
    # One run per (function, kwarg-combination).
    search = list(product(functions, util.dict_product(kwargs)))
    results = []
    for fn, kw in search:
        if not pairwise:
            r = self.index.to_series().apply(lambda step: fn(step, **kw))
        else:
            r = apply_pairwise(self, fn, symmetric=symmetric, diagonal=diagonal, block=block, **kw)
        # Column label: function name (only when several) plus the
        # searched kwarg values for this run.
        name = [] if len(functions) == 1 else [fn.__name__]
        name += util.dict_subset(kw, search_keys).values()
        if isinstance(r, pd.DataFrame):
            columns = pd.MultiIndex.from_tuples([tuple(name + util.make_list(c)) for c in r.columns])
            r.columns = columns
        else:
            r.name = tuple(name)
        results.append(r)
    if len(results) > 1:
        result = pd.concat(results, axis=1)
        # Label the MultiIndex levels: function, then search keys, padded
        # with None for any remaining levels.
        column_names = [] if len(functions) == 1 else [None]
        column_names += search_keys
        column_names += [None] * (len(result.columns.names) - len(column_names))
        result.columns.names = column_names
        return StepFrame(result)
    else:
        result = results[0]
        if isinstance(result, pd.DataFrame):
            return StepFrame(result)
        else:
            result.name = functions[0].__name__
            return StepSeries(result)
Apply function to each step object in the index
11,621
def _identifyBranches ( self ) : if self . debug : sys . stdout . write ( "Identifying branches: " ) start = time . clock ( ) seen = set ( ) self . branches = set ( ) for e1 , e2 in self . edges : if e1 not in seen : seen . add ( e1 ) else : self . branches . add ( e1 ) if e2 not in seen : seen . add ( e2 ) else : self . branches . add ( e2 ) if self . debug : end = time . clock ( ) sys . stdout . write ( "%f s\n" % ( end - start ) )
A helper function for determining all of the branches in the tree . This should be called after the tree has been fully constructed and its nodes and edges are populated .
11,622
def _identifySuperGraph(self):
    """Condense the tree by contracting chains of degree-(1,1) nodes.

    Interior nodes removed from each chain are remembered in
    self.augmentedEdges, keyed by the resulting (lower, upper) super-arc,
    so the full path can be reconstructed later.  Results are stored in
    self.superNodes and self.superArcs.
    """
    if self.debug:
        sys.stdout.write("Condensing Graph: ")
        # time.clock() was removed in Python 3.8; use perf_counter().
        start = time.perf_counter()
    G = nx.DiGraph()
    G.add_edges_from(self.edges)
    if self.short_circuit:
        # Skip condensation entirely and expose the full graph.
        self.superNodes = G.nodes()
        self.superArcs = G.edges()
        return
    self.augmentedEdges = {}
    N = len(self.Y)
    processed = np.zeros(N)
    for node in range(N):
        if processed[node]:
            continue
        # Only pass-through nodes (exactly one edge in and one out) can
        # be contracted.
        if G.in_degree(node) == 1 and G.out_degree(node) == 1:
            removedNodes = []
            # Walk down the chain of pass-through predecessors.
            lower_link = list(G.in_edges(node))[0][0]
            while (G.in_degree(lower_link) == 1
                   and G.out_degree(lower_link) == 1):
                new_lower_link = list(G.in_edges(lower_link))[0][0]
                G.add_edge(new_lower_link, node)
                G.remove_node(lower_link)
                removedNodes.append(lower_link)
                lower_link = new_lower_link
            removedNodes.reverse()
            removedNodes.append(node)
            # Walk up the chain of pass-through successors.
            upper_link = list(G.out_edges(node))[0][1]
            while (G.in_degree(upper_link) == 1
                   and G.out_degree(upper_link) == 1):
                new_upper_link = list(G.out_edges(upper_link))[0][1]
                G.add_edge(node, new_upper_link)
                G.remove_node(upper_link)
                removedNodes.append(upper_link)
                upper_link = new_upper_link
            G.add_edge(lower_link, upper_link)
            G.remove_node(node)
            self.augmentedEdges[(lower_link, upper_link)] = removedNodes
            processed[removedNodes] = 1
    self.superNodes = G.nodes()
    self.superArcs = G.edges()
    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
A helper function for determining the condensed representation of the tree . That is one that does not hold all of the internal nodes of the graph . The results will be stored in ContourTree . superNodes and ContourTree . superArcs . These two can be used to potentially speed up queries by limiting the searching on the graph to only nodes on these super arcs .
11,623
def get_seeds(self, threshold):
    """Return seed node pairs for isosurface extraction at `threshold`.

    For each super-arc whose endpoint values straddle the threshold,
    locate the edge of the augmented (uncontracted) path that crosses it
    and record that edge's two endpoints.
    """
    seeds = []
    for e1, e2 in self.superArcs:
        if not (self.Y[e1] <= threshold <= self.Y[e2]):
            continue
        # Recover the interior nodes of this arc, oriented low-to-high.
        if (e1, e2) in self.augmentedEdges:
            edgeList = self.augmentedEdges[(e1, e2)]
        elif (e2, e1) in self.augmentedEdges:
            e1, e2 = e2, e1
            edgeList = list(reversed(self.augmentedEdges[(e1, e2)]))
        else:
            continue
        startNode = e1
        # Advance until the first node at or above the threshold.
        for endNode in edgeList + [e2]:
            if self.Y[endNode] >= threshold:
                break
            startNode = endNode
        seeds.append(startNode)
        seeds.append(endNode)
    return seeds
Returns a list of seed points for isosurface extraction given a threshold value
11,624
def _construct_nx_tree(self, thisTree, thatTree=None):
    """Build a networkx DiGraph copy of a MergeTree for efficient manipulation.

    Augmented (contracted) edges are re-expanded into their interior
    nodes; when `thatTree` is given, only interior nodes also present in
    it are re-inserted.
    """
    if self.debug:
        sys.stdout.write("Networkx Tree construction: ")
        # time.clock() was removed in Python 3.8; use perf_counter().
        start = time.perf_counter()
    nxTree = nx.DiGraph()
    nxTree.add_edges_from(thisTree.edges)
    nodesOfThatTree = []
    if thatTree is not None:
        nodesOfThatTree = thatTree.nodes.keys()
    for (superNode, _), nodes in thisTree.augmentedEdges.items():
        superNodeEdge = list(nxTree.out_edges(superNode))
        if len(superNodeEdge) > 1:
            warnings.warn("The supernode {} should have only a single "
                          "emanating edge. Merge tree is invalidly "
                          "structured".format(superNode))
        endNode = superNodeEdge[0][1]
        startNode = superNode
        # Replace the contracted edge with the chain of interior nodes.
        nxTree.remove_edge(startNode, endNode)
        for node in nodes:
            if thatTree is None or node in nodesOfThatTree:
                nxTree.add_edge(startNode, node)
                startNode = node
        if startNode != endNode:
            nxTree.add_edge(startNode, endNode)
    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
    return nxTree
A function for creating networkx instances that can be used more efficiently for graph manipulation than the MergeTree class .
11,625
def _process_tree(self, thisTree, thatTree):
    """Peel leaves off a split/join tree into self.edges.

    Each removed leaf contributes one edge oriented low-to-high by
    self.Y.  The companion tree (`thatTree`) is patched so its
    connectivity survives the node removals.
    """
    if self.debug:
        sys.stdout.write("Processing Tree: ")
        # time.clock() was removed in Python 3.8; use perf_counter().
        start = time.perf_counter()

    def _current_leaves():
        # Leaves: roots of thisTree that are not merge points in thatTree.
        if len(thisTree.nodes()) > 1:
            return set([v for v in thisTree.nodes()
                        if thisTree.in_degree(v) == 0
                        and thatTree.in_degree(v) < 2])
        return set()

    leaves = _current_leaves()
    while len(leaves) > 0:
        v = leaves.pop()
        edges = list(thisTree.out_edges(v))
        if len(edges) != 1:
            warnings.warn("The node {} should have a single emanating "
                          "edge.\n".format(v))
        e1 = edges[0][0]
        e2 = edges[0][1]
        # Record the edge oriented from lower to higher function value.
        if self.Y[e1] < self.Y[e2]:
            self.edges.append((e1, e2))
        else:
            self.edges.append((e2, e1))
        thisTree.remove_node(v)
        # Remove v from the companion tree, bridging its neighbors.
        if thatTree.out_degree(v) == 0:
            thatTree.remove_node(v)
        else:
            if len(thatTree.in_edges(v)) > 0:
                startNode = list(thatTree.in_edges(v))[0][0]
            else:
                startNode = None
            if len(thatTree.out_edges(v)) > 0:
                endNode = list(thatTree.out_edges(v))[0][1]
            else:
                endNode = None
            if startNode is not None and endNode is not None:
                thatTree.add_edge(startNode, endNode)
            thatTree.remove_node(v)
        leaves = _current_leaves()
    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
A function that will process either a split or join tree with reference to the other tree and store it as part of this CT instance .
11,626
def read_git_branch():
    """Return the current Git branch name.

    On Travis CI the TRAVIS_BRANCH environment variable is used; on
    failure (no repo / detached HEAD) an empty string is returned.
    """
    if os.getenv('TRAVIS'):
        return os.getenv('TRAVIS_BRANCH')
    try:
        repo = git.repo.base.Repo(search_parent_directories=True)
        return repo.active_branch.name
    except Exception:
        return ''
Obtain the current branch name from the Git repository . If on Travis CI use the TRAVIS_BRANCH environment variable .
11,627
def read_git_commit_timestamp(repo_path=None):
    """Timestamp (tz-aware datetime) of the head commit of the repository."""
    repo = git.repo.base.Repo(path=repo_path, search_parent_directories=True)
    return repo.head.commit.committed_datetime
Obtain the timestamp from the current head commit of a Git repository .
11,628
def read_git_commit_timestamp_for_file(filepath, repo_path=None):
    """Timestamp of the most recent commit touching `filepath`.

    Raises IOError if the file has no commit history.  The first commit
    yielded by iter_parents is taken as the most recent -- per GitPython's
    default ordering.
    """
    repo = git.repo.base.Repo(path=repo_path, search_parent_directories=True)
    for commit in repo.head.commit.iter_parents(filepath):
        return commit.committed_datetime
    raise IOError('File {} not found'.format(filepath))
Obtain the timestamp for the most recent commit to a given file in a Git repository .
11,629
def get_filepaths_with_extension(extname, root_dir='.'):
    """Relative paths under `root_dir` of all files with the given extension.

    The match is case-insensitive and a missing leading dot is added.
    """
    if not extname.startswith('.'):
        extname = '.' + extname
    extname = extname.lower()
    root_dir = os.path.abspath(root_dir)
    matches = []
    for dirname, _, filenames in os.walk(root_dir):
        for filename in filenames:
            if os.path.splitext(filename)[-1].lower() != extname:
                continue
            full_filename = os.path.join(dirname, filename)
            matches.append(os.path.relpath(full_filename, start=root_dir))
    return matches
Get relative filepaths of files in a directory and sub - directories with the given extension .
11,630
def get_project_content_commit_date(root_dir='.', exclusions=None):
    """Return the datetime of the most recent commit touching Sphinx content.

    Content is any file under `root_dir` with a documentation-relevant
    extension (rst, ipynb, images).  `exclusions` is a list of patterns
    for the Matcher (default: readme.rst, license.rst); a path is skipped
    when the pattern matches the path itself or its top-level directory.

    Raises RuntimeError when no content files or no commits are found.
    """
    logger = logging.getLogger(__name__)
    extensions = ('rst', 'ipynb', 'png', 'jpeg', 'jpg', 'svg', 'gif')
    content_paths = []
    for extname in extensions:
        content_paths += get_filepaths_with_extension(extname, root_dir=root_dir)
    exclude = Matcher(exclusions if exclusions else ['readme.rst', 'license.rst'])
    # Drop excluded files and anything inside an excluded top-level dir.
    content_paths = [p for p in content_paths
                     if not (exclude(p) or exclude(p.split(os.path.sep)[0]))]
    logger.debug('Found content paths: {}'.format(', '.join(content_paths)))
    if not content_paths:
        raise RuntimeError('No content files found in {}'.format(root_dir))
    commit_datetimes = []
    for filepath in content_paths:
        try:
            datetime = read_git_commit_timestamp_for_file(filepath, repo_path=root_dir)
            commit_datetimes.append(datetime)
        except IOError:
            # Untracked/uncommitted file -- skip it rather than fail.
            logger.warning('Could not get commit for {}, skipping'.format(filepath))
    if not commit_datetimes:
        raise RuntimeError('No content commits could be found')
    latest_datetime = max(commit_datetimes)
    return latest_datetime
Get the datetime for the most recent commit to a project that affected Sphinx content .
11,631
def form_ltd_edition_name(git_ref_name=None):
    """Derive the LSST the Docs edition name from a Git ref.

    Mirrors the branch-name transformation performed by LTD Keeper.
    """
    name = read_git_branch() if git_ref_name is None else git_ref_name
    # Ticket branches reduce to the ticket slug.
    m = TICKET_BRANCH_PATTERN.match(name)
    if m is not None:
        return m.group(1)
    # Version tags are used verbatim.
    if TAG_PATTERN.match(name) is not None:
        return name
    if name == 'master':
        name = 'Current'
    for ch in ('/', '_', '.'):
        name = name.replace(ch, '-')
    return name
Form the LSST the Docs edition name for this branch using the same logic as LTD Keeper does for transforming branch names into edition names .
11,632
def itersheets(self):
    """Yield each worksheet, making it the active one for the duration of the yield."""
    for sheet in self.worksheets:
        saved = self.active_worksheet
        self.active_worksheet = sheet
        try:
            yield sheet
        finally:
            # Restore whatever was active before this sheet.
            self.active_worksheet = saved
Iterates over the worksheets in the book and sets the active worksheet as the current one before yielding .
11,633
def to_xlsx(self, **kwargs):
    """Write the workbook to .xlsx via xlsxwriter; return the xlsxwriter Workbook."""
    from xlsxwriter.workbook import Workbook as _Workbook
    self.workbook_obj = _Workbook(**kwargs)
    self.workbook_obj.set_calc_mode(self.calc_mode)
    for sheet in self.itersheets():
        sheet.to_xlsx(workbook=self)
    self.workbook_obj.filename = self.filename
    # Only finalize to disk when a target filename is set.
    if self.filename:
        self.workbook_obj.close()
    return self.workbook_obj
Write workbook to a . xlsx file using xlsxwriter . Return a xlsxwriter . workbook . Workbook .
11,634
def get_table(self, name):
    """Return a (table, worksheet) pair for the named table.

    When `name` is None the active table is resolved (preferring the
    active worksheet).  A "Sheet!Table" name is resolved against the
    named sheet; a bare name is looked up on the active sheet first,
    then on every sheet.  Raises KeyError when the table is not found.
    """
    if name is None:
        assert self.active_table, "Can't get table without name unless an active table is set"
        name = self.active_table.name
        if self.active_worksheet:
            table = self.active_worksheet.get_table(name)
            assert table is self.active_table, "Active table is not from the active sheet"
            return table, self.active_worksheet
        # No active sheet: search every sheet for the active table itself.
        for ws in self.worksheets:
            try:
                table = ws.get_table(name)
                if table is self.active_table:
                    return table, ws
            except KeyError:
                pass
        raise RuntimeError("Active table not found in any sheet")
    # "Sheet!Table" form: resolve against the named worksheet.
    if "!" in name:
        ws_name, table_name = map(lambda x: x.strip("'"), name.split("!", 1))
        for ws in self.worksheets:
            if ws.name == ws_name:
                table = ws.get_table(table_name)
                return table, ws
        raise KeyError(name)
    # Bare name: prefer the active worksheet...
    if self.active_worksheet:
        table = self.active_worksheet.get_table(name)
        return table, self.active_worksheet
    # ...otherwise take the first sheet that knows the name.
    for ws in self.worksheets:
        try:
            table = ws.get_table(name)
            return table, ws
        except KeyError:
            pass
    raise KeyError(name)
Return a table worksheet pair for the named table
11,635
def send_message(self, output):
    """Wrap `output` in an ActionInput and hand it to the global dispatcher."""
    file_system_event = None
    if self.my_action_input:
        file_system_event = self.my_action_input.file_system_event or None
    message = ActionInput(file_system_event, output, self.name, "*")
    Global.MESSAGE_DISPATCHER.send_message(message)
Wrap the output in an ActionInput and send it through the global message dispatcher.
11,636
def stop(self):
    """Stop the current action and fire the on_stop hook."""
    Global.LOGGER.debug(f"action {self.name} stopped")
    self.is_running = False
    self.on_stop()
Stop the current action
11,637
def run(self):
    """Action main loop: log monitored inputs, then cycle until stopped."""
    Global.LOGGER.debug(f"action {self.name} is running")
    for monitored in self.monitored_input:
        sender = "*" + monitored + "*"
        Global.LOGGER.debug(f"action {self.name} is monitoring {sender}")
    while self.is_running:
        try:
            time.sleep(Global.CONFIG_MANAGER.sleep_interval)
            self.on_cycle()
        except Exception as exc:
            # Keep the loop alive; a failing cycle is logged, not fatal.
            Global.LOGGER.error(f"error while running the action {self.name}: {str(exc)}")
Start the action
11,638
def create_action_for_code(cls, action_code, name, configuration, managed_input):
    """Factory: instantiate the Action subclass whose `type` equals `action_code`.

    Imports each discovered action module and checks the registered
    Action subclasses for a matching type code.  Returns the new action
    instance, or implicitly None when no subclass matches.
    """
    Global.LOGGER.debug(f"creating action {name} for code {action_code}")
    Global.LOGGER.debug(f"configuration length: {len(configuration)}")
    Global.LOGGER.debug(f"input: {managed_input}")
    my_actions_file = Action.search_actions()
    for filename in my_actions_file:
        # Module name = file basename without the '.py' suffix.
        module_name = os.path.basename(os.path.normpath(filename))[:-3]
        context = {}
        Action.load_module(module_name, filename)
        for subclass in Action.__subclasses__():
            if subclass.type == action_code:
                action_class = subclass
                action = action_class(name, configuration, managed_input)
                return action
        # No match yet: drop the loop reference and collect whatever the
        # module import churned.
        subclass = None
        gc.collect()
Factory method to create an instance of an Action from an input code
11,639
def extract_class(jar, name):
    """Read entry `name` from the jar and wrap the unpacked class as a LinkableClass."""
    with jar.open(name) as entry:
        class_info = javatools.unpack_class(entry)
        return LinkableClass(class_info)
Extracts a LinkableClass from a jar .
11,640
def _format_summary_node(self, task_class):
    """Build the section nodes summarizing a Task class's key APIs."""
    modulename = task_class.__module__
    classname = task_class.__name__
    nodes = [self._format_class_nodes(task_class),
             self._format_config_nodes(modulename, classname)]
    # Summarize whichever standard entry points the task provides.
    for method in ('run', 'runDataRef'):
        if hasattr(task_class, method):
            method_obj = getattr(task_class, method)
            nodes.append(self._format_method_nodes(method_obj, modulename, classname))
    return nodes
Format a section node containing a summary of a Task class's key APIs.
11,641
def _format_class_nodes(self, task_class):
    """Create a desc node summarizing the class signature and docstring."""
    modulename = task_class.__module__
    classname = task_class.__name__
    fullname = '.'.join((modulename, classname))
    signature = Signature(task_class, bound_method=False)
    sig_node = self._format_signature(signature, modulename, classname,
                                      fullname, 'py:class')
    content_node = desc_content()
    content_node += self._create_doc_summary(task_class, fullname, 'py:class')
    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'class'
    desc_node += sig_node
    desc_node += content_node
    return desc_node
Create a desc node summarizing the class docstring .
11,642
def _format_method_nodes(self, task_method, modulename, classname):
    """Create a desc node summarizing a method signature and docstring."""
    fullname = '.'.join((modulename, classname, task_method.__name__))
    signature = Signature(task_method, bound_method=True)
    sig_node = self._format_signature(signature, modulename, classname,
                                      fullname, 'py:meth')
    content_node = desc_content()
    content_node += self._create_doc_summary(task_method, fullname, 'py:meth')
    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'method'
    desc_node += sig_node
    desc_node += content_node
    return desc_node
Create a desc node summarizing a method docstring .
11,643
def _create_doc_summary(self, obj, fullname, refrole):
    """Paragraph with the object's one-sentence docstring summary plus an API link."""
    summary_text = extract_docstring_summary(get_docstring(obj)).strip()
    # Drop trailing period(s) so the '...' link reads as a continuation.
    if summary_text.endswith('.'):
        summary_text = summary_text.rstrip('.')
    paragraph = nodes.paragraph(text=summary_text)
    paragraph += self._create_api_details_link(fullname, refrole)
    return paragraph
Create a paragraph containing the object's one-sentence docstring summary, with a link to further documentation.
11,644
def _create_api_details_link(self, fullname, refrole):
    """Cross-reference node labelled '...' linking to the full API docs."""
    ref_text = '... <{}>'.format(fullname)
    xref = PyXRefRole()
    xref_nodes, _ = xref(refrole, ref_text, ref_text,
                         self.lineno, self.state.inliner)
    return xref_nodes
Appends a link to the API docs labelled as ... that is appended to the content paragraph of an API description .
11,645
def _format_config_nodes(self, modulename, classname):
    """Create a desc node summarizing the task's `config` attribute."""
    fullname = '{0}.{1}.config'.format(modulename, classname)
    sig_node = desc_signature()
    sig_node['module'] = modulename
    sig_node['class'] = classname
    sig_node['fullname'] = fullname
    prefix = 'attribute'
    sig_node += desc_annotation(prefix, prefix)
    name_node = desc_addname('config', 'config')
    name_node['classes'].extend(['xref', 'py'])
    sig_node += name_node
    summary_text = ('Access configuration fields and retargetable subtasks.')
    content_node = desc_content()
    content_node += nodes.paragraph(text=summary_text)
    desc_node = desc()
    desc_node['noindex'] = True
    desc_node['domain'] = 'py'
    desc_node['objtype'] = 'attribute'
    desc_node += sig_node
    desc_node += content_node
    return desc_node
Create a desc node summarizing the config attribute
11,646
def _format_import_example(self, task_class):
    """Literal code block showing how to import the task class."""
    code = 'from {0.__module__} import {0.__name__}'.format(task_class)
    block = nodes.literal_block(code, code)
    block['language'] = 'py'
    return [block]
Generate nodes that show a code sample demonstrating how to import the task class .
11,647
def _format_api_docs_link_message(self, task_class):
    """A 'seealso' admonition pointing the reader at the full API reference."""
    fullname = '{0.__module__}.{0.__name__}'.format(task_class)
    p_node = nodes.paragraph()
    _ = 'See the '
    p_node += nodes.Text(_, _)
    # '~' renders only the final path component as the link text.
    xref = PyXRefRole()
    xref_nodes, _ = xref('py:class', '~' + fullname, '~' + fullname,
                         self.lineno, self.state.inliner)
    p_node += xref_nodes
    _ = ' API reference for complete details.'
    p_node += nodes.Text(_, _)
    seealso_node = seealso()
    seealso_node += p_node
    return [seealso_node]
Format a message referring the reader to the full API docs .
11,648
def send_exception(self):
    """Reset the compiler and stream the current traceback to the client."""
    self.compiler.reset()
    formatted = traceback.format_exc()
    self.writer.write(formatted.encode('utf8'))
    yield from self.writer.drain()
When an exception has occurred write the traceback to the user .
11,649
def handle_one_command(self):
    """Prompt, read, and run commands forever (a command may span lines)."""
    while True:
        yield from self.write_prompt()
        codeobj = yield from self.read_command()
        # read_command yields None while the statement is incomplete.
        if codeobj is not None:
            yield from self.run_command(codeobj)
Process a single command . May have many lines .
11,650
def run_command(self, codeobj):
    """Execute a compiled code object and write the output (or the
    traceback, on failure) back to the client.
    """
    try:
        value, stdout = yield from self.attempt_exec(codeobj, self.namespace)
    except Exception:
        # Any error raised by user code is reported, not propagated.
        yield from self.send_exception()
        return
    else:
        yield from self.send_output(value, stdout)
Execute a compiled code object and write the output back to the client .
11,651
def read_command(self):
    """Read a command from the client and compile it.

    Returns the compiled code object, or ``None`` when compilation
    failed (the SyntaxError traceback is sent back to the client).

    Raises
    ------
    ConnectionResetError
        When the client closed the connection (empty read).
    """
    reader = self.reader
    line = yield from reader.readline()
    if line == b'':
        raise ConnectionResetError()
    try:
        codeobj = self.attempt_compile(line.rstrip(b'\n'))
    except SyntaxError:
        yield from self.send_exception()
        return
    return codeobj
Read a command from the user line by line .
11,652
def send_output(self, value, stdout):
    """Write an expression's repr and any captured stdout to the client.

    ``value`` is skipped when ``None`` (statement, not expression), mirroring
    the interactive interpreter's behavior.
    """
    writer = self.writer
    if value is not None:
        writer.write('{!r}\n'.format(value).encode('utf8'))
    if stdout:
        writer.write(stdout.encode('utf8'))
    yield from writer.drain()
Write the output or value of the expression back to user .
11,653
def call(self, method, *args):
    """Call ``method`` on the soap service with the given arguments.

    Translates transport errors and soap ``WebFault``s into this
    package's exception hierarchy.

    Raises
    ------
    ConnectError
        On URL or SSL failures reaching the service.
    TableFault, ListFault, ApiLimitError, AccountFault
        For the corresponding fault strings reported by the service.
    ServiceError
        For any other web fault.
    """
    try:
        response = getattr(self.client.service, method)(*args)
    except (URLError, SSLError) as e:
        log.exception('Failed to connect to responsys service')
        raise ConnectError("Request to service timed out")
    except WebFault as web_fault:
        # Map the fault string onto a specific exception type.
        fault_name = getattr(web_fault.fault, 'faultstring', None)
        error = str(web_fault.fault.detail)
        if fault_name == 'TableFault':
            raise TableFault(error)
        if fault_name == 'ListFault':
            raise ListFault(error)
        if fault_name == 'API_LIMIT_EXCEEDED':
            raise ApiLimitError(error)
        if fault_name == 'AccountFault':
            raise AccountFault(error)
        raise ServiceError(web_fault.fault, web_fault.document)
    return response
Calls the service method defined with the arguments provided
11,654
def connect(self):
    """Connect to the Responsys soap service.

    Re-uses the cached session when still valid; an expired session is
    abandoned and a fresh login performed.

    Returns
    -------
    float
        Timestamp of the (re)connection.

    Raises
    ------
    AccountFault
        When the stored username/password are rejected.
    """
    if self.session and self.session.is_expired:
        self.disconnect(abandon_session=True)
    if not self.session:
        try:
            login_result = self.login(self.username, self.password)
        except AccountFault:
            log.error('Login failed, invalid username or password')
            raise
        else:
            self.session = login_result.session_id
    self.connected = time()
    return self.connected
Connects to the Responsys soap service
11,655
def disconnect(self, abandon_session=False):
    """Disconnect from the Responsys soap service.

    Logs out and drops the cached session when it has expired or when
    ``abandon_session`` is True.

    Parameters
    ----------
    abandon_session : bool
        Force termination of the current session regardless of expiry.

    Returns
    -------
    bool
        Always ``True``.
    """
    self.connected = False
    if (self.session and self.session.is_expired) or abandon_session:
        try:
            self.logout()
        # Bug fix: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit. Narrowed to Exception; logout
        # remains best-effort and the local session is dropped either way.
        except Exception:
            log.warning('Logout call to responsys failed, session may have not been terminated', exc_info=True)
        del self.session
    return True
Disconnects from the Responsys soap service
11,656
def merge_list_members(self, list_, record_data, merge_rule):
    """Responsys.mergeListMembers call.

    All three arguments are converted to their soap representations
    before the service call; returns a ``MergeResult``.
    """
    list_ = list_.get_soap_object(self.client)
    record_data = record_data.get_soap_object(self.client)
    merge_rule = merge_rule.get_soap_object(self.client)
    return MergeResult(self.call('mergeListMembers', list_, record_data, merge_rule))
Responsys . mergeListMembers call
11,657
def merge_list_members_RIID(self, list_, record_data, merge_rule):
    """Responsys.mergeListMembersRIID call.

    NOTE(review): unlike ``merge_list_members``, only ``list_`` is
    converted via ``get_soap_object`` here — ``record_data`` and
    ``merge_rule`` are passed through as-is; confirm this is intentional.
    """
    list_ = list_.get_soap_object(self.client)
    result = self.call('mergeListMembersRIID', list_, record_data, merge_rule)
    return RecipientResult(result.recipientResult)
Responsys . mergeListMembersRIID call
11,658
def delete_list_members(self, list_, query_column, ids_to_delete):
    """Responsys.deleteListMembers call.

    Always returns a list of ``DeleteResult``, even when the service
    returned a single (non-iterable) result.
    """
    list_ = list_.get_soap_object(self.client)
    result = self.call('deleteListMembers', list_, query_column, ids_to_delete)
    if hasattr(result, '__iter__'):
        return [DeleteResult(delete_result) for delete_result in result]
    return [DeleteResult(result)]
Responsys . deleteListMembers call
11,659
def retrieve_list_members(self, list_, query_column, field_list, ids_to_retrieve):
    """Responsys.retrieveListMembers call.

    Returns the result's ``recordData`` converted to a ``RecordData``.
    """
    list_ = list_.get_soap_object(self.client)
    result = self.call('retrieveListMembers', list_, query_column, field_list, ids_to_retrieve)
    return RecordData.from_soap_type(result.recordData)
Responsys . retrieveListMembers call
11,660
def create_table(self, table, fields):
    """Responsys.createTable call."""
    table = table.get_soap_object(self.client)
    return self.call('createTable', table, fields)
Responsys . createTable call
11,661
def create_table_with_pk(self, table, fields, primary_keys):
    """Responsys.createTableWithPK call."""
    table = table.get_soap_object(self.client)
    return self.call('createTableWithPK', table, fields, primary_keys)
Responsys . createTableWithPK call
11,662
def delete_table(self, table):
    """Responsys.deleteTable call."""
    table = table.get_soap_object(self.client)
    return self.call('deleteTable', table)
Responsys . deleteTable call
11,663
def delete_profile_extension_members(self, profile_extension, query_column, ids_to_delete):
    """Responsys.deleteProfileExtensionMembers call.

    Always returns a list of ``DeleteResult``, even when the service
    returned a single (non-iterable) result.
    """
    profile_extension = profile_extension.get_soap_object(self.client)
    result = self.call('deleteProfileExtensionMembers', profile_extension, query_column, ids_to_delete)
    if hasattr(result, '__iter__'):
        return [DeleteResult(delete_result) for delete_result in result]
    return [DeleteResult(result)]
Responsys . deleteProfileExtensionMembers call
11,664
def retrieve_profile_extension_records(self, profile_extension, field_list, ids_to_retrieve, query_column='RIID'):
    """Responsys.retrieveProfileExtensionRecords call.

    ``query_column`` defaults to ``'RIID'``.
    """
    profile_extension = profile_extension.get_soap_object(self.client)
    return RecordData.from_soap_type(self.call('retrieveProfileExtensionRecords', profile_extension, query_column, field_list, ids_to_retrieve))
Responsys . retrieveProfileExtensionRecords call
11,665
def truncate_table(self, table):
    """Responsys.truncateTable call."""
    table = table.get_soap_object(self.client)
    return self.call('truncateTable', table)
Responsys . truncateTable call
11,666
def delete_table_records(self, table, query_column, ids_to_delete):
    """Responsys.deleteTableRecords call.

    Always returns a list of ``DeleteResult``, even when the service
    returned a single (non-iterable) result.
    """
    table = table.get_soap_object(self.client)
    result = self.call('deleteTableRecords', table, query_column, ids_to_delete)
    if hasattr(result, '__iter__'):
        return [DeleteResult(delete_result) for delete_result in result]
    return [DeleteResult(result)]
Responsys . deleteTableRecords call
11,667
def merge_table_records(self, table, record_data, match_column_names):
    """Responsys.mergeTableRecords call; returns a ``MergeResult``."""
    table = table.get_soap_object(self.client)
    record_data = record_data.get_soap_object(self.client)
    return MergeResult(self.call('mergeTableRecords', table, record_data, match_column_names))
Responsys . mergeTableRecords call
11,668
def merge_table_records_with_pk(self, table, record_data, insert_on_no_match, update_on_match):
    """Responsys.mergeTableRecordsWithPK call; returns a ``MergeResult``."""
    table = table.get_soap_object(self.client)
    record_data = record_data.get_soap_object(self.client)
    return MergeResult(self.call('mergeTableRecordsWithPK', table, record_data, insert_on_no_match, update_on_match))
Responsys . mergeTableRecordsWithPK call
11,669
def merge_into_profile_extension(self, profile_extension, record_data, match_column, insert_on_no_match, update_on_match):
    """Responsys.mergeIntoProfileExtension call.

    Returns a list of ``RecipientResult``, one per merged record.
    """
    profile_extension = profile_extension.get_soap_object(self.client)
    record_data = record_data.get_soap_object(self.client)
    results = self.call('mergeIntoProfileExtension', profile_extension, record_data, match_column, insert_on_no_match, update_on_match)
    return [RecipientResult(result) for result in results]
Responsys . mergeIntoProfileExtension call
11,670
def retrieve_table_records(self, table, query_column, field_list, ids_to_retrieve):
    """Responsys.retrieveTableRecords call; returns a ``RecordData``."""
    table = table.get_soap_object(self.client)
    return RecordData.from_soap_type(self.call('retrieveTableRecords', table, query_column, field_list, ids_to_retrieve))
Responsys . retrieveTableRecords call
11,671
def normalize_docroot(app, root):
    """Create a package-list URL and a link base from a docroot element.

    ``root`` may be a plain string or a dict with a ``'root'`` key and
    optional ``'base'`` and ``'version'`` keys; missing values fall back
    to the ``javalink_default_version`` config and the root itself.
    """
    srcdir = app.env.srcdir
    default_version = app.config.javalink_default_version
    if isinstance(root, basestring):  # NOTE: Python 2 only (``basestring``)
        (url, base) = _parse_docroot_str(srcdir, root)
        return {'root': url, 'base': base, 'version': default_version}
    else:
        normalized = {}
        normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0]
        if 'base' in root:
            normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1]
        else:
            # No explicit base: derive it from the root URL.
            normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1]
        if 'version' in root:
            normalized['version'] = root['version']
        else:
            normalized['version'] = default_version
        return normalized
Creates a package - list URL and a link base from a docroot element .
11,672
def assign_valence(mol):
    """Assign pi electrons and implicit hydrogens to all atoms of ``mol``.

    Marks carbonyl carbons (C double-bonded to uncharged O) and records
    the ``"Valence"`` descriptor on the molecule.
    """
    for u, v, bond in mol.bonds_iter():
        if bond.order == 2:
            mol.atom(u).pi = 1
            mol.atom(v).pi = 1
            # C=O with uncharged oxygen: flag the carbon as carbonyl.
            if mol.atom(u).symbol == "O" and not mol.atom(u).charge:
                mol.atom(v).carbonyl_C = 1
            if mol.atom(v).symbol == "O" and not mol.atom(v).charge:
                mol.atom(u).carbonyl_C = 1
        elif bond.order == 3:
            mol.atom(u).pi = mol.atom(v).pi = 2
    # Maximum neighbor counts used to derive implicit hydrogens.
    max_nbr = {"C": 4, "Si": 4, "N": 3, "P": 3, "As": 3, "O": 2, "S": 2, "Se": 2, "F": 1, "Cl": 1, "Br": 1, "I": 1}
    for i, nbrs in mol.neighbors_iter():
        atom = mol.atom(i)
        # Two double bonds on one atom (cumulated diene center) → 2 pi.
        if len(nbrs) == 2 and all(bond.order == 2 for bond in nbrs.values()):
            atom.pi = 2
        if atom.symbol in max_nbr:
            h_cnt = max_nbr[atom.symbol] - len(nbrs) - atom.pi + atom.charge
            if h_cnt > 0:
                mol.atom(i).add_hydrogen(h_cnt)
    mol.descriptors.add("Valence")
Assign pi electrons and hydrogens
11,673
def assign_charge(mol, force_recalc=False):
    """Assign charges as they would appear in physiological condition.

    Requires the ``"Aromatic"`` descriptor; records ``"Phys_charge"`` on
    completion. ``force_recalc`` is currently unused by this body.
    """
    mol.require("Aromatic")
    for i, nbrs in mol.neighbors_iter():
        atom = mol.atom(i)
        nbrcnt = len(nbrs)
        if atom.symbol == "N":
            if not atom.pi:
                # Aliphatic amine: protonated at physiological pH.
                mol.atom(i).charge_phys = 1
            elif nbrcnt == 1 and atom.pi == 2:
                # Terminal sp nitrogen: look for conjugated N partners.
                ni = list(nbrs.keys())[0]
                conj = False
                sp2n = None
                for nni, nnb in mol.neighbors(ni).items():
                    if mol.atom(nni).symbol == "N" and nnb.order == 2 and not mol.atom(nni).aromatic:
                        mol.atom(nni).charge_conj = 1
                        conj = True
                    elif mol.atom(nni).symbol == "N" and nni != i:
                        sp2n = nni
                if conj:
                    mol.atom(i).charge_phys = 1
                    if sp2n is not None:
                        mol.atom(sp2n).charge_conj = 1
        elif atom.symbol == "O" and nbrcnt == 1 and atom.pi == 2:
            # Terminal double-bonded oxygen (e.g. acid / N-oxide oxygen).
            ni = list(nbrs.keys())[0]
            conj = False
            if mol.atom(ni).symbol == "N":
                mol.atom(i).n_oxide = True
                mol.atom(ni).n_oxide = True
            for nni, nnb in mol.neighbors(ni).items():
                if mol.atom(nni).symbol in ("O", "S") and nnb.order == 2 and not mol.atom(ni).n_oxide:
                    mol.atom(nni).charge_conj = -1
                    conj = True
            if conj:
                mol.atom(i).charge_phys = -1
        elif atom.symbol == "S" and nbrcnt == 1:
            # Thiolate attached to an aromatic ring.
            ni = list(nbrs.keys())[0]
            if mol.atom(ni).aromatic:
                mol.atom(i).charge_phys = -1
    mol.charge_assigned = True
    mol.descriptors.add("Phys_charge")
Assign charges in physiological condition
11,674
def get_type(type_name):
    """Get a type given its importable name.

    Parameters
    ----------
    type_name : `str`
        Fully-qualified name, e.g. ``module.MyClass``.

    Raises
    ------
    SphinxError
        When the name contains no module component.
    """
    module_path, separator, attr_name = type_name.rpartition('.')
    if not separator:
        raise SphinxError('Type must be fully-qualified, '
                          'of the form ``module.MyClass``. Got: {}'.format(type_name))
    return getattr(import_module(module_path), attr_name)
Get a type given its importable name .
11,675
def get_task_config_fields(config_class):
    """Get all configuration ``Field`` members from a Config class.

    Returns a dict of field name -> Field, ordered alphabetically.
    """
    from lsst.pex.config import Field

    def is_config_field(obj):
        # Predicate for inspect.getmembers: true for pex_config Fields.
        return isinstance(obj, Field)
    return _get_alphabetical_members(config_class, is_config_field)
Get all configuration Fields from a Config class .
11,676
def get_subtask_fields(config_class):
    """Get all configurable/retargetable subtask fields from a Config class.

    Returns a dict of field name -> ConfigurableField or RegistryField,
    ordered alphabetically.
    """
    from lsst.pex.config import ConfigurableField, RegistryField

    def is_subtask_field(obj):
        # Predicate for inspect.getmembers: true for subtask field types.
        return isinstance(obj, (ConfigurableField, RegistryField))
    return _get_alphabetical_members(config_class, is_subtask_field)
Get all configurable subtask fields from a Config class .
11,677
def _get_alphabetical_members ( obj , predicate ) : fields = dict ( inspect . getmembers ( obj , predicate ) ) keys = list ( fields . keys ( ) ) keys . sort ( ) return { k : fields [ k ] for k in keys }
Get members of an object sorted alphabetically .
11,678
def typestring(obj):
    """Return ``module.ClassName`` for the type of ``obj``."""
    cls = type(obj)
    return '{}.{}'.format(cls.__module__, cls.__name__)
Make a string for the object s type
11,679
def get_docstring(obj):
    """Extract the docstring from an object as individual lines.

    Falls back to ``'Undocumented'`` (logging a warning) when the object
    has no docstring, inherited or otherwise.
    """
    docstring = getdoc(obj, allow_inherited=True)
    if docstring is None:
        logger = getLogger(__name__)
        logger.warning("Object %s doesn't have a docstring.", obj)
        docstring = 'Undocumented'
    # ignore=1 presumably skips indentation handling of the first line —
    # TODO confirm against sphinx.util.docstrings.prepare_docstring.
    return prepare_docstring(docstring, ignore=1)
Extract the docstring from an object as individual lines .
11,680
def extract_docstring_summary(docstring):
    """Get the summary from a docstring given as a list of lines.

    The summary is every line up to (but not including) the first empty
    line, joined by single spaces.
    """
    summary = itertools.takewhile(lambda line: line != '', docstring)
    return ' '.join(summary)
Get the first summary sentence from a docstring .
11,681
def run(self):
    """Run loading of movie character appearances scraped from the wikia.

    For each movie, walks the 'Appearances' section of its wikia article,
    collecting character links under relevant headings, creating
    ``MovieAppearance`` rows, and tallying character universes.
    """
    request_cache = cache.get_request_cache()
    session = client.get_client().create_session()
    self.mark_incomplete()
    universes = []
    with session.no_autoflush:
        movies = session.query(models.Movie).all()
        for movie in movies:
            # Fetch the article (rate-limited, cached).
            article = request_cache.get("http://marvel.wikia.com" + movie.url, xpath="//article[@id='WikiaMainContent']", rate_limit=0.5)
            doc = html.fromstring(article)
            node = doc.xpath("//span[@id='Appearances']")[0]
            node = node.getparent()
            appearance_type = "Featured Characters"
            node = node.getnext()
            # Walk siblings until the next h2 section heading.
            while node is not None and node.tag != 'h2':
                if node.tag == 'ul' and ('characters' in appearance_type.lower() or 'villains' in appearance_type.lower()):
                    for li in node.iter('li'):
                        for a in li:
                            if a.tag != 'a':
                                continue
                            # Skip image links and non-wiki hrefs.
                            if "image" in a.get("class", "") or not a.get("href").startswith("/wiki/"):
                                continue
                            # Universe designation in parentheses, e.g. (Earth-199999).
                            match = re.search(r'\(.*?\)', a.get('href'))
                            if match:
                                universes.append(match.group()[1:-1])
                            try:
                                character = session.query(models.Character).filter(models.Character.url == a.get("href")).one()
                                appearance = models.MovieAppearance(movie_id=movie.id, character_id=character.id, appearance_type=appearance_type)
                                session.add(appearance)
                            except NoResultFound:
                                # Character not in the database; skip it.
                                pass
                            # Only the first qualifying link per list item.
                            break
                elif node.tag == 'p':
                    # Paragraphs act as sub-headings naming the appearance type.
                    appearance_type = " ".join(node.itertext()).strip().strip(':').strip()
                node = node.getnext()
    print("\nNumber of character appearances per universe: ")
    print(pd.Series(data=universes).value_counts())
    session.commit()
    session.close()
    self.mark_complete()
Run loading of movie appearances .
11,682
def run(self):
    """Compute and store inflation-adjusted movie budgets.

    Scales each movie's budget to the latest available CPI year using
    the annual consumer price index.
    """
    self.mark_incomplete()
    session = client.get_client().create_session()
    cpi = ConsumerPriceIndexFile().load()
    max_cpi_year = cpi['Year'].max()
    # Series: year -> annual CPI value.
    cpi = cpi.set_index('Year')['Annual']
    for movie in session.query(models.Movie).all():
        if movie.year is not None and movie.budget is not None:
            if movie.year > max_cpi_year:
                # No CPI data for this year yet; keep the nominal budget.
                movie.budget_inflation_adjusted = movie.budget
            else:
                movie.budget_inflation_adjusted = movie.budget * cpi.loc[max_cpi_year] / cpi.loc[movie.year]
    session.commit()
    session.close()
    self.mark_complete()
Compute and store inflation - adjusted movie budgets
11,683
def _argsort ( y_score , k = None ) : ranks = y_score . argsort ( ) argsort = ranks [ : : - 1 ] if k is not None : argsort = argsort [ 0 : k ] return argsort
Returns the indexes in descending order of the top k score or all scores if k is None
11,684
def count(y_true, y_score=None, countna=False):
    """Count examples.

    With ``countna`` False, only labeled examples (``y_true`` not NaN)
    are counted; otherwise all of them. ``y_score`` is unused.
    """
    if countna:
        return len(y_true)
    return (~np.isnan(to_float(y_true))).sum()
Counts the number of examples . If countna is False then only count labeled examples i . e . those with y_true not NaN
11,685
def count_series(y_true, y_score, countna=False):
    """Series whose i-th entry is the number of examples in the top i.

    With ``countna`` False only labeled (non-NaN) examples are counted.
    """
    y_true, y_score = to_float(y_true, y_score)
    top = _argsort(y_score)
    if countna:
        cumulative = range(1, len(y_true) + 1)
    else:
        cumulative = (~np.isnan(y_true[top])).cumsum()
    return pd.Series(cumulative, index=range(1, len(cumulative) + 1))
Returns series whose i - th entry is the number of examples in the top i
11,686
def baseline(y_true, y_score=None):
    """Number of positive labels divided by the number of labels, or
    zero when there are no examples at all. ``y_score`` is unused.
    """
    if not len(y_true):
        return 0.0
    return np.nansum(y_true) / count(y_true, countna=False)
Number of positive labels divided by number of labels or zero if there are no labels
11,687
def roc_auc(y_true, y_score):
    """Area under the ROC curve over labeled (non-NaN) examples."""
    labeled = ~np.isnan(y_true)
    fpr, tpr, _ = sklearn.metrics.roc_curve(y_true[labeled], y_score[labeled])
    return sklearn.metrics.auc(fpr, tpr)
Returns area under the ROC curve
11,688
def recall_series(y_true, y_score, k=None, value=True):
    """Series of length k whose i-th entry is the count of (non-)positive
    labels among the top-i scored examples.

    With ``value`` False the labels are inverted (counts negatives).
    """
    y_true, y_score = to_float(y_true, y_score)
    top = _argsort(y_score, k)
    labels = y_true if value else 1 - y_true
    cumulative = np.nan_to_num(labels[top]).cumsum()
    return pd.Series(cumulative, index=np.arange(1, len(cumulative) + 1))
Returns series of length k whose i - th entry is the recall in the top i
11,689
def autorotate(image, orientation=None):
    """Rotate and return an image according to its Exif orientation.

    Parameters
    ----------
    image
        A PIL image.
    orientation : int, optional
        Explicit Exif orientation value (1-8); when falsy it is read
        from the image's own Exif data.

    Raises
    ------
    ImDirectException
        When no orientation is available from either source.
    """
    orientation_value = orientation if orientation else image._getexif().get(EXIF_KEYS.get('Orientation'))
    if orientation_value is None:
        raise ImDirectException("No orientation available in Exif "
                                "tag or given explicitly.")
    # Rotation component of the Exif orientation value.
    if orientation_value in (1, 2):
        i = image
    elif orientation_value in (3, 4):
        i = image.transpose(Image.ROTATE_180)
    elif orientation_value in (5, 6):
        i = image.transpose(Image.ROTATE_270)
    elif orientation_value in (7, 8):
        i = image.transpose(Image.ROTATE_90)
    else:
        i = image
    # Orientations 2, 4, 5 and 7 additionally require mirroring.
    if orientation_value in (2, 4, 5, 7):
        i = i.transpose(Image.FLIP_LEFT_RIGHT)
    return i
Rotate and return an image according to its Exif information .
11,690
def imdirect_open(fp):
    """Open and identify the given image file, autorotating JPEGs.

    For JPEG input with an Exif orientation other than 1, the image is
    rotated, its Exif updated, and the result re-opened from an
    in-memory buffer so the returned image carries corrected Exif data.

    Parameters
    ----------
    fp
        Filename (string) or binary file object.
    """
    img = pil_open(fp, 'r')
    if img.format == 'JPEG':
        # Read Exif either from the path or the raw file contents.
        if isinstance(fp, string_types):
            exif = piexif.load(text_type_to_use(fp))
        else:
            fp.seek(0)
            exif = piexif.load(fp.read())
        orientation_value = exif.get('0th', {}).get(piexif.ImageIFD.Orientation)
        if orientation_value is None or orientation_value == 1:
            # No tag, or already upright: nothing to do.
            return img
        img_rot = autorotate(img)
        exif = update_exif_for_rotated_image(exif)
        # Re-encode so the returned image includes the updated Exif.
        with io.BytesIO() as bio:
            img_rot.save(bio, format='jpeg', exif=piexif.dump(exif))
            bio.seek(0)
            img_rot_new = pil_open(bio, 'r')
            # Force a full load before the buffer is closed.
            img_rot_new.load()
        img = img_rot_new
    return img
Opens identifies the given image file and rotates it if it is a JPEG .
11,691
def monkey_patch(enabled=True):
    """Monkey patch (or restore) the ``PIL.Image.open`` method.

    With ``enabled`` True, ``Image.open`` becomes ``imdirect_open``;
    otherwise the original PIL implementation is restored.
    """
    Image.open = imdirect_open if enabled else pil_open
Monkey patching PIL . Image . open method
11,692
def save_with_exif_info(img, *args, **kwargs):
    """Save an image using PIL, preserving its exif information.

    An explicit ``exif=`` keyword wins; otherwise the exif bytes are
    taken from ``img.info``.
    """
    exif = kwargs.pop('exif', img.info.get('exif'))
    img.save(*args, exif=exif, **kwargs)
Saves an image using PIL preserving the exif information .
11,693
def create(context, resource, **kwargs):
    """Create a resource on the DCI control server.

    Extra keyword arguments become the JSON payload (after sanitizing).
    Returns the HTTP response.
    """
    data = utils.sanitize_kwargs(**kwargs)
    uri = '%s/%s' % (context.dci_cs_api, resource)
    r = context.session.post(uri, timeout=HTTP_TIMEOUT, json=data)
    return r
Create a resource
11,694
def get(context, resource, **kwargs):
    """Get a specific resource by its ``id`` keyword argument.

    Remaining keyword arguments are passed as query parameters.
    Returns the HTTP response.
    """
    uri = '%s/%s/%s' % (context.dci_cs_api, resource, kwargs.pop('id'))
    r = context.session.get(uri, timeout=HTTP_TIMEOUT, params=kwargs)
    return r
List a specific resource
11,695
def get_data(context, resource, **kwargs):
    """Retrieve the data field from a resource.

    An optional ``keys`` keyword argument (list of key names) restricts
    the returned data. Returns the HTTP response.
    """
    url_suffix = ''
    if 'keys' in kwargs and kwargs['keys']:
        url_suffix = '/?keys=%s' % ','.join(kwargs.pop('keys'))
    uri = '%s/%s/%s/data%s' % (context.dci_cs_api, resource, kwargs.pop('id'), url_suffix)
    r = context.session.get(uri, timeout=HTTP_TIMEOUT, params=kwargs)
    return r
Retrieve data field from a resource
11,696
def update(context, resource, **kwargs):
    """Update a specific resource.

    Requires ``etag`` (sent as If-match for optimistic concurrency) and
    ``id`` keyword arguments; the rest become the JSON payload.
    Returns the HTTP response.
    """
    etag = kwargs.pop('etag')
    id = kwargs.pop('id')
    data = utils.sanitize_kwargs(**kwargs)
    uri = '%s/%s/%s' % (context.dci_cs_api, resource, id)
    r = context.session.put(uri, timeout=HTTP_TIMEOUT, headers={'If-match': etag}, json=data)
    return r
Update a specific resource
11,697
def delete(context, resource, id, **kwargs):
    """Delete a specific resource, or one of its subresources.

    Keyword arguments:
        etag: If-match etag for optimistic concurrency (optional).
        subresource: optional subresource collection name.
        subresource_id: id within the subresource collection.

    Returns the HTTP response.
    """
    etag = kwargs.pop('etag', None)
    # (removed dead self-assignment ``id = id`` from the original)
    subresource = kwargs.pop('subresource', None)
    subresource_id = kwargs.pop('subresource_id', None)
    uri = '%s/%s/%s' % (context.dci_cs_api, resource, id)
    if subresource:
        uri = '%s/%s/%s' % (uri, subresource, subresource_id)
    r = context.session.delete(uri, timeout=HTTP_TIMEOUT, headers={'If-match': etag})
    return r
Delete a specific resource
11,698
def purge(context, resource, **kwargs):
    """Purge a resource type.

    With a truthy ``force`` keyword argument this POSTs to actually
    purge; otherwise it GETs the purge endpoint (presumably a dry-run /
    preview — confirm against the DCI control server API).
    Returns the HTTP response.
    """
    uri = '%s/%s/purge' % (context.dci_cs_api, resource)
    if 'force' in kwargs and kwargs['force']:
        r = context.session.post(uri, timeout=HTTP_TIMEOUT)
    else:
        r = context.session.get(uri, timeout=HTTP_TIMEOUT)
    return r
Purge resource type .
11,699
def parse_rst_content(content, state):
    """Parse rST-formatted string content into docutils nodes.

    Parameters
    ----------
    content : `str`
        rST source text.
    state
        Docutils parser state (from a directive).

    Returns
    -------
    list
        The parsed child nodes.
    """
    # Parse into a throwaway section so its children can be returned.
    container_node = nodes.section()
    container_node.document = state.document
    viewlist = ViewList()
    for i, line in enumerate(content.splitlines()):
        viewlist.append(line, source='', offset=i)
    with switch_source_input(state, viewlist):
        state.nested_parse(viewlist, 0, container_node)
    return container_node.children
Parse rST - formatted string content into docutils nodes