idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
52,500
def id_source(source, full=False):
    """Return the registered (short or full) name of a web-scraping function.

    Returns '' when ``source`` is not registered.
    """
    if source not in source_ids:
        return ''
    short_name, full_name = source_ids[source][0], source_ids[source][1]
    return full_name if full else short_name
Returns the name of a web-scraping function.
52,501
def set_probe_position(self, new_probe_position):
    """Set the probe position in normalized coordinates with origin at top left.

    ``new_probe_position`` may be None (no probe position) or any value
    accepted by Geometry.FloatPoint.make; both coordinates are clamped to
    the [0.0, 1.0] range.  Fires probe_state_changed_event only when the
    value actually changes.
    """
    if new_probe_position is not None:
        new_probe_position = Geometry.FloatPoint.make(new_probe_position)
        # clamp both coordinates into the unit square
        new_probe_position = Geometry.FloatPoint(y=max(min(new_probe_position.y, 1.0), 0.0), x=max(min(new_probe_position.x, 1.0), 0.0))
    old_probe_position = self.__probe_position_value.value
    # fire only on a real change, including None <-> not-None transitions
    if ((old_probe_position is None) != (new_probe_position is None)) or (old_probe_position != new_probe_position):
        self.__probe_position_value.value = new_probe_position
        self.probe_state_changed_event.fire(self.probe_state, self.probe_position)
Set the probe position in normalized coordinates with origin at top left .
52,502
def apply_metadata_groups(self, properties: typing.Dict, metatdata_groups: typing.Tuple[typing.List[str], str]) -> None:
    """Apply metadata groups to properties.

    No-op in this implementation; presumably subclasses override it.
    NOTE(review): the parameter name 'metatdata_groups' is misspelled, but
    renaming it would break callers that pass it by keyword.
    """
    pass
Apply metadata groups to properties .
52,503
def create_analytic_backend(settings):
    """Create a new analytics backend from ``settings``.

    ``settings['backend']`` may be a dotted-path string (resolved via
    import_string) or the backend class/factory itself.  The backend is
    instantiated with ``settings['settings']`` (default: empty dict).

    Raises:
        KeyError: if no backend is configured.
    """
    backend = settings.get('backend')
    if isinstance(backend, basestring):  # py2-era code; string paths are imported
        backend = import_string(backend)
    elif not backend:
        raise KeyError('backend')
    # else: backend is already a callable/class -- use it as-is
    # (the original had a pointless `backend = backend` branch here)
    return backend(settings.get("settings", {}))
Creates a new Analytics backend from the settings
52,504
def size_to_content(self, horizontal_padding=None, vertical_padding=None):
    """Size the canvas item to the text content.

    Padding values default to 0.  The base content size is 18x18
    (presumably the glyph/icon size -- TODO confirm), padded on both sides.
    """
    if horizontal_padding is None:
        horizontal_padding = 0
    if vertical_padding is None:
        vertical_padding = 0
    self.sizing.set_fixed_size(Geometry.IntSize(18 + 2 * horizontal_padding, 18 + 2 * vertical_padding))
Size the canvas item to the text content .
52,505
def success_rate(self):
    """Return the percentage of successes among all logged results as a float.

    Returns 0 when nothing has been logged yet.
    """
    total_attempts = self.successes + self.fails
    if total_attempts == 0:
        return 0
    # 100.0 forces true division so a float is returned even on Python 2,
    # matching the documented contract
    return self.successes * 100.0 / total_attempts
Returns a float with the rate of success from all the logged results .
52,506
def add_result(self, source, found, runtime):
    """Record one scrape attempt for ``source``.

    Intended to be called after a website has been scraped: logs the
    runtime and bumps the success or failure counter for that source.
    """
    stats = self.source_stats[source.__name__]
    stats.add_runtime(runtime)
    if found:
        stats.successes += 1
    else:
        stats.fails += 1
Adds a new record to the statistics database. This function is intended to be called after a website has been scraped. The arguments indicate the function that was called, the time taken to scrape the website, and a boolean indicating whether the lyrics were found or not.
52,507
def avg_time(self, source=None):
    """Return the average time taken to scrape lyrics.

    With no ``source``, averages the non-zero runtimes across every source.
    ``source`` may be the scraper function itself or its name.
    """
    if source is None:
        runtimes = [r for rec in self.source_stats.values() for r in rec.runtimes if r != 0]
        return avg(runtimes)
    key = source.__name__ if callable(source) else source
    return avg(self.source_stats[key].runtimes)
Returns the average time taken to scrape lyrics . If a string or a function is passed as source return the average time taken to scrape lyrics from that source otherwise return the total average .
52,508
def calculate(self):
    """Calculate overall counts: best/worst/fastest/slowest source, total
    found, total not found and total runtime.

    Returns a dict; 'best'/'worst' are (source, successes, success_rate)
    tuples and 'fastest'/'slowest' are (source, avg_time) tuples (empty
    tuples when there are no sources at all).
    """
    best, worst, fastest, slowest = (), (), (), ()
    found = notfound = total_time = 0
    for source, rec in self.source_stats.items():
        if not best or rec.successes > best[1]:
            best = (source, rec.successes, rec.success_rate())
        if not worst or rec.successes < worst[1]:
            worst = (source, rec.successes, rec.success_rate())
        avg_time = self.avg_time(source)
        # sources with avg_time == 0 (never timed) never win fastest/slowest
        if not fastest or (avg_time != 0 and avg_time < fastest[1]):
            fastest = (source, avg_time)
        if not slowest or (avg_time != 0 and avg_time > slowest[1]):
            slowest = (source, avg_time)
        found += rec.successes
        notfound += rec.fails
        total_time += sum(rec.runtimes)
    return {'best': best, 'worst': worst, 'fastest': fastest, 'slowest': slowest, 'found': found, 'notfound': notfound, 'total_time': total_time}
Calculate the overall counts of best worst fastest slowest total found total not found and total runtime
52,509
def print_stats(self):
    """Print a summary of relevant stats about a full execution.

    Meant to be called once at the end of the program.
    """
    stats = self.calculate()
    # H:MM:SS -- the original computed minutes as (t/3600)/60, which is wrong
    hours, rem = divmod(int(stats['total_time']), 3600)
    minutes, seconds = divmod(rem, 60)
    total_time = '%d:%02d:%02d' % (hours, minutes, seconds)
    # BUGFIX: the original read `output` before assigning it (NameError);
    # a summary template with the same fields is supplied here.
    template = (
        'Total runtime: {total_time}\n'
        'Lyrics found: {found}\n'
        'Lyrics not found: {notfound}\n'
        'Best source: {best} ({best_count} found, {best_rate:.2f}% success rate)\n'
        'Worst source: {worst} ({worst_count} found, {worst_rate:.2f}% success rate)\n'
        'Fastest source: {fastest} (avg: {fastest_time:.2f}s)\n'
        'Slowest source: {slowest} (avg: {slowest_time:.2f}s)\n'
        'Average time per scrape: {avg_time:.2f}s'
    )
    output = template.format(
        total_time=total_time,
        found=stats['found'],
        notfound=stats['notfound'],
        best=stats['best'][0].capitalize(),
        best_count=stats['best'][1],
        best_rate=stats['best'][2],
        worst=stats['worst'][0].capitalize(),
        worst_count=stats['worst'][1],
        worst_rate=stats['worst'][2],
        fastest=stats['fastest'][0].capitalize(),
        fastest_time=stats['fastest'][1],
        slowest=stats['slowest'][0].capitalize(),
        slowest_time=stats['slowest'][1],
        avg_time=self.avg_time(),
    )
    for source in sources:
        stat = str(self.source_stats[source.__name__])
        output += f'\n{source.__name__.upper()}\n{stat}\n'
    print(output)
Print a series of relevant stats about a full execution . This function is meant to be called at the end of the program .
52,510
def broadcast(cls, s1: ParamsList, s2: ParamsList) -> BroadcastTuple:
    """Broadcast the smaller scope over the larger scope.

    Returns (scope, perm1, perm2): the merged scope plus the permutation
    (possibly empty) that moves each input's shared variables to its tail.
    The identical permutation computation for s1 and s2 was duplicated in
    the original; it is factored into a local helper here.
    """
    if len(s1) == 0:
        return s2, [], []
    if len(s2) == 0:
        return s1, [], []
    # variables common to both scopes; keep the original list object when
    # one scope is entirely contained in the other (preserves ordering)
    subscope = list(set(s1) & set(s2))
    if len(subscope) == len(s1):
        subscope = s1
    elif len(subscope) == len(s2):
        subscope = s2

    def permutation(s):
        # Permutation sending shared variables to the tail of ``s``;
        # empty when ``s`` already ends with ``subscope``.
        perm = []
        if s[-len(subscope):] != subscope:
            i = 0
            for var in s:
                if var not in subscope:
                    perm.append(i)
                    i += 1
                else:
                    perm.append(len(s) - len(subscope) + subscope.index(var))
        return perm

    perm1 = permutation(s1)
    perm2 = permutation(s2)
    # the merged scope is the larger input, reordered by its permutation
    larger, perm = (s1, perm1) if len(s1) >= len(s2) else (s2, perm2)
    if not perm:
        scope = larger
    else:
        scope = [larger[perm.index(i)] for i in range(len(larger))]
    return (scope, perm1, perm2)
It broadcasts the smaller scope over the larger scope .
52,511
def get_baseparser_extended_df(sample, bp_lines, ref, alt):
    """Turn baseParser result lines into an annotated DataFrame.

    Returns None when there is no input or no position with coverage > 0.
    When ``ref`` and ``alt`` are given, validation columns are added before
    the most-common allele/indel annotations.
    """
    columns = "chrom\tpos\tref\tcov\tA\tC\tG\tT\t*\t-\t+".split()
    if bp_lines is None:
        return None
    # BUGFIX: np.object was removed in NumPy 1.24; the builtin object is the
    # documented equivalent
    bpdf = pd.DataFrame(
        [[sample] + l.rstrip('\n').split("\t") for l in bp_lines if len(l) > 0],
        columns=["sample"] + columns, dtype=object)
    bpdf[bpdf == ""] = None
    bpdf = bpdf[bpdf["cov"].astype(int) > 0]
    if len(bpdf) == 0:
        return None
    if ref and alt:
        bpdf = pd.concat([bpdf, pd.DataFrame({"val_ref": pd.Series(ref), "val_alt": pd.Series(alt)})], axis=1)
        bpdf = pd.concat([bpdf, bpdf.apply(_val_al, axis=1)], axis=1)
    bpdf = pd.concat([bpdf, bpdf.apply(_most_common_indel, axis=1)], axis=1)
    bpdf = pd.concat([bpdf, bpdf.apply(_most_common_al, axis=1)], axis=1)
    bpdf["most_common_count"] = bpdf.apply(lambda x: max([x.most_common_al_count, x.most_common_indel_count]), axis=1)
    bpdf["most_common_maf"] = bpdf.apply(lambda x: max([x.most_common_al_maf, x.most_common_indel_maf]), axis=1)
    return bpdf
Turn baseParser results into a dataframe
52,512
def filter_out_mutations_in_normal(tumordf, normaldf, most_common_maf_min=0.2,
                                   most_common_count_maf_threshold=20,
                                   most_common_count_min=1):
    """Remove tumor mutations that are also present in the matched normal.

    Keeps rows whose most-common tumor allele/indel differs from the
    normal's and whose normal sample has adequate evidence (count/MAF
    thresholds).  Returns the filtered frame with '_N' columns dropped and
    the '_T' suffixes stripped.
    """
    df = tumordf.merge(normaldf, on=["chrom", "pos"], suffixes=("_T", "_N"))
    common_al = (df.most_common_al_count_T == df.most_common_count_T) & \
        (df.most_common_al_T == df.most_common_al_N)
    # BUGFIX: was df.imost_common_indel_N (typo -> AttributeError)
    common_indel = (df.most_common_indel_count_T == df.most_common_count_T) & \
        (df.most_common_indel_T == df.most_common_indel_N)
    normal_criteria = ((df.most_common_count_N >= most_common_count_maf_threshold) &
                       (df.most_common_maf_N > most_common_maf_min)) | \
                      ((df.most_common_count_N < most_common_count_maf_threshold) &
                       (df.most_common_count_N > most_common_count_min))
    df = df[~(common_al | common_indel) & normal_criteria]
    for c in df.columns:
        if c.endswith("_N"):
            del df[c]
    df.columns = [c[:-2] if c.endswith("_T") else c for c in df.columns]
    return df
Remove mutations that are in normal
52,513
def select_only_revertant_mutations(bpdf, snv=None, ins=None, dlt=None):
    """Select mutations that revert the given mutation in a single event.

    Exactly one of ``snv`` (a base), ``ins`` (inserted sequence) or ``dlt``
    (deleted sequence) must be provided.  For indels, "revertant" means the
    combined length change is a whole number of codons.
    """
    if sum([bool(snv), bool(ins), bool(dlt)]) != 1:
        raise Exception("Should be either snv, ins or del")
    dominant_indel = bpdf.most_common_count == bpdf.most_common_indel_count

    def frame_restoring(delta):
        # True where the combined length change is divisible by 3.
        # BUGFIX: the original wrote e.g. `len(x) + len(ins) % 3`, which
        # binds as `len(x) + (len(ins) % 3)`; the sum must be taken first.
        return bpdf.most_common_indel.apply(lambda x: delta(x) % 3 if x else None) == 0

    if snv:
        if snv not in ["A", "C", "G", "T"]:
            raise Exception("snv {} should be A, C, G or T".format(snv))
        return bpdf[(bpdf.most_common_al == snv) &
                    (bpdf.most_common_al_count == bpdf.most_common_count)]
    elif ins:
        # original insertion plus another insertion, or partially deleted
        return bpdf[(frame_restoring(lambda x: len(x) + len(ins)) &
                     (bpdf.most_common_indel_type == "+") & dominant_indel) |
                    (frame_restoring(lambda x: len(ins) - len(x)) &
                     (bpdf.most_common_indel_type == "-") & dominant_indel)]
    else:
        # original deletion plus an insertion, or plus another deletion
        return bpdf[(frame_restoring(lambda x: len(x) - len(dlt)) &
                     (bpdf.most_common_indel_type == "+") & dominant_indel) |
                    (frame_restoring(lambda x: -len(dlt) - len(x)) &
                     (bpdf.most_common_indel_type == "-") & dominant_indel)]
Selects only mutations that revert the given mutations in a single event .
52,514
def _prep_clients ( self , clients ) : for pool_id , client in enumerate ( clients ) : if hasattr ( client , "pool_id" ) : raise ValueError ( "%r is already part of a pool." , client ) setattr ( client , "pool_id" , pool_id ) self . _wrap_functions ( client ) return clients
Prep a client by tagging it with an id and wrapping its methods.
52,515
def _wrap_functions(self, client):
    """Wrap the client's public callables so connection failures penalize it.

    Private names and a small set of low-level methods are skipped; each
    wrapped method marks the client down and re-raises on
    ConnectionError/TimeoutError.
    """
    skip = ("echo", "execute_command", "parse_response")

    def wrap(fn):
        def wrapper(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except (ConnectionError, TimeoutError):
                self._penalize_client(client)
                raise
        return functools.update_wrapper(wrapper, fn)

    for name in dir(client):
        if name.startswith("_") or name in skip:
            continue
        obj = getattr(client, name)
        if not callable(obj):
            continue
        log.debug("Wrapping %s", name)
        setattr(client, name, wrap(obj))
Wrap public functions to catch ConnectionError .
52,516
def _prune_penalty_box(self):
    """Restore clients whose penalty period has expired.

    Re-sorts the active client list only when something was restored.
    """
    restored = list(self.penalty_box.get())
    for client in restored:
        log.info("Client %r is back up.", client)
        self.active_clients.append(client)
    if restored:
        self._sort_clients()
Restores clients that have reconnected .
52,517
def get_client(self, shard_key):
    """Return the client for ``shard_key``, preferring its original owner.

    Falls back to rehashing over the currently-active clients when the
    original owner is down.  Raises ClusterEmptyError when none are up.
    """
    self._prune_penalty_box()
    if len(self.active_clients) == 0:
        raise ClusterEmptyError("All clients are down.")
    if not isinstance(shard_key, bytes):
        shard_key = shard_key.encode("utf-8")
    hashed = mmh3.hash(shard_key)
    preferred = self.initial_clients[hashed % len(self.initial_clients)]
    if preferred in self.active_clients:
        return preferred
    return self.active_clients[hashed % len(self.active_clients)]
Get the client for a given shard based on what s available .
52,518
def _penalize_client(self, client):
    """Move ``client`` from the active list into the penalty box."""
    if client in self.active_clients:
        log.warning("%r marked down.", client)
        self.active_clients.remove(client)
        self.penalty_box.add(client)
    else:
        # BUGFIX: the %r placeholder had no argument to format
        log.info("%r not in active client list.", client)
Place client in the penalty box .
52,519
def zrevrange_with_int_score(self, key, max_score, min_score):
    """Aggregate zrevrangebyscore results across the cluster.

    Returns a mapping of element -> score, keeping the highest integer
    score seen for duplicate elements.  Raises ClusterEmptyError when no
    client is up.  A faster method should be written if scores are not
    needed.
    """
    self._prune_penalty_box()
    if len(self.active_clients) == 0:
        raise ClusterEmptyError("All clients are down.")
    element__score = defaultdict(int)
    for client in self.active_clients:
        matches = client.zrevrangebyscore(
            key, max_score, min_score, withscores=True, score_cast_func=int)
        for element, count in matches:
            element__score[element] = max(element__score[element], int(count))
    return element__score
Get the zrevrangebyscore across the cluster . Highest score for duplicate element is returned . A faster method should be written if scores are not needed .
52,520
def chain(*regexes, **kwargs):
    """Compile the given regexes into a Linker that applies partial matches.

    ``prepend_negation`` (keyword-only, default True) is forwarded to the
    Linker.
    """
    return Linker(regexes, prepend_negation=kwargs.get('prepend_negation', True))
A helper function to interact with the regular expression engine that compiles and applies partial matches to a string .
52,521
def generate_s3_url(files):
    """Create SolveBio Objects (with signed S3 upload URLs) for ``files``.

    ``files`` is a JSON-encoded list of {filename, mimetype, size} dicts
    sent from the React side; returns a JSON list of
    {id, filename, upload_url} dicts.  Returns None when ``files`` is empty.
    """
    if files:
        vault = g.client.Vault.get_personal_vault()
        files = json.loads(files)
        objects = []
        # iterate the entries directly instead of the py2-only
        # `for i in xrange(len(files))` index loop
        for entry in files:
            obj = g.client.Object.create(
                vault_id=vault.id,
                object_type='file',
                filename=entry.get('filename'),
                mimetype=entry.get('mimetype'),
                size=entry.get('size'))
            objects.append({'id': obj.id, 'filename': obj.filename, 'upload_url': obj.upload_url})
        return json.dumps(objects)
Takes files from React side creates SolveBio Object containing signed S3 URL .
52,522
def handle_uploaded_files(uploaded_files):
    """Handle downstream processing of files uploaded from the React side.

    ``uploaded_files`` is a JSON-encoded list; only the first entry is used.
    Creates (or reuses) a dataset named after the file, imports the
    uploaded object into it, then returns a Dash DataTable of selected
    columns.  Returns None when nothing was uploaded.
    """
    if uploaded_files:
        uploaded_files = json.loads(uploaded_files)[0]
        _id = uploaded_files.get('id')
        _filename = os.path.splitext(uploaded_files.get('filename'))[0]
        dataset = g.client.Dataset.get_or_create_by_full_path('~/' + _filename)
        g.client.DatasetImport.create(dataset_id=dataset.id, object_id=_id)
        # presumably blocks until the import activity completes -- TODO confirm
        dataset.activity(follow=True)
        SELECTED_COLS = ['col_a', 'col_b', 'col_c']
        query = dataset.query(fields=SELECTED_COLS)
        return html.Div(dt.DataTable(id='data-table', rows=list(query), columns=SELECTED_COLS))
Handles downstream processes using metadata about the uploaded files from React side .
52,523
def create_camera_panel(document_controller, panel_id, properties):
    """Create a custom camera panel.

    Looks up the registered "camera_panel" component matching
    properties["camera_panel_type"] and builds a declarative panel for the
    hardware source named by properties["hardware_source_id"].  Returns
    None when no matching component is registered.
    """
    camera_panel_type = properties.get("camera_panel_type")
    for component in Registry.get_components_by_type("camera_panel"):
        if component.camera_panel_type == camera_panel_type:
            hardware_source_id = properties["hardware_source_id"]
            hardware_source = HardwareSource.HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)
            # camera device/settings may be absent on the hardware source
            camera_device = getattr(hardware_source, "camera", None)
            camera_settings = getattr(hardware_source, "camera_settings", None)
            ui_handler = component.get_ui_handler(api_broker=PlugInManager.APIBroker(), event_loop=document_controller.event_loop, hardware_source_id=hardware_source_id, camera_device=camera_device, camera_settings=camera_settings)
            panel = Panel.Panel(document_controller, panel_id, properties)
            panel.widget = Declarative.DeclarativeWidget(document_controller.ui, document_controller.event_loop, ui_handler)
            return panel
    return None
Create a custom camera panel .
52,524
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
    """Resolve field-name params into "table.column" SQL references.

    Called before the final SQL is generated.  Numeric params are passed
    through unchanged; everything else is treated as an ORM field lookup
    path (a__b__c) and resolved through the query's join machinery.
    """
    c = self.copy()
    c.is_summary = summarize
    c.for_save = for_save
    final_points = []
    for p in self.params:  # index was unused; iterate values directly
        try:
            float(p)
        except (TypeError, ValueError):
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only conversion failures mean
            # "this is a field path"
            _, source, _, join_list, last = query.setup_joins(
                six.text_type(p).split('__'), query.model._meta,
                query.get_initial_alias())[:5]
            target, alias, _ = query.trim_joins(source, join_list, last)
            final_points.append("%s.%s" % (alias, target[0].get_attname_column()[1]))
        else:
            final_points.append(six.text_type(p))
    c.params = final_points
    return c
Set up any data here; this method will be called before the final SQL is generated.
52,525
def in_distance(self, distance, fields, points, annotate='_ed_distance'):
    """Filter rows inside a circle of radius ``distance``.

    Annotates each row with the earth distance between ``fields`` and
    ``points`` and keeps only rows where it is <= ``distance``.
    """
    clone = self._clone()
    distance_expr = EarthDistance([LlToEarth(fields), LlToEarth(points)])
    annotated = clone.annotate(**{annotate: distance_expr})
    return annotated.filter(**{'{0}__lte'.format(annotate): distance})
Filter rows inside a circumference of radius ``distance``.
52,526
def get(self, path, default=_NoDefault, as_type=None, resolve_references=True):
    """Get the value at ``path`` (keys joined by this namespace's separator).

    Mappings are wrapped in a new namespace so chained lookups keep
    working, strings may have references resolved, and ``as_type``
    converts the raw value.  Returns ``default`` (when supplied) for
    missing keys, otherwise raises NotConfiguredError.  Errors while
    resolving references always propagate.
    """
    value = self._source
    steps_taken = []
    try:
        for step in path.split(self._separator):
            # track the steps so a helpful key can be reported on failure
            steps_taken.append(step)
            value = value[step]
        if as_type:
            return as_type(value)
        elif isinstance(value, Mapping):
            namespace = type(self)(separator=self._separator, missing=self._missing)
            namespace._source = value
            namespace._root = self._root
            return namespace
        elif resolve_references and isinstance(value, str):
            return self._resolve(value)
        else:
            return value
    except ConfiguredReferenceError:
        # a broken *reference* is not a missing key: do not mask it
        raise
    except KeyError as e:
        if default is not _NoDefault:
            return default
        else:
            missing_key = self._separator.join(steps_taken)
            raise NotConfiguredError('no configuration for key {}'.format(missing_key), key=missing_key) from e
Gets a value for the specified path .
52,527
def action(action_callback=None, **kwargs):
    """Chisel action decorator; usable bare or with keyword arguments."""
    if action_callback is None:
        # called as @action(...): return the real decorator
        return lambda fn: action(fn, **kwargs)
    return Action(action_callback, **kwargs).decorate_module(action_callback)
Chisel action decorator
52,528
def get_plain_logname(base_name, root_dir, enable_json):
    """Return the path for the plain-text log file.

    When JSON logging is enabled, plain logs are nested under a 'plain/'
    subdirectory (created on demand) to prevent double log shipping.
    """
    target_dir = root_dir
    if enable_json:
        target_dir = os.path.join(root_dir, 'plain')
        if os.path.exists(root_dir) and not os.path.exists(target_dir):
            os.mkdir(target_dir)
    return os.path.join(target_dir, '{}.log'.format(base_name))
we nest all plain logs to prevent double log shipping
52,529
def succ(cmd, check_stderr=True, stdout=None, stderr=None):
    """Run ``cmd`` and assert it exited 0 (and, optionally, wrote no stderr).

    ``stdout``/``stderr`` lists, when given, are filled in-place with the
    captured output.  Returns (code, out, err).
    """
    code, out, err = run(cmd)
    if stdout is not None:
        stdout[:] = out
    if stderr is not None:
        stderr[:] = err
    if code != 0:
        # echo the child's output to aid debugging before failing
        for line in out:
            print(line)
    assert code == 0, 'Return: {} {}\nStderr: {}'.format(code, cmd, err)
    if check_stderr:
        assert err == [], 'Error: {} {}'.format(err, code)
    return code, out, err
Alias to run with check return code and stderr
52,530
def wait_socket(host, port, timeout=120):
    """Wait until a socket is open on the remote side; False on timeout."""
    probe = lambda: check_socket(host, port)
    return wait_result(probe, True, timeout)
Wait for a socket to be opened on the remote side. Returns False after the timeout.
52,531
def interpolate_sysenv(line, defaults={}):
    """Format ``line`` with system environment variables plus ``defaults``.

    Environment variables take precedence over ``defaults``.  The mutable
    default is safe here because the mapping is never mutated.
    """
    # renamed from 'map', which shadowed the builtin
    variables = ChainMap(os.environ, defaults)
    return line.format(**variables)
Format a line with system environment variables plus defaults.
52,532
def cd(dir_name):
    """Context-manager generator: chdir into ``dir_name`` for the duration
    of the block and always change back afterwards.

    BUGFIX: the original ran ``yield os.chdir(old_path)``, which switched
    back *before* yielding, so the block body ran in the old directory.
    try/finally restores the directory on both success and error paths.
    """
    old_path = os.path.abspath('.')
    os.chdir(dir_name)
    try:
        yield
    finally:
        os.chdir(old_path)
Do something in another directory and return back after the block has ended.
52,533
def _is_new ( self , identifier ) : if identifier in self . tracker : return False else : self . tracker . append ( identifier ) self . tracker . pop ( 0 ) return True
Returns True if the identifier hasn't been seen before.
52,534
def on_put(self, request, response, txn_id=None):
    """Respond to a PUT request containing events.

    Duplicate transactions (txn_id already seen) are acknowledged with 200
    without reprocessing.  Malformed bodies get a 400; otherwise the
    events are dispatched to the handler, whose result decides 200 vs 400.
    """
    response.body = "{}"
    if not self._is_new(txn_id):
        # replayed transaction: acknowledge but do not reprocess
        response.status = falcon.HTTP_200
        return
    request.context["body"] = request.stream.read()
    try:
        events = json.loads(request.context["body"].decode("utf-8"))["events"]
    except (KeyError, ValueError, UnicodeDecodeError):
        response.status = falcon.HTTP_400
        response.body = "Malformed request body"
        return
    if self.handler(EventStream(events, self.Api)):
        response.status = falcon.HTTP_200
    else:
        response.status = falcon.HTTP_400
Responds to PUT request containing events .
52,535
def on_get(self, request, response, user_id=None):
    """Respond to a GET request querying a user.

    When the handler accepts the user, registers the localpart with the
    API and returns 200; otherwise 404.
    """
    response.body = "{}"
    if self.handler(user_id):
        response.status = falcon.HTTP_200
        # NOTE(review): sibling code uses self.Api; confirm self.api here is intentional
        self.api.register(utils.mxid2localpart(user_id))
    else:
        response.status = falcon.HTTP_404
Responds to GET request for users .
52,536
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None:
    """Announce new frame parameters for the given settings profile.

    Stores nothing locally; simply fires frame_parameters_changed_event
    with the profile index and the new parameters.
    """
    self.frame_parameters_changed_event.fire(profile_index, frame_parameters)
Set the frame parameters with the settings index and fire the frame parameters changed event .
52,537
def euler(dfun, xzero, timerange, timestep):
    """Euler-method integration; functional wrapper around the Euler class.

    Returns the time points and solution values as separate sequences.
    """
    steps = list(Euler(dfun, xzero, timerange, timestep))
    return zip(*steps)
Euler method integration . This function wraps the Euler class .
52,538
def verlet(dfun, xzero, vzero, timerange, timestep):
    """Verlet-method integration; functional wrapper around the Verlet class.

    Returns the time points and solution values as separate sequences.
    """
    steps = list(Verlet(dfun, xzero, vzero, timerange, timestep))
    return zip(*steps)
Verlet method integration . This function wraps the Verlet class .
52,539
def backwardeuler(dfun, xzero, timerange, timestep):
    """Backward-Euler integration; functional wrapper around BackwardEuler.

    Returns the time points and solution values as separate sequences.
    """
    steps = list(BackwardEuler(dfun, xzero, timerange, timestep))
    return zip(*steps)
Backward Euler method integration . This function wraps BackwardEuler .
52,540
def add_request(self, request):
    """Add a request object, indexing it by name and by its URLs.

    URLs containing placeholder arguments are compiled into regexes with a
    named group per argument; literal URLs go into an exact-match table.
    Raises ValueError on duplicate request names or duplicate literal URLs.
    """
    if request.name in self.requests:
        raise ValueError('redefinition of request "{0}"'.format(request.name))
    self.requests[request.name] = request
    for method, url in request.urls:
        if RE_URL_ARG.search(url):
            # turn each URL placeholder into a named regex group
            request_regex = '^' + RE_URL_ARG_ESC.sub(r'/(?P<\1>[^/]+)', re.escape(url)) + '$'
            self.__request_regex.append((method, re.compile(request_regex), request))
        else:
            request_key = (method, url)
            if request_key in self.__request_urls:
                raise ValueError('redefinition of request URL "{0}"'.format(url))
            self.__request_urls[request_key] = request
Add a request object
52,541
def add_header(self, key, value):
    """Add a response header.

    Raises:
        TypeError: when key or value is not a str.  (The original used
        ``assert``, which is stripped under ``python -O`` and would let
        non-str headers through silently.)
    """
    if not isinstance(key, str):
        raise TypeError('header key must be of type str')
    if not isinstance(value, str):
        raise TypeError('header value must be of type str')
    self.headers[key] = value
Add a response header
52,542
def response(self, status, content_type, content, headers=None):
    """Send an HTTP response; returns the content iterable for WSGI."""
    assert not isinstance(content, (str, bytes)), 'response content cannot be of type str or bytes'
    response_headers = [('Content-Type', content_type)] + list(headers or [])
    self.start_response(status, response_headers)
    return content
Send an HTTP response
52,543
def response_text(self, status, text=None, content_type='text/plain', encoding='utf-8', headers=None):
    """Send a plain-text response; the text defaults to the status phrase."""
    if text is None:
        text = status if isinstance(status, str) else status.phrase
    return self.response(status, content_type, [text.encode(encoding)], headers=headers)
Send a plain - text response
52,544
def response_json(self, status, response, content_type='application/json', encoding='utf-8', headers=None, jsonp=None):
    """Send a JSON response, optionally wrapped in a JSONP callback."""
    pretty = self.app.pretty_output
    encoder = JSONEncoder(
        check_circular=self.app.validate_output,
        allow_nan=False,
        sort_keys=True,
        indent=2 if pretty else None,
        separators=(',', ': ') if pretty else (',', ':'),
    )
    payload = encoder.encode(response).encode(encoding)
    if jsonp:
        content_list = [jsonp.encode(encoding), b'(', payload, b');']
    else:
        content_list = [payload]
    return self.response(status, content_type, content_list, headers=headers)
Send a JSON response
52,545
def reconstruct_url(self, path_info=None, query_string=None, relative=False):
    """Reconstruct the request URL using the algorithm provided by PEP 3333.

    ``path_info``/``query_string`` override the environ values when given;
    ``relative`` omits the scheme/host/port prefix.
    """
    environ = self.environ
    if relative:
        url = ''
    else:
        url = environ['wsgi.url_scheme'] + '://'
        if environ.get('HTTP_HOST'):
            url += environ['HTTP_HOST']
        else:
            url += environ['SERVER_NAME']
            # append the port only when it is not the scheme default
            if environ['wsgi.url_scheme'] == 'https':
                if environ['SERVER_PORT'] != '443':
                    url += ':' + environ['SERVER_PORT']
            else:
                if environ['SERVER_PORT'] != '80':
                    url += ':' + environ['SERVER_PORT']
    url += quote(environ.get('SCRIPT_NAME', ''))
    if path_info is None:
        url += quote(environ.get('PATH_INFO', ''))
    else:
        url += path_info
    if query_string is None:
        if environ.get('QUERY_STRING'):
            url += '?' + environ['QUERY_STRING']
    else:
        if query_string:
            # an explicit query may be a pre-built string or a mapping
            if isinstance(query_string, str):
                url += '?' + query_string
            else:
                url += '?' + encode_query_string(query_string)
    return url
Reconstructs the request URL using the algorithm provided by PEP3333
52,546
def read_envvar_file(name, extension):
    """Load config from the file named by the NAME_CONFIG_FILE env var.

    ``extension`` is accepted for interface parity but unused.  Returns
    NotConfigured when the variable is not set.
    """
    envvar_file = environ.get('{}_config_file'.format(name).upper())
    if envvar_file:
        return loadf(envvar_file)
    return NotConfigured
Read values from a file provided as a environment variable NAME_CONFIG_FILE .
52,547
def loaders(*specifiers):
    """Yield loaders in the given order, expanding Locality specifiers."""
    for specifier in specifiers:
        if not isinstance(specifier, Locality):
            yield specifier
        else:
            yield from _LOADERS[specifier]
Generates loaders in the specified order .
52,548
def load(*fps, missing=Missing.silent):
    """Read a Configuration instance from file-like objects."""
    documents = (yaml.safe_load(fp.read()) for fp in fps)
    return Configuration(*documents, missing=missing)
Read a . Configuration instance from file - like objects .
52,549
def loadf(*fnames, default=_NoDefault, missing=Missing.silent):
    """Read a Configuration instance from named files.

    Missing files raise from open() unless a ``default`` is supplied, in
    which case the default stands in for that file's document.
    """
    def readf(fname):
        if default is _NoDefault or path.exists(fname):
            with open(fname, 'r') as fp:
                # empty files load as None; substitute an empty mapping
                return yaml.safe_load(fp.read()) or {}
        return default
    documents = (readf(path.expanduser(fname)) for fname in fnames)
    return Configuration(*documents, missing=missing)
Read a . Configuration instance from named files .
52,550
def loads(*strings, missing=Missing.silent):
    """Read a Configuration instance from YAML strings."""
    return Configuration(*map(yaml.safe_load, strings), missing=missing)
Read a . Configuration instance from strings .
52,551
def load_name(*names, load_order=DEFAULT_LOAD_ORDER, extension='yaml', missing=Missing.silent):
    """Read a Configuration by name from files of increasing significance.

    The default load order is: system, user, application, environment.
    A source may be a callable (called with name and extension) or a path
    template formatted with them.
    """
    def generate_sources():
        for source, name in product(load_order, names):
            if callable(source):
                yield source(name, extension)
            else:
                candidate = source.format(name=name, extension=extension)
                yield loadf(path.expanduser(candidate), default=NotConfigured)
    return Configuration(*generate_sources(), missing=missing)
Read a . Configuration instance by name trying to read from files in increasing significance . The default load order is . system . user . application . environment .
52,552
def acquire(self, block=True):
    """Acquire the lock.

    Blocks (polling every ``self.sleep`` seconds) until acquired when
    ``block`` is True; otherwise makes a single attempt and returns False
    on failure.
    """
    while not self.redis.set(self.name, self.value, px=self.timeout, nx=True):
        if not block:
            return False
        time.sleep(self.sleep)
    return True
Acquire lock . Blocks until acquired if block is True otherwise returns False if the lock could not be acquired .
52,553
def run(self):
    """Run the process if nobody else is; otherwise wait until we're needed.

    Never returns: exits the process when the child dies, or when the lock
    is lost and cannot be re-acquired.
    """
    log.info("Waiting for lock, currently held by %s", self.lock.who())
    if self.lock.acquire():
        log.info("Lock '%s' acquired", self.lockname)
        while True:
            if self.process is None:
                self.process = self.spawn(self.command)
                log.info("Spawned PID %d", self.process.pid)
            child_status = self.process.poll()
            if child_status is not None:
                # child exited on its own: propagate the failure
                log.error("Child died with exit code %d", child_status)
                sys.exit(1)
            if not self.lock.refresh():
                who = self.lock.who()
                if who is None:
                    # lock expired but is unclaimed: try to take it back
                    if self.lock.acquire(block=False):
                        log.warning("Lock refresh failed, but successfully re-acquired unclaimed lock")
                    else:
                        log.error("Lock refresh and subsequent re-acquire failed, giving up (Lock now held by %s)", self.lock.who())
                        self.cleanup()
                        sys.exit(os.EX_UNAVAILABLE)
                else:
                    log.error("Lock refresh failed, %s stole it - bailing out", self.lock.who())
                    self.cleanup()
                    sys.exit(os.EX_UNAVAILABLE)
            time.sleep(self.sleep)
Run process if nobody else is otherwise wait until we re needed . Never returns .
52,554
def spawn(self, command):
    """Spawn the child process; splits the command unless using a shell."""
    args = command if self.shell else shlex.split(command)
    return subprocess.Popen(args, shell=self.shell)
Spawn process .
52,555
def cleanup(self):
    """Stop the child process, if any, before we pack up and go home.

    Sends TERM, polls for up to ~1 second, then KILLs.  No-op when no
    process was spawned or it has already exited.
    """
    if self.process is None:
        return
    if self.process.poll() is None:
        log.info("Sending TERM to %d", self.process.pid)
        self.process.terminate()
        # BUGFIX: time.clock() was removed in Python 3.8; use the
        # monotonic clock for the grace-period timing
        start = time.monotonic()
        while time.monotonic() - start < 1.0:
            time.sleep(0.05)
            if self.process.poll() is not None:
                break
        else:
            log.info("Sending KILL to %d", self.process.pid)
            self.process.kill()
    assert self.process.poll() is not None
Clean up making sure the process is stopped before we pack up and go home .
52,556
def handle_signal(self, sig, frame):
    """Handle SIGINT/SIGTERM: log, clean up the child, and exit."""
    if sig == signal.SIGINT:
        log.warning("Ctrl-C pressed, shutting down...")
    if sig == signal.SIGTERM:
        log.warning("SIGTERM received, shutting down...")
    self.cleanup()
    sys.exit(-sig)
Handles termination signals, cleaning up before exiting.
52,557
def validate(data, schema, defined_keys=False):
    """Main entry point for the validation engine.

    Raises TypeError unless ``data`` is a dict; otherwise runs a Validator
    over it against ``schema``.
    """
    if not isinstance(data, dict):
        raise TypeError('expected data to be of type dict, but got: %s' % type(data))
    Validator(data, schema, defined_keys=defined_keys).validate()
Main entry point for the validation engine .
52,558
def strfdelta(tdelta, fmt):
    """Format a datetime.timedelta like strftime, using $H/$M/$S fields."""
    hours, rem = divmod(tdelta.total_seconds(), 3600)
    minutes, seconds = divmod(rem, 60)
    substitutes = {
        "H": '{:02d}'.format(int(hours)),
        "M": '{:02d}'.format(int(minutes)),
        "S": '{:02d}'.format(int(seconds)),
    }
    return DeltaTemplate(fmt).substitute(**substitutes)
Used to format datetime . timedelta objects . Works just like strftime
52,559
def get_user_information():
    """Return (username, user id, hostname) for the current user.

    Uses pwd/os.uname on POSIX; falls back to getpass/platform (with a
    user id of 0) where the pwd module is unavailable (e.g. Windows).
    """
    try:
        import pwd
        _username = pwd.getpwuid(os.getuid())[0]
        _userid = os.getuid()
        _uname = os.uname()[1]
    except ImportError:
        import getpass
        import platform
        _username = getpass.getuser()
        _userid = 0
        _uname = platform.node()
    return _username, _userid, _uname
Returns the user's information (username, user id, hostname).
52,560
def list_container_groups(self, resource_group_name):
    """Print the names of the container groups in a resource group."""
    print("Listing container groups in resource group '{0}'...".format(resource_group_name))
    groups = self.client.container_groups.list_by_resource_group(resource_group_name)
    for group in groups:
        print("  {0}".format(group.name))
Lists the container groups in the specified resource group .
52,561
def cauldron_extras(self):
    """Yield extra (field name, callable) tuples; each callable takes a row.

    Extends the superclass extras with an '<id>_id' accessor that reads
    the raw column when formatters are in play, else the id property.
    """
    for extra in super(Dimension, self).cauldron_extras:
        yield extra
    # formatted dimensions expose the unformatted value under '<id>_raw'
    if self.formatters:
        prop = self.id + '_raw'
    else:
        prop = self.id_prop
    yield self.id + '_id', lambda row: getattr(row, prop)
Yield extra tuples containing a field name and a callable that takes a row
52,562
def make_column_suffixes(self):
    """Return the column suffixes appended to the id when generating the query.

    Explicit column_suffixes win; otherwise a single column gets '' (or
    '_raw' with formatters) and two columns get '_id' plus ''/'_raw'.
    More than two columns require explicit suffixes.
    """
    if self.column_suffixes:
        return self.column_suffixes
    count = len(self.columns)
    if count == 0:
        return ()
    if count == 1:
        return ('_raw',) if self.formatters else ('',)
    if count == 2:
        return ('_id', '_raw') if self.formatters else ('_id', '')
    raise BadIngredient('column_suffixes must be supplied if there is more than one column')
Make sure we have the right column suffixes . These will be appended to id when generating the query .
52,563
def parse_condition(cond, selectable, aggregated=False, default_aggregation='sum'):
    """Create a SQLAlchemy clause from a condition dictionary.

    ``None`` passes through.  ``and``/``or`` keys recurse into
    sub-conditions.  Otherwise the dict must name a ``field`` plus
    exactly one operator key (``in``, ``gt``, ``gte``, ``lt``,
    ``lte``, ``eq``, ``ne``).  Raises BadIngredient on malformed
    conditions.
    """
    if cond is None:
        return None
    if 'and' in cond:
        return and_(*[
            parse_condition(c, selectable, aggregated, default_aggregation)
            for c in cond['and']
        ])
    if 'or' in cond:
        return or_(*[
            parse_condition(c, selectable, aggregated, default_aggregation)
            for c in cond['or']
        ])
    if 'field' not in cond:
        raise BadIngredient('field must be defined in condition')
    field = parse_field(cond['field'], selectable, aggregated=aggregated,
                        default_aggregation=default_aggregation)
    if 'in' in cond:
        value = cond['in']
        if isinstance(value, dict):
            raise BadIngredient('value for in must be a list')
        return getattr(field, 'in_')(tuple(value))
    # Scalar comparisons, checked in the original precedence order.
    scalar_operators = (('gt', '__gt__'), ('gte', '__ge__'),
                        ('lt', '__lt__'), ('lte', '__le__'),
                        ('eq', '__eq__'), ('ne', '__ne__'))
    for key, dunder in scalar_operators:
        if key in cond:
            value = cond[key]
            if isinstance(value, (list, dict)):
                raise BadIngredient('conditional value must be a scalar')
            return getattr(field, dunder)(value)
    raise BadIngredient('Bad condition')
Create a SQLAlchemy clause from a condition .
52,564
def tokenize(s):
    """Split *s* into words, rewriting arithmetic operators as tokens.

    ``+``, ``-``, ``/`` and ``*`` become ``PLUS``, ``MINUS``,
    ``DIVIDE`` and ``MULTIPLY``; empty fragments are dropped.
    """
    replacements = (('+', ' PLUS '), ('-', ' MINUS '),
                    ('/', ' DIVIDE '), ('*', ' MULTIPLY '))
    for symbol, word in replacements:
        s = s.replace(symbol, word)
    return [token for token in s.split(' ') if token]
Tokenize a string by splitting it by + and -
52,565
def _find_in_columncollection ( columns , name ) : for col in columns : if col . name == name or getattr ( col , '_label' , None ) == name : return col return None
Find a column in a column collection by name or _label
52,566
def find_column(selectable, name):
    """Find a column named *name* in *selectable*.

    Recipes are converted to subqueries first.  Declarative models are
    searched by attribute, then through their table's columns; plain
    selectables are searched through their ``c`` collection.  Raises
    BadIngredient when nothing matches.
    """
    from recipe import Recipe
    if isinstance(selectable, Recipe):
        selectable = selectable.subquery()
    if isinstance(selectable, DeclarativeMeta):
        column = getattr(selectable, name, None)
        if column is None:
            column = _find_in_columncollection(
                selectable.__table__.columns, name)
        if column is not None:
            return column
    elif hasattr(selectable, 'c') and isinstance(
            selectable.c, ImmutableColumnCollection):
        column = getattr(selectable.c, name, None)
        if column is None:
            column = _find_in_columncollection(selectable.c, name)
        if column is not None:
            return column
    raise BadIngredient('Can not find {} in {}'.format(name, selectable))
Find a column named name in selectable
52,567
def parse_validated_field(fld, selectable):
    """Convert a validated field dict into a SQLAlchemy expression.

    The field's column is resolved against *selectable*, chained
    operators are applied, an optional condition wraps the value in a
    CASE expression, and the configured aggregation is applied last.
    """
    aggregate = IngredientValidator.aggregation_lookup[fld['aggregation']]
    field = parse_validated_field.__defaults__ if False else find_column(selectable, fld['value'])
    for operator in fld.get('operators', []):
        other = parse_validated_field(operator['field'], selectable)
        field = IngredientValidator.operator_lookup[operator['operator']](field)(other)
    condition = fld.get('condition', None)
    if condition:
        field = case([(parse_condition(condition, selectable), field)])
    return aggregate(field)
Converts a validated field to sqlalchemy . Field references are looked up in selectable
52,568
def AutomaticShelf(table):
    """Generate a Shelf of metrics/dimensions from a table's schema.

    Accepts either a SQLAlchemy Table or a declarative model (whose
    ``__table__`` is used).
    """
    if hasattr(table, '__table__'):
        table = table.__table__
    return Shelf.from_config(introspect_table(table), table)
Given a SQLAlchemy Table automatically generate a Shelf with metrics and dimensions based on its schema .
52,569
def introspect_table(table):
    """Build a shelf config dict from a SQLAlchemy Table.

    String columns become Dimensions; Integer/Float columns become
    Metrics.  The result is suitable for ``Shelf.from_config``.
    """
    config = {}
    for column in table.columns:
        if isinstance(column.type, String):
            config[column.name] = {'kind': 'Dimension', 'field': column.name}
        if isinstance(column.type, (Integer, Float)):
            config[column.name] = {'kind': 'Metric', 'field': column.name}
    return config
Given a SQLAlchemy Table object return a Shelf description suitable for passing to Shelf . from_config .
52,570
def pop(self, k, d=_POP_DEFAULT):
    """Pop ingredient *k* off this shelf, mirroring ``dict.pop``.

    Without a default, a missing key raises KeyError; otherwise *d*
    is returned for missing keys.
    """
    if d is _POP_DEFAULT:
        return self._ingredients.pop(k)
    return self._ingredients.pop(k, d)
Pop an ingredient off of this shelf .
52,571
def dimension_ids(self):
    """Return ids of the Dimensions on this shelf, in order of use."""
    dims = [ingr.id for ingr in self.values() if isinstance(ingr, Dimension)]
    return self._sorted_ingredients(dims)
Return the Dimensions on this shelf in the order in which they were used .
52,572
def from_config(cls, obj, selectable,
                ingredient_constructor=ingredient_from_validated_dict,
                metadata=None):
    """Create a shelf from a dict shelf definition.

    *selectable* may be a Recipe (used as a subquery), a table name
    string (optionally ``schema.table``, reflected via *metadata*),
    or any SQLAlchemy selectable.  Each value in *obj* is turned into
    an ingredient with *ingredient_constructor*.
    """
    from recipe import Recipe
    if isinstance(selectable, Recipe):
        selectable = selectable.subquery()
    elif isinstance(selectable, basestring):
        if '.' in selectable:
            schema, tablename = selectable.split('.')
        else:
            schema, tablename = None, selectable
        selectable = Table(tablename, metadata, schema=schema,
                           extend_existing=True, autoload=True)
    ingredients = {key: ingredient_constructor(value, selectable)
                   for key, value in iteritems(obj)}
    return cls(ingredients, select_from=selectable)
Create a shelf using a dict shelf definition .
52,573
def find(self, obj, filter_to_class=Ingredient, constructor=None):
    """Find an Ingredient, optionally resolving strings via the shelf.

    Strings may be prefixed with ``-`` to request descending ordering.
    Raises BadRecipe when the key is missing or the ingredient is not
    an instance of *filter_to_class*.
    """
    if callable(constructor):
        obj = constructor(obj, shelf=self)
    if isinstance(obj, basestring):
        descending = obj.startswith('-')
        if descending:
            obj = obj[1:]
        if obj not in self:
            raise BadRecipe("{} doesn't exist on the shelf".format(obj))
        ingredient = self[obj]
        if not isinstance(ingredient, filter_to_class):
            raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
        if descending:
            ingredient.ordering = 'desc'
        return ingredient
    if isinstance(obj, filter_to_class):
        return obj
    raise BadRecipe('{} is not a {}'.format(obj, filter_to_class))
Find an Ingredient optionally using the shelf .
52,574
def brew_query_parts(self):
    """Gather columns, group_bys, filters and havings from ingredients.

    Columns and group-bys preserve order; filters and havings are
    de-duplicated sets.
    """
    columns, group_bys = [], []
    filters, havings = set(), set()
    for ingredient in self.ingredients():
        if ingredient.query_columns:
            columns.extend(ingredient.query_columns)
        if ingredient.group_by:
            group_bys.extend(ingredient.group_by)
        if ingredient.filters:
            filters.update(ingredient.filters)
        if ingredient.havings:
            havings.update(ingredient.havings)
    return dict(columns=columns, group_bys=group_bys,
                filters=filters, havings=havings)
Make columns group_bys filters havings
52,575
def enchant(self, list, cache_context=None):
    """Return a new row list extended with each ingredient's extras.

    For every Dimension/Metric on the shelf the cauldron extras are
    evaluated per row and appended as new named-tuple fields.  An
    optional *cache_context* is appended to each ingredient's cache
    context first.
    """
    enchanted = []
    if list:
        extra_fields, extra_callables = [], []
        for ingredient in self.values():
            if not isinstance(ingredient, (Dimension, Metric)):
                continue
            if cache_context:
                ingredient.cache_context += str(cache_context)
            for field_name, fn in ingredient.cauldron_extras:
                extra_fields.append(field_name)
                extra_callables.append(fn)
        row_factory = lightweight_named_tuple(
            'result', list[0]._fields + tuple(extra_fields))
        for row in list:
            values = row + tuple(fn(row) for fn in extra_callables)
            enchanted.append(row_factory(values))
    return enchanted
Add any calculated values to each row of a resultset generating a new namedtuple
52,576
def modify_postquery_parts(self, postquery_parts):
    """Re-summarize a dimensional recipe over one of its dimensions.

    Wraps the already-built query in a subquery, drops the
    ``_summarize_over`` dimension from the group-by, and re-aggregates
    every metric column (avg stays avg; count and sum become sum).
    Returns the modified ``postquery_parts`` dict.
    """
    # No-op unless summarization was requested.
    if self._summarize_over is None:
        return postquery_parts
    assert self._summarize_over in self.recipe.dimension_ids
    # Use the original query as an inner subquery named "summarize".
    subq = postquery_parts['query'].subquery(name='summarize')
    # Column names produced by the dimension being summarized away.
    summarize_over_dim = set(
        (self._summarize_over, self._summarize_over + '_id',
         self._summarize_over + '_raw'))
    # Column names produced by every dimension on the recipe.
    dim_column_names = set(dim for dim in self.recipe.dimension_ids).union(
        set(dim + '_id' for dim in self.recipe.dimension_ids)).union(
            set(dim + '_raw' for dim in self.recipe.dimension_ids))
    # Dimension columns that remain in the outer group-by.
    used_dim_column_names = dim_column_names - summarize_over_dim
    group_by_columns = [
        col for col in subq.c if col.name in used_dim_column_names
    ]
    metric_columns = []
    # Every non-dimension column is a metric that must be re-aggregated.
    for col in subq.c:
        if col.name not in dim_column_names:
            met = self.recipe._cauldron.find(col.name, Metric)
            summary_aggregation = met.meta.get('summary_aggregation', None)
            if summary_aggregation is None:
                # Infer a re-aggregation from the metric's SQL text:
                # avg-of-avg stays avg; counts and sums are summed.
                if str(met.expression).startswith(u'avg'):
                    summary_aggregation = func.avg
                elif str(met.expression).startswith(u'count'):
                    summary_aggregation = func.sum
                elif str(met.expression).startswith(u'sum'):
                    summary_aggregation = func.sum
            if summary_aggregation is None:
                raise BadRecipe(
                    u'Provide a summary_aggregation for metric'
                    u' {}'.format(col.name))
            metric_columns.append(summary_aggregation(col).label(col.name))
    # Preserve the original ordering for columns that still exist,
    # falling back to the dimension's _raw column.
    order_by_columns = []
    for col in postquery_parts['query']._order_by:
        subq_col = getattr(
            subq.c, col.name, getattr(subq.c, col.name + '_raw', None))
        if subq_col is not None:
            order_by_columns.append(subq_col)
    postquery_parts['query'] = self.recipe._session.query(
        *(group_by_columns + metric_columns)).group_by(
            *group_by_columns).order_by(*order_by_columns)
    # The summarized-over dimension no longer appears in the result.
    self.recipe._cauldron.pop(self._summarize_over, None)
    return postquery_parts
Take a recipe that has dimensions Resummarize it over one of the dimensions returning averages of the metrics .
52,577
def anonymize(self, value):
    """Enable or disable anonymization for this recipe.

    Marks the recipe dirty only when the flag actually changes.
    Returns the recipe so calls can be chained.
    """
    assert isinstance(value, bool)
    if self._anonymize != value:
        self.dirty = True
        self._anonymize = value
    return self.recipe
Should this recipe be anonymized
52,578
def add_ingredients(self):
    """Attach or remove anonymizing formatters on every ingredient.

    For each ingredient whose meta declares an ``anonymizer``: string
    anonymizers are wrapped in a FakerAnonymizer (honoring optional
    ``anonymizer_locale``/``anonymizer_postprocessor`` meta); then,
    depending on the recipe's anonymize flag, the anonymizer is
    appended to (or removed from) the ingredient's formatters so it
    runs last.
    """
    for ingredient in self.recipe._cauldron.values():
        if hasattr(ingredient.meta, 'anonymizer'):
            anonymizer = ingredient.meta.anonymizer
            # Build a FakerAnonymizer when the meta gives a provider
            # name string rather than a callable.
            if isinstance(anonymizer, basestring):
                kwargs = {}
                anonymizer_locale = getattr(
                    ingredient.meta, 'anonymizer_locale', None)
                anonymizer_postprocessor = getattr(
                    ingredient.meta, 'anonymizer_postprocessor', None)
                if anonymizer_postprocessor is not None:
                    kwargs['postprocessor'] = anonymizer_postprocessor
                if anonymizer_locale is not None:
                    kwargs['locale'] = anonymizer_locale
                anonymizer = FakerAnonymizer(anonymizer, **kwargs)
                # Strip previously-added FakerAnonymizers so repeated
                # bakes don't accumulate duplicate formatters.
                # NOTE(review): indentation reconstructed from a
                # collapsed source line — confirm this strip belongs
                # inside the isinstance branch.
                ingredient.formatters = [
                    f for f in ingredient.formatters
                    if not isinstance(f, FakerAnonymizer)
                ]
            if self._anonymize:
                if ingredient.meta.anonymizer not in ingredient.formatters:
                    ingredient.formatters.append(anonymizer)
            else:
                if ingredient.meta.anonymizer in ingredient.formatters:
                    ingredient.formatters.remove(anonymizer)
Put the anonymizers in the last position of formatters
52,579
def blend(self, blend_recipe, join_base, join_blend):
    """Inner-join another recipe's SQL into the base recipe.

    *join_base* and *join_blend* name the join keys on the base and
    blended recipes respectively.  Returns the recipe for chaining.
    """
    assert isinstance(blend_recipe, Recipe)
    self.blend_recipes.append(blend_recipe)
    self.blend_types.append('inner')
    self.blend_criteria.append((join_base, join_blend))
    self.dirty = True
    return self.recipe
Blend a recipe into the base recipe. This performs an inner join of the blend_recipe to the base recipe's SQL.
52,580
def compare(self, compare_recipe, suffix='_compare'):
    """Add a comparison recipe whose columns receive *suffix*.

    Returns the recipe for chaining.
    """
    assert isinstance(compare_recipe, Recipe)
    assert isinstance(suffix, basestring)
    self.compare_recipe.append(compare_recipe)
    self.suffix.append(suffix)
    self.dirty = True
    return self.recipe
Adds a comparison recipe to a base recipe .
52,581
def prettyprintable_sql(statement, dialect=None, reindent=True):
    """Render a SQLAlchemy statement as SQL with bound params inlined.

    Accepts a Core statement or an ``orm.Query`` (whose session bind
    supplies the dialect when none is given).  WARNING: inlining
    literals like this is for debugging/logging only, never for
    execution against a database.
    """
    if isinstance(statement, sqlalchemy.orm.Query):
        if dialect is None:
            dialect = statement.session.get_bind().dialect
        statement = statement.statement
    DialectKlass = dialect.__class__ if dialect else DefaultDialect

    class LiteralDialect(DialectKlass):
        # Render these types as inline string literals instead of
        # bind parameters.
        colspecs = {
            String: StringLiteral,
            DateTime: StringLiteral,
            Date: StringLiteral,
            NullType: StringLiteral,
        }

    compiled = statement.compile(dialect=LiteralDialect(),
                                 compile_kwargs={'literal_binds': True})
    return sqlparse.format(str(compiled), reindent=reindent)
Generate an SQL expression string with bound parameters rendered inline for the given SQLAlchemy statement . The function can also receive a sqlalchemy . orm . Query object instead of statement .
52,582
def _normalize_coerce_to_format_with_lookup ( self , v ) : try : return self . format_lookup . get ( v , v ) except TypeError : return v
Replace a format with a default
52,583
def _validate_type_scalar(self, value):
    """Accept any scalar (not a list or dict): ints, strings, floats,
    dates, datetimes and bools.  Returns True on success, None
    otherwise (cerberus convention)."""
    scalar_types = _int_types + (_str_type, float, date, datetime, bool)
    if isinstance(value, scalar_types):
        return True
Is not a list or a dict
52,584
def from_config(cls, shelf, obj, **kwargs):
    """Construct a Recipe from a plain Python dictionary.

    Core recipe keys are validated/normalized against ``recipe_schema``
    and become constructor kwargs; dict-valued filters are compiled
    into SQLAlchemy clauses against the shelf's selectable.  Each
    extension then configures itself from its own declared schema
    keys.  Extra *kwargs* override values taken from *obj*.
    """
    def subdict(d, keys):
        # Copy of d restricted to the given keys.
        new = {}
        for k in keys:
            if k in d:
                new[k] = d[k]
        return new
    core_kwargs = subdict(obj, recipe_schema['schema'].keys())
    core_kwargs = normalize_schema(recipe_schema, core_kwargs)
    # Dict filters are conditions to compile; anything else is assumed
    # to already be a SQLAlchemy clause and passes through.
    core_kwargs['filters'] = [
        parse_condition(filter, shelf.Meta.select_from)
        if isinstance(filter, dict) else filter
        for filter in obj.get('filters', [])
    ]
    core_kwargs.update(kwargs)
    recipe = cls(shelf=shelf, **core_kwargs)
    # Let each extension consume its declared schema keys; extensions
    # return the recipe so configuration chains.
    for ext in recipe.recipe_extensions:
        additional_schema = getattr(ext, 'recipe_schema', None)
        if additional_schema is not None:
            ext_data = subdict(obj, additional_schema.keys())
            ext_data = normalize_dict(additional_schema, ext_data)
            recipe = ext.from_config(ext_data)
    return recipe
Construct a Recipe from a plain Python dictionary .
52,585
def shelf(self, shelf=None):
    """Set the shelf used by this recipe.

    Accepts a Shelf, a dict (converted to a Shelf) or None (an empty
    shelf).  Adopts the shelf's select_from when the recipe doesn't
    already have one.  Raises BadRecipe for anything else.
    """
    if shelf is None:
        shelf = {}
    if isinstance(shelf, Shelf):
        self._shelf = shelf
    elif isinstance(shelf, dict):
        self._shelf = Shelf(shelf)
    else:
        raise BadRecipe('shelf must be a dict or recipe.shelf.Shelf')
    if self._select_from is None and self._shelf.Meta.select_from is not None:
        self._select_from = self._shelf.Meta.select_from
    return self
Defines a shelf to use for this recipe
52,586
def metrics(self, *metrics):
    """Add Metric ingredients (objects or shelf keys) to the recipe.

    Returns self for chaining.
    """
    for metric in metrics:
        self._cauldron.use(self._shelf.find(metric, Metric))
    self.dirty = True
    return self
Add a list of Metric ingredients to the query . These can either be Metric objects or strings representing metrics on the shelf .
52,587
def dimensions(self, *dimensions):
    """Add Dimension ingredients (objects or shelf keys) to the recipe.

    Returns self for chaining.
    """
    for dimension in dimensions:
        self._cauldron.use(self._shelf.find(dimension, Dimension))
    self.dirty = True
    return self
Add a list of Dimension ingredients to the query . These can either be Dimension objects or strings representing dimensions on the shelf .
52,588
def order_by(self, *order_bys):
    """Replace the recipe's ordering with the given ingredients.

    Each item may be a Dimension/Metric or a shelf key.  Returns self
    for chaining.
    """
    self._order_bys = []
    for item in order_bys:
        self._order_bys.append(self._shelf.find(item, (Dimension, Metric)))
    self.dirty = True
    return self
Add a list of ingredients to order by to the query . These can either be Dimension or Metric objects or strings representing order_bys on the shelf .
52,589
def limit(self, limit):
    """Limit the number of rows returned, marking dirty on change.

    Returns self for chaining.
    """
    if self._limit != limit:
        self._limit = limit
        self.dirty = True
    return self
Limit the number of rows returned from the database .
52,590
def offset(self, offset):
    """Skip a number of rows before returning results, marking dirty
    on change.

    Returns self for chaining.
    """
    if self._offset != offset:
        self._offset = offset
        self.dirty = True
    return self
Offset a number of rows before returning rows from the database .
52,591
def _is_postgres ( self ) : if self . _is_postgres_engine is None : is_postgres_engine = False try : dialect = self . session . bind . engine . name if 'redshift' in dialect or 'postg' in dialect or 'pg' in dialect : is_postgres_engine = True except : pass self . _is_postgres_engine = is_postgres_engine return self . _is_postgres_engine
Determine if the running engine is postgres
52,592
def _prepare_order_bys(self):
    """Build the ordered, de-duplicated list of order-by clauses.

    Dimension columns are iterated in reverse, descending ingredients
    get ``desc()`` applied, and duplicate clauses (compared by their
    string rendering) are dropped.
    """
    order_bys = OrderedSet()
    if self._order_bys:
        for ingredient in self._order_bys:
            columns = ingredient.columns
            if isinstance(ingredient, Dimension):
                columns = reversed(columns)
            for column in columns:
                clause = column.desc() if ingredient.ordering == 'desc' else column
                if str(clause) not in [str(existing) for existing in order_bys]:
                    order_bys.add(clause)
    return list(order_bys)
Build a set of order by columns
52,593
def query(self):
    """Generate (and cache) the SQLAlchemy query for this recipe.

    Brews columns/group-bys/filters/havings from the ingredients,
    runs every extension's modify_* hooks around query construction,
    then applies limit and offset.  Raises BadRecipe when no
    ingredients are present, or when ingredients span multiple tables
    without an explicit select_from.
    """
    if len(self._cauldron.ingredients()) == 0:
        raise BadRecipe('No ingredients have been added to this recipe')
    # Reuse the cached query when nothing has changed.
    if not self.dirty and self._query:
        return self._query
    # Extensions may inject extra ingredients before brewing.
    for extension in self.recipe_extensions:
        extension.add_ingredients()
    recipe_parts = self._cauldron.brew_query_parts()
    recipe_parts['order_bys'] = self._prepare_order_bys()
    for extension in self.recipe_extensions:
        recipe_parts = extension.modify_recipe_parts(recipe_parts)
    query = self._session.query(*recipe_parts['columns'])
    if self._select_from is not None:
        query = query.select_from(self._select_from)
    recipe_parts['query'] = query.group_by(
        *recipe_parts['group_bys']).order_by(
            *recipe_parts['order_bys']).filter(*recipe_parts['filters'])
    if recipe_parts['havings']:
        for having in recipe_parts['havings']:
            recipe_parts['query'] = recipe_parts['query'].having(having)
    for extension in self.recipe_extensions:
        recipe_parts = extension.modify_prequery_parts(recipe_parts)
    # Without an explicit select_from, all ingredients must resolve to
    # a single table or the query would produce a cartesian join.
    if self._select_from is None and len(
            recipe_parts['query'].selectable.froms) != 1:
        raise BadRecipe('Recipes must use ingredients that all come from '
                        'the same table. \nDetails on this recipe:\n{'
                        '}'.format(str(self._cauldron)))
    for extension in self.recipe_extensions:
        recipe_parts = extension.modify_postquery_parts(recipe_parts)
    # Dynamic (per-bake) extensions get a final chance to modify.
    recipe_parts = run_hooks(
        recipe_parts, 'modify_query', self.dynamic_extensions)
    if self._limit and self._limit > 0:
        recipe_parts['query'] = recipe_parts['query'].limit(self._limit)
    if self._offset and self._offset > 0:
        recipe_parts['query'] = recipe_parts['query'].offset(self._offset)
    self._query = recipe_parts['query']
    self.dirty = False
    return self._query
Generates a query using the ingredients supplied by the recipe .
52,594
def dirty(self):
    """True when the recipe or any of its extensions is flagged dirty."""
    if self._dirty:
        return True
    return any(extension.dirty for extension in self.recipe_extensions)
The recipe is dirty if it is flagged dirty or any extensions are flagged dirty
52,595
def dirty(self, value):
    """Set the dirty flag; clearing it also clears every extension."""
    self._dirty = bool(value)
    if not value:
        for extension in self.recipe_extensions:
            extension.dirty = False
If dirty is true set the recipe to dirty flag . If false clear the recipe and all extension dirty flags
52,596
def subquery(self, name=None):
    """Return the recipe's query as a subquery for joins/other queries."""
    return self.query().subquery(name=name)
The recipe's query as a subquery, suitable for use in joins or other queries.
52,597
def as_table(self, name=None):
    """Return the recipe as an aliased table.

    The alias is named *name*, defaulting to the recipe's id.
    """
    if name is None:
        name = self._id
    return alias(self.subquery(), name=name)
Return an alias to a table
52,598
def _validate_condition_keys ( self , field , value , error ) : if 'field' in value : operators = self . nonscalar_conditions + self . scalar_conditions matches = sum ( 1 for k in operators if k in value ) if matches == 0 : error ( field , 'Must contain one of {}' . format ( operators ) ) return False elif matches > 1 : error ( field , 'Must contain no more than one of {}' . format ( operators ) ) return False return True elif 'and' in value : for condition in value [ 'and' ] : self . _validate_condition_keys ( field , condition , error ) elif 'or' in value : for condition in value [ 'or' ] : self . _validate_condition_keys ( field , condition , error ) else : error ( field , "Must contain field + operator keys, 'and', or 'or'." ) return False
Validates that all of the keys in one of the sets of keys are defined as keys of value .
52,599
def survival(self, parents, offspring, elite=None, elite_index=None, X=None, X_O=None, F=None, F_O=None):
    """Route to the configured survival method; return the survivors.

    Selects ``len(parents)`` survivors from parents + offspring
    according to ``self.sel`` (tournament, lexicase,
    epsilon_lexicase, deterministic_crowding or random).  F/F_O are
    the parent/offspring fitness matrices; X/X_O are their outputs
    (used only by deterministic crowding).  Returns
    ``(survivors, survivor_index)``.
    """
    if self.sel == 'tournament':
        survivors, survivor_index = self.tournament(
            parents + offspring, self.tourn_size,
            num_selections=len(parents))
    elif self.sel == 'lexicase':
        survivor_index = self.lexicase(
            np.vstack((F, F_O)), num_selections=len(parents),
            survival=True)
        survivors = [(parents + offspring)[s] for s in survivor_index]
    elif self.sel == 'epsilon_lexicase':
        if self.lex_size:
            # lex_size: program sizes join the selection criteria.
            sizes = [len(i.stack) for i in (parents + offspring)]
            survivor_index = self.epsilon_lexicase(
                np.vstack((F, F_O)), sizes, num_selections=F.shape[0],
                survival=True)
            survivors = [(parents + offspring)[s] for s in survivor_index]
        else:
            survivor_index = self.epsilon_lexicase(
                np.vstack((F, F_O)), [], num_selections=F.shape[0],
                survival=True)
            survivors = [(parents + offspring)[s] for s in survivor_index]
    elif self.sel == 'deterministic_crowding':
        survivors, survivor_index = self.deterministic_crowding(
            parents, offspring, X, X_O)
    elif self.sel == 'random':
        survivor_index = self.random_state.permutation(
            np.arange(2 * len(parents)))[:len(parents)]
        survivors = [(parents + offspring)[s] for s in survivor_index]
    if self.elitism:
        # NOTE(review): when every survivor's fitness exceeds the
        # elite's, the worst (argmax-fitness) survivor is replaced by
        # the elite — this assumes lower fitness is better; confirm
        # against the caller's fitness convention.
        if min([x.fitness for x in survivors]) > elite.fitness:
            rep_index = np.argmax([x.fitness for x in survivors])
            survivors[rep_index] = elite
            survivor_index[rep_index] = elite_index
    return survivors, survivor_index
routes to the survival method returns survivors