Columns: idx (int64, 0–63k), question (string, 61–4.03k characters), target (string, 6–1.23k characters)
8,400
def assert_tz_offset ( tz ) : tz_offset = get_tz_offset ( tz ) system_offset = get_system_offset ( ) if tz_offset != system_offset : msg = ( 'Timezone offset does not match system offset: {0} != {1}. ' 'Please, check your config files.' ) . format ( tz_offset , system_offset ) raise ValueError ( msg )
Assert that the system's timezone offset equals the timezone offset found.
8,401
def resources ( self ) : if self . _resources_objects is not None : return self . _resources_objects resources = self . parent . parent . resources resources = [ resources [ name ] for name in self . _resources ] self . _resources_objects = resources return resources
Return all the resources that this function refers to.
8,402
def update ( self ) : if self . _owner_changed : self . update_owner ( self . owner ) self . _resources = [ res . name for res in self . resources ] return self . parent . update ( self )
Update this function.
8,403
def table_creator ( func ) : def method ( self , table_name , ** kwargs ) : if self . odps . exist_table ( table_name ) : return if kwargs . get ( 'project' , self . odps . project ) != self . odps . project : tunnel = TableTunnel ( self . odps , project = kwargs [ 'project' ] ) else : tunnel = self . tunnel func ( self . odps , table_name , tunnel = tunnel , ** kwargs ) self . after_create_test_data ( table_name ) method . __name__ = func . __name__ setattr ( TestDataMixIn , func . __name__ , method ) return func
Decorator for table-creating methods.
8,404
def confusion_matrix ( df , col_true = None , col_pred = None ) : if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_CLASS ) return _run_cm_node ( df , col_true , col_pred ) [ 0 ]
Compute confusion matrix of a predicted DataFrame.
8,405
def accuracy_score ( df , col_true = None , col_pred = None , normalize = True ) : if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_CLASS ) mat , _ = _run_cm_node ( df , col_true , col_pred ) if np is not None : acc_count = np . sum ( np . diag ( mat ) ) if not normalize : return acc_count else : return acc_count * 1.0 / np . sum ( mat ) else : diag_sum = mat_sum = 0 mat_size = len ( mat ) for i in compat . irange ( mat_size ) : for j in compat . irange ( mat_size ) : if i == j : diag_sum += mat [ i ] [ j ] mat_sum += mat [ i ] [ j ] if not normalize : return diag_sum else : return diag_sum * 1.0 / mat_sum
Compute accuracy of a predicted DataFrame.
8,406
def fbeta_score ( df , col_true = None , col_pred = 'precision_result' , beta = 1.0 , pos_label = 1 , average = None ) : r if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_CLASS ) mat , label_list = _run_cm_node ( df , col_true , col_pred ) class_dict = dict ( ( label , idx ) for idx , label in enumerate ( label_list ) ) tps = np . diag ( mat ) pred_count = np . sum ( mat , axis = 0 ) supp_count = np . sum ( mat , axis = 1 ) beta2 = beta ** 2 precision = tps * 1.0 / pred_count recall = tps * 1.0 / supp_count ppr = precision * beta2 + recall ppr [ ppr == 0 ] = 1e-6 fbeta = ( 1 + beta2 ) * precision * recall / ppr if average is None : return fbeta elif average == 'binary' : class_idx = class_dict [ pos_label ] return fbeta [ class_idx ] elif average == 'micro' : g_precision = np . sum ( tps ) * 1.0 / np . sum ( supp_count ) g_recall = np . sum ( tps ) * 1.0 / np . sum ( pred_count ) return ( 1 + beta2 ) * g_precision * g_recall / ( beta2 * g_precision + g_recall ) elif average == 'macro' : return np . mean ( fbeta ) elif average == 'weighted' : return sum ( fbeta * supp_count ) / sum ( supp_count )
Compute the F-beta score of a predicted DataFrame. F-beta is defined as (1 + beta^2) * precision * recall / (beta^2 * precision + recall).
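A quick numeric check of the definition, using made-up values: for precision 0.8, recall 0.5 and beta = 2, the score is (1 + 4) * 0.8 * 0.5 / (4 * 0.8 + 0.5) = 2.0 / 3.7 ≈ 0.54.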
8,407
def f1_score ( df , col_true = None , col_pred = 'precision_result' , pos_label = 1 , average = None ) : r if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_CLASS ) return fbeta_score ( df , col_true , col_pred , pos_label = pos_label , average = average )
Compute the F1 score of a predicted DataFrame. F1 is F-beta with beta = 1, i.e. 2 * precision * recall / (precision + recall).
8,408
def average_precision_score ( df , col_true = None , col_pred = None , col_scores = None , pos_label = 1 ) : if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_CLASS ) if not col_scores : col_scores = get_field_name_by_role ( df , FieldRole . PREDICTED_SCORE ) thresh , tp , fn , tn , fp = _run_roc_node ( df , pos_label , col_true , col_pred , col_scores ) precisions = np . squeeze ( np . asarray ( tp * 1.0 / ( tp + fp ) ) ) recalls = np . squeeze ( np . asarray ( tp * 1.0 / ( tp + fn ) ) ) return np . trapz ( precisions , recalls )
Compute the average precision score, i.e. the area under the precision-recall curve. Note that this method will trigger the defined flow to execute.
8,409
def _var_uint64_byte_size_no_tag ( uint64 ) : if uint64 > UINT64_MAX : raise errors . EncodeError ( 'Value out of range: %d' % uint64 ) bytes = 1 while uint64 > 0x7f : bytes += 1 uint64 >>= 7 return bytes
Return the number of bytes required to serialize a single varint. uint64 must be unsigned.
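Since each varint byte carries 7 payload bits, the result is 1 byte for values 0-127, 2 bytes for 128-16383 (e.g. 300), and at most 10 bytes for the largest unsigned 64-bit values.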
8,410
def train ( self , * args , ** kwargs ) : objs = self . _do_transform ( * args , ** kwargs ) obj_list = [ objs , ] if not isinstance ( objs , Iterable ) else objs for obj in obj_list : if not isinstance ( obj , ODPSModelExpr ) : continue for meta in [ 'predictor' , 'recommender' ] : if meta not in self . _metas : continue mod = __import__ ( self . __class__ . __module__ . __name__ , fromlist = [ '' ] ) if not hasattr ( self , '_env' ) else self . _env action_cls_name = underline_to_capitalized ( self . _metas [ meta ] ) if not hasattr ( mod , action_cls_name ) : action_cls_name = '_' + action_cls_name setattr ( obj , '_' + meta , mod + '.' + action_cls_name ) return objs
Perform training on a DataFrame. The label field is specified by the label_field method.
8,411
def wait ( fs , timeout = None , return_when = ALL_COMPLETED ) : with _AcquireFutures ( fs ) : done = set ( f for f in fs if f . _state in [ CANCELLED_AND_NOTIFIED , FINISHED ] ) not_done = set ( fs ) - done if ( return_when == FIRST_COMPLETED ) and done : return DoneAndNotDoneFutures ( done , not_done ) elif ( return_when == FIRST_EXCEPTION ) and done : if any ( f for f in done if not f . cancelled ( ) and f . exception ( ) is not None ) : return DoneAndNotDoneFutures ( done , not_done ) if len ( done ) == len ( fs ) : return DoneAndNotDoneFutures ( done , not_done ) waiter = _create_and_install_waiters ( fs , return_when ) waiter . event . wait ( timeout ) for f in fs : with f . _condition : f . _waiters . remove ( waiter ) done . update ( waiter . finished_futures ) return DoneAndNotDoneFutures ( done , set ( fs ) - done )
Wait for the futures in the given sequence to complete.
8,412
def cancel ( self ) : with self . _condition : if self . _state in [ RUNNING , FINISHED ] : return False if self . _state in [ CANCELLED , CANCELLED_AND_NOTIFIED ] : return True self . _state = CANCELLED self . _condition . notify_all ( ) self . _invoke_callbacks ( ) return True
Cancel the future if possible.
8,413
def add_done_callback ( self , fn ) : with self . _condition : if self . _state not in [ CANCELLED , CANCELLED_AND_NOTIFIED , FINISHED ] : self . _done_callbacks . append ( fn ) return fn ( self )
Attach a callable that will be called when the future finishes.
8,414
def set_exception_info ( self , exception , traceback ) : with self . _condition : self . _exception = exception self . _traceback = traceback self . _state = FINISHED for waiter in self . _waiters : waiter . add_exception ( self ) self . _condition . notify_all ( ) self . _invoke_callbacks ( )
Set the result of the future to the given exception and traceback.
8,415
def persist ( self , name , project = None , drop_model = False , ** kwargs ) : return super ( ODPSModelExpr , self ) . persist ( name , project = project , drop_model = drop_model , ** kwargs )
Persist the execution into a new model.
8,416
def quantile ( expr , prob = None , ** kw ) : prob = kw . get ( '_prob' , prob ) output_type = _stats_type ( expr ) if isinstance ( prob , ( list , set ) ) and not isinstance ( expr , GroupBy ) : output_type = types . List ( output_type ) return _reduction ( expr , Quantile , output_type , _prob = prob )
Compute the percentile (quantile) value of the sequence.
8,417
def nunique ( expr ) : output_type = types . int64 if isinstance ( expr , SequenceExpr ) : return NUnique ( _value_type = output_type , _inputs = [ expr ] ) elif isinstance ( expr , SequenceGroupBy ) : return GroupedNUnique ( _data_type = output_type , _inputs = [ expr . to_column ( ) ] , _grouped = expr . input ) elif isinstance ( expr , CollectionExpr ) : unique_input = _extract_unique_input ( expr ) if unique_input : return nunique ( unique_input ) else : return NUnique ( _value_type = types . int64 , _inputs = expr . _project_fields ) elif isinstance ( expr , GroupBy ) : if expr . _to_agg : inputs = expr . input [ expr . _to_agg . names ] . _project_fields else : inputs = expr . input . _project_fields return GroupedNUnique ( _data_type = types . int64 , _inputs = inputs , _grouped = expr )
Compute the count of distinct values.
8,418
def cat ( expr , others = None , sep = None , na_rep = None ) : if others is not None : from . strings import _cat as cat_str return cat_str ( expr , others , sep = sep , na_rep = na_rep ) return _cat ( expr , sep = sep , na_rep = na_rep )
Concatenate strings in the sequence with the given separator.
8,419
def moment ( expr , order , central = False ) : if not isinstance ( order , six . integer_types ) : raise ValueError ( 'Only integer-ordered moments are supported.' ) if order < 0 : raise ValueError ( 'Only non-negative orders are supported.' ) output_type = _stats_type ( expr ) return _reduction ( expr , Moment , output_type , _order = order , _center = central )
Calculate the n-th order moment of the sequence.
8,420
def stop ( self ) : instance_status = Instance . InstanceStatus ( status = 'Terminated' ) xml_content = instance_status . serialize ( ) headers = { 'Content-Type' : 'application/xml' } self . _client . put ( self . resource ( ) , xml_content , headers = headers )
Stop this instance.
8,421
def get_task_results ( self ) : results = self . get_task_results_without_format ( ) if options . tunnel . string_as_binary : return compat . OrderedDict ( [ ( k , bytes ( result ) ) for k , result in six . iteritems ( results ) ] ) else : return compat . OrderedDict ( [ ( k , str ( result ) ) for k , result in six . iteritems ( results ) ] )
Get all the task results.
8,422
def get_task_summary ( self , task_name ) : params = { 'instancesummary' : '' , 'taskname' : task_name } resp = self . _client . get ( self . resource ( ) , params = params ) map_reduce = resp . json ( ) . get ( 'Instance' ) if map_reduce : json_summary = map_reduce . get ( 'JsonSummary' ) if json_summary : summary = Instance . TaskSummary ( json . loads ( json_summary ) ) summary . summary_text = map_reduce . get ( 'Summary' ) summary . json_summary = json_summary return summary
Get a task's summary, mostly used for MapReduce.
8,423
def get_task_statuses ( self ) : params = { 'taskstatus' : '' } resp = self . _client . get ( self . resource ( ) , params = params ) self . parse ( self . _client , resp , obj = self ) return dict ( [ ( task . name , task ) for task in self . _tasks ] )
Get the statuses of all tasks.
8,424
def get_task_cost ( self , task_name ) : summary = self . get_task_summary ( task_name ) if summary is None : return None if 'Cost' in summary : task_cost = summary [ 'Cost' ] cpu_cost = task_cost . get ( 'CPU' ) memory = task_cost . get ( 'Memory' ) input_size = task_cost . get ( 'Input' ) return Instance . TaskCost ( cpu_cost , memory , input_size )
Get the cost of a task, including CPU cost, memory and input size.
8,425
def get_task_info ( self , task_name , key ) : params = OrderedDict ( [ ( 'info' , '' ) , ( 'taskname' , task_name ) , ( 'key' , key ) ] ) resp = self . _client . get ( self . resource ( ) , params = params ) return resp . text
Get task-related information.
8,426
def put_task_info ( self , task_name , key , value ) : params = OrderedDict ( [ ( 'info' , '' ) , ( 'taskname' , task_name ) ] ) headers = { 'Content-Type' : 'application/xml' } body = self . TaskInfo ( key = key , value = value ) . serialize ( ) self . _client . put ( self . resource ( ) , params = params , headers = headers , data = body )
Put information into a task.
8,427
def get_task_quota ( self , task_name ) : params = OrderedDict ( [ ( 'instancequota' , '' ) , ( 'taskname' , task_name ) ] ) resp = self . _client . get ( self . resource ( ) , params = params ) return json . loads ( resp . text )
Get queueing info of the task. Note that the time between two calls should be larger than 30 seconds, otherwise an empty dict is returned.
8,428
def get_sql_task_cost ( self ) : resp = self . get_task_result ( self . get_task_names ( ) [ 0 ] ) cost = json . loads ( resp ) sql_cost = cost [ 'Cost' ] [ 'SQL' ] udf_num = sql_cost . get ( 'UDF' ) complexity = sql_cost . get ( 'Complexity' ) input_size = sql_cost . get ( 'Input' ) return Instance . SQLCost ( udf_num , complexity , input_size )
Get cost information of the SQL task, including input data size, number of UDFs and complexity of the SQL task.
8,429
def is_terminated ( self , retry = False ) : retry_num = options . retry_times while retry_num > 0 : try : return self . status == Instance . Status . TERMINATED except ( errors . InternalServerError , errors . RequestTimeTooSkewed ) : retry_num -= 1 if not retry or retry_num <= 0 : raise
Return whether this instance has terminated.
8,430
def is_successful ( self , retry = False ) : if not self . is_terminated ( retry = retry ) : return False retry_num = options . retry_times while retry_num > 0 : try : statuses = self . get_task_statuses ( ) return all ( task . status == Instance . Task . TaskStatus . SUCCESS for task in statuses . values ( ) ) except ( errors . InternalServerError , errors . RequestTimeTooSkewed ) : retry_num -= 1 if not retry or retry_num <= 0 : raise
Return whether the instance ran successfully.
8,431
def wait_for_completion ( self , interval = 1 ) : while not self . is_terminated ( retry = True ) : try : time . sleep ( interval ) except KeyboardInterrupt : break
Wait for the instance to complete, regardless of the outcome.
8,432
def wait_for_success ( self , interval = 1 ) : self . wait_for_completion ( interval = interval ) if not self . is_successful ( retry = True ) : for task_name , task in six . iteritems ( self . get_task_statuses ( ) ) : exc = None if task . status == Instance . Task . TaskStatus . FAILED : exc = errors . parse_instance_error ( self . get_task_result ( task_name ) ) elif task . status != Instance . Task . TaskStatus . SUCCESS : exc = errors . ODPSError ( '%s, status=%s' % ( task_name , task . status . value ) ) if exc : exc . instance_id = self . id raise exc
Wait for the instance to complete and check whether it succeeded.
8,433
def get_task_progress ( self , task_name ) : params = { 'instanceprogress' : task_name , 'taskname' : task_name } resp = self . _client . get ( self . resource ( ) , params = params ) return Instance . Task . TaskProgress . parse ( self . _client , resp )
Get a task's current progress.
8,434
def get_task_detail ( self , task_name ) : def _get_detail ( ) : from . . compat import json params = { 'instancedetail' : '' , 'taskname' : task_name } resp = self . _client . get ( self . resource ( ) , params = params ) return json . loads ( resp . text if six . PY3 else resp . content , object_pairs_hook = OrderedDict ) result = _get_detail ( ) if not result : self . get_task_detail2 ( task_name ) return _get_detail ( ) else : return result
Get a task's detail.
8,435
def get_task_detail2 ( self , task_name ) : from . . compat import json params = { 'detail' : '' , 'taskname' : task_name } resp = self . _client . get ( self . resource ( ) , params = params ) res = resp . text if six . PY3 else resp . content try : return json . loads ( res , object_pairs_hook = OrderedDict ) except ValueError : return res
Get a task's detail (v2).
8,436
def get_logview_address ( self , hours = None ) : hours = hours or options . log_view_hours project = self . project url = '%s/authorization' % project . resource ( ) policy = { 'expires_in_hours' : hours , 'policy' : { 'Statement' : [ { 'Action' : [ 'odps:Read' ] , 'Effect' : 'Allow' , 'Resource' : 'acs:odps:*:projects/%s/instances/%s' % ( project . name , self . id ) } ] , 'Version' : '1' , } } headers = { 'Content-Type' : 'application/json' } params = { 'sign_bearer_token' : '' } data = json . dumps ( policy ) res = self . _client . post ( url , data , headers = headers , params = params ) content = res . text if six . PY3 else res . content root = ElementTree . fromstring ( content ) token = root . find ( 'Result' ) . text link = options . log_view_host + "/logview/?h=" + self . _client . endpoint + "&p=" + project . name + "&i=" + self . id + "&token=" + token return link
Get the LogView address of the instance, valid for the given number of hours.
8,437
def open_reader ( self , * args , ** kwargs ) : use_tunnel = kwargs . get ( 'use_tunnel' , kwargs . get ( 'tunnel' ) ) auto_fallback_result = use_tunnel is None if use_tunnel is None : use_tunnel = options . tunnel . use_instance_tunnel result_fallback_errors = ( errors . InvalidProjectTable , errors . InvalidArgument ) if use_tunnel : if 'limit_enabled' in kwargs : kwargs [ 'limit' ] = kwargs [ 'limit_enabled' ] del kwargs [ 'limit_enabled' ] if 'limit' not in kwargs : kwargs [ 'limit' ] = options . tunnel . limit_instance_tunnel auto_fallback_protection = False if kwargs [ 'limit' ] is None : kwargs [ 'limit' ] = False auto_fallback_protection = True try : return self . _open_tunnel_reader ( ** kwargs ) except result_fallback_errors : if not auto_fallback_result : raise if not kwargs . get ( 'limit' ) : warnings . warn ( 'Instance tunnel not supported, will fallback to ' 'conventional ways. 10000 records will be limited.' ) except requests . Timeout : if not auto_fallback_result : raise if not kwargs . get ( 'limit' ) : warnings . warn ( 'Instance tunnel timed out, will fallback to ' 'conventional ways. 10000 records will be limited.' ) except ( Instance . DownloadSessionCreationError , errors . InstanceTypeNotSupported ) : if not auto_fallback_result : raise except errors . NoPermission : if not auto_fallback_protection : raise if not kwargs . get ( 'limit' ) : warnings . warn ( 'Project under protection, 10000 records will be limited.' ) kwargs [ 'limit' ] = True return self . _open_tunnel_reader ( ** kwargs ) return self . _open_result_reader ( * args , ** kwargs )
Open a reader to read records from the result of the instance. If tunnel is True, the instance tunnel will be used; otherwise the conventional routine will be used. If the instance tunnel is not available and tunnel is not specified, the method will fall back to the conventional routine. Note that the number of records returned is limited unless options.tunnel.limit_instance_tunnel is set to False or limit=False is configured under instance tunnel mode; otherwise the number of records returned is always limited.
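A minimal usage sketch of the asynchronous flow around this reader (the entry object o, the SQL text and the table name are hypothetical placeholders; assumes a configured PyODPS connection):

inst = o.run_sql('select * from my_table')                    # hypothetical query, runs asynchronously
inst.wait_for_success()                                       # block until all tasks succeed
with inst.open_reader(tunnel=True, limit=False) as reader:    # prefer the instance tunnel, no row limit
    for record in reader:
        print(record.values)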
8,438
def in_qtconsole ( ) : try : ip = get_ipython ( ) front_end = ( ip . config . get ( 'KernelApp' , { } ) . get ( 'parent_appname' , "" ) or ip . config . get ( 'IPKernelApp' , { } ) . get ( 'parent_appname' , "" ) ) if 'qtconsole' in front_end . lower ( ) : return True except : return False return False
Check whether we're inside an IPython qtconsole.
8,439
def isatty ( file ) : if ( multiprocessing . current_process ( ) . name != 'MainProcess' or threading . current_thread ( ) . getName ( ) != 'MainThread' ) : return False if hasattr ( file , 'isatty' ) : return file . isatty ( ) elif ( OutStream is not None and isinstance ( file , ( OutStream , IPythonIOStream ) ) and ( ( hasattr ( file , 'name' ) and file . name == 'stdout' ) or ( hasattr ( file , 'stream' ) and isinstance ( file . stream , PyreadlineConsole ) ) ) ) : return True return False
Return True if file is a tty.
8,440
def color_print ( * args , ** kwargs ) : file = kwargs . get ( 'file' , _get_stdout ( ) ) end = kwargs . get ( 'end' , '\n' ) write = file . write if isatty ( file ) and options . console . use_color : for i in range ( 0 , len ( args ) , 2 ) : msg = args [ i ] if i + 1 == len ( args ) : color = '' else : color = args [ i + 1 ] if color : msg = _color_text ( msg , color ) if not six . PY3 and isinstance ( msg , bytes ) : msg = _decode_preferred_encoding ( msg ) write = _write_with_fallback ( msg , write , file ) write ( end ) else : for i in range ( 0 , len ( args ) , 2 ) : msg = args [ i ] if not six . PY3 and isinstance ( msg , bytes ) : msg = _decode_preferred_encoding ( msg ) write ( msg ) write ( end )
Print colors and styles to the terminal using ANSI escape sequences.
8,441
def human_time ( seconds ) : units = [ ( 'y' , 60 * 60 * 24 * 7 * 52 ) , ( 'w' , 60 * 60 * 24 * 7 ) , ( 'd' , 60 * 60 * 24 ) , ( 'h' , 60 * 60 ) , ( 'm' , 60 ) , ( 's' , 1 ) , ] seconds = int ( seconds ) if seconds < 60 : return ' {0:2d}s' . format ( seconds ) for i in range ( len ( units ) - 1 ) : unit1 , limit1 = units [ i ] unit2 , limit2 = units [ i + 1 ] if seconds >= limit1 : return '{0:2d}{1}{2:2d}{3}' . format ( seconds // limit1 , unit1 , ( seconds % limit1 ) // limit2 , unit2 ) return ' ~inf'
Return a human-friendly time string that is always exactly 6 characters long.
8,442
def human_file_size ( size ) : suffixes = ' kMGTPEZY' if size == 0 : num_scale = 0 else : num_scale = int ( math . floor ( math . log ( size ) / math . log ( 1000 ) ) ) num_scale = max ( num_scale , 0 ) if num_scale >= len ( suffixes ) : suffix = '?' else : suffix = suffixes [ num_scale ] num_scale = int ( math . pow ( 1000 , num_scale ) ) value = float ( size ) / num_scale str_value = str ( value ) if suffix == ' ' : if '.' in str_value : str_value = str_value [ : str_value . index ( '.' ) ] elif str_value [ 2 ] == '.' : str_value = str_value [ : 2 ] else : str_value = str_value [ : 3 ] return "{0:>3s}{1}" . format ( str_value , suffix )
Return a human-friendly string representing a file size, 2-4 characters long.
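For example, following the code above, a size of 1500 renders as '1.5k' and 123456789 renders as '123M'.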
8,443
def update ( self , value = None ) : if value is None : value = self . _current_value + 1 self . _current_value = value if self . _ipython_widget : try : self . _update_ipython_widget ( value ) except RuntimeError : pass else : self . _update_console ( value )
Update the progress bar via the console or the notebook, as appropriate.
8,444
def map ( cls , function , items , multiprocess = False , file = None ) : results = [ ] if file is None : file = _get_stdout ( ) with cls ( len ( items ) , file = file ) as bar : step_size = max ( 200 , bar . _bar_length ) steps = max ( int ( float ( len ( items ) ) / step_size ) , 1 ) if not multiprocess : for i , item in enumerate ( items ) : results . append ( function ( item ) ) if ( i % steps ) == 0 : bar . update ( i ) else : p = multiprocessing . Pool ( ) for i , result in enumerate ( p . imap_unordered ( function , items , steps ) ) : bar . update ( i ) results . append ( result ) p . close ( ) p . join ( ) return results
Perform a map operation while displaying a progress bar with the percentage complete.
8,445
def hll_count ( expr , error_rate = 0.01 , splitter = None ) : with open ( os . path . join ( path , 'lib' , 'hll.py' ) ) as hll_file : local = { } six . exec_ ( hll_file . read ( ) , local ) HyperLogLog = local [ 'HyperLogLog' ] return expr . agg ( HyperLogLog , rtype = types . int64 , args = ( error_rate , splitter ) )
Calculate an approximate distinct count using HyperLogLog.
8,446
def bloomfilter ( collection , on , column , capacity = 3000 , error_rate = 0.01 ) : if not isinstance ( column , Column ) : raise TypeError ( 'bloomfilter can only filter on the column of a collection' ) with open ( os . path . join ( path , 'lib' , 'bloomfilter.py' ) ) as bloomfilter_file : local = { } six . exec_ ( bloomfilter_file . read ( ) , local ) BloomFilter = local [ 'BloomFilter' ] col_name = column . source_name or column . name on_name = on . name if isinstance ( on , SequenceExpr ) else on rand_name = '%s_%s' % ( on_name , str ( uuid . uuid4 ( ) ) . replace ( '-' , '_' ) ) on_col = collection . _get_field ( on ) . rename ( rand_name ) src_collection = collection collection = collection [ collection , on_col ] @ output ( src_collection . schema . names , src_collection . schema . types ) class Filter ( object ) : def __init__ ( self , resources ) : table = resources [ 0 ] bloom = BloomFilter ( capacity , error_rate ) for row in table : bloom . add ( str ( getattr ( row , col_name ) ) ) self . bloom = bloom def __call__ ( self , row ) : if str ( getattr ( row , rand_name ) ) not in self . bloom : return return row [ : - 1 ] return collection . apply ( Filter , axis = 1 , resources = [ column . input , ] )
Filter the collection on the 'on' sequence using a Bloom filter built from 'column'.
8,447
def cumsum ( expr , sort = None , ascending = True , unique = False , preceding = None , following = None ) : if expr . _data_type == types . boolean : output_type = types . int64 else : output_type = expr . _data_type return _cumulative_op ( expr , CumSum , sort = sort , ascending = ascending , unique = unique , preceding = preceding , following = following , data_type = output_type )
Calculate cumulative sum of a sequence expression.
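A hedged sketch of how such a cumulative expression is typically used as a grouped window function (the DataFrame, table and column names are invented):

# running total of `amount` within each `shop`, ordered by `day`
df['shop', 'day', df.groupby('shop').sort('day').amount.cumsum().rename('running_total')]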
8,448
def cummax ( expr , sort = None , ascending = True , unique = False , preceding = None , following = None ) : return _cumulative_op ( expr , CumMax , sort = sort , ascending = ascending , unique = unique , preceding = preceding , following = following )
Calculate cumulative maximum of a sequence expression.
8,449
def cummin ( expr , sort = None , ascending = True , unique = False , preceding = None , following = None ) : return _cumulative_op ( expr , CumMin , sort = sort , ascending = ascending , unique = unique , preceding = preceding , following = following )
Calculate cumulative minimum of a sequence expression.
8,450
def cummean ( expr , sort = None , ascending = True , unique = False , preceding = None , following = None ) : data_type = _stats_type ( expr ) return _cumulative_op ( expr , CumMean , sort = sort , ascending = ascending , unique = unique , preceding = preceding , following = following , data_type = data_type )
Calculate cumulative mean of a sequence expression.
8,451
def cummedian ( expr , sort = None , ascending = True , unique = False , preceding = None , following = None ) : data_type = _stats_type ( expr ) return _cumulative_op ( expr , CumMedian , sort = sort , ascending = ascending , unique = unique , preceding = preceding , following = following , data_type = data_type )
Calculate cumulative median of a sequence expression.
8,452
def cumcount ( expr , sort = None , ascending = True , unique = False , preceding = None , following = None ) : data_type = types . int64 return _cumulative_op ( expr , CumCount , sort = sort , ascending = ascending , unique = unique , preceding = preceding , following = following , data_type = data_type )
Calculate cumulative count of a sequence expression.
8,453
def cumstd ( expr , sort = None , ascending = True , unique = False , preceding = None , following = None ) : data_type = _stats_type ( expr ) return _cumulative_op ( expr , CumStd , sort = sort , ascending = ascending , unique = unique , preceding = preceding , following = following , data_type = data_type )
Calculate cumulative standard deviation of a sequence expression.
8,454
def nth_value ( expr , nth , skip_nulls = False , sort = None , ascending = True ) : return _cumulative_op ( expr , NthValue , data_type = expr . _data_type , sort = sort , ascending = ascending , _nth = nth , _skip_nulls = skip_nulls )
Get the nth value of a grouped and sorted expression.
8,455
def rank ( expr , sort = None , ascending = True ) : return _rank_op ( expr , Rank , types . int64 , sort = sort , ascending = ascending )
Calculate rank of a sequence expression.
8,456
def dense_rank ( expr , sort = None , ascending = True ) : return _rank_op ( expr , DenseRank , types . int64 , sort = sort , ascending = ascending )
Calculate dense rank of a sequence expression.
8,457
def percent_rank ( expr , sort = None , ascending = True ) : return _rank_op ( expr , PercentRank , types . float64 , sort = sort , ascending = ascending )
Calculate percentage rank of a sequence expression.
8,458
def row_number ( expr , sort = None , ascending = True ) : return _rank_op ( expr , RowNumber , types . int64 , sort = sort , ascending = ascending )
Calculate row number of a sequence expression.
8,459
def qcut ( expr , bins , labels = False , sort = None , ascending = True ) : if labels is None or labels : raise NotImplementedError ( 'Showing bins or customizing labels not supported' ) return _rank_op ( expr , QCut , types . int64 , sort = sort , ascending = ascending , _bins = bins )
Get quantile-based bin indices of every element of a grouped and sorted expression. Bin indices start from 0. If cuts are not of equal sizes, extra items will be appended to the first group.
8,460
def cume_dist ( expr , sort = None , ascending = True ) : return _rank_op ( expr , CumeDist , types . float64 , sort = sort , ascending = ascending )
Calculate cumulative ratio of a sequence expression.
8,461
def lag ( expr , offset , default = None , sort = None , ascending = True ) : return _shift_op ( expr , Lag , offset , default = default , sort = sort , ascending = ascending )
Get the value in the row that is offset rows prior to the current row.
8,462
def lead ( expr , offset , default = None , sort = None , ascending = True ) : return _shift_op ( expr , Lead , offset , default = default , sort = sort , ascending = ascending )
Get the value in the row that is offset rows after the current row.
8,463
def get_model ( self ) : url = self . resource ( ) params = { 'data' : '' } resp = self . _client . get ( url , params = params ) return resp . text
Get the PMML text of the current model. Note that the model file obtained via this method might be incomplete due to size limitations.
8,464
def parse_response ( resp ) : host_id , msg , code = None , None , None try : content = resp . content root = ET . fromstring ( content ) code = root . find ( './Code' ) . text msg = root . find ( './Message' ) . text request_id = root . find ( './RequestId' ) . text host_id = root . find ( './HostId' ) . text except ETParseError : request_id = resp . headers . get ( 'x-odps-request-id' , None ) if len ( resp . content ) > 0 : obj = json . loads ( resp . text ) msg = obj [ 'Message' ] code = obj . get ( 'Code' ) host_id = obj . get ( 'HostId' ) if request_id is None : request_id = obj . get ( 'RequestId' ) else : return clz = globals ( ) . get ( code , ODPSError ) return clz ( msg , request_id = request_id , code = code , host_id = host_id )
Parse the content of the response and return an exception object.
8,465
def throw_if_parsable ( resp ) : e = None try : e = parse_response ( resp ) except : LOG . debug ( utils . stringify_expt ( ) ) if e is not None : raise e if resp . status_code == 404 : raise NoSuchObject ( 'No such object.' ) else : text = resp . text if six . PY3 else resp . content if text : raise ODPSError ( text , code = str ( resp . status_code ) ) else : raise ODPSError ( str ( resp . status_code ) )
Try to parse the content of the response and raise an exception if necessary.
8,466
def merge_data ( * data_frames , ** kwargs ) : from . specialized import build_merge_expr from . . utils import ML_ARG_PREFIX if len ( data_frames ) <= 1 : raise ValueError ( 'Count of DataFrames should be at least 2.' ) norm_data_pairs = [ ] df_tuple = collections . namedtuple ( 'MergeTuple' , 'df cols exclude' ) for pair in data_frames : if isinstance ( pair , tuple ) : if len ( pair ) == 2 : df , cols = pair exclude = False else : df , cols , exclude = pair if isinstance ( cols , six . string_types ) : cols = cols . split ( ',' ) else : df , cols , exclude = pair , None , False norm_data_pairs . append ( df_tuple ( df , cols , exclude ) ) auto_rename = kwargs . get ( 'auto_rename' , False ) sel_cols_dict = dict ( ( idx , tp . cols ) for idx , tp in enumerate ( norm_data_pairs ) if tp . cols and not tp . exclude ) ex_cols_dict = dict ( ( idx , tp . cols ) for idx , tp in enumerate ( norm_data_pairs ) if tp . cols and tp . exclude ) merge_expr = build_merge_expr ( len ( norm_data_pairs ) ) arg_dict = dict ( _params = { 'autoRenameCol' : str ( auto_rename ) } , selected_cols = sel_cols_dict , excluded_cols = ex_cols_dict ) for idx , dp in enumerate ( norm_data_pairs ) : arg_dict [ ML_ARG_PREFIX + 'input%d' % ( 1 + idx ) ] = dp . df out_df = merge_expr ( register_expr = True , _exec_id = uuid . uuid4 ( ) , _output_name = 'output' , ** arg_dict ) out_df . _ml_uplink = [ dp . df for dp in norm_data_pairs ] out_df . _perform_operation ( op . MergeFieldsOperation ( auto_rename , sel_cols_dict , ex_cols_dict ) ) out_df . _rebuild_df_schema ( ) return out_df
Merge DataFrames by column. The number of rows in the tables must be the same.
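A usage sketch inferred from the signature above (the DataFrames and column names are hypothetical):

# keep every column of df1, only col_a and col_b of df2, and auto-rename clashing columns
merged = merge_data(df1, (df2, 'col_a,col_b'), auto_rename=True)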
8,467
def exclude_fields ( self , * args ) : if not args : raise ValueError ( "Field list cannot be None." ) new_df = copy_df ( self ) fields = _render_field_set ( args ) self . _assert_ml_fields_valid ( * fields ) new_df . _perform_operation ( op . ExcludeFieldsOperation ( fields ) ) return new_df
Exclude one or more fields from feature fields.
8,468
def select_features ( self , * args , ** kwargs ) : if not args : raise ValueError ( "Field list cannot be empty." ) augment = kwargs . get ( 'add' , False ) fields = _render_field_set ( args ) self . _assert_ml_fields_valid ( * fields ) return _batch_change_roles ( self , fields , FieldRole . FEATURE , augment )
Select one or more fields as feature fields.
8,469
def weight_field ( self , f ) : if f is None : raise ValueError ( "Field name cannot be None." ) self . _assert_ml_fields_valid ( f ) return _change_singleton_roles ( self , { f : FieldRole . WEIGHT } , clear_feature = True )
Select one field as the weight field.
8,470
def label_field ( self , f ) : if f is None : raise ValueError ( "Label field name cannot be None." ) self . _assert_ml_fields_valid ( f ) return _change_singleton_roles ( self , { _get_field_name ( f ) : FieldRole . LABEL } , clear_feature = True )
Select one field as the label field.
8,471
def continuous ( self , * args ) : new_df = copy_df ( self ) fields = _render_field_set ( args ) self . _assert_ml_fields_valid ( * fields ) new_df . _perform_operation ( op . FieldContinuityOperation ( dict ( ( _get_field_name ( f ) , True ) for f in fields ) ) ) return new_df
Set fields to be continuous.
8,472
def discrete ( self , * args ) : new_df = copy_df ( self ) fields = _render_field_set ( args ) self . _assert_ml_fields_valid ( * fields ) new_df . _perform_operation ( op . FieldContinuityOperation ( dict ( ( _get_field_name ( f ) , False ) for f in fields ) ) ) return new_df
Set fields to be discrete.
8,473
def roles ( self , clear_features = True , ** field_roles ) : field_roles = dict ( ( k , v . name if isinstance ( v , SequenceExpr ) else v ) for k , v in six . iteritems ( field_roles ) ) self . _assert_ml_fields_valid ( * list ( six . itervalues ( field_roles ) ) ) field_roles = dict ( ( _get_field_name ( f ) , MLField . translate_role_name ( role ) ) for role , f in six . iteritems ( field_roles ) ) if field_roles : return _change_singleton_roles ( self , field_roles , clear_features ) else : return self
Set roles of fields.
8,474
def split ( self , frac ) : from . . import preprocess split_obj = getattr ( preprocess , '_Split' ) ( fraction = frac ) return split_obj . transform ( self )
Split the DataFrame into two DataFrames with a certain ratio.
8,475
def append_id ( self , id_col_name = 'append_id' , cols = None ) : from . . import preprocess if id_col_name in self . schema : raise ValueError ( 'ID column collides with existing columns.' ) append_id_obj = getattr ( preprocess , '_AppendID' ) ( id_col = id_col_name , selected_cols = cols ) return append_id_obj . transform ( self )
Append an ID column to the current DataFrame.
8,476
def continuous ( self ) : field_name = self . name new_df = copy_df ( self ) new_df . _perform_operation ( op . FieldContinuityOperation ( { field_name : True } ) ) return new_df
Set sequence to be continuous.
8,477
def discrete ( self ) : field_name = self . name new_df = copy_df ( self ) new_df . _perform_operation ( op . FieldContinuityOperation ( { field_name : False } ) ) return new_df
Set sequence to be discrete.
8,478
def role ( self , role_name ) : field_name = self . name field_roles = { field_name : MLField . translate_role_name ( role_name ) } if field_roles : return _change_singleton_roles ( self , field_roles , True ) else : return self
Set the role of the current column.
8,479
def _init ( self , * args , ** kwargs ) : self . _init_attr ( '_deps' , None ) self . _init_attr ( '_ban_optimize' , False ) self . _init_attr ( '_engine' , None ) self . _init_attr ( '_Expr__execution' , None ) self . _init_attr ( '_need_cache' , False ) self . _init_attr ( '_mem_cache' , False ) if '_id' not in kwargs : kwargs [ '_id' ] = new_id ( ) super ( Expr , self ) . _init ( * args , ** kwargs )
_deps is used for common dependencies. When an expr depends on other exprs but is not calculated from them, _deps is specified to identify the dependencies.
8,480
def compile ( self ) : from . . engines import get_default_engine engine = get_default_engine ( self ) return engine . compile ( self )
Compile this expression into an ODPS SQL statement.
8,481
def persist ( self , name , partitions = None , partition = None , lifecycle = None , project = None , ** kwargs ) : if lifecycle is None and options . lifecycle is not None : lifecycle = options . lifecycle if not name . startswith ( TEMP_TABLE_PREFIX ) else options . temp_lifecycle return self . _handle_delay_call ( 'persist' , self , name , partitions = partitions , partition = partition , lifecycle = lifecycle , project = project , ** kwargs )
Persist the execution into a new table. If partitions is not specified, a new table without partitions will be created if it does not exist, and the SQL result will be inserted into it. If partitions is specified, they will be the partition fields of the new table. If partition is specified, the data will be inserted into the exact partition of the table.
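A brief usage sketch based on the signature above (table and partition names are made up):

# create a table partitioned by `ds` if it does not exist and keep it for 7 days
df.persist('result_table', partitions=['ds'], lifecycle=7)
# or insert the result into one exact partition of the target table
df.persist('result_table', partition='ds=20230101')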
8,482
def query ( self , expr ) : from . query import CollectionVisitor if not isinstance ( expr , six . string_types ) : raise ValueError ( 'expr must be a string' ) frame = sys . _getframe ( 2 ) . f_locals try : env = frame . copy ( ) finally : del frame visitor = CollectionVisitor ( self , env ) predicate = visitor . eval ( expr ) return self . filter ( predicate )
Query the data with a boolean expression.
8,483
def filter ( self , * predicates ) : predicates = self . _get_fields ( predicates ) predicate = reduce ( operator . and_ , predicates ) return FilterCollectionExpr ( self , predicate , _schema = self . _schema )
Filter the data by predicates.
8,484
def select ( self , * fields , ** kw ) : if len ( fields ) == 1 and isinstance ( fields [ 0 ] , list ) : fields = fields [ 0 ] else : fields = list ( fields ) if kw : def handle ( it ) : it = self . _defunc ( it ) if not isinstance ( it , Expr ) : it = Scalar ( it ) return it fields . extend ( [ handle ( f ) . rename ( new_name ) for new_name , f in six . iteritems ( kw ) ] ) return self . _project ( fields )
Project columns. Remember to avoid column name conflicts.
8,485
def exclude ( self , * fields ) : if len ( fields ) == 1 and isinstance ( fields [ 0 ] , list ) : exclude_fields = fields [ 0 ] else : exclude_fields = list ( fields ) exclude_fields = [ self . _defunc ( it ) for it in exclude_fields ] exclude_fields = [ field . name if not isinstance ( field , six . string_types ) else field for field in exclude_fields ] fields = [ name for name in self . _schema . names if name not in exclude_fields ] return self . _project ( fields )
Project the columns that are not included in the given fields.
8,486
def head ( self , n = None , ** kwargs ) : if n is None : n = options . display . max_rows return self . _handle_delay_call ( 'execute' , self , head = n , ** kwargs )
Return the first n rows. Execute at once.
8,487
def tail ( self , n = None , ** kwargs ) : if n is None : n = options . display . max_rows return self . _handle_delay_call ( 'execute' , self , tail = n , ** kwargs )
Return the last n rows. Execute at once.
8,488
def to_pandas ( self , wrap = False , ** kwargs ) : try : import pandas as pd except ImportError : raise DependencyNotInstalledError ( 'to_pandas requires `pandas` library' ) def wrapper ( result ) : res = result . values if wrap : from . . import DataFrame return DataFrame ( res , schema = self . schema ) return res return self . execute ( wrapper = wrapper , ** kwargs )
Convert to a pandas DataFrame. Execute at once.
8,489
def view ( self ) : proxied = get_proxied_expr ( self ) kv = dict ( ( attr , getattr ( proxied , attr ) ) for attr in get_attrs ( proxied ) ) return type ( proxied ) ( ** kv )
Clone an identical collection, useful for self-joins.
8,490
def to_pandas ( self , wrap = False , ** kwargs ) : try : import pandas as pd except ImportError : raise DependencyNotInstalledError ( 'to_pandas requires for `pandas` library' ) def wrapper ( result ) : df = result . values if wrap : from . . import DataFrame df = DataFrame ( df ) return df [ self . name ] return self . execute ( wrapper = wrapper , ** kwargs )
Convert to a pandas Series. Execute at once.
8,491
def astype ( self , data_type ) : data_type = types . validate_data_type ( data_type ) if data_type == self . _data_type : return self attr_dict = dict ( ) attr_dict [ '_data_type' ] = data_type attr_dict [ '_source_data_type' ] = self . _source_data_type attr_dict [ '_input' ] = self new_sequence = AsTypedSequenceExpr ( ** attr_dict ) return new_sequence
Cast to a new data type.
8,492
def explode ( expr , * args , ** kwargs ) : if not isinstance ( expr , Column ) : expr = to_collection ( expr ) [ expr . name ] if isinstance ( expr , SequenceExpr ) : dtype = expr . data_type else : dtype = expr . value_type func_name = 'EXPLODE' if args and isinstance ( args [ 0 ] , ( list , tuple , set ) ) : names = list ( args [ 0 ] ) else : names = args pos = kwargs . get ( 'pos' , False ) if isinstance ( expr , ListSequenceExpr ) : if pos : func_name = 'POSEXPLODE' typos = [ df_types . int64 , dtype . value_type ] if not names : names = [ expr . name + '_pos' , expr . name ] if len ( names ) == 1 : names = [ names [ 0 ] + '_pos' , names [ 0 ] ] if len ( names ) != 2 : raise ValueError ( "The length of parameter 'names' should be exactly 1." ) else : typos = [ dtype . value_type ] if not names : names = [ expr . name ] if len ( names ) != 1 : raise ValueError ( "The length of parameter 'names' should be exactly 1." ) elif isinstance ( expr , DictSequenceExpr ) : if pos : raise ValueError ( 'Cannot support explosion with pos on dicts.' ) typos = [ dtype . key_type , dtype . value_type ] if not names : names = [ expr . name + '_key' , expr . name + '_value' ] if len ( names ) != 2 : raise ValueError ( "The length of parameter 'names' should be exactly 2." ) else : raise ValueError ( 'Cannot explode expression with type %s' % type ( expr ) . __name__ ) schema = Schema . from_lists ( names , typos ) return RowAppliedCollectionExpr ( _input = expr . input , _func = func_name , _schema = schema , _fields = [ expr ] , _keep_nulls = kwargs . get ( 'keep_nulls' , False ) )
Expand list or dict data into multiple rows.
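A small sketch of applying this to a list-typed column (column names are invented); with pos=True the list variant also emits each element's position:

# one output row per element of the `hobbies` list, keeping `name`
df[df.name, df.hobbies.explode()]
# with positions: produces `hobbies_pos` and `hobbies` columns
df[df.name, df.hobbies.explode(pos=True)]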
8,493
def _contains ( expr , value ) : return composite_op ( expr , ListContains , df_types . boolean , _value = _scalar ( value ) )
Check whether a certain value is in the inspected list.
8,494
def _keys ( expr ) : if isinstance ( expr , SequenceExpr ) : dtype = expr . data_type else : dtype = expr . value_type return composite_op ( expr , DictKeys , df_types . List ( dtype . key_type ) )
Retrieve keys of a dict.
8,495
def _values ( expr ) : if isinstance ( expr , SequenceExpr ) : dtype = expr . data_type else : dtype = expr . value_type return composite_op ( expr , DictValues , df_types . List ( dtype . value_type ) )
Retrieve values of a dict.
8,496
def mean_squared_error ( df , col_true , col_pred = None ) : if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_VALUE ) return _run_evaluation_node ( df , col_true , col_pred ) [ 'mse' ]
Compute mean squared error of a predicted DataFrame.
8,497
def mean_absolute_error ( df , col_true , col_pred = None ) : if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_VALUE ) return _run_evaluation_node ( df , col_true , col_pred ) [ 'mae' ]
Compute mean absolute error of a predicted DataFrame.
8,498
def mean_absolute_percentage_error ( df , col_true , col_pred = None ) : if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_VALUE ) return _run_evaluation_node ( df , col_true , col_pred ) [ 'mape' ]
Compute mean absolute percentage error of a predicted DataFrame.
8,499
def residual_histogram ( df , col_true , col_pred = None ) : if not col_pred : col_pred = get_field_name_by_role ( df , FieldRole . PREDICTED_VALUE ) return _run_evaluation_node ( df , col_true , col_pred ) [ 'hist' ]
Compute histogram of residuals of a predicted DataFrame.