Columns: idx: int64 (0 to 63k); question: string (lengths 61 to 4.03k); target: string (lengths 6 to 1.23k)
8,300
def available ( ) : proc = popen_multiple ( COMMANDS , [ '-version' ] , stdout = subprocess . PIPE , stderr = subprocess . PIPE , creationflags = PROC_FLAGS , ) proc . wait ( ) return ( proc . returncode == 0 )
Detect if the FFmpeg backend can be used on this system .
8,301
def read_data ( self , timeout = 10.0 ) : start_time = time . time ( ) while True : data = None try : data = self . stdout_reader . queue . get ( timeout = timeout ) if data : yield data else : break except queue . Empty : end_time = time . time ( ) if not data : if end_time - start_time >= timeout : raise ReadTimeoutError ( 'ffmpeg output: {}' . format ( '' . join ( self . stderr_reader . queue . queue ) ) ) else : start_time = end_time continue
Read blocks of raw PCM data from the file .
8,302
def _get_info ( self ) : out_parts = [ ] while True : line = self . proc . stderr . readline ( ) if not line : raise CommunicationError ( "stream info not found" ) if isinstance ( line , bytes ) : line = line . decode ( 'utf8' , 'ignore' ) line = line . strip ( ) . lower ( ) if 'no such file' in line : raise IOError ( 'file not found' ) elif 'invalid data found' in line : raise UnsupportedError ( ) elif 'duration:' in line : out_parts . append ( line ) elif 'audio:' in line : out_parts . append ( line ) self . _parse_info ( '' . join ( out_parts ) ) break
Reads the tool's output from its stderr stream, extracts the relevant information, and parses it.
8,303
def _parse_info ( self , s ) : match = re . search ( r'(\d+) hz' , s ) if match : self . samplerate = int ( match . group ( 1 ) ) else : self . samplerate = 0 match = re . search ( r'hz, ([^,]+),' , s ) if match : mode = match . group ( 1 ) if mode == 'stereo' : self . channels = 2 else : cmatch = re . match ( r'(\d+)\.?(\d)?' , mode ) if cmatch : self . channels = sum ( map ( int , cmatch . group ( ) . split ( '.' ) ) ) else : self . channels = 1 else : self . channels = 0 match = re . search ( r'duration: (\d+):(\d+):(\d+).(\d)' , s ) if match : durparts = list ( map ( int , match . groups ( ) ) ) duration = ( durparts [ 0 ] * 60 * 60 + durparts [ 1 ] * 60 + durparts [ 2 ] + float ( durparts [ 3 ] ) / 10 ) self . duration = duration else : self . duration = 0
Given relevant data from the ffmpeg output set audio parameter fields on this object .
8,304
def close ( self ) : if hasattr ( self , 'proc' ) : self . proc . poll ( ) if self . proc . returncode is None : self . proc . kill ( ) self . proc . wait ( ) if hasattr ( self , 'stderr_reader' ) : self . stderr_reader . join ( ) if hasattr ( self , 'stdout_reader' ) : self . stdout_reader . join ( ) self . proc . stdout . close ( ) self . proc . stderr . close ( ) self . devnull . close ( )
Close the ffmpeg process used to perform the decoding .
8,305
def read_blocks ( self , block_size = 4096 ) : while True : out = self . mf . read ( block_size ) if not out : break yield bytes ( out )
Generates buffers containing PCM data for the audio file .
8,306
def channels ( self ) : if self . mf . mode ( ) == mad . MODE_SINGLE_CHANNEL : return 1 elif self . mf . mode ( ) in ( mad . MODE_DUAL_CHANNEL , mad . MODE_JOINT_STEREO , mad . MODE_STEREO ) : return 2 else : return 2
The number of channels .
8,307
def multi_char_literal ( chars ) : num = 0 for index , char in enumerate ( chars ) : shift = ( len ( chars ) - index - 1 ) * 8 num |= ord ( char ) << shift return num
Emulates character integer literals in C. Given a string 'abc', returns the value of the C single-quoted literal 'abc'.
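As a quick worked check of the packing (illustrative, not part of the original entry): each character contributes one byte, most significant first.

    # multi_char_literal('abc') packs 0x61, 0x62, 0x63 big-endian into one integer
    value = (ord('a') << 16) | (ord('b') << 8) | ord('c')
    assert value == 0x616263 == 6382179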
8,308
def _open_url ( cls , url ) : file_obj = ctypes . c_void_p ( ) check ( _coreaudio . ExtAudioFileOpenURL ( url . _obj , ctypes . byref ( file_obj ) ) ) return file_obj
Given a CFURL Python object return an opened ExtAudioFileRef .
8,309
def set_client_format ( self , desc ) : assert desc . mFormatID == AUDIO_ID_PCM check ( _coreaudio . ExtAudioFileSetProperty ( self . _obj , PROP_CLIENT_DATA_FORMAT , ctypes . sizeof ( desc ) , ctypes . byref ( desc ) ) ) self . _client_fmt = desc
Set the client format description. This describes the encoding of the data that the program will read from this object.
8,310
def get_file_format ( self ) : if self . _file_fmt is not None : return self . _file_fmt desc = AudioStreamBasicDescription ( ) size = ctypes . c_int ( ctypes . sizeof ( desc ) ) check ( _coreaudio . ExtAudioFileGetProperty ( self . _obj , PROP_FILE_DATA_FORMAT , ctypes . byref ( size ) , ctypes . byref ( desc ) ) ) self . _file_fmt = desc return desc
Get the file format description . This describes the type of data stored on disk .
8,311
def nframes ( self ) : length = ctypes . c_long ( ) size = ctypes . c_int ( ctypes . sizeof ( length ) ) check ( _coreaudio . ExtAudioFileGetProperty ( self . _obj , PROP_LENGTH , ctypes . byref ( size ) , ctypes . byref ( length ) ) ) return length . value
Gets the number of frames in the source file .
8,312
def setup ( self , bitdepth = 16 ) : fmt = self . get_file_format ( ) newfmt = copy . copy ( fmt ) newfmt . mFormatID = AUDIO_ID_PCM newfmt . mFormatFlags = PCM_IS_SIGNED_INT | PCM_IS_PACKED newfmt . mBitsPerChannel = bitdepth newfmt . mBytesPerPacket = ( fmt . mChannelsPerFrame * newfmt . mBitsPerChannel // 8 ) newfmt . mFramesPerPacket = 1 newfmt . mBytesPerFrame = newfmt . mBytesPerPacket self . set_client_format ( newfmt )
Set the client format parameters specifying the desired PCM audio data format to be read from the file . Must be called before reading from the file .
8,313
def read_data ( self , blocksize = 4096 ) : frames = ctypes . c_uint ( blocksize // self . _client_fmt . mBytesPerFrame ) buf = ctypes . create_string_buffer ( blocksize ) buflist = AudioBufferList ( ) buflist . mNumberBuffers = 1 buflist . mBuffers [ 0 ] . mNumberChannels = self . _client_fmt . mChannelsPerFrame buflist . mBuffers [ 0 ] . mDataByteSize = blocksize buflist . mBuffers [ 0 ] . mData = ctypes . cast ( buf , ctypes . c_void_p ) while True : check ( _coreaudio . ExtAudioFileRead ( self . _obj , ctypes . byref ( frames ) , ctypes . byref ( buflist ) ) ) assert buflist . mNumberBuffers == 1 size = buflist . mBuffers [ 0 ] . mDataByteSize if not size : break data = ctypes . cast ( buflist . mBuffers [ 0 ] . mData , ctypes . POINTER ( ctypes . c_char ) ) blob = data [ : size ] yield blob
Generates byte strings reflecting the audio data in the file .
8,314
def close ( self ) : if not self . closed : check ( _coreaudio . ExtAudioFileDispose ( self . _obj ) ) self . closed = True
Close the audio file and free associated memory .
8,315
def mount ( self , url , app ) : "Mount a sub-app at the url of current app." app . url = url self . mounts . append ( app )
Mount a sub-app at the URL of the current app.
8,316
def _map ( expr , func , rtype = None , resources = None , args = ( ) , ** kwargs ) : name = None if isinstance ( func , FunctionWrapper ) : if func . output_names : if len ( func . output_names ) > 1 : raise ValueError ( 'Map column has more than one name' ) name = func . output_names [ 0 ] if func . output_types : rtype = rtype or func . output_types [ 0 ] func = func . _func if rtype is None : rtype = utils . get_annotation_rtype ( func ) from . . . models import Function rtype = rtype or expr . dtype output_type = types . validate_data_type ( rtype ) if isinstance ( func , six . string_types ) : pass elif isinstance ( func , Function ) : pass elif inspect . isclass ( func ) : pass elif not callable ( func ) : raise ValueError ( '`func` must be a function or a callable class' ) collection_resources = utils . get_collection_resources ( resources ) is_seq = isinstance ( expr , SequenceExpr ) if is_seq : return MappedExpr ( _data_type = output_type , _func = func , _inputs = [ expr , ] , _func_args = args , _func_kwargs = kwargs , _name = name , _resources = resources , _collection_resources = collection_resources ) else : return MappedExpr ( _value_type = output_type , _func = func , _inputs = [ expr , ] , _func_args = args , _func_kwargs = kwargs , _name = name , _resources = resources , _collection_resources = collection_resources )
Call func on each element of this sequence .
8,317
def _hash ( expr , func = None ) : if func is None : func = lambda x : hash ( x ) return _map ( expr , func = func , rtype = types . int64 )
Calculate the hash value .
8,318
def _isnull ( expr ) : if isinstance ( expr , SequenceExpr ) : return IsNull ( _input = expr , _data_type = types . boolean ) elif isinstance ( expr , Scalar ) : return IsNull ( _input = expr , _value_type = types . boolean )
Return a sequence or scalar according to the input indicating if the values are null .
8,319
def _notnull ( expr ) : if isinstance ( expr , SequenceExpr ) : return NotNull ( _input = expr , _data_type = types . boolean ) elif isinstance ( expr , Scalar ) : return NotNull ( _input = expr , _value_type = types . boolean )
Return a sequence or scalar according to the input indicating if the values are not null .
8,320
def _fillna ( expr , value ) : if isinstance ( expr , SequenceExpr ) : return FillNa ( _input = expr , _fill_value = value , _data_type = expr . dtype ) elif isinstance ( expr , Scalar ) : return FillNa ( _input = expr , _fill_value = value , _value_type = expr . dtype )
Fill null with value .
8,321
def _isin ( expr , values ) : from . merge import _make_different_sources if isinstance ( values , SequenceExpr ) : expr , values = _make_different_sources ( expr , values ) if isinstance ( expr , SequenceExpr ) : return IsIn ( _input = expr , _values = values , _data_type = types . boolean ) elif isinstance ( expr , Scalar ) : return IsIn ( _input = expr , _values = values , _value_type = types . boolean )
Return a boolean sequence or scalar showing whether each element is exactly contained in the passed values .
8,322
def _notin ( expr , values ) : if isinstance ( expr , SequenceExpr ) : return NotIn ( _input = expr , _values = values , _data_type = types . boolean ) elif isinstance ( expr , Scalar ) : return NotIn ( _input = expr , _values = values , _value_type = types . boolean )
Return a boolean sequence or scalar showing whether each element is not contained in the passed values .
8,323
def _between ( expr , left , right , inclusive = True ) : if isinstance ( expr , SequenceExpr ) : return Between ( _input = expr , _left = left , _right = right , _inclusive = inclusive , _data_type = types . boolean ) elif isinstance ( expr , Scalar ) : return Between ( _input = expr , _left = left , _right = right , _inclusive = inclusive , _value_type = types . boolean )
Return a boolean sequence or scalar showing whether each element is between left and right.
8,324
def _ifelse ( expr , true_expr , false_expr ) : tps = ( SequenceExpr , Scalar ) if not isinstance ( true_expr , tps ) : true_expr = Scalar ( _value = true_expr ) if not isinstance ( false_expr , tps ) : false_expr = Scalar ( _value = false_expr ) output_type = utils . highest_precedence_data_type ( * [ true_expr . dtype , false_expr . dtype ] ) is_sequence = isinstance ( expr , SequenceExpr ) or isinstance ( true_expr , SequenceExpr ) or isinstance ( false_expr , SequenceExpr ) if is_sequence : return IfElse ( _input = expr , _then = true_expr , _else = false_expr , _data_type = output_type ) else : return IfElse ( _input = expr , _then = true_expr , _else = false_expr , _value_type = output_type )
Given a boolean sequence or scalar, return the first value (true_expr) where true, otherwise return the second (false_expr).
8,325
def _switch ( expr , * args , ** kw ) : default = _scalar ( kw . get ( 'default' ) ) if len ( args ) <= 0 : raise errors . ExpressionError ( 'Switch must accept more than one condition' ) if all ( isinstance ( arg , tuple ) and len ( arg ) == 2 for arg in args ) : conditions , thens = zip ( * args ) else : conditions = [ arg for i , arg in enumerate ( args ) if i % 2 == 0 ] thens = [ arg for i , arg in enumerate ( args ) if i % 2 == 1 ] if len ( conditions ) == len ( thens ) : conditions , thens = _scalar ( conditions ) , _scalar ( thens ) else : raise errors . ExpressionError ( 'Switch should be called by case and then pairs' ) if isinstance ( expr , ( Scalar , SequenceExpr ) ) : case = expr else : case = None if not all ( hasattr ( it , 'dtype' ) and it . dtype == types . boolean for it in conditions ) : raise errors . ExpressionError ( 'Switch must be called by all boolean conditions' ) res = thens if default is None else thens + [ default , ] output_type = utils . highest_precedence_data_type ( * ( it . dtype for it in res ) ) is_seq = isinstance ( expr , SequenceExpr ) or any ( isinstance ( it , SequenceExpr ) for it in conditions ) or any ( isinstance ( it , SequenceExpr ) for it in res ) if case is not None : is_seq = is_seq or isinstance ( case , SequenceExpr ) kwargs = dict ( ) if is_seq : kwargs [ '_data_type' ] = output_type else : kwargs [ '_value_type' ] = output_type return Switch ( _input = expr , _case = case , _conditions = conditions , _thens = thens , _default = default , ** kwargs )
Similar to CASE ... WHEN in SQL. Refer to the example below.
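A hypothetical usage sketch of the condition/then pairing that _switch accepts (df and score are made-up names, and the module-level helper is called directly; the public method name may differ):

    # equivalent to SQL: CASE WHEN score < 60 THEN 'fail' WHEN score < 90 THEN 'pass' ELSE 'excellent' END
    grade = _switch(df,
                    df.score < 60, 'fail',
                    df.score < 90, 'pass',
                    default='excellent')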
8,326
def _cut ( expr , bins , right = True , labels = None , include_lowest = False , include_under = False , include_over = False ) : is_seq = isinstance ( expr , SequenceExpr ) dtype = utils . highest_precedence_data_type ( * ( types . validate_value_type ( it ) for it in labels ) ) if labels is not None else types . int64 kw = { } if is_seq : kw [ '_data_type' ] = dtype else : kw [ '_value_type' ] = dtype return Cut ( _input = expr , _bins = bins , _right = right , _labels = labels , _include_lowest = include_lowest , _include_under = include_under , _include_over = include_over , ** kw )
Return indices of half - open bins to which each value of expr belongs .
8,327
def _int_to_datetime ( expr ) : if isinstance ( expr , SequenceExpr ) : return IntToDatetime ( _input = expr , _data_type = types . datetime ) elif isinstance ( expr , Scalar ) : return IntToDatetime ( _input = expr , _value_type = types . datetime )
Return a sequence or scalar that is the datetime value of the current numeric sequence or scalar .
8,328
def get_ddl ( self , with_comments = True , if_not_exists = False ) : shard_num = self . shard . shard_num if self . shard is not None else None return self . gen_create_table_sql ( self . name , self . schema , self . comment if with_comments else None , if_not_exists = if_not_exists , with_column_comments = with_comments , lifecycle = self . lifecycle , shard_num = shard_num , project = self . project . name , storage_handler = self . storage_handler , serde_properties = self . serde_properties , location = self . location , resources = self . resources , )
Get DDL SQL statement for the given table .
8,329
def head ( self , limit , partition = None , columns = None ) : if limit <= 0 : raise ValueError ( 'limit number should >= 0.' ) params = { 'data' : '' , 'linenum' : limit } if partition is not None : if not isinstance ( partition , odps_types . PartitionSpec ) : partition = odps_types . PartitionSpec ( partition ) params [ 'partition' ] = str ( partition ) if columns is not None and len ( columns ) > 0 : col_name = lambda col : col . name if isinstance ( col , odps_types . Column ) else col params [ 'cols' ] = ',' . join ( col_name ( col ) for col in columns ) resp = self . _client . get ( self . resource ( ) , params = params , stream = True ) with readers . RecordReader ( self . schema , resp ) as reader : for record in reader : yield record
Get the head records of a table or its partition .
8,330
def open_reader ( self , partition = None , ** kw ) : from . . tunnel . tabletunnel import TableDownloadSession reopen = kw . pop ( 'reopen' , False ) endpoint = kw . pop ( 'endpoint' , None ) download_id = kw . pop ( 'download_id' , None ) tunnel = self . _create_table_tunnel ( endpoint = endpoint ) if download_id is None : download_ids = self . _download_ids download_id = download_ids . get ( partition ) if not reopen else None download_session = tunnel . create_download_session ( table = self , partition_spec = partition , download_id = download_id , ** kw ) if download_id and download_session . status != TableDownloadSession . Status . Normal : download_session = tunnel . create_download_session ( table = self , partition_spec = partition , ** kw ) download_ids [ partition ] = download_session . id class RecordReader ( readers . AbstractRecordReader ) : def __init__ ( self ) : self . _it = iter ( self ) @ property def download_id ( self ) : return download_session . id @ property def count ( self ) : return download_session . count @ property def status ( self ) : return download_session . status def __iter__ ( self ) : for record in self . read ( ) : yield record def __next__ ( self ) : return next ( self . _it ) next = __next__ def _iter ( self , start = None , end = None , step = None ) : count = self . _calc_count ( start , end , step ) return self . read ( start = start , count = count , step = step ) def read ( self , start = None , count = None , step = None , compress = False , columns = None ) : start = start or 0 step = step or 1 count = count * step if count is not None else self . count - start if count == 0 : return with download_session . open_record_reader ( start , count , compress = compress , columns = columns ) as reader : for record in reader [ : : step ] : yield record def __enter__ ( self ) : return self def __exit__ ( self , exc_type , exc_val , exc_tb ) : pass return RecordReader ( )
Open the reader to read the entire records from this table or its partition .
8,331
def create_partition ( self , partition_spec , if_not_exists = False , async_ = False , ** kw ) : async_ = kw . get ( 'async' , async_ ) return self . partitions . create ( partition_spec , if_not_exists = if_not_exists , async_ = async_ )
Create a partition within the table .
8,332
def delete_partition ( self , partition_spec , if_exists = False , async_ = False , ** kw ) : async_ = kw . get ( 'async' , async_ ) return self . partitions . delete ( partition_spec , if_exists = if_exists , async_ = async_ )
Delete a partition within the table .
8,333
def exist_partitions ( self , prefix_spec = None ) : try : next ( self . partitions . iterate_partitions ( spec = prefix_spec ) ) except StopIteration : return False return True
Check if partitions with provided conditions exist .
8,334
def truncate ( self , async_ = False , ** kw ) : from . tasks import SQLTask async_ = kw . get ( 'async' , async_ ) task = SQLTask ( name = 'SQLTruncateTableTask' , query = 'truncate table %s.%s' % ( self . project . name , self . name ) ) instance = self . project . parent [ self . _client . project ] . instances . create ( task = task ) if not async_ : instance . wait_for_success ( ) else : return instance
Truncate this table.
8,335
def drop ( self , async_ = False , if_exists = False , ** kw ) : async_ = kw . get ( 'async' , async_ ) return self . parent . delete ( self , async_ = async_ , if_exists = if_exists )
Drop this table .
8,336
def _strftime ( expr , date_format ) : return datetime_op ( expr , Strftime , output_type = types . string , _date_format = date_format )
Return formatted strings specified by date_format, which supports the same format codes as the Python standard library. Details of the format codes can be found in the Python strftime documentation.
8,337
def sort_values ( expr , by , ascending = True ) : if not isinstance ( by , list ) : by = [ by , ] by = [ it ( expr ) if inspect . isfunction ( it ) else it for it in by ] return SortedCollectionExpr ( expr , _sorted_fields = by , _ascending = ascending , _schema = expr . _schema )
Sort the collection by values. sort is an alias for sort_values.
8,338
def distinct ( expr , on = None , * ons ) : on = on or list ( ) if not isinstance ( on , list ) : on = [ on , ] on = on + list ( ons ) on = [ it ( expr ) if inspect . isfunction ( it ) else it for it in on ] return DistinctCollectionExpr ( expr , _unique_fields = on , _all = ( len ( on ) == 0 ) )
Get a collection with duplicate rows removed, optionally considering only certain columns.
8,339
def reshuffle ( expr , by = None , sort = None , ascending = True ) : by = by or RandomScalar ( ) grouped = expr . groupby ( by ) if sort : grouped = grouped . sort_values ( sort , ascending = ascending ) return ReshuffledCollectionExpr ( _input = grouped , _schema = expr . _schema )
Reshuffle data .
8,340
def std_scale ( expr , columns = None , with_means = True , with_std = True , preserve = False , suffix = '_scaled' , group = None ) : time_suffix = str ( int ( time . time ( ) ) ) def calc_agg ( expr , col ) : return [ getattr ( expr , col ) . mean ( ) . rename ( col + '_mean_' + time_suffix ) , getattr ( expr , col ) . std ( ddof = 0 ) . rename ( col + '_std_' + time_suffix ) , ] def do_scale ( expr , col ) : c = getattr ( expr , col ) mean_expr = getattr ( expr , col + '_mean_' + time_suffix ) if with_means : c = c - mean_expr mean_expr = Scalar ( 0 ) if with_std : std_expr = getattr ( expr , col + '_std_' + time_suffix ) c = ( std_expr == 0 ) . ifelse ( mean_expr , c / getattr ( expr , col + '_std_' + time_suffix ) ) return c return _scale_values ( expr , columns , calc_agg , do_scale , preserve = preserve , suffix = suffix , group = group )
Scale the columns of a DataFrame by mean and standard deviation.
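In effect (reading the calc_agg and do_scale helpers above), when both with_means and with_std are set each selected column x is transformed to

    (x - mean(x)) / std(x)    # population standard deviation, ddof=0

and a column whose standard deviation is zero is mapped to 0 (or to its mean when with_means is False).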
8,341
def extract_kv ( expr , columns = None , kv_delim = ':' , item_delim = ',' , dtype = 'float' , fill_value = None ) : if columns is None : columns = [ expr . _get_field ( c ) for c in expr . schema . names ] intact_cols = [ ] else : columns = [ expr . _get_field ( c ) for c in utils . to_list ( columns ) ] name_set = set ( [ c . name for c in columns ] ) intact_cols = [ expr . _get_field ( c ) for c in expr . schema . names if c not in name_set ] column_type = types . validate_data_type ( dtype ) if any ( not isinstance ( c . dtype , types . String ) for c in columns ) : raise ExpressionError ( 'Key-value columns must be strings.' ) schema = DynamicSchema . from_lists ( [ c . name for c in intact_cols ] , [ c . dtype for c in intact_cols ] ) return ExtractKVCollectionExpr ( _input = expr , _columns = columns , _intact = intact_cols , _schema = schema , _column_type = column_type , _default = fill_value , _kv_delimiter = kv_delim , _item_delimiter = item_delim )
Extract values in key-value represented columns into standalone columns. New column names will be the name of the key-value column followed by an underscore and the key.
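A hypothetical illustration of that naming rule (column and key names are made up; defaults kv_delim=':', item_delim=',', dtype='float'):

    # input string column 'kv'   ->  extracted columns
    # 'a:1,b:2'                      kv_a=1.0, kv_b=2.0
    # 'b:5'                          kv_a=fill_value (None, i.e. missing, by default), kv_b=5.0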
8,342
def to_kv ( expr , columns = None , kv_delim = ':' , item_delim = ',' , kv_name = 'kv_col' ) : if columns is None : columns = [ expr . _get_field ( c ) for c in expr . schema . names ] intact_cols = [ ] else : columns = [ expr . _get_field ( c ) for c in utils . to_list ( columns ) ] name_set = set ( [ c . name for c in columns ] ) intact_cols = [ expr . _get_field ( c ) for c in expr . schema . names if c not in name_set ] mapped_cols = [ c . isnull ( ) . ifelse ( Scalar ( '' ) , c . name + kv_delim + c . astype ( 'string' ) ) for c in columns ] reduced_col = reduce ( lambda a , b : ( b == '' ) . ifelse ( a , ( a == '' ) . ifelse ( b , a + item_delim + b ) ) , mapped_cols ) return expr . __getitem__ ( intact_cols + [ reduced_col . rename ( kv_name ) ] )
Merge values in specified columns into a key-value represented column.
8,343
def dropna ( expr , how = 'any' , thresh = None , subset = None ) : if subset is None : subset = [ expr . _get_field ( c ) for c in expr . schema . names ] else : subset = [ expr . _get_field ( c ) for c in utils . to_list ( subset ) ] if not subset : raise ValueError ( 'Illegal subset is provided.' ) if thresh is None : thresh = len ( subset ) if how == 'any' else 1 sum_exprs = reduce ( operator . add , ( s . notnull ( ) . ifelse ( 1 , 0 ) for s in subset ) ) return expr . filter ( sum_exprs >= thresh )
Return an object with labels on the given axis omitted where, depending on how, any or all of the data are missing.
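Concretely, a worked reading of the thresh logic above (hypothetical column names):

    # subset has 3 columns c1, c2, c3:
    #   how='any' -> thresh = 3  (every subset column must be non-null)
    #   how='all' -> thresh = 1  (at least one subset column must be non-null)
    # a row is kept iff c1.notnull() + c2.notnull() + c3.notnull() >= thresh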
8,344
def append_id ( expr , id_col = 'append_id' ) : if hasattr ( expr , '_xflow_append_id' ) : return expr . _xflow_append_id ( id_col ) else : return _append_id ( expr , id_col )
Append an ID column to the current DataFrame to form a new DataFrame.
8,345
def split ( expr , frac , seed = None ) : if hasattr ( expr , '_xflow_split' ) : return expr . _xflow_split ( frac , seed = seed ) else : return _split ( expr , frac , seed = seed )
Split the current DataFrame into two objects with a certain ratio.
8,346
def applymap ( expr , func , rtype = None , resources = None , columns = None , excludes = None , args = ( ) , ** kwargs ) : if columns is not None and excludes is not None : raise ValueError ( '`columns` and `excludes` cannot be provided at the same time.' ) if not columns : excludes = excludes or [ ] if isinstance ( excludes , six . string_types ) : excludes = [ excludes ] excludes = set ( [ c if isinstance ( c , six . string_types ) else c . name for c in excludes ] ) columns = set ( [ c for c in expr . schema . names if c not in excludes ] ) else : if isinstance ( columns , six . string_types ) : columns = [ columns ] columns = set ( [ c if isinstance ( c , six . string_types ) else c . name for c in columns ] ) mapping = [ expr [ c ] if c not in columns else expr [ c ] . map ( func , rtype = rtype , resources = resources , args = args , ** kwargs ) for c in expr . schema . names ] return expr . select ( * mapping )
Call func on each element of this collection .
8,347
def read_string ( self , size ) : if size < 0 : raise errors . DecodeError ( 'Negative size %d' % size ) s = self . _input . read ( size ) if len ( s ) != size : raise errors . DecodeError ( 'String claims to have %d bytes, but read %d' % ( size , len ( s ) ) ) self . _pos += len ( s ) return s
Reads up to size bytes from the stream stopping early only if we reach the end of the stream . Returns the bytes read as a string .
8,348
def read_little_endian32 ( self ) : try : i = struct . unpack ( wire_format . FORMAT_UINT32_LITTLE_ENDIAN , self . _input . read ( 4 ) ) self . _pos += 4 return i [ 0 ] except struct . error as e : raise errors . DecodeError ( e )
Interprets the next 4 bytes of the stream as a little-endian encoded unsigned 32-bit integer and returns that integer.
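Assuming FORMAT_UINT32_LITTLE_ENDIAN is the usual struct spec '<I' (an assumption, not shown in the snippet), the byte interpretation looks like this:

    import struct
    assert struct.unpack('<I', b'\x01\x00\x00\x00')[0] == 1
    assert struct.unpack('<I', b'\x78\x56\x34\x12')[0] == 0x12345678  # least significant byte first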
8,349
def read_little_endian64 ( self ) : try : i = struct . unpack ( wire_format . FORMAT_UINT64_LITTLE_ENDIAN , self . _input . read ( 8 ) ) self . _pos += 8 return i [ 0 ] except struct . error as e : raise errors . DecodeError ( e )
Interprets the next 8 bytes of the stream as a little-endian encoded unsigned 64-bit integer and returns that integer.
8,350
def read_varint32 ( self ) : i = self . read_varint64 ( ) if not wire_format . INT32_MIN <= i <= wire_format . INT32_MAX : raise errors . DecodeError ( 'Value out of range for int32: %d' % i ) return int ( i )
Reads a varint from the stream, interprets it as a signed 32-bit integer, and returns the integer.
8,351
def read_var_uint32 ( self ) : i = self . read_var_uint64 ( ) if i > wire_format . UINT32_MAX : raise errors . DecodeError ( 'Value out of range for uint32: %d' % i ) return i
Reads a varint from the stream, interprets it as an unsigned 32-bit integer, and returns the integer.
8,352
def read_varint64 ( self ) : i = self . read_var_uint64 ( ) if i > wire_format . INT64_MAX : i -= ( 1 << 64 ) return i
Reads a varint from the stream, interprets it as a signed 64-bit integer, and returns the integer.
8,353
def read_var_uint64 ( self ) : i = self . _read_varint_helper ( ) if not 0 <= i <= wire_format . UINT64_MAX : raise errors . DecodeError ( 'Value out of range for uint64: %d' % i ) return i
Reads a varint from the stream, interprets it as an unsigned 64-bit integer, and returns the integer.
8,354
def _read_varint_helper ( self ) : result = 0 shift = 0 while 1 : if shift >= 64 : raise errors . DecodeError ( 'Too many bytes when decoding varint.' ) try : b = ord ( self . _input . read ( 1 ) ) except IndexError : raise errors . DecodeError ( 'Truncated varint.' ) self . _pos += 1 result |= ( ( b & 0x7f ) << shift ) shift += 7 if not ( b & 0x80 ) : return result
Helper for the various varint-reading methods above. Reads an unsigned varint-encoded integer from the stream and returns this integer.
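A self-contained worked example of that decoding loop; the bytes 0xAC 0x02 are the standard varint encoding of 300:

    data = bytes([0xAC, 0x02])
    result, shift = 0, 0
    for b in data:
        result |= (b & 0x7F) << shift   # each byte carries 7 payload bits, lowest group first
        shift += 7
        if not (b & 0x80):              # a clear high bit marks the final byte
            break
    assert result == 300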
8,355
def append_tag ( self , field_number , wire_type ) : self . _stream . append_var_uint32 ( wire_format . pack_tag ( field_number , wire_type ) )
Appends a tag containing field number and wire type information .
8,356
def append_sint32 ( self , value ) : zigzag_value = wire_format . zig_zag_encode ( value ) self . _stream . append_var_uint32 ( zigzag_value )
Appends a 32-bit integer to our buffer, zigzag-encoded and then varint-encoded.
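For reference, the ZigZag mapping (which wire_format.zig_zag_encode is assumed to implement in the standard way) interleaves negative and positive values so small magnitudes encode to short varints:

    def zig_zag_encode32(n):
        # arithmetic right shift spreads the sign bit across the result
        return (n << 1) ^ (n >> 31)

    assert [zig_zag_encode32(v) for v in (0, -1, 1, -2, 2)] == [0, 1, 2, 3, 4]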
8,357
def append_sint64 ( self , value ) : zigzag_value = wire_format . zig_zag_encode ( value ) self . _stream . append_var_uint64 ( zigzag_value )
Appends a 64-bit integer to our buffer, zigzag-encoded and then varint-encoded.
8,358
def append_sfixed32 ( self , value ) : sign = ( value & 0x80000000 ) and - 1 or 0 if value >> 32 != sign : raise errors . EncodeError ( 'SFixed32 out of range: %d' % value ) self . _stream . append_little_endian32 ( value & 0xffffffff )
Appends a signed 32-bit integer to our buffer in little-endian byte order.
8,359
def append_sfixed64 ( self , value ) : sign = ( value & 0x8000000000000000 ) and - 1 or 0 if value >> 64 != sign : raise errors . EncodeError ( 'SFixed64 out of range: %d' % value ) self . _stream . append_little_endian64 ( value & 0xffffffffffffffff )
Appends a signed 64-bit integer to our buffer in little-endian byte order.
8,360
def append_string ( self , value ) : self . _stream . append_var_uint32 ( len ( value ) ) self . _stream . append_raw_bytes ( value )
Appends a length-prefixed string to our buffer, with the length varint-encoded.
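A worked example of the resulting wire layout (standard length-prefixed encoding; a length up to 127 fits in a single varint byte):

    payload = b'abc'
    encoded = bytes([len(payload)]) + payload   # varint(3) is the single byte 0x03
    assert encoded == b'\x03abc'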
8,361
def get_SHA1_bin ( word ) : from hashlib import sha1 if PY3 and isinstance ( word , str ) : word = word . encode ( 'utf-8' ) hash_s = sha1 ( ) hash_s . update ( word ) return bin ( int ( hash_s . hexdigest ( ) , 16 ) ) [ 2 : ] . zfill ( 160 )
Return the SHA1 hash of a string as a 160-character binary string.
8,362
def get_index ( binstr , end_index = 160 ) : res = - 1 try : res = binstr . index ( '1' ) + 1 except ValueError : res = end_index return res
Return the position (1-based) of the first 1 bit from the left in the word, or end_index if no 1 bit is found.
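Two quick checks of that rank computation, restated compactly so the snippet runs on its own (positions are 1-based; an all-zero string falls back to end_index):

    def first_one_position(binstr, end_index=160):
        try:
            return binstr.index('1') + 1
        except ValueError:
            return end_index

    assert first_one_position('001' + '0' * 157) == 3   # first '1' is the third bit
    assert first_one_position('0' * 160) == 160          # all zeros -> end_index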
8,363
def _estimate ( self , buffer ) : m = self . _bucket_number raw_e = self . _alpha * pow ( m , 2 ) / sum ( [ pow ( 2 , - x ) for x in buffer ] ) if raw_e <= 5 / 2.0 * m : v = buffer . count ( 0 ) if v != 0 : return m * log ( m / float ( v ) , 2 ) else : return raw_e elif raw_e <= 1 / 30.0 * 2 ** 160 : return raw_e else : return - 2 ** 160 * log ( 1 - raw_e / 2.0 ** 160 , 2 )
Return the estimate of the cardinality
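For reference, this follows the usual HyperLogLog estimate-plus-corrections structure: the raw estimate is

    E = alpha * m**2 / sum(2**(-M[j]) for j in range(m))

with m buckets and per-bucket maxima M[j]; when E <= 2.5*m and V buckets are still zero, the small-range correction m * log2(m / V) is returned instead, and above 2**160 / 30 the large-range correction -2**160 * log2(1 - E / 2**160) applies (this implementation uses base-2 logarithms and a 160-bit hash width).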
8,364
def merge ( self , buffer , other_hyper_log_log ) : for i in range ( len ( buffer ) ) : buffer [ i ] = max ( buffer [ i ] , other_hyper_log_log [ i ] )
Merge another HyperLogLog into the given bucket buffer by taking the per-bucket maximum.
8,365
def read_string ( self ) : length = self . _stream . read_var_uint32 ( ) return self . _stream . read_string ( length )
Reads and returns a length-delimited string.
8,366
def _chk_truncate ( self ) : self . tr_size_col = - 1 max_cols = self . max_cols max_rows = self . max_rows if max_cols == 0 or max_rows == 0 : ( w , h ) = get_terminal_size ( ) self . w = w self . h = h if self . max_rows == 0 : dot_row = 1 prompt_row = 1 if self . show_dimensions : show_dimension_rows = 3 n_add_rows = self . header + dot_row + show_dimension_rows + prompt_row max_rows_adj = self . h - n_add_rows self . max_rows_adj = max_rows_adj if max_cols == 0 and len ( self . frame . columns ) > w : max_cols = w if max_rows == 0 and len ( self . frame ) > h : max_rows = h if not hasattr ( self , 'max_rows_adj' ) : self . max_rows_adj = max_rows if not hasattr ( self , 'max_cols_adj' ) : self . max_cols_adj = max_cols max_cols_adj = self . max_cols_adj max_rows_adj = self . max_rows_adj truncate_h = max_cols_adj and ( len ( self . columns ) > max_cols_adj ) truncate_v = max_rows_adj and ( len ( self . frame ) > max_rows_adj ) frame = self . frame if truncate_h : if max_cols_adj == 0 : col_num = len ( frame . columns ) elif max_cols_adj == 1 : frame = frame [ : , : max_cols ] col_num = max_cols else : col_num = ( max_cols_adj // 2 ) frame = frame [ : , : col_num ] . concat ( frame [ : , - col_num : ] , axis = 1 ) self . tr_col_num = col_num if truncate_v : if max_rows_adj == 0 : row_num = len ( frame ) if max_rows_adj == 1 : row_num = max_rows frame = frame [ : max_rows , : ] else : row_num = max_rows_adj // 2 frame = frame [ : row_num , : ] . concat ( frame [ - row_num : , : ] ) self . tr_row_num = row_num self . tr_frame = frame self . truncate_h = truncate_h self . truncate_v = truncate_v self . is_truncated = self . truncate_h or self . truncate_v
Checks whether the frame should be truncated. If so, slices the frame up.
8,367
def build_input_table ( cls , name = 'inputTableName' , input_name = 'input' ) : obj = cls ( name ) obj . exporter = 'get_input_table_name' obj . input_name = input_name return obj
Build an input table parameter
8,368
def build_input_partitions ( cls , name = 'inputTablePartitions' , input_name = 'input' ) : obj = cls ( name ) obj . exporter = 'get_input_partitions' obj . input_name = input_name return obj
Build an input table partition parameter
8,369
def build_output_table ( cls , name = 'inputTableName' , output_name = 'output' ) : obj = cls ( name ) obj . exporter = 'get_output_table_name' obj . output_name = output_name return obj
Build an output table parameter
8,370
def build_output_partitions ( cls , name = 'inputTablePartitions' , output_name = 'output' ) : obj = cls ( name ) obj . exporter = 'get_output_table_partition' obj . output_name = output_name return obj
Build an output table partition parameter
8,371
def build_model_name ( cls , name = 'modelName' , output_name = 'output' ) : obj = cls ( name ) obj . exporter = 'generate_model_name' obj . output_name = output_name return obj
Build an output model name parameter .
8,372
def build_data_input ( cls , name = 'input' ) : return cls ( name , PortDirection . INPUT , type = PortType . DATA )
Build a data input port .
8,373
def build_data_output ( cls , name = 'output' , copy_input = None , schema = None ) : return cls ( name , PortDirection . OUTPUT , type = PortType . DATA , copy_input = copy_input , schema = schema )
Build a data output port .
8,374
def build_model_input ( cls , name = 'input' ) : return cls ( name , PortDirection . INPUT , type = PortType . MODEL )
Build a model input port .
8,375
def build_model_output ( cls , name = 'output' ) : return cls ( name , PortDirection . OUTPUT , type = PortType . MODEL )
Build a model output port .
8,376
def add_port ( self , port ) : self . ports . append ( port ) if port . io_type not in self . port_seqs : self . port_seqs [ port . io_type ] = 0 self . port_seqs [ port . io_type ] += 1 port . sequence = self . port_seqs [ port . io_type ] return self
Add a port object to the definition
8,377
def add_meta ( self , name , value ) : for mt in self . metas : if mt . name == name : mt . value = value return self self . metas . append ( MetaDef ( name , value ) ) return self
Add a metadata name/value pair to the definition.
8,378
def serialize ( self ) : for keys , groups in groupby ( self . ports , lambda x : x . io_type ) : for seq , port in enumerate ( groups ) : port . sequence = seq return super ( AlgorithmDef , self ) . serialize ( )
Serialize the algorithm definition
8,379
def _count ( expr , pat , flags = 0 ) : return _string_op ( expr , Count , output_type = types . int64 , _pat = pat , _flags = flags )
Count occurrences of pattern in each string of the sequence or scalar
8,380
def _extract ( expr , pat , flags = 0 , group = 0 ) : return _string_op ( expr , Extract , _pat = pat , _flags = flags , _group = group )
Find the group in each string of the sequence or scalar using the passed regular expression.
8,381
def _pad ( expr , width , side = 'left' , fillchar = ' ' ) : if not isinstance ( fillchar , six . string_types ) : msg = 'fillchar must be a character, not {0}' raise TypeError ( msg . format ( type ( fillchar ) . __name__ ) ) if len ( fillchar ) != 1 : raise TypeError ( 'fillchar must be a character, not str' ) if side not in ( 'left' , 'right' , 'both' ) : raise ValueError ( 'Invalid side' ) return _string_op ( expr , Pad , _width = width , _side = side , _fillchar = fillchar )
Pad strings in the sequence or scalar with an additional character on the specified side.
8,382
def _slice ( expr , start = None , stop = None , step = None ) : return _string_op ( expr , Slice , _start = start , _end = stop , _step = step )
Slice substrings from each element in the sequence or scalar
8,383
def _strptime ( expr , date_format ) : return _string_op ( expr , Strptime , _date_format = date_format , output_type = types . datetime )
Return datetimes specified by date_format, which supports the same format codes as the Python standard library. Details of the format codes can be found in the Python strptime documentation.
8,384
def _list_tables_model ( self , prefix = '' , project = None ) : tset = set ( ) if prefix . startswith ( TEMP_TABLE_PREFIX ) : prefix = TEMP_TABLE_MODEL_PREFIX + prefix [ len ( TEMP_TABLE_PREFIX ) : ] it = self . list_tables ( project = project , prefix = prefix ) else : it = self . list_tables ( project = project , prefix = TABLE_MODEL_PREFIX + prefix ) if TEMP_TABLE_PREFIX . startswith ( prefix ) : new_iter = self . list_tables ( project = project , prefix = TEMP_TABLE_MODEL_PREFIX ) it = itertools . chain ( it , new_iter ) for table in it : if TABLE_MODEL_SEPARATOR not in table . name : continue if not table . name . startswith ( TEMP_TABLE_MODEL_PREFIX ) and not table . name . startswith ( TABLE_MODEL_PREFIX ) : continue model_name , _ = table . name . rsplit ( TABLE_MODEL_SEPARATOR , 1 ) if model_name . startswith ( TEMP_TABLE_MODEL_PREFIX ) : model_name = TEMP_TABLE_PREFIX + model_name [ len ( TEMP_TABLE_MODEL_PREFIX ) : ] else : model_name = model_name [ len ( TABLE_MODEL_PREFIX ) : ] if model_name not in tset : tset . add ( model_name ) yield TablesModelObject ( _odps = self , name = model_name , project = project )
List all TablesModel in the given project .
8,385
def open_reader ( self , file_name , reopen = False , endpoint = None , start = None , length = None , ** kwargs ) : tunnel = self . _create_volume_tunnel ( endpoint = endpoint ) download_id = self . _download_id if not reopen else None download_session = tunnel . create_download_session ( volume = self . volume . name , partition_spec = self . name , file_name = file_name , download_id = download_id , ** kwargs ) self . _download_id = download_session . id open_args = { } if start is not None : open_args [ 'start' ] = start if length is not None : open_args [ 'length' ] = length return download_session . open ( ** open_args )
Open a volume file for reading. A file-like object will be returned, which can be used to read contents from volume files.
8,386
def open_writer ( self , reopen = False , endpoint = None , ** kwargs ) : tunnel = self . _create_volume_tunnel ( endpoint = endpoint ) upload_id = self . _upload_id if not reopen else None upload_session = tunnel . create_upload_session ( volume = self . volume . name , partition_spec = self . name , upload_id = upload_id , ** kwargs ) file_dict = dict ( ) class FilesWriter ( object ) : @ property def status ( self ) : return upload_session . status @ staticmethod def open ( file_name , ** kwargs ) : if file_name in file_dict : return file_dict [ file_name ] writer = upload_session . open ( file_name , ** kwargs ) file_dict [ file_name ] = writer return writer @ staticmethod def write ( file_name , buf , ** kwargs ) : writer = FilesWriter . open ( file_name , ** kwargs ) writer . write ( buf ) @ staticmethod def close ( ) : for w in six . itervalues ( file_dict ) : w . close ( ) upload_session . commit ( list ( six . iterkeys ( file_dict ) ) ) def __enter__ ( self ) : return self def __exit__ ( self , exc_type , exc_val , exc_tb ) : self . close ( ) return FilesWriter ( )
Open a volume partition to write to. You can use the open method to open a file inside the volume and write to it, or use the write method to write to specific files.
8,387
def batch_persist ( dfs , tables , * args , ** kwargs ) : from . delay import Delay if 'async' in kwargs : kwargs [ 'async_' ] = kwargs [ 'async' ] execute_keys = ( 'ui' , 'async_' , 'n_parallel' , 'timeout' , 'close_and_notify' ) execute_kw = dict ( ( k , v ) for k , v in six . iteritems ( kwargs ) if k in execute_keys ) persist_kw = dict ( ( k , v ) for k , v in six . iteritems ( kwargs ) if k not in execute_keys ) delay = Delay ( ) persist_kw [ 'delay' ] = delay for df , table in izip ( dfs , tables ) : if isinstance ( table , tuple ) : table , partition = table else : partition = None df . persist ( table , partition = partition , * args , ** persist_kw ) return delay . execute ( ** execute_kw )
Persist multiple DataFrames into ODPS .
8,388
def _repr_fits_horizontal_ ( self , ignore_width = False ) : width , height = get_console_size ( ) max_columns = options . display . max_columns nb_columns = len ( self . columns ) if ( ( max_columns and nb_columns > max_columns ) or ( ( not ignore_width ) and width and nb_columns > ( width // 2 ) ) ) : return False if ( ignore_width or not in_interactive_session ( ) ) : return True if ( options . display . width is not None or in_ipython_frontend ( ) ) : max_rows = 1 else : max_rows = options . display . max_rows buf = six . StringIO ( ) d = self if not ( max_rows is None ) : d = d [ : min ( max_rows , len ( d ) ) ] else : return True d . to_string ( buf = buf ) value = buf . getvalue ( ) repr_width = max ( [ len ( l ) for l in value . split ( '\n' ) ] ) return repr_width < width
Check if the full repr fits in the horizontal boundaries imposed by the display options width and max_columns. In the case of a non-interactive session, no boundaries apply.
8,389
def _repr_html_ ( self ) : if self . _pandas and options . display . notebook_repr_widget : from . . import DataFrame from . . ui import show_df_widget show_df_widget ( DataFrame ( self . _values , schema = self . schema ) ) if self . _pandas : return self . _values . _repr_html_ ( ) if in_qtconsole ( ) : return None if options . display . notebook_repr_html : max_rows = options . display . max_rows max_cols = options . display . max_columns show_dimensions = options . display . show_dimensions return self . to_html ( max_rows = max_rows , max_cols = max_cols , show_dimensions = show_dimensions , notebook = True ) else : return None
Return an HTML representation for a particular DataFrame. Mainly for the IPython notebook.
8,390
def to_html ( self , buf = None , columns = None , col_space = None , header = True , index = True , na_rep = 'NaN' , formatters = None , float_format = None , sparsify = None , index_names = True , justify = None , bold_rows = True , classes = None , escape = True , max_rows = None , max_cols = None , show_dimensions = False , notebook = False ) : formatter = fmt . ResultFrameFormatter ( self , buf = buf , columns = columns , col_space = col_space , na_rep = na_rep , formatters = formatters , float_format = float_format , sparsify = sparsify , justify = justify , index_names = index_names , header = header , index = index , bold_rows = bold_rows , escape = escape , max_rows = max_rows , max_cols = max_cols , show_dimensions = show_dimensions ) formatter . to_html ( classes = classes , notebook = notebook ) if buf is None : return formatter . buf . getvalue ( )
Render a DataFrame as an HTML table .
8,391
def get_localzone ( ) : global _cache_tz if _cache_tz is None : _cache_tz = pytz . timezone ( get_localzone_name ( ) ) utils . assert_tz_offset ( _cache_tz ) return _cache_tz
Returns the zoneinfo-based tzinfo object that matches the Windows-configured timezone.
8,392
def reload_localzone ( ) : global _cache_tz _cache_tz = pytz . timezone ( get_localzone_name ( ) ) utils . assert_tz_offset ( _cache_tz ) return _cache_tz
Reload the cached localzone . You need to call this if the timezone has changed .
8,393
def update ( self , async_ = False , ** kw ) : async_ = kw . get ( 'async' , async_ ) headers = { 'Content-Type' : 'application/xml' } new_kw = dict ( ) if self . offline_model_name : upload_keys = ( '_parent' , 'name' , 'offline_model_name' , 'offline_model_project' , 'qos' , 'instance_num' ) else : upload_keys = ( '_parent' , 'name' , 'qos' , '_model_resource' , 'instance_num' , 'predictor' , 'runtime' ) for k in upload_keys : new_kw [ k ] = getattr ( self , k ) new_kw . update ( kw ) obj = type ( self ) ( version = '0' , ** new_kw ) data = obj . serialize ( ) self . _client . put ( self . resource ( ) , data , headers = headers ) self . reload ( ) if not async_ : self . wait_for_service ( )
Update online model parameters to server .
8,394
def wait_for_service ( self , interval = 1 ) : while self . status in ( OnlineModel . Status . DEPLOYING , OnlineModel . Status . UPDATING ) : time . sleep ( interval ) if self . status == OnlineModel . Status . DEPLOY_FAILED : raise OnlineModelError ( self . last_fail_msg , self ) elif self . status != OnlineModel . Status . SERVING : raise OnlineModelError ( 'Unexpected status occurs: %s' % self . status . value , self )
Wait for the online model to be ready for service .
8,395
def wait_for_deletion ( self , interval = 1 ) : deleted = False while True : try : if self . status != OnlineModel . Status . DELETING : break except errors . NoSuchObject : deleted = True break time . sleep ( interval ) if not deleted : if self . status == OnlineModel . Status . DELETE_FAILED : raise OnlineModelError ( self . last_fail_msg , self ) else : raise OnlineModelError ( 'Unexpected status occurs: %s' % self . status . value , self )
Wait for the online model to be deleted .
8,396
def predict ( self , data , schema = None , endpoint = None ) : from . . import Projects if endpoint is not None : self . _endpoint = endpoint if self . _predict_rest is None : self . _predict_rest = RestClient ( self . _client . account , self . _endpoint , proxy = options . data_proxy ) json_data = json . dumps ( self . _build_predict_request ( data , schema ) ) headers = { 'Content-Type' : 'application/json' } predict_model = Projects ( client = self . _predict_rest ) [ self . project . name ] . online_models [ self . name ] resp = self . _predict_rest . post ( predict_model . resource ( ) , json_data , headers = headers ) if not self . _client . is_ok ( resp ) : e = errors . ODPSError . parse ( resp ) raise e return ModelPredictResults . parse ( resp ) . outputs
Predict data labels with current online model .
8,397
def move ( self , new_path , replication = None ) : if not new_path . startswith ( '/' ) : new_path = self . _normpath ( self . dirname + '/' + new_path ) else : new_path = self . _normpath ( new_path ) if new_path == self . path : raise ValueError ( 'New path should be different from the original one.' ) update_def = self . UpdateRequestXML ( path = new_path ) if replication : update_def . replication = replication headers = { 'Content-Type' : 'application/xml' , 'x-odps-volume-fs-path' : self . path , } self . _client . put ( self . parent . resource ( ) , params = { 'meta' : '' } , headers = headers , data = update_def . serialize ( ) ) self . _del_cache ( self . path ) self . path = new_path self . reload ( )
Move current path to a new location .
8,398
def get_default_runner ( udf_class , input_col_delim = ',' , null_indicator = 'NULL' , stdin = None ) : proto = udf . get_annotation ( udf_class ) in_types , out_types = parse_proto ( proto ) stdin = stdin or sys . stdin arg_parser = ArgParser ( in_types , stdin , input_col_delim , null_indicator ) stdin_feed = make_feed ( arg_parser ) collector = StdoutCollector ( out_types ) ctor = _get_runner_class ( udf_class ) return ctor ( udf_class , stdin_feed , collector )
Create a default runner with specified udf class .
8,399
def get_system_offset ( ) : import time if time . daylight and time . localtime ( ) . tm_isdst > 0 : return - time . altzone else : return - time . timezone
Get the system's timezone offset using the built-in time library.
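A worked example under hypothetical machine settings, following the time module's convention that timezone and altzone are seconds west of UTC:

    # US Eastern during DST:  time.daylight == 1, time.localtime().tm_isdst > 0, time.altzone == 14400
    #   -> get_system_offset() returns -14400  (UTC-4, in seconds east of UTC)
    # A UTC+8 zone without DST:  time.daylight == 0, time.timezone == -28800
    #   -> get_system_offset() returns 28800   (UTC+8)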