idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
8,300
def available():
    """Detect whether the FFmpeg backend can be used on this system.

    Launches the candidate commands with ``-version`` and reports True
    when the process exits successfully.
    """
    proc = popen_multiple(
        COMMANDS,
        ['-version'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        creationflags=PROC_FLAGS,
    )
    proc.wait()
    return proc.returncode == 0
Detect if the FFmpeg backend can be used on this system .
8,301
def read_data ( self , timeout = 10.0 ) : start_time = time . time ( ) while True : data = None try : data = self . stdout_reader . queue . get ( timeout = timeout ) if data : yield data else : break except queue . Empty : end_time = time . time ( ) if not data : if end_time - start_time >= timeout : raise ReadTimeoutE...
Read blocks of raw PCM data from the file .
8,302
def _get_info ( self ) : out_parts = [ ] while True : line = self . proc . stderr . readline ( ) if not line : raise CommunicationError ( "stream info not found" ) if isinstance ( line , bytes ) : line = line . decode ( 'utf8' , 'ignore' ) line = line . strip ( ) . lower ( ) if 'no such file' in line : raise IOError ( ...
Reads the tool's output from its stderr stream, extracts the relevant information, and parses it.
8,303
def _parse_info ( self , s ) : match = re . search ( r'(\d+) hz' , s ) if match : self . samplerate = int ( match . group ( 1 ) ) else : self . samplerate = 0 match = re . search ( r'hz, ([^,]+),' , s ) if match : mode = match . group ( 1 ) if mode == 'stereo' : self . channels = 2 else : cmatch = re . match ( r'(\d+)\...
Given relevant data from the ffmpeg output set audio parameter fields on this object .
8,304
def close ( self ) : if hasattr ( self , 'proc' ) : self . proc . poll ( ) if self . proc . returncode is None : self . proc . kill ( ) self . proc . wait ( ) if hasattr ( self , 'stderr_reader' ) : self . stderr_reader . join ( ) if hasattr ( self , 'stdout_reader' ) : self . stdout_reader . join ( ) self . proc . std...
Close the ffmpeg process used to perform the decoding .
8,305
def read_blocks(self, block_size=4096):
    """Yield successive buffers of PCM data from the underlying file.

    Reads at most *block_size* bytes per buffer and stops at end of data.
    """
    while True:
        chunk = self.mf.read(block_size)
        if not chunk:
            return
        yield bytes(chunk)
Generates buffers containing PCM data for the audio file .
8,306
def channels(self):
    """The number of channels in the audio stream.

    Only MAD's single-channel mode is mono; every other mode (dual
    channel, joint stereo, stereo) — and any unknown mode — counts as
    two channels.
    """
    # The original elif (dual/joint/stereo -> 2) and else (-> 2) branches
    # returned the same value, so they collapse into one default.
    if self.mf.mode() == mad.MODE_SINGLE_CHANNEL:
        return 1
    return 2
The number of channels .
8,307
def multi_char_literal(chars):
    """Emulate C multi-character integer literals.

    Given a string such as ``'abc'``, return the value of the C
    single-quoted literal ``'abc'``: character values packed big-endian
    into one integer.
    """
    # Folding with a left shift is equivalent to OR-ing each character
    # at its positional offset, since (a | b) << n == (a << n) | (b << n).
    value = 0
    for char in chars:
        value = (value << 8) | ord(char)
    return value
Emulates character integer literals in C . Given a string abc returns the value of the C single - quoted literal abc .
8,308
def _open_url(cls, url):
    """Open the CFURL *url* and return the resulting ExtAudioFileRef handle."""
    ref = ctypes.c_void_p()
    check(_coreaudio.ExtAudioFileOpenURL(url._obj, ctypes.byref(ref)))
    return ref
Given a CFURL Python object return an opened ExtAudioFileRef .
8,309
def set_client_format(self, desc):
    """Set the client data format to *desc*, which must describe PCM.

    This controls the encoding of the data the program will read from
    this object, as opposed to the format stored in the file itself.
    """
    assert desc.mFormatID == AUDIO_ID_PCM
    check(_coreaudio.ExtAudioFileSetProperty(
        self._obj,
        PROP_CLIENT_DATA_FORMAT,
        ctypes.sizeof(desc),
        ctypes.byref(desc),
    ))
    self._client_fmt = desc
Set the client format description. This describes the encoding of the data that the program will read from this object.
8,310
def get_file_format ( self ) : if self . _file_fmt is not None : return self . _file_fmt desc = AudioStreamBasicDescription ( ) size = ctypes . c_int ( ctypes . sizeof ( desc ) ) check ( _coreaudio . ExtAudioFileGetProperty ( self . _obj , PROP_FILE_DATA_FORMAT , ctypes . byref ( size ) , ctypes . byref ( desc ) ) ) se...
Get the file format description . This describes the type of data stored on disk .
8,311
def nframes(self):
    """Return the number of frames in the source file."""
    total = ctypes.c_long()
    total_size = ctypes.c_int(ctypes.sizeof(total))
    check(_coreaudio.ExtAudioFileGetProperty(
        self._obj,
        PROP_LENGTH,
        ctypes.byref(total_size),
        ctypes.byref(total),
    ))
    return total.value
Gets the number of frames in the source file .
8,312
def setup ( self , bitdepth = 16 ) : fmt = self . get_file_format ( ) newfmt = copy . copy ( fmt ) newfmt . mFormatID = AUDIO_ID_PCM newfmt . mFormatFlags = PCM_IS_SIGNED_INT | PCM_IS_PACKED newfmt . mBitsPerChannel = bitdepth newfmt . mBytesPerPacket = ( fmt . mChannelsPerFrame * newfmt . mBitsPerChannel // 8 ) newfmt...
Set the client format parameters specifying the desired PCM audio data format to be read from the file . Must be called before reading from the file .
8,313
def read_data ( self , blocksize = 4096 ) : frames = ctypes . c_uint ( blocksize // self . _client_fmt . mBytesPerFrame ) buf = ctypes . create_string_buffer ( blocksize ) buflist = AudioBufferList ( ) buflist . mNumberBuffers = 1 buflist . mBuffers [ 0 ] . mNumberChannels = self . _client_fmt . mChannelsPerFrame bufli...
Generates byte strings reflecting the audio data in the file .
8,314
def close(self):
    """Dispose of the audio file and free its resources; safe to call twice."""
    if self.closed:
        return
    check(_coreaudio.ExtAudioFileDispose(self._obj))
    self.closed = True
Close the audio file and free associated memory .
8,315
def mount(self, url, app):
    """Attach *app* as a sub-application served at *url* of the current app."""
    app.url = url
    self.mounts.append(app)
Mount a sub - app at the url of current app .
8,316
def _map ( expr , func , rtype = None , resources = None , args = ( ) , ** kwargs ) : name = None if isinstance ( func , FunctionWrapper ) : if func . output_names : if len ( func . output_names ) > 1 : raise ValueError ( 'Map column has more than one name' ) name = func . output_names [ 0 ] if func . output_types : rt...
Call func on each element of this sequence .
8,317
def _hash(expr, func=None):
    """Calculate the hash value of each element as an int64 sequence."""
    if func is None:
        # Default hashing; kept as a lambda as in the original —
        # presumably so it serializes as a UDF; confirm before changing.
        func = lambda x: hash(x)
    return _map(expr, func=func, rtype=types.int64)
Calculate the hash value .
8,318
def _isnull(expr):
    """Return a boolean sequence or scalar indicating which values are null."""
    if isinstance(expr, SequenceExpr):
        return IsNull(_input=expr, _data_type=types.boolean)
    if isinstance(expr, Scalar):
        return IsNull(_input=expr, _value_type=types.boolean)
Return a sequence or scalar according to the input indicating if the values are null .
8,319
def _notnull(expr):
    """Return a boolean sequence or scalar indicating which values are not null."""
    if isinstance(expr, SequenceExpr):
        return NotNull(_input=expr, _data_type=types.boolean)
    if isinstance(expr, Scalar):
        return NotNull(_input=expr, _value_type=types.boolean)
Return a sequence or scalar according to the input indicating if the values are not null .
8,320
def _fillna(expr, value):
    """Fill null values with *value*, preserving the input's data type."""
    if isinstance(expr, SequenceExpr):
        return FillNa(_input=expr, _fill_value=value, _data_type=expr.dtype)
    if isinstance(expr, Scalar):
        return FillNa(_input=expr, _fill_value=value, _value_type=expr.dtype)
Fill null with value .
8,321
def _isin ( expr , values ) : from . merge import _make_different_sources if isinstance ( values , SequenceExpr ) : expr , values = _make_different_sources ( expr , values ) if isinstance ( expr , SequenceExpr ) : return IsIn ( _input = expr , _values = values , _data_type = types . boolean ) elif isinstance ( expr , S...
Return a boolean sequence or scalar showing whether each element is exactly contained in the passed values .
8,322
def _notin(expr, values):
    """Return a boolean sequence or scalar: True where an element is not in *values*."""
    if isinstance(expr, SequenceExpr):
        return NotIn(_input=expr, _values=values, _data_type=types.boolean)
    if isinstance(expr, Scalar):
        return NotIn(_input=expr, _values=values, _value_type=types.boolean)
Return a boolean sequence or scalar showing whether each element is not contained in the passed values .
8,323
def _between ( expr , left , right , inclusive = True ) : if isinstance ( expr , SequenceExpr ) : return Between ( _input = expr , _left = left , _right = right , _inclusive = inclusive , _data_type = types . boolean ) elif isinstance ( expr , Scalar ) : return Between ( _input = expr , _left = left , _right = right , ...
Return a boolean sequence or scalar show whether each element is between left and right .
8,324
def _ifelse ( expr , true_expr , false_expr ) : tps = ( SequenceExpr , Scalar ) if not isinstance ( true_expr , tps ) : true_expr = Scalar ( _value = true_expr ) if not isinstance ( false_expr , tps ) : false_expr = Scalar ( _value = false_expr ) output_type = utils . highest_precedence_data_type ( * [ true_expr . dtyp...
Given a boolean sequence or scalar if true will return the left else return the right one .
8,325
def _switch ( expr , * args , ** kw ) : default = _scalar ( kw . get ( 'default' ) ) if len ( args ) <= 0 : raise errors . ExpressionError ( 'Switch must accept more than one condition' ) if all ( isinstance ( arg , tuple ) and len ( arg ) == 2 for arg in args ) : conditions , thens = zip ( * args ) else : conditions =...
Similar to the case - when in SQL . Refer to the example below
8,326
def _cut ( expr , bins , right = True , labels = None , include_lowest = False , include_under = False , include_over = False ) : is_seq = isinstance ( expr , SequenceExpr ) dtype = utils . highest_precedence_data_type ( * ( types . validate_value_type ( it ) for it in labels ) ) if labels is not None else types . int6...
Return indices of half - open bins to which each value of expr belongs .
8,327
def _int_to_datetime(expr):
    """Interpret the numeric sequence or scalar as datetime values."""
    if isinstance(expr, SequenceExpr):
        return IntToDatetime(_input=expr, _data_type=types.datetime)
    if isinstance(expr, Scalar):
        return IntToDatetime(_input=expr, _value_type=types.datetime)
Return a sequence or scalar that is the datetime value of the current numeric sequence or scalar .
8,328
def get_ddl ( self , with_comments = True , if_not_exists = False ) : shard_num = self . shard . shard_num if self . shard is not None else None return self . gen_create_table_sql ( self . name , self . schema , self . comment if with_comments else None , if_not_exists = if_not_exists , with_column_comments = with_comm...
Get DDL SQL statement for the given table .
8,329
def head ( self , limit , partition = None , columns = None ) : if limit <= 0 : raise ValueError ( 'limit number should >= 0.' ) params = { 'data' : '' , 'linenum' : limit } if partition is not None : if not isinstance ( partition , odps_types . PartitionSpec ) : partition = odps_types . PartitionSpec ( partition ) par...
Get the head records of a table or its partition .
8,330
def open_reader ( self , partition = None , ** kw ) : from . . tunnel . tabletunnel import TableDownloadSession reopen = kw . pop ( 'reopen' , False ) endpoint = kw . pop ( 'endpoint' , None ) download_id = kw . pop ( 'download_id' , None ) tunnel = self . _create_table_tunnel ( endpoint = endpoint ) if download_id is ...
Open the reader to read the entire records from this table or its partition .
8,331
def create_partition(self, partition_spec, if_not_exists=False, async_=False, **kw):
    """Create a partition within the table.

    The legacy ``async`` keyword, when supplied, overrides *async_*.
    """
    if 'async' in kw:
        async_ = kw['async']
    return self.partitions.create(
        partition_spec, if_not_exists=if_not_exists, async_=async_)
Create a partition within the table .
8,332
def delete_partition(self, partition_spec, if_exists=False, async_=False, **kw):
    """Delete a partition within the table.

    The legacy ``async`` keyword, when supplied, overrides *async_*.
    """
    if 'async' in kw:
        async_ = kw['async']
    return self.partitions.delete(
        partition_spec, if_exists=if_exists, async_=async_)
Delete a partition within the table .
8,333
def exist_partitions(self, prefix_spec=None):
    """Check whether any partition matching *prefix_spec* exists."""
    sentinel = object()
    it = self.partitions.iterate_partitions(spec=prefix_spec)
    return next(it, sentinel) is not sentinel
Check if partitions with provided conditions exist .
8,334
def truncate ( self , async_ = False , ** kw ) : from . tasks import SQLTask async_ = kw . get ( 'async' , async_ ) task = SQLTask ( name = 'SQLTruncateTableTask' , query = 'truncate table %s.%s' % ( self . project . name , self . name ) ) instance = self . project . parent [ self . _client . project ] . instances . cr...
Truncate this table.
8,335
def drop(self, async_=False, if_exists=False, **kw):
    """Drop this table through its parent collection.

    The legacy ``async`` keyword, when supplied, overrides *async_*.
    """
    if 'async' in kw:
        async_ = kw['async']
    return self.parent.delete(self, async_=async_, if_exists=if_exists)
Drop this table .
8,336
def _strftime(expr, date_format):
    """Format datetimes as strings according to *date_format*.

    The format specifiers follow the Python standard library's
    ``strftime`` conventions.
    """
    return datetime_op(
        expr, Strftime, output_type=types.string, _date_format=date_format)
Return formatted strings specified by date_format which supports the same string format as the python standard library . Details of the string format can be found in python string format doc
8,337
def sort_values(expr, by, ascending=True):
    """Sort the collection by the given field(s). ``sort`` is an alias.

    Each entry of *by* may be a field or a function taking the collection.
    """
    fields = by if isinstance(by, list) else [by]
    fields = [f(expr) if inspect.isfunction(f) else f for f in fields]
    return SortedCollectionExpr(
        expr,
        _sorted_fields=fields,
        _ascending=ascending,
        _schema=expr._schema,
    )
Sort the collection by values . sort is an alias name for sort_values
8,338
def distinct(expr, on=None, *ons):
    """Remove duplicate rows, optionally considering only certain columns.

    With no columns given, all columns are considered (``_all`` is set).
    """
    fields = on or list()
    if not isinstance(fields, list):
        fields = [fields]
    fields = fields + list(ons)
    fields = [f(expr) if inspect.isfunction(f) else f for f in fields]
    return DistinctCollectionExpr(expr, _unique_fields=fields, _all=(len(fields) == 0))
Get collection with duplicate rows removed optionally only considering certain columns
8,339
def reshuffle(expr, by=None, sort=None, ascending=True):
    """Reshuffle the data, grouping by *by* (random by default).

    When *sort* is given, rows are ordered within the shuffle.
    """
    shuffle_key = by or RandomScalar()
    grouped = expr.groupby(shuffle_key)
    if sort:
        grouped = grouped.sort_values(sort, ascending=ascending)
    return ReshuffledCollectionExpr(_input=grouped, _schema=expr._schema)
Reshuffle data .
8,340
def std_scale ( expr , columns = None , with_means = True , with_std = True , preserve = False , suffix = '_scaled' , group = None ) : time_suffix = str ( int ( time . time ( ) ) ) def calc_agg ( expr , col ) : return [ getattr ( expr , col ) . mean ( ) . rename ( col + '_mean_' + time_suffix ) , getattr ( expr , col )...
Resize a data frame by mean and standard error .
8,341
def extract_kv ( expr , columns = None , kv_delim = ':' , item_delim = ',' , dtype = 'float' , fill_value = None ) : if columns is None : columns = [ expr . _get_field ( c ) for c in expr . schema . names ] intact_cols = [ ] else : columns = [ expr . _get_field ( c ) for c in utils . to_list ( columns ) ] name_set = se...
Extract values in key - value represented columns into standalone columns . New column names will be the name of the key - value column followed by an underscore and the key .
8,342
def to_kv ( expr , columns = None , kv_delim = ':' , item_delim = ',' , kv_name = 'kv_col' ) : if columns is None : columns = [ expr . _get_field ( c ) for c in expr . schema . names ] intact_cols = [ ] else : columns = [ expr . _get_field ( c ) for c in utils . to_list ( columns ) ] name_set = set ( [ c . name for c i...
Merge values in specified columns into a key - value represented column .
8,343
def dropna ( expr , how = 'any' , thresh = None , subset = None ) : if subset is None : subset = [ expr . _get_field ( c ) for c in expr . schema . names ] else : subset = [ expr . _get_field ( c ) for c in utils . to_list ( subset ) ] if not subset : raise ValueError ( 'Illegal subset is provided.' ) if thresh is None...
Return object with labels on given axis omitted where alternately any or all of the data are missing
8,344
def append_id(expr, id_col='append_id'):
    """Append an ID column named *id_col*, forming a new DataFrame.

    Delegates to the XFlow implementation when available.
    """
    if hasattr(expr, '_xflow_append_id'):
        return expr._xflow_append_id(id_col)
    return _append_id(expr, id_col)
Append an ID column to current column to form a new DataFrame .
8,345
def split(expr, frac, seed=None):
    """Split the collection into two parts with the given ratio.

    Delegates to the XFlow implementation when available.
    """
    if hasattr(expr, '_xflow_split'):
        return expr._xflow_split(frac, seed=seed)
    return _split(expr, frac, seed=seed)
Split the current column into two column objects with certain ratio .
8,346
def applymap ( expr , func , rtype = None , resources = None , columns = None , excludes = None , args = ( ) , ** kwargs ) : if columns is not None and excludes is not None : raise ValueError ( '`columns` and `excludes` cannot be provided at the same time.' ) if not columns : excludes = excludes or [ ] if isinstance ( ...
Call func on each element of this collection .
8,347
def read_string(self, size):
    """Read exactly *size* bytes from the stream and return them.

    Raises a DecodeError when *size* is negative or the stream ends
    before *size* bytes are available.
    """
    if size < 0:
        raise errors.DecodeError('Negative size %d' % size)
    data = self._input.read(size)
    if len(data) != size:
        raise errors.DecodeError(
            'String claims to have %d bytes, but read %d' % (size, len(data)))
    self._pos += len(data)
    return data
Reads up to size bytes from the stream stopping early only if we reach the end of the stream . Returns the bytes read as a string .
8,348
def read_little_endian32(self):
    """Read the next 4 bytes as a little-endian unsigned 32-bit integer."""
    raw = self._input.read(4)
    try:
        (value,) = struct.unpack(wire_format.FORMAT_UINT32_LITTLE_ENDIAN, raw)
    except struct.error as e:
        raise errors.DecodeError(e)
    self._pos += 4
    return value
Interprets the next 4 bytes of the stream as a little-endian encoded unsigned 32-bit integer and returns that integer.
8,349
def read_little_endian64(self):
    """Read the next 8 bytes as a little-endian unsigned 64-bit integer."""
    raw = self._input.read(8)
    try:
        (value,) = struct.unpack(wire_format.FORMAT_UINT64_LITTLE_ENDIAN, raw)
    except struct.error as e:
        raise errors.DecodeError(e)
    self._pos += 8
    return value
Interprets the next 8 bytes of the stream as a little-endian encoded unsigned 64-bit integer and returns that integer.
8,350
def read_varint32(self):
    """Read a varint and interpret it as a signed 32-bit integer.

    Raises a DecodeError when the value does not fit in int32 range.
    """
    value = self.read_varint64()
    if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX:
        raise errors.DecodeError('Value out of range for int32: %d' % value)
    return int(value)
Reads a varint from the stream interprets this varint as a signed 32 - bit integer and returns the integer .
8,351
def read_var_uint32(self):
    """Read a varint and interpret it as an unsigned 32-bit integer.

    Raises a DecodeError when the value does not fit in uint32 range.
    """
    value = self.read_var_uint64()
    if value > wire_format.UINT32_MAX:
        raise errors.DecodeError('Value out of range for uint32: %d' % value)
    return value
Reads a varint from the stream interprets this varint as an unsigned 32 - bit integer and returns the integer .
8,352
def read_varint64(self):
    """Read a varint and interpret it as a signed 64-bit integer.

    Values above INT64_MAX are the two's-complement encodings of
    negatives, so 2**64 is subtracted to recover the signed value.
    """
    value = self.read_var_uint64()
    if value > wire_format.INT64_MAX:
        value -= 1 << 64
    return value
Reads a varint from the stream interprets this varint as a signed 64 - bit integer and returns the integer .
8,353
def read_var_uint64(self):
    """Read a varint and interpret it as an unsigned 64-bit integer.

    Raises a DecodeError when the decoded value is outside uint64 range.
    """
    value = self._read_varint_helper()
    if not 0 <= value <= wire_format.UINT64_MAX:
        raise errors.DecodeError('Value out of range for uint64: %d' % value)
    return value
Reads a varint from the stream interprets this varint as an unsigned 64 - bit integer and returns the integer .
8,354
def _read_varint_helper ( self ) : result = 0 shift = 0 while 1 : if shift >= 64 : raise errors . DecodeError ( 'Too many bytes when decoding varint.' ) try : b = ord ( self . _input . read ( 1 ) ) except IndexError : raise errors . DecodeError ( 'Truncated varint.' ) self . _pos += 1 result |= ( ( b & 0x7f ) << shift ...
Helper for the various varint - reading methods above . Reads an unsigned varint - encoded integer from the stream and returns this integer .
8,355
def append_tag(self, field_number, wire_type):
    """Append a tag carrying *field_number* and *wire_type*, varint-encoded."""
    tag = wire_format.pack_tag(field_number, wire_type)
    self._stream.append_var_uint32(tag)
Appends a tag containing field number and wire type information .
8,356
def append_sint32(self, value):
    """Append a 32-bit integer, zigzag-encoded and then varint-encoded."""
    encoded = wire_format.zig_zag_encode(value)
    self._stream.append_var_uint32(encoded)
Appends a 32 - bit integer to our buffer zigzag - encoded and then varint - encoded .
8,357
def append_sint64(self, value):
    """Append a 64-bit integer, zigzag-encoded and then varint-encoded."""
    encoded = wire_format.zig_zag_encode(value)
    self._stream.append_var_uint64(encoded)
Appends a 64 - bit integer to our buffer zigzag - encoded and then varint - encoded .
8,358
def append_sfixed32(self, value):
    """Append a signed 32-bit integer in little-endian byte order.

    Raises an EncodeError when *value* does not fit in 32 bits.
    """
    # The sign-extension check: all bits above bit 31 must equal the sign bit.
    sign = -1 if value & 0x80000000 else 0
    if value >> 32 != sign:
        raise errors.EncodeError('SFixed32 out of range: %d' % value)
    self._stream.append_little_endian32(value & 0xffffffff)
Appends a signed 32 - bit integer to our buffer in little - endian byte - order .
8,359
def append_sfixed64(self, value):
    """Append a signed 64-bit integer in little-endian byte order.

    Raises an EncodeError when *value* does not fit in 64 bits.
    """
    # The sign-extension check: all bits above bit 63 must equal the sign bit.
    sign = -1 if value & 0x8000000000000000 else 0
    if value >> 64 != sign:
        raise errors.EncodeError('SFixed64 out of range: %d' % value)
    self._stream.append_little_endian64(value & 0xffffffffffffffff)
Appends a signed 64 - bit integer to our buffer in little - endian byte - order .
8,360
def append_string(self, value):
    """Append *value* length-prefixed, with the length varint-encoded."""
    stream = self._stream
    stream.append_var_uint32(len(value))
    stream.append_raw_bytes(value)
Appends a length - prefixed string to our buffer with the length varint - encoded .
8,361
def get_SHA1_bin(word):
    """Return the SHA-1 hash of *word* as a 160-character binary string.

    *word* may be text or bytes; text is UTF-8 encoded before hashing.
    The result is zero-padded on the left to exactly 160 bits.
    """
    from hashlib import sha1
    # On Python 3, str must be encoded before hashing; on Python 2 a str
    # is already bytes, so the `not isinstance(word, bytes)` guard keeps
    # the original behavior without relying on the module-level PY3 flag.
    if isinstance(word, str) and not isinstance(word, bytes):
        word = word.encode('utf-8')
    digest = sha1()
    digest.update(word)
    return bin(int(digest.hexdigest(), 16))[2:].zfill(160)
Return SHA1 hash of any string
8,362
def get_index(binstr, end_index=160):
    """Return the 1-based position of the first '1' bit in *binstr*.

    Falls back to *end_index* when the string contains no '1'.
    """
    position = binstr.find('1')
    if position == -1:
        return end_index
    return position + 1
Return the position of the first 1 bit from the left in the word until end_index
8,363
def _estimate ( self , buffer ) : m = self . _bucket_number raw_e = self . _alpha * pow ( m , 2 ) / sum ( [ pow ( 2 , - x ) for x in buffer ] ) if raw_e <= 5 / 2.0 * m : v = buffer . count ( 0 ) if v != 0 : return m * log ( m / float ( v ) , 2 ) else : return raw_e elif raw_e <= 1 / 30.0 * 2 ** 160 : return raw_e else ...
Return the estimate of the cardinality
8,364
def merge(self, buffer, other_hyper_log_log):
    """Merge another HyperLogLog's buckets into *buffer* in place.

    Each bucket keeps the maximum of the two corresponding values.
    """
    for index, current in enumerate(buffer):
        buffer[index] = max(current, other_hyper_log_log[index])
Merge the HyperLogLog
8,365
def read_string(self):
    """Read and return a length-delimited string (varint length prefix)."""
    stream = self._stream
    length = stream.read_var_uint32()
    return stream.read_string(length)
Reads and returns a length - delimited string .
8,366
def _chk_truncate ( self ) : self . tr_size_col = - 1 max_cols = self . max_cols max_rows = self . max_rows if max_cols == 0 or max_rows == 0 : ( w , h ) = get_terminal_size ( ) self . w = w self . h = h if self . max_rows == 0 : dot_row = 1 prompt_row = 1 if self . show_dimensions : show_dimension_rows = 3 n_add_rows ...
Checks whether the frame should be truncated . If so slices the frame up .
8,367
def build_input_table(cls, name='inputTableName', input_name='input'):
    """Build an input table parameter bound to port *input_name*."""
    param = cls(name)
    param.exporter = 'get_input_table_name'
    param.input_name = input_name
    return param
Build an input table parameter
8,368
def build_input_partitions(cls, name='inputTablePartitions', input_name='input'):
    """Build an input table partition parameter bound to port *input_name*."""
    param = cls(name)
    param.exporter = 'get_input_partitions'
    param.input_name = input_name
    return param
Build an input table partition parameter
8,369
def build_output_table(cls, name='inputTableName', output_name='output'):
    """Build an output table parameter bound to port *output_name*.

    NOTE(review): the default *name* is 'inputTableName' even though this
    builds an output parameter — looks like a copy-paste default; preserved
    as-is, confirm against callers before changing.
    """
    param = cls(name)
    param.exporter = 'get_output_table_name'
    param.output_name = output_name
    return param
Build an output table parameter
8,370
def build_output_partitions(cls, name='inputTablePartitions', output_name='output'):
    """Build an output table partition parameter bound to port *output_name*.

    NOTE(review): the default *name* is 'inputTablePartitions' even though
    this builds an output parameter — looks like a copy-paste default;
    preserved as-is, confirm against callers before changing.
    """
    param = cls(name)
    param.exporter = 'get_output_table_partition'
    param.output_name = output_name
    return param
Build an output table partition parameter
8,371
def build_model_name(cls, name='modelName', output_name='output'):
    """Build an output model name parameter bound to port *output_name*."""
    param = cls(name)
    param.exporter = 'generate_model_name'
    param.output_name = output_name
    return param
Build an output model name parameter .
8,372
def build_data_input(cls, name='input'):
    """Build a data input port with the given *name*."""
    return cls(name, PortDirection.INPUT, type=PortType.DATA)
Build a data input port .
8,373
def build_data_output(cls, name='output', copy_input=None, schema=None):
    """Build a data output port, optionally copying schema from an input."""
    return cls(
        name,
        PortDirection.OUTPUT,
        type=PortType.DATA,
        copy_input=copy_input,
        schema=schema,
    )
Build a data output port .
8,374
def build_model_input(cls, name='input'):
    """Build a model input port with the given *name*."""
    return cls(name, PortDirection.INPUT, type=PortType.MODEL)
Build a model input port .
8,375
def build_model_output(cls, name='output'):
    """Build a model output port with the given *name*."""
    return cls(name, PortDirection.OUTPUT, type=PortType.MODEL)
Build a model output port .
8,376
def add_port(self, port):
    """Register *port* on this definition and assign its sequence number.

    Sequence numbers count from 1, independently for each io_type.
    Returns self for chaining.
    """
    self.ports.append(port)
    seqs = self.port_seqs
    seqs[port.io_type] = seqs.get(port.io_type, 0) + 1
    port.sequence = seqs[port.io_type]
    return self
Add a port object to the definition
8,377
def add_meta(self, name, value):
    """Set meta *name* to *value*, updating an existing entry if present.

    Returns self for chaining.
    """
    for meta in self.metas:
        if meta.name == name:
            meta.value = value
            return self
    self.metas.append(MetaDef(name, value))
    return self
Add a pair of meta data to the definition
8,378
def serialize(self):
    """Renumber port sequences per io_type, then serialize as usual.

    NOTE(review): groupby only groups consecutive runs (ports are not
    re-sorted here), and sequences restart from 0 while add_port numbers
    from 1 — preserved as in the original; confirm both are intended.
    """
    for _, group in groupby(self.ports, lambda p: p.io_type):
        for seq, port in enumerate(group):
            port.sequence = seq
    return super(AlgorithmDef, self).serialize()
Serialize the algorithm definition
8,379
def _count(expr, pat, flags=0):
    """Count occurrences of *pat* in each string of the sequence or scalar."""
    return _string_op(
        expr, Count, output_type=types.int64, _pat=pat, _flags=flags)
Count occurrences of pattern in each string of the sequence or scalar
8,380
def _extract(expr, pat, flags=0, group=0):
    """Extract *group* from each string using the regular expression *pat*."""
    return _string_op(expr, Extract, _pat=pat, _flags=flags, _group=group)
Find group in each string in the Series using passed regular expression .
8,381
def _pad ( expr , width , side = 'left' , fillchar = ' ' ) : if not isinstance ( fillchar , six . string_types ) : msg = 'fillchar must be a character, not {0}' raise TypeError ( msg . format ( type ( fillchar ) . __name__ ) ) if len ( fillchar ) != 1 : raise TypeError ( 'fillchar must be a character, not str' ) if sid...
Pad strings in the sequence or scalar with an additional character to specified side .
8,382
def _slice(expr, start=None, stop=None, step=None):
    """Slice substrings from each element of the sequence or scalar."""
    return _string_op(expr, Slice, _start=start, _end=stop, _step=step)
Slice substrings from each element in the sequence or scalar
8,383
def _strptime(expr, date_format):
    """Parse strings into datetimes according to *date_format*.

    The format specifiers follow the Python standard library's
    ``strptime`` conventions.
    """
    return _string_op(
        expr, Strptime, _date_format=date_format, output_type=types.datetime)
Return datetimes specified by date_format which supports the same string format as the python standard library . Details of the string format can be found in python string format doc
8,384
def _list_tables_model ( self , prefix = '' , project = None ) : tset = set ( ) if prefix . startswith ( TEMP_TABLE_PREFIX ) : prefix = TEMP_TABLE_MODEL_PREFIX + prefix [ len ( TEMP_TABLE_PREFIX ) : ] it = self . list_tables ( project = project , prefix = prefix ) else : it = self . list_tables ( project = project , pr...
List all TablesModel in the given project .
8,385
def open_reader ( self , file_name , reopen = False , endpoint = None , start = None , length = None , ** kwargs ) : tunnel = self . _create_volume_tunnel ( endpoint = endpoint ) download_id = self . _download_id if not reopen else None download_session = tunnel . create_download_session ( volume = self . volume . name...
Open a volume file for read . A file - like object will be returned which can be used to read contents from volume files .
8,386
def open_writer ( self , reopen = False , endpoint = None , ** kwargs ) : tunnel = self . _create_volume_tunnel ( endpoint = endpoint ) upload_id = self . _upload_id if not reopen else None upload_session = tunnel . create_upload_session ( volume = self . volume . name , partition_spec = self . name , upload_id = uploa...
Open a volume partition to write to . You can use open method to open a file inside the volume and write to it or use write method to write to specific files .
8,387
def batch_persist ( dfs , tables , * args , ** kwargs ) : from . delay import Delay if 'async' in kwargs : kwargs [ 'async_' ] = kwargs [ 'async' ] execute_keys = ( 'ui' , 'async_' , 'n_parallel' , 'timeout' , 'close_and_notify' ) execute_kw = dict ( ( k , v ) for k , v in six . iteritems ( kwargs ) if k in execute_key...
Persist multiple DataFrames into ODPS .
8,388
def _repr_fits_horizontal_ ( self , ignore_width = False ) : width , height = get_console_size ( ) max_columns = options . display . max_columns nb_columns = len ( self . columns ) if ( ( max_columns and nb_columns > max_columns ) or ( ( not ignore_width ) and width and nb_columns > ( width // 2 ) ) ) : return False if...
Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns . In case off non - interactive session no boundaries apply .
8,389
def _repr_html_ ( self ) : if self . _pandas and options . display . notebook_repr_widget : from . . import DataFrame from . . ui import show_df_widget show_df_widget ( DataFrame ( self . _values , schema = self . schema ) ) if self . _pandas : return self . _values . _repr_html_ ( ) if in_qtconsole ( ) : return None i...
Return a html representation for a particular DataFrame . Mainly for IPython notebook .
8,390
def to_html ( self , buf = None , columns = None , col_space = None , header = True , index = True , na_rep = 'NaN' , formatters = None , float_format = None , sparsify = None , index_names = True , justify = None , bold_rows = True , classes = None , escape = True , max_rows = None , max_cols = None , show_dimensions ...
Render a DataFrame as an HTML table .
8,391
def get_localzone():
    """Return the pytz tzinfo matching the Windows-configured timezone.

    The zone is computed once and cached in the module-level _cache_tz;
    the offset sanity check runs against the cached zone.
    """
    global _cache_tz
    if _cache_tz is None:
        _cache_tz = pytz.timezone(get_localzone_name())
    utils.assert_tz_offset(_cache_tz)
    return _cache_tz
Returns the zoneinfo - based tzinfo object that matches the Windows - configured timezone .
8,392
def reload_localzone():
    """Recompute and cache the local timezone.

    Call this after the system timezone has changed.
    """
    global _cache_tz
    _cache_tz = pytz.timezone(get_localzone_name())
    utils.assert_tz_offset(_cache_tz)
    return _cache_tz
Reload the cached localzone . You need to call this if the timezone has changed .
8,393
def update ( self , async_ = False , ** kw ) : async_ = kw . get ( 'async' , async_ ) headers = { 'Content-Type' : 'application/xml' } new_kw = dict ( ) if self . offline_model_name : upload_keys = ( '_parent' , 'name' , 'offline_model_name' , 'offline_model_project' , 'qos' , 'instance_num' ) else : upload_keys = ( '_...
Update online model parameters to server .
8,394
def wait_for_service ( self , interval = 1 ) : while self . status in ( OnlineModel . Status . DEPLOYING , OnlineModel . Status . UPDATING ) : time . sleep ( interval ) if self . status == OnlineModel . Status . DEPLOY_FAILED : raise OnlineModelError ( self . last_fail_msg , self ) elif self . status != OnlineModel . S...
Wait for the online model to be ready for service .
8,395
def wait_for_deletion ( self , interval = 1 ) : deleted = False while True : try : if self . status != OnlineModel . Status . DELETING : break except errors . NoSuchObject : deleted = True break time . sleep ( interval ) if not deleted : if self . status == OnlineModel . Status . DELETE_FAILED : raise OnlineModelError ...
Wait for the online model to be deleted .
8,396
def predict ( self , data , schema = None , endpoint = None ) : from . . import Projects if endpoint is not None : self . _endpoint = endpoint if self . _predict_rest is None : self . _predict_rest = RestClient ( self . _client . account , self . _endpoint , proxy = options . data_proxy ) json_data = json . dumps ( sel...
Predict data labels with current online model .
8,397
def move ( self , new_path , replication = None ) : if not new_path . startswith ( '/' ) : new_path = self . _normpath ( self . dirname + '/' + new_path ) else : new_path = self . _normpath ( new_path ) if new_path == self . path : raise ValueError ( 'New path should be different from the original one.' ) update_def = ...
Move current path to a new location .
8,398
def get_default_runner ( udf_class , input_col_delim = ',' , null_indicator = 'NULL' , stdin = None ) : proto = udf . get_annotation ( udf_class ) in_types , out_types = parse_proto ( proto ) stdin = stdin or sys . stdin arg_parser = ArgParser ( in_types , stdin , input_col_delim , null_indicator ) stdin_feed = make_fe...
Create a default runner with specified udf class .
8,399
def get_system_offset():
    """Return the system timezone's UTC offset in seconds via the time module.

    Uses the DST offset (altzone) when DST is both defined and currently
    active; otherwise the standard offset (timezone). Both are negated
    because the time module stores offsets west of UTC as positive.
    """
    import time
    if time.daylight and time.localtime().tm_isdst > 0:
        return -time.altzone
    return -time.timezone
Get the system's timezone offset using the built-in library time.