idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
14,200
public boolean toInt ( IntWrapper intWrapper ) { if ( numBytes == 0 ) { return false ; } byte b = getByte ( 0 ) ; final boolean negative = b == '-' ; int offset = 0 ; if ( negative || b == '+' ) { offset ++ ; if ( numBytes == 1 ) { return false ; } } final byte separator = '.' ; final int radix = 10 ; final int stopVal...
Parses this UTF8String to int .
14,201
public void registerWithShuffleServer ( String host , int port , String execId , ExecutorShuffleInfo executorInfo ) throws IOException , InterruptedException { checkInit ( ) ; try ( TransportClient client = clientFactory . createUnmanagedClient ( host , port ) ) { ByteBuffer registerMessage = new RegisterExecutor ( app...
Registers this executor with an external shuffle server . This registration is required to inform the shuffle server about where and how we store our shuffle files .
14,202
public long spill ( long size , MemoryConsumer trigger ) throws IOException { if ( trigger != this ) { if ( readingIterator != null ) { return readingIterator . spill ( ) ; } return 0L ; } if ( inMemSorter == null || inMemSorter . numRecords ( ) <= 0 ) { return 0L ; } logger . info ( "Thread {} spilling sort data of {}...
Sort and spill the current records in response to memory pressure .
14,203
/**
 * Return the total memory usage of this sorter, including all data pages
 * and (when present) the in-memory sorter's pointer array.
 */
private long getMemoryUsage() {
    long pagesTotal = 0;
    for (MemoryBlock page : allocatedPages) {
        pagesTotal += page.size();
    }
    long sorterUsage = (inMemSorter == null) ? 0 : inMemSorter.getMemoryUsage();
    return sorterUsage + pagesTotal;
}
Return the total memory usage of this sorter including the data pages and the sorter s pointer array .
14,204
/**
 * Free this sorter's data pages and reset the page-allocation state.
 * Records the peak memory usage before releasing anything.
 *
 * @return the total number of bytes freed
 */
private long freeMemory() {
    updatePeakMemoryUsed();
    long bytesFreed = 0;
    for (MemoryBlock page : allocatedPages) {
        bytesFreed += page.size();
        freePage(page);
    }
    allocatedPages.clear();
    currentPage = null;
    pageCursor = 0;
    return bytesFreed;
}
Free this sorter s data pages .
14,205
/**
 * Deletes any spill files created by this sorter. A failure to delete one
 * file is logged and does not abort cleanup of the remaining files.
 */
private void deleteSpillFiles() {
    for (UnsafeSorterSpillWriter writer : spillWriters) {
        File spillFile = writer.getFile();
        if (spillFile == null || !spillFile.exists()) {
            continue;
        }
        if (!spillFile.delete()) {
            logger.error("Was unable to delete spill file {}", spillFile.getAbsolutePath());
        }
    }
}
Deletes any spill files created by this sorter .
14,206
private void growPointerArrayIfNecessary ( ) throws IOException { assert ( inMemSorter != null ) ; if ( ! inMemSorter . hasSpaceForAnotherRecord ( ) ) { long used = inMemSorter . getMemoryUsage ( ) ; LongArray array ; try { array = allocateArray ( used / 8 * 2 ) ; } catch ( TooLargePageException e ) { spill ( ) ; retur...
Checks whether there is enough space to insert an additional record in to the sort pointer array and grows the array if additional space is required . If the required space cannot be obtained then the in - memory data will be spilled to disk .
14,207
/**
 * Ensures the current page has room for {@code required} more bytes,
 * allocating and registering a fresh page when it does not. The page cursor
 * is reset to the new page's base offset on allocation.
 *
 * @param required number of bytes the caller is about to write
 */
private void acquireNewPageIfNecessary(int required) {
    boolean needNewPage =
        currentPage == null
            || pageCursor + required > currentPage.getBaseOffset() + currentPage.size();
    if (needNewPage) {
        currentPage = allocatePage(required);
        pageCursor = currentPage.getBaseOffset();
        allocatedPages.add(currentPage);
    }
}
Allocates more memory in order to insert an additional record . This will request additional memory from the memory manager and spill if the requested memory can not be obtained .
14,208
public void insertRecord ( Object recordBase , long recordOffset , int length , long prefix , boolean prefixIsNull ) throws IOException { assert ( inMemSorter != null ) ; if ( inMemSorter . numRecords ( ) >= numElementsForSpillThreshold ) { logger . info ( "Spilling data because number of spilledRecords crossed the thr...
Write a record to the sorter .
14,209
/**
 * Merges another UnsafeExternalSorter into this one; the other sorter is
 * emptied. Its in-memory data is first forced to disk so that everything it
 * holds is represented by spill files this sorter can adopt.
 */
public void merge(UnsafeExternalSorter other) throws IOException {
    other.spill();
    spillWriters.addAll(other.spillWriters);
    other.spillWriters.clear();
    other.cleanupResources();
}
Merges another UnsafeExternalSorter into this one ; the other one will be emptied .
14,210
public UnsafeSorterIterator getIterator ( int startIndex ) throws IOException { if ( spillWriters . isEmpty ( ) ) { assert ( inMemSorter != null ) ; UnsafeSorterIterator iter = inMemSorter . getSortedIterator ( ) ; moveOver ( iter , startIndex ) ; return iter ; } else { LinkedList < UnsafeSorterIterator > queue = new L...
Returns an iterator starting from startIndex which will return the rows in the order they were inserted .
14,211
void grow ( int neededSize ) { if ( neededSize < 0 ) { throw new IllegalArgumentException ( "Cannot grow BufferHolder by size " + neededSize + " because the size is negative" ) ; } if ( neededSize > ARRAY_MAX - totalSize ( ) ) { throw new IllegalArgumentException ( "Cannot grow BufferHolder by size " + neededSize + " b...
Grows the buffer by at least neededSize and points the row to the buffer .
14,212
/**
 * Used to initiate the SASL handshake with the server. Returns the client's
 * initial response when the mechanism supplies one, otherwise an empty token.
 */
public synchronized byte[] firstToken() {
    if (saslClient == null || !saslClient.hasInitialResponse()) {
        return new byte[0];
    }
    try {
        return saslClient.evaluateChallenge(new byte[0]);
    } catch (SaslException e) {
        throw Throwables.propagate(e);
    }
}
Used to initiate SASL handshake with server .
14,213
/**
 * Responds to the server's SASL token. Returns an empty reply when no SASL
 * client is active.
 */
public synchronized byte[] response(byte[] token) {
    if (saslClient == null) {
        return new byte[0];
    }
    try {
        return saslClient.evaluateChallenge(token);
    } catch (SaslException e) {
        throw Throwables.propagate(e);
    }
}
Respond to server s SASL token .
14,214
/**
 * Disposes of any system resources or security-sensitive information the
 * SaslClient might be using.
 */
public synchronized void dispose() {
    if (saslClient != null) {
        try {
            saslClient.dispose();
        } catch (SaslException e) {
            // Intentionally swallowed: dispose() is best-effort cleanup and a
            // failure to release SASL resources should not propagate.
        } finally {
            saslClient = null; // Drop the reference even if dispose() failed.
        }
    }
}
Disposes of any system resources or security - sensitive information the SaslClient might be using .
14,215
public void getMetrics ( MetricsCollector collector , boolean all ) { MetricsRecordBuilder metricsRecordBuilder = collector . addRecord ( "sparkShuffleService" ) ; for ( Map . Entry < String , Metric > entry : metricSet . getMetrics ( ) . entrySet ( ) ) { collectMetric ( metricsRecordBuilder , entry . getKey ( ) , entr...
Get metrics from the source
14,216
public static Properties toCryptoConf ( String prefix , Iterable < Map . Entry < String , String > > conf ) { Properties props = new Properties ( ) ; for ( Map . Entry < String , String > e : conf ) { String key = e . getKey ( ) ; if ( key . startsWith ( prefix ) ) { props . setProperty ( COMMONS_CRYPTO_CONFIG_PREFIX +...
Extract the commons - crypto configuration embedded in a list of config values .
14,217
/**
 * Gets boolean values from [rowId, rowId + count). Values read from null
 * slots are undefined and can be anything.
 */
public boolean[] getBooleans(int rowId, int count) {
    boolean[] values = new boolean[count];
    for (int offset = 0; offset < count; offset++) {
        values[offset] = getBoolean(rowId + offset);
    }
    return values;
}
Gets boolean type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,218
/**
 * Gets byte values from [rowId, rowId + count). Values read from null slots
 * are undefined and can be anything.
 */
public byte[] getBytes(int rowId, int count) {
    byte[] values = new byte[count];
    for (int offset = 0; offset < count; offset++) {
        values[offset] = getByte(rowId + offset);
    }
    return values;
}
Gets byte type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,219
/**
 * Gets short values from [rowId, rowId + count). Values read from null slots
 * are undefined and can be anything.
 */
public short[] getShorts(int rowId, int count) {
    short[] values = new short[count];
    for (int offset = 0; offset < count; offset++) {
        values[offset] = getShort(rowId + offset);
    }
    return values;
}
Gets short type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,220
/**
 * Gets int values from [rowId, rowId + count). Values read from null slots
 * are undefined and can be anything.
 */
public int[] getInts(int rowId, int count) {
    int[] values = new int[count];
    for (int offset = 0; offset < count; offset++) {
        values[offset] = getInt(rowId + offset);
    }
    return values;
}
Gets int type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,221
/**
 * Gets long values from [rowId, rowId + count). Values read from null slots
 * are undefined and can be anything.
 */
public long[] getLongs(int rowId, int count) {
    long[] values = new long[count];
    for (int offset = 0; offset < count; offset++) {
        values[offset] = getLong(rowId + offset);
    }
    return values;
}
Gets long type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,222
/**
 * Gets float values from [rowId, rowId + count). Values read from null slots
 * are undefined and can be anything.
 */
public float[] getFloats(int rowId, int count) {
    float[] values = new float[count];
    for (int offset = 0; offset < count; offset++) {
        values[offset] = getFloat(rowId + offset);
    }
    return values;
}
Gets float type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,223
/**
 * Gets double values from [rowId, rowId + count). Values read from null slots
 * are undefined and can be anything.
 */
public double[] getDoubles(int rowId, int count) {
    double[] values = new double[count];
    for (int offset = 0; offset < count; offset++) {
        values[offset] = getDouble(rowId + offset);
    }
    return values;
}
Gets double type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,224
/**
 * Returns the calendar-interval value at rowId, or null when the slot is
 * null. Child vector 0 holds the months component and child vector 1 the
 * microseconds component.
 */
public final CalendarInterval getInterval(int rowId) {
    if (isNullAt(rowId)) {
        return null;
    }
    int months = getChild(0).getInt(rowId);
    long micros = getChild(1).getLong(rowId);
    return new CalendarInterval(months, micros);
}
Returns the calendar interval type value for rowId . If the slot for rowId is null it should return null .
14,225
public TProcessorFactory getAuthProcFactory ( ThriftCLIService service ) throws LoginException { if ( authTypeStr . equalsIgnoreCase ( AuthTypes . KERBEROS . getAuthName ( ) ) ) { return KerberosSaslHelper . getKerberosProcessorFactory ( saslServer , service ) ; } else { return PlainSaslHelper . getPlainProcessorFactor...
Returns the thrift processor factory for HiveServer2 running in binary mode
14,226
public static void loginFromKeytab ( HiveConf hiveConf ) throws IOException { String principal = hiveConf . getVar ( ConfVars . HIVE_SERVER2_KERBEROS_PRINCIPAL ) ; String keyTabFile = hiveConf . getVar ( ConfVars . HIVE_SERVER2_KERBEROS_KEYTAB ) ; if ( principal . isEmpty ( ) || keyTabFile . isEmpty ( ) ) { throw new I...
Perform kerberos login using the hadoop shim API if the configuration is available
14,227
public static UserGroupInformation loginFromSpnegoKeytabAndReturnUGI ( HiveConf hiveConf ) throws IOException { String principal = hiveConf . getVar ( ConfVars . HIVE_SERVER2_SPNEGO_PRINCIPAL ) ; String keyTabFile = hiveConf . getVar ( ConfVars . HIVE_SERVER2_SPNEGO_KEYTAB ) ; if ( principal . isEmpty ( ) || keyTabFile...
Perform SPNEGO login using the hadoop shim API if the configuration is available
14,228
public String getDelegationToken ( String owner , String renewer ) throws HiveSQLException { if ( saslServer == null ) { throw new HiveSQLException ( "Delegation token only supported over kerberos authentication" , "08S01" ) ; } try { String tokenStr = saslServer . getDelegationTokenWithService ( owner , renewer , HS2_...
Retrieve a delegation token for the given user .
14,229
public void cancelDelegationToken ( String delegationToken ) throws HiveSQLException { if ( saslServer == null ) { throw new HiveSQLException ( "Delegation token only supported over kerberos authentication" , "08S01" ) ; } try { saslServer . cancelDelegationToken ( delegationToken ) ; } catch ( IOException e ) { throw ...
Cancel the given delegation token .
14,230
public static String patternToRegex ( String pattern ) { if ( pattern == null ) { return ".*" ; } else { StringBuilder result = new StringBuilder ( pattern . length ( ) ) ; boolean escaped = false ; for ( int i = 0 , len = pattern . length ( ) ; i < len ; i ++ ) { char c = pattern . charAt ( i ) ; if ( escaped ) { if (...
Convert a SQL search pattern into an equivalent Java Regex .
14,231
/**
 * Verify that a service is in the expected state.
 *
 * @throws IllegalStateException when the current state differs from the
 *         expected one
 */
public static void ensureCurrentState(Service.STATE state, Service.STATE expectedState) {
    if (state != expectedState) {
        throw new IllegalStateException(
            "For this operation, the current service state must be "
                + expectedState + " instead of " + state);
    }
}
Verify that a service is in a given state .
14,232
/**
 * Initialize a service, first verifying that it has not yet been initialized
 * (state NOTINITED).
 */
public static void init(Service service, HiveConf configuration) {
    ensureCurrentState(service.getServiceState(), Service.STATE.NOTINITED);
    service.init(configuration);
}
Initialize a service .
14,233
/**
 * Start a service, first verifying that it has been initialized but not yet
 * started (state INITED).
 */
public static void start(Service service) {
    ensureCurrentState(service.getServiceState(), Service.STATE.INITED);
    service.start();
}
Start a service .
14,234
/**
 * Initialize then start a service.
 *
 * @param service       the service to deploy; must be in state NOTINITED
 * @param configuration the configuration passed to init
 */
public static void deploy(Service service, HiveConf configuration) {
    init(service, configuration);
    start(service);
}
Initialize then start a service .
14,235
/**
 * Stop a service. A null service, or one that is not currently STARTED, is
 * ignored.
 */
public static void stop(Service service) {
    if (service == null) {
        return;
    }
    if (service.getServiceState() == Service.STATE.STARTED) {
        service.stop();
    }
}
Stop a service .
14,236
/**
 * Advances to the next value, loading a new page when the current one is
 * exhausted. Returns true if the value is non-null, false when no values
 * remain in the column.
 */
private boolean next() throws IOException {
    if (valuesRead >= endOfPageValueCount) {
        if (valuesRead >= totalValueCount) {
            // Column fully consumed.
            return false;
        }
        readPage();
    }
    ++valuesRead;
    // A definition level equal to the maximum means the value is present
    // (non-null) at the leaf.
    return definitionLevelColumn.nextInt() == maxDefLevel;
}
Advances to the next value . Returns true if the value is non - null .
14,237
void readBatch ( int total , WritableColumnVector column ) throws IOException { int rowId = 0 ; WritableColumnVector dictionaryIds = null ; if ( dictionary != null ) { dictionaryIds = column . reserveDictionaryIds ( total ) ; } while ( total > 0 ) { int leftInPage = ( int ) ( endOfPageValueCount - valuesRead ) ; if ( l...
Reads total values from this columnReader into column .
14,238
private SchemaColumnConvertNotSupportedException constructConvertNotSupportedException ( ColumnDescriptor descriptor , WritableColumnVector column ) { return new SchemaColumnConvertNotSupportedException ( Arrays . toString ( descriptor . getPath ( ) ) , descriptor . getPrimitiveType ( ) . getPrimitiveTypeName ( ) . toS...
Helper function to construct exception for parquet schema mismatch .
14,239
public static EventLoopGroup createEventLoop ( IOMode mode , int numThreads , String threadPrefix ) { ThreadFactory threadFactory = createThreadFactory ( threadPrefix ) ; switch ( mode ) { case NIO : return new NioEventLoopGroup ( numThreads , threadFactory ) ; case EPOLL : return new EpollEventLoopGroup ( numThreads ,...
Creates a Netty EventLoopGroup based on the IOMode .
14,240
/**
 * Returns the ServerSocketChannel class matching the given IOMode.
 *
 * @throws IllegalArgumentException for an unrecognized mode
 */
public static Class<? extends ServerChannel> getServerChannelClass(IOMode mode) {
    if (mode == IOMode.NIO) {
        return NioServerSocketChannel.class;
    }
    if (mode == IOMode.EPOLL) {
        return EpollServerSocketChannel.class;
    }
    throw new IllegalArgumentException("Unknown io mode: " + mode);
}
Returns the correct ServerSocketChannel class based on IOMode .
14,241
/**
 * Returns the remote address on the channel, or "&lt;unknown remote&gt;"
 * when the channel or its remote address is unavailable.
 */
public static String getRemoteAddress(Channel channel) {
    if (channel == null || channel.remoteAddress() == null) {
        return "<unknown remote>";
    }
    return channel.remoteAddress().toString();
}
Returns the remote address on the channel or &lt ; unknown remote&gt ; if none exists .
14,242
/**
 * Returns the default number of threads for both the Netty client and server
 * thread pools, capped at MAX_DEFAULT_NETTY_THREADS. When numUsableCores is
 * not positive, the JVM's available-processor count is used instead.
 */
public static int defaultNumThreads(int numUsableCores) {
    int cores = numUsableCores > 0
        ? numUsableCores
        : Runtime.getRuntime().availableProcessors();
    return Math.min(cores, MAX_DEFAULT_NETTY_THREADS);
}
Returns the default number of threads for both the Netty client and server thread pools . If numUsableCores is 0 we will use Runtime to get an approximate number of available cores .
14,243
public static synchronized PooledByteBufAllocator getSharedPooledByteBufAllocator ( boolean allowDirectBufs , boolean allowCache ) { final int index = allowCache ? 0 : 1 ; if ( _sharedPooledByteBufAllocator [ index ] == null ) { _sharedPooledByteBufAllocator [ index ] = createPooledByteBufAllocator ( allowDirectBufs , ...
Returns the lazily created shared pooled ByteBuf allocator for the specified allowCache parameter value .
14,244
public static PooledByteBufAllocator createPooledByteBufAllocator ( boolean allowDirectBufs , boolean allowCache , int numCores ) { if ( numCores == 0 ) { numCores = Runtime . getRuntime ( ) . availableProcessors ( ) ; } return new PooledByteBufAllocator ( allowDirectBufs && PlatformDependent . directBufferPreferred ( ...
Create a pooled ByteBuf allocator but disables the thread - local cache . Thread - local caches are disabled for TransportClients because the ByteBufs are allocated by the event loop thread but released by the executor thread rather than the event loop thread . Those thread - local caches actually delay the recycling o...
14,245
public static String createCookieToken ( String clientUserName ) { StringBuffer sb = new StringBuffer ( ) ; sb . append ( COOKIE_CLIENT_USER_NAME ) . append ( COOKIE_KEY_VALUE_SEPARATOR ) . append ( clientUserName ) . append ( COOKIE_ATTR_SEPARATOR ) ; sb . append ( COOKIE_CLIENT_RAND_NUMBER ) . append ( COOKIE_KEY_VAL...
Creates and returns a HS2 cookie token .
14,246
public static String getUserNameFromCookieToken ( String tokenStr ) { Map < String , String > map = splitCookieToken ( tokenStr ) ; if ( ! map . keySet ( ) . equals ( COOKIE_ATTRIBUTES ) ) { LOG . error ( "Invalid token with missing attributes " + tokenStr ) ; return null ; } return map . get ( COOKIE_CLIENT_USER_NAME ...
Parses a cookie token to retrieve client user name .
14,247
private static Map < String , String > splitCookieToken ( String tokenStr ) { Map < String , String > map = new HashMap < String , String > ( ) ; StringTokenizer st = new StringTokenizer ( tokenStr , COOKIE_ATTR_SEPARATOR ) ; while ( st . hasMoreTokens ( ) ) { String part = st . nextToken ( ) ; int separator = part . i...
Splits the cookie token into attributes pairs .
14,248
protected void validateFetchOrientation ( FetchOrientation orientation , EnumSet < FetchOrientation > supportedOrientations ) throws HiveSQLException { if ( ! supportedOrientations . contains ( orientation ) ) { throw new HiveSQLException ( "The fetch type " + orientation . toString ( ) + " is not supported for this re...
Verify if the given fetch orientation is part of the supported orientation types .
14,249
public static ByteBuffer allocateDirectBuffer ( int size ) { try { if ( CLEANER_CREATE_METHOD == null ) { try { return ByteBuffer . allocateDirect ( size ) ; } catch ( OutOfMemoryError oome ) { throw new OutOfMemoryError ( "Failed to allocate direct buffer (" + oome . getMessage ( ) + "); try increasing -XX:MaxDirectMe...
Allocate a DirectByteBuffer potentially bypassing the JVM s MaxDirectMemorySize limit .
14,250
/**
 * Writes the content of a byte array into a memory address identified by an
 * object and an offset. The target memory must already be allocated and
 * large enough to hold all bytes of {@code src}.
 *
 * @param src          source bytes to copy
 * @param target       base object of the destination (null for off-heap)
 * @param targetOffset absolute offset within the destination
 */
public static void writeToMemory(byte[] src, Object target, long targetOffset) {
    Platform.copyMemory(src, Platform.BYTE_ARRAY_OFFSET, target, targetOffset, src.length);
}
Writes the content of a byte array into a memory address identified by an object and an offset . The target memory address must already been allocated and have enough space to hold all the bytes in this string .
14,251
public static OffHeapColumnVector [ ] allocateColumns ( int capacity , StructField [ ] fields ) { OffHeapColumnVector [ ] vectors = new OffHeapColumnVector [ fields . length ] ; for ( int i = 0 ; i < fields . length ; i ++ ) { vectors [ i ] = new OffHeapColumnVector ( capacity , fields [ i ] . dataType ( ) ) ; } return...
Allocates columns to store elements of each field off heap . Capacity is the initial capacity of the vector and it will grow as necessary . Capacity is in number of elements not number of bytes .
14,252
/**
 * Appends value[offset, offset + length) to the array-data buffer and
 * records the (length, position-within-buffer) pair for rowId in the
 * off-heap length and offset arrays (4 bytes per row each).
 *
 * @return the position within the array-data buffer where the bytes were
 *         written
 */
public int putByteArray(int rowId, byte[] value, int offset, int length) {
    int result = arrayData().appendBytes(length, value, offset);
    // 4L * rowId: each row stores one 4-byte int in each side array.
    Platform.putInt(null, lengthData + 4L * rowId, length);
    Platform.putInt(null, offsetData + 4L * rowId, result);
    return result;
}
APIs dealing with ByteArrays
14,253
protected void reserveInternal ( int newCapacity ) { int oldCapacity = ( nulls == 0L ) ? 0 : capacity ; if ( isArray ( ) || type instanceof MapType ) { this . lengthData = Platform . reallocateMemory ( lengthData , oldCapacity * 4L , newCapacity * 4L ) ; this . offsetData = Platform . reallocateMemory ( offsetData , ol...
Split out the slow path .
14,254
/**
 * Initializes the internal state for decoding ints of the given bit width.
 *
 * @param bitWidth number of bits per packed value; must be in [0, 32]
 */
private void init(int bitWidth) {
    Preconditions.checkArgument(bitWidth >= 0 && bitWidth <= 32, "bitWidth must be >= 0 and <= 32");
    this.bitWidth = bitWidth;
    // Number of whole bytes needed to hold bitWidth bits, rounded up.
    this.bytesWidth = BytesUtils.paddedByteCountFromBits(bitWidth);
    this.packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);
}
Initializes the internal state for decoding ints of bitWidth .
14,255
public void readIntegers ( int total , WritableColumnVector c , int rowId ) { int left = total ; while ( left > 0 ) { if ( this . currentCount == 0 ) this . readNextGroup ( ) ; int n = Math . min ( left , this . currentCount ) ; switch ( mode ) { case RLE : c . putInts ( rowId , n , currentValue ) ; break ; case PACKED...
Since this is only used to decode dictionary IDs only decoding integers is supported .
14,256
/**
 * Reads the next varint-encoded int (unsigned LEB128: 7 payload bits per
 * byte, continuation bit 0x80 set on every byte except the last).
 *
 * Bug fix: the original loop never checked for end of stream. When
 * {@code in.read()} returns -1, the continuation bit of -1 is set, so the
 * loop would spin forever accumulating garbage. We now fail fast instead.
 *
 * @throws java.io.EOFException if the stream ends mid-varint
 * @throws IOException          if the underlying stream fails
 */
private int readUnsignedVarInt() throws IOException {
    int value = 0;
    int shift = 0;
    int b;
    do {
        b = in.read();
        if (b < 0) {
            throw new java.io.EOFException("Reached end of stream while reading an unsigned varint");
        }
        value |= (b & 0x7F) << shift;
        shift += 7;
    } while ((b & 0x80) != 0);
    return value;
}
Reads the next varint encoded int .
14,257
private int readIntLittleEndianPaddedOnBitWidth ( ) throws IOException { switch ( bytesWidth ) { case 0 : return 0 ; case 1 : return in . read ( ) ; case 2 : { int ch2 = in . read ( ) ; int ch1 = in . read ( ) ; return ( ch1 << 8 ) + ch2 ; } case 3 : { int ch3 = in . read ( ) ; int ch2 = in . read ( ) ; int ch1 = in . ...
Reads the next byteWidth little endian int .
14,258
private void readNextGroup ( ) { try { int header = readUnsignedVarInt ( ) ; this . mode = ( header & 1 ) == 0 ? MODE . RLE : MODE . PACKED ; switch ( mode ) { case RLE : this . currentCount = header >>> 1 ; this . currentValue = readIntLittleEndianPaddedOnBitWidth ( ) ; return ; case PACKED : int numGroups = header >>...
Reads the next group .
14,259
/**
 * Change to a new state and notify all registered listeners. Only invoked
 * from synchronized methods, which avoids having to clone the listener list;
 * listener callbacks should therefore be short-lived, as they delay the
 * state transition.
 */
private void changeState(Service.STATE newState) {
    state = newState;
    for (ServiceStateChangeListener listener : listeners) {
        listener.stateChanged(this);
    }
}
Change to a new state and notify all listeners . This is a private method that is only invoked from synchronized methods which avoid having to clone the listener list . It does imply that the state change listener methods should be short lived as they will delay the state transition .
14,260
/**
 * Allocates a LongArray of {@code size} longs. This may throw
 * SparkOutOfMemoryError when the memory cannot be obtained, or
 * TooLargePageException when the array does not fit in a single page;
 * callers must handle both, or keep {@code size} small enough.
 */
public LongArray allocateArray(long size) {
    final long bytesNeeded = size * 8L; // 8 bytes per long
    MemoryBlock page = taskMemoryManager.allocatePage(bytesNeeded, this);
    if (page == null || page.size() < bytesNeeded) {
        throwOom(page, bytesNeeded);
    }
    used += bytesNeeded;
    return new LongArray(page);
}
Allocates a LongArray of size . Note that this method may throw SparkOutOfMemoryError if Spark doesn t have enough memory for this allocation or throw TooLargePageException if this LongArray is too large to fit in a single page . The caller side should take care of these two exceptions or make sure the size is small en...
14,261
/**
 * Allocate a memory block with at least {@code required} bytes; the request
 * is never smaller than the configured page size.
 */
protected MemoryBlock allocatePage(long required) {
    final long requestSize = Math.max(pageSize, required);
    MemoryBlock page = taskMemoryManager.allocatePage(requestSize, this);
    if (page == null || page.size() < required) {
        throwOom(page, required);
    }
    used += page.size();
    return page;
}
Allocate a memory block with at least required bytes .
14,262
public long transferTo ( final WritableByteChannel target , final long position ) throws IOException { Preconditions . checkArgument ( position == totalBytesTransferred , "Invalid position." ) ; long writtenHeader = 0 ; if ( header . readableBytes ( ) > 0 ) { writtenHeader = copyByteBuf ( header , target ) ; totalBytes...
This code is more complicated than you would think because we might require multiple transferTo invocations in order to transfer a single MessageWithHeader to avoid busy waiting .
14,263
public void pointTo ( Object baseObject , long baseOffset , int sizeInBytes ) { assert numFields >= 0 : "numFields (" + numFields + ") should >= 0" ; assert sizeInBytes % 8 == 0 : "sizeInBytes (" + sizeInBytes + ") should be a multiple of 8" ; this . baseObject = baseObject ; this . baseOffset = baseOffset ; this . siz...
Update this UnsafeRow to point to different backing data .
14,264
public void setDecimal ( int ordinal , Decimal value , int precision ) { assertIndexIsValid ( ordinal ) ; if ( precision <= Decimal . MAX_LONG_DIGITS ( ) ) { if ( value == null ) { setNullAt ( ordinal ) ; } else { setLong ( ordinal , value . toUnscaledLong ( ) ) ; } } else { long cursor = getLong ( ordinal ) >>> 32 ; a...
Updates the decimal column .
14,265
public UnsafeRow copy ( ) { UnsafeRow rowCopy = new UnsafeRow ( numFields ) ; final byte [ ] rowDataCopy = new byte [ sizeInBytes ] ; Platform . copyMemory ( baseObject , baseOffset , rowDataCopy , Platform . BYTE_ARRAY_OFFSET , sizeInBytes ) ; rowCopy . pointTo ( rowDataCopy , Platform . BYTE_ARRAY_OFFSET , sizeInByte...
Copies this row returning a self - contained UnsafeRow that stores its data in an internal byte array rather than referencing data stored in a data page .
14,266
/**
 * Creates an empty UnsafeRow backed by a fresh byte array of numBytes bytes
 * with the specified field count. The returned row holds no valid data until
 * copyFrom is called on it.
 */
public static UnsafeRow createFromByteArray(int numBytes, int numFields) {
    final UnsafeRow row = new UnsafeRow(numFields);
    row.pointTo(new byte[numBytes], numBytes);
    return row;
}
Creates an empty UnsafeRow from a byte array with specified numBytes and numFields . The returned row is invalid until we call copyFrom on it .
14,267
/**
 * Write this UnsafeRow's underlying bytes to the given OutputStream.
 *
 * Bug fix: the original only handled rows whose base object is an on-heap
 * byte[], silently writing NOTHING for off-heap rows — despite accepting a
 * {@code writeBuffer} parameter that it never used. Off-heap data is now
 * staged through {@code writeBuffer} in chunks.
 *
 * @param out         destination stream
 * @param writeBuffer staging buffer used only for off-heap rows; must be
 *                    non-empty
 */
public void writeToStream(OutputStream out, byte[] writeBuffer) throws IOException {
    if (baseObject instanceof byte[]) {
        // On-heap: the row's bytes live directly in a byte[]; write them in one call.
        int offsetInByteArray = (int) (baseOffset - Platform.BYTE_ARRAY_OFFSET);
        out.write((byte[]) baseObject, offsetInByteArray, sizeInBytes);
    } else {
        // Off-heap: copy through writeBuffer chunk by chunk.
        int dataRemaining = sizeInBytes;
        long rowReadPosition = baseOffset;
        while (dataRemaining > 0) {
            int toTransfer = Math.min(writeBuffer.length, dataRemaining);
            Platform.copyMemory(
                baseObject, rowReadPosition, writeBuffer, Platform.BYTE_ARRAY_OFFSET, toTransfer);
            out.write(writeBuffer, 0, toTransfer);
            rowReadPosition += toTransfer;
            dataRemaining -= toTransfer;
        }
    }
}
Write this UnsafeRow s underlying bytes to the given OutputStream .
14,268
public void doBootstrap ( TransportClient client , Channel channel ) { SparkSaslClient saslClient = new SparkSaslClient ( appId , secretKeyHolder , conf . saslEncryption ( ) ) ; try { byte [ ] payload = saslClient . firstToken ( ) ; while ( ! saslClient . isComplete ( ) ) { SaslMessage msg = new SaslMessage ( appId , p...
Performs SASL authentication by sending a token and then proceeding with the SASL challenge - response tokens until we either successfully authenticate or throw an exception due to mismatch .
14,269
SessionHandle getSessionHandle ( TOpenSessionReq req , TOpenSessionResp res ) throws HiveSQLException , LoginException , IOException { String userName = getUserName ( req ) ; String ipAddress = getIpAddress ( ) ; TProtocolVersion protocol = getMinVersion ( CLIService . SERVER_VERSION , req . getClient_protocol ( ) ) ; ...
Create a session handle
14,270
private String getProxyUser ( String realUser , Map < String , String > sessionConf , String ipAddress ) throws HiveSQLException { String proxyUser = null ; if ( cliService . getHiveConf ( ) . getVar ( ConfVars . HIVE_SERVER2_TRANSPORT_MODE ) . equalsIgnoreCase ( "http" ) ) { proxyUser = SessionManager . getProxyUserNa...
If the proxy user name is provided then check privileges to substitute the user .
14,271
public boolean getBoolean ( String key , boolean defaultValue ) { String value = get ( key ) ; if ( value == null ) { return defaultValue ; } else if ( value . equalsIgnoreCase ( "true" ) ) { return true ; } else if ( value . equalsIgnoreCase ( "false" ) ) { return false ; } else { throw new IllegalArgumentException ( ...
Returns the boolean value to which the specified key is mapped or defaultValue if there is no mapping for the key . The key match is case - insensitive .
14,272
/**
 * Returns the double value to which the specified key is mapped, or
 * defaultValue when there is no mapping. The key match is case-insensitive.
 */
public double getDouble(String key, double defaultValue) {
    String raw = get(key);
    if (raw == null) {
        return defaultValue;
    }
    return Double.parseDouble(raw);
}
Returns the double value to which the specified key is mapped or defaultValue if there is no mapping for the key . The key match is case - insensitive .
14,273
/**
 * Sets the bit at {@code index}. Returns true if the bit changed value,
 * i.e. it was previously clear.
 */
boolean set(long index) {
    if (get(index)) {
        return false;
    }
    // Word index is index / 64; the left-shift amount is implicitly index % 64.
    data[(int) (index >>> 6)] |= (1L << index);
    bitCount++;
    return true;
}
Returns true if the bit changed value .
14,274
/**
 * Combines the two BitArrays using bitwise OR, recomputing the cached bit
 * count from the merged words. Both arrays must have backing arrays of equal
 * length.
 */
void putAll(BitArray array) {
    assert data.length == array.data.length : "BitArrays must be of equal length when merging";
    long mergedCount = 0;
    for (int w = 0; w < data.length; w++) {
        data[w] |= array.data[w];
        mergedCount += Long.bitCount(data[w]);
    }
    this.bitCount = mergedCount;
}
Combines the two BitArrays using bitwise OR .
14,275
/**
 * Wrap the given application handler in a SaslRpcHandler that will handle
 * the initial SASL negotiation before delegating to it.
 */
public RpcHandler doBootstrap(Channel channel, RpcHandler rpcHandler) {
    return new SaslRpcHandler(conf, channel, rpcHandler, secretKeyHolder);
}
Wrap the given application handler in a SaslRpcHandler that will handle the initial SASL negotiation .
14,276
/**
 * Get the index record (offset and length) for a particular reducer's
 * partition. The offsets list holds cumulative byte offsets, so the length
 * is the gap to the next entry.
 */
public ShuffleIndexRecord getIndex(int reduceId) {
    long start = offsets.get(reduceId);
    long end = offsets.get(reduceId + 1);
    return new ShuffleIndexRecord(start, end - start);
}
Get index offset for a particular reducer .
14,277
private void grow ( int neededSize ) { if ( neededSize > ARRAY_MAX - totalSize ( ) ) { throw new UnsupportedOperationException ( "Cannot grow internal buffer by size " + neededSize + " because the size after growing " + "exceeds size limitation " + ARRAY_MAX ) ; } final int length = totalSize ( ) + neededSize ; if ( bu...
Grows the buffer by at least neededSize
14,278
public int connectionTimeoutMs ( ) { long defaultNetworkTimeoutS = JavaUtils . timeStringAsSec ( conf . get ( "spark.network.timeout" , "120s" ) ) ; long defaultTimeoutMs = JavaUtils . timeStringAsSec ( conf . get ( SPARK_NETWORK_IO_CONNECTIONTIMEOUT_KEY , defaultNetworkTimeoutS + "s" ) ) * 1000 ; return ( int ) defaul...
Connect timeout in milliseconds . Default 120 secs .
14,279
List < String > buildJavaCommand ( String extraClassPath ) throws IOException { List < String > cmd = new ArrayList < > ( ) ; String [ ] candidateJavaHomes = new String [ ] { javaHome , childEnv . get ( "JAVA_HOME" ) , System . getenv ( "JAVA_HOME" ) , System . getProperty ( "java.home" ) } ; for ( String javaHome : ca...
Builds a list of arguments to run java .
14,280
private void addToClassPath ( Set < String > cp , String entries ) { if ( isEmpty ( entries ) ) { return ; } String [ ] split = entries . split ( Pattern . quote ( File . pathSeparator ) ) ; for ( String entry : split ) { if ( ! isEmpty ( entry ) ) { if ( new File ( entry ) . isDirectory ( ) && ! entry . endsWith ( Fil...
Adds entries to the classpath .
14,281
private Properties loadPropertiesFile ( ) throws IOException { Properties props = new Properties ( ) ; File propsFile ; if ( propertiesFile != null ) { propsFile = new File ( propertiesFile ) ; checkArgument ( propsFile . isFile ( ) , "Invalid properties file '%s'." , propertiesFile ) ; } else { propsFile = new File ( ...
Loads the configuration file for the application if it exists . This is either the user - specified properties file or the spark - defaults . conf file under the Spark configuration directory .
14,282
public void initialize ( InputSplit inputSplit , TaskAttemptContext taskAttemptContext ) throws IOException { FileSplit fileSplit = ( FileSplit ) inputSplit ; Configuration conf = taskAttemptContext . getConfiguration ( ) ; Reader reader = OrcFile . createReader ( fileSplit . getPath ( ) , OrcFile . readerOptions ( con...
Initialize ORC file reader and batch record reader . Please note that initBatch is needed to be called after this .
14,283
public void initBatch ( TypeDescription orcSchema , StructField [ ] requiredFields , int [ ] requestedDataColIds , int [ ] requestedPartitionColIds , InternalRow partitionValues ) { wrap = new VectorizedRowBatchWrap ( orcSchema . createRowBatch ( capacity ) ) ; assert ( ! wrap . batch ( ) . selectedInUse ) ; assert ( r...
Initialize columnar batch by setting required schema and partition information . With this information this creates ColumnarBatch with the full schema .
14,284
private boolean nextBatch ( ) throws IOException { recordReader . nextBatch ( wrap . batch ( ) ) ; int batchSize = wrap . batch ( ) . size ; if ( batchSize == 0 ) { return false ; } columnarBatch . setNumRows ( batchSize ) ; for ( int i = 0 ; i < requiredFields . length ; i ++ ) { if ( requestedDataColIds [ i ] != - 1 ...
Return true if there exists more data in the next batch . If exists prepare the next batch by copying from ORC VectorizedRowBatch columns to Spark ColumnarBatch columns .
14,285
public static long parseSecondNano ( String secondNano ) throws IllegalArgumentException { String [ ] parts = secondNano . split ( "\\." ) ; if ( parts . length == 1 ) { return toLongWithRange ( "second" , parts [ 0 ] , Long . MIN_VALUE / MICROS_PER_SECOND , Long . MAX_VALUE / MICROS_PER_SECOND ) * MICROS_PER_SECOND ; ...
Parses a second_nano string in ss.nnnnnnnnn format to microseconds.
14,286
/**
 * Installs the encryption and decryption handlers at the head of the
 * channel's pipeline so that all traffic passes through them.
 *
 * @param ch the channel whose pipeline receives the crypto handlers
 * @throws IOException declared for handler setup failures
 */
public void addToChannel(Channel ch) throws IOException {
  // Each addFirst pushes to the head, so the decryption handler ends up
  // before the encryption handler in the pipeline, as in the original
  // chained form.
  ch.pipeline().addFirst(ENCRYPTION_HANDLER_NAME, new EncryptionHandler(this));
  ch.pipeline().addFirst(DECRYPTION_HANDLER_NAME, new DecryptionHandler(this));
}
Add handlers to channel .
14,287
public void close ( ) { for ( ClientPool clientPool : connectionPool . values ( ) ) { for ( int i = 0 ; i < clientPool . clients . length ; i ++ ) { TransportClient client = clientPool . clients [ i ] ; if ( client != null ) { clientPool . clients [ i ] = null ; JavaUtils . closeQuietly ( client ) ; } } } connectionPoo...
Close all connections in the connection pool and shutdown the worker thread pool .
14,288
/**
 * Reads bytes from the given buffer into the internal byte array, starting
 * at the current offset, without overrunning the array.
 *
 * @param src source buffer to drain
 * @return the number of bytes actually transferred (may be less than
 *         {@code src.remaining()} if the internal array is nearly full)
 */
public int write(ByteBuffer src) {
  int capacityLeft = data.length - offset;
  int n = Math.min(src.remaining(), capacityLeft);
  src.get(data, offset, n);
  offset += n;
  return n;
}
Reads from the given buffer into the internal byte array .
14,289
private static void setConf ( String varname , String key , String varvalue , boolean register ) throws IllegalArgumentException { HiveConf conf = SessionState . get ( ) . getConf ( ) ; String value = new VariableSubstitution ( ) . substitute ( conf , varvalue ) ; if ( conf . getBoolVar ( HiveConf . ConfVars . HIVECONF...
Returns a non-null string if validation fails.
14,290
protected synchronized void release ( boolean userAccess ) { SessionState . detachSession ( ) ; if ( ThreadWithGarbageCleanup . currentThread ( ) instanceof ThreadWithGarbageCleanup ) { ThreadWithGarbageCleanup currentThread = ( ThreadWithGarbageCleanup ) ThreadWithGarbageCleanup . currentThread ( ) ; currentThread . c...
1. We'll remove the ThreadLocal SessionState as this thread might now serve other requests. 2. We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup when this thread is garbage collected later.
14,291
/**
 * Extracts the real user encoded in the given delegation token string by
 * delegating to the auth factory.
 *
 * @param authFactory factory that knows how to decode the token
 * @param tokenStr the encoded delegation token
 * @return the user the token was issued for
 * @throws HiveSQLException if the token cannot be decoded
 */
private String getUserFromToken ( HiveAuthFactory authFactory , String tokenStr ) throws HiveSQLException { return authFactory . getUserFromToken ( tokenStr ) ; }
extract the real user from the given token string
14,292
public void setSessionUGI ( String owner ) throws HiveSQLException { if ( owner == null ) { throw new HiveSQLException ( "No username provided for impersonation" ) ; } if ( UserGroupInformation . isSecurityEnabled ( ) ) { try { sessionUgi = UserGroupInformation . createProxyUser ( owner , UserGroupInformation . getLogi...
setup appropriate UGI for the session
14,293
public void close ( ) throws HiveSQLException { try { acquire ( true ) ; cancelDelegationToken ( ) ; } finally { try { super . close ( ) ; } finally { try { FileSystem . closeAllForUGI ( sessionUgi ) ; } catch ( IOException ioe ) { throw new HiveSQLException ( "Could not clean up file-system handles for UGI: " + sessio...
Close the file systems for the session and remove them from the FileSystem cache. Cancel the session's delegation token and close the metastore connection.
14,294
private void setDelegationToken ( String delegationTokenStr ) throws HiveSQLException { this . delegationTokenStr = delegationTokenStr ; if ( delegationTokenStr != null ) { getHiveConf ( ) . set ( "hive.metastore.token.signature" , HS2TOKEN ) ; try { Utils . setTokenStr ( sessionUgi , delegationTokenStr , HS2TOKEN ) ; ...
Enables the delegation token for the session: saves the token string and sets token.signature in the Hive conf. The metastore client uses this token.signature to determine whether to use Kerberos or the delegation token.
14,295
/**
 * Cancels the session's delegation token, if one was obtained from the
 * metastore, and closes the current thread-local Hive connection.
 *
 * @throws HiveSQLException if the metastore rejects the cancellation
 */
private void cancelDelegationToken() throws HiveSQLException {
  // Nothing to do when no delegation token was set for this session.
  if (delegationTokenStr == null) {
    return;
  }
  try {
    Hive.get(getHiveConf()).cancelDelegationToken(delegationTokenStr);
  } catch (HiveException e) {
    throw new HiveSQLException("Couldn't cancel delegation token", e);
  }
  // Only reached on successful cancellation, matching the original flow
  // where the throw above skips this cleanup.
  Hive.closeCurrent();
}
If the session has a delegation token obtained from the metastore then cancel it
14,296
/**
 * Release N bytes of execution memory previously acquired by the given
 * MemoryConsumer, returning it to the memory manager.
 *
 * @param size number of bytes to release
 * @param consumer the consumer that held the memory; its memory mode is
 *                 passed through so the correct pool is credited
 */
public void releaseExecutionMemory ( long size , MemoryConsumer consumer ) { logger . debug ( "Task {} release {} from {}" , taskAttemptId , Utils . bytesToString ( size ) , consumer ) ; memoryManager . releaseExecutionMemory ( size , taskAttemptId , consumer . getMode ( ) ) ; }
Release N bytes of execution memory for a MemoryConsumer .
14,297
public void showMemoryUsage ( ) { logger . info ( "Memory used in task " + taskAttemptId ) ; synchronized ( this ) { long memoryAccountedForByConsumers = 0 ; for ( MemoryConsumer c : consumers ) { long totalMemUsage = c . getUsed ( ) ; memoryAccountedForByConsumers += totalMemUsage ; if ( totalMemUsage > 0 ) { logger ....
Dump the memory usage of all consumers .
14,298
public MemoryBlock allocatePage ( long size , MemoryConsumer consumer ) { assert ( consumer != null ) ; assert ( consumer . getMode ( ) == tungstenMemoryMode ) ; if ( size > MAXIMUM_PAGE_SIZE_BYTES ) { throw new TooLargePageException ( size ) ; } long acquired = acquireExecutionMemory ( size , consumer ) ; if ( acquire...
Allocate a block of memory that will be tracked in the MemoryManager's page table; this is intended for allocating large blocks of Tungsten memory that will be shared between operators.
14,299
/**
 * Given a memory page and an offset within that page, encode this address
 * into a 64-bit long. The encoded address remains valid as long as the
 * corresponding page has not been freed.
 *
 * @param page the page the address falls in
 * @param offsetInPage offset within the page; in off-heap mode this is an
 *                     absolute address and is rebased against the page's
 *                     base offset before encoding
 * @return the packed page-number/offset address
 */
public long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage) {
  long relativeOffset = offsetInPage;
  if (tungstenMemoryMode == MemoryMode.OFF_HEAP) {
    // Off-heap callers pass raw addresses; store them page-relative.
    relativeOffset -= page.getBaseOffset();
  }
  return encodePageNumberAndOffset(page.pageNumber, relativeOffset);
}
Given a memory page and offset within that page encode this address into a 64 - bit long . This address will remain valid as long as the corresponding page has not been freed .