idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
14,200
/**
 * Parses this UTF8String as a signed base-10 int and stores the result in {@code intWrapper}.
 *
 * Accepts an optional leading '+' or '-', digits, and an optional '.' followed by more
 * digits (the fractional part is validated but discarded). Returns false on empty input,
 * a lone sign, any non-digit character, or overflow; intWrapper is only written on success.
 */
public boolean toInt ( IntWrapper intWrapper ) {
  if ( numBytes == 0 ) {
    return false ;
  }
  byte b = getByte ( 0 ) ;
  final boolean negative = b == '-' ;
  int offset = 0 ;
  if ( negative || b == '+' ) {
    offset ++ ;
    if ( numBytes == 1 ) {
      // a bare sign with no digits is not a number
      return false ;
    }
  }
  final byte separator = '.' ;
  final int radix = 10 ;
  // Accumulate in the NEGATIVE range: Integer.MIN_VALUE has a larger magnitude than
  // Integer.MAX_VALUE, so negative accumulation can represent both extremes without overflow.
  final int stopValue = Integer . MIN_VALUE / radix ;
  int result = 0 ;
  while ( offset < numBytes ) {
    b = getByte ( offset ) ;
    offset ++ ;
    if ( b == separator ) {
      // Integral part done; the fractional digits are validated by the loop below.
      break ;
    }
    int digit ;
    if ( b >= '0' && b <= '9' ) {
      digit = b - '0' ;
    } else {
      return false ;
    }
    // result * radix would underflow past Integer.MIN_VALUE
    if ( result < stopValue ) {
      return false ;
    }
    result = result * radix - digit ;
    // underflow wrapped around to a positive value
    if ( result > 0 ) {
      return false ;
    }
  }
  // Everything after the separator must still be digits, but is otherwise ignored.
  while ( offset < numBytes ) {
    byte currentByte = getByte ( offset ) ;
    if ( currentByte < '0' || currentByte > '9' ) {
      return false ;
    }
    offset ++ ;
  }
  if ( ! negative ) {
    result = - result ;
    if ( result < 0 ) {
      // magnitude was Integer.MIN_VALUE but no '-' sign was given
      return false ;
    }
  }
  intWrapper . value = result ;
  return true ;
}
Parses this UTF8String to int .
14,201
/**
 * Registers this executor with an external shuffle server. The registration tells the
 * shuffle server where and how this executor stores its shuffle files.
 *
 * @param host shuffle server host
 * @param port shuffle server port
 * @param execId identifier of the executor being registered
 * @param executorInfo description of the executor's shuffle file layout
 * @throws IOException if the RPC fails
 * @throws InterruptedException if the synchronous RPC is interrupted
 */
public void registerWithShuffleServer (
    String host ,
    int port ,
    String execId ,
    ExecutorShuffleInfo executorInfo ) throws IOException , InterruptedException {
  checkInit ( ) ;
  // Unmanaged client: caller-scoped, closed by try-with-resources rather than pooled.
  try ( TransportClient client = clientFactory . createUnmanagedClient ( host , port ) ) {
    ByteBuffer message = new RegisterExecutor ( appId , execId , executorInfo ) . toByteBuffer ( ) ;
    client . sendRpcSync ( message , registrationTimeoutMs ) ;
  }
}
Registers this executor with an external shuffle server . This registration is required to inform the shuffle server about where and how we store our shuffle files .
14,202
/**
 * Sorts and spills the current in-memory records to disk in response to memory pressure.
 *
 * @param size requested number of bytes to free (advisory; not used directly here)
 * @param trigger the consumer that requested the spill
 * @return number of bytes actually freed
 * @throws IOException if writing the spill file fails
 */
public long spill ( long size , MemoryConsumer trigger ) throws IOException {
  if ( trigger != this ) {
    // Spill requested by some other consumer: the only memory we can release on
    // its behalf is whatever an active reading iterator holds.
    if ( readingIterator != null ) {
      return readingIterator . spill ( ) ;
    }
    return 0L ;
  }
  if ( inMemSorter == null || inMemSorter . numRecords ( ) <= 0 ) {
    return 0L ;
  }
  logger . info ( "Thread {} spilling sort data of {} to disk ({} {} so far)" ,
    Thread . currentThread ( ) . getId ( ) ,
    Utils . bytesToString ( getMemoryUsage ( ) ) ,
    spillWriters . size ( ) ,
    spillWriters . size ( ) > 1 ? " times" : " time" ) ;
  ShuffleWriteMetrics writeMetrics = new ShuffleWriteMetrics ( ) ;
  final UnsafeSorterSpillWriter spillWriter =
    new UnsafeSorterSpillWriter ( blockManager , fileBufferSizeBytes , writeMetrics ,
      inMemSorter . numRecords ( ) ) ;
  spillWriters . add ( spillWriter ) ;
  spillIterator ( inMemSorter . getSortedIterator ( ) , spillWriter ) ;
  final long spillSize = freeMemory ( ) ;
  // Reset rather than discard the in-memory sorter so it can keep accepting records.
  inMemSorter . reset ( ) ;
  taskContext . taskMetrics ( ) . incMemoryBytesSpilled ( spillSize ) ;
  taskContext . taskMetrics ( ) . incDiskBytesSpilled ( writeMetrics . bytesWritten ( ) ) ;
  totalSpillBytes += spillSize ;
  return spillSize ;
}
Sort and spill the current records in response to memory pressure .
14,203
/**
 * Returns the total memory usage of this sorter: all allocated data pages plus the
 * in-memory sorter's pointer array (zero if the sorter has been released).
 */
private long getMemoryUsage ( ) {
  long pagesTotal = 0L ;
  for ( MemoryBlock block : allocatedPages ) {
    pagesTotal += block . size ( ) ;
  }
  long sorterUsage = ( inMemSorter != null ) ? inMemSorter . getMemoryUsage ( ) : 0 ;
  return sorterUsage + pagesTotal ;
}
Return the total memory usage of this sorter, including the data pages and the sorter's pointer array.
14,204
/**
 * Frees all of this sorter's data pages and resets the page cursor.
 *
 * @return the number of bytes released
 */
private long freeMemory ( ) {
  // Record the peak before tearing the pages down, otherwise the metric is lost.
  updatePeakMemoryUsed ( ) ;
  long freed = 0L ;
  for ( MemoryBlock page : allocatedPages ) {
    freed += page . size ( ) ;
    freePage ( page ) ;
  }
  allocatedPages . clear ( ) ;
  currentPage = null ;
  pageCursor = 0 ;
  return freed ;
}
Free this sorter s data pages .
14,205
/**
 * Deletes any spill files created by this sorter. Failures to delete are logged,
 * not thrown — cleanup is best-effort.
 */
private void deleteSpillFiles ( ) {
  for ( UnsafeSorterSpillWriter writer : spillWriters ) {
    File spillFile = writer . getFile ( ) ;
    if ( spillFile == null || ! spillFile . exists ( ) ) {
      continue ;
    }
    if ( ! spillFile . delete ( ) ) {
      logger . error ( "Was unable to delete spill file {}" , spillFile . getAbsolutePath ( ) ) ;
    }
  }
}
Deletes any spill files created by this sorter .
14,206
/**
 * Checks whether there is enough space to insert an additional record into the sort
 * pointer array and grows the array if required. If the memory cannot be obtained,
 * the in-memory data is spilled to disk instead.
 *
 * @throws IOException if spilling fails
 */
private void growPointerArrayIfNecessary ( ) throws IOException {
  assert ( inMemSorter != null ) ;
  if ( ! inMemSorter . hasSpaceForAnotherRecord ( ) ) {
    long used = inMemSorter . getMemoryUsage ( ) ;
    LongArray array ;
    try {
      // Request double the current usage; this allocation may itself trigger spilling.
      array = allocateArray ( used / 8 * 2 ) ;
    } catch ( TooLargePageException e ) {
      // The pointer array can no longer fit in a single page; spill to free space instead.
      spill ( ) ;
      return ;
    } catch ( SparkOutOfMemoryError e ) {
      // The allocation may have spilled us as a side effect, which frees pointer-array
      // space; only propagate the OOM if we are still full.
      if ( ! inMemSorter . hasSpaceForAnotherRecord ( ) ) {
        logger . error ( "Unable to grow the pointer array" ) ;
        throw e ;
      }
      return ;
    }
    // If the allocation triggered a spill, the old array now has room, so the new
    // one is unnecessary; otherwise migrate the pointers into the larger array.
    if ( inMemSorter . hasSpaceForAnotherRecord ( ) ) {
      freeArray ( array ) ;
    } else {
      inMemSorter . expandPointerArray ( array ) ;
    }
  }
}
Checks whether there is enough space to insert an additional record in to the sort pointer array and grows the array if additional space is required . If the required space cannot be obtained then the in - memory data will be spilled to disk .
14,207
/**
 * Ensures the current page can hold {@code required} more bytes, allocating a fresh
 * page from the memory manager (which may spill) when it cannot.
 *
 * @param required number of bytes about to be written at the page cursor
 */
private void acquireNewPageIfNecessary ( int required ) {
  boolean noPage = currentPage == null ;
  boolean pageFull = ! noPage
      && pageCursor + required > currentPage . getBaseOffset ( ) + currentPage . size ( ) ;
  if ( noPage || pageFull ) {
    currentPage = allocatePage ( required ) ;
    pageCursor = currentPage . getBaseOffset ( ) ;
    allocatedPages . add ( currentPage ) ;
  }
}
Allocates more memory in order to insert an additional record . This will request additional memory from the memory manager and spill if the requested memory can not be obtained .
14,208
/**
 * Writes a record into the sorter: copies the bytes into the current data page
 * (length-prefixed) and registers the encoded address plus sort prefix with the
 * in-memory sorter. May spill first if the record-count threshold is crossed.
 *
 * @param recordBase base object of the source record (null for off-heap)
 * @param recordOffset offset of the record within recordBase
 * @param length record length in bytes
 * @param prefix sort prefix for this record
 * @param prefixIsNull whether the prefix represents a null sort key
 * @throws IOException if spilling fails
 */
public void insertRecord ( Object recordBase , long recordOffset , int length , long prefix , boolean prefixIsNull ) throws IOException {
  assert ( inMemSorter != null ) ;
  if ( inMemSorter . numRecords ( ) >= numElementsForSpillThreshold ) {
    logger . info ( "Spilling data because number of spilledRecords crossed the threshold " +
      numElementsForSpillThreshold ) ;
    spill ( ) ;
  }
  growPointerArrayIfNecessary ( ) ;
  int uaoSize = UnsafeAlignedOffset . getUaoSize ( ) ;
  // The record is stored as [length][bytes], so reserve space for the length prefix too.
  final int required = length + uaoSize ;
  acquireNewPageIfNecessary ( required ) ;
  final Object base = currentPage . getBaseObject ( ) ;
  // Encode the address BEFORE advancing the cursor so it points at the length prefix.
  final long recordAddress = taskMemoryManager . encodePageNumberAndOffset ( currentPage , pageCursor ) ;
  UnsafeAlignedOffset . putSize ( base , pageCursor , length ) ;
  pageCursor += uaoSize ;
  Platform . copyMemory ( recordBase , recordOffset , base , pageCursor , length ) ;
  pageCursor += length ;
  inMemSorter . insertRecord ( recordAddress , prefix , prefixIsNull ) ;
}
Write a record to the sorter .
14,209
/**
 * Merges another UnsafeExternalSorter into this one; the other sorter is emptied
 * and its resources are released.
 *
 * @param other the sorter whose data is absorbed
 * @throws IOException if spilling the other sorter fails
 */
public void merge ( UnsafeExternalSorter other ) throws IOException {
  // Force the other sorter's in-memory records out to spill files first, so all of
  // its data is represented by spill writers we can take over.
  other . spill ( ) ;
  spillWriters . addAll ( other . spillWriters ) ;
  // Clear before cleanup so cleanupResources() does not delete the files we just adopted.
  other . spillWriters . clear ( ) ;
  other . cleanupResources ( ) ;
}
Merges another UnsafeExternalSorter into this one; the other sorter will be emptied.
14,210
/**
 * Returns an iterator starting from {@code startIndex} that yields rows in insertion
 * order: spilled records first (in spill order), then any remaining in-memory records.
 *
 * @param startIndex number of leading records to skip
 * @throws IOException if a spill file cannot be read
 */
public UnsafeSorterIterator getIterator ( int startIndex ) throws IOException {
  if ( spillWriters . isEmpty ( ) ) {
    assert ( inMemSorter != null ) ;
    UnsafeSorterIterator iter = inMemSorter . getSortedIterator ( ) ;
    moveOver ( iter , startIndex ) ;
    return iter ;
  } else {
    LinkedList < UnsafeSorterIterator > queue = new LinkedList < > ( ) ;
    // i counts how many spilled records precede the current spill file.
    int i = 0 ;
    for ( UnsafeSorterSpillWriter spillWriter : spillWriters ) {
      // Only open readers for spill files that overlap [startIndex, end).
      if ( i + spillWriter . recordsSpilled ( ) > startIndex ) {
        UnsafeSorterIterator iter = spillWriter . getReader ( serializerManager ) ;
        // Skip the portion of this file that falls before startIndex.
        moveOver ( iter , startIndex - i ) ;
        queue . add ( iter ) ;
      }
      i += spillWriter . recordsSpilled ( ) ;
    }
    if ( inMemSorter != null ) {
      UnsafeSorterIterator iter = inMemSorter . getSortedIterator ( ) ;
      moveOver ( iter , startIndex - i ) ;
      queue . add ( iter ) ;
    }
    return new ChainedIterator ( queue ) ;
  }
}
Returns an iterator starting from startIndex which returns the rows in the order in which they were inserted.
14,211
/**
 * Grows the buffer so it can hold at least {@code neededSize} more bytes and re-points
 * the row at the (possibly reallocated) buffer.
 *
 * @param neededSize additional bytes required; must be non-negative
 * @throws IllegalArgumentException if neededSize is negative or growing would exceed ARRAY_MAX
 */
void grow ( int neededSize ) {
  if ( neededSize < 0 ) {
    throw new IllegalArgumentException ( "Cannot grow BufferHolder by size " + neededSize +
      " because the size is negative" ) ;
  }
  // Compare against ARRAY_MAX - totalSize() instead of summing, to avoid int overflow.
  if ( neededSize > ARRAY_MAX - totalSize ( ) ) {
    throw new IllegalArgumentException ( "Cannot grow BufferHolder by size " + neededSize +
      " because the size after growing " + "exceeds size limitation " + ARRAY_MAX ) ;
  }
  final int length = totalSize ( ) + neededSize ;
  if ( buffer . length < length ) {
    // Double when that stays under ARRAY_MAX, otherwise clamp; the guard above ensures
    // length < ARRAY_MAX/2 implies length * 2 cannot overflow.
    int newLength = length < ARRAY_MAX / 2 ? length * 2 : ARRAY_MAX ;
    int roundedSize = ByteArrayMethods . roundNumberOfBytesToNearestWord ( newLength ) ;
    final byte [ ] tmp = new byte [ roundedSize ] ;
    Platform . copyMemory ( buffer , Platform . BYTE_ARRAY_OFFSET , tmp ,
      Platform . BYTE_ARRAY_OFFSET , totalSize ( ) ) ;
    buffer = tmp ;
    row . pointTo ( buffer , buffer . length ) ;
  }
}
Grows the buffer by at least neededSize and points the row to the buffer .
14,212
/**
 * Produces the initial token used to start the SASL handshake with the server,
 * or an empty array if the mechanism has no initial response (or the client is gone).
 */
public synchronized byte [ ] firstToken ( ) {
  if ( saslClient == null || ! saslClient . hasInitialResponse ( ) ) {
    return new byte [ 0 ] ;
  }
  try {
    return saslClient . evaluateChallenge ( new byte [ 0 ] ) ;
  } catch ( SaslException e ) {
    throw Throwables . propagate ( e ) ;
  }
}
Used to initiate SASL handshake with server .
14,213
/**
 * Responds to the server's SASL challenge token. Returns an empty array when the
 * client has already been disposed.
 *
 * @param token challenge bytes received from the server
 */
public synchronized byte [ ] response ( byte [ ] token ) {
  if ( saslClient == null ) {
    return new byte [ 0 ] ;
  }
  try {
    return saslClient . evaluateChallenge ( token ) ;
  } catch ( SaslException e ) {
    throw Throwables . propagate ( e ) ;
  }
}
Respond to server s SASL token .
14,214
/**
 * Disposes of any system resources or security-sensitive information the SaslClient
 * might be using, then drops the reference. Idempotent.
 */
public synchronized void dispose ( ) {
  if ( saslClient != null ) {
    try {
      saslClient . dispose ( ) ;
    } catch ( SaslException e ) {
      // Intentionally swallowed: disposal is best-effort and there is nothing the
      // caller could usefully do with a failure here.
    } finally {
      saslClient = null ;
    }
  }
}
Disposes of any system resources or security - sensitive information the SaslClient might be using .
14,215
/**
 * Collects all metrics from this source into a single "sparkShuffleService" record.
 *
 * @param collector sink that receives the metrics record
 * @param all unused; present to satisfy the MetricsSource contract
 */
public void getMetrics ( MetricsCollector collector , boolean all ) {
  MetricsRecordBuilder builder = collector . addRecord ( "sparkShuffleService" ) ;
  metricSet . getMetrics ( ) . forEach ( ( name , metric ) -> collectMetric ( builder , name , metric ) ) ;
}
Get metrics from the source
14,216
/**
 * Extracts the commons-crypto configuration embedded in a list of config entries.
 * Keys beginning with {@code prefix} are re-rooted under the commons-crypto prefix;
 * everything else is ignored.
 *
 * @param prefix configuration namespace to strip from matching keys
 * @param conf source configuration entries
 * @return Properties suitable for handing to commons-crypto
 */
public static Properties toCryptoConf ( String prefix , Iterable < Map . Entry < String , String > > conf ) {
  Properties cryptoProps = new Properties ( ) ;
  for ( Map . Entry < String , String > entry : conf ) {
    String name = entry . getKey ( ) ;
    if ( ! name . startsWith ( prefix ) ) {
      continue ;
    }
    String suffix = name . substring ( prefix . length ( ) ) ;
    cryptoProps . setProperty ( COMMONS_CRYPTO_CONFIG_PREFIX + suffix , entry . getValue ( ) ) ;
  }
  return cryptoProps ;
}
Extract the commons - crypto configuration embedded in a list of config values .
14,217
/**
 * Gets boolean values from [rowId, rowId + count). Values read from null slots
 * are undefined and may be anything.
 */
public boolean [ ] getBooleans ( int rowId , int count ) {
  boolean [ ] values = new boolean [ count ] ;
  for ( int offset = 0 ; offset < count ; offset ++ ) {
    values [ offset ] = getBoolean ( rowId + offset ) ;
  }
  return values ;
}
Gets boolean type values from [rowId, rowId + count). The return values for the null slots are undefined and can be anything.
14,218
/**
 * Gets byte values from [rowId, rowId + count). Values read from null slots
 * are undefined and may be anything.
 */
public byte [ ] getBytes ( int rowId , int count ) {
  byte [ ] values = new byte [ count ] ;
  for ( int offset = 0 ; offset < count ; offset ++ ) {
    values [ offset ] = getByte ( rowId + offset ) ;
  }
  return values ;
}
Gets byte type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,219
/**
 * Gets short values from [rowId, rowId + count). Values read from null slots
 * are undefined and may be anything.
 */
public short [ ] getShorts ( int rowId , int count ) {
  short [ ] values = new short [ count ] ;
  for ( int offset = 0 ; offset < count ; offset ++ ) {
    values [ offset ] = getShort ( rowId + offset ) ;
  }
  return values ;
}
Gets short type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,220
/**
 * Gets int values from [rowId, rowId + count). Values read from null slots
 * are undefined and may be anything.
 */
public int [ ] getInts ( int rowId , int count ) {
  int [ ] values = new int [ count ] ;
  for ( int offset = 0 ; offset < count ; offset ++ ) {
    values [ offset ] = getInt ( rowId + offset ) ;
  }
  return values ;
}
Gets int type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,221
/**
 * Gets long values from [rowId, rowId + count). Values read from null slots
 * are undefined and may be anything.
 */
public long [ ] getLongs ( int rowId , int count ) {
  long [ ] values = new long [ count ] ;
  for ( int offset = 0 ; offset < count ; offset ++ ) {
    values [ offset ] = getLong ( rowId + offset ) ;
  }
  return values ;
}
Gets long type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,222
/**
 * Gets float values from [rowId, rowId + count). Values read from null slots
 * are undefined and may be anything.
 */
public float [ ] getFloats ( int rowId , int count ) {
  float [ ] values = new float [ count ] ;
  for ( int offset = 0 ; offset < count ; offset ++ ) {
    values [ offset ] = getFloat ( rowId + offset ) ;
  }
  return values ;
}
Gets float type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,223
/**
 * Gets double values from [rowId, rowId + count). Values read from null slots
 * are undefined and may be anything.
 */
public double [ ] getDoubles ( int rowId , int count ) {
  double [ ] values = new double [ count ] ;
  for ( int offset = 0 ; offset < count ; offset ++ ) {
    values [ offset ] = getDouble ( rowId + offset ) ;
  }
  return values ;
}
Gets double type values from [ rowId rowId + count ) . The return values for the null slots are undefined and can be anything .
14,224
/**
 * Returns the calendar-interval value at rowId, assembled from child 0 (months)
 * and child 1 (microseconds), or null if the slot is null.
 */
public final CalendarInterval getInterval ( int rowId ) {
  if ( isNullAt ( rowId ) ) {
    return null ;
  }
  int months = getChild ( 0 ) . getInt ( rowId ) ;
  long micros = getChild ( 1 ) . getLong ( rowId ) ;
  return new CalendarInterval ( months , micros ) ;
}
Returns the calendar interval type value for rowId . If the slot for rowId is null it should return null .
14,225
/**
 * Returns the Thrift processor factory for HiveServer2 running in binary mode:
 * Kerberos-wrapped when the configured auth type is KERBEROS, plain SASL otherwise.
 *
 * @param service the CLI service to wrap
 * @throws LoginException if the processor factory cannot be created
 */
public TProcessorFactory getAuthProcFactory ( ThriftCLIService service ) throws LoginException {
  boolean kerberos = authTypeStr . equalsIgnoreCase ( AuthTypes . KERBEROS . getAuthName ( ) ) ;
  return kerberos
      ? KerberosSaslHelper . getKerberosProcessorFactory ( saslServer , service )
      : PlainSaslHelper . getPlainProcessorFactory ( service ) ;
}
Returns the thrift processor factory for HiveServer2 running in binary mode
14,226
/**
 * Performs a Kerberos keytab login for HiveServer2 using the configured principal
 * and keytab. The principal's _HOST placeholder is resolved against 0.0.0.0.
 *
 * @param hiveConf configuration carrying principal and keytab locations
 * @throws IOException if either setting is missing or the login fails
 */
public static void loginFromKeytab ( HiveConf hiveConf ) throws IOException {
  String principal = hiveConf . getVar ( ConfVars . HIVE_SERVER2_KERBEROS_PRINCIPAL ) ;
  String keytab = hiveConf . getVar ( ConfVars . HIVE_SERVER2_KERBEROS_KEYTAB ) ;
  if ( principal . isEmpty ( ) || keytab . isEmpty ( ) ) {
    throw new IOException ( "HiveServer2 Kerberos principal or keytab is not correctly configured" ) ;
  }
  UserGroupInformation . loginUserFromKeytab (
      SecurityUtil . getServerPrincipal ( principal , "0.0.0.0" ) , keytab ) ;
}
Perform kerberos login using the hadoop shim API if the configuration is available
14,227
/**
 * Performs a SPNEGO keytab login for HiveServer2 and returns the resulting UGI.
 * The principal's _HOST placeholder is resolved against 0.0.0.0.
 *
 * @param hiveConf configuration carrying SPNEGO principal and keytab locations
 * @throws IOException if either setting is missing or the login fails
 */
public static UserGroupInformation loginFromSpnegoKeytabAndReturnUGI ( HiveConf hiveConf ) throws IOException {
  String principal = hiveConf . getVar ( ConfVars . HIVE_SERVER2_SPNEGO_PRINCIPAL ) ;
  String keytab = hiveConf . getVar ( ConfVars . HIVE_SERVER2_SPNEGO_KEYTAB ) ;
  if ( principal . isEmpty ( ) || keytab . isEmpty ( ) ) {
    throw new IOException ( "HiveServer2 SPNEGO principal or keytab is not correctly configured" ) ;
  }
  return UserGroupInformation . loginUserFromKeytabAndReturnUGI (
      SecurityUtil . getServerPrincipal ( principal , "0.0.0.0" ) , keytab ) ;
}
Perform SPNEGO login using the hadoop shim API if the configuration is available
14,228
/**
 * Retrieves a delegation token for the given user. Only available when Kerberos
 * authentication is active (saslServer is set).
 *
 * @param owner user the token is issued for
 * @param renewer principal allowed to renew the token
 * @return the encoded delegation token string (never null or empty)
 * @throws HiveSQLException if Kerberos is not enabled, retrieval fails, or the
 *         returned token is empty
 */
public String getDelegationToken ( String owner , String renewer ) throws HiveSQLException {
  if ( saslServer == null ) {
    throw new HiveSQLException (
        "Delegation token only supported over kerberos authentication" , "08S01" ) ;
  }
  try {
    String tokenStr = saslServer . getDelegationTokenWithService ( owner , renewer , HS2_CLIENT_TOKEN ) ;
    if ( tokenStr == null || tokenStr . isEmpty ( ) ) {
      throw new HiveSQLException (
          "Received empty retrieving delegation token for user " + owner , "08S01" ) ;
    }
    return tokenStr ;
  } catch ( IOException e ) {
    throw new HiveSQLException ( "Error retrieving delegation token for user " + owner , "08S01" , e ) ;
  } catch ( InterruptedException e ) {
    // FIX: restore the interrupt status before translating the exception, so callers
    // further up the stack can still observe that the thread was interrupted.
    Thread . currentThread ( ) . interrupt ( ) ;
    throw new HiveSQLException ( "delegation token retrieval interrupted" , "08S01" , e ) ;
  }
}
retrieve delegation token for the given user
14,229
/**
 * Cancels the given delegation token. Only available when Kerberos authentication
 * is active (saslServer is set).
 *
 * @param delegationToken the encoded token to cancel
 * @throws HiveSQLException if Kerberos is not enabled or cancellation fails
 */
public void cancelDelegationToken ( String delegationToken ) throws HiveSQLException {
  if ( saslServer == null ) {
    throw new HiveSQLException (
        "Delegation token only supported over kerberos authentication" , "08S01" ) ;
  }
  try {
    saslServer . cancelDelegationToken ( delegationToken ) ;
  } catch ( IOException e ) {
    throw new HiveSQLException ( "Error canceling delegation token " + delegationToken , "08S01" , e ) ;
  }
}
cancel given delegation token
14,230
/**
 * Converts a SQL search pattern ('%' = any sequence, '_' = any single character,
 * SEARCH_STRING_ESCAPE escapes the next character) into an equivalent Java regex.
 * Literal characters are lower-cased, matching the original behavior.
 *
 * FIX: literal characters that are regex metacharacters (e.g. '.', '(', '+') were
 * previously emitted unescaped, so "a.b" matched "axb" and "a(" produced an invalid
 * regex; literals are now escaped.
 *
 * @param pattern SQL pattern, or null (matches everything)
 * @return Java regular expression equivalent to the pattern
 */
public static String patternToRegex ( String pattern ) {
  if ( pattern == null ) {
    return ".*" ;
  }
  StringBuilder result = new StringBuilder ( pattern . length ( ) ) ;
  boolean escaped = false ;
  for ( int i = 0 , len = pattern . length ( ) ; i < len ; i ++ ) {
    char c = pattern . charAt ( i ) ;
    if ( escaped ) {
      if ( c != SEARCH_STRING_ESCAPE ) {
        escaped = false ;
      }
      appendLiteral ( result , c ) ;
    } else if ( c == SEARCH_STRING_ESCAPE ) {
      escaped = true ;
    } else if ( c == '%' ) {
      result . append ( ".*" ) ;
    } else if ( c == '_' ) {
      result . append ( '.' ) ;
    } else {
      appendLiteral ( result , Character . toLowerCase ( c ) ) ;
    }
  }
  return result . toString ( ) ;
}

/** Appends c to sb as a literal regex character, backslash-escaping metacharacters. */
private static void appendLiteral ( StringBuilder sb , char c ) {
  if ( "\\[](){}.*+?$^|" . indexOf ( c ) >= 0 ) {
    sb . append ( '\\' ) ;
  }
  sb . append ( c ) ;
}
Convert a SQL search pattern into an equivalent Java Regex .
14,231
/**
 * Verifies that a service is in the expected state.
 *
 * @param state the service's actual state
 * @param expectedState the state required for the pending operation
 * @throws IllegalStateException if the states differ
 */
public static void ensureCurrentState ( Service . STATE state , Service . STATE expectedState ) {
  if ( state == expectedState ) {
    return ;
  }
  throw new IllegalStateException ( "For this operation, the " + "current service state must be "
      + expectedState + " instead of " + state ) ;
}
Verify that a service is in a given state .
14,232
/**
 * Initializes a service, first verifying that it is still in the NOTINITED state.
 *
 * @param service service to initialize
 * @param configuration configuration passed to the service
 * @throws IllegalStateException if the service has already been initialized
 */
public static void init ( Service service , HiveConf configuration ) {
  ensureCurrentState ( service . getServiceState ( ) , Service . STATE . NOTINITED ) ;
  service . init ( configuration ) ;
}
Initialize a service .
14,233
/**
 * Starts a service, first verifying that it has been initialized (INITED state).
 *
 * @param service service to start
 * @throws IllegalStateException if the service is not in the INITED state
 */
public static void start ( Service service ) {
  ensureCurrentState ( service . getServiceState ( ) , Service . STATE . INITED ) ;
  service . start ( ) ;
}
Start a service .
14,234
/**
 * Initializes then starts a service in one step.
 *
 * @param service service to deploy
 * @param configuration configuration passed to the service during init
 */
public static void deploy ( Service service , HiveConf configuration ) {
  init ( service , configuration ) ;
  start ( service ) ;
}
Initialize then start a service .
14,235
/**
 * Stops a service if it is currently running. Null services and services in any
 * state other than STARTED are silently ignored.
 *
 * @param service service to stop, may be null
 */
public static void stop ( Service service ) {
  if ( service == null ) {
    return ;
  }
  if ( service . getServiceState ( ) == Service . STATE . STARTED ) {
    service . stop ( ) ;
  }
}
Stop a service .
14,236
/**
 * Advances to the next value, loading the next page when the current one is
 * exhausted.
 *
 * @return true if the value at the new position is non-null
 * @throws IOException if reading the next page fails
 */
private boolean next ( ) throws IOException {
  if ( valuesRead >= endOfPageValueCount ) {
    if ( valuesRead >= totalValueCount ) {
      // no data left at all
      return false ;
    }
    readPage ( ) ;
  }
  valuesRead ++ ;
  // A definition level equal to the max means the value is actually present.
  return definitionLevelColumn . nextInt ( ) == maxDefLevel ;
}
Advances to the next value . Returns true if the value is non - null .
14,237
/**
 * Reads {@code total} values from this column reader into {@code column}, page by page,
 * handling both dictionary-encoded and plain pages (including lazy dictionary decoding).
 *
 * @param total number of values to read
 * @param column destination vector
 * @throws IOException on unsupported types or read failures
 */
void readBatch ( int total , WritableColumnVector column ) throws IOException {
  int rowId = 0 ;
  WritableColumnVector dictionaryIds = null ;
  if ( dictionary != null ) {
    // Reserve space for dictionary ids up front so pages can decode into it.
    dictionaryIds = column . reserveDictionaryIds ( total ) ;
  }
  while ( total > 0 ) {
    // Number of values still available in the current page.
    int leftInPage = ( int ) ( endOfPageValueCount - valuesRead ) ;
    if ( leftInPage == 0 ) {
      readPage ( ) ;
      leftInPage = ( int ) ( endOfPageValueCount - valuesRead ) ;
    }
    int num = Math . min ( total , leftInPage ) ;
    PrimitiveType . PrimitiveTypeName typeName =
      descriptor . getPrimitiveType ( ) . getPrimitiveTypeName ( ) ;
    if ( isCurrentPageDictionaryEncoded ) {
      // Read dictionary ids for this page.
      defColumn . readIntegers ( num , dictionaryIds , column , rowId , maxDefLevel ,
        ( VectorizedValuesReader ) dataColumn ) ;
      // Lazy decoding is only safe for these physical types (and not for INT64
      // timestamps in millis, which need conversion); otherwise decode eagerly.
      if ( column . hasDictionary ( ) || ( rowId == 0 &&
          ( typeName == PrimitiveType . PrimitiveTypeName . INT32 ||
          ( typeName == PrimitiveType . PrimitiveTypeName . INT64 &&
            originalType != OriginalType . TIMESTAMP_MILLIS ) ||
          typeName == PrimitiveType . PrimitiveTypeName . FLOAT ||
          typeName == PrimitiveType . PrimitiveTypeName . DOUBLE ||
          typeName == PrimitiveType . PrimitiveTypeName . BINARY ) ) ) {
        column . setDictionary ( new ParquetDictionary ( dictionary ) ) ;
      } else {
        decodeDictionaryIds ( rowId , num , column , dictionaryIds ) ;
      }
    } else {
      if ( column . hasDictionary ( ) && rowId != 0 ) {
        // Earlier rows in this batch were left dictionary-encoded; materialize them
        // before appending plain values.
        decodeDictionaryIds ( 0 , rowId , column , column . getDictionaryIds ( ) ) ;
      }
      column . setDictionary ( null ) ;
      switch ( typeName ) {
        case BOOLEAN :
          readBooleanBatch ( rowId , num , column ) ;
          break ;
        case INT32 :
          readIntBatch ( rowId , num , column ) ;
          break ;
        case INT64 :
          readLongBatch ( rowId , num , column ) ;
          break ;
        case INT96 :
          readBinaryBatch ( rowId , num , column ) ;
          break ;
        case FLOAT :
          readFloatBatch ( rowId , num , column ) ;
          break ;
        case DOUBLE :
          readDoubleBatch ( rowId , num , column ) ;
          break ;
        case BINARY :
          readBinaryBatch ( rowId , num , column ) ;
          break ;
        case FIXED_LEN_BYTE_ARRAY :
          readFixedLenByteArrayBatch ( rowId , num , column ,
            descriptor . getPrimitiveType ( ) . getTypeLength ( ) ) ;
          break ;
        default :
          throw new IOException ( "Unsupported type: " + typeName ) ;
      }
    }
    valuesRead += num ;
    rowId += num ;
    total -= num ;
  }
}
Reads total values from this columnReader into column .
14,238
/**
 * Builds the exception thrown when a Parquet physical type cannot be converted
 * to the requested Spark column type.
 *
 * @param descriptor Parquet column whose conversion failed
 * @param column destination vector with the incompatible Spark type
 */
private SchemaColumnConvertNotSupportedException constructConvertNotSupportedException (
    ColumnDescriptor descriptor ,
    WritableColumnVector column ) {
  String physicalType = descriptor . getPrimitiveType ( ) . getPrimitiveTypeName ( ) . toString ( ) ;
  String catalogType = column . dataType ( ) . catalogString ( ) ;
  return new SchemaColumnConvertNotSupportedException (
      Arrays . toString ( descriptor . getPath ( ) ) , physicalType , catalogType ) ;
}
Helper function to construct exception for parquet schema mismatch .
14,239
/**
 * Creates a Netty EventLoopGroup for the given IO mode.
 *
 * @param mode NIO or EPOLL
 * @param numThreads number of event-loop threads
 * @param threadPrefix name prefix for the created threads
 * @throws IllegalArgumentException for an unrecognized mode
 */
public static EventLoopGroup createEventLoop ( IOMode mode , int numThreads , String threadPrefix ) {
  ThreadFactory factory = createThreadFactory ( threadPrefix ) ;
  if ( mode == IOMode . NIO ) {
    return new NioEventLoopGroup ( numThreads , factory ) ;
  }
  if ( mode == IOMode . EPOLL ) {
    return new EpollEventLoopGroup ( numThreads , factory ) ;
  }
  throw new IllegalArgumentException ( "Unknown io mode: " + mode ) ;
}
Creates a Netty EventLoopGroup based on the IOMode .
14,240
/**
 * Returns the ServerSocketChannel implementation matching the given IO mode.
 *
 * @param mode NIO or EPOLL
 * @throws IllegalArgumentException for an unrecognized mode
 */
public static Class < ? extends ServerChannel > getServerChannelClass ( IOMode mode ) {
  if ( mode == IOMode . NIO ) {
    return NioServerSocketChannel . class ;
  }
  if ( mode == IOMode . EPOLL ) {
    return EpollServerSocketChannel . class ;
  }
  throw new IllegalArgumentException ( "Unknown io mode: " + mode ) ;
}
Returns the correct ServerSocketChannel class based on IOMode .
14,241
/**
 * Returns the remote address of the channel, or the placeholder
 * {@code <unknown remote>} when the channel or its address is unavailable.
 *
 * @param channel channel to inspect, may be null
 */
public static String getRemoteAddress ( Channel channel ) {
  if ( channel == null || channel . remoteAddress ( ) == null ) {
    return "<unknown remote>" ;
  }
  return channel . remoteAddress ( ) . toString ( ) ;
}
Returns the remote address on the channel or &lt ; unknown remote&gt ; if none exists .
14,242
/**
 * Returns the default thread-pool size for Netty clients and servers: the number of
 * usable cores (falling back to the runtime's processor count when the argument is
 * non-positive), capped at MAX_DEFAULT_NETTY_THREADS.
 *
 * @param numUsableCores cores assigned to this process, or <= 0 to auto-detect
 */
public static int defaultNumThreads ( int numUsableCores ) {
  int cores = numUsableCores > 0
      ? numUsableCores
      : Runtime . getRuntime ( ) . availableProcessors ( ) ;
  return Math . min ( cores , MAX_DEFAULT_NETTY_THREADS ) ;
}
Returns the default number of threads for both the Netty client and server thread pools . If numUsableCores is 0 we will use Runtime get an approximate number of available cores .
14,243
/**
 * Returns the lazily created shared pooled ByteBuf allocator for the given
 * allowCache setting. One allocator is cached per cache setting (slot 0 = cached,
 * slot 1 = uncached); allowDirectBufs only influences the first creation.
 */
public static synchronized PooledByteBufAllocator getSharedPooledByteBufAllocator (
    boolean allowDirectBufs ,
    boolean allowCache ) {
  final int slot = allowCache ? 0 : 1 ;
  PooledByteBufAllocator allocator = _sharedPooledByteBufAllocator [ slot ] ;
  if ( allocator == null ) {
    allocator = createPooledByteBufAllocator ( allowDirectBufs , allowCache , defaultNumThreads ( 0 ) ) ;
    _sharedPooledByteBufAllocator [ slot ] = allocator ;
  }
  return allocator ;
}
Returns the lazily created shared pooled ByteBuf allocator for the specified allowCache parameter value .
14,244
/**
 * Creates a pooled ByteBuf allocator, optionally disabling the thread-local caches.
 * Caches are disabled for TransportClients because buffers are allocated on the event
 * loop thread but released on the executor thread, which defeats the cache and delays
 * buffer recycling, inflating memory usage.
 *
 * @param allowDirectBufs whether direct (off-heap) buffers may be used
 * @param allowCache whether Netty's per-thread buffer caches are enabled
 * @param numCores number of arenas to size for; 0 means use all available processors
 */
public static PooledByteBufAllocator createPooledByteBufAllocator (
    boolean allowDirectBufs ,
    boolean allowCache ,
    int numCores ) {
  if ( numCores == 0 ) {
    numCores = Runtime . getRuntime ( ) . availableProcessors ( ) ;
  }
  return new PooledByteBufAllocator (
    // direct buffers only when both requested and preferred by the platform
    allowDirectBufs && PlatformDependent . directBufferPreferred ( ) ,
    Math . min ( PooledByteBufAllocator . defaultNumHeapArena ( ) , numCores ) ,
    // zero direct arenas when direct buffers are disallowed
    Math . min ( PooledByteBufAllocator . defaultNumDirectArena ( ) , allowDirectBufs ? numCores : 0 ) ,
    PooledByteBufAllocator . defaultPageSize ( ) ,
    PooledByteBufAllocator . defaultMaxOrder ( ) ,
    // all cache sizes forced to 0 when caching is disabled
    allowCache ? PooledByteBufAllocator . defaultTinyCacheSize ( ) : 0 ,
    allowCache ? PooledByteBufAllocator . defaultSmallCacheSize ( ) : 0 ,
    allowCache ? PooledByteBufAllocator . defaultNormalCacheSize ( ) : 0 ,
    allowCache ? PooledByteBufAllocator . defaultUseCacheForAllThreads ( ) : false ) ;
}
Create a pooled ByteBuf allocator but disables the thread - local cache . Thread - local caches are disabled for TransportClients because the ByteBufs are allocated by the event loop thread but released by the executor thread rather than the event loop thread . Those thread - local caches actually delay the recycling of buffers leading to larger memory usage .
14,245
/**
 * Creates and returns a HiveServer2 cookie token of the form
 * {@code cu=<user>&rn=<random long>} (using the class's separator constants).
 *
 * FIX: the random component was produced by a freshly constructed
 * {@code java.util.Random} seeded with the current wall-clock time, which makes the
 * token predictable and therefore forgeable. A SecureRandom is used instead. The
 * local builder is also a StringBuilder rather than a synchronized StringBuffer.
 *
 * @param clientUserName authenticated client user to embed in the token
 * @return the formatted cookie token string
 */
public static String createCookieToken ( String clientUserName ) {
  StringBuilder sb = new StringBuilder ( ) ;
  sb . append ( COOKIE_CLIENT_USER_NAME ) . append ( COOKIE_KEY_VALUE_SEPARATOR )
      . append ( clientUserName ) . append ( COOKIE_ATTR_SEPARATOR ) ;
  sb . append ( COOKIE_CLIENT_RAND_NUMBER ) . append ( COOKIE_KEY_VALUE_SEPARATOR )
      . append ( new java . security . SecureRandom ( ) . nextLong ( ) ) ;
  return sb . toString ( ) ;
}
Creates and returns a HS2 cookie token .
14,246
/**
 * Parses a cookie token and returns the client user name it carries, or null if the
 * token is malformed or missing required attributes.
 *
 * FIX: splitCookieToken returns null for a malformed token (a part without a
 * key/value separator); the old code then dereferenced the null map via
 * {@code map.keySet()} and threw a NullPointerException instead of returning null.
 *
 * @param tokenStr the raw cookie token string
 */
public static String getUserNameFromCookieToken ( String tokenStr ) {
  Map < String , String > map = splitCookieToken ( tokenStr ) ;
  if ( map == null || ! map . keySet ( ) . equals ( COOKIE_ATTRIBUTES ) ) {
    LOG . error ( "Invalid token with missing attributes " + tokenStr ) ;
    return null ;
  }
  return map . get ( COOKIE_CLIENT_USER_NAME ) ;
}
Parses a cookie token to retrieve client user name .
14,247
/**
 * Splits a cookie token into its key/value attribute pairs.
 *
 * @param tokenStr raw token string of ATTR_SEPARATOR-delimited key=value parts
 * @return attribute map, or null if any part lacks a key/value separator
 */
private static Map < String , String > splitCookieToken ( String tokenStr ) {
  Map < String , String > attributes = new HashMap < String , String > ( ) ;
  StringTokenizer tokenizer = new StringTokenizer ( tokenStr , COOKIE_ATTR_SEPARATOR ) ;
  while ( tokenizer . hasMoreTokens ( ) ) {
    String pair = tokenizer . nextToken ( ) ;
    int sep = pair . indexOf ( COOKIE_KEY_VALUE_SEPARATOR ) ;
    if ( sep < 0 ) {
      LOG . error ( "Invalid token string " + tokenStr ) ;
      return null ;
    }
    attributes . put ( pair . substring ( 0 , sep ) , pair . substring ( sep + 1 ) ) ;
  }
  return attributes ;
}
Splits the cookie token into attributes pairs .
14,248
/**
 * Verifies that the requested fetch orientation is one of the orientations this
 * result set supports.
 *
 * @param orientation orientation requested by the client
 * @param supportedOrientations orientations this result set can serve
 * @throws HiveSQLException (SQLSTATE HY106) if the orientation is unsupported
 */
protected void validateFetchOrientation (
    FetchOrientation orientation ,
    EnumSet < FetchOrientation > supportedOrientations ) throws HiveSQLException {
  if ( supportedOrientations . contains ( orientation ) ) {
    return ;
  }
  throw new HiveSQLException (
      "The fetch type " + orientation . toString ( ) + " is not supported for this resultset" ,
      "HY106" ) ;
}
Verify if the given fetch orientation is part of the supported orientation types .
14,249
/**
 * Allocates a DirectByteBuffer, potentially bypassing the JVM's MaxDirectMemorySize
 * limit by constructing the buffer reflectively around raw memory.
 *
 * @param size buffer capacity in bytes
 * @throws OutOfMemoryError if the fallback JDK allocation fails
 */
public static ByteBuffer allocateDirectBuffer ( int size ) {
  try {
    // Reflection handles unavailable (e.g. unsupported JDK): fall back to the
    // standard allocator, which IS subject to MaxDirectMemorySize.
    if ( CLEANER_CREATE_METHOD == null ) {
      try {
        return ByteBuffer . allocateDirect ( size ) ;
      } catch ( OutOfMemoryError oome ) {
        throw new OutOfMemoryError ( "Failed to allocate direct buffer (" + oome . getMessage ( ) +
          "); try increasing -XX:MaxDirectMemorySize=... to, for example, your heap size" ) ;
      }
    }
    // Allocate raw memory ourselves and wrap it via the private DirectByteBuffer
    // constructor, attaching a Cleaner that frees the memory on GC.
    long memory = allocateMemory ( size ) ;
    ByteBuffer buffer = ( ByteBuffer ) DBB_CONSTRUCTOR . newInstance ( memory , size ) ;
    try {
      DBB_CLEANER_FIELD . set ( buffer ,
        CLEANER_CREATE_METHOD . invoke ( null , buffer , ( Runnable ) ( ) -> freeMemory ( memory ) ) ) ;
    } catch ( IllegalAccessException | InvocationTargetException e ) {
      // Could not attach the cleaner: release the raw memory to avoid a leak.
      freeMemory ( memory ) ;
      throw new IllegalStateException ( e ) ;
    }
    return buffer ;
  } catch ( Exception e ) {
    // rethrows e (possibly unchecked) without declaring it
    throwException ( e ) ;
  }
  throw new IllegalStateException ( "unreachable" ) ;
}
Allocate a DirectByteBuffer potentially bypassing the JVM s MaxDirectMemorySize limit .
14,250
/**
 * Writes the content of a byte array into a memory region identified by a base object
 * and an offset. The target memory must already be allocated and large enough to hold
 * all of {@code src}.
 *
 * @param src bytes to copy
 * @param target base object of the destination (null for off-heap addresses)
 * @param targetOffset destination offset relative to {@code target}
 */
public static void writeToMemory ( byte [ ] src , Object target , long targetOffset ) {
  Platform . copyMemory ( src , Platform . BYTE_ARRAY_OFFSET , target , targetOffset , src . length ) ;
}
Writes the content of a byte array into a memory address identified by an object and an offset . The target memory address must already been allocated and have enough space to hold all the bytes in this string .
14,251
/**
 * Allocates one off-heap column vector per field. {@code capacity} is the initial
 * element count (not bytes); each vector grows as needed.
 *
 * @param capacity initial number of elements per vector
 * @param fields schema fields, one vector allocated per field
 */
public static OffHeapColumnVector [ ] allocateColumns ( int capacity , StructField [ ] fields ) {
  OffHeapColumnVector [ ] columns = new OffHeapColumnVector [ fields . length ] ;
  int i = 0 ;
  for ( StructField field : fields ) {
    columns [ i ++ ] = new OffHeapColumnVector ( capacity , field . dataType ( ) ) ;
  }
  return columns ;
}
Allocates columns to store elements of each field off heap . Capacity is the initial capacity of the vector and it will grow as necessary . Capacity is in number of elements not number of bytes .
14,252
/**
 * Appends a byte-array value for {@code rowId}: copies the bytes into the child
 * array data and records this row's length and starting offset.
 *
 * @param rowId destination row
 * @param value source bytes
 * @param offset start position within {@code value}
 * @param length number of bytes to store
 * @return the starting offset of the copied bytes within the array data
 */
public int putByteArray ( int rowId , byte [ ] value , int offset , int length ) {
  int startOffset = arrayData ( ) . appendBytes ( length , value , offset ) ;
  long slot = 4L * rowId ;
  Platform . putInt ( null , lengthData + slot , length ) ;
  Platform . putInt ( null , offsetData + slot , startOffset ) ;
  return startOffset ;
}
APIs dealing with ByteArrays
14,253
/**
 * Slow-path capacity growth: reallocates this vector's off-heap buffers to hold
 * {@code newCapacity} elements, sized per element width of the column type.
 *
 * @param newCapacity new element capacity; must be >= the old capacity
 * @throws RuntimeException for an unhandled column type
 */
protected void reserveInternal ( int newCapacity ) {
  // nulls == 0L means nothing has been allocated yet, so the old capacity is 0.
  int oldCapacity = ( nulls == 0L ) ? 0 : capacity ;
  if ( isArray ( ) || type instanceof MapType ) {
    // Variable-length types store per-row length and offset ints (4 bytes each).
    this . lengthData = Platform . reallocateMemory ( lengthData , oldCapacity * 4L , newCapacity * 4L ) ;
    this . offsetData = Platform . reallocateMemory ( offsetData , oldCapacity * 4L , newCapacity * 4L ) ;
  } else if ( type instanceof ByteType || type instanceof BooleanType ) {
    // 1 byte per element
    this . data = Platform . reallocateMemory ( data , oldCapacity , newCapacity ) ;
  } else if ( type instanceof ShortType ) {
    // 2 bytes per element
    this . data = Platform . reallocateMemory ( data , oldCapacity * 2L , newCapacity * 2L ) ;
  } else if ( type instanceof IntegerType || type instanceof FloatType ||
      type instanceof DateType || DecimalType . is32BitDecimalType ( type ) ) {
    // 4 bytes per element
    this . data = Platform . reallocateMemory ( data , oldCapacity * 4L , newCapacity * 4L ) ;
  } else if ( type instanceof LongType || type instanceof DoubleType ||
      DecimalType . is64BitDecimalType ( type ) || type instanceof TimestampType ) {
    // 8 bytes per element
    this . data = Platform . reallocateMemory ( data , oldCapacity * 8L , newCapacity * 8L ) ;
  } else if ( childColumns != null ) {
    // Struct-like types hold no data of their own; children are resized separately.
  } else {
    throw new RuntimeException ( "Unhandled " + type ) ;
  }
  // Null bitmap: one byte per element; newly added bytes are zeroed (= not null).
  this . nulls = Platform . reallocateMemory ( nulls , oldCapacity , newCapacity ) ;
  Platform . setMemory ( nulls + oldCapacity , ( byte ) 0 , newCapacity - oldCapacity ) ;
  capacity = newCapacity ;
}
Split out the slow path .
14,254
/**
 * Initializes the internal state for decoding ints of {@code bitWidth} bits.
 *
 * @param bitWidth width of the packed values; must be in [0, 32]
 */
private void init ( int bitWidth ) {
  Preconditions . checkArgument ( bitWidth >= 0 && bitWidth <= 32 , "bitWidth must be >= 0 and <= 32" ) ;
  this . bitWidth = bitWidth ;
  // number of whole bytes needed to hold bitWidth bits
  this . bytesWidth = BytesUtils . paddedByteCountFromBits ( bitWidth ) ;
  this . packer = Packer . LITTLE_ENDIAN . newBytePacker ( bitWidth ) ;
}
Initializes the internal state for decoding ints of bitWidth .
14,255
/**
 * Reads {@code total} integers into {@code c} starting at {@code rowId}. This reader
 * is only used for dictionary IDs, so only integer decoding is supported. Values are
 * consumed run by run across RLE and bit-packed groups.
 *
 * @param total number of values to read
 * @param c destination vector
 * @param rowId first destination row
 */
public void readIntegers ( int total , WritableColumnVector c , int rowId ) {
  int remaining = total ;
  while ( remaining > 0 ) {
    if ( this . currentCount == 0 ) {
      this . readNextGroup ( ) ;
    }
    // consume at most what the current run still holds
    int n = Math . min ( remaining , this . currentCount ) ;
    switch ( mode ) {
      case RLE :
        // a run of n copies of the same value
        c . putInts ( rowId , n , currentValue ) ;
        break ;
      case PACKED :
        // n distinct values from the unpacked buffer
        c . putInts ( rowId , n , currentBuffer , currentBufferIdx ) ;
        currentBufferIdx += n ;
        break ;
    }
    rowId += n ;
    remaining -= n ;
    currentCount -= n ;
  }
}
Since this is only used to decode dictionary IDs only decoding integers is supported .
14,256
/**
 * Reads the next unsigned varint-encoded int from the input stream: 7 payload bits
 * per byte, high bit set on continuation bytes, little-endian group order.
 *
 * FIX: {@code in.read()} returns -1 at end of stream; the old loop treated -1 as a
 * continuation byte ({@code -1 & 0x80 != 0}) and spun forever accumulating garbage.
 * A truncated stream now fails fast with EOFException.
 *
 * @return the decoded value
 * @throws IOException if reading fails or the stream ends mid-varint
 */
private int readUnsignedVarInt ( ) throws IOException {
  int value = 0 ;
  int shift = 0 ;
  int b ;
  do {
    b = in . read ( ) ;
    if ( b < 0 ) {
      throw new java . io . EOFException ( "end of stream while reading an unsigned varint" ) ;
    }
    value |= ( b & 0x7F ) << shift ;
    shift += 7 ;
  } while ( ( b & 0x80 ) != 0 ) ;
  return value ;
}
Reads the next varint encoded int .
14,257
/**
 * Reads the next little-endian int stored in {@code bytesWidth} bytes
 * (0 to 4, as derived from the bit width).
 *
 * @throws IOException if reading from the stream fails
 */
private int readIntLittleEndianPaddedOnBitWidth ( ) throws IOException {
  switch ( bytesWidth ) {
    case 0 :
      return 0 ;
    case 1 :
      return in . read ( ) ;
    case 2 : {
      // bytes arrive least-significant first
      int lo = in . read ( ) ;
      int hi = in . read ( ) ;
      return ( hi << 8 ) + lo ;
    }
    case 3 : {
      int lo = in . read ( ) ;
      int mid = in . read ( ) ;
      int hi = in . read ( ) ;
      return ( hi << 16 ) + ( mid << 8 ) + lo ;
    }
    case 4 :
      return readIntLittleEndian ( ) ;
  }
  throw new RuntimeException ( "Unreachable" ) ;
}
Reads the next byteWidth little endian int .
14,258
// Reads the next run of the Parquet RLE / bit-packed hybrid encoding. The
// varint header's low bit selects the mode; the remaining bits are the run
// length (RLE) or the number of 8-value groups (PACKED). RLE reads one padded
// little-endian value to repeat; PACKED grows the int buffer if needed and
// unpacks 8 values per `bitWidth`-byte slice. IOExceptions are rethrown as
// ParquetDecodingException.
private void readNextGroup ( ) { try { int header = readUnsignedVarInt ( ) ; this . mode = ( header & 1 ) == 0 ? MODE . RLE : MODE . PACKED ; switch ( mode ) { case RLE : this . currentCount = header >>> 1 ; this . currentValue = readIntLittleEndianPaddedOnBitWidth ( ) ; return ; case PACKED : int numGroups = header >>> 1 ; this . currentCount = numGroups * 8 ; if ( this . currentBuffer . length < this . currentCount ) { this . currentBuffer = new int [ this . currentCount ] ; } currentBufferIdx = 0 ; int valueIndex = 0 ; while ( valueIndex < this . currentCount ) { ByteBuffer buffer = in . slice ( bitWidth ) ; this . packer . unpack8Values ( buffer , buffer . position ( ) , this . currentBuffer , valueIndex ) ; valueIndex += 8 ; } return ; default : throw new ParquetDecodingException ( "not a valid mode " + this . mode ) ; } } catch ( IOException e ) { throw new ParquetDecodingException ( "Failed to read from input stream" , e ) ; } }
Reads the next group .
14,259
// Transitions to `newState` and notifies every registered listener. Only
// invoked from synchronized methods, so the listener list needs no defensive
// copy — which also means listeners should return quickly, since they delay
// the state transition.
private void changeState(Service.STATE newState) {
  state = newState;
  for (ServiceStateChangeListener listener : listeners) {
    listener.stateChanged(this);
  }
}
Change to a new state and notify all listeners . This is a private method that is only invoked from synchronized methods which avoid having to clone the listener list . It does imply that the state change listener methods should be short lived as they will delay the state transition .
14,260
// Allocates a LongArray of `size` 8-byte slots from the task memory manager.
// allocatePage may throw an out-of-memory or too-large-page error; callers
// must handle those or keep `size` small enough to avoid them.
public LongArray allocateArray(long size) {
  long bytesNeeded = size * 8L;
  MemoryBlock block = taskMemoryManager.allocatePage(bytesNeeded, this);
  if (block == null || block.size() < bytesNeeded) {
    throwOom(block, bytesNeeded);
  }
  used += bytesNeeded;
  return new LongArray(block);
}
Allocates a LongArray of size . Note that this method may throw SparkOutOfMemoryError if Spark doesn t have enough memory for this allocation or throw TooLargePageException if this LongArray is too large to fit in a single page . The caller side should take care of these two exceptions or make sure the size is small enough that won t trigger exceptions .
14,261
// Allocates a memory block of at least `required` bytes, never smaller than
// the configured page size, and accounts for the full block size in `used`.
protected MemoryBlock allocatePage(long required) {
  long request = Math.max(pageSize, required);
  MemoryBlock page = taskMemoryManager.allocatePage(request, this);
  if (page == null || page.size() < required) {
    throwOom(page, required);
  }
  used += page.size();
  return page;
}
Allocate a memory block with at least required bytes .
14,262
// Transfers this message (header + body) to `target`, possibly across multiple
// invocations to avoid busy waiting on a non-blocking channel: if the header is
// not fully written yet, return early; otherwise continue the body from where
// the previous call stopped (tracked by totalBytesTransferred). Returns the
// number of bytes written by THIS call.
public long transferTo ( final WritableByteChannel target , final long position ) throws IOException { Preconditions . checkArgument ( position == totalBytesTransferred , "Invalid position." ) ; long writtenHeader = 0 ; if ( header . readableBytes ( ) > 0 ) { writtenHeader = copyByteBuf ( header , target ) ; totalBytesTransferred += writtenHeader ; if ( header . readableBytes ( ) > 0 ) { return writtenHeader ; } } long writtenBody = 0 ; if ( body instanceof FileRegion ) { writtenBody = ( ( FileRegion ) body ) . transferTo ( target , totalBytesTransferred - headerLength ) ; } else if ( body instanceof ByteBuf ) { writtenBody = copyByteBuf ( ( ByteBuf ) body , target ) ; } totalBytesTransferred += writtenBody ; return writtenHeader + writtenBody ; }
This code is more complicated than you would think because we might require multiple transferTo invocations in order to transfer a single MessageWithHeader to avoid busy waiting .
14,263
// Repoints this UnsafeRow at different backing data. sizeInBytes must be a
// multiple of 8 because row storage is word-aligned.
public void pointTo(Object baseObject, long baseOffset, int sizeInBytes) {
  assert numFields >= 0 : "numFields (" + numFields + ") should >= 0";
  assert sizeInBytes % 8 == 0 : "sizeInBytes (" + sizeInBytes + ") should be a multiple of 8";
  this.baseObject = baseObject;
  this.baseOffset = baseOffset;
  this.sizeInBytes = sizeInBytes;
}
Update this UnsafeRow to point to different backing data .
14,264
// Updates a decimal column in place. Compact decimals (precision fits in a
// long) are stored inline as the unscaled long value. Larger decimals keep a
// cursor (upper 32 bits of the field word) to a fixed 16-byte region: that
// region is zeroed, then rewritten with the unscaled BigInteger bytes, and the
// field word is repacked as (cursor << 32 | byteLength). Setting null preserves
// the cursor so the 16-byte slot can be reused by a later update.
public void setDecimal ( int ordinal , Decimal value , int precision ) { assertIndexIsValid ( ordinal ) ; if ( precision <= Decimal . MAX_LONG_DIGITS ( ) ) { if ( value == null ) { setNullAt ( ordinal ) ; } else { setLong ( ordinal , value . toUnscaledLong ( ) ) ; } } else { long cursor = getLong ( ordinal ) >>> 32 ; assert cursor > 0 : "invalid cursor " + cursor ; Platform . putLong ( baseObject , baseOffset + cursor , 0L ) ; Platform . putLong ( baseObject , baseOffset + cursor + 8 , 0L ) ; if ( value == null ) { setNullAt ( ordinal ) ; Platform . putLong ( baseObject , getFieldOffset ( ordinal ) , cursor << 32 ) ; } else { final BigInteger integer = value . toJavaBigDecimal ( ) . unscaledValue ( ) ; byte [ ] bytes = integer . toByteArray ( ) ; assert ( bytes . length <= 16 ) ; Platform . copyMemory ( bytes , Platform . BYTE_ARRAY_OFFSET , baseObject , baseOffset + cursor , bytes . length ) ; setLong ( ordinal , ( cursor << 32 ) | ( ( long ) bytes . length ) ) ; } } }
Updates the decimal column .
14,265
// Returns a self-contained copy of this row: its bytes are copied into a
// fresh on-heap array, so the copy stays valid even after the data page the
// original points into is freed or reused.
public UnsafeRow copy() {
  UnsafeRow copied = new UnsafeRow(numFields);
  byte[] bytes = new byte[sizeInBytes];
  Platform.copyMemory(baseObject, baseOffset, bytes, Platform.BYTE_ARRAY_OFFSET, sizeInBytes);
  copied.pointTo(bytes, Platform.BYTE_ARRAY_OFFSET, sizeInBytes);
  return copied;
}
Copies this row returning a self - contained UnsafeRow that stores its data in an internal byte array rather than referencing data stored in a data page .
14,266
// Builds an UnsafeRow backed by a fresh byte array of `numBytes` bytes with
// `numFields` fields. The returned row's content is undefined until copyFrom
// is called on it.
public static UnsafeRow createFromByteArray(int numBytes, int numFields) {
  UnsafeRow row = new UnsafeRow(numFields);
  row.pointTo(new byte[numBytes], numBytes);
  return row;
}
Creates an empty UnsafeRow from a byte array with specified numBytes and numFields . The returned row is invalid until we call copyFrom on it .
14,267
/**
 * Writes this UnsafeRow's underlying bytes to the given OutputStream.
 *
 * @param out destination stream.
 * @param writeBuffer scratch buffer used to chunk the copy when the row's
 *        backing storage is not an on-heap byte array (e.g. off-heap memory).
 */
public void writeToStream(OutputStream out, byte[] writeBuffer) throws IOException {
  if (baseObject instanceof byte[]) {
    // On-heap: the row is a contiguous slice of a byte array — write directly.
    int offsetInByteArray = (int) (baseOffset - Platform.BYTE_ARRAY_OFFSET);
    out.write((byte[]) baseObject, offsetInByteArray, sizeInBytes);
  } else {
    // The original method silently wrote nothing here and never used
    // `writeBuffer`, dropping off-heap rows. Copy the row out through the
    // scratch buffer in chunks instead.
    int dataRemaining = sizeInBytes;
    long rowReadPosition = baseOffset;
    while (dataRemaining > 0) {
      int toTransfer = Math.min(writeBuffer.length, dataRemaining);
      Platform.copyMemory(
        baseObject, rowReadPosition, writeBuffer, Platform.BYTE_ARRAY_OFFSET, toTransfer);
      out.write(writeBuffer, 0, toTransfer);
      rowReadPosition += toTransfer;
      dataRemaining -= toTransfer;
    }
  }
}
Write this UnsafeRow s underlying bytes to the given OutputStream .
14,268
// Client-side SASL bootstrap: sends the first token, then loops exchanging
// challenge/response tokens with the server until the SASL client reports
// completion or an exception signals mismatch. If encryption was requested,
// verifies that auth-conf QOP was actually negotiated and installs the
// encryption handlers on the channel; in that case ownership of the SASL
// client passes to the channel, so it is nulled to skip the finally-dispose.
public void doBootstrap ( TransportClient client , Channel channel ) { SparkSaslClient saslClient = new SparkSaslClient ( appId , secretKeyHolder , conf . saslEncryption ( ) ) ; try { byte [ ] payload = saslClient . firstToken ( ) ; while ( ! saslClient . isComplete ( ) ) { SaslMessage msg = new SaslMessage ( appId , payload ) ; ByteBuf buf = Unpooled . buffer ( msg . encodedLength ( ) + ( int ) msg . body ( ) . size ( ) ) ; msg . encode ( buf ) ; buf . writeBytes ( msg . body ( ) . nioByteBuffer ( ) ) ; ByteBuffer response = client . sendRpcSync ( buf . nioBuffer ( ) , conf . authRTTimeoutMs ( ) ) ; payload = saslClient . response ( JavaUtils . bufferToArray ( response ) ) ; } client . setClientId ( appId ) ; if ( conf . saslEncryption ( ) ) { if ( ! SparkSaslServer . QOP_AUTH_CONF . equals ( saslClient . getNegotiatedProperty ( Sasl . QOP ) ) ) { throw new RuntimeException ( new SaslException ( "Encryption requests by negotiated non-encrypted connection." ) ) ; } SaslEncryption . addToChannel ( channel , saslClient , conf . maxSaslEncryptedBlockSize ( ) ) ; saslClient = null ; logger . debug ( "Channel {} configured for encryption." , client ) ; } } catch ( IOException ioe ) { throw new RuntimeException ( ioe ) ; } finally { if ( saslClient != null ) { try { saslClient . dispose ( ) ; } catch ( RuntimeException e ) { logger . error ( "Error while disposing SASL client" , e ) ; } } } }
Performs SASL authentication by sending a token and then proceeding with the SASL challenge - response tokens until we either successfully authenticate or throw an exception due to mismatch .
14,269
// Creates a session handle for an OpenSession request. Negotiates the lowest
// common protocol version, and when doAs is enabled opens the session with
// impersonation using a metastore delegation token for the connecting user.
// The negotiated protocol version is recorded in the response.
SessionHandle getSessionHandle ( TOpenSessionReq req , TOpenSessionResp res ) throws HiveSQLException , LoginException , IOException { String userName = getUserName ( req ) ; String ipAddress = getIpAddress ( ) ; TProtocolVersion protocol = getMinVersion ( CLIService . SERVER_VERSION , req . getClient_protocol ( ) ) ; SessionHandle sessionHandle ; if ( cliService . getHiveConf ( ) . getBoolVar ( ConfVars . HIVE_SERVER2_ENABLE_DOAS ) && ( userName != null ) ) { String delegationTokenStr = getDelegationToken ( userName ) ; sessionHandle = cliService . openSessionWithImpersonation ( protocol , userName , req . getPassword ( ) , ipAddress , req . getConfiguration ( ) , delegationTokenStr ) ; } else { sessionHandle = cliService . openSession ( protocol , userName , req . getPassword ( ) , ipAddress , req . getConfiguration ( ) ) ; } res . setServerProtocolVersion ( protocol ) ; return sessionHandle ; }
Create a session handle
14,270
// Resolves the effective user when proxying is requested: the proxy user name
// comes from the HTTP query string (http transport) or from the thrift body,
// falling back to the real user when absent. User substitution must be enabled;
// unless authentication is NONE, the real user's privilege to impersonate the
// proxy user from this IP is verified before accepting it.
private String getProxyUser ( String realUser , Map < String , String > sessionConf , String ipAddress ) throws HiveSQLException { String proxyUser = null ; if ( cliService . getHiveConf ( ) . getVar ( ConfVars . HIVE_SERVER2_TRANSPORT_MODE ) . equalsIgnoreCase ( "http" ) ) { proxyUser = SessionManager . getProxyUserName ( ) ; LOG . debug ( "Proxy user from query string: " + proxyUser ) ; } if ( proxyUser == null && sessionConf != null && sessionConf . containsKey ( HiveAuthFactory . HS2_PROXY_USER ) ) { String proxyUserFromThriftBody = sessionConf . get ( HiveAuthFactory . HS2_PROXY_USER ) ; LOG . debug ( "Proxy user from thrift body: " + proxyUserFromThriftBody ) ; proxyUser = proxyUserFromThriftBody ; } if ( proxyUser == null ) { return realUser ; } if ( ! hiveConf . getBoolVar ( HiveConf . ConfVars . HIVE_SERVER2_ALLOW_USER_SUBSTITUTION ) ) { throw new HiveSQLException ( "Proxy user substitution is not allowed" ) ; } if ( HiveAuthFactory . AuthTypes . NONE . toString ( ) . equalsIgnoreCase ( hiveConf . getVar ( ConfVars . HIVE_SERVER2_AUTHENTICATION ) ) ) { return proxyUser ; } HiveAuthFactory . verifyProxyAccess ( realUser , proxyUser , ipAddress , hiveConf ) ; LOG . debug ( "Verified proxy user: " + proxyUser ) ; return proxyUser ; }
If the proxy user name is provided then check privileges to substitute the user .
14,271
// Returns the boolean mapped to `key`, or `defaultValue` when the key is
// absent. Accepts "true"/"false" in any letter case; any other value is
// rejected with IllegalArgumentException.
public boolean getBoolean(String key, boolean defaultValue) {
  String raw = get(key);
  if (raw == null) {
    return defaultValue;
  }
  if ("true".equalsIgnoreCase(raw)) {
    return true;
  }
  if ("false".equalsIgnoreCase(raw)) {
    return false;
  }
  throw new IllegalArgumentException(raw + " is not a boolean string.");
}
Returns the boolean value to which the specified key is mapped or defaultValue if there is no mapping for the key . The key match is case - insensitive .
14,272
// Returns the double mapped to `key`, or `defaultValue` when the key is absent.
public double getDouble(String key, double defaultValue) {
  String raw = get(key);
  if (raw == null) {
    return defaultValue;
  }
  return Double.parseDouble(raw);
}
Returns the double value to which the specified key is mapped or defaultValue if there is no mapping for the key . The key match is case - insensitive .
14,273
// Sets the bit at `index`. Returns true only if the bit was previously clear
// (i.e. the call changed the array). Java's long shift masks the count to six
// bits, so (1L << index) addresses bit (index % 64) within the selected word.
boolean set(long index) {
  if (get(index)) {
    return false;
  }
  data[(int) (index >>> 6)] |= (1L << index);
  bitCount++;
  return true;
}
Returns true if the bit changed value .
14,274
// ORs `array` into this BitArray word by word and recomputes the cached
// population count in the same pass. Both arrays must be the same length.
void putAll(BitArray array) {
  assert data.length == array.data.length : "BitArrays must be of equal length when merging";
  long mergedCount = 0;
  for (int w = 0; w < data.length; w++) {
    data[w] |= array.data[w];
    mergedCount += Long.bitCount(data[w]);
  }
  this.bitCount = mergedCount;
}
Combines the two BitArrays using bitwise OR .
14,275
// Wraps the application RPC handler in a SaslRpcHandler so the initial SASL
// negotiation completes before application messages are delivered.
public RpcHandler doBootstrap(Channel channel, RpcHandler rpcHandler) {
  return new SaslRpcHandler(conf, channel, rpcHandler, secretKeyHolder);
}
Wrap the given application handler in a SaslRpcHandler that will handle the initial SASL negotiation .
14,276
// Looks up the shuffle-data offset and length for the given reducer: the
// length is the difference between two consecutive entries of the offsets
// array.
public ShuffleIndexRecord getIndex(int reduceId) {
  long start = offsets.get(reduceId);
  long end = offsets.get(reduceId + 1);
  return new ShuffleIndexRecord(start, end - start);
}
Get index offset for a particular reducer .
14,277
// Ensures the internal buffer can absorb `neededSize` more bytes, doubling
// the capacity (capped at ARRAY_MAX) when a larger array is required.
// Rejects growth that would exceed the maximum array size.
private void grow(int neededSize) {
  if (neededSize > ARRAY_MAX - totalSize()) {
    throw new UnsupportedOperationException(
      "Cannot grow internal buffer by size " + neededSize + " because the size after growing " +
      "exceeds size limitation " + ARRAY_MAX);
  }
  final int length = totalSize() + neededSize;
  if (buffer.length >= length) {
    return;
  }
  int newLength = length < ARRAY_MAX / 2 ? length * 2 : ARRAY_MAX;
  final byte[] grown = new byte[newLength];
  Platform.copyMemory(buffer, Platform.BYTE_ARRAY_OFFSET, grown, Platform.BYTE_ARRAY_OFFSET, totalSize());
  buffer = grown;
}
Grows the buffer by at least neededSize
14,278
// Connection timeout in milliseconds. Falls back to spark.network.timeout
// (default 120s) when no explicit IO connection timeout is configured.
public int connectionTimeoutMs() {
  long networkTimeoutS = JavaUtils.timeStringAsSec(conf.get("spark.network.timeout", "120s"));
  long timeoutMs =
    JavaUtils.timeStringAsSec(conf.get(SPARK_NETWORK_IO_CONNECTIONTIMEOUT_KEY, networkTimeoutS + "s")) * 1000;
  return (int) timeoutMs;
}
Connect timeout in milliseconds . Default 120 secs .
14,279
// Builds the `java` launch command: the java binary from the first available
// of (configured javaHome, child env JAVA_HOME, process env JAVA_HOME,
// java.home), any extra JVM options read line-by-line from conf/java-opts,
// and the computed classpath via -cp.
List < String > buildJavaCommand ( String extraClassPath ) throws IOException { List < String > cmd = new ArrayList < > ( ) ; String [ ] candidateJavaHomes = new String [ ] { javaHome , childEnv . get ( "JAVA_HOME" ) , System . getenv ( "JAVA_HOME" ) , System . getProperty ( "java.home" ) } ; for ( String javaHome : candidateJavaHomes ) { if ( javaHome != null ) { cmd . add ( join ( File . separator , javaHome , "bin" , "java" ) ) ; break ; } } File javaOpts = new File ( join ( File . separator , getConfDir ( ) , "java-opts" ) ) ; if ( javaOpts . isFile ( ) ) { try ( BufferedReader br = new BufferedReader ( new InputStreamReader ( new FileInputStream ( javaOpts ) , StandardCharsets . UTF_8 ) ) ) { String line ; while ( ( line = br . readLine ( ) ) != null ) { addOptionString ( cmd , line ) ; } } } cmd . add ( "-cp" ) ; cmd . add ( join ( File . pathSeparator , buildClassPath ( extraClassPath ) ) ) ; return cmd ; }
Builds a list of arguments to run java .
14,280
// Splits `entries` on the platform path separator and adds each non-empty
// entry to `cp`. Directories get a trailing separator appended so the JVM
// treats them as classpath directories.
private void addToClassPath(Set<String> cp, String entries) {
  if (isEmpty(entries)) {
    return;
  }
  for (String entry : entries.split(Pattern.quote(File.pathSeparator))) {
    if (isEmpty(entry)) {
      continue;
    }
    String normalized = entry;
    if (new File(normalized).isDirectory() && !normalized.endsWith(File.separator)) {
      normalized += File.separator;
    }
    cp.add(normalized);
  }
}
Adds entries to the classpath .
14,281
// Loads the application configuration: the user-specified properties file when
// set (must exist), otherwise spark-defaults.conf under the conf directory
// (may be absent). Values are read as UTF-8 and trimmed; a missing default
// file yields an empty Properties object.
private Properties loadPropertiesFile ( ) throws IOException { Properties props = new Properties ( ) ; File propsFile ; if ( propertiesFile != null ) { propsFile = new File ( propertiesFile ) ; checkArgument ( propsFile . isFile ( ) , "Invalid properties file '%s'." , propertiesFile ) ; } else { propsFile = new File ( getConfDir ( ) , DEFAULT_PROPERTIES_FILE ) ; } if ( propsFile . isFile ( ) ) { try ( InputStreamReader isr = new InputStreamReader ( new FileInputStream ( propsFile ) , StandardCharsets . UTF_8 ) ) { props . load ( isr ) ; for ( Map . Entry < Object , Object > e : props . entrySet ( ) ) { e . setValue ( e . getValue ( ) . toString ( ) . trim ( ) ) ; } } } return props ; }
Loads the configuration file for the application if it exists . This is either the user - specified properties file or the spark - defaults . conf file under the Spark configuration directory .
14,282
// Opens the ORC file behind the given split and creates a row-level record
// reader restricted to the split's byte range. initBatch must be called after
// this before batches can be read.
public void initialize ( InputSplit inputSplit , TaskAttemptContext taskAttemptContext ) throws IOException { FileSplit fileSplit = ( FileSplit ) inputSplit ; Configuration conf = taskAttemptContext . getConfiguration ( ) ; Reader reader = OrcFile . createReader ( fileSplit . getPath ( ) , OrcFile . readerOptions ( conf ) . maxLength ( OrcConf . MAX_FILE_LENGTH . getLong ( conf ) ) . filesystem ( fileSplit . getPath ( ) . getFileSystem ( conf ) ) ) ; Reader . Options options = OrcInputFormat . buildOptions ( conf , reader , fileSplit . getStart ( ) , fileSplit . getLength ( ) ) ; recordReader = reader . rows ( options ) ; }
Initialize ORC file reader and batch record reader . Please note that initBatch is needed to be called after this .
14,283
// Builds the ColumnarBatch for the full result schema. For each required field:
// partition columns become constant on-heap vectors populated from
// partitionValues; fields with no data column id become constant all-null
// vectors; everything else wraps the corresponding ORC batch vector directly.
// A field that is a partition column has its data-column id cleared first so
// it is never also read from the file.
public void initBatch ( TypeDescription orcSchema , StructField [ ] requiredFields , int [ ] requestedDataColIds , int [ ] requestedPartitionColIds , InternalRow partitionValues ) { wrap = new VectorizedRowBatchWrap ( orcSchema . createRowBatch ( capacity ) ) ; assert ( ! wrap . batch ( ) . selectedInUse ) ; assert ( requiredFields . length == requestedDataColIds . length ) ; assert ( requiredFields . length == requestedPartitionColIds . length ) ; for ( int i = 0 ; i < requiredFields . length ; i ++ ) { if ( requestedPartitionColIds [ i ] != - 1 ) { requestedDataColIds [ i ] = - 1 ; } } this . requiredFields = requiredFields ; this . requestedDataColIds = requestedDataColIds ; StructType resultSchema = new StructType ( requiredFields ) ; orcVectorWrappers = new org . apache . spark . sql . vectorized . ColumnVector [ resultSchema . length ( ) ] ; for ( int i = 0 ; i < requiredFields . length ; i ++ ) { DataType dt = requiredFields [ i ] . dataType ( ) ; if ( requestedPartitionColIds [ i ] != - 1 ) { OnHeapColumnVector partitionCol = new OnHeapColumnVector ( capacity , dt ) ; ColumnVectorUtils . populate ( partitionCol , partitionValues , requestedPartitionColIds [ i ] ) ; partitionCol . setIsConstant ( ) ; orcVectorWrappers [ i ] = partitionCol ; } else { int colId = requestedDataColIds [ i ] ; if ( colId == - 1 ) { OnHeapColumnVector missingCol = new OnHeapColumnVector ( capacity , dt ) ; missingCol . putNulls ( 0 , capacity ) ; missingCol . setIsConstant ( ) ; orcVectorWrappers [ i ] = missingCol ; } else { orcVectorWrappers [ i ] = new OrcColumnVector ( dt , wrap . batch ( ) . cols [ colId ] ) ; } } } columnarBatch = new ColumnarBatch ( orcVectorWrappers ) ; }
Initialize columnar batch by setting required schema and partition information . With this information this creates ColumnarBatch with the full schema .
14,284
// Advances to the next ORC batch. Returns false when the input is exhausted;
// otherwise propagates the batch size into the Spark ColumnarBatch and each
// ORC-backed column wrapper and returns true.
private boolean nextBatch() throws IOException {
  recordReader.nextBatch(wrap.batch());
  int rows = wrap.batch().size;
  if (rows == 0) {
    return false;
  }
  columnarBatch.setNumRows(rows);
  for (int i = 0; i < requiredFields.length; i++) {
    if (requestedDataColIds[i] != -1) {
      ((OrcColumnVector) orcVectorWrappers[i]).setBatchSize(rows);
    }
  }
  return true;
}
Return true if there exists more data in the next batch . If exists prepare the next batch by copying from ORC VectorizedRowBatch columns to Spark ColumnarBatch columns .
14,285
// Parses a "second_nano" string (ss or ss.nnnnnnnnn) into microseconds. The
// fractional part is interpreted as nanoseconds and truncated to microsecond
// precision; an empty whole-seconds part (".nnn") counts as zero seconds.
public static long parseSecondNano(String secondNano) throws IllegalArgumentException {
  String[] parts = secondNano.split("\\.");
  if (parts.length == 1) {
    return toLongWithRange("second", parts[0],
      Long.MIN_VALUE / MICROS_PER_SECOND, Long.MAX_VALUE / MICROS_PER_SECOND) * MICROS_PER_SECOND;
  }
  if (parts.length == 2) {
    long seconds = parts[0].isEmpty() ? 0L : toLongWithRange("second", parts[0],
      Long.MIN_VALUE / MICROS_PER_SECOND, Long.MAX_VALUE / MICROS_PER_SECOND);
    long nanos = toLongWithRange("nanosecond", parts[1], 0L, 999999999L);
    return seconds * MICROS_PER_SECOND + nanos / 1000L;
  }
  throw new IllegalArgumentException(
    "Interval string does not match second-nano format of ss.nnnnnnnnn");
}
Parse second_nano string in ss . nnnnnnnnn format to microseconds
14,286
// Installs the SASL encryption and decryption handlers at the head of the
// channel pipeline so all traffic is wrapped and unwrapped transparently.
public void addToChannel(Channel ch) throws IOException {
  ch.pipeline()
    .addFirst(ENCRYPTION_HANDLER_NAME, new EncryptionHandler(this))
    .addFirst(DECRYPTION_HANDLER_NAME, new DecryptionHandler(this));
}
Add handlers to channel .
14,287
// Closes every connection in the pool (nulling each slot before the quiet
// close), clears the pool, and gracefully shuts down the worker event-loop
// group.
public void close() {
  for (ClientPool pool : connectionPool.values()) {
    for (int slot = 0; slot < pool.clients.length; slot++) {
      TransportClient client = pool.clients[slot];
      if (client == null) {
        continue;
      }
      pool.clients[slot] = null;
      JavaUtils.closeQuietly(client);
    }
  }
  connectionPool.clear();
  if (workerGroup != null) {
    workerGroup.shutdownGracefully();
    workerGroup = null;
  }
}
Close all connections in the connection pool and shutdown the worker thread pool .
14,288
// Copies as many bytes as fit from `src` into the remaining space of the
// internal array and returns how many bytes were consumed.
public int write(ByteBuffer src) {
  int room = data.length - offset;
  int toTransfer = Math.min(src.remaining(), room);
  src.get(data, offset, toTransfer);
  offset += toTransfer;
  return toTransfer;
}
Reads from the given buffer into the internal byte array .
14,289
// Applies a configuration value after variable substitution. When hive conf
// validation is enabled, known keys are type- and value-validated (the
// validator returns a non-null failure string) and unknown hive.* keys are
// rejected. When `register` is set, the key is also recorded as a
// session-level override.
private static void setConf ( String varname , String key , String varvalue , boolean register ) throws IllegalArgumentException { HiveConf conf = SessionState . get ( ) . getConf ( ) ; String value = new VariableSubstitution ( ) . substitute ( conf , varvalue ) ; if ( conf . getBoolVar ( HiveConf . ConfVars . HIVECONFVALIDATION ) ) { HiveConf . ConfVars confVars = HiveConf . getConfVars ( key ) ; if ( confVars != null ) { if ( ! confVars . isType ( value ) ) { StringBuilder message = new StringBuilder ( ) ; message . append ( "'SET " ) . append ( varname ) . append ( '=' ) . append ( varvalue ) ; message . append ( "' FAILED because " ) . append ( key ) . append ( " expects " ) ; message . append ( confVars . typeString ( ) ) . append ( " type value." ) ; throw new IllegalArgumentException ( message . toString ( ) ) ; } String fail = confVars . validate ( value ) ; if ( fail != null ) { StringBuilder message = new StringBuilder ( ) ; message . append ( "'SET " ) . append ( varname ) . append ( '=' ) . append ( varvalue ) ; message . append ( "' FAILED in validation : " ) . append ( fail ) . append ( '.' ) ; throw new IllegalArgumentException ( message . toString ( ) ) ; } } else if ( key . startsWith ( "hive." ) ) { throw new IllegalArgumentException ( "hive configuration " + key + " does not exists." ) ; } } conf . verifyAndSet ( key , value ) ; if ( register ) { SessionState . get ( ) . getOverriddenConfigurations ( ) . put ( key , value ) ; } }
Returns a non-null error string if validation fails.
14,290
// Releases this session's per-thread state: detaches the thread-local
// SessionState (the thread may serve other requests next) and caches the
// thread-local RawStore so the worker thread can clean it up when it is
// garbage collected later. Updates last-access time on user access, and
// last-idle time when no operations remain open.
protected synchronized void release ( boolean userAccess ) { SessionState . detachSession ( ) ; if ( ThreadWithGarbageCleanup . currentThread ( ) instanceof ThreadWithGarbageCleanup ) { ThreadWithGarbageCleanup currentThread = ( ThreadWithGarbageCleanup ) ThreadWithGarbageCleanup . currentThread ( ) ; currentThread . cacheThreadLocalRawStore ( ) ; } if ( userAccess ) { lastAccessTime = System . currentTimeMillis ( ) ; } if ( opHandleSet . isEmpty ( ) ) { lastIdleTime = System . currentTimeMillis ( ) ; } else { lastIdleTime = 0 ; } }
1. We'll remove the ThreadLocal SessionState as this thread might now serve other requests. 2. We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup when this thread is garbage collected later.
14,291
// Extracts the real user name encoded in the given delegation token string.
private String getUserFromToken(HiveAuthFactory authFactory, String tokenStr)
    throws HiveSQLException {
  return authFactory.getUserFromToken(tokenStr);
}
Extracts the real user from the given token string.
14,292
// Sets up the UGI for this session: a proxy of the login user when Hadoop
// security is enabled, otherwise a simple remote user. `owner` must be
// non-null.
public void setSessionUGI(String owner) throws HiveSQLException {
  if (owner == null) {
    throw new HiveSQLException("No username provided for impersonation");
  }
  if (!UserGroupInformation.isSecurityEnabled()) {
    sessionUgi = UserGroupInformation.createRemoteUser(owner);
    return;
  }
  try {
    sessionUgi = UserGroupInformation.createProxyUser(owner, UserGroupInformation.getLoginUser());
  } catch (IOException e) {
    throw new HiveSQLException("Couldn't setup proxy user", e);
  }
}
Sets up the appropriate UGI for the session.
14,293
// Closes the impersonated session: cancels the delegation token (and the
// metastore connection behind it), closes the base session, and finally
// removes this session UGI's cached FileSystem handles even if earlier steps
// fail.
public void close ( ) throws HiveSQLException { try { acquire ( true ) ; cancelDelegationToken ( ) ; } finally { try { super . close ( ) ; } finally { try { FileSystem . closeAllForUGI ( sessionUgi ) ; } catch ( IOException ioe ) { throw new HiveSQLException ( "Could not clean up file-system handles for UGI: " + sessionUgi , ioe ) ; } } } }
Close the file systems for the session and remove it from the FileSystem cache . Cancel the session s delegation token and close the metastore connection
14,294
// Stores the session's delegation token and, when one is present, sets
// hive.metastore.token.signature so the metastore client authenticates with
// the delegation token instead of kerberos, then installs the token string in
// the session UGI.
private void setDelegationToken ( String delegationTokenStr ) throws HiveSQLException { this . delegationTokenStr = delegationTokenStr ; if ( delegationTokenStr != null ) { getHiveConf ( ) . set ( "hive.metastore.token.signature" , HS2TOKEN ) ; try { Utils . setTokenStr ( sessionUgi , delegationTokenStr , HS2TOKEN ) ; } catch ( IOException e ) { throw new HiveSQLException ( "Couldn't setup delegation token in the ugi" , e ) ; } } }
Enable delegation token for the session save the token string and set the token . signature in hive conf . The metastore client uses this token . signature to determine where to use kerberos or delegation token
14,295
// Cancels the metastore delegation token obtained for this session, if any,
// and closes the current metastore connection afterwards.
private void cancelDelegationToken() throws HiveSQLException {
  if (delegationTokenStr == null) {
    return;
  }
  try {
    Hive.get(getHiveConf()).cancelDelegationToken(delegationTokenStr);
  } catch (HiveException e) {
    throw new HiveSQLException("Couldn't cancel delegation token", e);
  }
  Hive.closeCurrent();
}
If the session has a delegation token obtained from the metastore then cancel it
14,296
// Returns `size` bytes of execution memory acquired by `consumer` back to the
// memory manager, attributed to this task attempt.
public void releaseExecutionMemory(long size, MemoryConsumer consumer) {
  logger.debug("Task {} release {} from {}", taskAttemptId, Utils.bytesToString(size), consumer);
  memoryManager.releaseExecutionMemory(size, taskAttemptId, consumer.getMode());
}
Release N bytes of execution memory for a MemoryConsumer .
14,297
// Logs a per-consumer breakdown of this task's memory usage, plus whatever
// execution memory the manager attributes to the task that no registered
// consumer accounts for, and the manager-wide execution/storage totals.
public void showMemoryUsage ( ) { logger . info ( "Memory used in task " + taskAttemptId ) ; synchronized ( this ) { long memoryAccountedForByConsumers = 0 ; for ( MemoryConsumer c : consumers ) { long totalMemUsage = c . getUsed ( ) ; memoryAccountedForByConsumers += totalMemUsage ; if ( totalMemUsage > 0 ) { logger . info ( "Acquired by " + c + ": " + Utils . bytesToString ( totalMemUsage ) ) ; } } long memoryNotAccountedFor = memoryManager . getExecutionMemoryUsageForTask ( taskAttemptId ) - memoryAccountedForByConsumers ; logger . info ( "{} bytes of memory were used by task {} but are not associated with specific consumers" , memoryNotAccountedFor , taskAttemptId ) ; logger . info ( "{} bytes of memory are used for execution and {} bytes of memory are used for storage" , memoryManager . executionMemoryUsed ( ) , memoryManager . storageMemoryUsed ( ) ) ; } }
Dump the memory usage of all consumers .
14,298
// Allocates a block of Tungsten memory tracked in this manager's page table,
// intended for large blocks shared between operators. Returns null when no
// execution memory could be acquired. If the JVM-level allocation throws
// OutOfMemoryError, the acquired-but-unused amount is recorded, the page
// number is released, and the call retries recursively.
public MemoryBlock allocatePage ( long size , MemoryConsumer consumer ) { assert ( consumer != null ) ; assert ( consumer . getMode ( ) == tungstenMemoryMode ) ; if ( size > MAXIMUM_PAGE_SIZE_BYTES ) { throw new TooLargePageException ( size ) ; } long acquired = acquireExecutionMemory ( size , consumer ) ; if ( acquired <= 0 ) { return null ; } final int pageNumber ; synchronized ( this ) { pageNumber = allocatedPages . nextClearBit ( 0 ) ; if ( pageNumber >= PAGE_TABLE_SIZE ) { releaseExecutionMemory ( acquired , consumer ) ; throw new IllegalStateException ( "Have already allocated a maximum of " + PAGE_TABLE_SIZE + " pages" ) ; } allocatedPages . set ( pageNumber ) ; } MemoryBlock page = null ; try { page = memoryManager . tungstenMemoryAllocator ( ) . allocate ( acquired ) ; } catch ( OutOfMemoryError e ) { logger . warn ( "Failed to allocate a page ({} bytes), try again." , acquired ) ; synchronized ( this ) { acquiredButNotUsed += acquired ; allocatedPages . clear ( pageNumber ) ; } return allocatePage ( size , consumer ) ; } page . pageNumber = pageNumber ; pageTable [ pageNumber ] = page ; if ( logger . isTraceEnabled ( ) ) { logger . trace ( "Allocate page number {} ({} bytes)" , pageNumber , acquired ) ; } return page ; }
Allocate a block of memory that will be tracked in the MemoryManager s page table ; this is intended for allocating large blocks of Tungsten memory that will be shared between operators .
14,299
// Packs a memory page and an offset within it into a single 64-bit address
// that remains valid until the page is freed. Off-heap pages carry absolute
// addresses, so the offset is first made page-relative.
public long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage) {
  long relativeOffset = offsetInPage;
  if (tungstenMemoryMode == MemoryMode.OFF_HEAP) {
    relativeOffset -= page.getBaseOffset();
  }
  return encodePageNumberAndOffset(page.pageNumber, relativeOffset);
}
Given a memory page and offset within that page encode this address into a 64 - bit long . This address will remain valid as long as the corresponding page has not been freed .