idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
14,100 | private static String prepareWindowsCommand ( List < String > cmd , Map < String , String > childEnv ) { StringBuilder cmdline = new StringBuilder ( ) ; for ( Map . Entry < String , String > e : childEnv . entrySet ( ) ) { cmdline . append ( String . format ( "set %s=%s" , e . getKey ( ) , e . getValue ( ) ) ) ; cmdline . append ( " && " ) ; } for ( String arg : cmd ) { cmdline . append ( quoteForBatchScript ( arg ) ) ; cmdline . append ( " " ) ; } return cmdline . toString ( ) ; } | Prepare a command line for execution from a Windows batch script . |
14,101 | private static List < String > prepareBashCommand ( List < String > cmd , Map < String , String > childEnv ) { if ( childEnv . isEmpty ( ) ) { return cmd ; } List < String > newCmd = new ArrayList < > ( ) ; newCmd . add ( "env" ) ; for ( Map . Entry < String , String > e : childEnv . entrySet ( ) ) { newCmd . add ( String . format ( "%s=%s" , e . getKey ( ) , e . getValue ( ) ) ) ; } newCmd . addAll ( cmd ) ; return newCmd ; } | Prepare the command for execution from a bash script . The final command will have commands to set up any needed environment variables needed by the child process . |
14,102 | public void zeroOutNullBytes ( ) { for ( int i = 0 ; i < nullBitsSize ; i += 8 ) { Platform . putLong ( getBuffer ( ) , startingOffset + i , 0L ) ; } } | Clears out null bits . This should be called before we write a new row to row buffer . |
14,103 | ClientChallenge challenge ( ) throws GeneralSecurityException { this . authNonce = randomBytes ( conf . encryptionKeyLength ( ) / Byte . SIZE ) ; SecretKeySpec authKey = generateKey ( conf . keyFactoryAlgorithm ( ) , conf . keyFactoryIterations ( ) , authNonce , conf . encryptionKeyLength ( ) ) ; initializeForAuth ( conf . cipherTransformation ( ) , authNonce , authKey ) ; this . challenge = randomBytes ( conf . encryptionKeyLength ( ) / Byte . SIZE ) ; return new ClientChallenge ( new String ( appId , UTF_8 ) , conf . keyFactoryAlgorithm ( ) , conf . keyFactoryIterations ( ) , conf . cipherTransformation ( ) , conf . encryptionKeyLength ( ) , authNonce , challenge ( appId , authNonce , challenge ) ) ; } | Create the client challenge . |
14,104 | ServerResponse respond ( ClientChallenge clientChallenge ) throws GeneralSecurityException { SecretKeySpec authKey = generateKey ( clientChallenge . kdf , clientChallenge . iterations , clientChallenge . nonce , clientChallenge . keyLength ) ; initializeForAuth ( clientChallenge . cipher , clientChallenge . nonce , authKey ) ; byte [ ] challenge = validateChallenge ( clientChallenge . nonce , clientChallenge . challenge ) ; byte [ ] response = challenge ( appId , clientChallenge . nonce , rawResponse ( challenge ) ) ; byte [ ] sessionNonce = randomBytes ( conf . encryptionKeyLength ( ) / Byte . SIZE ) ; byte [ ] inputIv = randomBytes ( conf . ivLength ( ) ) ; byte [ ] outputIv = randomBytes ( conf . ivLength ( ) ) ; SecretKeySpec sessionKey = generateKey ( clientChallenge . kdf , clientChallenge . iterations , sessionNonce , clientChallenge . keyLength ) ; this . sessionCipher = new TransportCipher ( cryptoConf , clientChallenge . cipher , sessionKey , inputIv , outputIv ) ; return new ServerResponse ( response , encrypt ( sessionNonce ) , encrypt ( outputIv ) , encrypt ( inputIv ) ) ; } | Validates the client challenge and create the encryption backend for the channel from the parameters sent by the client . |
14,105 | void validate ( ServerResponse serverResponse ) throws GeneralSecurityException { byte [ ] response = validateChallenge ( authNonce , serverResponse . response ) ; byte [ ] expected = rawResponse ( challenge ) ; Preconditions . checkArgument ( Arrays . equals ( expected , response ) ) ; byte [ ] nonce = decrypt ( serverResponse . nonce ) ; byte [ ] inputIv = decrypt ( serverResponse . inputIv ) ; byte [ ] outputIv = decrypt ( serverResponse . outputIv ) ; SecretKeySpec sessionKey = generateKey ( conf . keyFactoryAlgorithm ( ) , conf . keyFactoryIterations ( ) , nonce , conf . encryptionKeyLength ( ) ) ; this . sessionCipher = new TransportCipher ( cryptoConf , conf . cipherTransformation ( ) , sessionKey , inputIv , outputIv ) ; } | Validates the server response and initializes the cipher to use for the session . |
14,106 | private byte [ ] validateChallenge ( byte [ ] nonce , byte [ ] encryptedChallenge ) throws GeneralSecurityException { byte [ ] challenge = decrypt ( encryptedChallenge ) ; checkSubArray ( appId , challenge , 0 ) ; checkSubArray ( nonce , challenge , appId . length ) ; return Arrays . copyOfRange ( challenge , appId . length + nonce . length , challenge . length ) ; } | Validates an encrypted challenge as defined in the protocol and returns the byte array that corresponds to the actual challenge data . |
14,107 | private void checkSubArray ( byte [ ] test , byte [ ] data , int offset ) { Preconditions . checkArgument ( data . length >= test . length + offset ) ; for ( int i = 0 ; i < test . length ; i ++ ) { Preconditions . checkArgument ( test [ i ] == data [ i + offset ] ) ; } } | Checks that the test array is in the data array starting at the given offset . |
14,108 | private static int fmix ( int h1 , int length ) { h1 ^= length ; h1 ^= h1 >>> 16 ; h1 *= 0x85ebca6b ; h1 ^= h1 >>> 13 ; h1 *= 0xc2b2ae35 ; h1 ^= h1 >>> 16 ; return h1 ; } | Finalization mix - force all bits of a hash block to avalanche |
14,109 | public boolean acceptInboundMessage ( Object msg ) throws Exception { if ( msg instanceof ChunkFetchRequest ) { return false ; } else { return super . acceptInboundMessage ( msg ) ; } } | Overwrite acceptInboundMessage to properly delegate ChunkFetchRequest messages to ChunkFetchRequestHandler . |
14,110 | public TransportServer createServer ( int port , List < TransportServerBootstrap > bootstraps ) { return new TransportServer ( this , null , port , rpcHandler , bootstraps ) ; } | Create a server which will attempt to bind to a specific port . |
14,111 | public TransportServer createServer ( String host , int port , List < TransportServerBootstrap > bootstraps ) { return new TransportServer ( this , host , port , rpcHandler , bootstraps ) ; } | Create a server which will attempt to bind to a specific host and port . |
14,112 | private ChunkFetchRequestHandler createChunkFetchHandler ( TransportChannelHandler channelHandler , RpcHandler rpcHandler ) { return new ChunkFetchRequestHandler ( channelHandler . getClient ( ) , rpcHandler . getStreamManager ( ) , conf . maxChunksBeingTransferred ( ) ) ; } | Creates the dedicated ChannelHandler for ChunkFetchRequest messages . |
14,113 | void monitorChild ( ) { Process proc = childProc ; if ( proc == null ) { return ; } while ( proc . isAlive ( ) ) { try { proc . waitFor ( ) ; } catch ( Exception e ) { LOG . log ( Level . WARNING , "Exception waiting for child process to exit." , e ) ; } } synchronized ( this ) { if ( isDisposed ( ) ) { return ; } int ec ; try { ec = proc . exitValue ( ) ; } catch ( Exception e ) { LOG . log ( Level . WARNING , "Exception getting child process exit code, assuming failure." , e ) ; ec = 1 ; } if ( ec != 0 ) { State currState = getState ( ) ; if ( ! currState . isFinal ( ) || currState == State . FINISHED ) { setState ( State . FAILED , true ) ; } } dispose ( ) ; } } | Wait for the child process to exit and update the handle s state if necessary according to the exit code . |
14,114 | public T setPropertiesFile ( String path ) { checkNotNull ( path , "path" ) ; builder . setPropertiesFile ( path ) ; return self ( ) ; } | Set a custom properties file with Spark configuration for the application . |
14,115 | public T setConf ( String key , String value ) { checkNotNull ( key , "key" ) ; checkNotNull ( value , "value" ) ; checkArgument ( key . startsWith ( "spark." ) , "'key' must start with 'spark.'" ) ; builder . conf . put ( key , value ) ; return self ( ) ; } | Set a single configuration value for the application . |
14,116 | public T setAppName ( String appName ) { checkNotNull ( appName , "appName" ) ; builder . appName = appName ; return self ( ) ; } | Set the application name . |
14,117 | public T setMaster ( String master ) { checkNotNull ( master , "master" ) ; builder . master = master ; return self ( ) ; } | Set the Spark master for the application . |
14,118 | public T setDeployMode ( String mode ) { checkNotNull ( mode , "mode" ) ; builder . deployMode = mode ; return self ( ) ; } | Set the deploy mode for the application . |
14,119 | public T addAppArgs ( String ... args ) { for ( String arg : args ) { checkNotNull ( arg , "arg" ) ; builder . appArgs . add ( arg ) ; } return self ( ) ; } | Adds command line arguments for the application . |
14,120 | public T addJar ( String jar ) { checkNotNull ( jar , "jar" ) ; builder . jars . add ( jar ) ; return self ( ) ; } | Adds a jar file to be submitted with the application . |
14,121 | public T addFile ( String file ) { checkNotNull ( file , "file" ) ; builder . files . add ( file ) ; return self ( ) ; } | Adds a file to be submitted with the application . |
14,122 | public void zeroOut ( ) { for ( long off = baseOffset ; off < baseOffset + length * WIDTH ; off += WIDTH ) { Platform . putLong ( baseObj , off , 0 ) ; } } | Fill this all with 0L . |
14,123 | private static CaseInsensitiveStringMap catalogOptions ( String name , SQLConf conf ) { Map < String , String > allConfs = mapAsJavaMapConverter ( conf . getAllConfs ( ) ) . asJava ( ) ; Pattern prefix = Pattern . compile ( "^spark\\.sql\\.catalog\\." + name + "\\.(.+)" ) ; HashMap < String , String > options = new HashMap < > ( ) ; for ( Map . Entry < String , String > entry : allConfs . entrySet ( ) ) { Matcher matcher = prefix . matcher ( entry . getKey ( ) ) ; if ( matcher . matches ( ) && matcher . groupCount ( ) > 0 ) { options . put ( matcher . group ( 1 ) , entry . getValue ( ) ) ; } } return new CaseInsensitiveStringMap ( options ) ; } | Extracts a named catalog s configuration from a SQLConf . |
14,124 | synchronized String registerHandle ( AbstractAppHandle handle ) { String secret = createSecret ( ) ; secretToPendingApps . put ( secret , handle ) ; return secret ; } | Registers a handle with the server and returns the secret the child app needs to connect back . |
14,125 | public static int sort ( LongArray array , long numRecords , int startByteIndex , int endByteIndex , boolean desc , boolean signed ) { assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0" ; assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7" ; assert endByteIndex > startByteIndex ; assert numRecords * 2 <= array . size ( ) ; long inIndex = 0 ; long outIndex = numRecords ; if ( numRecords > 0 ) { long [ ] [ ] counts = getCounts ( array , numRecords , startByteIndex , endByteIndex ) ; for ( int i = startByteIndex ; i <= endByteIndex ; i ++ ) { if ( counts [ i ] != null ) { sortAtByte ( array , numRecords , counts [ i ] , i , inIndex , outIndex , desc , signed && i == endByteIndex ) ; long tmp = inIndex ; inIndex = outIndex ; outIndex = tmp ; } } } return Ints . checkedCast ( inIndex ) ; } | Sorts a given array of longs using least - significant - digit radix sort . This routine assumes you have extra space at the end of the array at least equal to the number of records . The sort is destructive and may relocate the data positioned within the array . |
14,126 | private static void sortAtByte ( LongArray array , long numRecords , long [ ] counts , int byteIdx , long inIndex , long outIndex , boolean desc , boolean signed ) { assert counts . length == 256 ; long [ ] offsets = transformCountsToOffsets ( counts , numRecords , array . getBaseOffset ( ) + outIndex * 8L , 8 , desc , signed ) ; Object baseObject = array . getBaseObject ( ) ; long baseOffset = array . getBaseOffset ( ) + inIndex * 8L ; long maxOffset = baseOffset + numRecords * 8L ; for ( long offset = baseOffset ; offset < maxOffset ; offset += 8 ) { long value = Platform . getLong ( baseObject , offset ) ; int bucket = ( int ) ( ( value >>> ( byteIdx * 8 ) ) & 0xff ) ; Platform . putLong ( baseObject , offsets [ bucket ] , value ) ; offsets [ bucket ] += 8 ; } } | Performs a partial sort by copying data into destination offsets for each byte value at the specified byte offset . |
14,127 | private static long [ ] [ ] getCounts ( LongArray array , long numRecords , int startByteIndex , int endByteIndex ) { long [ ] [ ] counts = new long [ 8 ] [ ] ; long bitwiseMax = 0 ; long bitwiseMin = - 1L ; long maxOffset = array . getBaseOffset ( ) + numRecords * 8L ; Object baseObject = array . getBaseObject ( ) ; for ( long offset = array . getBaseOffset ( ) ; offset < maxOffset ; offset += 8 ) { long value = Platform . getLong ( baseObject , offset ) ; bitwiseMax |= value ; bitwiseMin &= value ; } long bitsChanged = bitwiseMin ^ bitwiseMax ; for ( int i = startByteIndex ; i <= endByteIndex ; i ++ ) { if ( ( ( bitsChanged >>> ( i * 8 ) ) & 0xff ) != 0 ) { counts [ i ] = new long [ 256 ] ; for ( long offset = array . getBaseOffset ( ) ; offset < maxOffset ; offset += 8 ) { counts [ i ] [ ( int ) ( ( Platform . getLong ( baseObject , offset ) >>> ( i * 8 ) ) & 0xff ) ] ++ ; } } } return counts ; } | Computes a value histogram for each byte in the given array . |
14,128 | private static long [ ] transformCountsToOffsets ( long [ ] counts , long numRecords , long outputOffset , long bytesPerRecord , boolean desc , boolean signed ) { assert counts . length == 256 ; int start = signed ? 128 : 0 ; if ( desc ) { long pos = numRecords ; for ( int i = start ; i < start + 256 ; i ++ ) { pos -= counts [ i & 0xff ] ; counts [ i & 0xff ] = outputOffset + pos * bytesPerRecord ; } } else { long pos = 0 ; for ( int i = start ; i < start + 256 ; i ++ ) { long tmp = counts [ i & 0xff ] ; counts [ i & 0xff ] = outputOffset + pos * bytesPerRecord ; pos += tmp ; } } return counts ; } | Transforms counts into the proper unsafe output offsets for the sort type . |
14,129 | public void initialize ( String path , List < String > columns ) throws IOException , UnsupportedOperationException { super . initialize ( path , columns ) ; initializeInternal ( ) ; } | Utility API that will read all the data in path . This circumvents the need to create Hadoop objects to use this class . columns can contain the list of columns to project . |
14,130 | public boolean nextBatch ( ) throws IOException { for ( WritableColumnVector vector : columnVectors ) { vector . reset ( ) ; } columnarBatch . setNumRows ( 0 ) ; if ( rowsReturned >= totalRowCount ) return false ; checkEndOfRowGroup ( ) ; int num = ( int ) Math . min ( ( long ) capacity , totalCountLoadedSoFar - rowsReturned ) ; for ( int i = 0 ; i < columnReaders . length ; ++ i ) { if ( columnReaders [ i ] == null ) continue ; columnReaders [ i ] . readBatch ( num , columnVectors [ i ] ) ; } rowsReturned += num ; columnarBatch . setNumRows ( num ) ; numBatched = num ; batchIdx = 0 ; return true ; } | Advances to the next batch of rows . Returns false if there are no more . |
14,131 | public static void closeQuietly ( Closeable closeable ) { try { if ( closeable != null ) { closeable . close ( ) ; } } catch ( IOException e ) { logger . error ( "IOException should not have been thrown." , e ) ; } } | Closes the given object ignoring IOExceptions . |
14,132 | public static void deleteRecursively ( File file , FilenameFilter filter ) throws IOException { if ( file == null ) { return ; } if ( SystemUtils . IS_OS_UNIX && filter == null ) { try { deleteRecursivelyUsingUnixNative ( file ) ; return ; } catch ( IOException e ) { logger . warn ( "Attempt to delete using native Unix OS command failed for path = {}. " + "Falling back to Java IO way" , file . getAbsolutePath ( ) , e ) ; } } deleteRecursivelyUsingJavaIO ( file , filter ) ; } | Delete a file or directory and its contents recursively . Don t follow directories if they are symlinks . |
14,133 | public static byte [ ] bufferToArray ( ByteBuffer buffer ) { if ( buffer . hasArray ( ) && buffer . arrayOffset ( ) == 0 && buffer . array ( ) . length == buffer . remaining ( ) ) { return buffer . array ( ) ; } else { byte [ ] bytes = new byte [ buffer . remaining ( ) ] ; buffer . get ( bytes ) ; return bytes ; } } | Returns a byte array with the buffer s contents trying to avoid copying the data if possible . |
14,134 | public static void readFully ( ReadableByteChannel channel , ByteBuffer dst ) throws IOException { int expected = dst . remaining ( ) ; while ( dst . hasRemaining ( ) ) { if ( channel . read ( dst ) < 0 ) { throw new EOFException ( String . format ( "Not enough bytes in channel (expected %d)." , expected ) ) ; } } } | Fills a buffer with data read from the channel . |
14,135 | public void pointTo ( Object baseObject , long baseOffset , int sizeInBytes ) { final long keyArraySize = Platform . getLong ( baseObject , baseOffset ) ; assert keyArraySize >= 0 : "keyArraySize (" + keyArraySize + ") should >= 0" ; assert keyArraySize <= Integer . MAX_VALUE : "keyArraySize (" + keyArraySize + ") should <= Integer.MAX_VALUE" ; final int valueArraySize = sizeInBytes - ( int ) keyArraySize - 8 ; assert valueArraySize >= 0 : "valueArraySize (" + valueArraySize + ") should >= 0" ; keys . pointTo ( baseObject , baseOffset + 8 , ( int ) keyArraySize ) ; values . pointTo ( baseObject , baseOffset + 8 + keyArraySize , valueArraySize ) ; assert keys . numElements ( ) == values . numElements ( ) ; this . baseObject = baseObject ; this . baseOffset = baseOffset ; this . sizeInBytes = sizeInBytes ; } | Update this UnsafeMapData to point to different backing data . |
14,136 | public void reset ( ) { if ( isConstant ) return ; if ( childColumns != null ) { for ( ColumnVector c : childColumns ) { ( ( WritableColumnVector ) c ) . reset ( ) ; } } elementsAppended = 0 ; if ( numNulls > 0 ) { putNotNulls ( 0 , capacity ) ; numNulls = 0 ; } } | Resets this column for writing . The currently stored values are no longer accessible . |
14,137 | public WritableColumnVector reserveDictionaryIds ( int capacity ) { if ( dictionaryIds == null ) { dictionaryIds = reserveNewColumn ( capacity , DataTypes . IntegerType ) ; } else { dictionaryIds . reset ( ) ; dictionaryIds . reserve ( capacity ) ; } return dictionaryIds ; } | Reserve a integer column for ids of dictionary . |
14,138 | public final ColumnarArray getArray ( int rowId ) { if ( isNullAt ( rowId ) ) return null ; return new ColumnarArray ( arrayData ( ) , getArrayOffset ( rowId ) , getArrayLength ( rowId ) ) ; } | Returns the array at rowId , constructed from the array data and the array offsets and lengths in the current column vector . |
14,139 | public final ColumnarMap getMap ( int rowId ) { if ( isNullAt ( rowId ) ) return null ; return new ColumnarMap ( getChild ( 0 ) , getChild ( 1 ) , getArrayOffset ( rowId ) , getArrayLength ( rowId ) ) ; } | Returns the map at rowId , constructed from the first and second child column vectors and the offsets and lengths in the current column vector . |
14,140 | public static < T > TypedColumn < T , Double > avg ( MapFunction < T , Double > f ) { return new TypedAverage < T > ( f ) . toColumnJava ( ) ; } | Average aggregate function . |
14,141 | public static < T > TypedColumn < T , Long > count ( MapFunction < T , Object > f ) { return new TypedCount < T > ( f ) . toColumnJava ( ) ; } | Count aggregate function . |
14,142 | private void readAsync ( ) throws IOException { stateChangeLock . lock ( ) ; final byte [ ] arr = readAheadBuffer . array ( ) ; try { if ( endOfStream || readInProgress ) { return ; } checkReadException ( ) ; readAheadBuffer . position ( 0 ) ; readAheadBuffer . flip ( ) ; readInProgress = true ; } finally { stateChangeLock . unlock ( ) ; } executorService . execute ( ( ) -> { stateChangeLock . lock ( ) ; try { if ( isClosed ) { readInProgress = false ; return ; } isReading = true ; } finally { stateChangeLock . unlock ( ) ; } int read = 0 ; int off = 0 , len = arr . length ; Throwable exception = null ; try { do { read = underlyingInputStream . read ( arr , off , len ) ; if ( read <= 0 ) break ; off += read ; len -= read ; } while ( len > 0 && ! isWaiting . get ( ) ) ; } catch ( Throwable ex ) { exception = ex ; if ( ex instanceof Error ) { throw ( Error ) ex ; } } finally { stateChangeLock . lock ( ) ; readAheadBuffer . limit ( off ) ; if ( read < 0 || ( exception instanceof EOFException ) ) { endOfStream = true ; } else if ( exception != null ) { readAborted = true ; readException = exception ; } readInProgress = false ; signalAsyncReadComplete ( ) ; stateChangeLock . unlock ( ) ; closeUnderlyingInputStreamIfNecessary ( ) ; } } ) ; } | Read data from underlyingInputStream to readAheadBuffer asynchronously . |
14,143 | public void cacheThreadLocalRawStore ( ) { Long threadId = this . getId ( ) ; RawStore threadLocalRawStore = HiveMetaStore . HMSHandler . getRawStore ( ) ; if ( threadLocalRawStore != null && ! threadRawStoreMap . containsKey ( threadId ) ) { LOG . debug ( "Adding RawStore: " + threadLocalRawStore + ", for the thread: " + this . getName ( ) + " to threadRawStoreMap for future cleanup." ) ; threadRawStoreMap . put ( threadId , threadLocalRawStore ) ; } } | Cache the ThreadLocal RawStore object . Called from the corresponding thread . |
14,144 | private String convertPattern ( final String pattern , boolean datanucleusFormat ) { String wStr ; if ( datanucleusFormat ) { wStr = "*" ; } else { wStr = ".*" ; } return pattern . replaceAll ( "([^\\\\])%" , "$1" + wStr ) . replaceAll ( "\\\\%" , "%" ) . replaceAll ( "^%" , wStr ) . replaceAll ( "([^\\\\])_" , "$1." ) . replaceAll ( "\\\\_" , "_" ) . replaceAll ( "^_" , "." ) ; } | Convert a pattern containing JDBC catalog search wildcards into Java regex patterns . |
14,145 | public Iterator < InternalRow > rowIterator ( ) { final int maxRows = numRows ; final MutableColumnarRow row = new MutableColumnarRow ( columns ) ; return new Iterator < InternalRow > ( ) { int rowId = 0 ; public boolean hasNext ( ) { return rowId < maxRows ; } public InternalRow next ( ) { if ( rowId >= maxRows ) { throw new NoSuchElementException ( ) ; } row . rowId = rowId ++ ; return row ; } public void remove ( ) { throw new UnsupportedOperationException ( ) ; } } ; } | Returns an iterator over the rows in this batch . |
14,146 | public InternalRow getRow ( int rowId ) { assert ( rowId >= 0 && rowId < numRows ) ; row . rowId = rowId ; return row ; } | Returns the row in this batch at rowId . Returned row is reused across calls . |
14,147 | public long sendRpc ( ByteBuffer message , RpcResponseCallback callback ) { if ( logger . isTraceEnabled ( ) ) { logger . trace ( "Sending RPC to {}" , getRemoteAddress ( channel ) ) ; } long requestId = requestId ( ) ; handler . addRpcRequest ( requestId , callback ) ; RpcChannelListener listener = new RpcChannelListener ( requestId , callback ) ; channel . writeAndFlush ( new RpcRequest ( requestId , new NioManagedBuffer ( message ) ) ) . addListener ( listener ) ; return requestId ; } | Sends an opaque message to the RpcHandler on the server - side . The callback will be invoked with the server s response or upon any failure . |
14,148 | public ByteBuffer sendRpcSync ( ByteBuffer message , long timeoutMs ) { final SettableFuture < ByteBuffer > result = SettableFuture . create ( ) ; sendRpc ( message , new RpcResponseCallback ( ) { public void onSuccess ( ByteBuffer response ) { ByteBuffer copy = ByteBuffer . allocate ( response . remaining ( ) ) ; copy . put ( response ) ; copy . flip ( ) ; result . set ( copy ) ; } public void onFailure ( Throwable e ) { result . setException ( e ) ; } } ) ; try { return result . get ( timeoutMs , TimeUnit . MILLISECONDS ) ; } catch ( ExecutionException e ) { throw Throwables . propagate ( e . getCause ( ) ) ; } catch ( Exception e ) { throw Throwables . propagate ( e ) ; } } | Synchronously sends an opaque message to the RpcHandler on the server - side waiting for up to a specified timeout for a response . |
14,149 | private boolean refill ( ) throws IOException { if ( ! byteBuffer . hasRemaining ( ) ) { byteBuffer . clear ( ) ; int nRead = 0 ; while ( nRead == 0 ) { nRead = fileChannel . read ( byteBuffer ) ; } if ( nRead < 0 ) { return false ; } byteBuffer . flip ( ) ; } return true ; } | Checks whether data is left to be read from the input stream . |
14,150 | public String signCookie ( String str ) { if ( str == null || str . isEmpty ( ) ) { throw new IllegalArgumentException ( "NULL or empty string to sign" ) ; } String signature = getSignature ( str ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Signature generated for " + str + " is " + signature ) ; } return str + SIGNATURE + signature ; } | Sign the cookie given the string token as input . |
14,151 | public String verifyAndExtract ( String signedStr ) { int index = signedStr . lastIndexOf ( SIGNATURE ) ; if ( index == - 1 ) { throw new IllegalArgumentException ( "Invalid input sign: " + signedStr ) ; } String originalSignature = signedStr . substring ( index + SIGNATURE . length ( ) ) ; String rawValue = signedStr . substring ( 0 , index ) ; String currentSignature = getSignature ( rawValue ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Signature generated for " + rawValue + " inside verify is " + currentSignature ) ; } if ( ! originalSignature . equals ( currentSignature ) ) { throw new IllegalArgumentException ( "Invalid sign, original = " + originalSignature + " current = " + currentSignature ) ; } return rawValue ; } | Verify a signed string and extracts the original string . |
14,152 | private String getSignature ( String str ) { try { MessageDigest md = MessageDigest . getInstance ( SHA_STRING ) ; md . update ( str . getBytes ( ) ) ; md . update ( secretBytes ) ; byte [ ] digest = md . digest ( ) ; return new Base64 ( 0 ) . encodeToString ( digest ) ; } catch ( NoSuchAlgorithmException ex ) { throw new RuntimeException ( "Invalid SHA digest String: " + SHA_STRING + " " + ex . getMessage ( ) , ex ) ; } } | Get the signature of the input string based on SHA digest algorithm . |
14,153 | void closeIterator ( LevelDBIterator < ? > it ) throws IOException { synchronized ( this . _db ) { DB _db = this . _db . get ( ) ; if ( _db != null ) { it . close ( ) ; } } } | Closes the given iterator if the DB is still open . Trying to close a JNI LevelDB handle with a closed DB can cause JVM crashes so this ensures that situation does not happen . |
14,154 | LevelDBTypeInfo getTypeInfo ( Class < ? > type ) throws Exception { LevelDBTypeInfo ti = types . get ( type ) ; if ( ti == null ) { LevelDBTypeInfo tmp = new LevelDBTypeInfo ( this , type , getTypeAlias ( type ) ) ; ti = types . putIfAbsent ( type , tmp ) ; if ( ti == null ) { ti = tmp ; } } return ti ; } | Returns metadata about indices for the given type . |
14,155 | DB db ( ) { DB _db = this . _db . get ( ) ; if ( _db == null ) { throw new IllegalStateException ( "DB is closed." ) ; } return _db ; } | Try to avoid use - after close since that has the tendency of crashing the JVM . This doesn t prevent methods that retrieved the instance from using it after close but hopefully will catch most cases ; otherwise we ll need some kind of locking . |
14,156 | public void registerApp ( String appId , String shuffleSecret ) { shuffleSecretMap . put ( appId , shuffleSecret ) ; logger . info ( "Registered shuffle secret for application {}" , appId ) ; } | Register an application with its secret . Executors need to first authenticate themselves with the same secret before fetching shuffle files written by other executors in this application . |
14,157 | public void registerApp ( String appId , ByteBuffer shuffleSecret ) { registerApp ( appId , JavaUtils . bytesToString ( shuffleSecret ) ) ; } | Register an application with its secret specified as a byte buffer . |
14,158 | public void free ( ) { if ( consumer != null ) { if ( array != null ) { consumer . freeArray ( array ) ; } array = null ; } } | Free the memory used by pointer array . |
14,159 | public void insertRecord ( long recordPointer , long keyPrefix , boolean prefixIsNull ) { if ( ! hasSpaceForAnotherRecord ( ) ) { throw new IllegalStateException ( "There is no space for new record" ) ; } if ( prefixIsNull && radixSortSupport != null ) { array . set ( pos , array . get ( nullBoundaryPos ) ) ; pos ++ ; array . set ( pos , array . get ( nullBoundaryPos + 1 ) ) ; pos ++ ; array . set ( nullBoundaryPos , recordPointer ) ; nullBoundaryPos ++ ; array . set ( nullBoundaryPos , keyPrefix ) ; nullBoundaryPos ++ ; } else { array . set ( pos , recordPointer ) ; pos ++ ; array . set ( pos , keyPrefix ) ; pos ++ ; } } | Inserts a record to be sorted . Assumes that the record pointer points to a record length stored as a 4 - byte integer followed by the record s bytes . |
14,160 | public static long nextPowerOf2 ( long num ) { final long highBit = Long . highestOneBit ( num ) ; return ( highBit == num ) ? num : highBit << 1 ; } | Returns the next number greater or equal num that is power of 2 . |
14,161 | public static boolean arrayEquals ( Object leftBase , long leftOffset , Object rightBase , long rightOffset , final long length ) { int i = 0 ; if ( ( leftOffset % 8 ) == ( rightOffset % 8 ) ) { while ( ( leftOffset + i ) % 8 != 0 && i < length ) { if ( Platform . getByte ( leftBase , leftOffset + i ) != Platform . getByte ( rightBase , rightOffset + i ) ) { return false ; } i += 1 ; } } if ( unaligned || ( ( ( leftOffset + i ) % 8 == 0 ) && ( ( rightOffset + i ) % 8 == 0 ) ) ) { while ( i <= length - 8 ) { if ( Platform . getLong ( leftBase , leftOffset + i ) != Platform . getLong ( rightBase , rightOffset + i ) ) { return false ; } i += 8 ; } } while ( i < length ) { if ( Platform . getByte ( leftBase , leftOffset + i ) != Platform . getByte ( rightBase , rightOffset + i ) ) { return false ; } i += 1 ; } return true ; } | Optimized byte array equality check for byte arrays . |
14,162 | public ByteBuffer toByteBuffer ( ) { ByteBuf buf = Unpooled . buffer ( encodedLength ( ) + 1 ) ; buf . writeByte ( type ( ) . id ) ; encode ( buf ) ; assert buf . writableBytes ( ) == 0 : "Writable bytes remain: " + buf . writableBytes ( ) ; return buf . nioBuffer ( ) ; } | Serializes the type byte followed by the message itself . |
14,163 | public SparkLauncher setSparkHome ( String sparkHome ) { checkNotNull ( sparkHome , "sparkHome" ) ; builder . childEnv . put ( ENV_SPARK_HOME , sparkHome ) ; return this ; } | Set a custom Spark installation location for the application . |
14,164 | public void addSpillIfNotEmpty ( UnsafeSorterIterator spillReader ) throws IOException { if ( spillReader . hasNext ( ) ) { spillReader . loadNext ( ) ; priorityQueue . add ( spillReader ) ; numRecords += spillReader . getNumRecords ( ) ; } } | Add an UnsafeSorterIterator to this merger |
14,165 | private void failRemainingBlocks ( String [ ] failedBlockIds , Throwable e ) { for ( String blockId : failedBlockIds ) { try { listener . onBlockFetchFailure ( blockId , e ) ; } catch ( Exception e2 ) { logger . error ( "Error in block fetch failure callback" , e2 ) ; } } } | Invokes the onBlockFetchFailure callback for every listed block id . |
14,166 | private String getClientNameFromCookie ( Cookie [ ] cookies ) { String currName , currValue ; for ( Cookie currCookie : cookies ) { currName = currCookie . getName ( ) ; if ( ! currName . equals ( AUTH_COOKIE ) ) { continue ; } currValue = currCookie . getValue ( ) ; currValue = signer . verifyAndExtract ( currValue ) ; if ( currValue != null ) { String userName = HttpAuthUtils . getUserNameFromCookieToken ( currValue ) ; if ( userName == null ) { LOG . warn ( "Invalid cookie token " + currValue ) ; continue ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Validated the cookie for user " + userName ) ; } return userName ; } } return null ; } | Retrieves the client name from cookieString . If the cookie does not correspond to a valid client the function returns null . |
14,167 | private String toCookieStr ( Cookie [ ] cookies ) { String cookieStr = "" ; for ( Cookie c : cookies ) { cookieStr += c . getName ( ) + "=" + c . getValue ( ) + " ;\n" ; } return cookieStr ; } | Convert cookie array to human readable cookie string |
14,168 | private Cookie createCookie ( String str ) throws UnsupportedEncodingException { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Cookie name = " + AUTH_COOKIE + " value = " + str ) ; } Cookie cookie = new Cookie ( AUTH_COOKIE , str ) ; cookie . setMaxAge ( cookieMaxAge ) ; if ( cookieDomain != null ) { cookie . setDomain ( cookieDomain ) ; } if ( cookiePath != null ) { cookie . setPath ( cookiePath ) ; } cookie . setSecure ( isCookieSecure ) ; return cookie ; } | Generate a server side cookie given the cookie value as the input . |
14,169 | private static String getHttpOnlyCookieHeader ( Cookie cookie ) { NewCookie newCookie = new NewCookie ( cookie . getName ( ) , cookie . getValue ( ) , cookie . getPath ( ) , cookie . getDomain ( ) , cookie . getVersion ( ) , cookie . getComment ( ) , cookie . getMaxAge ( ) , cookie . getSecure ( ) ) ; return newCookie + "; HttpOnly" ; } | Generate httponly cookie from HS2 cookie |
/**
 * Do the GSS-API Kerberos authentication for the given request. We already
 * have a logged-in subject in the form of serviceUGI, which GSS-API will
 * extract information from. For SPNego requests, the httpUGI (http/_HOST
 * principal) is tried first when available.
 *
 * @return the authenticated client name
 * @throws HttpAuthenticationException if both principals fail to authenticate
 */
private String doKerberosAuth(HttpServletRequest request) throws HttpAuthenticationException {
  // First attempt: SPNego http/_HOST principal, if one was configured.
  if (httpUGI != null) {
    try {
      return httpUGI.doAs(new HttpKerberosServerAction(request, httpUGI));
    } catch (Exception e) {
      // Deliberate best-effort: a failure here is not fatal, we fall through
      // to the service principal below.
      LOG.info("Failed to authenticate with http/_HOST kerberos principal, "
          + "trying with hive/_HOST kerberos principal");
    }
  }
  // Second (and last) attempt: the hive/_HOST service principal.
  try {
    return serviceUGI.doAs(new HttpKerberosServerAction(request, serviceUGI));
  } catch (Exception e) {
    LOG.error("Failed to authenticate with hive/_HOST kerberos principal");
    throw new HttpAuthenticationException(e);
  }
}
14,171 | private String getAuthHeader ( HttpServletRequest request , String authType ) throws HttpAuthenticationException { String authHeader = request . getHeader ( HttpAuthUtils . AUTHORIZATION ) ; if ( authHeader == null || authHeader . isEmpty ( ) ) { throw new HttpAuthenticationException ( "Authorization header received " + "from the client is empty." ) ; } String authHeaderBase64String ; int beginIndex ; if ( isKerberosAuthMode ( authType ) ) { beginIndex = ( HttpAuthUtils . NEGOTIATE + " " ) . length ( ) ; } else { beginIndex = ( HttpAuthUtils . BASIC + " " ) . length ( ) ; } authHeaderBase64String = authHeader . substring ( beginIndex ) ; if ( authHeaderBase64String == null || authHeaderBase64String . isEmpty ( ) ) { throw new HttpAuthenticationException ( "Authorization header received " + "from the client does not contain any data." ) ; } return authHeaderBase64String ; } | Returns the base64 encoded auth header payload |
14,172 | static String join ( String sep , String ... elements ) { StringBuilder sb = new StringBuilder ( ) ; for ( String e : elements ) { if ( e != null ) { if ( sb . length ( ) > 0 ) { sb . append ( sep ) ; } sb . append ( e ) ; } } return sb . toString ( ) ; } | Joins a list of strings using the given separator . |
14,173 | static String firstNonEmptyValue ( String key , Map < ? , ? > ... maps ) { for ( Map < ? , ? > map : maps ) { String value = ( String ) map . get ( key ) ; if ( ! isEmpty ( value ) ) { return value ; } } return null ; } | Returns the first non - empty value mapped to the given key in the given maps or null otherwise . |
14,174 | static String firstNonEmpty ( String ... candidates ) { for ( String s : candidates ) { if ( ! isEmpty ( s ) ) { return s ; } } return null ; } | Returns the first non - empty non - null string in the given list or null otherwise . |
14,175 | static void checkNotNull ( Object o , String arg ) { if ( o == null ) { throw new IllegalArgumentException ( String . format ( "'%s' must not be null." , arg ) ) ; } } | Throws IllegalArgumentException if the given object is null . |
14,176 | static void checkArgument ( boolean check , String msg , Object ... args ) { if ( ! check ) { throw new IllegalArgumentException ( String . format ( msg , args ) ) ; } } | Throws IllegalArgumentException with the given message if the check is false . |
14,177 | static void checkState ( boolean check , String msg , Object ... args ) { if ( ! check ) { throw new IllegalStateException ( String . format ( msg , args ) ) ; } } | Throws IllegalStateException with the given message if the check is false . |
14,178 | static String findJarsDir ( String sparkHome , String scalaVersion , boolean failIfNotFound ) { File libdir = new File ( sparkHome , "jars" ) ; if ( ! libdir . isDirectory ( ) ) { libdir = new File ( sparkHome , String . format ( "assembly/target/scala-%s/jars" , scalaVersion ) ) ; if ( ! libdir . isDirectory ( ) ) { checkState ( ! failIfNotFound , "Library directory '%s' does not exist; make sure Spark is built." , libdir . getAbsolutePath ( ) ) ; return null ; } } return libdir . getAbsolutePath ( ) ; } | Find the location of the Spark jars dir depending on whether we re looking at a build or a distribution directory . |
/**
 * Update this UnsafeArrayData to point to different backing data.
 *
 * As read here, the first 8 bytes at baseOffset hold the element count;
 * element data begins after a header whose size is computed by
 * calculateHeaderPortionInBytes(numElements).
 *
 * @param baseObject  the new backing object (may be an on-heap byte array or
 *                    null for off-heap memory — as accepted by Platform)
 * @param baseOffset  offset of the array data within baseObject
 * @param sizeInBytes total size of the region in bytes
 */
public void pointTo(Object baseObject, long baseOffset, int sizeInBytes) {
  // The count is stored as a long but must fit in a non-negative int.
  final long numElements = Platform.getLong(baseObject, baseOffset);
  assert numElements >= 0 : "numElements (" + numElements + ") should >= 0";
  assert numElements <= Integer.MAX_VALUE :
      "numElements (" + numElements + ") should <= Integer.MAX_VALUE";
  this.numElements = (int) numElements;
  this.baseObject = baseObject;
  this.baseOffset = baseOffset;
  this.sizeInBytes = sizeInBytes;
  // Cache where the element region starts, just past the header.
  this.elementOffset = baseOffset + calculateHeaderPortionInBytes(this.numElements);
}
/**
 * Returns the current UserGroupInformation on the stack.
 *
 * NOTE(review): opConfig is unused in this implementation; it is kept only
 * for signature compatibility with callers.
 *
 * @throws HiveSQLException if the current user cannot be determined
 */
private UserGroupInformation getCurrentUGI(HiveConf opConfig) throws HiveSQLException {
  try {
    return Utils.getUGI();
  } catch (Exception e) {
    throw new HiveSQLException("Unable to get current user", e);
  }
}
14,181 | private RowSet prepareFromRow ( List < Object > rows , RowSet rowSet ) throws Exception { for ( Object row : rows ) { rowSet . addRow ( ( Object [ ] ) row ) ; } return rowSet ; } | already encoded to thrift - able object in ThriftFormatter |
14,182 | private HiveConf getConfigForOperation ( ) throws HiveSQLException { HiveConf sqlOperationConf = getParentSession ( ) . getHiveConf ( ) ; if ( ! getConfOverlay ( ) . isEmpty ( ) || shouldRunAsync ( ) ) { sqlOperationConf = new HiveConf ( sqlOperationConf ) ; for ( Map . Entry < String , String > confEntry : getConfOverlay ( ) . entrySet ( ) ) { try { sqlOperationConf . verifyAndSet ( confEntry . getKey ( ) , confEntry . getValue ( ) ) ; } catch ( IllegalArgumentException e ) { throw new HiveSQLException ( "Error applying statement specific settings" , e ) ; } } } return sqlOperationConf ; } | If there are query specific settings to overlay then create a copy of config There are two cases we need to clone the session config that s being passed to hive driver 1 . Async query - If the client changes a config setting that shouldn t reflect in the execution already underway 2 . confOverlay - The query specific settings should only be applied to the query config and not session |
14,183 | private synchronized boolean shouldRetry ( Throwable e ) { boolean isIOException = e instanceof IOException || ( e . getCause ( ) != null && e . getCause ( ) instanceof IOException ) ; boolean hasRemainingRetries = retryCount < maxRetries ; return isIOException && hasRemainingRetries ; } | Returns true if we should retry due a block fetch failure . We will retry if and only if the exception was an IOException and we haven t retried maxRetries times already . |
/**
 * Append a key/value pair, copying both into the backing MemoryBlock.
 * The record is written as [key bytes][value bytes][8-byte long set to 0].
 *
 * @return an UnsafeRow pointing at the appended value on success, or null
 *         when the map is at capacity, has no page, or the page lacks room
 */
public UnsafeRow appendRow(Object kbase, long koff, int klen, Object vbase, long voff, int vlen) {
  // Reject when full, unallocated, or when the fixed-size record won't fit.
  if (numRows >= capacity || page == null || page.size() - pageCursor < recordLength) {
    return null;
  }
  long offset = page.getBaseOffset() + pageCursor;
  final long recordOffset = offset;
  // Copy key then value back-to-back into the page.
  Platform.copyMemory(kbase, koff, base, offset, klen);
  offset += klen;
  Platform.copyMemory(vbase, voff, base, offset, vlen);
  offset += vlen;
  // Trailing zero long after the value — NOTE(review): presumably an
  // end-of-record marker for readers; confirm against the iterator code.
  Platform.putLong(base, offset, 0);
  pageCursor += recordLength;
  // Re-point the reusable key/value row wrappers at the record just written.
  keyRowId = numRows;
  keyRow.pointTo(base, recordOffset, klen);
  valueRow.pointTo(base, recordOffset + klen, vlen);
  numRows++;
  return valueRow;
}
14,185 | public static UTF8String fromString ( String str ) { return str == null ? null : fromBytes ( str . getBytes ( StandardCharsets . UTF_8 ) ) ; } | Creates an UTF8String from String . |
14,186 | public static UTF8String blankString ( int length ) { byte [ ] spaces = new byte [ length ] ; Arrays . fill ( spaces , ( byte ) ' ' ) ; return fromBytes ( spaces ) ; } | Creates an UTF8String that contains length spaces . |
/**
 * Writes the content of this string into a memory region identified by a
 * base object and an offset. The target must already be allocated with room
 * for all numBytes bytes of this string; no bounds checking is done here.
 *
 * @param target       the destination base object (or null for off-heap
 *                     memory, as accepted by Platform)
 * @param targetOffset offset within the destination to write at
 */
public void writeToMemory(Object target, long targetOffset) {
  Platform.copyMemory(base, offset, target, targetOffset, numBytes);
}
14,188 | public byte [ ] getBytes ( ) { if ( offset == BYTE_ARRAY_OFFSET && base instanceof byte [ ] && ( ( byte [ ] ) base ) . length == numBytes ) { return ( byte [ ] ) base ; } else { byte [ ] bytes = new byte [ numBytes ] ; copyMemory ( base , offset , bytes , BYTE_ARRAY_OFFSET , numBytes ) ; return bytes ; } } | Returns the underline bytes will be a copy of it if it s part of another array . |
14,189 | public boolean contains ( final UTF8String substring ) { if ( substring . numBytes == 0 ) { return true ; } byte first = substring . getByte ( 0 ) ; for ( int i = 0 ; i <= numBytes - substring . numBytes ; i ++ ) { if ( getByte ( i ) == first && matchAt ( substring , i ) ) { return true ; } } return false ; } | Returns whether this contains substring or not . |
14,190 | public UTF8String toUpperCase ( ) { if ( numBytes == 0 ) { return EMPTY_UTF8 ; } byte [ ] bytes = new byte [ numBytes ] ; bytes [ 0 ] = ( byte ) Character . toTitleCase ( getByte ( 0 ) ) ; for ( int i = 0 ; i < numBytes ; i ++ ) { byte b = getByte ( i ) ; if ( numBytesForFirstByte ( b ) != 1 ) { return toUpperCaseSlow ( ) ; } int upper = Character . toUpperCase ( ( int ) b ) ; if ( upper > 127 ) { return toUpperCaseSlow ( ) ; } bytes [ i ] = ( byte ) upper ; } return fromBytes ( bytes ) ; } | Returns the upper case of this string |
14,191 | public UTF8String toLowerCase ( ) { if ( numBytes == 0 ) { return EMPTY_UTF8 ; } byte [ ] bytes = new byte [ numBytes ] ; bytes [ 0 ] = ( byte ) Character . toTitleCase ( getByte ( 0 ) ) ; for ( int i = 0 ; i < numBytes ; i ++ ) { byte b = getByte ( i ) ; if ( numBytesForFirstByte ( b ) != 1 ) { return toLowerCaseSlow ( ) ; } int lower = Character . toLowerCase ( ( int ) b ) ; if ( lower > 127 ) { return toLowerCaseSlow ( ) ; } bytes [ i ] = ( byte ) lower ; } return fromBytes ( bytes ) ; } | Returns the lower case of this string |
/**
 * Returns the title case of this string: the first character of each
 * space-separated word is title-cased, all other characters are copied
 * unchanged. Uses a fast ASCII path and falls back to a String-based slow
 * path on any multi-byte character or non-ASCII title-case result.
 */
public UTF8String toTitleCase() {
  if (numBytes == 0) {
    return EMPTY_UTF8;
  }
  byte[] bytes = new byte[numBytes];
  for (int i = 0; i < numBytes; i++) {
    byte b = getByte(i);
    // A character starts a word if it is first, or follows an ASCII space.
    if (i == 0 || getByte(i - 1) == ' ') {
      if (numBytesForFirstByte(b) != 1) {
        // Multi-byte (non-ASCII) character: full Unicode handling needed.
        return toTitleCaseSlow();
      }
      int upper = Character.toTitleCase(b);
      if (upper > 127) {
        // Title-casing produced a non-ASCII code point: slow path.
        return toTitleCaseSlow();
      }
      bytes[i] = (byte) upper;
    } else {
      bytes[i] = b;
    }
  }
  return fromBytes(bytes);
}
14,193 | private UTF8String copyUTF8String ( int start , int end ) { int len = end - start + 1 ; byte [ ] newBytes = new byte [ len ] ; copyMemory ( base , offset + start , newBytes , BYTE_ARRAY_OFFSET , len ) ; return UTF8String . fromBytes ( newBytes ) ; } | Copy the bytes from the current UTF8String and make a new UTF8String . |
14,194 | public UTF8String trim ( UTF8String trimString ) { if ( trimString != null ) { return trimLeft ( trimString ) . trimRight ( trimString ) ; } else { return null ; } } | Based on the given trim string trim this string starting from both ends This method searches for each character in the source string removes the character if it is found in the trim string stops at the first not found . It calls the trimLeft first then trimRight . It returns a new string in which both ends trim characters have been removed . |
/**
 * Trims this string from the left: starting at the left end, each character
 * that occurs in {@code trimString} is removed; scanning stops at the first
 * character not in trimString. Returns a new string, or null when
 * trimString is null.
 */
public UTF8String trimLeft(UTF8String trimString) {
  if (trimString == null) return null;
  // srchIdx walks the source character by character; trimIdx counts how many
  // leading bytes are confirmed trimmable so far.
  int srchIdx = 0;
  int trimIdx = 0;
  while (srchIdx < numBytes) {
    // Slice out the (possibly multi-byte) UTF-8 character at srchIdx.
    UTF8String searchChar = copyUTF8String(
        srchIdx, srchIdx + numBytesForFirstByte(this.getByte(srchIdx)) - 1);
    int searchCharBytes = searchChar.numBytes;
    if (trimString.find(searchChar, 0) >= 0) {
      trimIdx += searchCharBytes;
    } else {
      break; // first non-trimmable character: stop
    }
    srchIdx += searchCharBytes;
  }
  if (trimIdx >= numBytes) {
    // Every character was trimmable.
    return EMPTY_UTF8;
  } else {
    return copyUTF8String(trimIdx, numBytes - 1);
  }
}
/**
 * Trims this string from the right: starting at the right end, each
 * character that occurs in {@code trimString} is removed; scanning stops at
 * the first character not in trimString. Returns a new string, or null when
 * trimString is null.
 */
public UTF8String trimRight(UTF8String trimString) {
  if (trimString == null) return null;
  int charIdx = 0;
  int numChars = 0;
  // Index every character's byte position and byte length first, so the
  // string can then be walked backwards character by character.
  int[] stringCharLen = new int[numBytes];
  int[] stringCharPos = new int[numBytes];
  while (charIdx < numBytes) {
    stringCharPos[numChars] = charIdx;
    stringCharLen[numChars] = numBytesForFirstByte(getByte(charIdx));
    charIdx += stringCharLen[numChars];
    numChars++;
  }
  // trimEnd is the index of the last byte to keep.
  int trimEnd = numBytes - 1;
  while (numChars > 0) {
    UTF8String searchChar = copyUTF8String(
        stringCharPos[numChars - 1],
        stringCharPos[numChars - 1] + stringCharLen[numChars - 1] - 1);
    if (trimString.find(searchChar, 0) >= 0) {
      trimEnd -= stringCharLen[numChars - 1];
    } else {
      break; // first non-trimmable character from the right: stop
    }
    numChars--;
  }
  if (trimEnd < 0) {
    // Every character was trimmable.
    return EMPTY_UTF8;
  } else {
    return copyUTF8String(0, trimEnd);
  }
}
14,197 | private int find ( UTF8String str , int start ) { assert ( str . numBytes > 0 ) ; while ( start <= numBytes - str . numBytes ) { if ( ByteArrayMethods . arrayEquals ( base , offset + start , str . base , str . offset , str . numBytes ) ) { return start ; } start += 1 ; } return - 1 ; } | Find the str from left to right . |
14,198 | public static UTF8String concat ( UTF8String ... inputs ) { long totalLength = 0 ; for ( int i = 0 ; i < inputs . length ; i ++ ) { if ( inputs [ i ] != null ) { totalLength += ( long ) inputs [ i ] . numBytes ; } else { return null ; } } final byte [ ] result = new byte [ Ints . checkedCast ( totalLength ) ] ; int offset = 0 ; for ( int i = 0 ; i < inputs . length ; i ++ ) { int len = inputs [ i ] . numBytes ; copyMemory ( inputs [ i ] . base , inputs [ i ] . offset , result , BYTE_ARRAY_OFFSET + offset , len ) ; offset += len ; } return fromBytes ( result ) ; } | Concatenates input strings together into a single string . Returns null if any input is null . |
/**
 * Parses this UTF8String to a long, storing the result in toLongResult.
 * Accepts an optional leading '+'/'-', decimal digits, and an optional '.'
 * followed by more digits — the fractional digits are validated but
 * discarded.
 *
 * @return true on success; false for empty input, a bare sign, any
 *         non-digit character, or overflow
 */
public boolean toLong(LongWrapper toLongResult) {
  if (numBytes == 0) {
    return false;
  }
  byte b = getByte(0);
  final boolean negative = b == '-';
  int offset = 0;
  if (negative || b == '+') {
    offset++;
    if (numBytes == 1) {
      // A sign with no digits is invalid.
      return false;
    }
  }
  final byte separator = '.';
  final int radix = 10;
  // The value is accumulated as a NEGATIVE number: the negative long range
  // is one larger than the positive one, so Long.MIN_VALUE parses without
  // overflow. stopValue bounds the accumulator before each multiply.
  final long stopValue = Long.MIN_VALUE / radix;
  long result = 0;
  while (offset < numBytes) {
    b = getByte(offset);
    offset++;
    if (b == separator) {
      // Integer part ends at the decimal point; the rest is only validated.
      break;
    }
    int digit;
    if (b >= '0' && b <= '9') {
      digit = b - '0';
    } else {
      return false;
    }
    // result * radix would underflow past Long.MIN_VALUE.
    if (result < stopValue) {
      return false;
    }
    result = result * radix - digit;
    if (result > 0) {
      // Accumulator wrapped past zero: overflow.
      return false;
    }
  }
  // Everything after the separator must still be digits, even though the
  // fractional part is ignored.
  while (offset < numBytes) {
    byte currentByte = getByte(offset);
    if (currentByte < '0' || currentByte > '9') {
      return false;
    }
    offset++;
  }
  if (!negative) {
    result = -result;
    if (result < 0) {
      // -Long.MIN_VALUE is not representable as a positive long.
      return false;
    }
  }
  toLongResult.value = result;
  return true;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.