idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
12,300
// Marks a live node as lost and synchronously notifies every registered change listener.
// Listener callbacks run while holding this object's monitor (synchronized method).
public synchronized void handleNodeLost ( Address address ) { Preconditions . checkNotNull ( address , "address should not be null" ) ; mLostNodes . add ( address ) ; for ( Runnable function : mChangeListeners ) { function . run ( ) ; } }
Updates configuration when a live node becomes lost .
12,301
// Removes a previously lost node from the lost set and notifies all registered change listeners.
public synchronized void lostNodeFound ( Address address ) { Preconditions . checkNotNull ( address , "address should not be null" ) ; mLostNodes . remove ( address ) ; for ( Runnable function : mChangeListeners ) { function . run ( ) ; } }
Updates configuration when a lost node is found .
12,302
/**
 * Registers a new task for this job.
 *
 * @param taskId the id of the task to register; must not already be registered
 * @throws IllegalArgumentException if a task with the given id is already registered
 */
public synchronized void addTask(int taskId) {
  // Give the precondition a real message so a duplicate registration is diagnosable
  // (the original passed an empty string).
  Preconditions.checkArgument(!mTaskIdToInfo.containsKey(taskId),
      "Task %s is already registered for job %s", taskId, mId);
  mTaskIdToInfo.put(taskId, new TaskInfo()
      .setJobId(mId)
      .setTaskId(taskId)
      .setStatus(Status.CREATED)
      .setErrorMessage("")
      .setResult(null));
}
Registers a task .
12,303
// Starts the job master: brings up the journal system, gains primacy (leadership),
// then starts the master components and begins serving.
public void start ( ) throws Exception { mJournalSystem . start ( ) ; mJournalSystem . gainPrimacy ( ) ; startMaster ( true ) ; startServing ( ) ; }
Starts the Alluxio job master server .
12,304
// Builds a new URI with `path` appended to the base URI's path component;
// scheme, authority, query and fragment of the base are preserved unchanged.
public static URI appendPath ( URI base , String path ) throws URISyntaxException { return new URI ( base . getScheme ( ) , base . getAuthority ( ) , PathUtils . concatPath ( base . getPath ( ) , path ) , base . getQuery ( ) , base . getFragment ( ) ) ; }
Appends the given path to the given base URI .
12,305
/**
 * Parses a URL query string into a map of decoded parameter names to values.
 *
 * @param query the query component of a URI; may be null or empty
 * @return a map from parameter name to value; a key with no separator maps to ""
 */
public static Map<String, String> parseQueryString(String query) {
  Map<String, String> queryMap = new HashMap<>();
  if (query == null || query.isEmpty()) {
    return queryMap;
  }
  String[] entries = query.split(String.valueOf(QUERY_SEPARATOR));
  try {
    for (String entry : entries) {
      // Split on the first key/value separator only, so values that themselves
      // contain the separator (e.g. "a=b=c") are no longer truncated to "b".
      String[] parts = entry.split(String.valueOf(QUERY_KEY_VALUE_SEPARATOR), 2);
      if (parts.length == 1) {
        queryMap.put(URLDecoder.decode(parts[0], "UTF-8"), "");
      } else if (parts.length == 2) {
        queryMap.put(URLDecoder.decode(parts[0], "UTF-8"),
            URLDecoder.decode(parts[1], "UTF-8"));
      }
      // parts.length == 0 (an empty entry) is skipped, as before.
    }
  } catch (UnsupportedEncodingException e) {
    // UTF-8 is guaranteed by the JVM spec; this cannot happen in practice.
    throw new RuntimeException(e);
  }
  return queryMap;
}
Parses the given query string and returns a map of the query parameters .
12,306
// Accumulates s into a URI hash using the 31*h + c scheme. Strings containing '%'
// (percent-encoded octets) are delegated to normalizedHash — presumably so equivalent
// encodings hash equally; confirm against normalizedHash's implementation.
public static int hash ( int hash , String s ) { if ( s == null ) { return hash ; } return s . indexOf ( '%' ) < 0 ? 31 * hash + s . hashCode ( ) : normalizedHash ( hash , s ) ; }
Hashes a string for a URI hash . Handles octets .
12,307
// Accumulates a case-insensitive hash of s (31*h + lower-cased char); null leaves the hash unchanged.
public static int hashIgnoreCase ( int hash , String s ) { if ( s == null ) { return hash ; } int length = s . length ( ) ; for ( int i = 0 ; i < length ; i ++ ) { hash = 31 * hash + URIUtils . toLower ( s . charAt ( i ) ) ; } return hash ; }
Hashes a string for a URI hash while ignoring the case .
12,308
// Authentication is considered enabled unless the configured auth type is NOSASL.
public static boolean isAuthenticationEnabled ( AlluxioConfiguration conf ) { return ! conf . getEnum ( PropertyKey . SECURITY_AUTHENTICATION_TYPE , AuthType . class ) . equals ( AuthType . NOSASL ) ; }
Checks if authentication is enabled .
12,309
// Health-check entry point: exits 0 if the local worker RPC endpoint is serving, 1 otherwise.
// Any command-line arguments are logged and ignored.
public static void main ( String [ ] args ) { if ( args . length != 0 ) { LOG . info ( "java -cp {} {}" , RuntimeConstants . ALLUXIO_JAR , AlluxioWorkerMonitor . class . getCanonicalName ( ) ) ; LOG . warn ( "ignoring arguments" ) ; } AlluxioConfiguration conf = new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ; HealthCheckClient client = new WorkerHealthCheckClient ( NetworkAddressUtils . getConnectAddress ( NetworkAddressUtils . ServiceType . WORKER_RPC , conf ) , ONE_MIN_EXP_BACKOFF , conf ) ; if ( ! client . isServing ( ) ) { System . exit ( 1 ) ; } System . exit ( 0 ) ; }
Starts the Alluxio worker monitor .
12,310
// Re-roots `path` (which lies under `source`) onto `destination` by taking its
// source-relative part and concatenating it onto the destination prefix.
private static String computeTargetPath ( String path , String source , String destination ) throws Exception { String relativePath = PathUtils . subtractPaths ( path , source ) ; return PathUtils . concatPath ( destination , relativePath ) ; }
Computes the path that the given path should end up at when source is migrated to destination .
12,311
// A RocksDB database path is simply <baseDir>/<dbName>.
public static String generateDbPath ( String baseDir , String dbName ) { return PathUtils . concatPath ( baseDir , dbName ) ; }
Generates a path to use for a RocksDB database .
12,312
/**
 * Gets metadata for the object stored under the given key. The key is processed
 * as-is and may refer to a file or a directory key.
 *
 * @param key the object key
 * @return the object status, or null if the object does not exist or the lookup fails
 */
protected ObjectStatus getObjectStatus(String key) {
  try {
    FileInfo fileInfo = mKodoClinet.getFileInfo(key);
    if (fileInfo == null) {
      return null;
    }
    // putTime / 10000 converts to milliseconds — assumes Kodo reports putTime in
    // 100-nanosecond units; TODO confirm against the Qiniu Kodo API docs.
    return new ObjectStatus(key, fileInfo.hash, fileInfo.fsize, fileInfo.putTime / 10000);
  } catch (QiniuException e) {
    // Pass the exception as the final (placeholder-less) argument so SLF4J logs
    // the stack trace; the original "Msg: {}" placeholder consumed it as a parameter.
    LOG.warn("Failed to get object {}", key, e);
  }
  return null;
}
Gets metadata information about object . Implementations should process the key as is which may be a file or a directory key .
12,313
/**
 * Maps a possibly-negative tier ordinal onto a valid tier index.
 *
 * Non-negative ordinals are clamped to the last tier; negative ordinals count
 * back from the end and are clamped to the first tier.
 */
static int interpretOrdinal(int ordinal, int numTiers) {
  if (ordinal < 0) {
    // Negative ordinals index from the end; never go below the first tier.
    return Math.max(ordinal + numTiers, 0);
  }
  // Clamp non-negative ordinals to the last available tier.
  int lastTier = numTiers - 1;
  return Math.min(ordinal, lastTier);
}
Interprets a tier ordinal given the number of tiers .
12,314
/**
 * Adds a block to the file information, tracking it under its tier and
 * accumulating the total size stored on that tier.
 *
 * @param tierAlias the tier the block resides on
 * @param blockId the block id
 * @param blockSize the block size in bytes
 * @param blockLastAccessTimeMs the last access time of the block in ms
 */
public void addBlock(String tierAlias, long blockId, long blockSize, long blockLastAccessTimeMs) {
  UIFileBlockInfo block = new UIFileBlockInfo(blockId, blockSize, blockLastAccessTimeMs,
      tierAlias, mAlluxioConfiguration);
  // computeIfAbsent/merge replace the original manual get-then-put sequences.
  mBlocksOnTier.computeIfAbsent(tierAlias, alias -> new ArrayList<>()).add(block);
  mSizeOnTier.merge(tierAlias, blockSize, Long::sum);
}
Adds a block to the file information .
12,315
// Formats the creation time using the configured user date pattern; returns "" when
// the creation time is the EMPTY_CREATION_TIME sentinel (unset).
public String getCreationTime ( ) { if ( mCreationTimeMs == UIFileInfo . LocalFileInfo . EMPTY_CREATION_TIME ) { return "" ; } return CommonUtils . convertMsToDate ( mCreationTimeMs , mAlluxioConfiguration . get ( PropertyKey . USER_DATE_FORMAT_PATTERN ) ) ; }
Gets creation time .
12,316
// Percentage of the file's bytes resident on each tier (integer division truncates);
// 0 for every tier when the file size is 0, avoiding division by zero.
public Map < String , Integer > getOnTierPercentages ( ) { return mOrderedTierAliases . stream ( ) . collect ( Collectors . toMap ( tier -> tier , tier -> { Long sizeOnTier = mSizeOnTier . getOrDefault ( tier , 0L ) ; return mSize > 0 ? ( int ) ( 100 * sizeOnTier / mSize ) : 0 ; } ) ) ; }
Gets on - tier percentages .
12,317
// Sets the group attribute (optionally recursively) on the given path and echoes the change to stdout.
private void chgrp ( AlluxioURI path , String group , boolean recursive ) throws AlluxioException , IOException { SetAttributePOptions options = SetAttributePOptions . newBuilder ( ) . setGroup ( group ) . setRecursive ( recursive ) . build ( ) ; mFileSystem . setAttribute ( path , options ) ; System . out . println ( "Changed group of " + path + " to " + group ) ; }
Changes the group for the directory or file with the path specified in args .
12,318
// Transfers up to buf.writableBytes() bytes from the UFS input stream into buf; returns the
// number of bytes read, or -1 when there is no stream or the read position has reached the
// block size. When caching is active (mBlockWriter != null), the newly written region of buf
// is replayed into the local block writer through a duplicated buffer view (bufCopy), growing
// the cached block with requestSpace as needed; any caching failure is logged and caching is
// cancelled without failing the read itself.
public int transferTo ( ByteBuf buf ) throws IOException { Preconditions . checkState ( ! mClosed ) ; if ( mUnderFileSystemInputStream == null ) { return - 1 ; } if ( mBlockMeta . getBlockSize ( ) <= mInStreamPos ) { return - 1 ; } ByteBuf bufCopy = null ; if ( mBlockWriter != null ) { bufCopy = buf . duplicate ( ) ; bufCopy . readerIndex ( bufCopy . writerIndex ( ) ) ; } int bytesToRead = ( int ) Math . min ( buf . writableBytes ( ) , mBlockMeta . getBlockSize ( ) - mInStreamPos ) ; int bytesRead = buf . writeBytes ( mUnderFileSystemInputStream , bytesToRead ) ; if ( bytesRead <= 0 ) { return bytesRead ; } mInStreamPos += bytesRead ; if ( mBlockWriter != null && bufCopy != null ) { try { bufCopy . writerIndex ( buf . writerIndex ( ) ) ; while ( bufCopy . readableBytes ( ) > 0 ) { mLocalBlockStore . requestSpace ( mBlockMeta . getSessionId ( ) , mBlockMeta . getBlockId ( ) , mInStreamPos - mBlockWriter . getPosition ( ) ) ; mBlockWriter . append ( bufCopy ) ; } } catch ( Exception e ) { LOG . warn ( "Failed to cache data read from UFS (on transferTo()): {}" , e . getMessage ( ) ) ; cancelBlockWriter ( ) ; } } return bytesRead ; }
This interface is supposed to be used for sequence block reads .
12,319
// Idempotent close: advances/commits the block writer up to the full block size, releases the
// UFS input stream back to the manager, closes the writer and the UFS resource. mClosed is set
// in a finally block so the reader is marked closed even if cleanup fails.
public void close ( ) throws IOException { if ( mClosed ) { return ; } try { updateBlockWriter ( mBlockMeta . getBlockSize ( ) ) ; if ( mUnderFileSystemInputStream != null ) { mUfsInstreamManager . release ( mUnderFileSystemInputStream ) ; mUnderFileSystemInputStream = null ; } if ( mBlockWriter != null ) { mBlockWriter . close ( ) ; } mUfsResource . close ( ) ; } finally { mClosed = true ; } }
Closes the block reader . After this , the block reader should not be used anymore . This is recommended to be called after the client finishes reading the block . It is usually triggered when the client unlocks the block .
12,320
// Repositions the UFS input stream for a read at `offset`: releases the current stream when the
// offset differs from the current in-stream position, then (re)acquires a stream at the block's
// base offset + offset, but only while the offset is still within the block.
private void updateUnderFileSystemInputStream ( long offset ) throws IOException { if ( ( mUnderFileSystemInputStream != null ) && offset != mInStreamPos ) { mUfsInstreamManager . release ( mUnderFileSystemInputStream ) ; mUnderFileSystemInputStream = null ; mInStreamPos = - 1 ; } if ( mUnderFileSystemInputStream == null && offset < mBlockMeta . getBlockSize ( ) ) { UnderFileSystem ufs = mUfsResource . get ( ) ; mUnderFileSystemInputStream = mUfsInstreamManager . acquire ( ufs , mBlockMeta . getUnderFileSystemPath ( ) , IdUtils . fileIdFromBlockId ( mBlockMeta . getBlockId ( ) ) , OpenOptions . defaults ( ) . setOffset ( mBlockMeta . getOffset ( ) + offset ) ) ; mInStreamPos = offset ; }  }
Updates the UFS input stream given an offset to read .
12,321
// Closes the in-progress block writer and aborts its temp block in the local store.
// A missing block (session may have expired) is only logged; other failures are rethrown
// as status exceptions.
private void cancelBlockWriter ( ) throws IOException { if ( mBlockWriter == null ) { return ; } try { mBlockWriter . close ( ) ; mBlockWriter = null ; mLocalBlockStore . abortBlock ( mBlockMeta . getSessionId ( ) , mBlockMeta . getBlockId ( ) ) ; } catch ( BlockDoesNotExistException e ) { LOG . warn ( "Block {} does not exist when being aborted. The session may have expired." , mBlockMeta . getBlockId ( ) ) ; } catch ( BlockAlreadyExistsException | InvalidWorkerStateException | IOException e ) { throw AlluxioStatusException . fromCheckedException ( e ) ; } }
Closes the current block writer , cleans up its temp block , and sets the writer to null .
12,322
/**
 * Updates the block writer given the next read offset. If the offset is beyond the
 * current position of the block writer, the writer is aborted (the cached copy would
 * have a gap); a writer is only (re)created when reading from offset 0 and caching
 * is not disabled for this block.
 *
 * @param offset the offset of the next read
 * @throws IOException if aborting the previous writer fails
 */
private void updateBlockWriter(long offset) throws IOException {
  if (mBlockWriter != null && offset > mBlockWriter.getPosition()) {
    cancelBlockWriter();
  }
  try {
    if (mBlockWriter == null && offset == 0 && !mBlockMeta.isNoCache()) {
      BlockStoreLocation loc = BlockStoreLocation.anyDirInTier(mStorageTierAssoc.getAlias(0));
      mLocalBlockStore.createBlock(mBlockMeta.getSessionId(), mBlockMeta.getBlockId(), loc,
          mInitialBlockSize);
      mBlockWriter =
          mLocalBlockStore.getBlockWriter(mBlockMeta.getSessionId(), mBlockMeta.getBlockId());
    }
  } catch (BlockAlreadyExistsException e) {
    // Another reader is already caching this block; fall back to not caching.
    // (Fixed: the concatenated message previously lacked a space after "].")
    LOG.debug("Failed to update block writer for UFS block [blockId: {}, ufsPath: {}, offset: {}]. "
        + "Concurrent UFS readers may be caching the same block.",
        mBlockMeta.getBlockId(), mBlockMeta.getUnderFileSystemPath(), offset, e);
    mBlockWriter = null;
  } catch (IOException | AlluxioException e) {
    LOG.warn("Failed to update block writer for UFS block [blockId: {}, ufsPath: {}, offset: {}]: {}",
        mBlockMeta.getBlockId(), mBlockMeta.getUnderFileSystemPath(), offset, e.getMessage());
    mBlockWriter = null;
  }
}
Updates the block writer given an offset to read . If the offset is beyond the current position of the block writer the block writer will be aborted .
12,323
// Expands the wildcard path, runs the command on each matched URI in path order, and
// aggregates per-path failures into a single IOException at the end; fails immediately
// if the wildcard matches nothing.
protected void runWildCardCmd ( AlluxioURI wildCardPath , CommandLine cl ) throws IOException { List < AlluxioURI > paths = FileSystemShellUtils . getAlluxioURIs ( mFileSystem , wildCardPath ) ; if ( paths . size ( ) == 0 ) { throw new IOException ( wildCardPath + " does not exist." ) ; } paths . sort ( Comparator . comparing ( AlluxioURI :: getPath ) ) ; processHeader ( cl ) ; List < String > errorMessages = new ArrayList < > ( ) ; for ( AlluxioURI path : paths ) { try { runPlainPath ( path , cl ) ; } catch ( AlluxioException | IOException e ) { errorMessages . add ( e . getMessage ( ) != null ? e . getMessage ( ) : e . toString ( ) ) ; } } if ( errorMessages . size ( ) != 0 ) { throw new IOException ( Joiner . on ( '\n' ) . join ( errorMessages ) ) ; } }
Runs the command for a particular URI that may contain wildcard in its path .
12,324
// Starts the underlying gRPC server with bounded exponential-backoff retries,
// marks this wrapper as started, and returns itself for chaining.
public GrpcServer start ( ) throws IOException { RetryUtils . retry ( "Starting gRPC server" , ( ) -> mServer . start ( ) , new ExponentialBackoffRetry ( 100 , 500 , 5 ) ) ; mStarted = true ; return this ; }
Start serving .
12,325
// Moves a worker with the given id from the temp or lost set back into the active set.
// Lost-worker-found listeners fire only when the worker came from the lost set.
// Returns the re-registered worker, or null when the id is in neither set.
private MasterWorkerInfo registerWorkerInternal ( long workerId ) { for ( IndexedSet < MasterWorkerInfo > workers : Arrays . asList ( mTempWorkers , mLostWorkers ) ) { MasterWorkerInfo worker = workers . getFirstByField ( ID_INDEX , workerId ) ; if ( worker == null ) { continue ; } synchronized ( worker ) { worker . updateLastUpdatedTimeMs ( ) ; mWorkers . add ( worker ) ; workers . remove ( worker ) ; if ( workers == mLostWorkers ) { for ( Consumer < Address > function : mLostWorkerFoundListeners ) { function . accept ( new Address ( worker . getWorkerAddress ( ) . getHost ( ) , worker . getWorkerAddress ( ) . getRpcPort ( ) ) ) ; } LOG . warn ( "A lost worker {} has requested its old id {}." , worker . getWorkerAddress ( ) , worker . getId ( ) ) ; } } return worker ; } return null ; }
Re - register a lost worker or complete registration after getting a worker id .
12,326
// For each removed block (under its per-block lock): drops this worker's location, adds the
// block to the lost set when no locations remain, and removes it from the worker's block set.
// Caller must hold the workerInfo lock (see @GuardedBy).
@ GuardedBy ( "workerInfo" ) private void processWorkerRemovedBlocks ( MasterWorkerInfo workerInfo , Collection < Long > removedBlockIds ) { for ( long removedBlockId : removedBlockIds ) { try ( LockResource lr = lockBlock ( removedBlockId ) ) { Optional < BlockMeta > block = mBlockStore . getBlock ( removedBlockId ) ; if ( block . isPresent ( ) ) { LOG . info ( "Block {} is removed on worker {}." , removedBlockId , workerInfo . getId ( ) ) ; mBlockStore . removeLocation ( removedBlockId , workerInfo . getId ( ) ) ; if ( mBlockStore . getLocations ( removedBlockId ) . size ( ) == 0 ) { mLostBlocks . add ( removedBlockId ) ; } } workerInfo . removeBlock ( removedBlockId ) ; } } }
Updates the worker and block metadata for blocks removed from a worker .
12,327
// For each reported (tier -> block ids) entry (under the per-block lock): records the block on
// this worker with its tier and clears it from the lost set when the block is known; unknown
// blocks are logged as invalid. Caller must hold the workerInfo lock (see @GuardedBy).
@ GuardedBy ( "workerInfo" ) private void processWorkerAddedBlocks ( MasterWorkerInfo workerInfo , Map < String , List < Long > > addedBlockIds ) { for ( Map . Entry < String , List < Long > > entry : addedBlockIds . entrySet ( ) ) { for ( long blockId : entry . getValue ( ) ) { try ( LockResource lr = lockBlock ( blockId ) ) { Optional < BlockMeta > block = mBlockStore . getBlock ( blockId ) ; if ( block . isPresent ( ) ) { workerInfo . addBlock ( blockId ) ; mBlockStore . addLocation ( blockId , BlockLocation . newBuilder ( ) . setWorkerId ( workerInfo . getId ( ) ) . setTier ( entry . getKey ( ) ) . build ( ) ) ; mLostBlocks . remove ( blockId ) ; } else { LOG . warn ( "Invalid block: {} from worker {}." , blockId , workerInfo . getWorkerAddress ( ) . getHost ( ) ) ; } } } } }
Updates the worker and block metadata for blocks added to a worker .
12,328
// Builds a BlockInfo (length + worker locations sorted by tier ordinal) for the block id;
// returns empty when the block is unknown. Locations whose worker is no longer registered are
// silently skipped. Rejects calls while the master is in safe mode.
@ GuardedBy ( "masterBlockInfo" ) private Optional < BlockInfo > generateBlockInfo ( long blockId ) throws UnavailableException { if ( mSafeModeManager . isInSafeMode ( ) ) { throw new UnavailableException ( ExceptionMessage . MASTER_IN_SAFEMODE . getMessage ( ) ) ; } BlockMeta block ; List < BlockLocation > blockLocations ; try ( LockResource lr = lockBlock ( blockId ) ) { Optional < BlockMeta > blockOpt = mBlockStore . getBlock ( blockId ) ; if ( ! blockOpt . isPresent ( ) ) { return Optional . empty ( ) ; } block = blockOpt . get ( ) ; blockLocations = new ArrayList < > ( mBlockStore . getLocations ( blockId ) ) ; } Collections . sort ( blockLocations , Comparator . comparingInt ( o -> mGlobalStorageTierAssoc . getOrdinal ( o . getTier ( ) ) ) ) ; List < alluxio . wire . BlockLocation > locations = new ArrayList < > ( ) ; for ( BlockLocation location : blockLocations ) { MasterWorkerInfo workerInfo = mWorkers . getFirstByField ( ID_INDEX , location . getWorkerId ( ) ) ; if ( workerInfo != null ) { locations . add ( new alluxio . wire . BlockLocation ( ) . setWorkerId ( location . getWorkerId ( ) ) . setWorkerAddress ( workerInfo . getWorkerAddress ( ) ) . setTierAlias ( location . getTier ( ) ) ) ; } } return Optional . of ( new BlockInfo ( ) . setBlockId ( blockId ) . setLength ( block . getLength ( ) ) . setLocations ( locations ) ) ; }
Generates block info including worker locations for a block id .
12,329
// Filters workers whose hostname or resolved IP appears in `addresses`. Side effects: matched
// entries are removed from `addresses`, and every hostname/IP seen is collected into
// `workerNames`. Hostname resolution failures are deliberately swallowed (best-effort match
// falls back to the raw hostname).
private Set < MasterWorkerInfo > selectInfoByAddress ( Set < String > addresses , Set < MasterWorkerInfo > workerInfoSet , Set < String > workerNames ) { return workerInfoSet . stream ( ) . filter ( info -> { String host = info . getWorkerAddress ( ) . getHost ( ) ; workerNames . add ( host ) ; String ip = null ; try { ip = NetworkAddressUtils . resolveIpAddress ( host ) ; workerNames . add ( ip ) ; } catch ( UnknownHostException e ) { } if ( addresses . contains ( host ) ) { addresses . remove ( host ) ; return true ; } if ( ip != null ) { if ( addresses . contains ( ip ) ) { addresses . remove ( ip ) ; return true ; } } return false ; } ) . collect ( Collectors . toSet ( ) ) ; }
Selects the MasterWorkerInfo from workerInfoSet whose host or related IP address exists in addresses .
12,330
// Serialized handler for one message of a streamed write: creates the request context on the
// first message, rejects messages after completion, validates the offset, then dispatches to
// flush, command handling, or chunk writing. Any exception aborts the request with a
// client-notified error. A semaphore bounds the number of queued messages; it is released
// after the message is processed.
public void write ( WriteRequest writeRequest ) { if ( ! tryAcquireSemaphore ( ) ) { return ; } mSerializingExecutor . execute ( ( ) -> { try { if ( mContext == null ) { LOG . debug ( "Received write request {}." , writeRequest ) ; mContext = createRequestContext ( writeRequest ) ; } else { Preconditions . checkState ( ! mContext . isDoneUnsafe ( ) , "invalid request after write request is completed." ) ; } if ( mContext . isDoneUnsafe ( ) || mContext . getError ( ) != null ) { return ; } validateWriteRequest ( writeRequest ) ; if ( writeRequest . hasCommand ( ) ) { WriteRequestCommand command = writeRequest . getCommand ( ) ; if ( command . getFlush ( ) ) { flush ( ) ; } else { handleCommand ( command , mContext ) ; } } else { Preconditions . checkState ( writeRequest . hasChunk ( ) , "write request is missing data chunk in non-command message" ) ; ByteString data = writeRequest . getChunk ( ) . getData ( ) ; Preconditions . checkState ( data != null && data . size ( ) > 0 , "invalid data size from write request message" ) ; writeData ( new NioDataBuffer ( data . asReadOnlyByteBuffer ( ) , data . size ( ) ) ) ; } } catch ( Exception e ) { LogUtils . warnWithException ( LOG , "Exception occurred while processing write request {}." , writeRequest , e ) ; abort ( new Error ( AlluxioStatusException . fromThrowable ( e ) , true ) ) ; } finally { mSemaphore . release ( ) ; } } ) ; }
Handles write request .
12,331
// Fast path for a write request that arrives with an attached data buffer: falls back to
// write() when there is no buffer; otherwise validates the message shape and enqueues only
// the data write on the serializing executor.
public void writeDataMessage ( WriteRequest request , DataBuffer buffer ) { if ( buffer == null ) { write ( request ) ; return ; } Preconditions . checkState ( ! request . hasCommand ( ) , "write request command should not come with data buffer" ) ; Preconditions . checkState ( buffer . readableBytes ( ) > 0 , "invalid data size from write request message" ) ; if ( ! tryAcquireSemaphore ( ) ) { return ; } mSerializingExecutor . execute ( ( ) -> { try { writeData ( buffer ) ; } finally { mSemaphore . release ( ) ; } } ) ; }
Handles write request with data message .
12,332
// Completes the request and replies success, on the serializing executor. Unchecked exceptions
// are rethrown as-is; checked ones abort the request with a client-notified error.
public void onCompleted ( ) { mSerializingExecutor . execute ( ( ) -> { Preconditions . checkState ( mContext != null ) ; try { completeRequest ( mContext ) ; replySuccess ( ) ; } catch ( Exception e ) { LogUtils . warnWithException ( LOG , "Exception occurred while completing write request {}." , mContext . getRequest ( ) , e ) ; Throwables . throwIfUnchecked ( e ) ; abort ( new Error ( AlluxioStatusException . fromCheckedException ( e ) , true ) ) ; } } ) ; }
Handles request complete event .
12,333
// Cancels the request and acknowledges the cancellation to the client, on the serializing
// executor. NOTE(review): unlike onCompleted, mContext is not checked for null here — if no
// message was ever received, cancelRequest/getRequest see null; confirm callees tolerate it.
public void onCancel ( ) { mSerializingExecutor . execute ( ( ) -> { try { cancelRequest ( mContext ) ; replyCancel ( ) ; } catch ( Exception e ) { LogUtils . warnWithException ( LOG , "Exception occurred while cancelling write request {}." , mContext . getRequest ( ) , e ) ; Throwables . throwIfUnchecked ( e ) ; abort ( new Error ( AlluxioStatusException . fromCheckedException ( e ) , true ) ) ; } } ) ; }
Handles request cancellation event .
12,334
// Errors from the client: CANCELLED status is ignored (client went away); anything else is
// logged and aborts the request without notifying the client (notifyClient = false).
public void onError ( Throwable cause ) { if ( cause instanceof StatusRuntimeException && ( ( StatusRuntimeException ) cause ) . getStatus ( ) . getCode ( ) == Status . Code . CANCELLED ) { return ; } mSerializingExecutor . execute ( ( ) -> { LogUtils . warnWithException ( LOG , "Exception thrown while handling write request {}" , mContext == null ? "unknown" : mContext . getRequest ( ) , cause ) ; abort ( new Error ( AlluxioStatusException . fromThrowable ( cause ) , false ) ) ; } ) ; }
Handles errors from the request .
12,335
// A command carrying an explicit offset must match the current write position exactly;
// otherwise the request is rejected as invalid.
@ GuardedBy ( "mLock" ) private void validateWriteRequest ( alluxio . grpc . WriteRequest request ) throws InvalidArgumentException { if ( request . hasCommand ( ) && request . getCommand ( ) . hasOffset ( ) && request . getCommand ( ) . getOffset ( ) != mContext . getPos ( ) ) { throw new InvalidArgumentException ( String . format ( "Offsets do not match [received: %d, expected: %d]." , request . getCommand ( ) . getOffset ( ) , mContext . getPos ( ) ) ) ; } }
Validates a block write request .
12,336
private void abort ( Error error ) { try { if ( mContext == null || mContext . getError ( ) != null || mContext . isDoneUnsafe ( ) ) { return ; } mContext . setError ( error ) ; cleanupRequest ( mContext ) ; replyError ( ) ; } catch ( Exception e ) { LOG . warn ( "Failed to cleanup states with error {}." , e . getMessage ( ) ) ; } }
Abort the write process due to error .
12,337
// Sends the recorded error to the client, but only when the error is flagged as
// client-notifiable; a missing error is a programming bug (checkNotNull).
private void replyError ( ) { Error error = Preconditions . checkNotNull ( mContext . getError ( ) ) ; if ( error . isNotifyClient ( ) ) { mResponseObserver . onError ( error . getCause ( ) . toGrpcStatusException ( ) ) ; } }
Writes an error response .
12,338
// Sends a request, first blocking (under mLock) until the gRPC request observer reports ready,
// a stream error is recorded, or the timeout elapses; waiting uses the mReadyOrFailed condition.
// Interruption restores the thread's interrupt flag and surfaces as CancelledException.
// onNext is invoked outside the lock.
public void send ( ReqT request , long timeoutMs ) throws IOException { if ( mClosed || mCanceled || mClosedFromRemote ) { throw new CancelledException ( formatErrorMessage ( "Failed to send request %s: stream is already closed or canceled." , request ) ) ; } try ( LockResource lr = new LockResource ( mLock ) ) { while ( true ) { checkError ( ) ; if ( mRequestObserver . isReady ( ) ) { break ; } try { if ( ! mReadyOrFailed . await ( timeoutMs , TimeUnit . MILLISECONDS ) ) { throw new DeadlineExceededException ( formatErrorMessage ( "Timeout sending request %s after %dms." , request , timeoutMs ) ) ; } } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new CancelledException ( formatErrorMessage ( "Failed to send request %s: interrupted while waiting for server." , request ) , e ) ; } } } mRequestObserver . onNext ( request ) ; }
Sends a request . Will wait until the stream is ready before sending or timeout if the given timeout is reached .
12,339
// Sends a request without waiting for stream readiness; only checks (under the lock) that no
// stream error has been recorded, then hands the request to the observer.
public void send ( ReqT request ) throws IOException { if ( mClosed || mCanceled || mClosedFromRemote ) { throw new CancelledException ( formatErrorMessage ( "Failed to send request %s: stream is already closed or canceled." , request ) ) ; } try ( LockResource lr = new LockResource ( mLock ) ) { checkError ( ) ; } mRequestObserver . onNext ( request ) ; }
Sends a request . Will not wait for the stream to be ready .
12,340
// Polls the response queue with a timeout. The response observer object itself is the
// end-of-stream sentinel: receiving it marks the stream completed and returns null, as do all
// subsequent calls. Each received item re-checks for recorded stream errors; interruption
// restores the interrupt flag.
public ResT receive ( long timeoutMs ) throws IOException { if ( mCompleted ) { return null ; } if ( mCanceled ) { throw new CancelledException ( formatErrorMessage ( "Stream is already canceled." ) ) ; } try { Object response = mResponses . poll ( timeoutMs , TimeUnit . MILLISECONDS ) ; if ( response == null ) { throw new DeadlineExceededException ( formatErrorMessage ( "Timeout waiting for response after %dms." , timeoutMs ) ) ; } if ( response == mResponseObserver ) { mCompleted = true ; return null ; } checkError ( ) ; return ( ResT ) response ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new CancelledException ( formatErrorMessage ( "Interrupted while waiting for response." ) , e ) ; } }
Receives a response from the server . Will wait until a response is received or throw an exception if times out .
12,341
// Cancels the stream if it is still open: marks it canceled and propagates a user cancellation
// to the request observer.
public void cancel ( ) { if ( isOpen ( ) ) { LOG . debug ( "Cancelling stream ({})" , mDescription ) ; mCanceled = true ; mRequestObserver . cancel ( "Request is cancelled by user." , null ) ; } }
Cancels the stream .
12,342
// Health-check entry point for the job master: in HA mode uses the HA-aware master health
// checker, otherwise a direct job-master RPC check; exits 0 when serving, 1 otherwise.
// Any command-line arguments are logged and ignored.
public static void main ( String [ ] args ) { if ( args . length != 0 ) { LOG . info ( "java -cp {} {}" , RuntimeConstants . ALLUXIO_JAR , AlluxioJobMasterMonitor . class . getCanonicalName ( ) ) ; LOG . warn ( "ignoring arguments" ) ; } AlluxioConfiguration conf = new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ; HealthCheckClient client ; if ( ConfigurationUtils . isHaMode ( conf ) ) { client = new MasterHealthCheckClient . Builder ( conf ) . withAlluxioMasterType ( MasterHealthCheckClient . MasterType . JOB_MASTER ) . build ( ) ; } else { client = new JobMasterRpcHealthCheckClient ( NetworkAddressUtils . getConnectAddress ( NetworkAddressUtils . ServiceType . JOB_MASTER_RPC , conf ) , AlluxioMasterMonitor . TWO_MIN_EXP_BACKOFF , conf ) ; } if ( ! client . isServing ( ) ) { System . exit ( 1 ) ; } System . exit ( 0 ) ; }
Starts the Alluxio job master monitor .
12,343
// Recursively loads a path into Alluxio by reading every file end-to-end with CACHE_PROMOTE
// (the read itself populates the cache). With `local` set, a local worker must be available;
// without it, files already 100% in Alluxio are skipped. Progress is echoed to stdout.
private void load ( AlluxioURI filePath , boolean local ) throws AlluxioException , IOException { URIStatus status = mFileSystem . getStatus ( filePath ) ; if ( status . isFolder ( ) ) { List < URIStatus > statuses = mFileSystem . listStatus ( filePath ) ; for ( URIStatus uriStatus : statuses ) { AlluxioURI newPath = new AlluxioURI ( uriStatus . getPath ( ) ) ; load ( newPath , local ) ; } } else { OpenFilePOptions options = OpenFilePOptions . newBuilder ( ) . setReadType ( ReadPType . CACHE_PROMOTE ) . build ( ) ; if ( local ) { if ( ! mFsContext . hasLocalWorker ( ) ) { System . out . println ( "When local option is specified," + " there must be a local worker available" ) ; return ; } } else if ( status . getInAlluxioPercentage ( ) == 100 ) { System . out . println ( filePath + " already in Alluxio fully" ) ; return ; } Closer closer = Closer . create ( ) ; try { FileInStream in = closer . register ( mFileSystem . openFile ( filePath , options ) ) ; byte [ ] buf = new byte [ 8 * Constants . MB ] ; while ( in . read ( buf ) != - 1 ) { } } catch ( Exception e ) { throw closer . rethrow ( e ) ; } finally { closer . close ( ) ; } } System . out . println ( filePath + " loaded" ) ; }
Loads a file or directory into Alluxio space , making it resident in Alluxio .
12,344
// Looks up the per-(session, block) info under the lock, then closes its reader/writer
// outside the lock; a missing key is only logged (nothing to clean up).
public void closeReaderOrWriter ( long sessionId , long blockId ) throws IOException { BlockInfo blockInfo ; try ( LockResource lr = new LockResource ( mLock ) ) { blockInfo = mBlocks . get ( new Key ( sessionId , blockId ) ) ; if ( blockInfo == null ) { LOG . warn ( "Key (block ID: {}, session ID {}) is not found when cleaning up the UFS block." , blockId , sessionId ) ; return ; } } blockInfo . closeReaderOrWriter ( ) ; }
Closes the block reader or writer and checks whether it is necessary to commit the block to Local block store .
12,345
// Returns the existing reader for this (session, block) if one is registered; otherwise
// creates a UFS block reader (which may cache into the local store) and registers it.
// NOTE(review): creation and setBlockReader happen outside the lock, so two racing callers
// could each create a reader; confirm this race is acceptable upstream.
public BlockReader getBlockReader ( final long sessionId , long blockId , long offset ) throws BlockDoesNotExistException , IOException { final BlockInfo blockInfo ; try ( LockResource lr = new LockResource ( mLock ) ) { blockInfo = getBlockInfo ( sessionId , blockId ) ; BlockReader blockReader = blockInfo . getBlockReader ( ) ; if ( blockReader != null ) { return blockReader ; } } BlockReader reader = UnderFileSystemBlockReader . create ( blockInfo . getMeta ( ) , offset , mLocalBlockStore , mUfsManager , mUfsInstreamManager ) ; blockInfo . setBlockReader ( reader ) ; return reader ; }
Creates a block reader that reads from UFS and optionally caches the block to the Alluxio block store .
12,346
// Allocates the next directory id from the current (containerId, sequenceNumber) pair.
// When the sequence space of the container is exhausted, a fresh container id is obtained and
// the sequence resets to 0; the advanced state is journaled before returning the id.
synchronized long getNewDirectoryId ( JournalContext context ) throws UnavailableException { initialize ( context ) ; long containerId = mNextDirectoryId . getContainerId ( ) ; long sequenceNumber = mNextDirectoryId . getSequenceNumber ( ) ; long directoryId = BlockId . createBlockId ( containerId , sequenceNumber ) ; if ( sequenceNumber == BlockId . getMaxSequenceNumber ( ) ) { containerId = mContainerIdGenerator . getNewContainerId ( ) ; sequenceNumber = 0 ; } else { sequenceNumber ++ ; } applyAndJournal ( context , toEntry ( containerId , sequenceNumber ) ) ; return directoryId ; }
Returns the next directory id and journals the state .
12,347
/**
 * Joins the string forms of the given list's elements with single spaces.
 *
 * @param list the list to render
 * @param <T> the element type
 * @return the space-separated string, empty for an empty list
 */
public static <T> String listToString(List<T> list) {
  return list.stream().map(String::valueOf).collect(Collectors.joining(" "));
}
Converts a list of objects to a string .
12,348
/**
 * Joins the string forms of the given varargs with the supplied separator.
 *
 * @param separator the separator placed between consecutive arguments
 * @param args the values to render
 * @param <T> the argument type
 * @return the joined string, empty when no arguments are given
 */
public static <T> String argsToString(String separator, T... args) {
  StringJoiner joiner = new StringJoiner(separator);
  for (T arg : args) {
    joiner.add(String.valueOf(arg));
  }
  return joiner.toString();
}
Converts varargs of objects to a string .
12,349
/**
 * Generates a random alphanumeric string of the given length, drawing each
 * character uniformly from the ALPHANUM alphabet.
 */
public static String randomAlphaNumString(int length) {
  char[] result = new char[length];
  for (int pos = 0; pos < length; pos++) {
    result[pos] = ALPHANUM.charAt(RANDOM.nextInt(ALPHANUM.length()));
  }
  return new String(result);
}
Generates a random alphanumeric string of the given length .
12,350
/**
 * Creates a new instance of {@code cls} by invoking the public constructor matching
 * {@code ctorClassArgs} with {@code ctorArgs}; when {@code ctorClassArgs} is null the
 * no-arg constructor is used.
 *
 * @param cls the class to instantiate
 * @param ctorClassArgs the constructor parameter types, or null for the no-arg constructor
 * @param ctorArgs the constructor arguments
 * @param <T> the instantiated type
 * @return the new instance
 * @throws RuntimeException wrapping any reflection failure or exception thrown by the constructor
 */
public static <T> T createNewClassInstance(Class<T> cls, Class<?>[] ctorClassArgs, Object[] ctorArgs) {
  try {
    if (ctorClassArgs == null) {
      // Class#newInstance is deprecated (it bypasses compile-time exception checking);
      // go through the declared no-arg constructor instead.
      return cls.getDeclaredConstructor().newInstance();
    }
    Constructor<T> ctor = cls.getConstructor(ctorClassArgs);
    return ctor.newInstance(ctorArgs);
  } catch (InvocationTargetException e) {
    // Surface the constructor's own failure, not the reflection wrapper.
    throw new RuntimeException(e.getCause());
  } catch (ReflectiveOperationException e) {
    throw new RuntimeException(e);
  }
}
Creates new instance of a class by calling a constructor that receives ctorClassArgs arguments .
12,351
// Runs the platform `groups` command for the given user and tokenizes its output into a list.
// A failing command (non-zero exit) is logged and yields an empty list; duplicates in the
// command output are preserved.
public static List < String > getUnixGroups ( String user ) throws IOException { String result ; List < String > groups = new ArrayList < > ( ) ; try { result = ShellUtils . execCommand ( ShellUtils . getGroupsForUserCommand ( user ) ) ; } catch ( ExitCodeException e ) { LOG . warn ( "got exception trying to get groups for user " + user + ": " + e . getMessage ( ) ) ; return groups ; } StringTokenizer tokenizer = new StringTokenizer ( result , ShellUtils . TOKEN_SEPARATOR_REGEX ) ; while ( tokenizer . hasMoreTokens ( ) ) { groups . add ( tokenizer . nextToken ( ) ) ; } return groups ; }
Gets the given user's group list from Unix by running the `groups` command . NOTE : for a non - existing user it will return an EMPTY list . This method may return duplicate groups .
12,352
// Polls `operation` every options.getInterval() ms until it returns non-null, throwing
// TimeoutException once options.getTimeoutMs() has elapsed (unless the timeout is NEVER).
// InterruptedException from the sleep propagates to the caller.
public static < T > T waitForResult ( String description , Supplier < T > operation , WaitForOptions options ) throws InterruptedException , TimeoutException { T t ; long start = System . currentTimeMillis ( ) ; int interval = options . getInterval ( ) ; int timeout = options . getTimeoutMs ( ) ; while ( ( t = operation . get ( ) ) == null ) { if ( timeout != WaitForOptions . NEVER && System . currentTimeMillis ( ) - start > timeout ) { throw new TimeoutException ( "Timed out waiting for " + description + " options: " + options ) ; } Thread . sleep ( interval ) ; } return t ; }
Waits for an operation to return a non - null value with a specified timeout .
12,353
// The primary group is the first entry of the user's group list; returns "" when the
// list is null or empty.
public static String getPrimaryGroupName ( String userName , AlluxioConfiguration conf ) throws IOException { List < String > groups = getGroups ( userName , conf ) ; return ( groups != null && groups . size ( ) > 0 ) ? groups . get ( 0 ) : "" ; }
Gets the primary group name of a user .
12,354
/**
 * Strips {@code suffix} from the end of {@code key} if present; keys without the suffix
 * are returned unaltered.
 *
 * @param key the string to strip
 * @param suffix the suffix to remove
 * @return the key without the suffix, or the original key
 */
public static String stripSuffixIfPresent(final String key, final String suffix) {
  if (!key.endsWith(suffix)) {
    return key;
  }
  return key.substring(0, key.length() - suffix.length());
}
Strips the suffix if it exists . This method will leave keys without a suffix unaltered .
12,355
/**
 * Walks the cause chain of {@code e} and returns the deepest cause, stopping early when the
 * next cause is a gRPC {@link StatusRuntimeException} (that wrapper is not unwrapped).
 *
 * @param e the throwable to unwrap
 * @return the root cause, or the throwable itself if it has no cause
 */
public static Throwable getRootCause(Throwable e) {
  Throwable current = e;
  for (Throwable cause = current.getCause();
      cause != null && !(cause instanceof StatusRuntimeException);
      cause = current.getCause()) {
    current = cause;
  }
  return current;
}
Gets the root cause of an exception . It stops at encountering gRPC s StatusRuntimeException .
12,356
/**
 * Returns an iterator over exactly one element. {@code remove} is unsupported.
 *
 * @param element the single element to iterate over
 * @param <T> the element type
 * @return a one-shot iterator yielding {@code element} once
 */
public static <T> Iterator<T> singleElementIterator(final T element) {
  return new Iterator<T>() {
    // Flips to true once the single element has been consumed.
    private boolean mConsumed = false;

    @Override
    public boolean hasNext() {
      return !mConsumed;
    }

    @Override
    public T next() {
      if (mConsumed) {
        throw new NoSuchElementException();
      }
      mConsumed = true;
      return element;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("remove is not supported.");
    }
  };
}
Returns an iterator that iterates on a single element .
12,357
/**
 * Closes the Closer and re-throws the Throwable. Closer.rethrow(t) always throws (wrapping a
 * non-IOException checked exception in a RuntimeException), so this method never returns
 * normally; the RuntimeException return type exists so callers can write
 * "throw closeAndRethrow(closer, t)" to satisfy the compiler's flow analysis. Exceptions
 * raised while closing in the finally block are added by the Closer as suppressed exceptions
 * on the primary Throwable.
 *
 * @param closer the closer to close after rethrowing
 * @param t the throwable to rethrow
 * @return never returns normally
 * @throws IOException if t is (or wraps) an IOException, or closing fails
 */
public static RuntimeException closeAndRethrow ( Closer closer , Throwable t ) throws IOException { try { throw closer . rethrow ( t ) ; } finally { closer . close ( ) ; } }
Closes the Closer and re - throws the Throwable . Any exceptions thrown while closing the Closer will be added as suppressed exceptions to the Throwable . This method always throws the given Throwable wrapping it in a RuntimeException if it s a non - IOException checked exception .
12,358
/**
 * Converts an epoch-millisecond timestamp to a formatted date string.
 *
 * @param millis milliseconds since the epoch
 * @param dateFormatPattern a {@link SimpleDateFormat} pattern
 * @return the formatted date string (uses the JVM default time zone)
 */
public static String convertMsToDate(long millis, String dateFormatPattern) {
  Date date = new Date(millis);
  return new SimpleDateFormat(dateFormatPattern).format(date);
}
Converts a millisecond number to a formatted date String .
12,359
/**
 * Builds the tag map for a UFS status: UFS name, owner, group, mode, and type. File statuses
 * additionally carry a content hash tag.
 *
 * @param ufsName the name of the under file system
 * @param status the UFS status to read attributes from
 * @return a map from tag to its string value
 */
private static Map<Tag, String> createTags(String ufsName, UfsStatus status) {
  Map<Tag, String> tags = new HashMap<>();
  tags.put(Tag.UFS, ufsName);
  tags.put(Tag.OWNER, status.getOwner());
  tags.put(Tag.GROUP, status.getGroup());
  tags.put(Tag.MODE, String.valueOf(status.getMode()));
  if (status instanceof UfsFileStatus) {
    UfsFileStatus fileStatus = (UfsFileStatus) status;
    tags.put(Tag.TYPE, Type.FILE.name());
    tags.put(Tag.CONTENT_HASH, fileStatus.getContentHash());
  } else {
    tags.put(Tag.TYPE, Type.DIRECTORY.name());
  }
  return tags;
}
Parses the input status and returns a tag map .
12,360
/**
 * Checks whether this fingerprint is valid: it has at least one tag and contains every
 * required tag. A fingerprint parsed from an invalid fingerprint string has no tags and is
 * therefore reported as invalid.
 *
 * @return true if all required tags are present, false otherwise
 */
public boolean isValid() {
  if (mValues.isEmpty()) {
    return false;
  }
  boolean complete = true;
  for (Tag required : REQUIRED_TAGS) {
    complete = complete && mValues.containsKey(required);
  }
  return complete;
}
Checks whether the fingerprint is valid, i.e. it contains all required tags; a fingerprint generated from an INVALID_UFS_FINGERPRINT is reported as invalid.
12,361
/**
 * Compares the metadata tags of this fingerprint against another fingerprint.
 *
 * @param fp the fingerprint to compare with
 * @return true if every metadata tag value matches
 */
public boolean matchMetadata(Fingerprint fp) {
  for (Tag tag : METADATA_TAGS) {
    String mine = getTag(tag);
    if (!mine.equals(fp.getTag(tag))) {
      return false;
    }
  }
  return true;
}
Returns true if the serialized fingerprint matches the fingerprint in metadata .
12,362
/**
 * Compares the content tags of this fingerprint against another fingerprint.
 *
 * @param fp the fingerprint to compare with
 * @return true if every content tag value matches
 */
public boolean matchContent(Fingerprint fp) {
  for (Tag tag : CONTENT_TAGS) {
    String mine = getTag(tag);
    if (!mine.equals(fp.getTag(tag))) {
      return false;
    }
  }
  return true;
}
Returns true if the serialized fingerprint matches the fingerprint in the content part .
12,363
/**
 * Updates a specific tag in the fingerprint with a sanitized value. A null value is a no-op,
 * leaving the fingerprint unchanged.
 *
 * @param tag the tag to set
 * @param value the new value, or null to keep the current value
 */
public void putTag(Tag tag, String value) {
  if (value == null) {
    return;
  }
  mValues.put(tag, sanitizeString(value));
}
Updates a specific tag in the fingerprint . If the new value is null the fingerprint is kept unchanged .
12,364
/**
 * Opens a new S3 object stream at offset mPos if the wrapped stream mIn is null (no-op
 * otherwise). Retries according to mRetryPolicy, but only on 404 (NOT_FOUND) responses —
 * presumably to ride out S3's eventual consistency on newly written keys (TODO confirm);
 * any other AmazonS3Exception is rethrown immediately as an IOException. If all attempts
 * see 404, the last such exception is thrown wrapped in an IOException.
 *
 * @throws IOException if the object cannot be opened
 */
private void openStream ( ) throws IOException { if ( mIn != null ) { return ; } GetObjectRequest getReq = new GetObjectRequest ( mBucketName , mKey ) ; if ( mPos > 0 ) { getReq . setRange ( mPos ) ; } AmazonS3Exception lastException = null ; while ( mRetryPolicy . attempt ( ) ) { try { mIn = mClient . getObject ( getReq ) . getObjectContent ( ) ; return ; } catch ( AmazonS3Exception e ) { LOG . warn ( "Attempt {} to open key {} in bucket {} failed with exception : {}" , mRetryPolicy . getAttemptCount ( ) , mKey , mBucketName , e . toString ( ) ) ; if ( e . getStatusCode ( ) != HttpStatus . SC_NOT_FOUND ) { throw new IOException ( e ) ; } lastException = e ; } } throw new IOException ( lastException ) ; }
Opens a new stream at mPos if the wrapped stream mIn is null .
12,365
/**
 * Detaches a child node from this node. The child must currently be attached; otherwise an
 * IllegalStateException is raised by the precondition check.
 *
 * @param child the child node to remove
 */
public void removeChild(DirectedAcyclicGraphNode<T> child) {
  // Fail fast if the caller passes a node that is not actually a child of this node.
  Preconditions.checkState(mChildren.contains(child));
  mChildren.remove(child);
}
Removes a child node from the node .
12,366
/**
 * Sets an entry into the access control list. Extended entry types (NAMED_USER, NAMED_GROUP,
 * MASK) are stored in the lazily created extended-entries structure, overwriting any existing
 * entry with the same type and subject. The base types (OWNING_USER, OWNING_GROUP, OTHER) are
 * folded directly into the short mode bits mMode by rewriting the corresponding owner/group/
 * other permission triplet.
 *
 * @param entry the ACL entry to set
 * @throws IllegalStateException if the entry type is not one of the known types
 */
public void setEntry ( AclEntry entry ) { switch ( entry . getType ( ) ) { case NAMED_USER : case NAMED_GROUP : case MASK : if ( mExtendedEntries == null ) { mExtendedEntries = new ExtendedACLEntries ( ) ; } mExtendedEntries . setEntry ( entry ) ; return ; case OWNING_USER : Mode modeOwner = new Mode ( mMode ) ; modeOwner . setOwnerBits ( entry . getActions ( ) . toModeBits ( ) ) ; mMode = modeOwner . toShort ( ) ; return ; case OWNING_GROUP : Mode modeGroup = new Mode ( mMode ) ; modeGroup . setGroupBits ( entry . getActions ( ) . toModeBits ( ) ) ; mMode = modeGroup . toShort ( ) ; return ; case OTHER : Mode modeOther = new Mode ( mMode ) ; modeOther . setOtherBits ( entry . getActions ( ) . toModeBits ( ) ) ; mMode = modeOther . toShort ( ) ; return ; default : throw new IllegalStateException ( "Unknown ACL entry type: " + entry . getType ( ) ) ; } }
Sets an entry into the access control list . If an entry with the same type and subject already exists overwrites the existing entry ; Otherwise adds this new entry . After we modify entries for NAMED_GROUP OWNING_GROUP NAMED_USER we need to update the mask .
12,367
/**
 * Checks whether the user (with the given groups) is permitted to perform the action.
 *
 * @param user the user name
 * @param groups the groups the user belongs to
 * @param action the action being checked
 * @return true if the resolved permissions include the action
 */
public boolean checkPermission(String user, List<String> groups, AclAction action) {
  AclActions permitted = getPermission(user, groups);
  return permitted.contains(action);
}
Checks whether the user has the permission to perform the action .
12,368
/**
 * Gets the permitted actions for a user, following POSIX-ACL-style resolution order:
 * (1) the owning user's actions if the user is the owner; (2) a named-user entry (masked by
 * the mask entry) if one exists; (3) the union of the owning-group entry and all matching
 * named-group entries (masked, when extended entries exist) if the user is in any of those
 * groups; (4) otherwise the "other" actions. Note the owner check wins even if named
 * entries also match.
 *
 * @param user the user name
 * @param groups the groups the user belongs to
 * @return a copy of the actions the user is permitted to perform
 */
public AclActions getPermission ( String user , List < String > groups ) { if ( user . equals ( mOwningUser ) ) { return new AclActions ( getOwningUserActions ( ) ) ; } if ( hasExtended ( ) ) { AclActions actions = mExtendedEntries . getNamedUser ( user ) ; if ( actions != null ) { AclActions result = new AclActions ( actions ) ; result . mask ( mExtendedEntries . mMaskActions ) ; return result ; } } boolean isGroupKnown = false ; AclActions groupActions = new AclActions ( ) ; if ( groups . contains ( mOwningGroup ) ) { isGroupKnown = true ; groupActions . merge ( getOwningGroupActions ( ) ) ; } if ( hasExtended ( ) ) { for ( String group : groups ) { AclActions actions = mExtendedEntries . getNamedGroup ( group ) ; if ( actions != null ) { isGroupKnown = true ; groupActions . merge ( actions ) ; } } } if ( isGroupKnown ) { if ( hasExtended ( ) ) { groupActions . mask ( mExtendedEntries . mMaskActions ) ; } return groupActions ; } return getOtherActions ( ) ; }
Gets the permitted actions for a user .
12,369
/**
 * Converts a list of CLI string entries into an AccessControlList or a
 * DefaultAccessControlList. The first entry decides which concrete type is created (default
 * entry => DefaultAccessControlList); an empty list also yields a DefaultAccessControlList.
 * It assumes the stringEntries are homogeneous: all default entries or all normal entries.
 *
 * @param owner the owning user to set on the ACL
 * @param owningGroup the owning group to set on the ACL
 * @param stringEntries the CLI-format entry strings to parse and apply in order
 * @return the populated ACL
 */
public static AccessControlList fromStringEntries ( String owner , String owningGroup , List < String > stringEntries ) { AccessControlList acl ; if ( stringEntries . size ( ) > 0 ) { AclEntry aclEntry = AclEntry . fromCliString ( stringEntries . get ( 0 ) ) ; if ( aclEntry . isDefault ( ) ) { acl = new DefaultAccessControlList ( ) ; } else { acl = new AccessControlList ( ) ; } } else { acl = new DefaultAccessControlList ( ) ; } acl . setOwningUser ( owner ) ; acl . setOwningGroup ( owningGroup ) ; for ( String stringEntry : stringEntries ) { AclEntry aclEntry = AclEntry . fromCliString ( stringEntry ) ; acl . setEntry ( aclEntry ) ; } return acl ; }
Converts a list of string entries into an AccessControlList or a DefaultAccessControlList . It assumes the stringEntries contain all default entries or normal entries .
12,370
/**
 * Returns whether an object with the specified index field value exists in the set.
 *
 * @param indexDefinition the index to look up
 * @param value the field value to search for
 * @param <V> the field value type
 * @return true if some object has the given field value
 * @throws IllegalStateException if the index is not defined for this IndexedSet
 */
public <V> boolean contains(IndexDefinition<T, V> indexDefinition, V value) {
  @SuppressWarnings("unchecked")
  FieldIndex<T, V> fieldIndex = (FieldIndex<T, V>) mIndices.get(indexDefinition);
  if (fieldIndex == null) {
    throw new IllegalStateException("the given index isn't defined for this IndexedSet");
  }
  return fieldIndex.containsField(value);
}
Whether there is an object with the specified index field value in the set .
12,371
/**
 * Gets the subset of objects with the specified field value. When no object has the value,
 * an empty set is returned.
 *
 * @param indexDefinition the index to look up
 * @param value the field value to search for
 * @param <V> the field value type
 * @return the (possibly empty) set of matching objects
 * @throws IllegalStateException if the index is not defined for this IndexedSet
 */
public <V> Set<T> getByField(IndexDefinition<T, V> indexDefinition, V value) {
  @SuppressWarnings("unchecked")
  FieldIndex<T, V> fieldIndex = (FieldIndex<T, V>) mIndices.get(indexDefinition);
  if (fieldIndex == null) {
    throw new IllegalStateException("the given index isn't defined for this IndexedSet");
  }
  return fieldIndex.getByField(value);
}
Gets a subset of objects with the specified field value . If there is no object with the specified field value a newly created empty set is returned .
12,372
/**
 * Gets one object with the specified field value from the set.
 *
 * @param indexDefinition the index to look up
 * @param value the field value to search for
 * @param <V> the field value type
 * @return the first matching object reported by the index
 * @throws IllegalStateException if the index is not defined for this IndexedSet
 */
public <V> T getFirstByField(IndexDefinition<T, V> indexDefinition, V value) {
  @SuppressWarnings("unchecked")
  FieldIndex<T, V> fieldIndex = (FieldIndex<T, V>) mIndices.get(indexDefinition);
  if (fieldIndex == null) {
    throw new IllegalStateException("the given index isn't defined for this IndexedSet");
  }
  return fieldIndex.getFirst(value);
}
Gets the object from the set of objects with the specified field value .
12,373
/**
 * Removes an object from the set and from every field index. The check-then-remove sequence
 * is guarded by synchronizing on the object itself — presumably to keep concurrent removals
 * of the same object from both reporting success (TODO confirm this locking convention is
 * shared by the add path). The unchecked cast to T is assumed safe once the primary index
 * reports containment.
 *
 * @param object the object to remove; null is rejected with false
 * @return true if the object was present and removed, false otherwise
 */
public boolean remove ( Object object ) { if ( object == null ) { return false ; } synchronized ( object ) { if ( mPrimaryIndex . containsObject ( ( T ) object ) ) { @ SuppressWarnings ( "unchecked" ) T tObj = ( T ) object ; removeFromIndices ( tObj ) ; return true ; } else { return false ; } } }
Removes an object from the set .
12,374
/**
 * Helper that removes the object from every registered field index.
 *
 * @param object the object to drop from the indices
 */
private void removeFromIndices(T object) {
  for (FieldIndex<T, ?> index : mIndices.values()) {
    index.remove(object);
  }
}
Helper method that removes an object from the indices .
12,375
/**
 * Removes every object whose indexed field equals the given value.
 *
 * @param indexDefinition the index to look up
 * @param value the field value identifying the objects to remove
 * @param <V> the field value type
 * @return the number of objects actually removed
 */
public <V> int removeByField(IndexDefinition<T, V> indexDefinition, V value) {
  int removedCount = 0;
  for (T candidate : getByField(indexDefinition, value)) {
    if (remove(candidate)) {
      removedCount++;
    }
  }
  return removedCount;
}
Removes the subset of objects with the specified index field value .
12,376
/**
 * Loads the cluster-level and path-level configuration defaults from the given master address
 * and replaces the cached configurations with the result.
 *
 * @param address the master address to load defaults from
 * @throws AlluxioStatusException if loading the defaults fails
 */
public synchronized void updateConfigurationDefaults(InetSocketAddress address)
    throws AlluxioStatusException {
  Pair<AlluxioConfiguration, PathConfiguration> loaded =
      ConfigurationUtils.loadClusterAndPathDefaults(address, mConf, mPathConf);
  mConf = loaded.getFirst();
  mPathConf = loaded.getSecond();
}
This method will attempt to load the cluster and path level configuration defaults and update the configuration if necessary .
12,377
/**
 * Waits for a primary master to be serving, then kills it.
 *
 * @param timeoutMs maximum time to wait for a primary, in milliseconds
 * @return the index of the killed primary master
 * @throws TimeoutException if no primary appears within the timeout
 * @throws InterruptedException if interrupted while waiting
 */
public synchronized int waitForAndKillPrimaryMaster(int timeoutMs)
    throws TimeoutException, InterruptedException {
  int primaryIndex = getPrimaryMasterIndex(timeoutMs);
  Master primary = mMasters.get(primaryIndex);
  primary.close();
  return primaryIndex;
}
Kills the primary master .
12,378
/**
 * Gets the index of the primary master. First waits until some primary is serving (probed by
 * issuing getStatus("/") through a filesystem client), then asks the inquire client for the
 * primary's RPC address and matches its port against the known master addresses. Matching is
 * by RPC port only, which assumes all local masters use distinct ports.
 *
 * @param timeoutMs maximum time to wait for a serving primary, in milliseconds
 * @return the index of the primary master in the master list
 * @throws TimeoutException if no primary starts serving within the timeout
 * @throws InterruptedException if interrupted while waiting
 */
public synchronized int getPrimaryMasterIndex ( int timeoutMs ) throws TimeoutException , InterruptedException { final FileSystem fs = getFileSystemClient ( ) ; final MasterInquireClient inquireClient = getMasterInquireClient ( ) ; CommonUtils . waitFor ( "a primary master to be serving" , ( ) -> { try { fs . getStatus ( new AlluxioURI ( "/" ) ) ; return true ; } catch ( UnavailableException e ) { return false ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } , WaitForOptions . defaults ( ) . setTimeoutMs ( timeoutMs ) ) ; int primaryRpcPort ; try { primaryRpcPort = inquireClient . getPrimaryRpcAddress ( ) . getPort ( ) ; } catch ( UnavailableException e ) { throw new RuntimeException ( e ) ; } for ( int i = 0 ; i < mMasterAddresses . size ( ) ; i ++ ) { if ( mMasterAddresses . get ( i ) . getRpcPort ( ) == primaryRpcPort ) { return i ; } } throw new RuntimeException ( String . format ( "No master found with RPC port %d. Master addresses: %s" , primaryRpcPort , mMasterAddresses ) ) ; }
Gets the index of the primary master .
12,379
/**
 * Waits until the meta master reports that every expected node (masters plus workers) has
 * registered, polling every 200 ms. The live-node count is the sum of reported master and
 * worker addresses, compared against the configured cluster size; a mismatch is logged with
 * the current address lists for debugging.
 *
 * @param timeoutMs maximum time to wait, in milliseconds
 * @throws TimeoutException if registration does not complete within the timeout
 * @throws InterruptedException if interrupted while waiting
 */
public synchronized void waitForAllNodesRegistered ( int timeoutMs ) throws TimeoutException , InterruptedException { MetaMasterClient metaMasterClient = getMetaMasterClient ( ) ; CommonUtils . waitFor ( "all nodes registered" , ( ) -> { try { MasterInfo masterInfo = metaMasterClient . getMasterInfo ( Collections . emptySet ( ) ) ; int liveNodeNum = masterInfo . getMasterAddressesList ( ) . size ( ) + masterInfo . getWorkerAddressesList ( ) . size ( ) ; if ( liveNodeNum == ( mNumMasters + mNumWorkers ) ) { return true ; } else { LOG . info ( "Master addresses: {}. Worker addresses: {}" , masterInfo . getMasterAddressesList ( ) , masterInfo . getWorkerAddressesList ( ) ) ; return false ; } } catch ( UnavailableException e ) { return false ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } , WaitForOptions . defaults ( ) . setInterval ( 200 ) . setTimeoutMs ( timeoutMs ) ) ; }
Waits for all nodes to be registered .
12,380
/**
 * Copies the cluster's work directory to the artifacts folder as a gzipped tarball, by
 * shelling out to "tar" (stdout/stderr appended to the tests log). The cluster must be in the
 * STARTED state. The interrupted flag is restored before rethrowing if the tar process wait
 * is interrupted.
 *
 * @throws IOException if creating the tarball or moving it into the artifacts dir fails
 */
public synchronized void saveWorkdir ( ) throws IOException { Preconditions . checkState ( mState == State . STARTED , "cluster must be started before you can save its work directory" ) ; ARTIFACTS_DIR . mkdirs ( ) ; File tarball = new File ( mWorkDir . getParentFile ( ) , mWorkDir . getName ( ) + ".tar.gz" ) ; ProcessBuilder pb = new ProcessBuilder ( "tar" , "-czf" , tarball . getName ( ) , mWorkDir . getName ( ) ) ; pb . directory ( mWorkDir . getParentFile ( ) ) ; pb . redirectOutput ( Redirect . appendTo ( TESTS_LOG ) ) ; pb . redirectError ( Redirect . appendTo ( TESTS_LOG ) ) ; Process p = pb . start ( ) ; try { p . waitFor ( ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new RuntimeException ( e ) ; } File finalTarball = new File ( ARTIFACTS_DIR , tarball . getName ( ) ) ; FileUtils . moveFile ( tarball , finalTarball ) ; LOG . info ( "Saved cluster {} to {}" , mClusterName , finalTarball . getAbsolutePath ( ) ) ; }
Copies the work directory to the artifacts folder .
12,381
/**
 * Destroys the cluster; a destroyed cluster may not be restarted. Idempotent: a second call
 * returns immediately. When the run was not marked successful, the work directory is saved to
 * the artifacts folder first so logs survive for debugging.
 *
 * @throws IOException if saving the work directory or closing resources fails
 */
public synchronized void destroy() throws IOException {
  if (mState == State.DESTROYED) {
    return;
  }
  if (!mSuccess) {
    // Keep logs and configs around when the test run failed.
    saveWorkdir();
  }
  mCloser.close();
  LOG.info("Destroyed cluster {}", mClusterName);
  mState = State.DESTROYED;
}
Destroys the cluster . It may not be re - started after being destroyed .
12,382
/**
 * Starts the master at the given index. The cluster must be in the STARTED state.
 *
 * @param i index of the master to start
 * @throws IOException if the master fails to start
 */
public synchronized void startMaster(int i) throws IOException {
  Preconditions.checkState(mState == State.STARTED,
      "Must be in a started state to start masters");
  Master master = mMasters.get(i);
  master.start();
}
Starts the specified master .
12,383
/**
 * Starts the worker at the given index. The cluster must be in the STARTED state.
 *
 * @param i index of the worker to start
 * @throws IOException if the worker fails to start
 */
public synchronized void startWorker(int i) throws IOException {
  Preconditions.checkState(mState == State.STARTED,
      "Must be in a started state to start workers");
  Worker worker = mWorkers.get(i);
  worker.start();
}
Starts the specified worker .
12,384
/**
 * Updates the cluster's deploy mode. When switching to EMBEDDED journal mode, each master's
 * configuration is rewritten with its embedded-journal port and a per-master journal
 * directory (created under the work dir). Note: no configuration is undone when switching
 * away from EMBEDDED — the method only adds settings for the EMBEDDED case.
 *
 * @param mode the new deploy mode
 */
public synchronized void updateDeployMode ( DeployMode mode ) { mDeployMode = mode ; if ( mDeployMode == DeployMode . EMBEDDED ) { for ( int i = 0 ; i < mMasters . size ( ) ; i ++ ) { Master master = mMasters . get ( i ) ; MasterNetAddress address = mMasterAddresses . get ( i ) ; master . updateConf ( PropertyKey . MASTER_EMBEDDED_JOURNAL_PORT , Integer . toString ( address . getEmbeddedJournalPort ( ) ) ) ; File journalDir = new File ( mWorkDir , "journal" + i ) ; journalDir . mkdirs ( ) ; master . updateConf ( PropertyKey . MASTER_JOURNAL_FOLDER , journalDir . getAbsolutePath ( ) ) ; } } }
Updates the cluster s deploy mode .
12,385
/**
 * Creates the i-th master without starting it. Builds per-master conf/metastore/logs
 * directories under the work dir, wires the master's hostname and RPC/web/embedded-journal
 * ports from the pre-allocated address list, and — in EMBEDDED deploy mode — also creates and
 * configures a per-master journal directory. The master is registered with the cluster's
 * closer so it is shut down on destroy. The cluster must be in the STARTED state.
 *
 * @param i index of the master to create
 * @return the created (not yet started) master
 * @throws IOException if creating the master fails
 */
private synchronized Master createMaster ( int i ) throws IOException { Preconditions . checkState ( mState == State . STARTED , "Must be in a started state to create masters" ) ; MasterNetAddress address = mMasterAddresses . get ( i ) ; File confDir = new File ( mWorkDir , "conf-master" + i ) ; File metastoreDir = new File ( mWorkDir , "metastore-master" + i ) ; File logsDir = new File ( mWorkDir , "logs-master" + i ) ; logsDir . mkdirs ( ) ; Map < PropertyKey , String > conf = new HashMap < > ( ) ; conf . put ( PropertyKey . LOGGER_TYPE , "MASTER_LOGGER" ) ; conf . put ( PropertyKey . CONF_DIR , confDir . getAbsolutePath ( ) ) ; conf . put ( PropertyKey . MASTER_METASTORE_DIR , metastoreDir . getAbsolutePath ( ) ) ; conf . put ( PropertyKey . LOGS_DIR , logsDir . getAbsolutePath ( ) ) ; conf . put ( PropertyKey . MASTER_HOSTNAME , address . getHostname ( ) ) ; conf . put ( PropertyKey . MASTER_RPC_PORT , Integer . toString ( address . getRpcPort ( ) ) ) ; conf . put ( PropertyKey . MASTER_WEB_PORT , Integer . toString ( address . getWebPort ( ) ) ) ; conf . put ( PropertyKey . MASTER_EMBEDDED_JOURNAL_PORT , Integer . toString ( address . getEmbeddedJournalPort ( ) ) ) ; if ( mDeployMode . equals ( DeployMode . EMBEDDED ) ) { File journalDir = new File ( mWorkDir , "journal" + i ) ; journalDir . mkdirs ( ) ; conf . put ( PropertyKey . MASTER_JOURNAL_FOLDER , journalDir . getAbsolutePath ( ) ) ; } Master master = mCloser . register ( new Master ( logsDir , conf ) ) ; mMasters . add ( master ) ; return master ; }
Creates the specified master without starting it .
12,386
/**
 * Creates the i-th worker without starting it. Builds per-worker conf/logs/ramdisk
 * directories under the work dir, allocates fresh RPC/data/web ports, and registers the
 * worker with the cluster's closer. The cluster must be in the STARTED state.
 * NOTE(review): dataPort is allocated and logged but never written into conf — confirm
 * whether the worker data port should also be configured here.
 *
 * @param i index of the worker to create
 * @return the created (not yet started) worker
 * @throws IOException if creating the worker fails
 */
private synchronized Worker createWorker ( int i ) throws IOException { Preconditions . checkState ( mState == State . STARTED , "Must be in a started state to create workers" ) ; File confDir = new File ( mWorkDir , "conf-worker" + i ) ; File logsDir = new File ( mWorkDir , "logs-worker" + i ) ; File ramdisk = new File ( mWorkDir , "ramdisk" + i ) ; logsDir . mkdirs ( ) ; ramdisk . mkdirs ( ) ; int rpcPort = getNewPort ( ) ; int dataPort = getNewPort ( ) ; int webPort = getNewPort ( ) ; Map < PropertyKey , String > conf = new HashMap < > ( ) ; conf . put ( PropertyKey . LOGGER_TYPE , "WORKER_LOGGER" ) ; conf . put ( PropertyKey . CONF_DIR , confDir . getAbsolutePath ( ) ) ; conf . put ( PropertyKey . Template . WORKER_TIERED_STORE_LEVEL_DIRS_PATH . format ( 0 ) , ramdisk . getAbsolutePath ( ) ) ; conf . put ( PropertyKey . LOGS_DIR , logsDir . getAbsolutePath ( ) ) ; conf . put ( PropertyKey . WORKER_RPC_PORT , Integer . toString ( rpcPort ) ) ; conf . put ( PropertyKey . WORKER_WEB_PORT , Integer . toString ( webPort ) ) ; Worker worker = mCloser . register ( new Worker ( logsDir , conf ) ) ; mWorkers . add ( worker ) ; LOG . info ( "Created worker with (rpc, data, web) ports ({}, {}, {})" , rpcPort , dataPort , webPort ) ; return worker ; }
Creates the specified worker without starting it .
12,387
/**
 * Formats the cluster journal. In EMBEDDED mode each master's journal directory is deleted
 * and recreated empty. Otherwise the shared journal is formatted through the Format utility,
 * temporarily applying the cluster's properties to the global server configuration via a
 * ConfigurationRule resource.
 *
 * @throws IOException if deleting/recreating an embedded journal directory fails
 */
public synchronized void formatJournal ( ) throws IOException { if ( mDeployMode == DeployMode . EMBEDDED ) { for ( Master master : mMasters ) { File journalDir = new File ( master . getConf ( ) . get ( PropertyKey . MASTER_JOURNAL_FOLDER ) ) ; FileUtils . deleteDirectory ( journalDir ) ; journalDir . mkdirs ( ) ; } return ; } try ( Closeable c = new ConfigurationRule ( mProperties , ServerConfiguration . global ( ) ) . toResource ( ) ) { Format . format ( Format . Mode . MASTER , ServerConfiguration . global ( ) ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } }
Formats the cluster journal .
12,388
/**
 * Writes the per-master and per-worker property overrides to their respective configuration
 * directories, using an empty override map for nodes with no specific properties.
 *
 * @throws IOException if writing any configuration file fails
 */
private void writeConf() throws IOException {
  for (int i = 0; i < mNumMasters; i++) {
    writeConfToFile(new File(mWorkDir, "conf-master" + i),
        mMasterProperties.getOrDefault(i, new HashMap<>()));
  }
  for (int i = 0; i < mNumWorkers; i++) {
    writeConfToFile(new File(mWorkDir, "conf-worker" + i),
        mWorkerProperties.getOrDefault(i, new HashMap<>()));
  }
}
Writes the contents of properties to the configuration file .
12,389
/**
 * Creates the given conf directory and writes an alluxio-site.properties file into it. The
 * written properties are the cluster-wide mProperties overlaid with the node-specific
 * overrides in the properties argument (node-specific values win). Lines are written as
 * key=value, UTF-8 encoded.
 *
 * @param dir the configuration directory to create and write into
 * @param properties node-specific property overrides
 * @throws IOException if writing the file fails
 */
private void writeConfToFile ( File dir , Map < PropertyKey , String > properties ) throws IOException { Map < PropertyKey , String > map = new HashMap < > ( mProperties ) ; for ( Map . Entry < PropertyKey , String > entry : properties . entrySet ( ) ) { map . put ( entry . getKey ( ) , entry . getValue ( ) ) ; } StringBuilder sb = new StringBuilder ( ) ; for ( Entry < PropertyKey , String > entry : map . entrySet ( ) ) { sb . append ( String . format ( "%s=%s%n" , entry . getKey ( ) , entry . getValue ( ) ) ) ; } dir . mkdirs ( ) ; try ( FileOutputStream fos = new FileOutputStream ( new File ( dir , "alluxio-site.properties" ) ) ) { fos . write ( sb . toString ( ) . getBytes ( Charsets . UTF_8 ) ) ; } }
Creates the conf directory and file . Writes the properties to the generated file .
12,390
/**
 * Sets the default ACL entries on this context.
 *
 * @param defaultAcl the default ACL entries to store
 * @return this context, for chaining
 */
public CreateDirectoryContext setDefaultAcl(List<AclEntry> defaultAcl) {
  // Immutable snapshot so later mutation of the caller's list cannot leak into the context.
  mDefaultAcl = ImmutableList.copyOf(defaultAcl);
  return getThis();
}
Sets the default ACL in the context .
12,391
/**
 * Schedules async persistence of the current file with the master. Builds the schedule
 * options from the path-level defaults for mUri, carrying over the common options from this
 * stream's own options, and issues the RPC through a master client acquired from (and
 * returned to) the context's resource pool.
 *
 * @throws IOException if the RPC to the master fails
 */
protected void scheduleAsyncPersist ( ) throws IOException { try ( CloseableResource < FileSystemMasterClient > masterClient = mContext . acquireMasterClientResource ( ) ) { ScheduleAsyncPersistencePOptions persistOptions = FileSystemOptions . scheduleAsyncPersistDefaults ( mContext . getPathConf ( mUri ) ) . toBuilder ( ) . setCommonOptions ( mOptions . getCommonOptions ( ) ) . build ( ) ; masterClient . get ( ) . scheduleAsyncPersist ( mUri , persistOptions ) ; } }
Schedules the async persistence of the current file .
12,392
/**
 * Prints a colored pass/fail message for a test result to stdout.
 *
 * @param pass true to print the green pass message, false for the red fail message
 */
public static void printPassInfo(boolean pass) {
  String message = pass
      ? Constants.ANSI_GREEN + "Passed the test!" + Constants.ANSI_RESET
      : Constants.ANSI_RED + "Failed the test!" + Constants.ANSI_RESET;
  System.out.println(message);
}
Prints information of the test result .
12,393
/**
 * Runs an example, treating any thrown exception as a failure, and prints the pass/fail
 * result.
 *
 * @param example the example to run
 * @return true if the example returned true without throwing
 */
public static boolean runExample(final Callable<Boolean> example) {
  boolean passed;
  try {
    passed = example.call();
  } catch (Exception e) {
    LOG.error("Exception running test: " + example, e);
    passed = false;
  }
  CliUtils.printPassInfo(passed);
  return passed;
}
Runs an example .
12,394
/**
 * Creates an Alluxio block store using the local tiered identity derived from the context's
 * cluster configuration (i.e. the default local hostname).
 *
 * @param context the file system context
 * @return a new AlluxioBlockStore
 */
public static AlluxioBlockStore create ( FileSystemContext context ) { return new AlluxioBlockStore ( context , TieredIdentityFactory . localIdentity ( context . getClusterConf ( ) ) ) ; }
Creates an Alluxio block store with default local hostname .
12,395
/**
 * Gets the block info of a block from the block master.
 *
 * @param blockId the id of the block to look up
 * @return the block's info
 * @throws IOException if the master RPC fails
 */
public BlockInfo getInfo(long blockId) throws IOException {
  try (CloseableResource<BlockMasterClient> client =
      mContext.acquireBlockMasterClientResource()) {
    return client.get().getBlockInfo(blockId);
  }
}
Gets the block info of a block if it exists .
12,396
/**
 * Gets a stream to read the data of a block. Delegates to the three-argument overload with an
 * empty map for the extra argument.
 *
 * @param blockId the id of the block to read
 * @param options the in-stream options
 * @return the input stream for the block
 * @throws IOException if creating the stream fails
 */
public BlockInStream getInStream(long blockId, InStreamOptions options) throws IOException {
  ImmutableMap<Object, Object> empty = ImmutableMap.of();
  return getInStream(blockId, options, empty);
}
Gets a stream to read the data of a block . This method is primarily responsible for determining the data source and type of data source . The latest BlockInfo will be fetched from the master to ensure the locations are up to date .
12,397
/**
 * Gets a stream to write data to a block on a specific worker; the stream can only be backed
 * by Alluxio storage. A blockSize of -1 is a sentinel meaning "look the length up from the
 * block master". A null worker address is treated as "no worker had space" and raises
 * ResourceExhaustedException.
 *
 * @param blockId the id of the block to write
 * @param blockSize the size of the block, or -1 to fetch it from the master
 * @param address the worker to write to; must not be null
 * @param options the out-stream options
 * @return the output stream for the block
 * @throws IOException if no worker address is given or stream creation fails
 */
public BlockOutStream getOutStream ( long blockId , long blockSize , WorkerNetAddress address , OutStreamOptions options ) throws IOException { if ( blockSize == - 1 ) { try ( CloseableResource < BlockMasterClient > blockMasterClientResource = mContext . acquireBlockMasterClientResource ( ) ) { blockSize = blockMasterClientResource . get ( ) . getBlockInfo ( blockId ) . getLength ( ) ; } } if ( address == null ) { throw new ResourceExhaustedException ( ExceptionMessage . NO_SPACE_FOR_BLOCK_ON_WORKER . getMessage ( FormatUtils . getSizeFromBytes ( blockSize ) ) ) ; } LOG . debug ( "Create block outstream for {} of block size {} at address {}, using options: {}" , blockId , blockSize , address , options ) ; return BlockOutStream . create ( mContext , blockId , blockSize , address , options ) ; }
Gets a stream to write data to a block . The stream can only be backed by Alluxio storage .
12,398
/**
 * Gets a stream to write data to a block, choosing worker(s) via the location policy in the
 * options; the stream can only be backed by Alluxio storage. The initial replica count is
 * replicationDurable when writing ASYNC_THROUGH with replicationDurable > replicationMin,
 * otherwise replicationMin. With at most one replica, a single worker is picked and the
 * single-worker overload is used. With multiple replicas, workers are picked one at a time;
 * after each pick, every worker on the chosen host is removed from the candidate list
 * (grouped by hostname up front) so replicas land on distinct hosts. Fails with
 * ResourceExhaustedException if fewer workers than required replicas could be selected.
 *
 * @param blockId the id of the block to write
 * @param blockSize the size of the block
 * @param options the out-stream options (must carry a location policy)
 * @return the (possibly replicated) output stream for the block
 * @throws IOException if no worker can be selected or stream creation fails
 */
public BlockOutStream getOutStream ( long blockId , long blockSize , OutStreamOptions options ) throws IOException { WorkerNetAddress address ; BlockLocationPolicy locationPolicy = Preconditions . checkNotNull ( options . getLocationPolicy ( ) , PreconditionMessage . BLOCK_WRITE_LOCATION_POLICY_UNSPECIFIED ) ; GetWorkerOptions workerOptions = GetWorkerOptions . defaults ( ) . setBlockInfo ( new BlockInfo ( ) . setBlockId ( blockId ) . setLength ( blockSize ) ) . setBlockWorkerInfos ( new ArrayList < > ( getEligibleWorkers ( ) ) ) ; int initialReplicas = ( options . getWriteType ( ) == WriteType . ASYNC_THROUGH && options . getReplicationDurable ( ) > options . getReplicationMin ( ) ) ? options . getReplicationDurable ( ) : options . getReplicationMin ( ) ; if ( initialReplicas <= 1 ) { address = locationPolicy . getWorker ( workerOptions ) ; if ( address == null ) { throw new UnavailableException ( ExceptionMessage . NO_SPACE_FOR_BLOCK_ON_WORKER . getMessage ( blockSize ) ) ; } return getOutStream ( blockId , blockSize , address , options ) ; } Map < String , Set < BlockWorkerInfo > > blockWorkersByHost = new HashMap < > ( ) ; for ( BlockWorkerInfo blockWorker : workerOptions . getBlockWorkerInfos ( ) ) { String hostName = blockWorker . getNetAddress ( ) . getHost ( ) ; if ( blockWorkersByHost . containsKey ( hostName ) ) { blockWorkersByHost . get ( hostName ) . add ( blockWorker ) ; } else { blockWorkersByHost . put ( hostName , com . google . common . collect . Sets . newHashSet ( blockWorker ) ) ; } } List < WorkerNetAddress > workerAddressList = new ArrayList < > ( ) ; List < BlockWorkerInfo > updatedInfos = Lists . newArrayList ( workerOptions . getBlockWorkerInfos ( ) ) ; for ( int i = 0 ; i < initialReplicas ; i ++ ) { address = locationPolicy . getWorker ( workerOptions ) ; if ( address == null ) { break ; } workerAddressList . add ( address ) ; updatedInfos . removeAll ( blockWorkersByHost . get ( address . getHost ( ) ) ) ; workerOptions .
setBlockWorkerInfos ( updatedInfos ) ; } if ( workerAddressList . size ( ) < initialReplicas ) { throw new alluxio . exception . status . ResourceExhaustedException ( String . format ( "Not enough workers for replications, %d workers selected but %d required" , workerAddressList . size ( ) , initialReplicas ) ) ; } return BlockOutStream . createReplicatedBlockOutStream ( mContext , blockId , blockSize , workerAddressList , options ) ; }
Gets a stream to write data to a block based on the options . The stream can only be backed by Alluxio storage .
12,399
/**
 * Gets the total capacity of Alluxio's BlockStore from the block master.
 *
 * @return the total capacity in bytes
 * @throws IOException if the master RPC fails
 */
public long getCapacityBytes() throws IOException {
  try (CloseableResource<BlockMasterClient> client =
      mContext.acquireBlockMasterClientResource()) {
    return client.get().getCapacityBytes();
  }
}
Gets the total capacity of Alluxio s BlockStore .