idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
12,200
/**
 * Shuts down the FileSystem. When caching is enabled, removes this instance from the factory
 * cache first, then closes the underlying context. Behavior of any operation invoked after
 * close is undefined. Idempotent: subsequent calls are no-ops.
 */
public synchronized void close() throws IOException {
  if (mClosed) {
    return;
  }
  mClosed = true;
  if (mCachingEnabled) {
    Factory.FILESYSTEM_CACHE.remove(new FileSystemKey(mFsContext.getClientContext()));
  }
  mFsContext.close();
}
Shuts down the FileSystem . Closes all thread pools and resources used to perform operations . If any operations are called after closing the context the behavior is undefined .
12,201
/**
 * Handles a request to create (or reserve space for) a local block. No exceptions should be
 * thrown from this handler; failures are reported through the response observer.
 *
 * @param request the create-local-block request; when onlyReserveSpace is set, the request is
 *        treated as a space reservation against the current session
 */
public void onNext(CreateLocalBlockRequest request) {
  // Used only for RPC logging/metrics naming.
  final String methodName = request.getOnlyReserveSpace() ? "ReserveSpace" : "CreateBlock";
  RpcUtils.streamingRPCAndLog(LOG,
      new RpcUtils.StreamingRpcCallable<CreateLocalBlockResponse>() {
    public CreateLocalBlockResponse call() throws Exception {
      if (request.getOnlyReserveSpace()) {
        // Reserve additional space for the block under the existing session.
        mBlockWorker.requestSpace(mSessionId, request.getBlockId(), request.getSpaceToReserve());
        return CreateLocalBlockResponse.newBuilder().build();
      } else {
        // Only one create request may be in flight per handler instance.
        Preconditions.checkState(mRequest == null);
        mRequest = request;
        if (mSessionId == INVALID_SESSION_ID) {
          mSessionId = IdUtils.createSessionId();
          // NOTE(review): assumes the request tier index maps to a storage alias via
          // mStorageTierAssoc — confirm against the tier configuration.
          String path = mBlockWorker.createBlock(mSessionId, request.getBlockId(),
              mStorageTierAssoc.getAlias(request.getTier()), request.getSpaceToReserve());
          CreateLocalBlockResponse response =
              CreateLocalBlockResponse.newBuilder().setPath(path).build();
          return response;
        } else {
          // A previous session was never closed; refuse to start another.
          LOG.warn("Create block {} without closing the previous session {}.",
              request.getBlockId(), mSessionId);
          throw new InvalidWorkerStateException(
              ExceptionMessage.SESSION_NOT_CLOSED.getMessage(mSessionId));
        }
      }
    }

    public void exceptionCaught(Throwable throwable) {
      if (mSessionId != INVALID_SESSION_ID) {
        // When the client opted out of cleanup on out-of-space failures, keep the session
        // alive so it can retry; only report the error.
        if (throwable instanceof alluxio.exception.WorkerOutOfSpaceException
            && request.hasCleanupOnFailure() && !request.getCleanupOnFailure()) {
          mResponseObserver.onError(GrpcExceptionUtils.fromThrowable(throwable));
          return;
        }
        mBlockWorker.cleanupSession(mSessionId);
        mSessionId = INVALID_SESSION_ID;
      }
      mResponseObserver.onError(GrpcExceptionUtils.fromThrowable(throwable));
    }
  }, methodName, true, false, mResponseObserver, "Session=%d, Request=%s", mSessionId, request);
}
Handles request to create local block . No exceptions should be thrown .
12,202
/**
 * Handles a complete-block request (commit or abort). No exceptions should be thrown;
 * failures are reported through the response observer.
 *
 * @param isCanceled true to abort the block, false to commit it
 */
public void handleBlockCompleteRequest(boolean isCanceled) {
  // Used only for RPC logging/metrics naming.
  final String methodName = isCanceled ? "AbortBlock" : "CommitBlock";
  RpcUtils.streamingRPCAndLog(LOG,
      new RpcUtils.StreamingRpcCallable<CreateLocalBlockResponse>() {
    public CreateLocalBlockResponse call() throws Exception {
      // Nothing to complete when no create request was ever received.
      if (mRequest == null) {
        return null;
      }
      // Fork the gRPC context so the worker call is not cancelled along with the RPC.
      Context newContext = Context.current().fork();
      Context previousContext = newContext.attach();
      try {
        if (isCanceled) {
          mBlockWorker.abortBlock(mSessionId, mRequest.getBlockId());
        } else {
          mBlockWorker.commitBlock(mSessionId, mRequest.getBlockId());
        }
      } finally {
        newContext.detach(previousContext);
      }
      mSessionId = INVALID_SESSION_ID;
      return null;
    }

    public void exceptionCaught(Throwable throwable) {
      mResponseObserver.onError(GrpcExceptionUtils.fromThrowable(throwable));
      mSessionId = INVALID_SESSION_ID;
    }
  }, methodName, false, !isCanceled, mResponseObserver, "Session=%d, Request=%s",
      mSessionId, mRequest);
}
Handles complete block request . No exceptions should be thrown .
12,203
/**
 * Prepares the configuration for WASB as an HDFS configuration: copies the base HDFS settings,
 * forwards any Azure account-key properties, and registers the WASB file system classes.
 *
 * @param conf the under file system configuration
 * @return the populated Hadoop configuration
 */
public static Configuration createConfiguration(UnderFileSystemConfiguration conf) {
  Configuration wasbConf = HdfsUnderFileSystem.createConfiguration(conf);
  conf.toMap().forEach((key, value) -> {
    // Only Azure account-key entries are forwarded verbatim.
    if (PropertyKey.Template.UNDERFS_AZURE_ACCOUNT_KEY.matches(key)) {
      wasbConf.set(key, value);
    }
  });
  wasbConf.set("fs.AbstractFileSystem.wasb.impl", "org.apache.hadoop.fs.azure.Wasb");
  wasbConf.set("fs.wasb.impl", "org.apache.hadoop.fs.azure.NativeAzureFileSystem");
  return wasbConf;
}
Prepares the configuration for this Wasb as an HDFS configuration .
12,204
/**
 * Returns a new CompletableFuture that is asynchronously completed by a task running in the
 * given executor, with the value obtained by calling the given Supplier.
 *
 * @param supplier a function returning the value used to complete the returned future
 * @param executor the executor to use for asynchronous execution
 * @param <U> the supplier's return type
 * @return the new CompletableFuture
 */
public static <U> CompletableFuture<U> supplyAsync(Supplier<U> supplier, Executor executor) {
  // screenExecutor validates/normalizes the executor before use.
  return asyncSupplyStage(screenExecutor(executor), supplier);
}
Returns a new CompletableFuture that is asynchronously completed by a task running in the given executor with the value obtained by calling the given Supplier .
12,205
/**
 * Returns a new CompletableFuture that is asynchronously completed by a task running in the
 * given executor, after it runs the given action.
 *
 * @param runnable the action to run before completing the returned future
 * @param executor the executor to use for asynchronous execution
 * @return the new CompletableFuture
 */
public static CompletableFuture<Void> runAsync(Runnable runnable, Executor executor) {
  // screenExecutor validates/normalizes the executor before use.
  return asyncRunStage(screenExecutor(executor), runnable);
}
Returns a new CompletableFuture that is asynchronously completed by a task running in the given executor after it runs the given action .
12,206
/**
 * Returns a new CompletableFuture that is already completed with the given value.
 *
 * @param value the result value; may be null
 * @param <U> the type of the value
 * @return the completed CompletableFuture
 */
public static <U> CompletableFuture<U> completedFuture(U value) {
  // NIL is the internal sentinel encoding a null result.
  return new CompletableFuture<U>((value == null) ? NIL : value);
}
Returns a new CompletableFuture that is already completed with the given value .
12,207
/**
 * Returns a new CompletableFuture that is completed when any of the given CompletableFutures
 * complete, with the same result. If it completed exceptionally, the returned future also does
 * so. If no futures are provided, returns an incomplete CompletableFuture.
 */
public static CompletableFuture<Object> anyOf(CompletableFuture<?>... cfs) {
  int n; Object r;
  // 0 inputs: incomplete future; 1 input: a copy of it.
  if ((n = cfs.length) <= 1)
    return (n == 0)
        ? new CompletableFuture<Object>()
        : (CompletableFuture<Object>) uniCopyStage(cfs[0]);
  // Fast path: some input is already complete.
  for (CompletableFuture<?> cf : cfs)
    if ((r = cf.result) != null)
      return new CompletableFuture<Object>(encodeRelay(r));
  // Defensive copy so a caller mutating the array cannot affect registration.
  cfs = cfs.clone();
  CompletableFuture<Object> d = new CompletableFuture<Object>();
  for (CompletableFuture<?> cf : cfs)
    cf.unipush(new AnyOf(d, cf, cfs));
  // If d completed while we were registering, clean the stacks of the inputs that are still
  // incomplete so dead completions don't linger.
  if (d.result != null)
    for (int i = 0, len = cfs.length; i < len; i++)
      if (cfs[i].result != null)
        for (i++; i < len; i++)
          if (cfs[i].result == null)
            cfs[i].cleanStack();
  return d;
}
Returns a new CompletableFuture that is completed when any of the given CompletableFutures complete with the same result . Otherwise if it completed exceptionally the returned CompletableFuture also does so with a CompletionException holding this exception as its cause . If no CompletableFutures are provided returns an incomplete CompletableFuture .
12,208
/**
 * Returns a new CompletableFuture that is already completed exceptionally with the given
 * exception.
 *
 * @param ex the exception; must not be null
 * @param <U> the type of the (never produced) value
 * @return the exceptionally completed CompletableFuture
 */
public static <U> CompletableFuture<U> failedFuture(Throwable ex) {
  // AltResult wraps the throwable as the internal exceptional-result encoding.
  return new CompletableFuture<U>(new AltResult(Objects.requireNonNull(ex)));
}
Returns a new CompletableFuture that is already completed exceptionally with the given exception .
12,209
/** Returns true if c was successfully CAS-pushed onto the head of the completion stack. */
final boolean tryPushStack(Completion c) {
  Completion h = stack;
  // Link c ahead of the observed head before the CAS publishes it.
  lazySetNext(c, h);
  return U.compareAndSwapObject(this, STACK, h, c);
}
Returns true if successfully pushed c onto stack .
12,210
/** Completes with a non-exceptional result, unless already completed. */
final boolean completeValue(T t) {
  // NIL is the sentinel encoding of a null result; CAS only succeeds on the first completion.
  return U.compareAndSwapObject(this, RESULT, null, (t == null) ? NIL : t);
}
Completes with a non - exceptional result unless already completed .
12,211
/**
 * Traverses the stack and unlinks one or more dead Completions, if found.
 */
final void cleanStack() {
  boolean unlinked = false;
  Completion p;
  // Pop dead completions off the head until the head is live (or the stack is empty).
  while ((p = stack) != null && !p.isLive())
    unlinked = casStack(p, p.next);
  // If the head is live and nothing was popped, unlink at most one dead interior node.
  if (p != null && !unlinked) {
    for (Completion q = p.next; q != null;) {
      Completion s = q.next;
      if (q.isLive()) {
        p = q;
        q = s;
      } else {
        casNext(p, q, s);
        break;
      }
    }
  }
}
Traverses stack and unlinks one or more dead Completions if found .
12,212
/**
 * Pushes the given completion unless this future completes while trying. Caller should first
 * check that result is null.
 */
final void unipush(Completion c) {
  if (c != null) {
    while (!tryPushStack(c)) {
      // Raced with completion: abandon the push and clear the stale next link.
      if (result != null) {
        lazySetNext(c, null);
        break;
      }
    }
    // If completed (possibly while pushing), fire the completion synchronously.
    if (result != null)
      c.tryFire(SYNC);
  }
}
Pushes the given completion unless it completes while trying . Caller should first check that result is null .
12,213
/**
 * Pushes completion to this and b, unless both are done. Caller should first check that either
 * result or b.result is null.
 */
final void bipush(CompletableFuture<?> b, BiCompletion<?, ?, ?> c) {
  if (c != null) {
    while (result == null) {
      if (tryPushStack(c)) {
        if (b.result == null)
          // Also register on b via a CoCompletion sharing c.
          b.unipush(new CoCompletion(c));
        else if (result != null)
          // Both sources completed during the push; fire now.
          c.tryFire(SYNC);
        return;
      }
    }
    // This future is already done; only b needs the completion.
    b.unipush(c);
  }
}
Pushes completion to this and b unless both done . Caller should first check that either result or b . result is null .
12,214
/**
 * Post-processing after a successful BiCompletion tryFire: tidies b's stack and, in non-nested
 * mode, propagates b's completion to its dependents, then delegates to the unary postFire.
 */
final CompletableFuture<T> postFire(CompletableFuture<?> a, CompletableFuture<?> b, int mode) {
  if (b != null && b.stack != null) {
    Object r;
    if ((r = b.result) == null)
      b.cleanStack();
    // mode >= 0 means not nested; safe to run b's dependent completions here.
    if (mode >= 0 && (r != null || b.result != null))
      b.postComplete();
  }
  return postFire(a, mode);
}
Post - processing after successful BiCompletion tryFire .
12,215
/**
 * Pushes completion to this and b, unless either is done. Caller should first check that result
 * and b.result are both null.
 */
final void orpush(CompletableFuture<?> b, BiCompletion<?, ?, ?> c) {
  if (c != null) {
    while (!tryPushStack(c)) {
      // Raced with completion: abandon the push and clear the stale link.
      if (result != null) {
        lazySetNext(c, null);
        break;
      }
    }
    if (result != null)
      // Completed while pushing; fire directly.
      c.tryFire(SYNC);
    else
      // Register on b via a CoCompletion sharing c.
      b.unipush(new CoCompletion(c));
  }
}
Pushes completion to this and b unless either done . Caller should first check that result and b . result are both null .
12,216
/**
 * Waits if necessary for at most the given time for this future to complete, and then returns
 * its result, if available.
 *
 * @param timeout the maximum time to wait
 * @param unit the time unit of the timeout argument
 * @return the result value
 * @throws InterruptedException if the current thread was interrupted while waiting
 * @throws ExecutionException if this future completed exceptionally
 * @throws TimeoutException if the wait timed out
 */
public T get(long timeout, TimeUnit unit)
    throws InterruptedException, ExecutionException, TimeoutException {
  long nanos = unit.toNanos(timeout);
  Object r;
  // Fast path when already completed; otherwise perform a timed wait.
  if ((r = result) == null)
    r = timedGet(nanos);
  // reportGet decodes the internal result encoding and rethrows exceptional outcomes.
  return (T) reportGet(r);
}
Waits if necessary for at most the given time for this future to complete and then returns its result if available .
12,217
/**
 * Returns the estimated number of CompletableFutures whose completions are awaiting completion
 * of this CompletableFuture. This method is designed for use in monitoring system state, not
 * for synchronization control.
 *
 * @return the number of dependent CompletableFutures
 */
public int getNumberOfDependents() {
  int count = 0;
  // Walk the completion stack; the count is an estimate under concurrent mutation.
  for (Completion p = stack; p != null; p = p.next)
    ++count;
  return count;
}
Returns the estimated number of CompletableFutures whose completions are awaiting completion of this CompletableFuture . This method is designed for use in monitoring system state not for synchronization control .
12,218
/**
 * Completes this CompletableFuture with the result of the given Supplier, invoked from an
 * asynchronous task using the given executor.
 *
 * @param supplier a function returning the completion value; must not be null
 * @param executor the executor to run the supplier on; must not be null
 * @return this CompletableFuture
 */
public CompletableFuture<T> completeAsync(Supplier<? extends T> supplier, Executor executor) {
  if (supplier == null || executor == null)
    throw new NullPointerException();
  executor.execute(new AsyncSupply<T>(this, supplier));
  return this;
}
Completes this CompletableFuture with the result of the given Supplier function invoked from an asynchronous task using the given executor .
12,219
/**
 * Completes this CompletableFuture with the given value if not otherwise completed before the
 * given timeout.
 *
 * @param value the value to complete with on timeout; may be null
 * @param timeout how long to wait before completing
 * @param unit the time unit of the timeout argument; must not be null
 * @return this CompletableFuture
 */
public CompletableFuture<T> completeOnTimeout(T value, long timeout, TimeUnit unit) {
  Objects.requireNonNull(unit);
  // Schedule a delayed completer; the Canceller removes it if this future completes first.
  if (result == null)
    whenComplete(new Canceller(Delayer.delay(
        new DelayedCompleter<T>(this, value), timeout, unit)));
  return this;
}
Completes this CompletableFuture with the given value if not otherwise completed before the given timeout .
12,220
/**
 * Starts the Alluxio proxy.
 *
 * @param args no arguments are accepted; any argument prints usage and exits with -1
 */
public static void main(String[] args) {
  if (args.length != 0) {
    // Print the expected invocation and bail out.
    LOG.info("java -cp {} {}", RuntimeConstants.ALLUXIO_JAR,
        AlluxioProxy.class.getCanonicalName());
    System.exit(-1);
  }
  // The proxy cannot run without a configured master host.
  if (!ConfigurationUtils.masterHostConfigured(ServerConfiguration.global())) {
    ProcessUtils.fatalError(LOG,
        ConfigurationUtils.getMasterHostNotConfiguredMessage("Alluxio proxy"));
  }
  CommonUtils.PROCESS_TYPE.set(CommonUtils.ProcessType.PROXY);
  ProxyProcess process = ProxyProcess.Factory.create();
  ProcessUtils.run(process);
}
Starts the Alluxio proxy .
12,221
/**
 * Converts a normal ACL string to one representing a default ACL: the entry is returned as-is
 * when its first component is already the default keyword, otherwise the default prefix is
 * prepended.
 *
 * @param stringEntry the ACL string entry
 * @return the default-ACL representation of the entry
 * @throws IllegalArgumentException if the input is null
 */
public static String toDefault(String stringEntry) {
  if (stringEntry == null) {
    throw new IllegalArgumentException("Input acl string is null");
  }
  // Only the first ':'-separated component matters; no need to materialize a trimmed list of
  // all components. split(":") may return an empty array (e.g. for ":"), hence the guard.
  String[] components = stringEntry.split(":");
  if (components.length > 0 && components[0].trim().equals(DEFAULT_KEYWORD)) {
    return stringEntry;
  }
  return DEFAULT_PREFIX + stringEntry;
}
Convert a normal ACL to a string representing a default ACL .
12,222
/**
 * Chooses a worker in a round-robin manner over the worker list snapshotted (and shuffled) on
 * first use. A cached block location is reused when that worker is still eligible. Workers that
 * are no longer active, or lack capacity for the block, are skipped.
 *
 * @param options the options carrying the block info and current worker list
 * @return the chosen worker address, or null if no eligible worker was found
 */
public WorkerNetAddress getWorker(GetWorkerOptions options) {
  Set<WorkerNetAddress> eligibleAddresses = new HashSet<>();
  for (BlockWorkerInfo info : options.getBlockWorkerInfos()) {
    eligibleAddresses.add(info.getNetAddress());
  }
  // Reuse the cached location only when that worker is still in the current list.
  WorkerNetAddress address = mBlockLocationCache.get(options.getBlockInfo().getBlockId());
  if (address != null && eligibleAddresses.contains(address)) {
    return address;
  } else {
    address = null;
  }
  // Lazily snapshot and shuffle the worker list on the first call; subsequent calls iterate
  // this fixed order.
  if (!mInitialized) {
    mWorkerInfoList = Lists.newArrayList(options.getBlockWorkerInfos());
    Collections.shuffle(mWorkerInfoList);
    mIndex = 0;
    mInitialized = true;
  }
  for (int i = 0; i < mWorkerInfoList.size(); i++) {
    WorkerNetAddress candidate = mWorkerInfoList.get(mIndex).getNetAddress();
    BlockWorkerInfo workerInfo = findBlockWorkerInfo(options.getBlockWorkerInfos(), candidate);
    mIndex = (mIndex + 1) % mWorkerInfoList.size();
    // Accept only workers that are still active, eligible, and large enough for the block.
    if (workerInfo != null
        && workerInfo.getCapacityBytes() >= options.getBlockInfo().getLength()
        && eligibleAddresses.contains(candidate)) {
      address = candidate;
      break;
    }
  }
  // Cache the outcome (possibly null) for this block id.
  mBlockLocationCache.put(options.getBlockInfo().getBlockId(), address);
  return address;
}
The policy uses the first fetch of the worker info list as the base and visits each worker in a round-robin manner in subsequent calls. The policy doesn't assume that the list of worker info in subsequent calls has the same order as the first, and it will skip workers that are no longer active.
12,223
/**
 * Wraps an InodeView, providing read-only access. Modifications to the underlying inode will
 * affect the created read-only inode.
 *
 * @param delegate the inode view to wrap
 * @return a read-only Inode backed by the delegate
 */
public static Inode wrap(InodeView delegate) {
  // Already a read-only wrapper: return it unchanged.
  if (delegate instanceof Inode) {
    return (Inode) delegate;
  }
  if (!delegate.isFile()) {
    Preconditions.checkState(delegate instanceof InodeDirectoryView);
    return new InodeDirectory((InodeDirectoryView) delegate);
  }
  Preconditions.checkState(delegate instanceof InodeFileView);
  return new InodeFile((InodeFileView) delegate);
}
Wraps an InodeView providing read - only access . Modifications to the underlying inode will affect the created read - only inode .
12,224
/**
 * Validates a read request: the block id must be non-negative, the offset non-negative, and
 * the length strictly positive.
 *
 * @param request the read request to validate
 * @throws InvalidArgumentException if any bound is invalid
 */
private void validateReadRequest(alluxio.grpc.ReadRequest request)
    throws InvalidArgumentException {
  long blockId = request.getBlockId();
  if (blockId < 0) {
    throw new InvalidArgumentException(
        String.format("Invalid blockId (%d) in read request.", blockId));
  }
  boolean boundsValid = request.getOffset() >= 0 && request.getLength() > 0;
  if (!boundsValid) {
    throw new InvalidArgumentException(
        String.format("Invalid read bounds in read request %s.", request.toString()));
  }
}
Validates a read request .
12,225
/**
 * Allocates the containers specified by the constructor, retrying up to
 * MAX_WORKER_CONTAINER_REQUEST_ATTEMPTS rounds until the target count is reached.
 *
 * @return the allocated containers
 * @throws RuntimeException if the target number of containers could not be allocated
 */
public List<Container> allocateContainers() throws Exception {
  for (int attempt = 0; attempt < MAX_WORKER_CONTAINER_REQUEST_ATTEMPTS; attempt++) {
    LOG.debug("Attempt {} of {} to allocate containers",
        attempt, MAX_WORKER_CONTAINER_REQUEST_ATTEMPTS);
    int numContainersToRequest = mTargetNumContainers - mAllocatedContainerHosts.size();
    LOG.debug("Requesting {} containers", numContainersToRequest);
    // The latch is counted down elsewhere as outstanding requests resolve.
    mOutstandingContainerRequestsLatch = new CountDownLatch(numContainersToRequest);
    requestContainers();
    mOutstandingContainerRequestsLatch.await();
    if (mAllocatedContainerHosts.size() == mTargetNumContainers) {
      break;
    }
  }
  // NOTE(review): the loop tracks mAllocatedContainerHosts but this check uses
  // mAllocatedContainers — presumably one container per host; confirm they stay in sync.
  if (mAllocatedContainers.size() != mTargetNumContainers) {
    throw new RuntimeException(String.format("Failed to allocate %d %s containers",
        mTargetNumContainers, mContainerName));
  }
  return mAllocatedContainers;
}
Allocates the containers specified by the constructor .
12,226
/**
 * Sleeps for the given number of milliseconds, reporting interruptions using the given logger.
 * The thread's interrupt status is restored before logging.
 *
 * @param logger the logger to report interruptions with, may be null
 * @param timeMs how long to sleep, in milliseconds
 */
public static void sleepMs(Logger logger, long timeMs) {
  try {
    Thread.sleep(timeMs);
  } catch (InterruptedException e) {
    // Preserve the interrupt status for callers that poll it.
    Thread.currentThread().interrupt();
    if (logger == null) {
      return;
    }
    logger.warn(e.getMessage(), e);
  }
}
Sleeps for the given number of milliseconds reporting interruptions using the given logger .
12,227
/**
 * Validates the environment on a remote node by invoking "alluxio validateEnv" over ssh.
 *
 * @param node the host to validate
 * @param target the validation target
 * @param name task-name filter, may be null
 * @param cmd parsed command line whose remaining arguments are forwarded
 * @return true if the remote validation exited with status 0
 */
private static boolean validateRemote(String node, String target, String name, CommandLine cmd)
    throws InterruptedException {
  System.out.format("Validating %s environment on %s...%n", target, node);
  // Fail fast when the node's ssh port is unreachable.
  if (!Utils.isAddressReachable(node, 22)) {
    System.err.format("Unable to reach ssh port 22 on node %s.%n", node);
    return false;
  }
  String argStr = String.join(" ", cmd.getArgs());
  String homeDir = ServerConfiguration.get(PropertyKey.HOME);
  String remoteCommand = String.format("%s/bin/alluxio validateEnv %s %s %s",
      homeDir, target, name == null ? "" : name, argStr);
  String localCommand = String.format(
      "ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no -tt %s \"bash %s\"",
      node, remoteCommand);
  String[] command = {"bash", "-c", localCommand};
  try {
    ProcessBuilder builder = new ProcessBuilder(command);
    // Surface the remote command's output/input directly on this process's streams.
    builder.redirectErrorStream(true);
    builder.redirectOutput(ProcessBuilder.Redirect.INHERIT);
    builder.redirectInput(ProcessBuilder.Redirect.INHERIT);
    Process process = builder.start();
    process.waitFor();
    return process.exitValue() == 0;
  } catch (IOException e) {
    System.err.format("Unable to validate on node %s: %s.%n", node, e.getMessage());
    return false;
  }
}
Validates the environment on a remote node over ssh.
12,228
/**
 * Runs validation tasks in the local environment for the given target, optionally filtered by
 * a task-name prefix, printing color-coded per-task results and a summary.
 *
 * @param target the validation target
 * @param name task-name prefix to run; null runs all tasks
 * @param cmd parsed command line whose options are passed to each task
 * @return true if at least one task matched and none failed
 */
private static boolean validateLocal(String target, String name, CommandLine cmd)
    throws InterruptedException {
  int validationCount = 0;
  Map<ValidationTask.TaskResult, Integer> results = new HashMap<>();
  Map<String, String> optionsMap = new HashMap<>();
  for (Option opt : cmd.getOptions()) {
    optionsMap.put(opt.getOpt(), opt.getValue());
  }
  Collection<ValidationTask> tasks = TARGET_TASKS.get(target);
  System.out.format("Validating %s environment...%n", target);
  for (ValidationTask task : tasks) {
    String taskName = TASKS.get(task);
    // Skip tasks not matching the requested name prefix.
    if (name != null && !taskName.startsWith(name)) {
      continue;
    }
    System.out.format("Validating %s...%n", taskName);
    ValidationTask.TaskResult result = task.validate(optionsMap);
    results.put(result, results.getOrDefault(result, 0) + 1);
    // Color-code the printed result per outcome.
    switch (result) {
      case OK:
        System.out.print(Constants.ANSI_GREEN);
        break;
      case WARNING:
        System.out.print(Constants.ANSI_YELLOW);
        break;
      case FAILED:
        System.out.print(Constants.ANSI_RED);
        break;
      case SKIPPED:
        System.out.print(Constants.ANSI_PURPLE);
        break;
      default:
        break;
    }
    System.out.print(result.name());
    System.out.println(Constants.ANSI_RESET);
    validationCount++;
  }
  // Summarize the non-OK outcomes on stderr.
  if (results.containsKey(ValidationTask.TaskResult.FAILED)) {
    System.err.format("%d failures ", results.get(ValidationTask.TaskResult.FAILED));
  }
  if (results.containsKey(ValidationTask.TaskResult.WARNING)) {
    System.err.format("%d warnings ", results.get(ValidationTask.TaskResult.WARNING));
  }
  if (results.containsKey(ValidationTask.TaskResult.SKIPPED)) {
    System.err.format("%d skipped ", results.get(ValidationTask.TaskResult.SKIPPED));
  }
  System.err.println();
  if (validationCount == 0) {
    System.err.format("No validation task matched name \"%s\".%n", name);
    return false;
  }
  if (results.containsKey(ValidationTask.TaskResult.FAILED)) {
    return false;
  }
  System.out.println("Validation succeeded.");
  return true;
}
Runs validation tasks in the local environment.
12,229
/**
 * Validates the environment.
 *
 * @param argv first element is the target (or "list"); an optional second positional element
 *        is a task-name filter; remaining elements are options
 * @return 0 on success, -1 on invalid options, -2 when no target is given, otherwise the
 *         result of running the tasks
 */
public static int validate(String... argv) throws InterruptedException {
  if (argv.length < 1) {
    printHelp("Target not specified.");
    return -2;
  }
  String command = argv[0];
  String name = null;
  String[] args;
  int argsLength = 0;
  // Count the leading positional (non "-" prefixed) arguments.
  while (argsLength < argv.length && !argv[argsLength].startsWith("-")) {
    argsLength++;
  }
  if (argsLength > 1) {
    // A second positional argument is the task-name filter.
    name = argv[1];
    args = Arrays.copyOfRange(argv, 2, argv.length);
  } else {
    args = Arrays.copyOfRange(argv, 1, argv.length);
  }
  CommandLine cmd;
  try {
    cmd = parseArgsAndOptions(OPTIONS, args);
  } catch (InvalidArgumentException e) {
    System.err.format("Invalid argument: %s.%n", e.getMessage());
    return -1;
  }
  if (command != null && command.equals("list")) {
    printTasks();
    return 0;
  }
  return runTasks(command, name, cmd);
}
Validates environment .
12,230
/**
 * Locality comparison for wire-type locality tiers. Two tiers match if both name and value are
 * equal, or, for the node tier (when IP resolution is enabled), if the node names resolve to
 * the same IP address.
 *
 * @param tier a locality tier
 * @param otherTier the locality tier to compare against
 * @param resolveIpAddress whether node names may be resolved to IP addresses for comparison
 * @return whether the two tiers match
 */
public static boolean matches(TieredIdentity.LocalityTier tier,
    TieredIdentity.LocalityTier otherTier, boolean resolveIpAddress) {
  String otherTierName = otherTier.getTierName();
  if (!tier.getTierName().equals(otherTierName)) {
    return false;
  }
  String otherTierValue = otherTier.getValue();
  if (tier.getValue() != null && tier.getValue().equals(otherTierValue)) {
    return true;
  }
  // IP comparison applies only to the node tier when resolution is allowed.
  if (!resolveIpAddress || !Constants.LOCALITY_NODE.equals(tier.getTierName())) {
    return false;
  }
  try {
    String tierIpAddress = NetworkAddressUtils.resolveIpAddress(tier.getValue());
    String otherTierIpAddress = NetworkAddressUtils.resolveIpAddress(otherTierValue);
    return tierIpAddress != null && tierIpAddress.equals(otherTierIpAddress);
  } catch (UnknownHostException e) {
    return false;
  }
}
Locality comparison for wire type locality tiers two locality tiers matches if both name and values are equal or for the node tier if the node names resolve to the same IP address .
12,231
/**
 * Dumps a ufs journal in human-readable format.
 *
 * @param args command line arguments; parsed by parseInputArgs
 * @throws IOException if reading or dumping the journal fails
 */
public static void main(String[] args) throws IOException {
  // Bad arguments: print usage and exit with failure.
  if (!parseInputArgs(args)) {
    usage();
    System.exit(EXIT_FAILED);
  }
  // -h/--help style flag: print usage and exit successfully.
  if (sHelp) {
    usage();
    System.exit(EXIT_SUCCEEDED);
  }
  dumpJournal();
}
Dumps a ufs journal in human - readable format .
12,232
public static void main ( String [ ] args ) { LOG . info ( "Validating configuration." ) ; try { new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) . validate ( ) ; LOG . info ( "Configuration is valid." ) ; } catch ( IllegalStateException e ) { LOG . error ( "Configuration is invalid: {}" , e . getMessage ( ) ) ; System . exit ( - 1 ) ; } System . exit ( 0 ) ; }
Console program that validates the configuration .
12,233
/**
 * Entry point for managing Alluxio extensions; exits with the shell's return code.
 *
 * @param args shell arguments
 */
public static void main(String[] args) {
  ExtensionsShell extensionShell = new ExtensionsShell(ServerConfiguration.global());
  System.exit(extensionShell.run(args));
}
Manage Alluxio extensions .
12,234
/**
 * Runs the specified job and waits for it to finish. If the job fails, it is retried the given
 * number of times. If the job does not complete in the given number of attempts, an exception
 * is thrown.
 *
 * @param config the job configuration
 * @param attempts the number of attempts before giving up
 * @param alluxioConf the Alluxio configuration
 * @throws RuntimeException if the job never completes or is never canceled
 */
public static void run(JobConfig config, int attempts, AlluxioConfiguration alluxioConf)
    throws InterruptedException {
  CountingRetry retryPolicy = new CountingRetry(attempts);
  while (retryPolicy.attempt()) {
    long jobId;
    try (JobMasterClient client = JobMasterClient.Factory.create(
        JobMasterClientContext.newBuilder(ClientContext.create(alluxioConf)).build())) {
      jobId = client.run(config);
    } catch (Exception e) {
      // A submission failure consumes one attempt; retry.
      LOG.warn("Exception encountered when starting a job.", e);
      continue;
    }
    JobInfo jobInfo = waitFor(jobId, alluxioConf);
    if (jobInfo == null) {
      // NOTE(review): a null JobInfo aborts retries and falls through to the failure below.
      break;
    }
    // CANCELED is treated as a terminal success for the purposes of this helper.
    if (jobInfo.getStatus() == Status.COMPLETED || jobInfo.getStatus() == Status.CANCELED) {
      return;
    }
    LOG.warn("Job {} failed to complete: {}", jobId, jobInfo.getErrorMessage());
  }
  throw new RuntimeException("Failed to successfully complete the job.");
}
Runs the specified job and waits for it to finish . If the job fails it is retried the given number of times . If the job does not complete in the given number of attempts an exception is thrown .
12,235
/**
 * Creates a daemon thread which writes "." to the given print stream at the given interval.
 * The created thread is not started by this method; it halts when interrupted.
 *
 * @param intervalMs interval between dots, in milliseconds
 * @param stream the stream to print progress dots to
 * @return the (unstarted) daemon progress thread
 */
public static Thread createProgressThread(final long intervalMs, final PrintStream stream) {
  Runnable ticker = new Runnable() {
    public void run() {
      for (;;) {
        CommonUtils.sleepMs(intervalMs);
        // Stop quietly once interrupted.
        if (Thread.interrupted()) {
          return;
        }
        stream.print(".");
      }
    }
  };
  Thread progressThread = new Thread(ticker);
  progressThread.setDaemon(true);
  return progressThread;
}
Creates a thread which will write . to the given print stream at the given interval . The created thread is not started by this method . The created thread will be daemonic and will halt when interrupted .
12,236
/**
 * Finds an available dir in a given tier for a block of blockSize bytes, resuming the
 * round-robin scan from just after the last dir used for this tier.
 *
 * @param tierView the tier to scan
 * @param blockSize the required space in bytes
 * @return the index of a dir with enough available space, or -1 if none
 */
private int getNextAvailDirInTier(StorageTierView tierView, long blockSize) {
  final int numDirs = tierView.getDirViews().size();
  int dirViewIndex = mTierAliasToLastDirMap.get(tierView.getTierViewAlias());
  for (int visited = 0; visited < numDirs; visited++) {
    dirViewIndex = (dirViewIndex + 1) % numDirs;
    if (tierView.getDirView(dirViewIndex).getAvailableBytes() >= blockSize) {
      return dirViewIndex;
    }
  }
  return -1;
}
Finds an available dir in a given tier for a block with blockSize .
12,237
/**
 * Creates a temp block meta only if the allocator finds available space. This method will not
 * trigger any eviction.
 *
 * @param newBlock true to also verify the temp block id is not already in use
 * @return the created temp block meta, or null when the allocator found no space
 * @throws BlockAlreadyExistsException if newBlock is true and the id is taken
 */
private TempBlockMeta createBlockMetaInternal(long sessionId, long blockId,
    BlockStoreLocation location, long initialBlockSize, boolean newBlock)
    throws BlockAlreadyExistsException {
  try (LockResource r = new LockResource(mMetadataWriteLock)) {
    if (newBlock) {
      checkTempBlockIdAvailable(blockId);
    }
    StorageDirView dirView =
        mAllocator.allocateBlockWithView(sessionId, initialBlockSize, location, getUpdatedView());
    if (dirView == null) {
      // No dir has enough space; the caller decides whether to evict and retry.
      return null;
    }
    TempBlockMeta tempBlock = dirView.createTempBlockMeta(sessionId, blockId, initialBlockSize);
    try {
      mMetaManager.addTempBlockMeta(tempBlock);
    } catch (WorkerOutOfSpaceException | BlockAlreadyExistsException e) {
      // Space was just allocated above, so registering the meta should never fail.
      LOG.error("Unexpected failure: {} bytes allocated at {} by allocator, "
          + "but addTempBlockMeta failed", initialBlockSize, location);
      throw Throwables.propagate(e);
    }
    return tempBlock;
  }
}
Creates a temp block meta only if allocator finds available space . This method will not trigger any eviction .
12,238
/**
 * Increases the temp block size only if this temp block's parent dir has enough available
 * space.
 *
 * @param blockId the id of the temp block to grow
 * @param additionalBytes how many extra bytes to reserve
 * @return (true, null) on success; (false, location-needing-space) when the dir is full
 * @throws BlockDoesNotExistException if no temp block meta exists for the id
 */
private Pair<Boolean, BlockStoreLocation> requestSpaceInternal(long blockId, long additionalBytes)
    throws BlockDoesNotExistException {
  try (LockResource r = new LockResource(mMetadataWriteLock)) {
    TempBlockMeta tempBlockMeta = mMetaManager.getTempBlockMeta(blockId);
    if (tempBlockMeta.getParentDir().getAvailableBytes() < additionalBytes) {
      return new Pair<>(false, tempBlockMeta.getBlockLocation());
    }
    try {
      mMetaManager.resizeTempBlockMeta(tempBlockMeta,
          tempBlockMeta.getBlockSize() + additionalBytes);
    } catch (InvalidWorkerStateException e) {
      // The meta was fetched as a temp block above, so this should never happen.
      throw Throwables.propagate(e);
    }
    return new Pair<>(true, null);
  }
}
Increases the temp block size only if this temp block's parent dir has enough available space.
12,239
/**
 * Removes a block: deletes the backing file, then drops its metadata, holding a write lock on
 * the block for the duration.
 *
 * @throws InvalidWorkerStateException if the block is still uncommitted (a temp block)
 * @throws BlockDoesNotExistException if the block is not found at the given location
 * @throws IOException if deleting the block file fails
 */
private void removeBlockInternal(long sessionId, long blockId, BlockStoreLocation location)
    throws InvalidWorkerStateException, BlockDoesNotExistException, IOException {
  long lockId = mLockManager.lockBlock(sessionId, blockId, BlockLockType.WRITE);
  try {
    String filePath;
    BlockMeta blockMeta;
    try (LockResource r = new LockResource(mMetadataReadLock)) {
      // Uncommitted (temp) blocks must be aborted, not removed.
      if (mMetaManager.hasTempBlockMeta(blockId)) {
        throw new InvalidWorkerStateException(ExceptionMessage.REMOVE_UNCOMMITTED_BLOCK, blockId);
      }
      blockMeta = mMetaManager.getBlockMeta(blockId);
      filePath = blockMeta.getPath();
    }
    if (!blockMeta.getBlockLocation().belongsTo(location)) {
      throw new BlockDoesNotExistException(ExceptionMessage.BLOCK_NOT_FOUND_AT_LOCATION,
          blockId, location);
    }
    // Delete the data file first, then remove the metadata under the write lock.
    Files.delete(Paths.get(filePath));
    try (LockResource r = new LockResource(mMetadataWriteLock)) {
      mMetaManager.removeBlockMeta(blockMeta);
    } catch (BlockDoesNotExistException e) {
      // The meta was fetched above, so this should never happen.
      throw Throwables.propagate(e);
    }
  } finally {
    mLockManager.unlockBlock(lockId);
  }
}
Removes a block .
12,240
/**
 * Replaces the set of pinned inodes with the given set.
 *
 * @param inodes the new pinned inode ids; must not be null
 */
public void updatePinnedInodes(Set<Long> inodes) {
  LOG.debug("updatePinnedInodes: inodes={}", inodes);
  // Swap the contents atomically with respect to other readers of mPinnedInodes.
  synchronized (mPinnedInodes) {
    mPinnedInodes.clear();
    mPinnedInodes.addAll(Preconditions.checkNotNull(inodes));
  }
}
Updates the set of pinned inodes.
12,241
/**
 * Removes a storage directory from its parent tier, notifying registered listeners of every
 * block lost with it and of the lost storage path.
 *
 * @param dir the storage directory to remove
 */
public void removeDir(StorageDir dir) {
  try (LockResource r = new LockResource(mMetadataWriteLock)) {
    // Capture the alias before detaching the dir from its tier.
    String tierAlias = dir.getParentTier().getTierAlias();
    dir.getParentTier().removeStorageDir(dir);
    synchronized (mBlockStoreEventListeners) {
      for (BlockStoreEventListener listener : mBlockStoreEventListeners) {
        // Every block in the dir is lost along with the storage.
        dir.getBlockIds().forEach(listener::onBlockLost);
        listener.onStorageLost(tierAlias, dir.getDirPath());
      }
    }
  }
}
Removes a storage directory .
12,242
/**
 * Removes the value set for key, along with its recorded source.
 *
 * @param key the key to remove
 */
public void remove(PropertyKey key) {
  // Only drop the source entry when the key was actually user-set.
  if (!mUserProps.containsKey(key)) {
    return;
  }
  mUserProps.remove(key);
  mSources.remove(key);
}
Remove the value set for key .
12,243
/**
 * Checks if there is a value set for the given key, either explicitly by the user or via a
 * default value on the registered key definition.
 *
 * @param key the key to check
 * @return whether a value is available for the key
 */
public boolean isSet(PropertyKey key) {
  if (isSetByUser(key)) {
    return true;
  }
  // Round-trip through the name so the registered definition's default is consulted.
  PropertyKey registered = PropertyKey.fromString(key.toString());
  return registered.getDefaultValue() != null;
}
Checks if there is a value set for the given key .
12,244
/**
 * Iterates over all the key-value pairs and performs the given action.
 *
 * @param action the action to perform for each entry
 */
public void forEach(BiConsumer<? super PropertyKey, ? super String> action) {
  entrySet().forEach(entry -> action.accept(entry.getKey(), entry.getValue()));
}
Iterates over all the key value pairs and performs the given action .
12,245
/**
 * Sets the source for a given key, overwriting any previously recorded source.
 *
 * @param key the key
 * @param source the source to record for the key
 */
public void setSource(PropertyKey key, Source source) {
  mSources.put(key, source);
}
Sets the source for a given key .
12,246
/**
 * Records a value for the given metric at the current time, creating the time series on first
 * use.
 *
 * @param metric the metric name
 * @param value the value to record
 */
public void record(String metric, double value) {
  mTimeSeries.compute(metric, (metricName, series) -> {
    // Lazily create the series for a metric on its first recording.
    TimeSeries target = (series == null) ? new TimeSeries(metricName) : series;
    target.record(value);
    return target;
  });
}
Records a value for the given metric at the current time .
12,247
/**
 * Releases an input stream. Streams that are not cached seekable streams (or when caching is
 * disabled) are closed immediately; otherwise the stream is returned to its id set, and closed
 * if it has already expired from the cache.
 *
 * @param inputStream the stream to release
 * @throws IOException if closing the stream fails
 */
public void release(InputStream inputStream) throws IOException {
  if (!(inputStream instanceof CachedSeekableInputStream) || !CACHE_ENABLED) {
    inputStream.close();
    return;
  }
  // Cast once instead of re-casting on every access; single map lookup instead of
  // containsKey + get.
  CachedSeekableInputStream cachedStream = (CachedSeekableInputStream) inputStream;
  synchronized (mFileIdToInputStreamIds) {
    UfsInputStreamIdSet resources = mFileIdToInputStreamIds.get(cachedStream.getFileId());
    if (resources == null) {
      LOG.debug("The resource {} is already expired", cachedStream.getResourceId());
      inputStream.close();
      return;
    }
    if (!resources.release(cachedStream.getResourceId())) {
      // The cache evicted this stream while it was in use; close it now.
      LOG.debug("Close the expired input stream resource of {}", cachedStream.getResourceId());
      inputStream.close();
    }
  }
}
Releases an input stream. The input stream is closed if it's already expired.
12,248
/**
 * Invalidates an input stream from the cache, then releases it (which closes it now that it is
 * expired).
 *
 * @param inputStream the stream to invalidate
 * @throws IOException if closing the stream fails
 */
public void invalidate(CachedSeekableInputStream inputStream) throws IOException {
  mUnderFileInputStreamCache.invalidate(inputStream.getResourceId());
  release(inputStream);
}
Invalidates an input stream from the cache .
12,249
/**
 * Acquires an input stream. For seekable input streams, an available cached stream is reused
 * and repositioned when possible; otherwise a new stream is opened.
 *
 * @return the acquired input stream
 * @throws IOException if opening or seeking the stream fails
 */
public InputStream acquire(UnderFileSystem ufs, String path, long fileId, OpenOptions openOptions)
    throws IOException {
  // Delegate with reuse enabled.
  return acquire(ufs, path, fileId, openOptions, true);
}
Acquires an input stream . For seekable input streams if there is an available input stream in the cache reuse it and repositions the offset otherwise the manager opens a new input stream .
12,250
/**
 * Acquires an input stream. For seekable input streams, if reuse is requested and a cached
 * stream for the same file id is available, it is repositioned to the requested offset and
 * returned; otherwise a new stream is opened (and cached) under a freshly generated resource id.
 * Falls back to a plain (uncached) stream when the UFS is not seekable, caching is disabled,
 * or cache population fails.
 *
 * NOTE(review): the per-file id set is looked up/created under the map lock, then mutated
 * under its own lock — presumably to keep the map lock short; verify no eviction race removes
 * the set between the two synchronized blocks.
 *
 * @param ufs the under file system
 * @param path the file path in the under file system
 * @param fileId the file id
 * @param openOptions the open options (offset is honored)
 * @param reuse whether an existing cached stream may be reused
 * @return an input stream positioned at the requested offset
 * @throws IOException if opening or seeking the stream fails
 */
public InputStream acquire ( UnderFileSystem ufs , String path , long fileId , OpenOptions openOptions , boolean reuse ) throws IOException { if ( ! ufs . isSeekable ( ) || ! CACHE_ENABLED ) { return ufs . openExistingFile ( path , openOptions ) ; } mUnderFileInputStreamCache . cleanUp ( ) ; UfsInputStreamIdSet resources ; synchronized ( mFileIdToInputStreamIds ) { if ( mFileIdToInputStreamIds . containsKey ( fileId ) ) { resources = mFileIdToInputStreamIds . get ( fileId ) ; } else { resources = new UfsInputStreamIdSet ( ) ; mFileIdToInputStreamIds . put ( fileId , resources ) ; } } synchronized ( resources ) { long nextId = UNAVAILABLE_RESOURCE_ID ; CachedSeekableInputStream inputStream = null ; if ( reuse ) { for ( long id : resources . availableIds ( ) ) { inputStream = mUnderFileInputStreamCache . getIfPresent ( id ) ; if ( inputStream != null ) { nextId = id ; LOG . debug ( "Reused the under file input stream resource of {}" , nextId ) ; inputStream . seek ( openOptions . getOffset ( ) ) ; break ; } } } if ( nextId == UNAVAILABLE_RESOURCE_ID ) { nextId = IdUtils . getRandomNonNegativeLong ( ) ; final long newId = nextId ; try { inputStream = mUnderFileInputStreamCache . get ( nextId , ( ) -> { SeekableUnderFileInputStream ufsStream = ( SeekableUnderFileInputStream ) ufs . openExistingFile ( path , OpenOptions . defaults ( ) . setOffset ( openOptions . getOffset ( ) ) ) ; LOG . debug ( "Created the under file input stream resource of {}" , newId ) ; return new CachedSeekableInputStream ( ufsStream , newId , fileId , path ) ; } ) ; } catch ( ExecutionException e ) { LOG . warn ( "Failed to create a new cached ufs instream of file id {} and path {}" , fileId , path ) ; return ufs . openExistingFile ( path , OpenOptions . defaults ( ) . setOffset ( openOptions . getOffset ( ) ) ) ; } } resources . acquire ( nextId ) ; return inputStream ; } }
Acquires an input stream . For seekable input streams if there is an available input stream in the cache and reuse mode is specified reuse it and repositions the offset otherwise the manager opens a new input stream .
12,251
/**
 * Heartbeats to the leader master. Registers first when this master has no id yet, then
 * executes the command returned by the leader. On I/O failure the client is disconnected
 * so the next heartbeat reconnects.
 */
public void heartbeat() {
  MetaCommand command = null;
  try {
    if (mMasterId.get() == UNINITIALIZED_MASTER_ID) {
      setIdAndRegister();
    }
    command = mMasterClient.heartbeat(mMasterId.get());
    handleCommand(command);
  } catch (IOException e) {
    // command stays null if the failure happened before/while receiving it.
    if (command == null) {
      LOG.error("Failed to receive leader master heartbeat command.", e);
    } else {
      LOG.error("Failed to execute leader master heartbeat command: {}", command, e);
    }
    mMasterClient.disconnect();
  }
}
Heartbeats to the leader master node .
12,252
/**
 * Handles a leader master command.
 *
 * @param cmd the command to handle, may be null (treated as a no-op)
 * @throws IOException if re-registration fails
 */
private void handleCommand(MetaCommand cmd) throws IOException {
  if (cmd == null) {
    return;
  }
  switch (cmd) {
    case MetaCommand_Nothing:
      break;
    case MetaCommand_Register:
      // Leader lost track of this master; register again.
      setIdAndRegister();
      break;
    case MetaCommand_Unknown:
      LOG.error("Master heartbeat sends unknown command {}", cmd);
      break;
    default:
      throw new RuntimeException("Un-recognized command from leader master " + cmd);
  }
}
Handles a leader master command .
12,253
/**
 * Obtains a master id from the leader and registers this master's configuration with it.
 *
 * @throws IOException if the RPC to the leader fails
 */
private void setIdAndRegister() throws IOException {
  mMasterId.set(mMasterClient.getId(mMasterAddress));
  mMasterClient.register(mMasterId.get(),
      ConfigurationUtils.getConfiguration(ServerConfiguration.global(), Scope.MASTER));
}
Sets the master id and registers with the Alluxio leader master .
12,254
/**
 * Finds the directory view in a tier view with the most free space that can still hold
 * the block, or null when no directory has enough space.
 *
 * @param tierView the tier to search
 * @param blockSize the size of the block in bytes
 * @return the best candidate directory view, or null if none fits
 */
private StorageDirView getCandidateDirInTier(StorageTierView tierView, long blockSize) {
  StorageDirView best = null;
  // Start the bar just below the block size so any winner can actually hold the block.
  long bestAvailable = blockSize - 1;
  for (StorageDirView dirView : tierView.getDirViews()) {
    long available = dirView.getAvailableBytes();
    if (available > bestAvailable) {
      bestAvailable = available;
      best = dirView;
    }
  }
  return best;
}
Finds a directory view in a tier view that has max free space and is able to store the block .
12,255
/**
 * Removes a directory from this tier and records its path as lost storage.
 *
 * @param dir the storage directory to remove
 */
public void removeStorageDir(StorageDir dir) {
  // Only shrink capacity if the dir was actually tracked.
  if (mDirs.remove(dir)) {
    mCapacityBytes -= dir.getCapacityBytes();
  }
  // NOTE(review): the path is recorded as lost regardless of membership — confirm intended.
  mLostStorage.add(dir.getDirPath());
}
Removes a directory .
12,256
/**
 * Calls the given callable and wraps the result in a REST response.
 *
 * @param callable the callable to invoke
 * @param alluxioConf the Alluxio configuration
 * @param <T> the callable's result type
 * @return the REST response
 */
public static <T> Response call(RestUtils.RestCallable<T> callable,
    AlluxioConfiguration alluxioConf) {
  // Delegate with no extra headers.
  return call(callable, alluxioConf, null);
}
Call response .
12,257
/**
 * Makes the response builder CORS compatible by attaching the standard CORS headers.
 *
 * @param responseBuilder the builder to decorate
 * @param returnMethod the value for Access-Control-Allow-Headers; skipped when empty
 * @return the decorated builder
 */
public static Response.ResponseBuilder makeCORS(Response.ResponseBuilder responseBuilder,
    String returnMethod) {
  Response.ResponseBuilder rb = responseBuilder
      .header("Access-Control-Allow-Origin", "*")
      .header("Access-Control-Allow-Methods", "GET, POST, OPTIONS");
  if (!"".equals(returnMethod)) {
    rb.header("Access-Control-Allow-Headers", returnMethod);
  }
  return rb;
}
Makes the responseBuilder CORS compatible .
12,258
/**
 * Creates the job master process and runs it on a background thread, publishing the chosen
 * RPC port into the server configuration.
 *
 * @throws IOException if the master cannot be created
 * @throws ConnectionFailedException if the master cannot connect
 */
private void startMaster() throws IOException, ConnectionFailedException {
  mMaster = AlluxioJobMasterProcess.Factory.create();
  ServerConfiguration.set(PropertyKey.JOB_MASTER_RPC_PORT,
      String.valueOf(mMaster.getRpcAddress().getPort()));
  mMasterThread = new Thread(() -> {
    try {
      mMaster.start();
    } catch (Exception e) {
      throw new RuntimeException(e + " \n Start Master Error \n" + e.getMessage(), e);
    }
  });
  mMasterThread.start();
}
Runs a master .
12,259
/**
 * Creates the job worker process and runs it on a background thread.
 *
 * @throws IOException if the worker cannot be created
 * @throws ConnectionFailedException if the worker cannot connect
 */
private void startWorker() throws IOException, ConnectionFailedException {
  mWorker = JobWorkerProcess.Factory.create();
  mWorkerThread = new Thread(() -> {
    try {
      mWorker.start();
    } catch (Exception e) {
      throw new RuntimeException(e + " \n Start Worker Error \n" + e.getMessage(), e);
    }
  });
  mWorkerThread.start();
}
Runs a worker .
12,260
/**
 * Sets an entry into the access control list. An existing entry with the same type and
 * subject is overwritten; otherwise the entry is added. Base entries (owning user/group,
 * other) cannot be set through this method.
 *
 * @param entry the ACL entry to set
 */
public void setEntry(AclEntry entry) {
  switch (entry.getType()) {
    case NAMED_USER:
      mNamedUserActions.put(entry.getSubject(), entry.getActions());
      return;
    case NAMED_GROUP:
      mNamedGroupActions.put(entry.getSubject(), entry.getActions());
      return;
    case MASK:
      mMaskActions = entry.getActions();
      return;
    case OWNING_USER:
    case OWNING_GROUP:
    case OTHER:
      throw new IllegalStateException("Deleting base entry is not allowed. entry: " + entry);
    default:
      throw new IllegalStateException("Unknown ACL entry type: " + entry.getType());
  }
}
Sets an entry into the access control list . If an entry with the same type and subject already exists overwrites the existing entry ; Otherwise adds this new entry .
12,261
/**
 * Updates the mask to be the union of the given (owning group) actions, all named user
 * entries, and all named group entries.
 *
 * BUGFIX/cleanup: the original re-added every action already contained in the merged result
 * via an inner per-action loop — a no-op after {@code merge}, which already unions the
 * entry's actions into the result. The redundant loops are removed; behavior is unchanged.
 *
 * @param groupActions the owning group actions to seed the mask with
 */
public void updateMask(AclActions groupActions) {
  AclActions result = new AclActions(groupActions);
  for (AclActions userActions : mNamedUserActions.values()) {
    result.merge(userActions);
  }
  for (AclActions namedGroupActions : mNamedGroupActions.values()) {
    result.merge(namedGroupActions);
  }
  mMaskActions = result;
}
Updates the mask to be the union of the owning group entry, the named user entries, and the named group entries.
12,262
/**
 * Implements the Spark-with-Alluxio integration check: verifies the driver can load Alluxio
 * classes and filesystem, checks HA support, then runs the executor-side Spark job.
 *
 * @param sc the Spark context
 * @param reportWriter the report output
 * @param conf the Alluxio configuration
 * @return the overall check status
 */
private Status run(JavaSparkContext sc, PrintWriter reportWriter, AlluxioConfiguration conf) {
  Status driverStatus = CheckerUtils.performIntegrationChecks();
  String driverAddress = sc.getConf().get("spark.driver.host");
  switch (driverStatus) {
    case FAIL_TO_FIND_CLASS:
      reportWriter.printf("Spark driver: %s failed to recognize Alluxio classes.%n%n",
          driverAddress);
      return driverStatus;
    case FAIL_TO_FIND_FS:
      reportWriter.printf("Spark driver: %s failed to recognize Alluxio filesystem.%n%n",
          driverAddress);
      return driverStatus;
    default:
      reportWriter.printf("Spark driver: %s can recognize Alluxio filesystem.%n%n",
          driverAddress);
      break;
  }
  if (!CheckerUtils.supportAlluxioHA(reportWriter, conf)) {
    return Status.FAIL_TO_SUPPORT_HA;
  }
  return runSparkJob(sc, reportWriter);
}
Implements Spark with Alluxio integration checker .
12,263
/**
 * Runs a Spark job checking whether every executor can recognize the Alluxio filesystem.
 * Statuses are reduced per key by concatenating distinct host lists, then grouped by status
 * for reporting.
 *
 * @param sc the Spark context
 * @param reportWriter the report output
 * @return the aggregated check status across executors
 */
private Status runSparkJob(JavaSparkContext sc, PrintWriter reportWriter) {
  List<Integer> nums = IntStream.rangeClosed(1, mPartitions).boxed().collect(Collectors.toList());
  JavaRDD<Integer> dataSet = sc.parallelize(nums, mPartitions);
  JavaPairRDD<Status, String> extractedStatus = dataSet.mapToPair(
      s -> new Tuple2<>(CheckerUtils.performIntegrationChecks(), CheckerUtils.getLocalAddress()));
  JavaPairRDD<Status, String> mergeStatus = extractedStatus.reduceByKey(
      (a, b) -> a.contains(b) ? a : (b.contains(a) ? b : a + " " + b),
      (mPartitions < 10 ? 1 : mPartitions / 10));
  mSparkJobResult = mergeStatus.collect();
  Map<Status, List<String>> resultMap = new HashMap<>();
  for (Tuple2<Status, String> op : mSparkJobResult) {
    resultMap.computeIfAbsent(op._1, k -> new ArrayList<>()).add(op._2);
  }
  return CheckerUtils.printNodesResults(resultMap, reportWriter);
}
Spark job to check whether Spark executors can recognize Alluxio filesystem .
12,264
/**
 * Saves related Spark configuration information to the report.
 *
 * @param conf the Spark configuration
 * @param reportWriter the report output
 */
private void printConfigInfo(SparkConf conf, PrintWriter reportWriter) {
  // Each pair is {config key, printf template}; only present keys are reported.
  String[][] entries = {
      {"spark.master", "Spark master is: %s.%n%n"},
      {"spark.submit.deployMode", "spark-submit deploy mode is: %s.%n%n"},
      {"spark.driver.extraClassPath", "spark.driver.extraClassPath includes jar paths: %s.%n%n"},
      {"spark.executor.extraClassPath",
          "spark.executor.extraClassPath includes jar paths: %s.%n%n"},
  };
  for (String[] entry : entries) {
    if (conf.contains(entry[0])) {
      reportWriter.printf(entry[1], conf.get(entry[0]));
    }
  }
}
Saves related Spark and Alluxio configuration information .
12,265
/**
 * Saves the Spark-with-Alluxio integration check result to the report.
 *
 * @param resultStatus the final status
 * @param reportWriter the report output
 */
private void printResultInfo(Status resultStatus, PrintWriter reportWriter) {
  switch (resultStatus) {
    case FAIL_TO_FIND_CLASS:
      reportWriter.println(FAIL_TO_FIND_CLASS_MESSAGE);
      reportWriter.println(TEST_FAILED_MESSAGE);
      break;
    case FAIL_TO_FIND_FS:
      reportWriter.println(FAIL_TO_FIND_FS_MESSAGE);
      reportWriter.println(TEST_FAILED_MESSAGE);
      break;
    case FAIL_TO_SUPPORT_HA:
      reportWriter.println(FAIL_TO_SUPPORT_HA_MESSAGE);
      reportWriter.println(TEST_FAILED_MESSAGE);
      break;
    default:
      reportWriter.println(TEST_PASSED_MESSAGE);
      break;
  }
}
Saves the Spark with Alluxio integration checker results.
12,266
/**
 * Entry point, triggered via spark-submit: runs the integration checker and exits with 0 on
 * success, 1 otherwise.
 *
 * @param args command-line arguments parsed by JCommander
 * @throws Exception on unrecoverable failures
 */
public static void main(String[] args) throws Exception {
  AlluxioConfiguration alluxioConf = new InstancedConfiguration(ConfigurationUtils.defaults());
  SparkIntegrationChecker checker = new SparkIntegrationChecker();
  JCommander jCommander = new JCommander(checker, args);
  jCommander.setProgramName("SparkIntegrationChecker");
  try (PrintWriter reportWriter = CheckerUtils.initReportFile()) {
    SparkConf conf = new SparkConf().setAppName(SparkIntegrationChecker.class.getName());
    JavaSparkContext sc = new JavaSparkContext(conf);
    checker.printConfigInfo(conf, reportWriter);
    Status resultStatus = checker.run(sc, reportWriter, alluxioConf);
    checker.printResultInfo(resultStatus, reportWriter);
    reportWriter.flush();
    System.exit(resultStatus.equals(Status.SUCCESS) ? 0 : 1);
  }
}
Main function will be triggered via spark - submit .
12,267
/**
 * Parses the alluxio-fuse CLI options: -m (required mount point), -r (required alluxio root),
 * -o (comma-separated FUSE mount options), -h (help). When the user does not pass a
 * max_write FUSE option, a default -omax_write is appended from
 * PropertyKey.FUSE_MAXWRITE_BYTES.
 *
 * @param args the raw command-line arguments
 * @param alluxioConf the Alluxio configuration
 * @return the parsed options, or null when help was requested or parsing failed
 */
private static AlluxioFuseOptions parseOptions ( String [ ] args , AlluxioConfiguration alluxioConf ) { final Options opts = new Options ( ) ; final Option mntPoint = Option . builder ( "m" ) . hasArg ( ) . required ( true ) . longOpt ( "mount-point" ) . desc ( "Desired local mount point for alluxio-fuse." ) . build ( ) ; final Option alluxioRoot = Option . builder ( "r" ) . hasArg ( ) . required ( true ) . longOpt ( "alluxio-root" ) . desc ( "Path within alluxio that will be used as the root of the FUSE mount " + "(e.g., /users/foo; defaults to /)" ) . build ( ) ; final Option help = Option . builder ( "h" ) . required ( false ) . desc ( "Print this help" ) . build ( ) ; final Option fuseOption = Option . builder ( "o" ) . valueSeparator ( ',' ) . required ( false ) . hasArgs ( ) . desc ( "FUSE mount options" ) . build ( ) ; opts . addOption ( mntPoint ) ; opts . addOption ( alluxioRoot ) ; opts . addOption ( help ) ; opts . addOption ( fuseOption ) ; final CommandLineParser parser = new DefaultParser ( ) ; try { CommandLine cli = parser . parse ( opts , args ) ; if ( cli . hasOption ( "h" ) ) { final HelpFormatter fmt = new HelpFormatter ( ) ; fmt . printHelp ( AlluxioFuse . class . getName ( ) , opts ) ; return null ; } String mntPointValue = cli . getOptionValue ( "m" ) ; String alluxioRootValue = cli . getOptionValue ( "r" ) ; List < String > fuseOpts = new ArrayList < > ( ) ; boolean noUserMaxWrite = true ; if ( cli . hasOption ( "o" ) ) { String [ ] fopts = cli . getOptionValues ( "o" ) ; for ( final String fopt : fopts ) { fuseOpts . add ( "-o" + fopt ) ; if ( noUserMaxWrite && fopt . startsWith ( "max_write" ) ) { noUserMaxWrite = false ; } } } if ( noUserMaxWrite ) { final long maxWrite = alluxioConf . getBytes ( PropertyKey . FUSE_MAXWRITE_BYTES ) ; fuseOpts . add ( String . format ( "-omax_write=%d" , maxWrite ) ) ; } final boolean fuseDebug = alluxioConf . getBoolean ( PropertyKey . 
FUSE_DEBUG_ENABLED ) ; return new AlluxioFuseOptions ( mntPointValue , alluxioRootValue , fuseDebug , fuseOpts ) ; } catch ( ParseException e ) { System . err . println ( "Error while parsing CLI: " + e . getMessage ( ) ) ; final HelpFormatter fmt = new HelpFormatter ( ) ; fmt . printHelp ( AlluxioFuse . class . getName ( ) , opts ) ; return null ; } }
Parses CLI options .
12,268
/**
 * Adds a node carrying the given payload to the DAG, depending on the specified parents.
 * A node with no parents becomes a root.
 *
 * @param payload the payload to add; must not already exist in the DAG
 * @param parents payloads of parent nodes; every parent must already exist
 */
public void add(T payload, List<T> parents) {
  Preconditions.checkState(!contains(payload), "the payload already exists in the DAG");
  DirectedAcyclicGraphNode<T> newNode = new DirectedAcyclicGraphNode<>(payload);
  mIndex.put(payload, newNode);
  if (parents.isEmpty()) {
    mRoots.add(newNode);
    return;
  }
  for (T parent : parents) {
    Preconditions.checkState(contains(parent),
        "the parent payload " + parent + " does not exist in the DAG");
    DirectedAcyclicGraphNode<T> parentNode = mIndex.get(parent);
    parentNode.addChild(newNode);
    newNode.addParent(parentNode);
  }
}
Adds a node to the DAG that takes the given payload and depends on the specified parents .
12,269
/**
 * Deletes a leaf DAG node carrying the given payload, detaching it from all its parents.
 *
 * @param payload the payload of the leaf to delete; must exist and have no children
 */
public void deleteLeaf(T payload) {
  Preconditions.checkState(contains(payload), "the node does not exist");
  DirectedAcyclicGraphNode<T> node = mIndex.get(payload);
  Preconditions.checkState(node.getChildren().isEmpty(), "the node is not a leaf");
  for (DirectedAcyclicGraphNode<T> parent : node.getParents()) {
    parent.removeChild(node);
  }
  mIndex.remove(payload);
  // A parentless leaf was a root; drop it from the root set as well.
  if (node.getParents().isEmpty()) {
    mRoots.remove(node);
  }
}
Deletes a leaf DAG node that carries the given payload .
12,270
/**
 * Gets the payloads of the children of the given node.
 *
 * @param payload the payload whose children to look up
 * @return the children's payloads, empty if the payload is not in the DAG
 */
public List<T> getChildren(T payload) {
  List<T> children = new ArrayList<>();
  DirectedAcyclicGraphNode<T> node = mIndex.get(payload);
  if (node != null) {
    for (DirectedAcyclicGraphNode<T> child : node.getChildren()) {
      children.add(child.getPayload());
    }
  }
  return children;
}
Gets the payloads for the children of the given node .
12,271
/**
 * Gets the payloads of the given node's parents.
 *
 * @param payload the payload whose parents to look up
 * @return the parents' payloads, empty if the payload is not in the DAG
 */
public List<T> getParents(T payload) {
  List<T> parents = new ArrayList<>();
  DirectedAcyclicGraphNode<T> node = mIndex.get(payload);
  if (node != null) {
    for (DirectedAcyclicGraphNode<T> parent : node.getParents()) {
      parents.add(parent.getPayload());
    }
  }
  return parents;
}
Gets the payloads for the given node's parents.
12,272
/**
 * Checks whether the given payload is a root of the DAG.
 *
 * @param payload the payload to check
 * @return true when the payload exists and its node is a root
 */
public boolean isRoot(T payload) {
  return contains(payload) && mRoots.contains(mIndex.get(payload));
}
Checks if a given payload is in a root of the DAG .
12,273
/**
 * Gets the payloads of all root nodes of the DAG.
 *
 * @return the root payloads
 */
public List<T> getRoots() {
  List<T> roots = new ArrayList<>(mRoots.size());
  for (DirectedAcyclicGraphNode<T> root : mRoots) {
    roots.add(root.getPayload());
  }
  return roots;
}
Gets the payloads of all the root nodes of the DAG .
12,274
/**
 * Sorts the given payloads topologically based on the DAG via a BFS from the roots.
 * All payloads must be present in the DAG.
 *
 * BUGFIX: the failure-message template was missing its {@code %s} placeholder, so the
 * offending payloads were never formatted into the exception message (Guava appends
 * unmatched args awkwardly instead).
 *
 * @param payloads the payloads to sort; every element must be in the DAG
 * @return the payloads in topological order
 */
public List<T> sortTopologically(Set<T> payloads) {
  List<T> result = new ArrayList<>();
  Set<T> input = new HashSet<>(payloads);
  Deque<DirectedAcyclicGraphNode<T>> toVisit = new ArrayDeque<>(mRoots);
  while (!toVisit.isEmpty()) {
    DirectedAcyclicGraphNode<T> visit = toVisit.removeFirst();
    T payload = visit.getPayload();
    if (input.remove(payload)) {
      result.add(payload);
    }
    toVisit.addAll(visit.getChildren());
  }
  Preconditions.checkState(input.isEmpty(),
      "Not all the given payloads are in the DAG: %s", input);
  return result;
}
Sorts a given set of payloads topologically based on the DAG . This method requires all the payloads to be in the DAG .
12,275
/**
 * Authenticates the given call against the auth-server state. Fails the call when it does
 * not originate from an authenticated client channel; otherwise installs the channel's user
 * info into the thread-local authentication context.
 *
 * BUGFIX: the debug log used a printf-style "%s" placeholder, but SLF4J substitutes "{}",
 * so the method name was never logged.
 *
 * @param call the server call to authenticate
 * @param headers the call metadata carrying the channel id
 * @return true when the call is authenticated, false otherwise (the call is closed)
 */
private <ReqT, RespT> boolean authenticateCall(ServerCall<ReqT, RespT> call, Metadata headers) {
  if (call.isCancelled()) {
    LOG.debug("Server call has been cancelled: {}",
        call.getMethodDescriptor().getFullMethodName());
    return false;
  }
  UUID channelId = headers.get(ChannelIdInjector.S_CLIENT_ID_KEY);
  boolean callAuthenticated = false;
  if (channelId != null) {
    try {
      AuthenticatedUserInfo userInfo = mAuthenticationServer.getUserInfoForChannel(channelId);
      if (userInfo != null) {
        AuthenticatedClientUser.set(userInfo.getAuthorizedUserName());
        AuthenticatedClientUser.setConnectionUser(userInfo.getConnectionUserName());
        AuthenticatedClientUser.setAuthMethod(userInfo.getAuthMethod());
      } else {
        // No user info: clear any stale thread-local user.
        AuthenticatedClientUser.remove();
      }
      callAuthenticated = true;
    } catch (UnauthenticatedException e) {
      String message = String.format("Channel: %s is not authenticated for call: %s",
          channelId.toString(), call.getMethodDescriptor().getFullMethodName());
      call.close(Status.UNAUTHENTICATED.withDescription(message), headers);
    }
  } else {
    String message = String.format("Channel Id is missing for call: %s.",
        call.getMethodDescriptor().getFullMethodName());
    call.close(Status.UNAUTHENTICATED.withDescription(message), headers);
  }
  return callAuthenticated;
}
Authenticates the given call against the auth-server state. Fails the call if it is not originating from an authenticated client channel. It sets thread-local authentication information for the call with the user information that is kept on the auth-server.
12,276
/**
 * Runs the doctor storage command, reporting any lost worker storage paths.
 *
 * Idiom fix: {@code size() == 0} / {@code size() != 0} replaced with {@code isEmpty()}.
 *
 * @return 0 always (lost storage is reported, not treated as a command failure)
 * @throws IOException if fetching lost storage from the block master fails
 */
public int run() throws IOException {
  List<WorkerLostStorageInfo> workerLostStorageList = mBlockMasterClient.getWorkerLostStorage();
  if (workerLostStorageList.isEmpty()) {
    mPrintStream.println("All worker storage paths are in working state.");
    return 0;
  }
  for (WorkerLostStorageInfo info : workerLostStorageList) {
    Map<String, StorageList> lostStorageMap = info.getLostStorageMap();
    if (!lostStorageMap.isEmpty()) {
      mPrintStream.printf("The following storage paths are lost in worker %s: %n",
          info.getAddress().getHost());
      for (Map.Entry<String, StorageList> tierStorage : lostStorageMap.entrySet()) {
        for (String storage : tierStorage.getValue().getStorageList()) {
          mPrintStream.printf("%s (%s)%n", storage, tierStorage.getKey());
        }
      }
    }
  }
  return 0;
}
Runs doctor storage command .
12,277
/**
 * Changes the replication level of the directory or file at the given path.
 *
 * @param path the path to update
 * @param replicationMax new max replication, or null to leave unchanged
 * @param replicationMin new min replication, or null to leave unchanged
 * @param recursive whether to apply the change recursively
 * @throws AlluxioException if the attribute update fails
 * @throws IOException if communication with the master fails
 */
private void setReplication(AlluxioURI path, Integer replicationMax, Integer replicationMin,
    boolean recursive) throws AlluxioException, IOException {
  SetAttributePOptions.Builder optionsBuilder =
      SetAttributePOptions.newBuilder().setRecursive(recursive);
  StringBuilder message = new StringBuilder()
      .append("Changed the replication level of ").append(path).append("\n");
  if (replicationMax != null) {
    optionsBuilder.setReplicationMax(replicationMax);
    message.append("replicationMax was set to ").append(replicationMax).append("\n");
  }
  if (replicationMin != null) {
    optionsBuilder.setReplicationMin(replicationMin);
    message.append("replicationMin was set to ").append(replicationMin).append("\n");
  }
  mFileSystem.setAttribute(path, optionsBuilder.build());
  System.out.println(message.toString());
}
Changes the replication level of directory or file with the path specified in args .
12,278
/**
 * Launches a background thread that terminates the current process after the given period.
 *
 * @param lifetimeMs milliseconds to wait before exiting the process
 */
public static void limitLife(final long lifetimeMs) {
  Thread limiter = new Thread(() -> {
    CommonUtils.sleepMs(lifetimeMs);
    LOG.info("Process has timed out after {}ms, exiting now", lifetimeMs);
    // Non-zero exit signals an abnormal, time-limited termination.
    System.exit(-1);
  }, "life-limiter");
  limiter.start();
}
Launches a thread to terminate the current process after the specified period has elapsed .
12,279
/**
 * Sets the UFS operations map.
 *
 * Naming fix: parameter renamed from {@code UfsOps} to {@code ufsOps} per Java
 * lowerCamelCase convention (source- and binary-compatible for callers).
 *
 * @param ufsOps map from UFS name to per-operation counts
 * @return this object for chaining
 */
public MasterWebUIMetrics setUfsOps(Map<String, Map<String, Long>> ufsOps) {
  mUfsOps = ufsOps;
  return this;
}
Sets ufs ops .
12,280
/**
 * Attempts to catch the state machine up with the journal after gaining primacy. Waits for
 * any in-progress snapshot, then repeatedly submits a marker entry (a random negative
 * sequence number) and waits for it to be applied; once no further entries arrive for two
 * election-timeout periods after the marker, catch-up is considered complete. Returns early
 * if this master loses primacy during the process.
 *
 * NOTE(review): the negative marker SN is presumably guaranteed not to collide with real
 * journal sequence numbers — confirm against the journal's SN allocation.
 *
 * @param stateMachine the journal state machine to catch up
 * @param client the Copycat client used to submit the marker entry
 * @throws TimeoutException if waiting for snapshotting to finish times out
 * @throws InterruptedException if interrupted while waiting
 */
private void catchUp ( JournalStateMachine stateMachine , CopycatClient client ) throws TimeoutException , InterruptedException { long startTime = System . currentTimeMillis ( ) ; CommonUtils . waitFor ( "snapshotting to finish" , ( ) -> ! stateMachine . isSnapshotting ( ) , WaitForOptions . defaults ( ) . setTimeoutMs ( 10 * Constants . MINUTE_MS ) ) ; while ( true ) { if ( mPrimarySelector . getState ( ) != PrimarySelector . State . PRIMARY ) { return ; } long lastAppliedSN = stateMachine . getLastAppliedSequenceNumber ( ) ; long gainPrimacySN = ThreadLocalRandom . current ( ) . nextLong ( Long . MIN_VALUE , 0 ) ; LOG . info ( "Performing catchup. Last applied SN: {}. Catchup ID: {}" , lastAppliedSN , gainPrimacySN ) ; CompletableFuture < Void > future = client . submit ( new JournalEntryCommand ( JournalEntry . newBuilder ( ) . setSequenceNumber ( gainPrimacySN ) . build ( ) ) ) ; try { future . get ( 5 , TimeUnit . SECONDS ) ; } catch ( TimeoutException | ExecutionException e ) { LOG . info ( "Exception submitting term start entry: {}" , e . toString ( ) ) ; continue ; } try { CommonUtils . waitFor ( "term start entry " + gainPrimacySN + " to be applied to state machine" , ( ) -> stateMachine . getLastPrimaryStartSequenceNumber ( ) == gainPrimacySN , WaitForOptions . defaults ( ) . setInterval ( Constants . SECOND_MS ) . setTimeoutMs ( 5 * Constants . SECOND_MS ) ) ; } catch ( TimeoutException e ) { LOG . info ( e . toString ( ) ) ; continue ; } CommonUtils . sleepMs ( 2 * mConf . getElectionTimeoutMs ( ) ) ; if ( stateMachine . getLastAppliedSequenceNumber ( ) != lastAppliedSN || stateMachine . getLastPrimaryStartSequenceNumber ( ) != gainPrimacySN ) { continue ; } LOG . info ( "Caught up in {}ms. Last sequence number from previous term: {}." , System . currentTimeMillis ( ) - startTime , stateMachine . getLastAppliedSequenceNumber ( ) ) ; return ; } }
Attempts to catch up . If the master loses leadership during this method it will return early .
12,281
/**
 * Entry point: starts a new JobShell and exits with its return code.
 *
 * @param argv the shell arguments
 * @throws IOException if the shell fails with an I/O error
 */
public static void main(String[] argv) throws IOException {
  InstancedConfiguration conf = new InstancedConfiguration(ConfigurationUtils.defaults());
  if (!ConfigurationUtils.masterHostConfigured(conf) && argv.length > 0) {
    System.out.println(ConfigurationUtils.getMasterHostNotConfiguredMessage("Alluxio job shell"));
    System.exit(1);
  }
  int ret;
  try (JobShell shell = new JobShell(conf)) {
    ret = shell.run(argv);
  }
  System.exit(ret);
}
Main method starts a new JobShell .
12,282
/**
 * Locks the next inode without checking or updating the lock mode. The lock list must
 * currently end in an edge, and the inode's name must match that edge.
 *
 * @param inode the inode to lock
 * @param mode the mode to lock in
 */
private void lockInodeInternal(Inode inode, LockMode mode) {
  Preconditions.checkState(!endsInInode());
  String lastEdgeName = ((EdgeEntry) lastEntry()).getEdge().getName();
  Preconditions.checkState(inode.getName().equals(lastEdgeName),
      "Expected to lock inode %s but locked inode %s", lastEdgeName, inode.getName());
  mLockedInodes.add(inode);
  mEntries.add(new InodeEntry(mInodeLockManager.lockInode(inode, mode), inode));
}
Locks the next inode without checking or updating the mode .
12,283
/**
 * Locks an edge leading out of the last inode in the list and records the new lock mode.
 * The list must end in a read-locked inode.
 *
 * @param childName the name of the child edge to lock
 * @param mode the mode to lock in
 */
public void lockEdge(String childName, LockMode mode) {
  Preconditions.checkState(endsInInode());
  Preconditions.checkState(mLockMode == LockMode.READ);
  lockEdgeInternal(childName, mode);
  mLockMode = mode;
}
Locks an edge leading out of the last inode in the list .
12,284
/**
 * Locks the next edge without checking or updating the lock mode. The list must end in
 * an inode; the edge links that inode to the named child.
 *
 * @param childName the name of the child edge to lock
 * @param mode the mode to lock in
 */
public void lockEdgeInternal(String childName, LockMode mode) {
  Preconditions.checkState(endsInInode());
  Inode lastInode = get(numLockedInodes() - 1);
  Edge edge = new Edge(lastInode.getId(), childName);
  mEntries.add(new EdgeEntry(mInodeLockManager.lockEdge(edge, mode), edge));
}
Locks the next edge without checking or updating the mode .
12,285
/**
 * Locks the root edge in the specified mode. The lock list must be empty.
 *
 * @param mode the mode to lock in
 */
public void lockRootEdge(LockMode mode) {
  Preconditions.checkState(mEntries.isEmpty());
  mEntries.add(new EdgeEntry(mInodeLockManager.lockEdge(ROOT_EDGE, mode), ROOT_EDGE));
  mLockMode = mode;
}
Locks the root edge in the specified mode .
12,286
/**
 * Leapfrogs the edge write lock forward: read-locks the given inode, write-locks the next
 * edge, then downgrades the previously write-locked edge, shrinking the write-locked scope.
 * The list must currently end in a write-locked edge.
 *
 * @param inode the inode to read-lock
 * @param childName the name of the next edge to write-lock
 */
public void pushWriteLockedEdge(Inode inode, String childName) {
  Preconditions.checkState(!endsInInode());
  Preconditions.checkState(mLockMode == LockMode.WRITE);
  if (mEntries.isEmpty()) {
    return;
  }
  // Remember the currently write-locked edge so it can be downgraded afterwards.
  int edgeIndex = mEntries.size() - 1;
  lockInodeInternal(inode, LockMode.READ);
  lockEdgeInternal(childName, LockMode.WRITE);
  downgradeEdge(edgeIndex);
}
Leapfrogs the edge write lock forward reducing the lock list s write - locked scope .
12,287
/**
 * Unlocks the last locked inode and drops it from the list; the list reverts to READ mode.
 */
public void unlockLastInode() {
  Preconditions.checkState(endsInInode());
  Preconditions.checkState(!mEntries.isEmpty());
  mLockedInodes.remove(mLockedInodes.size() - 1);
  mEntries.remove(mEntries.size() - 1).mLock.close();
  mLockMode = LockMode.READ;
}
Unlocks the last locked inode .
12,288
/**
 * Downgrades the last inode from a write lock to a read lock. The read lock is acquired
 * before the write lock is released so the inode is never unprotected.
 */
public void downgradeLastInode() {
  Preconditions.checkState(endsInInode());
  Preconditions.checkState(!mEntries.isEmpty());
  Preconditions.checkState(mLockMode == LockMode.WRITE);
  int lastIndex = mEntries.size() - 1;
  InodeEntry last = (InodeEntry) mEntries.get(lastIndex);
  // Acquire READ before releasing WRITE to keep the inode continuously locked.
  LockResource lock = mInodeLockManager.lockInode(last.getInode(), LockMode.READ);
  last.getLock().close();
  mEntries.set(lastIndex, new InodeEntry(lock, last.mInode));
  mLockMode = LockMode.READ;
}
Downgrades the last inode from a write lock to a read lock . The read lock is acquired before releasing the write lock .
12,289
/**
 * Downgrades the last edge lock in the lock list from WRITE to READ.
 *
 * BUGFIX: the first precondition used {@code Preconditions.checkNotNull(!endsInInode())},
 * which never fails (an autoboxed Boolean is never null), silently skipping the check.
 * It must be {@code checkState}.
 */
public void downgradeLastEdge() {
  Preconditions.checkState(!endsInInode());
  Preconditions.checkState(!mEntries.isEmpty());
  Preconditions.checkState(mLockMode == LockMode.WRITE);
  downgradeEdge(mEntries.size() - 1);
  mLockMode = LockMode.READ;
}
Downgrades the last edge lock in the lock list from WRITE lock to READ lock .
12,290
/**
 * Downgrades from edge write-locking to inode locking: locks the given inode in the
 * requested mode, read-locks the trailing edge, then releases the edge's write lock —
 * pushing the write scope forward by one entry.
 *
 * @param inode the inode to lock
 * @param mode the mode to lock the inode in
 */
public void downgradeEdgeToInode(Inode inode, LockMode mode) {
  Preconditions.checkState(!endsInInode());
  Preconditions.checkState(!mEntries.isEmpty());
  Preconditions.checkState(mLockMode == LockMode.WRITE);
  int lastIndex = mEntries.size() - 1;
  EdgeEntry last = (EdgeEntry) mEntries.get(lastIndex);
  // Take the new locks before releasing the write lock so coverage is continuous.
  LockResource inodeLock = mInodeLockManager.lockInode(inode, mode);
  LockResource edgeLock = mInodeLockManager.lockEdge(last.mEdge, LockMode.READ);
  last.getLock().close();
  mEntries.set(lastIndex, new EdgeEntry(edgeLock, last.getEdge()));
  mEntries.add(new InodeEntry(inodeLock, inode));
  mLockedInodes.add(inode);
  mLockMode = mode;
}
Downgrades from edge write - locking to inode write - locking . This reduces the scope of the write lock by pushing it forward one entry .
12,291
/**
 * Downgrades the edge at the given entry index from WRITE to READ, acquiring the read lock
 * before releasing the write lock.
 *
 * @param edgeEntryIndex index of the edge entry to downgrade
 */
private void downgradeEdge(int edgeEntryIndex) {
  EdgeEntry entry = (EdgeEntry) mEntries.get(edgeEntryIndex);
  LockResource readLock = mInodeLockManager.lockEdge(entry.mEdge, LockMode.READ);
  entry.getLock().close();
  mEntries.set(edgeEntryIndex, new EdgeEntry(readLock, entry.getEdge()));
}
Downgrades the edge at the specified entry index .
12,292
/**
 * Checks whether the path starts with a Windows drive specification (for example
 * {@code C:} or {@code /C:}). Platform independent.
 *
 * @param path the path to check
 * @param slashed true when the drive spec may be preceded by a single '/'
 * @return true when the path carries a Windows drive prefix
 */
public static boolean hasWindowsDrive(String path, boolean slashed) {
  int start = slashed ? 1 : 0;
  if (path.length() < start + 2) {
    return false;
  }
  if (slashed && path.charAt(0) != '/') {
    return false;
  }
  if (path.charAt(start + 1) != ':') {
    return false;
  }
  char drive = path.charAt(start);
  return (drive >= 'A' && drive <= 'Z') || (drive >= 'a' && drive <= 'z');
}
Checks if the path is a windows path . This should be platform independent .
12,293
/**
 * Returns whether this URI is an ancestor of the given URI: same authority, same scheme,
 * and this path is a prefix of the other path.
 *
 * @param alluxioURI the potential descendant
 * @return true when this URI is an ancestor of {@code alluxioURI}
 * @throws InvalidPathException if either path cannot be normalized
 */
public boolean isAncestorOf(AlluxioURI alluxioURI) throws InvalidPathException {
  return Objects.equals(getAuthority(), alluxioURI.getAuthority())
      && Objects.equals(getScheme(), alluxioURI.getScheme())
      && PathUtils.hasPrefix(PathUtils.normalizePath(alluxioURI.getPath(), SEPARATOR),
          PathUtils.normalizePath(getPath(), SEPARATOR));
}
Returns true if the current AlluxioURI is an ancestor of another AlluxioURI; otherwise returns false.
12,294
/**
 * Reserves enough space in the block worker to cover writing up to {@code pos}.
 * No-op when the position is already covered by the current reservation.
 *
 * Perf fix: the failure messages were built eagerly with {@code String.format} on every
 * call; Guava's {@code %s} template args format only when the check fails.
 *
 * @param pos the position (exclusive upper bound) that must be reserved
 * @throws IOException if the reserve request cannot be sent or its response received
 */
private void ensureReserved(long pos) throws IOException {
  if (pos <= mPosReserved) {
    return;
  }
  // Reserve at least one file buffer's worth to amortize round trips.
  long toReserve = Math.max(pos - mPosReserved, mFileBufferBytes);
  CreateLocalBlockRequest request = mCreateRequest.toBuilder()
      .setSpaceToReserve(toReserve).setOnlyReserveSpace(true).build();
  mStream.send(request, mDataTimeoutMs);
  CreateLocalBlockResponse response = mStream.receive(mDataTimeoutMs);
  Preconditions.checkState(response != null,
      "Stream closed while waiting for reserve request %s", request);
  Preconditions.checkState(!response.hasPath(),
      "Invalid response for reserve request %s", request);
  mPosReserved += toReserve;
}
Reserves enough space in the block worker .
12,295
/**
 * Implements log level setting and getting for the parsed targets.
 *
 * @param args the command-line arguments
 * @param alluxioConf the Alluxio configuration
 * @throws ParseException if the arguments cannot be parsed
 * @throws IOException if a target cannot be reached
 */
public static void logLevel(String[] args, AlluxioConfiguration alluxioConf)
    throws ParseException, IOException {
  CommandLineParser parser = new DefaultParser();
  CommandLine cmd = parser.parse(OPTIONS, args, true);
  List<TargetInfo> targets = parseOptTarget(cmd, alluxioConf);
  String logName = parseOptLogName(cmd);
  String level = parseOptLevel(cmd);
  for (TargetInfo targetInfo : targets) {
    setLogLevel(targetInfo, logName, level);
  }
}
Implements log level setting and getting .
12,296
/**
 * Entry point: sets or gets the log level of masters and workers through their REST API,
 * exiting 0 on success and 1 on failure.
 *
 * @param args the command-line arguments
 */
public static void main(String[] args) {
  int exitCode = 1;
  try {
    logLevel(args, new InstancedConfiguration(ConfigurationUtils.defaults()));
    exitCode = 0;
  } catch (ParseException e) {
    printHelp("Unable to parse input args: " + e.getMessage());
  } catch (IOException e) {
    e.printStackTrace();
    System.err.println(String.format("Failed to set log level: %s", e.getMessage()));
  }
  System.exit(exitCode);
}
Sets or gets log level of master and worker through their REST API .
12,297
/**
 * Validates the journal configuration: log size fits in an int, heartbeat interval is less
 * than half the election timeout, and the local address is among the cluster addresses.
 *
 * BUGFIX: the first check used SLF4J-style "{}" placeholders, but Guava Preconditions
 * formats with "%s" — the property key, value, and limit were never substituted into the
 * failure message.
 */
public void validate() {
  Preconditions.checkState(getMaxLogSize() <= Integer.MAX_VALUE,
      "%s has value %s but must not exceed %s",
      PropertyKey.MASTER_JOURNAL_LOG_SIZE_BYTES_MAX, getMaxLogSize(), Integer.MAX_VALUE);
  Preconditions.checkState(getHeartbeatIntervalMs() < getElectionTimeoutMs() / 2,
      "Heartbeat interval (%sms) should be less than half of the election timeout (%sms)",
      getHeartbeatIntervalMs(), getElectionTimeoutMs());
  Preconditions.checkState(getClusterAddresses().contains(getLocalAddress()),
      "The cluster addresses (%s) must contain the local master address (%s)",
      getClusterAddresses(), getLocalAddress());
}
Validates the configuration .
12,298
/**
 * Gets the list of groups for the given user, consulting the cache when enabled.
 *
 * @param user the user to look up
 * @return the user's groups
 * @throws IOException if the lookup fails
 */
public List<String> getGroups(String user) throws IOException {
  if (!mCacheEnabled) {
    return mService.getGroups(user);
  }
  try {
    return mCache.get(user);
  } catch (ExecutionException e) {
    // Surface cache-loader failures as plain I/O errors, preserving the cause.
    throw new IOException(e);
  }
}
Gets a list of groups for the given user .
12,299
/**
 * Registers new configuration information for a node, marks it as no longer lost, and
 * notifies all change listeners.
 *
 * @param address the node address; must not be null
 * @param configList the node's configuration properties; must not be null
 */
public synchronized void registerNewConf(Address address, List<ConfigProperty> configList) {
  Preconditions.checkNotNull(address, "address should not be null");
  Preconditions.checkNotNull(configList, "configuration list should not be null");
  List<ConfigRecord> records = configList.stream()
      .map(c -> new ConfigRecord()
          .setKey(toPropertyKey(c.getName()))
          .setSource(c.getSource())
          .setValue(c.getValue()))
      .collect(Collectors.toList());
  mConfMap.put(address, records);
  mLostNodes.remove(address);
  for (Runnable listener : mChangeListeners) {
    listener.run();
  }
}
Registers new configuration information .