idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
12,500
/**
 * Translates an S3 bucket ACL into a short Alluxio permission mode for the given user.
 *
 * @param acl the S3 access control list of the bucket
 * @param userId the user id to match against each grantee
 * @return the accumulated mode bits for grants that apply to the user
 */
public static short translateBucketAcl(AccessControlList acl, String userId) {
  short mode = 0;
  for (Grant grant : acl.getGrantsAsList()) {
    Permission perm = grant.getPermission();
    Grantee grantee = grant.getGrantee();
    // Map the S3 permission to mode bits; Read maps to r-x so the bucket can be listed.
    short bits = 0;
    if (perm.equals(Permission.Read)) {
      bits = 0500;
    } else if (perm.equals(Permission.Write)) {
      bits = 0200;
    } else if (perm.equals(Permission.FullControl)) {
      bits = 0700;
    }
    // Only consult the grantee when the permission is one we translate,
    // matching the original short-circuit behavior.
    if (bits != 0 && isUserIdInGrantee(grantee, userId)) {
      mode |= bits;
    }
  }
  return mode;
}
Translates S3 bucket ACL to Alluxio owner mode .
12,501
/**
 * Compares the scheme of this URI with the scheme of another URI, ignoring case.
 * A {@code null} scheme orders before any non-null scheme.
 *
 * @param other the URI whose scheme is compared against this one
 * @return negative, zero, or positive per the usual comparison contract
 */
private int compareScheme(URI other) {
  String mine = getScheme();
  String theirs = other.getScheme();
  if (mine == null) {
    return theirs == null ? 0 : -1;
  }
  if (theirs == null) {
    return 1;
  }
  return mine.compareToIgnoreCase(theirs);
}
Compares the schemes of this URI and a given URI .
12,502
/**
 * Applies a new-block entry, journals it, and returns the allocated block id.
 *
 * @param context supplier of the journal context to append to
 * @param entry the new-block journal entry
 * @return the id of the newly allocated block
 */
public long applyAndJournal(Supplier<JournalContext> context, NewBlockEntry entry) {
  try {
    long blockId = applyNewBlock(entry);
    JournalEntry journalEntry = JournalEntry.newBuilder().setNewBlock(entry).build();
    context.get().append(journalEntry);
    return blockId;
  } catch (Throwable t) {
    // A failure here would leave in-memory state out of sync with the journal, so it is fatal.
    ProcessUtils.fatalError(LOG, t, "Failed to apply %s", entry);
    throw t;
  }
}
Allocates and returns the next block ID for the indicated inode .
12,503
/**
 * Applies a rename entry and journals it.
 *
 * @param context supplier of the journal context to append to
 * @param entry the rename journal entry
 */
public void applyAndJournal(Supplier<JournalContext> context, RenameEntry entry) {
  try {
    applyRename(entry);
    JournalEntry journalEntry = JournalEntry.newBuilder().setRename(entry).build();
    context.get().append(journalEntry);
  } catch (Throwable t) {
    // Keep in-memory state consistent with the journal: any failure is fatal.
    ProcessUtils.fatalError(LOG, t, "Failed to apply %s", entry);
    throw t;
  }
}
Renames an inode .
12,504
/**
 * Applies a set-ACL entry and journals it.
 *
 * @param context supplier of the journal context to append to
 * @param entry the set-ACL journal entry
 */
public void applyAndJournal(Supplier<JournalContext> context, SetAclEntry entry) {
  try {
    applySetAcl(entry);
    JournalEntry journalEntry = JournalEntry.newBuilder().setSetAcl(entry).build();
    context.get().append(journalEntry);
  } catch (Throwable t) {
    // Keep in-memory state consistent with the journal: any failure is fatal.
    ProcessUtils.fatalError(LOG, t, "Failed to apply %s", entry);
    throw t;
  }
}
Sets an ACL for an inode .
12,505
/**
 * Applies an update-inode entry and journals it.
 *
 * @param context supplier of the journal context to append to
 * @param entry the update-inode journal entry
 */
public void applyAndJournal(Supplier<JournalContext> context, UpdateInodeEntry entry) {
  try {
    applyUpdateInode(entry);
    JournalEntry journalEntry = JournalEntry.newBuilder().setUpdateInode(entry).build();
    context.get().append(journalEntry);
  } catch (Throwable t) {
    // Keep in-memory state consistent with the journal: any failure is fatal.
    ProcessUtils.fatalError(LOG, t, "Failed to apply %s", entry);
    throw t;
  }
}
Updates an inode's state. This is used for state common to both files and directories.
12,506
/**
 * Applies an update-inode-directory entry and journals it.
 *
 * @param context supplier of the journal context to append to
 * @param entry the update-inode-directory journal entry
 */
public void applyAndJournal(Supplier<JournalContext> context, UpdateInodeDirectoryEntry entry) {
  try {
    applyUpdateInodeDirectory(entry);
    JournalEntry journalEntry = JournalEntry.newBuilder().setUpdateInodeDirectory(entry).build();
    context.get().append(journalEntry);
  } catch (Throwable t) {
    // Keep in-memory state consistent with the journal: any failure is fatal.
    ProcessUtils.fatalError(LOG, t, "Failed to apply %s", entry);
    throw t;
  }
}
Updates an inode directory's state.
12,507
/**
 * Applies an update-inode-file entry and journals it.
 *
 * @param context supplier of the journal context to append to
 * @param entry the update-inode-file journal entry
 */
public void applyAndJournal(Supplier<JournalContext> context, UpdateInodeFileEntry entry) {
  try {
    applyUpdateInodeFile(entry);
    JournalEntry journalEntry = JournalEntry.newBuilder().setUpdateInodeFile(entry).build();
    context.get().append(journalEntry);
  } catch (Throwable t) {
    // Keep in-memory state consistent with the journal: any failure is fatal.
    ProcessUtils.fatalError(LOG, t, "Failed to apply %s", entry);
    throw t;
  }
}
Updates an inode file's state.
12,508
/**
 * Applies an inode creation and journals the inode's own journal entry.
 *
 * @param context supplier of the journal context to append to
 * @param inode the inode to add to the inode tree
 */
public void applyAndJournal(Supplier<JournalContext> context, MutableInode<?> inode) {
  try {
    applyCreateInode(inode);
    // Unlike the entry-based overloads, the inode serializes itself to a journal entry.
    context.get().append(inode.toJournalEntry());
  } catch (Throwable t) {
    // Keep in-memory state consistent with the journal: any failure is fatal.
    ProcessUtils.fatalError(LOG, t, "Failed to apply %s", inode);
    throw t;
  }
}
Adds an inode to the inode tree .
12,509
/**
 * Waits so that consecutive ticks are at least the configured interval apart.
 * If the previous tick's work already exceeded the interval, logs a warning
 * instead of sleeping.
 *
 * @throws InterruptedException if interrupted while sleeping
 */
public void tick() throws InterruptedException {
  if (mPreviousTickMs == 0) {
    // First tick ever: nothing to pace against.
    mPreviousTickMs = mClock.millis();
    return;
  }
  long elapsedMs = mClock.millis() - mPreviousTickMs;
  if (elapsedMs > mIntervalMs) {
    mLogger.warn("{} last execution took {} ms. Longer than the interval {}",
        mThreadName, elapsedMs, mIntervalMs);
  } else {
    mSleeper.sleep(Duration.ofMillis(mIntervalMs - elapsedMs));
  }
  // Record the post-sleep time so the next interval is measured from now.
  mPreviousTickMs = mClock.millis();
}
Enforces the thread waits for the given interval between consecutive ticks .
12,510
/**
 * Registers (or, for a null class, unregisters) the timer class to use for the
 * named executor thread.
 *
 * @param name the executor thread name
 * @param timerClass the timer class, or {@code null} to remove the registration
 */
@SuppressWarnings("unused")
private static synchronized void setTimerClass(String name,
    Class<? extends HeartbeatTimer> timerClass) {
  if (timerClass != null) {
    sTimerClasses.put(name, timerClass);
  } else {
    sTimerClasses.remove(name);
  }
}
Sets the timer class to use for the specified executor thread .
12,511
/**
 * Builds a URI for the configured journal folder, guaranteeing a trailing separator.
 *
 * @return the journal location as a URI
 */
public static URI getJournalLocation() {
  String folder = ServerConfiguration.get(PropertyKey.MASTER_JOURNAL_FOLDER);
  String normalized =
      folder.endsWith(AlluxioURI.SEPARATOR) ? folder : folder + AlluxioURI.SEPARATOR;
  try {
    return new URI(normalized);
  } catch (URISyntaxException e) {
    // The configured folder should always form a valid URI; surface misconfiguration loudly.
    throw new RuntimeException(e);
  }
}
Returns a URI for the configured location for the specified journal .
12,512
/**
 * Writes every entry of the iterable to the stream as a journal-entry checkpoint.
 *
 * @param output the raw stream to wrap and write to
 * @param iterable the source of journal entries
 * @throws IOException if writing fails
 * @throws InterruptedException if the calling thread is interrupted mid-write
 */
public static void writeJournalEntryCheckpoint(OutputStream output, JournalEntryIterable iterable)
    throws IOException, InterruptedException {
  CheckpointOutputStream checkpointStream =
      new CheckpointOutputStream(output, CheckpointType.JOURNAL_ENTRY);
  Iterator<JournalEntry> entries = iterable.getJournalEntryIterator();
  LOG.info("Write journal entry checkpoint");
  while (entries.hasNext()) {
    // Allow cancellation between entries; Thread.interrupted() also clears the flag,
    // which the InterruptedException communicates to the caller.
    if (Thread.interrupted()) {
      throw new InterruptedException();
    }
    entries.next().writeDelimitedTo(checkpointStream);
  }
  checkpointStream.flush();
}
Writes a checkpoint of the entries in the given iterable .
12,513
/**
 * Resets the journaled object and replays every entry from a journal-entry checkpoint.
 *
 * @param input the checkpoint stream, which must be of type JOURNAL_ENTRY
 * @param journaled the object to reset and replay into
 * @throws IOException if reading the checkpoint fails
 */
public static void restoreJournalEntryCheckpoint(CheckpointInputStream input, Journaled journaled)
    throws IOException {
  Preconditions.checkState(input.getType() == CheckpointType.JOURNAL_ENTRY,
      "Unrecognized checkpoint type when restoring %s: %s",
      journaled.getCheckpointName(), input.getType());
  journaled.resetState();
  LOG.info("Reading journal entries");
  JournalEntryStreamReader reader = new JournalEntryStreamReader(input);
  for (JournalEntry entry = reader.readEntry(); entry != null; entry = reader.readEntry()) {
    try {
      journaled.processJournalEntry(entry);
    } catch (Throwable t) {
      // Delegate per-entry failures so replay policy is decided in one place.
      handleJournalReplayFailure(LOG, t,
          "Failed to process journal entry %s from a journal checkpoint", entry);
    }
  }
}
Restores the given journaled object from the journal entries in the input stream .
12,514
/**
 * Writes a compound checkpoint containing one named chunk per component.
 *
 * @param output the raw stream to write the checkpoint to
 * @param components the components to serialize, in order
 * @throws IOException if writing fails
 * @throws InterruptedException if a component write is interrupted
 */
public static void writeToCheckpoint(OutputStream output, List<? extends Checkpointed> components)
    throws IOException, InterruptedException {
  CheckpointOutputStream checkpointStream =
      new CheckpointOutputStream(output, CheckpointType.COMPOUND);
  OutputChunked chunked = new OutputChunked(checkpointStream, 64 * Constants.KB);
  for (Checkpointed component : components) {
    // Each component becomes a chunk keyed by its checkpoint name so it can be
    // located by name on restore.
    chunked.writeString(component.getCheckpointName().toString());
    component.writeToCheckpoint(chunked);
    chunked.endChunks();
  }
  chunked.flush();
}
Writes a composite checkpoint for the given checkpointed components .
12,515
/**
 * Restores each named chunk of a compound checkpoint into the component with the
 * matching checkpoint name.
 *
 * @param input the compound checkpoint stream
 * @param components the candidate components to restore into
 * @throws IOException if reading fails
 * @throws RuntimeException if a chunk's name matches no component
 */
public static void restoreFromCheckpoint(CheckpointInputStream input,
    List<? extends Checkpointed> components) throws IOException {
  CompoundCheckpointReader reader = new CompoundCheckpointReader(input);
  for (Optional<Entry> next = reader.nextCheckpoint(); next.isPresent();
      next = reader.nextCheckpoint()) {
    Entry checkpoint = next.get();
    Checkpointed match = null;
    for (Checkpointed component : components) {
      if (component.getCheckpointName().equals(checkpoint.getName())) {
        match = component;
        break;
      }
    }
    if (match == null) {
      throw new RuntimeException(String.format(
          "Unrecognized checkpoint name: %s. Existing components: %s", checkpoint.getName(),
          Arrays.toString(StreamUtils.map(Checkpointed::getCheckpointName, components).toArray())));
    }
    match.restoreFromCheckpoint(checkpoint.getStream());
  }
}
Restores the given checkpointed components from a composite checkpoint .
12,516
/**
 * Prints one metric (if present) and removes it from the pending map so it is
 * not printed again.
 *
 * @param metricName the key of the metric in the metrics map
 * @param nickName the display name, or {@code null} to use the metric name
 * @param valueIsBytes whether the value should be rendered as a byte size
 */
private void printMetric(String metricName, String nickName, boolean valueIsBytes) {
  if (mMetricsMap == null || !mMetricsMap.containsKey(metricName)) {
    return;
  }
  MetricValue metricValue = mMetricsMap.get(metricName);
  String displayName = nickName == null ? metricName : nickName;
  String formattedValue;
  if (valueIsBytes) {
    formattedValue = FormatUtils.getSizeFromBytes(metricValue.getLongValue());
  } else {
    formattedValue = getFormattedValue(metricValue);
  }
  mPrintStream.println(INDENT + String.format(mInfoFormat, displayName, formattedValue));
  // Consume the metric so later catch-all printing does not repeat it.
  mMetricsMap.remove(metricName);
}
Prints the metrics information .
12,517
/**
 * Formats a metric value, preferring its double representation when present.
 *
 * @param metricValue the value to format
 * @return the formatted value
 */
private String getFormattedValue(MetricValue metricValue) {
  // NOTE(review): DECIMAL_FORMAT appears to be a shared formatter; DecimalFormat is
  // not thread-safe — confirm this method is only called from one thread.
  return metricValue.hasDoubleValue()
      ? DECIMAL_FORMAT.format(metricValue.getDoubleValue())
      : DECIMAL_FORMAT.format(metricValue.getLongValue());
}
Gets the formatted metric value .
12,518
/**
 * Collects the ids of all sessions that have timed out.
 *
 * @return the list of timed-out session ids
 */
public List<Long> getTimedOutSessions() {
  List<Long> timedOut = new ArrayList<>();
  synchronized (mSessions) {
    mSessions.forEach((sessionId, info) -> {
      if (info.timeout()) {
        timedOut.add(sessionId);
      }
    });
  }
  return timedOut;
}
Gets the sessions that timed out .
12,519
/**
 * Records a heartbeat for a session, creating the session entry on first contact.
 *
 * @param sessionId the id of the session to refresh
 */
public void sessionHeartbeat(long sessionId) {
  synchronized (mSessions) {
    SessionInfo session = mSessions.get(sessionId);
    if (session != null) {
      session.heartbeat();
    } else {
      // First heartbeat from this session: register it with the configured timeout.
      int sessionTimeoutMs =
          (int) ServerConfiguration.getMs(PropertyKey.WORKER_SESSION_TIMEOUT_MS);
      mSessions.put(sessionId, new SessionInfo(sessionId, sessionTimeoutMs));
    }
  }
}
Performs session heartbeat .
12,520
// Recursively resolves ${key}-style references inside a property value.
// The `seen` set tracks keys on the current resolution path to detect circular
// dependencies; each key is removed again after its subtree resolves, so the same
// key may legitimately appear on sibling paths. replaceFirst pairs with
// matcher.find() walking matches left to right. Throws UnresolvablePropertyException
// when the base value or a referenced key has no value, RuntimeException on a
// circular or invalid key.
private String lookupRecursively ( String base , Set < String > seen ) throws UnresolvablePropertyException { if ( base == null ) { throw new UnresolvablePropertyException ( "Can't resolve property with null value" ) ; } String resolved = base ; Matcher matcher = CONF_REGEX . matcher ( base ) ; while ( matcher . find ( ) ) { String match = matcher . group ( 2 ) . trim ( ) ; if ( ! seen . add ( match ) ) { throw new RuntimeException ( ExceptionMessage . KEY_CIRCULAR_DEPENDENCY . getMessage ( match ) ) ; } if ( ! PropertyKey . isValid ( match ) ) { throw new RuntimeException ( ExceptionMessage . INVALID_CONFIGURATION_KEY . getMessage ( match ) ) ; } String value = lookupRecursively ( mProperties . get ( PropertyKey . fromString ( match ) ) , seen ) ; seen . remove ( match ) ; if ( value == null ) { throw new UnresolvablePropertyException ( ExceptionMessage . UNDEFINED_CONFIGURATION_KEY . getMessage ( match ) ) ; } LOG . debug ( "Replacing {} with {}" , matcher . group ( 1 ) , value ) ; resolved = resolved . replaceFirst ( REGEX_STRING , Matcher . quoteReplacement ( value ) ) ; } return resolved ; }
Actual recursive lookup replacement .
12,521
/**
 * Validates that explicit worker ports are not set when multiple workers may share
 * a host, since fixed ports would then collide.
 */
private void checkWorkerPorts() {
  int maxWorkersPerHost = getInt(PropertyKey.INTEGRATION_YARN_WORKERS_PER_HOST_MAX);
  if (maxWorkersPerHost <= 1) {
    return;
  }
  // The %s placeholder is filled with the offending port property below.
  String message = "%s cannot be specified when allowing multiple workers per host with "
      + PropertyKey.Name.INTEGRATION_YARN_WORKERS_PER_HOST_MAX + "=" + maxWorkersPerHost;
  Preconditions.checkState(System.getProperty(PropertyKey.Name.WORKER_RPC_PORT) == null,
      String.format(message, PropertyKey.WORKER_RPC_PORT));
  Preconditions.checkState(System.getProperty(PropertyKey.Name.WORKER_WEB_PORT) == null,
      String.format(message, PropertyKey.WORKER_WEB_PORT));
}
Validates worker port configuration .
12,522
/**
 * Validates timeout-related configuration, warning when the worker-connect wait
 * time is shorter than the client retry interval and checking master heartbeat
 * interval vs. timeout.
 */
private void checkTimeouts() {
  long connectWaitMs = getMs(PropertyKey.MASTER_WORKER_CONNECT_WAIT_TIME);
  long maxRetrySleepMs = getMs(PropertyKey.USER_RPC_RETRY_MAX_SLEEP_MS);
  if (connectWaitMs < maxRetrySleepMs) {
    // Not fatal, but workers may fail to register before the master gives up waiting.
    LOG.warn("{}={}ms is smaller than {}={}ms. Workers might not have enough time to register. "
        + "Consider either increasing {} or decreasing {}",
        PropertyKey.Name.MASTER_WORKER_CONNECT_WAIT_TIME, connectWaitMs,
        PropertyKey.Name.USER_RPC_RETRY_MAX_SLEEP_MS, maxRetrySleepMs,
        PropertyKey.Name.MASTER_WORKER_CONNECT_WAIT_TIME,
        PropertyKey.Name.USER_RPC_RETRY_MAX_SLEEP_MS);
  }
  checkHeartbeatTimeout(PropertyKey.MASTER_MASTER_HEARTBEAT_INTERVAL,
      PropertyKey.MASTER_HEARTBEAT_TIMEOUT);
}
Validates timeout related configuration .
12,523
/**
 * Asserts that a heartbeat interval is strictly shorter than its timeout.
 *
 * @param intervalKey the property holding the heartbeat interval
 * @param timeoutKey the property holding the heartbeat timeout
 */
private void checkHeartbeatTimeout(PropertyKey intervalKey, PropertyKey timeoutKey) {
  long intervalMs = getMs(intervalKey);
  long timeoutMs = getMs(timeoutKey);
  Preconditions.checkState(intervalMs < timeoutMs,
      "heartbeat interval (%s=%s) must be less than heartbeat timeout (%s=%s)",
      intervalKey, intervalMs, timeoutKey, timeoutMs);
}
Checks that the interval is shorter than the timeout .
12,524
/**
 * Validates that the user file buffer size, when set, is a non-negative value
 * that fits in an int.
 */
private void checkUserFileBufferBytes() {
  if (!isSet(PropertyKey.USER_FILE_BUFFER_BYTES)) {
    return; // nothing configured, nothing to validate
  }
  long bufferBytes = getBytes(PropertyKey.USER_FILE_BUFFER_BYTES);
  // Masking with Integer.MAX_VALUE leaves the value unchanged exactly when it is
  // non-negative and representable as an int.
  Preconditions.checkState((bufferBytes & Integer.MAX_VALUE) == bufferBytes,
      PreconditionMessage.INVALID_USER_FILE_BUFFER_BYTES.toString(),
      PropertyKey.Name.USER_FILE_BUFFER_BYTES, bufferBytes);
}
Validates the user file buffer size is a non - negative number .
12,525
/**
 * Validates that the Zookeeper address is set if and only if Zookeeper is enabled.
 */
private void checkZkConfiguration() {
  boolean addressConfigured = isSet(PropertyKey.ZOOKEEPER_ADDRESS);
  boolean zkEnabled = getBoolean(PropertyKey.ZOOKEEPER_ENABLED);
  Preconditions.checkState(addressConfigured == zkEnabled,
      PreconditionMessage.INCONSISTENT_ZK_CONFIGURATION.toString(),
      PropertyKey.Name.ZOOKEEPER_ADDRESS, PropertyKey.Name.ZOOKEEPER_ENABLED);
}
Validates Zookeeper - related configuration and prints warnings for possible sources of error .
12,526
/**
 * Checks that every user-defined locality-tier property refers to a tier that is
 * present in the configured locality order.
 *
 * @throws IllegalStateException if a tier property names an unknown tier
 */
private void checkTieredLocality() {
  Set<String> configuredTiers = Sets.newHashSet(getList(PropertyKey.LOCALITY_ORDER, ","));
  Set<PropertyKey> predefinedKeys = new HashSet<>(PropertyKey.defaultKeys());
  for (PropertyKey key : mProperties.keySet()) {
    if (predefinedKeys.contains(key)) {
      continue; // only user-defined keys can name a locality tier
    }
    Matcher matcher = Template.LOCALITY_TIER.match(key.toString());
    if (!matcher.matches() || matcher.group(1) == null) {
      continue;
    }
    String tierName = matcher.group(1);
    if (!configuredTiers.contains(tierName)) {
      throw new IllegalStateException(String.format(
          "Tier %s is configured by %s, but does not exist in the tier list %s configured by %s",
          tierName, key, configuredTiers, PropertyKey.LOCALITY_ORDER));
    }
  }
}
Checks that tiered locality configuration is consistent .
12,527
/**
 * Changes the group of a local file without following symlinks.
 *
 * @param path the path of the file
 * @param group the name of the new group
 * @throws IOException if the group lookup or the change fails
 */
public static void changeLocalFileGroup(String path, String group) throws IOException {
  UserPrincipalLookupService lookupService =
      FileSystems.getDefault().getUserPrincipalLookupService();
  GroupPrincipal groupPrincipal = lookupService.lookupPrincipalByGroupName(group);
  // NOFOLLOW_LINKS: operate on the link itself rather than its target.
  PosixFileAttributeView view = Files.getFileAttributeView(Paths.get(path),
      PosixFileAttributeView.class, LinkOption.NOFOLLOW_LINKS);
  view.setGroup(groupPrincipal);
}
Changes the local file's group.
12,528
/**
 * Sets the POSIX permissions of a local file from a symbolic string such as
 * {@code "rwxr-xr-x"}.
 *
 * @param filePath the path of the file
 * @param perms the symbolic permission string
 * @throws IOException if the permissions cannot be applied
 */
public static void changeLocalFilePermission(String filePath, String perms) throws IOException {
  Set<PosixFilePermission> permissions = PosixFilePermissions.fromString(perms);
  Files.setPosixFilePermissions(Paths.get(filePath), permissions);
}
Changes the local file's permission.
12,529
/**
 * Returns the owner name of a local file.
 *
 * @param filePath the path of the file
 * @return the owner's user name
 * @throws IOException if the attributes cannot be read
 */
public static String getLocalFileOwner(String filePath) throws IOException {
  PosixFileAttributes attributes =
      Files.readAttributes(Paths.get(filePath), PosixFileAttributes.class);
  return attributes.owner().getName();
}
Gets the local file's owner.
12,530
/**
 * Returns the group name of a local file.
 *
 * @param filePath the path of the file
 * @return the file's group name
 * @throws IOException if the attributes cannot be read
 */
public static String getLocalFileGroup(String filePath) throws IOException {
  PosixFileAttributes attributes =
      Files.readAttributes(Paths.get(filePath), PosixFileAttributes.class);
  return attributes.group().getName();
}
Gets the local file's group.
12,531
/**
 * Returns the permission mode of a local file as a short.
 *
 * @param filePath the path of the file
 * @return the POSIX permission bits as a short
 * @throws IOException if the attributes cannot be read
 */
public static short getLocalFileMode(String filePath) throws IOException {
  Set<PosixFilePermission> permissions =
      Files.readAttributes(Paths.get(filePath), PosixFileAttributes.class).permissions();
  return translatePosixPermissionToMode(permissions);
}
Gets the local file's permission mode.
12,532
/**
 * Translates a set of POSIX file permissions into the classic short mode form
 * (e.g. rwxr-xr-x becomes 0755).
 *
 * @param permission the set of permissions to encode
 * @return the mode bits as a short
 */
public static short translatePosixPermissionToMode(Set<PosixFilePermission> permission) {
  int mode = 0;
  // PosixFilePermission.values() enumerates owner r,w,x then group r,w,x then
  // others r,w,x — the same order as the mode bits from most significant down.
  for (PosixFilePermission action : PosixFilePermission.values()) {
    mode = (mode << 1) | (permission.contains(action) ? 1 : 0);
  }
  return (short) mode;
}
Translate posix file permissions to short mode .
12,533
/**
 * Changes the owner of a local file without following symlinks.
 *
 * @param path the path of the file
 * @param user the name of the new owner
 * @throws IOException if the user lookup or the change fails
 */
public static void changeLocalFileUser(String path, String user) throws IOException {
  UserPrincipalLookupService lookupService =
      FileSystems.getDefault().getUserPrincipalLookupService();
  UserPrincipal userPrincipal = lookupService.lookupPrincipalByName(user);
  // NOFOLLOW_LINKS: operate on the link itself rather than its target.
  PosixFileAttributeView view = Files.getFileAttributeView(Paths.get(path),
      PosixFileAttributeView.class, LinkOption.NOFOLLOW_LINKS);
  view.setOwner(userPrincipal);
}
Changes the local file's user.
12,534
/**
 * Creates the parent directories of a local block path, applying the configured
 * worker data folder permissions to any directory that is created.
 *
 * @param path the block file path whose parent directories should exist
 * @param workerDataFolderPermissions symbolic permission string for created directories
 * @throws IOException if the parent path cannot be determined or created
 */
public static void createBlockPath(String path, String workerDataFolderPermissions)
    throws IOException {
  try {
    createStorageDirPath(PathUtils.getParent(path), workerDataFolderPermissions);
  } catch (InvalidPathException e) {
    // Bug fix: the original message concatenated path directly with "failed",
    // producing e.g. ".../blockfailed". Add the missing space.
    throw new IOException("Failed to create block path, get parent path of " + path + " failed", e);
  } catch (IOException e) {
    throw new IOException("Failed to create block path " + path, e);
  }
}
Creates the local block path and all the parent directories . Also sets the appropriate permissions .
12,535
/**
 * Deletes the file or directory at the given path.
 *
 * @param path the path to delete
 * @throws IOException if the path does not exist or cannot be deleted
 */
public static void delete(String path) throws IOException {
  boolean deleted = Files.deleteIfExists(Paths.get(path));
  if (!deleted) {
    // deleteIfExists returns false only when the path was absent.
    throw new IOException("Failed to delete " + path);
  }
}
Deletes the file or directory .
12,536
/**
 * Recursively deletes a path: files directly, directories bottom-up after their
 * contents. A nonexistent path is a no-op.
 *
 * @param path the path to delete
 * @throws IOException if any file or directory in the tree cannot be deleted
 */
public static void deletePathRecursively(String path) throws IOException {
  if (!exists(path)) {
    return;
  }
  Path root = Paths.get(path);
  Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
    @Override
    public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
      Files.delete(file);
      return FileVisitResult.CONTINUE;
    }

    @Override
    public FileVisitResult postVisitDirectory(Path dir, IOException e) throws IOException {
      if (e != null) {
        // Propagate the traversal failure rather than deleting a partially-visited dir.
        throw e;
      }
      Files.delete(dir);
      return FileVisitResult.CONTINUE;
    }
  });
}
Deletes a file or a directory recursively if it is a directory .
12,537
/**
 * Creates the storage directory path, including any missing parents, applying the
 * given permissions and the sticky bit to the created directory. Does nothing if
 * the path already exists.
 *
 * @param path the directory path to create
 * @param workerDataFolderPermissions symbolic permission string for the directory
 * @return {@code true} if the directory was created, {@code false} if it already existed
 * @throws IOException if creation or permission changes fail
 */
public static boolean createStorageDirPath(String path, String workerDataFolderPermissions)
    throws IOException {
  if (Files.exists(Paths.get(path))) {
    return false; // already present; leave existing permissions untouched
  }
  Path created;
  try {
    created = Files.createDirectories(Paths.get(path));
  } catch (UnsupportedOperationException | SecurityException | IOException e) {
    throw new IOException("Failed to create folder " + path, e);
  }
  String absolutePath = created.toAbsolutePath().toString();
  changeLocalFilePermission(absolutePath, workerDataFolderPermissions);
  setLocalDirStickyBit(absolutePath);
  return true;
}
Creates the storage directory path including any necessary but nonexistent parent directories . If the directory already exists do nothing .
12,538
/**
 * Creates an empty file, creating any missing intermediate directories first.
 *
 * @param filePath the path of the file to create
 * @throws IOException if a directory or the file cannot be created, or the file exists
 */
public static void createFile(String filePath) throws IOException {
  Path file = Paths.get(filePath);
  Path parent = file.getParent();
  // Bug fix: getParent() is null for single-component paths (e.g. "a.txt"),
  // and Files.createDirectories(null) would throw NullPointerException.
  if (parent != null) {
    Files.createDirectories(parent);
  }
  Files.createFile(file);
}
Creates an empty file and its intermediate directories if necessary .
12,539
/**
 * Checks whether a storage directory exists and is readable, writable, and
 * executable (traversable) by the current process.
 *
 * @param path the directory path to check
 * @return {@code true} if the path exists with full access
 */
public static boolean isStorageDirAccessible(String path) {
  Path dir = Paths.get(path);
  return Files.exists(dir)
      && Files.isReadable(dir)
      && Files.isWritable(dir)
      && Files.isExecutable(dir);
}
Checks if a storage directory path is accessible .
12,540
/**
 * Writes a gzip-compressed backup of all masters' journal entries to the stream.
 *
 * @param os the stream to write the backup to; it is not closed by this method
 * @throws IOException if writing fails
 */
public void backup(OutputStream os) throws IOException {
  GzipCompressorOutputStream zipStream = new GzipCompressorOutputStream(os);
  int entryCount = 0;
  for (Master master : mRegistry.getServers()) {
    Iterator<JournalEntry> entries = master.getJournalEntryIterator();
    while (entries.hasNext()) {
      // Sequence numbers are cleared; they are reassigned when the backup is replayed.
      entries.next().toBuilder().clearSequenceNumber().build().writeDelimitedTo(zipStream);
      entryCount++;
    }
  }
  // finish() writes the gzip trailer without closing the caller's stream.
  zipStream.finish();
  LOG.info("Created backup with {} entries", entryCount);
}
Writes a backup to the specified stream .
12,541
// Restores master state from a gzip-compressed backup stream. Each entry is first
// applied to the owning master's in-memory state, then re-appended to that master's
// own journal (in a fresh per-entry journal context) so the restored state is also
// durable. Per-entry replay failures are delegated to handleJournalReplayFailure,
// which decides whether replay continues.
public void initFromBackup ( InputStream is ) throws IOException { int count = 0 ; try ( GzipCompressorInputStream gzIn = new GzipCompressorInputStream ( is ) ; JournalEntryStreamReader reader = new JournalEntryStreamReader ( gzIn ) ) { List < Master > masters = mRegistry . getServers ( ) ; JournalEntry entry ; Map < String , Master > mastersByName = Maps . uniqueIndex ( masters , Master :: getName ) ; while ( ( entry = reader . readEntry ( ) ) != null ) { String masterName = JournalEntryAssociation . getMasterForEntry ( entry ) ; Master master = mastersByName . get ( masterName ) ; try { master . processJournalEntry ( entry ) ; } catch ( Throwable t ) { JournalUtils . handleJournalReplayFailure ( LOG , t , "Failed to process journal entry %s when init from backup" , entry ) ; } try ( JournalContext jc = master . createJournalContext ( ) ) { jc . append ( entry ) ; count ++ ; } } } LOG . info ( "Restored {} entries from backup" , count ) ; }
Restores master state from the specified backup .
12,542
// Looks up a value, loading from the backing store on a miss. When the cache is
// full, misses bypass insertion entirely (read-through without caching) to avoid
// growing the map. Otherwise the atomic compute() either marks an existing entry
// referenced (for the eviction clock) or inserts a clean (mDirty = false) entry
// from the loaded value. A present entry with a null mValue is a removal
// tombstone and reads as empty. The eviction thread is only woken on the
// insertion path, where the map may have grown.
public Optional < V > get ( K key ) { if ( cacheIsFull ( ) ) { Entry entry = mMap . get ( key ) ; if ( entry == null ) { return load ( key ) ; } return Optional . ofNullable ( entry . mValue ) ; } Entry result = mMap . compute ( key , ( k , entry ) -> { if ( entry != null ) { entry . mReferenced = true ; return entry ; } Optional < V > value = load ( key ) ; if ( value . isPresent ( ) ) { onCacheUpdate ( key , value . get ( ) ) ; Entry newEntry = new Entry ( key , value . get ( ) ) ; newEntry . mDirty = false ; return newEntry ; } return null ; } ) ; if ( result == null || result . mValue == null ) { return Optional . empty ( ) ; } wakeEvictionThreadIfNecessary ( ) ; return Optional . of ( result . mValue ) ; }
Retrieves a value from the cache loading it from the backing store if necessary .
12,543
// Removes a key. When the key is absent from the cache and the cache is full, the
// removal goes straight to the backing store. Otherwise a dirty tombstone entry
// (mValue = null, mDirty = true, mReferenced = false) is left in the map so the
// eviction/flush machinery later propagates the delete to the backing store.
public void remove ( K key ) { mMap . compute ( key , ( k , entry ) -> { onRemove ( key ) ; if ( entry == null && cacheIsFull ( ) ) { removeFromBackingStore ( k ) ; return null ; } onCacheUpdate ( key , null ) ; if ( entry == null ) { entry = new Entry ( key , null ) ; } else { entry . mValue = null ; } entry . mReferenced = false ; entry . mDirty = true ; return entry ; } ) ; wakeEvictionThreadIfNecessary ( ) ; }
Removes a key from the cache .
12,544
/**
 * Flushes all dirty entries to the backing store in batches of at most
 * {@code mEvictBatchSize}, checking for interruption between batches.
 *
 * @throws InterruptedException if the calling thread is interrupted
 */
public void flush() throws InterruptedException {
  List<Entry> dirtyBatch = new ArrayList<>(mEvictBatchSize);
  Iterator<Entry> it = mMap.values().iterator();
  while (it.hasNext()) {
    if (Thread.interrupted()) {
      throw new InterruptedException();
    }
    // Accumulate up to a batch of dirty entries before handing them off.
    while (dirtyBatch.size() < mEvictBatchSize && it.hasNext()) {
      Entry candidate = it.next();
      if (candidate.mDirty) {
        dirtyBatch.add(candidate);
      }
    }
    flushEntries(dirtyBatch);
    dirtyBatch.clear();
  }
}
Flushes all data to the backing store .
12,545
/**
 * Clears every entry from the map, firing the update and removal callbacks per
 * entry first. Not thread-safe: callers must synchronize externally to prevent
 * concurrent modification of the cache.
 */
public void clear() {
  for (Map.Entry<K, Entry> mapEntry : mMap.entrySet()) {
    onCacheUpdate(mapEntry.getKey(), mapEntry.getValue().mValue);
    onRemove(mapEntry.getKey());
  }
  mMap.clear();
}
Clears all entries from the map . This is not threadsafe and requires external synchronization to prevent concurrent modifications to the cache .
12,546
/**
 * Removes the last inode from the path after it has been deleted, so the locked
 * inode path can continue to be used. Only valid when the full path exists.
 */
public void removeLastInode() {
  Preconditions.checkState(fullPathExists());
  if (!isImplicitlyLocked()) {
    // Release the lock on the inode and its incoming edge to keep the lock list
    // aligned with the shortened inode list.
    mLockList.unlockLastInode();
    mLockList.unlockLastEdge();
  }
  int lastIndex = mExistingInodes.size() - 1;
  mExistingInodes.remove(lastIndex);
}
Removes the last inode from the list. This is necessary when the last inode is deleted and we want to continue using the inode path. This operation is only supported when the path is complete.
12,547
/**
 * Appends the next inode to the path, advancing the write-locked edge to the new
 * final edge (downgrading earlier locks) unless the path is implicitly locked or
 * this is the last component.
 *
 * @param inode the inode to append; its name must match the next path component
 */
public void addNextInode(Inode inode) {
  Preconditions.checkState(mLockPattern == LockPattern.WRITE_EDGE);
  Preconditions.checkState(!fullPathExists());
  int nextIndex = mExistingInodes.size();
  Preconditions.checkState(inode.getName().equals(mPathComponents[nextIndex]));
  boolean hasMoreComponents = nextIndex < mPathComponents.length - 1;
  if (!isImplicitlyLocked() && hasMoreComponents) {
    // Move the write lock forward to the edge leading to the next component.
    mLockList.pushWriteLockedEdge(inode, mPathComponents[nextIndex + 1]);
  }
  mExistingInodes.add(inode);
}
Adds the next inode to the path . This tries to reduce the scope of locking by moving the write lock forward to the new final edge downgrading the previous write lock to a read lock . If the path is implicitly locked the inode is added but no downgrade occurs .
12,548
// Downgrades the path's locking from its current pattern to a weaker (or equal)
// desired pattern. Allowed transitions: WRITE_INODE -> READ (downgrade the last
// inode lock, only valid when not implicitly locked), WRITE_EDGE -> READ or
// WRITE_INODE (convert the edge lock to an inode lock of the matching mode), and
// any pattern to itself (no-op). All other transitions fail a precondition.
// mLockPattern is updated only after the lock changes succeed.
public void downgradeToPattern ( LockPattern desiredLockPattern ) { switch ( desiredLockPattern ) { case READ : if ( mLockPattern == LockPattern . WRITE_INODE ) { Preconditions . checkState ( ! isImplicitlyLocked ( ) ) ; mLockList . downgradeLastInode ( ) ; } else if ( mLockPattern == LockPattern . WRITE_EDGE ) { downgradeEdgeToInode ( LockMode . READ ) ; } break ; case WRITE_INODE : if ( mLockPattern == LockPattern . WRITE_EDGE ) { downgradeEdgeToInode ( LockMode . WRITE ) ; } else { Preconditions . checkState ( mLockPattern == LockPattern . WRITE_INODE ) ; } break ; case WRITE_EDGE : Preconditions . checkState ( mLockPattern == LockPattern . WRITE_EDGE ) ; break ; default : throw new IllegalStateException ( "Unknown lock pattern: " + desiredLockPattern ) ; } mLockPattern = desiredLockPattern ; }
Downgrades from the current locking scheme to the desired locking scheme .
12,549
/**
 * Locks a descendant of this path and returns it as a new locked inode path,
 * traversed under the given lock pattern. Closing the returned path does not
 * affect this path.
 *
 * @param descendantUri the URI of the descendant to lock
 * @param lockPattern the pattern to traverse and lock with
 * @return the new locked inode path for the descendant
 * @throws InvalidPathException if the descendant path is invalid
 */
public LockedInodePath lockDescendant(AlluxioURI descendantUri, LockPattern lockPattern)
    throws InvalidPathException {
  String[] components = PathUtils.getPathComponents(descendantUri.getPath());
  LockedInodePath descendantPath =
      new LockedInodePath(descendantUri, this, components, lockPattern);
  descendantPath.traverseOrClose();
  return descendantPath;
}
Locks a descendant of the current path and returns a new locked inode path . The path is traversed according to the lock pattern . Closing the new path will have no effect on the current path .
12,550
/**
 * Returns a new locked inode path extending this path by the given child inode,
 * traversed under the given lock pattern. This path is unaffected.
 *
 * @param child the child inode to extend the path with
 * @param lockPattern the pattern to lock the extended path with
 * @return the new locked inode path
 * @throws InvalidPathException if the extended path is invalid
 */
public LockedInodePath lockChild(Inode child, LockPattern lockPattern)
    throws InvalidPathException {
  String[] childComponents = addComponent(mPathComponents, child.getName());
  return lockChild(child, lockPattern, childComponents);
}
Returns a new locked inode path composed of the current path plus the child inode . The path is traversed according to the lock pattern . The original locked inode path is unaffected .
12,551
/**
 * Returns a copy of this path with the final edge write-locked. Only valid while
 * the full path does not yet exist (i.e. the final edge is not yet locked).
 *
 * @return the new locked inode path with a write-locked final edge
 * @throws InvalidPathException if traversal fails
 */
public LockedInodePath lockFinalEdgeWrite() throws InvalidPathException {
  Preconditions.checkState(!fullPathExists());
  LockedInodePath edgeLockedPath =
      new LockedInodePath(mUri, this, mPathComponents, LockPattern.WRITE_EDGE);
  edgeLockedPath.traverse();
  return edgeLockedPath;
}
Returns a copy of the path with the final edge write locked. This requires that we haven't already locked the final edge, i.e. the path is incomplete.
12,552
// Walks the inode tree along mPathComponents, locking edges/inodes per the lock
// pattern, resuming from wherever a prior partial traversal stopped. If the lock
// list already ends in a write lock, no further locks are taken and the inode
// list is merely populated. For WRITE_EDGE, the final edge is write-locked; when
// an intermediate component is missing under WRITE_EDGE, the last edge lock is
// upgraded to WRITE and the child is re-read (it may have been created
// concurrently), downgrading back if it now exists. Traversal stops early when a
// component is missing; a non-directory intermediate component raises
// InvalidPathException. (Line split below is an artifact of the source dump.)
public void traverse ( ) throws InvalidPathException { if ( mLockList . getLockMode ( ) == LockMode . WRITE ) { traverseWithoutLocking ( ) ; return ; } bootstrapTraversal ( ) ; while ( ! fullPathExists ( ) ) { int lastInodeIndex = mLockList . getLockedInodes ( ) . size ( ) - 1 ; String nextComponent = mPathComponents [ lastInodeIndex + 1 ] ; boolean isFinalComponent = lastInodeIndex == mPathComponents . length - 2 ; if ( mLockList . endsInInode ( ) ) { if ( mLockPattern == LockPattern . WRITE_EDGE && isFinalComponent ) { mLockList . lockEdge ( nextComponent , LockMode . WRITE ) ; } else { mLockList . lockEdge ( nextComponent , LockMode . READ ) ; } } else { Inode lastInode = mLockList . getLockedInodes ( ) . get ( lastInodeIndex ) ; if ( ! lastInode . isDirectory ( ) ) { throw new InvalidPathException ( String . format ( "Traversal failed for path %s. Component %s(%s) is a file, not a directory." , mUri , lastInodeIndex , lastInode . getName ( ) ) ) ; } Optional < Inode > nextInodeOpt = mInodeStore . getChild ( lastInode . asDirectory ( ) , nextComponent ) ; if ( ! nextInodeOpt . isPresent ( ) && mLockPattern == LockPattern . WRITE_EDGE && ! isFinalComponent ) { mLockList . unlockLastEdge ( ) ; mLockList . lockEdge ( nextComponent , LockMode . WRITE ) ; nextInodeOpt = mInodeStore . getChild ( lastInode . asDirectory ( ) , nextComponent ) ; if ( nextInodeOpt . isPresent ( ) ) { mLockList . downgradeLastEdge ( ) ; } } if ( ! nextInodeOpt . isPresent ( ) ) { if ( mLockPattern != LockPattern . WRITE_EDGE ) { mLockList . unlockLastEdge ( ) ; } return ; } Inode nextInode = nextInodeOpt . get ( ) ; if ( isFinalComponent ) { if ( mLockPattern == LockPattern . READ ) { mLockList . lockInode ( nextInode , LockMode . READ ) ; } else if ( mLockPattern == LockPattern . WRITE_INODE ) { mLockList . lockInode ( nextInode , LockMode . WRITE ) ; } else if ( mLockPattern == LockPattern . WRITE_EDGE ) { if ( mLockList . numLockedInodes ( ) == mExistingInodes . 
size ( ) ) { mExistingInodes . add ( nextInode ) ; } } } else { mLockList . lockInode ( nextInode , LockMode . READ ) ; } if ( mLockList . numLockedInodes ( ) > mExistingInodes . size ( ) ) { mExistingInodes . add ( nextInode ) ; } } } }
Traverses the inode path according to its lock pattern . If the inode path is already partially traversed this method will pick up where the previous traversal left off . If the path already ends in a write lock traverse will populate the inodes list without taking any additional locks .
12,553
/**
 * Builds the full instance id from a hostname and an optional id: dots in the
 * hostname are replaced by underscores, and a non-null id is appended after a dash.
 *
 * @param hostname the hostname, or {@code null} for none
 * @param id the instance id, or {@code null} for none
 * @return the combined instance id (empty when both inputs are null)
 */
private static String getFullInstanceId(String hostname, String id) {
  StringBuilder instanceId = new StringBuilder();
  if (hostname != null) {
    instanceId.append(hostname.replace('.', '_'));
  }
  if (id != null) {
    instanceId.append('-').append(id);
  }
  return instanceId.toString();
}
Gets the full instance id of the concatenation of hostname and the id . The dots in the hostname replaced by underscores .
12,554
/**
 * Replaces all metrics previously reported by the given worker with the new batch.
 * Metrics without a hostname are dropped.
 *
 * @param hostname the worker's hostname
 * @param metrics the latest metrics reported by the worker
 */
public void putWorkerMetrics(String hostname, List<Metric> metrics) {
  if (metrics.isEmpty()) {
    return;
  }
  String instanceId = getFullInstanceId(hostname, null);
  synchronized (mWorkerMetrics) {
    // Drop the worker's previous metrics before storing the fresh batch.
    mWorkerMetrics.removeByField(ID_INDEX, instanceId);
    for (Metric metric : metrics) {
      if (metric.getHostname() != null) {
        mWorkerMetrics.add(metric);
      }
    }
  }
}
Puts the metrics from a worker with a hostname. All the old metrics associated with this instance will be removed and then replaced by the latest.
12,555
/**
 * Replaces all metrics previously reported by the given client with the new batch.
 * Metrics without a hostname are dropped.
 *
 * @param hostname the client's hostname
 * @param clientId the client's id
 * @param metrics the latest metrics reported by the client
 */
public void putClientMetrics(String hostname, String clientId, List<Metric> metrics) {
  if (metrics.isEmpty()) {
    return;
  }
  LOG.debug("Removing metrics for id {} to replace with {}", clientId, metrics);
  String instanceId = getFullInstanceId(hostname, clientId);
  synchronized (mClientMetrics) {
    // Drop the client's previous metrics before storing the fresh batch.
    mClientMetrics.removeByField(ID_INDEX, instanceId);
    for (Metric metric : metrics) {
      if (metric.getHostname() != null) {
        mClientMetrics.add(metric);
      }
    }
  }
}
Puts the metrics from a client with a hostname and a client id. All the old metrics associated with this instance will be removed and then replaced by the latest.
12,556
/**
 * Returns the metrics with the given name for the given instance type.
 *
 * @param instanceType the instance type (master, worker, or client)
 * @param name the metric name to look up
 * @return the matching metrics
 * @throws IllegalArgumentException for any other instance type
 */
public Set<Metric> getMetricsByInstanceTypeAndName(MetricsSystem.InstanceType instanceType,
    String name) {
  if (instanceType == InstanceType.MASTER) {
    return getMasterMetrics(name);
  }
  if (instanceType == InstanceType.WORKER) {
    synchronized (mWorkerMetrics) {
      return mWorkerMetrics.getByField(NAME_INDEX, name);
    }
  }
  if (instanceType == InstanceType.CLIENT) {
    synchronized (mClientMetrics) {
      return mClientMetrics.getByField(NAME_INDEX, name);
    }
  }
  throw new IllegalArgumentException("Unsupported instance type " + instanceType);
}
Gets all the metrics by instance type and the metric name . The supported instance types are worker and client .
12,557
/**
 * Attempts to create a file at the given path (Hadoop FileSystem API).
 * First tries a direct create; if that throws an AlluxioException, falls back to an
 * exists/overwrite check: fails if the path exists and overwrite is false, fails if it
 * is a folder, otherwise deletes the existing file and retries the create.
 * NOTE(review): the fallback masks the original create failure when the path does not
 * exist — the retry's exception is reported instead; confirm this is intended.
 */
public FSDataOutputStream create ( Path path , FsPermission permission , boolean overwrite , int bufferSize , short replication , long blockSize , Progressable progress ) throws IOException { LOG . debug ( "create({}, {}, {}, {}, {}, {}, {})" , path , permission , overwrite , bufferSize , replication , blockSize , progress ) ; if ( mStatistics != null ) { mStatistics . incrementWriteOps ( 1 ) ; } AlluxioURI uri = new AlluxioURI ( HadoopUtils . getPathWithoutScheme ( path ) ) ; CreateFilePOptions options = CreateFilePOptions . newBuilder ( ) . setBlockSizeBytes ( blockSize ) . setMode ( new Mode ( permission . toShort ( ) ) . toProto ( ) ) . setRecursive ( true ) . build ( ) ; FileOutStream outStream ; try { outStream = mFileSystem . createFile ( uri , options ) ; } catch ( AlluxioException e ) { try { if ( mFileSystem . exists ( uri ) ) { if ( ! overwrite ) { throw new IOException ( ExceptionMessage . FILE_ALREADY_EXISTS . getMessage ( uri ) ) ; } if ( mFileSystem . getStatus ( uri ) . isFolder ( ) ) { throw new IOException ( ExceptionMessage . FILE_CREATE_IS_DIRECTORY . getMessage ( uri ) ) ; } mFileSystem . delete ( uri ) ; } outStream = mFileSystem . createFile ( uri , options ) ; } catch ( AlluxioException e2 ) { throw new IOException ( e2 ) ; } } return new FSDataOutputStream ( outStream , mStatistics ) ; }
Attempts to create a file . Overwrite will not succeed if the path exists and is a folder .
12,558
/**
 * Attempts to delete the file or directory at the given path.
 * Returns false (without throwing) when the path is invalid or does not exist;
 * any other Alluxio failure is wrapped in an IOException.
 */
public boolean delete ( Path path , boolean recursive ) throws IOException { LOG . debug ( "delete({}, {})" , path , recursive ) ; if ( mStatistics != null ) { mStatistics . incrementWriteOps ( 1 ) ; } AlluxioURI uri = new AlluxioURI ( HadoopUtils . getPathWithoutScheme ( path ) ) ; DeletePOptions options = DeletePOptions . newBuilder ( ) . setRecursive ( recursive ) . build ( ) ; try { mFileSystem . delete ( uri , options ) ; return true ; } catch ( InvalidPathException | FileDoesNotExistException e ) { LOG . warn ( "delete failed: {}" , e . getMessage ( ) ) ; return false ; } catch ( AlluxioException e ) { throw new IOException ( e ) ; } }
Attempts to delete the file or directory with the specified path .
12,559
/**
 * Changes the permission (mode) of a path, non-recursively.
 * Alluxio failures are wrapped in an IOException.
 */
public void setPermission ( Path path , FsPermission permission ) throws IOException { LOG . debug ( "setMode({},{})" , path , permission . toString ( ) ) ; AlluxioURI uri = new AlluxioURI ( HadoopUtils . getPathWithoutScheme ( path ) ) ; SetAttributePOptions options = SetAttributePOptions . newBuilder ( ) . setMode ( new Mode ( permission . toShort ( ) ) . toProto ( ) ) . setRecursive ( false ) . build ( ) ; try { mFileSystem . setAttribute ( uri , options ) ; } catch ( AlluxioException e ) { throw new IOException ( e ) ; } }
Changes permission of a path .
12,560
/**
 * Derives connection configuration properties from the URI's authority.
 * Zookeeper authority enables ZK and sets its address; single-master sets host/port and
 * explicitly nulls out ZK and multi-master keys; multi-master sets the RPC address list
 * and nulls out ZK keys. Returns an empty map for any other authority type.
 */
private Map < String , Object > getConfigurationFromUri ( URI uri ) { AlluxioURI alluxioUri = new AlluxioURI ( uri . toString ( ) ) ; Map < String , Object > alluxioConfProperties = new HashMap < > ( ) ; if ( alluxioUri . getAuthority ( ) instanceof ZookeeperAuthority ) { ZookeeperAuthority authority = ( ZookeeperAuthority ) alluxioUri . getAuthority ( ) ; alluxioConfProperties . put ( PropertyKey . ZOOKEEPER_ENABLED . getName ( ) , true ) ; alluxioConfProperties . put ( PropertyKey . ZOOKEEPER_ADDRESS . getName ( ) , authority . getZookeeperAddress ( ) ) ; } else if ( alluxioUri . getAuthority ( ) instanceof SingleMasterAuthority ) { SingleMasterAuthority authority = ( SingleMasterAuthority ) alluxioUri . getAuthority ( ) ; alluxioConfProperties . put ( PropertyKey . MASTER_HOSTNAME . getName ( ) , authority . getHost ( ) ) ; alluxioConfProperties . put ( PropertyKey . MASTER_RPC_PORT . getName ( ) , authority . getPort ( ) ) ; alluxioConfProperties . put ( PropertyKey . ZOOKEEPER_ENABLED . getName ( ) , false ) ; alluxioConfProperties . put ( PropertyKey . ZOOKEEPER_ADDRESS . getName ( ) , null ) ; alluxioConfProperties . put ( PropertyKey . MASTER_EMBEDDED_JOURNAL_ADDRESSES . getName ( ) , null ) ; alluxioConfProperties . put ( PropertyKey . MASTER_RPC_ADDRESSES . getName ( ) , null ) ; } else if ( alluxioUri . getAuthority ( ) instanceof MultiMasterAuthority ) { MultiMasterAuthority authority = ( MultiMasterAuthority ) alluxioUri . getAuthority ( ) ; alluxioConfProperties . put ( PropertyKey . MASTER_RPC_ADDRESSES . getName ( ) , authority . getMasterAddresses ( ) ) ; alluxioConfProperties . put ( PropertyKey . ZOOKEEPER_ENABLED . getName ( ) , false ) ; alluxioConfProperties . put ( PropertyKey . ZOOKEEPER_ADDRESS . getName ( ) , null ) ; } return alluxioConfProperties ; }
Gets the connection configuration from the input uri .
12,561
/**
 * Creates a folder at the given path, creating parent directories as needed
 * (recursive=true) and succeeding if it already exists (allowExists=true).
 * Alluxio failures are wrapped in an IOException.
 */
public boolean mkdirs ( Path path , FsPermission permission ) throws IOException { LOG . debug ( "mkdirs({}, {})" , path , permission ) ; if ( mStatistics != null ) { mStatistics . incrementWriteOps ( 1 ) ; } AlluxioURI uri = new AlluxioURI ( HadoopUtils . getPathWithoutScheme ( path ) ) ; CreateDirectoryPOptions options = CreateDirectoryPOptions . newBuilder ( ) . setRecursive ( true ) . setAllowExists ( true ) . setMode ( new Mode ( permission . toShort ( ) ) . toProto ( ) ) . build ( ) ; try { mFileSystem . createDirectory ( uri , options ) ; return true ; } catch ( AlluxioException e ) { throw new IOException ( e ) ; } }
Attempts to create a folder with the specified path . Parent directories will be created .
12,562
/**
 * Parses the given string as a mode: numeric (e.g. "0755") first, falling back to
 * symbolic (e.g. "rwxr-xr-x") when the numeric parse fails.
 * Throws IllegalArgumentException for a blank value.
 */
public static Mode parse ( String value ) { if ( StringUtils . isBlank ( value ) ) { throw new IllegalArgumentException ( ExceptionMessage . INVALID_MODE . getMessage ( value ) ) ; } try { return parseNumeric ( value ) ; } catch ( NumberFormatException e ) { return parseSymbolic ( value ) ; } }
Parses the given value as a mode .
12,563
/**
 * Main cleanup loop: every mCheckIntervalMs, removes timed-out sessions and invokes
 * each registered SessionCleanable for them. Sleeps only for the remainder of the
 * interval; logs a warning when a cycle overran the interval.
 */
public void run ( ) { long lastCheckMs = System . currentTimeMillis ( ) ; while ( mRunning ) { long lastIntervalMs = System . currentTimeMillis ( ) - lastCheckMs ; long toSleepMs = mCheckIntervalMs - lastIntervalMs ; if ( toSleepMs > 0 ) { CommonUtils . sleepMs ( LOG , toSleepMs ) ; } else { LOG . warn ( "Session cleanup took: {}, expected: {}" , lastIntervalMs , mCheckIntervalMs ) ; } lastCheckMs = System . currentTimeMillis ( ) ; for ( long session : mSessions . getTimedOutSessions ( ) ) { mSessions . removeSession ( session ) ; for ( SessionCleanable sc : mSessionCleanables ) { sc . cleanupSession ( session ) ; } } } }
Main loop for the cleanup ; continuously looks for zombie sessions .
12,564
/**
 * Appends the option flag followed by the string form of its argument
 * (separated by a single space) to the command's argument list.
 *
 * @param opt the option flag
 * @param arg the argument value; a null argument is rendered as "null"
 * @return this builder, for chaining
 */
public CommandBuilder addArg(String opt, Object arg) {
  // Concatenating an Object produces the same text as String.valueOf(arg).
  mArgs.add(opt + " " + arg);
  return this;
}
Adds the string value of the given option argument to the command .
12,565
/**
 * Entry point for the Alluxio file system shell. Exits with code 1 when no master host
 * is configured (unless the first argument is "help"); otherwise caps RPC retry
 * duration at 5s, runs the shell with the given arguments, and exits with its result.
 */
public static void main ( String [ ] argv ) throws IOException { int ret ; InstancedConfiguration conf = new InstancedConfiguration ( ConfigurationUtils . defaults ( ) ) ; if ( ! ConfigurationUtils . masterHostConfigured ( conf ) && argv . length > 0 && ! argv [ 0 ] . equals ( "help" ) ) { System . out . println ( ConfigurationUtils . getMasterHostNotConfiguredMessage ( "Alluxio fs shell" ) ) ; System . exit ( 1 ) ; } conf . set ( PropertyKey . USER_RPC_RETRY_MAX_DURATION , "5s" , Source . DEFAULT ) ; try ( FileSystemShell shell = new FileSystemShell ( conf ) ) { ret = shell . run ( argv ) ; } System . exit ( ret ) ; }
Main method starts a new FileSystemShell .
12,566
/**
 * Collects metadata for every block in this directory that the manager view
 * currently reports as evictable.
 *
 * @return a new list containing only the evictable blocks of this dir
 */
public List<BlockMeta> getEvictableBlocks() {
  List<BlockMeta> evictable = new ArrayList<>();
  for (BlockMeta meta : mDir.getBlocks()) {
    if (mManagerView.isBlockEvictable(meta.getBlockId())) {
      evictable.add(meta);
    }
  }
  return evictable;
}
Gets a filtered list of block metadata for blocks that are neither pinned nor blocked .
12,567
/**
 * Sums the sizes of all evictable blocks in this directory.
 * NOTE: the method name misspells "Evictable"; it is kept as-is for API compatibility.
 *
 * @return the total number of evictable bytes in this dir
 */
public long getEvitableBytes() {
  long total = 0;
  for (BlockMeta meta : mDir.getBlocks()) {
    if (mManagerView.isBlockEvictable(meta.getBlockId())) {
      total += meta.getBlockSize();
    }
  }
  return total;
}
Gets the evictable bytes for this dir , i . e . the total size in bytes of all evictable blocks .
12,568
/**
 * Mounts the given UFS path at the given Alluxio path, journaling the change.
 * Under the write lock it rejects: an Alluxio path that is already a mount point, and a
 * UFS path that is a prefix of (or prefixed by) an existing mounted UFS path with the
 * same scheme/authority — i.e. nested UFS mounts.
 * NOTE(review): ufsUri.getAuthority() is dereferenced without a null check while the
 * scheme IS null-checked — confirm authority is guaranteed non-null here.
 */
public void add ( Supplier < JournalContext > journalContext , AlluxioURI alluxioUri , AlluxioURI ufsUri , long mountId , MountPOptions options ) throws FileAlreadyExistsException , InvalidPathException { String alluxioPath = alluxioUri . getPath ( ) . isEmpty ( ) ? "/" : alluxioUri . getPath ( ) ; LOG . info ( "Mounting {} at {}" , ufsUri , alluxioPath ) ; try ( LockResource r = new LockResource ( mWriteLock ) ) { if ( mState . getMountTable ( ) . containsKey ( alluxioPath ) ) { throw new FileAlreadyExistsException ( ExceptionMessage . MOUNT_POINT_ALREADY_EXISTS . getMessage ( alluxioPath ) ) ; } for ( Map . Entry < String , MountInfo > entry : mState . getMountTable ( ) . entrySet ( ) ) { AlluxioURI mountedUfsUri = entry . getValue ( ) . getUfsUri ( ) ; if ( ( ufsUri . getScheme ( ) == null || ufsUri . getScheme ( ) . equals ( mountedUfsUri . getScheme ( ) ) ) && ( ufsUri . getAuthority ( ) . toString ( ) . equals ( mountedUfsUri . getAuthority ( ) . toString ( ) ) ) ) { String ufsPath = ufsUri . getPath ( ) . isEmpty ( ) ? "/" : ufsUri . getPath ( ) ; String mountedUfsPath = mountedUfsUri . getPath ( ) . isEmpty ( ) ? "/" : mountedUfsUri . getPath ( ) ; if ( PathUtils . hasPrefix ( ufsPath , mountedUfsPath ) ) { throw new InvalidPathException ( ExceptionMessage . MOUNT_POINT_PREFIX_OF_ANOTHER . getMessage ( mountedUfsUri . toString ( ) , ufsUri . toString ( ) ) ) ; } if ( PathUtils . hasPrefix ( mountedUfsPath , ufsPath ) ) { throw new InvalidPathException ( ExceptionMessage . MOUNT_POINT_PREFIX_OF_ANOTHER . getMessage ( ufsUri . toString ( ) , mountedUfsUri . toString ( ) ) ) ; } } } Map < String , String > properties = options . getPropertiesMap ( ) ; mState . applyAndJournal ( journalContext , AddMountPointEntry . newBuilder ( ) . addAllProperties ( properties . entrySet ( ) . stream ( ) . map ( entry -> StringPairEntry . newBuilder ( ) . setKey ( entry . getKey ( ) ) . setValue ( entry . getValue ( ) ) . build ( ) ) . collect ( Collectors . toList ( ) ) ) . 
setAlluxioPath ( alluxioPath ) . setMountId ( mountId ) . setReadOnly ( options . getReadOnly ( ) ) . setShared ( options . getShared ( ) ) . setUfsPath ( ufsUri . toString ( ) ) . build ( ) ) ; } }
Mounts the given UFS path at the given Alluxio path . The Alluxio path should not be nested under an existing mount point .
12,569
/**
 * Unmounts the given Alluxio path, journaling the change. Refuses (returns false) to
 * unmount the root, a path containing a nested mount point, or a path that is not a
 * mount point. On success removes the UFS client for the mount id before journaling.
 */
public boolean delete ( Supplier < JournalContext > journalContext , AlluxioURI uri ) { String path = uri . getPath ( ) ; LOG . info ( "Unmounting {}" , path ) ; if ( path . equals ( ROOT ) ) { LOG . warn ( "Cannot unmount the root mount point." ) ; return false ; } try ( LockResource r = new LockResource ( mWriteLock ) ) { if ( mState . getMountTable ( ) . containsKey ( path ) ) { for ( String mountPath : mState . getMountTable ( ) . keySet ( ) ) { try { if ( PathUtils . hasPrefix ( mountPath , path ) && ( ! path . equals ( mountPath ) ) ) { LOG . warn ( "The path to unmount {} contains another nested mountpoint {}" , path , mountPath ) ; return false ; } } catch ( InvalidPathException e ) { LOG . warn ( "Invalid path {} encountered when checking for nested mount point" , path ) ; } } mUfsManager . removeMount ( mState . getMountTable ( ) . get ( path ) . getMountId ( ) ) ; mState . applyAndJournal ( journalContext , DeleteMountPointEntry . newBuilder ( ) . setAlluxioPath ( path ) . build ( ) ) ; return true ; } LOG . warn ( "Mount point {} does not exist." , path ) ; return false ; } }
Unmounts the given Alluxio path . The path should match an existing mount point .
12,570
/**
 * Returns the closest ancestor mount point the given path is nested under.
 * Scans all mount points under the read lock, keeping the longest (deepest) mount that
 * prefixes the path; falls back to ROOT, so this never returns null.
 */
public String getMountPoint ( AlluxioURI uri ) throws InvalidPathException { String path = uri . getPath ( ) ; String lastMount = ROOT ; try ( LockResource r = new LockResource ( mReadLock ) ) { for ( Map . Entry < String , MountInfo > entry : mState . getMountTable ( ) . entrySet ( ) ) { String mount = entry . getKey ( ) ; if ( ! mount . equals ( ROOT ) && PathUtils . hasPrefix ( path , mount ) && PathUtils . hasPrefix ( mount , lastMount ) ) { lastMount = mount ; } } return lastMount ; } }
Returns the closest ancestor mount point the given path is nested under .
12,571
/**
 * Returns a snapshot of the current mount table (Alluxio path -> mount info).
 * The copy is taken under the read lock; mutating it does not affect this table.
 *
 * @return a defensive copy of the mount table
 */
public Map<String, MountInfo> getMountTable() {
  Map<String, MountInfo> snapshot;
  try (LockResource lock = new LockResource(mReadLock)) {
    snapshot = new HashMap<>(mState.getMountTable());
  }
  return snapshot;
}
Returns a copy of the current mount table the mount table is a map from Alluxio file system URIs to the corresponding mount point information .
12,572
/**
 * Maps a UFS URI back to its Alluxio URI: finds a mount whose UFS URI is an ancestor of
 * the given URI and reverse-resolves against it, returning the first match.
 * Exceptions from individual mounts are logged and that mount is skipped.
 * Returns null when no mount covers the UFS URI.
 */
public AlluxioURI reverseResolve ( AlluxioURI ufsUri ) { AlluxioURI returnVal = null ; for ( Map . Entry < String , MountInfo > mountInfoEntry : getMountTable ( ) . entrySet ( ) ) { try { if ( mountInfoEntry . getValue ( ) . getUfsUri ( ) . isAncestorOf ( ufsUri ) ) { returnVal = reverseResolve ( mountInfoEntry . getValue ( ) . getAlluxioUri ( ) , mountInfoEntry . getValue ( ) . getUfsUri ( ) , ufsUri ) ; } } catch ( InvalidPathException | RuntimeException e ) { LOG . info ( Throwables . getStackTraceAsString ( e ) ) ; } if ( returnVal != null ) { return returnVal ; } } return null ; }
Resolves the given UFS path . If the given UFS path is mounted in Alluxio space , it returns the associated Alluxio path .
12,573
/**
 * Gets the UFS client associated with the given mount id.
 *
 * @param mountId the mount id to look up
 * @return the {@link UfsManager.UfsClient} for the mount, or null when it is not found
 *         or currently unavailable (the failure is logged)
 */
public UfsManager.UfsClient getUfsClient(long mountId) {
  try {
    return mUfsManager.get(mountId);
  } catch (NotFoundException | UnavailableException e) {
    // Pass the throwable as the final argument with no placeholder so SLF4J logs the
    // full stack trace; the original "{}" placeholder consumed it as a plain parameter.
    LOG.warn("Failed to get UfsClient for mount id {}", mountId, e);
  }
  return null;
}
Get the associated ufs client with the mount id .
12,574
/**
 * Resolves an Alluxio path to its UFS location. Under the read lock, finds the closest
 * mount point, asks the mount's UFS to resolve the path remainder, and returns a
 * Resolution carrying the UFS URI, client, shared flag, and mount id.
 * A missing UFS entry for a known mount id is an invariant violation -> RuntimeException.
 * NOTE(review): getMountPoint always returns at least ROOT, so the null branch (which
 * returns a no-op Resolution with INVALID_MOUNT_ID) appears to be defensive only.
 */
public Resolution resolve ( AlluxioURI uri ) throws InvalidPathException { try ( LockResource r = new LockResource ( mReadLock ) ) { String path = uri . getPath ( ) ; LOG . debug ( "Resolving {}" , path ) ; String mountPoint = getMountPoint ( uri ) ; if ( mountPoint != null ) { MountInfo info = mState . getMountTable ( ) . get ( mountPoint ) ; AlluxioURI ufsUri = info . getUfsUri ( ) ; UfsManager . UfsClient ufsClient ; AlluxioURI resolvedUri ; try { ufsClient = mUfsManager . get ( info . getMountId ( ) ) ; try ( CloseableResource < UnderFileSystem > ufsResource = ufsClient . acquireUfsResource ( ) ) { UnderFileSystem ufs = ufsResource . get ( ) ; resolvedUri = ufs . resolveUri ( ufsUri , path . substring ( mountPoint . length ( ) ) ) ; } } catch ( NotFoundException | UnavailableException e ) { throw new RuntimeException ( String . format ( "No UFS information for %s for mount Id %d, we should never reach here" , uri , info . getMountId ( ) ) , e ) ; } return new Resolution ( resolvedUri , ufsClient , info . getOptions ( ) . getShared ( ) , info . getMountId ( ) ) ; } return new Resolution ( uri , null , false , IdUtils . INVALID_MOUNT_ID ) ; } }
Resolves the given Alluxio path . If the given Alluxio path is nested under a mount point the resolution maps the Alluxio path to the corresponding UFS path . Otherwise the resolution is a no - op .
12,575
/**
 * Verifies that a write is allowed at the given Alluxio path by checking whether its
 * governing mount point is read-only; throws AccessControlException if it is.
 */
public void checkUnderWritableMountPoint ( AlluxioURI alluxioUri ) throws InvalidPathException , AccessControlException { try ( LockResource r = new LockResource ( mReadLock ) ) { String mountPoint = getMountPoint ( alluxioUri ) ; MountInfo mountInfo = mState . getMountTable ( ) . get ( mountPoint ) ; if ( mountInfo . getOptions ( ) . getReadOnly ( ) ) { throw new AccessControlException ( ExceptionMessage . MOUNT_READONLY , alluxioUri , mountPoint ) ; } } }
Checks to see if a write operation is allowed for the specified Alluxio path by determining if it is under a readonly mount point .
12,576
/**
 * Schedules one execution of the heartbeat: under the lock, marks it scheduled, wakes a
 * thread waiting in tick(), and deregisters this timer from the scheduler.
 * Throws IllegalStateException if called again before the pending tick is consumed.
 */
protected void schedule ( ) { try ( LockResource r = new LockResource ( mLock ) ) { Preconditions . checkState ( ! mScheduled , "Called schedule twice without waiting for any ticks" ) ; mScheduled = true ; mTickCondition . signal ( ) ; HeartbeatScheduler . removeTimer ( this ) ; } }
Schedules execution of the heartbeat .
12,577
/**
 * Blocks until the heartbeat is scheduled: registers this timer with the scheduler,
 * then awaits the condition until schedule() sets the flag, and clears it before
 * returning. The while-loop guards against spurious wakeups.
 */
public void tick ( ) throws InterruptedException { try ( LockResource r = new LockResource ( mLock ) ) { HeartbeatScheduler . addTimer ( this ) ; while ( ! mScheduled ) { mTickCondition . await ( ) ; } mScheduled = false ; } }
Waits until the heartbeat is scheduled for execution .
12,578
/**
 * Tests whether any directory view in any tier has marked the given block to be
 * moved out of its current location.
 *
 * @param blockId the id of the block to check
 * @return true if the block is marked to move out in this view, false otherwise
 */
public boolean isBlockMarked(long blockId) {
  return mTierViews.stream()
      .flatMap(tier -> tier.getDirViews().stream())
      .anyMatch(dir -> dir.isMarkedToMoveOut(blockId));
}
Tests if the block is marked to move out of its current dir in this view .
12,579
/**
 * Returns the storage tier view one ordinal below the given one, or null when the
 * given tier is already the last.
 *
 * @param tierView the current tier view
 * @return the next tier view, or null if none exists
 */
public StorageTierView getNextTier(StorageTierView tierView) {
  int next = tierView.getTierViewOrdinal() + 1;
  return next < mTierViews.size() ? mTierViews.get(next) : null;
}
Gets the next storage tier view .
12,580
/**
 * Sends the UFS-fallback init request: tells the server to start writing the block at
 * offset 0 while recording that {@code pos} bytes already live in the block store.
 * Only valid for UFS_FALLBACK_BLOCK requests (checked); also advances mPosToQueue.
 */
public void writeFallbackInitRequest ( long pos ) throws IOException { Preconditions . checkState ( mPartialRequest . getType ( ) == RequestType . UFS_FALLBACK_BLOCK ) ; Protocol . CreateUfsBlockOptions ufsBlockOptions = mPartialRequest . getCreateUfsBlockOptions ( ) . toBuilder ( ) . setBytesInBlockStore ( pos ) . build ( ) ; WriteRequest writeRequest = WriteRequest . newBuilder ( ) . setCommand ( mPartialRequest . toBuilder ( ) . setOffset ( 0 ) . setCreateUfsBlockOptions ( ufsBlockOptions ) ) . build ( ) ; mPosToQueue = pos ; mStream . send ( writeRequest , mDataTimeoutMs ) ; }
Notifies the server UFS fallback endpoint to start writing a new block by resuming the given number of bytes from block store .
12,581
/**
 * Sets the file blocks on tier.
 *
 * @param fileBlocksOnTier the per-tier file block info to store
 * @return this object, for chaining
 */
public WorkerWebUIBlockInfo setFileBlocksOnTier(
    List<ImmutablePair<String, List<UIFileBlockInfo>>> fileBlocksOnTier) {
  // Parameter renamed from "FileBlocksOnTier" to follow lowerCamelCase; Java parameter
  // names are not part of the caller-visible interface, so this is fully compatible.
  mFileBlocksOnTier = fileBlocksOnTier;
  return this;
}
Sets file blocks on tier .
12,582
/**
 * Starts metric sinks from the given configuration (public for unit tests). No-op if
 * sinks were already started. For each "sink.*" sub-config with a "class" property,
 * reflectively instantiates the sink with (Properties, MetricRegistry), starts it, and
 * records it; instantiation failures are logged and skipped.
 */
public static synchronized void startSinksFromConfig ( MetricsConfig config ) { if ( sSinks != null ) { LOG . info ( "Sinks have already been started." ) ; return ; } LOG . info ( "Starting sinks with config: {}." , config ) ; sSinks = new ArrayList < > ( ) ; Map < String , Properties > sinkConfigs = MetricsConfig . subProperties ( config . getProperties ( ) , SINK_REGEX ) ; for ( Map . Entry < String , Properties > entry : sinkConfigs . entrySet ( ) ) { String classPath = entry . getValue ( ) . getProperty ( "class" ) ; if ( classPath != null ) { LOG . info ( "Starting sink {}." , classPath ) ; try { Sink sink = ( Sink ) Class . forName ( classPath ) . getConstructor ( Properties . class , MetricRegistry . class ) . newInstance ( entry . getValue ( ) , METRIC_REGISTRY ) ; sink . start ( ) ; sSinks . add ( sink ) ; } catch ( Exception e ) { LOG . error ( "Sink class {} cannot be instantiated" , classPath , e ) ; } } } }
Starts sinks from a given metrics configuration . This is made public for unit test .
12,583
/**
 * Stops every started sink and clears the sink list so sinks can be started again.
 * Safe to call when no sinks were started.
 */
public static synchronized void stopSinks() {
  if (sSinks != null) {
    sSinks.forEach(Sink::stop);
  }
  sSinks = null;
}
Stops all the sinks .
12,584
/**
 * Converts a simple metric name into its fully-qualified form based on the current
 * process type (client/master/proxy/worker/job master/job worker), delegating to the
 * per-instance naming helper. Throws IllegalStateException for an unknown type.
 */
public static String getMetricName ( String name ) { switch ( CommonUtils . PROCESS_TYPE . get ( ) ) { case CLIENT : return getClientMetricName ( name ) ; case MASTER : return getMasterMetricName ( name ) ; case PROXY : return getProxyMetricName ( name ) ; case WORKER : return getWorkerMetricName ( name ) ; case JOB_MASTER : return getJobMasterMetricName ( name ) ; case JOB_WORKER : return getJobWorkerMetricName ( name ) ; default : throw new IllegalStateException ( "Unknown process type" ) ; } }
Converts a simple string to a qualified metric name based on the process type .
12,585
/**
 * Builds (and caches) the registry name for a master metric.
 * The pattern is "Master.metricName".
 *
 * @param name the simple metric name
 * @return the fully qualified, cached metric name
 */
private static String getMasterMetricName(String name) {
  // Plain get() first: the common cached case avoids computeIfAbsent's lambda overhead.
  String cached = CACHED_METRICS.get(name);
  return cached != null
      ? cached
      : CACHED_METRICS.computeIfAbsent(name, n -> InstanceType.MASTER.toString() + "." + name);
}
Builds metric registry names for master instance . The pattern is instance . metricName .
12,586
/**
 * Builds (and caches) the registry name for a worker metric.
 * The pattern is "Worker.uniqueId.metricName".
 *
 * @param name the simple metric name
 * @return the fully qualified, cached metric name
 */
private static String getWorkerMetricName(String name) {
  // Plain get() first: the common cached case avoids computeIfAbsent's lambda overhead.
  String cached = CACHED_METRICS.get(name);
  return cached != null
      ? cached
      : CACHED_METRICS.computeIfAbsent(name,
          n -> getMetricNameWithUniqueId(InstanceType.WORKER, name));
}
Builds metric registry name for worker instance . The pattern is instance . uniqueId . metricName .
12,587
/**
 * Builds (and caches) the registry name for a client metric.
 * The pattern is "Client.uniqueId.metricName".
 *
 * @param name the simple metric name
 * @return the fully qualified, cached metric name
 */
private static String getClientMetricName(String name) {
  // Plain get() first: the common cached case avoids computeIfAbsent's lambda overhead.
  String cached = CACHED_METRICS.get(name);
  return cached != null
      ? cached
      : CACHED_METRICS.computeIfAbsent(name,
          n -> getMetricNameWithUniqueId(InstanceType.CLIENT, name));
}
Builds metric registry name for client instance . The pattern is instance . uniqueId . metricName .
12,588
/**
 * Builds the registry name for a job master metric.
 * The pattern is "JobMaster.metricName".
 *
 * @param name the simple metric name
 * @return the fully qualified metric name
 */
private static String getJobMasterMetricName(String name) {
  // String concatenation of the enum uses its toString(), matching Joiner's output.
  return InstanceType.JOB_MASTER + "." + name;
}
Builds metric registry names for the job master instance . The pattern is instance . metricName .
12,589
/**
 * Checks that the polling period is not smaller than the minimal polling period
 * (1 second, per MINIMAL_POLL_UNIT / MINIMAL_POLL_PERIOD).
 *
 * @param pollUnit the time unit of the polling period
 * @param pollPeriod the polling period value
 * @throws IllegalArgumentException when the period is below the minimum
 */
public static void checkMinimalPollingPeriod(TimeUnit pollUnit, int pollPeriod)
    throws IllegalArgumentException {
  int period = (int) MINIMAL_POLL_UNIT.convert(pollPeriod, pollUnit);
  // Guava Preconditions only substitutes %s placeholders; the previous %d specifiers
  // were emitted literally in the error message.
  Preconditions.checkArgument(period >= MINIMAL_POLL_PERIOD,
      "Polling period %s %s is below the minimal polling period", pollPeriod, pollUnit);
}
Checks if the poll period is smaller than the minimal poll period , which is 1 second .
12,590
/**
 * Strips the instance prefix (and, for non-master instances, the host/uniqueId segment)
 * from a dotted metric name and rejoins the remaining segments with dots.
 * Segments are removed by nulling array slots and joining with skipNulls().
 * Throws IllegalArgumentException for a name with fewer than two segments.
 */
public static String stripInstanceAndHost ( String metricsName ) { String [ ] pieces = metricsName . split ( "\\." ) ; Preconditions . checkArgument ( pieces . length > 1 , "Incorrect metrics name: %s." , metricsName ) ; if ( ! pieces [ 0 ] . equals ( MetricsSystem . InstanceType . MASTER . toString ( ) ) ) { pieces [ 1 ] = null ; } pieces [ 0 ] = null ; return Joiner . on ( "." ) . skipNulls ( ) . join ( pieces ) ; }
Removes the instance and host from the given metric name returning the result .
12,591
/**
 * Registers the gauge under the given name unless a gauge with that name already
 * exists in the registry; synchronized to make check-then-register atomic.
 *
 * @param <T> the gauge's value type
 * @param name the registry name for the gauge
 * @param metric the gauge to register
 */
public static synchronized <T> void registerGaugeIfAbsent(String name, Gauge<T> metric) {
  boolean alreadyRegistered = METRIC_REGISTRY.getGauges().containsKey(name);
  if (alreadyRegistered) {
    return;
  }
  METRIC_REGISTRY.register(name, metric);
}
Registers a gauge if it has not been registered .
12,592
/**
 * Resets every registered counter back to 0 by decrementing it by its current count.
 * Intended for testing only.
 */
public static void resetAllCounters() {
  for (Counter counter : METRIC_REGISTRY.getCounters().values()) {
    counter.dec(counter.getCount());
  }
}
Resets all the counters to 0 for testing .
12,593
/**
 * Creates a child file's access ACL from this default ACL, masked by the umask.
 * Without extended entries: mode = parent default mode AND umask.
 * With extended entries: copies them, masks the mask entry with the umask's group bits,
 * and ANDs only the owner/other bits with the umask (group bits come via the mask).
 * NOTE(review): {@code umask} is auto-unboxed — a null argument throws NPE.
 */
public AccessControlList generateChildFileACL ( Short umask ) { Mode defaultMode = new Mode ( umask ) ; AccessControlList acl = new AccessControlList ( ) ; acl . mOwningUser = mOwningUser ; acl . mOwningGroup = mOwningGroup ; acl . mMode = mMode ; if ( mExtendedEntries == null ) { acl . mExtendedEntries = null ; acl . mMode = Mode . and ( new Mode ( mMode ) , defaultMode ) . toShort ( ) ; } else { acl . mExtendedEntries = new ExtendedACLEntries ( mExtendedEntries ) ; AclActions mask = acl . mExtendedEntries . getMask ( ) ; AclActions groupAction = new AclActions ( ) ; groupAction . updateByModeBits ( defaultMode . getGroupBits ( ) ) ; mask . mask ( groupAction ) ; Mode updateMode = new Mode ( mMode ) ; updateMode . setOwnerBits ( updateMode . getOwnerBits ( ) . and ( defaultMode . getOwnerBits ( ) ) ) ; updateMode . setOtherBits ( updateMode . getOtherBits ( ) . and ( defaultMode . getOtherBits ( ) ) ) ; acl . mMode = updateMode . toShort ( ) ; } return acl ; }
Creates a child file 's access ACL based on the default ACL .
12,594
/**
 * Creates a child directory's (access ACL, default ACL) pair from this default ACL.
 * The access ACL is produced by generateChildFileACL(umask); the default ACL copies
 * this ACL's owner/group/mode and extended entries unmasked, and is marked non-empty.
 */
public Pair < AccessControlList , DefaultAccessControlList > generateChildDirACL ( Short umask ) { AccessControlList acl = generateChildFileACL ( umask ) ; DefaultAccessControlList dAcl = new DefaultAccessControlList ( acl ) ; dAcl . setEmpty ( false ) ; dAcl . mOwningUser = mOwningUser ; dAcl . mOwningGroup = mOwningGroup ; dAcl . mMode = mMode ; if ( mExtendedEntries == null ) { dAcl . mExtendedEntries = null ; } else { dAcl . mExtendedEntries = new ExtendedACLEntries ( mExtendedEntries ) ; } return new Pair < > ( acl , dAcl ) ; }
Creates a child directory s access ACL and default ACL based on the default ACL .
12,595
/**
 * Registers a JVM shutdown hook that stops the given process when the JVM receives a
 * termination signal; any failure to stop is logged rather than propagated.
 * Note: {@code Process} here is the project's process abstraction (it has stop()),
 * not java.lang.Process.
 *
 * @param process the process to stop on shutdown
 */
public static void stopProcessOnShutdown(final Process process) {
  Thread hook = new Thread(() -> {
    try {
      process.stop();
    } catch (Throwable t) {
      LOG.error("Failed to stop process", t);
    }
  }, "alluxio-process-shutdown-hook");
  Runtime.getRuntime().addShutdownHook(hook);
}
Adds a shutdown hook that will be invoked when a signal is sent to this process .
12,596
/**
 * Writes a checkpoint of the database's content to the given output stream:
 * creates a fresh RocksDB checkpoint directory (deleting any stale one first),
 * streams it out as a gzipped tarball tagged with the ROCKS checkpoint type,
 * then deletes the checkpoint directory.
 *
 * @param output the stream to write the checkpoint to
 * @throws IOException if the RocksDB checkpoint or tarball creation fails
 * @throws InterruptedException if interrupted while writing
 */
public synchronized void writeToCheckpoint(OutputStream output)
    throws IOException, InterruptedException {
  LOG.info("Creating rocksdb checkpoint at {}", mDbCheckpointPath);
  long startNano = System.nanoTime();
  CheckpointOutputStream out = new CheckpointOutputStream(output, CheckpointType.ROCKS);
  try {
    FileUtils.deletePathRecursively(mDbCheckpointPath);
    mCheckpoint.createCheckpoint(mDbCheckpointPath);
  } catch (RocksDBException e) {
    throw new IOException(e);
  }
  LOG.info("Checkpoint complete, creating tarball");
  TarUtils.writeTarGz(Paths.get(mDbCheckpointPath), out);
  // Use the named constant instead of the magic literal 1_000_000, matching
  // restoreFromCheckpoint in this class.
  LOG.info("Completed rocksdb checkpoint in {}ms",
      (System.nanoTime() - startNano) / Constants.MS_NANO);
  FileUtils.deletePathRecursively(mDbCheckpointPath);
}
Writes a checkpoint of the database s content to the given output stream .
12,597
/**
 * Restores the database from a ROCKS-type checkpoint stream: stops the running DB,
 * wipes the DB directory, unpacks the gzipped tarball into it, and recreates the DB.
 * Throws IllegalStateException for a non-ROCKS checkpoint type.
 */
public synchronized void restoreFromCheckpoint ( CheckpointInputStream input ) throws IOException { LOG . info ( "Restoring rocksdb from checkpoint" ) ; long startNano = System . nanoTime ( ) ; Preconditions . checkState ( input . getType ( ) == CheckpointType . ROCKS , "Unexpected checkpoint type in RocksStore: " + input . getType ( ) ) ; stopDb ( ) ; FileUtils . deletePathRecursively ( mDbPath ) ; TarUtils . readTarGz ( Paths . get ( mDbPath ) , input ) ; try { createDb ( ) ; } catch ( RocksDBException e ) { throw new IOException ( e ) ; } LOG . info ( "Restored rocksdb checkpoint in {}ms" , ( System . nanoTime ( ) - startNano ) / Constants . MS_NANO ) ; }
Restores the database from a checkpoint .
12,598
/**
 * Inserts a path into the trie, creating a node per "/"-separated component,
 * and marks the final node as terminal.
 *
 * @param path the path to insert
 * @return the terminal node for the inserted path
 */
public TrieNode insert(String path) {
  TrieNode node = this;
  for (String component : path.split("/")) {
    // Equivalent to containsKey/put/get since the map never stores null values.
    node = node.mChildren.computeIfAbsent(component, k -> new TrieNode());
  }
  node.mIsTerminal = true;
  return node;
}
Inserts a path into the trie .
12,599
/**
 * Walks the trie along the "/"-separated components of the path, collecting every
 * terminal node encountered (including this root if terminal), and stops at the
 * first missing component.
 *
 * @param path the path to search along
 * @return the terminal nodes found on the traversal, shallowest first
 */
public List<TrieNode> search(String path) {
  List<TrieNode> matched = new ArrayList<>();
  TrieNode node = this;
  if (node.mIsTerminal) {
    matched.add(node);
  }
  for (String component : path.split("/")) {
    // get() returning null is equivalent to the original containsKey check,
    // since the children map never stores null values.
    TrieNode child = node.mChildren.get(component);
    if (child == null) {
      break;
    }
    node = child;
    if (node.mIsTerminal) {
      matched.add(node);
    }
  }
  return matched;
}
Traverses the trie along the path components until the traversal cannot proceed any more .