idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
12,700 | private static boolean isValidAddress ( InetAddress address , int timeoutMs ) throws IOException { return ! address . isAnyLocalAddress ( ) && ! address . isLinkLocalAddress ( ) && ! address . isLoopbackAddress ( ) && address . isReachable ( timeoutMs ) && ( address instanceof Inet4Address ) ; } | Tests if the address is externally resolvable . The address must not be a wildcard , link - local , loopback , non - IPv4 , or otherwise unreachable address . |
12,701 | public static String resolveIpAddress ( String hostname ) throws UnknownHostException { Preconditions . checkNotNull ( hostname , "hostname" ) ; Preconditions . checkArgument ( ! hostname . isEmpty ( ) , "Cannot resolve IP address for empty hostname" ) ; return InetAddress . getByName ( hostname ) . getHostAddress ( ) ; } | Resolves a given hostname to an IP address . |
12,702 | public static InetSocketAddress getRpcPortSocketAddress ( WorkerNetAddress netAddress ) { String host = netAddress . getHost ( ) ; int port = netAddress . getRpcPort ( ) ; return new InetSocketAddress ( host , port ) ; } | Extracts rpcPort InetSocketAddress from Alluxio representation of network address . |
12,703 | public static SocketAddress getDataPortSocketAddress ( WorkerNetAddress netAddress , AlluxioConfiguration conf ) { SocketAddress address ; if ( NettyUtils . isDomainSocketSupported ( netAddress , conf ) ) { address = new DomainSocketAddress ( netAddress . getDomainSocketPath ( ) ) ; } else { String host = netAddress . getHost ( ) ; int port = netAddress . getDataPort ( ) ; address = new InetSocketAddress ( host , port ) ; } return address ; } | Extracts dataPort socket address from Alluxio representation of network address . |
12,704 | public static void pingService ( InetSocketAddress address , alluxio . grpc . ServiceType serviceType , AlluxioConfiguration conf ) throws AlluxioStatusException { Preconditions . checkNotNull ( address , "address" ) ; Preconditions . checkNotNull ( serviceType , "serviceType" ) ; GrpcChannel channel = GrpcChannelBuilder . newBuilder ( new GrpcServerAddress ( address ) , conf ) . build ( ) ; ServiceVersionClientServiceGrpc . ServiceVersionClientServiceBlockingStub versionClient = ServiceVersionClientServiceGrpc . newBlockingStub ( channel ) ; versionClient . getServiceVersion ( GetServiceVersionPRequest . newBuilder ( ) . setServiceType ( serviceType ) . build ( ) ) ; channel . shutdown ( ) ; } | Test if the input address is serving an Alluxio service . This method make use of the gRPC protocol for performing service communication . |
12,705 | public synchronized void regenerateReport ( ) { Map < PropertyKey , Map < Optional < String > , List < String > > > confMap = generateConfMap ( ) ; Map < Scope , List < InconsistentProperty > > confErrors = new HashMap < > ( ) ; Map < Scope , List < InconsistentProperty > > confWarns = new HashMap < > ( ) ; for ( Map . Entry < PropertyKey , Map < Optional < String > , List < String > > > entry : confMap . entrySet ( ) ) { if ( entry . getValue ( ) . size ( ) >= 2 ) { PropertyKey key = entry . getKey ( ) ; InconsistentProperty inconsistentProperty = new InconsistentProperty ( ) . setName ( key . getName ( ) ) . setValues ( entry . getValue ( ) ) ; Scope scope = key . getScope ( ) . equals ( Scope . ALL ) ? Scope . SERVER : key . getScope ( ) ; if ( entry . getKey ( ) . getConsistencyLevel ( ) . equals ( ConsistencyCheckLevel . ENFORCE ) ) { confErrors . putIfAbsent ( scope , new ArrayList < > ( ) ) ; confErrors . get ( scope ) . add ( inconsistentProperty ) ; } else { confWarns . putIfAbsent ( scope , new ArrayList < > ( ) ) ; confWarns . get ( scope ) . add ( inconsistentProperty ) ; } } } ConfigStatus status = confErrors . values ( ) . stream ( ) . anyMatch ( a -> a . size ( ) > 0 ) ? ConfigStatus . FAILED : confWarns . values ( ) . stream ( ) . anyMatch ( a -> a . size ( ) > 0 ) ? ConfigStatus . WARN : ConfigStatus . PASSED ; if ( ! status . equals ( mConfigCheckReport . getConfigStatus ( ) ) ) { logConfigReport ( ) ; } mConfigCheckReport = new ConfigCheckReport ( confErrors , confWarns , status ) ; } | Checks the server - side configurations and records the check results . |
12,706 | public synchronized void logConfigReport ( ) { ConfigStatus reportStatus = mConfigCheckReport . getConfigStatus ( ) ; if ( reportStatus . equals ( ConfigStatus . PASSED ) ) { LOG . info ( CONSISTENT_CONFIGURATION_INFO ) ; } else if ( reportStatus . equals ( ConfigStatus . WARN ) ) { LOG . warn ( "{}\nWarnings: {}" , INCONSISTENT_CONFIGURATION_INFO , mConfigCheckReport . getConfigWarns ( ) . values ( ) . stream ( ) . map ( Object :: toString ) . limit ( LOG_CONF_SIZE ) . collect ( Collectors . joining ( ", " ) ) ) ; } else { LOG . error ( "{}\nErrors: {}\nWarnings: {}" , INCONSISTENT_CONFIGURATION_INFO , mConfigCheckReport . getConfigErrors ( ) . values ( ) . stream ( ) . map ( Object :: toString ) . limit ( LOG_CONF_SIZE ) . collect ( Collectors . joining ( ", " ) ) , mConfigCheckReport . getConfigWarns ( ) . values ( ) . stream ( ) . map ( Object :: toString ) . limit ( LOG_CONF_SIZE ) . collect ( Collectors . joining ( ", " ) ) ) ; } } | Logs the configuration check report information . |
12,707 | private void fillConfMap ( Map < PropertyKey , Map < Optional < String > , List < String > > > targetMap , Map < Address , List < ConfigRecord > > recordMap ) { for ( Map . Entry < Address , List < ConfigRecord > > record : recordMap . entrySet ( ) ) { Address address = record . getKey ( ) ; String addressStr = String . format ( "%s:%s" , address . getHost ( ) , address . getRpcPort ( ) ) ; for ( ConfigRecord conf : record . getValue ( ) ) { PropertyKey key = conf . getKey ( ) ; if ( key . getConsistencyLevel ( ) == ConsistencyCheckLevel . IGNORE ) { continue ; } Optional < String > value = conf . getValue ( ) ; targetMap . putIfAbsent ( key , new HashMap < > ( ) ) ; Map < Optional < String > , List < String > > values = targetMap . get ( key ) ; values . putIfAbsent ( value , new ArrayList < > ( ) ) ; values . get ( value ) . add ( addressStr ) ; } } } | Fills the configuration map . |
12,708 | private void updateStream ( ) throws IOException { if ( mBlockInStream != null && mBlockInStream . remaining ( ) > 0 ) { return ; } if ( mBlockInStream != null && mBlockInStream . remaining ( ) == 0 ) { closeBlockInStream ( mBlockInStream ) ; } long blockId = mStatus . getBlockIds ( ) . get ( Math . toIntExact ( mPosition / mBlockSize ) ) ; mBlockInStream = mBlockStore . getInStream ( blockId , mOptions , mFailedWorkers ) ; long offset = mPosition % mBlockSize ; mBlockInStream . seek ( offset ) ; } | Initializes the underlying block stream if necessary . This method must be called before reading from mBlockInStream . |
12,709 | private void triggerAsyncCaching ( BlockInStream stream ) throws IOException { boolean cache = ReadType . fromProto ( mOptions . getOptions ( ) . getReadType ( ) ) . isCache ( ) ; boolean overReplicated = mStatus . getReplicationMax ( ) > 0 && mStatus . getFileBlockInfos ( ) . get ( ( int ) ( getPos ( ) / mBlockSize ) ) . getBlockInfo ( ) . getLocations ( ) . size ( ) >= mStatus . getReplicationMax ( ) ; cache = cache && ! overReplicated ; WorkerNetAddress dataSource = stream . getAddress ( ) ; long blockId = stream . getId ( ) ; if ( cache && ( mLastBlockIdCached != blockId ) ) { WorkerNetAddress worker ; if ( mPassiveCachingEnabled && mContext . hasLocalWorker ( ) ) { worker = mContext . getLocalWorker ( ) ; } else { worker = dataSource ; } try { long blockLength = mOptions . getBlockInfo ( blockId ) . getLength ( ) ; AsyncCacheRequest request = AsyncCacheRequest . newBuilder ( ) . setBlockId ( blockId ) . setLength ( blockLength ) . setOpenUfsBlockOptions ( mOptions . getOpenUfsBlockOptions ( blockId ) ) . setSourceHost ( dataSource . getHost ( ) ) . setSourcePort ( dataSource . getDataPort ( ) ) . build ( ) ; BlockWorkerClient blockWorker = mContext . acquireBlockWorkerClient ( worker ) ; try { blockWorker . asyncCache ( request ) ; mLastBlockIdCached = blockId ; } finally { mContext . releaseBlockWorkerClient ( worker , blockWorker ) ; } } catch ( Exception e ) { LOG . warn ( "Failed to complete async cache request for block {} at worker {}: {}" , blockId , worker , e . getMessage ( ) ) ; } } } | Send an async cache request to a worker based on read type and passive cache options . |
12,710 | private long getTimeToNextBackup ( ) { LocalDateTime now = LocalDateTime . now ( Clock . systemUTC ( ) ) ; DateTimeFormatter formatter = DateTimeFormatter . ofPattern ( "H:mm" ) ; LocalTime backupTime = LocalTime . parse ( ServerConfiguration . get ( PropertyKey . MASTER_DAILY_BACKUP_TIME ) , formatter ) ; LocalDateTime nextBackupTime = now . withHour ( backupTime . getHour ( ) ) . withMinute ( backupTime . getMinute ( ) ) ; if ( nextBackupTime . isBefore ( now ) ) { nextBackupTime = nextBackupTime . plusDays ( 1 ) ; } return ChronoUnit . MILLIS . between ( now , nextBackupTime ) ; } | Gets the time gap between now and next backup time . |
12,711 | private void dailyBackup ( ) { try { BackupResponse resp = mMetaMaster . backup ( BackupPOptions . newBuilder ( ) . setTargetDirectory ( mBackupDir ) . setLocalFileSystem ( mIsLocal ) . build ( ) ) ; if ( mIsLocal ) { LOG . info ( "Successfully backed up journal to {} on master {}" , resp . getBackupUri ( ) , resp . getHostname ( ) ) ; } else { LOG . info ( "Successfully backed up journal to {}" , resp . getBackupUri ( ) ) ; } } catch ( Throwable t ) { LOG . error ( "Failed to execute daily backup at {}" , mBackupDir , t ) ; return ; } try { deleteStaleBackups ( ) ; } catch ( Throwable t ) { LOG . error ( "Failed to delete outdated backup files at {}" , mBackupDir , t ) ; } } | The daily backup task . |
12,712 | private void deleteStaleBackups ( ) throws Exception { UfsStatus [ ] statuses = mUfs . listStatus ( mBackupDir ) ; if ( statuses . length <= mRetainedFiles ) { return ; } TreeMap < Instant , String > timeToFile = new TreeMap < > ( ( a , b ) -> ( a . isBefore ( b ) ? - 1 : a . isAfter ( b ) ? 1 : 0 ) ) ; for ( UfsStatus status : statuses ) { if ( status . isFile ( ) ) { Matcher matcher = BackupManager . BACKUP_FILE_PATTERN . matcher ( status . getName ( ) ) ; if ( matcher . matches ( ) ) { timeToFile . put ( Instant . ofEpochMilli ( Long . parseLong ( matcher . group ( 1 ) ) ) , status . getName ( ) ) ; } } } int toDeleteFileNum = timeToFile . size ( ) - mRetainedFiles ; if ( toDeleteFileNum <= 0 ) { return ; } for ( int i = 0 ; i < toDeleteFileNum ; i ++ ) { String toDeleteFile = PathUtils . concatPath ( mBackupDir , timeToFile . pollFirstEntry ( ) . getValue ( ) ) ; mUfs . deleteExistingFile ( toDeleteFile ) ; } LOG . info ( "Deleted {} stale metadata backup files at {}" , toDeleteFileNum , mBackupDir ) ; } | Deletes stale backup files to avoid consuming too many spaces . |
12,713 | public void submitRequest ( AsyncCacheRequest request ) { ASYNC_CACHE_REQUESTS . inc ( ) ; long blockId = request . getBlockId ( ) ; long blockLength = request . getLength ( ) ; if ( mPendingRequests . putIfAbsent ( blockId , request ) != null ) { ASYNC_CACHE_DUPLICATE_REQUESTS . inc ( ) ; return ; } try { mAsyncCacheExecutor . submit ( ( ) -> { boolean result = false ; try { long lockId = mBlockWorker . lockBlockNoException ( Sessions . ASYNC_CACHE_SESSION_ID , blockId ) ; if ( lockId != BlockLockManager . INVALID_LOCK_ID ) { try { mBlockWorker . unlockBlock ( lockId ) ; } catch ( BlockDoesNotExistException e ) { LOG . error ( "Failed to unlock block on async caching. We should never reach here" , e ) ; } ASYNC_CACHE_DUPLICATE_REQUESTS . inc ( ) ; return ; } Protocol . OpenUfsBlockOptions openUfsBlockOptions = request . getOpenUfsBlockOptions ( ) ; boolean isSourceLocal = mLocalWorkerHostname . equals ( request . getSourceHost ( ) ) ; if ( isSourceLocal ) { ASYNC_CACHE_UFS_BLOCKS . inc ( ) ; result = cacheBlockFromUfs ( blockId , blockLength , openUfsBlockOptions ) ; } else { ASYNC_CACHE_REMOTE_BLOCKS . inc ( ) ; InetSocketAddress sourceAddress = new InetSocketAddress ( request . getSourceHost ( ) , request . getSourcePort ( ) ) ; result = cacheBlockFromRemoteWorker ( blockId , blockLength , sourceAddress , openUfsBlockOptions ) ; } LOG . debug ( "Result of async caching block {}: {}" , blockId , result ) ; } catch ( Exception e ) { LOG . warn ( "Async cache request failed.\n{}\nError: {}" , request , e ) ; } finally { if ( result ) { ASYNC_CACHE_SUCCEEDED_BLOCKS . inc ( ) ; } else { ASYNC_CACHE_FAILED_BLOCKS . inc ( ) ; } mPendingRequests . remove ( blockId ) ; } } ) ; } catch ( Exception e ) { LOG . warn ( "Failed to submit async cache request.\n{}\nError: {}" , request , e ) ; ASYNC_CACHE_FAILED_BLOCKS . inc ( ) ; mPendingRequests . remove ( blockId ) ; } } | Handles a request to cache a block asynchronously . This is a non - blocking call . |
12,714 | private boolean cacheBlockFromUfs ( long blockId , long blockSize , Protocol . OpenUfsBlockOptions openUfsBlockOptions ) { try { if ( ! mBlockWorker . openUfsBlock ( Sessions . ASYNC_CACHE_SESSION_ID , blockId , openUfsBlockOptions ) ) { LOG . warn ( "Failed to async cache block {} from UFS on opening the block" , blockId ) ; return false ; } } catch ( BlockAlreadyExistsException e ) { return true ; } try ( BlockReader reader = mBlockWorker . readUfsBlock ( Sessions . ASYNC_CACHE_SESSION_ID , blockId , 0 ) ) { long offset = 0 ; while ( offset < blockSize ) { long bufferSize = Math . min ( 8 * Constants . MB , blockSize - offset ) ; reader . read ( offset , bufferSize ) ; offset += bufferSize ; } } catch ( AlluxioException | IOException e ) { LOG . warn ( "Failed to async cache block {} from UFS on copying the block: {}" , blockId , e ) ; return false ; } finally { try { mBlockWorker . closeUfsBlock ( Sessions . ASYNC_CACHE_SESSION_ID , blockId ) ; } catch ( AlluxioException | IOException ee ) { LOG . warn ( "Failed to close UFS block {}: {}" , blockId , ee ) ; return false ; } } return true ; } | Caches the block via the local worker to read from UFS . |
12,715 | public void flush ( final long targetCounter ) throws IOException , JournalClosedException { if ( targetCounter <= mFlushCounter . get ( ) ) { return ; } FlushTicket ticket = new FlushTicket ( targetCounter ) ; try ( LockResource lr = new LockResource ( mTicketLock ) ) { mTicketList . add ( ticket ) ; } try { mFlushSemaphore . release ( ) ; ticket . waitCompleted ( ) ; } catch ( InterruptedException ie ) { throw new AlluxioStatusException ( Status . CANCELLED . withCause ( ie ) ) ; } catch ( Throwable e ) { if ( e instanceof IOException ) { throw ( IOException ) e ; } if ( e instanceof JournalClosedException ) { throw ( JournalClosedException ) e ; } throw new AlluxioStatusException ( Status . INTERNAL . withCause ( e ) ) ; } finally { mFlushSemaphore . tryAcquire ( ) ; } } | Submits a ticket to flush thread and waits until ticket is served . |
12,716 | public void updateFromEntry ( UpdateInodeFileEntry entry ) { if ( entry . hasPersistJobId ( ) ) { setPersistJobId ( entry . getPersistJobId ( ) ) ; } if ( entry . hasReplicationMax ( ) ) { setReplicationMax ( entry . getReplicationMax ( ) ) ; } if ( entry . hasReplicationMin ( ) ) { setReplicationMin ( entry . getReplicationMin ( ) ) ; } if ( entry . hasTempUfsPath ( ) ) { setTempUfsPath ( entry . getTempUfsPath ( ) ) ; } if ( entry . hasBlockSizeBytes ( ) ) { setBlockSizeBytes ( entry . getBlockSizeBytes ( ) ) ; } if ( entry . hasCacheable ( ) ) { setCacheable ( entry . getCacheable ( ) ) ; } if ( entry . hasCompleted ( ) ) { setCompleted ( entry . getCompleted ( ) ) ; } if ( entry . hasLength ( ) ) { setLength ( entry . getLength ( ) ) ; } if ( entry . getSetBlocksCount ( ) > 0 ) { setBlockIds ( entry . getSetBlocksList ( ) ) ; } } | Updates this inode file s state from the given entry . |
12,717 | public static byte [ ] serialize ( Object obj ) throws IOException { if ( obj == null ) { return null ; } try ( ByteArrayOutputStream b = new ByteArrayOutputStream ( ) ) { try ( ObjectOutputStream o = new ObjectOutputStream ( b ) ) { o . writeObject ( obj ) ; } return b . toByteArray ( ) ; } } | Serializes an object into a byte array . When the object is null returns null . |
12,718 | public static Serializable deserialize ( byte [ ] bytes ) throws IOException , ClassNotFoundException { if ( bytes == null ) { return null ; } try ( ByteArrayInputStream b = new ByteArrayInputStream ( bytes ) ) { try ( ObjectInputStream o = new ObjectInputStream ( b ) ) { return ( Serializable ) o . readObject ( ) ; } } } | Deserializes a byte array into an object . When the bytes are null returns null . |
12,719 | private List < Map . Entry < Long , Double > > getSortedCRF ( ) { List < Map . Entry < Long , Double > > sortedCRF = new ArrayList < > ( mBlockIdToCRFValue . entrySet ( ) ) ; sortedCRF . sort ( Comparator . comparingDouble ( Entry :: getValue ) ) ; return sortedCRF ; } | Sorts all blocks in ascending order of CRF . |
12,720 | public static File [ ] listExtensions ( String extensionDir ) { File [ ] extensions = new File ( extensionDir ) . listFiles ( file -> file . getPath ( ) . toLowerCase ( ) . endsWith ( Constants . EXTENSION_JAR ) ) ; if ( extensions == null ) { return EMPTY_EXTENSIONS_LIST ; } return extensions ; } | List extension jars from the configured extensions directory . |
12,721 | public boolean belongsTo ( BlockStoreLocation location ) { boolean tierInRange = tierAlias ( ) . equals ( location . tierAlias ( ) ) || location . tierAlias ( ) . equals ( ANY_TIER ) ; boolean dirInRange = ( dir ( ) == location . dir ( ) ) || ( location . dir ( ) == ANY_DIR ) ; return tierInRange && dirInRange ; } | Returns whether this location belongs to the specific location . |
12,722 | public void stop ( ) throws Exception { for ( Connector connector : mServer . getConnectors ( ) ) { connector . stop ( ) ; } mServer . stop ( ) ; } | Shuts down the web server . |
12,723 | public void start ( ) { try { mServer . start ( ) ; LOG . info ( "{} started @ {}" , mServiceName , mAddress ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } | Starts the web server . |
12,724 | static final void eraseThreadLocals ( Thread thread ) { U . putObject ( thread , THREADLOCALS , null ) ; U . putObject ( thread , INHERITABLETHREADLOCALS , null ) ; } | Erases ThreadLocals by nulling out Thread maps . |
12,725 | public void awaitTermination ( boolean waitQuietPeriod ) { LOG . info ( "{}: Journal checkpointer shutdown has been initiated." , mMaster . getName ( ) ) ; mWaitQuietPeriod = waitQuietPeriod ; mShutdownInitiated = true ; synchronized ( mCheckpointingLock ) { if ( mCheckpointing ) { interrupt ( ) ; } } try { join ( ) ; LOG . info ( "{}: Journal shutdown complete" , mMaster . getName ( ) ) ; } catch ( InterruptedException e ) { LOG . error ( "{}: journal checkpointer shutdown is interrupted." , mMaster . getName ( ) , e ) ; throw new RuntimeException ( e ) ; } mStopped = true ; } | Initiates the shutdown of this checkpointer thread and also waits for it to finish . |
12,726 | private void maybeCheckpoint ( ) { if ( mShutdownInitiated ) { return ; } long nextSequenceNumber = mJournalReader . getNextSequenceNumber ( ) ; if ( nextSequenceNumber - mNextSequenceNumberToCheckpoint < mCheckpointPeriodEntries ) { return ; } try { mNextSequenceNumberToCheckpoint = mJournal . getNextSequenceNumberToCheckpoint ( ) ; } catch ( IOException e ) { LOG . warn ( "{}: Failed to get the next sequence number to checkpoint with error {}." , mMaster . getName ( ) , e . getMessage ( ) ) ; return ; } if ( nextSequenceNumber - mNextSequenceNumberToCheckpoint < mCheckpointPeriodEntries ) { return ; } writeCheckpoint ( nextSequenceNumber ) ; } | Creates a new checkpoint if necessary . |
12,727 | public void addTempBlockMeta ( TempBlockMeta tempBlockMeta ) throws WorkerOutOfSpaceException , BlockAlreadyExistsException { StorageDir dir = tempBlockMeta . getParentDir ( ) ; dir . addTempBlockMeta ( tempBlockMeta ) ; } | Adds a temp block . |
12,728 | public void cleanupSessionTempBlocks ( long sessionId , List < Long > tempBlockIds ) { for ( StorageTier tier : mTiers ) { for ( StorageDir dir : tier . getStorageDirs ( ) ) { dir . cleanupSessionTempBlocks ( sessionId , tempBlockIds ) ; } } } | Cleans up the metadata of the given temp block ids . |
12,729 | public BlockMeta getBlockMeta ( long blockId ) throws BlockDoesNotExistException { for ( StorageTier tier : mTiers ) { for ( StorageDir dir : tier . getStorageDirs ( ) ) { if ( dir . hasBlockMeta ( blockId ) ) { return dir . getBlockMeta ( blockId ) ; } } } throw new BlockDoesNotExistException ( ExceptionMessage . BLOCK_META_NOT_FOUND , blockId ) ; } | Gets the metadata of a block given its block id . |
12,730 | public List < TempBlockMeta > getSessionTempBlocks ( long sessionId ) { List < TempBlockMeta > sessionTempBlocks = new ArrayList < > ( ) ; for ( StorageTier tier : mTiers ) { for ( StorageDir dir : tier . getStorageDirs ( ) ) { sessionTempBlocks . addAll ( dir . getSessionTempBlocks ( sessionId ) ) ; } } return sessionTempBlocks ; } | Gets all the temporary blocks associated with a session ; an empty list is returned if the session has no temporary blocks . |
12,731 | public boolean hasBlockMeta ( long blockId ) { for ( StorageTier tier : mTiers ) { for ( StorageDir dir : tier . getStorageDirs ( ) ) { if ( dir . hasBlockMeta ( blockId ) ) { return true ; } } } return false ; } | Checks if the storage has a given block . |
12,732 | public BlockMeta moveBlockMeta ( BlockMeta blockMeta , TempBlockMeta tempBlockMeta ) throws BlockDoesNotExistException , WorkerOutOfSpaceException , BlockAlreadyExistsException { StorageDir srcDir = blockMeta . getParentDir ( ) ; StorageDir dstDir = tempBlockMeta . getParentDir ( ) ; srcDir . removeBlockMeta ( blockMeta ) ; BlockMeta newBlockMeta = new BlockMeta ( blockMeta . getBlockId ( ) , blockMeta . getBlockSize ( ) , dstDir ) ; dstDir . removeTempBlockMeta ( tempBlockMeta ) ; dstDir . addBlockMeta ( newBlockMeta ) ; return newBlockMeta ; } | Moves an existing block to another location currently hold by a temp block . |
12,733 | public void removeBlockMeta ( BlockMeta block ) throws BlockDoesNotExistException { StorageDir dir = block . getParentDir ( ) ; dir . removeBlockMeta ( block ) ; } | Removes the metadata of a specific block . |
12,734 | public void resizeTempBlockMeta ( TempBlockMeta tempBlockMeta , long newSize ) throws InvalidWorkerStateException { StorageDir dir = tempBlockMeta . getParentDir ( ) ; dir . resizeTempBlockMeta ( tempBlockMeta , newSize ) ; } | Modifies the size of a temp block . |
12,735 | public void start ( String channelId ) throws AlluxioStatusException { try { LOG . debug ( "Starting SASL handshake for ChannelId:{}" , channelId ) ; mRequestObserver . onNext ( mSaslHandshakeClientHandler . getInitialMessage ( channelId ) ) ; mAuthenticated . get ( mGrpcAuthTimeoutMs , TimeUnit . MILLISECONDS ) ; } catch ( SaslException se ) { throw new UnauthenticatedException ( se . getMessage ( ) , se ) ; } catch ( InterruptedException ie ) { Thread . currentThread ( ) . interrupt ( ) ; throw new UnavailableException ( ie . getMessage ( ) , ie ) ; } catch ( ExecutionException e ) { Throwable cause = ( e . getCause ( ) != null ) ? e . getCause ( ) : e ; if ( cause != null && cause instanceof StatusRuntimeException ) { StatusRuntimeException sre = ( StatusRuntimeException ) cause ; if ( sre . getStatus ( ) . getCode ( ) == Status . Code . UNIMPLEMENTED ) { throw new UnauthenticatedException ( "Authentication is disabled on target host." ) ; } throw AlluxioStatusException . fromStatusRuntimeException ( ( StatusRuntimeException ) cause ) ; } throw new UnknownException ( cause . getMessage ( ) , cause ) ; } catch ( TimeoutException e ) { throw new UnavailableException ( e ) ; } } | Starts authentication with the server and wait until completion . |
12,736 | public static String [ ] convertToNames ( UfsStatus [ ] children ) { if ( children == null ) { return null ; } String [ ] ret = new String [ children . length ] ; for ( int i = 0 ; i < children . length ; ++ i ) { ret [ i ] = children [ i ] . getName ( ) ; } return ret ; } | Converts an array of UFS file status to a listing result where each element in the array is a file or directory name . |
12,737 | public static void createMasters ( MasterRegistry registry , MasterContext context ) { List < Callable < Void > > callables = new ArrayList < > ( ) ; for ( final MasterFactory factory : alluxio . master . ServiceUtils . getMasterServiceLoader ( ) ) { callables . add ( ( ) -> { if ( factory . isEnabled ( ) ) { factory . create ( registry , context ) ; } return null ; } ) ; } try { CommonUtils . invokeAll ( callables , 10 * Constants . MINUTE_MS ) ; } catch ( Exception e ) { throw new RuntimeException ( "Failed to start masters" , e ) ; } } | Creates all the masters and registers them to the master registry . |
12,738 | public static StorageDirView selectDirWithRequestedSpace ( long bytesToBeAvailable , BlockStoreLocation location , BlockMetadataManagerView mManagerView ) { if ( location . equals ( BlockStoreLocation . anyTier ( ) ) ) { for ( StorageTierView tierView : mManagerView . getTierViews ( ) ) { for ( StorageDirView dirView : tierView . getDirViews ( ) ) { if ( dirView . getAvailableBytes ( ) >= bytesToBeAvailable ) { return dirView ; } } } return null ; } String tierAlias = location . tierAlias ( ) ; StorageTierView tierView = mManagerView . getTierView ( tierAlias ) ; if ( location . equals ( BlockStoreLocation . anyDirInTier ( tierAlias ) ) ) { for ( StorageDirView dirView : tierView . getDirViews ( ) ) { if ( dirView . getAvailableBytes ( ) >= bytesToBeAvailable ) { return dirView ; } } return null ; } StorageDirView dirView = tierView . getDirView ( location . dir ( ) ) ; return ( dirView . getAvailableBytes ( ) >= bytesToBeAvailable ) ? dirView : null ; } | Finds a directory in the given location range with capacity upwards of the given bound . |
12,739 | public void onNext ( OpenLocalBlockRequest request ) { RpcUtils . streamingRPCAndLog ( LOG , new RpcUtils . StreamingRpcCallable < OpenLocalBlockResponse > ( ) { public OpenLocalBlockResponse call ( ) throws Exception { Preconditions . checkState ( mRequest == null ) ; mRequest = request ; if ( mLockId == BlockLockManager . INVALID_LOCK_ID ) { mSessionId = IdUtils . createSessionId ( ) ; if ( mRequest . getPromote ( ) ) { try { mWorker . moveBlock ( mSessionId , mRequest . getBlockId ( ) , mStorageTierAssoc . getAlias ( 0 ) ) ; } catch ( BlockDoesNotExistException e ) { LOG . debug ( "Block {} to promote does not exist in Alluxio: {}" , mRequest . getBlockId ( ) , e . getMessage ( ) ) ; } catch ( Exception e ) { LOG . warn ( "Failed to promote block {}: {}" , mRequest . getBlockId ( ) , e . getMessage ( ) ) ; } } mLockId = mWorker . lockBlock ( mSessionId , mRequest . getBlockId ( ) ) ; mWorker . accessBlock ( mSessionId , mRequest . getBlockId ( ) ) ; } else { LOG . warn ( "Lock block {} without releasing previous block lock {}." , mRequest . getBlockId ( ) , mLockId ) ; throw new InvalidWorkerStateException ( ExceptionMessage . LOCK_NOT_RELEASED . getMessage ( mLockId ) ) ; } OpenLocalBlockResponse response = OpenLocalBlockResponse . newBuilder ( ) . setPath ( mWorker . readBlock ( mSessionId , mRequest . getBlockId ( ) , mLockId ) ) . build ( ) ; return response ; } public void exceptionCaught ( Throwable e ) { if ( mLockId != BlockLockManager . INVALID_LOCK_ID ) { try { mWorker . unlockBlock ( mLockId ) ; } catch ( BlockDoesNotExistException ee ) { LOG . error ( "Failed to unlock block {}." , mRequest . getBlockId ( ) , e ) ; } mLockId = BlockLockManager . INVALID_LOCK_ID ; } mResponseObserver . onError ( GrpcExceptionUtils . fromThrowable ( e ) ) ; } } , "OpenBlock" , true , false , mResponseObserver , "Session=%d, Request=%s" , mSessionId , mRequest ) ; } | Handles block open request . |
12,740 | public void onCompleted ( ) { RpcUtils . streamingRPCAndLog ( LOG , new RpcUtils . StreamingRpcCallable < OpenLocalBlockResponse > ( ) { public OpenLocalBlockResponse call ( ) throws Exception { if ( mLockId != BlockLockManager . INVALID_LOCK_ID ) { mWorker . unlockBlock ( mLockId ) ; mLockId = BlockLockManager . INVALID_LOCK_ID ; } else if ( mRequest != null ) { LOG . warn ( "Close a closed block {}." , mRequest . getBlockId ( ) ) ; } return null ; } public void exceptionCaught ( Throwable e ) { mResponseObserver . onError ( GrpcExceptionUtils . fromThrowable ( e ) ) ; mLockId = BlockLockManager . INVALID_LOCK_ID ; } } , "CloseBlock" , false , true , mResponseObserver , "Session=%d, Request=%s" , mSessionId , mRequest ) ; } | Handles block close request . No exceptions should be thrown . |
12,741 | public void run ( ) { Protos . FrameworkInfo . Builder frameworkInfo = Protos . FrameworkInfo . newBuilder ( ) . setName ( "alluxio" ) . setCheckpoint ( true ) ; if ( ServerConfiguration . isSet ( PropertyKey . INTEGRATION_MESOS_ROLE ) ) { frameworkInfo . setRole ( ServerConfiguration . get ( PropertyKey . INTEGRATION_MESOS_ROLE ) ) ; } if ( ServerConfiguration . isSet ( PropertyKey . INTEGRATION_MESOS_USER ) ) { frameworkInfo . setUser ( ServerConfiguration . get ( PropertyKey . INTEGRATION_MESOS_USER ) ) ; } else { frameworkInfo . setUser ( "" ) ; } if ( ServerConfiguration . isSet ( PropertyKey . INTEGRATION_MESOS_PRINCIPAL ) ) { frameworkInfo . setPrincipal ( ServerConfiguration . get ( PropertyKey . INTEGRATION_MESOS_PRINCIPAL ) ) ; } String masterWebUrl = createMasterWebUrl ( ) ; frameworkInfo . setWebuiUrl ( masterWebUrl ) ; Scheduler scheduler = new AlluxioScheduler ( mAlluxioMasterHostname ) ; Protos . Credential cred = createCredential ( ) ; MesosSchedulerDriver driver ; if ( cred == null ) { driver = new MesosSchedulerDriver ( scheduler , frameworkInfo . build ( ) , mMesosMaster ) ; } else { driver = new MesosSchedulerDriver ( scheduler , frameworkInfo . build ( ) , mMesosMaster , cred ) ; } int status = driver . run ( ) == Protos . Status . DRIVER_STOPPED ? 0 : 1 ; System . exit ( status ) ; } | Runs the mesos framework . |
12,742 | private static String createMasterWebUrl ( ) { InetSocketAddress masterWeb = NetworkAddressUtils . getConnectAddress ( ServiceType . MASTER_WEB , ServerConfiguration . global ( ) ) ; return "http://" + masterWeb . getHostString ( ) + ":" + masterWeb . getPort ( ) ; } | Creates the Alluxio master web URL . |
12,743 | public static void main ( String [ ] args ) throws Exception { AlluxioFramework framework = new AlluxioFramework ( ) ; JCommander jc = new JCommander ( framework ) ; try { jc . parse ( args ) ; } catch ( Exception e ) { System . out . println ( e . getMessage ( ) ) ; jc . usage ( ) ; System . exit ( 1 ) ; } framework . run ( ) ; } | Starts the Alluxio framework . |
12,744 | static TieredIdentity create ( AlluxioConfiguration conf ) { TieredIdentity scriptIdentity = fromScript ( conf ) ; List < LocalityTier > tiers = new ArrayList < > ( ) ; List < String > orderedTierNames = conf . getList ( PropertyKey . LOCALITY_ORDER , "," ) ; for ( int i = 0 ; i < orderedTierNames . size ( ) ; i ++ ) { String tierName = orderedTierNames . get ( i ) ; String value = null ; if ( scriptIdentity != null ) { LocalityTier scriptTier = scriptIdentity . getTier ( i ) ; Preconditions . checkState ( scriptTier . getTierName ( ) . equals ( tierName ) ) ; value = scriptTier . getValue ( ) ; } if ( conf . isSet ( Template . LOCALITY_TIER . format ( tierName ) ) ) { value = conf . get ( Template . LOCALITY_TIER . format ( tierName ) ) ; } tiers . add ( new LocalityTier ( tierName , value ) ) ; } if ( tiers . size ( ) > 0 && tiers . get ( 0 ) . getTierName ( ) . equals ( Constants . LOCALITY_NODE ) && tiers . get ( 0 ) . getValue ( ) == null ) { String name = NetworkAddressUtils . getLocalNodeName ( conf ) ; tiers . set ( 0 , new LocalityTier ( Constants . LOCALITY_NODE , name ) ) ; } return new TieredIdentity ( tiers ) ; } | Creates a tiered identity based on configuration . |
12,745 | public UnderFileSystemConfiguration createMountSpecificConf ( Map < String , String > mountConf ) { UnderFileSystemConfiguration ufsConf = new UnderFileSystemConfiguration ( mProperties . copy ( ) ) ; ufsConf . mProperties . merge ( mountConf , Source . MOUNT_OPTION ) ; ufsConf . mReadOnly = mReadOnly ; ufsConf . mShared = mShared ; return ufsConf ; } | Creates a new instance from the current configuration and adds in new properties . |
12,746 | private void registerWithMaster ( ) throws IOException { BlockStoreMeta storeMeta = mBlockWorker . getStoreMetaFull ( ) ; StorageTierAssoc storageTierAssoc = new WorkerStorageTierAssoc ( ) ; List < ConfigProperty > configList = ConfigurationUtils . getConfiguration ( ServerConfiguration . global ( ) , Scope . WORKER ) ; mMasterClient . register ( mWorkerId . get ( ) , storageTierAssoc . getOrderedStorageAliases ( ) , storeMeta . getCapacityBytesOnTiers ( ) , storeMeta . getUsedBytesOnTiers ( ) , storeMeta . getBlockList ( ) , storeMeta . getLostStorage ( ) , configList ) ; } | Registers with the Alluxio master . This should be called before the continuous heartbeat thread begins . |
12,747 | public void heartbeat ( ) { BlockHeartbeatReport blockReport = mBlockWorker . getReport ( ) ; BlockStoreMeta storeMeta = mBlockWorker . getStoreMeta ( ) ; Command cmdFromMaster = null ; List < alluxio . grpc . Metric > metrics = new ArrayList < > ( ) ; for ( Metric metric : MetricsSystem . allWorkerMetrics ( ) ) { metrics . add ( metric . toProto ( ) ) ; } try { cmdFromMaster = mMasterClient . heartbeat ( mWorkerId . get ( ) , storeMeta . getCapacityBytesOnTiers ( ) , storeMeta . getUsedBytesOnTiers ( ) , blockReport . getRemovedBlocks ( ) , blockReport . getAddedBlocks ( ) , blockReport . getLostStorage ( ) , metrics ) ; handleMasterCommand ( cmdFromMaster ) ; mLastSuccessfulHeartbeatMs = System . currentTimeMillis ( ) ; } catch ( IOException | ConnectionFailedException e ) { if ( cmdFromMaster == null ) { LOG . error ( "Failed to receive master heartbeat command." , e ) ; } else { LOG . error ( "Failed to receive or execute master heartbeat command: {}" , cmdFromMaster . toString ( ) , e ) ; } mMasterClient . disconnect ( ) ; if ( mHeartbeatTimeoutMs > 0 ) { if ( System . currentTimeMillis ( ) - mLastSuccessfulHeartbeatMs >= mHeartbeatTimeoutMs ) { if ( ServerConfiguration . getBoolean ( PropertyKey . TEST_MODE ) ) { throw new RuntimeException ( "Master heartbeat timeout exceeded: " + mHeartbeatTimeoutMs ) ; } ProcessUtils . fatalError ( LOG , "Master heartbeat timeout exceeded: %d" , mHeartbeatTimeoutMs ) ; } } } } | Heartbeats to the master node about the change in the worker s managed space . |
12,748 | private void applyEntry ( JournalEntry entry ) { Preconditions . checkState ( entry . getAllFields ( ) . size ( ) <= 1 || ( entry . getAllFields ( ) . size ( ) == 2 && entry . hasSequenceNumber ( ) ) , "Raft journal entries should never set multiple fields in addition to sequence " + "number, but found %s" , entry ) ; if ( entry . getJournalEntriesCount ( ) > 0 ) { for ( JournalEntry e : entry . getJournalEntriesList ( ) ) { applyEntry ( e ) ; } } else if ( entry . getSequenceNumber ( ) < 0 ) { mLastPrimaryStartSequenceNumber = entry . getSequenceNumber ( ) ; } else if ( entry . toBuilder ( ) . clearSequenceNumber ( ) . build ( ) . equals ( JournalEntry . getDefaultInstance ( ) ) ) { } else { applySingleEntry ( entry ) ; } } | Applies the journal entry ignoring empty entries and expanding multi - entries . |
12,749 | public static List < UnderFileSystemFactory > findAll ( String path , UnderFileSystemConfiguration ufsConf , AlluxioConfiguration alluxioConf ) { List < UnderFileSystemFactory > eligibleFactories = sRegistryInstance . findAll ( path , ufsConf , alluxioConf ) ; if ( eligibleFactories . isEmpty ( ) && ufsConf != null ) { List < UnderFileSystemFactory > factories = sRegistryInstance . findAll ( path , null , alluxioConf ) ; List < String > supportedVersions = new java . util . ArrayList < > ( ) ; for ( UnderFileSystemFactory factory : factories ) { if ( ! factory . getVersion ( ) . isEmpty ( ) ) { supportedVersions . add ( factory . getVersion ( ) ) ; } } if ( ! supportedVersions . isEmpty ( ) ) { String configuredVersion = ufsConf . get ( PropertyKey . UNDERFS_VERSION ) ; LOG . warn ( "Versions [{}] are supported for path {} but you have configured version: {}" , StringUtils . join ( supportedVersions , "," ) , path , configuredVersion ) ; } } return eligibleFactories ; } | Finds all the Under File System factories that support the given path . |
12,750 | private void load ( AlluxioURI filePath , int replication ) throws AlluxioException , IOException , InterruptedException { URIStatus status = mFileSystem . getStatus ( filePath ) ; if ( status . isFolder ( ) ) { List < URIStatus > statuses = mFileSystem . listStatus ( filePath ) ; for ( URIStatus uriStatus : statuses ) { AlluxioURI newPath = new AlluxioURI ( uriStatus . getPath ( ) ) ; load ( newPath , replication ) ; } } else { Thread thread = JobGrpcClientUtils . createProgressThread ( System . out ) ; thread . start ( ) ; try { JobGrpcClientUtils . run ( new LoadConfig ( filePath . getPath ( ) , replication ) , 3 , mFsContext . getPathConf ( filePath ) ) ; } finally { thread . interrupt ( ) ; } } System . out . println ( filePath + " loaded" ) ; } | Loads a file or directory in Alluxio space makes it resident in memory . |
12,751 | private UnderFileSystem getOrAdd ( AlluxioURI ufsUri , UnderFileSystemConfiguration ufsConf ) { Key key = new Key ( ufsUri , ufsConf . getMountSpecificConf ( ) ) ; UnderFileSystem cachedFs = mUnderFileSystemMap . get ( key ) ; if ( cachedFs != null ) { return cachedFs ; } synchronized ( mLock ) { cachedFs = mUnderFileSystemMap . get ( key ) ; if ( cachedFs != null ) { return cachedFs ; } UnderFileSystem fs = UnderFileSystem . Factory . create ( ufsUri . toString ( ) , ufsConf ) ; boolean useManagedBlocking = fs . isObjectStorage ( ) ; if ( ufsConf . isSet ( PropertyKey . UNDERFS_RUN_WITH_MANAGEDBLOCKING ) ) { useManagedBlocking = ufsConf . getBoolean ( PropertyKey . UNDERFS_RUN_WITH_MANAGEDBLOCKING ) ; } if ( useManagedBlocking ) { fs = new ManagedBlockingUfsForwarder ( fs ) ; } mUnderFileSystemMap . putIfAbsent ( key , fs ) ; mCloser . register ( fs ) ; try { connectUfs ( fs ) ; } catch ( IOException e ) { LOG . warn ( "Failed to perform initial connect to UFS {}: {}" , ufsUri , e . getMessage ( ) ) ; } return fs ; } } | Return a UFS instance if it already exists in the cache otherwise creates a new instance and return this . |
12,752 | private static List < AlluxioURI > getAlluxioURIs ( FileSystem alluxioClient , AlluxioURI inputURI , AlluxioURI parentDir ) throws IOException { List < AlluxioURI > res = new ArrayList < > ( ) ; List < URIStatus > statuses ; try { statuses = alluxioClient . listStatus ( parentDir ) ; } catch ( AlluxioException e ) { throw new IOException ( e ) ; } for ( URIStatus status : statuses ) { AlluxioURI fileURI = new AlluxioURI ( inputURI . getScheme ( ) , inputURI . getAuthority ( ) , status . getPath ( ) ) ; if ( match ( fileURI , inputURI ) ) { res . add ( fileURI ) ; } else { if ( status . isFolder ( ) ) { AlluxioURI dirURI = new AlluxioURI ( inputURI . getScheme ( ) , inputURI . getAuthority ( ) , status . getPath ( ) ) ; String prefix = inputURI . getLeadingPath ( dirURI . getDepth ( ) ) ; if ( prefix != null && match ( dirURI , new AlluxioURI ( prefix ) ) ) { res . addAll ( getAlluxioURIs ( alluxioClient , inputURI , dirURI ) ) ; } } } } return res ; } | The utility function used to implement getAlluxioURIs . |
12,753 | private static List < File > getFiles ( String inputPath , String parent ) { List < File > res = new ArrayList < > ( ) ; File pFile = new File ( parent ) ; if ( ! pFile . exists ( ) || ! pFile . isDirectory ( ) ) { return res ; } if ( pFile . isDirectory ( ) && pFile . canRead ( ) ) { File [ ] fileList = pFile . listFiles ( ) ; if ( fileList == null ) { return res ; } for ( File file : fileList ) { if ( match ( file . getPath ( ) , inputPath ) ) { res . add ( file ) ; } else { if ( file . isDirectory ( ) ) { AlluxioURI dirURI = new AlluxioURI ( file . getPath ( ) ) ; String prefix = new AlluxioURI ( inputPath ) . getLeadingPath ( dirURI . getDepth ( ) ) ; if ( prefix != null && match ( dirURI , new AlluxioURI ( prefix ) ) ) { res . addAll ( getFiles ( inputPath , dirURI . getPath ( ) ) ) ; } } } } } return res ; } | The utility function used to implement getFiles . |
12,754 | public static int getIntArg ( CommandLine cl , Option option , int defaultValue ) { int arg = defaultValue ; if ( cl . hasOption ( option . getLongOpt ( ) ) ) { String argOption = cl . getOptionValue ( option . getLongOpt ( ) ) ; arg = Integer . parseInt ( argOption ) ; } return arg ; } | Gets the value of an option from the command line . |
12,755 | public static long getMs ( String time ) { try { return FormatUtils . parseTimeSize ( time ) ; } catch ( Exception e ) { throw new RuntimeException ( ExceptionMessage . INVALID_TIME . getMessage ( time ) ) ; } } | Converts the input time into millisecond unit . |
12,756 | private static boolean match ( AlluxioURI fileURI , AlluxioURI patternURI ) { return escape ( fileURI . getPath ( ) ) . matches ( replaceWildcards ( patternURI . getPath ( ) ) ) ; } | Returns whether or not fileURI matches the patternURI . |
12,757 | public static boolean match ( String filePath , String patternPath ) { return match ( new AlluxioURI ( filePath ) , new AlluxioURI ( patternPath ) ) ; } | Returns whether or not filePath matches patternPath . |
12,758 | public synchronized void setUfsMode ( Supplier < JournalContext > journalContext , AlluxioURI ufsPath , UfsMode ufsMode ) throws InvalidPathException { LOG . info ( "Set ufs mode for {} to {}" , ufsPath , ufsMode ) ; String root = ufsPath . getRootPath ( ) ; if ( ! mUfsRoots . contains ( root ) ) { LOG . warn ( "No managed ufs for physical ufs path {}" , root ) ; throw new InvalidPathException ( String . format ( "Unknown Ufs path %s" , root ) ) ; } mState . applyAndJournal ( journalContext , UpdateUfsModeEntry . newBuilder ( ) . setUfsPath ( ufsPath . getRootPath ( ) ) . setUfsMode ( File . UfsMode . valueOf ( ufsMode . name ( ) ) ) . build ( ) ) ; } | Set the operation mode the given physical ufs . |
12,759 | public final CountedCompleter < ? > getRoot ( ) { CountedCompleter < ? > a = this , p ; while ( ( p = a . completer ) != null ) a = p ; return a ; } | Returns the root of the current computation ; i . e . this task if it has no completer else its completer s root . |
12,760 | public final void helpComplete ( int maxTasks ) { Thread t ; ForkJoinWorkerThread wt ; if ( maxTasks > 0 && status >= 0 ) { if ( ( t = Thread . currentThread ( ) ) instanceof ForkJoinWorkerThread ) ( wt = ( ForkJoinWorkerThread ) t ) . pool . helpComplete ( wt . workQueue , this , maxTasks ) ; else ForkJoinPool . common . externalHelpComplete ( this , maxTasks ) ; } } | If this task has not completed attempts to process at most the given number of other unprocessed tasks for which this task is on the completion path if any are known to exist . |
12,761 | void internalPropagateException ( Throwable ex ) { CountedCompleter < ? > a = this , s = a ; while ( a . onExceptionalCompletion ( ex , s ) && ( a = ( s = a ) . completer ) != null && a . status >= 0 && a . recordExceptionalCompletion ( ex ) == EXCEPTIONAL ) ; } | Supports ForkJoinTask exception propagation . |
12,762 | public static PStatus toProto ( Status status ) { switch ( status . getCode ( ) ) { case ABORTED : return PStatus . ABORTED ; case ALREADY_EXISTS : return PStatus . ALREADY_EXISTS ; case CANCELLED : return PStatus . CANCELED ; case DATA_LOSS : return PStatus . DATA_LOSS ; case DEADLINE_EXCEEDED : return PStatus . DEADLINE_EXCEEDED ; case FAILED_PRECONDITION : return PStatus . FAILED_PRECONDITION ; case INTERNAL : return PStatus . INTERNAL ; case INVALID_ARGUMENT : return PStatus . INVALID_ARGUMENT ; case NOT_FOUND : return PStatus . NOT_FOUND ; case OK : return PStatus . OK ; case OUT_OF_RANGE : return PStatus . OUT_OF_RANGE ; case PERMISSION_DENIED : return PStatus . PERMISSION_DENIED ; case RESOURCE_EXHAUSTED : return PStatus . RESOURCE_EXHAUSTED ; case UNAUTHENTICATED : return PStatus . UNAUTHENTICATED ; case UNAVAILABLE : return PStatus . UNAVAILABLE ; case UNIMPLEMENTED : return PStatus . UNIMPLEMENTED ; case UNKNOWN : return PStatus . UNKNOWN ; default : return PStatus . UNKNOWN ; } } | Converts an internal exception status to a protocol buffer type status . |
12,763 | public static SetAclAction fromProto ( File . PSetAclAction pSetAclAction ) { if ( pSetAclAction == null ) { throw new IllegalStateException ( "Null proto set acl action." ) ; } switch ( pSetAclAction ) { case REPLACE : return SetAclAction . REPLACE ; case MODIFY : return SetAclAction . MODIFY ; case REMOVE : return SetAclAction . REMOVE ; case REMOVE_ALL : return SetAclAction . REMOVE_ALL ; case REMOVE_DEFAULT : return SetAclAction . REMOVE_DEFAULT ; default : throw new IllegalStateException ( "Unrecognized proto set acl action: " + pSetAclAction ) ; } } | Converts proto type to wire type . |
12,764 | public int run ( ) throws IOException { ConfigCheckReport report = mMetaMasterClient . getConfigReport ( ) ; ConfigStatus configStatus = report . getConfigStatus ( ) ; if ( configStatus == ConfigStatus . PASSED ) { mPrintStream . println ( "No server-side configuration errors or warnings." ) ; return 0 ; } Map < Scope , List < InconsistentProperty > > errors = report . getConfigErrors ( ) ; if ( errors . size ( ) != 0 ) { mPrintStream . println ( "Server-side configuration errors " + "(those properties are required to be identical): " ) ; printInconsistentProperties ( errors ) ; } Map < Scope , List < InconsistentProperty > > warnings = report . getConfigWarns ( ) ; if ( warnings . size ( ) != 0 ) { mPrintStream . println ( "\nServer-side configuration warnings " + "(those properties are recommended to be identical): " ) ; printInconsistentProperties ( warnings ) ; } return 0 ; } | Runs doctor configuration command . |
12,765 | private void printInconsistentProperties ( Map < Scope , List < InconsistentProperty > > inconsistentProperties ) { for ( List < InconsistentProperty > list : inconsistentProperties . values ( ) ) { for ( InconsistentProperty prop : list ) { mPrintStream . println ( "key: " + prop . getName ( ) ) ; for ( Map . Entry < Optional < String > , List < String > > entry : prop . getValues ( ) . entrySet ( ) ) { mPrintStream . println ( " value: " + String . format ( "%s (%s)" , entry . getKey ( ) . orElse ( "no value set" ) , String . join ( ", " , entry . getValue ( ) ) ) ) ; } } } } | Prints the inconsistent properties in server - side configuration . |
12,766 | private void startProxy ( ) throws Exception { mProxyProcess = ProxyProcess . Factory . create ( ) ; Runnable runProxy = ( ) -> { try { mProxyProcess . start ( ) ; } catch ( InterruptedException e ) { } catch ( Exception e ) { LOG . error ( "Start proxy error" , e ) ; throw new RuntimeException ( e + " \n Start Proxy Error \n" + e . getMessage ( ) , e ) ; } } ; mProxyThread = new Thread ( runProxy ) ; mProxyThread . setName ( "ProxyThread-" + System . identityHashCode ( mProxyThread ) ) ; mProxyThread . start ( ) ; TestUtils . waitForReady ( mProxyProcess ) ; } | Configures and starts the proxy . |
12,767 | public void formatAndRestartMasters ( ) throws Exception { stopMasters ( ) ; Format . format ( Format . Mode . MASTER , ServerConfiguration . global ( ) ) ; startMasters ( ) ; } | Stops the masters formats them and then restarts them . This is useful if a fresh state is desired for example when restoring from a backup . |
12,768 | protected void stopProxy ( ) throws Exception { mProxyProcess . stop ( ) ; if ( mProxyThread != null ) { while ( mProxyThread . isAlive ( ) ) { LOG . info ( "Stopping thread {}." , mProxyThread . getName ( ) ) ; mProxyThread . interrupt ( ) ; mProxyThread . join ( 1000 ) ; } mProxyThread = null ; } } | Stops the proxy . |
12,769 | public void stopWorkers ( ) throws Exception { if ( mWorkers == null ) { return ; } for ( WorkerProcess worker : mWorkers ) { worker . stop ( ) ; } for ( Thread thread : mWorkerThreads ) { while ( thread . isAlive ( ) ) { LOG . info ( "Stopping thread {}." , thread . getName ( ) ) ; thread . interrupt ( ) ; thread . join ( 1000 ) ; } } mWorkerThreads . clear ( ) ; } | Stops the workers . |
12,770 | public void waitForWorkersRegistered ( int timeoutMs ) throws TimeoutException , InterruptedException , IOException { try ( MetaMasterClient client = new RetryHandlingMetaMasterClient ( MasterClientContext . newBuilder ( ClientContext . create ( ServerConfiguration . global ( ) ) ) . build ( ) ) ) { CommonUtils . waitFor ( "workers registered" , ( ) -> { try { return client . getMasterInfo ( Collections . emptySet ( ) ) . getWorkerAddressesList ( ) . size ( ) == mNumWorkers ; } catch ( UnavailableException e ) { return false ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } , WaitForOptions . defaults ( ) . setInterval ( 200 ) . setTimeoutMs ( timeoutMs ) ) ; } } | Waits for all workers registered with master . |
12,771 | public static LocalAlluxioMaster create ( boolean includeSecondary ) throws IOException { String workDirectory = uniquePath ( ) ; FileUtils . deletePathRecursively ( workDirectory ) ; ServerConfiguration . set ( PropertyKey . WORK_DIR , workDirectory ) ; return create ( workDirectory , includeSecondary ) ; } | Creates a new local Alluxio master with an isolated work directory and port . |
12,772 | public static LocalAlluxioMaster create ( String workDirectory , boolean includeSecondary ) throws IOException { if ( ! Files . isDirectory ( Paths . get ( workDirectory ) ) ) { Files . createDirectory ( Paths . get ( workDirectory ) ) ; } return new LocalAlluxioMaster ( includeSecondary ) ; } | Creates a new local Alluxio master with a isolated port . |
12,773 | public void start ( ) { mMasterProcess = AlluxioMasterProcess . Factory . create ( ) ; Runnable runMaster = new Runnable ( ) { public void run ( ) { try { LOG . info ( "Starting Alluxio master {}." , mMasterProcess ) ; mMasterProcess . start ( ) ; } catch ( InterruptedException e ) { } catch ( Exception e ) { LOG . error ( "Start master error" , e ) ; throw new RuntimeException ( e + " \n Start Master Error \n" + e . getMessage ( ) , e ) ; } } } ; mMasterThread = new Thread ( runMaster ) ; mMasterThread . setName ( "MasterThread-" + System . identityHashCode ( mMasterThread ) ) ; mMasterThread . start ( ) ; TestUtils . waitForReady ( mMasterProcess ) ; if ( ServerConfiguration . getEnum ( PropertyKey . MASTER_JOURNAL_TYPE , JournalType . class ) == JournalType . EMBEDDED ) { return ; } if ( ! mIncludeSecondary ) { return ; } mSecondaryMaster = new AlluxioSecondaryMaster ( ) ; Runnable runSecondaryMaster = new Runnable ( ) { public void run ( ) { try { LOG . info ( "Starting secondary master {}." , mSecondaryMaster ) ; mSecondaryMaster . start ( ) ; } catch ( InterruptedException e ) { } catch ( Exception e ) { LOG . error ( "Start secondary master error" , e ) ; throw new RuntimeException ( e + " \n Start Secondary Master Error \n" + e . getMessage ( ) , e ) ; } } } ; mSecondaryMasterThread = new Thread ( runSecondaryMaster ) ; mSecondaryMasterThread . setName ( "SecondaryMasterThread-" + System . identityHashCode ( mSecondaryMasterThread ) ) ; mSecondaryMasterThread . start ( ) ; TestUtils . waitForReady ( mSecondaryMaster ) ; } | Starts the master . |
12,774 | public void stop ( ) throws Exception { if ( mSecondaryMasterThread != null ) { mSecondaryMaster . stop ( ) ; while ( mSecondaryMasterThread . isAlive ( ) ) { LOG . info ( "Stopping thread {}." , mSecondaryMasterThread . getName ( ) ) ; mSecondaryMasterThread . join ( 1000 ) ; } mSecondaryMasterThread = null ; } if ( mMasterThread != null ) { mMasterProcess . stop ( ) ; while ( mMasterThread . isAlive ( ) ) { LOG . info ( "Stopping thread {}." , mMasterThread . getName ( ) ) ; mMasterThread . interrupt ( ) ; mMasterThread . join ( 1000 ) ; } mMasterThread = null ; } clearClients ( ) ; System . clearProperty ( "alluxio.web.resources" ) ; System . clearProperty ( "alluxio.master.min.worker.threads" ) ; } | Stops the master processes and cleans up client connections . |
12,775 | private synchronized void init ( MasterInquireClient masterInquireClient ) { mMasterInquireClient = masterInquireClient ; mFileSystemMasterClientPool = new FileSystemMasterClientPool ( mClientContext , mMasterInquireClient ) ; mBlockMasterClientPool = new BlockMasterClientPool ( mClientContext , mMasterInquireClient ) ; mClosed . set ( false ) ; if ( mClientContext . getConf ( ) . getBoolean ( PropertyKey . USER_METRICS_COLLECTION_ENABLED ) ) { mMetricsMasterClient = new MetricsMasterClient ( MasterClientContext . newBuilder ( mClientContext ) . setMasterInquireClient ( mMasterInquireClient ) . build ( ) ) ; mClientMasterSync = new ClientMasterSync ( mMetricsMasterClient , mAppId ) ; mExecutorService = Executors . newFixedThreadPool ( 1 , ThreadFactoryUtils . build ( "metrics-master-heartbeat-%d" , true ) ) ; mExecutorService . submit ( new HeartbeatThread ( HeartbeatContext . MASTER_METRICS_SYNC , mClientMasterSync , ( int ) mClientContext . getConf ( ) . getMs ( PropertyKey . USER_METRICS_HEARTBEAT_INTERVAL_MS ) , mClientContext . getConf ( ) ) ) ; try { Runtime . getRuntime ( ) . addShutdownHook ( new MetricsMasterSyncShutDownHook ( ) ) ; } catch ( IllegalStateException e ) { } catch ( SecurityException e ) { LOG . info ( "Not registering metrics shutdown hook due to security exception. Regular " + "heartbeats will still be performed to collect metrics data, but no final heartbeat " + "will be performed on JVM exit. Security exception: {}" , e . toString ( ) ) ; } } } | Initializes the context . Only called in the factory methods . |
12,776 | public void releaseBlockWorkerClient ( WorkerNetAddress workerNetAddress , BlockWorkerClient client ) { SocketAddress address = NetworkAddressUtils . getDataPortSocketAddress ( workerNetAddress , getClusterConf ( ) ) ; ClientPoolKey key = new ClientPoolKey ( address , AuthenticationUserUtils . getImpersonationUser ( mClientContext . getSubject ( ) , getClusterConf ( ) ) ) ; if ( mBlockWorkerClientPool . containsKey ( key ) ) { mBlockWorkerClientPool . get ( key ) . release ( client ) ; } else { LOG . warn ( "No client pool for key {}, closing client instead. Context is closed: {}" , key , mClosed . get ( ) ) ; try { client . close ( ) ; } catch ( IOException e ) { LOG . warn ( "Error closing block worker client for key {}" , key , e ) ; } } } | Releases a block worker client to the client pools . |
12,777 | private Journal . JournalEntry readInternal ( ) throws IOException { if ( mInputStream == null ) { return null ; } JournalEntry entry = mInputStream . mReader . readEntry ( ) ; if ( entry != null ) { return entry ; } if ( mInputStream . mFile . isIncompleteLog ( ) ) { return null ; } else { Preconditions . checkState ( mInputStream . mFile . isCompletedLog ( ) , "Expected log to be either checkpoint, incomplete, or complete" ) ; ProcessUtils . fatalError ( LOG , "Journal entry %s was truncated" , mNextSequenceNumber ) ; return null ; } } | Reads the next journal entry . |
12,778 | private void updateInputStream ( ) throws IOException { if ( mInputStream != null && ( mInputStream . mFile . isIncompleteLog ( ) || ! mInputStream . isDone ( ) ) ) { return ; } if ( mInputStream != null ) { mInputStream . close ( ) ; mInputStream = null ; } if ( mFilesToProcess . isEmpty ( ) ) { UfsJournalSnapshot snapshot = UfsJournalSnapshot . getSnapshot ( mJournal ) ; if ( snapshot . getCheckpoints ( ) . isEmpty ( ) && snapshot . getLogs ( ) . isEmpty ( ) ) { return ; } int index = 0 ; if ( ! snapshot . getCheckpoints ( ) . isEmpty ( ) ) { UfsJournalFile checkpoint = snapshot . getLatestCheckpoint ( ) ; if ( mNextSequenceNumber < checkpoint . getEnd ( ) ) { String location = checkpoint . getLocation ( ) . toString ( ) ; LOG . info ( "Reading checkpoint {}" , location ) ; mCheckpointStream = new CheckpointInputStream ( mUfs . open ( location , OpenOptions . defaults ( ) . setRecoverFailedOpen ( true ) ) ) ; mNextSequenceNumber = checkpoint . getEnd ( ) ; } for ( ; index < snapshot . getLogs ( ) . size ( ) ; index ++ ) { UfsJournalFile file = snapshot . getLogs ( ) . get ( index ) ; if ( file . getEnd ( ) > checkpoint . getEnd ( ) ) { break ; } } } for ( ; index < snapshot . getLogs ( ) . size ( ) ; index ++ ) { UfsJournalFile file = snapshot . getLogs ( ) . get ( index ) ; if ( ( ! mReadIncompleteLog && file . isIncompleteLog ( ) ) || mNextSequenceNumber >= file . getEnd ( ) ) { continue ; } mFilesToProcess . add ( snapshot . getLogs ( ) . get ( index ) ) ; } } if ( ! mFilesToProcess . isEmpty ( ) ) { mInputStream = new JournalInputStream ( mFilesToProcess . poll ( ) ) ; } } | Updates the journal input stream by closing the current journal input stream if it is done and opening a new one . |
12,779 | private void printMetaMasterInfo ( ) throws IOException { mIndentationLevel ++ ; Set < MasterInfoField > masterInfoFilter = new HashSet < > ( Arrays . asList ( MasterInfoField . LEADER_MASTER_ADDRESS , MasterInfoField . WEB_PORT , MasterInfoField . RPC_PORT , MasterInfoField . START_TIME_MS , MasterInfoField . UP_TIME_MS , MasterInfoField . VERSION , MasterInfoField . SAFE_MODE , MasterInfoField . ZOOKEEPER_ADDRESSES ) ) ; MasterInfo masterInfo = mMetaMasterClient . getMasterInfo ( masterInfoFilter ) ; print ( "Master Address: " + masterInfo . getLeaderMasterAddress ( ) ) ; print ( "Web Port: " + masterInfo . getWebPort ( ) ) ; print ( "Rpc Port: " + masterInfo . getRpcPort ( ) ) ; print ( "Started: " + CommonUtils . convertMsToDate ( masterInfo . getStartTimeMs ( ) , mDateFormatPattern ) ) ; print ( "Uptime: " + CommonUtils . convertMsToClockTime ( masterInfo . getUpTimeMs ( ) ) ) ; print ( "Version: " + masterInfo . getVersion ( ) ) ; print ( "Safe Mode: " + masterInfo . getSafeMode ( ) ) ; List < String > zookeeperAddresses = masterInfo . getZookeeperAddressesList ( ) ; if ( zookeeperAddresses == null || zookeeperAddresses . isEmpty ( ) ) { print ( "Zookeeper Enabled: false" ) ; } else { print ( "Zookeeper Enabled: true" ) ; print ( "Zookeeper Addresses: " ) ; mIndentationLevel ++ ; for ( String zkAddress : zookeeperAddresses ) { print ( zkAddress ) ; } mIndentationLevel -- ; } } | Prints Alluxio meta master information . |
12,780 | private void printBlockMasterInfo ( ) throws IOException { Set < BlockMasterInfoField > blockMasterInfoFilter = new HashSet < > ( Arrays . asList ( BlockMasterInfoField . LIVE_WORKER_NUM , BlockMasterInfoField . LOST_WORKER_NUM , BlockMasterInfoField . CAPACITY_BYTES , BlockMasterInfoField . USED_BYTES , BlockMasterInfoField . FREE_BYTES , BlockMasterInfoField . CAPACITY_BYTES_ON_TIERS , BlockMasterInfoField . USED_BYTES_ON_TIERS ) ) ; BlockMasterInfo blockMasterInfo = mBlockMasterClient . getBlockMasterInfo ( blockMasterInfoFilter ) ; print ( "Live Workers: " + blockMasterInfo . getLiveWorkerNum ( ) ) ; print ( "Lost Workers: " + blockMasterInfo . getLostWorkerNum ( ) ) ; print ( "Total Capacity: " + FormatUtils . getSizeFromBytes ( blockMasterInfo . getCapacityBytes ( ) ) ) ; mIndentationLevel ++ ; Map < String , Long > totalCapacityOnTiers = new TreeMap < > ( ( a , b ) -> ( FileSystemAdminShellUtils . compareTierNames ( a , b ) ) ) ; totalCapacityOnTiers . putAll ( blockMasterInfo . getCapacityBytesOnTiers ( ) ) ; for ( Map . Entry < String , Long > capacityBytesTier : totalCapacityOnTiers . entrySet ( ) ) { print ( "Tier: " + capacityBytesTier . getKey ( ) + " Size: " + FormatUtils . getSizeFromBytes ( capacityBytesTier . getValue ( ) ) ) ; } mIndentationLevel -- ; print ( "Used Capacity: " + FormatUtils . getSizeFromBytes ( blockMasterInfo . getUsedBytes ( ) ) ) ; mIndentationLevel ++ ; Map < String , Long > usedCapacityOnTiers = new TreeMap < > ( ( a , b ) -> ( FileSystemAdminShellUtils . compareTierNames ( a , b ) ) ) ; usedCapacityOnTiers . putAll ( blockMasterInfo . getUsedBytesOnTiers ( ) ) ; for ( Map . Entry < String , Long > usedBytesTier : usedCapacityOnTiers . entrySet ( ) ) { print ( "Tier: " + usedBytesTier . getKey ( ) + " Size: " + FormatUtils . getSizeFromBytes ( usedBytesTier . getValue ( ) ) ) ; } mIndentationLevel -- ; print ( "Free Capacity: " + FormatUtils . getSizeFromBytes ( blockMasterInfo . 
getFreeBytes ( ) ) ) ; } | Prints Alluxio block master information . |
12,781 | public long getId ( final Address address ) throws IOException { return retryRPC ( ( ) -> mClient . getMasterId ( GetMasterIdPRequest . newBuilder ( ) . setMasterAddress ( address . toProto ( ) ) . build ( ) ) . getMasterId ( ) ) ; } | Returns a master id for a master address . |
12,782 | public MetaCommand heartbeat ( final long masterId ) throws IOException { return retryRPC ( ( ) -> mClient . masterHeartbeat ( MasterHeartbeatPRequest . newBuilder ( ) . setMasterId ( masterId ) . build ( ) ) . getCommand ( ) ) ; } | Sends a heartbeat to the leader master . Standby masters periodically execute this method so that the leader master knows they are still running . |
12,783 | public void register ( final long masterId , final List < ConfigProperty > configList ) throws IOException { retryRPC ( ( ) -> { mClient . registerMaster ( RegisterMasterPRequest . newBuilder ( ) . setMasterId ( masterId ) . setOptions ( RegisterMasterPOptions . newBuilder ( ) . addAllConfigs ( configList ) . build ( ) ) . build ( ) ) ; return null ; } ) ; } | Registers with the leader master . |
12,784 | public List < String > getGroups ( String user ) throws IOException { List < String > groups = CommonUtils . getUnixGroups ( user ) ; return new ArrayList < > ( new LinkedHashSet < > ( groups ) ) ; } | Returns list of groups for a user . |
12,785 | @ Path ( WEBUI_OVERVIEW ) @ ReturnType ( "alluxio.wire.WorkerWebUIOverview" ) public Response getWebUIOverview ( ) { return RestUtils . call ( ( ) -> { WorkerWebUIOverview response = new WorkerWebUIOverview ( ) ; response . setWorkerInfo ( new UIWorkerInfo ( mWorkerProcess . getRpcAddress ( ) . toString ( ) , mWorkerProcess . getStartTimeMs ( ) , ServerConfiguration . get ( PropertyKey . USER_DATE_FORMAT_PATTERN ) ) ) ; BlockStoreMeta storeMeta = mBlockWorker . getStoreMeta ( ) ; long capacityBytes = 0L ; long usedBytes = 0L ; Map < String , Long > capacityBytesOnTiers = storeMeta . getCapacityBytesOnTiers ( ) ; Map < String , Long > usedBytesOnTiers = storeMeta . getUsedBytesOnTiers ( ) ; List < UIUsageOnTier > usageOnTiers = new ArrayList < > ( ) ; for ( Map . Entry < String , Long > entry : capacityBytesOnTiers . entrySet ( ) ) { String tier = entry . getKey ( ) ; long capacity = entry . getValue ( ) ; Long nullableUsed = usedBytesOnTiers . get ( tier ) ; long used = nullableUsed == null ? 0 : nullableUsed ; capacityBytes += capacity ; usedBytes += used ; usageOnTiers . add ( new UIUsageOnTier ( tier , capacity , used ) ) ; } response . setCapacityBytes ( FormatUtils . getSizeFromBytes ( capacityBytes ) ) . setUsedBytes ( FormatUtils . getSizeFromBytes ( usedBytes ) ) . setUsageOnTiers ( usageOnTiers ) . setVersion ( RuntimeConstants . VERSION ) ; List < UIStorageDir > storageDirs = new ArrayList < > ( storeMeta . getCapacityBytesOnDirs ( ) . size ( ) ) ; for ( Pair < String , String > tierAndDirPath : storeMeta . getCapacityBytesOnDirs ( ) . keySet ( ) ) { storageDirs . add ( new UIStorageDir ( tierAndDirPath . getFirst ( ) , tierAndDirPath . getSecond ( ) , storeMeta . getCapacityBytesOnDirs ( ) . get ( tierAndDirPath ) , storeMeta . getUsedBytesOnDirs ( ) . get ( tierAndDirPath ) ) ) ; } response . setStorageDirs ( storageDirs ) ; return response ; } , ServerConfiguration . global ( ) ) ; } | Gets web ui overview page data . |
12,786 | @ Path ( WEBUI_METRICS ) @ ReturnType ( "alluxio.wire.WorkerWebUIMetrics" ) public Response getWebUIMetrics ( ) { return RestUtils . call ( ( ) -> { WorkerWebUIMetrics response = new WorkerWebUIMetrics ( ) ; MetricRegistry mr = MetricsSystem . METRIC_REGISTRY ; Long workerCapacityTotal = ( Long ) mr . getGauges ( ) . get ( MetricsSystem . getMetricName ( DefaultBlockWorker . Metrics . CAPACITY_TOTAL ) ) . getValue ( ) ; Long workerCapacityUsed = ( Long ) mr . getGauges ( ) . get ( MetricsSystem . getMetricName ( DefaultBlockWorker . Metrics . CAPACITY_USED ) ) . getValue ( ) ; int workerCapacityUsedPercentage = ( workerCapacityTotal > 0 ) ? ( int ) ( 100L * workerCapacityUsed / workerCapacityTotal ) : 0 ; response . setWorkerCapacityUsedPercentage ( workerCapacityUsedPercentage ) ; response . setWorkerCapacityFreePercentage ( 100 - workerCapacityUsedPercentage ) ; Map < String , Counter > counters = mr . getCounters ( new MetricFilter ( ) { public boolean matches ( String name , Metric metric ) { return ! ( name . endsWith ( "Ops" ) ) ; } } ) ; Map < String , Counter > rpcInvocations = mr . getCounters ( new MetricFilter ( ) { public boolean matches ( String name , Metric metric ) { return name . endsWith ( "Ops" ) ; } } ) ; Map < String , Metric > operations = new TreeMap < > ( ) ; for ( Map . Entry < String , Counter > entry : counters . entrySet ( ) ) { operations . put ( MetricsSystem . stripInstanceAndHost ( entry . getKey ( ) ) , entry . getValue ( ) ) ; } String filesPinnedProperty = MetricsSystem . getMetricName ( MasterMetrics . FILES_PINNED ) ; operations . put ( MetricsSystem . stripInstanceAndHost ( filesPinnedProperty ) , mr . getGauges ( ) . get ( filesPinnedProperty ) ) ; response . setOperationMetrics ( operations ) ; Map < String , Counter > rpcInvocationsUpdated = new TreeMap < > ( ) ; for ( Map . Entry < String , Counter > entry : rpcInvocations . entrySet ( ) ) { rpcInvocationsUpdated . put ( MetricsSystem . 
stripInstanceAndHost ( entry . getKey ( ) ) , entry . getValue ( ) ) ; } response . setRpcInvocationMetrics ( rpcInvocations ) ; return response ; } , ServerConfiguration . global ( ) ) ; } | Gets web ui metrics page data . |
12,787 | private int getSkipCorrectedReference ( List < Integer > tokenPositions , int refNumber ) { int correctedRef = 0 ; int i = 0 ; for ( int tokenPosition : tokenPositions ) { if ( i ++ >= refNumber ) { break ; } correctedRef += tokenPosition ; } return correctedRef - 1 ; } | when there s a skip we need to adapt the reference number |
12,788 | public int compareTo ( RuleMatch other ) { Objects . requireNonNull ( other ) ; return Integer . compare ( getFromPos ( ) , other . getFromPos ( ) ) ; } | Compare by start position . |
12,789 | private boolean matchPostagRegexp ( AnalyzedTokenReadings aToken , Pattern pattern ) { for ( AnalyzedToken analyzedToken : aToken ) { String posTag = analyzedToken . getPOSTag ( ) ; if ( posTag == null ) { posTag = "UNKNOWN" ; } final Matcher m = pattern . matcher ( posTag ) ; if ( m . matches ( ) ) { return true ; } } return false ; } | Match POS tag with regular expression |
12,790 | public static Language getLanguageForName ( String languageName ) { for ( Language element : getStaticAndDynamicLanguages ( ) ) { if ( languageName . equals ( element . getName ( ) ) ) { return element ; } } return null ; } | Get the Language object for the given language name . |
12,791 | public static Language getLanguageForShortCode ( String langCode , List < String > noopLanguageCodes ) { Language language = getLanguageForShortCodeOrNull ( langCode ) ; if ( language == null ) { if ( noopLanguageCodes . contains ( langCode ) ) { return NOOP_LANGUAGE ; } else { List < String > codes = new ArrayList < > ( ) ; for ( Language realLanguage : getStaticAndDynamicLanguages ( ) ) { codes . add ( realLanguage . getShortCodeWithCountryAndVariant ( ) ) ; } Collections . sort ( codes ) ; throw new IllegalArgumentException ( "'" + langCode + "' is not a language code known to LanguageTool." + " Supported language codes are: " + String . join ( ", " , codes ) + ". The list of languages is read from " + PROPERTIES_PATH + " in the Java classpath. See http://wiki.languagetool.org/java-api for details." ) ; } } return language ; } | Get the Language object for the given language code . |
/**
 * Tests if a token has shared features with other tokens.
 *
 * <p>Records, per feature, which equivalence types the token matches and appends the
 * token to the internal {@code tokSequence}/{@code tokSequenceEquivalences} state.
 *
 * @param aToken the token reading to test
 * @param uFeatures the unification features (feature name to list of type names); must not be null
 * @return {@code true} if the token matched at least one type for every feature
 * @throws RuntimeException if {@code uFeatures} is null
 */
protected final boolean isSatisfied(AnalyzedToken aToken, Map<String, List<String>> uFeatures) {
  // In "all features in" mode with nothing matched so far, nothing can unify.
  if (allFeatsIn && equivalencesMatched.isEmpty()) {
    return false;
  }
  if (uFeatures == null) {
    throw new RuntimeException("isSatisfied called without features being set");
  }
  unificationFeats = uFeatures;
  boolean unified = true;
  if (allFeatsIn) {
    // Only re-test equivalences matched in the first pass.
    unified = checkNext(aToken, uFeatures);
  } else {
    // First pass: make sure there is a per-token map for the current token index.
    while (equivalencesMatched.size() <= tokCnt) {
      equivalencesMatched.add(new ConcurrentHashMap<>());
    }
    for (Map.Entry<String, List<String>> feat : uFeatures.entrySet()) {
      List<String> types = feat.getValue();
      // An empty type list means "all types registered for this feature".
      if (types == null || types.isEmpty()) {
        types = equivalenceFeatures.get(feat.getKey());
      }
      for (String typeName : types) {
        PatternToken testElem =
            equivalenceTypes.get(new EquivalenceTypeLocator(feat.getKey(), typeName));
        if (testElem == null) {
          // Unknown (feature, type) combination — cannot unify at all.
          return false;
        }
        if (testElem.isMatched(aToken)) {
          // Record the matched type under this feature for the current token.
          if (!equivalencesMatched.get(tokCnt).containsKey(feat.getKey())) {
            Set<String> typeSet = new HashSet<>();
            typeSet.add(typeName);
            equivalencesMatched.get(tokCnt).put(feat.getKey(), typeSet);
          } else {
            equivalencesMatched.get(tokCnt).get(feat.getKey()).add(typeName);
          }
        }
      }
      // Unified so far only if this feature matched at least one type.
      unified = equivalencesMatched.get(tokCnt).containsKey(feat.getKey());
      if (!unified) {
        // Roll back this token's partial matches and stop checking further features.
        equivalencesMatched.remove(tokCnt);
        break;
      }
    }
    if (unified) {
      if (tokCnt == 0 || tokSequence.isEmpty()) {
        // First reading of the first token: start a fresh sequence.
        tokSequence.add(new AnalyzedTokenReadings(aToken, 0));
        List<Map<String, Set<String>>> equivList = new ArrayList<>();
        equivList.add(equivalencesMatched.get(tokCnt));
        tokSequenceEquivalences.add(equivList);
      } else {
        // Additional reading: append to the existing first sequence entry.
        tokSequence.get(0).addReading(aToken);
        tokSequenceEquivalences.get(0).add(equivalencesMatched.get(tokCnt));
      }
      tokCnt++;
    }
  }
  return unified;
}
12,793 | public final void startUnify ( ) { allFeatsIn = true ; for ( int i = 0 ; i < tokCnt ; i ++ ) { featuresFound . add ( false ) ; } tmpFeaturesFound = new ArrayList < > ( featuresFound ) ; } | Starts testing only those equivalences that were previously matched . |
/**
 * Make sure that we really matched all the required features of the unification.
 *
 * @param uFeatures the unification features (feature name to list of type names)
 * @return {@code true} only if every token in the sequence unified on all features
 */
public final boolean getFinalUnificationValue(Map<String, List<String>> uFeatures) {
  int tokUnified = 0;
  for (int j = 0; j < tokSequence.size(); j++) {
    boolean unifiedTokensFound = false;
    for (int i = 0; i < tokSequenceEquivalences.get(j).size(); i++) {
      int featUnified = 0;
      if (tokSequenceEquivalences.get(j).get(i).containsKey(UNIFY_IGNORE)) {
        // Token explicitly marked as ignored by unification: count it once per token.
        if (i == 0) {
          tokUnified++;
        }
        unifiedTokensFound = true;
        continue;
      } else {
        for (Map.Entry<String, List<String>> feat : uFeatures.entrySet()) {
          // A present-but-empty type set means this feature did NOT unify for this reading,
          // so the consecutive-feature counter is reset.
          if (tokSequenceEquivalences.get(j).get(i).containsKey(feat.getKey())
              && tokSequenceEquivalences.get(j).get(i).get(feat.getKey()).isEmpty()) {
            featUnified = 0;
          } else {
            featUnified++;
          }
          // All features unified for this reading; count the token at most once (tokUnified <= j).
          if (featUnified == unificationFeats.entrySet().size() && tokUnified <= j) {
            tokUnified++;
            unifiedTokensFound = true;
            break;
          }
        }
      }
    }
    // No reading of this token unified — the whole sequence fails.
    if (!unifiedTokensFound) {
      return false;
    }
  }
  if (tokUnified == tokSequence.size()) {
    return true;
  }
  return false;
}
12,795 | public final void reset ( ) { equivalencesMatched . clear ( ) ; allFeatsIn = false ; tokCnt = 0 ; featuresFound . clear ( ) ; tmpFeaturesFound . clear ( ) ; tokSequence . clear ( ) ; tokSequenceEquivalences . clear ( ) ; readingsCounter = 1 ; uniMatched = false ; uniAllMatched = false ; inUnification = false ; } | Resets after use of unification . Required . |
/**
 * Gets a full sequence of filtered tokens.
 *
 * @return the unified token readings, or {@code null} if the sequence is empty or
 *         some token has no reading that unified on all features
 */
public final AnalyzedTokenReadings[] getUnifiedTokens() {
  if (tokSequence.isEmpty()) {
    return null;
  }
  List<AnalyzedTokenReadings> uTokens = new ArrayList<>();
  for (int j = 0; j < tokSequence.size(); j++) {
    boolean unifiedTokensFound = false;
    for (int i = 0; i < tokSequenceEquivalences.get(j).size(); i++) {
      int featUnified = 0;
      if (tokSequenceEquivalences.get(j).get(i).containsKey(UNIFY_IGNORE)) {
        // Reading marked as ignored by unification: keep it unconditionally.
        addTokenToSequence(uTokens, tokSequence.get(j).getAnalyzedToken(i), j);
        unifiedTokensFound = true;
      } else {
        for (Map.Entry<String, List<String>> feat : unificationFeats.entrySet()) {
          // A present-but-empty type set means this feature failed for this reading;
          // reset the consecutive-feature counter.
          if (tokSequenceEquivalences.get(j).get(i).containsKey(feat.getKey())
              && tokSequenceEquivalences.get(j).get(i).get(feat.getKey()).isEmpty()) {
            featUnified = 0;
          } else {
            featUnified++;
          }
          // All features unified for this reading — keep it.
          if (featUnified == unificationFeats.entrySet().size()) {
            addTokenToSequence(uTokens, tokSequence.get(j).getAnalyzedToken(i), j);
            unifiedTokensFound = true;
          }
        }
      }
    }
    // A token with no unified reading invalidates the whole sequence.
    if (!unifiedTokensFound) {
      return null;
    }
  }
  return uTokens.toArray(new AnalyzedTokenReadings[0]);
}
/**
 * Tests if the token sequence is unified.
 *
 * <p>Operates as a small state machine: the first token's readings are collected in a
 * pre-unification phase; once {@code lastReading} is seen, the unifier switches into
 * unification mode ({@code inUnification}) for subsequent tokens.
 *
 * @param matchToken the token reading being tested
 * @param uFeatures the unification features (feature name to list of type names)
 * @param lastReading whether this is the last reading of the current token
 * @param isMatched whether the reading matched the pattern element
 * @return the unification result while in unification mode; always {@code true} before it starts
 */
public final boolean isUnified(AnalyzedToken matchToken, Map<String, List<String>> uFeatures,
    boolean lastReading, boolean isMatched) {
  if (inUnification) {
    if (isMatched) {
      // Any matching reading of the current token keeps unification alive.
      uniMatched |= isSatisfied(matchToken, uFeatures);
    }
    uniAllMatched = uniMatched;
    if (lastReading) {
      // Current token finished: advance and reset the per-token flag.
      startNextToken();
      uniMatched = false;
    }
    return uniAllMatched && getFinalUnificationValue(uFeatures);
  } else {
    if (isMatched) {
      // Pre-unification phase: record equivalences but ignore the result.
      isSatisfied(matchToken, uFeatures);
    }
  }
  if (lastReading) {
    // First token complete — switch into unification mode for following tokens.
    inUnification = true;
    uniMatched = false;
    startUnify();
  }
  return true;
}
12,798 | protected boolean isSurrogatePairCombination ( String word ) { if ( word . length ( ) > 1 && word . length ( ) % 2 == 0 && word . codePointCount ( 0 , word . length ( ) ) != word . length ( ) ) { boolean isSurrogatePairCombination = true ; for ( int i = 0 ; i < word . length ( ) && isSurrogatePairCombination ; i += 2 ) { isSurrogatePairCombination &= Character . isSurrogatePair ( word . charAt ( i ) , word . charAt ( i + 1 ) ) ; } return isSurrogatePairCombination ; } return false ; } | Checks whether a given String consists only of surrogate pairs . |
12,799 | public boolean canBeIgnoredFor ( AnalyzedSentence sentence ) { return ( ! simpleRuleTokens . isEmpty ( ) && ! sentence . getTokenSet ( ) . containsAll ( simpleRuleTokens ) ) || ( ! inflectedRuleTokens . isEmpty ( ) && ! sentence . getLemmaSet ( ) . containsAll ( inflectedRuleTokens ) ) ; } | A fast check whether this rule can be ignored for the given sentence because it can never match . Used internally for performance optimization . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.