idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
12,600
/**
 * Checks that the number of non-option arguments for the command is exactly {@code n}.
 *
 * @param cmd the command being validated
 * @param cl the parsed command line
 * @param n the required number of arguments
 * @throws InvalidArgumentException if the argument count differs from {@code n}
 */
public static void checkNumOfArgsEquals(Command cmd, CommandLine cl, int n)
    throws InvalidArgumentException {
  int actual = cl.getArgs().length;
  if (actual != n) {
    throw new InvalidArgumentException(
        ExceptionMessage.INVALID_ARGS_NUM.getMessage(cmd.getCommandName(), n, actual));
  }
}
Checks the number of non - option arguments equals n for command .
12,601
/**
 * Checks that the number of non-option arguments for the command is at least {@code n}.
 *
 * @param cmd the command being validated
 * @param cl the parsed command line
 * @param n the minimum number of arguments
 * @throws InvalidArgumentException if fewer than {@code n} arguments were supplied
 */
public static void checkNumOfArgsNoLessThan(Command cmd, CommandLine cl, int n)
    throws InvalidArgumentException {
  int actual = cl.getArgs().length;
  if (actual < n) {
    throw new InvalidArgumentException(
        ExceptionMessage.INVALID_ARGS_NUM_INSUFFICIENT.getMessage(
            cmd.getCommandName(), n, actual));
  }
}
Checks the number of non - option arguments is no less than n for command .
12,602
/**
 * Checks that the number of non-option arguments for the command is at most {@code n}.
 *
 * @param cmd the command being validated
 * @param cl the parsed command line
 * @param n the maximum number of arguments
 * @throws InvalidArgumentException if more than {@code n} arguments were supplied
 */
public static void checkNumOfArgsNoMoreThan(Command cmd, CommandLine cl, int n)
    throws InvalidArgumentException {
  int actual = cl.getArgs().length;
  if (actual > n) {
    throw new InvalidArgumentException(
        ExceptionMessage.INVALID_ARGS_NUM_TOO_MANY.getMessage(cmd.getCommandName(), n, actual));
  }
}
Checks the number of non - option arguments is no more than n for command .
12,603
/**
 * Runs the report capacity command.
 *
 * @param cl the parsed command line
 * @return 0 on success (including when only usage is printed)
 * @throws IOException if fetching the worker report fails
 */
public int run(CommandLine cl) throws IOException {
  if (cl.hasOption(ReportCommand.HELP_OPTION_NAME)) {
    System.out.println(getUsage());
    return 0;
  }
  generateCapacityReport(getOptions(cl));
  return 0;
}
Runs report capacity command .
12,604
/**
 * Generates the capacity report: fetches worker info from the block master,
 * sorts workers by last contact time, then prints aggregated and per-worker data.
 *
 * @param options the options which specify the worker range to report on
 * @throws IOException if the worker report cannot be retrieved
 */
public void generateCapacityReport(GetWorkerReportOptions options) throws IOException {
  List<WorkerInfo> workers = mBlockMasterClient.getWorkerReport(options);
  if (workers.isEmpty()) {
    print("No workers found.");
    return;
  }
  workers.sort(new WorkerInfo.LastContactSecComparator());
  collectWorkerInfo(workers);
  printAggregatedInfo(options);
  printWorkerInfo(workers);
}
Generates capacity report .
12,605
/**
 * Aggregates per-worker capacity and usage into the summary fields
 * ({@code mSumCapacityBytes}, {@code mSumUsedBytes}) and the per-tier maps.
 * Uses {@link Map#merge} / {@link Map#computeIfAbsent} instead of the
 * getOrDefault-then-put pattern for clarity and a single map lookup.
 *
 * @param workerInfoList the worker info returned by the block master
 */
private void collectWorkerInfo(List<WorkerInfo> workerInfoList) {
  initVariables();
  for (WorkerInfo workerInfo : workerInfoList) {
    mSumCapacityBytes += workerInfo.getCapacityBytes();
    mSumUsedBytes += workerInfo.getUsedBytes();
    String workerName = workerInfo.getAddress().getHost();
    // Per-tier capacity: cluster-wide sum plus a formatted per-worker entry.
    for (Map.Entry<String, Long> entry : workerInfo.getCapacityBytesOnTiers().entrySet()) {
      String tier = entry.getKey();
      long bytes = entry.getValue();
      mSumCapacityBytesOnTierMap.merge(tier, bytes, Long::sum);
      mCapacityTierInfoMap.computeIfAbsent(tier, k -> new HashMap<>())
          .put(workerName, FormatUtils.getSizeFromBytes(bytes));
    }
    // Per-tier usage, aggregated the same way.
    for (Map.Entry<String, Long> entry : workerInfo.getUsedBytesOnTiers().entrySet()) {
      String tier = entry.getKey();
      long bytes = entry.getValue();
      mSumUsedBytesOnTierMap.merge(tier, bytes, Long::sum);
      mUsedTierInfoMap.computeIfAbsent(tier, k -> new HashMap<>())
          .put(workerName, FormatUtils.getSizeFromBytes(bytes));
    }
  }
}
Collects worker capacity information .
12,606
/**
 * Prints the aggregated cluster-wide capacity information: total capacity,
 * used capacity, per-tier breakdowns, and used/free percentages.
 * Relies on the mIndentationLevel field (consumed by print) for layout.
 *
 * @param options the options which specify the worker range being reported
 */
private void printAggregatedInfo(GetWorkerReportOptions options) {
  mIndentationLevel = 0;
  print(String.format("Capacity information for %s workers: ",
      options.getWorkerRange().toString().toLowerCase()));
  mIndentationLevel++;
  print("Total Capacity: " + FormatUtils.getSizeFromBytes(mSumCapacityBytes));
  // Per-tier capacity, indented one level under the total.
  mIndentationLevel++;
  for (Map.Entry<String, Long> totalBytesTier : mSumCapacityBytesOnTierMap.entrySet()) {
    long value = totalBytesTier.getValue();
    print("Tier: " + totalBytesTier.getKey() + " Size: " + FormatUtils.getSizeFromBytes(value));
  }
  mIndentationLevel--;
  print("Used Capacity: " + FormatUtils.getSizeFromBytes(mSumUsedBytes));
  // Per-tier usage, indented one level under the used total.
  mIndentationLevel++;
  for (Map.Entry<String, Long> usedBytesTier : mSumUsedBytesOnTierMap.entrySet()) {
    long value = usedBytesTier.getValue();
    print("Tier: " + usedBytesTier.getKey() + " Size: " + FormatUtils.getSizeFromBytes(value));
  }
  mIndentationLevel--;
  // Skip percentages entirely when capacity is zero to avoid dividing by zero.
  if (mSumCapacityBytes != 0) {
    int usedPercentage = (int) (100L * mSumUsedBytes / mSumCapacityBytes);
    print(String.format("Used Percentage: " + "%s%%", usedPercentage));
    print(String.format("Free Percentage: " + "%s%%", 100 - usedPercentage));
  }
}
Prints aggregated worker capacity information .
12,607
/**
 * Prints per-worker capacity information. Falls back to the short format when
 * only a single storage tier exists; otherwise prints one "capacity" row and
 * one "used" row per worker with a column per tier.
 *
 * @param workerInfoList the sorted worker info to display
 */
private void printWorkerInfo(List<WorkerInfo> workerInfoList) {
  mIndentationLevel = 0;
  if (mCapacityTierInfoMap.size() == 0) {
    // Nothing collected (e.g. no tiers reported) — nothing to print.
    return;
  } else if (mCapacityTierInfoMap.size() == 1) {
    // Single tier: use the compact layout without per-tier columns.
    printShortWorkerInfo(workerInfoList);
    return;
  }
  Set<String> tiers = mCapacityTierInfoMap.keySet();
  // One 14-char-wide column per tier, in the map's tier ordering.
  String tiersInfo = String.format(Strings.repeat("%-14s", tiers.size()), tiers.toArray());
  String longInfoFormat = getInfoFormat(workerInfoList, false);
  print(String.format("%n" + longInfoFormat,
      "Worker Name", "Last Heartbeat", "Storage", "Total", tiersInfo));
  for (WorkerInfo info : workerInfoList) {
    String workerName = info.getAddress().getHost();
    long usedBytes = info.getUsedBytes();
    long capacityBytes = info.getCapacityBytes();
    String usedPercentageInfo = "";
    // Only append a percentage when capacity is non-zero (avoids div-by-zero).
    if (capacityBytes != 0) {
      int usedPercentage = (int) (100L * usedBytes / capacityBytes);
      usedPercentageInfo = String.format(" (%s%%)", usedPercentage);
    }
    String capacityTierInfo = getWorkerFormattedTierValues(mCapacityTierInfoMap, workerName);
    String usedTierInfo = getWorkerFormattedTierValues(mUsedTierInfoMap, workerName);
    // Two rows per worker: the "used" row leaves name/heartbeat blank.
    print(String.format(longInfoFormat, workerName, info.getLastContactSec(), "capacity",
        FormatUtils.getSizeFromBytes(capacityBytes), capacityTierInfo));
    print(String.format(longInfoFormat, "", "", "used",
        FormatUtils.getSizeFromBytes(usedBytes) + usedPercentageInfo, usedTierInfo));
  }
}
Prints worker capacity information .
12,608
/**
 * Prints worker information in the compact layout used when only one storage
 * tier exists (two rows per worker, no per-tier columns).
 *
 * @param workerInfoList the sorted worker info to display
 */
private void printShortWorkerInfo(List<WorkerInfo> workerInfoList) {
  // Single-tier case: the sole tier name becomes the last column header.
  String tier = mCapacityTierInfoMap.firstKey();
  String shortInfoFormat = getInfoFormat(workerInfoList, true);
  print(String.format("%n" + shortInfoFormat,
      "Worker Name", "Last Heartbeat", "Storage", tier));
  for (WorkerInfo info : workerInfoList) {
    long capacityBytes = info.getCapacityBytes();
    long usedBytes = info.getUsedBytes();
    String usedPercentageInfo = "";
    // Only append a percentage when capacity is non-zero (avoids div-by-zero).
    if (capacityBytes != 0) {
      int usedPercentage = (int) (100L * usedBytes / capacityBytes);
      usedPercentageInfo = String.format(" (%s%%)", usedPercentage);
    }
    // Two rows per worker: the "used" row leaves name/heartbeat blank.
    print(String.format(shortInfoFormat, info.getAddress().getHost(),
        info.getLastContactSec(), "capacity", FormatUtils.getSizeFromBytes(capacityBytes)));
    print(String.format(shortInfoFormat, "", "", "used",
        FormatUtils.getSizeFromBytes(usedBytes) + usedPercentageInfo));
  }
}
Prints worker information when only one tier exists .
12,609
/**
 * Builds the row format string, widening the first column when a worker name is
 * longer than the 16-character default.
 *
 * <p>Robustness fix: the original used {@code .max(...).get()}, which throws
 * {@link java.util.NoSuchElementException} if the list is empty; this version
 * falls back to the default width via {@code orElse(0)}.
 *
 * @param workerInfoList the workers whose host names size the first column
 * @param isShort whether to build the single-tier (short) format
 * @return a {@link String#format} pattern for one report row
 */
private String getInfoFormat(List<WorkerInfo> workerInfoList, boolean isShort) {
  int maxWorkerNameLength = workerInfoList.stream()
      .mapToInt(w -> w.getAddress().getHost().length())
      .max()
      .orElse(0);
  int firstIndent = 16;
  if (firstIndent <= maxWorkerNameLength) {
    // Leave 5 characters of padding after the longest name.
    firstIndent = maxWorkerNameLength + 5;
  }
  if (isShort) {
    return "%-" + firstIndent + "s %-16s %-13s %s";
  }
  return "%-" + firstIndent + "s %-16s %-13s %-16s %s";
}
Gets the info format according to the longest worker name .
12,610
/**
 * Builds the worker report options from the parsed command line: restricts the
 * requested fields to those the capacity report displays, and maps the
 * live/lost/specified options onto a worker range.
 *
 * @param cl the parsed command line
 * @return the configured worker report options
 * @throws IOException if more than one option was passed
 */
private GetWorkerReportOptions getOptions(CommandLine cl) throws IOException {
  if (cl.getOptions().length > 1) {
    System.out.println(getUsage());
    throw new InvalidArgumentException("Too many arguments passed in.");
  }
  GetWorkerReportOptions workerOptions = GetWorkerReportOptions.defaults();
  // Only request the fields this report actually renders.
  Set<WorkerInfoField> fieldRange = new HashSet<>(Arrays.asList(
      WorkerInfoField.ADDRESS,
      WorkerInfoField.WORKER_CAPACITY_BYTES,
      WorkerInfoField.WORKER_CAPACITY_BYTES_ON_TIERS,
      WorkerInfoField.LAST_CONTACT_SEC,
      WorkerInfoField.WORKER_USED_BYTES,
      WorkerInfoField.WORKER_USED_BYTES_ON_TIERS));
  workerOptions.setFieldRange(fieldRange);
  if (cl.hasOption(ReportCommand.LIVE_OPTION_NAME)) {
    workerOptions.setWorkerRange(WorkerRange.LIVE);
  } else if (cl.hasOption(ReportCommand.LOST_OPTION_NAME)) {
    workerOptions.setWorkerRange(WorkerRange.LOST);
  } else if (cl.hasOption(ReportCommand.SPECIFIED_OPTION_NAME)) {
    workerOptions.setWorkerRange(WorkerRange.SPECIFIED);
    // The specified option carries a comma-separated list of worker addresses.
    String[] addresses = cl.getOptionValue(ReportCommand.SPECIFIED_OPTION_NAME).split(",");
    workerOptions.setAddresses(new HashSet<>(Arrays.asList(addresses)));
  }
  return workerOptions;
}
Gets the worker info options .
12,611
/**
 * Concatenates the formatted per-tier values of one worker, one 14-char-wide
 * column per tier, using {@code "-"} for tiers the worker does not report.
 *
 * @param map tier name to (worker name to formatted value)
 * @param workerName the worker whose values to extract
 * @return the concatenated, fixed-width tier values
 */
private static String getWorkerFormattedTierValues(Map<String, Map<String, String>> map,
    String workerName) {
  StringBuilder columns = new StringBuilder();
  for (Map<String, String> tierMap : map.values()) {
    columns.append(String.format("%-14s", tierMap.getOrDefault(workerName, "-")));
  }
  return columns.toString();
}
Gets the formatted tier values of a worker .
12,612
/**
 * Resets the accumulators used by collectWorkerInfo. All tier-keyed maps are
 * TreeMaps ordered by tier rank (MEM, SSD, HDD, then others alphabetically)
 * via FileSystemAdminShellUtils::compareTierNames, so report output lists
 * tiers in that order.
 */
private void initVariables() {
  mSumCapacityBytes = 0;
  mSumUsedBytes = 0;
  mSumCapacityBytesOnTierMap = new TreeMap<>(FileSystemAdminShellUtils::compareTierNames);
  mSumUsedBytesOnTierMap = new TreeMap<>(FileSystemAdminShellUtils::compareTierNames);
  mCapacityTierInfoMap = new TreeMap<>(FileSystemAdminShellUtils::compareTierNames);
  mUsedTierInfoMap = new TreeMap<>(FileSystemAdminShellUtils::compareTierNames);
}
Initializes member variables used to collect worker info .
12,613
/**
 * Prints the text indented by the current indentation level
 * ({@code mIndentationLevel * INDENT_SIZE} spaces).
 *
 * @param text the text to print
 */
private void print(String text) {
  mPrintStream.println(Strings.repeat(" ", mIndentationLevel * INDENT_SIZE) + text);
}
Prints indented information .
12,614
/**
 * Registers the given key (and its aliases) in the global key maps.
 *
 * <p>A previously registered built-in key is never displaced; a non-built-in
 * key also never displaces an existing registration.
 *
 * @param key the property key to register
 * @return true if the key was registered, false if an existing registration won
 */
public static boolean register(PropertyKey key) {
  String name = key.getName();
  if (DEFAULT_KEYS_MAP.containsKey(name)) {
    PropertyKey existing = DEFAULT_KEYS_MAP.get(name);
    // Keep the existing entry unless it is non-built-in and the new key is built-in.
    if (existing.isBuiltIn() || !key.isBuiltIn()) {
      return false;
    }
  }
  DEFAULT_KEYS_MAP.put(name, key);
  String[] aliases = key.getAliases();
  if (aliases != null) {
    for (String alias : aliases) {
      DEFAULT_ALIAS_MAP.put(alias, key);
    }
  }
  return true;
}
Registers the given key to the global key map .
12,615
/**
 * Unregisters the given key from the global key map, including its aliases.
 *
 * <p>Bug fix: {@code register} stores aliases keyed by the alias strings, but the
 * original implementation only removed the key's primary name from the alias map,
 * leaving stale alias entries pointing at an unregistered key. This version also
 * removes each alias.
 *
 * @param key the property key to unregister
 */
public static void unregister(PropertyKey key) {
  String name = key.getName();
  DEFAULT_KEYS_MAP.remove(name);
  DEFAULT_ALIAS_MAP.remove(name);
  String[] aliases = key.getAliases();
  if (aliases != null) {
    for (String alias : aliases) {
      DEFAULT_ALIAS_MAP.remove(alias);
    }
  }
}
Unregisters the given key from the global key map .
12,616
/**
 * Creates a remote block output stream that replicates the block to every
 * given worker, one data writer per worker address.
 *
 * @param context the file system context
 * @param blockId the block id
 * @param blockSize the block size in bytes
 * @param workerNetAddresses the workers to replicate to
 * @param options the out-stream options
 * @return the replicated block output stream
 * @throws IOException if a data writer cannot be created
 */
public static BlockOutStream createReplicatedBlockOutStream(FileSystemContext context,
    long blockId, long blockSize, java.util.List<WorkerNetAddress> workerNetAddresses,
    OutStreamOptions options) throws IOException {
  List<DataWriter> writers = new ArrayList<>(workerNetAddresses.size());
  for (WorkerNetAddress address : workerNetAddresses) {
    writers.add(DataWriter.Factory.create(context, blockId, blockSize, address, options));
  }
  return new BlockOutStream(writers, blockSize, workerNetAddresses);
}
Creates a new remote block output stream .
12,617
/**
 * Writes all readable bytes of the given byte buf to this output stream.
 *
 * @param buf the source buffer
 * @throws IOException if the write fails
 */
public void write(io.netty.buffer.ByteBuf buf) throws IOException {
  write(buf, 0, buf.readableBytes());
}
Writes the data in the specified byte buf to this output stream .
12,618
/**
 * Writes {@code len} bytes from {@code buf}, starting at {@code off}, to this
 * output stream, filling and flushing chunk buffers as needed.
 *
 * @param buf the source buffer
 * @param off the offset into {@code buf}
 * @param len the number of bytes to write
 * @throws IOException if a chunk cannot be flushed to the data writers
 */
public void write(io.netty.buffer.ByteBuf buf, int off, int len) throws IOException {
  if (len == 0) {
    return;
  }
  while (len > 0) {
    // Ensure a chunk with free space exists (flushes a full chunk first).
    updateCurrentChunk(false);
    int toWrite = Math.min(len, mCurrentChunk.writableBytes());
    mCurrentChunk.writeBytes(buf, off, toWrite);
    off += toWrite;
    len -= toWrite;
  }
  // If this write filled the chunk exactly, flush it now rather than waiting.
  updateCurrentChunk(false);
}
Writes len bytes from the specified byte buf starting at offset off to this output stream .
12,619
/**
 * Updates the current chunk: flushes it to every data writer when it is full
 * (or when {@code lastChunk} is true), and allocates a fresh buffer when more
 * data is expected.
 *
 * @param lastChunk whether this is the final flush of the stream
 * @throws IOException if a data writer rejects the chunk
 */
private void updateCurrentChunk(boolean lastChunk) throws IOException {
  // Fast path: chunk still has room and this is not the final flush.
  if (mCurrentChunk != null && mCurrentChunk.writableBytes() > 0 && !lastChunk) {
    return;
  }
  if (mCurrentChunk == null) {
    // No chunk yet: allocate one unless the stream is ending anyway.
    if (!lastChunk) {
      mCurrentChunk = allocateBuffer();
    }
    return;
  }
  if (mCurrentChunk.writableBytes() == 0 || lastChunk) {
    try {
      if (mCurrentChunk.readableBytes() > 0) {
        // retain() once per writer so each duplicate() keeps the backing
        // buffer alive until that writer releases it.
        for (DataWriter dataWriter : mDataWriters) {
          mCurrentChunk.retain();
          dataWriter.writeChunk(mCurrentChunk.duplicate());
        }
      } else {
        // An empty chunk is only legal on the final flush.
        Preconditions.checkState(lastChunk);
      }
    } finally {
      // Drop this method's own reference even if a writer threw.
      mCurrentChunk.release();
      mCurrentChunk = null;
    }
  }
  if (!lastChunk) {
    mCurrentChunk = allocateBuffer();
  }
}
Updates the current chunk .
12,620
/**
 * Removes the given ACL entries, routing default entries to the default ACL
 * and all others to the access ACL, then recomputes the masks.
 *
 * @param entries the entries to remove
 * @return this object for chaining
 */
public T removeAcl(List<AclEntry> entries) {
  for (AclEntry entry : entries) {
    if (entry.isDefault()) {
      getDefaultACL().removeEntry(entry);
    } else {
      mAcl.removeEntry(entry);
    }
  }
  updateMask(entries);
  return getThis();
}
Removes ACL entries .
12,621
/**
 * Replaces all existing ACL entries with the given list. The access ACL is
 * cleared only when the new list contains at least one non-default entry;
 * default-only lists leave the access ACL untouched.
 *
 * @param entries the replacement entries
 * @return this object for chaining
 */
public T replaceAcl(List<AclEntry> entries) {
  boolean hasAccessEntry = entries.stream().anyMatch(entry -> !entry.isDefault());
  if (hasAccessEntry) {
    mAcl.clearEntries();
  }
  return setAcl(entries);
}
Replaces all existing ACL entries with a new list of entries .
12,622
/**
 * Updates the mask for the inode. This method should be called after updates
 * to the ACL and default ACL.
 *
 * <p>Only named users, named groups, and the owning group feed the mask. If
 * the entry list itself contains an explicit MASK entry, the mask was set
 * directly and no recomputation is performed.
 *
 * @param entries the entries that were just applied
 * @return this object for chaining
 */
public T updateMask(List<AclEntry> entries) {
  boolean needToUpdateACL = false;
  boolean needToUpdateDefaultACL = false;
  for (AclEntry entry : entries) {
    // Mask-relevant entry types; route to access or default ACL by isDefault().
    if (entry.getType().equals(AclEntryType.NAMED_USER)
        || entry.getType().equals(AclEntryType.NAMED_GROUP)
        || entry.getType().equals(AclEntryType.OWNING_GROUP)) {
      if (entry.isDefault()) {
        needToUpdateDefaultACL = true;
      } else {
        needToUpdateACL = true;
      }
    }
    // An explicit MASK entry overrides any recomputation — bail out entirely.
    if (entry.getType().equals(AclEntryType.MASK)) {
      return getThis();
    }
  }
  if (needToUpdateACL) {
    mAcl.updateMask();
  }
  if (needToUpdateDefaultACL) {
    getDefaultACL().updateMask();
  }
  return getThis();
}
Update Mask for the Inode . This method should be called after updates to ACL and defaultACL .
12,623
/**
 * Sets the given ACL entries into the internal ACLs, overwriting any existing
 * corresponding entries, then recomputes the masks. Null or empty input is a
 * no-op.
 *
 * @param entries the entries to set
 * @return this object for chaining
 */
public T setAcl(List<AclEntry> entries) {
  if (entries == null || entries.isEmpty()) {
    return getThis();
  }
  for (AclEntry entry : entries) {
    // Default entries go to the default ACL; everything else to the access ACL.
    AccessControlList target = entry.isDefault() ? getDefaultACL() : mAcl;
    target.setEntry(entry);
  }
  updateMask(entries);
  return getThis();
}
Sets ACL entries into the internal ACL . The entries will overwrite any existing correspondent entries in the internal ACL .
12,624
/**
 * Updates this inode's state from the given journal entry. Only the fields
 * present in the entry (per the proto has-checks) are applied; all others are
 * left unchanged.
 *
 * @param entry the update entry to apply
 */
public void updateFromEntry(UpdateInodeEntry entry) {
  if (entry.hasAcl()) {
    setInternalAcl(ProtoUtils.fromProto(entry.getAcl()));
  }
  if (entry.hasCreationTimeMs()) {
    setCreationTimeMs(entry.getCreationTimeMs());
  }
  if (entry.hasGroup()) {
    setGroup(entry.getGroup());
  }
  if (entry.hasLastModificationTimeMs()) {
    // The entry controls whether an older timestamp may overwrite a newer one.
    setLastModificationTimeMs(entry.getLastModificationTimeMs(),
        entry.getOverwriteModificationTime());
  }
  if (entry.hasMode()) {
    setMode((short) entry.getMode());
  }
  if (entry.hasName()) {
    setName(entry.getName());
  }
  if (entry.hasOwner()) {
    setOwner(entry.getOwner());
  }
  if (entry.hasParentId()) {
    setParentId(entry.getParentId());
  }
  if (entry.hasPersistenceState()) {
    setPersistenceState(PersistenceState.valueOf(entry.getPersistenceState()));
  }
  if (entry.hasPinned()) {
    setPinned(entry.getPinned());
  }
  if (entry.hasTtl()) {
    setTtl(entry.getTtl());
  }
  if (entry.hasTtlAction()) {
    setTtlAction(ProtobufUtils.fromProtobuf(entry.getTtlAction()));
  }
  if (entry.hasUfsFingerprint()) {
    setUfsFingerprint(entry.getUfsFingerprint());
  }
}
Updates this inode's state from the given entry .
12,625
/**
 * Formats the worker data folder: wipes it, recreates it, applies the
 * configured POSIX permissions, and sets the sticky bit.
 *
 * @param folder the path of the worker data folder
 * @throws IOException if any filesystem operation fails
 */
private static void formatWorkerDataFolder(String folder) throws IOException {
  Path folderPath = Paths.get(folder);
  FileUtils.deletePathRecursively(folder);
  Files.createDirectory(folderPath);
  Set<PosixFilePermission> perms = PosixFilePermissions.fromString(
      ServerConfiguration.get(PropertyKey.WORKER_DATA_FOLDER_PERMISSIONS));
  Files.setPosixFilePermissions(folderPath, perms);
  FileUtils.setLocalDirStickyBit(folderPath.toAbsolutePath().toString());
}
Formats the worker data folder .
12,626
/**
 * Compares two tier names by rank (MEM before SSD before HDD before anything
 * else), breaking ties alphabetically.
 *
 * <p>Idiom fix: uses {@link Integer#compare} instead of subtraction, which is
 * the overflow-safe comparison idiom.
 *
 * @param a the first tier name
 * @param b the second tier name
 * @return negative, zero, or positive per the {@link java.util.Comparator} contract
 */
public static int compareTierNames(String a, String b) {
  int rankA = getTierRankValue(a);
  int rankB = getTierRankValue(b);
  if (rankA == rankB) {
    return a.compareTo(b);
  }
  return Integer.compare(rankA, rankB);
}
Compares two tier names according to their rank values .
12,627
/**
 * Checks if the master client service is available by polling the primary RPC
 * address once with a short exponential backoff.
 *
 * <p>Fix: the original rethrow dropped the underlying cause; this version
 * chains it so callers can diagnose why the leader was unreachable.
 *
 * @param alluxioConf the Alluxio configuration
 * @throws IOException if the master client service cannot be reached
 */
public static void checkMasterClientService(AlluxioConfiguration alluxioConf)
    throws IOException {
  try (CloseableResource<FileSystemMasterClient> client =
      FileSystemContext.create(ClientContext.create(alluxioConf))
          .acquireMasterClientResource()) {
    InetSocketAddress address = client.get().getAddress();
    List<InetSocketAddress> addresses = Arrays.asList(address);
    MasterInquireClient inquireClient = new PollingMasterInquireClient(addresses,
        () -> new ExponentialBackoffRetry(50, 100, 2), alluxioConf);
    inquireClient.getPrimaryRpcAddress();
  } catch (UnavailableException e) {
    // Preserve the cause instead of discarding it.
    throw new IOException("Cannot connect to Alluxio leader master.", e);
  }
}
Checks if the master client service is available . Throws an exception if fails to determine that the master client service is running .
12,628
/**
 * Assigns a rank value to a tier name: MEM (0), SSD (1), HDD (2); any other
 * name ranks last ({@link Integer#MAX_VALUE}).
 *
 * <p>Perf/idiom fix: the original allocated a new {@code List} and did a linear
 * scan on every call; a switch performs no allocation.
 *
 * @param input the tier name
 * @return the rank value, lower sorts first
 */
private static int getTierRankValue(String input) {
  switch (input) {
    case "MEM":
      return 0;
    case "SSD":
      return 1;
    case "HDD":
      return 2;
    default:
      return Integer.MAX_VALUE;
  }
}
Assigns a rank value to the input string .
12,629
/**
 * Reads a new chunk from the channel once the current chunk is fully consumed,
 * lazily creating the data reader for the remaining range on first use.
 *
 * @throws IOException if reading a chunk fails
 */
private void readChunk() throws IOException {
  if (mDataReader == null) {
    // Lazily open a reader covering everything from the current position.
    mDataReader = mDataReaderFactory.create(mPos, mLength - mPos);
  }
  if (mCurrentChunk != null && mCurrentChunk.readableBytes() == 0) {
    // Fully consumed: release the buffer before fetching the next chunk.
    mCurrentChunk.release();
    mCurrentChunk = null;
  }
  if (mCurrentChunk == null) {
    mCurrentChunk = mDataReader.readChunk();
  }
}
Reads a new chunk from the channel if all of the current chunk is read .
12,630
/**
 * Closes the current data reader and releases the current chunk, if any.
 *
 * <p>Robustness fix: the reader reference is now cleared in a {@code finally}
 * block, so a {@code close()} failure cannot leave a stale reference that a
 * later call would try to close again.
 *
 * @throws IOException if closing the data reader fails
 */
private void closeDataReader() throws IOException {
  if (mCurrentChunk != null) {
    mCurrentChunk.release();
    mCurrentChunk = null;
  }
  try {
    if (mDataReader != null) {
      mDataReader.close();
    }
  } finally {
    mDataReader = null;
  }
}
Close the current data reader .
12,631
/**
 * Stores an immutable snapshot of the given entries as the internal access
 * control list.
 *
 * @param acl the entries to copy
 * @return this object for chaining
 */
public K setAcl(List<AclEntry> acl) {
  mAcl = ImmutableList.copyOf(acl);
  return getThis();
}
Sets an immutable copy of acl as the internal access control list .
12,632
/**
 * Commits a block on a worker via a retried RPC to the block master.
 *
 * @param workerId the id of the worker committing the block
 * @param usedBytesOnTier the updated used bytes on the tier
 * @param tierAlias the alias of the tier holding the block
 * @param blockId the id of the block being committed
 * @param length the length of the block
 * @throws IOException if the RPC fails after retries
 */
public void commitBlock(final long workerId, final long usedBytesOnTier,
    final String tierAlias, final long blockId, final long length) throws IOException {
  CommitBlockPRequest request = CommitBlockPRequest.newBuilder()
      .setWorkerId(workerId)
      .setUsedBytesOnTier(usedBytesOnTier)
      .setTierAlias(tierAlias)
      .setBlockId(blockId)
      .setLength(length)
      .build();
  retryRPC((RpcCallable<Void>) () -> {
    mClient.commitBlock(request);
    return null;
  });
}
Commits a block on a worker .
12,633
/**
 * Commits a block in the under file system via a retried RPC to the block master.
 *
 * @param blockId the id of the block being committed
 * @param length the length of the block
 * @throws IOException if the RPC fails after retries
 */
public void commitBlockInUfs(final long blockId, final long length) throws IOException {
  CommitBlockInUfsPRequest request = CommitBlockInUfsPRequest.newBuilder()
      .setBlockId(blockId)
      .setLength(length)
      .build();
  retryRPC((RpcCallable<Void>) () -> {
    mClient.commitBlockInUfs(request);
    return null;
  });
}
Commits a block in Ufs .
12,634
/**
 * Returns a worker id for the worker at the given net address, via a retried
 * RPC to the block master.
 *
 * @param address the worker's net address
 * @return the worker id assigned by the master
 * @throws IOException if the RPC fails after retries
 */
public long getId(final WorkerNetAddress address) throws IOException {
  GetWorkerIdPRequest request = GetWorkerIdPRequest.newBuilder()
      .setWorkerNetAddress(GrpcUtils.toProto(address))
      .build();
  return retryRPC((RpcCallable<Long>) () -> mClient.getWorkerId(request).getWorkerId());
}
Returns a worker id for a worker's net address .
12,635
/**
 * Stops the block worker. This method should only be called to terminate the
 * worker. Shutdown order: session cleaner, executor service (forcibly, with a
 * bounded wait), then the master clients.
 */
public void stop() {
  mSessionCleaner.stop();
  try {
    // waitFor re-invokes the lambda until it returns true or times out, so
    // shutdownNow() is called repeatedly until the executor terminates.
    CommonUtils.waitFor("block worker executor shutdown", () -> {
      getExecutorService().shutdownNow();
      try {
        return getExecutorService().awaitTermination(100, TimeUnit.MILLISECONDS);
      } catch (InterruptedException e) {
        // Re-assert the interrupt before propagating.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    });
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
  } catch (TimeoutException e) {
    throw new RuntimeException(e);
  }
  mBlockMasterClientPool.release(mBlockMasterClient);
  try {
    mBlockMasterClientPool.close();
  } catch (IOException e) {
    // Best-effort close: log and continue shutting down the remaining clients.
    LOG.warn("Failed to close the block master client pool with error {}.", e.getMessage());
  }
  mFileSystemMasterClient.close();
}
Stops the block worker . This method should only be called to terminate the worker .
12,636
/**
 * Skips n bytes. Leverages the ability to open a GCS stream from a given
 * offset: when the buffered stream has fewer than n bytes available, the
 * stream is closed and a new one is opened starting at the requested offset.
 *
 * NOTE(review): the fast path returns mInputStream.skip(n) without advancing
 * mPos, and skip() may legitimately skip fewer than n bytes — presumably mPos
 * is kept in sync by read(); confirm against the read() implementation.
 *
 * @param n the number of bytes to skip
 * @return the number of bytes reported skipped
 * @throws IOException if reopening the object stream fails
 */
public long skip(long n) throws IOException {
  if (mInputStream.available() >= n) {
    // Small skip: satisfied entirely from the buffered stream.
    return mInputStream.skip(n);
  }
  // Large skip: reopen the object at the target offset instead of reading through.
  mInputStream.close();
  mPos += n;
  try {
    mObject = mClient.getObject(mBucketName, mKey, null, null, null, null, mPos, null);
    mInputStream = new BufferedInputStream(mObject.getDataInputStream());
  } catch (ServiceException e) {
    throw new IOException(e);
  }
  return n;
}
This method leverages the ability to open a stream from GCS from a given offset . When the underlying stream has fewer bytes buffered than the skip request the stream is closed and a new stream is opened starting at the requested offset .
12,637
/**
 * Translates a GCS bucket ACL into an Alluxio POSIX-style owner mode for the
 * given user: READ grants 0500, WRITE grants 0200, FULL_CONTROL grants 0700.
 * Permissions granted to other users are ignored.
 *
 * @param acl the GCS access control list
 * @param userId the user to resolve the mode for
 * @return the accumulated mode bits
 */
public static short translateBucketAcl(GSAccessControlList acl, String userId) {
  short mode = (short) 0;
  for (GrantAndPermission gp : acl.getGrantAndPermissions()) {
    GranteeInterface grantee = gp.getGrantee();
    // Skip grants that do not apply to this user.
    if (!isUserIdInGrantee(grantee, userId)) {
      continue;
    }
    Permission perm = gp.getPermission();
    if (perm.equals(Permission.PERMISSION_READ)) {
      mode |= (short) 0500;
    } else if (perm.equals(Permission.PERMISSION_WRITE)) {
      mode |= (short) 0200;
    } else if (perm.equals(Permission.PERMISSION_FULL_CONTROL)) {
      mode |= (short) 0700;
    }
  }
  return mode;
}
Translates GCS bucket owner ACL to Alluxio owner mode .
12,638
/**
 * Builds the sync point information from its proto representation. Unknown
 * statuses default to NOT_INITIALLY_SYNCED.
 *
 * @param syncPointInfo the proto representation
 * @return the converted sync point information
 */
public static SyncPointInfo fromProto(alluxio.grpc.SyncPointInfo syncPointInfo) {
  final SyncStatus status;
  switch (syncPointInfo.getSyncStatus()) {
    case Syncing:
      status = SyncStatus.SYNCING;
      break;
    case Initially_Synced:
      status = SyncStatus.INITIALLY_SYNCED;
      break;
    case Not_Initially_Synced:
    default:
      status = SyncStatus.NOT_INITIALLY_SYNCED;
      break;
  }
  return new SyncPointInfo(new AlluxioURI(syncPointInfo.getSyncPointUri()), status);
}
Generate sync point information from the proto representation .
12,639
/**
 * Sets the Netty channel type on the underlying channel key.
 *
 * @param channelType the channel implementation class to use
 * @return this builder for chaining
 */
public GrpcChannelBuilder setChannelType(
    Class<? extends io.netty.channel.Channel> channelType) {
  mChannelKey.setChannelType(channelType);
  return this;
}
Sets the channel type .
12,640
/**
 * Gets the status of the given job, serialized with indented JSON output.
 *
 * @param jobId the id of the job to query
 * @return the REST response wrapping the job info
 */
@Path(ServiceConstants.GET_STATUS)
@JacksonFeatures(serializationEnable = {SerializationFeature.INDENT_OUTPUT})
public Response getStatus(@QueryParam("jobId") final long jobId) {
  return RestUtils.call((RestUtils.RestCallable<JobInfo>) () -> mJobMaster.getStatus(jobId),
      ServerConfiguration.global());
}
Gets the job status .
12,641
/**
 * Lists the ids of all jobs in the history.
 *
 * @return the REST response wrapping the list of job ids
 */
@Path(ServiceConstants.LIST)
public Response list() {
  return RestUtils.call((RestUtils.RestCallable<List<Long>>) () -> mJobMaster.list(),
      ServerConfiguration.global());
}
Lists all the jobs in the history .
12,642
/**
 * Runs a job described by the given JSON job configuration.
 *
 * @param jobConfig the configuration of the job to run
 * @return the REST response wrapping the new job's id
 */
@Path(ServiceConstants.RUN)
@Consumes(MediaType.APPLICATION_JSON)
public Response run(final JobConfig jobConfig) {
  return RestUtils.call((RestUtils.RestCallable<Long>) () -> mJobMaster.run(jobConfig),
      ServerConfiguration.global());
}
Runs a job .
12,643
/**
 * Adds properties for the given path, merging them into any existing
 * properties and journaling the result. An empty property map is a no-op.
 *
 * @param ctx the journal context supplier
 * @param path the path to attach the properties to
 * @param properties the properties to add
 */
public void add(Supplier<JournalContext> ctx, String path,
    Map<PropertyKey, String> properties) {
  try (LockResource r = new LockResource(mLock.writeLock())) {
    if (!properties.isEmpty()) {
      // Merge the new values over whatever is already stored for the path.
      Map<String, String> merged = mState.getProperties(path);
      for (Map.Entry<PropertyKey, String> entry : properties.entrySet()) {
        merged.put(entry.getKey().getName(), entry.getValue());
      }
      mState.applyAndJournal(ctx, PathPropertiesEntry.newBuilder()
          .setPath(path)
          .putAllProperties(merged)
          .build());
    }
  }
}
Adds properties for path .
12,644
/**
 * Removes the specified keys from the properties of the given path. If the
 * removal empties the property map, the whole path entry is journaled as
 * removed; otherwise the remaining properties are re-journaled.
 *
 * @param ctx the journal context supplier
 * @param path the path whose properties to trim
 * @param keys the property keys to remove
 */
public void remove(Supplier<JournalContext> ctx, String path, Set<String> keys) {
  try (LockResource r = new LockResource(mLock.writeLock())) {
    Map<String, String> properties = mState.getProperties(path);
    if (properties.isEmpty()) {
      return;
    }
    for (String key : keys) {
      properties.remove(key);
    }
    if (properties.isEmpty()) {
      mState.applyAndJournal(ctx,
          RemovePathPropertiesEntry.newBuilder().setPath(path).build());
    } else {
      mState.applyAndJournal(ctx, PathPropertiesEntry.newBuilder()
          .setPath(path)
          .putAllProperties(properties)
          .build());
    }
  }
}
Removes the specified set of keys from the properties for path .
12,645
/**
 * Removes all properties for the given path, journaling the removal. Paths
 * that already have no properties are left untouched.
 *
 * @param ctx the journal context supplier
 * @param path the path whose properties to remove
 */
public void removeAll(Supplier<JournalContext> ctx, String path) {
  try (LockResource r = new LockResource(mLock.writeLock())) {
    Map<String, String> existing = mState.getProperties(path);
    if (existing.isEmpty()) {
      return;
    }
    mState.applyAndJournal(ctx,
        RemovePathPropertiesEntry.newBuilder().setPath(path).build());
  }
}
Removes all properties for path .
12,646
/**
 * Gets the bucket in the list whose interval contains the inode's TTL end
 * time, or null if the inode has no TTL or no bucket contains it.
 *
 * @param inode the inode to locate
 * @return the containing bucket, or null
 */
private TtlBucket getBucketContaining(InodeView inode) {
  if (inode.getTtl() == Constants.NO_TTL) {
    // Inodes without a TTL are never placed in any bucket.
    return null;
  }
  long ttlEndTimeMs = inode.getCreationTimeMs() + inode.getTtl();
  // floor() yields the last bucket that starts at or before ttlEndTimeMs.
  TtlBucket bucket = mBucketList.floor(new TtlBucket(ttlEndTimeMs));
  if (bucket == null || bucket.getTtlIntervalEndTimeMs() < ttlEndTimeMs
      || (bucket.getTtlIntervalEndTimeMs() == ttlEndTimeMs
          && TtlBucket.getTtlIntervalMs() != 0)) {
    // Not contained: either no candidate bucket, the candidate ends before the
    // TTL end time, or the end time falls exactly on the boundary — which
    // appears to belong to the next bucket when intervals are non-zero.
    // NOTE(review): boundary ownership inferred from this check; confirm
    // against TtlBucket's interval definition.
    return null;
  }
  return bucket;
}
Gets the bucket in the list that contains the inode .
12,647
/**
 * Removes an inode from the bucket containing it, if any; does nothing when
 * the inode is not in any bucket.
 *
 * @param inode the inode to remove
 */
public void remove(InodeView inode) {
  TtlBucket bucket = getBucketContaining(inode);
  if (bucket == null) {
    return;
  }
  bucket.removeInode(inode);
}
Removes an inode from the bucket containing it if the inode is in one of the buckets otherwise do nothing .
12,648
/**
 * Recovers the checkpoint in case the master crashed while updating it.
 * Restores a consistent state from whichever combination of checkpoint,
 * temp-backup, and backup files survived the crash.
 */
public void recover() {
  try {
    boolean checkpointExists = mUfs.isFile(mCheckpoint.toString());
    boolean backupCheckpointExists = mUfs.isFile(mBackupCheckpoint.toString());
    boolean tempBackupCheckpointExists = mUfs.isFile(mTempBackupCheckpoint.toString());
    // All three existing at once is an invalid state the update protocol can
    // never produce.
    Preconditions.checkState(
        !(checkpointExists && backupCheckpointExists && tempBackupCheckpointExists),
        "checkpoint, temp backup checkpoint, and backup checkpoint should never all exist ");
    if (tempBackupCheckpointExists) {
      // Crash happened mid-backup: the temp backup is the newest complete
      // checkpoint, so promote it (discarding any partial checkpoint).
      UnderFileSystemUtils.deleteFileIfExists(mUfs, mCheckpoint.toString());
      mUfs.renameFile(mTempBackupCheckpoint.toString(), mCheckpoint.toString());
    }
    if (backupCheckpointExists) {
      if (checkpointExists) {
        // New checkpoint landed: finish the interrupted cleanup phase.
        mWriter.deleteCompletedLogs();
        mUfs.deleteFile(mBackupCheckpoint.toString());
      } else {
        // New checkpoint never landed: fall back to the backup.
        mUfs.renameFile(mBackupCheckpoint.toString(), mCheckpoint.toString());
      }
    }
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Recovers the checkpoint in case the master crashed while updating it .
12,649
/**
 * Updates the checkpoint to the file at the specified location. The rename
 * sequence (checkpoint -> temp backup -> backup, then new file -> checkpoint)
 * is the crash-safety protocol that recover() undoes; do not reorder it.
 *
 * @param location the location of the new checkpoint file
 */
public void update(URI location) {
  try {
    if (mUfs.isFile(mCheckpoint.toString())) {
      // Clear any stale intermediates before backing up the old checkpoint.
      UnderFileSystemUtils.deleteFileIfExists(mUfs, mTempBackupCheckpoint.toString());
      UnderFileSystemUtils.deleteFileIfExists(mUfs, mBackupCheckpoint.toString());
      // Two-step rename so recover() can tell which phase a crash interrupted.
      mUfs.renameFile(mCheckpoint.toString(), mTempBackupCheckpoint.toString());
      mUfs.renameFile(mTempBackupCheckpoint.toString(), mBackupCheckpoint.toString());
      LOG.info("Backed up the checkpoint file to {}", mBackupCheckpoint.toString());
    }
    mUfs.renameFile(location.getPath(), mCheckpoint.toString());
    LOG.info("Renamed the checkpoint file from {} to {}", location, mCheckpoint.toString());
    // The new checkpoint is durable; logs it covers and the backup can go.
    mWriter.deleteCompletedLogs();
    UnderFileSystemUtils.deleteFileIfExists(mUfs, mBackupCheckpoint.toString());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Updates the checkpoint to the specified URI .
12,650
/**
 * Gets a metric name tagged with the given user, caching the concatenated
 * result per (metric, user) pair.
 *
 * @param metricName the base metric name
 * @param userName the user to tag the metric with
 * @return the tagged metric name
 */
public static String getMetricNameWithUserTag(String metricName, String userName) {
  UserMetricKey cacheKey = new UserMetricKey(metricName, userName);
  // Fast path avoids the computeIfAbsent lambda allocation on cache hits.
  String cached = CACHED_METRICS.get(cacheKey);
  if (cached != null) {
    return cached;
  }
  return CACHED_METRICS.computeIfAbsent(cacheKey,
      key -> metricName + "." + CommonMetrics.TAG_USER + TAG_SEPARATOR + userName);
}
Gets a metric name with a specific user tag .
12,651
/**
 * Creates a metric from its dot-separated full name and a value.
 *
 * <p>Layout: master/cluster metrics are {@code instance.name[.tags...]};
 * other instances are {@code instance.hostname[-id].name[.tags...]}.
 *
 * @param fullName the full metric name
 * @param value the metric value
 * @return the parsed metric
 */
public static Metric from(String fullName, double value) {
  String[] pieces = fullName.split("\\.");
  Preconditions.checkArgument(pieces.length > 1, "Incorrect metrics name: %s.", fullName);
  String hostname = null;
  String id = null;
  String name = null;
  int tagStartIdx = 0;
  // Master and cluster metrics carry no hostname segment.
  if (pieces[0].equals(MetricsSystem.InstanceType.MASTER.toString())
      || pieces[0].equals(MetricsSystem.CLUSTER.toString())) {
    name = pieces[1];
    tagStartIdx = 2;
  } else {
    // Other instances embed "hostname" or "hostname<ID_SEPARATOR>id" at index 1.
    if (pieces[1].contains(ID_SEPARATOR)) {
      String[] ids = pieces[1].split(ID_SEPARATOR);
      hostname = ids[0];
      id = ids[1];
    } else {
      hostname = pieces[1];
    }
    name = pieces[2];
    tagStartIdx = 3;
  }
  MetricsSystem.InstanceType instance = MetricsSystem.InstanceType.fromString(pieces[0]);
  Metric metric = new Metric(instance, hostname, id, name, value);
  // Remaining pieces are "key<TAG_SEPARATOR>value" tags; malformed ones are skipped.
  for (int i = tagStartIdx; i < pieces.length; i++) {
    String tagStr = pieces[i];
    if (!tagStr.contains(TAG_SEPARATOR)) {
      continue;
    }
    int tagSeparatorIdx = tagStr.indexOf(TAG_SEPARATOR);
    metric.addTag(tagStr.substring(0, tagSeparatorIdx), tagStr.substring(tagSeparatorIdx + 1));
  }
  return metric;
}
Creates the metric from the full name and the value .
12,652
/**
 * Constructs a metric object from its proto representation, copying all tags.
 *
 * @param metric the proto metric
 * @return the converted metric
 */
public static Metric fromProto(alluxio.grpc.Metric metric) {
  String instanceId = metric.hasInstanceId() ? metric.getInstanceId() : null;
  Metric converted = new Metric(
      MetricsSystem.InstanceType.fromString(metric.getInstance()),
      metric.getHostname(), instanceId, metric.getName(), metric.getValue());
  metric.getTagsMap().forEach(converted::addTag);
  return converted;
}
Constructs the metric object from the proto format .
12,653
/**
 * Deletes a file or directory at the given path through HDFS, retrying up to
 * MAX_TRY times on IOException. Each failure is logged with its attempt count;
 * when every attempt fails the last exception is rethrown.
 */
private boolean delete ( String path , boolean recursive ) throws IOException { IOException te = null ; FileSystem hdfs = getFs ( ) ; RetryPolicy retryPolicy = new CountingRetry ( MAX_TRY ) ; while ( retryPolicy . attempt ( ) ) { try { return hdfs . delete ( new Path ( path ) , recursive ) ; } catch ( IOException e ) { LOG . warn ( "Attempt count {} : {}" , retryPolicy . getAttemptCount ( ) , e . getMessage ( ) ) ; te = e ; } } throw te ; }
Delete a file or directory at path .
12,654
/**
 * Renames a file or folder through HDFS, retrying up to MAX_TRY times on
 * IOException; the last exception is rethrown when all attempts fail.
 */
private boolean rename ( String src , String dst ) throws IOException { IOException te = null ; FileSystem hdfs = getFs ( ) ; RetryPolicy retryPolicy = new CountingRetry ( MAX_TRY ) ; while ( retryPolicy . attempt ( ) ) { try { return hdfs . rename ( new Path ( src ) , new Path ( dst ) ) ; } catch ( IOException e ) { LOG . warn ( "{} try to rename {} to {} : {}" , retryPolicy . getAttemptCount ( ) , src , dst , e . getMessage ( ) ) ; te = e ; } } throw te ; }
Renames a source file or folder to a destination file or folder .
12,655
/**
 * Populates servlet request attributes for the web UI: each operation metric
 * contributes its Gauge value or Counter count under its map key, and each RPC
 * invocation counter contributes its count. Metrics of other types are skipped.
 */
protected void populateCounterValues ( Map < String , Metric > operations , Map < String , Counter > rpcInvocations , HttpServletRequest request ) { for ( Map . Entry < String , Metric > entry : operations . entrySet ( ) ) { if ( entry . getValue ( ) instanceof Gauge ) { request . setAttribute ( entry . getKey ( ) , ( ( Gauge < ? > ) entry . getValue ( ) ) . getValue ( ) ) ; } else if ( entry . getValue ( ) instanceof Counter ) { request . setAttribute ( entry . getKey ( ) , ( ( Counter ) entry . getValue ( ) ) . getCount ( ) ) ; } } for ( Map . Entry < String , Counter > entry : rpcInvocations . entrySet ( ) ) { request . setAttribute ( entry . getKey ( ) , entry . getValue ( ) . getCount ( ) ) ; } }
Populates operation metrics for displaying in the UI .
12,656
/**
 * Forces unmapping of a direct ByteBuffer by reflectively invoking its JDK-internal
 * "cleaner" accessor and the cleaner's "clean" method, caching both Method objects
 * (the method is synchronized, so the caches are safely published). Best-effort:
 * any reflection failure is logged, not thrown. The buffer must be discarded after
 * this call.
 * NOTE(review): the finally block's {@code buffer = null} only clears the local
 * parameter copy and has no effect on the caller's reference.
 */
public static synchronized void cleanDirectBuffer ( ByteBuffer buffer ) { Preconditions . checkNotNull ( buffer , "buffer" ) ; Preconditions . checkArgument ( buffer . isDirect ( ) , "buffer isn't a DirectByteBuffer" ) ; try { if ( sByteBufferCleanerMethod == null ) { sByteBufferCleanerMethod = buffer . getClass ( ) . getMethod ( "cleaner" ) ; sByteBufferCleanerMethod . setAccessible ( true ) ; } final Object cleaner = sByteBufferCleanerMethod . invoke ( buffer ) ; if ( cleaner == null ) { if ( buffer . capacity ( ) > 0 ) { LOG . warn ( "Failed to get cleaner for ByteBuffer: {}" , buffer . getClass ( ) . getName ( ) ) ; } return ; } if ( sCleanerCleanMethod == null ) { sCleanerCleanMethod = cleaner . getClass ( ) . getMethod ( "clean" ) ; } sCleanerCleanMethod . invoke ( cleaner ) ; } catch ( Exception e ) { LOG . warn ( "Failed to unmap direct ByteBuffer: {}, error message: {}" , buffer . getClass ( ) . getName ( ) , e . getMessage ( ) ) ; } finally { buffer = null ; } }
Forces to unmap a direct buffer if this buffer is no longer used . After calling this method this direct buffer should be discarded . This is unsafe operation and currently a work - around to avoid huge memory occupation caused by memory map .
12,657
/**
 * Builds a byte array of the given length whose values increase by one,
 * beginning at {@code start} (values wrap around via the byte cast).
 *
 * @param start the value of the first byte
 * @param len the number of bytes to generate
 * @return the increasing byte sequence
 */
public static byte[] getIncreasingByteArray(int start, int len) {
  byte[] bytes = new byte[len];
  for (int i = 0; i < len; i++) {
    bytes[i] = (byte) (start + i);
  }
  return bytes;
}
Gets an increasing sequence of bytes starting with the given value .
12,658
/**
 * Checks whether the array is exactly {@code len} bytes long and every byte
 * equals {@code value}.
 *
 * @param value the expected constant byte value
 * @param len the required array length
 * @param arr the array to check (may be null, which fails the check)
 * @return true only when the array is non-null, of the given length, and constant
 */
public static boolean equalConstantByteArray(byte value, int len, byte[] arr) {
  if (arr == null || arr.length != len) {
    return false;
  }
  for (byte b : arr) {
    if (b != value) {
      return false;
    }
  }
  return true;
}
Checks if the given byte array consists entirely of the given constant byte value and has exactly the given length .
12,659
/**
 * Checks whether the array is exactly {@code len} bytes long and contains the
 * increasing sequence start, start+1, ... (with byte wrap-around).
 *
 * @param start the expected first byte value
 * @param len the required array length
 * @param arr the array to check (may be null, which fails the check)
 * @return true only when the array matches the increasing sequence exactly
 */
public static boolean equalIncreasingByteArray(int start, int len, byte[] arr) {
  if (arr == null || arr.length != len) {
    return false;
  }
  int offset = 0;
  for (byte b : arr) {
    if (b != (byte) (start + offset)) {
      return false;
    }
    offset++;
  }
  return true;
}
Checks if the given byte array starts with an increasing sequence of bytes of the given length starting from the given value . The array length must be equal to the length checked .
12,660
/**
 * Writes the entire buffer to the file at the given path, creating or truncating
 * the file, and closes the stream even when the write fails.
 *
 * @param path the destination file path
 * @param buffer the bytes to write
 * @throws IOException if the file cannot be opened or written
 */
public static void writeBufferToFile(String path, byte[] buffer) throws IOException {
  FileOutputStream stream = new FileOutputStream(path);
  try {
    stream.write(buffer);
  } finally {
    stream.close();
  }
}
Writes buffer to the given file path .
12,661
/**
 * Cancels the current job by submitting a cancel-task command for every task,
 * addressed to the worker that task was assigned to.
 */
public synchronized void cancel ( ) { for ( int taskId : mJobInfo . getTaskIdList ( ) ) { mCommandManager . submitCancelTaskCommand ( mJobInfo . getId ( ) , taskId , mTaskIdToWorkerInfo . get ( taskId ) . getId ( ) ) ; } }
Cancels the current job .
12,662
/**
 * Records each given task info in the job info, then recomputes the overall job status.
 */
public synchronized void updateTasks ( List < TaskInfo > taskInfoList ) { for ( TaskInfo taskInfo : taskInfoList ) { mJobInfo . setTaskInfo ( taskInfo . getTaskId ( ) , taskInfo ) ; } updateStatus ( ) ; }
Updates internal status with given tasks .
12,663
/**
 * Marks the job FAILED and records the given error message.
 */
public synchronized void setJobAsFailed ( String errorMessage ) { mJobInfo . setStatus ( Status . FAILED ) ; mJobInfo . setErrorMessage ( errorMessage ) ; }
Sets the job as failed with given error message .
12,664
/**
 * Fails the task assigned to the given worker (each worker holds at most one task
 * per mWorkerIdToTaskId), then recomputes the job status. No-op when the worker
 * has no task or the task already finished.
 */
public synchronized void failTasksForWorker ( long workerId ) { Integer taskId = mWorkerIdToTaskId . get ( workerId ) ; if ( taskId == null ) { return ; } TaskInfo taskInfo = mJobInfo . getTaskInfo ( taskId ) ; if ( taskInfo . getStatus ( ) . isFinished ( ) ) { return ; } taskInfo . setStatus ( Status . FAILED ) ; taskInfo . setErrorMessage ( "Job worker was lost before the task could complete" ) ; updateStatus ( ) ; }
Fails any incomplete tasks being run on the specified worker .
12,665
/**
 * Recomputes the job status from its tasks: the first FAILED task fails the job
 * (its message is recorded only if none is set yet); a CANCELED task cancels the
 * job unless it already failed; a RUNNING task keeps the job RUNNING unless it is
 * failed/canceled; when every task is COMPLETED the task results are joined into
 * the job result and the job completes — a join failure fails the job instead.
 * Returns early on FAILED/CANCELED, so later tasks are not inspected.
 */
private void updateStatus ( ) { int completed = 0 ; List < TaskInfo > taskInfoList = mJobInfo . getTaskInfoList ( ) ; for ( TaskInfo info : taskInfoList ) { switch ( info . getStatus ( ) ) { case FAILED : mJobInfo . setStatus ( Status . FAILED ) ; if ( mJobInfo . getErrorMessage ( ) . isEmpty ( ) ) { mJobInfo . setErrorMessage ( "Task execution failed: " + info . getErrorMessage ( ) ) ; } return ; case CANCELED : if ( mJobInfo . getStatus ( ) != Status . FAILED ) { mJobInfo . setStatus ( Status . CANCELED ) ; } return ; case RUNNING : if ( mJobInfo . getStatus ( ) != Status . FAILED && mJobInfo . getStatus ( ) != Status . CANCELED ) { mJobInfo . setStatus ( Status . RUNNING ) ; } break ; case COMPLETED : completed ++ ; break ; case CREATED : break ; default : throw new IllegalArgumentException ( "Unsupported status " + info . getStatus ( ) ) ; } } if ( completed == taskInfoList . size ( ) ) { if ( mJobInfo . getStatus ( ) == Status . COMPLETED ) { return ; } try { mJobInfo . setStatus ( Status . COMPLETED ) ; mJobInfo . setResult ( join ( taskInfoList ) ) ; } catch ( Exception e ) { mJobInfo . setStatus ( Status . FAILED ) ; mJobInfo . setErrorMessage ( e . getMessage ( ) ) ; } } }
Updates the status of the job . When all the tasks are completed run the join method in the definition .
12,666
/**
 * Looks up the job definition for this job's config and joins the per-worker
 * task results into the final job result string. Any exception from the
 * definition's join propagates to the caller.
 */
private String join ( List < TaskInfo > taskInfoList ) throws Exception { JobDefinition < JobConfig , Serializable , Serializable > definition = JobDefinitionRegistry . INSTANCE . getJobDefinition ( mJobInfo . getJobConfig ( ) ) ; Map < WorkerInfo , Serializable > taskResults = Maps . newHashMap ( ) ; for ( TaskInfo taskInfo : taskInfoList ) { taskResults . put ( mTaskIdToWorkerInfo . get ( taskInfo . getTaskId ( ) ) , taskInfo . getResult ( ) ) ; } return definition . join ( mJobInfo . getJobConfig ( ) , taskResults ) ; }
Joins the task results and produces a final result .
12,667
/**
 * Opens a new underlying stream at mPos when none is open, ending at the next
 * multiple of the multi-range chunk size (so reads stay within one chunk).
 * Throws when the stream is closed or the chunk size is not positive; no-op
 * when a stream is already open.
 */
private void openStream ( ) throws IOException { if ( mClosed ) { throw new IOException ( "Stream closed" ) ; } if ( mMultiRangeChunkSize <= 0 ) { throw new IOException ( ExceptionMessage . BLOCK_SIZE_INVALID . getMessage ( mMultiRangeChunkSize ) ) ; } if ( mStream != null ) { return ; } final long endPos = mPos + mMultiRangeChunkSize - ( mPos % mMultiRangeChunkSize ) ; mEndPos = endPos ; mStream = createStream ( mPos , endPos ) ; }
Opens a new stream at mPos if the wrapped stream mStream is null .
12,668
/**
 * Creates a snapshot of the journal by listing the checkpoint, log, and temporary
 * checkpoint directories in the UFS and decoding each entry name. Undecodable
 * checkpoint/log names (decode returns null) are skipped; checkpoints and logs
 * are sorted before the snapshot is built.
 */
public static UfsJournalSnapshot getSnapshot ( UfsJournal journal ) throws IOException { List < UfsJournalFile > checkpoints = new ArrayList < > ( ) ; UfsStatus [ ] statuses = journal . getUfs ( ) . listStatus ( journal . getCheckpointDir ( ) . toString ( ) ) ; if ( statuses != null ) { for ( UfsStatus status : statuses ) { UfsJournalFile file = UfsJournalFile . decodeCheckpointFile ( journal , status . getName ( ) ) ; if ( file != null ) { checkpoints . add ( file ) ; } } Collections . sort ( checkpoints ) ; } List < UfsJournalFile > logs = new ArrayList < > ( ) ; statuses = journal . getUfs ( ) . listStatus ( journal . getLogDir ( ) . toString ( ) ) ; if ( statuses != null ) { for ( UfsStatus status : statuses ) { UfsJournalFile file = UfsJournalFile . decodeLogFile ( journal , status . getName ( ) ) ; if ( file != null ) { logs . add ( file ) ; } } Collections . sort ( logs ) ; } List < UfsJournalFile > tmpCheckpoints = new ArrayList < > ( ) ; statuses = journal . getUfs ( ) . listStatus ( journal . getTmpDir ( ) . toString ( ) ) ; if ( statuses != null ) { for ( UfsStatus status : statuses ) { tmpCheckpoints . add ( UfsJournalFile . decodeTemporaryCheckpointFile ( journal , status . getName ( ) ) ) ; } } return new UfsJournalSnapshot ( checkpoints , logs , tmpCheckpoints ) ; }
Creates a snapshot of the journal .
12,669
/**
 * Returns the first log sequence number not yet covered by a checkpoint: the end
 * sequence of the latest (sorted last) checkpoint file, or 0 when no checkpoint
 * exists in the UFS.
 */
static long getNextLogSequenceNumberToCheckpoint ( UfsJournal journal ) throws IOException { List < UfsJournalFile > checkpoints = new ArrayList < > ( ) ; UfsStatus [ ] statuses = journal . getUfs ( ) . listStatus ( journal . getCheckpointDir ( ) . toString ( ) ) ; if ( statuses != null ) { for ( UfsStatus status : statuses ) { UfsJournalFile file = UfsJournalFile . decodeCheckpointFile ( journal , status . getName ( ) ) ; if ( file != null ) { checkpoints . add ( file ) ; } } Collections . sort ( checkpoints ) ; } if ( checkpoints . isEmpty ( ) ) { return 0 ; } return checkpoints . get ( checkpoints . size ( ) - 1 ) . getEnd ( ) ; }
Gets the first journal log sequence number that is not yet checkpointed .
12,670
/**
 * Validates the path, then normalizes it: separators converted to Unix style and
 * any trailing separator removed (via commons-io FilenameUtils).
 */
public static String cleanPath ( String path ) throws InvalidPathException { validatePath ( path ) ; return FilenameUtils . separatorsToUnix ( FilenameUtils . normalizeNoEndSeparator ( path ) ) ; }
Checks and normalizes the given path .
12,671
/**
 * Returns the parent of the file at the given path: the cleaned path with its last
 * name component (and the separator before it) stripped, or the root separator
 * when the path is directly under root.
 */
public static String getParent ( String path ) throws InvalidPathException { String cleanedPath = cleanPath ( path ) ; String name = FilenameUtils . getName ( cleanedPath ) ; String parent = cleanedPath . substring ( 0 , cleanedPath . length ( ) - name . length ( ) - 1 ) ; if ( parent . isEmpty ( ) ) { return AlluxioURI . SEPARATOR ; } return parent ; }
Gets the parent of the file at a path .
12,672
/**
 * Splits a cleaned path on the URI separator. The root path yields a single
 * empty-string component; otherwise the first component is the empty string
 * preceding the leading separator.
 */
public static String [ ] getPathComponents ( String path ) throws InvalidPathException { path = cleanPath ( path ) ; if ( isRoot ( path ) ) { return new String [ ] { "" } ; } return path . split ( AlluxioURI . SEPARATOR ) ; }
Gets the path components of the given path . The first component will be an empty string .
12,673
/**
 * Strips the given prefix from the path after cleaning both, returning the
 * relative remainder ("" when they are equal). The separator after the prefix is
 * dropped too, unless the prefix is the root.
 * NOTE(review): a non-prefix argument raises an unchecked RuntimeException rather
 * than InvalidPathException — confirm callers expect that.
 */
public static String subtractPaths ( String path , String prefix ) throws InvalidPathException { String cleanedPath = cleanPath ( path ) ; String cleanedPrefix = cleanPath ( prefix ) ; if ( cleanedPath . equals ( cleanedPrefix ) ) { return "" ; } if ( ! hasPrefix ( cleanedPath , cleanedPrefix ) ) { throw new RuntimeException ( String . format ( "Cannot subtract %s from %s because it is not a prefix" , prefix , path ) ) ; } int prefixLen = cleanedPrefix . length ( ) ; int charsToDrop = PathUtils . isRoot ( cleanedPrefix ) ? prefixLen : prefixLen + 1 ; return cleanedPath . substring ( charsToDrop , cleanedPath . length ( ) ) ; }
Removes the prefix from the path yielding a relative path from the second path to the first .
12,674
/**
 * Validates that the path is non-null, non-empty, and — outside Windows —
 * absolute (starts with the URI separator), throwing InvalidPathException otherwise.
 */
public static void validatePath ( String path ) throws InvalidPathException { boolean invalid = ( path == null || path . isEmpty ( ) ) ; if ( ! OSUtils . isWindows ( ) ) { invalid = ( invalid || ! path . startsWith ( AlluxioURI . SEPARATOR ) ) ; } if ( invalid ) { throw new InvalidPathException ( ExceptionMessage . PATH_INVALID . getMessage ( path ) ) ; } }
Checks if the given path is properly formed .
12,675
/**
 * Appends a deterministic temporary suffix, formatted from the nonce, to the path.
 */
public static String temporaryFileName ( long nonce , String path ) { return path + String . format ( TEMPORARY_SUFFIX_FORMAT , nonce ) ; }
Generates a deterministic temporary file name from a path and a nonce .
12,676
/**
 * Creates a unique path derived from the immediate caller's class and method name
 * plus the current nanosecond timestamp.
 *
 * @return a path of the form /callerClass/callerMethod/nanoTime
 */
public static String uniqPath() {
  // Stack element [1] is the direct caller of this method.
  StackTraceElement caller = new Throwable().getStackTrace()[1];
  long time = System.nanoTime();
  return String.format("/%s/%s/%d", caller.getClassName(), caller.getMethodName(), time);
}
Creates a unique path based off the caller .
12,677
/**
 * Ensures the path ends with the given separator, appending one only when missing.
 *
 * @param path the path to normalize
 * @param separator the separator to guarantee at the end
 * @return the path with a trailing separator
 */
public static String normalizePath(String path, String separator) {
  if (path.endsWith(separator)) {
    return path;
  }
  return path + separator;
}
Adds a trailing separator if it does not exist in path .
12,678
/**
 * Launches the external worker process, writing its output to worker.out under
 * the logs directory. Fails with IllegalStateException when already running.
 */
public synchronized void start ( ) throws IOException { Preconditions . checkState ( mProcess == null , "Worker is already running" ) ; mProcess = new ExternalProcess ( mProperties , LimitedLifeWorkerProcess . class , new File ( mLogsDir , "worker.out" ) ) ; mProcess . start ( ) ; }
Launches the worker process .
12,679
/**
 * Enqueues a value that becomes available after the given delay, offset by the
 * queue's accumulated past time (mPastTime).
 */
public void add ( long delay , T value ) { mQueue . add ( new DelayNode < > ( value , delay + mPastTime ) ) ; }
Adds a new node into the queue .
12,680
/**
 * Recovers from a previous UFS journal write failure when mNeedsRecovery is set:
 * determines the last persisted sequence number, starts a new log right after it,
 * and re-writes every buffered entry with a greater sequence number. Throws when
 * the oldest buffered entry leaves a gap after the last persisted one (missing
 * entries would corrupt the journal); wraps write failures with the log location.
 */
private void maybeRecoverFromUfsFailures ( ) throws IOException , JournalClosedException { if ( ! mNeedsRecovery ) { return ; } long lastPersistSeq = recoverLastPersistedJournalEntry ( ) ; createNewLogFile ( lastPersistSeq + 1 ) ; if ( ! mEntriesToFlush . isEmpty ( ) ) { JournalEntry firstEntryToFlush = mEntriesToFlush . peek ( ) ; if ( firstEntryToFlush . getSequenceNumber ( ) > lastPersistSeq + 1 ) { throw new RuntimeException ( ExceptionMessage . JOURNAL_ENTRY_MISSING . getMessageWithUrl ( RuntimeConstants . ALLUXIO_DEBUG_DOCS_URL , lastPersistSeq + 1 , firstEntryToFlush . getSequenceNumber ( ) ) ) ; } long retryEndSeq = lastPersistSeq ; LOG . info ( "Retry writing unwritten journal entries from seq {}" , lastPersistSeq + 1 ) ; for ( JournalEntry entry : mEntriesToFlush ) { if ( entry . getSequenceNumber ( ) > lastPersistSeq ) { try { entry . toBuilder ( ) . build ( ) . writeDelimitedTo ( mJournalOutputStream ) ; retryEndSeq = entry . getSequenceNumber ( ) ; } catch ( IOJournalClosedException e ) { throw e . toJournalClosedException ( ) ; } catch ( IOException e ) { throw new IOException ( ExceptionMessage . JOURNAL_WRITE_FAILURE . getMessageWithUrl ( RuntimeConstants . ALLUXIO_DEBUG_DOCS_URL , mJournalOutputStream . currentLog ( ) , e . getMessage ( ) ) , e ) ; } } } LOG . info ( "Finished writing unwritten journal entries from {} to {}." , lastPersistSeq + 1 , retryEndSeq ) ; } mNeedsRecovery = false ; }
Core logic of UFS journal recovery from UFS failures .
12,681
private long recoverLastPersistedJournalEntry ( ) throws IOException { UfsJournalSnapshot snapshot = UfsJournalSnapshot . getSnapshot ( mJournal ) ; long lastPersistSeq = - 1 ; UfsJournalFile currentLog = snapshot . getCurrentLog ( mJournal ) ; if ( currentLog != null ) { LOG . info ( "Recovering from previous UFS journal write failure." + " Scanning for the last persisted journal entry." ) ; try ( JournalEntryStreamReader reader = new JournalEntryStreamReader ( mUfs . open ( currentLog . getLocation ( ) . toString ( ) , OpenOptions . defaults ( ) . setRecoverFailedOpen ( true ) ) ) ) { JournalEntry entry ; while ( ( entry = reader . readEntry ( ) ) != null ) { if ( entry . getSequenceNumber ( ) > lastPersistSeq ) { lastPersistSeq = entry . getSequenceNumber ( ) ; } } } catch ( IOException e ) { throw e ; } completeLog ( currentLog , lastPersistSeq + 1 ) ; } if ( lastPersistSeq < 0 ) { snapshot = UfsJournalSnapshot . getSnapshot ( mJournal ) ; List < UfsJournalFile > journalFiles = snapshot . getLogs ( ) ; if ( ! journalFiles . isEmpty ( ) ) { UfsJournalFile journal = journalFiles . get ( journalFiles . size ( ) - 1 ) ; lastPersistSeq = journal . getEnd ( ) - 1 ; LOG . info ( "Found last persisted journal entry with seq {} in {}." , lastPersistSeq , journal . getLocation ( ) . toString ( ) ) ; } } return lastPersistSeq ; }
Examine the UFS to determine the most recent journal entry and return its sequence number .
12,682
/**
 * When rotation is flagged, closes the current journal output stream and creates
 * a new log file starting at the next sequence number. Nulling the stream before
 * creating the new file keeps the method idempotent across retries.
 */
private void maybeRotateLog ( ) throws IOException { if ( ! mRotateLogForNextWrite ) { return ; } if ( mJournalOutputStream != null ) { mJournalOutputStream . close ( ) ; mJournalOutputStream = null ; } createNewLogFile ( mNextSequenceNumber ) ; mRotateLogForNextWrite = false ; }
Closes the current journal output stream and creates a new one . The implementation must be idempotent so that it can work when retrying during failures .
12,683
/**
 * Completes a log by renaming it to a name encoding its start and end sequence
 * numbers. An empty log (no entry written) is deleted instead. When the rename
 * fails: if the completed name does not exist, an IOException is thrown; if it
 * does (a previous attempt succeeded), the leftover current file is deleted.
 */
private void completeLog ( UfsJournalFile currentLog , long nextSequenceNumber ) throws IOException { String current = currentLog . getLocation ( ) . toString ( ) ; if ( nextSequenceNumber <= currentLog . getStart ( ) ) { LOG . info ( "No journal entry found in current journal file {}. Deleting it" , current ) ; if ( ! mUfs . deleteFile ( current ) ) { LOG . warn ( "Failed to delete empty journal file {}" , current ) ; } return ; } LOG . info ( "Completing log {} with next sequence number {}" , current , nextSequenceNumber ) ; String completed = UfsJournalFile . encodeLogFileLocation ( mJournal , currentLog . getStart ( ) , nextSequenceNumber ) . toString ( ) ; if ( ! mUfs . renameFile ( current , completed ) ) { if ( ! mUfs . exists ( completed ) ) { throw new IOException ( String . format ( "Failed to rename journal log from %s to %s" , current , completed ) ) ; } if ( mUfs . exists ( current ) ) { LOG . info ( "Deleting current log {}" , current ) ; if ( ! mUfs . deleteFile ( current ) ) { LOG . warn ( "Failed to delete current log file {}" , current ) ; } } } }
Completes the given log .
12,684
/**
 * Reflectively extracts the underlying ReadableBuffer from a gRPC input stream for
 * zero-copy receive. Returns null when zero-copy is unsupported, when the stream is
 * not the expected implementation class, or when reflection fails (logged).
 */
public static ReadableBuffer getBufferFromStream ( InputStream stream ) { if ( ! sZeroCopyReceiveSupported || ! stream . getClass ( ) . equals ( sReadableBufferField . getDeclaringClass ( ) ) ) { return null ; } try { return ( ReadableBuffer ) sReadableBufferField . get ( stream ) ; } catch ( Exception e ) { LOG . warn ( "Failed to get data buffer from stream." , e ) ; return null ; } }
Gets a buffer directly from a gRPC input stream .
12,685
/**
 * Reflectively unwraps a gRPC ReadableBuffer into a Netty ByteBuf. A composite
 * buffer with a single component is unwrapped directly; multiple components are
 * assembled into a CompositeByteBuf. Returns null when zero-copy receive is
 * unsupported, when any component cannot be unwrapped, when the buffer is not the
 * expected implementation class, or when reflection fails (logged).
 */
public static ByteBuf getByteBufFromReadableBuffer ( ReadableBuffer buffer ) { if ( ! sZeroCopyReceiveSupported ) { return null ; } try { if ( buffer instanceof CompositeReadableBuffer ) { Queue < ReadableBuffer > buffers = ( Queue < ReadableBuffer > ) sCompositeBuffers . get ( buffer ) ; if ( buffers . size ( ) == 1 ) { return getByteBufFromReadableBuffer ( buffers . peek ( ) ) ; } else { CompositeByteBuf buf = PooledByteBufAllocator . DEFAULT . compositeBuffer ( ) ; for ( ReadableBuffer readableBuffer : buffers ) { ByteBuf subBuffer = getByteBufFromReadableBuffer ( readableBuffer ) ; if ( subBuffer == null ) { return null ; } buf . addComponent ( true , subBuffer ) ; } return buf ; } } else if ( buffer . getClass ( ) . equals ( sReadableByteBuf . getDeclaringClass ( ) ) ) { return ( ByteBuf ) sReadableByteBuf . get ( buffer ) ; } } catch ( Exception e ) { LOG . warn ( "Failed to get data buffer from stream: {}." , e . getMessage ( ) ) ; return null ; } return null ; }
Gets a Netty buffer directly from a gRPC ReadableBuffer .
12,686
/**
 * Reflectively appends the given Netty buffers to a gRPC output stream's internal
 * buffer list for zero-copy send, retaining each buffer and marking the last one
 * as the stream's current buffer. Returns false when zero-copy send is unsupported,
 * when the stream already has a current buffer, or when reflection fails (logged).
 * NOTE(review): buffers retained before a later reflective failure are not
 * released here — confirm the caller handles a false return after partial progress.
 */
public static boolean addBuffersToStream ( ByteBuf [ ] buffers , OutputStream stream ) { if ( ! sZeroCopySendSupported || ! stream . getClass ( ) . equals ( sBufferList . getDeclaringClass ( ) ) ) { return false ; } try { if ( sCurrent . get ( stream ) != null ) { return false ; } for ( ByteBuf buffer : buffers ) { Object nettyBuffer = sNettyWritableBufferConstructor . newInstance ( buffer ) ; List list = ( List ) sBufferList . get ( stream ) ; list . add ( nettyBuffer ) ; buffer . retain ( ) ; sCurrent . set ( stream , nettyBuffer ) ; } return true ; } catch ( Exception e ) { LOG . warn ( "Failed to add data buffer to stream: {}." , e . getMessage ( ) ) ; return false ; } }
Add the given buffers directly to the gRPC output stream .
12,687
/**
 * Rebuilds a gRPC service definition with every method passed through
 * interceptMethod so the custom marshallers in the given map take effect, then
 * reassembles a ServiceDescriptor with the same service name and the new methods.
 */
public static ServerServiceDefinition overrideMethods ( final ServerServiceDefinition service , final Map < MethodDescriptor , MethodDescriptor > marshallers ) { List < ServerMethodDefinition < ? , ? > > newMethods = new ArrayList < ServerMethodDefinition < ? , ? > > ( ) ; List < MethodDescriptor < ? , ? > > newDescriptors = new ArrayList < MethodDescriptor < ? , ? > > ( ) ; for ( final ServerMethodDefinition < ? , ? > definition : service . getMethods ( ) ) { ServerMethodDefinition < ? , ? > newMethod = interceptMethod ( definition , marshallers ) ; newDescriptors . add ( newMethod . getMethodDescriptor ( ) ) ; newMethods . add ( newMethod ) ; } final ServerServiceDefinition . Builder serviceBuilder = ServerServiceDefinition . builder ( new ServiceDescriptor ( service . getServiceDescriptor ( ) . getName ( ) , newDescriptors ) ) ; for ( ServerMethodDefinition < ? , ? > definition : newMethods ) { serviceBuilder . addMethod ( definition ) ; } return serviceBuilder . build ( ) ; }
Creates a service definition that uses custom marshallers .
12,688
/**
 * Rethrows any Throwable without declaring it, exploiting generic type-inference
 * (T defaults to RuntimeException at call sites) so the compiler does not require
 * a throws clause for checked exceptions. A null argument raises a generic Error.
 *
 * @param t the throwable to rethrow; must not be null in normal use
 */
@SuppressWarnings("unchecked")
static <T extends Throwable> void uncheckedThrow(Throwable t) throws T {
  if (t == null) {
    throw new Error("Unknown Exception");
  }
  throw (T) t;
}
The sneaky part of sneaky throw relying on generics limitations to evade compiler complaints about rethrowing unchecked exceptions .
12,689
/**
 * Estimates the number of tasks forked by the current thread but not yet executed,
 * by reading the current worker thread's work queue, or the common pool's submitter
 * queue when called from a non-ForkJoin thread. Returns 0 when no queue exists.
 */
public static int getQueuedTaskCount ( ) { Thread t ; ForkJoinPool . WorkQueue q ; if ( ( t = Thread . currentThread ( ) ) instanceof ForkJoinWorkerThread ) q = ( ( ForkJoinWorkerThread ) t ) . workQueue ; else q = ForkJoinPool . commonSubmitterQueue ( ) ; return ( q == null ) ? 0 : q . queueSize ( ) ; }
Returns an estimate of the number of tasks that have been forked by the current worker thread but not yet executed . This value may be useful for heuristic decisions about whether to fork other tasks .
12,690
/**
 * Peeks (without removing) the next task queued by the current thread, using its
 * worker queue or the common pool's submitter queue. Returns null when no queue
 * exists or no task is immediately accessible.
 */
protected static ForkJoinTask < ? > peekNextLocalTask ( ) { Thread t ; ForkJoinPool . WorkQueue q ; if ( ( t = Thread . currentThread ( ) ) instanceof ForkJoinWorkerThread ) q = ( ( ForkJoinWorkerThread ) t ) . workQueue ; else q = ForkJoinPool . commonSubmitterQueue ( ) ; return ( q == null ) ? null : q . peek ( ) ; }
Returns but does not unschedule or execute a task queued by the current thread but not yet executed if one is immediately available . There is no guarantee that this task will actually be polled or executed next . Conversely this method may return null even if a task exists but cannot be accessed without contention with other threads . This method is designed primarily to support extensions and is unlikely to be useful otherwise .
12,691
/**
 * Unschedules and returns (without executing) the next locally queued task, or
 * null when the current thread is not a ForkJoinPool worker thread.
 */
protected static ForkJoinTask < ? > pollNextLocalTask ( ) { Thread t ; return ( ( t = Thread . currentThread ( ) ) instanceof ForkJoinWorkerThread ) ? ( ( ForkJoinWorkerThread ) t ) . workQueue . nextLocalTask ( ) : null ; }
Unschedules and returns without executing the next task queued by the current thread but not yet executed if the current thread is operating in a ForkJoinPool . This method is designed primarily to support extensions and is unlikely to be useful otherwise .
12,692
/**
 * Returns a rethrowable exception for this task, or null when none is recorded.
 * Looks the task up in the shared exception table (under the table lock, after
 * expunging stale entries) by identity hash. When the recorded exception was
 * thrown by a different thread, attempts to build a same-type replacement whose
 * cause is the original — preferring a (Throwable) constructor, else a no-arg
 * constructor plus initCause — so the stack trace reflects the current thread.
 * Falls back to the recorded exception itself if construction fails.
 */
private Throwable getThrowableException ( ) { int h = System . identityHashCode ( this ) ; ExceptionNode e ; final ReentrantLock lock = exceptionTableLock ; lock . lock ( ) ; try { expungeStaleExceptions ( ) ; ExceptionNode [ ] t = exceptionTable ; e = t [ h & ( t . length - 1 ) ] ; while ( e != null && e . get ( ) != this ) e = e . next ; } finally { lock . unlock ( ) ; } Throwable ex ; if ( e == null || ( ex = e . ex ) == null ) return null ; if ( e . thrower != Thread . currentThread ( ) . getId ( ) ) { try { Constructor < ? > noArgCtor = null ; for ( Constructor < ? > c : ex . getClass ( ) . getConstructors ( ) ) { Class < ? > [ ] ps = c . getParameterTypes ( ) ; if ( ps . length == 0 ) noArgCtor = c ; else if ( ps . length == 1 && ps [ 0 ] == Throwable . class ) return ( Throwable ) c . newInstance ( ex ) ; } if ( noArgCtor != null ) { Throwable wx = ( Throwable ) noArgCtor . newInstance ( ) ; wx . initCause ( ex ) ; return wx ; } } catch ( Exception ignore ) { } } return ex ; }
Returns a rethrowable exception for this task if available . To provide accurate stack traces if the exception was not thrown by the current thread we try to create a new exception of the same type as the one thrown but with the recorded exception as its cause . If there is no such constructor we instead try to use a no - arg constructor followed by initCause to the same effect . If none of these apply or any fail due to other exceptions we return the recorded exception which is still correct although it may contain a misleading stack trace .
12,693
/**
 * Atomically replaces this task's tag (the low SMASK bits of the status word) via
 * a CAS loop, retrying until the swap succeeds, and returns the previous status's
 * tag bits as a short.
 */
public final short setForkJoinTaskTag ( short newValue ) { for ( int s ; ; ) { if ( U . compareAndSwapInt ( this , STATUS , s = status , ( s & ~ SMASK ) | ( newValue & SMASK ) ) ) return ( short ) s ; } }
Atomically sets the tag value for this task and returns the old value .
12,694
/**
 * Builds the bind address for a given service from its configured bind host and port.
 *
 * @param service the service to look up
 * @param conf the configuration to read the host and port from
 * @return the socket address to bind the service to
 */
public static InetSocketAddress getBindAddress(ServiceType service, AlluxioConfiguration conf) {
  int port = getPort(service, conf);
  assertValidPort(port);
  // Reuse the already-validated port instead of resolving it a second time.
  return new InetSocketAddress(getBindHost(service, conf), port);
}
Helper method to get the bind hostname for a given service .
12,695
/**
 * Returns the hostname configured for the client (alluxio.user.hostname), or
 * resolves a local hostname with the configured resolution timeout when unset.
 */
public static String getClientHostName ( AlluxioConfiguration conf ) { if ( conf . isSet ( PropertyKey . USER_HOSTNAME ) ) { return conf . get ( PropertyKey . USER_HOSTNAME ) ; } return getLocalHostName ( ( int ) conf . getMs ( PropertyKey . NETWORK_HOST_RESOLUTION_TIMEOUT_MS ) ) ; }
Gets the local hostname to be used by the client . If this isn't configured a non - loopback local hostname will be looked up .
12,696
/**
 * Returns the hostname configured for the current process type (job master,
 * job worker, client, master, or worker), falling back to local hostname
 * resolution when the matching property is unset or the type is unrecognized.
 */
public static String getLocalNodeName ( AlluxioConfiguration conf ) { switch ( CommonUtils . PROCESS_TYPE . get ( ) ) { case JOB_MASTER : if ( conf . isSet ( PropertyKey . JOB_MASTER_HOSTNAME ) ) { return conf . get ( PropertyKey . JOB_MASTER_HOSTNAME ) ; } break ; case JOB_WORKER : if ( conf . isSet ( PropertyKey . JOB_WORKER_HOSTNAME ) ) { return conf . get ( PropertyKey . JOB_WORKER_HOSTNAME ) ; } break ; case CLIENT : if ( conf . isSet ( PropertyKey . USER_HOSTNAME ) ) { return conf . get ( PropertyKey . USER_HOSTNAME ) ; } break ; case MASTER : if ( conf . isSet ( PropertyKey . MASTER_HOSTNAME ) ) { return conf . get ( PropertyKey . MASTER_HOSTNAME ) ; } break ; case WORKER : if ( conf . isSet ( PropertyKey . WORKER_HOSTNAME ) ) { return conf . get ( PropertyKey . WORKER_HOSTNAME ) ; } break ; default : break ; } return getLocalHostName ( ( int ) conf . getMs ( PropertyKey . NETWORK_HOST_RESOLUTION_TIMEOUT_MS ) ) ; }
Gets a local node name from configuration if it is available falling back on localhost lookup .
12,697
/**
 * Lazily resolves and caches the canonical hostname of this JVM's local IP
 * address. An UnknownHostException is wrapped in a RuntimeException.
 */
public static synchronized String getLocalHostName ( int timeoutMs ) { if ( sLocalHost != null ) { return sLocalHost ; } try { sLocalHost = InetAddress . getByName ( getLocalIpAddress ( timeoutMs ) ) . getCanonicalHostName ( ) ; return sLocalHost ; } catch ( UnknownHostException e ) { throw new RuntimeException ( e ) ; } }
Gets a local host name for the host this JVM is running on .
12,698
/**
 * Lazily resolves and caches the local hostname with '.' replaced by '_',
 * suitable for use in metric names.
 */
public static synchronized String getLocalHostMetricName ( int timeoutMs ) { if ( sLocalHostMetricName != null ) { return sLocalHostMetricName ; } sLocalHostMetricName = getLocalHostName ( timeoutMs ) . replace ( '.' , '_' ) ; return sLocalHostMetricName ; }
Gets a local hostname for the host this JVM is running on with . replaced with _ for metrics usage .
12,699
/**
 * Lazily resolves and caches a local IP address for this JVM. Uses
 * InetAddress.getLocalHost() when it passes isValidAddress; otherwise scans all
 * network interfaces (in reverse order on non-Windows hosts) for the first valid
 * address, and falls back to the original (loopback/unreachable) address with a
 * warning when none is found. IO failures are wrapped in a RuntimeException.
 */
public static synchronized String getLocalIpAddress ( int timeoutMs ) { if ( sLocalIP != null ) { return sLocalIP ; } try { InetAddress address = InetAddress . getLocalHost ( ) ; LOG . debug ( "address: {} isLoopbackAddress: {}, with host {} {}" , address , address . isLoopbackAddress ( ) , address . getHostAddress ( ) , address . getHostName ( ) ) ; if ( ! isValidAddress ( address , timeoutMs ) ) { Enumeration < NetworkInterface > networkInterfaces = NetworkInterface . getNetworkInterfaces ( ) ; if ( ! WINDOWS ) { List < NetworkInterface > netIFs = Collections . list ( networkInterfaces ) ; Collections . reverse ( netIFs ) ; networkInterfaces = Collections . enumeration ( netIFs ) ; } while ( networkInterfaces . hasMoreElements ( ) ) { NetworkInterface ni = networkInterfaces . nextElement ( ) ; Enumeration < InetAddress > addresses = ni . getInetAddresses ( ) ; while ( addresses . hasMoreElements ( ) ) { address = addresses . nextElement ( ) ; if ( isValidAddress ( address , timeoutMs ) ) { sLocalIP = address . getHostAddress ( ) ; return sLocalIP ; } } } LOG . warn ( "Your hostname, {} resolves to a loopback/non-reachable address: {}, " + "but we couldn't find any external IP address!" , InetAddress . getLocalHost ( ) . getHostName ( ) , address . getHostAddress ( ) ) ; } sLocalIP = address . getHostAddress ( ) ; return sLocalIP ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } }
Gets a local IP address for the host this JVM is running on .