idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,600
/**
 * Starts a new image-validation thread for the given image file.
 *
 * @param imageFile the image file to validate
 * @throws IOException if the standby is quiescing and refuses new work
 */
private void createImageValidation(File imageFile) throws IOException {
  synchronized (imageValidatorLock) {
    InjectionHandler.processEvent(InjectionEvent.STANDBY_VALIDATE_CREATE);
    if (!running) {
      // Refuse to start validation while the standby is shutting down.
      InjectionHandler.processEvent(InjectionEvent.STANDBY_VALIDATE_CREATE_FAIL);
      throw new IOException("Standby: standby is quiescing");
    }
    imageValidator = new ImageValidator(imageFile);
    imageValidator.start();
  }
}
Creates image validation thread .
33,601
/**
 * Interrupts any ongoing image validation thread and waits for it to exit.
 *
 * @throws IOException (as InterruptedIOException) if this thread is itself
 *         interrupted while waiting for the validator to finish
 */
private void interruptImageValidation() throws IOException {
  synchronized (imageValidatorLock) {
    if (imageValidator != null) {
      imageValidator.interrupt();
      try {
        imageValidator.join();
      } catch (InterruptedException e) {
        // Fix: restore the interrupt status instead of swallowing it, so
        // callers up the stack can observe the interruption.
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Standby: received interruption");
      }
    }
  }
}
Interrupts and joins ongoing image validation .
33,602
/**
 * Initializes the standby's secondary HTTP server so the primary namenode
 * can fetch image/edits files from this node, and loads checkpoint
 * configuration (period, transaction-count trigger, optional delay).
 *
 * @param conf the configuration to read settings from; the actual bound
 *             http address is written back into it
 * @throws IOException if the HTTP server cannot be started
 */
void initSecondary(Configuration conf) throws IOException {
  fsName = AvatarNode.getRemoteNamenodeHttpName(conf, avatarNode.getInstanceId());

  // Checkpoint configuration.
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointTxnCount = NNStorageConfiguration.getCheckpointTxnCount(conf);
  delayedScheduledCheckpointTime = conf.getBoolean("fs.checkpoint.delayed", false)
      ? AvatarNode.now() + checkpointPeriod * 1000 : 0;

  // Bring up the info server that serves the image over HTTP.
  String infoAddr = NetUtils.getServerAddress(conf,
      "dfs.secondary.info.bindAddress", "dfs.secondary.info.port",
      "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoBindIpAddress = infoSocAddr.getAddress().getHostAddress();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindIpAddress, tmpInfoPort,
      tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
  infoServer.start();

  avatarNode.httpServer.setAttribute("avatar.node", avatarNode);
  avatarNode.httpServer.addInternalServlet("outstandingnodes",
      "/outstandingnodes", OutStandingDatanodesServlet.class);

  // The server may have bound an ephemeral port; record the real one.
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindIpAddress + ":" + infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindIpAddress + ":" + infoPort);
  LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " + "(" + checkpointPeriod / 60 + " min)");
  if (delayedScheduledCheckpointTime > 0) {
    LOG.warn("Standby: Checkpointing will be delayed by: " + checkpointPeriod + " seconds");
  }
  LOG.warn("Log Size Trigger :" + checkpointTxnCount + " transactions.");
}
Initialize the webserver so that the primary namenode can fetch transaction logs from standby via http .
33,603
/**
 * Verifies that the standby's current ingest state is one of the expected
 * states; fails with an IOException naming the actual state otherwise.
 */
private void assertState(StandbyIngestState... expectedStates) throws IOException {
  for (StandbyIngestState expected : expectedStates) {
    if (currentIngestState == expected) {
      return;
    }
  }
  throw new IOException("Standby: illegal state - current: " + currentIngestState);
}
Assert that the standby is in the expected state
33,604
/**
 * Reads {@code length} bytes starting at {@code position} and returns them
 * as a list of ByteBuffers. This default implementation does one buffered
 * read and wraps it; subclasses may override to avoid the copy.
 *
 * @throws IOException if the underlying read fails
 */
public List<ByteBuffer> readFullyScatterGather(long position, int length) throws IOException {
  byte[] data = new byte[length];
  readFully(position, data, 0, length);
  LinkedList<ByteBuffer> buffers = new LinkedList<ByteBuffer>();
  buffers.add(ByteBuffer.wrap(data, 0, length));
  return buffers;
}
This can be optimized to avoid buffer copies . It allows the underlying system to return a list of ByteBuffers that contain the data . A FileSystem implementation can override this method to make it avoid redundant memory copies .
33,605
/**
 * Flushes buffered data to the persistent store and records sync metrics
 * (count and elapsed time in microseconds).
 *
 * @param durable whether the flush must be durable
 */
public void flush(boolean durable) throws IOException {
  numSync++;
  final long startNanos = System.nanoTime();
  flushAndSync(durable);
  final long elapsedMicros = DFSUtil.getElapsedTimeMicroSeconds(startNanos);
  totalTimeSync += elapsedMicros;
  if (sync != null) {
    sync.inc(elapsedMicros);
  }
}
Flush data to persistent store . Collect sync metrics .
33,606
/**
 * Sets the owner and/or group of the given path by shelling out to the
 * system chown/chgrp commands. At least one of username or groupname must
 * be non-null.
 *
 * @throws IOException if both arguments are null or the command fails
 */
public void setOwner(Path p, String username, String groupname) throws IOException {
  if (username == null && groupname == null) {
    throw new IOException("username == null && groupname == null");
  }
  if (username == null) {
    // Only the group changes.
    execCommand(pathToFile(p), Shell.SET_GROUP_COMMAND, groupname);
  } else {
    String ownerSpec = username + (groupname == null ? "" : ":" + groupname);
    execCommand(pathToFile(p), Shell.SET_OWNER_COMMAND, ownerSpec);
  }
}
Use the command chown to set owner .
33,607
/**
 * Sets the permission of the given path. When group and other bits differ
 * an external chmod is used, because the Java File API can only express
 * owner vs. everyone-else bits; otherwise the File API is used directly.
 *
 * @throws IOException if a permission change fails
 */
public void setPermission(Path p, FsPermission permission) throws IOException {
  FsAction user = permission.getUserAction();
  FsAction group = permission.getGroupAction();
  FsAction other = permission.getOtherAction();
  File f = pathToFile(p);
  if (group != other) {
    // java.io.File cannot set group and other bits independently.
    execSetPermission(f, permission);
    return;
  }
  boolean rv = true;
  // Read bits.
  rv = f.setReadable(group.implies(FsAction.READ), false);
  checkReturnValue(rv, p, permission);
  if (group.implies(FsAction.READ) != user.implies(FsAction.READ)) {
    // Fix: the owner-only call's return value was previously discarded and
    // the stale rv from the group call was checked instead.
    rv = f.setReadable(user.implies(FsAction.READ), true);
    checkReturnValue(rv, p, permission);
  }
  // Write bits.
  rv = f.setWritable(group.implies(FsAction.WRITE), false);
  checkReturnValue(rv, p, permission);
  if (group.implies(FsAction.WRITE) != user.implies(FsAction.WRITE)) {
    rv = f.setWritable(user.implies(FsAction.WRITE), true);
    checkReturnValue(rv, p, permission);
  }
  // Execute bits.
  rv = f.setExecutable(group.implies(FsAction.EXECUTE), false);
  checkReturnValue(rv, p, permission);
  if (group.implies(FsAction.EXECUTE) != user.implies(FsAction.EXECUTE)) {
    rv = f.setExecutable(user.implies(FsAction.EXECUTE), true);
    checkReturnValue(rv, p, permission);
  }
}
Use the command chmod to set permission .
33,608
/**
 * Anonymizes the log file: reads event records one by one, anonymizes each
 * valid record, writes the packed result to "&lt;logfile&gt;.anonymized",
 * and finally compresses the output file.
 *
 * @throws Exception if parsing, writing, or compression fails
 */
public void anonymize() throws Exception {
  EventRecord er = null;
  SerializedRecord sr = null;
  String outName = logfile.getName() + ".anonymized";
  BufferedWriter bfw = new BufferedWriter(new FileWriter(outName));
  try {
    System.out.println("Anonymizing log records...");
    while ((er = parser.getNext()) != null) {
      if (er.isValid()) {
        sr = new SerializedRecord(er);
        Anonymizer.anonymize(sr);
        bfw.write(LocalStore.pack(sr).toString());
        bfw.write(LocalStore.RECORD_SEPARATOR);
      }
    }
    bfw.flush();
  } finally {
    // Fix: always release the file handle, even when parsing or writing
    // fails midway (the writer was previously leaked on exception).
    bfw.close();
  }
  System.out.println("Anonymized log records written to " + outName);
  System.out.println("Compressing output file...");
  LocalStore.zipCompress(outName);
  System.out.println("Compressed output file written to " + outName + LocalStore.COMPRESSION_SUFFIX);
}
Performs anonymization for the log file . Log entries are read one by one and EventRecords are created which are then anonymized and written to the output .
33,609
/**
 * Defines the one-sided pentomino pieces: the base pieces plus their
 * flipped variants, where a flipped piece reuses the base name in upper
 * case.
 * NOTE(review): in this copy "y" and "Y" carry identical shape strings —
 * verify against the canonical piece definitions (whitespace may have been
 * collapsed in transit).
 */
protected void initializePieces() {
  // Base pieces.
  pieces.add(new Piece("x", " x /xxx/ x ", false, oneRotation));
  pieces.add(new Piece("v", "x /x /xxx", false, fourRotations));
  pieces.add(new Piece("t", "xxx/ x / x ", false, fourRotations));
  pieces.add(new Piece("w", " x/ xx/xx ", false, fourRotations));
  pieces.add(new Piece("u", "x x/xxx", false, fourRotations));
  pieces.add(new Piece("i", "xxxxx", false, twoRotations));
  pieces.add(new Piece("f", " xx/xx / x ", false, fourRotations));
  pieces.add(new Piece("p", "xx/xx/x ", false, fourRotations));
  pieces.add(new Piece("z", "xx / x / xx", false, twoRotations));
  pieces.add(new Piece("n", "xx / xxx", false, fourRotations));
  pieces.add(new Piece("y", " x /xxxx", false, fourRotations));
  pieces.add(new Piece("l", " x/xxxx", false, fourRotations));
  // Flipped variants (capitalized names).
  pieces.add(new Piece("F", "xx / xx/ x ", false, fourRotations));
  pieces.add(new Piece("P", "xx/xx/ x", false, fourRotations));
  pieces.add(new Piece("Z", " xx/ x /xx ", false, twoRotations));
  pieces.add(new Piece("N", " xx/xxx ", false, fourRotations));
  pieces.add(new Piece("Y", " x /xxxx", false, fourRotations));
  pieces.add(new Piece("L", "x /xxxx", false, fourRotations));
}
Define the one sided pieces . The flipped pieces have the same name with a capital letter .
33,610
/**
 * Entry point: solves the 3x30 one-sided pentomino puzzle and prints the
 * number of solutions found.
 */
public static void main(String[] args) {
  Pentomino puzzle = new OneSidedPentomino(3, 30);
  int solutions = puzzle.solve();
  System.out.println(solutions + " solutions found.");
}
Solve the 3x30 puzzle .
33,611
/**
 * Returns the enclosed object cast to the given class, or null when the
 * enclosed object is not an instance of that class.
 */
private <T> T get(Class<T> clazz) {
  try {
    return clazz.cast(get());
  } catch (ClassCastException e) {
    // Enclosed object has a different type.
    return null;
  }
}
Returns the enclosed object cast to the given class; if the enclosed object is not an instance of the given class, returns null.
33,612
/**
 * Returns the TaskStatus carried by this update, or null when this update
 * does not carry one.
 */
public TaskStatus getTaskStatus() {
  TaskStatusUpdate statusUpdate = get(TaskStatusUpdate.class);
  return statusUpdate == null ? null : statusUpdate.getStatus();
}
Returns the TaskStatus if this update carries one, null otherwise.
33,613
/**
 * Sets the limit on the number of unique values tracked and returns the
 * effective limit.
 */
public long setMaxItems(long n) {
  if (n >= numItems) {
    this.maxNumItems = n;
  } else if (this.maxNumItems >= this.numItems) {
    // Requested limit is below the current item count: clamp the limit
    // down to the current count rather than dropping below it.
    this.maxNumItems = this.numItems;
  }
  return this.maxNumItems;
}
Set the limit on the number of unique values
33,614
/**
 * Adds a block to the pending-replication map, or — if already pending —
 * bumps its replica count and refreshes its timestamp.
 */
void add(BlockInfo block, int numReplicas) {
  synchronized (pendingReplications) {
    PendingBlockInfo pending = pendingReplications.get(block);
    if (pending == null) {
      pendingReplications.put(block, new PendingBlockInfo(numReplicas));
    } else {
      pending.incrementReplicas(numReplicas);
      pending.setTimeStamp();
    }
  }
}
Add a block to the list of pending Replications
33,615
/**
 * Records that one replication request for this block has finished;
 * removes the block entirely once no pending replicas remain.
 */
void remove(Block block) {
  synchronized (pendingReplications) {
    PendingBlockInfo pending = pendingReplications.get(block);
    if (pending != null) {
      if (FSNamesystem.LOG.isDebugEnabled()) {
        FSNamesystem.LOG.debug("Removing pending replication for block" + block);
      }
      pending.decrementReplicas();
      if (pending.getNumReplicas() <= 0) {
        pendingReplications.remove(block);
      }
    }
  }
}
One replication request for this block has finished . Decrement the number of pending replication requests for this block .
33,616
/**
 * Returns the number of pending replication requests for the block, or 0
 * when the block is not pending.
 */
int getNumReplicas(Block block) {
  synchronized (pendingReplications) {
    PendingBlockInfo pending = pendingReplications.get(block);
    return pending == null ? 0 : pending.getNumReplicas();
  }
}
How many copies of this block are pending replication?
33,617
/**
 * Drains and returns the blocks whose replication requests have timed out,
 * or null when none have.
 */
BlockInfo[] getTimedOutBlocks() {
  synchronized (timedOutItems) {
    if (timedOutItems.size() <= 0) {
      return null;
    }
    BlockInfo[] timedOut = timedOutItems.toArray(new BlockInfo[timedOutItems.size()]);
    timedOutItems.clear();
    return timedOut;
  }
}
Returns a list of blocks that have timed out their replication requests . Returns null if no blocks have timed out .
33,618
/**
 * Returns the process-wide singleton, creating it lazily. May return null
 * when native I/O support is unavailable.
 */
public static ReadaheadPool getInstance() {
  synchronized (ReadaheadPool.class) {
    if (instance == null && NativeIO.isAvailable()) {
      instance = new ReadaheadPool();
    }
    return instance;
  }
}
Return the singleton instance for the current process .
33,619
/**
 * Issues (or defers) a readahead request for a stream being read
 * sequentially. A new request is submitted once the reader has consumed
 * half of the previously requested window; otherwise the outstanding
 * request is returned unchanged.
 *
 * @return the outstanding readahead request, or null when none is needed
 */
public ReadaheadRequest readaheadStream(String identifier, FileDescriptor fd,
    long curPos, long readaheadLength, long maxOffsetToRead,
    ReadaheadRequest lastReadahead) {
  Preconditions.checkArgument(curPos <= maxOffsetToRead,
      "Readahead position %s higher than maxOffsetToRead %s", curPos, maxOffsetToRead);
  if (readaheadLength <= 0) {
    return null;
  }
  long lastOffset = Long.MIN_VALUE;
  if (lastReadahead != null) {
    lastOffset = lastReadahead.getOffset();
  }
  // Trigger the next readahead once the reader is halfway through the
  // previously requested window.
  long nextOffset = lastOffset + readaheadLength / 2;
  if (curPos >= nextOffset) {
    if (lastReadahead != null) {
      lastReadahead.cancel();
      lastReadahead = null;
    }
    long length = Math.min(readaheadLength, maxOffsetToRead - curPos);
    if (length <= 0) {
      // Nothing left within the readable range.
      return null;
    }
    return submitReadahead(identifier, fd, curPos, length);
  } else {
    return lastReadahead;
  }
}
Issue a request to readahead on the given file descriptor .
33,620
/**
 * Submits a readahead request for the given file descriptor to the worker
 * pool and returns it.
 */
public ReadaheadRequest submitReadahead(String identifier, FileDescriptor fd, long off, long len) {
  ReadaheadRequestImpl request = new ReadaheadRequestImpl(identifier, fd, off, len);
  pool.execute(request);
  if (LOG.isTraceEnabled()) {
    LOG.trace("submit readahead: " + request);
  }
  return request;
}
Submit a request to readahead on the given file descriptor .
33,621
/**
 * Returns the total RAID saving in bytes across all codecs, or -1 when
 * statistics are unavailable or any codec's saving is unknown.
 */
public long getSaving() {
  if (lastRaidStatistics == null) {
    return -1;
  }
  long total = 0;
  for (Codec codec : Codec.getCodecs()) {
    String code = codec.id;
    long codecSaving = lastRaidStatistics.get(code).getSaving(conf);
    if (codecSaving == -1) {
      // Any unknown codec makes the total unknown.
      return -1;
    }
    total += codecSaving;
  }
  return total;
}
Get the total RAID saving in bytes
33,622
/**
 * Publishes per-codec and total RAID savings into the given metrics,
 * skipping codecs whose savings are unknown or zero.
 */
public void populateSaving(RaidNodeMetrics metrics) {
  if (lastRaidStatistics == null) {
    return;
  }
  long total = 0;
  for (Codec codec : Codec.getCodecs()) {
    String code = codec.id;
    long codecSaving = lastRaidStatistics.get(code).getSaving(conf);
    if (codecSaving > 0) {
      metrics.savingForCode.get(code).set(codecSaving);
      total += codecSaving;
    }
  }
  if (total > 0) {
    metrics.saving.set(total);
  }
}
Populate RAID savings by code and total .
33,623
/**
 * Sorts the path array in place by ascending path depth.
 */
private void sortPathByDepth(Path[] paths) {
  Arrays.sort(paths, new Comparator<Path>() {
    public int compare(Path left, Path right) {
      return ((Integer) left.depth()).compareTo(right.depth());
    }
  });
}
Sort the path array by depth
33,624
/**
 * Merges possibly-nested roots, keeping only the top-most paths: a
 * candidate is dropped when an already-kept root is its ancestor. The
 * depth sort guarantees ancestors are examined before descendants.
 */
private List<Path> mergeRoots(Path[] dupRoots) {
  sortPathByDepth(dupRoots);
  List<Path> merged = new ArrayList<Path>();
  for (Path candidate : dupRoots) {
    boolean covered = false;
    for (Path root : merged) {
      if (isAncestorPath(root.toUri().getPath(), candidate.toUri().getPath())) {
        covered = true;
        break;
      }
    }
    if (!covered) {
      merged.add(candidate);
    }
  }
  return merged;
}
Merge the roots, keeping only the top-most ones.
33,625
/**
 * Submits raid jobs for as many of the given files as the current limits
 * allow, and returns the files that were not submitted.
 *
 * @param submitAll when true, submit even a partial final batch
 * @return the remaining (unsubmitted) files
 */
private List<FileStatus> submitRaidJobsWhenPossible(PolicyInfo info,
    List<FileStatus> filesToRaid, boolean submitAll) {
  if (!submitRaidJobs || !info.getShouldRaid()) {
    return filesToRaid;
  }
  try {
    int maxFilesPerJob = configManager.getMaxFilesPerJob();
    int maxJobs = configManager.getMaxJobsPerPolicy();
    // Keep submitting while a full batch is available (or submitAll is
    // set) and the policy has not hit its running-job limit.
    while (!filesToRaid.isEmpty()
        && (submitAll || filesToRaid.size() >= maxFilesPerJob)
        && raidNode.getRunningJobsForPolicy(info.getName()) < maxJobs) {
      int numFiles = Math.min(maxFilesPerJob, filesToRaid.size());
      LOG.info("Invoking raidFiles with " + numFiles + " files");
      raidNode.raidFiles(info, filesToRaid.subList(0, numFiles));
      filesToRaid = filesToRaid.subList(numFiles, filesToRaid.size());
    }
  } catch (IOException e) {
    LOG.warn("Failed to raid files for policy:" + info.getName(), e);
  }
  return filesToRaid;
}
Raiding a given list of files
33,626
/**
 * Authorizes the user to access the given protocol, creating and caching
 * the protocol's ConnectionPermission on first use.
 * NOTE(review): the get/put on protocolToPermissionMap is not synchronized
 * here — confirm the map is thread-safe (the put is idempotent, so the
 * race is likely benign).
 *
 * @throws AuthorizationException if the user lacks the permission
 */
public static void authorize(Subject user, Class<?> protocol) throws AuthorizationException {
  Permission permission = protocolToPermissionMap.get(protocol);
  if (permission == null) {
    permission = new ConnectionPermission(protocol);
    protocolToPermissionMap.put(protocol, permission);
  }
  checkPermission(user, permission);
}
Authorize the user to access the protocol being used .
33,627
/**
 * Returns the block's finalized on-disk length.
 *
 * @throws IOException if the block is not present in the volume map
 */
public long getFinalizedBlockLength(int namespaceId, Block b) throws IOException {
  DatanodeBlockInfo blockInfo = volumeMap.get(namespaceId, b);
  if (blockInfo == null) {
    throw new IOException("Can't find block " + b + " in volumeMap");
  }
  return blockInfo.getFinalizedSize();
}
Find the block's on-disk length.
33,628
/**
 * Returns the data file for the given block.
 *
 * @throws IOException if the block is not valid in the given namespace
 */
public File getBlockFile(int namespaceId, Block b) throws IOException {
  File blockFile = validateBlockFile(namespaceId, b);
  if (blockFile == null) {
    if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
      InterDatanodeProtocol.LOG.debug("b=" + b + ", volumeMap=" + volumeMap);
    }
    throw new IOException("Block " + b + ", namespace= " + namespaceId + " is not valid.");
  }
  return blockFile;
}
Get File name for a given block .
33,629
/**
 * Makes a private copy of the block if it is linked into an existing
 * snapshot, so that modifying this block does not modify snapshot data.
 *
 * @return whether the block was detached
 * @throws IOException if the block is unknown or the copy fails
 */
public boolean detachBlock(int namespaceId, Block block, int numLinks) throws IOException {
  DatanodeBlockInfo blockInfo;
  lock.readLock().lock();
  try {
    blockInfo = volumeMap.get(namespaceId, block);
  } finally {
    lock.readLock().unlock();
  }
  // Fix: fail with a descriptive IOException instead of an NPE when the
  // block is not in the volume map.
  if (blockInfo == null) {
    throw new IOException("Can't find block " + block + " in volumeMap");
  }
  return blockInfo.detachBlock(namespaceId, block, numLinks);
}
Make a copy of the block if this block is linked to an existing snapshot . This ensures that modifying this block does not modify data in any existing snapshots .
33,630
/**
 * Interrupts all of the given threads and joins on them.
 *
 * @return true if all threads were joined; false if this thread was itself
 *         interrupted while waiting (some threads may still be running)
 */
private boolean interruptAndJoinThreads(List<Thread> threads) {
  for (Thread t : threads) {
    t.interrupt();
  }
  for (Thread t : threads) {
    try {
      t.join();
    } catch (InterruptedException e) {
      DataNode.LOG.warn("interruptOngoingCreates: t=" + t, e);
      // Fix: preserve the interrupt status for callers instead of
      // swallowing it.
      Thread.currentThread().interrupt();
      return false;
    }
  }
  return true;
}
Try to interrupt all of the given threads and join on them . If interrupted returns false indicating some threads may still be running .
33,631
/**
 * Returns a snapshot of the live writer threads for the given block, or
 * null when there is no ongoing create or no live writer. As a side
 * effect, threads that have already exited are pruned from the active
 * file's thread list.
 */
private ArrayList<Thread> getActiveThreads(int namespaceId, Block block) {
  lock.writeLock().lock();
  try {
    final ActiveFile activeFile = volumeMap.getOngoingCreates(namespaceId, block);
    if (activeFile != null && !activeFile.threads.isEmpty()) {
      // Drop writer threads that have already finished.
      for (Iterator<Thread> it = activeFile.threads.iterator(); it.hasNext(); ) {
        if (!it.next().isAlive()) {
          it.remove();
        }
      }
      if (!activeFile.threads.isEmpty()) {
        return new ArrayList<Thread>(activeFile.threads);
      }
    }
  } finally {
    lock.writeLock().unlock();
  }
  return null;
}
Return a list of active writer threads for the given block .
33,632
/**
 * Returns whether the block is finalized, taking the dataset read lock
 * around the check.
 */
private boolean isBlockFinalizedWithLock(int namespaceId, Block b) {
  lock.readLock().lock();
  try {
    return isBlockFinalizedInternal(namespaceId, b, true);
  } finally {
    lock.readLock().unlock();
  }
}
is this block finalized? Returns true if the block is already finalized otherwise returns false .
33,633
/**
 * Deletes a block's data file and meta file from disk.
 *
 * @return true when both files are gone (a missing block file counts as
 *         already deleted); false when any deletion failed
 */
private boolean delBlockFromDisk(File blockFile, File metaFile, Block b) {
  if (blockFile == null) {
    DataNode.LOG.warn("No file exists for block: " + b);
    return true;
  }
  if (!blockFile.delete()) {
    DataNode.LOG.warn("Not able to delete the block file: " + blockFile);
    return false;
  }
  // Only touch the meta file once the data file is gone.
  if (metaFile != null && !metaFile.delete()) {
    DataNode.LOG.warn("Not able to delete the meta block file: " + metaFile);
    return false;
  }
  return true;
}
Remove a block from disk
33,634
/**
 * Returns a report of the blocks currently being written in the given
 * namespace.
 */
public Block[] getBlocksBeingWrittenReport(int namespaceId) throws IOException {
  LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
  volumes.getBlocksBeingWrittenInfo(namespaceId, blockSet);
  Block[] report = new Block[blockSet.size()];
  int i = 0;
  for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
    report[i] = it.next();
  }
  return report;
}
Return a table of the blocks that are currently being written.
33,635
/**
 * Returns the finalized blocks from the in-memory block map for the given
 * namespace, or an empty array when the namespace is unknown.
 */
public Block[] getBlockReport(int namespaceId) throws IOException {
  ArrayList<Block> blocks = new ArrayList<Block>();
  org.apache.hadoop.hdfs.server.datanode.NamespaceMap nm = volumeMap.getNamespaceMap(namespaceId);
  if (nm == null) {
    return new Block[0];
  }
  // Collect the report bucket by bucket.
  int numBuckets = nm.getNumBucket();
  for (int i = 0; i < numBuckets; i++) {
    BlockBucket bucket = nm.getBucket(i);
    bucket.getBlockReport(blocks);
  }
  return blocks.toArray(new Block[blocks.size()]);
}
Get the list of finalized blocks from in - memory blockmap for a block pool .
33,636
/**
 * Checks whether the given block is valid: it must have a validated block
 * file (optionally size-checked) and be finalized.
 *
 * @param checkSize whether to also validate the on-disk size
 */
public boolean isValidBlock(int namespaceId, Block b, boolean checkSize) throws IOException {
  // Fix: removed a stray empty statement (";;") from the declaration.
  File f = null;
  try {
    f = getValidateBlockFile(namespaceId, b, checkSize);
  } catch (IOException e) {
    // Validation failure means "not valid", not an error for the caller.
    DataNode.LOG.warn("Block " + b + " is not valid:", e);
  }
  return f != null && isBlockFinalizedWithLock(namespaceId, b);
}
Check whether the given block is a valid one .
33,637
/**
 * Returns the data file to read for the given block, or null when the
 * block is unknown.
 */
public File getFile(int namespaceId, Block b) {
  lock.readLock().lock();
  try {
    DatanodeBlockInfo blockInfo = volumeMap.get(namespaceId, b);
    return blockInfo == null ? null : blockInfo.getDataFileToRead();
  } finally {
    lock.readLock().unlock();
  }
}
Turn the block identifier into a filename .
33,638
/**
 * Checks the data directories for disk errors; when volumes have failed,
 * removes all blocks that belong to them from the volume map and reports
 * the failed volumes via DiskErrorException.
 * NOTE(review): total_blocks/removed_blocks are never updated, so the
 * warning below always logs zeros — confirm whether counts were meant to
 * be collected from removeUnhealthyVolumes.
 *
 * @throws DiskErrorException listing the failed volumes, if any
 */
public void checkDataDir() throws DiskErrorException {
  long total_blocks = 0, removed_blocks = 0;
  List<FSVolume> failedVols = volumes.checkDirs();
  if (failedVols == null) {
    // All volumes healthy.
    return;
  }
  long mlsec = System.currentTimeMillis();
  lock.writeLock().lock();
  try {
    volumeMap.removeUnhealthyVolumes(failedVols);
  } finally {
    lock.writeLock().unlock();
  }
  mlsec = System.currentTimeMillis() - mlsec;
  DataNode.LOG.warn(">>>>>>>>>>>>Removed " + removed_blocks + " out of " + total_blocks + "(took " + mlsec + " millisecs)");
  StringBuilder sb = new StringBuilder();
  for (FSVolume fv : failedVols) {
    sb.append(fv.toString() + ";");
  }
  throw new DiskErrorException("DataNode failed volumes:" + sb);
}
check if a data directory is healthy if some volumes failed - make sure to remove all the blocks that belong to these volumes
33,639
/**
 * Removes the given directories from the set of volumes in use, drops all
 * blocks that belonged to them from the volume map, and reports the
 * removed volumes via DiskErrorException.
 */
public void removeVolumes(Configuration conf, List<File> directories) throws Exception {
  if (directories == null || directories.isEmpty()) {
    DataNode.LOG.warn("There were no directories to remove. Exiting ");
    return;
  }
  List<FSVolume> removedVols;
  lock.readLock().lock();
  try {
    removedVols = volumes.removeBVolumes(directories);
  } finally {
    lock.readLock().unlock();
  }
  // Drop the blocks that lived on the removed volumes.
  long mlsec = System.currentTimeMillis();
  lock.writeLock().lock();
  try {
    volumeMap.removeUnhealthyVolumes(removedVols);
  } finally {
    lock.writeLock().unlock();
  }
  mlsec = System.currentTimeMillis() - mlsec;
  DataNode.LOG.warn(">>>>>>>>>Removing these blocks took " + mlsec + " millisecs in refresh<<<<<<<<<<<<<<< ");
  StringBuilder sb = new StringBuilder();
  for (FSVolume fv : removedVols) {
    sb.append(fv.toString() + ";");
  }
  throw new DiskErrorException("These volumes were removed: " + sb);
}
remove directories that are given from the list of volumes to use . This function also makes sure to remove all the blocks that belong to these volumes .
33,640
/**
 * Copies src to dst as fast as possible. When hardlinking is requested and
 * enabled, a hardlink is attempted first and any failure falls back to a
 * channel-based copy.
 */
public void copyFile(File src, File dst, boolean hardlink) throws IOException {
  if (src == null || dst == null) {
    throw new IOException("src/dst file is null");
  }
  try {
    if (hardlink && shouldHardLinkBlockCopy) {
      // The link target must not exist; remove any stale file first.
      if (dst.exists()) {
        if (!dst.delete()) {
          throw new IOException("Deletion of file : " + dst + " failed");
        }
      }
      NativeIO.link(src, dst);
      DataNode.LOG.info("Hard Link Created from : " + src + " to " + dst);
      return;
    }
  } catch (IOException e) {
    // Any hardlink failure (including the delete above) falls through to
    // the regular copy below.
    DataNode.LOG.warn("Hard link failed from : " + src + " to " + dst + " continuing with regular file copy");
  }
  FileChannel input = null;
  FileChannel output = null;
  try {
    input = new FileInputStream(src).getChannel();
    output = new FileOutputStream(dst).getChannel();
    if (input == null || output == null) {
      throw new IOException("Could not create file channels for src : " + src + " dst : " + dst);
    }
    long bytesLeft = input.size();
    long position = 0;
    while (bytesLeft > 0) {
      long bytesWritten = output.transferFrom(input, position, bytesLeft);
      bytesLeft -= bytesWritten;
      position += bytesWritten;
    }
    if (datanode.syncOnClose) {
      // Force the copy to disk before closing when configured to.
      output.force(true);
    }
  } finally {
    if (input != null) {
      input.close();
    }
    if (output != null) {
      output.close();
    }
  }
}
Copies a file as fast as possible . Tries to do a hardlink instead of a copy if the hardlink parameter is specified .
33,641
/**
 * Chooses a destination volume for a hardlinked copy of srcBlock: the
 * source block's own volume when known, otherwise any volume on the same
 * filesystem as srcFileSystem (hardlinks only work within one disk).
 *
 * @return the chosen volume, or null when no suitable volume exists
 */
private FSVolume findVolumeForHardLink(String srcFileSystem, int srcNamespaceId,
    Block srcBlock, File srcBlockFile) throws IOException {
  if (srcBlockFile == null || !srcBlockFile.exists()) {
    throw new IOException("File " + srcBlockFile + " is not valid or does not have"
        + " a valid block file");
  }
  FSVolume dstVol = null;
  DatanodeBlockInfo blockInfo = volumeMap.get(srcNamespaceId, srcBlock);
  if (blockInfo != null) {
    dstVol = blockInfo.getBlockDataFile().getVolume();
  } else {
    // Fall back to any volume on the same filesystem as the source.
    for (FSVolume volume : volumes.getVolumes()) {
      String volFileSystem = volume.getFileSystem();
      if (volFileSystem.equals(srcFileSystem)) {
        dstVol = volume;
        break;
      }
    }
  }
  return dstVol;
}
Find a volume on the datanode for the destination block to be placed on . It tries to place the destination block on the same volume as the source block since hardlinks can be performed only between two files on the same disk
33,642
/**
 * Allocates a destination volume and temp file for dstBlock and registers
 * it in the dataset structures as an ongoing create, preferring a volume
 * that allows a hardlink from the source block's file.
 *
 * @return true when the destination can be hardlinked from the source
 * @throws BlockAlreadyExistsException if dstBlock already exists
 * @throws IOException if the source file is invalid or allocation fails
 */
private boolean copyBlockLocalAdd(String srcFileSystem, File srcBlockFile,
    int srcNamespaceId, Block srcBlock, int dstNamespaceId, Block dstBlock)
    throws IOException {
  boolean hardlink = true;
  File dstBlockFile = null;
  lock.writeLock().lock();
  try {
    if (isValidBlock(dstNamespaceId, dstBlock, false)
        || volumeMap.getOngoingCreates(dstNamespaceId, dstBlock) != null) {
      throw new BlockAlreadyExistsException("Block " + dstBlock + " already exists");
    }
    if (srcBlockFile == null || !srcBlockFile.exists()) {
      throw new IOException("Block " + srcBlock.getBlockName()
          + " is not valid or does not have a valid block file");
    }
    boolean inlineChecksum = Block.isInlineChecksumBlockFilename(srcBlockFile.getName());
    // Prefer a volume that permits a hardlink from the source file.
    FSVolume dstVol = null;
    if (shouldHardLinkBlockCopy) {
      dstVol = findVolumeForHardLink(srcFileSystem, srcNamespaceId, srcBlock, srcBlockFile);
    }
    if (dstVol == null) {
      dstVol = volumes.getNextVolume(srcBlock.getNumBytes());
      hardlink = false;
    }
    int checksumType = DataChecksum.CHECKSUM_UNKNOWN;
    int bytesPerChecksum = -1;
    if (inlineChecksum) {
      GenStampAndChecksum sac = BlockInlineChecksumReader
          .getGenStampAndChecksumFromInlineChecksumFile(srcBlockFile.getName());
      checksumType = sac.checksumType;
      bytesPerChecksum = sac.bytesPerChecksum;
    }
    List<Thread> threads = null;
    dstBlockFile = createTmpFile(dstNamespaceId, dstVol, dstBlock, true,
        inlineChecksum, checksumType, bytesPerChecksum);
    DatanodeBlockInfo binfo = new DatanodeBlockInfo(dstVol, dstBlockFile,
        DatanodeBlockInfo.UNFINALIZED, true, inlineChecksum, checksumType,
        bytesPerChecksum, false, 0);
    volumeMap.add(dstNamespaceId, dstBlock, binfo);
    volumeMap.addOngoingCreates(dstNamespaceId, dstBlock,
        new ActiveFile(binfo, threads, ActiveFile.UNKNOWN_SIZE, false));
  } finally {
    lock.writeLock().unlock();
  }
  if (dstBlockFile == null) {
    throw new IOException("Could not allocate block file for : " + dstBlock.getBlockName());
  }
  return hardlink;
}
Finds a volume for the dstBlock and adds the new block to the FSDataset data structures to indicate we are going to start writing to the block .
33,643
/**
 * Finalizes a locally-copied block: derives its logical size, moves the
 * temp file into the finalized location on its volume, updates the volume
 * map, and clears the ongoing-create entry.
 *
 * @throws IOException if the block is unknown or the move fails
 */
private void copyBlockLocalFinalize(int dstNamespaceId, Block dstBlock, File dstBlockFile)
    throws IOException {
  boolean inlineChecksum = Block.isInlineChecksumBlockFilename(dstBlockFile.getName());
  long fileSize = dstBlockFile.length();
  long blkSize = 0;
  lock.writeLock().lock();
  try {
    DatanodeBlockInfo info = volumeMap.get(dstNamespaceId, dstBlock);
    if (info == null) {
      throw new IOException("Could not find information for " + dstBlock);
    }
    if (inlineChecksum) {
      // The file interleaves checksums with data; derive the data length.
      blkSize = BlockInlineChecksumReader.getBlockSizeFromFileLength(
          fileSize, info.getChecksumType(), info.getBytesPerChecksum());
    } else {
      blkSize = fileSize;
    }
    FSVolume dstVol = info.getBlockDataFile().getVolume();
    File dest = dstVol.addBlock(dstNamespaceId, dstBlock, dstBlockFile,
        info.isInlineChecksum(), info.getChecksumType(), info.getBytesPerChecksum());
    volumeMap.add(dstNamespaceId, dstBlock, new DatanodeBlockInfo(dstVol, dest,
        blkSize, true, inlineChecksum, info.getChecksumType(),
        info.getBytesPerChecksum(), false, 0));
    volumeMap.removeOngoingCreates(dstNamespaceId, dstBlock);
  } finally {
    lock.writeLock().unlock();
  }
}
Finalize the block in FSDataset .
33,644
/**
 * Joins a list of path components into a path string separated by
 * Path.SEPARATOR; returns null when any component fails UTF-8 conversion.
 */
public static String byteArray2String(byte[][] pathComponents) {
  if (pathComponents.length == 0) {
    return "";
  }
  // A single empty component denotes the root path.
  if (pathComponents.length == 1 && pathComponents[0].length == 0) {
    return Path.SEPARATOR;
  }
  StringBuilder path = new StringBuilder();
  for (int i = 0; i < pathComponents.length; i++) {
    String component = bytes2String(pathComponents[i]);
    if (component == null) {
      return null;
    }
    path.append(component);
    if (i < pathComponents.length - 1) {
      path.append(Path.SEPARATOR_CHAR);
    }
  }
  return path.toString();
}
Given a list of path components returns a path as a UTF8 String
33,645
/**
 * Converts a byte array to a String using UTF-8. All-ASCII input takes a
 * fast char-by-char path; any byte below UTF8.MIN_ASCII_CODE falls back to
 * full UTF-8 decoding.
 */
public static String bytes2String(byte[] bytes) {
  try {
    final int len = bytes.length;
    char[] chars = UTF8.getCharArray(len);
    for (int i = 0; i < len; i++) {
      if (bytes[i] < UTF8.MIN_ASCII_CODE) {
        // Not plain ASCII: take the slow, correct decoding path.
        return new String(bytes, utf8charsetName);
      }
      chars[i] = (char) bytes[i];
    }
    return new String(chars, 0, len);
  } catch (UnsupportedEncodingException e) {
    assert false : "UTF8 encoding is not supported ";
  }
  return null;
}
Converts a byte array to a string using UTF8 encoding .
33,646
/**
 * Converts a String to a byte array using UTF-8. All-ASCII strings take a
 * fast char-narrowing path; any character above UTF8.MAX_ASCII_CODE falls
 * back to full UTF-8 encoding.
 */
public static byte[] string2Bytes(String str) {
  try {
    final int len = str.length();
    byte[] rawBytes = new byte[len];
    char[] chars = UTF8.getCharArray(len);
    str.getChars(0, len, chars, 0);
    for (int i = 0; i < len; i++) {
      if (chars[i] > UTF8.MAX_ASCII_CODE) {
        // Not plain ASCII: take the slow, correct encoding path.
        return str.getBytes(utf8charsetName);
      }
      rawBytes[i] = (byte) chars[i];
    }
    return rawBytes;
  } catch (UnsupportedEncodingException e) {
    assert false : "UTF8 encoding is not supported ";
  }
  return null;
}
Converts a string to a byte array using UTF8 encoding .
33,647
/**
 * Splits the first {@code len} bytes of {@code bytes} into an array of
 * byte arrays on the given separator. Trailing separators produce no
 * components; {@code len == 0} yields a single null component; input that
 * is only separators yields a single empty component.
 */
public static byte[][] bytes2byteArray(byte[] bytes, int len, byte separator) {
  assert len <= bytes.length;
  if (len == 0) {
    return new byte[][]{null};
  }
  // Count separators within the first len bytes.
  int splits = 0;
  for (int i = 0; i < len; i++) {
    if (bytes[i] == separator) {
      splits++;
    }
  }
  // Trailing separators do not contribute components.
  int last = len - 1;
  while (last > -1 && bytes[last--] == separator) {
    splits--;
  }
  // Input consisting only of separators: a single empty component.
  if (splits == 0 && bytes[0] == separator) {
    return new byte[][]{new byte[0]};
  }
  splits++;
  byte[][] result = new byte[splits][];
  int start = 0;
  int next = 0;
  for (int index = 0; index < splits; index++) {
    while (next < len && bytes[next] != separator) {
      next++;
    }
    result[index] = new byte[next - start];
    System.arraycopy(bytes, start, result[index], 0, next - start);
    start = next + 1;
    next = start;
  }
  return result;
}
Splits the first len bytes of bytes into an array of byte arrays, using the given byte separator as the delimiter.
33,648
/**
 * Extracts the bytes for {@code str.substring(startIndex, endIndex)}.
 * When {@code canFastConvert} is true the chars in {@code charArray} are
 * known to be ASCII and are narrowed directly; otherwise the substring is
 * encoded through the UTF-8 charset.
 */
private static byte[] extractBytes(String str, int startIndex, int endIndex,
    char[] charArray, boolean canFastConvert) throws UnsupportedEncodingException {
  if (!canFastConvert) {
    return str.substring(startIndex, endIndex).getBytes(utf8charsetName);
  }
  final int count = endIndex - startIndex;
  final byte[] out = new byte[count];
  for (int pos = 0; pos < count; pos++) {
    out[pos] = (byte) charArray[startIndex + pos];
  }
  return out;
}
Helper for extracting bytes either from str or from the array of chars charArray depending if fast conversion is possible specified by canFastConvert
33,649
/**
 * Looks up each key (optionally suffixed with {@code "." + keySuffix}) in
 * preference order and returns the first configured value, or
 * {@code defaultValue} if none of the keys is set.
 */
private static String getConfValue(String defaultValue, String keySuffix,
    Configuration conf, String... keys) {
  for (String key : keys) {
    String lookupKey = (keySuffix == null) ? key : key + "." + keySuffix;
    String found = conf.get(lookupKey);
    if (found != null) {
      return found;
    }
  }
  return defaultValue;
}
Given a list of keys in the order of preference returns a value for the key in the given order from the configuration .
33,650
// Convenience overload: resolves addresses for all nameservice ids found in
// the configuration. Delegates to the (conf, serviceIds, default, keys) form.
public static List < InetSocketAddress > getAddresses ( Configuration conf , String defaultAddress , String ... keys ) { return getAddresses ( conf , getNameServiceIds ( conf ) , defaultAddress , keys ) ; }
Returns list of InetSocketAddress for a given set of keys .
33,651
// Resolves namenode RPC addresses for the given suffix/serviceIds, appending
// the suffix to every lookup key. Throws IOException naming the keys when
// nothing is configured.
// NOTE(review): defaultAddress is first read as fs.default.name+suffix and,
// if present, replaced by NameNode.getDefaultAddress(conf) — presumably to
// normalize the URI into host:port form; confirm this is intentional since
// the suffixed value itself is discarded.
// NOTE(review): the keys array is mutated in place (suffix appended); safe
// only because callers pass fresh varargs arrays.
public static List < InetSocketAddress > getRPCAddresses ( String suffix , Configuration conf , Collection < String > serviceIds , String ... keys ) throws IOException { String defaultAddress = null ; try { defaultAddress = conf . get ( FileSystem . FS_DEFAULT_NAME_KEY + suffix ) ; if ( defaultAddress != null ) { defaultAddress = NameNode . getDefaultAddress ( conf ) ; } } catch ( IllegalArgumentException e ) { defaultAddress = null ; } for ( int i = 0 ; i < keys . length ; i ++ ) { keys [ i ] += suffix ; } List < InetSocketAddress > addressList = DFSUtil . getAddresses ( conf , serviceIds , defaultAddress , keys ) ; if ( addressList == null ) { String keyStr = "" ; for ( String key : keys ) { keyStr += key + " " ; } throw new IOException ( "Incorrect configuration: namenode address " + keyStr + " is not configured." ) ; } return addressList ; }
Returns list of InetSocketAddresses corresponding to namenodes from the configuration .
33,652
// Extracts an optional "-service <id>" pair from argv, validates the id
// against the configuration, copies the service-specific keys into the
// generic keys, and returns argv with the service arguments removed.
// Throws IllegalArgumentException when the id does not match the config.
public static String [ ] setGenericConf ( String [ ] argv , Configuration conf ) { String [ ] serviceId = new String [ 1 ] ; serviceId [ 0 ] = "" ; String [ ] filteredArgv = getServiceName ( argv , serviceId ) ; if ( ! serviceId [ 0 ] . equals ( "" ) ) { if ( ! NameNode . validateServiceName ( conf , serviceId [ 0 ] ) ) { throw new IllegalArgumentException ( "Service Id doesn't match the config" ) ; } setGenericConf ( conf , serviceId [ 0 ] , NameNode . NAMESERVICE_SPECIFIC_KEYS ) ; NameNode . setupDefaultURI ( conf ) ; } return filteredArgv ; }
Set the configuration based on the service id given in the argv
33,653
/**
 * Scans argv for a "-service &lt;id&gt;" pair, stores the id into
 * {@code serviceId[0]} (last occurrence wins) and returns the remaining
 * arguments in order.
 *
 * @throws IllegalArgumentException if "-service" is the final argument
 */
public static String[] getServiceName(String[] argv, String[] serviceId)
    throws IllegalArgumentException {
  ArrayList<String> remaining = new ArrayList<String>();
  int pos = 0;
  while (pos < argv.length) {
    if ("-service".equals(argv[pos])) {
      if (pos + 1 == argv.length) {
        throw new IllegalArgumentException("Doesn't have service id");
      }
      serviceId[0] = argv[pos + 1];
      pos += 2;
    } else {
      remaining.add(argv[pos]);
      pos++;
    }
  }
  return remaining.toArray(new String[remaining.size()]);
}
Get the service name arguments and return the filtered argument list
33,654
/**
 * Returns the socket addresses configured for the given nameservice ids.
 * With no nameservice ids, returns the single address found under {@code keys}
 * (or {@code defaultAddress}); otherwise one address per nameservice id.
 * Returns {@code null} when any required address is unconfigured.
 */
public static List<InetSocketAddress> getAddresses(Configuration conf,
    Collection<String> serviceIds, String defaultAddress, String... keys) {
  // BUGFIX: honor the caller-supplied serviceIds. Previously this parameter
  // was ignored and the ids were always re-read from the configuration.
  Collection<String> nameserviceIds =
      (serviceIds != null) ? serviceIds : getNameServiceIds(conf);
  List<InetSocketAddress> isas = new ArrayList<InetSocketAddress>();
  if (nameserviceIds == null || nameserviceIds.isEmpty()) {
    // Non-federated: a single, unsuffixed address.
    String address = getConfValue(defaultAddress, null, conf, keys);
    if (address == null) {
      return null;
    }
    isas.add(NetUtils.createSocketAddr(address));
  } else {
    // Federated: one suffixed address per nameservice.
    for (String nameserviceId : nameserviceIds) {
      String address = getConfValue(null, nameserviceId, conf, keys);
      if (address == null) {
        return null;
      }
      isas.add(NetUtils.createSocketAddr(address));
    }
  }
  return isas;
}
Return list of InetSocketAddress for a given set of services
33,655
// Returns the namenode RPC addresses clients should talk to. With suffixes,
// aggregates the suffixed RPC addresses across all nameservices; without,
// falls back to the default (fs.default.name-derived) address. Throws if
// nothing is configured.
public static List < InetSocketAddress > getClientRpcAddresses ( Configuration conf , Collection < String > suffixes ) throws IOException { List < InetSocketAddress > addressList ; if ( suffixes != null && ! suffixes . isEmpty ( ) ) { addressList = new ArrayList < InetSocketAddress > ( ) ; for ( String s : suffixes ) { addressList . addAll ( getRPCAddresses ( s , conf , getNameServiceIds ( conf ) , FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY ) ) ; } } else { String defaultAddress ; try { defaultAddress = NameNode . getDefaultAddress ( conf ) ; } catch ( IllegalArgumentException e ) { defaultAddress = null ; } addressList = getAddresses ( conf , defaultAddress , FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; } if ( addressList == null || addressList . isEmpty ( ) ) { throw new IOException ( "Incorrect configuration: namenode address " + FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY + " is not configured." ) ; } return addressList ; }
Returns list of InetSocketAddresses corresponding to namenodes from the configuration . Note this is to be used by clients to get the list of namenode addresses to talk to .
33,656
// Returns the namenode addresses datanodes should talk to, preferring the
// dedicated datanode-protocol address key over the client RPC address key.
// Throws IOException naming both keys when neither is configured.
public static List < InetSocketAddress > getNNServiceRpcAddresses ( Configuration conf ) throws IOException { String defaultAddress ; try { defaultAddress = NameNode . getDefaultAddress ( conf ) ; } catch ( IllegalArgumentException e ) { defaultAddress = null ; } List < InetSocketAddress > addressList = getAddresses ( conf , defaultAddress , NameNode . DATANODE_PROTOCOL_ADDRESS , FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; if ( addressList == null ) { throw new IOException ( "Incorrect configuration: namenode address " + NameNode . DATANODE_PROTOCOL_ADDRESS + " or " + FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY + " is not configured." ) ; } return addressList ; }
Returns list of InetSocketAddresses corresponding to namenodes from the configuration . Note this is to be used by datanodes to get the list of namenode addresses to talk to .
33,657
// Resolves the HTTP info-server address for the given namenode.
// Non-avatar: looks up the nameservice-specific HTTP key for the namenode's
// RPC address. Avatar: tries the "0"/"1" suffixed (federated) keys first,
// then the non-federated avatar lookups. Falls back to the generic
// dfs.http.address (or the legacy bind-address/port pair) when nothing
// more specific is configured.
// NOTE(review): the "0"/"1" suffixes presumably distinguish the two avatar
// instances — confirm against AvatarNode's key naming.
public static String getInfoServer ( InetSocketAddress namenode , Configuration conf , boolean isAvatar ) { String httpAddressDefault = NetUtils . getServerAddress ( conf , "dfs.info.bindAddress" , "dfs.info.port" , "dfs.http.address" ) ; String httpAddress = null ; if ( namenode != null ) { if ( ! isAvatar ) { String nameServiceId = DFSUtil . getNameServiceIdFromAddress ( conf , namenode , FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; if ( nameServiceId != null ) { httpAddress = conf . get ( DFSUtil . getNameServiceIdKey ( FSConstants . DFS_NAMENODE_HTTP_ADDRESS_KEY , nameServiceId ) ) ; } } else { String suffix = "0" ; String nameServiceId = DFSUtil . getNameServiceIdFromAddress ( conf , namenode , FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY + "0" ) ; if ( nameServiceId == null ) { nameServiceId = DFSUtil . getNameServiceIdFromAddress ( conf , namenode , FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY + "1" ) ; suffix = "1" ; } if ( nameServiceId != null ) { httpAddress = conf . get ( DFSUtil . getNameServiceIdKey ( FSConstants . DFS_NAMENODE_HTTP_ADDRESS_KEY + suffix , nameServiceId ) ) ; } if ( httpAddress != null ) { return httpAddress ; } httpAddress = getNonFederatedAvatarInfoServer ( namenode , "0" , conf ) ; if ( httpAddress != null ) { return httpAddress ; } httpAddress = getNonFederatedAvatarInfoServer ( namenode , "1" , conf ) ; } } if ( httpAddress == null ) { httpAddress = conf . get ( FSConstants . DFS_NAMENODE_HTTP_ADDRESS_KEY , httpAddressDefault ) ; } return httpAddress ; }
return server http address from the configuration
33,658
/**
 * Returns true when {@code address} matches the address configured under any
 * of the given keys. Unset keys are skipped. Note: resolving each configured
 * string to a socket address is not cheap; avoid calling this in hot paths.
 */
public static boolean isDefaultNamenodeAddress(Configuration conf,
    InetSocketAddress address, String... keys) {
  for (String key : keys) {
    String configured = conf.get(key);
    if (configured == null) {
      continue;
    }
    if (address.equals(NetUtils.createSocketAddr(configured))) {
      return true;
    }
  }
  return false;
}
Given the InetSocketAddress for any configured communication with a namenode this method determines whether it is the configured communication channel for the default namenode . It does a reverse lookup on the list of default communication parameters to see if the given address matches any of them . Since the process of resolving URIs to Addresses is slightly expensive this utility method should not be used in performance - critical routines .
33,659
/**
 * Copies each nameservice-specific value ({@code key.nameserviceId}) over its
 * generic key in {@code conf}, for every key that has a specific value.
 * Only the in-memory configuration is modified.
 */
public static void setGenericConf(Configuration conf, String nameserviceId,
    String... keys) {
  for (String genericKey : keys) {
    String specificValue = conf.get(getNameServiceIdKey(genericKey, nameserviceId));
    if (specificValue == null) {
      continue;
    }
    conf.set(genericKey, specificValue);
  }
}
Sets the node specific setting into generic configuration key . Looks up value of key . nameserviceId and if found sets that value into generic key in the conf . Note that this only modifies the runtime conf .
33,660
// Thin wrapper around the native checksum verifier: validates data (from its
// current position to its limit) against sums, throwing ChecksumException on
// mismatch. Buffer positions/limits/marks are not modified; basePos and
// fileName are used only for error reporting.
public static void verifyChunkedSums ( int bytesPerSum , int checksumType , ByteBuffer sums , ByteBuffer data , String fileName , long basePos ) throws ChecksumException { nativeVerifyChunkedSums ( bytesPerSum , checksumType , sums , sums . position ( ) , data , data . position ( ) , data . remaining ( ) , fileName , basePos ) ; }
Verify the given buffers of data and checksums and throw an exception if any checksum is invalid . The buffers given to this function should have their position initially at the start of the data and their limit set at the end of the data . The position limit and mark are not modified .
33,661
/**
 * Orders two ShortWritables by their short value, returning exactly
 * -1, 0 or 1.
 */
public int compareTo(ShortWritable o) {
  final short mine = this.value;
  final short theirs = o.value;
  if (mine < theirs) {
    return -1;
  }
  return (mine == theirs) ? 0 : 1;
}
Compares two ShortWritable .
33,662
// Registers a class/id pair in both direction maps, rejecting any attempt to
// rebind an existing class to a different id or an existing id to a different
// class (keeps the two maps mutually consistent).
private synchronized void addToMap ( Class clazz , byte id ) { if ( classToIdMap . containsKey ( clazz ) ) { byte b = classToIdMap . get ( clazz ) ; if ( b != id ) { throw new IllegalArgumentException ( "Class " + clazz . getName ( ) + " already registered but maps to " + b + " and not " + id ) ; } } if ( idToClassMap . containsKey ( id ) ) { Class c = idToClassMap . get ( id ) ; if ( ! c . equals ( clazz ) ) { throw new IllegalArgumentException ( "Id " + id + " exists but maps to " + c . getName ( ) + " and not " + clazz . getName ( ) ) ; } } classToIdMap . put ( clazz , id ) ; idToClassMap . put ( id , clazz ) ; }
Used to add predefined classes and by Writable to copy new classes .
33,663
// Registers a class under the next auto-assigned id; a no-op if the class is
// already registered. Ids are bytes, so at most Byte.MAX_VALUE new classes
// can be added before this throws.
protected synchronized void addToMap ( Class clazz ) { if ( classToIdMap . containsKey ( clazz ) ) { return ; } if ( newClasses + 1 > Byte . MAX_VALUE ) { throw new IndexOutOfBoundsException ( "adding an additional class would" + " exceed the maximum number allowed" ) ; } byte id = ++ newClasses ; addToMap ( clazz , id ) ; }
Add a Class to the maps if it is not already present .
33,664
/**
 * Deep-copies {@code other} into this map by serializing it to a buffer and
 * reading it back through {@link #readFields}.
 *
 * @throws IllegalArgumentException if {@code other} is null or the round-trip
 *         serialization fails
 */
protected synchronized void copy(Writable other) {
  if (other == null) {
    throw new IllegalArgumentException("source map cannot be null");
  }
  try {
    DataOutputBuffer out = new DataOutputBuffer();
    other.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    readFields(in);
  } catch (IOException e) {
    // BUGFIX: preserve the underlying IOException as the cause instead of
    // discarding everything but its message.
    throw new IllegalArgumentException("map cannot be copied: " + e.getMessage(), e);
  }
}
Used by child copy constructors .
33,665
// Pre-upgrade hook: if the namespace's distributed-upgrade version is already
// at or past this upgrade's version, returns false (nothing to do). Otherwise
// the datanode missed an upgrade: logs fatal, best-effort reports the error to
// the namenode (socket timeouts are tolerated), then throws to shut down.
boolean preUpgradeAction ( NamespaceInfo nsInfo ) throws IOException { int nsUpgradeVersion = nsInfo . getDistributedUpgradeVersion ( ) ; if ( nsUpgradeVersion >= getVersion ( ) ) return false ; String errorMsg = "\n Data-node missed a distributed upgrade and will shutdown." + "\n " + getDescription ( ) + "." + " Name-node version = " + nsInfo . getLayoutVersion ( ) + "." ; DataNode . LOG . fatal ( errorMsg ) ; try { dataNode . getNSNamenode ( nsInfo . getNamespaceID ( ) ) . errorReport ( dataNode . getDNRegistrationForNS ( nsInfo . getNamespaceID ( ) ) , DatanodeProtocol . NOTIFY , errorMsg ) ; } catch ( SocketTimeoutException e ) { DataNode . LOG . info ( "Problem connecting to server: " + dataNode . getNSNamenode ( nsInfo . getNamespaceID ( ) ) . toString ( ) ) ; } throw new IOException ( errorMsg ) ; }
Specifies what to do before the upgrade is started .
33,666
// Single choke point for removals from the blocks map: updates the
// namespace's safe-block accounting before removing. Returns the removed
// BlockInfo, or null if b is null or absent.
private BlockInfo removeBlockFromMap ( Block b ) { if ( b == null ) { return null ; } ns . decrementSafeBlockCountForBlockRemoval ( b ) ; return blocks . remove ( b ) ; }
All removals from the blocks map goes through this function .
33,667
// Inserts (or finds) block b in the map via checkBlockInfo and attaches the
// owning file inode. Returns the map's BlockInfo for b.
BlockInfo addINode ( Block b , INodeFile iNode , short replication ) { BlockInfo info = checkBlockInfo ( b , replication ) ; info . inode = iNode ; return info ; }
Add block b belonging to the specified file inode to the map
33,668
// Image-loading variant of addINode: replication and hardlink status come
// from the inode, and (per the hardlink flag) existence checking is relaxed
// for non-hardlinked files.
BlockInfo addINodeForLoading ( Block b , INodeFile iNode ) { BlockInfo info = checkBlockInfo ( b , iNode . getReplication ( ) , iNode . isHardlinkFile ( ) ) ; info . inode = iNode ; return info ; }
Add block b belonging to the specified file inode to the map . Does not check for block existence for non - hardlinked files .
33,669
/**
 * Replaces {@code oldBlock} with {@code newBlock} in the map and points the
 * entry at {@code iNode}. The two blocks must share a block id. With
 * {@code forceUpdate} the old replica locations are carried over to the new
 * entry; without it the file must be under construction (generation-stamp
 * update of an in-progress block).
 *
 * @throws IOException if the block ids differ, or the file is finalized and
 *         {@code forceUpdate} is false
 */
public BlockInfo updateINode(BlockInfo oldBlock, Block newBlock, INodeFile iNode,
    short replication, boolean forceUpdate) throws IOException {
  List<DatanodeDescriptor> locations = null;
  if (oldBlock != null && (!oldBlock.equals(newBlock) || forceUpdate)) {
    if (oldBlock.getBlockId() != newBlock.getBlockId()) {
      throw new IOException("block ids don't match : " + oldBlock + ", " + newBlock);
    }
    if (forceUpdate) {
      // Remember current replica locations so they can be re-attached below.
      locations = new ArrayList<DatanodeDescriptor>();
      for (int i = 0; i < oldBlock.numNodes(); i++) {
        locations.add(oldBlock.getDatanode(i));
      }
    } else {
      if (!iNode.isUnderConstruction()) {
        throw new IOException("Try to update generation of a finalized block old block: "
            + oldBlock + ", new block: " + newBlock);
      }
    }
    removeBlock(oldBlock);
  }
  BlockInfo info = checkBlockInfo(newBlock, replication);
  info.set(newBlock.getBlockId(), newBlock.getNumBytes(), newBlock.getGenerationStamp());
  info.inode = iNode;
  // BUGFIX: removed a duplicated, redundant nested "locations != null" check.
  if (locations != null) {
    for (DatanodeDescriptor d : locations) {
      d.addBlock(info);
    }
  }
  return info;
}
Add block b belonging to the specified file inode to the map this overwrites the map with the new block information .
33,670
// Detaches the inode from block b; if the block then has no datanode either
// (getDatanode(0) == null), drops it from the map entirely.
void removeINode ( Block b ) { BlockInfo info = blocks . get ( b ) ; if ( info != null ) { info . inode = null ; if ( info . getDatanode ( 0 ) == null ) { removeBlockFromMap ( b ) ; } } }
Remove INode reference from block b . If it does not belong to any file and data - nodes then remove the block from the block map .
33,671
// Removes the block from the map, clears its inode reference, and detaches it
// from every datanode's block list (iterating backwards so removals do not
// shift the remaining indices).
void removeBlock ( Block block ) { BlockInfo blockInfo = removeBlockFromMap ( block ) ; if ( blockInfo == null ) return ; blockInfo . inode = null ; for ( int idx = blockInfo . numNodes ( ) - 1 ; idx >= 0 ; idx -- ) { DatanodeDescriptor dn = blockInfo . getDatanode ( idx ) ; dn . removeBlock ( blockInfo ) ; } }
Remove the block from the block map ; remove it from all data - node lists it belongs to ; and remove all data - node locations associated with the block .
33,672
/**
 * Returns the number of datanodes holding block b, or 0 if the block is not
 * in the map. Cheaper than iterating the replica list.
 */
int numNodes(Block b) {
  final BlockInfo info = blocks.get(b);
  if (info == null) {
    return 0;
  }
  return info.numNodes();
}
counts number of containing nodes . Better than using iterator .
33,673
// Ensures b is in the map (via checkBlockInfo) and records the replica on
// node. Returns true if the node was newly added, false if it already held
// the block.
boolean addNode ( Block b , DatanodeDescriptor node , int replication ) { BlockInfo info = checkBlockInfo ( b , replication ) ; return node . addBlock ( info ) ; }
returns true if the node does not already exists and is added . false if the node already exists .
33,674
// Removes the replica reference from node; if the block is then orphaned
// (no datanodes and no inode) it is dropped from the map. Returns whether
// the node actually held the block.
boolean removeNode ( Block b , DatanodeDescriptor node ) { BlockInfo info = blocks . get ( b ) ; if ( info == null ) return false ; boolean removed = node . removeBlock ( info ) ; if ( info . getDatanode ( 0 ) == null && info . inode == null ) { removeBlockFromMap ( b ) ; } return removed ; }
Remove data - node reference from the block . Remove the block from the block map only if it does not belong to any file and data - nodes .
33,675
/**
 * Returns up to {@code numShards} iterators, each traversing a disjoint shard
 * of the blocks map (shards whose iterator is null are skipped).
 *
 * @throws IllegalArgumentException if {@code numShards <= 0}
 */
List<Iterator<BlockInfo>> getBlocksIterarors(int numShards) {
  // Validate the argument before doing any work (previously the result list
  // was allocated first).
  if (numShards <= 0) {
    throw new IllegalArgumentException("Number of shards must be greater than 0");
  }
  List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>(numShards);
  for (int i = 0; i < numShards; i++) {
    Iterator<BlockInfo> iterator = blocks.shardIterator(i, numShards);
    if (iterator != null) {
      iterators.add(iterator);
    }
  }
  return iterators;
}
Get a list of shard iterators . Each iterator will traverse only a part of the blocks map .
33,676
/**
 * Returns true when the map holds {@code block} and records a replica of it
 * on {@code datanode}.
 */
boolean contains(Block block, DatanodeDescriptor datanode) {
  final BlockInfo info = blocks.get(block);
  return info != null && info.findDatanode(datanode) != -1;
}
Check if the replica at the given datanode exists in map
33,677
// Reads the checkpoint time recorded in the storage directory's TIME file.
// Returns 0 when the file is missing or unreadable; the stream is always
// closed via IOUtils.cleanup.
static long readCheckpointTime ( StorageDirectory sd ) throws IOException { File timeFile = NNStorage . getStorageFile ( sd , NameNodeFile . TIME ) ; long timeStamp = 0L ; if ( timeFile . exists ( ) && timeFile . canRead ( ) ) { DataInputStream in = new DataInputStream ( new FileInputStream ( timeFile ) ) ; try { timeStamp = in . readLong ( ) ; } finally { IOUtils . cleanup ( LOG , in ) ; } } return timeStamp ; }
Determine the checkpoint time of the specified StorageDirectory
33,678
// Computes per-tracker map/reduce load caps: the cluster-wide outstanding
// (desired - finished) task counts of RUNNING jobs are spread evenly over the
// trackers (rounded up) and clamped to the tracker's local limits. Returns
// {maxMapLoad, maxReduceLoad}; both 0 when no trackers are registered.
protected synchronized int [ ] getMaxMapAndReduceLoad ( int localMaxMapLoad , int localMaxReduceLoad ) { final int numTaskTrackers = taskTrackerManager . getClusterStatus ( ) . getTaskTrackers ( ) ; int maxMapLoad = 0 ; int maxReduceLoad = 0 ; int neededMaps = 0 ; int neededReduces = 0 ; Collection < JobInProgress > jobQueue = jobQueueJobInProgressListener . getJobQueue ( ) ; synchronized ( jobQueue ) { for ( JobInProgress job : jobQueue ) { if ( job . getStatus ( ) . getRunState ( ) == JobStatus . RUNNING ) { neededMaps += job . desiredMaps ( ) - job . finishedMaps ( ) ; neededReduces += job . desiredReduces ( ) - job . finishedReduces ( ) ; } } } if ( numTaskTrackers > 0 ) { maxMapLoad = Math . min ( localMaxMapLoad , ( int ) Math . ceil ( ( double ) neededMaps / numTaskTrackers ) ) ; maxReduceLoad = Math . min ( localMaxReduceLoad , ( int ) Math . ceil ( ( double ) neededReduces / numTaskTrackers ) ) ; } return new int [ ] { maxMapLoad , maxReduceLoad } ; }
Determine the maximum number of maps or reduces that we are willing to run on a taskTracker which accept a maximum of localMaxMapLoad maps and localMaxReduceLoad reduces
33,679
// Registers a metric under its name; putIfAbsent makes the duplicate check
// and insertion atomic, throwing on a name collision.
public void add ( final String metricsName , final MetricsBase theMetricsObj ) { if ( metricsList . putIfAbsent ( metricsName , theMetricsObj ) != null ) { throw new IllegalArgumentException ( "Duplicate metricsName:" + metricsName ) ; } }
Add a new metrics to the registry
33,680
// Stand-alone entry point: runs PiEstimator through ToolRunner and exits with
// its status code.
public static void main ( String [ ] argv ) throws Exception { System . exit ( ToolRunner . run ( null , new PiEstimator ( ) , argv ) ) ; }
main method for running it as a stand alone command .
33,681
// Orders FileStatus objects by path. NOTE(review): unconditionally casts, so
// a non-FileStatus argument throws ClassCastException (per Comparable's
// contract).
public int compareTo ( Object o ) { FileStatus other = ( FileStatus ) o ; return this . getPath ( ) . compareTo ( other . getPath ( ) ) ; }
Compare this object to another object
33,682
/**
 * Orders JobIDs first by jobtracker identifier, then by job number.
 * (Job numbers are non-negative, so the subtraction cannot overflow.)
 */
public int compareTo(ID o) {
  JobID that = (JobID) o;
  int byTracker = this.jtIdentifier.compareTo(that.jtIdentifier);
  return (byTracker != 0) ? byTracker : (this.id - that.id);
}
Compare JobIds by first jtIdentifiers then by job numbers
33,683
// Appends the job-id suffix (separator, jtIdentifier, separator, zero-padded
// id) to builder and returns it; sub-ids reuse this as their string prefix.
public StringBuilder appendTo ( StringBuilder builder ) { builder . append ( SEPARATOR ) ; builder . append ( jtIdentifier ) ; builder . append ( SEPARATOR ) ; builder . append ( idFormat . format ( id ) ) ; return builder ; }
Add the stuff after the job prefix to the given builder . This is useful because the sub - ids use this substring at the start of their string .
33,684
// Ranks datanodes for a streaming request by how many of the file's blocks
// each holds (frequency), sorts them, and probes connectivity via
// bestNode(nodes, false); the best node is first in the returned array.
// Falls back to a single random node if no ranked node is reachable.
public DatanodeInfo [ ] bestNode ( LocatedBlocks blks ) throws IOException { TreeMap < DatanodeInfo , NodeRecord > map = new TreeMap < DatanodeInfo , NodeRecord > ( ) ; for ( int i = 0 ; i < blks . getLocatedBlocks ( ) . size ( ) ; i ++ ) { DatanodeInfo [ ] nodes = blks . get ( i ) . getLocations ( ) ; for ( int j = 0 ; j < nodes . length ; j ++ ) { NodeRecord obj = map . get ( nodes [ j ] ) ; if ( obj != null ) { obj . frequency ++ ; } else { map . put ( nodes [ j ] , new NodeRecord ( nodes [ j ] , 1 ) ) ; } } } Collection < NodeRecord > values = map . values ( ) ; NodeRecord [ ] nodes = ( NodeRecord [ ] ) values . toArray ( new NodeRecord [ values . size ( ) ] ) ; Arrays . sort ( nodes , new NodeRecordComparator ( ) ) ; try { List < NodeRecord > candidates = bestNode ( nodes , false ) ; return candidates . toArray ( new DatanodeInfo [ candidates . size ( ) ] ) ; } catch ( IOException e ) { return new DatanodeInfo [ ] { randomNode ( ) } ; } }
Get an array of nodes that can serve the streaming request . The best one is the first in the array which has maximum local copies of all blocks
33,685
// Picks one reachable replica of the block at random (delegates to
// bestNode(nodes, true) and returns its single candidate).
public static DatanodeInfo bestNode ( LocatedBlock blk ) throws IOException { DatanodeInfo [ ] nodes = blk . getLocations ( ) ; return bestNode ( nodes , true ) . get ( 0 ) ; }
return a random node from the replicas of this block
33,686
/**
 * Probes the given nodes for reachability (via their HTTP info port) and
 * returns the candidates, best first. In random mode a single reachable node
 * is returned; otherwise the first reachable node in order, followed by all
 * nodes after it.
 *
 * @throws IOException if the list is empty or every node is unreachable
 */
public static <T extends DatanodeID> List<T> bestNode(T[] nodes, boolean doRandom)
    throws IOException {
  if (nodes == null || nodes.length == 0) {
    throw new IOException("No nodes contain this block");
  }
  TreeSet<T> deadNodes = new TreeSet<T>();
  T chosenNode = null;
  int failures = 0;
  Socket s = null;
  int index = -1;
  while (s == null) {
    if (chosenNode == null) {
      // Pick the next candidate, skipping nodes that already failed.
      do {
        if (doRandom) {
          index = rand.nextInt(nodes.length);
        } else {
          index++;
        }
        chosenNode = nodes[index];
      } while (deadNodes.contains(chosenNode));
    }
    InetSocketAddress targetAddr = NetUtils.createSocketAddr(
        chosenNode.getHost() + ":" + chosenNode.getInfoPort());
    try {
      s = new Socket();
      s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
      s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
    } catch (IOException e) {
      HttpServer.LOG.warn("Failed to connect to " + chosenNode.name, e);
      deadNodes.add(chosenNode);
      // BUGFIX: reset so a *different* node is picked next iteration;
      // previously the same dead node was retried until every attempt failed.
      chosenNode = null;
      s.close();
      s = null;
      failures++;
    } finally {
      if (s != null) {
        // The socket is only a reachability probe; always close it.
        s.close();
      }
    }
    if (failures == nodes.length) {
      throw new IOException("Could not reach the block containing the data. Please try again");
    }
  }
  List<T> candidates;
  if (doRandom) {
    candidates = new ArrayList<T>(1);
    candidates.add(chosenNode);
  } else {
    candidates = new ArrayList<T>(nodes.length - index);
    // BUGFIX: the upper bound was "nodes.length - index", which silently
    // dropped trailing nodes whenever index > 0.
    for (int i = index; i < nodes.length; i++) {
      candidates.add(nodes[i]);
    }
  }
  return candidates;
}
Choose a list datanodes from the specified list . The best one is the first one in the list .
33,687
/**
 * Renders an optional URL query parameter: "&name=val", or the empty string
 * when the value is null.
 */
public static String getUrlParam(String name, String val) {
  if (val == null) {
    return "";
  }
  return "&" + name + "=" + val;
}
Returns the url parameter for the given bpid string .
33,688
// Builds a DFSClient for the namenode named in the request's
// NAMENODE_ADDRESS parameter (resolved via DFSUtil.getSocketAddress).
public static DFSClient getDFSClient ( final HttpServletRequest request , final Configuration conf ) throws IOException , InterruptedException { final String nnAddr = request . getParameter ( JspHelper . NAMENODE_ADDRESS ) ; return new DFSClient ( DFSUtil . getSocketAddress ( nnAddr ) , conf ) ; }
Get DFSClient for a namenode corresponding to the BPID from a datanode
33,689
// Identity reduce: emits every value unchanged under its original key.
public void reduce ( K key , Iterator < V > values , OutputCollector < K , V > output , Reporter reporter ) throws IOException { while ( values . hasNext ( ) ) { output . collect ( key , values . next ( ) ) ; } }
Writes all keys and values directly to output .
33,690
/**
 * Asks the Corona Cluster Manager to restart its task trackers, in batches of
 * {@code batchSize} (or the configured default when {@code batchSize <= 0}).
 * A Safe-Mode refusal is reported but not fatal. Returns 0.
 *
 * @throws IOException on any other Thrift transport/protocol failure
 */
private int restartTaskTracker(boolean forceFlag, int batchSize) throws IOException {
  CoronaConf conf = new CoronaConf(getConf());
  InetSocketAddress address = NetUtils.createSocketAddr(conf.getClusterManagerAddress());
  TFramedTransport transport = new TFramedTransport(
      new TSocket(address.getHostName(), address.getPort()));
  ClusterManagerService.Client client =
      new ClusterManagerService.Client(new TBinaryProtocol(transport));
  int restartBatch = (batchSize > 0) ? batchSize : conf.getCoronaNodeRestartBatch();
  try {
    transport.open();
    RestartNodesArgs restartNodeArgs = new RestartNodesArgs(forceFlag, restartBatch);
    client.restartNodes(restartNodeArgs);
  } catch (SafeModeException e) {
    System.err.println("ClusterManager is in Safe Mode");
  } catch (TException e) {
    throw new IOException(e);
  } finally {
    // BUGFIX: the transport was never closed, leaking the socket.
    transport.close();
  }
  return 0;
}
Command to ask the Cluster Manager to restart all the task trackers
33,691
/**
 * Turns the Cluster Manager's Safe Mode on ({@code safeMode} true) or off
 * (false), printing the outcome. Returns 0.
 *
 * @throws IOException on any Thrift transport/protocol failure
 */
private int setSafeMode(boolean safeMode) throws IOException {
  CoronaConf conf = new CoronaConf(getConf());
  InetSocketAddress address = NetUtils.createSocketAddr(conf.getClusterManagerAddress());
  TFramedTransport transport = new TFramedTransport(
      new TSocket(address.getHostName(), address.getPort()));
  ClusterManagerService.Client client =
      new ClusterManagerService.Client(new TBinaryProtocol(transport));
  try {
    transport.open();
    if (client.setSafeMode(safeMode)) {
      System.out.println("The safeMode is: " + (safeMode ? "ON" : "OFF"));
    } else {
      System.err.println("Could not set the safeMode flag");
    }
  } catch (TException e) {
    throw new IOException(e);
  } finally {
    // BUGFIX: the transport was never closed, leaking the socket.
    transport.close();
  }
  return 0;
}
Turns on the Safe Mode if safeMode is true . Turns off the Safe Mode if safeMode is false .
33,692
// Best-effort: pushes the Safe Mode flag directly to the proxy job tracker;
// failures (IO or Thrift) are reported to stderr but never propagated.
// Always returns 0.
private int forceSetSafeModeOnPJT ( boolean safeMode ) throws IOException { CoronaConf conf = new CoronaConf ( getConf ( ) ) ; try { ClusterManagerAvailabilityChecker . getPJTClient ( conf ) . setClusterManagerSafeModeFlag ( safeMode ) ; } catch ( IOException e ) { System . err . println ( "Could not set the Safe Mode flag on the PJT: " + e ) ; } catch ( TException e ) { System . err . println ( "Could not set the Safe Mode flag on the PJT: " + e ) ; } return 0 ; }
Forcefully set the Safe Mode on the PJT
33,693
// Command-line entry point: runs CoronaAdmin through ToolRunner and exits
// with its status code.
public static void main ( String [ ] args ) throws Exception { int result = ToolRunner . run ( new CoronaAdmin ( ) , args ) ; System . exit ( result ) ; }
Entry point for the tool .
33,694
/**
 * Returns the corona job-details-history URL for {@code jobId} if its history
 * file already exists at {@code jobHistoryFileLocation}, or null when the job
 * has not finished yet (no history file in the done directory).
 */
public static String urlInJobHistory(Path jobHistoryFileLocation, String jobId)
    throws IOException {
  try {
    FileSystem fs = jobHistoryFileLocation.getFileSystem(conf);
    fs.getFileStatus(jobHistoryFileLocation);
  } catch (FileNotFoundException e) {
    // Job not finished: no history log file in the done directory yet.
    return null;
  }
  // BUGFIX: use an explicit charset. The one-arg URLEncoder.encode is
  // deprecated and encodes with the platform default charset, producing
  // different URLs on differently-configured hosts.
  // (UnsupportedEncodingException is an IOException, already declared.)
  return "http://" + LOCALMACHINE + ":" + LOCALPORT
      + "/coronajobdetailshistory.jsp?jobid=" + jobId
      + "&logFile=" + URLEncoder.encode(jobHistoryFileLocation.toString(), "UTF-8");
}
Given the path to the jobHistoryFile check if the file already exists . 1 . If FileNotFoundException is caught it means the job is not yet finished and there is no job history log file in the done directory 2 . If not it means we get a hit for the jobHistoryFile directly ; recover the url to the coronajobdetailshistory page .
33,695
// Accumulates a "value[\tcount]" record: text after the last tab is parsed
// as a long increment (defaulting to 1 when absent) and added to the running
// total for the value in this.items.
// NOTE(review): throws NumberFormatException if the trailing field is not a
// long — presumably inputs are trusted aggregator records; confirm.
public void addNextValue ( Object val ) { String valCountStr = val . toString ( ) ; int pos = valCountStr . lastIndexOf ( "\t" ) ; String valStr = valCountStr ; String countStr = "1" ; if ( pos >= 0 ) { valStr = valCountStr . substring ( 0 , pos ) ; countStr = valCountStr . substring ( pos + 1 ) ; } Long count = ( Long ) this . items . get ( valStr ) ; long inc = Long . parseLong ( countStr ) ; if ( count == null ) { count = inc ; } else { count = count . longValue ( ) + inc ; } items . put ( valStr , count ) ; }
add the given val to the aggregator .
33,696
// Tool entry point: parses arguments via init, bailing out with its non-zero
// exit code on failure, then generates the files and returns 0.
public int run ( String [ ] args ) throws Exception { int exitCode = 0 ; exitCode = init ( args ) ; if ( exitCode != 0 ) { return exitCode ; } genFiles ( ) ; return exitCode ; }
Main function . It first parses the command line arguments . It then reads the directory structure from the input directory structure file and creates the directory structure in the file system namespace . Afterwards it reads the file attributes and creates files in the file system . All file content is filled with the character 'a' .
33,697
/**
 * Reads the directory-structure file from the input directory and creates
 * each listed directory (path relative to the configured root) in the file
 * system namespace.
 */
@SuppressWarnings("unused")
private void genDirStructure() throws IOException {
  BufferedReader in = new BufferedReader(new FileReader(
      new File(inDir, StructureGenerator.DIR_STRUCTURE_FILE_NAME)));
  try {
    String line;
    while ((line = in.readLine()) != null) {
      fs.mkdirs(new Path(root + line));
    }
  } finally {
    // BUGFIX: the reader was never closed, leaking the file handle.
    in.close();
  }
}
Read directory structure file under the input directory . Create each directory under the specified root . The directory names are relative to the specified root .
33,698
// Launches a map-only MapReduce job ("data-genarator") that creates the files
// listed in the structure file: copies the file-structure listing into HDFS
// as job input, configures CreateFiles as the mapper (one map per ~10 files,
// zero reduces), and runs it synchronously.
private void genFiles ( ) throws IOException { config = new Configuration ( getConf ( ) ) ; config . setInt ( "dfs.replication" , 3 ) ; config . set ( "dfs.rootdir" , root . toString ( ) ) ; JobConf job = new JobConf ( config , DataGenerator . class ) ; job . setJobName ( "data-genarator" ) ; FileOutputFormat . setOutputPath ( job , new Path ( "data-generator-result" ) ) ; Path inputPath = new Path ( ROOT + "load_input" ) ; fs . mkdirs ( inputPath ) ; fs . copyFromLocalFile ( new Path ( inDir + "/" + StructureGenerator . FILE_STRUCTURE_FILE_NAME ) , inputPath ) ; FileInputFormat . setInputPaths ( job , new Path ( ROOT + "load_input" ) ) ; job . setInputFormat ( TextInputFormat . class ) ; job . setOutputKeyClass ( Text . class ) ; job . setOutputValueClass ( Text . class ) ; job . setMapperClass ( CreateFiles . class ) ; job . setNumMapTasks ( nFiles / 10 ) ; job . setNumReduceTasks ( 0 ) ; JobClient . runJob ( job ) ; }
Read file structure file under the input directory . Create each file under the specified root . The file names are relative to the root .
33,699
// Creates one SimulatorTaskTracker per machine in the cluster topology:
// registers each host with its rack in StaticMapping, assigns sequential
// ports starting at 10000, and schedules each tracker's initial events at
// millisecond offsets starting 100ms after 'now'.
void startTaskTrackers ( ClusterStory clusterStory , long now ) { int port = 10000 ; long ms = now + 100 ; for ( MachineNode node : clusterStory . getMachines ( ) ) { String hostname = node . getName ( ) ; RackNode rackNode = node . getRackNode ( ) ; StaticMapping . addNodeToRack ( hostname , rackNode . getName ( ) ) ; String taskTrackerName = "tracker_" + hostname + ":localhost/127.0.0.1:" + port ; port ++ ; SimulatorTaskTracker tt = new SimulatorTaskTracker ( jt , taskTrackerName , hostname , node . getMapSlots ( ) , node . getReduceSlots ( ) ) ; queue . addAll ( tt . init ( ms ++ ) ) ; } }
Start simulated task trackers based on topology .