idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
32,900 | public void resetLastInodeId ( long newValue ) throws IOException { try { inodeId . skipTo ( newValue ) ; } catch ( IllegalStateException ise ) { throw new IOException ( ise ) ; } } | Set the last allocated inode id when fsimage or editlog is loaded . |
32,901 | void waitForReady ( ) { if ( ! ready ) { writeLock ( ) ; try { while ( ! ready ) { try { cond . await ( 5000 , TimeUnit . MILLISECONDS ) ; } catch ( InterruptedException ie ) { } } } finally { writeUnlock ( ) ; } } } | Block until the object is ready to be used . |
32,902 | INodeFileUnderConstruction addFile ( String path , String [ ] names , byte [ ] [ ] components , INode [ ] inodes , PermissionStatus permissions , short replication , long preferredBlockSize , String clientName , String clientMachine , DatanodeDescriptor clientNode , long generationStamp , long accessTime ) throws IOException { waitForReady ( ) ; long modTime = FSNamesystem . now ( ) ; if ( inodes [ inodes . length - 2 ] == null ) { if ( ! mkdirs ( names [ names . length - 1 ] , names , components , inodes , inodes . length - 1 , permissions , true , modTime ) ) { return null ; } } else if ( ! inodes [ inodes . length - 2 ] . isDirectory ( ) ) { NameNode . stateChangeLog . info ( "DIR* FSDirectory.addFile: " + "failed to add " + path + " as its parent is not a directory" ) ; throw new FileNotFoundException ( "Parent path is not a directory: " + path ) ; } if ( accessTime == - 1 ) { accessTime = modTime ; } INodeFileUnderConstruction newNode = new INodeFileUnderConstruction ( allocateNewInodeId ( ) , permissions , replication , preferredBlockSize , modTime , accessTime , clientName , clientMachine , clientNode ) ; newNode . setLocalName ( components [ inodes . length - 1 ] ) ; writeLock ( ) ; try { newNode = addChild ( inodes , inodes . length - 1 , newNode , - 1 , false ) ; } finally { writeUnlock ( ) ; } if ( newNode == null ) { NameNode . stateChangeLog . info ( "DIR* FSDirectory.addFile: " + "failed to add " + path + " to the file system" ) ; return null ; } fsImage . getEditLog ( ) . logOpenFile ( path , newNode ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.addFile: " + path + " is added to the file system" ) ; } return newNode ; } | Add the given filename to the fs . |
32,903 | INodeDirectory addToParent ( byte [ ] src , INodeDirectory parentINode , INode newNode , boolean propagateModTime , int childIndex ) { INodeDirectory newParent = null ; writeLock ( ) ; try { try { newParent = rootDir . addToParent ( src , newNode , parentINode , false , propagateModTime , childIndex ) ; cacheName ( newNode ) ; } catch ( FileNotFoundException e ) { return null ; } if ( newParent == null ) return null ; if ( ! newNode . isDirectory ( ) ) { INodeFile newF = ( INodeFile ) newNode ; BlockInfo [ ] blocks = newF . getBlocks ( ) ; for ( int i = 0 ; i < blocks . length ; i ++ ) { newF . setBlock ( i , getFSNamesystem ( ) . blocksMap . addINodeForLoading ( blocks [ i ] , newF ) ) ; } } } finally { writeUnlock ( ) ; } return newParent ; } | Add node to parent node when loading the image . |
32,904 | Block addBlock ( String path , INode [ ] inodes , Block block ) throws IOException { waitForReady ( ) ; writeLock ( ) ; try { INodeFile fileNode = ( INodeFile ) inodes [ inodes . length - 1 ] ; INode . enforceRegularStorageINode ( fileNode , "addBlock only works for regular file, not " + path ) ; updateCount ( inodes , inodes . length - 1 , 0 , fileNode . getPreferredBlockSize ( ) * fileNode . getReplication ( ) , true ) ; BlockInfo blockInfo = getFSNamesystem ( ) . blocksMap . addINode ( block , fileNode , fileNode . getReplication ( ) ) ; fileNode . getStorage ( ) . addBlock ( blockInfo ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.addFile: " + path + " with " + block + " block is added to the in-memory " + "file system" ) ; } } finally { writeUnlock ( ) ; } return block ; } | Add a block to the file . Returns a reference to the added block . |
32,905 | void persistBlocks ( String path , INodeFileUnderConstruction file ) throws IOException { waitForReady ( ) ; writeLock ( ) ; try { fsImage . getEditLog ( ) . logOpenFile ( path , file ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.persistBlocks: " + path + " with " + file . getBlocks ( ) . length + " blocks is persisted to the file system" ) ; } } finally { writeUnlock ( ) ; } } | Persist the block list for the inode . |
32,906 | void closeFile ( String path , INodeFile file ) throws IOException { waitForReady ( ) ; writeLock ( ) ; try { long now = FSNamesystem . now ( ) ; file . setModificationTimeForce ( now ) ; fsImage . getEditLog ( ) . logCloseFile ( path , file ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.closeFile: " + path + " with " + file . getBlocks ( ) . length + " blocks is persisted to the file system" ) ; } } finally { writeUnlock ( ) ; } } | Close file . |
32,907 | boolean removeBlock ( String path , INodeFileUnderConstruction fileNode , Block block ) throws IOException { waitForReady ( ) ; writeLock ( ) ; try { fileNode . removeBlock ( block ) ; getFSNamesystem ( ) . blocksMap . removeBlock ( block ) ; getFSNamesystem ( ) . corruptReplicas . removeFromCorruptReplicasMap ( block ) ; fsImage . getEditLog ( ) . logOpenFile ( path , fileNode ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.removeBlock: " + path + " with " + block + " block is removed from the file system" ) ; } } finally { writeUnlock ( ) ; } return true ; } | Remove the block from a file . |
32,908 | public long getHardLinkId ( String src ) throws IOException { byte [ ] [ ] components = INode . getPathComponents ( src ) ; readLock ( ) ; try { INodeFile node = this . getFileINode ( components ) ; if ( ( ! exists ( node ) ) || ( ! ( node instanceof INodeHardLinkFile ) ) ) { throw new IOException ( src + " is not a valid hardlink file" ) ; } return ( ( INodeHardLinkFile ) node ) . getHardLinkID ( ) ; } finally { readUnlock ( ) ; } } | Retrieves the hardlink id for a given file . |
32,909 | boolean unprotectedHardLinkTo ( String src , String dst , long timestamp ) throws QuotaExceededException , FileNotFoundException { return unprotectedHardLinkTo ( src , null , null , null , dst , null , null , null , timestamp ) ; } | hard link the dst path to the src path |
32,910 | public void raidFile ( INode sourceINodes [ ] , String source , RaidCodec codec , short expectedSourceRepl , Block [ ] parityBlocks ) throws IOException { waitForReady ( ) ; long now = FSNamesystem . now ( ) ; unprotectedRaidFile ( sourceINodes , source , codec , expectedSourceRepl , parityBlocks , now ) ; fsImage . getEditLog ( ) . logRaidFile ( source , codec . id , expectedSourceRepl , now ) ; } | Convert a file into a raid file format |
32,911 | boolean unprotectedRenameTo ( String src , String dst , long timestamp ) throws QuotaExceededException { return unprotectedRenameTo ( src , null , null , null , dst , null , null , timestamp ) ; } | Change a path name |
32,912 | BlockInfo [ ] setReplication ( String src , short replication , int [ ] oldReplication ) throws IOException { waitForReady ( ) ; BlockInfo [ ] fileBlocks = unprotectedSetReplication ( src , replication , oldReplication ) ; if ( fileBlocks != null ) fsImage . getEditLog ( ) . logSetReplication ( src , replication ) ; return fileBlocks ; } | Set file replication |
32,913 | long getPreferredBlockSize ( String filename ) throws IOException { byte [ ] [ ] components = INodeDirectory . getPathComponents ( filename ) ; readLock ( ) ; try { INode fileNode = rootDir . getNode ( components ) ; if ( fileNode == null ) { throw new IOException ( "Unknown file: " + filename ) ; } if ( fileNode . isDirectory ( ) ) { throw new IOException ( "Getting block size of a directory: " + filename ) ; } return ( ( INodeFile ) fileNode ) . getPreferredBlockSize ( ) ; } finally { readUnlock ( ) ; } } | Get the blocksize of a file |
32,914 | INode getINode ( String src ) { src = normalizePath ( src ) ; byte [ ] [ ] components = INodeDirectory . getPathComponents ( src ) ; readLock ( ) ; try { INode inode = rootDir . getNode ( components ) ; return inode ; } finally { readUnlock ( ) ; } } | This is a method required in addition to getFileInode It returns the INode regardless of its type getFileInode only returns the inode if it is of a file type |
32,915 | INode getINode ( long id ) { readLock ( ) ; try { INode inode = inodeMap . get ( id ) ; return inode ; } finally { readUnlock ( ) ; } } | Get the inode from inodeMap based on its inode id . |
32,916 | public void mergeInternal ( INode parityINodes [ ] , INode sourceINodes [ ] , String parity , String source , RaidCodec codec , int [ ] checksums ) throws IOException { waitForReady ( ) ; long now = FSNamesystem . now ( ) ; unprotectedMerge ( parityINodes , sourceINodes , parity , source , codec , checksums , now ) ; fsImage . getEditLog ( ) . logMerge ( parity , source , codec . id , checksums , now ) ; } | Merge all the blocks in the parity file into source file . Source file will be converted into INodeRaidStorage format to include both parity blocks and source blocks |
32,917 | INode delete ( String src , INode [ ] inodes , List < BlockInfo > collectedBlocks , int blocksLimit ) { if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.delete: " + src ) ; } waitForReady ( ) ; long now = FSNamesystem . now ( ) ; INode deletedNode = unprotectedDelete ( src , inodes , collectedBlocks , blocksLimit , now ) ; if ( deletedNode != null ) { fsImage . getEditLog ( ) . logDelete ( src , now ) ; } return deletedNode ; } | Remove the file from management return up to blocksLimit number of blocks |
32,918 | INode unprotectedDelete ( String src , long modificationTime ) { return unprotectedDelete ( src , this . getExistingPathINodes ( src ) , null , BLOCK_DELETION_NO_LIMIT , modificationTime ) ; } | Delete a path from the name space Update the count at each ancestor directory with quota |
32,919 | INode unprotectedDelete ( String src , INode inodes [ ] , List < BlockInfo > toBeDeletedBlocks , int blocksLimit , long modificationTime ) { src = normalizePath ( src ) ; writeLock ( ) ; try { INode targetNode = inodes [ inodes . length - 1 ] ; if ( targetNode == null ) { if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.unprotectedDelete: " + "failed to remove " + src + " because it does not exist" ) ; } return null ; } else if ( inodes . length == 1 ) { NameNode . stateChangeLog . warn ( "DIR* FSDirectory.unprotectedDelete: " + "failed to remove " + src + " because the root is not allowed to be deleted" ) ; return null ; } else { try { removeChild ( inodes , inodes . length - 1 ) ; inodes [ inodes . length - 2 ] . setModificationTime ( modificationTime ) ; if ( toBeDeletedBlocks == null ) { toBeDeletedBlocks = new ArrayList < BlockInfo > ( ) ; blocksLimit = BLOCK_DELETION_NO_LIMIT ; } List < INode > removedINodes = new ArrayList < INode > ( ) ; int filesRemoved = targetNode . collectSubtreeBlocksAndClear ( toBeDeletedBlocks , blocksLimit , removedINodes ) ; FSNamesystem . incrDeletedFileCount ( getFSNamesystem ( ) , filesRemoved ) ; getFSNamesystem ( ) . removePathAndBlocks ( src , toBeDeletedBlocks ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "DIR* FSDirectory.unprotectedDelete: " + src + " is removed" ) ; } targetNode . parent = null ; removeFromInodeMap ( removedINodes ) ; return targetNode ; } catch ( IOException e ) { NameNode . stateChangeLog . warn ( "DIR* FSDirectory.unprotectedDelete: " + "failed to remove " + src + " because " + e . getMessage ( ) ) ; return null ; } } } finally { writeUnlock ( ) ; } } | Delete a path from the name space Update the count at each ancestor directory with quota Up to blocksLimit blocks will be put in toBeDeletedBlocks to be removed later |
32,920 | void removeFromInodeMap ( List < INode > inodes ) { if ( inodes != null ) { for ( INode inode : inodes ) { if ( inode != null ) { inodeMap . remove ( inode ) ; } } } } | Remove inodes from inodeMap Not thread safe needs lock on operations |
32,921 | void replaceNode ( String path , INodeFile oldnode , INodeFile newnode ) throws IOException { replaceNode ( path , null , oldnode , newnode , true ) ; } | Replaces the specified inode with the specified one . |
32,922 | public List < FileStatusExtended > getRandomFileStats ( double percent ) { readLock ( ) ; try { List < FileStatusExtended > stats = new LinkedList < FileStatusExtended > ( ) ; for ( INodeFile file : getRandomFiles ( percent ) ) { try { String path = file . getFullPathName ( ) ; FileStatus stat = createFileStatus ( path , file ) ; Lease lease = this . getFSNamesystem ( ) . leaseManager . getLeaseByPath ( path ) ; String holder = ( lease == null ) ? null : lease . getHolder ( ) ; long hardlinkId = ( file instanceof INodeHardLinkFile ) ? ( ( INodeHardLinkFile ) file ) . getHardLinkID ( ) : - 1 ; stats . add ( new FileStatusExtended ( stat , file . getBlocks ( ) , holder , hardlinkId ) ) ; } catch ( IOException ioe ) { } } return stats ; } finally { readUnlock ( ) ; } } | Retrieves a list of random files with some information . |
32,923 | Block [ ] getFileBlocks ( String src ) { waitForReady ( ) ; byte [ ] [ ] components = INodeDirectory . getPathComponents ( src ) ; readLock ( ) ; try { INode targetNode = rootDir . getNode ( components ) ; if ( targetNode == null ) return null ; if ( targetNode . isDirectory ( ) ) return null ; return ( ( INodeFile ) targetNode ) . getBlocks ( ) ; } finally { readUnlock ( ) ; } } | Get the blocks associated with the file . |
32,924 | public INode [ ] getExistingPathINodes ( String path ) { byte [ ] [ ] components = INode . getPathComponents ( path ) ; INode [ ] inodes = new INode [ components . length ] ; readLock ( ) ; try { rootDir . getExistingPathINodes ( components , inodes ) ; return inodes ; } finally { readUnlock ( ) ; } } | Retrieve the existing INodes along the given path . |
32,925 | boolean isValidToCreate ( String src ) { String srcs = normalizePath ( src ) ; byte [ ] [ ] components = INodeDirectory . getPathComponents ( srcs ) ; readLock ( ) ; try { if ( srcs . startsWith ( "/" ) && ! srcs . endsWith ( "/" ) && rootDir . getNode ( components ) == null ) { return true ; } else { return false ; } } finally { readUnlock ( ) ; } } | Check whether the filepath could be created |
32,926 | boolean isDir ( String src ) { byte [ ] [ ] components = INodeDirectory . getPathComponents ( normalizePath ( src ) ) ; readLock ( ) ; try { INode node = rootDir . getNode ( components ) ; return isDir ( node ) ; } finally { readUnlock ( ) ; } } | Check whether the path specifies a directory |
32,927 | private void updateCount ( INode [ ] inodes , int numOfINodes , long nsDelta , long dsDelta , boolean checkQuota ) throws QuotaExceededException { this . updateCount ( inodes , 0 , numOfINodes , nsDelta , dsDelta , checkQuota ) ; } | Update count of each inode with quota in the inodes array from the position of 0 to the position of numOfINodes |
32,928 | private void updateCount ( INode [ ] inodes , int dsUpdateStartPos , int endPos , long nsDelta , long dsDelta , boolean checkQuota ) throws QuotaExceededException { if ( ! ready ) { return ; } if ( endPos > inodes . length ) { endPos = inodes . length ; } if ( checkQuota ) { verifyQuota ( inodes , 0 , dsUpdateStartPos , endPos , nsDelta , dsDelta ) ; } for ( int i = 0 ; i < endPos ; i ++ ) { if ( inodes [ i ] . isQuotaSet ( ) ) { INodeDirectoryWithQuota node = ( INodeDirectoryWithQuota ) inodes [ i ] ; if ( i >= dsUpdateStartPos ) { node . updateNumItemsInTree ( nsDelta , dsDelta ) ; } else { node . updateNumItemsInTree ( nsDelta , 0 ) ; } } } } | Update NS quota of each inode with quota in the inodes array from the position 0 to the position of endPos . |
32,929 | private void updateCountNoQuotaCheck ( INode [ ] inodes , int startPos , int endPos , long nsDelta , long dsDelta ) { try { updateCount ( inodes , startPos , endPos , nsDelta , dsDelta , false ) ; } catch ( QuotaExceededException e ) { NameNode . LOG . warn ( "FSDirectory.updateCountNoQuotaCheck - unexpected " , e ) ; } } | Update quota of each inode in the inodes array from the position of startPos to the position of endPos . But it won t throw out the QuotaExceededException . |
32,930 | private void unprotectedUpdateCount ( INode [ ] inodes , int numOfINodes , long nsDelta , long dsDelta ) { for ( int i = 0 ; i < numOfINodes ; i ++ ) { if ( inodes [ i ] . isQuotaSet ( ) ) { INodeDirectoryWithQuota node = ( INodeDirectoryWithQuota ) inodes [ i ] ; node . updateNumItemsInTree ( nsDelta , dsDelta ) ; } } } | updates quota without verification callers responsibility is to make sure quota is not exceeded |
32,931 | static String getFullPathName ( byte [ ] [ ] names ) { StringBuilder fullPathName = new StringBuilder ( ) ; for ( int i = 1 ; i < names . length ; i ++ ) { byte [ ] name = names [ i ] ; fullPathName . append ( Path . SEPARATOR_CHAR ) . append ( DFSUtil . bytes2String ( name ) ) ; } return fullPathName . toString ( ) ; } | Return the name of the path represented by the byte array |
32,932 | static INode [ ] getINodeArray ( INode inode ) throws IOException { int depth = getPathDepth ( inode ) ; INode [ ] inodes = new INode [ depth ] ; for ( int i = 0 ; i < depth ; i ++ ) { inodes [ depth - i - 1 ] = inode ; inode = inode . parent ; } return inodes ; } | Return the inode array representing the given inode s full path name |
32,933 | static private int getPathDepth ( INode inode ) throws IOException { int depth = 1 ; INode node ; for ( node = inode ; node . parent != null ; node = node . parent ) { depth ++ ; } if ( node . isRoot ( ) ) { return depth ; } throw new IOException ( "Invalid inode: " + inode . getLocalName ( ) ) ; } | Get the depth of node inode from root |
32,934 | static byte [ ] [ ] getINodeByteArray ( INode inode ) throws IOException { int depth = getPathDepth ( inode ) ; byte [ ] [ ] names = new byte [ depth ] [ ] ; for ( int i = 0 ; i < depth ; i ++ ) { names [ depth - i - 1 ] = inode . getLocalNameBytes ( ) ; inode = inode . parent ; } return names ; } | Return the byte array representing the given inode s full path name |
32,935 | static String getFullPathName ( INode inode ) throws IOException { INode [ ] inodes = getINodeArray ( inode ) ; return getFullPathName ( inodes , inodes . length - 1 ) ; } | Return the full path name of the specified inode |
32,936 | private < T extends INode > T addNode ( String src , T child , long childDiskspace , boolean inheritPermission ) throws QuotaExceededException { byte [ ] [ ] components = INode . getPathComponents ( src ) ; byte [ ] path = components [ components . length - 1 ] ; child . setLocalName ( path ) ; cacheName ( child ) ; INode [ ] inodes = new INode [ components . length ] ; writeLock ( ) ; try { rootDir . getExistingPathINodes ( components , inodes ) ; return addChild ( inodes , inodes . length - 1 , child , childDiskspace , inheritPermission ) ; } finally { writeUnlock ( ) ; } } | Add a node child to the namespace . The full path name of the node is src . childDiskspace should be - 1 if unknown . QuotaExceededException is thrown if it violates quota limit |
32,937 | private void verifyQuota ( INode [ ] inodes , int nsQuotaStartPos , int dsQuotaStartPos , int endPos , long nsDelta , long dsDelta ) throws QuotaExceededException { if ( ! ready ) { return ; } if ( endPos > inodes . length ) { endPos = inodes . length ; } int i = endPos - 1 ; Assert . assertTrue ( "nsQuotaStartPos shall be less or equal than the dsQuotaStartPos" , ( nsQuotaStartPos <= dsQuotaStartPos ) ) ; try { for ( ; i >= nsQuotaStartPos ; i -- ) { if ( inodes [ i ] . isQuotaSet ( ) ) { INodeDirectoryWithQuota node = ( INodeDirectoryWithQuota ) inodes [ i ] ; if ( i >= dsQuotaStartPos ) { node . verifyQuota ( nsDelta , dsDelta ) ; } else { node . verifyQuota ( nsDelta , 0 ) ; } } } } catch ( QuotaExceededException e ) { e . setPathName ( getFullPathName ( inodes , i ) ) ; throw e ; } } | Verify quota for adding or moving a new INode with required namespace and diskspace to a given position . |
32,938 | private static void updateCountForINodeWithQuota ( INodeDirectory dir , INode . DirCounts counts , ArrayList < INode > nodesInPath ) { long parentNamespace = counts . nsCount ; long parentDiskspace = counts . dsCount ; counts . nsCount = 1L ; counts . dsCount = 0L ; nodesInPath . add ( dir ) ; for ( INode child : dir . getChildren ( ) ) { if ( child . isDirectory ( ) ) { updateCountForINodeWithQuota ( ( INodeDirectory ) child , counts , nodesInPath ) ; } else { counts . nsCount += 1 ; counts . dsCount += ( ( INodeFile ) child ) . diskspaceConsumed ( ) ; } } if ( dir . isQuotaSet ( ) ) { ( ( INodeDirectoryWithQuota ) dir ) . setSpaceConsumed ( counts . nsCount , counts . dsCount ) ; if ( ( dir . getNsQuota ( ) >= 0 && counts . nsCount > dir . getNsQuota ( ) ) || ( dir . getDsQuota ( ) >= 0 && counts . dsCount > dir . getDsQuota ( ) ) ) { StringBuilder path = new StringBuilder ( 512 ) ; for ( INode n : nodesInPath ) { path . append ( '/' ) ; path . append ( n . getLocalName ( ) ) ; } NameNode . LOG . warn ( "Quota violation in image for " + path + " (Namespace quota : " + dir . getNsQuota ( ) + " consumed : " + counts . nsCount + ")" + " (Diskspace quota : " + dir . getDsQuota ( ) + " consumed : " + counts . dsCount + ")." ) ; } } nodesInPath . remove ( nodesInPath . size ( ) - 1 ) ; counts . nsCount += parentNamespace ; counts . dsCount += parentDiskspace ; } | Update the count of the directory if it has a quota and return the count |
32,939 | static FileStatus createFileStatus ( String path , INode node ) { return new FileStatus ( node . isDirectory ( ) ? 0 : node . computeContentSummary ( ) . getLength ( ) , node . isDirectory ( ) , node . isDirectory ( ) ? 0 : ( ( INodeFile ) node ) . getReplication ( ) , node . isDirectory ( ) ? 0 : ( ( INodeFile ) node ) . getPreferredBlockSize ( ) , node . getModificationTime ( ) , node . getAccessTime ( ) , node . getFsPermission ( ) , node . getUserName ( ) , node . getGroupName ( ) , new Path ( path ) ) ; } | Create FileStatus by file INode |
32,940 | private static HdfsFileStatus createHdfsFileStatus ( byte [ ] path , INode node ) { long size = 0 ; short replication = 0 ; long blocksize = 0 ; if ( node instanceof INodeFile ) { INodeFile fileNode = ( INodeFile ) node ; size = fileNode . getFileSize ( ) ; replication = fileNode . getReplication ( ) ; blocksize = fileNode . getPreferredBlockSize ( ) ; } else if ( node . isDirectory ( ) ) { INodeDirectory dirNode = ( INodeDirectory ) node ; size = dirNode . getChildren ( ) . size ( ) ; } return new HdfsFileStatus ( size , node . isDirectory ( ) , replication , blocksize , node . getModificationTime ( ) , node . getAccessTime ( ) , node . getFsPermission ( ) , node . getUserName ( ) , node . getGroupName ( ) , path ) ; } | Create HdfsFileStatus by file INode |
32,941 | private LocatedBlocks createLocatedBlocks ( INode node ) throws IOException { LocatedBlocks loc = null ; if ( node instanceof INodeFile ) { loc = getFSNamesystem ( ) . getBlockLocationsInternal ( ( INodeFile ) node , 0L , Long . MAX_VALUE , Integer . MAX_VALUE ) ; } if ( loc == null ) { loc = EMPTY_BLOCK_LOCS ; } return loc ; } | Create FileStatus with location info by file INode |
32,942 | public synchronized void close ( ) throws IOException { TreeMap < TaskAttemptID , TaskInProgress > tasksToClose = new TreeMap < TaskAttemptID , TaskInProgress > ( ) ; tasksToClose . putAll ( tasks ) ; for ( TaskInProgress tip : tasksToClose . values ( ) ) { tip . jobHasFinished ( false ) ; } this . running = false ; if ( pulseChecker != null ) { pulseChecker . shutdown ( ) ; } if ( versionBeanName != null ) { MBeanUtil . unregisterMBean ( versionBeanName ) ; } if ( asyncDiskService != null ) { asyncDiskService . cleanupAllVolumes ( ) ; asyncDiskService . shutdown ( ) ; try { if ( ! asyncDiskService . awaitTermination ( 10000 ) ) { asyncDiskService . shutdownNow ( ) ; asyncDiskService = null ; } } catch ( InterruptedException e ) { asyncDiskService . shutdownNow ( ) ; asyncDiskService = null ; } } if ( this . mapEventsFetcher != null ) { this . mapEventsFetcher . interrupt ( ) ; } this . mapLauncher . interrupt ( ) ; this . reduceLauncher . interrupt ( ) ; if ( this . heartbeatMonitor != null ) { this . heartbeatMonitor . interrupt ( ) ; } if ( this . taskMemoryManager != null ) { this . taskMemoryManager . shutdown ( ) ; } this . cgroupMemoryWatcher . shutdown ( ) ; getTaskLogsMonitor ( ) . interrupt ( ) ; jvmManager . stop ( ) ; RPC . stopProxy ( jobClient ) ; for ( boolean done = false ; ! done ; ) { try { if ( this . mapEventsFetcher != null ) { this . mapEventsFetcher . join ( ) ; } done = true ; } catch ( InterruptedException e ) { } } if ( taskReportServer != null ) { taskReportServer . stop ( ) ; taskReportServer = null ; } if ( healthChecker != null ) { healthChecker . stop ( ) ; healthChecker = null ; } if ( this . server != null ) { try { LOG . info ( "Shutting down StatusHttpServer" ) ; this . server . stop ( ) ; LOG . info ( "Shutting down Netty MapOutput Server" ) ; if ( this . nettyMapOutputServer != null ) { this . nettyMapOutputServer . stop ( ) ; } } catch ( Exception e ) { LOG . 
warn ( "Exception shutting down TaskTracker" , e ) ; } } } | Close down the TaskTracker and all its components . We must also shutdown any running tasks or threads and cleanup disk space . A new TaskTracker within the same process space might be restarted so everything must be clean . |
32,943 | private static TaskCompletionEvent getTceFromStore ( TaskCompletionEvent t ) { synchronized ( taskCompletionEventsStore ) { WeakReference < TaskCompletionEvent > e = taskCompletionEventsStore . get ( t ) ; if ( e == null ) { taskCompletionEventsStore . put ( t , new WeakReference < TaskCompletionEvent > ( t ) ) ; return t ; } TaskCompletionEvent tceFromStore = e . get ( ) ; if ( tceFromStore == null ) { taskCompletionEventsStore . put ( t , new WeakReference < TaskCompletionEvent > ( t ) ) ; return t ; } return tceFromStore ; } } | Given a TaskCompletionEvent it checks the store and returns an equivalent copy that can be used instead . If not in the store it adds it to the store and returns the same supplied TaskCompletionEvent . If the caller uses the stored copy we have an opportunity to save memory . |
32,944 | private List < TaskCompletionEvent > queryJobTracker ( IntWritable fromEventId , JobID jobId , InterTrackerProtocol jobClient ) throws IOException { if ( jobClient == null ) { List < TaskCompletionEvent > empty = Collections . emptyList ( ) ; return empty ; } TaskCompletionEvent t [ ] = jobClient . getTaskCompletionEvents ( jobId , fromEventId . get ( ) , probe_sample_size ) ; List < TaskCompletionEvent > recentMapEvents = new ArrayList < TaskCompletionEvent > ( ) ; for ( int i = 0 ; i < t . length ; i ++ ) { if ( t [ i ] . isMap ) { if ( useTaskCompletionEventsStore ) { recentMapEvents . add ( getTceFromStore ( t [ i ] ) ) ; } else { recentMapEvents . add ( t [ i ] ) ; } } } fromEventId . set ( fromEventId . get ( ) + t . length ) ; return recentMapEvents ; } | Queries the job tracker for a set of outputs ready to be copied |
32,945 | protected HeartbeatResponse transmitHeartBeat ( InterTrackerProtocol jobClient , short heartbeatResponseId , TaskTrackerStatus status ) throws IOException { boolean askForNewTask ; long localMinSpaceStart ; synchronized ( this ) { askForNewTask = ( ( status . countOccupiedMapSlots ( ) < maxMapSlots || status . countOccupiedReduceSlots ( ) < maxReduceSlots ) && acceptNewTasks ) ; localMinSpaceStart = minSpaceStart ; } if ( askForNewTask ) { checkLocalDirs ( getLocalDirsFromConf ( fConf ) ) ; askForNewTask = enoughFreeSpace ( localMinSpaceStart ) ; gatherResourceStatus ( status ) ; } TaskTrackerHealthStatus healthStatus = status . getHealthStatus ( ) ; synchronized ( this ) { if ( healthChecker != null ) { healthChecker . setHealthStatus ( healthStatus ) ; } else { healthStatus . setNodeHealthy ( true ) ; healthStatus . setLastReported ( 0L ) ; healthStatus . setHealthReport ( "" ) ; } } HeartbeatResponse heartbeatResponse = jobClient . heartbeat ( status , justStarted , justInited , askForNewTask , heartbeatResponseId ) ; synchronized ( this ) { for ( TaskStatus taskStatus : status . getTaskReports ( ) ) { if ( taskStatus . getRunState ( ) != TaskStatus . State . RUNNING && taskStatus . getRunState ( ) != TaskStatus . State . UNASSIGNED && taskStatus . getRunState ( ) != TaskStatus . State . COMMIT_PENDING && ! taskStatus . inTaskCleanupPhase ( ) ) { if ( taskStatus . getIsMap ( ) ) { mapTotal -- ; } else { reduceTotal -- ; } try { myInstrumentation . completeTask ( taskStatus . getTaskID ( ) ) ; } catch ( MetricsException me ) { LOG . warn ( "Caught: " + StringUtils . stringifyException ( me ) ) ; } removeRunningTask ( taskStatus . getTaskID ( ) ) ; if ( fConf . getBoolean ( LOG_FINISHED_TASK_COUNTERS , true ) ) { String logHeader = "TaskCountersLogged " + taskStatus . getTaskID ( ) + " " + taskStatus . getFinishTime ( ) / 1000 + " " ; if ( fConf . getInt ( FINISHED_TASK_COUNTERS_LOG_FORMAT , 0 ) == 0 ) { LOG . warn ( logHeader + taskStatus . 
getCounters ( ) . makeJsonString ( ) ) ; } else { LOG . warn ( logHeader + taskStatus . getCounters ( ) . makeCompactString ( ) ) ; } } } } for ( TaskInProgress tip : runningTasks . values ( ) ) { tip . getStatus ( ) . clearStatus ( ) ; } } return heartbeatResponse ; } | Build and transmit the heart beat to the JobTracker |
32,946 | private boolean reinitTaskTracker ( TaskTrackerAction [ ] actions ) { if ( actions != null ) { for ( TaskTrackerAction action : actions ) { if ( action . getActionId ( ) == TaskTrackerAction . ActionType . REINIT_TRACKER ) { LOG . info ( "Recieved RenitTrackerAction from JobTracker" ) ; return true ; } } } return false ; } | Check if the jobtracker directed a reset of the tasktracker . |
32,947 | protected synchronized void markUnresponsiveTasks ( ) throws IOException { long now = System . currentTimeMillis ( ) ; for ( TaskInProgress tip : runningTasks . values ( ) ) { if ( tip . getRunState ( ) == TaskStatus . State . RUNNING || tip . getRunState ( ) == TaskStatus . State . COMMIT_PENDING || tip . isCleaningup ( ) ) { long jobTaskTimeout = tip . getTaskTimeout ( ) ; if ( jobTaskTimeout == 0 ) { continue ; } long timeSinceLastReport = now - tip . getLastProgressReport ( ) ; if ( timeSinceLastReport > jobTaskTimeout && ! tip . wasKilled ) { String msg = "Task " + tip . getTask ( ) . getTaskID ( ) + " failed to report status for " + ( timeSinceLastReport / 1000 ) + " seconds. Killing!" ; LOG . info ( tip . getTask ( ) . getTaskID ( ) + ": " + msg ) ; ReflectionUtils . logThreadInfo ( LOG , "lost task" , 30 ) ; tip . reportDiagnosticInfo ( msg ) ; myInstrumentation . timedoutTask ( tip . getTask ( ) . getTaskID ( ) ) ; purgeTask ( tip , true ) ; } } } } | Kill any tasks that have not reported progress in the last X seconds . |
32,948 | protected synchronized void purgeJob ( KillJobAction action ) throws IOException { JobID jobId = action . getJobID ( ) ; LOG . info ( "Received 'KillJobAction' for job: " + jobId ) ; RunningJob rjob = null ; synchronized ( runningJobs ) { rjob = runningJobs . get ( jobId ) ; } if ( rjob == null ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Unknown job " + jobId + " being deleted." ) ; } } else { synchronized ( rjob ) { for ( TaskInProgress tip : rjob . tasks ) { tip . jobHasFinished ( false ) ; Task t = tip . getTask ( ) ; if ( t . isMapTask ( ) ) { indexCache . removeMap ( tip . getTask ( ) . getTaskID ( ) . toString ( ) ) ; } if ( this . runningTasks . containsKey ( t . getTaskID ( ) ) ) { LOG . info ( "Remove " + t . getTaskID ( ) + " from runningTask by purgeJob" ) ; this . runningTasks . remove ( t . getTaskID ( ) ) ; } } if ( ! rjob . keepJobFiles ) { PathDeletionContext [ ] contexts = buildPathDeletionContexts ( localFs , getLocalFiles ( fConf , getLocalJobDir ( rjob . getJobID ( ) . toString ( ) ) ) ) ; directoryCleanupThread . addToQueue ( contexts ) ; } rjob . tasks . clear ( ) ; } } synchronized ( runningJobs ) { runningJobs . remove ( jobId ) ; } } | The task tracker is done with this job so we need to clean up . |
32,949 | private void purgeTask ( TaskInProgress tip , boolean wasFailure ) throws IOException { if ( tip != null ) { LOG . info ( "About to purge task: " + tip . getTask ( ) . getTaskID ( ) ) ; removeTaskFromJob ( tip . getTask ( ) . getJobID ( ) , tip ) ; tip . jobHasFinished ( wasFailure ) ; if ( tip . getTask ( ) . isMapTask ( ) ) { indexCache . removeMap ( tip . getTask ( ) . getTaskID ( ) . toString ( ) ) ; } } } | Remove the tip and update all relevant state . |
32,950 | protected void killOverflowingTasks ( ) throws IOException { long localMinSpaceKill ; synchronized ( this ) { localMinSpaceKill = minSpaceKill ; } if ( ! enoughFreeSpace ( localMinSpaceKill ) ) { acceptNewTasks = false ; synchronized ( this ) { TaskInProgress killMe = findTaskToKill ( null ) ; if ( killMe != null ) { String msg = "Tasktracker running out of space." + " Killing task." ; LOG . info ( killMe . getTask ( ) . getTaskID ( ) + ": " + msg ) ; killMe . reportDiagnosticInfo ( msg ) ; purgeTask ( killMe , false ) ; } } } } | Check if we re dangerously low on disk space If so kill jobs to free up space and make sure we don t accept any new tasks Try killing the reduce jobs first since I believe they use up most space Then pick the one with least progress |
32,951 | long getLogDiskFreeSpace ( ) throws IOException { String logDir = fConf . getLogDir ( ) ; if ( logDir == null ) { return Long . MAX_VALUE ; } DF df = localDirsDf . get ( logDir ) ; if ( df == null ) { df = new DF ( new File ( logDir ) , fConf ) ; localDirsDf . put ( logDir , df ) ; } return df . getAvailable ( ) ; } | Obtain the free space on the log disk . If the log disk is not configured returns Long . MAX_VALUE |
32,952 | long tryToGetOutputSize ( TaskAttemptID taskId , JobConf conf ) { try { TaskInProgress tip ; synchronized ( this ) { tip = tasks . get ( taskId ) ; } if ( tip == null ) return - 1 ; if ( ! tip . getTask ( ) . isMapTask ( ) || tip . getRunState ( ) != TaskStatus . State . SUCCEEDED ) { return - 1 ; } MapOutputFile mapOutputFile = new MapOutputFile ( ) ; mapOutputFile . setJobId ( taskId . getJobID ( ) ) ; mapOutputFile . setConf ( conf ) ; if ( this . simulatedTaskMode ) { return 0 ; } Path tmp_output = null ; try { tmp_output = mapOutputFile . getOutputFile ( taskId ) ; } catch ( DiskErrorException dex ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Error getting map output of a task " + taskId , dex ) ; } } if ( tmp_output == null ) return 0 ; FileSystem localFS = FileSystem . getLocal ( conf ) ; FileStatus stat = localFS . getFileStatus ( tmp_output ) ; if ( stat == null ) return 0 ; else return stat . getLen ( ) ; } catch ( IOException e ) { LOG . info ( e ) ; return - 1 ; } } | Try to get the size of output for this task . Returns - 1 if it can t be found . |
32,953 | private void startNewTask ( TaskInProgress tip ) { try { boolean launched = localizeAndLaunchTask ( tip ) ; if ( ! launched ) { tip . kill ( true ) ; tip . cleanup ( true ) ; } } catch ( Throwable e ) { String msg = ( "Error initializing " + tip . getTask ( ) . getTaskID ( ) + ":\n" + StringUtils . stringifyException ( e ) ) ; LOG . error ( msg , e ) ; tip . reportDiagnosticInfo ( msg ) ; try { tip . kill ( true ) ; tip . cleanup ( true ) ; } catch ( IOException ie2 ) { LOG . info ( "Error cleaning up " + tip . getTask ( ) . getTaskID ( ) + ":\n" + StringUtils . stringifyException ( ie2 ) ) ; } if ( e instanceof Error ) { throw ( ( Error ) e ) ; } } } | Start a new task . All exceptions are handled locally so that we don t mess up the task tracker . |
32,954 | private boolean localizeAndLaunchTask ( final TaskInProgress tip ) throws IOException { FutureTask < Boolean > task = new FutureTask < Boolean > ( new Callable < Boolean > ( ) { public Boolean call ( ) throws IOException { JobConf localConf = localizeJob ( tip ) ; boolean launched = false ; synchronized ( tip ) { tip . setJobConf ( localConf ) ; launched = tip . launchTask ( ) ; } return launched ; } } ) ; String threadName = "Localizing " + tip . getTask ( ) . toString ( ) ; Thread thread = new Thread ( task ) ; thread . setName ( threadName ) ; thread . setDaemon ( true ) ; thread . start ( ) ; boolean launched = false ; try { launched = task . get ( LOCALIZE_TASK_TIMEOUT , TimeUnit . MILLISECONDS ) ; } catch ( Exception e ) { task . cancel ( true ) ; try { LOG . info ( "Wait the localizeTask thread to finish" ) ; thread . join ( LOCALIZE_TASK_TIMEOUT ) ; } catch ( InterruptedException ie ) { } if ( thread . isAlive ( ) ) { LOG . error ( "Stacktrace of " + threadName + "\n" + StringUtils . stackTraceOfThread ( thread ) ) ; LOG . fatal ( "Cannot kill the localizeTask thread." + threadName + " TaskTracker has to die!!" ) ; System . exit ( - 1 ) ; } throw new IOException ( "TaskTracker got stuck for localized Task:" + tip . getTask ( ) . getTaskID ( ) , e ) ; } return launched ; } | Localize and launch the task . If it takes too long try cancel the thread that does localization . If the thread cannot be terminated kill the JVM . The TaskTracker will die . |
32,955 | private void notifyTTAboutTaskCompletion ( ) { if ( oobHeartbeatOnTaskCompletion ) { synchronized ( finishedCount ) { int value = finishedCount . get ( ) ; finishedCount . set ( value + 1 ) ; finishedCount . notifyAll ( ) ; } } } | Notify the tasktracker to send an out - of - band heartbeat . |
32,956 | public synchronized JvmTask getTask ( JvmContext context ) throws IOException { JVMId jvmId = context . jvmId ; LOG . debug ( "JVM with ID : " + jvmId + " asked for a task" ) ; jvmManager . setPidToJvm ( jvmId , context . pid ) ; if ( ! jvmManager . isJvmKnown ( jvmId ) ) { LOG . info ( "Killing unknown JVM " + jvmId ) ; return new JvmTask ( null , true ) ; } RunningJob rjob = runningJobs . get ( jvmId . getJobId ( ) ) ; if ( rjob == null ) { LOG . info ( "Killing JVM " + jvmId + " since job " + jvmId . getJobId ( ) + " is dead" ) ; jvmManager . killJvm ( jvmId ) ; return new JvmTask ( null , true ) ; } TaskInProgress tip = jvmManager . getTaskForJvm ( jvmId ) ; if ( tip == null ) { return new JvmTask ( null , false ) ; } if ( taskMemoryControlGroupEnabled ) { long limit = getMemoryLimit ( tip . getJobConf ( ) , tip . getTask ( ) . isMapTask ( ) ) ; ttMemCgroup . addTask ( tip . getTask ( ) . getTaskID ( ) . toString ( ) , context . pid , limit ) ; } if ( taskCPUControlGroupEnabled ) ttCPUCgroup . addTask ( tip . getTask ( ) . getTaskID ( ) . toString ( ) , context . pid ) ; if ( tasks . get ( tip . getTask ( ) . getTaskID ( ) ) != null ) { LOG . info ( "JVM with ID: " + jvmId + " given task: " + tip . getTask ( ) . getTaskID ( ) ) ; return new JvmTask ( tip . getTask ( ) , false ) ; } else { LOG . info ( "Killing JVM with ID: " + jvmId + " since scheduled task: " + tip . getTask ( ) . getTaskID ( ) + " is " + tip . taskStatus . getRunState ( ) ) ; return new JvmTask ( null , true ) ; } } | Called upon startup by the child process to fetch Task data . |
32,957 | public synchronized boolean statusUpdate ( TaskAttemptID taskid , TaskStatus taskStatus ) throws IOException { TaskInProgress tip = tasks . get ( taskid ) ; if ( tip != null ) { tip . reportProgress ( taskStatus ) ; myInstrumentation . statusUpdate ( tip . getTask ( ) , taskStatus ) ; return true ; } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Progress from unknown child task: " + taskid ) ; } return false ; } } | Called periodically to report Task progress from 0 . 0 to 1 . 0 . |
32,958 | public synchronized void reportDiagnosticInfo ( TaskAttemptID taskid , String info ) throws IOException { TaskInProgress tip = tasks . get ( taskid ) ; if ( tip != null ) { tip . reportDiagnosticInfo ( info ) ; } else { LOG . warn ( "Error from unknown child task: " + taskid + ". Ignored." ) ; } } | Called when the task dies before completion and we want to report back diagnostic info |
32,959 | public synchronized void commitPending ( TaskAttemptID taskid , TaskStatus taskStatus ) throws IOException { LOG . info ( "Task " + taskid + " is in commit-pending," + "" + " task state:" + taskStatus . getRunState ( ) ) ; statusUpdate ( taskid , taskStatus ) ; reportTaskFinished ( taskid , true ) ; } | Task is reporting that it is in commit_pending and it is waiting for the commit Response |
32,960 | public synchronized void done ( TaskAttemptID taskid ) throws IOException { TaskInProgress tip = tasks . get ( taskid ) ; commitResponses . remove ( taskid ) ; if ( tip != null ) { tip . reportDone ( ) ; } else { LOG . warn ( "Unknown child task done: " + taskid + ". Ignored." ) ; } } | The task is done . |
32,961 | public synchronized void shuffleError ( TaskAttemptID taskId , String message ) throws IOException { LOG . fatal ( "Task: " + taskId + " - Killed due to Shuffle Failure: " + message ) ; TaskInProgress tip = runningTasks . get ( taskId ) ; if ( tip != null ) { tip . reportDiagnosticInfo ( "Shuffle Error: " + message ) ; purgeTask ( tip , true ) ; } } | A reduce - task failed to shuffle the map - outputs . Kill the task . |
32,962 | public synchronized void fsError ( TaskAttemptID taskId , String message ) throws IOException { LOG . fatal ( "Task: " + taskId + " - Killed due to FSError: " + message ) ; TaskInProgress tip = runningTasks . get ( taskId ) ; if ( tip != null ) { tip . reportDiagnosticInfo ( "FSError: " + message ) ; purgeTask ( tip , true ) ; } if ( isDiskOutOfSpaceError ( message ) ) { this . myInstrumentation . diskOutOfSpaceTask ( taskId ) ; } } | A child task had a local filesystem error . Kill the task . |
32,963 | public synchronized void fatalError ( TaskAttemptID taskId , String msg ) throws IOException { LOG . fatal ( "Task: " + taskId + " - Killed : " + msg ) ; TaskInProgress tip = runningTasks . get ( taskId ) ; if ( tip != null ) { tip . reportDiagnosticInfo ( "Error: " + msg ) ; purgeTask ( tip , true ) ; } } | A child task had a fatal error . Kill the task . |
32,964 | void reportTaskFinished ( TaskAttemptID taskid , boolean commitPending ) { TaskInProgress tip ; synchronized ( this ) { tip = tasks . get ( taskid ) ; } if ( tip != null ) { tip . reportTaskFinished ( commitPending ) ; } else { LOG . warn ( "Unknown child task finished: " + taskid + ". Ignored." ) ; } } | The task is no longer running . It may not have completed successfully |
32,965 | public synchronized void mapOutputLost ( TaskAttemptID taskid , String errorMsg ) throws IOException { TaskInProgress tip = tasks . get ( taskid ) ; if ( tip != null ) { tip . mapOutputLost ( errorMsg ) ; } else { LOG . warn ( "Unknown child with bad map output: " + taskid + ". Ignored." ) ; } } | A completed map task s output has been lost . |
32,966 | synchronized List < TaskStatus > getRunningTaskStatuses ( ) { List < TaskStatus > result = new ArrayList < TaskStatus > ( runningTasks . size ( ) ) ; for ( TaskInProgress tip : runningTasks . values ( ) ) { result . add ( tip . getStatus ( ) ) ; } return result ; } | Get the list of tasks that will be reported back to the job tracker in the next heartbeat cycle . |
32,967 | public synchronized List < TaskStatus > getNonRunningTasks ( ) { List < TaskStatus > result = new ArrayList < TaskStatus > ( tasks . size ( ) ) ; for ( Map . Entry < TaskAttemptID , TaskInProgress > task : tasks . entrySet ( ) ) { if ( ! runningTasks . containsKey ( task . getKey ( ) ) ) { result . add ( task . getValue ( ) . getStatus ( ) ) ; } } return result ; } | Get the list of stored tasks on this task tracker . |
32,968 | synchronized List < TaskStatus > getTasksFromRunningJobs ( ) { List < TaskStatus > result = new ArrayList < TaskStatus > ( tasks . size ( ) ) ; for ( Map . Entry < JobID , RunningJob > item : runningJobs . entrySet ( ) ) { RunningJob rjob = item . getValue ( ) ; synchronized ( rjob ) { for ( TaskInProgress tip : rjob . tasks ) { result . add ( tip . getStatus ( ) ) ; } } } return result ; } | Get the list of tasks from running jobs on this task tracker . |
32,969 | Path [ ] getLocalFiles ( JobConf conf , String subdir ) throws IOException { String [ ] localDirs = getLocalDirsFromConf ( conf ) ; Path [ ] paths = new Path [ localDirs . length ] ; FileSystem localFs = FileSystem . getLocal ( conf ) ; for ( int i = 0 ; i < localDirs . length ; i ++ ) { paths [ i ] = new Path ( localDirs [ i ] , subdir ) ; paths [ i ] = paths [ i ] . makeQualified ( localFs ) ; } return paths ; } | get the full paths of the directory in all the local disks . |
32,970 | Path [ ] getLocalDirs ( ) throws IOException { String [ ] localDirs = getLocalDirsFromConf ( fConf ) ; Path [ ] paths = new Path [ localDirs . length ] ; FileSystem localFs = FileSystem . getLocal ( fConf ) ; for ( int i = 0 ; i < localDirs . length ; i ++ ) { paths [ i ] = new Path ( localDirs [ i ] ) ; paths [ i ] = paths [ i ] . makeQualified ( localFs ) ; } return paths ; } | get the paths in all the local disks . |
32,971 | int getAveMapSlotRefillMsecs ( ) { synchronized ( mapSlotRefillMsecsQueue ) { if ( mapSlotRefillMsecsQueue . isEmpty ( ) ) { return - 1 ; } int totalMapSlotRefillMsecs = 0 ; for ( int refillMsecs : mapSlotRefillMsecsQueue ) { totalMapSlotRefillMsecs += refillMsecs ; } return totalMapSlotRefillMsecs / mapSlotRefillMsecsQueue . size ( ) ; } } | Get the average time in milliseconds to refill a free map slot . This average is calculated on a rotating buffer . |
32,972 | void addAveMapSlotRefillMsecs ( int refillMsecs ) { synchronized ( mapSlotRefillMsecsQueue ) { mapSlotRefillMsecsQueue . add ( refillMsecs ) ; if ( mapSlotRefillMsecsQueue . size ( ) >= maxRefillQueueSize ) { mapSlotRefillMsecsQueue . remove ( ) ; } } } | Add this new refill time for the map slot refill queue . Delete the oldest value if the maximum size has been met . |
32,973 | int getAveReduceSlotRefillMsecs ( ) { synchronized ( reduceSlotRefillMsecsQueue ) { if ( reduceSlotRefillMsecsQueue . isEmpty ( ) ) { return - 1 ; } int totalReduceSlotRefillMsecs = 0 ; for ( int refillMsecs : reduceSlotRefillMsecsQueue ) { totalReduceSlotRefillMsecs += refillMsecs ; } return totalReduceSlotRefillMsecs / reduceSlotRefillMsecsQueue . size ( ) ; } } | Get the average time in milliseconds to refill a free reduce slot . This average is calculated on a rotating buffer . |
32,974 | void addAveReduceSlotRefillMsecs ( int refillMsecs ) { synchronized ( reduceSlotRefillMsecsQueue ) { reduceSlotRefillMsecsQueue . add ( refillMsecs ) ; if ( reduceSlotRefillMsecsQueue . size ( ) >= maxRefillQueueSize ) { reduceSlotRefillMsecsQueue . remove ( ) ; } } } | Add this new refill time for the reduce slot refill queue . Delete the oldest value if the maximum size has been met . |
32,975 | private void initializeMemoryManagement ( ) { if ( fConf . get ( MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY ) != null ) { LOG . warn ( JobConf . deprecatedString ( MAPRED_TASKTRACKER_VMEM_RESERVED_PROPERTY ) ) ; } if ( fConf . get ( MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY ) != null ) { LOG . warn ( JobConf . deprecatedString ( MAPRED_TASKTRACKER_PMEM_RESERVED_PROPERTY ) ) ; } if ( fConf . get ( JobConf . MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY ) != null ) { LOG . warn ( JobConf . deprecatedString ( JobConf . MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY ) ) ; } if ( fConf . get ( JobConf . UPPER_LIMIT_ON_TASK_VMEM_PROPERTY ) != null ) { LOG . warn ( JobConf . deprecatedString ( JobConf . UPPER_LIMIT_ON_TASK_VMEM_PROPERTY ) ) ; } totalVirtualMemoryOnTT = resourceCalculatorPlugin . getVirtualMemorySize ( ) ; totalPhysicalMemoryOnTT = resourceCalculatorPlugin . getPhysicalMemorySize ( ) ; mapSlotMemorySizeOnTT = fConf . getLong ( JobTracker . MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY , JobConf . DISABLED_MEMORY_LIMIT ) ; reduceSlotSizeMemoryOnTT = fConf . getLong ( JobTracker . MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY , JobConf . DISABLED_MEMORY_LIMIT ) ; totalMemoryAllottedForTasks = maxMapSlots * mapSlotMemorySizeOnTT + maxReduceSlots * reduceSlotSizeMemoryOnTT ; if ( totalMemoryAllottedForTasks < 0 ) { long memoryAllotedForSlot = fConf . normalizeMemoryConfigValue ( fConf . getLong ( JobConf . MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY , JobConf . DISABLED_MEMORY_LIMIT ) ) ; long limitVmPerTask = fConf . normalizeMemoryConfigValue ( fConf . getLong ( JobConf . UPPER_LIMIT_ON_TASK_VMEM_PROPERTY , JobConf . DISABLED_MEMORY_LIMIT ) ) ; if ( memoryAllotedForSlot == JobConf . DISABLED_MEMORY_LIMIT ) { totalMemoryAllottedForTasks = JobConf . DISABLED_MEMORY_LIMIT ; } else { if ( memoryAllotedForSlot > limitVmPerTask ) { LOG . info ( "DefaultMaxVmPerTask is mis-configured. " + "It shouldn't be greater than task limits" ) ; totalMemoryAllottedForTasks = JobConf . 
DISABLED_MEMORY_LIMIT ; } else { totalMemoryAllottedForTasks = ( maxMapSlots + maxReduceSlots ) * ( memoryAllotedForSlot / ( 1024 * 1024 ) ) ; } } } if ( totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT ) { LOG . info ( "totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT." + " Thrashing might happen." ) ; } else if ( totalMemoryAllottedForTasks > totalVirtualMemoryOnTT ) { LOG . info ( "totalMemoryAllottedForTasks > totalVirtualMemoryOnTT." + " Thrashing might happen." ) ; } setTaskMemoryManagerEnabledFlag ( ) ; if ( isTaskMemoryManagerEnabled ( ) ) { taskMemoryManager = new TaskMemoryManagerThread ( this ) ; taskMemoryManager . setDaemon ( true ) ; taskMemoryManager . start ( ) ; } } | Memory - related setup |
32,976 | synchronized void cleanUpOverMemoryTask ( TaskAttemptID tid , boolean wasFailure , String diagnosticMsg ) { TaskInProgress tip = runningTasks . get ( tid ) ; if ( tip != null ) { tip . reportDiagnosticInfo ( diagnosticMsg ) ; try { purgeTask ( tip , wasFailure ) ; } catch ( IOException ioe ) { LOG . warn ( "Couldn't purge the task of " + tid + ". Error : " + ioe ) ; } } } | Clean - up the task that TaskMemoryMangerThread requests to do so . |
32,977 | public String getUserName ( TaskAttemptID taskId ) { TaskInProgress tip = tasks . get ( taskId ) ; if ( tip != null ) { return tip . getJobConf ( ) . getUser ( ) ; } return null ; } | Obtain username from TaskId |
32,978 | protected int getMaxSlots ( JobConf conf , int numCpuOnTT , TaskType type ) { int maxSlots ; String cpuToSlots ; if ( type == TaskType . MAP ) { maxSlots = conf . getInt ( "mapred.tasktracker.map.tasks.maximum" , 2 ) ; cpuToSlots = conf . get ( "mapred.tasktracker.cpus.to.maptasks" ) ; } else { maxSlots = conf . getInt ( "mapred.tasktracker.reduce.tasks.maximum" , 2 ) ; cpuToSlots = conf . get ( "mapred.tasktracker.cpus.to.reducetasks" ) ; } if ( cpuToSlots != null ) { try { for ( String str : cpuToSlots . split ( "," ) ) { String [ ] pair = str . split ( ":" ) ; int numCpu = Integer . parseInt ( pair [ 0 ] . trim ( ) ) ; int max = Integer . parseInt ( pair [ 1 ] . trim ( ) ) ; if ( numCpu == numCpuOnTT ) { maxSlots = max ; break ; } } } catch ( Exception e ) { LOG . warn ( "Error parsing number of CPU to map slots configuration" , e ) ; } } return maxSlots ; } | Obtain the max number of task slots based on the configuration and CPU |
32,979 | public DatanodeInfo chooseTargetNodes ( Set < DatanodeInfo > excludedNodes ) throws IOException { DatanodeInfo target = cluster . getNodeOnDifferentRack ( excludedNodes ) ; if ( target == null ) { throw new IOException ( "Error choose datanode" ) ; } return target ; } | explicitly choose the target nodes . |
32,980 | protected void getChecksumInfo ( long blockLength ) { bytesPerChecksum = checksum . getBytesPerChecksum ( ) ; if ( bytesPerChecksum > 10 * 1024 * 1024 && bytesPerChecksum > blockLength ) { checksum = DataChecksum . newDataChecksum ( checksum . getChecksumType ( ) , Math . max ( ( int ) blockLength , 10 * 1024 * 1024 ) ) ; bytesPerChecksum = checksum . getBytesPerChecksum ( ) ; } checksumSize = checksum . getChecksumSize ( ) ; } | Populate bytes per checksum and checksum size information from local checksum object . |
32,981 | @ SuppressWarnings ( "unchecked" ) public boolean contains ( final Object key ) { if ( key == null ) { throw new IllegalArgumentException ( "Null element is not supported." ) ; } final int hashCode = ( ( T ) key ) . hashCode ( ) ; final int index = getIndex ( hashCode ) ; return containsElem ( index , ( T ) key , hashCode ) ; } | Check if the set contains given element |
32,982 | protected boolean containsElem ( int index , final T key , int hashCode ) { for ( LinkedElement < T > e = entries [ index ] ; e != null ; e = e . next ) { if ( hashCode == e . hashCode && e . element . equals ( key ) ) { return true ; } } return false ; } | Check if the set contains given element at given index . |
32,983 | public boolean addAll ( Collection < ? extends T > toAdd ) { boolean changed = false ; for ( T elem : toAdd ) { changed |= addElem ( elem ) ; } expandIfNecessary ( ) ; return changed ; } | All all elements in the collection . Expand if necessary . |
32,984 | @ SuppressWarnings ( "unchecked" ) public boolean remove ( final Object key ) { if ( key == null ) { throw new IllegalArgumentException ( "Null element is not supported." ) ; } LinkedElement < T > removed = removeElem ( ( T ) key ) ; shrinkIfNecessary ( ) ; return removed == null ? false : true ; } | Remove the element corresponding to the key . |
32,985 | public List < T > pollAll ( ) { List < T > retList = new ArrayList < T > ( size ) ; for ( int i = 0 ; i < entries . length ; i ++ ) { LinkedElement < T > current = entries [ i ] ; while ( current != null ) { retList . add ( current . element ) ; current = current . next ; } } this . clear ( ) ; return retList ; } | Remove all elements from the set and return them . Clear the entries . |
32,986 | @ SuppressWarnings ( "unchecked" ) public T [ ] pollToArray ( T [ ] array ) { int currentIndex = 0 ; LinkedElement < T > current = null ; if ( array . length == 0 ) { return array ; } if ( array . length > size ) { array = ( T [ ] ) java . lang . reflect . Array . newInstance ( array . getClass ( ) . getComponentType ( ) , size ) ; } if ( array . length == size ) { for ( int i = 0 ; i < entries . length ; i ++ ) { current = entries [ i ] ; while ( current != null ) { array [ currentIndex ++ ] = current . element ; current = current . next ; } } this . clear ( ) ; return array ; } boolean done = false ; int currentBucketIndex = 0 ; while ( ! done ) { current = entries [ currentBucketIndex ] ; while ( current != null ) { array [ currentIndex ++ ] = current . element ; current = current . next ; entries [ currentBucketIndex ] = current ; size -- ; modification ++ ; if ( currentIndex == array . length ) { done = true ; break ; } } currentBucketIndex ++ ; } shrinkIfNecessary ( ) ; return array ; } | Get array . length elements from the set and put them into the array . |
32,987 | private int computeCapacity ( int initial ) { if ( initial < MINIMUM_CAPACITY ) { return MINIMUM_CAPACITY ; } if ( initial > MAXIMUM_CAPACITY ) { return MAXIMUM_CAPACITY ; } int capacity = 1 ; while ( capacity < initial ) { capacity <<= 1 ; } return capacity ; } | Compute capacity given initial capacity . |
32,988 | @ SuppressWarnings ( "unchecked" ) private void resize ( int cap ) { int newCapacity = computeCapacity ( cap ) ; if ( newCapacity == this . capacity ) { return ; } this . capacity = newCapacity ; this . expandThreshold = ( int ) ( capacity * maxLoadFactor ) ; this . shrinkThreshold = ( int ) ( capacity * minLoadFactor ) ; this . hash_mask = capacity - 1 ; LinkedElement < T > [ ] temp = entries ; entries = new LinkedElement [ capacity ] ; for ( int i = 0 ; i < temp . length ; i ++ ) { LinkedElement < T > curr = temp [ i ] ; while ( curr != null ) { LinkedElement < T > next = curr . next ; int index = getIndex ( curr . hashCode ) ; curr . next = entries [ index ] ; entries [ index ] = curr ; curr = next ; } } } | Resize the internal table to given capacity . |
32,989 | @ SuppressWarnings ( "unchecked" ) public void clear ( ) { this . capacity = this . initialCapacity ; this . hash_mask = capacity - 1 ; this . expandThreshold = ( int ) ( capacity * maxLoadFactor ) ; this . shrinkThreshold = ( int ) ( capacity * minLoadFactor ) ; entries = new LinkedElement [ capacity ] ; size = 0 ; modification ++ ; } | Clear the set . Resize it to the original capacity . |
32,990 | private synchronized void resortInitQueue ( ) { Comparator < JobInProgress > comp = new Comparator < JobInProgress > ( ) { public int compare ( JobInProgress o1 , JobInProgress o2 ) { int res = o1 . getPriority ( ) . compareTo ( o2 . getPriority ( ) ) ; if ( res == 0 ) { if ( o1 . getStartTime ( ) < o2 . getStartTime ( ) ) res = - 1 ; else res = ( o1 . getStartTime ( ) == o2 . getStartTime ( ) ? 0 : 1 ) ; } return res ; } } ; synchronized ( jobInitQueue ) { Collections . sort ( jobInitQueue , comp ) ; } } | Sort jobs by priority and then by start time . |
32,991 | private void jobStateChanged ( JobStatusChangeEvent event ) { if ( event . getEventType ( ) == EventType . START_TIME_CHANGED || event . getEventType ( ) == EventType . PRIORITY_CHANGED ) { synchronized ( jobInitQueue ) { resortInitQueue ( ) ; } } } | called when the job s status is changed |
32,992 | public static void readStartObjectToken ( JsonParser jsonParser , String parentFieldName ) throws IOException { readToken ( jsonParser , parentFieldName , JsonToken . START_OBJECT ) ; } | This is a helper method that reads a JSON token using a JsonParser instance and throws an exception if the next token is not START_OBJECT . |
32,993 | public static void readStartArrayToken ( JsonParser jsonParser , String parentFieldName ) throws IOException { readToken ( jsonParser , parentFieldName , JsonToken . START_ARRAY ) ; } | This is a helper method that reads a JSON token using a JsonParser instance and throws an exception if the next token is not START_ARRAY . |
32,994 | public static void readEndObjectToken ( JsonParser jsonParser , String parentFieldName ) throws IOException { readToken ( jsonParser , parentFieldName , JsonToken . END_OBJECT ) ; } | This is a helper method that reads a JSON token using a JsonParser instance and throws an exception if the next token is not END_OBJECT . |
32,995 | public static void readEndArrayToken ( JsonParser jsonParser , String parentFieldName ) throws IOException { readToken ( jsonParser , parentFieldName , JsonToken . END_ARRAY ) ; } | This is a helper method that reads a JSON token using a JsonParser instance and throws an exception if the next token is not END_ARRAY . |
32,996 | public static JsonGenerator createJsonGenerator ( CoronaConf conf ) throws IOException { OutputStream outputStream = new FileOutputStream ( conf . getCMStateFile ( ) ) ; if ( conf . getCMCompressStateFlag ( ) ) { outputStream = new GZIPOutputStream ( outputStream ) ; } ObjectMapper mapper = new ObjectMapper ( ) ; JsonGenerator jsonGenerator = new JsonFactory ( ) . createJsonGenerator ( outputStream , JsonEncoding . UTF8 ) ; jsonGenerator . setCodec ( mapper ) ; if ( ! conf . getCMCompressStateFlag ( ) ) { jsonGenerator . setPrettyPrinter ( new DefaultPrettyPrinter ( ) ) ; } return jsonGenerator ; } | This is a helper method which creates a JsonGenerator instance for writing the state of the ClusterManager to the state file . The JsonGenerator instance writes to a compressed file if we have the compression flag turned on . |
32,997 | public static JsonParser createJsonParser ( CoronaConf conf ) throws IOException { InputStream inputStream = new FileInputStream ( conf . getCMStateFile ( ) ) ; if ( conf . getCMCompressStateFlag ( ) ) { inputStream = new GZIPInputStream ( inputStream ) ; } ObjectMapper mapper = new ObjectMapper ( ) ; mapper . configure ( DeserializationConfig . Feature . FAIL_ON_UNKNOWN_PROPERTIES , false ) ; JsonFactory jsonFactory = new JsonFactory ( ) ; jsonFactory . setCodec ( mapper ) ; return jsonFactory . createJsonParser ( inputStream ) ; } | This is a helper method which creates a JsonParser instance for reading back the state of the ClusterManager from the state file . The JsonParser instance reads from a compressed file if we have the compression flag turned on . |
32,998 | public static void readField ( JsonParser jsonParser , String expectedFieldName ) throws IOException { readToken ( jsonParser , expectedFieldName , JsonToken . FIELD_NAME ) ; String fieldName = jsonParser . getCurrentName ( ) ; if ( ! fieldName . equals ( expectedFieldName ) ) { foundUnknownField ( fieldName , expectedFieldName ) ; } } | The method reads a field from the JSON stream and checks if the field read is the same as the expect field . |
32,999 | void replaceChild ( INode newChild ) { if ( children == null ) { throw new IllegalArgumentException ( "The directory is empty" ) ; } int low = Collections . binarySearch ( children , newChild . name ) ; if ( low >= 0 ) { INode oldChild = children . get ( low ) ; children . set ( low , newChild ) ; newChild . parent = this ; if ( newChild . isDirectory ( ) && oldChild . isDirectory ( ) ) { if ( ( ( INodeDirectory ) oldChild ) . getChildren ( ) != null ) { for ( INode oldGrandChild : ( ( INodeDirectory ) oldChild ) . getChildren ( ) ) { oldGrandChild . parent = ( INodeDirectory ) newChild ; } } } } else { throw new IllegalArgumentException ( "No child exists to be replaced" ) ; } } | Replace a child that has the same name as newChild by newChild . This is only working on one child case |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.