idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,100
public synchronized void close ( ) throws IOException { if ( clientRunning ) { leasechecker . close ( ) ; leasechecker . closeRenewal ( ) ; if ( blockLocationRenewal != null ) { blockLocationRenewal . stop ( ) ; } clientRunning = false ; try { leasechecker . interruptAndJoin ( ) ; if ( blockLocationRenewal != null ) { blockLocationRenewal . join ( ) ; } } catch ( InterruptedException ie ) { } RPC . stopProxy ( rpcNamenode ) ; } }
Close the file system abandoning all of the leases and files being created and close connections to the namenode .
32,101
public BlockLocation [ ] getBlockLocations ( String src , long start , long length ) throws IOException { LocatedBlocks blocks = callGetBlockLocations ( namenode , src , start , length , isMetaInfoSuppoted ( namenodeProtocolProxy ) ) ; return DFSUtil . locatedBlocks2Locations ( blocks ) ; }
Get block location info about file
32,102
DFSInputStream open ( String src , int buffersize , boolean verifyChecksum , FileSystem . Statistics stats , boolean clearOsBuffer , ReadOptions options ) throws IOException { checkOpen ( ) ; incFileReadToStats ( ) ; DFSInputStream stream = new DFSInputStream ( this , src , buffersize , verifyChecksum , clearOsBuffer , options ) ; if ( blockLocationRenewal != null ) { blockLocationRenewal . add ( stream ) ; } return stream ; }
Create an input stream that obtains a nodelist from the namenode and then reads from all the right places . Creates inner subclass of InputStream that does the right out - of - band work .
32,103
public OutputStream create ( String src , boolean overwrite ) throws IOException { return create ( src , overwrite , defaultReplication , defaultBlockSize , null ) ; }
Create a new dfs file and return an output stream for writing into it .
32,104
public OutputStream create ( String src , boolean overwrite , short replication , long blockSize ) throws IOException { return create ( src , overwrite , replication , blockSize , null ) ; }
Create a new dfs file with the specified block replication and return an output stream for writing into the file .
32,105
public boolean raidFile ( String source , String codecId , short expectedSourceRepl ) throws IOException { checkOpen ( ) ; try { return namenode . raidFile ( source , codecId , expectedSourceRepl ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class , NSQuotaExceededException . class , DSQuotaExceededException . class ) ; } }
Raid a file with given codec No guarantee file will be raided when the call returns . Namenode will schedule raiding asynchronously . If raiding is done when raidFile is called again namenode will set replication of source blocks to expectedSourceRepl
32,106
boolean recoverLease ( String src , boolean discardLastBlock ) throws IOException { checkOpen ( ) ; leasechecker . remove ( src ) ; if ( this . namenodeProtocolProxy == null ) { return versionBasedRecoverLease ( src ) ; } return methodBasedRecoverLease ( src , discardLastBlock ) ; }
Recover a file s lease
32,107
private boolean versionBasedRecoverLease ( String src ) throws IOException { if ( namenodeVersion < ClientProtocol . RECOVER_LEASE_VERSION ) { OutputStream out ; try { out = append ( src , conf . getInt ( "io.file.buffer.size" , 4096 ) , null ) ; } catch ( RemoteException re ) { IOException e = re . unwrapRemoteException ( AlreadyBeingCreatedException . class ) ; if ( e instanceof AlreadyBeingCreatedException ) { return false ; } throw re ; } out . close ( ) ; return true ; } else if ( namenodeVersion < ClientProtocol . CLOSE_RECOVER_LEASE_VERSION ) { try { namenode . recoverLease ( src , clientName ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( FileNotFoundException . class , AccessControlException . class ) ; } return ! namenode . getBlockLocations ( src , 0 , Long . MAX_VALUE ) . isUnderConstruction ( ) ; } else { try { return namenode . closeRecoverLease ( src , clientName , false ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( FileNotFoundException . class , AccessControlException . class ) ; } } }
recover lease based on version
32,108
private boolean methodBasedRecoverLease ( String src , boolean discardLastBlock ) throws IOException { if ( namenodeProtocolProxy . isMethodSupported ( "closeRecoverLease" , String . class , String . class , boolean . class ) ) { try { return namenode . closeRecoverLease ( src , clientName , discardLastBlock ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( FileNotFoundException . class , AccessControlException . class ) ; } } else if ( namenodeProtocolProxy . isMethodSupported ( "closeRecoverLease" , String . class , String . class ) ) { try { return namenode . closeRecoverLease ( src , clientName ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( FileNotFoundException . class , AccessControlException . class ) ; } } if ( namenodeProtocolProxy . isMethodSupported ( "recoverLease" , String . class , String . class ) ) { try { namenode . recoverLease ( src , clientName ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( FileNotFoundException . class , AccessControlException . class ) ; } return ! namenode . getBlockLocations ( src , 0 , Long . MAX_VALUE ) . isUnderConstruction ( ) ; } OutputStream out ; try { out = append ( src , conf . getInt ( "io.file.buffer.size" , 4096 ) , null ) ; } catch ( RemoteException re ) { IOException e = re . unwrapRemoteException ( AlreadyBeingCreatedException . class ) ; if ( e instanceof AlreadyBeingCreatedException ) { return false ; } throw re ; } out . close ( ) ; return true ; }
recover lease based on method name
32,109
OutputStream append ( String src , int buffersize , Progressable progress ) throws IOException { checkOpen ( ) ; clearFileStatusCache ( ) ; FileStatus stat = null ; LocatedBlock lastBlock = null ; boolean success = false ; try { stat = getFileInfo ( src ) ; if ( namenodeProtocolProxy != null && namenodeProtocolProxy . isMethodSupported ( "appendAndFetchOldGS" , String . class , String . class ) ) { LocatedBlockWithOldGS loc = namenode . appendAndFetchOldGS ( src , clientName ) ; lastBlock = loc ; if ( loc != null ) { updateNamespaceIdIfNeeded ( loc . getNamespaceID ( ) ) ; updateDataTransferProtocolVersionIfNeeded ( loc . getDataProtocolVersion ( ) ) ; getNewNameNodeIfNeeded ( loc . getMethodFingerPrint ( ) ) ; } } else if ( namenodeProtocolProxy != null && dataTransferVersion >= DataTransferProtocol . APPEND_BLOCK_VERSION ) { throw new IOException ( "DataTransferVersion " + dataTransferVersion + "requires the method appendAndFetchOldGS is supported in Namenode" ) ; } else if ( namenodeProtocolProxy != null && namenodeProtocolProxy . isMethodSupported ( "appendAndFetchMetaInfo" , String . class , String . class ) ) { LocatedBlockWithMetaInfo loc = namenode . appendAndFetchMetaInfo ( src , clientName ) ; lastBlock = loc ; if ( loc != null ) { updateNamespaceIdIfNeeded ( loc . getNamespaceID ( ) ) ; updateDataTransferProtocolVersionIfNeeded ( loc . getDataProtocolVersion ( ) ) ; getNewNameNodeIfNeeded ( loc . getMethodFingerPrint ( ) ) ; } } else { lastBlock = namenode . append ( src , clientName ) ; } OutputStream result = new DFSOutputStream ( this , src , buffersize , progress , lastBlock , stat , conf . getInt ( "io.bytes.per.checksum" , 512 ) , namespaceId ) ; leasechecker . put ( src , result ) ; success = true ; return result ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( FileNotFoundException . class , AccessControlException . class , NSQuotaExceededException . class , DSQuotaExceededException . class ) ; } finally { if ( ! success ) { try { namenode . abandonFile ( src , clientName ) ; } catch ( RemoteException e ) { if ( e . unwrapRemoteException ( ) instanceof LeaseExpiredException ) { LOG . debug ( String . format ( "client %s attempting to abandon file %s which it does not own" , clientName , src ) , e ) ; } else { throw e ; } } } } }
Append to an existing HDFS file .
32,110
public boolean setReplication ( String src , short replication ) throws IOException { try { return namenode . setReplication ( src , replication ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class , NSQuotaExceededException . class , DSQuotaExceededException . class ) ; } }
Set replication for an existing file .
32,111
public void merge ( String parity , String source , String codecId , int [ ] checksums ) throws IOException { checkOpen ( ) ; try { namenode . merge ( parity , source , codecId , checksums ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class , NSQuotaExceededException . class , DSQuotaExceededException . class ) ; } }
Merge parity file and source file together into a raided file
32,112
public boolean delete ( String src , boolean recursive ) throws IOException { checkOpen ( ) ; clearFileStatusCache ( ) ; try { return namenode . delete ( src , recursive ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class ) ; } }
delete file or directory . delete contents of the directory if non empty and recursive set to true
32,113
public FileStatus [ ] listPaths ( String src ) throws IOException { checkOpen ( ) ; metrics . incLsCalls ( ) ; try { if ( namenodeProtocolProxy == null ) { return versionBasedListPath ( src ) ; } return methodBasedListPath ( src ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class ) ; } }
Get a listing of the indicated directory
32,114
private RemoteIterator < LocatedFileStatus > versionBasedListPathWithLocation ( final String src ) throws IOException { if ( namenodeVersion >= ClientProtocol . BULK_BLOCK_LOCATIONS_VERSION ) { return iteratorListing ( src ) ; } else { return arrayListing ( src ) ; } }
List a directory with location based on version
32,115
private RemoteIterator < LocatedFileStatus > methodBasedListPathWithLocation ( final String src ) throws IOException { if ( namenodeProtocolProxy . isMethodSupported ( "getLocatedPartialListing" , String . class , byte [ ] . class ) ) { return iteratorListing ( src ) ; } else { return arrayListing ( src ) ; } }
List a directory with location based on method
32,116
private RemoteIterator < LocatedFileStatus > arrayListing ( final String src ) throws IOException { return new RemoteIterator < LocatedFileStatus > ( ) { private FileStatus [ ] stats ; private int i = 0 ; { stats = listPaths ( src ) ; if ( stats == null ) { throw new FileNotFoundException ( "File " + src + " does not exist." ) ; } } public boolean hasNext ( ) throws IOException { return i < stats . length ; } public LocatedFileStatus next ( ) throws IOException { if ( ! hasNext ( ) ) { throw new NoSuchElementException ( "No more entry in " + src ) ; } FileStatus result = stats [ i ++ ] ; BlockLocation [ ] locs = result . isDir ( ) ? null : getBlockLocations ( result . getPath ( ) . toUri ( ) . getPath ( ) , 0 , result . getLen ( ) ) ; return new LocatedFileStatus ( result , locs ) ; } } ; }
create the iterator from an array of file status
32,117
private RemoteIterator < LocatedFileStatus > iteratorListing ( final String src ) throws IOException { return new RemoteIterator < LocatedFileStatus > ( ) { private LocatedDirectoryListing thisListing ; private int i ; { thisListing = namenode . getLocatedPartialListing ( src , HdfsFileStatus . EMPTY_NAME ) ; if ( thisListing == null ) { throw new FileNotFoundException ( "File " + src + " does not exist." ) ; } } public boolean hasNext ( ) throws IOException { if ( i >= thisListing . getPartialListing ( ) . length && thisListing . hasMore ( ) ) { thisListing = namenode . getLocatedPartialListing ( src , thisListing . getLastName ( ) ) ; if ( thisListing == null ) { throw new FileNotFoundException ( "File " + src + " does not exist." ) ; } i = 0 ; } return i < thisListing . getPartialListing ( ) . length ; } public LocatedFileStatus next ( ) throws IOException { if ( ! hasNext ( ) ) { throw new java . util . NoSuchElementException ( "No more entry in " + src ) ; } return HdfsFileStatus . toLocatedFileStatus ( thisListing . getPartialListing ( ) [ i ] , thisListing . getBlockLocations ( ) [ i ++ ] , src ) ; } } ; }
create the iterator from the iterative listing with block locations
32,118
private FileStatus [ ] iterativeListing ( String src ) throws IOException { DirectoryListing thisListing = namenode . getPartialListing ( src , HdfsFileStatus . EMPTY_NAME ) ; if ( thisListing == null ) { return null ; } HdfsFileStatus [ ] partialListing = thisListing . getPartialListing ( ) ; if ( ! thisListing . hasMore ( ) ) { FileStatus [ ] stats = new FileStatus [ partialListing . length ] ; for ( int i = 0 ; i < partialListing . length ; i ++ ) { stats [ i ] = HdfsFileStatus . toFileStatus ( partialListing [ i ] , src ) ; } return stats ; } int totalNumEntries = partialListing . length + thisListing . getRemainingEntries ( ) ; ArrayList < FileStatus > listing = new ArrayList < FileStatus > ( totalNumEntries ) ; for ( HdfsFileStatus fileStatus : partialListing ) { listing . add ( HdfsFileStatus . toFileStatus ( fileStatus , src ) ) ; } do { thisListing = namenode . getPartialListing ( src , thisListing . getLastName ( ) ) ; if ( thisListing == null ) { return null ; } partialListing = thisListing . getPartialListing ( ) ; for ( HdfsFileStatus fileStatus : partialListing ) { listing . add ( HdfsFileStatus . toFileStatus ( fileStatus , src ) ) ; } } while ( thisListing . hasMore ( ) ) ; return listing . toArray ( new FileStatus [ listing . size ( ) ] ) ; }
List the given path iteratively if the directory is large
32,119
int getFileCrc ( String src ) throws IOException { checkOpen ( ) ; return getFileCrc ( dataTransferVersion , src , namenode , namenodeProtocolProxy , socketFactory , socketTimeout ) ; }
Get the CRC32 Checksum of a file .
32,120
public void setPermission ( String src , FsPermission permission ) throws IOException { checkOpen ( ) ; clearFileStatusCache ( ) ; try { namenode . setPermission ( src , permission ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class , FileNotFoundException . class ) ; } }
Set permissions to a file or directory .
32,121
public void setOwner ( String src , String username , String groupname ) throws IOException { checkOpen ( ) ; clearFileStatusCache ( ) ; try { namenode . setOwner ( src , username , groupname ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class , FileNotFoundException . class ) ; } }
Set file or directory owner .
32,122
private CorruptFileBlocks versionBasedListCorruptFileBlocks ( String path , String cookie ) throws IOException { if ( namenodeVersion < ClientProtocol . LIST_CORRUPT_FILEBLOCKS_VERSION ) { LOG . info ( "NameNode version is " + namenodeVersion + " Using older version of getCorruptFiles." ) ; if ( cookie != null ) { return new CorruptFileBlocks ( new String [ 0 ] , "" ) ; } ArrayList < String > str = new ArrayList < String > ( ) ; for ( FileStatus stat : namenode . getCorruptFiles ( ) ) { String filename = stat . getPath ( ) . toUri ( ) . getPath ( ) ; if ( filename . startsWith ( path ) ) { str . add ( filename ) ; } } return new CorruptFileBlocks ( str . toArray ( new String [ str . size ( ) ] ) , "" ) ; } return namenode . listCorruptFileBlocks ( path , cookie ) ; }
Version based list corrupt file blocks
32,123
private CorruptFileBlocks methodBasedListCorruptFileBlocks ( String path , String cookie ) throws IOException { if ( ! namenodeProtocolProxy . isMethodSupported ( "listCorruptFileBlocks" , String . class , String . class ) ) { LOG . info ( "NameNode version is " + namenodeVersion + " Using older version of getCorruptFiles." ) ; if ( cookie != null ) { return new CorruptFileBlocks ( new String [ 0 ] , "" ) ; } ArrayList < String > str = new ArrayList < String > ( ) ; for ( FileStatus stat : namenode . getCorruptFiles ( ) ) { String filename = stat . getPath ( ) . toUri ( ) . getPath ( ) ; if ( filename . startsWith ( path ) ) { str . add ( filename ) ; } } return new CorruptFileBlocks ( str . toArray ( new String [ str . size ( ) ] ) , "" ) ; } return namenode . listCorruptFileBlocks ( path , cookie ) ; }
Method based listCorruptFileBlocks
32,124
private void versionBasedSaveNamespace ( boolean force , boolean uncompressed ) throws AccessControlException , IOException { if ( namenodeVersion >= ClientProtocol . SAVENAMESPACE_FORCE ) { namenode . saveNamespace ( force , uncompressed ) ; } else { namenode . saveNamespace ( ) ; } }
Version - based save namespace
32,125
private void methodBasedSaveNamespace ( boolean force , boolean uncompressed ) throws AccessControlException , IOException { if ( namenodeProtocolProxy . isMethodSupported ( "saveNamespace" , boolean . class , boolean . class ) ) { namenode . saveNamespace ( force , uncompressed ) ; } else { namenode . saveNamespace ( ) ; } }
Method - based save namespace
32,126
void setQuota ( String src , long namespaceQuota , long diskspaceQuota ) throws IOException { if ( ( namespaceQuota <= 0 && namespaceQuota != FSConstants . QUOTA_DONT_SET && namespaceQuota != FSConstants . QUOTA_RESET ) || ( diskspaceQuota <= 0 && diskspaceQuota != FSConstants . QUOTA_DONT_SET && diskspaceQuota != FSConstants . QUOTA_RESET ) ) { throw new IllegalArgumentException ( "Invalid values for quota : " + namespaceQuota + " and " + diskspaceQuota ) ; } try { namenode . setQuota ( src , namespaceQuota , diskspaceQuota ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class , FileNotFoundException . class , NSQuotaExceededException . class , DSQuotaExceededException . class ) ; } }
Sets or resets quotas for a directory .
32,127
public void setTimes ( String src , long mtime , long atime ) throws IOException { checkOpen ( ) ; clearFileStatusCache ( ) ; try { namenode . setTimes ( src , mtime , atime ) ; } catch ( RemoteException re ) { throw re . unwrapRemoteException ( AccessControlException . class , FileNotFoundException . class ) ; } }
set the modification and access time of a file
32,128
static void checkBlockRange ( List < LocatedBlock > blockRange , long offset , long length ) throws IOException { boolean isValid = false ; if ( ! blockRange . isEmpty ( ) ) { int numBlocks = blockRange . size ( ) ; LocatedBlock firstBlock = blockRange . get ( 0 ) ; LocatedBlock lastBlock = blockRange . get ( numBlocks - 1 ) ; long segmentEnd = offset + length ; if ( firstBlock . getStartOffset ( ) <= offset && ( segmentEnd <= lastBlock . getStartOffset ( ) + lastBlock . getBlockSize ( ) ) ) { isValid = true ; LocatedBlock prevBlock = firstBlock ; for ( int i = 1 ; i < numBlocks ; ++ i ) { long prevBlkEnd = prevBlock . getStartOffset ( ) + prevBlock . getBlockSize ( ) ; LocatedBlock curBlock = blockRange . get ( i ) ; long curBlkOffset = curBlock . getStartOffset ( ) ; if ( prevBlkEnd != curBlkOffset || prevBlkEnd <= offset || segmentEnd <= curBlkOffset ) { isValid = false ; break ; } prevBlock = curBlock ; } } } if ( ! isValid ) { throw new IOException ( "Got incorrect block range for " + "offset=" + offset + ", length=" + length + ": " + blockRange ) ; } }
Checks that the given block range covers the given file segment and consists of contiguous blocks . This function assumes that the length of the queried segment is non - zero and a non - empty block list is expected .
32,129
void reportChecksumFailure ( String file , LocatedBlock lblocks [ ] ) { try { reportBadBlocks ( lblocks ) ; } catch ( IOException ie ) { LOG . info ( "Found corruption while reading " + file + ". Error repairing corrupt blocks. Bad blocks remain. " + StringUtils . stringifyException ( ie ) ) ; } }
just reports checksum failure and ignores any exception during the report .
32,130
public int getDataTransferProtocolVersion ( ) throws IOException { synchronized ( dataTransferVersion ) { if ( dataTransferVersion == - 1 ) { try { int remoteDataTransferVersion = namenode . getDataTransferProtocolVersion ( ) ; updateDataTransferProtocolVersionIfNeeded ( remoteDataTransferVersion ) ; } catch ( RemoteException re ) { IOException ioe = re . unwrapRemoteException ( IOException . class ) ; if ( ioe . getMessage ( ) . startsWith ( IOException . class . getName ( ) + ": " + NoSuchMethodException . class . getName ( ) ) ) { dataTransferVersion = 14 ; } else { throw ioe ; } } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Data Transfer Protocal Version is " + dataTransferVersion ) ; } } return dataTransferVersion ; } }
Get the data transfer protocol version supported in the cluster assuming all the datanodes have the same version .
32,131
public boolean isInLocalRack ( InetSocketAddress addr ) { if ( dnsToSwitchMapping == null || this . localhostNetworkLocation == null ) { return false ; } ArrayList < String > tempList = new ArrayList < String > ( ) ; tempList . add ( addr . getAddress ( ) . getHostAddress ( ) ) ; List < String > retList = dnsToSwitchMapping . resolve ( tempList ) ; if ( retList != null && retList . size ( ) > 0 ) { return retList . get ( 0 ) . equals ( this . localhostNetworkLocation ) ; } else { return false ; } }
Determine whether the input address is in the same rack as local machine
32,132
int getTotalCount ( ) { int ret = 0 ; for ( RaidMissingBlocksPerCodec queue : queues . values ( ) ) { ret += queue . getTotalCount ( ) ; } return ret ; }
Get the total count of the missing blocks
32,133
boolean remove ( BlockInfo blockInfo , RaidCodec codec ) { if ( queues . get ( codec ) . remove ( blockInfo ) ) { if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "BLOCK* NameSystem.RaidMissingBlocks.remove:" + blockInfo + " file " + blockInfo . getINode ( ) + " codec " + codec . id ) ; } return true ; } return false ; }
Remove a missing Raided block from its codec queue .
32,134
public synchronized void initTasks ( ) throws IOException { boolean loggingEnabled = LOG . isDebugEnabled ( ) ; if ( loggingEnabled ) { LOG . debug ( "(initTasks@SJIP) Starting Initialization for " + jobId ) ; } numMapTasks = jobStory . getNumberMaps ( ) ; numReduceTasks = jobStory . getNumberReduces ( ) ; JobHistory . JobInfo . logSubmitted ( getJobID ( ) , conf , jobFile . toString ( ) , this . startTime , hasRestarted ( ) ) ; if ( loggingEnabled ) { LOG . debug ( "(initTasks@SJIP) Logged to job history for " + jobId ) ; } if ( loggingEnabled ) { LOG . debug ( "(initTasks@SJIP) Checked task limits for " + jobId ) ; } final String jobFile = "default" ; splits = getRawSplits ( jobStory . getInputSplits ( ) ) ; if ( loggingEnabled ) { LOG . debug ( "(initTasks@SJIP) Created splits for job = " + jobId + " number of splits = " + splits . length ) ; } numMapTasks = splits . length ; maps = new TaskInProgress [ numMapTasks ] ; for ( int i = 0 ; i < numMapTasks ; ++ i ) { inputLength += splits [ i ] . getDataLength ( ) ; maps [ i ] = new TaskInProgress ( jobId , jobFile , splits [ i ] , conf , this , i , numSlotsPerMap ) ; } if ( numMapTasks > 0 ) { nonRunningMapCache = createCache ( splits , maxLevel ) ; if ( loggingEnabled ) { LOG . debug ( "initTasks:numMaps=" + numMapTasks + " Size of nonRunningMapCache=" + nonRunningMapCache . size ( ) + " for " + jobId ) ; } } this . launchTime = JobTracker . getClock ( ) . getTime ( ) ; this . reduces = new TaskInProgress [ numReduceTasks ] ; for ( int i = 0 ; i < numReduceTasks ; i ++ ) { reduces [ i ] = new TaskInProgress ( jobId , jobFile , numMapTasks , i , conf , this , numSlotsPerReduce ) ; nonRunningReduces . add ( reduces [ i ] ) ; } completedMapsForReduceSlowstart = ( int ) Math . ceil ( ( conf . getFloat ( "mapred.reduce.slowstart." + "completed.maps" , DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART ) * numMapTasks ) ) ; tasksInited . set ( true ) ; if ( loggingEnabled ) { LOG . debug ( "Initializing job, nowstatus = " + JobStatus . getJobRunState ( getStatus ( ) . getRunState ( ) ) ) ; } setupComplete ( ) ; if ( loggingEnabled ) { LOG . debug ( "Initializing job, inited-status = " + JobStatus . getJobRunState ( getStatus ( ) . getRunState ( ) ) ) ; } }
for initTasks update information from JobStory object
32,135
@ SuppressWarnings ( "deprecation" ) private synchronized TaskAttemptInfo getMapTaskAttemptInfo ( TaskTracker taskTracker , TaskAttemptID taskAttemptID ) { assert ( taskAttemptID . isMap ( ) ) ; JobID jobid = ( JobID ) taskAttemptID . getJobID ( ) ; assert ( jobid == getJobID ( ) ) ; RawSplit split = splits [ taskAttemptID . getTaskID ( ) . getId ( ) ] ; int locality = getClosestLocality ( taskTracker , split ) ; TaskID taskId = taskAttemptID . getTaskID ( ) ; if ( ! taskId . isMap ( ) ) { assert false : "Task " + taskId + " is not MAP :" ; } TaskAttemptInfo taskAttemptInfo = jobStory . getMapTaskAttemptInfoAdjusted ( taskId . getId ( ) , taskAttemptID . getId ( ) , locality ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "get an attempt: " + taskAttemptID . toString ( ) + ", state=" + taskAttemptInfo . getRunState ( ) + ", runtime=" + ( ( taskId . isMap ( ) ) ? taskAttemptInfo . getRuntime ( ) : ( ( ReduceTaskAttemptInfo ) taskAttemptInfo ) . getReduceRuntime ( ) ) ) ; } return taskAttemptInfo ; }
Given the map taskAttemptID returns the TaskAttemptInfo . Deconstructs the map s taskAttemptID and looks up the jobStory with the parts taskType id of task id of task attempt .
32,136
private TaskAttemptInfo getReduceTaskAttemptInfo ( TaskTracker taskTracker , TaskAttemptID taskAttemptID ) { assert ( ! taskAttemptID . isMap ( ) ) ; TaskID taskId = taskAttemptID . getTaskID ( ) ; TaskType taskType ; if ( taskAttemptID . isMap ( ) ) { taskType = TaskType . MAP ; } else { taskType = TaskType . REDUCE ; } TaskAttemptInfo taskAttemptInfo = jobStory . getTaskAttemptInfo ( taskType , taskId . getId ( ) , taskAttemptID . getId ( ) ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "get an attempt: " + taskAttemptID . toString ( ) + ", state=" + taskAttemptInfo . getRunState ( ) + ", runtime=" + ( ( taskAttemptID . isMap ( ) ) ? taskAttemptInfo . getRuntime ( ) : ( ( ReduceTaskAttemptInfo ) taskAttemptInfo ) . getReduceRuntime ( ) ) ) ; } return taskAttemptInfo ; }
Given the reduce taskAttemptID returns the TaskAttemptInfo . Deconstructs the reduce taskAttemptID and looks up the jobStory with the parts taskType id of task id of task attempt .
32,137
public static void alignDatanodes ( DatanodeInfo [ ] dstLocs , DatanodeInfo [ ] srcLocs ) { for ( int i = 0 ; i < dstLocs . length ; i ++ ) { for ( int j = 0 ; j < srcLocs . length ; j ++ ) { if ( i == j ) continue ; if ( dstLocs [ i ] . equals ( srcLocs [ j ] ) ) { if ( i < j ) { swap ( i , j , srcLocs ) ; } else { swap ( i , j , dstLocs ) ; } break ; } } } }
Aligns the source and destination locations such that common locations appear at the same index .
32,138
public void shutdown ( ) throws IOException { Iterator < ClientDatanodeProtocol > connections = datanodeMap . values ( ) . iterator ( ) ; while ( connections . hasNext ( ) ) { ClientDatanodeProtocol cnxn = connections . next ( ) ; RPC . stopProxy ( cnxn ) ; } datanodeMap . clear ( ) ; executor . shutdownNow ( ) ; synchronized ( leaseCheckers ) { for ( LeaseChecker checker : leaseCheckers . values ( ) ) { checker . closeRenewal ( ) ; } } }
Tears down all RPC connections you MUST call this once you are done .
32,139
public void copy ( List < FastFileCopyRequest > requests ) throws Exception { List < Future < CopyResult > > results = new ArrayList < Future < CopyResult > > ( ) ; for ( FastFileCopyRequest r : requests ) { Callable < CopyResult > fastFileCopy = new FastFileCopy ( r . getSrc ( ) , r . getDestination ( ) , r . srcFs , r . dstFs ) ; Future < CopyResult > f = executor . submit ( fastFileCopy ) ; results . add ( f ) ; } for ( Future < CopyResult > f : results ) { f . get ( ) ; } }
Performs fast copy for a list of fast file copy requests . Uses a thread pool to perform fast file copy in parallel .
32,140
private static void getDirectoryListing ( FileStatus root , FileSystem fs , List < CopyPath > result , Path dstPath ) throws IOException { if ( ! root . isDir ( ) ) { result . add ( new CopyPath ( root . getPath ( ) , dstPath ) ) ; return ; } for ( FileStatus child : fs . listStatus ( root . getPath ( ) ) ) { getDirectoryListing ( child , fs , result , new Path ( dstPath , child . getPath ( ) . getName ( ) ) ) ; } }
Recursively lists out all the files under a given path .
32,141
private static List < CopyPath > expandDirectories ( FileSystem fs , List < Path > paths , Path dstPath ) throws IOException { List < CopyPath > newList = new ArrayList < CopyPath > ( ) ; FileSystem dstFs = dstPath . getFileSystem ( defaultConf ) ; boolean isDstFile = false ; try { FileStatus dstPathStatus = dstFs . getFileStatus ( dstPath ) ; if ( ! dstPathStatus . isDir ( ) ) { isDstFile = true ; } } catch ( FileNotFoundException e ) { isDstFile = true ; } for ( Path path : paths ) { FileStatus pathStatus = fs . getFileStatus ( path ) ; if ( ! pathStatus . isDir ( ) ) { if ( isDstFile ) { newList . add ( new CopyPath ( path , dstPath ) ) ; } else { newList . add ( new CopyPath ( path , new Path ( dstPath , path . getName ( ) ) ) ) ; } } else { Path rootPath = dstPath ; if ( dstFs . exists ( dstPath ) ) { rootPath = new Path ( dstPath , pathStatus . getPath ( ) . getName ( ) ) ; } getDirectoryListing ( pathStatus , fs , newList , rootPath ) ; } } return newList ; }
Get the listing of all files under the given directories .
32,142
private static List < CopyPath > expandSingle ( Path src , Path dstPath ) throws IOException { List < Path > expandedPaths = new ArrayList < Path > ( ) ; FileSystem fs = src . getFileSystem ( defaultConf ) ; FileStatus [ ] stats = fs . globStatus ( src ) ; if ( stats == null || stats . length == 0 ) { throw new IOException ( "Path : " + src + " is invalid" ) ; } for ( FileStatus stat : stats ) { expandedPaths . add ( stat . getPath ( ) ) ; } List < CopyPath > expandedDirs = expandDirectories ( fs , expandedPaths , dstPath ) ; return expandedDirs ; }
Expand a single file if its a file pattern list out all files matching the pattern if its a directory return all files under the directory .
32,143
private static List < CopyPath > expandSrcs ( List < Path > srcs , Path dstPath ) throws IOException { List < CopyPath > expandedSrcs = new ArrayList < CopyPath > ( ) ; for ( Path src : srcs ) { expandedSrcs . addAll ( expandSingle ( src , dstPath ) ) ; } return expandedSrcs ; }
Expands all sources if they are file pattern expand to list out all files matching the pattern if they are a directory expand to list out all files under the directory .
32,144
public float getProgress ( ) throws IOException { if ( end == start ) { return 0.0f ; } else { return Math . min ( 1.0f , ( in . getPosition ( ) - start ) / ( float ) ( end - start ) ) ; } }
Return the progress within the input split
32,145
protected String findPattern ( String strPattern , String text , int grp ) { Pattern pattern = Pattern . compile ( strPattern , Pattern . MULTILINE ) ; Matcher matcher = pattern . matcher ( text ) ; if ( matcher . find ( 0 ) ) return matcher . group ( grp ) ; return null ; }
Find the first occurence ofa pattern in a piece of text and return a specific group .
32,146
protected String findAll ( String strPattern , String text , int grp , String separator ) { String retval = "" ; boolean firstTime = true ; Pattern pattern = Pattern . compile ( strPattern ) ; Matcher matcher = pattern . matcher ( text ) ; while ( matcher . find ( ) ) { retval += ( firstTime ? "" : separator ) + matcher . group ( grp ) ; firstTime = false ; } return retval ; }
Finds all occurences of a pattern in a piece of text and returns the matching groups .
32,147
private int renderMBeans ( JsonGenerator jg , String [ ] mBeanNames ) throws IOException , MalformedObjectNameException { jg . writeStartObject ( ) ; Set < ObjectName > nameQueries , queriedObjects ; nameQueries = new HashSet < ObjectName > ( ) ; queriedObjects = new HashSet < ObjectName > ( ) ; if ( mBeanNames == null ) { nameQueries . add ( null ) ; } else { for ( String mBeanName : mBeanNames ) { if ( mBeanName != null ) { nameQueries . add ( new ObjectName ( mBeanName ) ) ; } } } for ( ObjectName nameQuery : nameQueries ) { queriedObjects . addAll ( mBeanServer . queryNames ( nameQuery , null ) ) ; } for ( ObjectName objectName : queriedObjects ) { renderMBean ( jg , objectName ) ; } jg . writeEndObject ( ) ; return HttpServletResponse . SC_OK ; }
Renders MBean attributes to jg . The queries parameter allows selection of a subset of mbeans .
32,148
private void renderMBean ( JsonGenerator jg , ObjectName objectName ) throws IOException { MBeanInfo beanInfo ; String className ; jg . writeObjectFieldStart ( objectName . toString ( ) ) ; jg . writeStringField ( "beanName" , objectName . toString ( ) ) ; try { beanInfo = mBeanServer . getMBeanInfo ( objectName ) ; className = beanInfo . getClassName ( ) ; if ( "org.apache.commons.modeler.BaseModelMBean" . equals ( className ) ) { try { className = ( String ) mBeanServer . getAttribute ( objectName , "modelerType" ) ; } catch ( Exception e ) { } } jg . writeStringField ( "className" , className ) ; for ( MBeanAttributeInfo attr : beanInfo . getAttributes ( ) ) { writeAttribute ( jg , objectName , attr ) ; } } catch ( OperationsException e ) { writeException ( jg , e ) ; } catch ( ReflectionException e ) { writeException ( jg , e ) ; } jg . writeEndObject ( ) ; }
Render a particular MBean s attributes to jg .
32,149
public DatanodeCommand [ ] sendHeartbeat ( DatanodeRegistration registration , long capacity , long dfsUsed , long remaining , long namespaceUsed , int xmitsInProgress , int xceiverCount ) throws IOException { throw new IOException ( "sendHeartbeat" + errMessage ) ; }
This method should not be invoked on the composite DatanodeProtocols object . You can call these on the individual DatanodeProcol objects .
32,150
void setPolicyInfo ( Collection < PolicyInfo > all ) throws IOException { this . all = all ; this . pathToPolicy . clear ( ) ; for ( PolicyInfo pinfo : all ) { pathToPolicy . add ( new PathToPolicy ( pinfo . getSrcPath ( ) , pinfo ) ) ; for ( PathInfo d : pinfo . getDestPaths ( ) ) { pathToPolicy . add ( new PathToPolicy ( d . rpath , pinfo ) ) ; } } Comparator < PathToPolicy > comp = new Comparator < PathToPolicy > ( ) { public int compare ( PathToPolicy p1 , PathToPolicy p2 ) { return 0 - p1 . spath . compareTo ( p2 . spath ) ; } } ; Collections . sort ( pathToPolicy , comp ) ; }
The list of all configured policies .
32,151
public void run ( ) { while ( running ) { try { LOG . info ( "FileFixer continuing to run..." ) ; doFindFiles ( ) ; } catch ( Exception e ) { LOG . error ( StringUtils . stringifyException ( e ) ) ; } catch ( Error err ) { LOG . error ( "Exiting after encountering " + StringUtils . stringifyException ( err ) ) ; shutdown ( ) ; throw err ; } try { Thread . sleep ( blockFixInterval ) ; } catch ( InterruptedException ie ) { LOG . error ( "Encountering InturruptedException " + StringUtils . stringifyException ( ie ) ) ; } } }
A singleton thread that finds corrupted files and then schedules blocks to be copied . This thread talks only to NameNodes and does not talk to any datanodes .
32,152
static ClientDatanodeProtocol createClientDatanodeProtocolProxy ( DatanodeInfo datanodeid , Configuration conf ) throws IOException { InetSocketAddress addr = NetUtils . createSocketAddr ( datanodeid . getHost ( ) + ":" + datanodeid . getIpcPort ( ) ) ; if ( ClientDatanodeProtocol . LOG . isDebugEnabled ( ) ) { ClientDatanodeProtocol . LOG . info ( "ClientDatanodeProtocol addr=" + addr ) ; } try { return ( ClientDatanodeProtocol ) RPC . getProxy ( ClientDatanodeProtocol . class , ClientDatanodeProtocol . versionID , addr , conf ) ; } catch ( RPC . VersionMismatch e ) { long clientVersion = e . getClientVersion ( ) ; long datanodeVersion = e . getServerVersion ( ) ; if ( clientVersion > datanodeVersion && ! ProtocolCompatible . isCompatibleClientDatanodeProtocol ( clientVersion , datanodeVersion ) ) { throw new RPC . VersionIncompatible ( ClientDatanodeProtocol . class . getName ( ) , clientVersion , datanodeVersion ) ; } return ( ClientDatanodeProtocol ) e . getProxy ( ) ; } }
Setup a session with the specified datanode
32,153
public static GaloisField getInstance ( int fieldSize , int primitivePolynomial ) { int key = ( ( fieldSize << 16 ) & 0xFFFF0000 ) + ( primitivePolynomial & 0x0000FFFF ) ; GaloisField gf ; synchronized ( instances ) { gf = instances . get ( key ) ; if ( gf == null ) { gf = new GaloisField ( fieldSize , primitivePolynomial ) ; instances . put ( key , gf ) ; } } return gf ; }
Get the object performs Galois field arithmetics
32,154
public int add ( int x , int y ) { assert ( x >= 0 && x < getFieldSize ( ) && y >= 0 && y < getFieldSize ( ) ) ; return x ^ y ; }
Compute the sum of two fields
32,155
public int multiply ( int x , int y ) { assert ( x >= 0 && x < getFieldSize ( ) && y >= 0 && y < getFieldSize ( ) ) ; return mulTable [ x ] [ y ] ; }
Compute the multiplication of two fields
32,156
public int divide ( int x , int y ) { assert ( x >= 0 && x < getFieldSize ( ) && y > 0 && y < getFieldSize ( ) ) ; return divTable [ x ] [ y ] ; }
Compute the division of two fields
32,157
public int power ( int x , int n ) { assert ( x >= 0 && x < getFieldSize ( ) ) ; if ( n == 0 ) { return 1 ; } if ( x == 0 ) { return 0 ; } x = logTable [ x ] * n ; if ( x < primitivePeriod ) { return powTable [ x ] ; } x = x % primitivePeriod ; return powTable [ x ] ; }
Compute power n of a field
32,158
public void solveVandermondeSystem ( int [ ] x , byte [ ] [ ] y , int len , int dataStart , int dataLen ) { assert ( x . length <= len && y . length <= len ) ; int dataEnd = dataStart + dataLen ; for ( int i = 0 ; i < len - 1 ; i ++ ) { for ( int j = len - 1 ; j > i ; j -- ) { for ( int k = dataStart ; k < dataEnd ; k ++ ) { y [ j ] [ k ] = ( byte ) ( y [ j ] [ k ] ^ mulTable [ x [ i ] ] [ y [ j - 1 ] [ k ] & 0x000000FF ] ) ; } } } for ( int i = len - 1 ; i >= 0 ; i -- ) { for ( int j = i + 1 ; j < len ; j ++ ) { for ( int k = dataStart ; k < dataEnd ; k ++ ) { y [ j ] [ k ] = ( byte ) ( divTable [ y [ j ] [ k ] & 0x000000FF ] [ x [ j ] ^ x [ j - i - 1 ] ] ) ; } } for ( int j = i ; j < len - 1 ; j ++ ) { for ( int k = dataStart ; k < dataEnd ; k ++ ) { y [ j ] [ k ] = ( byte ) ( y [ j ] [ k ] ^ y [ j + 1 ] [ k ] ) ; } } } }
A bulk version of the solveVandermondeSystem
32,159
public void substitute ( byte [ ] [ ] p , byte [ ] q , int x ) { substitute ( p , q , x , 0 , p [ 0 ] . length ) ; }
A bulk version of the substitute . Tends to be 2X faster than the int substitute in a loop .
32,160
private void snapshotConfig ( ) { synchronized ( configManager ) { maximum = configManager . getPoolMaximum ( poolInfo , getType ( ) ) ; minimum = configManager . getPoolMinimum ( poolInfo , getType ( ) ) ; weight = configManager . getWeight ( poolInfo ) ; priority = configManager . getPriority ( poolInfo ) ; preemptable = configManager . isPoolPreemptable ( poolInfo ) ; shareStarvingRatio = configManager . getShareStarvingRatio ( ) ; minPreemptPeriod = configManager . getMinPreemptPeriod ( ) ; starvingTimeForShare = configManager . getStarvingTimeForShare ( ) ; starvingTimeForMinimum = configManager . getStarvingTimeForMinimum ( ) ; } }
Get the snapshot of the configuration from the configuration manager . Synchronized on config manager to ensure that config is atomically updated per pool .
32,161
public void addSession ( String id , Session session ) { synchronized ( session ) { SessionSchedulable schedulable = new SessionSchedulable ( session , getType ( ) ) ; idToSession . put ( id , schedulable ) ; } }
Add a session to the pool
32,162
public Queue < SessionSchedulable > getScheduleQueue ( ) { if ( scheduleQueue == null ) { scheduleQueue = createSessionQueue ( configManager . getPoolComparator ( poolInfo ) ) ; } return scheduleQueue ; }
Get the queue of sessions sorted for scheduling
32,163
public Queue < SessionSchedulable > getPreemptQueue ( ) { if ( preemptQueue == null ) { ScheduleComparator comparator = null ; switch ( configManager . getPoolComparator ( poolInfo ) ) { case FIFO : comparator = ScheduleComparator . FIFO_PREEMPT ; break ; case FAIR : comparator = ScheduleComparator . FAIR_PREEMPT ; break ; case DEADLINE : comparator = ScheduleComparator . DEADLINE_PREEMPT ; break ; default : throw new IllegalArgumentException ( "Unknown comparator" ) ; } preemptQueue = createSessionQueue ( comparator ) ; } return preemptQueue ; }
Get the queue of sessions sorted for preemption
32,164
public Queue < SessionSchedulable > createSessionQueue ( ScheduleComparator comparator ) { int initCapacity = snapshotSessions . size ( ) == 0 ? 1 : snapshotSessions . size ( ) ; Queue < SessionSchedulable > sessionQueue = new PriorityQueue < SessionSchedulable > ( initCapacity , comparator ) ; sessionQueue . addAll ( snapshotSessions ) ; return sessionQueue ; }
Get the queue of sessions in the pool sorted by comparator
32,165
public boolean isStarving ( long now ) { double starvingShare = getShare ( ) * shareStarvingRatio ; if ( getGranted ( ) >= Math . ceil ( starvingShare ) ) { lastTimeAboveStarvingShare = now ; } if ( getGranted ( ) >= Math . min ( getShare ( ) , getMinimum ( ) ) ) { lastTimeAboveMinimum = now ; } if ( now - lastPreemptTime < getMinPreemptPeriod ( ) ) { return false ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Pool:" + getName ( ) + " lastTimeAboveMinimum:" + lastTimeAboveMinimum + " lastTimeAboveStarvingShare:" + lastTimeAboveStarvingShare + " minimumStarvingTime:" + getMinimumStarvingTime ( now ) + " shareStarvingTime:" + getShareStarvingTime ( now ) + " starvingTime:" + getStarvingTime ( now ) ) ; } if ( getMinimumStarvingTime ( now ) >= 0 ) { LOG . info ( "Pool:" + getName ( ) + " for type:" + getType ( ) + " is starving min:" + getMinimum ( ) + " granted:" + getGranted ( ) ) ; lastPreemptTime = now ; return true ; } if ( getShareStarvingTime ( now ) >= 0 ) { LOG . info ( "Pool:" + getName ( ) + " for type:" + getType ( ) + " is starving share:" + getShare ( ) + " starvingRatio:" + shareStarvingRatio + " starvingShare:" + starvingShare + " granted:" + getGranted ( ) ) ; lastPreemptTime = now ; return true ; } return false ; }
Check if the pool is starving now or not
32,166
public long getStarvingTime ( long now ) { long starvingTime = Math . max ( getMinimumStarvingTime ( now ) , getShareStarvingTime ( now ) ) ; return starvingTime ; }
Get the amount of time the pool was starving for either its min share or its share
32,167
private static int getMapCount ( int srcCount , int numNodes ) { int numMaps = ( int ) ( srcCount / OP_PER_MAP ) ; numMaps = Math . min ( numMaps , numNodes * MAX_MAPS_PER_NODE ) ; return Math . max ( numMaps , 1 ) ; }
Calculate how many maps to run .
32,168
public synchronized void registerPrimarySsId ( String address , Long ssid ) throws IOException { String node = getSsIdNode ( address ) ; zkCreateRecursively ( node , SerializableUtils . toBytes ( ssid ) , true , ssid . toString ( ) ) ; }
Creates a node in zookeeper denoting the current session id of the primary avatarnode of the cluster . The primary avatarnode always syncs this information to zookeeper when it starts .
32,169
public synchronized void registerLastTxId ( String address , ZookeeperTxId lastTxid ) throws IOException { String node = getLastTxIdNode ( address ) ; zkCreateRecursively ( node , lastTxid . toBytes ( ) , true , lastTxid . toString ( ) ) ; }
Creates a node in zookeeper denoting the current session id and the last transaction id processed by the primary avatarnode . This is used by the primary avatarnode when it shuts down cleanly .
32,170
public Long getPrimarySsId ( String address , boolean sync ) throws IOException , KeeperException , InterruptedException , ClassNotFoundException { Stat stat = new Stat ( ) ; String node = getSsIdNode ( address ) ; byte [ ] data = getNodeData ( node , stat , false , sync ) ; if ( data == null ) { return null ; } return ( Long ) SerializableUtils . getFromBytes ( data , Long . class ) ; }
Retrieves the current session id for the cluster from zookeeper .
32,171
public ZookeeperTxId getPrimaryLastTxId ( String address , boolean sync ) throws IOException , KeeperException , InterruptedException , ClassNotFoundException { Stat stat = new Stat ( ) ; String node = getLastTxIdNode ( address ) ; byte [ ] data = getNodeData ( node , stat , false , sync ) ; if ( data == null ) { return null ; } return ZookeeperTxId . getFromBytes ( data ) ; }
Retrieves the last transaction id of the primary from zookeeper .
32,172
public String getPrimaryAvatarAddress ( String address , Stat stat , boolean retry ) throws IOException , KeeperException , InterruptedException { return getPrimaryAvatarAddress ( address , stat , retry , false ) ; }
Retrieves the primary address for the cluster this does not perform a sync before it reads the znode .
32,173
private void addJobJarToClassPath ( String localJarFile , StringBuffer classPath ) { File jobCacheDir = new File ( new Path ( localJarFile ) . getParent ( ) . toString ( ) ) ; File [ ] libs = new File ( jobCacheDir , "lib" ) . listFiles ( ) ; String sep = System . getProperty ( "path.separator" ) ; if ( libs != null ) { for ( int i = 0 ; i < libs . length ; i ++ ) { classPath . append ( sep ) ; classPath . append ( libs [ i ] ) ; } } classPath . append ( sep ) ; classPath . append ( new File ( jobCacheDir , "classes" ) ) ; classPath . append ( sep ) ; classPath . append ( jobCacheDir ) ; }
Given the path to the localized job jar file add it s constituents to the classpath
32,174
private static void appendSystemClasspath ( JobConf conf , String pathSeparator , StringBuffer classPath ) { String debugRuntime = conf . get ( "mapred.task.debug.runtime.classpath" ) ; if ( debugRuntime != null ) { classPath . append ( pathSeparator ) ; classPath . append ( debugRuntime ) ; } String systemClasspath = System . getenv ( MAPREDUCE_TASK_SYSTEM_CLASSPATH_PROPERTY ) ; if ( systemClasspath == null ) { systemClasspath = System . getProperty ( "java.class.path" ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "System classpath " + systemClasspath ) ; } classPath . append ( pathSeparator ) ; classPath . append ( systemClasspath ) ; }
Add the system class path to a class path
32,175
public synchronized void setNewWindows ( ArrayList < Long > newWindows ) throws IOException { if ( newWindows . size ( ) != windows . size ( ) ) { throw new IOException ( "Number of new windows need to be the same as that of old ones" ) ; } Collections . sort ( newWindows ) ; for ( int i = 0 ; i < newWindows . size ( ) ; i ++ ) { if ( newWindows . get ( i ) > windows . get ( i ) ) { throw new IOException ( "New window " + newWindows . get ( i ) + " should be smaller than the old one " + windows . get ( i ) ) ; } windows . set ( i , newWindows . get ( i ) ) ; } }
Only for testing
32,176
private void reorderJobs ( JobInProgress job , JobSchedulingInfo oldInfo , QueueInfo qi ) { if ( qi . removeWaitingJob ( oldInfo ) != null ) { qi . addWaitingJob ( job ) ; } if ( qi . removeRunningJob ( oldInfo ) != null ) { qi . addRunningJob ( job ) ; } }
because of the change in the job priority or job start - time .
32,177
private void makeJobRunning ( JobInProgress job , JobSchedulingInfo oldInfo , QueueInfo qi ) { qi . addRunningJob ( job ) ; }
This is used to move a job from the waiting queue to the running queue .
32,178
private void jobStateChanged ( JobStatusChangeEvent event , QueueInfo qi ) { JobInProgress job = event . getJobInProgress ( ) ; JobSchedulingInfo oldJobStateInfo = new JobSchedulingInfo ( event . getOldStatus ( ) ) ; if ( event . getEventType ( ) == EventType . PRIORITY_CHANGED || event . getEventType ( ) == EventType . START_TIME_CHANGED ) { reorderJobs ( job , oldJobStateInfo , qi ) ; } else if ( event . getEventType ( ) == EventType . RUN_STATE_CHANGED ) { int runState = job . getStatus ( ) . getRunState ( ) ; if ( runState == JobStatus . SUCCEEDED || runState == JobStatus . FAILED || runState == JobStatus . KILLED ) { jobCompleted ( job , oldJobStateInfo , qi ) ; } else if ( runState == JobStatus . RUNNING ) { makeJobRunning ( job , oldJobStateInfo , qi ) ; } } }
Update the scheduler as job s state has changed
32,179
void splitKeyVal ( byte [ ] line , int length , Text key , Text val ) throws IOException { int numKeyFields = getNumOfKeyFields ( ) ; byte [ ] separator = getFieldSeparator ( ) ; int pos = UTF8ByteArrayUtils . findBytes ( line , 0 , length , separator ) ; for ( int k = 1 ; k < numKeyFields && pos != - 1 ; k ++ ) { pos = UTF8ByteArrayUtils . findBytes ( line , pos + separator . length , length , separator ) ; } try { if ( pos == - 1 ) { key . set ( line , 0 , length ) ; val . set ( "" ) ; } else { StreamKeyValUtil . splitKeyVal ( line , 0 , length , key , val , pos , separator . length ) ; } } catch ( CharacterCodingException e ) { LOG . warn ( StringUtils . stringifyException ( e ) ) ; } }
Split a line into key and value .
32,180
void write ( Object value ) throws IOException { if ( clientInputSerializer != null ) { clientInputSerializer . serialize ( value ) ; return ; } byte [ ] bval ; int valSize ; if ( value instanceof BytesWritable ) { BytesWritable val = ( BytesWritable ) value ; bval = val . getBytes ( ) ; valSize = val . getLength ( ) ; } else if ( value instanceof Text ) { Text val = ( Text ) value ; bval = val . getBytes ( ) ; valSize = val . getLength ( ) ; } else { String sval = value . toString ( ) ; bval = sval . getBytes ( "UTF-8" ) ; valSize = bval . length ; } clientOut_ . write ( bval , 0 , valSize ) ; }
Write a value to the output stream using UTF - 8 encoding
32,181
private void writeTag ( String tag , String value ) throws IOException { printIndents ( ) ; if ( value . length ( ) > 0 ) { write ( "<" + tag + ">" + value + "</" + tag + ">\n" ) ; } else { write ( "<" + tag + "/>\n" ) ; } }
Write an XML tag
32,182
void updateLeasedFiles ( SnapshotStorage ssStore ) throws IOException { FSNamesystem fsNamesys = ssStore . getFSNamesystem ( ) ; List < Block > blocksForNN = new ArrayList < Block > ( ) ; leaseUpdateThreadPool = new ThreadPoolExecutor ( 1 , maxLeaseUpdateThreads , 60 , TimeUnit . SECONDS , new LinkedBlockingQueue < Runnable > ( ) ) ; ( ( ThreadPoolExecutor ) leaseUpdateThreadPool ) . allowCoreThreadTimeOut ( true ) ; LightWeightLinkedSet < Lease > sortedLeases = fsNamesys . leaseManager . getSortedLeases ( ) ; Iterator < Lease > itr = sortedLeases . iterator ( ) ; while ( itr . hasNext ( ) ) { Lease lease = itr . next ( ) ; for ( String path : lease . getPaths ( ) ) { leaseUpdateThreadPool . execute ( new LeaseUpdateWorker ( conf , path , fsNamesys , blocksForNN ) ) ; } } try { leaseUpdateThreadPool . shutdown ( ) ; if ( ! leaseUpdateThreadPool . awaitTermination ( 1200 , TimeUnit . SECONDS ) ) { throw new IOException ( "Updating lease files failed" ) ; } } catch ( InterruptedException e ) { throw new IOException ( "Snapshot creation interrupted while updating leased files" ) ; } long [ ] blockIds = new long [ blocksForNN . size ( ) ] ; for ( int i = 0 ; i < blocksForNN . size ( ) ; ++ i ) { blockIds [ i ] = blocksForNN . get ( i ) . getBlockId ( ) ; } long [ ] lengths = namenode . getBlockLengths ( blockIds ) ; for ( int i = 0 ; i < blocksForNN . size ( ) ; ++ i ) { if ( lengths [ i ] == - 1 ) { LOG . error ( "Couldn't update length for block " + blocksForNN . get ( i ) ) ; } else { blocksForNN . get ( i ) . setNumBytes ( lengths [ i ] ) ; } } }
Tries to get the most up to date lengths of files under construction .
32,183
void downloadSnapshotFiles ( SnapshotStorage ssStore ) throws IOException { CheckpointSignature start = namenode . getCheckpointSignature ( ) ; ssStore . storage . setStorageInfo ( start ) ; CheckpointSignature end = null ; boolean success ; do { prepareDownloadDirs ( ) ; File [ ] srcNames = ssStore . getImageFiles ( ) ; assert srcNames . length == 1 : "No snapshot temporary dir." ; TransferFsImage . downloadImageToStorage ( fileServer , HdfsConstants . INVALID_TXID , ssStore , true , srcNames ) ; LOG . info ( "Downloaded file " + srcNames [ 0 ] . getName ( ) + " size " + srcNames [ 0 ] . length ( ) + " bytes." ) ; srcNames = ssStore . getEditsFiles ( ) ; assert srcNames . length == 1 : "No snapshot temporary dir." ; TransferFsImage . downloadEditsToStorage ( fileServer , new RemoteEditLog ( ) , ssStore , false ) ; LOG . info ( "Downloaded file " + srcNames [ 0 ] . getName ( ) + " size " + srcNames [ 0 ] . length ( ) + " bytes." ) ; try { srcNames = ssStore . getEditsNewFiles ( ) ; assert srcNames . length == 1 : "No snapshot temporary dir." ; TransferFsImage . downloadEditsToStorage ( fileServer , new RemoteEditLog ( ) , ssStore , true ) ; LOG . info ( "Downloaded file " + srcNames [ 0 ] . getName ( ) + " size " + srcNames [ 0 ] . length ( ) + " bytes." ) ; } catch ( FileNotFoundException e ) { } end = namenode . getCheckpointSignature ( ) ; success = end . checkpointTime == start . checkpointTime && end . checkpointState != CheckpointStates . UPLOAD_DONE ; start = end ; } while ( ! success ) ; }
Download fsimage edits and edits . new files from the name - node . Files will be downloaded in CURRENT_DIR
32,184
synchronized Lease reassignLease ( Lease lease , String src , String newHolder ) { assert newHolder != null : "new lease holder is null" ; LeaseOpenTime leaseOpenTime = null ; if ( lease != null ) { leaseOpenTime = removeLease ( lease , src ) ; } return addLease ( newHolder , src , leaseOpenTime != null ? leaseOpenTime . openTime : System . currentTimeMillis ( ) ) ; }
Reassign lease for file src to the new holder .
32,185
synchronized String findPath ( INodeFileUnderConstruction pendingFile ) throws IOException { Lease lease = getLease ( pendingFile . getClientName ( ) ) ; if ( lease != null ) { String src = lease . findPath ( pendingFile ) ; if ( src != null ) { return src ; } } throw new IOException ( "pendingFile (=" + pendingFile + ") not found." + "(lease=" + lease + ")" ) ; }
Find the pathname for the specified pendingFile
32,186
synchronized LeaseOpenTime removeLease ( Lease lease , String src ) { LeaseOpenTime leaseOpenTime = sortedLeasesByPath . remove ( src ) ; if ( ! lease . removePath ( src ) ) { LOG . error ( src + " not found in lease.paths (=" + lease . paths + ")" ) ; } if ( ! lease . hasPath ( ) ) { leases . remove ( lease . holder ) ; if ( ! sortedLeases . remove ( lease ) ) { LOG . error ( lease + " not found in sortedLeases" ) ; } } return leaseOpenTime ; }
Remove the specified lease and src .
32,187
synchronized void removeLease ( String holder , String src ) { Lease lease = getLease ( holder ) ; if ( lease != null ) { removeLease ( lease , src ) ; } }
Remove the lease for the specified holder and src
32,188
synchronized void checkLeases ( ) { int numPathsChecked = 0 ; for ( ; sortedLeases . size ( ) > 0 ; ) { final Lease oldest = sortedLeases . first ( ) ; if ( ! oldest . expiredHardLimit ( ) ) { return ; } String [ ] leasePaths = new String [ oldest . getPaths ( ) . size ( ) ] ; oldest . getPaths ( ) . toArray ( leasePaths ) ; LOG . info ( "Lease " + oldest + " has expired hard limit. Recovering lease for paths: " + Arrays . toString ( leasePaths ) ) ; for ( String p : leasePaths ) { if ( ++ numPathsChecked > this . maxPathsPerCheck ) { return ; } try { fsnamesystem . getFSNamesystemMetrics ( ) . numLeaseRecoveries . inc ( ) ; fsnamesystem . internalReleaseLeaseOne ( oldest , p , this . discardLastBlockIfNoSync ) ; } catch ( IOException e ) { LOG . error ( "Cannot release the path " + p + " in the lease " + oldest , e ) ; removeLease ( oldest , p ) ; fsnamesystem . getFSNamesystemMetrics ( ) . numLeaseManagerMonitorExceptions . inc ( ) ; } } } }
Check the leases beginning from the oldest .
32,189
public Progress addPhase ( String status ) { Progress phase = addPhase ( ) ; phase . setStatus ( status ) ; return phase ; }
Adds a named node to the tree .
32,190
public void complete ( ) { Progress myParent ; synchronized ( this ) { progress = 1.0f ; myParent = parent ; } if ( myParent != null ) { myParent . startNextPhase ( ) ; } }
Completes this node moving the parent node to its next child .
32,191
public synchronized float get ( ) { Progress node = this ; while ( node . parent != null ) { node = parent ; } return node . getInternal ( ) ; }
and the node s parent never changes . Still it doesn t hurt .
32,192
private synchronized float getInternal ( ) { int phaseCount = phases . size ( ) ; if ( phaseCount != 0 ) { float subProgress = currentPhase < phaseCount ? phase ( ) . getInternal ( ) : 0.0f ; return progressPerPhase * ( currentPhase + subProgress ) ; } else { return progress ; } }
Computes progress in this node .
32,193
private synchronized void reloadLocations ( ) { map . clear ( ) ; for ( HadoopServer location : ServerRegistry . getInstance ( ) . getServers ( ) ) map . put ( location , new DFSLocation ( provider , location ) ) ; }
Recompute the map of Hadoop locations
32,194
public static void incrLogMetrics ( Map < String , Long > incrMetrics ) { if ( incrMetrics == null || incrMetrics . size ( ) == 0 ) { return ; } MetricsRegistry registry = RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . getMetricsRegistry ( ) ; Map < String , MetricsTimeVaryingLong > logMetrics = RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . logMetrics ; synchronized ( logMetrics ) { for ( String key : incrMetrics . keySet ( ) ) { if ( ! logMetrics . containsKey ( key ) ) { logMetrics . put ( key , new MetricsTimeVaryingLong ( key , registry ) ) ; } ( ( MetricsTimeVaryingLong ) logMetrics . get ( key ) ) . inc ( incrMetrics . get ( key ) ) ; } } }
Increase logMetrics in the Raidnode metrics
32,195
static java . net . InetAddress getLocalAddress ( ) throws IOException { try { return java . net . InetAddress . getLocalHost ( ) ; } catch ( java . net . UnknownHostException e ) { throw new IOException ( e ) ; } }
A helper function to get the local address of the machine
32,196
private ServerSocket initializeServer ( CoronaConf conf ) throws IOException { ServerSocket sessionServerSocket = new ServerSocket ( 0 , 0 , getLocalAddress ( ) ) ; TServerSocket tServerSocket = new TServerSocket ( sessionServerSocket , conf . getCMSoTimeout ( ) ) ; TFactoryBasedThreadPoolServer . Args args = new TFactoryBasedThreadPoolServer . Args ( tServerSocket ) ; args . processor ( new SessionDriverServiceProcessor ( incoming ) ) ; args . transportFactory ( new TTransportFactory ( ) ) ; args . protocolFactory ( new TBinaryProtocol . Factory ( true , true ) ) ; args . stopTimeoutVal = 0 ; server = new TFactoryBasedThreadPoolServer ( args , new TFactoryBasedThreadPoolServer . DaemonThreadFactory ( ) ) ; return sessionServerSocket ; }
Start the SessionDriver callback server
32,197
public void setName ( String name ) throws IOException { if ( failException != null ) { throw failException ; } if ( name == null || name . length ( ) == 0 ) { return ; } sessionInfo . name = name ; SessionInfo newInfo = new SessionInfo ( sessionInfo ) ; cmNotifier . addCall ( new ClusterManagerService . sessionUpdateInfo_args ( sessionId , newInfo ) ) ; }
Set the name for this session in the ClusterManager
32,198
public void setPriority ( SessionPriority prio ) throws IOException { if ( failException != null ) { throw failException ; } sessionInfo . priority = prio ; SessionInfo newInfo = new SessionInfo ( sessionInfo ) ; cmNotifier . addCall ( new ClusterManagerService . sessionUpdateInfo_args ( sessionId , newInfo ) ) ; }
Set the priority for this session in the ClusterManager
32,199
public void setDeadline ( long sessionDeadline ) throws IOException { if ( failException != null ) { throw failException ; } sessionInfo . deadline = sessionDeadline ; SessionInfo newInfo = new SessionInfo ( sessionInfo ) ; cmNotifier . addCall ( new ClusterManagerService . sessionUpdateInfo_args ( sessionId , newInfo ) ) ; }
Set the deadline for this session in the ClusterManager