idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,400
/**
 * Check whether the log file has been rotated (its first line changed).
 * If so, reopen {@code reader} at the beginning of the new file and reset
 * the cached first line and read offset.
 */
public void checkForRotation() {
    BufferedReader probe = null;
    try {
        probe = new BufferedReader(new FileReader(file.getAbsoluteFile()));
        if (firstLine == null || (!firstLine.equals(probe.readLine()))) {
            // First line changed: the file was rotated, start over.
            reader.close();
            reader = new BufferedReader(new FileReader(file.getAbsoluteFile()));
            firstLine = null;
            offset = 0;
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        // Always close the probe reader; the original leaked it whenever
        // no rotation was detected.
        if (probe != null) {
            try {
                probe.close();
            } catch (IOException ignored) {
                // best-effort close of a read-only probe
            }
        }
    }
}
Check whether the log file has been rotated . If so start reading the file from the beginning .
32,401
/**
 * Get a partition from the key only.
 *
 * @param key key to partition on
 * @param value unused
 * @param numPartitions number of partitions (must be > 0)
 * @return partition index in [0, numPartitions)
 */
public static int getPartitionStatic(LongWritable key, LongWritable value, int numPartitions) {
    long k = key.get();
    // Math.abs(Long.MIN_VALUE) is still negative, which made the original
    // return a negative partition for that key. Map MIN_VALUE explicitly;
    // all other keys keep their original mapping.
    long abs = (k == Long.MIN_VALUE) ? 0 : Math.abs(k);
    return (int) (abs % numPartitions);
}
Get a partition from the key only
32,402
/**
 * Determine which mapper sent this key sum.
 *
 * @param key the sum key received
 * @param numReducers number of reducers
 * @param maxKeySpace maximum key space
 * @return id of the originating mapper
 */
private static int getMapperId(long key, int numReducers, int maxKeySpace) {
    final long shifted = key - getFirstSumKey(numReducers, maxKeySpace);
    return (int) (shifted / numReducers);
}
Which mapper sent this key sum?
32,403
// Extracts the attempt number from the "mapred.task.id" property (format:
// attempt_<jt>_<job>_<m|r>_<task>_<attempt>). Throws NullPointerException on a
// null conf and IllegalArgumentException when the property is missing or the
// task attempt id string is malformed.
public static int getAttemptId ( Configuration conf ) throws IllegalArgumentException { if ( conf == null ) { throw new NullPointerException ( "Conf is null" ) ; } String taskId = conf . get ( "mapred.task.id" ) ; if ( taskId == null ) { throw new IllegalArgumentException ( "Configuration does not contain the property mapred.task.id" ) ; } String [ ] parts = taskId . split ( "_" ) ; if ( parts . length != 6 || ! parts [ 0 ] . equals ( "attempt" ) || ( ! "m" . equals ( parts [ 3 ] ) && ! "r" . equals ( parts [ 3 ] ) ) ) { throw new IllegalArgumentException ( "TaskAttemptId string : " + taskId + " is not properly formed" ) ; } return Integer . parseInt ( parts [ 5 ] ) ; }
Get the attempt number
32,404
/**
 * Update fields when a new registration request comes in.
 * Note that this intentionally does not update storageID.
 */
public void updateRegInfo(DatanodeID nodeReg) {
    this.name = nodeReg.getName();
    this.infoPort = nodeReg.getInfoPort();
    this.ipcPort = nodeReg.getIpcPort();
}
Update fields when a new registration request comes in . Note that this does not update storageID .
32,405
/**
 * Decode the erased positions one byte-column at a time: for each byte index,
 * gather that column from every read buffer, run {@link #decode}, and scatter
 * the recovered bytes into the write buffers. Subclasses may override this to
 * provide their own bulk-decode behavior.
 */
public void decodeBulk(byte[][] readBufs, byte[][] writeBufs, int[] erasedLocations) throws IOException {
    final int numInputs = readBufs.length;
    final int numErased = erasedLocations.length;
    final int numBytes = readBufs[0].length;
    final int[] inputColumn = new int[numInputs];
    final int[] decodedColumn = new int[numErased];
    for (int idx = 0; idx < numBytes; idx++) {
        for (int i = 0; i < numErased; i++) {
            decodedColumn[i] = 0;
        }
        for (int i = 0; i < numInputs; i++) {
            // widen to int, dropping sign extension
            inputColumn[i] = readBufs[i][idx] & 0xFF;
        }
        decode(inputColumn, erasedLocations, decodedColumn);
        for (int i = 0; i < numErased; i++) {
            writeBufs[i][idx] = (byte) decodedColumn[i];
        }
    }
}
This method would be overridden in the subclass so that the subclass will have its own decodeBulk behavior .
32,406
/**
 * Returns the Unsafe-using Comparer, or falls back to the pure-Java
 * implementation if the Unsafe comparer class cannot be loaded.
 */
static Comparer<byte[]> getBestComparer() {
    try {
        Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);
        @SuppressWarnings("unchecked")
        Comparer<byte[]> comparer = (Comparer<byte[]>) theClass.getEnumConstants()[0];
        return comparer;
    } catch (Throwable t) {
        // Log the cause too; the original swallowed it, hiding why the
        // fallback was taken.
        LOG.error("Loading lexicographicalComparerJavaImpl...", t);
        return lexicographicalComparerJavaImpl();
    }
}
Returns the Unsafe - using Comparer or falls back to the pure - Java implementation if unable to do so .
32,407
/**
 * Returns the names of all of the factory's attributes.
 *
 * @return array of attribute names (never null)
 */
public String[] getAttributeNames() {
    // Let the collection copy its keys instead of the original's manual,
    // raw-typed Iterator loop with per-element casts.
    return (String[]) attributeMap.keySet().toArray(new String[attributeMap.size()]);
}
Returns the names of all of the factory's attributes.
32,408
/**
 * Returns a null context - one which does nothing - for the given name,
 * creating and caching it on first use.
 */
public static synchronized MetricsContext getNullContext(String contextName) {
    MetricsContext cached = nullContextMap.get(contextName);
    if (cached != null) {
        return cached;
    }
    cached = new NullContext();
    nullContextMap.put(contextName, cached);
    return cached;
}
Returns a null context - one which does nothing .
32,409
/**
 * Parse parameters starting from the given position. A dash-prefixed token is
 * either a known option (recorded in {@code options}) or, if it parses as a
 * long, treated as a numeric parameter; anything else dash-prefixed is an
 * error. Validates the final parameter count against minPar/maxPar.
 *
 * @throws IllegalArgumentException on an unknown option or a bad arg count
 */
public List<String> parse(String[] args, int pos) {
    final List<String> parameters = new ArrayList<String>();
    for (; pos < args.length; pos++) {
        final String arg = args[pos];
        if (arg.charAt(0) == '-' && arg.length() > 1) {
            final String opt = arg.substring(1);
            if (options.containsKey(opt)) {
                options.put(opt, Boolean.TRUE);
            } else {
                try {
                    // A negative number looks like an option; accept it if
                    // it parses as a long.
                    Long.parseLong(arg);
                    parameters.add(arg);
                } catch (NumberFormatException e) {
                    throw new IllegalArgumentException("Illegal option " + arg);
                }
            }
        } else {
            parameters.add(arg);
        }
    }
    final int count = parameters.size();
    if (count < minPar || count > maxPar) {
        throw new IllegalArgumentException("Illegal number of arguments");
    }
    return parameters;
}
Parse parameters starting from the given position
32,410
// Emits a metrics record to a file. Rolls the output file when the day of
// month changes since the last emitted record, then writes one line:
// "<date> <context>.<record>: tag=..., metric=...".
// NOTE(review): if fileName is null, 'writer' is assumed to have been set up
// elsewhere before the first call — confirm against the class initializer.
public void emitRecord ( String contextName , String recordName , OutputRecord outRec ) throws IOException { Calendar currentDate = Calendar . getInstance ( ) ; if ( fileName != null ) { if ( currentDate . get ( Calendar . DAY_OF_MONTH ) != lastRecordDate . get ( Calendar . DAY_OF_MONTH ) ) { file = new File ( getFullFileName ( currentDate ) ) ; if ( writer != null ) writer . close ( ) ; writer = new PrintWriter ( new FileWriter ( file , true ) ) ; } } writer . print ( recordDateFormat . format ( currentDate . getTime ( ) ) ) ; writer . print ( " " ) ; writer . print ( contextName ) ; writer . print ( "." ) ; writer . print ( recordName ) ; String separator = ": " ; for ( String tagName : outRec . getTagNames ( ) ) { writer . print ( separator ) ; separator = ", " ; writer . print ( tagName ) ; writer . print ( "=" ) ; writer . print ( outRec . getTag ( tagName ) ) ; } for ( String metricName : outRec . getMetricNames ( ) ) { writer . print ( separator ) ; separator = ", " ; writer . print ( metricName ) ; writer . print ( "=" ) ; writer . print ( outRec . getMetric ( metricName ) ) ; } writer . println ( ) ; lastRecordDate = currentDate ; }
Emits a metrics record to a file .
32,411
/**
 * Makes sure that checksumBytes has enough capacity, and sets its limit to
 * the number of checksum bytes that need to be read for {@code dataLen}
 * bytes of data.
 */
private void adjustChecksumBytes(int dataLen) {
    // ceil(dataLen / bytesPerChecksum) chunks, checksumSize bytes each
    final int numChunks = (dataLen + bytesPerChecksum - 1) / bytesPerChecksum;
    final int requiredSize = numChunks * checksumSize;
    if (checksumBytes == null || checksumBytes.capacity() < requiredSize) {
        checksumBytes = ByteBuffer.wrap(new byte[requiredSize]);
    } else {
        checksumBytes.clear();
    }
    checksumBytes.limit(requiredSize);
}
Makes sure that checksumBytes has enough capacity and limit is set to the number of checksum bytes needed to be read .
32,412
// Read the block length information (finalized flag + updated length) from
// the data stream, plus datanode profiling data when the transfer version
// supports it. No-op unless transferBlockSize is set.
private synchronized void readBlockSizeInfo ( ) throws IOException { if ( ! transferBlockSize ) { return ; } blkLenInfoUpdated = true ; isBlockFinalized = in . readBoolean ( ) ; updatedBlockLength = in . readLong ( ) ; if ( dataTransferVersion >= DataTransferProtocol . READ_PROFILING_VERSION ) { readDataNodeProfilingData ( ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "ifBlockComplete? " + isBlockFinalized + " block size: " + updatedBlockLength ) ; } }
Read the block length information from data stream
32,413
// Convenience overload: creates a BlockReader with default values for the
// client name, bytes-to-check-read-speed, slow-read threshold, reuse flag,
// client cache and read options by delegating to the full newBlockReader.
public static BlockReader newBlockReader ( int dataTransferVersion , int namespaceId , Socket sock , String file , long blockId , long genStamp , long startOffset , long len , int bufferSize , boolean verifyChecksum ) throws IOException { return newBlockReader ( dataTransferVersion , namespaceId , sock , file , blockId , genStamp , startOffset , len , bufferSize , verifyChecksum , "" , Long . MAX_VALUE , - 1 , false , null , new ReadOptions ( ) ) ; }
Creates a BlockReader with default values for the optional parameters by delegating to the full newBlockReader overload.
32,414
// Creates the RunningJob for a Corona task and starts a named
// JobTrackerReporter thread that sends heartbeats to the session's job
// tracker; the reporter is registered in jobTrackerReporters for later
// shutdown in purgeJob.
protected RunningJob createRunningJob ( JobID jobId , TaskInProgress tip ) throws IOException { CoronaSessionInfo info = ( CoronaSessionInfo ) ( tip . getExtensible ( ) ) ; RunningJob rJob = new RunningJob ( jobId , null , info ) ; JobTrackerReporter reporter = new JobTrackerReporter ( rJob , info . getJobTrackerAddr ( ) , info . getSecondaryTracker ( ) , info . getSessionHandle ( ) ) ; reporter . setName ( "JobTrackerReporter for " + jobId ) ; reporter . start ( ) ; jobTrackerReporters . put ( jobId , reporter ) ; return rJob ; }
Override this method to create the proper jobClient and the thread that sends jobTracker heartbeat .
32,415
// Shuts down the heartbeat reporter for the killed job (if any), delegates
// the rest of the purge to the superclass, then returns the job's release.
protected synchronized void purgeJob ( KillJobAction action ) throws IOException { JobID jobId = action . getJobID ( ) ; JobTrackerReporter reporter = jobTrackerReporters . remove ( jobId ) ; if ( reporter != null ) { reporter . shutdown ( ) ; } super . purgeJob ( action ) ; crReleaseManager . returnRelease ( jobId ) ; }
Override this to shut down the heartbeat to the corresponding job tracker.
32,416
// Launch a new JVM for the task: wraps the JVM command so stdout/stderr are
// captured to the task log files (with setsid), then executes it via a
// ShellCommandExecutor which is also stashed on the context for later kill.
void launchTaskJVM ( TaskController . TaskControllerContext context ) throws IOException { JvmEnv env = context . env ; List < String > wrappedCommand = TaskLog . captureOutAndError ( env . setup , env . vargs , env . stdout , env . stderr , env . logSize , true ) ; ShellCommandExecutor shexec = new ShellCommandExecutor ( wrappedCommand . toArray ( new String [ 0 ] ) , env . workDir , env . env ) ; context . shExec = shexec ; shexec . execute ( ) ; }
Launch a new JVM for the task .
32,417
// Ask the viewer for this content to refresh; the refresh is posted to the
// UI thread asynchronously. No-op when no viewer is attached.
void refresh ( ) { if ( this . viewer == null ) return ; Display . getDefault ( ) . asyncExec ( new Runnable ( ) { public void run ( ) { DFSContentProvider . this . viewer . refresh ( ) ; } } ) ; }
Ask the viewer for this content to refresh
32,418
// Ask the viewer to refresh a single element on the UI thread; falls back to
// a full refresh when no structured viewer is attached.
void refresh ( final DFSContent content ) { if ( this . sviewer != null ) { Display . getDefault ( ) . asyncExec ( new Runnable ( ) { public void run ( ) { DFSContentProvider . this . sviewer . refresh ( content ) ; } } ) ; } else { refresh ( ) ; } }
Ask the viewer to refresh a single element
32,419
/**
 * Construct the task log URL served by a task tracker's tasklog servlet.
 *
 * @param taskTrackerHostName host name of the task tracker
 * @param httpPort HTTP port of the task tracker, as a string
 * @param taskAttemptID the task attempt whose logs are wanted
 * @return the full tasklog URL
 */
public static String getTaskLogUrl(String taskTrackerHostName, String httpPort, String taskAttemptID) {
    StringBuilder url = new StringBuilder("http://");
    url.append(taskTrackerHostName).append(":").append(httpPort);
    url.append("/tasklog?taskid=").append(taskAttemptID);
    return url.toString();
}
Construct the taskLogUrl
32,420
/**
 * Find the index of the next character that needs XML quoting ('<', '>' or
 * '&') in data[offset, end). Returns {@code end} when none is found.
 */
private static int findFirstQuotable(byte[] data, int offset, int end) {
    for (int i = offset; i < end; i++) {
        final byte b = data[i];
        if (b == '<' || b == '>' || b == '&') {
            return i;
        }
    }
    return end;
}
Find the next quotable character in the given array .
32,421
// Take snapshots for all pool groups and sessions, invalidate the cached
// schedule/preempt queues, and make sure every configured pool has a
// schedulable (creating empty ones so they show up in the UI/scheduling).
public void snapshot ( ) { snapshotPoolGroups = new ArrayList < PoolGroupSchedulable > ( nameToPoolGroup . values ( ) ) ; for ( PoolGroupSchedulable poolGroup : snapshotPoolGroups ) { poolGroup . snapshot ( ) ; } scheduleQueue = null ; preemptQueue = null ; Collection < PoolInfo > configuredPoolInfos = configManager . getConfiguredPoolInfos ( ) ; if ( configuredPoolInfos != null ) { for ( PoolInfo poolInfo : configuredPoolInfos ) { getPoolSchedulable ( poolInfo ) ; } } }
Take snapshots for all pools groups and sessions .
32,422
/**
 * Get the queue of pool groups sorted for scheduling, building and caching
 * it lazily with the FAIR comparator.
 */
public Queue<PoolGroupSchedulable> getScheduleQueue() {
    if (scheduleQueue != null) {
        return scheduleQueue;
    }
    scheduleQueue = createPoolGroupQueue(ScheduleComparator.FAIR);
    return scheduleQueue;
}
Get the queue of pool groups sorted for scheduling
32,423
/**
 * Get the queue of pool groups sorted for preemption, building and caching
 * it lazily with the FAIR_PREEMPT comparator.
 */
public Queue<PoolGroupSchedulable> getPreemptQueue() {
    if (preemptQueue != null) {
        return preemptQueue;
    }
    preemptQueue = createPoolGroupQueue(ScheduleComparator.FAIR_PREEMPT);
    return preemptQueue;
}
Get the queue of the pool groups sorted for preemption
32,424
/**
 * Put all the snapshotted pool groups into a priority queue ordered by the
 * given comparator.
 */
private Queue<PoolGroupSchedulable> createPoolGroupQueue(ScheduleComparator comparator) {
    // PriorityQueue rejects a zero initial capacity, so floor it at 1.
    final int initCapacity = Math.max(1, snapshotPoolGroups.size());
    final Queue<PoolGroupSchedulable> queue =
        new PriorityQueue<PoolGroupSchedulable>(initCapacity, comparator);
    queue.addAll(snapshotPoolGroups);
    return queue;
}
Put all the pool groups into the priority queue sorted by a comparator
32,425
// Add a session to the scheduler: resolves the session's pool info (possibly
// redirected to a default), logs the mapping, and registers the session with
// the pool's schedulable.
public void addSession ( String id , Session session ) { PoolInfo poolInfo = getPoolInfo ( session ) ; LOG . info ( "Session " + id + " added to pool info " + poolInfo + " (originally " + session . getInfo ( ) . getPoolInfoStrings ( ) + ") for " + type ) ; getPoolSchedulable ( poolInfo ) . addSession ( id , session ) ; }
Add a session to the scheduler
32,426
// If the cluster is in configured-pools-only mode, reject sessions whose pool
// info is unset, not among the configured pools, or contains illegal
// characters; otherwise no-op. Throws InvalidSessionHandle on each failure.
public static void checkPoolInfoIfStrict ( PoolInfo poolInfo , ConfigManager configManager , CoronaConf conf ) throws InvalidSessionHandle { if ( ! conf . onlyAllowConfiguredPools ( ) ) { return ; } if ( poolInfo == null ) { throw new InvalidSessionHandle ( "This cluster is operating in " + "configured pools only mode. The pool group " + "and pool was not specified. Please use the Corona parameter " + CoronaConf . EXPLICIT_POOL_PROPERTY + " to set a valid poolgroup and " + "pool in the format '<poolgroup>.<pool>'" ) ; } if ( ! configManager . isConfiguredPoolInfo ( poolInfo ) ) { throw new InvalidSessionHandle ( "This cluster is operating in " + "configured pools only mode. The pool group " + "and pool was specified as '" + poolInfo . getPoolGroupName ( ) + "." + poolInfo . getPoolName ( ) + "' and is not part of this cluster. " + "Please use the Corona parameter " + CoronaConf . EXPLICIT_POOL_PROPERTY + " to set a valid pool " + "group and pool in the format <poolgroup>.<pool>" ) ; } if ( ! PoolInfo . isLegalPoolInfo ( poolInfo ) ) { throw new InvalidSessionHandle ( "This cluster is operating in " + "configured pools only mode. The pool group " + "and pool was specified as '" + poolInfo . getPoolGroupName ( ) + "." + poolInfo . getPoolName ( ) + "' and has illegal characters (Something not in " + PoolInfo . INVALID_REGEX + "). Please use the Corona parameter " + CoronaConf . EXPLICIT_POOL_PROPERTY + " to set a valid pool " + "group and pool in the format <poolgroup>.<pool>" ) ; } }
If the cluster is set to configured pools only do not allow unset pool information or pool info that doesn t match a valid pool info . Throws an InvalidSessionHandle exception in either of the failure cases .
32,427
// Get the pool info for a session. Sessions with no pool (or an empty pool
// name) go to a per-user pool in the default group; an illegal pool info is
// logged and replaced with the default pool info. Redirection should happen
// prior to this.
public static PoolInfo getPoolInfo ( Session session ) { PoolInfo poolInfo = session . getPoolInfo ( ) ; if ( poolInfo == null || poolInfo . getPoolName ( ) . equals ( "" ) ) { poolInfo = new PoolInfo ( DEFAULT_POOL_GROUP , session . getUserId ( ) ) ; } if ( ! PoolInfo . isLegalPoolInfo ( poolInfo ) ) { LOG . warn ( "Illegal pool info :" + poolInfo + " from session " + session . getSessionId ( ) ) ; return DEFAULT_POOL_INFO ; } return poolInfo ; }
Get the pool name for a given session using the default pool information if the name is illegal . Redirection should happen prior to this .
32,428
// Get the PoolSchedulable for the given pool info, creating the enclosing
// pool group on demand. putIfAbsent handles a concurrent creator winning the
// race: the loser discards its instance and uses the existing one.
private PoolSchedulable getPoolSchedulable ( PoolInfo poolInfo ) { PoolGroupSchedulable poolGroup = nameToPoolGroup . get ( poolInfo . getPoolGroupName ( ) ) ; if ( poolGroup == null ) { poolGroup = new PoolGroupSchedulable ( poolInfo . getPoolGroupName ( ) , type , configManager ) ; PoolGroupSchedulable prevPoolGroup = nameToPoolGroup . putIfAbsent ( poolInfo . getPoolGroupName ( ) , poolGroup ) ; if ( prevPoolGroup != null ) { poolGroup = prevPoolGroup ; } } return poolGroup . getPool ( poolInfo ) ; }
Get the Schedulable representing the pool with a given name. If it doesn't exist, create it and add it to the list.
32,429
// Call ioprio_get for this thread if the native library supports it.
// Unsupported/unlinked failures permanently disable further attempts and
// return -1; a real NativeIOException is rethrown.
public static int ioprioGetIfPossible ( ) throws IOException { if ( nativeLoaded && ioprioPossible ) { try { return ioprio_get ( ) ; } catch ( UnsupportedOperationException uoe ) { LOG . warn ( "ioprioGetIfPossible() failed" , uoe ) ; ioprioPossible = false ; } catch ( UnsatisfiedLinkError ule ) { LOG . warn ( "ioprioGetIfPossible() failed" , ule ) ; ioprioPossible = false ; } catch ( NativeIOException nie ) { LOG . warn ( "ioprioGetIfPossible() failed" , nie ) ; throw nie ; } } return - 1 ; }
Call ioprio_get for this thread .
32,430
// Call posix_fadvise on the given file descriptor; see the syscall manpage.
// On systems where the call is unavailable this does nothing (and disables
// further attempts); a real NativeIOException is rethrown.
public static void posixFadviseIfPossible ( FileDescriptor fd , long offset , long len , int flags ) throws NativeIOException { if ( nativeLoaded && fadvisePossible ) { try { posix_fadvise ( fd , offset , len , flags ) ; InjectionHandler . processEvent ( InjectionEventCore . NATIVEIO_POSIX_FADVISE , flags ) ; } catch ( UnsupportedOperationException uoe ) { LOG . warn ( "posixFadviseIfPossible() failed" , uoe ) ; fadvisePossible = false ; } catch ( UnsatisfiedLinkError ule ) { LOG . warn ( "posixFadviseIfPossible() failed" , ule ) ; fadvisePossible = false ; } catch ( NativeIOException nie ) { LOG . warn ( "posixFadviseIfPossible() failed" , nie ) ; throw nie ; } } }
Call posix_fadvise on the given file descriptor . See the manpage for this syscall for more information . On systems where this call is not available does nothing .
32,431
// Call sync_file_range on the given file descriptor; see the syscall
// manpage. On systems where the call is unavailable this does nothing (and
// disables further attempts); a real NativeIOException is rethrown with the
// call arguments logged for diagnosis.
public static void syncFileRangeIfPossible ( FileDescriptor fd , long offset , long nbytes , int flags ) throws NativeIOException { InjectionHandler . processEvent ( InjectionEventCore . NATIVEIO_SYNC_FILE_RANGE , flags ) ; if ( nativeLoaded && syncFileRangePossible ) { try { sync_file_range ( fd , offset , nbytes , flags ) ; } catch ( UnsupportedOperationException uoe ) { LOG . warn ( "syncFileRangeIfPossible() failed" , uoe ) ; syncFileRangePossible = false ; } catch ( UnsatisfiedLinkError ule ) { LOG . warn ( "syncFileRangeIfPossible() failed" , ule ) ; syncFileRangePossible = false ; } catch ( NativeIOException nie ) { LOG . warn ( "syncFileRangeIfPossible() failed: fd " + fd + " offset " + offset + " nbytes " + nbytes + " flags " + flags , nie ) ; throw nie ; } } }
Call sync_file_range on the given file descriptor . See the manpage for this syscall for more information . On systems where this call is not available does nothing .
32,432
/**
 * Creates a WriteOptions from the given overwrite and forceSync values.
 * A null argument leaves the corresponding option at its default value.
 */
public static WriteOptions writeOptions(Boolean overwrite, Boolean forceSync) {
    final WriteOptions options = new WriteOptions();
    if (overwrite != null) {
        options.setOverwrite(overwrite);
    }
    if (forceSync != null) {
        options.setForcesync(forceSync);
    }
    return options;
}
Creates a WriteOptions from given overwrite and forceSync values . If null passed it will use their default value .
32,433
/**
 * Get the option of the desired exact class from the varargs list, or null
 * when absent (or when the list itself is null).
 *
 * @throws IllegalArgumentException if the class appears more than once
 */
public static CreateOptions getOpt(Class<? extends CreateOptions> theClass, CreateOptions... opts) {
    if (opts == null) {
        return null;
    }
    CreateOptions found = null;
    for (CreateOptions opt : opts) {
        if (opt.getClass() == theClass) {
            if (found != null) {
                throw new IllegalArgumentException("Multiple args with type " + theClass);
            }
            found = opt;
        }
    }
    return found;
}
Get an option of desired type
32,434
/**
 * Set an option: replace an existing option of the same class in place, or
 * append the new option to a copy of the array.
 *
 * @param newValue option to set
 * @param opts existing options; null is treated as empty
 * @return array containing newValue (may be {@code opts} itself on replace)
 * @throws IllegalArgumentException if the class already appears twice
 */
public static <T extends CreateOptions> CreateOptions[] setOpt(T newValue, CreateOptions... opts) {
    if (opts == null) {
        // The original fell through to opts.length and threw NPE here.
        return new CreateOptions[] { newValue };
    }
    boolean alreadyInOpts = false;
    for (int i = 0; i < opts.length; ++i) {
        if (opts[i].getClass() == newValue.getClass()) {
            if (alreadyInOpts) {
                throw new IllegalArgumentException("Multiple args with type " + newValue.getClass());
            }
            alreadyInOpts = true;
            opts[i] = newValue; // replace in place, as the original did
        }
    }
    if (alreadyInOpts) {
        return opts;
    }
    CreateOptions[] newOpts = new CreateOptions[opts.length + 1];
    System.arraycopy(opts, 0, newOpts, 0, opts.length);
    newOpts[opts.length] = newValue;
    return newOpts;
}
set an option
32,435
// While writing to mirrorOut, a failure to write to the mirror should not
// affect this datanode: log and set mirrorError unless this thread was
// interrupted (in which case rethrow). NOTE(review): Thread.interrupted()
// clears the interrupt flag as a side effect — confirm this is intended.
private void handleMirrorOutError ( IOException ioe ) throws IOException { LOG . info ( datanode . getDatanodeInfo ( ) + ": Exception writing block " + block + " namespaceId: " + namespaceId + " to mirror " + mirrorAddr + "\n" + StringUtils . stringifyException ( ioe ) ) ; if ( Thread . interrupted ( ) ) { throw ioe ; } else { mirrorError = true ; } }
While writing to mirrorOut, a failure to write to the mirror should not affect this datanode.
32,436
// Verify multiple CRC chunks. Walks the data buffer chunk by chunk (the
// first chunk may start mid-chunk at firstChunkOffset), comparing each
// computed checksum against either the checksum buffer (checksum-first
// packet layout) or inline checksums in the data buffer. On mismatch,
// best-effort reports the block as corrupt to the namenode (when the source
// datanode is known) and throws IOException.
private void verifyChunks ( byte [ ] dataBuf , int dataOff , int len , byte [ ] checksumBuf , int checksumOff , int firstChunkOffset , int packetVersion ) throws IOException { int chunkOffset = firstChunkOffset ; while ( len > 0 ) { int chunkLen = Math . min ( len , bytesPerChecksum - chunkOffset ) ; chunkOffset = 0 ; checksum . update ( dataBuf , dataOff , chunkLen ) ; dataOff += chunkLen ; boolean checksumCorrect ; if ( packetVersion == DataTransferProtocol . PACKET_VERSION_CHECKSUM_FIRST ) { checksumCorrect = checksum . compare ( checksumBuf , checksumOff ) ; checksumOff += checksumSize ; } else { checksumCorrect = checksum . compare ( dataBuf , dataOff ) ; dataOff += checksumSize ; } if ( ! checksumCorrect ) { if ( srcDataNode != null ) { try { LOG . info ( "report corrupt block " + block + " from datanode " + srcDataNode + " to namenode" ) ; LocatedBlock lb = new LocatedBlock ( block , new DatanodeInfo [ ] { srcDataNode } ) ; datanode . reportBadBlocks ( namespaceId , new LocatedBlock [ ] { lb } ) ; } catch ( IOException e ) { LOG . warn ( "Failed to report bad block " + block + " from datanode " + srcDataNode + " to namenode" ) ; } } throw new IOException ( "Unexpected checksum mismatch " + "while writing " + block + " from " + inAddr ) ; } checksum . reset ( ) ; len -= chunkLen ; } }
Verify multiple CRC chunks .
32,437
// Interprets the passed string as a URI. On a syntax error, or when no
// scheme is present, falls back to treating the string as a local file path
// (logging a warning asking for proper URI configuration).
public static URI stringAsURI ( String s ) throws IOException { URI u = null ; try { u = new URI ( s ) ; } catch ( URISyntaxException e ) { LOG . error ( "Syntax error in URI " + s + ". Please check hdfs configuration." , e ) ; } if ( u == null || u . getScheme ( ) == null ) { LOG . warn ( "Path " + s + " should be specified as a URI " + "in configuration files. Please update hdfs configuration." ) ; u = fileAsURI ( new File ( s ) ) ; } return u ; }
Interprets the passed string as a URI . In case of error it assumes the specified string is a file .
32,438
// Converts a collection of strings into a collection of URIs via
// stringAsURI; strings that fail conversion are logged and skipped, so the
// result may be smaller than the input.
public static Collection < URI > stringCollectionAsURIs ( Collection < String > names ) { Collection < URI > uris = new ArrayList < URI > ( names . size ( ) ) ; for ( String name : names ) { try { uris . add ( stringAsURI ( name ) ) ; } catch ( IOException e ) { LOG . error ( "Error while processing URI: " + name , e ) ; } } return uris ; }
Converts a collection of strings into a collection of URIs .
32,439
// Get the real task-log file path: the filter's file name under the base log
// directory for the given location, converted to a shell-safe path.
static String getRealTaskLogFilePath ( String location , LogName filter ) throws IOException { return FileUtil . makeShellPath ( new File ( getBaseDir ( location ) , filter . toString ( ) ) ) ; }
Get the real task - log file - path
32,440
// Wrap a command in a shell to capture stdout and stderr to files; delegates
// to the full overload with no setup commands and setsid disabled. If the
// tailLength is 0 the entire output will be saved.
public static List < String > captureOutAndError ( List < String > cmd , File stdoutFilename , File stderrFilename , long tailLength ) throws IOException { return captureOutAndError ( null , cmd , stdoutFilename , stderrFilename , tailLength , false ) ; }
Wrap a command in a shell to capture stdout and stderr to files . If the tailLength is 0 the entire output will be saved .
32,441
/**
 * Wrap a command in a shell ("bash -c ...") to capture stdout and stderr to
 * files. Setup commands (e.g. setting a memory limit) run before the exec.
 * If tailLength is 0 the entire output is saved.
 *
 * @return the three-element command list: bash, -c, merged command line
 */
public static List<String> captureOutAndError(List<String> setup, List<String> cmd, File stdoutFilename, File stderrFilename, long tailLength, boolean useSetsid) throws IOException {
    List<String> result = new ArrayList<String>(3);
    result.add(bashCommand);
    result.add("-c");
    // buildCommandLine already returns a String; the original called
    // toString() on it redundantly.
    result.add(buildCommandLine(setup, cmd, stdoutFilename, stderrFilename, tailLength, useSetsid));
    return result;
}
Wrap a command in a shell to capture stdout and stderr to files . Setup commands such as setting memory limit can be passed which will be executed before exec . If the tailLength is 0 the entire output will be saved .
32,442
/**
 * Add single quotes around each command string and join them with spaces.
 * When isExecutable is true, the first entry is treated as an executable
 * path and converted to a shell-safe path.
 *
 * @return the quoted, space-joined command string (with a trailing space)
 */
public static String addCommand(List<String> cmd, boolean isExecutable) throws IOException {
    // StringBuilder: no need for StringBuffer's synchronization here.
    StringBuilder command = new StringBuilder();
    for (String s : cmd) {
        command.append('\'');
        if (isExecutable) {
            // Only the first entry is the executable; subsequent ones are
            // plain arguments.
            command.append(FileUtil.makeShellPath(new File(s)));
            isExecutable = false;
        } else {
            command.append(s);
        }
        command.append('\'');
        command.append(" ");
    }
    return command.toString();
}
Add quotes to each of the command strings and return as a single string
32,443
// Wrap a command in a shell ("bash -c exec ...") that redirects the debug
// script's stdout and stderr to the given debugout file, with stdin from
// /dev/null. The first entry is converted to a shell-safe executable path.
public static List < String > captureDebugOut ( List < String > cmd , File debugoutFilename ) throws IOException { String debugout = FileUtil . makeShellPath ( debugoutFilename ) ; List < String > result = new ArrayList < String > ( 3 ) ; result . add ( bashCommand ) ; result . add ( "-c" ) ; StringBuffer mergedCmd = new StringBuffer ( ) ; mergedCmd . append ( "exec " ) ; boolean isExecutable = true ; for ( String s : cmd ) { if ( isExecutable ) { mergedCmd . append ( FileUtil . makeShellPath ( new File ( s ) ) ) ; isExecutable = false ; } else { mergedCmd . append ( s ) ; } mergedCmd . append ( " " ) ; } mergedCmd . append ( " < /dev/null " ) ; mergedCmd . append ( " >" ) ; mergedCmd . append ( debugout ) ; mergedCmd . append ( " 2>&1 " ) ; result . add ( mergedCmd . toString ( ) ) ; return result ; }
Wrap a command in a shell to capture debug script s stdout and stderr to debugout .
32,444
/**
 * Start the disk usage checking thread (daemon), unless periodic refresh is
 * disabled (refreshInterval <= 0).
 */
public void start() {
    if (refreshInterval <= 0) {
        return;
    }
    refreshUsed = new Thread(new DURefreshThread(), "refreshUsed-" + dirPath);
    refreshUsed.setDaemon(true);
    refreshUsed.start();
}
Start the disk usage checking thread .
32,445
// Shut down the refreshing thread: clear state, interrupt the thread and
// wait for it to exit. NOTE(review): an InterruptedException during join is
// silently dropped (and the thread reference is then not nulled) — appears
// deliberate best-effort shutdown; confirm.
public void shutdown ( ) { this . shouldRun = false ; this . namespaceSliceDUMap . clear ( ) ; if ( this . refreshUsed != null ) { this . refreshUsed . interrupt ( ) ; try { this . refreshUsed . join ( ) ; this . refreshUsed = null ; } catch ( InterruptedException ie ) { } } }
Shut down the refreshing thread .
32,446
// Tells the cluster manager to kill the session with the given id; returns 0
// on success. Thrift errors and safe-mode refusals surface as IOException.
// NOTE(review): progress goes to stdout but the confirmation to stderr, and
// neither printf ends with a newline — possibly unintended; confirm.
private int killSession ( String sessionId ) throws IOException { try { System . out . printf ( "Killing %s" , sessionId ) ; ClusterManagerService . Client client = getCMSClient ( ) ; try { client . killSession ( sessionId ) ; } catch ( SafeModeException e ) { throw new IOException ( "Cannot kill session yet, ClusterManager is in Safe Mode" ) ; } System . err . printf ( "%s killed" , sessionId ) ; } catch ( TException e ) { throw new IOException ( e ) ; } return 0 ; }
Tells the cluster manager to kill the session with a given id
32,447
// Gets the list of running sessions from the cluster manager and prints a
// tab-separated table to stdout (session names have tabs/newlines escaped).
// A missing priority defaults to NORMAL. Thrift errors and safe-mode
// refusals surface as IOException; returns 0 on success.
private int listSessions ( ) throws IOException { try { ClusterManagerService . Client client = getCMSClient ( ) ; List < RunningSession > sessions ; try { sessions = client . getSessions ( ) ; } catch ( SafeModeException e ) { throw new IOException ( "Cannot list sessions, ClusterManager is in Safe Mode" ) ; } System . out . printf ( "%d sessions currently running:\n" , sessions . size ( ) ) ; System . out . printf ( "SessionID\t" + "Session Name\t" + "Session User\t" + "Session Poolgroup\t" + "Session Pool\t" + "Session Priority\t" + "Running Mappers\t" + "Running Reducers\t" + "Running Jobtrackers\n" ) ; for ( RunningSession session : sessions ) { SessionPriority priority = session . getPriority ( ) ; if ( priority == null ) { priority = SessionPriority . NORMAL ; } System . out . printf ( "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" , session . getHandle ( ) , session . getName ( ) . replace ( "\t" , "\\t" ) . replace ( "\n" , "\\n" ) , session . getUserId ( ) , session . getPoolInfo ( ) . getPoolGroupName ( ) , session . getPoolInfo ( ) . getPoolName ( ) , priority , session . getRunningResources ( ) . get ( ResourceType . MAP ) , session . getRunningResources ( ) . get ( ResourceType . REDUCE ) , session . getRunningResources ( ) . get ( ResourceType . JOBTRACKER ) ) ; } } catch ( TException e ) { throw new IOException ( e ) ; } return 0 ; }
Gets a list of the sessions from the cluster manager and outputs them on the console
32,448
/**
 * Factory function that creates an EditsVisitor for the given processor
 * name ("xml", "stats" or "binary", case-insensitive).
 *
 * @throws IOException when the processor name is unknown
 */
static public EditsVisitor getEditsVisitor(String filename, String processor, Tokenizer tokenizer, boolean printToScreen) throws IOException {
    // Lowercase once instead of once per comparison.
    String p = processor.toLowerCase();
    if (p.equals("xml")) {
        return new XmlEditsVisitor(filename, tokenizer, printToScreen);
    } else if (p.equals("stats")) {
        return new StatisticsEditsVisitor(filename, tokenizer, printToScreen);
    } else if (p.equals("binary")) {
        return new BinaryEditsVisitor(filename, tokenizer, printToScreen);
    } else {
        // Fixed typo in the original message ("proccesor").
        throw new IOException("Unknown processor " + processor + " (valid processors: xml, binary, stats)");
    }
}
Factory function that creates an EditsVisitor object
32,449
// Helper that runs a LinuxTaskController command via a ShellCommandExecutor,
// logging exit code and output on failure before rethrowing as IOException.
// NOTE(review): the success branch logs output at INFO despite being guarded
// by isDebugEnabled() — level mismatch, confirm intent.
private void runCommand ( TaskCommands taskCommand , String user , List < String > cmdArgs , File workDir , Map < String , String > env ) throws IOException { ShellCommandExecutor shExec = buildTaskControllerExecutor ( taskCommand , user , cmdArgs , workDir , env ) ; try { shExec . execute ( ) ; } catch ( Exception e ) { LOG . warn ( "Exit code from " + taskCommand . toString ( ) + " is : " + shExec . getExitCode ( ) ) ; LOG . warn ( "Exception thrown by " + taskCommand . toString ( ) + " : " + StringUtils . stringifyException ( e ) ) ; LOG . info ( "Output from LinuxTaskController's " + taskCommand . toString ( ) + " follows:" ) ; logOutput ( shExec . getOutput ( ) ) ; throw new IOException ( e ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . info ( "Output from LinuxTaskController's " + taskCommand . toString ( ) + " follows:" ) ; logOutput ( shExec . getOutput ( ) ) ; } }
Helper method that runs a LinuxTaskController command
32,450
/**
 * Get the job ID string from the task attempt ID carried in the
 * TaskControllerContext.
 */
private String getJobId(TaskControllerContext context) {
    final TaskAttemptID attemptId =
        TaskAttemptID.forName(context.task.getTaskID().toString());
    return attemptId.getJobID().toString();
}
get the Job ID from the information in the TaskControllerContext
32,451
// Finds which configured mapred local dir contains the given task directory
// by reconstructing each candidate task dir and comparing. Throws
// IllegalArgumentException when the directory matches no local dir.
private String getDirectoryChosenForTask ( File directory , TaskControllerContext context ) { String jobId = getJobId ( context ) ; String taskId = context . task . getTaskID ( ) . toString ( ) ; for ( String dir : mapredLocalDirs ) { File mapredDir = new File ( dir ) ; File taskDir = new File ( mapredDir , TaskTracker . getLocalTaskDir ( jobId , taskId , context . task . isTaskCleanupTask ( ) ) ) ; if ( directory . equals ( taskDir ) ) { return dir ; } } LOG . error ( "Couldn't parse task cache directory correctly" ) ; throw new IllegalArgumentException ( "invalid task cache directory " + directory . getAbsolutePath ( ) ) ; }
Returns the local mapred directory that was chosen for this task.
32,452
// Sets up access to the task attempt's log directory by locating the SYSLOG
// file's parent directory and applying FILE_PERMISSIONS (non-recursively).
private void setupTaskLogFileAccess ( TaskControllerContext context ) { TaskAttemptID taskId = context . task . getTaskID ( ) ; File f = TaskLog . getTaskLogFile ( taskId , TaskLog . LogName . SYSLOG ) ; String taskAttemptLogDir = f . getParentFile ( ) . getAbsolutePath ( ) ; changeDirectoryPermissions ( taskAttemptLogDir , FILE_PERMISSIONS , false ) ; }
Sets up access permissions for the task log directory.
32,453
// Sets up access to the files under the task's cache directories: for every
// configured mapred local dir whose task cache dir exists, applies
// FILE_PERMISSIONS recursively.
private void setupTaskCacheFileAccess ( TaskControllerContext context ) { String taskId = context . task . getTaskID ( ) . toString ( ) ; JobID jobId = JobID . forName ( getJobId ( context ) ) ; for ( String localDir : mapredLocalDirs ) { File f = new File ( localDir ) ; File taskCacheDir = new File ( f , TaskTracker . getLocalTaskDir ( jobId . toString ( ) , taskId , context . task . isTaskCleanupTask ( ) ) ) ; if ( taskCacheDir . exists ( ) ) { changeDirectoryPermissions ( taskCacheDir . getPath ( ) , FILE_PERMISSIONS , true ) ; } } }
Sets up access permissions for the files under the job and task cache directories.
32,454
// Convenience method to execute chmod on a directory (optionally recursive).
// Failures (exception or non-zero exit) are logged as warnings, not thrown.
private void changeDirectoryPermissions ( String dir , String mode , boolean isRecursive ) { int ret = 0 ; try { ret = FileUtil . chmod ( dir , mode , isRecursive ) ; } catch ( Exception e ) { LOG . warn ( "Exception in changing permissions for directory " + dir + ". Exception: " + e . getMessage ( ) ) ; } if ( ret != 0 ) { LOG . warn ( "Could not change permissions for directory " + dir ) ; } }
convenience method to execute chmod .
32,455
/**
 * Return the task-specific directory under the job's cache directory
 * (workDir's grandparent), appending the cleanup suffix for cleanup tasks.
 */
private String getTaskCacheDirectory(TaskControllerContext context) {
    final File jobCacheDir = context.env.workDir.getParentFile().getParentFile();
    String taskId = context.task.getTaskID().toString();
    if (context.task.isTaskCleanupTask()) {
        taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX;
    }
    return new File(jobCacheDir, taskId).getAbsolutePath();
}
Return the task specific directory under the cache .
32,456
/**
 * Writes the given command line to COMMAND_FILE under {@code directory}
 * and marks the file world-readable and world-executable.
 * NOTE(review): an IOException during the write is logged but swallowed
 * even though the method declares {@code throws IOException} - presumably
 * deliberate best-effort behaviour, but worth confirming with callers.
 */
private void writeCommand ( String cmdLine , String directory ) throws IOException { PrintWriter pw = null ; String commandFile = directory + File . separator + COMMAND_FILE ; LOG . info ( "Writing commands to " + commandFile ) ; try { FileWriter fw = new FileWriter ( commandFile ) ; BufferedWriter bw = new BufferedWriter ( fw ) ; pw = new PrintWriter ( bw ) ; pw . write ( cmdLine ) ; } catch ( IOException ioe ) { LOG . error ( "Caught IOException while writing JVM command line to file. " + ioe . getMessage ( ) ) ; } finally { if ( pw != null ) { pw . close ( ) ; } File f = new File ( commandFile ) ; if ( f . exists ( ) ) { f . setReadable ( true , false ) ; f . setExecutable ( true , false ) ; } } }
Writes the command line to a file so that it can be executed.
32,457
/**
 * Runs the task-controller with the given kill command for the task's JVM.
 * No-op when the context carries no task. On failure the task-controller
 * output is logged and the cause is wrapped in an IOException.
 */
private void finishTask ( TaskControllerContext context , TaskCommands command ) throws IOException { if ( context . task == null ) { LOG . info ( "Context task null not killing the JVM" ) ; return ; } ShellCommandExecutor shExec = buildTaskControllerExecutor ( command , context . env . conf . getUser ( ) , buildKillTaskCommandArgs ( context ) , context . env . workDir , context . env . env ) ; try { shExec . execute ( ) ; } catch ( Exception e ) { LOG . warn ( "Output from task-contoller is : " + shExec . getOutput ( ) ) ; throw new IOException ( e ) ; } }
Convenience method used to send the appropriate kill signal to the task JVM.
32,458
/**
 * Looks up the history file path for the given job id in the moved-file
 * cache.
 *
 * @return the cached history file path, or null when the job is unknown
 */
public static String getHistoryFilePath ( JobID jobId ) {
  final MovedFileInfo info = jobHistoryFileMap.get(jobId);
  return (info == null) ? null : info.historyFile;
}
Given the job id return the history file path from the cache
32,459
/**
 * Initializes JobHistory: resolves the history log location, creates the
 * directory if missing, records configuration, and constructs the
 * {@link JobHistoryFilesManager}.  Any IOException disables history
 * rather than failing the JobTracker.
 *
 * BUGFIX: the original called {@code fileManager.startIOExecutor()}
 * unconditionally; when initialization failed before {@code fileManager}
 * was constructed this threw a NullPointerException instead of degrading
 * gracefully.  The executor is now started only when the manager exists.
 *
 * @return true when history logging is enabled after initialization
 */
public static boolean init ( JobHistoryObserver jobTracker , JobConf conf , String hostname , long jobTrackerStartTime ) {
  try {
    LOG_DIR = conf.get("hadoop.job.history.location",
        "file:///"
            + new File(System.getProperty("hadoop.log.dir", "/tmp")).getAbsolutePath()
            + File.separator + "history");
    JOBTRACKER_UNIQUE_STRING = hostname + "_" + String.valueOf(jobTrackerStartTime) + "_";
    jobtrackerHostname = hostname;
    Path logDir = new Path(LOG_DIR);
    LOGDIR_FS = logDir.getFileSystem(conf);
    if (!LOGDIR_FS.exists(logDir)) {
      if (!LOGDIR_FS.mkdirs(logDir, new FsPermission(HISTORY_DIR_PERMISSION))) {
        throw new IOException("Mkdirs failed to create " + logDir.toString());
      }
    }
    // Publish the resolved location back into the configuration.
    conf.set("hadoop.job.history.location", LOG_DIR);
    disableHistory = false;
    jobHistoryBlockSize =
        conf.getLong("mapred.jobtracker.job.history.block.size", 3 * 1024 * 1024);
    jtConf = conf;
    fileManager = new JobHistoryFilesManager(conf, jobTracker, logDir);
  } catch (IOException e) {
    LOG.error("Failed to initialize JobHistory log file", e);
    disableHistory = true;
  }
  // Only start the IO executor when the manager was actually constructed.
  if (fileManager != null) {
    fileManager.startIOExecutor();
  }
  return !(disableHistory);
}
Initialize JobHistory files .
32,460
/**
 * Parses one line of a history log: the record type is the token before
 * the first space, and key="value" tuples are extracted by regex from the
 * remainder.  Values are unescaped when {@code isEscaped} is set, then the
 * parsed key/value map is handed to the listener.
 */
private static void parseLine ( String line , Listener l , boolean isEscaped ) throws IOException { int idx = line . indexOf ( ' ' ) ; String recType = line . substring ( 0 , idx ) ; String data = line . substring ( idx + 1 , line . length ( ) ) ; Matcher matcher = pattern . matcher ( data ) ; Map < Keys , String > parseBuffer = new HashMap < Keys , String > ( ) ; while ( matcher . find ( ) ) { String tuple = matcher . group ( 0 ) ; String [ ] parts = StringUtils . split ( tuple , StringUtils . ESCAPE_CHAR , '=' ) ; String value = parts [ 1 ] . substring ( 1 , parts [ 1 ] . length ( ) - 1 ) ; if ( isEscaped ) { value = StringUtils . unEscapeString ( value , StringUtils . ESCAPE_CHAR , charsToEscape ) ; } parseBuffer . put ( Keys . valueOf ( parts [ 0 ] ) , value ) ; } l . handle ( RecordTypes . valueOf ( recType ) , parseBuffer ) ; parseBuffer . clear ( ) ; }
Parse a single line of history .
32,461
/**
 * Logs a raw record of the given type with a single key/value pair.
 * The value is escaped before being written.  Generally not used directly.
 */
public static void log ( PrintWriter out , RecordTypes recordType , Keys key , String value ) {
  String escaped = escapeString(value);
  StringBuilder line = new StringBuilder();
  line.append(recordType.name()).append(DELIMITER)
      .append(key).append("=\"").append(escaped).append("\"")
      .append(DELIMITER).append(LINE_DELIMITER_CHAR);
  out.println(line.toString());
}
Log a raw record type with keys and values . This is method is generally not used directly .
32,462
/**
 * Builds the task-logs URL for a TaskAttempt from its recorded tracker
 * name, HTTP port and attempt id.
 *
 * @return the URL, or null when any of the three fields is empty
 */
public static String getTaskLogsUrl ( JobHistory . TaskAttempt attempt ) { if ( attempt . get ( Keys . HTTP_PORT ) . equals ( "" ) || attempt . get ( Keys . TRACKER_NAME ) . equals ( "" ) || attempt . get ( Keys . TASK_ATTEMPT_ID ) . equals ( "" ) ) { return null ; } String taskTrackerName = JobInProgress . convertTrackerNameToHostName ( attempt . get ( Keys . TRACKER_NAME ) ) ; return TaskLogServlet . getTaskLogUrl ( taskTrackerName , attempt . get ( Keys . HTTP_PORT ) , attempt . get ( Keys . TASK_ATTEMPT_ID ) ) ; }
Return the TaskLogsUrl of a particular TaskAttempt
32,463
/**
 * Returns the number of currently available slots on this tasktracker for
 * the given task type (map or reduce), with debug logging of the max and
 * occupied slot counts.
 */
public int getAvailableSlots ( TaskType taskType ) { int availableSlots = 0 ; if ( taskType == TaskType . MAP ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( trackerName + " getAvailSlots:" + " max(m)=" + status . getMaxMapSlots ( ) + " occupied(m)=" + status . countOccupiedMapSlots ( ) ) ; } availableSlots = status . getAvailableMapSlots ( ) ; } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( trackerName + " getAvailSlots:" + " max(r)=" + status . getMaxReduceSlots ( ) + " occupied(r)=" + status . countOccupiedReduceSlots ( ) ) ; } availableSlots = status . getAvailableReduceSlots ( ) ; } return availableSlots ; }
Get the number of currently available slots on this tasktracker for the given type of the task .
32,464
/**
 * Sends an incremental block report (received/deleted blocks) to the
 * namenode.  Atomically drains receivedAndDeletedBlockList under its lock,
 * performs the RPC, and on any exception re-queues the drained blocks via
 * processFailedBlocks before rethrowing.  A non-null "failed" response
 * from a cached-primary indicates the primary started acting as standby;
 * a null response from a presumed standby triggers clearPrimary unless a
 * clear-primary command was already processed (failover in progress).
 * Blocks the namenode reports as failed are re-processed afterwards.
 */
private void sendIncrementalBlockReport ( long startTime ) throws Exception { Block [ ] receivedAndDeletedBlockArray = null ; int numBlocksReceivedAndDeleted = 0 ; int currentPendingRequests = 0 ; synchronized ( receivedAndDeletedBlockList ) { lastDeletedReport = startTime ; numBlocksReceivedAndDeleted = receivedAndDeletedBlockList . size ( ) ; if ( numBlocksReceivedAndDeleted > 0 ) { receivedAndDeletedBlockArray = receivedAndDeletedBlockList . toArray ( new Block [ numBlocksReceivedAndDeleted ] ) ; receivedAndDeletedBlockList . clear ( ) ; currentPendingRequests = pendingReceivedRequests ; pendingReceivedRequests = 0 ; } } if ( receivedAndDeletedBlockArray != null ) { long [ ] failed = null ; try { IncrementalBlockReport ibr = new IncrementalBlockReport ( receivedAndDeletedBlockArray ) ; long rpcStartTime = 0 ; if ( LOG . isDebugEnabled ( ) ) { rpcStartTime = System . nanoTime ( ) ; LOG . debug ( "sending blockReceivedAndDeletedNew " + receivedAndDeletedBlockArray . length + " blocks to " + namenodeAddress ) ; } failed = avatarnode . blockReceivedAndDeletedNew ( nsRegistration , ibr ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "finished blockReceivedAndDeletedNew " + "to " + namenodeAddress + " time: " + ( System . nanoTime ( ) - rpcStartTime ) + " ns" ) ; } boolean isPrimaryCached = isPrimaryServiceCached ( ) ; if ( isPrimaryCached && failed != null ) { throw new IOException ( "Primary started acting as standby" ) ; } else if ( ! isPrimaryCached && failed == null ) { String msg = "Received null response from standby for incremental" + " block report. " ; if ( clearPrimaryCommandProcessed ) { LOG . info ( msg + "Failover is in progress" + " - will not clear primary again" ) ; } else { LOG . info ( msg + "Standby is acting as primary. Clearing primary" ) ; this . clearPrimary ( ) ; } } } catch ( Exception e ) { processFailedBlocks ( receivedAndDeletedBlockArray , currentPendingRequests ) ; throw e ; } if ( failed != null && failed . 
length != 0 ) { processFailedReceivedDeleted ( failed , receivedAndDeletedBlockArray ) ; } } }
Sends an incremental block report to the Namenode .
32,465
/**
 * Decides whether an incremental block report should be sent now.
 * A report is due when requests are pending or the deletion-report
 * interval has expired; a standby additionally backs off for
 * blockReceivedRetryInterval after a failed blockReceived call.
 */
private boolean shouldSendIncrementalReport ( long startTime ) {
  // Base condition: something pending, or the deletion-report interval expired.
  boolean deleteIntervalTrigger =
      (startTime - lastDeletedReport > anode.deletedReportInterval);
  boolean sendReportDefault = pendingReceivedRequests > 0 || deleteIntervalTrigger;
  boolean isPrimary = isPrimaryServiceCached() || donotDelayIncrementalBlockReports;
  if (isPrimary) {
    return sendReportDefault;
  }
  // Standby: additionally wait out the retry interval after a failure.
  return (lastBlockReceivedFailed + blockReceivedRetryInterval < startTime)
      && sendReportDefault;
}
Checks if an incremental block report should be sent .
32,466
/**
 * Re-queues blocks from a failed incremental block report: the failed
 * blocks are put back at the head of receivedAndDeletedBlockList in their
 * original relative order, and the pending-request count is restored.
 */
private void processFailedBlocks ( Block [ ] failed , int failedPendingRequests ) {
  synchronized (receivedAndDeletedBlockList) {
    // Insert front-to-back at increasing positions; equivalent to the
    // back-to-front insertion at index 0, but reads more naturally.
    int pos = 0;
    for (Block b : failed) {
      receivedAndDeletedBlockList.add(pos++, b);
    }
    pendingReceivedRequests += failedPendingRequests;
  }
}
Adds blocks of incremental block report back to the receivedAndDeletedBlockList when handling an exception
32,467
/**
 * Detects a failover: returns true (and registers this offer service as
 * primary on the service pair) exactly when we were not primary according
 * to the cache but the live check now says we are.
 */
private boolean checkFailover ( ) throws InterruptedException {
  // Short-circuits identically to the original: the live check only runs
  // when the cached state says we are not primary.
  if (isPrimaryServiceCached() || !isPrimaryService()) {
    return false;
  }
  this.servicePair.setPrimaryOfferService(this);
  return true;
}
Determines whether a failover has happened and accordingly takes the appropriate action .
32,468
/**
 * Processes an array of datanode commands, first checking for a failover.
 * When a standby-to-primary switch just happened, a DNA_REGISTER command
 * triggers clearPrimary.  Commands that are not valid for a standby are
 * skipped with a warning.  Returns false as soon as one command's
 * processing asks to stop; IOExceptions per command are logged and
 * processing continues.
 */
private boolean processCommand ( DatanodeCommand [ ] cmds , long processStartTime ) throws InterruptedException { if ( cmds != null ) { boolean switchedFromStandbyToPrimary = checkFailover ( ) ; for ( DatanodeCommand cmd : cmds ) { try { if ( switchedFromStandbyToPrimary && cmd . getAction ( ) == DatanodeProtocol . DNA_REGISTER ) { this . clearPrimary ( ) ; } if ( ! isPrimaryServiceCached ( ) && ! isValidStandbyCommand ( cmd ) ) { LOG . warn ( "Received an invalid command " + cmd . getAction ( ) + " from standby " + this . namenodeAddress ) ; continue ; } if ( processCommand ( cmd , processStartTime ) == false ) { return false ; } } catch ( IOException ioe ) { LOG . warn ( "Error processing datanode Command" , ioe ) ; } } } return true ; }
Process an array of datanode commands . This function has logic to check for failover . Any commands should be processed using this function as an entry point .
32,469
/**
 * Prepares this offer service for failover: disables backoff, stops
 * delaying incremental block reports, and fires the injection-handler
 * event used by tests.
 */
private void prepareFailover ( ) { LOG . info ( "PREPARE FAILOVER requested by : " + this . avatarnodeAddress ) ; setBackoff ( false ) ; this . donotDelayIncrementalBlockReports = true ; InjectionHandler . processEvent ( InjectionEvent . OFFERSERVICE_PREPARE_FAILOVER , nsRegistration . toString ( ) ) ; }
Take actions in preparation for failover .
32,470
/**
 * Handles CLEAR PRIMARY during failover: when this service is not the
 * cached primary, restarts the service pair's *other* avatar service
 * (the one whose address does not match this service's avatarnodeAddress),
 * ensuring all commands from the old primary are flushed; then forces a
 * full and undelayed incremental block report.  Fires an injection event
 * for tests.
 *
 * @return false when the restart failed or the address matched neither
 *         avatar, true otherwise
 */
private boolean clearPrimary ( ) throws InterruptedException { try { if ( ! isPrimaryServiceCached ( ) ) { InetSocketAddress addr1 = servicePair . avatarAddr1 ; InetSocketAddress addr2 = servicePair . avatarAddr2 ; if ( avatarnodeAddress . equals ( addr2 ) ) { LOG . info ( "Restarting service for AvatarNode : " + addr1 ) ; servicePair . restartService1 ( ) ; } else if ( avatarnodeAddress . equals ( addr1 ) ) { LOG . info ( "Restarting service for AvatarNode : " + addr2 ) ; servicePair . restartService2 ( ) ; } else { throw new IOException ( "Address : " + avatarnodeAddress + " does not match any avatar address" ) ; } LOG . info ( "Finished Processing CLEAR PRIMARY requested by : " + this . avatarnodeAddress ) ; this . forceIncrementalReport = true ; this . donotDelayIncrementalBlockReports = true ; } InjectionHandler . processEvent ( InjectionEvent . OFFERSERVICE_CLEAR_PRIMARY ) ; } catch ( IOException e ) { LOG . error ( "Exception processing CLEAR PRIMARY" , e ) ; return false ; } return true ; }
This clears up the thread heartbeating to the primary Avatar by restarting it. This makes sure all commands from the primary have been processed by the datanode. This method is used during failover.
32,471
/**
 * Arranges for the datanode to send a block report at the next heartbeat.
 *
 * @param delay when positive, the report time is jittered by a random
 *        amount within [0, delay) to spread reports out; otherwise the
 *        report is made immediately due relative to the last heartbeat.
 */
public void scheduleBlockReport ( long delay ) {
  if (delay > 0) {
    // Pretend the last report happened (interval - jitter) ago.
    long jitter = R.nextInt((int) (delay));
    lastBlockReport = System.currentTimeMillis() - (anode.blockReportInterval - jitter);
  } else {
    // Force a report: pretend the last one is a full interval old.
    lastBlockReport = lastHeartbeat - anode.blockReportInterval;
  }
  resetBlockReportTime = true;
}
This methods arranges for the data node to send the block report at the next heartbeat .
32,472
/**
 * Removes the given blocks from the receivedAndDeletedBlockList queue
 * (matching by id/size/generation stamp with a wildcard delete hint),
 * logging each removal and the total pruning time.
 */
void removeReceivedBlocks ( Block [ ] removeList ) { long start = AvatarDataNode . now ( ) ; synchronized ( receivedAndDeletedBlockList ) { ReceivedBlockInfo block = new ReceivedBlockInfo ( ) ; block . setDelHints ( ReceivedBlockInfo . WILDCARD_HINT ) ; for ( Block bi : removeList ) { block . set ( bi . getBlockId ( ) , bi . getNumBytes ( ) , bi . getGenerationStamp ( ) ) ; while ( receivedAndDeletedBlockList . remove ( block ) ) { LOG . info ( "Block deletion command deleted from receivedDeletedBlockList " + bi ) ; } } } long stop = AvatarDataNode . now ( ) ; LOG . info ( "Pruning blocks from the received list took " + ( stop - start ) + "ms for: " + removeList . length + "blocks, queue length: " + receivedAndDeletedBlockList . size ( ) ) ; }
Remove blocks from blockReceived queues
32,473
/**
 * Transforms a CRC value through a 4x256 lookup table: one table row per
 * byte of the CRC (least-significant byte first), XOR-combining the four
 * looked-up values.  Used to compute the CRC after virtually appending
 * zero bytes, given the matching lookup table.
 */
static int transform ( int crc , int [ ] [ ] lookupTable ) {
  int result = lookupTable[0][crc & 0xff];
  result ^= lookupTable[1][(crc >>> 8) & 0xff];
  result ^= lookupTable[2][(crc >>> 16) & 0xff];
  result ^= lookupTable[3][(crc >>> 24) & 0xff];
  return result;
}
Helper function to transform a CRC using lookup table . Currently it is used for calculating CRC after adding bytes zeros to source byte array . The special lookup table needs to be passed in for this specific transformation
32,474
/**
 * Concatenates two CRCs: transforms crc1 as if {@code order} zero bytes
 * were appended (using precomputed lookup tables for large powers, then a
 * native CRC update over a small zero buffer for the remainder), and XORs
 * the result with crc2.
 */
static public int concatCrc ( int crc1 , int crc2 , int order ) { int crcForCrc1 = crc1 ; int orderRemained = order ; for ( LookupTable lookupTable : lookupTables ) { while ( orderRemained >= lookupTable . getOrder ( ) ) { crcForCrc1 = transform ( crcForCrc1 , lookupTable . getLookupTable ( ) ) ; orderRemained -= lookupTable . getOrder ( ) ; } } if ( orderRemained > 0 ) { int initial = CrcConcatLookupTables . initCrcMap [ orderRemained ] ; NativeCrc32 pjc = new NativeCrc32 ( ) ; pjc . setValue ( crcForCrc1 ) ; byte [ ] zeros = new byte [ orderRemained ] ; pjc . update ( zeros , 0 , zeros . length ) ; crcForCrc1 = ( int ) pjc . getValue ( ) ^ initial ; } return crcForCrc1 ^ crc2 ; }
Concatenate two CRCs
32,475
/**
 * Sends the job-end HTTP notification synchronously (used by the local
 * job runner).  Retries per the notification's retry configuration,
 * sleeping between attempts; any response code other than 200 counts as
 * a failure.
 *
 * Fixes over the original: Thread.sleep is called statically instead of
 * through the current-thread instance inside a pointless
 * synchronized(Thread.currentThread()) block; InterruptedException now
 * restores the interrupt flag and stops retrying; the duplicate
 * IOException/Exception catch blocks with identical handling are merged.
 *
 * @param conf job configuration carrying the notification URI settings
 * @param status final status of the job, substituted into the URI
 */
public static void localRunnerNotification ( JobConf conf , JobStatus status ) {
  JobEndStatusInfo notification = createNotification(conf, status);
  if (notification == null) {
    return;
  }
  while (notification.configureForRetry()) {
    try {
      int code = httpNotification(notification.getUri());
      if (code != 200) {
        throw new IOException("Invalid response status code: " + code);
      }
      break; // notified successfully
    } catch (Exception ex) {
      // Covers IOException from the HTTP call as well as anything else.
      LOG.error("Notification error [" + notification.getUri() + "]", ex);
    }
    try {
      Thread.sleep(notification.getRetryInterval());
    } catch (InterruptedException iex) {
      LOG.error("Notification retry error [" + notification + "]", iex);
      // Preserve interrupt status and stop retrying.
      Thread.currentThread().interrupt();
      break;
    }
  }
}
Sends the job-end notification in a simple synchronous way.
32,476
/**
 * Tells whether grants of the given resource type may be preempted.
 * MAP and REDUCE are preemptable; JOBTRACKER is not.
 *
 * @throws RuntimeException for resource types with no defined behavior
 */
public static boolean canBePreempted ( ResourceType type ) {
  if (type == ResourceType.MAP || type == ResourceType.REDUCE) {
    return true;
  }
  if (type == ResourceType.JOBTRACKER) {
    return false;
  }
  throw new RuntimeException("Undefined Preemption behavior for " + type);
}
Tells if preemption is allowed for a resource type .
32,477
/**
 * Returns the locality levels to consider, in order of preference, for
 * the given resource type: maps try NODE, then RACK, then ANY; reduces
 * and jobtracker resources only use ANY.
 *
 * @throws RuntimeException for resource types with no defined behavior
 */
public static List < LocalityLevel > neededLocalityLevels ( ResourceType type ) {
  List<LocalityLevel> levels = new ArrayList<LocalityLevel>();
  switch (type) {
  case MAP:
    levels.add(LocalityLevel.NODE);
    levels.add(LocalityLevel.RACK);
    // fall through: every type ends with ANY
  case REDUCE:
  case JOBTRACKER:
    levels.add(LocalityLevel.ANY);
    break;
  default:
    throw new RuntimeException("Undefined locality behavior for " + type);
  }
  return levels;
}
Returns the required locality levels in order of preference for the resource type .
32,478
/**
 * Peeks at an fsimage stream's leading version int without consuming it:
 * marks the stream (read limit 42 bytes, comfortably above the 4 bytes
 * actually read), reads the version, and resets.
 * NOTE(review): requires a stream that supports mark/reset.
 */
private int findImageVersion ( DataInputStream in ) throws IOException { in . mark ( 42 ) ; int version = in . readInt ( ) ; in . reset ( ) ; return version ; }
Checks an fsimage DataInputStream's version number.
32,479
/**
 * Overrides the bad-racks and bad-hosts sets.  For unit tests only.
 */
public static void setBadHostsAndRacks ( Set < String > racks , Set < String > hosts ) { badRacks = racks ; badHosts = hosts ; }
A function to be used by unit tests only
32,480
/**
 * Initializes the accepted parity codecs from "dfs.f4.accepted.codecs"
 * (default "rs,xor"), logging each accepted codec's parameters and
 * tracking the maximum stripe length seen.
 */
private void initParityConfigs ( ) { Set < String > acceptedCodecIds = new HashSet < String > ( ) ; for ( String s : conf . get ( "dfs.f4.accepted.codecs" , "rs,xor" ) . split ( "," ) ) { acceptedCodecIds . add ( s ) ; } for ( Codec c : Codec . getCodecs ( ) ) { if ( acceptedCodecIds . contains ( c . id ) ) { FSNamesystem . LOG . info ( "F4: Parity info." + " Id: " + c . id + " Parity Length: " + c . parityLength + " Parity Stripe Length: " + c . stripeLength + " Parity directory: " + c . parityDirectory + " Parity temp directory: " + c . tmpParityDirectory ) ; acceptedCodecs . add ( c ) ; if ( c . stripeLength > this . stripeLen ) { this . stripeLen = c . stripeLength ; } } } FSNamesystem . LOG . info ( "F4: Initialized stripe len to: " + this . stripeLen ) ; }
This function initializes configuration for the supported parities .
32,481
/**
 * Builds, for one stripe, the map from rack name to the set of hosts in
 * that rack holding the stripe's blocks, merging the source file's blocks
 * with the parity file's blocks when both paths are given.  Logs a warning
 * for any rack hosting more than one node of the stripe.
 */
private HashMap < String , HashSet < Node > > getRackToHostsMapForStripe ( String srcFileName , String parityFileName , int stripeLen , int parityLen , int stripeIndex ) throws IOException { HashMap < String , HashSet < Node > > rackToHosts = new HashMap < String , HashSet < Node > > ( ) ; if ( srcFileName != null ) { rackToHosts = getRackToHostsMapForStripe ( srcFileName , stripeIndex , stripeLen ) ; } if ( parityFileName != null ) { HashMap < String , HashSet < Node > > rackToHostsForParity = getRackToHostsMapForStripe ( parityFileName , stripeIndex , parityLen ) ; for ( Map . Entry < String , HashSet < Node > > e : rackToHostsForParity . entrySet ( ) ) { HashSet < Node > nodes = rackToHosts . get ( e . getKey ( ) ) ; if ( nodes == null ) { nodes = new HashSet < Node > ( ) ; rackToHosts . put ( e . getKey ( ) , nodes ) ; } for ( Node n : e . getValue ( ) ) { nodes . add ( n ) ; } } } for ( Map . Entry < String , HashSet < Node > > e : rackToHosts . entrySet ( ) ) { if ( e . getValue ( ) . size ( ) > 1 ) { FSNamesystem . LOG . warn ( "F4: Rack " + e . getKey ( ) + " being overused for stripe: " + stripeIndex ) ; } } return rackToHosts ; }
Returns a map from the racks where the stripe's blocks reside to the hosts within those racks that host those blocks.
32,482
/**
 * Chooses a target from the least-occupied racks first: sorts the racks
 * with RackComparator, shuffles the top quarter to break ties randomly,
 * and delegates to the per-rack overload until a node is found.
 *
 * @return true when a good node was found and appended to {@code results}
 */
private boolean getGoodNode ( HashMap < String , HashSet < Node > > candidateNodesByRacks , boolean considerLoad , long blockSize , List < DatanodeDescriptor > results ) { List < Map . Entry < String , HashSet < Node > > > sorted = new ArrayList < Map . Entry < String , HashSet < Node > > > ( ) ; for ( Map . Entry < String , HashSet < Node > > entry : candidateNodesByRacks . entrySet ( ) ) { sorted . add ( entry ) ; } Collections . sort ( sorted , new RackComparator ( blockSize ) ) ; int count = sorted . size ( ) / 4 ; Collections . shuffle ( sorted . subList ( 0 , count ) ) ; for ( Map . Entry < String , HashSet < Node > > e : sorted ) { if ( getGoodNode ( e . getValue ( ) , considerLoad , blockSize , results ) ) { return true ; } } return false ; }
Helper function to choose less occupied racks first .
32,483
/**
 * Finds a good target among the candidate nodes: sorts them by remaining
 * space adjusted for already-scheduled blocks (most free first), shuffles
 * the top half to break ties randomly, and returns the first node passing
 * isGoodTarget, appending it to {@code results}.
 *
 * @return true when a good node was found
 */
private boolean getGoodNode ( Set < Node > candidateNodes , boolean considerLoad , long blockSize , List < DatanodeDescriptor > results ) { List < DatanodeDescriptor > sorted = new ArrayList < DatanodeDescriptor > ( ) ; for ( Node n : candidateNodes ) { sorted . add ( ( DatanodeDescriptor ) n ) ; } final long blocksize = blockSize ; Collections . sort ( sorted , new Comparator < DatanodeDescriptor > ( ) { public int compare ( DatanodeDescriptor n1 , DatanodeDescriptor n2 ) { long ret = ( n2 . getRemaining ( ) - ( n2 . getBlocksScheduled ( ) * blocksize ) ) - ( n1 . getRemaining ( ) - ( n1 . getBlocksScheduled ( ) * blocksize ) ) ; return ret == 0 ? 0 : ( ret > 0 ) ? 1 : - 1 ; } } ) ; int count = sorted . size ( ) / 2 ; Collections . shuffle ( sorted . subList ( 0 , count ) ) ; for ( DatanodeDescriptor n : sorted ) { if ( this . isGoodTarget ( ( DatanodeDescriptor ) n , blocksize , 1 , considerLoad , results ) ) { results . add ( ( DatanodeDescriptor ) n ) ; return true ; } } return false ; }
Helper function to find a good node . Returns true if found .
32,484
/**
 * Validates the shared image storage URI: it must either be local file
 * storage, or a QJM URI identical to the shared edits URI.
 *
 * @return an empty string when valid, otherwise an error message
 */
private static String checkImageStorage ( URI sharedImage , URI sharedEdits ) {
  String scheme = sharedImage.getScheme();
  boolean isLocalFile = scheme.equals(NNStorage.LOCAL_URI_SCHEME);
  boolean isQjmWithEdits = scheme.equals(QuorumJournalManager.QJM_URI_SCHEME)
      && sharedImage.equals(sharedEdits);
  if (isLocalFile || isQjmWithEdits) {
    return "";
  }
  return "Shared image uri: " + sharedImage + " must be either file storage"
      + " or be equal to shared edits storage " + sharedEdits + ". ";
}
Shared image needs to be in file storage or QJM, provided that QJM also stores the edits.
32,485
/**
 * Verifies that every non-shared storage URI uses the "file" scheme,
 * since Avatar only supports local-file non-shared storage.
 *
 * Fixes over the original: uses equals on a constant (avoiding an NPE
 * when a URI has no scheme, and the unidiomatic compareTo != 0), adds
 * braces, and restores the missing space between the two sentences of
 * the error message.
 *
 * @return an empty string when all URIs are file URIs, otherwise an error
 */
private static String checkFileURIScheme ( Collection < URI > uris ) {
  String fileScheme = JournalType.FILE.name().toLowerCase();
  for (URI uri : uris) {
    if (!fileScheme.equals(uri.getScheme())) {
      return "The specified path is not a file. "
          + "Avatar supports file non-shared storage only... ";
    }
  }
  return "";
}
For non - shared storage we enforce file uris
32,486
/**
 * Opens a file output stream for a new checkpoint image with the given
 * transaction id in this storage directory's current dir.
 */
public OutputStream getCheckpointOutputStream ( long imageTxId ) throws IOException { String fileName = NNStorage . getCheckpointImageFileName ( imageTxId ) ; return new FileOutputStream ( new File ( sd . getCurrentDir ( ) , fileName ) ) ; }
Get file output stream
32,487
/**
 * Rolls the checkpointed image in one storage directory: renames
 * IMAGE_NEW(txid) to IMAGE(txid), deleting a stale target first if the
 * initial rename fails; throws when the rename cannot be completed.
 */
private static void renameCheckpointInDir ( StorageDirectory sd , long txid ) throws IOException { File ckpt = NNStorage . getStorageFile ( sd , NameNodeFile . IMAGE_NEW , txid ) ; File curFile = NNStorage . getStorageFile ( sd , NameNodeFile . IMAGE , txid ) ; LOG . info ( "Renaming " + ckpt . getAbsolutePath ( ) + " to " + curFile . getAbsolutePath ( ) ) ; if ( ! ckpt . renameTo ( curFile ) ) { if ( ! curFile . delete ( ) || ! ckpt . renameTo ( curFile ) ) { throw new IOException ( "renaming " + ckpt . getAbsolutePath ( ) + " to " + curFile . getAbsolutePath ( ) + " FAILED" ) ; } } }
Rolls checkpointed image .
32,488
/**
 * Reports a failed storage directory to the underlying storage.
 * NNStorage gets the structured error callback; other Storage
 * implementations just log the failure.
 *
 * Fix: corrected the typo "direcory" in the error log message.
 */
private void reportError ( StorageDirectory sd ) {
  if (storage instanceof NNStorage) {
    ((NNStorage) storage).reportErrorsOnDirectory(sd, null);
  } else {
    LOG.error("Failed directory: " + sd.getCurrentDir());
  }
}
Reports error to underlying storage .
32,489
/**
 * Snapshots the pool's resource metadata (min/max/granted/requested) for
 * metrics collection.  Returns null for pool groups (no pool name) or
 * when any of the four counters is missing.
 */
public ResourceMetadata getResourceMetadata ( ) { if ( poolInfo . getPoolName ( ) == null || ! counters . containsKey ( MetricName . MIN ) || ! counters . containsKey ( MetricName . MAX ) || ! counters . containsKey ( MetricName . GRANTED ) || ! counters . containsKey ( MetricName . REQUESTED ) ) { return null ; } return new ResourceMetadata ( PoolInfo . createStringFromPoolInfo ( poolInfo ) , counters . get ( MetricName . MIN ) . intValue ( ) , counters . get ( MetricName . MAX ) . intValue ( ) , counters . get ( MetricName . GRANTED ) . intValue ( ) , counters . get ( MetricName . REQUESTED ) . intValue ( ) ) ; }
Get a snapshot of the resource metadata for this pool . Used for collecting metrics . Will not collect resource metadata for PoolGroup objects or if any counters are missing .
32,490
/**
 * Pushes every counter into the metrics record under the name
 * "&lt;metric&gt;_&lt;type&gt;" (lower-cased) and flushes the record.
 */
public void updateMetricsRecord ( ) {
  for (MetricName metric : counters.keySet()) {
    String metricName = (metric + "_" + type).toLowerCase();
    record.setMetric(metricName, counters.get(metric));
  }
  record.update();
}
Update the metrics record associated with this object .
32,491
/**
 * Configures all chain elements for the task: instantiates each chained
 * mapper (and the optional reducer) from its class and per-element
 * configuration, and records the key/value serializations to use when the
 * element passes records by value (null entries mean pass-by-reference).
 */
public void configure ( JobConf jobConf ) { String prefix = getPrefix ( isMap ) ; chainJobConf = jobConf ; SerializationFactory serializationFactory = new SerializationFactory ( chainJobConf ) ; int index = jobConf . getInt ( prefix + CHAIN_MAPPER_SIZE , 0 ) ; for ( int i = 0 ; i < index ; i ++ ) { Class < ? extends Mapper > klass = jobConf . getClass ( prefix + CHAIN_MAPPER_CLASS + i , null , Mapper . class ) ; JobConf mConf = getChainElementConf ( jobConf , prefix + CHAIN_MAPPER_CONFIG + i ) ; Mapper mapper = ReflectionUtils . newInstance ( klass , mConf ) ; mappers . add ( mapper ) ; if ( mConf . getBoolean ( MAPPER_BY_VALUE , true ) ) { mappersKeySerialization . add ( serializationFactory . getSerialization ( mConf . getClass ( MAPPER_OUTPUT_KEY_CLASS , null ) ) ) ; mappersValueSerialization . add ( serializationFactory . getSerialization ( mConf . getClass ( MAPPER_OUTPUT_VALUE_CLASS , null ) ) ) ; } else { mappersKeySerialization . add ( null ) ; mappersValueSerialization . add ( null ) ; } } Class < ? extends Reducer > klass = jobConf . getClass ( prefix + CHAIN_REDUCER_CLASS , null , Reducer . class ) ; if ( klass != null ) { JobConf rConf = getChainElementConf ( jobConf , prefix + CHAIN_REDUCER_CONFIG ) ; reducer = ReflectionUtils . newInstance ( klass , rConf ) ; if ( rConf . getBoolean ( REDUCER_BY_VALUE , true ) ) { reducerKeySerialization = serializationFactory . getSerialization ( rConf . getClass ( REDUCER_OUTPUT_KEY_CLASS , null ) ) ; reducerValueSerialization = serializationFactory . getSerialization ( rConf . getClass ( REDUCER_OUTPUT_VALUE_CLASS , null ) ) ; } else { reducerKeySerialization = null ; reducerValueSerialization = null ; } } }
Configures all the chain elements for the task .
32,492
/**
 * Closes every mapper in the chain and the reducer, if any.
 *
 * Fix: in the original, an IOException from one element's close skipped
 * closing the remaining elements (leaking their resources).  All closes
 * are now attempted; the first failure is rethrown afterwards.
 *
 * @throws IOException the first close failure encountered
 */
public void close ( ) throws IOException {
  IOException firstFailure = null;
  for (Mapper map : mappers) {
    try {
      map.close();
    } catch (IOException e) {
      if (firstFailure == null) {
        firstFailure = e;
      }
    }
  }
  if (reducer != null) {
    try {
      reducer.close();
    } catch (IOException e) {
      if (firstFailure == null) {
        firstFailure = e;
      }
    }
  }
  if (firstFailure != null) {
    throw firstFailure;
  }
}
Closes all the chain elements .
32,493
/**
 * Checks files on DFS starting from the configured path: lists the path,
 * optionally reports only corrupt/open files, otherwise checks each entry
 * and prints the aggregated FsckResult followed by a HEALTHY/CORRUPT
 * verdict (or NONEXISTENT when the path is missing).  Any throwable is
 * reported as a fsck failure; the output writer is always closed.
 */
public void fsck ( ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . NAMENODE_FSCK_START ) ; try { FileStatus [ ] files = nn . namesystem . dir . getListing ( path ) ; FsckResult res = new FsckResult ( ) ; if ( ! this . showFiles && ! this . showBlocks && ! this . showLocations && ! this . showRacks ) { res . totalRacks = nn . getNetworkTopology ( ) . getNumOfRacks ( ) ; res . totalDatanodes = nn . namesystem . getNumberOfDatanodes ( DatanodeReportType . LIVE ) ; } res . setReplication ( ( short ) conf . getInt ( "dfs.replication" , 3 ) ) ; if ( files != null ) { if ( showCorruptFileBlocks && showOpenFiles ) { listCorruptOpenFiles ( ) ; return ; } if ( showCorruptFileBlocks ) { listCorruptFileBlocks ( ) ; return ; } for ( int i = 0 ; i < files . length ; i ++ ) { check ( files [ i ] , res ) ; } out . println ( res ) ; if ( res . isHealthy ( ) ) { out . print ( "\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS ) ; } else { out . print ( "\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS ) ; } } else { out . print ( "\n\nPath '" + path + "' " + NONEXISTENT_STATUS ) ; } } catch ( Throwable e ) { String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS ; LOG . warn ( errMsg , e ) ; out . println ( e . getMessage ( ) ) ; out . print ( "\n\n" + errMsg ) ; } finally { out . close ( ) ; } }
Check files on DFS starting from the indicated path .
32,494
/**
 * Builds a human-readable report of the saved CoronaJT state: replays all
 * state updates to collect each attempt's last launch and last known run
 * state, then lists every launch with its last status, the session id,
 * and the totals of mapper/reducer work (count, CPU, wallclock) saved by
 * the job tracker failover.
 */
public String getPrettyReport ( JobID jobId ) { Map < TaskAttemptID , TaskLaunch > lastLaunch = new HashMap < TaskAttemptID , CoronaStateUpdate . TaskLaunch > ( ) ; Map < TaskAttemptID , TaskStatus . State > lastKnownStatus = new HashMap < TaskAttemptID , TaskStatus . State > ( ) ; JTFailoverMetrics jtFailoverMetrics = new JTFailoverMetrics ( ) ; for ( CoronaStateUpdate update : updates ) { if ( update . getTaskLaunch ( ) != null ) { TaskLaunch launch = update . getTaskLaunch ( ) ; lastLaunch . put ( launch . getTaskId ( ) , launch ) ; } else if ( update . getTaskStatus ( ) != null ) { TaskStatus status = update . getTaskStatus ( ) ; lastKnownStatus . put ( status . getTaskID ( ) , status . getRunState ( ) ) ; jtFailoverMetrics . update ( status ) ; } } StringBuilder result = new StringBuilder ( ) ; result . append ( "CoronaJTState report" ) ; if ( jobId != null ) { result . append ( " for job " ) . append ( jobId ) ; } for ( CoronaStateUpdate update : updates ) { TaskLaunch launch = update . getTaskLaunch ( ) ; if ( launch != null ) { result . append ( "\n" ) . append ( launch ) . append ( " last known " ) ; result . append ( lastKnownStatus . get ( launch . getTaskId ( ) ) ) ; } } if ( sessionId != null && ! sessionId . isEmpty ( ) ) { result . append ( "\n Session id " ) . append ( sessionId ) ; } result . append ( "\nThis remoteJobTracker failover totally saved: " ) ; result . append ( "\nmappers " ) . append ( jtFailoverMetrics . savedMappers ) . append ( " map cpu " ) . append ( jtFailoverMetrics . savedMapCPU ) . append ( " map wallclock " ) . append ( jtFailoverMetrics . savedMapWallclock ) ; result . append ( "\nreducers " ) . append ( jtFailoverMetrics . savedReducers ) . append ( " reduce cpu " ) . append ( jtFailoverMetrics . savedReduceCPU ) . append ( " reduce wallclock " ) . append ( jtFailoverMetrics . savedReduceWallclock ) ; return result . toString ( ) ; }
Creates pretty report of saved state
32,495
/**
 * Builds the per-scheme file system counter names for the given URI
 * scheme (upper-cased), e.g. "HDFS_BYTES_READ" for scheme "hdfs".
 * Returns seven names: bytes read/written, files created, local/rack
 * bytes read, and read/write exception counts.
 */
protected static String [ ] getFileSystemCounterNames ( String uriScheme ) {
  final String prefix = uriScheme.toUpperCase();
  final String[] suffixes = {
    "_BYTES_READ", "_BYTES_WRITTEN", "_FILES_CREATED",
    "_BYTES_READ_LOCAL", "_BYTES_READ_RACK",
    "_READ_EXCEPTIONS", "_WRITE_EXCEPTIONS",
  };
  String[] names = new String[suffixes.length];
  for (int i = 0; i < suffixes.length; i++) {
    names[i] = prefix + suffixes[i];
  }
  return names;
}
Counters to measure the usage of the different file systems . Always return the String array with two elements . First one is the name of BYTES_READ counter and second one is of the BYTES_WRITTEN counter .
32,496
/**
 * Reports the range [currentRecStartIndex, nextRecIndex] as the next
 * executing record range to the TaskTracker, both in the task status and
 * via the umbilical protocol.
 */
protected void reportNextRecordRange ( final TaskUmbilicalProtocol umbilical , long nextRecIndex ) throws IOException { long len = nextRecIndex - currentRecStartIndex + 1 ; SortedRanges . Range range = new SortedRanges . Range ( currentRecStartIndex , len ) ; taskStatus . setNextRecordRange ( range ) ; LOG . debug ( "sending reportNextRecordRange " + range ) ; umbilical . reportNextRecordRange ( taskId , range ) ; }
Reports the next executing record range to TaskTracker .
32,497
/**
 * Updates the garbage-collection counters: total GC count, total GC
 * time, old-generation heap usage after the last GC, and the running
 * maximum of that post-GC usage.
 *
 * Fix: the raw-typed {@code Iterator} with an unchecked cast over the
 * memory pool beans is replaced by a typed enhanced for loop; the
 * redundant outer {@code aftergc} variable is scoped into the loop.
 */
void updateGCcounters ( ) {
  long gccount = 0;
  long gctime = 0;
  for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
    long count = gc.getCollectionCount();
    if (count >= 0) {
      gccount += count;
    }
    long time = gc.getCollectionTime();
    if (time >= 0) {
      gctime += time;
    }
  }
  // Track the largest old-gen usage observed right after a collection.
  long maxaftergc = 0;
  for (MemoryPoolMXBean bean : ManagementFactory.getMemoryPoolMXBeans()) {
    if (!bean.getName().toUpperCase().contains("OLD GEN")) {
      continue;
    }
    MemoryUsage mu = bean.getCollectionUsage();
    if (mu == null) {
      continue;
    }
    long aftergc = mu.getUsed();
    if (aftergc > maxaftergc) {
      maxaftergc = aftergc;
    }
  }
  counters.findCounter(GC_COUNTER_GROUP, "Total number of GC").setValue(gccount);
  counters.findCounter(GC_COUNTER_GROUP, "Total time of GC in milliseconds").setValue(gctime);
  counters.findCounter(GC_COUNTER_GROUP, "Heap size after last GC in bytes").setValue(maxaftergc);
  long currentMax =
      counters.findCounter(GC_COUNTER_GROUP, "Max heap size after GC in bytes").getValue();
  if (maxaftergc > currentMax) {
    counters.findCounter(GC_COUNTER_GROUP, "Max heap size after GC in bytes").setValue(maxaftergc);
  }
}
Update counters about Garbage Collection
32,498
/**
 * Updates the resource counters (CPU time, physical/virtual memory, JVM
 * CPU time) from the resource calculator and JMX thread tracker, netting
 * out the values recorded at task start; also records per-type
 * map/reduce CPU milliseconds.  No-op when no resource calculator is
 * available.
 */
void updateResourceCounters ( ) { if ( resourceCalculator == null ) { return ; } ProcResourceValues res = resourceCalculator . getProcResourceValues ( ) ; long cpuTime = res . getCumulativeCpuTime ( ) ; long pMem = res . getPhysicalMemorySize ( ) ; long vMem = res . getVirtualMemorySize ( ) ; long cpuJvmTime = this . jmxThreadInfoTracker . getCumulativeCPUTime ( ) ; cpuTime -= initCpuCumulativeTime ; cpuJvmTime -= this . initJvmCpuCumulativeTime ; counters . findCounter ( Counter . CPU_MILLISECONDS ) . setValue ( cpuTime ) ; counters . findCounter ( Counter . PHYSICAL_MEMORY_BYTES ) . setValue ( pMem ) ; counters . findCounter ( Counter . VIRTUAL_MEMORY_BYTES ) . setValue ( vMem ) ; counters . findCounter ( Counter . CPU_MILLISECONDS_JVM ) . setValue ( cpuJvmTime ) ; if ( isMapTask ( ) ) { counters . findCounter ( MapCounter . MAP_CPU_MILLISECONDS ) . setValue ( cpuTime ) ; } else { counters . findCounter ( ReduceCounter . REDUCE_CPU_MILLISECONDS ) . setValue ( cpuTime ) ; } }
Update resource information counters
32,499
/**
 * Loads static host-name resolutions from "hadoop.net.static.resolutions"
 * (entries of the form "host=resolvedHost") into NetUtils.  Required so
 * junit testcases can simulate multiple nodes on a single physical node.
 */
public static void loadStaticResolutions ( Configuration conf ) {
  String[] mappings = conf.getStrings("hadoop.net.static.resolutions");
  if (mappings == null) {
    return;
  }
  for (String mapping : mappings) {
    // Split "host=resolvedHost" at the first '='.
    int sep = mapping.indexOf('=');
    String host = mapping.substring(0, sep);
    String resolved = mapping.substring(sep + 1);
    NetUtils.addStaticResolution(host, resolved);
  }
}
Load the static resolutions from configuration . This is required for junit to work on testcases that simulate multiple nodes on a single physical node .